# PySpark

[pyspark](https://spark.apache.org/docs/latest/api/python) is the Python interface for Apache Spark, enabling large-scale data processing and real-time analytics in a distributed environment using Python.

<Tip>

For a detailed guide on how to analyze datasets on the Hub with PySpark, check out this [blog](https://huggingface.co/blog/asoria/pyspark-hugging-face-datasets).

</Tip>

To start working with Parquet files in PySpark, you'll first need to add the file(s) to a Spark context. Below is an example of how to read a single Parquet file:

```py
from pyspark import SparkFiles
from pyspark.sql import SparkSession

# Initialize a Spark session
spark = SparkSession.builder.appName("WineReviews").getOrCreate()

# Add the Parquet file to the Spark context
spark.sparkContext.addFile("https://huggingface.co/api/datasets/james-burton/wine_reviews/parquet/default/train/0.parquet")

# Read the Parquet file into a DataFrame
df = spark.read.parquet(SparkFiles.get("0.parquet"))
```
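`addFile` downloads the file to every node, and `SparkFiles.get` resolves its local path, so you can sanity-check the load before going further. A minimal check (the printed path is machine-specific):

```py
from pyspark import SparkFiles

# Local path where addFile staged the downloaded file (machine-specific)
print(SparkFiles.get("0.parquet"))

# Confirm the DataFrame loaded by counting its rows
print(df.count(), "rows")
```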
If your dataset is sharded into multiple Parquet files, you'll need to add each file to the Spark context individually. Here's how to do it:

```py
import requests

# Fetch the URLs of the Parquet files for the train split
r = requests.get('https://huggingface.co/api/datasets/james-burton/wine_reviews/parquet')
train_parquet_files = r.json()['default']['train']

# Add each Parquet file to the Spark context
for url in train_parquet_files:
    spark.sparkContext.addFile(url)

# Read all Parquet files into a single DataFrame
df = spark.read.parquet(SparkFiles.getRootDirectory() + "/*.parquet")
```
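Note that the `/parquet` endpoint returns a JSON object mapping config names to splits to lists of file URLs, so you can also enumerate what's available before choosing a split. A short sketch reusing the same endpoint (the configs and splits you see depend on the dataset):

```py
import requests

# The response maps config names -> split names -> lists of Parquet URLs
r = requests.get('https://huggingface.co/api/datasets/james-burton/wine_reviews/parquet')
for config, splits in r.json().items():
    for split, files in splits.items():
        print(f"{config}/{split}: {len(files)} file(s)")
```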
Once you've loaded the data into a PySpark DataFrame, you can perform various operations to explore and analyze it:

```py
# Show the number of rows and columns
print(f"Shape of the dataset: {df.count()}, {len(df.columns)}")

# Display the first 10 rows
df.show(n=10)

# Get a statistical summary of the data
df.describe().show()

# Print the schema of the DataFrame
df.printSchema()
```
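Beyond these basics, the full DataFrame API is available for aggregations and SQL-style queries. As an illustrative sketch, here's an aggregation over the wine reviews data; the `country` and `points` column names are assumptions about this dataset's schema, so adjust them to whatever `df.printSchema()` reports:

```py
from pyspark.sql import functions as F

# Average points per country, highest first
# (assumes the dataset has `country` and `points` columns)
df.groupBy("country") \
  .agg(F.avg("points").alias("avg_points")) \
  .orderBy(F.desc("avg_points")) \
  .show(n=10)

# The same query expressed in Spark SQL via a temporary view
df.createOrReplaceTempView("wine_reviews")
spark.sql("""
    SELECT country, AVG(points) AS avg_points
    FROM wine_reviews
    GROUP BY country
    ORDER BY avg_points DESC
""").show(n=10)
```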
