Upload preprocess.py
preprocess.py
ADDED
@@ -0,0 +1,84 @@
import os
import json
import requests
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

# Get the GitHub token from the environment variable
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")

# Set the GitHub repository URLs
repo_urls = [
    "https://github.com/gizatechxyz/orion",
    "https://github.com/gizatechxyz/Giza-Hub",
    "https://github.com/zkonduit/ezkl",
    "https://github.com/socathie/keras2circom",
    "https://github.com/socathie/circomlib-ml",
    "https://github.com/worldcoin/proto-neural-zkp",
    "https://github.com/Modulus-Labs/RockyBot",
    "https://github.com/ora-io/keras2circom",
    "https://github.com/zk-ml/tachikoma",
    "https://github.com/only4sim/ZK-DTP",
    "https://github.com/ddkang/zkml",
    "https://github.com/socathie/ZKaggleV2"
]

# Set the output file name
output_file = "dataset.json"

# Initialize an empty list to store the dataset
dataset = []

+
def retrieve_files(repo_url, path=""):
|
35 |
+
repo_owner, repo_name = repo_url.split("/")[-2:]
|
36 |
+
api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/contents/{path}"
|
37 |
+
headers = {
|
38 |
+
"Authorization": f"Bearer {GITHUB_TOKEN}",
|
39 |
+
"Accept": "application/vnd.github.v3+json"
|
40 |
+
}
|
41 |
+
response = requests.get(api_url, headers=headers)
|
42 |
+
|
43 |
+
if response.status_code == 200:
|
44 |
+
contents = response.json()
|
45 |
+
for item in contents:
|
46 |
+
# Skip issues, commits, and pull requests
|
47 |
+
if "/issues/" in item["url"] or "/commits/" in item["url"] or "/pulls/" in item["url"]:
|
48 |
+
continue
|
49 |
+
# Skip directories and files starting with a dot
|
50 |
+
if item["name"].startswith("."):
|
51 |
+
continue
|
52 |
+
if item["type"] == "file":
|
53 |
+
if item["name"].endswith(( ".py", ".js", ".java", ".c", ".cpp", ".h", ".hpp", ".rs", "cairo", ".zkey", ".sol", ".circom", ".ejs", ".ipynb" )):
|
54 |
+
file_url = item["download_url"]
|
55 |
+
file_response = requests.get(file_url)
|
56 |
+
file_content = file_response.text
|
57 |
+
data_entry = {
|
58 |
+
"repo": repo_url,
|
59 |
+
"file_path": item["path"],
|
60 |
+
"content": file_content
|
61 |
+
}
|
62 |
+
dataset.append(data_entry)
|
63 |
+
print("Appended ", item["path"])
|
64 |
+
elif item["type"] == "dir":
|
65 |
+
retrieve_files(repo_url, item["path"])
|
66 |
+
else:
|
67 |
+
print(f"Failed to retrieve contents for path: {path} in repository: {repo_url}")
|
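
# Note: the fields read above ("name", "path", "type", "url", "download_url")
# come from the GitHub contents API; a typical item looks roughly like
# {"name": "lib.rs", "path": "src/lib.rs", "type": "file",
#  "url": "https://api.github.com/repos/<owner>/<repo>/contents/src/lib.rs",
#  "download_url": "https://raw.githubusercontent.com/<owner>/<repo>/main/src/lib.rs"}
# (values here are illustrative placeholders, not taken from the repositories above)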

# Load existing dataset if the file exists
if os.path.exists(output_file):
    with open(output_file, "r") as file:
        existing_dataset = json.load(file)
        dataset.extend(existing_dataset)

# Iterate over each repository URL
for repo_url in repo_urls:
    print("Scraping ", repo_url)
    retrieve_files(repo_url)

# Write the dataset to the output file in JSON format
with open(output_file, "w") as file:
    json.dump(dataset, file, indent=4)

print(f"Dataset created successfully. Saved to {output_file}.")
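
For reference, a minimal sketch of inspecting the output (assuming the script above has run with a GITHUB_TOKEN set in a local .env file); the field names match the data_entry dict built in retrieve_files:

import json

with open("dataset.json", "r") as f:
    dataset = json.load(f)

print(len(dataset), "files collected")
# Each entry carries the source repo, the path within it, and the raw file text
print(dataset[0]["repo"], dataset[0]["file_path"], len(dataset[0]["content"]))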