import os
import json
import random
import string
import subprocess
import tempfile
import logging
import argparse

from github import Github
from git import Repo
from datasets import load_dataset, Dataset
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Read GitHub API token from environment variable
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
HF_TOKEN = os.environ.get("HF_TOKEN")
if not GITHUB_TOKEN:
    logger.error("GITHUB_TOKEN environment variable is not set.")
    raise ValueError("GITHUB_TOKEN environment variable is not set. Please set it before running the script.")

if not HF_TOKEN:
    logger.error("HF_TOKEN environment variable is not set.")
    raise ValueError("HF_TOKEN environment variable is not set. Please set it before running the script.")
# Initialize GitHub API client
g = Github(GITHUB_TOKEN)

def search_top_repos():
    """Search for top 100 Python repositories with at least 1000 stars and 100 forks."""
    logger.info("Searching for top 100 Python repositories...")
    query = "language:python stars:>=1000 forks:>=100"
    repos = g.search_repositories(query=query, sort="stars", order="desc")
    top_repos = list(repos[:100])
    logger.info(f"Found {len(top_repos)} repositories")
    return top_repos
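
# Note: search_repositories() returns a paginated result that is fetched lazily,
# so slicing the first 100 entries should only request the pages it needs; using
# the authenticated client above also gives a much higher API rate limit than
# anonymous access.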

def clone_repo(repo, tmp_dir):
    """Clone a repository to a temporary directory."""
    logger.info(f"Cloning repository: {repo.full_name}")
    repo_dir = os.path.join(tmp_dir, repo.name)
    Repo.clone_from(repo.clone_url, repo_dir)
    logger.info(f"Repository cloned to {repo_dir}")
    return repo_dir

def run_semgrep(repo_dir):
    """Run Semgrep on the repository and return the parsed JSON output."""
    logger.info(f"Running Semgrep on {repo_dir}")
    # Pass arguments as a list to avoid shell quoting issues with the repo path.
    cmd = ["semgrep", "scan", "--config", "auto", "--json", repo_dir]
    result = subprocess.run(cmd, capture_output=True, text=True)
    logger.info("Semgrep scan completed")
    if not result.stdout:
        logger.error(f"Semgrep produced no output (stderr: {result.stderr.strip()})")
        return {}
    return json.loads(result.stdout)
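
# For reference, the Semgrep JSON output (the exact schema varies by Semgrep
# version) contains the fields consumed below, roughly:
#   {"results": [{"path": "path/to/file.py",
#                 "extra": {"metadata": {"cwe": "CWE-89: SQL Injection"}}}, ...]}
# Only "path" and "extra.metadata.cwe" are read by extract_vulnerable_files().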

def extract_vulnerable_files(semgrep_output):
    """Extract files with exactly one vulnerability and their CWE."""
    logger.info("Extracting vulnerable files from Semgrep output")
    vulnerable_files = {}
    total_vulns = 0
    for result in semgrep_output.get("results", []):
        file_path = result.get("path")
        cwe = result.get("extra", {}).get("metadata", {}).get("cwe", "Unknown")
        if file_path not in vulnerable_files:
            vulnerable_files[file_path] = {"count": 0, "cwe": cwe}
        vulnerable_files[file_path]["count"] += 1
        total_vulns += 1
    single_vulnerability_files = {file: info["cwe"] for file, info in vulnerable_files.items() if info["count"] == 1}
    logger.info(f"Found {total_vulns} total vulnerabilities")
    logger.info(f"Found {len(single_vulnerability_files)} files with exactly one vulnerability")
    return single_vulnerability_files, total_vulns

def count_tokens(text):
    """Approximate token count using whitespace splitting."""
    return len(text.split())
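
# Note: whitespace splitting only approximates true (sub-word) token counts, so the
# 512-1024 "token" window applied below is a rough size filter rather than an exact
# model-tokenizer bound.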

def generate_random_filename():
    """Generate a random 6-digit filename with .py extension."""
    return ''.join(random.choices(string.digits, k=6)) + ".py"

def process_repository(repo, output_file):
    """Process a single repository and append new data items to the output file."""
    logger.info(f"Processing repository: {repo.full_name}")
    with tempfile.TemporaryDirectory() as tmp_dir:
        repo_dir = clone_repo(repo, tmp_dir)
        semgrep_output = run_semgrep(repo_dir)
        vulnerable_files, total_vulns = extract_vulnerable_files(semgrep_output)
        items_added = 0
        for file_path, cwe in vulnerable_files.items():
            if items_added >= 3:
                logger.info(f"Reached maximum of 3 items for repository {repo.full_name}. Stopping processing.")
                break
            full_path = os.path.join(repo_dir, file_path)
            logger.info(f"Analyzing file: {file_path}")
            # Read leniently so one badly encoded file does not abort the whole repository.
            with open(full_path, 'r', encoding='utf-8', errors='replace') as f:
                source_code = f.read()
            token_count = count_tokens(source_code)
            if 512 <= token_count <= 1024:
                new_item = {
                    "source": source_code,
                    "file_name": generate_random_filename(),
                    "cwe": cwe
                }
                with open(output_file, 'a') as f:
                    json.dump(new_item, f)
                    f.write('\n')
                items_added += 1
                logger.info(f"Added new item with CWE: {cwe}")
            else:
                logger.info(f"File skipped: token count ({token_count}) out of range")
    logger.info(f"Processed {repo.full_name}: found {total_vulns} vulnerabilities, added {items_added} new items")

def preprocess_data(data):
    """Ensure all fields are consistently typed across all items."""
    if not data:
        return data
    # Identify fields that are sometimes lists
    list_fields = set()
    for item in data:
        for key, value in item.items():
            if isinstance(value, list):
                list_fields.add(key)
    # Ensure these fields are always lists
    for item in data:
        for key in list_fields:
            if key not in item:
                item[key] = []
            elif not isinstance(item[key], list):
                item[key] = [item[key]]
    return data
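
# Illustrative (hypothetical) example of why normalization is needed: Semgrep rules
# may report "cwe" either as a single string or as a list of strings, so records like
#   [{"cwe": "CWE-79"}, {"cwe": ["CWE-89", "CWE-943"]}]
# are normalized to
#   [{"cwe": ["CWE-79"]}, {"cwe": ["CWE-89", "CWE-943"]}]
# which keeps Dataset.from_list() from failing on a mixed-type column.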

def merge_and_push_dataset(jsonl_file, new_dataset_name):
    """Push the collected JSONL data to Hugging Face."""
    logger.info("Starting dataset push process")
    # Load the new data from the JSONL file
    logger.info("Loading new data from JSONL file")
    with open(jsonl_file, 'r') as f:
        new_data = [json.loads(line) for line in f]
    logger.info(f"Loaded {len(new_data)} records from JSONL file")
    # Preprocess the data
    logger.info("Preprocessing data")
    preprocessed_data = preprocess_data(new_data)
    # Create dataset from the preprocessed data
    logger.info("Creating dataset")
    try:
        dataset = Dataset.from_list(preprocessed_data)
    except Exception as e:
        logger.error(f"Error creating dataset: {str(e)}")
        raise  # Without a dataset there is nothing to push.
    # Push the dataset to the new repository
    logger.info(f"Pushing dataset with {len(dataset)} records to Hugging Face")
    dataset.push_to_hub(new_dataset_name, private=True, token=HF_TOKEN)
    logger.info("Dataset push process completed")

def main():
    parser = argparse.ArgumentParser(description="Extend and upload static-analysis-eval dataset")
    parser.add_argument("--push_to_dataset", help="Merge and push dataset to specified Hugging Face repository")
    args = parser.parse_args()
    if args.push_to_dataset:
        # Merge and push the dataset
        jsonl_file = "static_analysis_eval.jsonl"
        merge_and_push_dataset(jsonl_file, args.push_to_dataset)
    else:
        # Perform the regular dataset extension process
        output_file = "static_analysis_eval.jsonl"
        logger.info(f"Starting dataset extension process. Output file: {output_file}")
        # Ensure the output file exists
        open(output_file, 'a').close()
        top_repos = search_top_repos()
        for i, repo in enumerate(top_repos, 1):
            try:
                logger.info(f"Processing repository {i} of {len(top_repos)}: {repo.full_name}")
                process_repository(repo, output_file)
            except Exception as e:
                logger.error(f"Error processing repository {repo.full_name}: {str(e)}", exc_info=True)
        logger.info("Dataset extension process completed")

if __name__ == "__main__":
    main()
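
# Example usage (assuming this script is saved as extend_dataset.py and that the
# semgrep CLI is installed and on PATH):
#   export GITHUB_TOKEN=<github token>
#   export HF_TOKEN=<hugging face token>
#   python extend_dataset.py                                  # extend static_analysis_eval.jsonl
#   python extend_dataset.py --push_to_dataset <user>/<repo>  # push the JSONL to Hugging Face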