Imane Momayiz committed on
Commit
ae41ba2
1 Parent(s): 9902e35

test commitscheduler

Browse files
Files changed (1) hide show
  1. app.py +26 -14
app.py CHANGED
@@ -6,6 +6,7 @@ import json
6
  import os
7
  from huggingface_hub import HfApi, CommitScheduler
8
  import uuid
 
9
 
10
 
11
  HF_API_KEY = os.environ.get("HF_TOKEN", None)
@@ -19,14 +20,21 @@ submissions_folder = "submissions"
19
  submissions_file = os.path.join(submissions_folder, f"submissions_{uuid.uuid4()}.json")
20
  os.makedirs(submissions_folder, exist_ok=True)
21
 
22
- scheduler = CommitScheduler(
23
- token=HF_API_KEY,
24
- repo_id=REPO_ID,
25
- repo_type="dataset",
26
- folder_path=submissions_folder,
27
- path_in_repo="submissions",
28
- every=1,
29
- )
 
 
 
 
 
 
 
30
 
31
  def load_data(repo_id):
32
  dataset = load_dataset(f'{repo_id}', name='sentences', split='sentences')
@@ -64,13 +72,17 @@ def store_submission(api: HfApi, sentence: str, translation: str, translation_fr
64
  # repo_type="dataset",
65
  # )
66
 
67
- with scheduler.lock:
68
- with submissions_file.open("a") as f:
69
- f.write(json.dumps({
70
- "darija": translation_fr,
 
 
 
 
 
71
  "eng": translation,
72
- "darija_ar": sentence}))
73
- f.write("\n")
74
 
75
  st.success(
76
  f"""Translation submitted successfully to
 
6
  import os
7
  from huggingface_hub import HfApi, CommitScheduler
8
  import uuid
9
+ from src.components import ParquetScheduler
10
 
11
 
12
  HF_API_KEY = os.environ.get("HF_TOKEN", None)
 
20
  submissions_file = os.path.join(submissions_folder, f"submissions_{uuid.uuid4()}.json")
21
  os.makedirs(submissions_folder, exist_ok=True)
22
 
23
+ # scheduler = CommitScheduler(
24
+ # token=HF_API_KEY,
25
+ # repo_id=REPO_ID,
26
+ # repo_type="dataset",
27
+ # folder_path=submissions_folder,
28
+ # path_in_repo="submissions",
29
+ # every=1,
30
+ # )
31
+
32
+ # Define the ParquetScheduler instance with your repo details
33
+ scheduler = ParquetScheduler(repo_id=REPO_ID,
34
+ token=HF_API_KEY, every=1,
35
+ path_in_repo=submissions_folder,
36
+ repo_type="dataset")
37
+
38
 
39
  def load_data(repo_id):
40
  dataset = load_dataset(f'{repo_id}', name='sentences', split='sentences')
 
72
  # repo_type="dataset",
73
  # )
74
 
75
+ # with scheduler.lock:
76
+ # with submissions_file.open("a") as f:
77
+ # f.write(json.dumps({
78
+ # "darija": translation_fr,
79
+ # "eng": translation,
80
+ # "darija_ar": sentence}))
81
+ # f.write("\n")
82
+
83
+ scheduler.append({"darija": translation_fr,
84
  "eng": translation,
85
+ "darija_ar": sentence})
 
86
 
87
  st.success(
88
  f"""Translation submitted successfully to