woods-today committed
Commit 278270a · 1 Parent(s): dfcd6b0

Working on it
Files changed (2)
  1. endpoints.py +1 -1
  2. routers/training.py +2 -74
endpoints.py CHANGED
@@ -16,7 +16,7 @@ app.add_middleware(
     allow_credentials=True,
 )
 
-app.include_router(inference.router, prefix="/api-inference/v1/sparrow-ml", tags=["Inference"])
+# app.include_router(inference.router, prefix="/api-inference/v1/sparrow-ml", tags=["Inference"])
 app.include_router(training.router, prefix="/api-training/v1/sparrow-ml", tags=["Training"])
 
 
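The net effect of this hunk is that only the training router stays mounted; everything under /api-inference/v1/sparrow-ml is no longer registered on the app. A minimal sketch for confirming the registered paths, assuming endpoints.py exposes the FastAPI instance as `app` (as the diff above suggests):

# Sketch: list the routes that remain after commenting out the inference router.
# Assumes endpoints.py exposes the FastAPI instance as `app`, as in the diff above.
from endpoints import app

for route in app.routes:
    print(route.path)
# Expected to show only /api-training/v1/sparrow-ml/... paths (plus the default
# docs/openapi routes), since the inference include_router call is commented out.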
routers/training.py CHANGED
@@ -9,79 +9,7 @@ import utils
 
 router = APIRouter()
 
-
-def invoke_training(max_epochs, val_check_interval, warmup_steps, model_in_use, sparrow_key):
-    if sparrow_key != settings.sparrow_key:
-        return {"error": "Invalid Sparrow key."}
-
-    if model_in_use == 'donut':
-        processing_time = run_training_donut(max_epochs, val_check_interval, warmup_steps)
-        utils.log_stats(settings.training_stats_file, [processing_time, settings.model])
-        print(f"Processing time training: {processing_time:.2f} seconds")
-
-
-@router.post("/training")
-async def run_training(background_tasks: BackgroundTasks,
-                       max_epochs: int = Form(30),
-                       val_check_interval: float = Form(0.4),
-                       warmup_steps: int = Form(81),
-                       model_in_use: str = Form('donut'),
-                       sparrow_key: str = Form(None)):
-
-    background_tasks.add_task(invoke_training, max_epochs, val_check_interval, warmup_steps, model_in_use, sparrow_key)
-
-    return {"message": "Sparrow ML training started in the background"}
-
-
-def invoke_evaluate(model_in_use, sparrow_key):
-    if sparrow_key != settings.sparrow_key:
-        return {"error": "Invalid Sparrow key."}
-
-    if model_in_use == 'donut':
-        scores, accuracy, processing_time = run_evaluate_donut()
-        utils.log_stats(settings.evaluate_stats_file, [processing_time, scores, accuracy, settings.model])
-        print(f"Processing time evaluate: {processing_time:.2f} seconds")
-
-
-@router.post("/evaluate")
-async def run_evaluate(background_tasks: BackgroundTasks,
-                       model_in_use: str = Form('donut'),
-                       sparrow_key: str = Form(None)):
-
-    background_tasks.add_task(invoke_evaluate, model_in_use, sparrow_key)
-
-    return {"message": "Sparrow ML model evaluation started in the background"}
-
-
-@router.get("/statistics/training")
+@router.get("/hi")
 async def get_statistics_training():
-    file_path = settings.training_stats_file
-
-    # Check if the file exists, and read its content
-    if os.path.exists(file_path):
-        with open(file_path, 'r') as file:
-            try:
-                content = json.load(file)
-            except json.JSONDecodeError:
-                content = []
-    else:
-        content = []
-
+    content = ["HI"]
     return content
-
-
-@router.get("/statistics/evaluate")
-async def get_statistics_evaluate():
-    file_path = settings.evaluate_stats_file
-
-    # Check if the file exists, and read its content
-    if os.path.exists(file_path):
-        with open(file_path, 'r') as file:
-            try:
-                content = json.load(file)
-            except json.JSONDecodeError:
-                content = []
-    else:
-        content = []
-
-    return content
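With both changes applied, routers/training.py is reduced to a temporary stub: the training, evaluate, and statistics/evaluate endpoints are removed, and get_statistics_training is now served at /hi and returns a hard-coded list. A minimal sketch for exercising the stub with FastAPI's TestClient, assuming `app` comes from endpoints.py and the test client's httpx dependency is installed:

# Sketch: call the temporary /hi stub through the training router prefix.
# The prefix comes from endpoints.py; the route and response come from the diff above.
from fastapi.testclient import TestClient

from endpoints import app

client = TestClient(app)

resp = client.get("/api-training/v1/sparrow-ml/hi")
print(resp.status_code)  # 200 if the stub is mounted as shown
print(resp.json())       # ["HI"]

Because the router is mounted with the /api-training/v1/sparrow-ml prefix in endpoints.py, the stub is reachable at that full path rather than at /hi alone.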