Rakshitjan committed
Commit c630e1d · verified · 1 Parent(s): 9fec952

Upload 3 files

Files changed (3):
  1. Dockerfile +14 -0
  2. main.py +569 -0
  3. requirements.txt +5 -0
Dockerfile ADDED
@@ -0,0 +1,14 @@
+ # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ # you will also find guides on how best to write your Dockerfile
+
+ FROM python:3.9
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ COPY . .
+
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
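
Once built, the image serves the FastAPI app defined in main.py on port 7860. A minimal local smoke test, assuming the container is running and the Google credential variables are set (the image tag, the .env file, and the requests package are assumptions; requests is not in requirements.txt):

import requests

# Assumes the container was started along these lines:
#   docker build -t scores-api .
#   docker run -p 7860:7860 --env-file .env scores-api
resp = requests.post(
    "http://localhost:7860/get_sorted_scores",
    json={"coachingCode": "1919"},  # the only code select_files() in main.py accepts
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["sorted_scores"][:3])  # top three students by potential score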
main.py ADDED
@@ -0,0 +1,569 @@
+ # # Import necessary libraries
+ # from fastapi import FastAPI, HTTPException
+ # from pydantic import BaseModel
+ # import gspread
+ # from google.oauth2.service_account import Credentials
+ # import pandas as pd
+ # from collections import defaultdict
+ # import os
+
+ # # Initialize the FastAPI app
+ # app = FastAPI()
+
+ # # Step 1: Define a function to get Google Sheets API credentials
+ # def get_credentials():
+ #     """Get Google Sheets API credentials from environment variables."""
+ #     try:
+ #         # Construct the service account info dictionary
+ #         service_account_info = {
+ #             "type": os.getenv("SERVICE_ACCOUNT_TYPE"),
+ #             "project_id": os.getenv("PROJECT_ID"),
+ #             "private_key_id": os.getenv("PRIVATE_KEY_ID"),
+ #             "private_key": os.getenv("PRIVATE_KEY").replace('\\n', '\n'),
+ #             "client_email": os.getenv("CLIENT_EMAIL"),
+ #             "client_id": os.getenv("CLIENT_ID"),
+ #             "auth_uri": os.getenv("AUTH_URI"),
+ #             "token_uri": os.getenv("TOKEN_URI"),
+ #             "auth_provider_x509_cert_url": os.getenv("AUTH_PROVIDER_X509_CERT_URL"),
+ #             "client_x509_cert_url": os.getenv("CLIENT_X509_CERT_URL"),
+ #             "universe_domain": os.getenv("UNIVERSE_DOMAIN")
+ #         }
+ #         scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
+ #         creds = Credentials.from_service_account_info(service_account_info, scopes=scope)
+ #         return creds
+
+ #     except Exception as e:
+ #         print(f"Error getting credentials: {e}")
+ #         return None
+
+ # # Step 2: Authorize gspread using the credentials
+ # creds = get_credentials()
+ # client = gspread.authorize(creds)
+
+ # # Input the paths and coaching code
+ # journal_file_path = ''
+ # panic_button_file_path = ''
+ # test_file_path = ''
+ # coachingCode = '1919'
+
+ # if coachingCode == '1919':
+ #     journal_file_path = 'https://docs.google.com/spreadsheets/d/1EFf2lr4A10nt4RhIqxCD_fxe-l3sXH09II0TEkMmvhA/edit?usp=drive_link'
+ #     panic_button_file_path = 'https://docs.google.com/spreadsheets/d/1nFZGkCvRV6qS-mhsORhX3dxI0JSge32_UwWgWKl3eyw/edit?usp=drive_link'
+ #     test_file_path = 'https://docs.google.com/spreadsheets/d/13PUHySUXWtKBusjugoe7Dbsm39PwBUfG4tGLipspIx4/edit?usp=drive_link'
+
+ # # Step 3: Open Google Sheets using the URLs
+ # journal_file = client.open_by_url(journal_file_path).worksheet('Sheet1')
+ # panic_button_file = client.open_by_url(panic_button_file_path).worksheet('Sheet1')  # Fixed missing part
+ # test_file = client.open_by_url(test_file_path).worksheet('Sheet1')
+
+ # # Step 4: Convert the sheets into Pandas DataFrames
+ # journal_df = pd.DataFrame(journal_file.get_all_values())
+ # panic_button_df = pd.DataFrame(panic_button_file.get_all_values())
+ # test_df = pd.DataFrame(test_file.get_all_values())
+
+ # # Label the columns manually since there are no headers
+ # journal_df.columns = ['user_id', 'productivity_yes_no', 'productivity_rate']
+ # panic_button_df.columns = ['user_id', 'panic_button']
+
+ # # Initialize a list for the merged data
+ # merged_data = []
+
+ # # Step 5: Group panic buttons by user_id and combine into a single comma-separated string
+ # panic_button_grouped = panic_button_df.groupby('user_id')['panic_button'].apply(lambda x: ','.join(x)).reset_index()
+
+ # # Merge journal and panic button data
+ # merged_journal_panic = pd.merge(journal_df, panic_button_grouped, on='user_id', how='outer')
+
+ # # Step 6: Process the test data
+ # test_data = []
+ # for index, row in test_df.iterrows():
+ #     user_id = row[0]
+ #     i = 1
+ #     while i < len(row) and pd.notna(row[i]):  # Process chapter and score pairs
+ #         chapter = row[i].lower().strip()
+ #         score = row[i + 1]
+ #         if pd.notna(score):
+ #             test_data.append({'user_id': user_id, 'test_chapter': chapter, 'test_score': score})
+ #         i += 2
+
+ # # Convert the processed test data into a DataFrame
+ # test_df_processed = pd.DataFrame(test_data)
+
+ # # Step 7: Merge the journal+panic button data with the test data
+ # merged_data = pd.merge(merged_journal_panic, test_df_processed, on='user_id', how='outer')
+
+ # # Step 8: Drop rows where all data (except user_id and test_chapter) is missing
+ # merged_data_cleaned = merged_data.dropna(subset=['productivity_yes_no', 'productivity_rate', 'panic_button', 'test_chapter'], how='all')
+
+ # # Group the merged DataFrame by user_id
+ # df = pd.DataFrame(merged_data_cleaned)
+
+ # # Function to process panic button counts and test scores
+ # def process_group(group):
+ #     # Panic button counts
+ #     panic_button_series = group['panic_button'].dropna()
+ #     panic_button_dict = panic_button_series.value_counts().to_dict()
+
+ #     # Test scores aggregation
+ #     test_scores = group[['test_chapter', 'test_score']].dropna()
+ #     test_scores['test_score'] = pd.to_numeric(test_scores['test_score'], errors='coerce')
+
+ #     # Create the test_scores_dict excluding NaN values
+ #     test_scores_dict = test_scores.groupby('test_chapter')['test_score'].mean().dropna().to_dict()
+
+ #     return pd.Series({
+ #         'productivity_yes_no': group['productivity_yes_no'].iloc[0],
+ #         'productivity_rate': group['productivity_rate'].iloc[0],
+ #         'panic_button': panic_button_dict,
+ #         'test_scores': test_scores_dict
+ #     })
+
+ # # Apply the group processing function
+ # merged_df = df.groupby('user_id').apply(process_group).reset_index()
+
+ # # Step 9: Calculate potential score
+ # # Panic button weightages
+ # academic_weights = {'BACKLOGS': -5, 'MISSED CLASSES': -4, 'NOT UNDERSTANDING': -3, 'BAD MARKS': -3, 'LACK OF MOTIVATION': -3}
+ # non_academic_weights = {'EMOTIONAL FACTORS': -3, 'PROCRASTINATE': -2, 'LOST INTEREST': -4, 'LACK OF FOCUS': -2, 'GOALS NOT ACHIEVED': -2, 'LACK OF DISCIPLINE': -2}
+
+ # # Max weighted panic score
+ # max_weighted_panic_score = sum([max(academic_weights.values()) * 3, max(non_academic_weights.values()) * 3])
+
+ # # Function to calculate potential score
+ # def calculate_potential_score(row):
+ #     # Test score normalization (70% weightage)
+ #     if row['test_scores']:  # Check if test_scores is not empty
+ #         avg_test_score = sum(row['test_scores'].values()) / len(row['test_scores'])
+ #         test_score_normalized = (avg_test_score / 40) * 70  # Scale test score to 70
+ #     else:
+ #         test_score_normalized = 0  # Default value for users with no test scores
+
+ #     # Panic score calculation (20% weightage)
+ #     student_panic_score = 0
+ #     if row['panic_button']:  # Ensure panic_button is not NaN or empty
+ #         for factor, count in row['panic_button'].items():
+ #             if factor in academic_weights:
+ #                 student_panic_score += academic_weights[factor] * count
+ #             elif factor in non_academic_weights:
+ #                 student_panic_score += non_academic_weights[factor] * count
+ #     else:
+ #         student_panic_score = 0  # Default if no panic button issues
+
+ #     # Panic score normalized to 20
+ #     panic_score = 20 * (1 - (student_panic_score / max_weighted_panic_score) if max_weighted_panic_score != 0 else 1)
+
+ #     # Journal score calculation (10% weightage)
+ #     if pd.notna(row['productivity_yes_no']) and row['productivity_yes_no'] == 'Yes':
+ #         if pd.notna(row['productivity_rate']):
+ #             journal_score = (float(row['productivity_rate']) / 10) * 10  # Scale journal score to 10
+ #         else:
+ #             journal_score = 0  # Default if productivity_rate is missing
+ #     elif pd.notna(row['productivity_yes_no']) and row['productivity_yes_no'] == 'No':
+ #         if pd.notna(row['productivity_rate']):
+ #             journal_score = (float(row['productivity_rate']) / 10) * 5  # Scale journal score to 5 if "No"
+ #         else:
+ #             journal_score = 0  # Default if productivity_rate is missing
+ #     else:
+ #         journal_score = 0  # Default if productivity_yes_no is missing
+
+ #     # Total score based on new weightages
+ #     total_potential_score = test_score_normalized + panic_score + journal_score
+ #     return total_potential_score
+
+ # # Apply potential score calculation to the dataframe
+ # merged_df['potential_score'] = merged_df.apply(calculate_potential_score, axis=1)
+ # merged_df['potential_score'] = merged_df['potential_score'].round(2)
+
+ # # Step 10: Sort by potential score
+ # sorted_df = merged_df[['user_id', 'potential_score']].sort_values(by='potential_score', ascending=False)
+
+ # # Step 11: Define API endpoint to get the sorted potential scores
+ # @app.get("/sorted-potential-scores")
+ # async def get_sorted_potential_scores():
+ #     try:
+ #         result = sorted_df.to_dict(orient="records")
+ #         return {"sorted_scores": result}
+ #     except Exception as e:
+ #         raise HTTPException(status_code=500, detail=str(e))
+
+
+ # Import necessary libraries
+ # from fastapi import FastAPI, HTTPException, Query
+ # from pydantic import BaseModel
+ # import gspread
+ # from google.oauth2.service_account import Credentials
+ # import pandas as pd
+ # from collections import defaultdict
+ # import os
+ # from fastapi.middleware.cors import CORSMiddleware
+ # # Initialize the FastAPI app
+ # app = FastAPI()
+ # app.add_middleware(
+ #     CORSMiddleware,
+ #     allow_origins=["*"],  # You can specify domains instead of "*" to restrict access
+ #     allow_credentials=True,
+ #     allow_methods=["*"],  # Allows all HTTP methods (POST, GET, OPTIONS, etc.)
+ #     allow_headers=["*"],  # Allows all headers
+ # )
+ # # Step 1: Define a function to get Google Sheets API credentials
+ # def get_credentials():
+ #     """Get Google Sheets API credentials from environment variables."""
+ #     try:
+ #         # Construct the service account info dictionary
+ #         service_account_info = {
+ #             "type": os.getenv("SERVICE_ACCOUNT_TYPE"),
+ #             "project_id": os.getenv("PROJECT_ID"),
+ #             "private_key_id": os.getenv("PRIVATE_KEY_ID"),
+ #             "private_key": os.getenv("PRIVATE_KEY").replace('\\n', '\n'),
+ #             "client_email": os.getenv("CLIENT_EMAIL"),
+ #             "client_id": os.getenv("CLIENT_ID"),
+ #             "auth_uri": os.getenv("AUTH_URI"),
+ #             "token_uri": os.getenv("TOKEN_URI"),
+ #             "auth_provider_x509_cert_url": os.getenv("AUTH_PROVIDER_X509_CERT_URL"),
+ #             "client_x509_cert_url": os.getenv("CLIENT_X509_CERT_URL"),
+ #             "universe_domain": os.getenv("UNIVERSE_DOMAIN")
+ #         }
+ #         scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
+ #         creds = Credentials.from_service_account_info(service_account_info, scopes=scope)
+ #         return creds
+
+ #     except Exception as e:
+ #         print(f"Error getting credentials: {e}")
+ #         return None
+
+ # # Step 2: Authorize gspread using the credentials
+ # creds = get_credentials()
+ # client = gspread.authorize(creds)
+
+ # # Function to get file paths based on coaching code
+ # def get_file_paths(coaching_code):
+ #     if coaching_code == '1919':
+ #         return {
+ #             'journal': 'https://docs.google.com/spreadsheets/d/1EFf2lr4A10nt4RhIqxCD_fxe-l3sXH09II0TEkMmvhA/edit?usp=drive_link',
+ #             'panic_button': 'https://docs.google.com/spreadsheets/d/1nFZGkCvRV6qS-mhsORhX3dxI0JSge32_UwWgWKl3eyw/edit?usp=drive_link',
+ #             'test': 'https://docs.google.com/spreadsheets/d/13PUHySUXWtKBusjugoe7Dbsm39PwBUfG4tGLipspIx4/edit?usp=drive_link'
+ #         }
+ #     if coaching_code == '0946':
+ #         return {
+ #             'journal': 'https://docs.google.com/spreadsheets/d/1c1TkL7sOUvFn6UPz3gwp135UVjOou9u1weohWzpmx6I/edit?usp=drive_link',
+ #             'panic_button': 'https://docs.google.com/spreadsheets/d/1RhbPQnNNBUthKKJyoW4q6x3uaWl1YSqmsFlfJ2THphE/edit?usp=drive_link',
+ #             'test': 'https://docs.google.com/spreadsheets/d/1JO5wDkfl2fr2ZQenI8OEu48jkWm48veYN1Fsw5Ctkzw/edit?usp=drive_link'
+ #         }
+ # # Panic button weightages
+ # academic_weights = {'BACKLOGS': -5, 'MISSED CLASSES': -4, 'NOT UNDERSTANDING': -3, 'BAD MARKS': -3, 'LACK OF MOTIVATION': -3}
+ # non_academic_weights = {'EMOTIONAL FACTORS': -3, 'PROCRASTINATE': -2, 'LOST INTEREST': -4, 'LACK OF FOCUS': -2, 'GOALS NOT ACHIEVED': -2, 'LACK OF DISCIPLINE': -2}
+
+ # # Max weighted panic score
+ # max_weighted_panic_score = sum([max(academic_weights.values()) * 3, max(non_academic_weights.values()) * 3])
+
+ # # Function to calculate potential score
+ # def calculate_potential_score(row):
+ #     # Test score normalization (70% weightage)
+ #     if row['test_scores']:  # Check if test_scores is not empty
+ #         avg_test_score = sum(row['test_scores'].values()) / len(row['test_scores'])
+ #         test_score_normalized = (avg_test_score / 40) * 70  # Scale test score to 70
+ #     else:
+ #         test_score_normalized = 0  # Default value for users with no test scores
+
+ #     # Panic score calculation (20% weightage)
+ #     student_panic_score = 0
+ #     if row['panic_button']:  # Ensure panic_button is not NaN or empty
+ #         for factor, count in row['panic_button'].items():
+ #             if factor in academic_weights:
+ #                 student_panic_score += academic_weights[factor] * count
+ #             elif factor in non_academic_weights:
+ #                 student_panic_score += non_academic_weights[factor] * count
+ #     else:
+ #         student_panic_score = 0  # Default if no panic button issues
+
+ #     # Panic score normalized to 20
+ #     panic_score = 20 * (1 - (student_panic_score / max_weighted_panic_score) if max_weighted_panic_score != 0 else 1)
+
+ #     # Journal score calculation (10% weightage)
+ #     if pd.notna(row['productivity_yes_no']) and row['productivity_yes_no'] == 'Yes':
+ #         if pd.notna(row['productivity_rate']):
+ #             journal_score = (float(row['productivity_rate']) / 10) * 10  # Scale journal score to 10
+ #         else:
+ #             journal_score = 0  # Default if productivity_rate is missing
+ #     elif pd.notna(row['productivity_yes_no']) and row['productivity_yes_no'] == 'No':
+ #         if pd.notna(row['productivity_rate']):
+ #             journal_score = (float(row['productivity_rate']) / 10) * 5  # Scale journal score to 5 if "No"
+ #         else:
+ #             journal_score = 0  # Default if productivity_rate is missing
+ #     else:
+ #         journal_score = 0  # Default if productivity_yes_no is missing
+
+ #     # Total score based on new weightages
+ #     total_potential_score = test_score_normalized + panic_score + journal_score
+ #     return total_potential_score
+
+ # # Step 11: Define API endpoint to get the sorted potential scores
+ # @app.get("/sorted-potential-scores")
+ # async def get_sorted_potential_scores(coaching_code: str = Query(..., description="Coaching code to determine file paths")):
+ #     try:
+ #         file_paths = get_file_paths(coaching_code)
+ #         if not file_paths:
+ #             raise HTTPException(status_code=400, detail="Invalid coaching code")
+ #         print("A");
+ #         # Open Google Sheets using the URLs
+ #         journal_file = client.open_by_url(file_paths['journal']).worksheet('Sheet1')
+ #         panic_button_file = client.open_by_url(file_paths['panic_button']).worksheet('Sheet1')
+ #         test_file = client.open_by_url(file_paths['test']).worksheet('Sheet1')
+ #         print("B");
+ #         # Convert the sheets into Pandas DataFrames
+ #         journal_df = pd.DataFrame(journal_file.get_all_values())
+ #         panic_button_df = pd.DataFrame(panic_button_file.get_all_values())
+ #         test_df = pd.DataFrame(test_file.get_all_values())
+ #         print("C");
+ #         # Label the columns manually since there are no headers
+ #         journal_df.columns = ['user_id', 'productivity_yes_no', 'productivity_rate']
+ #         panic_button_df.columns = ['user_id', 'panic_button']
+ #         print("D")
+ #         # Initialize a list for the merged data
+ #         merged_data = []
+
+ #         # Group panic buttons by user_id and combine into a single comma-separated string
+ #         panic_button_grouped = panic_button_df.groupby('user_id')['panic_button'].apply(lambda x: ','.join(x)).reset_index()
+ #         print("E")
+ #         # Merge journal and panic button data
+ #         merged_journal_panic = pd.merge(journal_df, panic_button_grouped, on='user_id', how='outer')
+ #         print("F")
+ #         # Process the test data
+ #         test_data = []
+ #         for index, row in test_df.iterrows():
+ #             user_id = row[0]
+ #             i = 1
+ #             while i < len(row) and pd.notna(row[i]):  # Process chapter and score pairs
+ #                 chapter = row[i].lower().strip()
+ #                 score = row[i + 1]
+ #                 if pd.notna(score):
+ #                     test_data.append({'user_id': user_id, 'test_chapter': chapter, 'test_score': score})
+ #                 i += 2
+ #         print("G")
+ #         # Convert the processed test data into a DataFrame
+ #         test_df_processed = pd.DataFrame(test_data)
+ #         print("H")
+ #         # Merge the journal+panic button data with the test data
+ #         merged_data = pd.merge(merged_journal_panic, test_df_processed, on='user_id', how='outer')
+ #         print("I")
+ #         # Drop rows where all data (except user_id and test_chapter) is missing
+ #         merged_data_cleaned = merged_data.dropna(subset=['productivity_yes_no', 'productivity_rate', 'panic_button', 'test_chapter'], how='all')
+ #         print("J")
+ #         # Group the merged DataFrame by user_id
+ #         df = pd.DataFrame(merged_data_cleaned)
+ #         print("K")
+ #         # Function to process panic button counts and test scores
+ #         def process_group(group):
+ #             # Panic button counts
+ #             panic_button_series = group['panic_button'].dropna()
+ #             panic_button_dict = panic_button_series.value_counts().to_dict()
+
+ #             # Test scores aggregation
+ #             test_scores = group[['test_chapter', 'test_score']].dropna()
+ #             test_scores['test_score'] = pd.to_numeric(test_scores['test_score'], errors='coerce')
+
+ #             # Create the test_scores_dict excluding NaN values
+ #             test_scores_dict = test_scores.groupby('test_chapter')['test_score'].mean().dropna().to_dict()
+
+ #             return pd.Series({
+ #                 'productivity_yes_no': group['productivity_yes_no'].iloc[0],
+ #                 'productivity_rate': group['productivity_rate'].iloc[0],
+ #                 'panic_button': panic_button_dict,
+ #                 'test_scores': test_scores_dict
+ #             })
+
+ #         # Apply the group processing function
+ #         merged_df = df.groupby('user_id').apply(process_group).reset_index()
+ #         print("L")
+ #         # Calculate potential scores and sort
+ #         merged_df['potential_score'] = merged_df.apply(calculate_potential_score, axis=1)
+ #         merged_df['potential_score'] = merged_df['potential_score'].round(2)
+ #         sorted_df = merged_df[['user_id', 'potential_score']].sort_values(by='potential_score', ascending=False)
+ #         print("M")
+ #         result = sorted_df.to_dict(orient="records")
+ #         return {"sorted_scores": result}
+ #     except Exception as e:
+ #         raise HTTPException(status_code=500, detail=str(e))
+
+
+ from fastapi import FastAPI, HTTPException, Query
+ from pydantic import BaseModel
+ import gspread
+ from google.oauth2.service_account import Credentials
+ import pandas as pd
+ from collections import defaultdict
+ import os
+ from fastapi.middleware.cors import CORSMiddleware
+
+ app = FastAPI()
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],  # You can specify domains instead of "*" to restrict access
+     allow_credentials=True,
+     allow_methods=["*"],  # Allows all HTTP methods (POST, GET, OPTIONS, etc.)
+     allow_headers=["*"],  # Allows all headers
+ )
+
+ # Model for the request body
+ class CoachingCodeRequest(BaseModel):
+     coachingCode: str
+
+ # Function to get credentials
+ def get_credentials():
+     """Get Google Sheets API credentials from environment variables."""
+     try:
+         # Construct the service account info dictionary
+         service_account_info = {
+             "type": os.getenv("SERVICE_ACCOUNT_TYPE"),
+             "project_id": os.getenv("PROJECT_ID"),
+             "private_key_id": os.getenv("PRIVATE_KEY_ID"),
+             "private_key": os.getenv("PRIVATE_KEY").replace('\\n', '\n'),
+             "client_email": os.getenv("CLIENT_EMAIL"),
+             "client_id": os.getenv("CLIENT_ID"),
+             "auth_uri": os.getenv("AUTH_URI"),
+             "token_uri": os.getenv("TOKEN_URI"),
+             "auth_provider_x509_cert_url": os.getenv("AUTH_PROVIDER_X509_CERT_URL"),
+             "client_x509_cert_url": os.getenv("CLIENT_X509_CERT_URL"),
+             "universe_domain": os.getenv("UNIVERSE_DOMAIN")
+         }
+         scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
+         creds = Credentials.from_service_account_info(service_account_info, scopes=scope)
+         return creds
+
+     except Exception as e:
+         print(f"Error getting credentials: {e}")
+         return None
+
+
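
For running the app outside the Space, every field read above must be present in the environment. A minimal sketch, assuming a service-account key downloaded from Google Cloud to service_account.json (the file name is an assumption; on Hugging Face these values would be set as Space secrets):

import json
import os

with open("service_account.json") as f:  # hypothetical local key file
    info = json.load(f)

# Map the key's JSON fields onto the names get_credentials() reads
field_to_env = {
    "type": "SERVICE_ACCOUNT_TYPE", "project_id": "PROJECT_ID",
    "private_key_id": "PRIVATE_KEY_ID", "private_key": "PRIVATE_KEY",
    "client_email": "CLIENT_EMAIL", "client_id": "CLIENT_ID",
    "auth_uri": "AUTH_URI", "token_uri": "TOKEN_URI",
    "auth_provider_x509_cert_url": "AUTH_PROVIDER_X509_CERT_URL",
    "client_x509_cert_url": "CLIENT_X509_CERT_URL",
    "universe_domain": "UNIVERSE_DOMAIN",
}
for key, env_name in field_to_env.items():
    os.environ[env_name] = info.get(key, "")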
+ # Select files based on coaching code
+ def select_files(coaching_code):
+     creds = get_credentials()
+     client = gspread.authorize(creds)
+
+     if coaching_code == "1919":
+         journal_file = client.open_by_url('https://docs.google.com/spreadsheets/d/1EFf2lr4A10nt4RhIqxCD_fxe-l3sXH09II0TEkMmvhA/edit?gid=0#gid=0').worksheet('Sheet1')
+         panic_button_file = client.open_by_url('https://docs.google.com/spreadsheets/d/1nFZGkCvRV6qS-mhsORhX3dxI0JSge32_UwWgWKl3eyw/edit?gid=0#gid=0').worksheet('Sheet1')
+         test_file = client.open_by_url('https://docs.google.com/spreadsheets/d/13PUHySUXWtKBusjugoe7Dbsm39PwBUfG4tGLipspIx4/edit?gid=0#gid=0').worksheet('Sheet1')
+     else:
+         raise HTTPException(status_code=404, detail="Invalid coaching code")
+
+     return journal_file, panic_button_file, test_file
+
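
select_files hard-codes a single branch, while the commented-out draft above already sketched a get_file_paths lookup table (including a second code, '0946'). A table-driven variant in that spirit — SHEET_URLS and select_files_table are hypothetical names; only the '1919' URLs appear in the live code:

SHEET_URLS = {
    "1919": {
        "journal": "https://docs.google.com/spreadsheets/d/1EFf2lr4A10nt4RhIqxCD_fxe-l3sXH09II0TEkMmvhA/edit?gid=0#gid=0",
        "panic_button": "https://docs.google.com/spreadsheets/d/1nFZGkCvRV6qS-mhsORhX3dxI0JSge32_UwWgWKl3eyw/edit?gid=0#gid=0",
        "test": "https://docs.google.com/spreadsheets/d/13PUHySUXWtKBusjugoe7Dbsm39PwBUfG4tGLipspIx4/edit?gid=0#gid=0",
    },
    # further coaching codes slot in here without touching the function body
}

def select_files_table(coaching_code):
    urls = SHEET_URLS.get(coaching_code)
    if urls is None:
        raise HTTPException(status_code=404, detail="Invalid coaching code")
    client = gspread.authorize(get_credentials())
    return tuple(client.open_by_url(urls[k]).worksheet('Sheet1')
                 for k in ('journal', 'panic_button', 'test'))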
+ # Main route to get sorted scores
+ @app.post("/get_sorted_scores")
+ async def get_sorted_scores(data: CoachingCodeRequest):
+     journal_file, panic_button_file, test_file = select_files(data.coachingCode)
+
+     # Load data into DataFrames
+     journal_df = pd.DataFrame(journal_file.get_all_values())
+     panic_button_df = pd.DataFrame(panic_button_file.get_all_values())
+     test_df = pd.DataFrame(test_file.get_all_values())
+
+     # Panic-button rows: user id, then entries; keep only the most recent cells
+     panic_data = []
+     for index, row in panic_button_df.iterrows():
+         user_id = row[0]
+         row_pairs = row[1:].dropna().to_list()[-5:]  # last 5 non-empty cells
+         for i in range(0, len(row_pairs), 2):
+             panic = row_pairs[i].upper().strip()
+             if pd.notna(panic):
+                 panic_data.append({'user_id': user_id, 'panic_button': panic})
+     panic_df_processed = pd.DataFrame(panic_data)
+
+     # Test rows hold (chapter, score) pairs; keep the last 5 scores per chapter
+     test_data = []
+     for index, row in test_df.iterrows():
+         user_id = row[0]
+         row_pairs = row[1:].dropna().to_list()
+         chapter_scores = {}
+         for i in range(0, len(row_pairs), 2):
+             if i + 1 >= len(row_pairs):  # guard against a trailing unpaired cell
+                 break
+             chapter = row_pairs[i].lower().strip()
+             score = row_pairs[i + 1]
+             if pd.notna(score):
+                 if chapter not in chapter_scores:
+                     chapter_scores[chapter] = []
+                 chapter_scores[chapter].append(score)
+         for chapter, scores in chapter_scores.items():
+             last_5_scores = scores[-5:]
+             for score in last_5_scores:
+                 test_data.append({'user_id': user_id, 'test_chapter': chapter, 'test_score': score})
+     test_df_processed = pd.DataFrame(test_data)
+
+     # Journal rows hold (yes/no, rate) pairs; keep the last 5 pairs (10 cells)
+     journal_data = []
+     for index, row in journal_df.iterrows():
+         user_id = row[0]
+         row_pairs = row[1:].dropna().to_list()[-10:]
+         for i in range(0, len(row_pairs), 2):
+             if i + 1 >= len(row_pairs):  # guard against a trailing unpaired cell
+                 break
+             productivity_yes_no = row_pairs[i].lower().strip()
+             productivity_rate = row_pairs[i + 1]
+             if pd.notna(productivity_rate):
+                 journal_data.append({'user_id': user_id, 'productivity_yes_no': productivity_yes_no, 'productivity_rate': productivity_rate})
+     journal_df_processed = pd.DataFrame(journal_data)
+
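
To make the expected sheet layout concrete, here is the same pairing logic run standalone on a made-up journal row (all cell values invented):

import pandas as pd

# One journal row: user id, then alternating (yes/no, rate) cells
row = pd.Series(['stu_01', 'Yes', '8', 'No', '6', 'Yes', '9'])

pairs = row[1:].dropna().to_list()[-10:]  # at most the last 5 pairs
records = []
for i in range(0, len(pairs), 2):
    if i + 1 >= len(pairs):  # skip a trailing unpaired cell
        break
    records.append({
        'user_id': row[0],
        'productivity_yes_no': pairs[i].lower().strip(),
        'productivity_rate': pairs[i + 1],
    })
print(records)
# [{'user_id': 'stu_01', 'productivity_yes_no': 'yes', 'productivity_rate': '8'},
#  {'user_id': 'stu_01', 'productivity_yes_no': 'no', 'productivity_rate': '6'},
#  {'user_id': 'stu_01', 'productivity_yes_no': 'yes', 'productivity_rate': '9'}]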
+     merged_journal_panic = pd.merge(panic_df_processed, journal_df_processed, on='user_id', how='outer')
+     merged_data = pd.merge(merged_journal_panic, test_df_processed, on='user_id', how='outer')
+     merged_data_cleaned = merged_data.dropna(subset=['productivity_yes_no', 'productivity_rate', 'panic_button', 'test_chapter'], how='all')
+
+     def process_group(group):
+         # Panic button counts
+         panic_button_series = group['panic_button'].dropna()
+         panic_button_dict = panic_button_series.value_counts().to_dict()
+
+         # Test scores aggregation (.copy() avoids pandas' SettingWithCopyWarning)
+         test_scores = group[['test_chapter', 'test_score']].dropna().copy()
+         test_scores['test_score'] = pd.to_numeric(test_scores['test_score'], errors='coerce')
+
+         # Create the test_scores_dict excluding NaN values
+         test_scores_dict = test_scores.groupby('test_chapter')['test_score'].mean().dropna().to_dict()
+
+         return pd.Series({
+             'productivity_yes_no': group['productivity_yes_no'].iloc[0],
+             'productivity_rate': group['productivity_rate'].iloc[0],
+             'panic_button': panic_button_dict,
+             'test_scores': test_scores_dict
+         })
+
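
After grouping, each user collapses to a single record whose panic_button and test_scores fields are dicts, roughly like this (values invented):

# One row of the grouped frame after process_group:
# user_id: 'stu_01'
# productivity_yes_no: 'yes'
# productivity_rate: '8'
# panic_button: {'BACKLOGS': 2, 'LACK OF FOCUS': 1}   # press counts per factor
# test_scores: {'optics': 31.5, 'waves': 27.0}        # mean score per chapter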
+     # Define scoring weights (negative values: how much each panic factor drags the score down)
+     academic_weights = {'BACKLOGS': -5, 'MISSED CLASSES': -4, 'NOT UNDERSTANDING': -3, 'BAD MARKS': -3, 'LACK OF MOTIVATION': -3}
+     non_academic_weights = {'EMOTIONAL FACTORS': -3, 'PROCRASTINATE': -2, 'LOST INTEREST': -4, 'LACK OF FOCUS': -2, 'GOALS NOT ACHIEVED': -2, 'LACK OF DISCIPLINE': -2}
+     max_weighted_panic_score = sum([max(academic_weights.values()) * 3, max(non_academic_weights.values()) * 3])
+
+     def calculate_potential_score(row):
+         # Test component (up to 70 points): average chapter score scaled from a 40-point test
+         if row['test_scores']:
+             avg_test_score = sum(row['test_scores'].values()) / len(row['test_scores'])
+             test_score_normalized = (avg_test_score / 40) * 70
+         else:
+             test_score_normalized = 0
+         # Panic component (up to 20 points)
+         student_panic_score = 0
+         if row['panic_button']:
+             for factor, count in row['panic_button'].items():
+                 if factor in academic_weights:
+                     student_panic_score += academic_weights[factor] * count
+                 elif factor in non_academic_weights:
+                     student_panic_score += non_academic_weights[factor] * count
+         else:
+             student_panic_score = 0
+         panic_score = 20 * (1 - (student_panic_score / max_weighted_panic_score) if max_weighted_panic_score != 0 else 1)
+         # Journal component (up to 10 points); the parser above lowercases the
+         # yes/no flag, so compare lowercase (the original compared 'Yes'/'No'
+         # and always fell through to 0)
+         yes_no = str(row['productivity_yes_no']).lower() if pd.notna(row['productivity_yes_no']) else ''
+         if yes_no == 'yes':
+             if pd.notna(row['productivity_rate']):
+                 journal_score = (float(row['productivity_rate']) / 10) * 10
+             else:
+                 journal_score = 0
+         elif yes_no == 'no':
+             if pd.notna(row['productivity_rate']):
+                 journal_score = (float(row['productivity_rate']) / 10) * 5
+             else:
+                 journal_score = 0
+         else:
+             journal_score = 0
+         total_potential_score = test_score_normalized + panic_score + journal_score
+         return total_potential_score
+
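
A worked example of the scoring arithmetic with invented inputs (treating calculate_potential_score as if it were lifted to module scope): an average chapter score of 30 out of 40 contributes (30 / 40) × 70 = 52.5; two BACKLOGS presses sum to -10, and max_weighted_panic_score is (-3)×3 + (-2)×3 = -15, so the panic part is 20 × (1 - (-10)/(-15)) ≈ 6.67; a 'yes' day rated 8 contributes (8 / 10) × 10 = 8.0; total ≈ 67.17.

row = {
    'test_scores': {'optics': 30},     # one chapter, average 30/40
    'panic_button': {'BACKLOGS': 2},   # 2 × (-5) = -10
    'productivity_yes_no': 'yes',
    'productivity_rate': '8',
}
print(round(calculate_potential_score(row), 2))  # 67.17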
+     merged_df = merged_data_cleaned.groupby('user_id').apply(process_group).reset_index()
+     merged_df['potential_score'] = merged_df.apply(calculate_potential_score, axis=1)
+     merged_df['potential_score'] = merged_df['potential_score'].round(2)
+     sorted_df = merged_df[['user_id', 'potential_score']].sort_values(by='potential_score', ascending=False)
+     result = sorted_df.to_dict(orient="records")
+
+     return {"sorted_scores": result}
+
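
On success the endpoint returns the ranking as plain JSON; an unknown coaching code fails earlier, in select_files, with a 404. An illustrative exchange (values invented):

# POST /get_sorted_scores
# {"coachingCode": "1919"}
#
# 200 OK
# {"sorted_scores": [
#     {"user_id": "stu_07", "potential_score": 81.25},
#     {"user_id": "stu_01", "potential_score": 67.17}
# ]}
#
# POST /get_sorted_scores with {"coachingCode": "0000"}
# 404 {"detail": "Invalid coaching code"}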
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ fastapi
+ uvicorn
+ pandas
+ gspread
+ google-auth