drod75 committed on
Commit
c5072c9
1 Parent(s): b39c6b9

visual changes

Browse files

Visual changes to the site: an About section, a link to our Linktree, and quality-of-life improvements.

Files changed (1) hide show
  1. app.py +66 -41
app.py CHANGED
@@ -282,27 +282,45 @@ selected_dish = st.sidebar.selectbox(
282
  )
283
 
284
  # Right title
285
- st.title("Results")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
286
  #################
287
 
288
  # Image Classification Section
289
  if uploaded_image and query:
290
- # Open the image
291
- input_image = Image.open(uploaded_image)
292
-
293
- # Display the image
294
- st.image(input_image, caption="Uploaded Image.", use_container_width=True)
295
-
296
- predictions = classifyImage(input_image)
297
- fpredictions = ""
298
-
299
- # Show the top predictions with percentages
300
- st.write("Top Predictions:")
301
- for class_name, confidence in predictions:
302
- if int(confidence) > 0.05:
303
- fpredictions += f"{class_name}: {confidence:.2f}%,"
304
- st.write(f"{class_name}: {confidence:.2f}%")
305
- print(fpredictions)
 
 
306
 
307
  # call openai to pick the best classification result based on query
308
  openAICall = [
@@ -325,31 +343,38 @@ if uploaded_image and query:
325
  openAIresponse = llm.invoke(openAICall)
326
  print("AI CALL RESPONSE: ", openAIresponse.content)
327
 
328
- # RAG the openai response and display
329
- print("RAG INPUT", openAIresponse.content + " " + query)
330
- RAGresponse = get_response(openAIresponse.content + " " + query)
331
- display_response(RAGresponse)
 
 
332
  elif uploaded_image is not None:
333
- # Open the image
334
- input_image = Image.open(uploaded_image)
335
-
336
- # Display the image
337
- st.image(input_image, caption="Uploaded Image.", use_column_width=True)
338
-
339
- # Classify the image and display the result
340
- predictions = classifyImage(input_image)
341
- fpredictions = ""
342
-
343
- # Show the top predictions with percentages
344
- st.write("Top Predictions:")
345
- for class_name, confidence in predictions:
346
- if int(confidence) > 0.05:
347
- fpredictions += f"{class_name}: {confidence:.2f}%,"
348
- st.write(f"{class_name}: {confidence:.2f}%")
349
- print(fpredictions)
 
 
350
 
351
  elif query:
352
- response = get_response(query)
353
- display_response(response)
 
 
354
  else:
355
- st.write("Please input an image and/or a prompt.")
 
 
282
  )
283
 
284
  # Right title
285
+ st.title("Welcome to FOOD CHAIN!")
286
+ with st.expander("**What is FOOD CHAIN?**"):
287
+ st.markdown(
288
+ """
289
+ The project aims to use machine learning and computer vision techniques to analyze food images
290
+ and identify them. By using diverse datasets, the model will learn to recognize dishes based on
291
+ visual features. Our project aims to inform users about what it is they are eating, including
292
+ potential nutritional value and an AI generated response on how their dish might have been prepared.
293
+ We want users to have an easy way to figure out what their favorite foods contain, to know any
294
+ allergens in the food and to better connect to the food around them. This tool can also tell users
295
+ the calories of their dish, they can figure out the nutrients with only a few steps!
296
+
297
+ Thank you for using our project!
298
+
299
+ Made by the Classify Crew: [Contact List](https://linktr.ee/classifycrew)
300
+ """
301
+ )
302
  #################
303
 
304
  # Image Classification Section
305
  if uploaded_image and query:
306
+ with st.expander("**Food Classification**", expanded=True, icon=':material/search_insights:'):
307
+ st.title("Results: Image Classification")
308
+ # Open the image
309
+ input_image = Image.open(uploaded_image)
310
+
311
+ # Display the image
312
+ st.image(input_image, caption="Uploaded Image.", use_container_width=True)
313
+
314
+ predictions = classifyImage(input_image)
315
+ fpredictions = ""
316
+
317
+ # Show the top predictions with percentages
318
+ st.write("Top Predictions:")
319
+ for class_name, confidence in predictions:
320
+ if int(confidence) > 0.05:
321
+ fpredictions += f"{class_name}: {confidence:.2f}%,"
322
+ st.write(f"{class_name}: {confidence:.2f}%")
323
+ print(fpredictions)
324
 
325
  # call openai to pick the best classification result based on query
326
  openAICall = [
 
343
  openAIresponse = llm.invoke(openAICall)
344
  print("AI CALL RESPONSE: ", openAIresponse.content)
345
 
346
+ with st.expander("Recipe Generation", expanded=True, icon=':material/menu_book:'):
347
+ st.title('Results: RAG')
348
+ # RAG the openai response and display
349
+ print("RAG INPUT", openAIresponse.content + " " + query)
350
+ RAGresponse = get_response(openAIresponse.content + " " + query)
351
+ display_response(RAGresponse)
352
  elif uploaded_image is not None:
353
+ with st.expander("**Food Classification**", expanded=True, icon=':material/search_insights:'):
354
+ st.title("Results: Image Classification")
355
+ # Open the image
356
+ input_image = Image.open(uploaded_image)
357
+
358
+ # Display the image
359
+ st.image(input_image, caption="Uploaded Image.", use_column_width=True)
360
+
361
+ # Classify the image and display the result
362
+ predictions = classifyImage(input_image)
363
+ fpredictions = ""
364
+
365
+ # Show the top predictions with percentages
366
+ st.write("Top Predictions:")
367
+ for class_name, confidence in predictions:
368
+ if int(confidence) > 0.05:
369
+ fpredictions += f"{class_name}: {confidence:.2f}%,"
370
+ st.write(f"{class_name}: {confidence:.2f}%")
371
+ print(fpredictions)
372
 
373
  elif query:
374
+ with st.expander("**Recipe Generation**", expanded=True, icon=':material/menu_book:'):
375
+ st.title("Results: RAG")
376
+ response = get_response(query)
377
+ display_response(response)
378
  else:
379
+ st.warning("Please input an image and/or a prompt.", icon=':material/no_meals:')
380
+