phyloforfun committed on
Commit
dc252b5
1 Parent(s): 0ee709f

file upload gallery

Browse files
Files changed (1) hide show
  1. app.py +5 -17
app.py CHANGED
@@ -1219,29 +1219,16 @@ def show_header_welcome():
1219
  st.image(st.session_state.logo, width=250)
1220
 
1221
  def content_header():
1222
- col_run_1, col_run_2, col_run_3 = st.columns([4,2,2])
1223
- col_test = st.container()
1224
 
1225
- st.write("")
1226
- st.write("")
1227
- st.write("")
1228
  st.write("")
1229
  st.subheader("Overall Progress")
1230
  col_run_info_1 = st.columns([1])[0]
1231
- st.write("")
1232
- st.write("")
1233
- st.write("")
1234
  st.write("")
1235
  st.header("Configuration Settings")
1236
 
1237
  with col_run_info_1:
1238
- # Progress
1239
- # Progress
1240
- # st.subheader('Project')
1241
- # bar = st.progress(0)
1242
- # new_text = st.empty() # Placeholder for current step name
1243
- # progress_report = ProgressReportVV(bar, new_text, n_images=10)
1244
-
1245
  # Progress
1246
  overall_progress_bar = st.progress(0)
1247
  text_overall = st.empty() # Placeholder for current step name
@@ -1303,11 +1290,12 @@ def content_header():
1303
  st.write('3. Choose a LLM version --- Only LLMs with valid keys will appear in the dropdown list.')
1304
  st.write('4. Select a prompt version --- Start with "Version 2". Custom Prompts will include ".yaml" in the name. You can build your own Custom Prompt in the Prompt Builder.')
1305
  st.markdown('5. Upload images --- Up to ~100 images can be uploaded in the Hugging Face Spaces implementation. If you want to process more images at once (and have more control in general) then use the [GitHub version](https://github.com/Gene-Weaver/VoucherVision). If you pay for persistent storage for your HF Space, then you may be able to process more too.')
 
1306
  st.write('6. LeafMachine2 collage --- If selected, LeafMachine2 will isolate all text from the image and create a label collage, which will be sent to the OCR algorithm instead of the full image. This improves OCR detection for small or finely written text.')
1307
  st.write('7. OCR overlay images --- If selected, VoucherVision will overlay the OCR detections onto the input image. This is useful for debugging transcription errors to see if the OCR failed or if the LLM failed.')
1308
  st.write('8. Start processing --- Wait for VoucherVision to finish.')
1309
  st.write('9. Download results --- Click the "Download Results" button to save the VoucherVision output to your computer. ***Output files will disappear if you start a new run or restart the Space.***')
1310
- st.write('8. Editing the LLM transcriptions --- Use the VoucherVisionEditor to revise and correct any mistakes or ommissions.')
1311
  # st.subheader('Run Tests', help="")
1312
  # st.write('We include a single image for testing. If you want to test all of the available prompts and LLMs on a different set of images, copy your images into `../VoucherVision/demo/demo_images`.')
1313
  # if st.button("Test GPT",disabled=True):
@@ -1324,7 +1312,7 @@ def content_header():
1324
  # display_test_results(test_results, JSON_results, 'palm')
1325
  # st.balloons()
1326
 
1327
- with col_run_3:
1328
  st.subheader('Available LLMs and APIs')
1329
  show_available_APIs()
1330
  st.info('Until the end of 2023, Azure OpenAI models will be available for anyone to use here. Then only PaLM 2 will be available. To use all services, duplicate this Space and provide your own API keys.')
 
1219
  st.image(st.session_state.logo, width=250)
1220
 
1221
  def content_header():
1222
+ col_run_1, col_run_2, col_run_3, col_run_4 = st.columns([2,2,2,2])
 
1223
 
 
 
 
1224
  st.write("")
1225
  st.subheader("Overall Progress")
1226
  col_run_info_1 = st.columns([1])[0]
1227
+
 
 
1228
  st.write("")
1229
  st.header("Configuration Settings")
1230
 
1231
  with col_run_info_1:
 
 
 
 
 
 
 
1232
  # Progress
1233
  overall_progress_bar = st.progress(0)
1234
  text_overall = st.empty() # Placeholder for current step name
 
1290
  st.write('3. Choose a LLM version --- Only LLMs with valid keys will appear in the dropdown list.')
1291
  st.write('4. Select a prompt version --- Start with "Version 2". Custom Prompts will include ".yaml" in the name. You can build your own Custom Prompt in the Prompt Builder.')
1292
  st.markdown('5. Upload images --- Up to ~100 images can be uploaded in the Hugging Face Spaces implementation. If you want to process more images at once (and have more control in general) then use the [GitHub version](https://github.com/Gene-Weaver/VoucherVision). If you pay for persistent storage for your HF Space, then you may be able to process more too.')
1293
+ with col_run_3:
1294
  st.write('6. LeafMachine2 collage --- If selected, LeafMachine2 will isolate all text from the image and create a label collage, which will be sent to the OCR algorithm instead of the full image. This improves OCR detection for small or finely written text.')
1295
  st.write('7. OCR overlay images --- If selected, VoucherVision will overlay the OCR detections onto the input image. This is useful for debugging transcription errors to see if the OCR failed or if the LLM failed.')
1296
  st.write('8. Start processing --- Wait for VoucherVision to finish.')
1297
  st.write('9. Download results --- Click the "Download Results" button to save the VoucherVision output to your computer. ***Output files will disappear if you start a new run or restart the Space.***')
1298
+ st.write('10. Editing the LLM transcriptions --- Use the VoucherVisionEditor to revise and correct any mistakes or ommissions.')
1299
  # st.subheader('Run Tests', help="")
1300
  # st.write('We include a single image for testing. If you want to test all of the available prompts and LLMs on a different set of images, copy your images into `../VoucherVision/demo/demo_images`.')
1301
  # if st.button("Test GPT",disabled=True):
 
1312
  # display_test_results(test_results, JSON_results, 'palm')
1313
  # st.balloons()
1314
 
1315
+ with col_run_4:
1316
  st.subheader('Available LLMs and APIs')
1317
  show_available_APIs()
1318
  st.info('Until the end of 2023, Azure OpenAI models will be available for anyone to use here. Then only PaLM 2 will be available. To use all services, duplicate this Space and provide your own API keys.')