andito and merve committed
Commit 833928a
1 Parent(s): ef3da92

Add description and links to nb/blog (#1)


- Add description and links to nb/blog (bb7d946e84b57b0e04daa156d25e964152444d77)


Co-authored-by: Merve Noyan <merve@users.noreply.huggingface.co>

Files changed (1)
  1. app.py +5 -2
app.py CHANGED
@@ -14,7 +14,9 @@ model = AutoModelForCausalLM.from_pretrained('HuggingFaceM4/Florence-2-DocVQA',
  processor = AutoProcessor.from_pretrained('HuggingFaceM4/Florence-2-DocVQA', trust_remote_code=True)
 
 
- DESCRIPTION = "# [Florence-2-DocVQA Demo](https://huggingface.co/HuggingFaceM4/Florence-2-DocVQA)"
+ TITLE = "# [Florence-2-DocVQA Demo](https://huggingface.co/HuggingFaceM4/Florence-2-DocVQA)"
+ DESCRIPTION = "The demo for Florence-2 fine-tuned on DocVQA dataset. You can find the notebook [here](https://colab.research.google.com/drive/1hKDrJ5AH_o7I95PtZ9__VlCTNAo1Gjpf?usp=sharing). Read more about Florence-2 fine-tuning [here](finetune-florence2)."
+
 
  colormap = ['blue','orange','green','purple','brown','pink','gray','olive','cyan','red',
              'lime','indigo','violet','aqua','magenta','coral','gold','tan','skyblue']
@@ -58,6 +60,7 @@ css = """
  """
 
  with gr.Blocks(css=css) as demo:
+     gr.Markdown(TITLE)
      gr.Markdown(DESCRIPTION)
      with gr.Tab(label="Florence-2 Image Captioning"):
          with gr.Row():
@@ -79,7 +82,7 @@ with gr.Blocks(css=css) as demo:
          outputs=[output_text],
          fn=process_image,
          cache_examples=True,
-         label='Try examples'
+         label='Try the examples below'
      )
 
      submit_btn.click(process_image, [input_img, text_input], [output_text])
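For reference, a minimal standalone sketch of the layout pattern this commit introduces: a separate TITLE and DESCRIPTION rendered as two gr.Markdown blocks above the demo's inputs. The `answer` stub below is hypothetical and stands in for the Space's real `process_image` inference function; the Gradio calls used are the standard Blocks API.

```python
import gradio as gr

# Constants added by this commit (copied from the diff above, description shortened).
TITLE = "# [Florence-2-DocVQA Demo](https://huggingface.co/HuggingFaceM4/Florence-2-DocVQA)"
DESCRIPTION = (
    "The demo for Florence-2 fine-tuned on DocVQA dataset. You can find the notebook "
    "[here](https://colab.research.google.com/drive/1hKDrJ5AH_o7I95PtZ9__VlCTNAo1Gjpf?usp=sharing)."
)

def answer(image, question):
    # Hypothetical stub: the real Space runs Florence-2 via process_image here.
    return f"(model output for: {question})"

with gr.Blocks() as demo:
    gr.Markdown(TITLE)        # title rendered first ...
    gr.Markdown(DESCRIPTION)  # ... then the new description right below it
    with gr.Row():
        input_img = gr.Image(label="Input Picture")
        text_input = gr.Textbox(label="Question")
    output_text = gr.Textbox(label="Output Text")
    submit_btn = gr.Button("Submit")
    submit_btn.click(answer, [input_img, text_input], [output_text])

if __name__ == "__main__":
    demo.launch()
```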