ShravanHN committed
Commit d0d017f
Parent: 3087065

modified the model and added template

Files changed (1): app.py (+5 -13)
app.py CHANGED
@@ -19,14 +19,13 @@ LICENSE = """
 <p/>
 
 ---
-Built with Dolphin Meta Llama 3
+For more information, visit our [website](https://contentease.ai).
 """
 
 PLACEHOLDER = """
 <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-    <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
-    <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Dolphin Meta llama3</h1>
-    <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
+    <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">ContenteaseAI Custom AI trained model</h1>
+    <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Enter the text extracted from the PDF:</p>
 </div>
 """
 
@@ -37,12 +36,6 @@ h1 {
     display: block;
 }
 
-#duplicate-button {
-    margin: auto;
-    color: white;
-    background: #1565c0;
-    border-radius: 100vh;
-}
 """
 
 # Load the tokenizer and model
@@ -70,6 +63,7 @@ def chat_llama3_8b(message: str,
         str: The generated response.
     """
     conversation = []
+    message+= "Extract all relevant keywords and add quantity from the following text and format the result in nested JSON:"
     for user, assistant in history:
         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
     conversation.append({"role": "user", "content": message})
@@ -105,7 +99,6 @@ chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterf
 with gr.Blocks(fill_height=True, css=css) as demo:
 
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
     gr.ChatInterface(
         fn=chat_llama3_8b,
         chatbot=chatbot,
@@ -131,5 +124,4 @@ with gr.Blocks(fill_height=True, css=css) as demo:
     gr.Markdown(LICENSE)
 
 if __name__ == "__main__":
-    demo.launch()
-
+    demo.launch()
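
For readers skimming the diff: the only behavioral change inside chat_llama3_8b is that a fixed extraction instruction is now concatenated onto the incoming message before the conversation list is built from the chat history. The sketch below isolates that prompt-building step only; it is not the full app.py (model loading, the chat-template call, and streamed generation are omitted), and build_conversation is a hypothetical helper name introduced here for illustration, not part of the Space's code.

```python
# Minimal sketch of the prompt-building step after commit d0d017f.
# `build_conversation` is a hypothetical helper for illustration only;
# in app.py this logic lives inline inside chat_llama3_8b.

EXTRACTION_INSTRUCTION = (
    "Extract all relevant keywords and add quantity from the following "
    "text and format the result in nested JSON:"
)

def build_conversation(message: str, history: list) -> list:
    # The commit appends the instruction to the user's message
    # (message += ...), so it ends up after the pasted PDF text.
    message = message + EXTRACTION_INSTRUCTION

    conversation = []
    for user, assistant in history:
        # history arrives as (user, assistant) pairs, as iterated in app.py.
        conversation.extend([
            {"role": "user", "content": user},
            {"role": "assistant", "content": assistant},
        ])
    conversation.append({"role": "user", "content": message})
    return conversation

# Example usage: first turn, empty history.
print(build_conversation("Bathroom: 3 sinks, 2 mirrors", []))
```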