vilarin committed on
Commit
3a65db9
1 Parent(s): 9c028f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -12
app.py CHANGED
@@ -17,8 +17,6 @@ from mistral_common.protocol.instruct.request import ChatCompletionRequest
17
 
18
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
19
 
20
- TITLE = "<h1><center>Mistral-lab</center></h1>"
21
-
22
  PLACEHOLDER = """
23
  <center>
24
  <p>Chat with Mistral AI LLM.</p>
@@ -64,13 +62,15 @@ def stream_chat(
64
  print(f'history: {history}')
65
 
66
  conversation = []
67
- # for prompt, answer in history:
68
- # conversation.append(UserMessage(content=prompt))
69
- # conversation.append(AssistantMessage(content=answer))
70
- if history:
71
- conversation.extend(history)
72
  conversation.append(UserMessage(content=message))
73
-
 
 
74
  completion_request = ChatCompletionRequest(messages=conversation)
75
 
76
  tokens = tokenizer.encode_chat_completion(completion_request).tokens
@@ -87,13 +87,16 @@ def stream_chat(
87
  for i in range(len(result)):
88
  time.sleep(0.05)
89
  yield result[: i + 1]
90
-
91
- with gr.Blocks(theme="ocean", css=CSS) as demo:
92
- gr.HTML(TITLE)
 
93
  gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
94
  gr.ChatInterface(
95
  fn=stream_chat,
96
- type="messages",
 
 
97
  fill_height=True,
98
  examples=[
99
  ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."],
 
17
 
18
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
19
 
 
 
20
  PLACEHOLDER = """
21
  <center>
22
  <p>Chat with Mistral AI LLM.</p>
 
62
  print(f'history: {history}')
63
 
64
  conversation = []
65
+ for prompt, answer in history:
66
+ conversation.append(UserMessage(content=prompt))
67
+ conversation.append(AssistantMessage(content=answer))
68
+ # if history:
69
+ # conversation.extend(history)
70
  conversation.append(UserMessage(content=message))
71
+
72
+ print(f'history: {conversation}')
73
+
74
  completion_request = ChatCompletionRequest(messages=conversation)
75
 
76
  tokens = tokenizer.encode_chat_completion(completion_request).tokens
 
87
  for i in range(len(result)):
88
  time.sleep(0.05)
89
  yield result[: i + 1]
90
+
91
+ chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)
92
+
93
+ with gr.Blocks(theme="citrus", css=CSS) as demo:
94
  gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
95
  gr.ChatInterface(
96
  fn=stream_chat,
97
+ title="Mistral-lab",
98
+ chatbot=chatbot,
99
+ #type="messages",
100
  fill_height=True,
101
  examples=[
102
  ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."],