emeses committed
Commit 0895bcd · 1 Parent(s): 4cf6bf3

Update space

Files changed (1)
  1. app.py +15 -16
app.py CHANGED
@@ -7,6 +7,7 @@ For more information on `huggingface_hub` Inference API support, please check th
 client = InferenceClient("emeses/lab2_model")
 
 
+
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -15,34 +16,32 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
+    # Format conversation history into a text prompt
+    prompt = f"{system_message}\n\n"
+
+    for user_msg, assistant_msg in history:
+        prompt += f"User: {user_msg}\n"
+        if assistant_msg:
+            prompt += f"Assistant: {assistant_msg}\n"
+
+    prompt += f"User: {message}\nAssistant:"
 
     response = ""
-
     try:
-        for chunk in client.chat_completion(
-            messages,
-            max_tokens=max_tokens,
-            stream=True,
+        # Use text_generation instead of chat_completion
+        for chunk in client.text_generation(
+            prompt,
+            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
+            stream=True
         ):
-            # The API returns the text directly in streaming mode
             if chunk:
                 response += chunk
                 yield response
     except Exception as e:
         yield f"Error: {str(e)}"
 
-
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """