Mykes committed on
Commit
53635ec
1 Parent(s): e2ef81d

Update app.py

Files changed (1)
  1. app.py +23 -16
app.py CHANGED
@@ -8,22 +8,29 @@ llm = Llama.from_pretrained(
 )
 
 basic_prompt = "Below is the context which is your conversation history and the last user question. Write a response according the context and question. ### Context: user: Ответь мне на вопрос о моем здоровье. assistant: Конечно! Какой у Вас вопрос? ### Question: {question} ### Response:"
-
-def generate_response(question):
-    model_input = basic_prompt.format(question=input_text)
-    if question:
-        output = llm(
-            model_input,  # Prompt
-            max_tokens=32,  # Generate up to 32 tokens, set to None to generate up to the end of the context window
-            stop=["<end_of_turn>"],
-            echo=False  # Echo the prompt back in the output
-        )  # Generate a completion, can also call create_completion
-        st.write(output["choices"][0]["text"])
-    else:
-        st.write("Please enter a question to get a response.")
-
-input_text = st.text_input('Задайте мне медицинский вопрос...')
-
-# Button to trigger response generation
-if st.button('Generate Response'):
-    generate_response(input_text)
+input_text = st.text_input('text')
+model_input = basic_prompt.format(question=input_text)
+
+if input_text:
+    # Create an empty placeholder for the output
+    output_placeholder = st.empty()
+
+    # Initialize an empty string to store the generated text
+    generated_text = ""
+
+    # Stream the output
+    for token in llm(
+        model_input,
+        max_tokens=32,
+        stop=["<end_of_turn>"],
+        echo=True,
+        stream=True  # Enable streaming
+    ):
+        # Append the new token to the generated text
+        generated_text += token['choices'][0]['text']
+
+        # Update the placeholder with the current generated text
+        output_placeholder.write(generated_text)
+
+    # After the generation is complete, you can do any final processing if needed
+    st.write("Generation complete!")
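
Taken together, this commit replaces the button-triggered, blocking completion with token streaming: each chunk yielded by llama-cpp-python is appended to an accumulating string and rewritten into a Streamlit placeholder, producing incremental output. A minimal runnable sketch of the updated app.py is below, for orientation only; the import lines and the Llama.from_pretrained() arguments fall outside the diff hunk, so repo_id and filename here are placeholders, not the model this Space actually loads.

import streamlit as st
from llama_cpp import Llama

# Placeholder model coordinates -- the real repo_id/filename are outside the diff hunk.
llm = Llama.from_pretrained(
    repo_id="your-org/your-model-GGUF",
    filename="*q4_k_m.gguf",
)

basic_prompt = (
    "Below is the context which is your conversation history and the last user question. "
    "Write a response according the context and question. "
    "### Context: user: Ответь мне на вопрос о моем здоровье. "
    "assistant: Конечно! Какой у Вас вопрос? "
    "### Question: {question} ### Response:"
)

input_text = st.text_input('text')
model_input = basic_prompt.format(question=input_text)

if input_text:
    output_placeholder = st.empty()  # overwritten on every streamed chunk
    generated_text = ""

    # With stream=True, llama-cpp-python yields completion chunks one at a
    # time instead of returning a single response dict.
    for token in llm(
        model_input,
        max_tokens=32,
        stop=["<end_of_turn>"],
        echo=True,
        stream=True,
    ):
        generated_text += token["choices"][0]["text"]
        output_placeholder.write(generated_text)

    st.write("Generation complete!")

Rewriting the whole accumulated string into st.empty() on each chunk is what gives the typewriter-style rendering; note that echo=True means the prompt itself is streamed back before the model's answer.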