import streamlit as st
from llama_cpp import Llama
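
# Download the quantized GGUF weights from the Hugging Face Hub and load them
# with llama.cpp; the wildcard filename selects the Q4_K_M quantized file.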
llm = Llama.from_pretrained(
    repo_id="Mykes/med_gemma7b_gguf",
    filename="*Q4_K_M.gguf",
    verbose=False,
)
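
# Gemma-style prompt template with a baked-in conversation history. The Russian
# context translates to: "user: Answer a question about my health.
# assistant: Of course! What is your question?"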
basic_prompt = (
    "Below is the context, which is your conversation history, and the last user question. "
    "Write a response according to the context and question. "
    "### Context: user: Ответь мне на вопрос о моем здоровье. "
    "assistant: Конечно! Какой у Вас вопрос? "
    "### Question: {question} ### Response:"
)
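
# Format the prompt with the user's question and display the model's answer.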
def generate_response(question):
    if question:
        model_input = basic_prompt.format(question=question)
        output = llm(
            model_input,
            max_tokens=32,  # caps the answer length; increase for longer replies
            stop=["<end_of_turn>"],  # Gemma's end-of-turn marker
            echo=False,  # return only the completion, not the prompt
        )
        st.write(output["choices"][0]["text"])
    else:
        st.write("Please enter a question to get a response.")
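
# Text input for the user's question; the Russian placeholder reads
# "Ask me a medical question...".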
input_text = st.text_input('Задайте мне медицинский вопрос...')
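
# Generate and display the answer when the button is clicked.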
if st.button('Generate Response'):
    generate_response(input_text)
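
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py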