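# Streamlit chat UI for Mistral-7B-Instruct (GGUF), running on CPU via ctransformers.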
import streamlit as st
from ctransformers import AutoModelForCausalLM
# Constants
TITLE = "Mistral 7B Chatbot"
DESCRIPTION = """
This Space demonstrates the [Mistral-7B-Instruct](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF) model running on CPU via ctransformers.
"""

with st.sidebar:
    # system_promptSide = st.text_input("Optional system prompt:")
    temperatureSide = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
    max_new_tokensSide = st.slider("Max new tokens", min_value=64, max_value=4096, value=4096, step=64)
    # ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
    # RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)
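# The commented-out controls above correspond to predict()'s top_p and
# repetition_penalty defaults; uncomment them and pass the values through
# predict() to expose those knobs in the UI.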

# Load the model (cached so the GGUF weights are not reloaded on every Streamlit rerun)
@st.cache_resource
def load_model():
    return AutoModelForCausalLM.from_pretrained(
        "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
        model_file="mistral-7b-instruct-v0.1.Q5_K_S.gguf",
        model_type="mistral",
        gpu_layers=0,  # CPU-only inference
    )

model = load_model()
ins = '''[INST] <<SYS>>
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe.  Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
<</SYS>>
{} [/INST]
'''
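# Note: the <<SYS>>...<</SYS>> block is Llama-2's system-prompt convention;
# the Mistral-Instruct format itself only defines the [INST] ... [/INST]
# wrapper, so the extra tags are passed to the model as plain text.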
# Define the conversation history; note this module-level list resets on every
# Streamlit rerun, so the persistent transcript lives in st.session_state below.
conversation_history = []

# Prediction function
def predict(message, system_prompt='', temperature=0.7, max_new_tokens=4096, top_p=0.5, repetition_penalty=1.2):
    global conversation_history
    # Wrap the user's question in the instruction template
    prompt = ins.format(message)
    response_text = model(
        prompt,
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )
    # Record the exchange (the user's question and the model's answer,
    # not the raw template)
    conversation_history.append({"role": "user", "content": message})
    conversation_history.append({"role": "assistant", "content": response_text})
    return response_text

# Streamlit UI
st.title(TITLE)
st.write(DESCRIPTION)


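# Streamlit re-executes this script on every interaction, so the chat
# transcript is kept in st.session_state to survive reruns.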
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=("πŸ§‘β€πŸ’»" if message["role"] == 'human' else 'πŸ¦™')):
        st.markdown(message["content"])
        
# React to user input
if prompt := st.chat_input("Ask Mistral-7B anything..."):
    # Display user message in chat message container
    st.chat_message("human",avatar = "πŸ§‘β€πŸ’»").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

    response = predict(message=prompt, temperature=temperatureSide, max_new_tokens=max_new_tokensSide)
    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar='πŸ¦™'):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
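
# To run the app locally (assuming this file is saved as app.py, the
# conventional entry point for a Streamlit Space):
#   pip install streamlit ctransformers
#   streamlit run app.py
# The first run downloads the multi-gigabyte GGUF weights from the Hugging Face Hub.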