pvyas96 committed
Commit 1937747 • 1 Parent(s): 86e52d3

Upload 5 files

.streamlit/config.toml ADDED
@@ -0,0 +1,6 @@
+ [theme]
+ primaryColor="#F63366"
+ backgroundColor="#FFFFFF"
+ secondaryBackgroundColor="#F0F2F6"
+ textColor="#262730"
+ font="sans serif"
.streamlit/secrets.toml ADDED
File without changes
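The secrets file is committed empty. The chat page reads the token via st.secrets['hf_key'], so a locally populated copy would need an entry along these lines (a minimal sketch; the value shown is a placeholder, not a real token):

# .streamlit/secrets.toml, local only; never commit a real token
hf_key = "hf_xxxxxxxxxxxxxxxxxxxx"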
app.py ADDED
@@ -0,0 +1,9 @@
+ import streamlit as st
+
+ readme = """
+ This is a simple chat application using Streamlit and the Hugging Face API.
+ Watch the video below to get started.
+
+ """
+
+ st.write(readme)
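app.py serves only as the landing page. Because the chat UI lives in the pages/ directory, Streamlit's multipage convention lists it in the sidebar automatically once the app is launched with "streamlit run app.py".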
pages/1_Simple_Chat_UI.py ADDED
@@ -0,0 +1,68 @@
+ import streamlit as st
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM
+
+ # Cache the loaded weights so the model is not reloaded on every rerun
+ @st.cache_resource
+ def load_model_tokenizer(model_name, hf_api_key):
+     # Only blenderbot is wired up so far; the other options are placeholders
+     model, tokenizer = None, None
+     if model_name == "Mistral-7B":
+         #model_name = "mistralai/Mistral-7B-Instruct-v0.2"
+         #model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_api_key)
+         #tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_api_key)
+         pass
+     elif model_name == "blenderbot-400M-distill":
+         model_name = "facebook/blenderbot-400M-distill"
+         # Blenderbot is an encoder-decoder model, so it must be loaded as seq2seq
+         model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+     return (model, tokenizer)
+
+ def generate_response(prompt_input, model, tokenizer, max_length, temperature):
+     inputs = tokenizer(prompt_input, return_tensors="pt")
+     # Generate the response using the sampling parameters set in the sidebar
+     outputs = model.generate(**inputs, max_length=max_length, do_sample=True, temperature=temperature)
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
+     return response
+
+ st.set_page_config(page_title="Learn Geoscience")
+
+ with st.sidebar:
+     st.title('Learn Geoscience Chat')
+     if 'hf_key' in st.secrets:
+         st.success('Huggingface API key provided', icon='✅')
+         hf_api_key = st.secrets['hf_key']
+     else:
+         hf_api_key = st.text_input('Enter Huggingface API Key:', type='password')
+         if not hf_api_key:
+             st.warning('Please enter Huggingface API key!', icon='⚠️')
+         else:
+             st.success('Proceed to entering your prompt message!', icon='👉')
+     max_length = st.slider("Max Length", 10, 100, 50)
+     temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
+
+ if "messages" not in st.session_state:
+     st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
+
+ model_name = st.radio("Select model to chat", options=["Mistral-7B", "LLaMa-2B", "blenderbot-400M-distill"], horizontal=True, key='model_selection')
+ model, tokenizer = load_model_tokenizer(model_name, hf_api_key)
+
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.write(message["content"])
+
+ if prompt := st.chat_input(disabled=not hf_api_key):
+     st.session_state.messages.append({"role": "user", "content": prompt})
+     with st.chat_message("user"):
+         st.write(prompt)
+
+ if st.session_state.messages[-1]["role"] != "assistant":
+     with st.chat_message("assistant"):
+         with st.spinner("Thinking..."):
+             if model is None:
+                 response = "This model is not available yet; please select blenderbot-400M-distill."
+             else:
+                 response = generate_response(prompt, model, tokenizer, max_length, temperature)
+             st.write(response)
+             message = {"role": "assistant", "content": response}
+             st.session_state.messages.append(message)
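For reference, the same generation flow can be exercised outside Streamlit. This is a minimal sketch, assuming transformers and torch are installed; the checkpoint and generation parameters come from the page above, while the test prompt is made up:

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "facebook/blenderbot-400M-distill"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)  # encoder-decoder, hence seq2seq

# Mirror generate_response: tokenize, sample with the same default knobs, decode
inputs = tokenizer("What is plate tectonics?", return_tensors="pt")
outputs = model.generate(**inputs, max_length=50, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True).strip())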
requirements.txt CHANGED
@@ -1,2 +1,2 @@
- transformers
+ transformers
  torch