m96tkmok committed
Commit 8842511
1 Parent(s): 3b6ad6b

Update app.py

Files changed (1)
  1. app.py +15 -4
app.py CHANGED
@@ -1,13 +1,24 @@
 import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
+from langchain_core.prompts import ChatPromptTemplate
 
-# mistralai/Mistral-Nemo-Instruct-2407
 # Load the model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained("unsloth/Llama-3.2-3B-Instruct")
 model = AutoModelForCausalLM.from_pretrained("unsloth/Llama-3.2-3B-Instruct")
 
-st.title("Llama-3.2-3B-Instruct Text Generation")
-st.write("Enter a prompt and generate text using the Llama 3.2 3B model.")
+st.title("Unsloth Llama-3.2-3B-Instruct Text Generation")
+st.write("Enter a prompt and generate text using the Unsloth Llama 3.2 3B model.")
+
+prompt = """
+You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question.
+If you don't know the answer, just say that you don't know.
+Answer in bullet points. Make sure your answer is relevant to the question and it is answered from the context only.
+Question: {question}
+Context: {context}
+Answer:
+"""
+
+prompt = ChatPromptTemplate.from_template(prompt)
 
 with st.form("llm-form"):
     user_input = st.text_area("Enter your question or statement:")
@@ -15,6 +26,6 @@ with st.form("llm-form"):
 
 if submit:
     inputs = tokenizer(user_input, return_tensors="pt")
-    outputs = model.generate(inputs["input_ids"], max_length=50)
+    outputs = model.generate(inputs["input_ids"], max_length=200)
     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
     st.write(generated_text)
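
One thing this diff leaves open: the new ChatPromptTemplate is constructed but never applied. model.generate still receives the raw user_input, so the {question}/{context} template has no effect on the output. Below is a minimal sketch of how the template could be wired into the existing generate call, assuming the same objects as app.py; retrieved_context is a hypothetical stand-in, since the app has no retriever yet.

# Sketch only (not part of this commit): format the template before tokenizing.
if submit:
    retrieved_context = ""  # hypothetical placeholder; the app does no retrieval
    # Fill the {question}/{context} slots to get the final prompt string
    formatted_prompt = prompt.format(question=user_input, context=retrieved_context)
    inputs = tokenizer(formatted_prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],  # avoids the pad-token warning
        max_new_tokens=200,  # bounds only generated tokens; max_length also counts the prompt
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    st.write(generated_text)

Two asides folded into the sketch: the commit's max_length=200 includes the prompt tokens, so a long question leaves little room for the answer, and since Llama-3.2-3B-Instruct is a chat-tuned model, tokenizer.apply_chat_template would be the usual Transformers route for producing the final prompt string.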