YaserDS-777 committed
Commit 9413ff7 • 1 Parent(s): f6cad85

Update app.py

Files changed (1):
  1. app.py +22 -78
app.py CHANGED
@@ -1,94 +1,38 @@
  import os
  import streamlit as st
- from langchain_huggingface import HuggingFaceEndpoint

- # Set the environment variable "m_token" to the value of sec_key
- sec_key = "YOUR_HUGGING_FACE_API_TOKEN_HERE"
- os.environ["m_token"] = sec_key
-
- # Specify the repository ID of the Hugging Face model you want to use
- repo_id_mistral = "mistralai/Mistral-7B-Instruct-v0.3"

  # Streamlit app layout
- st.title("🤖 Mistral-7B-Instruct-v0.3 تجربة نموذج 🧙")
-
- # Input text area for user query with enhanced instructions
- user_query = st.text_area(
-     "✨ Enter your magical query:",
-     height=100,
-     help="""
-     **Enhanced Prompting Instructions:**
-     - Be clear and specific about what you want to know.
-     - Use natural language to describe your query.
-     - If asking a question, ensure it is well-formed and unambiguous.
-     - For best results, provide context or background information if relevant.
-     """
- )

- # Slider for adjusting the temperature
- temperature = st.slider(
-     "Temperature",
-     min_value=0.1,
-     max_value=1.0,
-     value=0.7,
-     step=0.1,
-     help="""
-     **Temperature:**
-     - Lower values (e.g., 0.1) make the output more deterministic and focused.
-     - Higher values (e.g., 1.0) make the output more diverse and creative.
-     """
- )

- # Slider for adjusting the max length
- max_length = st.slider(
-     "Max Length",
-     min_value=32,
-     max_value=256,
-     value=128,
-     step=32,
-     help="""
-     **Max Length:**
-     - Controls the maximum number of tokens in the generated response.
-     - Adjust based on the desired length of the response.
-     """
- )

  # Button to trigger the query
- if st.button("🪄 Cast Spell"):
      if user_query:
-         # Initialize the HuggingFaceEndpoint for Mistral
-         llm_mistral = HuggingFaceEndpoint(
-             repo_id=repo_id_mistral,
-             max_length=max_length,
-             temperature=temperature,
-             token=sec_key
          )
-
-         # Invoke the model with the user's query
-         response_mistral = llm_mistral.invoke(user_query)

          # Display the response
-         st.markdown("🔮 <span class='response'>Response from Mistral-7B-Instruct-v0.3:</span>", unsafe_allow_html=True)
-         st.markdown(f"<span class='response'>{response_mistral}</span>", unsafe_allow_html=True)

-         # Save query and response to session state
-         if 'history' not in st.session_state:
-             st.session_state.history = []
-         st.session_state.history.append((user_query, response_mistral))
      else:
-         st.write("🚨 Please enter a query to cast your spell.")
-
- # Button to clear history
- if st.button("🗑️ Clear History"):
-     if 'history' in st.session_state:
-         st.session_state.history = []
-         st.success("History cleared!")

- # Display history of queries and responses
- if 'history' in st.session_state:
-     st.subheader("📜 Scroll of Spells Cast")
-     for query, response_mistral in st.session_state.history:
-         st.write(f"**Query:** {query}")
-         st.markdown(f"<span class='response'>**Response from Mistral-7B-Instruct-v0.3:** {response_mistral}</span>", unsafe_allow_html=True)
-         st.write("---")
-
 
  import os
  import streamlit as st
+ from transformers import AutoTokenizer, AutoModelForCausalLM

+ # Load the fine-tuned model and tokenizer
+ model_path = "path/to/your/fine-tuned-model"
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ model = AutoModelForCausalLM.from_pretrained(model_path)

  # Streamlit app layout
+ st.title("🤖 Fine-tuned Arabic Mistral Model 🧙")

+ # Input text area for user query
+ user_query = st.text_area("✨ Enter your query in Arabic:", height=100)

+ # Sliders for temperature and max length (as in your original code)

  # Button to trigger the query
+ if st.button("🪄 Generate Response"):
      if user_query:
+         # Tokenize input and generate response
+         inputs = tokenizer(user_query, return_tensors="pt")
+         outputs = model.generate(
+             inputs.input_ids,
+             max_length=max_length,
+             temperature=temperature
          )
+         response = tokenizer.decode(outputs[0], skip_special_tokens=True)

          # Display the response
+         st.markdown("🔮 Response from Fine-tuned Arabic Model:")
+         st.write(response)

+         # Save query and response to session state (as in your original code)
      else:
+         st.write("🚨 Please enter a query.")

+ # History display and clear button (as in your original code)
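
As committed, the three "(as in your original code)" placeholders are never filled in, so the new file will raise a `NameError`: `max_length` and `temperature` are passed to `model.generate(...)` but defined nowhere. Below is a minimal sketch of what the merged `app.py` could look like, restoring the sliders and the session-state history handling from the removed code. Assumptions are marked in comments: `model_path` stays a placeholder from the commit, the `@st.cache_resource` wrapper is an addition (it needs Streamlit ≥ 1.18), `do_sample=True` is added because `temperature` has no effect under the default greedy decoding, and the history labels are reworded to match the new, non-"spell" theme.

```python
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Placeholder path to the fine-tuned checkpoint, unchanged from the commit
model_path = "path/to/your/fine-tuned-model"

# Cache the model and tokenizer so Streamlit does not reload them on every rerun
@st.cache_resource
def load_model(path):
    tokenizer = AutoTokenizer.from_pretrained(path)
    model = AutoModelForCausalLM.from_pretrained(path)
    return tokenizer, model

tokenizer, model = load_model(model_path)

# Streamlit app layout
st.title("🤖 Fine-tuned Arabic Mistral Model 🧙")

# Input text area for user query
user_query = st.text_area("✨ Enter your query in Arabic:", height=100)

# Sliders restored from the removed code
temperature = st.slider("Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.1)
max_length = st.slider("Max Length", min_value=32, max_value=256, value=128, step=32)

# Button to trigger the query
if st.button("🪄 Generate Response"):
    if user_query:
        # Tokenize the input and generate a response; do_sample=True is
        # required for the temperature setting to have any effect
        inputs = tokenizer(user_query, return_tensors="pt")
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=max_length,
            do_sample=True,
            temperature=temperature,
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Display the response
        st.markdown("🔮 Response from Fine-tuned Arabic Model:")
        st.write(response)

        # Save query and response to session state (restored from the removed code)
        if "history" not in st.session_state:
            st.session_state.history = []
        st.session_state.history.append((user_query, response))
    else:
        st.write("🚨 Please enter a query.")

# Clear-history button (restored from the removed code)
if st.button("🗑️ Clear History"):
    st.session_state.history = []
    st.success("History cleared!")

# History display (restored from the removed code)
if "history" in st.session_state and st.session_state.history:
    st.subheader("📜 Query History")
    for query, response in st.session_state.history:
        st.write(f"**Query:** {query}")
        st.write(f"**Response:** {response}")
        st.write("---")
```

Two caveats worth noting: `generate`'s `max_length` counts the prompt tokens as well as the new ones, so `max_new_tokens` is usually closer to what a response-length slider intends; and loading a 7B-parameter model via `AutoModelForCausalLM.from_pretrained` requires far more memory than a free CPU Space provides, which is one reason the removed code's `HuggingFaceEndpoint` approach (a hosted inference call) may be preferable in practice.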