Shankarm08 committed on
Commit
dc79f72
·
verified ·
1 Parent(s): 2999a6f
Files changed (1) hide show
  1. app.py +15 -14
app.py CHANGED
@@ -1,24 +1,25 @@
1
- #Hello! It seems like you want to import the Streamlit library in Python. Streamlit is a powerful open-source framework used for building web applications with interactive data visualizations and machine learning models. To import Streamlit, you'll need to ensure that you have it installed in your Python environment.
2
- #Once you have Streamlit installed, you can import it into your Python script using the import statement,
3
-
4
  import streamlit as st
5
- from langchain import HuggingFaceHub # Correct import for Hugging Face
6
 
7
  # Set your Hugging Face API token
8
  HUGGINGFACE_API_TOKEN = "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"
9
 
10
  # Function to return the response from Hugging Face model
11
  def load_answer(question):
12
- # Initialize the Hugging Face model
13
- llm = HuggingFaceHub(
14
- repo_id="mistralai/Mistral-7B-Instruct-v0.3", # Specify the Hugging Face model
15
- huggingfacehub_api_token=HUGGINGFACE_API_TOKEN, # Pass your API token
16
- model_kwargs={"temperature": 0} # Optional: Control response randomness
17
- )
18
-
19
- # Call the model with the user's question and get the response
20
- answer = llm(question)
21
- return answer
 
 
 
 
22
 
23
  # Streamlit App UI starts here
24
  st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
 
 
 
 
1
  import streamlit as st
2
+ from langchain import HuggingFaceHub
3
 
4
  # Set your Hugging Face API token
5
  HUGGINGFACE_API_TOKEN = "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"
6
 
7
  # Function to return the response from Hugging Face model
8
  def load_answer(question):
9
+ try:
10
+ # Initialize the Hugging Face model using LangChain's HuggingFaceHub class
11
+ llm = HuggingFaceHub(
12
+ repo_id="mistralai/Mistral-7B-Instruct-v0.3", # Hugging Face model repo
13
+ huggingfacehub_api_token=HUGGINGFACE_API_TOKEN, # Pass your API token
14
+ model_kwargs={"temperature": 0} # Optional: Control response randomness
15
+ )
16
+
17
+ # Call the model with the user's question and get the response
18
+ answer = llm.run(question)
19
+ return answer
20
+ except Exception as e:
21
+ # Capture and return any exceptions or errors
22
+ return f"Error: {str(e)}"
23
 
24
  # Streamlit App UI starts here
25
  st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")