KushwanthK committed
Commit 7544677 · verified · Parent: 4452738

Upload 2 files

Files changed (2):
  1. app.py +15 -2
  2. requirements.txt +2 -1
app.py CHANGED
@@ -23,14 +23,22 @@ import torch
 from langchain_community.llms.ollama import Ollama
 from langchain.prompts import ChatPromptTemplate
 from langchain_community.vectorstores import FAISS
+
+#from langchain import PromtTemplate, HuggingFaceHub, LLMChain
 # from langchain.vectorstores import faiss
 # from langchain.vectorstores import FAISS
 
 import time
 from time import sleep
 from stqdm import stqdm
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
 
+os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.getenv("HUGGINGFACEHUB_API_TOKEN")
 
+print(os.environ)
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
@@ -272,8 +280,13 @@ def chat_actions():
 prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
 prompt = prompt_template.format(context=context_text, question=query)
 
-model = Ollama(model="llama3")
-response_text = model.invoke(prompt)
+try:
+    model = Ollama(model="llama3")
+    response_text = model.invoke(prompt)
+    st.write(response_text)
+except Exception as e:
+    st.error(f"Error invoke: {e}")
+
 
 formatted_response = f"Response: {response_text}\nSources: {sources}"
 print(formatted_response)
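A caveat in this hunk: formatted_response still reads response_text after the try/except, so if invoke fails the error is shown and then a NameError follows on the next line. A minimal sketch of one way to guard it (a hypothetical rearrangement, not what the commit does), reusing the commit's own names:

response_text = None
try:
    model = Ollama(model="llama3")
    response_text = model.invoke(prompt)
    st.write(response_text)
except Exception as e:
    st.error(f"Error invoke: {e}")

# Only format and log the response if the model call actually succeeded.
if response_text is not None:
    formatted_response = f"Response: {response_text}\nSources: {sources}"
    print(formatted_response)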
 
requirements.txt CHANGED
@@ -16,4 +16,5 @@ langchain_community
 langchain
 # faiss-gpu
 faiss-cpu==1.7.2
-stqdm
+stqdm
+python-dotenv
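The paired -stqdm/+stqdm lines most likely reflect a missing trailing newline on the old last line rather than a content change; the substantive addition is python-dotenv, which backs the new load_dotenv() call in app.py. A quick startup check (hypothetical helper, not in the commit) that both new dependencies resolve:

import importlib

# python-dotenv installs under the import name "dotenv".
for module in ("stqdm", "dotenv"):
    importlib.import_module(module)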