mery22 and medmac01 committed
Commit 4f4aca6
1 Parent(s): 0f6a909

Refactor imports and update HuggingFaceEndpoint configuration in app.py (#1)


- Refactor imports and update HuggingFaceEndpoint configuration in app.py (f459a311af8fa22bcb46b5fc9bd9965ef35a5e12)


Co-authored-by: Mohammed Machrouh <medmac01@users.noreply.huggingface.co>

Files changed (2)
  1. app.py +13 -41
  2. requirements.txt +1 -2
app.py CHANGED
@@ -1,40 +1,23 @@
 import os
-import torch
-from transformers import (
-    BitsAndBytesConfig,
-    pipeline
-)
 import streamlit as st
 from langchain_community.vectorstores import FAISS
 from langchain_community.embeddings import HuggingFaceEmbeddings
-from langchain_community.llms import HuggingFacePipeline
-from transformers import BitsAndBytesConfig
-from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-from langchain_community.llms import HuggingFaceEndpoint
+
+from langchain_huggingface import HuggingFaceEndpoint
 
 from langchain.prompts import PromptTemplate
 from langchain.schema.runnable import RunnablePassthrough
 from langchain.chains import LLMChain
-import transformers
-from ctransformers import AutoModelForCausalLM, AutoTokenizer
 
-import transformers
-from transformers import pipeline
-from datasets import load_dataset
-
-import transformers
-token=st.secrets["HF_TOKEN"]
 from huggingface_hub import login
 login(token=st.secrets["HF_TOKEN"])
-# Load model directly
-from transformers import AutoTokenizer, AutoModelForCausalLM
-
-tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
-model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
 
 from langchain_community.document_loaders import TextLoader
 from langchain_text_splitters import CharacterTextSplitter
 from langchain_community.document_loaders import PyPDFLoader
+from langchain.chains import RetrievalQA
+from langchain.prompts import PromptTemplate
+from langchain.embeddings.huggingface import HuggingFaceEmbeddings
 
 # Mount Google Drive
 loader = PyPDFLoader("test-1.pdf")
@@ -53,21 +36,6 @@ retriever = db.as_retriever(
 )
 
 
-from langchain_community.llms import HuggingFacePipeline
-from langchain.prompts import PromptTemplate
-from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-
-text_generation_pipeline = transformers.pipeline(
-    model=model,
-    tokenizer=tokenizer,
-    task="text-generation",
-
-    temperature=0.02,
-    repetition_penalty=1.1,
-    return_full_text=True,
-    max_new_tokens=512,
-)
-
 prompt_template = """
 ### [INST]
 Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided, without using prior knowledge. You answer in FRENCH.
@@ -84,7 +52,11 @@ Answer in french only
 
 """
 
-mistral_llm = HuggingFacePipeline(pipeline=text_generation_pipeline)
+repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
+
+mistral_llm = HuggingFaceEndpoint(
+    repo_id=repo_id, max_length=128, temperature=0.5, huggingfacehub_api_token=st.secrets["HF_TOKEN"]
+)
 
 # Create prompt from prompt template
 prompt = PromptTemplate(
@@ -93,7 +65,7 @@ prompt = PromptTemplate(
 )
 
 # Create llm chain
-from langchain.chains import RetrievalQA
+llm_chain = LLMChain(llm=mistral_llm, prompt=prompt)
 
 
 retriever.search_kwargs = {'k':1}
@@ -111,7 +83,7 @@ st.title("Chatbot Interface")
 
 # Define function to handle user input and display chatbot response
 def chatbot_response(user_input):
-    response = qa.get_answer(user_input)
+    response = qa.run(user_input)
     return response
 
 # Streamlit components
@@ -124,4 +96,4 @@ if submit_button:
     bot_response = chatbot_response(user_input)
     st.text_area("Bot:", value=bot_response, height=200)
 else:
-    st.warning("Please enter a message.")
+    st.warning("Please enter a message.")
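In short, the commit swaps the in-process transformers pipeline (which downloaded and ran Mistral-7B locally) for a remote call through HuggingFaceEndpoint, and fixes the response handler: LangChain's RetrievalQA exposes the generic Chain method run(), not get_answer(), so the old code would have raised an AttributeError at query time. Below is a minimal sketch of the new inference path in isolation. The repo_id and sampling parameters come from the diff; the prompt text is a simplified stand-in for the app's [INST] template, and the token is a placeholder (app.py reads st.secrets["HF_TOKEN"]).

from langchain_huggingface import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Remote endpoint in place of a local transformers pipeline; parameters as in the commit.
mistral_llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    max_length=128,
    temperature=0.5,
    huggingfacehub_api_token="hf_...",  # placeholder; the app reads st.secrets["HF_TOKEN"]
)

# Simplified stand-in for the app's [INST] French Q&A template.
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template="Answer in French using only this context:\n{context}\n\nQuestion: {question}",
)

llm_chain = LLMChain(llm=mistral_llm, prompt=prompt)
print(llm_chain.run({"context": "Paris est la capitale de la France.",
                     "question": "Quelle est la capitale de la France ?"}))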
requirements.txt CHANGED
@@ -1,5 +1,4 @@
-bitsandbytes
-peft
+langchain_huggingface
 sentence_transformers
 scipy
 langchain
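The requirements change mirrors the code change: bitsandbytes and peft appear to have been needed only for the local quantized model path, which app.py no longer uses, while langchain_huggingface provides the HuggingFaceEndpoint import used above. A quick sanity check, suggested here rather than part of the commit, before redeploying the Space:

# Run after `pip install -r requirements.txt` to confirm the new dependency resolves.
from langchain_huggingface import HuggingFaceEndpoint
print("langchain_huggingface import OK")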