{user_name} committed
Commit 5711a5a · 1 Parent(s): fb8f647
Removed the part that was run via ollama
app.py CHANGED
@@ -27,19 +27,18 @@ def respond(
 
     messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    # for message in client.chat_completion(
-    #     messages,
-    #     max_tokens=max_tokens,
-    #     stream=True,
-    #     temperature=temperature,
-    #     top_p=top_p,
-    # ):
-    #     token = message.choices[0].delta.content
+    response = "" + rag.SearchDocs(message, k=1)
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
 
-    #     response += token
-    #     yield response
+        response += token
+        yield response
 
     """
     For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
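As committed, app.py concatenates the top retrieved document onto `response` itself, so the raw document text is streamed back to the user before any generated tokens and the model never conditions on it; the loop also shadows the `message` argument as its iteration variable. Below is a minimal sketch, not the committed code, of folding the retrieved context into the prompt instead. It assumes the stock Gradio template this file is based on: huggingface_hub's InferenceClient, tuple-style chat history, and a placeholder model name.

```python
# A minimal sketch (not the committed code) of respond() with the retrieved
# document folded into the prompt instead of the response string.
from huggingface_hub import InferenceClient

import rag  # this repo's rag.py

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # model name is an assumption

def respond(message, history, system_message, max_tokens, temperature, top_p):
    context = rag.SearchDocs(message, k=1)  # top-1 document text
    # Give the model the retrieved context instead of echoing it to the user.
    messages = [{"role": "system", "content": f"{system_message}\n\nContext: {context}"}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    for chunk in client.chat_completion(  # avoid shadowing `message`
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # final stream chunks can carry content=None
            response += token
            yield response
```

Guarding on `token` matters because the last streamed chunks can arrive with an empty delta.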
rag.py CHANGED
@@ -34,30 +34,31 @@ model_huggingface = HuggingFaceEmbeddings(model_name = 'jhgan/ko-sroberta-multit
 db = Chroma.from_documents(sourceDocs, model_huggingface)
 
 ## Querying
-def SearchDocs(question, k = 1):
+def SearchDocs(question, k=1):
     results = db.similarity_search_with_relevance_scores(question, k = k)
-    return results
+    merged = ' '.join([sourceDocs[result[0]][0] for result in results])
+    return merged
 
-################
-# Pass the retrieved documents to the prompt and generate an answer with the LLM
-################
-from langchain_community.chat_models import ChatOllama
-llm = ChatOllama(
-    base_url='http://localhost:11434',
-    # model="phi3:medium", # switched to mini because it was too slow
-    model="phi3:mini",
-)
-
-from langchain_core.prompts import ChatPromptTemplate
-
-prompt = ChatPromptTemplate.from_messages([
-    ("system", "Please answer the following question from the document: {document}"),
-    ("user", "{question}"),
-])
-
-# print('-'*50)
-chain = prompt | llm
-def Response(question):
-    searchedDocs = SearchDocs(question)
-    mergedDoc = ' '.join(searchedDocs[0][0])
-    return chain.invoke({"question": question, "document": mergedDoc})
+# ################
+# # Pass the retrieved documents to the prompt and generate an answer with the LLM
+# ################
+# from langchain_community.chat_models import ChatOllama
+# llm = ChatOllama(
+#     base_url='http://localhost:11434',
+#     # model="phi3:medium", # switched to mini because it was too slow
+#     model="phi3:mini",
+# )
+
+# from langchain_core.prompts import ChatPromptTemplate
+
+# prompt = ChatPromptTemplate.from_messages([
+#     ("system", "Please answer the following question from the document: {document}"),
+#     ("user", "{question}"),
+# ])
+
+# # print('-'*50)
+# chain = prompt | llm
+# def Response(question):
+#     searchedDocs = SearchDocs(question)
+#     mergedDoc = ' '.join(searchedDocs[0][0])
+#     return chain.invoke({"question": question, "document": mergedDoc})
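In the new `SearchDocs`, `db.similarity_search_with_relevance_scores(question, k=k)` returns a list of `(Document, relevance_score)` tuples, so the committed `sourceDocs[result[0]][0]` indexes the source list with a `Document` object and would raise a `TypeError` at query time; the retrieved text lives on `result[0].page_content`. A minimal self-contained sketch under that reading, with stand-in documents that are illustrative rather than from the repo:

```python
# SearchDocs matched to what similarity_search_with_relevance_scores
# actually returns: a list of (Document, relevance_score) tuples.
from langchain_core.documents import Document
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

sourceDocs = [  # stand-ins for the documents rag.py loads
    Document(page_content="서울은 대한민국의 수도이다."),
    Document(page_content="부산은 대한민국에서 두 번째로 큰 도시이다."),
]

model_huggingface = HuggingFaceEmbeddings(model_name='jhgan/ko-sroberta-multitask')
db = Chroma.from_documents(sourceDocs, model_huggingface)

def SearchDocs(question, k=1):
    # each result is a (Document, relevance_score) tuple
    results = db.similarity_search_with_relevance_scores(question, k=k)
    return ' '.join(doc.page_content for doc, _score in results)

print(SearchDocs("대한민국의 수도는 어디인가?", k=1))  # expected: the Seoul document
```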
|