sidcww committed on
Commit c29f8db · verified · 1 Parent(s): cf51fe4

Update app.py

Files changed (1):
  1. app.py +13 -11
app.py CHANGED
@@ -7,20 +7,20 @@ import google.generativeai as genai
 from langchain.chains.question_answering import load_qa_chain
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
-from langsmith import LangSmithClient # Hypothetical import for LangSmith
+# Use the classes or methods that LangSmith actually provides
 
-# Configure Gemini API
+# Configure the Gemini API
 genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
-# Load Mistral model
+# Load the Mistral model
 model_path = "nvidia/Mistral-NeMo-Minitron-8B-Base"
 mistral_tokenizer = AutoTokenizer.from_pretrained(model_path)
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 dtype = torch.bfloat16
 mistral_model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=dtype, device_map=device)
 
-# Initialize LangSmith client
-langsmith_client = LangSmithClient(api_key=os.getenv("LANGSMITH_API_KEY"))
+# Initialize the LangSmith client or another relevant tool
+# client = LangSmithClient(api_key=os.getenv("LANGSMITH_API_KEY"))  # replace with the actual client or tool
 
 def initialize(file_path, question):
     try:
@@ -41,7 +41,7 @@ def initialize(file_path, question):
             stuff_answer = stuff_chain({"input_documents": pages, "question": question, "context": context}, return_only_outputs=True)
             gemini_answer = stuff_answer['output_text']
 
-            # Use Mistral model for additional text generation
+            # Use the Mistral model for text generation
             mistral_prompt = f"Based on this answer: {gemini_answer}\nGenerate a follow-up question:"
             mistral_inputs = mistral_tokenizer.encode(mistral_prompt, return_tensors='pt').to(device)
             with torch.no_grad():
@@ -50,15 +50,16 @@ def initialize(file_path, question):
 
             combined_output = f"Gemini Answer: {gemini_answer}\n\nMistral Follow-up: {mistral_output}"
 
-            # Log and evaluate the results using LangSmith
-            evaluation = langsmith_client.evaluate(gemini_answer, mistral_output, question)
-            return combined_output + f"\n\nEvaluation: {evaluation}"
+            # Use LangSmith here for evaluation or logging
+            # evaluation = client.evaluate(gemini_answer, mistral_output, question)  # replace with the actual evaluation method
+            # return combined_output + f"\n\nEvaluation: {evaluation}"
+            return combined_output
         else:
             return "Error: Unable to process the document. Please ensure the PDF file is valid."
     except Exception as e:
         return f"An error occurred: {str(e)}"
 
-# Define Gradio Interface
+# Define the Gradio interface
 input_file = gr.File(label="Upload PDF File")
 input_question = gr.Textbox(label="Ask about the document")
 output_text = gr.Textbox(label="Answer - Combined Gemini and Mistral")
@@ -68,7 +69,7 @@ def pdf_qa(file, question):
         return "Please upload a PDF file first."
     return initialize(file.name, question)
 
-# Create Gradio Interface
+# Create the Gradio interface
 gr.Interface(
     fn=pdf_qa,
     inputs=[input_file, input_question],
@@ -76,3 +77,4 @@ gr.Interface(
     title="RAG Knowledge Retrieval using Gemini API and Mistral Model",
     description="Upload a PDF file and ask questions about the content."
 ).launch()
+
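
The substance of this commit is that LangSmithClient does not exist in the langsmith package, so the import, the client construction, and the evaluate() call are replaced with commented-out placeholders, and initialize() now returns combined_output alone. If tracing is wanted later, a minimal sketch using the real langsmith SDK's traceable decorator could look like the following (assumes the langsmith package is installed and LANGSMITH_API_KEY plus LANGSMITH_TRACING=true, or LANGCHAIN_TRACING_V2=true on older versions, are set; the function name and output fields are illustrative, not part of the commit):

from langsmith import traceable

# @traceable records the arguments as run inputs and the returned dict as run outputs.
@traceable(run_type="chain", name="pdf_qa_combined")
def log_combined_answer(question, gemini_answer, mistral_follow_up):
    return {"gemini_answer": gemini_answer, "mistral_follow_up": mistral_follow_up}

Calling log_combined_answer(question, gemini_answer, mistral_output) just before building combined_output would log the run to LangSmith without relying on the hypothetical client.evaluate method.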
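
Two stretches of app.py fall outside every hunk's context and are unchanged by this commit. The first is the PDF loading and Gemini chain construction (old lines 27-40). For orientation only, a plausible sketch of that elided setup, assuming the common PyPDFLoader plus load_qa_chain pattern (loader choice and model name are assumptions, not the author's code):

from langchain_community.document_loaders import PyPDFLoader
from langchain_google_genai import ChatGoogleGenerativeAI
# load_qa_chain is already imported at the top of app.py

pdf_loader = PyPDFLoader(file_path)
pages = pdf_loader.load_and_split()                    # one Document per PDF page
llm = ChatGoogleGenerativeAI(model="gemini-pro")       # assumed model name
stuff_chain = load_qa_chain(llm, chain_type="stuff")   # "stuff" packs all pages into one prompt

The extra "context" key passed to stuff_chain in the visible context suggests the real code also supplies a custom prompt template.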
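
The second unchanged stretch is the generation call itself (old lines 48-49). A typical completion of the torch.no_grad() block, with max_new_tokens chosen arbitrarily as an assumption:

with torch.no_grad():
    output_ids = mistral_model.generate(mistral_inputs, max_new_tokens=64)  # token cap is an arbitrary assumption
# Strip the prompt tokens so only the newly generated follow-up question is kept.
mistral_output = mistral_tokenizer.decode(output_ids[0][mistral_inputs.shape[1]:], skip_special_tokens=True)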