Moha782 committed
Commit 578932f · verified · 1 Parent(s): 4972635

Update app.py

Files changed (1)
  1. app.py +23 -10
app.py CHANGED
@@ -3,6 +3,7 @@ from huggingface_hub import InferenceClient
 from pathlib import Path
 from typing import List
 from pdfplumber import open as open_pdf
+from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
@@ -10,7 +11,7 @@ For more information on `huggingface_hub` Inference API support, please check th
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 # Load the PDF file
-pdf_path = Path("apexcustoms.pdf")
+pdf_path = Path("path/to/your/pdf/file.pdf")
 with open_pdf(pdf_path) as pdf:
     text = "\n".join(page.extract_text() for page in pdf.pages)
 
@@ -18,6 +19,11 @@ with open_pdf(pdf_path) as pdf:
 chunk_size = 1000 # Adjust this value based on your needs
 text_chunks: List[str] = [text[i:i+chunk_size] for i in range(0, len(text), chunk_size)]
 
+# Load the VisionEncoderDecoderModel and tokenizer
+model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -40,19 +46,26 @@ def respond(
 
     # Pass relevant chunks as context
     relevant_chunks = [chunk for chunk in text_chunks if message.lower() in chunk.lower()]
+    context = "\n".join(relevant_chunks)
+
+    # Encode the context and user's message
+    inputs = feature_extractor(context, return_tensors="pt")
+    query = tokenizer(message, return_tensors="pt")
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
+    # Generate the response using the VisionEncoderDecoderModel
+    output_ids = model.generate(
+        **inputs,
+        **query,
+        max_length=max_tokens,
         temperature=temperature,
         top_p=top_p,
-        files={"context": "\n".join(relevant_chunks)}, # Pass relevant chunks as context
-    ):
-        token = message.choices[0].delta.content
+        num_beams=4,
+        early_stopping=True
+    )
+
+    response = tokenizer.decode(output_ids.squeeze(), skip_special_tokens=True)
 
-        response += token
-        yield response
+    yield response
 
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
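
Review note: as committed, the new generation path is unlikely to run. ViTFeatureExtractor prepares image tensors, so feature_extractor(context, return_tensors="pt") raises on a plain text string, and splatting both **inputs and **query into model.generate() hands the vision model conflicting inputs. If the intent is text-only Q&A over the PDF chunks, a minimal sketch that reuses the streaming pattern from the removed lines (dropping the files= keyword, which chat_completion does not accept) could look like the following. It relies on the module-level client and text_chunks already defined in app.py; the system-prompt layout for the retrieved context is an assumption, not part of this commit.

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Reuse the same substring-match retrieval as the committed code
    relevant_chunks = [chunk for chunk in text_chunks if message.lower() in chunk.lower()]
    context = "\n".join(relevant_chunks)

    # Hypothetical prompt layout: fold the retrieved context into the system
    # message instead of the unsupported files= keyword the removed lines used
    messages = [{"role": "system", "content": f"{system_message}\n\nContext:\n{context}"}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream tokens back, as the pre-commit code did
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += chunk.choices[0].delta.content or ""
        yield response

Keeping stream=True preserves the incremental yield behavior that the Gradio ChatInterface template referenced in the docstring expects.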