sst1 committed
Commit ecdbb3d
1 Parent(s): 3fde99d

Update app.py

Files changed (1)
  1. app.py +26 -116
app.py CHANGED
@@ -1,117 +1,27 @@
- from pymed import PubMed
- from typing import List
- from haystack import component
- from haystack import Document
- from haystack.components.generators import HuggingFaceTGIGenerator
- from dotenv import load_dotenv
- import os
- from haystack import Pipeline
- from haystack.components.builders.prompt_builder import PromptBuilder
  import gradio as gr
- import time
-
- # load_dotenv()
-
- # os.environ['HUGGINGFACE_API_KEY'] = os.getenv('HUGGINGFACE_API_KEY')
-
-
- pubmed = PubMed(tool="Haystack2.0Prototype", email="dummyemail@gmail.com")
-
- def documentize(article):
-     return Document(content=article.abstract, meta={'title': article.title, 'keywords': article.keywords})
-
- @component
- class PubMedFetcher():
-
-     @component.output_types(articles=List[Document])
-     def run(self, queries: list[str]):
-         cleaned_queries = queries[0].strip().split('\n')
-
-         articles = []
-         try:
-             for query in cleaned_queries:
-                 response = pubmed.query(query, max_results=1)
-                 documents = [documentize(article) for article in response]
-                 articles.extend(documents)
-         except Exception as e:
-             print(e)
-             print(f"Couldn't fetch articles for queries: {queries}")
-         results = {'articles': articles}
-         return results
-
- keyword_llm = HuggingFaceTGIGenerator("liuhaotian/llava-v1.6-mistral-7b")
- keyword_llm.warm_up()
-
- llm = HuggingFaceTGIGenerator("liuhaotian/llava-v1.6-mistral-7b")
- llm.warm_up()
-
-
- keyword_prompt_template = """
- Your task is to convert the following question into 3 keywords that can be used to find relevant medical research papers on PubMed.
- Here is an examples:
- question: "What are the latest treatments for major depressive disorder?"
- keywords:
- Antidepressive Agents
- Depressive Disorder, Major
- Treatment-Resistant depression
- ---
- question: {{ question }}
- keywords:
- """
-
- prompt_template = """
- Answer the question truthfully based on the given documents.
- If the documents don't contain an answer, use your existing knowledge base.
- q: {{ question }}
- Articles:
- {% for article in articles %}
- {{article.content}}
- keywords: {{article.meta['keywords']}}
- title: {{article.meta['title']}}
- {% endfor %}
- """
-
- keyword_prompt_builder = PromptBuilder(template=keyword_prompt_template)
-
- prompt_builder = PromptBuilder(template=prompt_template)
- fetcher = PubMedFetcher()
-
- pipe = Pipeline()
-
- pipe.add_component("keyword_prompt_builder", keyword_prompt_builder)
- pipe.add_component("keyword_llm", keyword_llm)
- pipe.add_component("pubmed_fetcher", fetcher)
- pipe.add_component("prompt_builder", prompt_builder)
- pipe.add_component("llm", llm)
-
- pipe.connect("keyword_prompt_builder.prompt", "keyword_llm.prompt")
- pipe.connect("keyword_llm.replies", "pubmed_fetcher.queries")
-
- pipe.connect("pubmed_fetcher.articles", "prompt_builder.articles")
- pipe.connect("prompt_builder.prompt", "llm.prompt")
-
- def ask(question):
-     output = pipe.run(data={"keyword_prompt_builder": {"question": question},
-                             "prompt_builder": {"question": question},
-                             "llm": {"generation_kwargs": {"max_new_tokens": 500}}})
-     print(question)
-     print(output['llm']['replies'][0])
-     return output['llm']['replies'][0]
-
- # result = ask("How are mRNA vaccines being used for cancer treatment?")
-
- # print(result)
-
- iface = gr.Interface(fn=ask, inputs=gr.Textbox(
-     value="How are mRNA vaccines being used for cancer treatment?"),
-     outputs="markdown",
-     title="LLM Augmented Q&A over PubMed Search Engine",
-     description="Ask a question about BioMedical and get an answer from a friendly AI assistant.",
-     examples=[["How are mRNA vaccines being used for cancer treatment?"],
-               ["Suggest me some Case Studies related to Pneumonia."],
-               ["Tell me about HIV AIDS."], ["Suggest some case studies related to Auto Immune Disorders."],
-               ["How to treat a COVID infected Patient?"]],
-     theme=gr.themes.Soft(),
-     allow_flagging="never",)
-
- iface.launch(debug=True)
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Load the quantized model and tokenizer from the Hub
+ model = AutoModelForCausalLM.from_pretrained("my-quantized-llava-model")
+ tokenizer = AutoTokenizer.from_pretrained("my-quantized-llava-model")
+
+ # Generate a response for an input text and an optional image URL
+ def generate_response(text, image_url=None):
+     # Encode the text (plus the image URL, if one was given) as a single input_ids tensor
+     if image_url:
+         input_ids = tokenizer(f"{text} <img>{image_url}</img>", return_tensors="pt").input_ids
+     else:
+         input_ids = tokenizer(text, return_tensors="pt").input_ids
+
+     # Generate a response using beam search with a length penalty of 0.8
+     output_ids = model.generate(input_ids, max_length=256, num_beams=5, length_penalty=0.8)
+
+     # Decode the output_ids tensor into a string and return it
+     return tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+ # Map the function's inputs and outputs to Gradio components: a question box,
+ # an image-URL box (prefilled with a sample image), and a text output
+ gr.Interface(
+     fn=generate_response,
+     inputs=[
+         gr.Textbox(label="Question"),
+         gr.Textbox(label="Image URL (optional)",
+                    value="https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Roadrunner_Petrochelidon_pyrrhonota.jpg/1200px-Roadrunner_Petrochelidon_pyrrhonota.jpg"),
+     ],
+     outputs="text",
+ ).launch()