kristada673 committed on
Commit
e7127be
1 Parent(s): a056d74

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +114 -140
app.py CHANGED
@@ -1,147 +1,121 @@
1
- import io
2
- import random
3
- from typing import List, Tuple
4
-
5
- import aiohttp
6
- import panel as pn
7
- from PIL import Image
8
- from transformers import CLIPModel, CLIPProcessor
9
-
10
# Initialise Panel with the Bootstrap design system; all widgets stretch to fill width.
pn.extension(design="bootstrap", sizing_mode="stretch_width")

# Footer icon name -> destination URL. Insertion order matters: the footer
# renders the buttons in this exact order.
ICON_URLS = {
    "brand-github": "https://github.com/holoviz/panel",
    "brand-twitter": "https://twitter.com/Panel_Org",
    "brand-linkedin": "https://www.linkedin.com/company/panel-org",
    "message-circle": "https://discourse.holoviz.org/",
    "brand-discord": "https://discord.gg/AXRHnJU6sP",
}
19
-
20
-
21
async def random_url(_):
    """Return the URL of a random cat or dog picture from the public pet APIs."""
    animal = random.choice(["cat", "dog"])
    endpoint = f"https://api.the{animal}api.com/v1/images/search"
    async with aiohttp.ClientSession() as session:
        async with session.get(endpoint) as response:
            payload = await response.json()
    return payload[0]["url"]
27
-
28
-
29
@pn.cache
def load_processor_model(
    processor_name: str, model_name: str
) -> Tuple[CLIPProcessor, CLIPModel]:
    """Download (once, thanks to pn.cache) and return the CLIP processor/model pair."""
    return (
        CLIPProcessor.from_pretrained(processor_name),
        CLIPModel.from_pretrained(model_name),
    )
36
-
37
-
38
async def open_image_url(image_url: str) -> Image:
    """Download *image_url* and return its contents as a PIL Image."""
    async with aiohttp.ClientSession() as session:
        async with session.get(image_url) as response:
            raw = await response.read()
    return Image.open(io.BytesIO(raw))
42
-
43
-
44
def get_similarity_scores(class_items: List[str], image: Image) -> List[float]:
    """Score how well *image* matches each text label via CLIP (softmax over logits)."""
    processor, model = load_processor_model(
        "openai/clip-vit-base-patch32", "openai/clip-vit-base-patch32"
    )
    inputs = processor(
        text=class_items,
        images=[image],
        return_tensors="pt",  # pytorch tensors
    )
    outputs = model(**inputs)
    # One image in, so take row 0 of the (image x label) likelihood matrix.
    probabilities = outputs.logits_per_image.softmax(dim=1).detach().numpy()
    return probabilities[0]
57
-
58
-
59
async def process_inputs(class_names: List[str], image_url: str):
    """
    High level function that takes in the user inputs and returns the
    classification results as panel objects.

    Yields intermediate status markdown while fetching/scoring, then a final
    results Column. The UI (``main``) is disabled for the duration.
    """
    try:
        main.disabled = True
        if not image_url:
            yield "##### ⚠️ Provide an image URL"
            return

        yield "##### ⚙ Fetching image and running model..."
        try:
            pil_img = await open_image_url(image_url)
            img = pn.pane.Image(pil_img, height=400, align="center")
        except Exception:
            # Bad URL, unreachable host, or non-image payload all land here;
            # the exception object itself was never used.
            yield "##### 😔 Something went wrong, please try a different URL!"
            return

        # Strip whitespace once, so the labels sent to the model are the same
        # strings shown to the user (previously only the display was stripped).
        class_items = [name.strip() for name in class_names.split(",")]
        class_likelihoods = get_similarity_scores(class_items, pil_img)

        # build the results column
        results = pn.Column("##### 🎉 Here are the results!", img)

        for class_item, class_likelihood in zip(class_items, class_likelihoods):
            row_label = pn.widgets.StaticText(
                name=class_item, value=f"{class_likelihood:.2%}", align="center"
            )
            row_bar = pn.indicators.Progress(
                value=int(class_likelihood * 100),
                sizing_mode="stretch_width",
                bar_color="secondary",
                margin=(0, 10),
                design=pn.theme.Material,
            )
            results.append(pn.Column(row_label, row_bar))
        yield results
    finally:
        # Re-enable the UI even if fetching or scoring raised.
        main.disabled = False
99
-
100
-
101
# ---- widgets ---------------------------------------------------------------
randomize_url = pn.widgets.Button(name="Randomize URL", align="end")

# The URL field refreshes with a random pet image whenever the button fires.
image_url = pn.widgets.TextInput(
    name="Image URL to classify",
    value=pn.bind(random_url, randomize_url),
)
class_names = pn.widgets.TextInput(
    name="Comma separated class names",
    placeholder="Enter possible class names, e.g. cat, dog",
    value="cat, dog, parrot",
)

input_widgets = pn.Column(
    "##### 😊 Click randomize or paste a URL to start classifying!",
    pn.Row(image_url, randomize_url),
    class_names,
)

# ---- interactivity ---------------------------------------------------------
# Re-runs process_inputs whenever either input widget changes.
interactive_result = pn.panel(
    pn.bind(process_inputs, image_url=image_url, class_names=class_names),
    height=600,
)

# ---- footer ----------------------------------------------------------------
# Spacers on both ends centre the row of social-link buttons.
footer_row = pn.Row(pn.Spacer(), align="center")
for icon, url in ICON_URLS.items():
    href_button = pn.widgets.Button(icon=icon, width=35, height=35)
    href_button.js_on_click(code=f"window.open('{url}')")
    footer_row.append(href_button)
footer_row.append(pn.Spacer())

# ---- layout ----------------------------------------------------------------
main = pn.WidgetBox(
    input_widgets,
    interactive_result,
    footer_row,
)

title = "Panel Demo - Image Classification"
pn.template.BootstrapTemplate(
    title=title,
    main=main,
    main_max_width="min(50%, 698px)",
    header_background="#F08080",
).servable(title=title)
 
1
# One import per line, grouped stdlib / third-party per PEP 8.
import os
import platform

import anthropic
import dotenv
import panel
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader
from langchain_community.vectorstores import Chroma

panel.extension()

# Set API key: read ANTHROPIC_API_KEY from a local .env file / the environment.
dotenv.load_dotenv()
ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY')
12
+
13
@panel.cache
def load_vectorstore():
    """Build (or reload) the Chroma vector store over the PDFs in ``Docs/``.

    Embeddings come from a HuggingFace sentence-transformers model. The store
    is persisted under ``./chroma_db`` so documents are only embedded once;
    ``panel.cache`` keeps the loaded store for the process lifetime.
    """
    # platform.system() returns "Darwin" on every macOS build; the previous
    # check ('"macOS" in platform.platform()') only matches on Pythons whose
    # platform string includes "macOS".
    if platform.system() == "Darwin":
        device = "mps"  # Apple-silicon GPU backend
    else:
        device = "cpu"

    # Create the HF embeddings
    hf_embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-mpnet-base-v2",
        model_kwargs={'device': device},
        encode_kwargs={'normalize_embeddings': False},
    )

    # If the vector embeddings of the documents have not been created
    if not os.path.isfile('chroma_db/chroma.sqlite3'):
        # Load the documents
        loader = DirectoryLoader('Docs/', glob="./*.pdf", loader_cls=PyPDFLoader)
        data = loader.load()

        # Split the docs into chunks
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=50,
        )
        docs = splitter.split_documents(data)

        # Embed the documents and store them in a Chroma DB
        vectorstore = Chroma.from_documents(
            documents=docs,
            embedding=hf_embeddings,
            persist_directory="./chroma_db",
        )
    else:
        # load ChromaDB from disk
        vectorstore = Chroma(
            persist_directory="./chroma_db",
            embedding_function=hf_embeddings,
        )

    return vectorstore


# Rolling conversation history shared across get_response calls.
chat_history = []
55
+
56
async def get_response(contents, user, instance):
    """ChatInterface callback: answer *contents* with RAG over the vector store.

    Retrieves the most similar document chunks, folds them plus the running
    chat history into a single prompt, and streams Claude's reply back to the
    UI by yielding the accumulated text after every token.
    """
    # Fail fast with a readable message instead of an SDK auth exception.
    if not ANTHROPIC_API_KEY:
        yield "ANTHROPIC_API_KEY is not set; add it to your environment or .env file."
        return

    # Load the vectorstore (cached after the first call)
    vectorstore = load_vectorstore()

    question = contents

    # Get the relevant information to form the context on which to query with the LLM
    docs = vectorstore.similarity_search(question)
    context = "\n" + "".join(f"\n{doc.page_content}\n" for doc in docs)

    # Record the user's question first so the prompt's history includes it.
    # (list.append mutates in place, so no `global` declaration is needed.)
    chat_history.append({"role": "user", "content": question})

    # Define prompt template
    prompt = f"""
    Here are the Task Context and History

    - Context: {context}
    - Chat History: {chat_history}
    - User Question: {question}
    """

    # Create the Anthropic client
    client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)

    response = ''
    # Generate the completion, yielding the growing text so the UI updates live
    with client.messages.stream(
        max_tokens=1024,
        messages=[
            {"role": "user", "content": prompt}
        ],
        model="claude-3-haiku-20240307",
    ) as stream:
        for text in stream.text_stream:
            response += text
            yield response

    # Append the assistant's response to the chat_history
    chat_history.append({"role": "assistant", "content": response})
101
+
102
# Wire the RAG callback into a chat UI; "Sarathi" is the assistant persona.
chat_interface = panel.chat.ChatInterface(
    callback=get_response,
    callback_user="Sarathi",
    sizing_mode="stretch_width",
    callback_exception='verbose',
    message_params=dict(
        default_avatars={"Sarathi": "S", "User": "U"},
        reaction_icons={"like": "thumb-up"},
    ),
)

# Greet the visitor without triggering the callback (respond=False).
chat_interface.send(
    {"user": "Sarathi", "value": '''Welcome to Sarathi, your personal assistant for Assam Tourism.'''},
    respond=False,
)

# Serve everything inside a single-tab Bootstrap template.
template = panel.template.BootstrapTemplate(
    title="Sarathi",
    favicon="favicon.png",
    header_background="#000000",
    main=[panel.Tabs(('Chat', chat_interface), dynamic=True)],
)

template.servable()
 
 
 
 
 
 
 
 
 
 
 
121