Futuresony committed
Commit bf2110c · verified · 1 Parent(s): f728e8f

Update app.py
Files changed (1):
  1. app.py +46 -13
app.py CHANGED
@@ -1,35 +1,67 @@
 import gradio as gr
 import os
-from huggingface_hub import InferenceClient
-from textblob import TextBlob
-import json
-import time
+import faiss
+import torch
+from huggingface_hub import InferenceClient, hf_hub_download
+from sentence_transformers import SentenceTransformer
 import logging
 
 # Set up logging
 logging.basicConfig(level=logging.INFO)
 
-# Get the API token from the environment variable
-api_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
+# Hugging Face Credentials
+HF_REPO = "Futuresony/future_ai_12_10_2024.gguf"  # Your model repo
+HF_FAISS_REPO = "Futuresony/future_ai_12_10_2024.gguf"  # Your FAISS repo
+HF_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')  # API token from env
 
+# Load FAISS Index
+faiss_index_path = hf_hub_download(
+    repo_id=HF_FAISS_REPO,
+    filename="asa_faiss.index",
+    repo_type="model",
+    token=HF_TOKEN
+)
+faiss_index = faiss.read_index(faiss_index_path)
+
+# Load Sentence Transformer for embedding queries
+embed_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+
+# Hugging Face Model Client
 client = InferenceClient(
-    model="Futuresony/future_ai_12_10_2024.gguf",
-    token=api_token
+    model=HF_REPO,
+    token=HF_TOKEN
 )
 
+# Function to retrieve relevant context from FAISS
+def retrieve_context(query, top_k=3):
+    """Retrieve relevant past knowledge using FAISS"""
+    query_embedding = embed_model.encode([query], convert_to_tensor=True).cpu().numpy()
+    distances, indices = faiss_index.search(query_embedding, top_k)
+
+    # Convert indices to retrieved text (simulated, as FAISS only returns IDs)
+    retrieved_context = "\n".join([f"Context {i+1}: Retrieved data for index {idx}" for i, idx in enumerate(indices[0])])
+    return retrieved_context
+
+# Function to format input in Alpaca style
 def format_alpaca_prompt(user_input, system_prompt, history):
     """Formats input in Alpaca/LLaMA style"""
+    retrieved_context = retrieve_context(user_input)  # Retrieve past knowledge
     history_str = "\n".join([f"### Instruction:\n{h[0]}\n### Response:\n{h[1]}" for h in history])
+
     prompt = f"""{system_prompt}
 {history_str}
 
 ### Instruction:
 {user_input}
 
+### Retrieved Context:
+{retrieved_context}
+
 ### Response:
 """
     return prompt
 
+# Chatbot response function
 def respond(message, history, system_message, max_tokens, temperature, top_p):
     formatted_prompt = format_alpaca_prompt(message, system_message, history)
 
@@ -40,17 +72,18 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
         top_p=top_p,
     )
 
-    # Extract only the response
+    # Extract only the response
     cleaned_response = response.split("### Response:")[-1].strip()
 
-    history.append((message, cleaned_response))  # Update history with the new message and response
+    history.append((message, cleaned_response))  # Update chat history
 
-    yield cleaned_response  # Output only the answer
+    yield cleaned_response  # Output only the answer
 
+# Gradio Chat Interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Textbox(value="You are a helpful AI.", label="System message"),
         gr.Slider(minimum=1, maximum=250, value=128, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.9, step=0.1, label="Temperature"),
         gr.Slider(minimum=0.1, maximum=1.0, value=0.99, step=0.01, label="Top-p (nucleus sampling)"),
@@ -58,4 +91,4 @@ demo = gr.ChatInterface(
     ],
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
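A note on the retrieval step: as the inline comment concedes, FAISS returns only integer IDs, so `retrieve_context` feeds the model placeholder strings ("Retrieved data for index …") rather than any real stored text. Below is a minimal sketch of a lookup-backed variant, assuming the source texts were uploaded to the same repo in index order; the `asa_texts.json` filename and its presence in the repo are hypothetical, not part of this commit.

```python
import json
from huggingface_hub import hf_hub_download

# Hypothetical companion file: the texts the index was built from,
# stored in the same order as the vectors in asa_faiss.index.
texts_path = hf_hub_download(
    repo_id=HF_FAISS_REPO,
    filename="asa_texts.json",  # assumed filename, not in this commit
    repo_type="model",
    token=HF_TOKEN,
)
with open(texts_path, encoding="utf-8") as f:
    corpus = json.load(f)

def retrieve_context(query, top_k=3):
    """Map FAISS hit IDs back to the actual stored texts."""
    query_embedding = embed_model.encode([query], convert_to_tensor=True).cpu().numpy()
    distances, indices = faiss_index.search(query_embedding, top_k)
    hits = [corpus[idx] for idx in indices[0] if idx != -1]  # -1 marks an empty slot
    return "\n".join(f"Context {i+1}: {text}" for i, text in enumerate(hits))
```

With a lookup like this in place, the `### Retrieved Context:` section of the prompt would carry actual knowledge instead of bare index numbers.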
 
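For reference, here is one way the `asa_faiss.index` file this commit downloads could be built offline. This is a sketch only: the commit does not reveal the index type or the corpus, so the `IndexFlatL2` choice, the sample texts, and the `asa_texts.json` companion file (used by the lookup sketch above) are all assumptions. What matters is that the index is built with the same `all-MiniLM-L6-v2` embedder the app uses at query time.

```python
import json
import faiss
from sentence_transformers import SentenceTransformer

# Must match the embedder used at query time, or the distances are meaningless.
embed_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

# Placeholder corpus; in practice, the app's knowledge snippets.
texts = [
    "Example knowledge snippet one.",
    "Example knowledge snippet two.",
]

embeddings = embed_model.encode(texts).astype("float32")  # shape (n, 384) for MiniLM-L6
index = faiss.IndexFlatL2(embeddings.shape[1])  # exact L2 search; index type assumed
index.add(embeddings)

faiss.write_index(index, "asa_faiss.index")
with open("asa_texts.json", "w", encoding="utf-8") as f:
    json.dump(texts, f)  # keep texts aligned with vector order for query-time lookup
```

Both files would then be uploaded to the `HF_FAISS_REPO` model repo (for example with `huggingface_hub.upload_file`) so the app can fetch them at startup.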