asif00 committed
Commit 736e7a3
1 Parent(s): 5d643f3

Update: minor changes

Files changed (2)
  1. app.py +9 -4
  2. src/brain.py +1 -0
app.py CHANGED
@@ -1,13 +1,16 @@
+import os
 import gradio as gr
 from src.brain import generate_answers
 from huggingface_hub import login
-import os
 from dotenv import load_dotenv
-load_dotenv()
-HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
-login(token=HUGGINGFACE_TOKEN)
+
+load_dotenv()
+token = os.environ.get("TOKEN")
+login(token=token)
 
 processing = False
+
+
 def response(query, history):
     global processing
     processing = True
@@ -26,9 +29,11 @@ with open("src/content.html", "r") as file:
 title_html = parts[0]
 bts_html = parts[1] if len(parts) > 1 else ""
 
+
 def loading():
     return "Loading ..."
 
+
 with gr.Blocks(css=css) as app:
     with gr.Column(elem_id="column_container"):
         gr.HTML(title_html)
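
Note: the substantive change here moves Hugging Face authentication to a generic TOKEN environment variable read through python-dotenv before huggingface_hub.login is called. A minimal sketch of how this startup path might be exercised locally; the .env contents, the hf_... placeholder, and the if-guard are illustrative and not part of the commit:

# .env (illustrative only -- never commit a real token)
# TOKEN=hf_xxxxxxxxxxxxxxxxxxxx

import os

from dotenv import load_dotenv
from huggingface_hub import login

load_dotenv()                    # copies TOKEN from a local .env into os.environ if present
token = os.environ.get("TOKEN")  # None when the variable is not set
if token:                        # guard added for illustration; the commit calls login(token=token) unconditionally
    login(token=token)           # authenticates the hub client, needed for gated models such as google/gemma-2b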
src/brain.py CHANGED
@@ -4,6 +4,7 @@ model_name = "google/gemma-2b"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
+
 def generate_answers(query):
     input_ids = tokenizer(query, return_tensors="pt")
     output = model.generate(**input_ids)
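
Note: src/brain.py loads google/gemma-2b with transformers; the only change in this commit is the extra blank line before the function definition (PEP 8 spacing). The hunk ends before generate_answers returns, so the decoding step and the max_new_tokens value in the sketch below are assumptions added for illustration, not code from the repository:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "google/gemma-2b"  # gated model; relies on the hub login performed in app.py
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def generate_answers(query):
    # tokenizer(...) returns input_ids plus attention_mask, unpacked into generate()
    input_ids = tokenizer(query, return_tensors="pt")
    output = model.generate(**input_ids, max_new_tokens=128)  # max_new_tokens is an assumed value
    return tokenizer.decode(output[0], skip_special_tokens=True)  # decode/return step assumed; not shown in the diff


print(generate_answers("What is Gradio?"))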