cdancy committed on
Commit
30ac5c2
Β·
1 Parent(s): b3db947

towards transferring over work from notebook

Browse files
Files changed (2) hide show
  1. app.py +15 -4
  2. requirements.txt +7 -1
app.py CHANGED
@@ -5,7 +5,18 @@ from typing import Iterator
5
  import gradio as gr
6
  import spaces
7
  import torch
8
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
 
 
 
 
 
 
 
 
 
 
9
 
10
  MAX_MAX_NEW_TOKENS = 2048
11
  DEFAULT_MAX_NEW_TOKENS = 512
@@ -14,9 +25,9 @@ DEFAULT_SYS_PROMPT = """\
14
  """
15
 
16
  DESCRIPTION = """\
17
- # Test Chat Information System for MEPO Summer Bridge 2024 courtesy of Dr. Dancy & THiCC Lab
18
 
19
- Duplicated then modified from [llama-2 7B example](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat)
20
  """
21
 
22
  LICENSE = """
@@ -38,7 +49,7 @@ if torch.cuda.is_available():
38
  tokenizer.use_default_system_prompt = False
39
 
40
 
41
- @spaces.GPU(duration=120)
42
  def generate(
43
  message: str,
44
  chat_history: list[tuple[str, str]],
 
5
  import gradio as gr
6
  import spaces
7
  import torch
8
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, TextStreamer
9
+ from llama_index.core.prompts.prompts import SimpleInputPrompt
10
+ from llama_index.llms.huggingface import HuggingFaceLLM
11
+ from llama_index.legacy.embeddings.langchain import LangchainEmbedding
12
+ from langchain.embeddings.huggingface import HuggingFaceEmbeddings # This import should now work
13
+ from sentence_transformers import SentenceTransformer
14
+
15
+ from llama_index.core import set_global_service_context, ServiceContext
16
+
17
+ from llama_index.core import VectorStoreIndex, download_loader, Document # Import Document
18
+ from pathlib import Path
19
+ import fitz # PyMuPDF
20
 
21
  MAX_MAX_NEW_TOKENS = 2048
22
  DEFAULT_MAX_NEW_TOKENS = 512
 
25
  """
26
 
27
  DESCRIPTION = """\
28
+ # Test Chat Information System for MEPO 2024 courtesy of Dr. Dancy & THiCC Lab
29
 
30
+ Duplicated, then modified from [llama-2 7B example](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat)
31
  """
32
 
33
  LICENSE = """
 
49
  tokenizer.use_default_system_prompt = False
50
 
51
 
52
+ @spaces.GPU(duration=240)
53
  def generate(
54
  message: str,
55
  chat_history: list[tuple[str, str]],
requirements.txt CHANGED
@@ -5,4 +5,10 @@ scipy==1.13.0
5
  sentencepiece==0.2.0
6
  spaces==0.28.3
7
  torch==2.0.0
8
- transformers==4.41.0
 
 
 
 
 
 
 
5
  sentencepiece==0.2.0
6
  spaces==0.28.3
7
  torch==2.0.0
8
+ transformers==4.41.0
9
+ #einops
10
+ sentence-transformers
11
+ PyPDF2
12
+ PyMuPDF
13
+ langchain
14
+ langchain-community