gufett0 committed
Commit 708da42 · 1 Parent(s): 2d75926

moved splitter out

Files changed (2)
  1. backend.py +5 -9
  2. interface.py +1 -2
backend.py CHANGED
@@ -52,25 +52,21 @@ Settings.llm = GemmaLLMInterface(model=model, tokenizer=tokenizer)
 
 ############################---------------------------------
 
+# Get the parser
+parser = SentenceSplitter.from_defaults(
+    chunk_size=256, chunk_overlap=64, paragraph_separator="\n\n"
+)
 
 
 
 @spaces.GPU(duration=120)
 def handle_query(query_str, chathistory):
 
-    # Reading documents from disk
+    # build the vector
     documents = SimpleDirectoryReader(input_files=["data/blockchainprova.txt"]).load_data()
-
-    # Splitting the document into chunks
-    parser = SentenceSplitter.from_defaults(
-        chunk_size=256, chunk_overlap=64, paragraph_separator="\n\n"
-    )
     nodes = parser.get_nodes_from_documents(documents)
-
-    # BUILD A VECTOR STORE
     index = VectorStoreIndex(nodes)
 
-
     qa_prompt_str = (
         "Context information is below.\n"
         "---------------------\n"
interface.py CHANGED
@@ -5,11 +5,10 @@ from typing import Any
 import torch
 from transformers import TextIteratorStreamer
 from threading import Thread
-import spaces
 
 
 
-@spaces.GPU(duration=120)
+
 class GemmaLLMInterface(CustomLLM):
     model: Any
     tokenizer: Any
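For context, the hunk header in backend.py shows how the now-undecorated class is consumed: Settings.llm = GemmaLLMInterface(model=model, tokenizer=tokenizer). The sketch below illustrates that wiring under stated assumptions; the Gemma checkpoint name and the transformers loading calls are hypothetical stand-ins, since the commit does not show them.

from transformers import AutoModelForCausalLM, AutoTokenizer
from llama_index.core import Settings

from interface import GemmaLLMInterface

# Hypothetical checkpoint; the commit does not show which Gemma model is loaded.
model_id = "google/gemma-2-2b-it"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# With the class-level @spaces.GPU removed, GPU time is requested only by the
# decorated handle_query in backend.py; the LLM wrapper stays a plain CustomLLM.
Settings.llm = GemmaLLMInterface(model=model, tokenizer=tokenizer)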