peder
committed on
Commit
·
9bc4391
1
Parent(s):
b08d994
cache 2
Browse files
app.py
CHANGED
@@ -107,7 +107,7 @@ class TextGeneration:
|
|
107 |
self.model_name_or_path = MODEL_NAME
|
108 |
set_seed(42)
|
109 |
|
110 |
-
|
111 |
def load(self):
|
112 |
print("Loading model... ", end="")
|
113 |
self.tokenizer = AutoTokenizer.from_pretrained(
|
@@ -169,7 +169,7 @@ def generate_prompt(instruction, input=None):
|
|
169 |
|
170 |
# @st.cache(allow_output_mutation=True, hash_funcs={AutoModelForCausalLM: lambda _: None})
|
171 |
# @st.cache(allow_output_mutation=True, hash_funcs={TextGeneration: lambda _: None})
|
172 |
-
|
173 |
def load_text_generator():
|
174 |
generator = TextGeneration()
|
175 |
generator.load()
|
|
|
107 |
self.model_name_or_path = MODEL_NAME
|
108 |
set_seed(42)
|
109 |
|
110 |
+
#@st.cache_resource
|
111 |
def load(self):
|
112 |
print("Loading model... ", end="")
|
113 |
self.tokenizer = AutoTokenizer.from_pretrained(
|
|
|
169 |
|
170 |
# @st.cache(allow_output_mutation=True, hash_funcs={AutoModelForCausalLM: lambda _: None})
|
171 |
# @st.cache(allow_output_mutation=True, hash_funcs={TextGeneration: lambda _: None})
|
172 |
+
@st.cache_resource
|
173 |
def load_text_generator():
|
174 |
generator = TextGeneration()
|
175 |
generator.load()
|