Adapting space to philosophy model
app.py
CHANGED
@@ -7,28 +7,24 @@ import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-MAX_MAX_NEW_TOKENS =
-DEFAULT_MAX_NEW_TOKENS =
+MAX_MAX_NEW_TOKENS = 4096
+DEFAULT_MAX_NEW_TOKENS = 2048
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# Llama-
+# Llama-3 8B Stanford Encyclopedia of Philosophy QA
 
-This Space
-
-🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-
-🔨 Looking for an even more powerful model? Check out the large [**70B** model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
-🐇 For a smaller model that you can run on many GPUs, check our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
+This Space showcases the Ruggsea/Llama3-stanford-encyclopedia-philosophy-QA model, a fine-tuned version of the Meta-Llama-3-8B-Instruct model tailored for answering philosophical questions in a formal and informative tone. The model was trained on the Stanford Encyclopedia of Philosophy-instruct dataset with a carefully crafted system prompt, emulating the expertise of a university professor of philosophy.
 
+Feel free to interact with the model by asking philosophical questions and exploring its informative responses.
 """
 
 LICENSE = """
 <p/>
 
 ---
-As a derivate work of [Llama-
-this demo is governed by the original [license](https://huggingface.co/
+As a derivative work of [Llama-3-8b-instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) by Meta,
+this demo is governed by the original [license](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/blob/main/LICENSE) and [acceptable use policy](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/blob/main/USE_POLICY.md).
 """
 
 if not torch.cuda.is_available():
@@ -36,7 +32,7 @@ if not torch.cuda.is_available():
 
 
 if torch.cuda.is_available():
-    model_id = "
+    model_id = "ruggsea/Llama3-stanford-encyclopedia-philosophy-QA"
     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.use_default_system_prompt = False
@@ -90,7 +86,9 @@ def generate(
 chat_interface = gr.ChatInterface(
     fn=generate,
     additional_inputs=[
-        gr.Textbox(label="System prompt", lines=6
+        gr.Textbox(label="System prompt", lines=6,
+                   value="You are an expert and informative yet accessible Philosophy university professor. Students will pose you philosophical questions; answer them in a correct and rigorous but not too obscure way."
+        ),
         gr.Slider(
             label="Max new tokens",
             minimum=1,
@@ -129,11 +127,11 @@ chat_interface = gr.ChatInterface(
     ],
     stop_btn=None,
     examples=[
-        ["
-        ["Can you explain briefly to me
-        ["Explain the
-        ["
-        ["
+        ["What is a monad?"],
+        ["Can you explain briefly to me the difference between left and right Hegelians?"],
+        ["Explain the Computational theory of mind"],
+        ["What is a justified true belief?"],
+        ["How does Wittgenstein define a 'language game'?"],
     ],
 )
 
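For reference, here is a minimal sketch of how the retargeted checkpoint could be queried outside the Space, reusing the model id, 4-bit loading, and system prompt introduced in this commit. The Space's generate() helper is not part of this diff, so the apply_chat_template call and the sampling settings below are assumptions based on the standard transformers API rather than the Space's exact code.

# Sketch only: mirrors the loading arguments from app.py (4-bit, auto device map)
# and asks one of the example questions with the new system prompt.
# The chat-template and sampling choices below are assumptions, not the Space's code.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ruggsea/Llama3-stanford-encyclopedia-philosophy-QA"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", load_in_4bit=True  # needs a CUDA GPU and bitsandbytes, as in the Space
)

system_prompt = (
    "You are an expert and informative yet accessible Philosophy university professor. "
    "Students will pose you philosophical questions; answer them in a correct and "
    "rigorous but not too obscure way."
)
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": "What is a justified true belief?"},
]

# Build the Llama-3 chat prompt and generate up to DEFAULT_MAX_NEW_TOKENS (2048) tokens.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=2048, do_sample=True, top_p=0.9)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))

Loading in 4-bit keeps the 8B model's memory footprint small enough for a single GPU, which is why the Space passes load_in_4bit=True rather than loading full-precision weights.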