Spaces: Running on L4
Update app.py
app.py CHANGED
```diff
@@ -2,7 +2,7 @@
 
 import gradio as gr
 import os
-
+
 import torch
 import torch.nn.functional as F
 from peft import PeftConfig, PeftModel
@@ -27,7 +27,7 @@ model.merge_and_unload()
 model = model.to(device)
 
 tokenizer = AutoTokenizer.from_pretrained(base_model_name)
-
+
 PROMPT = """
 INSTRUCTIONS
 ============
@@ -94,7 +94,6 @@ DEFAULT_CONTENT = "Put your content sample here."
 
 # Function to make predictions
 def predict(content, policy):
-    return "TEST"
     input_text = PROMPT.format(policy=policy, content=content)
     input_ids = tokenizer.encode(input_text, return_tensors="pt")
 
@@ -122,7 +121,7 @@ def predict(content, policy):
 # Create the interface
 
 with gr.Blocks() as demo:
-    gr.Markdown("#
+    gr.Markdown("# Zentropi CoPE Demo")
 
     with gr.Row():
         # Left column with inputs
@@ -135,21 +134,21 @@ with gr.Blocks() as demo:
         output = gr.Textbox(label="Result")
         submit_btn = gr.Button("Submit")
         notes = gr.Markdown("""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+## About CoPE
+
+CoPE (the COntent Policy Evaluation engine) is a small language model capable of accurate content policy labeling. This is a **demo** of our initial release and should **NOT** be used for any production use cases.
+
+## How to Use
+
+1. Enter your content in the "Content" box.
+2. Specify your policy in the "Policy" box.
+3. Click "Submit" to see the results.
+
+## More Info
+
+- [Give us feedback](https://forms.gle/BHpt6BpH2utaf4ez9) to help us improve
+- [Read our FAQ](https://docs.google.com/document/d/1Cp3GJ5k2I-xWZ4GK9WI7Xv8TpKdHmjJ3E9RbzP5Cc_Y/edit) to learn more about CoPE
+- [Join our mailing list](https://forms.gle/PCABrZdhTuXE9w9ZA) to keep in touch
 """)
 
         # Button below inputs
```
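For orientation, the context lines in the second hunk (`model.merge_and_unload()`, `model = model.to(device)`, `tokenizer = AutoTokenizer.from_pretrained(base_model_name)`) follow the usual PEFT load-and-merge pattern. Below is a minimal sketch of that pattern; the adapter repository id and the causal-LM head are assumptions, not taken from the Space's source.

```python
# Minimal sketch of the load-and-merge pattern implied by the diff's context lines.
# The adapter repo id is hypothetical; only the general pattern is shown.
import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

adapter_repo = "your-org/cope-adapter"  # hypothetical adapter repository
peft_config = PeftConfig.from_pretrained(adapter_repo)
base_model_name = peft_config.base_model_name_or_path

device = "cuda" if torch.cuda.is_available() else "cpu"

base = AutoModelForCausalLM.from_pretrained(base_model_name)
model = PeftModel.from_pretrained(base, adapter_repo)
model = model.merge_and_unload()  # fold the adapter weights into the base model
model = model.to(device)

tokenizer = AutoTokenizer.from_pretrained(base_model_name)
```

Merging before inference avoids the per-forward overhead of keeping the adapter as a separate module, which is a reasonable choice for an interactive demo on a single L4.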
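With the `return "TEST"` stub removed, `predict` now performs the real model call, but the diff only shows the prompt formatting and encoding steps. The sketch below shows one way the remaining scoring step could look; the label tokens and the single forward pass are assumptions for illustration, not the Space's actual code.

```python
# Hedged sketch of a predict() flow consistent with the lines shown in the diff.
# The label-token comparison is an assumed scoring scheme, not CoPE's actual one.
import torch
import torch.nn.functional as F

def predict(content, policy):
    input_text = PROMPT.format(policy=policy, content=content)
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)

    with torch.no_grad():
        logits = model(input_ids).logits[0, -1]  # logits for the next token

    probs = F.softmax(logits, dim=-1)

    # Hypothetical labels: compare the probability of an "allowed" vs. a "violating" answer token.
    candidates = {"allowed": "0", "violating": "1"}
    scores = {
        name: probs[tokenizer.encode(tok, add_special_tokens=False)[-1]].item()
        for name, tok in candidates.items()
    }
    return max(scores, key=scores.get)
```

In the interface from the last two hunks, this function would then be wired to the button with something like `submit_btn.click(fn=predict, inputs=[content, policy], outputs=output)` (component variable names assumed).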