Update README.md
README.md CHANGED
````diff
@@ -131,9 +131,10 @@ sequence_length = inputs["input_ids"].shape[1]
 new_output_ids = output_ids[:, sequence_length:]
 answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)
 
-print(answers)
+print(answers[0])
 ```
 
+
 **Safe inference with Azure AI Content Safety**
 
 The usage of [Azure AI Content Safety](https://azure.microsoft.com/en-us/products/ai-services/ai-content-safety/) on top of model prediction is strongly encouraged
````
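A note on the `print` change above: `tokenizer.batch_decode` returns a list with one string per sequence, so with a single prompt `answers[0]` is the completion itself, while `print(answers)` printed the list repr. A minimal illustration, with a hypothetical decoded value:

```python
answers = ["No"]   # what batch_decode returns for one prompt (value hypothetical)
print(answers)     # ['No']  <- old line: prints the list repr
print(answers[0])  # No      <- new line: prints just the completion
```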
````diff
@@ -184,29 +185,18 @@ def should_filter_out(input_text, threshold=4):
 
     return max_score >= threshold
 
-def run_inference(model_path, inputs):
-    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-    model = transformers.AutoModelForCausalLM.from_pretrained(model_path)
-    model.to(device)
-
-    tokenizer = transformers.AutoTokenizer.from_pretrained(
-        model_path,
-        model_max_length=4096,
-        padding_side="right",
-        use_fast=False,
-        add_special_tokens=False,
-    )
-    inputs = tokenizer(inputs, return_tensors='pt')
-    inputs = inputs.to(device)
-
-    output_ids = model.generate(inputs["input_ids"], max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
-    sequence_length = inputs["input_ids"].shape[1]
-    new_output_ids = output_ids[:, sequence_length:]
-    answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)
-
-    return answers
-
 model_path = 'microsoft/Orca-2-7b'
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+model = transformers.AutoModelForCausalLM.from_pretrained(model_path)
+model.to(device)
+
+tokenizer = transformers.AutoTokenizer.from_pretrained(
+    model_path,
+    model_max_length=4096,
+    padding_side="right",
+    use_fast=False,
+    add_special_tokens=False,
+)
 
 system_message = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
 user_message = "\" \n :You can't just say, \"\"that's crap\"\" and remove it without gaining a consensus. You already know this, based on your block history. —/ \" \nIs the comment obscene? \nOptions : Yes, No."
````
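The context lines above show only the tail of `should_filter_out`. For readers assembling the example, here is a minimal sketch of such a filter against the Azure AI Content Safety text-analysis REST API; the environment-variable names and the `requests`-based call are assumptions for illustration, not the README's actual helper.

```python
import os
import requests

def should_filter_out(input_text, threshold=4):
    """Return True when Azure AI Content Safety rates the text at or above
    `threshold` severity in any harm category."""
    endpoint = os.environ["CONTENT_SAFETY_ENDPOINT"]  # hypothetical variable name
    key = os.environ["CONTENT_SAFETY_KEY"]            # hypothetical variable name
    response = requests.post(
        f"{endpoint}/contentsafety/text:analyze?api-version=2023-10-01",
        headers={"Ocp-Apim-Subscription-Key": key, "Content-Type": "application/json"},
        json={"text": input_text},
    )
    response.raise_for_status()
    # The service scores categories such as Hate, SelfHarm, Sexual, and Violence;
    # keep the highest severity it reports across them.
    max_score = max(item["severity"] for item in response.json()["categoriesAnalysis"])
    return max_score >= threshold
```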
````diff
@@ -214,7 +204,13 @@ user_message = "\" \n :You can't just say, \"\"that's crap\"\" and remove it wit
 # We use Chat Markup Language https://github.com/MicrosoftDocs/azure-docs/blob/main/articles/ai-services/openai/includes/chat-markup-language.md#working-with-chat-markup-language-chatml
 prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
 
-answers = run_inference(model_path, prompt)
+inputs = tokenizer(prompt, return_tensors='pt')
+inputs = inputs.to(device)
+
+output_ids = model.generate(inputs["input_ids"], max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
+sequence_length = inputs["input_ids"].shape[1]
+new_output_ids = output_ids[:, sequence_length:]
+answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)
 final_output = answers[0] if not should_filter_out(answers[0]) else "[Content Filtered]"
 
 print(final_output)
````
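A side note on the prompt line: the ChatML string is assembled by hand. A small helper that produces the same layout for arbitrary system/user turns (a sketch, not part of the README) could be:

```python
def build_chatml_prompt(system_message: str, user_message: str) -> str:
    # Each ChatML turn is wrapped in <|im_start|>{role}\n ... <|im_end|>;
    # the trailing <|im_start|>assistant cues the model to reply.
    return (
        f"<|im_start|>system\n{system_message}<|im_end|>\n"
        f"<|im_start|>user\n{user_message}<|im_end|>\n"
        f"<|im_start|>assistant"
    )

# Produces exactly the prompt string used above.
prompt = build_chatml_prompt(system_message, user_message)
```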
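For reference, here is the post-change example assembled into one self-contained script. Only the `torch` and `transformers` imports are additions; `should_filter_out` is assumed to be defined as in the README (or as sketched above); everything else follows the new text of the diff.

```python
import torch
import transformers

model_path = 'microsoft/Orca-2-7b'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = transformers.AutoModelForCausalLM.from_pretrained(model_path)
model.to(device)

tokenizer = transformers.AutoTokenizer.from_pretrained(
    model_path,
    model_max_length=4096,
    padding_side="right",
    use_fast=False,
    add_special_tokens=False,
)

system_message = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
user_message = "\" \n :You can't just say, \"\"that's crap\"\" and remove it without gaining a consensus. You already know this, based on your block history. —/ \" \nIs the comment obscene? \nOptions : Yes, No."

# Chat Markup Language prompt, exactly as in the README.
prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"

inputs = tokenizer(prompt, return_tensors='pt')
inputs = inputs.to(device)

# Greedy decoding; slice off the prompt tokens so only the completion is decoded.
output_ids = model.generate(inputs["input_ids"], max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
sequence_length = inputs["input_ids"].shape[1]
new_output_ids = output_ids[:, sequence_length:]
answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)

# Route the answer through the content filter before showing it.
final_output = answers[0] if not should_filter_out(answers[0]) else "[Content Filtered]"
print(final_output)
```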