# This prompt is from message 2. The goal is to generate 100 messages per
# prompt, produced as two batches of 50 (RAM limits), each saved to its own CSV.

prompt2 = "Vaping is risky"

# return_tensors="pt" requests PyTorch tensors (TensorFlow would be "tf";
# we use PyTorch here).
inputs = tokenizer(prompt2, return_tensors="pt")


def _generate_batch(encoded_inputs, batch_size=50):
    """Sample `batch_size` continuations from the BLOOM model and return
    them as a list of decoded strings (special tokens stripped).

    Each decoded message is also echoed to stdout, preserving the
    original script's "<index>: <text>" progress output.
    """
    outputs = bloom.generate(
        encoded_inputs["input_ids"],
        temperature=0.7,
        max_new_tokens=60,
        do_sample=True,
        top_k=40,
        top_p=0.9,
        num_return_sequences=batch_size,
    )
    decoded = []
    for i, sample_output in enumerate(outputs):
        text = tokenizer.decode(sample_output, skip_special_tokens=True)
        print("{}: {}".format(i, text))
        decoded.append(text)
    return decoded


print("Output:\n" + 100 * '-')

# First batch of 50 messages (50 at a time due to RAM restrictions).
messages = _generate_batch(inputs)
print(messages)

# Save batch 1 to Google Drive (the working directory is assumed to be a
# mounted Drive folder — confirm against the notebook setup cells).
AI_messages = pd.DataFrame(messages, columns=['tweet'])
AI_messages.to_csv('Vaping is risky1.csv', index=False)

# Second batch of 50 messages for the same prompt.
# BUG FIX: the original saved the *same* batch twice, so the second CSV was a
# duplicate of the first. We now re-sample so it holds 50 fresh messages.
# (The original comment said "prompt1", but the filename shows this is the
# second batch for prompt2.)
messages = _generate_batch(inputs)
AI_messages = pd.DataFrame(messages, columns=['tweet'])
AI_messages.to_csv('Vaping is risky2.csv', index=False)

# This prompt is from message 3; again the goal is 100 messages per prompt.

prompt3 = "Vapes and e-cigarettes increase your risk"

# Tokenize for PyTorch ("pt"). TensorFlow is also an option, but we use
# PyTorch here.
inputs = tokenizer(prompt3, return_tensors="pt")

# Sample 50 messages per call because of RAM restrictions.
sample_outputs = bloom.generate(
    inputs["input_ids"],
    temperature=0.7,
    max_new_tokens=60,
    do_sample=True,
    top_k=40,
    top_p=0.9,
    num_return_sequences=50,
)

print("Output:\n" + 100 * '-')

# Decode every sampled sequence, echoing each one as "<index>: <text>".
messages = []
for idx, output_ids in enumerate(sample_outputs):
    decoded_text = tokenizer.decode(output_ids, skip_special_tokens=True)
    print("{}: {}".format(idx, decoded_text))
    messages.append(decoded_text)

print(messages)

# Save the AI-generated messages to Google Drive (working directory is
# presumably a mounted Drive folder — verify against the setup cells).
AI_messages = pd.DataFrame(messages, columns=['tweet'])
AI_messages.to_csv('Vapes and e-cigarettes increase your risk1.csv', index=False)

Downloads last month: —

Downloads are not tracked for this model. How to track
Inference Providers NEW
This model is not currently available via any of the supported Inference Providers.
The model cannot be deployed to the HF Inference API: The model has no library tag.

Model tree for jackdoczy/Generating_Messages_Assignment

Finetuned
(330)
this model

Dataset used to train jackdoczy/Generating_Messages_Assignment