---
library_name: transformers
tags:
- art
datasets:
- gokaygokay/prompt_description_stable_diffusion_3k
language:
- en
pipeline_tag: text2text-generation
---
# Model Card
Fine-tuned EleutherAI/pythia-410m on the gokaygokay/prompt_description_stable_diffusion_3k dataset.
### Direct Use
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
# Fine-tuned pythia-410m checkpoint hosted on the Hugging Face Hub.
model_name = "gokaygokay/phytia410m_desctoprompt"
# Downloads (and caches) the tokenizer and model weights on first use.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Your description
test_description = """
View to a rustic terrace filled with pots with autumn flowers and a vine full of red leaves and bunches of grapes.
in the foreground a wooden table with a copious breakfast, coffee, bowls, vases and plates with fruits, nuts, chestnuts, hazelnuts, breads and buns.
"""
# Template matching the fine-tuning format: the model completes the
# "### Prompt:" section from the "### Description:" section.
prompt_template = """### Description:
{description}
### Prompt:
"""
# Full model input: the template with the description substituted in.
text = prompt_template.format(description=test_description)
def inference(text, model, tokenizer, max_input_tokens=1000, max_output_tokens=200):
    """Generate a Stable Diffusion prompt continuation for *text*.

    Args:
        text: Model input, already formatted with the prompt template.
        model: A causal-LM ``transformers`` model.
        tokenizer: The tokenizer matching ``model``.
        max_input_tokens: Truncate the tokenized input to this many tokens.
        max_output_tokens: Maximum number of NEW tokens to generate.

    Returns:
        The generated text with the echoed input prompt stripped off.
    """
    # Tokenize, truncating overly long descriptions to max_input_tokens.
    input_ids = tokenizer.encode(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=max_input_tokens
    )
    # Generate on whatever device the model lives on.
    # FIX: the original passed max_length=max_output_tokens, which caps the
    # TOTAL sequence (prompt + generation) — a prompt of >= max_output_tokens
    # tokens would leave no room for output. max_new_tokens caps only the
    # generated continuation, matching the parameter's name and intent.
    device = model.device
    generated_tokens_with_prompt = model.generate(
        input_ids=input_ids.to(device),
        max_new_tokens=max_output_tokens,
        pad_token_id=tokenizer.eos_token_id,  # silence missing-pad-token warning
    )
    # Decode
    generated_text_with_prompt = tokenizer.batch_decode(
        generated_tokens_with_prompt, skip_special_tokens=True
    )
    # Strip the echoed prompt so only the newly generated prompt remains.
    # NOTE(review): this assumes decode reproduces `text` character-for-character
    # at the start of the output — holds for this tokenizer/template combination.
    generated_text_answer = generated_text_with_prompt[0][len(text):]
    return generated_text_answer
# Show the raw input, then run the fine-tuned model and print its prompt.
print("Description input (test):", text)
print("Finetuned model's prompt: ")
print(inference(text, model, tokenizer))
```