Update README.md
Browse files
README.md
CHANGED
@@ -32,6 +32,34 @@ This model is a fine-tuned version of [bigscience/bloom-560m](https://huggingfac
It achieves the following results on the evaluation set:

- Loss: 0.8742
35 |
## Model description
|
36 |
|
37 |
More information needed
|
|
|
It achieves the following results on the evaluation set:

- Loss: 0.8742

## Example of usage

```py
import torch
from transformers import BloomTokenizerFast, BloomForCausalLM

device = 'cuda' if torch.cuda.is_available() else 'cpu'
ckpt = 'mrm8488/bloom-560m-finetuned-sd-prompts'

tokenizer = BloomTokenizerFast.from_pretrained(ckpt)
model = BloomForCausalLM.from_pretrained(ckpt).to(device)

def generate_prompt(text):
    inputs = tokenizer(text, return_tensors='pt')
    # Use the device chosen above so the example also works on CPU-only machines
    input_ids = inputs.input_ids.to(device)
    attention_mask = inputs.attention_mask.to(device)
    output = model.generate(input_ids, attention_mask=attention_mask, max_length=2048, eos_token_id=tokenizer.eos_token_id)  # num_beams=3, temperature=1.9

    return tokenizer.decode(output[0], skip_special_tokens=False)

text = "<s>Prompt: pikachu dinning in the eiffel tower"

generate_prompt(text)

# Output: <s>Prompt: pikachu dinning in the eiffel tower, intricate, elegant, highly detailed, digital painting, artstation, concept art, smooth, sharp focus, illustration, art by artgerm and greg rutkowski and alphonse mucha</s>
```

## Model description

More information needed