Blood076 committed on
Commit bbcc394
1 Parent(s): 2fcf107

Update app.py

Files changed (1)
  1. app.py +44 -152
app.py CHANGED
@@ -1,155 +1,47 @@
-import gradio as gr
-import numpy as np
-import random
-
-
-# import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
-import torch
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA"  # Replace with the model you would like to use
-
-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32
-
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-
-# @spaces.GPU #[uncomment to use ZeroGPU]
-def infer(
-    prompt,
-    negative_prompt,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
-):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-    ).images[0]
-
-    return image, seed
-
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]
-
-css = """
-#col-container {
-    margin: 0 auto;
-    max-width: 640px;
+import sagemaker
+import boto3
+from sagemaker.huggingface import HuggingFace
+from datasets import load_dataset
+
+# Get the SageMaker execution role, or look it up in IAM if none is attached
+try:
+    role = sagemaker.get_execution_role()
+except ValueError:
+    iam = boto3.client('iam')
+    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']
+
+# Load the dataset (this local copy is never passed to the training job; see the sketch below)
+dataset = load_dataset("practical-dreamer/RPGPT_PublicDomain-ShareGPT")
+
+# Hyperparameter configuration
+hyperparameters = {
+    'model_name_or_path': 'unsloth/Llama-3.2-11B-Vision-Instruct',
+    'dataset_name': 'practical-dreamer/RPGPT_PublicDomain-ShareGPT',
+    'output_dir': '/opt/ml/model',
+    'learning_rate': 5e-5,
+    'per_device_train_batch_size': 4,
+    'num_train_epochs': 3,
 }
-"""
-
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image Gradio Template")
-
-        with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0, variant="primary")
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
 
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
-                )
-
-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,  # Replace with defaults that work for your model
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=50,
-                    step=1,
-                    value=1,  # Replace with defaults that work for your model
-                )
-
-        gr.Examples(examples=examples, inputs=[prompt])
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn=infer,
-        inputs=[
-            prompt,
-            negative_prompt,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-        ],
-        outputs=[result, seed],
-    )
+# Git repository configuration for downloading the training script
+git_config = {
+    'repo': 'https://github.com/huggingface/transformers.git',
+    'branch': 'v4.37.0'
+}
 
-if __name__ == "__main__":
-    demo.launch()
+# Configure the Hugging Face Estimator
+huggingface_estimator = HuggingFace(
+    entry_point='train.py',
+    source_dir='./path/to/script',  # update to the correct script path
+    instance_type='ml.p3.2xlarge',
+    instance_count=1,
+    role=role,
+    git_config=git_config,
+    transformers_version='4.37.0',
+    pytorch_version='2.1.0',
+    py_version='py310',
+    hyperparameters=hyperparameters
+)
+
+# Start the training job
+huggingface_estimator.fit()
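
Note that the new script calls load_dataset on the notebook instance but then invokes fit() with no input channels, so the training container only sees the data if train.py downloads it again via the dataset_name hyperparameter. If the data should instead flow through SageMaker input channels, a minimal sketch would look like the following (assumptions: the session's default bucket, a "train" split in the dataset, and an illustrative "rpgpt/train" key prefix):

import sagemaker
from datasets import load_dataset

sess = sagemaker.Session()

# Save the train split locally, then upload it to the session's default S3 bucket
dataset = load_dataset("practical-dreamer/RPGPT_PublicDomain-ShareGPT")
dataset["train"].save_to_disk("rpgpt_train")
train_s3_uri = sess.upload_data(path="rpgpt_train", key_prefix="rpgpt/train")  # illustrative prefix

# Pass the S3 location to the job as a named channel; inside the container
# it is mounted at /opt/ml/input/data/train
huggingface_estimator.fit({"train": train_s3_uri})

With this approach, train.py would read its data from the directory given by the SM_CHANNEL_TRAIN environment variable rather than re-downloading by dataset name.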