ruslanmv committed on
Commit 027d724
1 Parent(s): 8981b0b

Merge branch 'dev'

Files changed (9)
  1. .gitignore +2 -0
  2. app.py +136 -135
  3. backup/app.py +146 -0
  4. backup/v1/run.py +187 -0
  5. backup/v2/run.py +161 -0
  6. env.bat +2 -0
  7. female.obj +0 -0
  8. install.bat +29 -0
  9. requirements.txt +6 -1
.gitignore ADDED
@@ -0,0 +1,2 @@
+
+ /.venv
app.py CHANGED
@@ -1,146 +1,147 @@
  import gradio as gr
  import numpy as np
- import random
- from diffusers import DiffusionPipeline
  import torch

  device = "cuda" if torch.cuda.is_available() else "cpu"

- if torch.cuda.is_available():
-     torch.cuda.max_memory_allocated(device=device)
-     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-     pipe.enable_xformers_memory_efficient_attention()
-     pipe = pipe.to(device)
- else:
-     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-     pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
- def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     generator = torch.Generator().manual_seed(seed)
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator,
-     ).images[0]
-     return image
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]
-
- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 520px;
- }
- """
-
- if torch.cuda.is_available():
-     power_device = "GPU"
- else:
-     power_device = "CPU"
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(f"""
-         # Text-to-Image Gradio Template
-         Currently running on {power_device}.
-         """)
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-             run_button = gr.Button("Run", scale=0)
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=512,
-                 )
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=512,
-                 )
-
-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0,
-                 )
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=12,
-                     step=1,
-                     value=2,
-                 )
-
-         gr.Examples(
-             examples=examples,
-             inputs=[prompt],
-         )
-
-     run_button.click(
-         fn=infer,
-         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-         outputs=[result],
-     )
-
- demo.queue().launch()
 
  import gradio as gr
+ import plotly.graph_objs as go
+ import trimesh
  import numpy as np
+ from PIL import Image
  import torch
+ from diffusers import StableDiffusionPipeline
+ import os
+ import matplotlib.pyplot as plt
+
+ # Load the Stable Diffusion model for text-to-image generation
  device = "cuda" if torch.cuda.is_available() else "cpu"
+ pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(device)
+
+ # Get the current directory
+ current_dir = os.getcwd()
+ # Default object file path
+ DEFAULT_OBJ_FILE = os.path.join(current_dir, "female.obj")
+ # Temporary texture file path
+ TEMP_TEXTURE_FILE = os.path.join(current_dir, "generated_texture.png")
+ # File path to save the 2D image
+ OUTPUT_IMAGE_FILE = os.path.join(current_dir, "output_image.png")
+ DEFAULT_GLB_FILE = os.path.join(current_dir, "vroid_girl1.glb")
+
+ def apply_texture(mesh, texture_file):
+     # Sample the texture image at each vertex's UV coordinate
+     texture_image = Image.open(texture_file)
+     uv_coords = mesh.visual.uv
+     uv_coords = np.clip(uv_coords, 0, 1)
+     texture_colors = np.array([
+         texture_image.getpixel((
+             int(u * (texture_image.width - 1)),
+             int(v * (texture_image.height - 1))
+         )) for u, v in uv_coords
+     ])
+     # Normalize 0-255 channel values to the 0-1 range
+     texture_colors = texture_colors / 255.0
+     return texture_colors
+
+ def display_3d_object(obj_file, texture_file, light_intensity, ambient_intensity, color):
+     file_extension = obj_file.split('.')[-1].lower()
+     if file_extension == 'obj':
+         mesh = trimesh.load(obj_file)
+     elif file_extension == 'glb':
+         mesh = load_glb_file(obj_file)
+     else:
+         raise ValueError("Unsupported file format. Please upload a .obj or .glb file.")
+
+     if texture_file:
+         colors = apply_texture(mesh, texture_file)
+     else:
+         colors = color
+
+     ambient_intensity = max(0, min(ambient_intensity, 1))
+     fig = go.Figure(data=[
+         go.Mesh3d(
+             x=mesh.vertices[:, 0],
+             y=mesh.vertices[:, 1],
+             z=mesh.vertices[:, 2],
+             i=mesh.faces[:, 0],
+             j=mesh.faces[:, 1],
+             k=mesh.faces[:, 2],
+             facecolor=colors if texture_file else None,
+             color=color if not texture_file else None,
+             opacity=0.50,
+             lighting=dict(
+                 ambient=ambient_intensity,
+                 diffuse=light_intensity,
+                 specular=0.5,
+                 roughness=0.1,
+                 fresnel=0.2
+             ),
+             lightposition=dict(
+                 x=100,
+                 y=200,
+                 z=300
+             )
+         )
+     ])
+     fig.update_layout(scene=dict(aspectmode='data'))
+
+     # Clean up the temporary texture file
+     if os.path.exists(TEMP_TEXTURE_FILE):
+         os.remove(TEMP_TEXTURE_FILE)
+         print(f"Deleted existing file: {TEMP_TEXTURE_FILE}")
+     else:
+         print(f"File not found: {TEMP_TEXTURE_FILE}")
+
+     return fig
+
+ def load_glb_file(filename):
+     trimesh_scene = trimesh.load(filename)
+     if isinstance(trimesh_scene, trimesh.Scene):
+         mesh = trimesh_scene.dump(concatenate=True)
+     else:
+         mesh = trimesh_scene
+     return mesh
+
+ def generate_clothing_image(prompt):
+     image = pipeline(prompt).images[0]
+     image.save(TEMP_TEXTURE_FILE)
+     return TEMP_TEXTURE_FILE, image
+
+ def update_texture_display(prompt, texture_file):
+     if prompt:
+         texture_path, image = generate_clothing_image(prompt)
+         return image
+     elif texture_file:
+         return Image.open(texture_file)
+     return None
+
+ with gr.Blocks() as demo:
+     gr.Markdown("## 3D Object Viewer with Custom Texture, Color, and Adjustable Lighting")
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Markdown("### Texture Options")
+             prompt_input = gr.Textbox(label="Enter a Prompt to Generate Texture", placeholder="Type a prompt...")
+             generate_button = gr.Button("Generate Texture")
+             texture_file = gr.File(label="Upload Texture file (PNG or JPG, optional)", type="filepath")
+             texture_preview = gr.Image(label="Texture Preview", visible=True)
+
+             gr.Markdown("### Lighting & Color Settings")
+             light_intensity_slider = gr.Slider(minimum=0, maximum=2, step=0.1, value=0.8, label="Light Intensity")
+             ambient_intensity_slider = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="Ambient Intensity")
+             color_picker = gr.ColorPicker(value="#D3D3D3", label="Object Color")
+             submit_button = gr.Button("Submit")
+             obj_file = gr.File(label="Upload OBJ or GLB file", value=DEFAULT_OBJ_FILE, type='filepath')
+
+         with gr.Column(scale=2):
+             display = gr.Plot(label="3D Viewer")
+
+     def update_display(file, texture, light_intensity, ambient_intensity, color):
+         texture_to_use = TEMP_TEXTURE_FILE if os.path.exists(TEMP_TEXTURE_FILE) else texture
+         return display_3d_object(file, texture_to_use, light_intensity, ambient_intensity, color)
+
+     submit_button.click(fn=update_display, inputs=[obj_file, texture_file, light_intensity_slider, ambient_intensity_slider, color_picker], outputs=display)
+     generate_button.click(fn=update_texture_display, inputs=[prompt_input, texture_file], outputs=texture_preview)
+     texture_file.change(fn=update_texture_display, inputs=[prompt_input, texture_file], outputs=texture_preview)
+
+     demo.load(fn=update_display, inputs=[obj_file, texture_file, light_intensity_slider, ambient_intensity_slider, color_picker], outputs=display)

+     gr.Examples(
+         examples=[[DEFAULT_OBJ_FILE, None], [DEFAULT_GLB_FILE, None]],
+         inputs=[obj_file, texture_file],
+         label="Example Files"
+     )

+ demo.launch(debug=True)
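Note on the textured path above: Plotly's Mesh3d treats facecolor as one color per face (one per i/j/k triple) and expects Plotly color strings, while apply_texture() returns one float RGB(A) row per vertex, so the two may not line up. A minimal bridging sketch, assuming the shapes described above (the helper name vertex_to_face_colors is hypothetical, not part of this commit):

    import numpy as np

    def vertex_to_face_colors(vertex_colors, faces):
        # vertex_colors: (n_vertices, 3 or 4) floats in [0, 1] from apply_texture();
        # faces: (n_faces, 3) int array (mesh.faces). Averages the three vertex
        # colors of each face, since Mesh3d's facecolor is per face, not per vertex.
        rgb = np.asarray(vertex_colors)[:, :3]      # drop alpha channel if present
        face_rgb = rgb[faces].mean(axis=1)          # shape (n_faces, 3)
        return ["rgb(%d,%d,%d)" % tuple(int(c * 255) for c in row) for row in face_rgb]

The result could then be passed as facecolor=vertex_to_face_colors(colors, mesh.faces); alternatively, Mesh3d also has a vertexcolor parameter that takes per-vertex colors directly.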
backup/app.py ADDED
@@ -0,0 +1,146 @@
+ import gradio as gr
+ import numpy as np
+ import random
+ from diffusers import DiffusionPipeline
+ import torch
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ if torch.cuda.is_available():
+     torch.cuda.max_memory_allocated(device=device)
+     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+     pipe.enable_xformers_memory_efficient_attention()
+     pipe = pipe.to(device)
+ else:
+     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
+     pipe = pipe.to(device)
+
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 1024
+
+ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     generator = torch.Generator().manual_seed(seed)
+     image = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         guidance_scale=guidance_scale,
+         num_inference_steps=num_inference_steps,
+         width=width,
+         height=height,
+         generator=generator,
+     ).images[0]
+     return image
+
+ examples = [
+     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+     "An astronaut riding a green horse",
+     "A delicious ceviche cheesecake slice",
+ ]
+
+ css = """
+ #col-container {
+     margin: 0 auto;
+     max-width: 520px;
+ }
+ """
+
+ if torch.cuda.is_available():
+     power_device = "GPU"
+ else:
+     power_device = "CPU"
+
+ with gr.Blocks(css=css) as demo:
+     with gr.Column(elem_id="col-container"):
+         gr.Markdown(f"""
+         # Text-to-Image Gradio Template
+         Currently running on {power_device}.
+         """)
+
+         with gr.Row():
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+             run_button = gr.Button("Run", scale=0)
+
+         result = gr.Image(label="Result", show_label=False)
+
+         with gr.Accordion("Advanced Settings", open=False):
+             negative_prompt = gr.Text(
+                 label="Negative prompt",
+                 max_lines=1,
+                 placeholder="Enter a negative prompt",
+                 visible=False,
+             )
+             seed = gr.Slider(
+                 label="Seed",
+                 minimum=0,
+                 maximum=MAX_SEED,
+                 step=1,
+                 value=0,
+             )
+             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+             with gr.Row():
+                 width = gr.Slider(
+                     label="Width",
+                     minimum=256,
+                     maximum=MAX_IMAGE_SIZE,
+                     step=32,
+                     value=512,
+                 )
+                 height = gr.Slider(
+                     label="Height",
+                     minimum=256,
+                     maximum=MAX_IMAGE_SIZE,
+                     step=32,
+                     value=512,
+                 )
+
+             with gr.Row():
+                 guidance_scale = gr.Slider(
+                     label="Guidance scale",
+                     minimum=0.0,
+                     maximum=10.0,
+                     step=0.1,
+                     value=0.0,
+                 )
+                 num_inference_steps = gr.Slider(
+                     label="Number of inference steps",
+                     minimum=1,
+                     maximum=12,
+                     step=1,
+                     value=2,
+                 )
+
+         gr.Examples(
+             examples=examples,
+             inputs=[prompt],
+         )
+
+     run_button.click(
+         fn=infer,
+         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+         outputs=[result],
+     )
+
+ demo.queue().launch()
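A note on the CUDA branch kept in this backup: torch.cuda.max_memory_allocated() only *reports* the peak VRAM allocated so far; it does not reserve or cap anything, so calling it for its side effects is a no-op. If the intent was to limit GPU memory, something like the following sketch (assuming a recent PyTorch; the 0.9 fraction is an arbitrary example) would be needed:

    import torch

    if torch.cuda.is_available():
        # Cap this process at ~90% of the device's VRAM; unlike
        # max_memory_allocated(), this call actually enforces a limit.
        torch.cuda.set_per_process_memory_fraction(0.9, device=0)
        peak_bytes = torch.cuda.max_memory_allocated(device=0)  # read-only query
        print(f"peak allocated so far: {peak_bytes} bytes")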
backup/v1/run.py ADDED
@@ -0,0 +1,187 @@
+ import gradio as gr
+ import plotly.graph_objs as go
+ import trimesh
+ import numpy as np
+ from PIL import Image
+ import torch
+ from diffusers import StableDiffusionPipeline
+ import os
+ from pythreejs import *
+ from IPython.display import display
+
+ # Load the Stable Diffusion model for text-to-image generation
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(device)
+
+ # Get the current directory
+ current_dir = os.getcwd()
+ # Default object file path (replaced with current directory)
+ DEFAULT_OBJ_FILE = os.path.join(current_dir, "female.obj")
+ # Temporary texture file path (replaced with current directory)
+ TEMP_TEXTURE_FILE = os.path.join(current_dir, "generated_texture.png")
+ # File path to save the 2D image (replaced with current directory)
+ OUTPUT_IMAGE_FILE = os.path.join(current_dir, "output_image.png")
+ DEFAULT_GLB_FILE = os.path.join(current_dir, "vroid_girl1.glb")
+
+ def apply_texture(mesh, texture_file):
+     # Load the texture image
+     texture_image = Image.open(texture_file)
+
+     # Get texture coordinates from the mesh
+     uv_coords = mesh.visual.uv
+
+     # Ensure UV coordinates are within the valid range
+     uv_coords = np.clip(uv_coords, 0, 1)
+
+     # Extract texture colors for each vertex
+     texture_colors = np.array([
+         texture_image.getpixel((
+             int(u * (texture_image.width - 1)),
+             int(v * (texture_image.height - 1))
+         )) for u, v in uv_coords
+     ])
+
+     # Normalize colors to be between 0 and 1
+     texture_colors = texture_colors / 255.0
+
+     return texture_colors
+
+ def display_3d_object(obj_file, texture_file, light_intensity, ambient_intensity, color):
+     # Determine the file type and load accordingly
+     file_extension = obj_file.split('.')[-1].lower()
+     if file_extension == 'obj':
+         mesh = trimesh.load(obj_file)
+     elif file_extension == 'glb':
+         mesh = load_glb_file(obj_file)
+     else:
+         raise ValueError("Unsupported file format. Please upload a .obj or .glb file.")
+
+     # Apply texture if available
+     if texture_file:
+         colors = apply_texture(mesh, texture_file)
+     else:
+         colors = color
+
+     # Clamp the ambient_intensity to be between 0 and 1
+     ambient_intensity = max(0, min(ambient_intensity, 1))
+
+     # Create a 3D plot using Plotly
+     fig = go.Figure(data=[
+         go.Mesh3d(
+             x=mesh.vertices[:, 0],
+             y=mesh.vertices[:, 1],
+             z=mesh.vertices[:, 2],
+             i=mesh.faces[:, 0],
+             j=mesh.faces[:, 1],
+             k=mesh.faces[:, 2],
+             facecolor=colors if texture_file else None,
+             color=color if not texture_file else None,
+             opacity=0.50,
+             lighting=dict(
+                 ambient=ambient_intensity,
+                 diffuse=light_intensity,
+                 specular=0.5,
+                 roughness=0.1,
+                 fresnel=0.2
+             ),
+             lightposition=dict(
+                 x=100,
+                 y=200,
+                 z=300
+             )
+         )
+     ])
+
+     fig.update_layout(scene=dict(aspectmode='data'))
+
+     return fig
+
+ def load_glb_file(filename):
+     # Load GLB file using trimesh
+     trimesh_scene = trimesh.load(filename)
+
+     # Extract the first mesh from the scene
+     if isinstance(trimesh_scene, trimesh.Scene):
+         mesh = trimesh_scene.dump(concatenate=True)
+     else:
+         mesh = trimesh_scene
+
+     return mesh
+
+ def extract_uv_texture(obj_file):
+     # Load the 3D model
+     mesh = trimesh.load(obj_file)
+
+     # Ensure the mesh has visual/texture information
+     if mesh.visual.uv is None or len(mesh.visual.uv) == 0:
+         raise ValueError("The mesh does not have UV mapping information.")
+
+     # Extract the texture map from the mesh, if it exists
+     texture_image = None
+     if mesh.visual.material.image is not None:
+         texture_image = mesh.visual.material.image
+     else:
+         # If no texture is found, create a blank white texture
+         texture_size = 1024
+         texture_image = Image.new('RGB', (texture_size, texture_size), color=(255, 255, 255))
+
+     return texture_image
+
+ def generate_clothing_image(prompt):
+     """Generate an image of clothing based on the text prompt."""
+     image = pipeline(prompt).images[0]
+     image.save(TEMP_TEXTURE_FILE)
+     return TEMP_TEXTURE_FILE, image
+
+ def update_texture_display(prompt, texture_file):
+     if prompt:
+         texture_path, image = generate_clothing_image(prompt)
+         return image
+     elif texture_file:
+         return Image.open(texture_file)
+     return None
+
+ with gr.Blocks() as demo:
+     gr.Markdown("## 3D Object Viewer with Custom Texture, Color, and Adjustable Lighting")
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Markdown("### Texture Options")
+             prompt_input = gr.Textbox(label="Enter a Prompt to Generate Texture", placeholder="Type a prompt...")
+             generate_button = gr.Button("Generate Texture")
+             texture_file = gr.File(label="Upload Texture file (PNG or JPG, optional)", type="filepath")
+             texture_preview = gr.Image(label="Texture Preview", visible=True)
+
+             gr.Markdown("### Lighting & Color Settings")
+             light_intensity_slider = gr.Slider(minimum=0, maximum=2, step=0.1, value=0.8, label="Light Intensity")
+             ambient_intensity_slider = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="Ambient Intensity")  # Updated the max value to 1
+             color_picker = gr.ColorPicker(value="#D3D3D3", label="Object Color")
+             submit_button = gr.Button("Submit")
+             obj_file = gr.File(label="Upload OBJ or GLB file", value=DEFAULT_OBJ_FILE, type='filepath')
+
+         with gr.Column(scale=2):
+             display = gr.Plot(label="3D Viewer")
+             extract_button = gr.Button("Extract UV Texture")
+             output_image = gr.Image(label="Extracted UV Texture", visible=True)
+
+     def update_display(file, texture, light_intensity, ambient_intensity, color):
+         texture_to_use = TEMP_TEXTURE_FILE if os.path.exists(TEMP_TEXTURE_FILE) else texture
+         return display_3d_object(file, texture_to_use, light_intensity, ambient_intensity, color)
+
+     def extract_and_display_uv_texture(file):
+         return extract_uv_texture(file)
+
+     submit_button.click(fn=update_display, inputs=[obj_file, texture_file, light_intensity_slider, ambient_intensity_slider, color_picker], outputs=display)
+     generate_button.click(fn=update_texture_display, inputs=[prompt_input, texture_file], outputs=texture_preview)
+     texture_file.change(fn=update_texture_display, inputs=[prompt_input, texture_file], outputs=texture_preview)
+     extract_button.click(fn=extract_and_display_uv_texture, inputs=[obj_file], outputs=output_image)
+
+     demo.load(fn=update_display, inputs=[obj_file, texture_file, light_intensity_slider, ambient_intensity_slider, color_picker], outputs=display)
+
+     gr.Examples(
+         examples=[[DEFAULT_OBJ_FILE, None], [DEFAULT_GLB_FILE, None]],
+         inputs=[obj_file, texture_file],
+         label="Example Files"
+     )
+
+ demo.launch(debug=True)
backup/v2/run.py ADDED
@@ -0,0 +1,161 @@
+ import gradio as gr
+ import plotly.graph_objs as go
+ import trimesh
+ import numpy as np
+ from PIL import Image
+ import torch
+ from diffusers import StableDiffusionPipeline
+ import os
+ import matplotlib.pyplot as plt
+
+ # Load the Stable Diffusion model for text-to-image generation
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(device)
+
+ # Get the current directory
+ current_dir = os.getcwd()
+ # Default object file path
+ DEFAULT_OBJ_FILE = os.path.join(current_dir, "female.obj")
+ # Temporary texture file path
+ TEMP_TEXTURE_FILE = os.path.join(current_dir, "generated_texture.png")
+ # File path to save the 2D image
+ OUTPUT_IMAGE_FILE = os.path.join(current_dir, "output_image.png")
+ DEFAULT_GLB_FILE = os.path.join(current_dir, "vroid_girl1.glb")
+
+ def apply_texture(mesh, texture_file):
+     texture_image = Image.open(texture_file)
+     uv_coords = mesh.visual.uv
+     uv_coords = np.clip(uv_coords, 0, 1)
+     texture_colors = np.array([
+         texture_image.getpixel((
+             int(u * (texture_image.width - 1)),
+             int(v * (texture_image.height - 1))
+         )) for u, v in uv_coords
+     ])
+     texture_colors = texture_colors / 255.0
+     return texture_colors
+
+ def display_3d_object(obj_file, texture_file, light_intensity, ambient_intensity, color):
+     file_extension = obj_file.split('.')[-1].lower()
+     if file_extension == 'obj':
+         mesh = trimesh.load(obj_file)
+     elif file_extension == 'glb':
+         mesh = load_glb_file(obj_file)
+     else:
+         raise ValueError("Unsupported file format. Please upload a .obj or .glb file.")
+
+     if texture_file:
+         colors = apply_texture(mesh, texture_file)
+     else:
+         colors = color
+
+     ambient_intensity = max(0, min(ambient_intensity, 1))
+     fig = go.Figure(data=[
+         go.Mesh3d(
+             x=mesh.vertices[:, 0],
+             y=mesh.vertices[:, 1],
+             z=mesh.vertices[:, 2],
+             i=mesh.faces[:, 0],
+             j=mesh.faces[:, 1],
+             k=mesh.faces[:, 2],
+             facecolor=colors if texture_file else None,
+             color=color if not texture_file else None,
+             opacity=0.50,
+             lighting=dict(
+                 ambient=ambient_intensity,
+                 diffuse=light_intensity,
+                 specular=0.5,
+                 roughness=0.1,
+                 fresnel=0.2
+             ),
+             lightposition=dict(
+                 x=100,
+                 y=200,
+                 z=300
+             )
+         )
+     ])
+     fig.update_layout(scene=dict(aspectmode='data'))
+     return fig
+
+ def load_glb_file(filename):
+     trimesh_scene = trimesh.load(filename)
+     if isinstance(trimesh_scene, trimesh.Scene):
+         mesh = trimesh_scene.dump(concatenate=True)
+     else:
+         mesh = trimesh_scene
+     return mesh
+
+ def extract_uv_texture(obj_file):
+     mesh = trimesh.load(obj_file)
+     if mesh.visual.uv is None or len(mesh.visual.uv) == 0:
+         raise ValueError("The mesh does not have UV mapping information.")
+
+     texture_image = None
+     if mesh.visual.material.image is not None:
+         texture_image = mesh.visual.material.image
+     else:
+         texture_size = 1024
+         texture_image = Image.new('RGB', (texture_size, texture_size), color=(255, 255, 255))
+
+     return texture_image
+
+ def generate_clothing_image(prompt):
+     image = pipeline(prompt).images[0]
+     image.save(TEMP_TEXTURE_FILE)
+     return TEMP_TEXTURE_FILE, image
+
+ def update_texture_display(prompt, texture_file):
+     if prompt:
+         texture_path, image = generate_clothing_image(prompt)
+         return image
+     elif texture_file:
+         return Image.open(texture_file)
+     return None
+
+ with gr.Blocks() as demo:
+     gr.Markdown("## 3D Object Viewer with Custom Texture, Color, and Adjustable Lighting")
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Markdown("### Texture Options")
+             prompt_input = gr.Textbox(label="Enter a Prompt to Generate Texture", placeholder="Type a prompt...")
+             generate_button = gr.Button("Generate Texture")
+             texture_file = gr.File(label="Upload Texture file (PNG or JPG, optional)", type="filepath")
+             texture_preview = gr.Image(label="Texture Preview", visible=True)
+
+             gr.Markdown("### Lighting & Color Settings")
+             light_intensity_slider = gr.Slider(minimum=0, maximum=2, step=0.1, value=0.8, label="Light Intensity")
+             ambient_intensity_slider = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="Ambient Intensity")
+             color_picker = gr.ColorPicker(value="#D3D3D3", label="Object Color")
+             submit_button = gr.Button("Submit")
+             obj_file = gr.File(label="Upload OBJ or GLB file", value=DEFAULT_OBJ_FILE, type='filepath')
+
+         with gr.Column(scale=2):
+             display = gr.Plot(label="3D Viewer")
+             extract_button = gr.Button("Extract UV Texture")
+             output_image = gr.Image(label="Extracted UV Texture", visible=True)
+
+     def update_display(file, texture, light_intensity, ambient_intensity, color):
+         texture_to_use = TEMP_TEXTURE_FILE if os.path.exists(TEMP_TEXTURE_FILE) else texture
+         return display_3d_object(file, texture_to_use, light_intensity, ambient_intensity, color)
+
+     def extract_and_display_uv_texture(file):
+         uv_texture_image = extract_uv_texture(file)
+         uv_texture_image.save(OUTPUT_IMAGE_FILE)
+         return uv_texture_image
+
+     submit_button.click(fn=update_display, inputs=[obj_file, texture_file, light_intensity_slider, ambient_intensity_slider, color_picker], outputs=display)
+     generate_button.click(fn=update_texture_display, inputs=[prompt_input, texture_file], outputs=texture_preview)
+     texture_file.change(fn=update_texture_display, inputs=[prompt_input, texture_file], outputs=texture_preview)
+     extract_button.click(fn=extract_and_display_uv_texture, inputs=[obj_file], outputs=output_image)
+
+     demo.load(fn=update_display, inputs=[obj_file, texture_file, light_intensity_slider, ambient_intensity_slider, color_picker], outputs=display)
+
+     gr.Examples(
+         examples=[[DEFAULT_OBJ_FILE, None], [DEFAULT_GLB_FILE, None]],
+         inputs=[obj_file, texture_file],
+         label="Example Files"
+     )
+
+ demo.launch(debug=True)
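Note on extract_uv_texture() in both backups: it reads mesh.visual.material.image, which exists on trimesh's SimpleMaterial (typical for OBJ loads) but generally not on the PBRMaterial that GLB files load as, where the texture lives in baseColorTexture. A more defensive sketch (texture_from_mesh is a hypothetical helper; the attribute handling reflects my understanding of trimesh's material classes rather than anything in this commit):

    import trimesh
    from PIL import Image

    def texture_from_mesh(mesh, size=1024):
        # OBJ materials usually load as SimpleMaterial (.image); GLB materials
        # as PBRMaterial (.baseColorTexture). Fall back to a blank texture.
        visual = mesh.visual
        if isinstance(visual, trimesh.visual.texture.TextureVisuals):
            material = visual.material
            image = getattr(material, "image", None)
            if image is None:
                image = getattr(material, "baseColorTexture", None)
            if image is not None:
                return image
        return Image.new("RGB", (size, size), (255, 255, 255))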
env.bat ADDED
@@ -0,0 +1,2 @@
+ @echo off
+ .venv\Scripts\activate.bat
female.obj ADDED
The diff for this file is too large to render. See raw diff
 
install.bat ADDED
@@ -0,0 +1,29 @@
+ @echo off
+
+ :: Check if Python 3.11 is installed
+ python -V 2>&1 | findstr /I "Python 3.11" >nul
+ if errorlevel 1 (
+     echo Python 3.11 is not installed. Please install it first.
+     exit /b 1
+ )
+
+ :: Create a virtual environment named .venv using Python 3.11
+ python -m venv .venv
+
+ :: Activate the virtual environment
+ call .venv\Scripts\activate.bat
+
+ :: Install packages from requirements.txt
+ if exist requirements.txt (
+     pip install -r requirements.txt
+ ) else (
+     echo requirements.txt not found. Installing packages directly.
+     pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
+     pip install pillow diffusers trimesh gradio pythreejs plotly
+ )
+
+ :: Deactivate the virtual environment
+ deactivate
+
+ echo Environment setup completed successfully!
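After install.bat completes, a quick way to confirm that the CUDA 12.1 build of torch actually installed is to run a check like this inside the activated .venv (a suggestion, not part of the repo):

    import torch

    # True means app.py will take the CUDA code path;
    # False means it silently falls back to CPU inference.
    print(torch.__version__, torch.cuda.is_available())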
requirements.txt CHANGED
@@ -3,4 +3,9 @@ diffusers
  invisible_watermark
  torch
  transformers
- xformers

  invisible_watermark
  torch
  transformers
+ xformers
+ pillow
+ torch
+ trimesh
+ gradio
+ pythreejs
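Note: the new app.py also imports plotly.graph_objs and matplotlib.pyplot, neither of which is listed here (install.bat installs plotly directly, but pip install -r requirements.txt alone would not), and torch is listed twice, which pip tolerates but is worth deduplicating. A small startup self-check sketch, with the module list inferred from app.py's imports:

    import importlib

    # Modules app.py imports at startup; any missing one will crash the Space.
    for name in ["gradio", "plotly", "trimesh", "numpy", "PIL",
                 "torch", "diffusers", "matplotlib"]:
        try:
            importlib.import_module(name)
        except ImportError:
            print(f"missing dependency: {name}")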