logan committed on
Commit
7d64296
·
1 Parent(s): 715ec93
Files changed (5) hide show
  1. api_keys.py +2 -0
  2. app.py +72 -0
  3. chatbot.py +45 -0
  4. prompt.py +57 -0
  5. utils.py +122 -0
api_keys.py ADDED
@@ -0,0 +1,2 @@
 
 
 
"""API key configuration.

SECURITY: keys must never be hard-coded or committed to version control.
This revision reads them from environment variables; the OpenAI key that
was previously committed here should be considered leaked and revoked.
"""
import os

# Meshy text-to-3D API key (empty string if the variable is unset).
meshy_api_key = os.environ.get("MESHY_API_KEY", "")
# OpenAI API key (empty string if the variable is unset).
gpt_api_key = os.environ.get("GPT_API_KEY", "")
app.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Gradio front-end: a GPT-4-guided 3D-prompt builder next to a model viewer."""
import time
import gradio as gr
import openai
from prompt import setup_prompt
from api_keys import gpt_api_key
import utils

openai.api_key = gpt_api_key

# Conversation history shared by every handler in this module; seeded so the
# assistant greets the user immediately at startup.
messages = [
    {"role": "system", "content": setup_prompt},
    {"role": "user", "content": "hi, I want to build a 3d model"},
]

# One up-front completion to produce the assistant's opening message.
response = openai.ChatCompletion.create(
    model="gpt-4",
    messages=messages,
    temperature=1.2,
)
reply = response["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": reply})
print("\n" + reply + "\n")

final_prompt = ""  # filled once the assistant signals the prompt is complete

model_path = "../house_light/model.glb"  # model currently shown in the viewer

cnt = 0  # completed generations so far; selects which model file to display
def solve():
    """Return the path of the 3D model to display, after a short delay.

    Reads the module-level ``model_path`` that the chat handler updates.
    The sleep simulates generation latency so the viewer refreshes after
    the chat side has had time to switch models — TODO confirm intent.
    """
    global model_path
    time.sleep(3)
    return model_path
+
def slow_echo(message, history):
    """Gradio chat handler: send the user message to GPT-4 and stream the reply.

    Appends the exchange to the module-level ``messages`` history. When the
    assistant signals completion (it appends ``1`` per ``setup_prompt``), the
    reply is stored in ``final_prompt`` and the displayed model is switched.
    Yields the reply one character at a time for a typing effect.

    :param message: the user's chat message.
    :param history: chat history supplied by gr.ChatInterface (unused; the
        module keeps its own history in ``messages``).
    """
    global cnt
    global messages
    global final_prompt
    global model_path

    messages.append({"role": "user", "content": message})
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=messages,
        temperature=1.2,
    )
    reply = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": reply})
    print("\n" + reply + "\n")

    # The system prompt instructs the model to append ```1``` when the final
    # JSON prompt is ready. BUG FIX: the original indexed reply[len(reply)-2],
    # which raises IndexError on replies shorter than two characters; checking
    # membership in the two-character tail is equivalent and safe.
    if '1' in reply[-2:]:
        print("start generating")
        final_prompt = reply
        reply = "Generating..."
        cnt += 1
        # Demo stand-ins: alternate between two pre-built models instead of
        # calling the real generation pipeline.
        if cnt == 1:
            model_path = "../house_light/model.glb"
        elif cnt == 2:
            model_path = "../house_dark/model.glb"

    for i in range(len(reply)):
        time.sleep(0.02)  # typing-effect delay per character
        yield reply[:i + 1]
# Two-column layout: chat on the left, 3D model viewer on the right.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Chat panel driven by slow_echo; .queue() enables streaming yields.
            chatbot = gr.ChatInterface(fn = slow_echo, title="SpacialSynergy", examples=["hi, I want to build a 3d model"]).queue()
        with gr.Column():
            # Viewer panel: solve() returns the current model path; the
            # "model3d" output component renders the .glb file.
            interface = gr.Interface(
                fn=solve,
                title = "3D Model",
                inputs=None,
                outputs = ["model3d"],
            )

demo.launch(share = False)
chatbot.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""CLI chatbot: GPT-4 walks the user toward a finished 3D-generation prompt."""
import openai
from prompt import setup_prompt
from api_keys import gpt_api_key
import utils

openai.api_key = gpt_api_key

# Conversation history; seeded so the assistant greets the user right away.
messages = [
    {"role": "system", "content": setup_prompt},
    {"role": "user", "content": "hi, I want to build a 3d model"},
]

# One up-front completion to produce the assistant's opening message.
response = openai.ChatCompletion.create(
    model="gpt-4",
    messages=messages,
    temperature=1.2,
)
reply = response["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": reply})
print("\n" + reply + "\n")

final_prompt = ""  # set once the assistant emits the final JSON prompt

# print("Your 3d builder assistant is ready!")
# Read user input until the user types quit() or the assistant signals that
# the final prompt is ready (it appends a trailing ``1`` per setup_prompt).
# BUG FIX: the original wrote ``while input != "quit()":`` which compares the
# *builtin function* ``input`` to a string — always true — so typing quit()
# never exited the loop.
while True:
    message = input()
    if message == "quit()":
        break
    messages.append({"role": "user", "content": message})
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=messages,
        temperature=1.2,
    )
    reply = response["choices"][0]["message"]["content"]

    messages.append({"role": "assistant", "content": reply})
    print("\n" + reply + "\n")
    # BUG FIX: the original indexed reply[len(reply)-2], which raises
    # IndexError on replies shorter than two characters; membership in the
    # two-character tail is equivalent and safe.
    if '1' in reply[-2:]:
        print("start generating")
        final_prompt = reply
        break


# Next steps of the pipeline (currently disabled):
# extracted_text = utils.extract_text_surrounded_by_backticks(final_prompt)
# begin = final_prompt.find("{")
# end = final_prompt.find("}")
# print(final_prompt[begin:end+1])
# payload = utils.text_to_3d_gen(final_prompt[begin:end+1])
# taskid = utils.create_meshy_object(payload)
# utils.download_model(taskid)
prompt.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# System prompt that turns GPT-4 into a guided prompt-builder for Meshy
# text-to-3D generation. The trailing ```1``` marker it instructs the model
# to emit is what chatbot.py / app.py use to detect completion.
# FIXES: corrected typos that degrade instruction quality ("wany" -> "want",
# "guidline" -> "guideline", "Ary Style" -> "Art Style") and repaired the
# garbled clause "ers do not say" -> "if users do not say".
setup_prompt = """
Suppose you are a 3d model design assistant, your job is to help user generate detailed prompt which is used to feed another 3d generative AI.
**Job Description**
- You should inspire users and make them more creative
- You can only respond to 3d-model building related problems, if users ask you non-related problems, you should not answer those questions
- Users' responses should not have anything relates to pornography, racism and any form of discrimination

First, here is the general prompt guideline for 3d model prompting.
**Basic Prompts**
In Meshy, a basic prompt is to describe an object you want to generate or retexture, e.g. a sword, a helmet, a house, a treasure chest, etc.
**Advanced Prompts**
If you want to add more details to the model, you'll need to provide the AI with more information through prompts. It is recommended that your prompts be specific and descriptive. Try describing the shape, color, size, style, and other attributes of the object you want to generate. Longer prompts don't necessarily equate to better results, focus on the key concepts!
Here we have some useful terms that may help improve the result you're gonna get:
- Related to detail:
highly detailed, high resolution, highest quality, best quality, 4K, 8K, HDR, studio quality
- Related to style:
beautiful, elegant, realistic, ultra realistic, trending on artstation, masterpiece, cinema 4d, unreal engine, octane render
- Related to lighting:
ambient lighting, soft lighting, sunlight, moonlight, fluorescent, glowing
**Negative Prompts**:
A negative prompt is what you don't want to see in the generated result. If you're using the web app, you can simply type a negative prompt in the negative prompt box. For Discord users, you can use the --no parameter in your prompt.
Here we have some commonly used negative prompts for you:
bad anatomy/proportions, deformed, dull, duplicate, extra arms/fingers/legs, low quality, missing arms/fingers/legs, obscure, poor lighting, ugly, unnatural colors, worst quality

Second, you should guide users step by step in the following procedure to help users generate good prompt according to the guideline above
1 - Object: Ask user what 3d object they want to create
2 - Style: Ask user things relate to detail, style, lighting according to the prompt guideline mentioned before.
3 - Negative Prompt: Ask user what they do not want to see in 3d generation
4 - Art Style: Ask user to choose an art style from the following options: {Realistic, Voxel, 2.5D Cartoon, Japanese Anime, Cartoon Line Art, Realistic Hand-drawn, 2.5D Hand-drawn, Oriental Comic Ink}
5 - Texture Resolution: Ask user to choose texture resolution from the following options: {1K, 2K, 4K}
6 - Confirmation: Show user the prompt you generated
- Style: <style> \n - Negative Prompt: <negative prompt> \n - Art Style: <art style> \n - Texture Resolution: <resolution> \n <0>}. Note: everything in <> should be keywords, not a complete sentence or verbs. After showing, ask user if he/she wants to add more things
7 - Output: Show the final prompt in JSON with the following keys:
object_prompt, style_prompt, negative_prompt, art_style. Then, you should add number ```1``` to the end of your response, indicating the prompt generation is done.
- ```art_style``` key words to code conversion:
- Realistic style -> realistic
- 2.5D Cartoon style -> fake-3d-cartoon
- Japanese Anime style -> japanese-anime
- Cartoon Line Art style -> cartoon-line-art
- Realistic Hand-drawn style -> realistic-hand-drawn
- 2.5D Hand-drawn style -> fake-3d-hand-drawn
- Oriental Comic Ink style -> oriental-comic-ink
- ```negative_prompt``` key: if users do not say anything specific about their negative preferences, the value for the negative_prompt key should be an empty string ```""```


Finally, there are some good prompts that you can learn from:
- Object: Wine barrel; Style: ancient, 4K, HDR, highest quality
- Object: A treasure chest; Style: realistic, wooden, carved, highest quality
- Object: Potion; Style: green glowing magical potion, highest quality
- Object: Pistol; Style: golden pistol, unreal engine, game asset, highest quality
- Object: Altar of Storms; Style: Metal, Viking pattern, black, old, scratches, iron, 8k, items, concept art trending artstation, high-res, realistic, Photographs, aaa game scene
- Object: A monster mask; Style: Red fangs, Samurai outfit that fused with japanese batik style
- Object: Medieval Small House; Style: ancient, best quality, 4k, trending on artstation
- Object: A motorcycle from the era of steam engines in the 20th century; Style: steampunk, 4k, hdr
- Object: Deity earring; Style: fancy, substantial, 4k, HDR, highest quality
"""
utils.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import re
3
+ import os
4
+ import requests
5
+ import json
6
+ from api_keys import meshy_api_key
7
+ from api_keys import gpt_api_key
8
+ import openai
9
+
def shouldStop(input_str, model="gpt-3.5-turbo"):
    """Ask GPT whether *input_str* ends with the completion marker "1".

    Returns the model's raw answer as a string (expected "1" or "0").
    Name kept camelCase for compatibility with existing callers.

    :param input_str: assistant reply to classify.
    :param model: OpenAI chat model to use for the classification.
    """
    openai.api_key = gpt_api_key
    # BUG FIX: the instruction promises text "delimited by triple backticks",
    # but the original never actually wrapped input_str in backticks.
    prompt = (
        "determine whether the text delimited by triple backticks contains "
        f"number 1 at the end. ```{input_str}``` \n Format: 1/0"
    )
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,  # deterministic yes/no classification
    )
    return response.choices[0].message["content"]
20
+
def extract_text_surrounded_by_backticks(input_string):
    """Return the first span fenced by triple backticks, or None.

    The pattern is non-greedy and matches across newlines, so only the
    content of the first ``` ... ``` pair is returned.
    """
    match = re.search(r'```(.*?)```', input_string, re.DOTALL)
    return match.group(1) if match else None
30
+
def text_to_3d_gen(extracted_str):
    """Build a Meshy text-to-3D request payload from a JSON prompt string.

    :param extracted_str: JSON object string with keys object_prompt,
        style_prompt, art_style, negative_prompt (as produced per
        ``setup_prompt``'s step 7).
    :returns: payload dict for the Meshy API; ``negative_prompt`` is the
        fixed base negatives with the user's negatives appended.
    :raises json.JSONDecodeError: if *extracted_str* is not valid JSON.

    ROBUSTNESS FIX: missing keys previously raised KeyError; they now fall
    back to empty strings, matching the prompt's "empty string" convention.
    """
    response_dict = json.loads(extracted_str)
    return {
        "object_prompt": response_dict.get("object_prompt", ""),
        "style_prompt": response_dict.get("style_prompt", ""),
        "enable_pbr": True,  # always request PBR texture maps
        "art_style": response_dict.get("art_style", ""),
        # Base negatives always apply; user negatives are appended after.
        "negative_prompt": "low quality, low resolution, blurry, ugly, "
                           + response_dict.get("negative_prompt", ""),
    }
43
+
44
+
def create_meshy_object(payload, target="3d"):
    """Submit a generation task to the Meshy API and return its task id.

    :param payload: request body for the text-to-{target} endpoint.
    :param target: endpoint selector; "3d" hits /v1/text-to-3d.
    :returns: the task id string from the API response.
    :raises requests.HTTPError: on a non-2xx response.

    Side effect: the task id is also written to "id.txt" so an interrupted
    run can resume the download later.
    """
    headers = {
        "Authorization": f"Bearer {meshy_api_key}"
    }

    response = requests.post(
        f"https://api.meshy.ai/v1/text-to-{target}",
        headers=headers,
        json=payload,
    )
    response.raise_for_status()

    task_id = response.json()["result"]
    # FIX: use a context manager so the file handle is closed even if the
    # write fails (the original open/write/close leaked on error).
    with open("id.txt", "w") as text_file:
        text_file.write(task_id)
    return task_id
62
+
63
+
64
+
def download_model(task_id, target="3d"):
    """Poll a Meshy task until complete, then download its .glb next to this script.

    Blocks until the task reports progress == 100, then fetches model_url
    into "<script_dir>/<task_id>.glb". The download is skipped if the file
    already exists.

    :param task_id: Meshy task id returned by create_meshy_object.
    :param target: endpoint selector; "3d" polls /v1/text-to-3d/<task_id>.
    :raises requests.HTTPError: if a status poll returns non-2xx.

    NOTE(review): a task that fails server-side presumably never reaches
    progress 100, so this would poll forever — consider also checking the
    task's status field against the Meshy API docs.
    """
    headers = {
        "Authorization": f"Bearer {meshy_api_key}"
    }

    print_once = True  # emit the "in progress" message only once
    while True:
        response = requests.get(
            f"https://api.meshy.ai/v1/text-to-{target}/{task_id}",
            headers=headers,
        )
        response.raise_for_status()

        retrieve_response = json.loads(response.text)

        if retrieve_response["progress"] != 100:
            # Still generating: log once, then wait before the next poll.
            if print_once:
                print("the model is still in progress...")
                print_once = False
            time.sleep(10)
            continue

        model_url = retrieve_response["model_url"]

        # Save beside this script so the viewer can find the file.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        file_path = os.path.join(script_dir, f"{task_id}.glb")

        if os.path.isfile(file_path):
            print(f"File '{file_path}' already exists. Skipping download.")
        else:
            # FIX: use a distinct name for the download response instead of
            # shadowing the poll response, and keep the stray semicolon out.
            download = requests.get(model_url)

            if download.status_code == 200:
                with open(file_path, "wb") as file:
                    file.write(download.content)
                print(f"File downloaded successfully to {file_path}.")
            else:
                print(f"File download failed with status code {download.status_code}.")
                if "error" in download.text:
                    error_message = json.loads(download.text)["error"]
                    print(f"Error message: {error_message}")
        break