import os

import requests
from tqdm import tqdm
from datasets import load_dataset
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Dense, Input, Concatenate, Embedding, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder
import joblib
from PIL import UnidentifiedImageError, Image
import gradio as gr

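# Hyperparameters shared by training and inference.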
MAX_TEXT_LENGTH = 100
EMBEDDING_DIM = 50
IMAGE_SIZE = 160
BATCH_SIZE = 64

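# Maps each model name to one example image URL; populated while the training
# subset is loaded, saved alongside the model, and reused by the Gradio demo.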
model_examples = {}

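# Load a shuffled subset of the CivitAI dataset, drop NSFW samples, and record
# one example image URL per model for the demo.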
def load_and_preprocess_data(subset_size=20000):
    dataset = load_dataset("thefcraft/civitai-stable-diffusion-337k")
    dataset_subset = dataset['train'].shuffle(seed=42).select(range(subset_size))

    dataset_subset = dataset_subset.filter(lambda x: not x['nsfw'])

    for item in dataset_subset:
        if item['Model'] not in model_examples:
            model_examples[item['Model']] = item['url']

    return dataset_subset

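# NOTE: the text branch below feeds the same placeholder string for every
# sample, so it currently adds no real signal; wiring in the dataset's actual
# prompt text here would make the text input meaningful.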
def process_text_data(dataset_subset):
    # Placeholder text for every sample (see note above).
    text_data = ["default prompt" for _ in dataset_subset]

    tokenizer = Tokenizer(num_words=10000)
    tokenizer.fit_on_texts(text_data)
    sequences = tokenizer.texts_to_sequences(text_data)
    text_data_padded = pad_sequences(sequences, maxlen=MAX_TEXT_LENGTH)

    return text_data_padded, tokenizer

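# Fetch one image over HTTP, returning None on any network or decoding error.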
def download_image(url):
    try:
        # Stream the response and hand the raw bytes straight to PIL.
        response = requests.get(url, timeout=5, stream=True)
        response.raise_for_status()
        return Image.open(response.raw)
    except (requests.RequestException, UnidentifiedImageError, OSError):
        return None

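# Download every image in the subset, keeping the preprocessed pixel arrays
# plus the indices of the samples whose downloads succeeded.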
def process_image_data(dataset_subset):
    image_dir = 'civitai_images'
    os.makedirs(image_dir, exist_ok=True)

    image_data = []
    valid_indices = []

    for idx, sample in enumerate(tqdm(dataset_subset)):
        img_url = sample['url']
        img_path = os.path.join(image_dir, os.path.basename(img_url))

        try:
            response = requests.get(img_url, timeout=5)
            response.raise_for_status()

            # Skip responses that are not images (e.g. HTML error pages).
            if 'image' not in response.headers.get('Content-Type', ''):
                continue

            with open(img_path, 'wb') as f:
                f.write(response.content)

            img = image.load_img(img_path, target_size=(IMAGE_SIZE, IMAGE_SIZE))
            img_array = image.img_to_array(img)
            img_array = preprocess_input(img_array)

            image_data.append(img_array)
            valid_indices.append(idx)

        except Exception:
            # Skip samples whose download or decoding fails.
            continue

    return np.array(image_data), valid_indices

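# Two-branch model: a mostly frozen ResNet50 encodes the image, an embedding +
# dense stack encodes the tokenized prompt, and the concatenated features feed
# a softmax over model names.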
def create_multimodal_model(num_words, num_classes):
    image_input = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    cnn_base = ResNet50(weights='imagenet', include_top=False, pooling='avg')

    for layer in cnn_base.layers[:-10]:
        layer.trainable = False

    cnn_features = cnn_base(image_input)

    text_input = Input(shape=(MAX_TEXT_LENGTH,))
    embedding_layer = Embedding(num_words, EMBEDDING_DIM)(text_input)
    flatten_text = Flatten()(embedding_layer)
    text_features = Dense(128, activation='relu')(flatten_text)

    combined = Concatenate()([cnn_features, text_features])

    x = Dense(256, activation='relu')(combined)
    output = Dense(num_classes, activation='softmax')(x)

    model = Model(inputs=[image_input, text_input], outputs=output)
    return model

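# End-to-end training: load data, build both input modalities, fit the model,
# and persist the model plus every preprocessing artifact inference needs.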
def train_model():
    dataset_subset = load_and_preprocess_data()

    text_data_padded, tokenizer = process_text_data(dataset_subset)
    image_data, valid_indices = process_image_data(dataset_subset)

    # Keep only the text rows and labels for images that downloaded successfully.
    text_data_padded = text_data_padded[valid_indices]
    model_names = [dataset_subset[i]['Model'] for i in valid_indices]

    label_encoder = LabelEncoder()
    encoded_labels = label_encoder.fit_transform(model_names)

    model = create_multimodal_model(
        num_words=10000,
        num_classes=len(label_encoder.classes_)
    )

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy']
    )

    model.fit(
        [image_data, text_data_padded],
        encoded_labels,
        batch_size=BATCH_SIZE,
        epochs=3,
        validation_split=0.2
    )

    model.save('multimodal_model.keras')
    joblib.dump(tokenizer, 'tokenizer.pkl')
    joblib.dump(label_encoder, 'label_encoder.pkl')
    joblib.dump(model_examples, 'model_examples.pkl')

    return model, tokenizer, label_encoder

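# Rank candidate models for an uploaded image and pair each recommendation
# with a downloaded example image.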
def get_recommendations(image_input, model, tokenizer, label_encoder,
                        model_examples, top_k=5):
    img_array = image.img_to_array(image_input)
    img_array = tf.image.resize(img_array, (IMAGE_SIZE, IMAGE_SIZE))
    img_array = preprocess_input(img_array)
    img_array = np.expand_dims(img_array, axis=0)

    # The text branch expects the same placeholder used during training.
    text_sequence = tokenizer.texts_to_sequences(["default prompt"])
    text_padded = pad_sequences(text_sequence, maxlen=MAX_TEXT_LENGTH)

    predictions = model.predict([img_array, text_padded])
    top_indices = np.argsort(predictions[0])[-top_k:][::-1]

    recommendations = []
    for idx in top_indices:
        model_name = label_encoder.inverse_transform([idx])[0]
        confidence = predictions[0][idx]
        if model_name in model_examples:
            example_image = download_image(model_examples[model_name])
            if example_image is not None:
                recommendations.append((model_name, confidence, example_image))

    return recommendations

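# Load the saved artifacts and wrap prediction in a simple Gradio UI: one
# image in, a ranked list of model names plus example images out.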
def create_gradio_interface():
    model = tf.keras.models.load_model('multimodal_model.keras')
    tokenizer = joblib.load('tokenizer.pkl')
    label_encoder = joblib.load('label_encoder.pkl')
    model_examples_data = joblib.load('model_examples.pkl')

    def predict(img):
        recommendations = get_recommendations(
            img, model, tokenizer, label_encoder, model_examples_data
        )
        result_text = ""
        result_images = []

        for model_name, confidence, example_img in recommendations:
            result_text += f"Model: {model_name} (confidence: {confidence:.2f})\n"
            result_images.append(example_img)

        # Pad with None so the output count always matches the five image slots.
        result_images += [None] * (5 - len(result_images))

        return [result_text] + result_images

    outputs = [gr.Textbox(label="Recommended Models")] + [
        gr.Image(label=f"Example {i + 1}") for i in range(5)
    ]

    interface = gr.Interface(
        fn=predict,
        inputs=gr.Image(type="pil", label="Upload Image"),
        outputs=outputs,
        title="AI Model Recommendation System",
        description="Upload an image to get model recommendations with examples"
    )

    return interface

if __name__ == "__main__":
    # Train on the first run; afterwards reuse the saved model and artifacts.
    if not os.path.exists('multimodal_model.keras'):
        print("Training new model...")
        train_model()
        print("Training completed!")
    else:
        print("Loading existing model...")

    interface = create_gradio_interface()
    interface.launch()