# svjack/GenshinImpact_XL_Base
This model is derived from [CivitAI](https://civitai.com/models/386505).
## Acknowledgments
Special thanks to [mobeimunan](https://civitai.com/user/mobeimunan) for their contributions to the development of this model.
## Supported Characters
The model currently supports the following 73 characters from Genshin Impact:
```python
# Mapping from each character's Chinese name (key) to the English trigger
# word (value) that this checkpoint expects in the prompt. The values are
# the exact tokens the model was trained on — use them verbatim.
name_dict = {
'旅行者女': 'lumine',
'旅行者男': 'aether',
'派蒙': 'PAIMON',
'迪奥娜': 'DIONA',
'菲米尼': 'FREMINET',
'甘雨': 'GANYU',
'凯亚': 'KAEYA',
'莱依拉': 'LAYLA',
'罗莎莉亚': 'ROSARIA',
'七七': 'QIQI',
'申鹤': 'SHENHE',
'神里绫华': 'KAMISATO AYAKA',
'优菈': 'EULA',
'重云': 'CHONGYUN',
'夏洛蒂': 'charlotte',
'莱欧斯利': 'WRIOTHESLEY',
'艾尔海森': 'ALHAITHAM',
'柯莱': 'COLLEI',
'纳西妲': 'NAHIDA',
'绮良良': 'KIRARA',
'提纳里': 'TIGHNARI',
'瑶瑶': 'YAOYAO',
'珐露珊': 'FARUZAN',
'枫原万叶': 'KAEDEHARA KAZUHA',
'琳妮特': 'LYNETTE',
'流浪者 散兵': 'scaramouche',
'鹿野院平藏': 'SHIKANOIN HEIZOU',
'琴': 'JEAN',
'砂糖': 'SUCROSE',
'温迪': 'VENTI',
'魈': 'XIAO',
'早柚': 'SAYU',
'安柏': 'AMBER',
'班尼特': 'BENNETT',
'迪卢克': 'DILUC',
'迪西娅': 'DEHYA',
'胡桃': 'HU TAO',
'可莉': 'KLEE',
'林尼': 'LYNEY',
'托马': 'THOMA',
'香菱': 'XIANG LING',
'宵宫': 'YOIMIYA',
'辛焱': 'XINYAN',
'烟绯': 'YANFEI',
'八重神子': 'YAE MIKO',
'北斗': 'BEIDOU',
'菲谢尔': 'FISCHL',
'九条裟罗': 'KUJO SARA',
'久岐忍': 'KUKI SHINOBU',
'刻晴': 'KEQING',
'雷电将军': 'RAIDEN SHOGUN',
'雷泽': 'RAZOR',
'丽莎': 'LISA',
'赛诺': 'CYNO',
'芙宁娜': 'FURINA',
'芭芭拉': 'BARBARA',
'公子 达达利亚': 'TARTAGLIA',
'坎蒂丝': 'CANDACE',
'莫娜': 'MONA',
'妮露': 'NILOU',
'珊瑚宫心海': 'SANGONOMIYA KOKOMI',
'神里绫人': 'KAMISATO AYATO',
'行秋': 'XINGQIU',
'夜兰': 'YELAN',
'那维莱特': 'NEUVILLETTE',
'娜维娅': 'NAVIA',
'阿贝多': 'ALBEDO',
'荒泷一斗': 'ARATAKI ITTO',
'凝光': 'NING GUANG',
'诺艾尔': 'NOELLE',
'五郎': 'GOROU',
'云堇': 'YUN JIN',
'钟离': 'ZHONGLI'
}
```
## Installation
To use this model, you need to install the following dependencies:
```bash
pip install -U diffusers transformers sentencepiece peft controlnet-aux
```
## Example Usage
### Generating an Image of Zhongli
Here's an example of how to generate an image of Zhongli using this model:
```python
from diffusers import StableDiffusionXLPipeline
import torch
# Load the Genshin Impact SDXL checkpoint in half precision on the GPU.
pipeline = StableDiffusionXLPipeline.from_pretrained(
    "svjack/GenshinImpact_XL_Base",
    torch_dtype=torch.float16
).to("cuda")

# Raw strings keep the backslash-escaped parentheses of the trigger word
# intact; in a plain literal "\(" is an invalid escape sequence and emits a
# SyntaxWarning on Python 3.12+ (a future SyntaxError).
prompt = r"solo,ZHONGLI\(genshin impact\),1boy,portrait,upper_body,highres,"
negative_prompt = r"nsfw,lowres,(bad),text,error,fewer,extra,missing,worst quality,jpeg artifacts,low quality,watermark,unfinished,displeasing,oldest,early,chromatic aberration,signature,extra digits,artistic error,username,scan,[abstract],"

image = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    generator=torch.manual_seed(0),  # fixed seed so the result is reproducible
).images[0]
image.save("zhongli_1024x1024.png")
```
钟离 (Zhongli)
### Using Canny ControlNet to Restore 2D Images from 3D Toy Photos
Here's an example of how to use Canny ControlNet to restore 2D images from 3D toy photos:
#### Genshin Impact 3D Toys
钟离 (Zhongli)
派蒙 (Paimon)
```python
from diffusers import AutoPipelineForText2Image, ControlNetModel
from diffusers.utils import load_image
import torch
from PIL import Image
from controlnet_aux import CannyDetector
# A Canny ControlNet constrains the SDXL checkpoint with the edge map
# extracted from a photo of a 3D figure, "restoring" a 2D anime render.
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
)
pipeline = AutoPipelineForText2Image.from_pretrained(
    "svjack/GenshinImpact_XL_Base",
    controlnet=controlnet,
    torch_dtype=torch.float16
).to("cuda")
# pipeline.enable_model_cpu_offload()  # uncomment to trade speed for lower VRAM

canny = CannyDetector()  # one detector instance serves both images below

# --- Zhongli ---
canny(Image.open("zhongli-cb.jpg")).save("zhongli-cb-canny.jpg")
canny_image = load_image("zhongli-cb-canny.jpg")
controlnet_conditioning_scale = 0.5  # how strongly the edges constrain the output
generator = torch.Generator(device="cpu").manual_seed(1)
images = pipeline(
    # Raw string: "\(" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on Python 3.12+); the escaped parentheses are part of
    # the trigger-word syntax and must reach the tokenizer verbatim.
    prompt=r"solo,ZHONGLI\(genshin impact\),1boy,portrait,highres",
    controlnet_conditioning_scale=controlnet_conditioning_scale,
    image=canny_image,
    num_inference_steps=50,
    guidance_scale=7.0,
    generator=generator,
).images
images[0].save("zhongli_trans.png")

# --- Paimon ---
canny(Image.open("paimon-cb-crop.jpg")).save("paimon-cb-canny.jpg")
canny_image = load_image("paimon-cb-canny.jpg")
controlnet_conditioning_scale = 0.7
generator = torch.Generator(device="cpu").manual_seed(3)
images = pipeline(
    prompt=r"solo,PAIMON\(genshin impact\),1girl,portrait,highres, bright, shiny, high detail, anime",
    controlnet_conditioning_scale=controlnet_conditioning_scale,
    image=canny_image,
    num_inference_steps=50,
    guidance_scale=8.0,
    generator=generator,
).images
images[0].save("paimon_trans.png")
```
### Creating a Grid Image
You can also create a grid image from a list of PIL Image objects:
```python
from PIL import Image
def create_grid_image(image_list, rows, cols, cell_width, cell_height):
    """
    Arrange a list of PIL images into a rows x cols grid.

    Each image is resized (aspect ratio is NOT preserved) to the cell size
    and pasted left-to-right, top-to-bottom. Surplus images beyond
    rows * cols are ignored rather than pasted off-canvas.

    :param image_list: A list of PIL Image objects
    :param rows: Number of rows in the grid
    :param cols: Number of columns in the grid
    :param cell_width: Width of each cell in pixels
    :param cell_height: Height of each cell in pixels
    :return: A new RGB PIL Image containing the grid
    """
    grid_image = Image.new('RGB', (cols * cell_width, rows * cell_height))
    # Clamp to the grid capacity: previously surplus images were pasted at
    # coordinates past the bottom edge, where PIL silently clips them.
    for index, img in enumerate(image_list[:rows * cols]):
        row, col = divmod(index, cols)
        resized = img.resize((cell_width, cell_height))
        grid_image.paste(resized, (col * cell_width, row * cell_height))
    return grid_image
# Build one (original / canny edges / restored 2D) comparison strip per
# character. NOTE(review): the returned image is displayed automatically in a
# notebook; in a plain script assign the result and call .save() to keep it.
create_grid_image([Image.open("zhongli-cb.jpg") ,Image.open("zhongli-cb-canny.jpg"), Image.open("zhongli_trans.png")], 1, 3, 512, 768)
create_grid_image([Image.open("paimon-cb-crop.jpg") ,Image.open("paimon-cb-canny.jpg"), Image.open("paimon_trans.png")], 1, 3, 512, 768)
```
This will create a grid image showing the original, Canny edge detection, and transformed images side by side.
Each image strip below shows, from left to right: the Genshin Impact toy photo, its Canny edge map, and the restored 2D image.
钟离 (Zhongli)
派蒙 (Paimon)