somq committed on
Commit: cea2cd9
Parent(s): f8b729c

build: bump tokens & add install/update scripts

.gitignore ADDED
@@ -0,0 +1,2 @@
+updated_model/
+venv/
install.sh ADDED
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+python -m venv venv
+source venv/bin/activate
+pip install -r requirements.txt
requirements.txt ADDED
@@ -0,0 +1,42 @@
+accelerate==0.24.1
+certifi==2023.11.17
+charset-normalizer==3.3.2
+diffusers==0.23.1
+filelock==3.13.1
+fsspec==2023.10.0
+huggingface-hub==0.19.4
+idna==3.4
+importlib-metadata==6.8.0
+Jinja2==3.1.2
+MarkupSafe==2.1.3
+mpmath==1.3.0
+networkx==3.2.1
+numpy==1.26.2
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==8.9.2.26
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.18.1
+nvidia-nvjitlink-cu12==12.3.101
+nvidia-nvtx-cu12==12.1.105
+packaging==23.2
+Pillow==10.1.0
+psutil==5.9.6
+PyYAML==6.0.1
+regex==2023.10.3
+requests==2.31.0
+safetensors==0.4.0
+sympy==1.12
+tokenizers==0.15.0
+torch==2.1.1
+tqdm==4.66.1
+transformers==4.35.2
+triton==2.1.0
+typing_extensions==4.8.0
+urllib3==2.1.0
+zipp==3.17.0
tokenizer/special_tokens_map.json CHANGED
@@ -13,7 +13,13 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<|endoftext|>",
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
update_model.py ADDED
@@ -0,0 +1,30 @@
+from transformers import GPT2Config, AutoConfig
+from diffusers import StableDiffusionPipeline
+
+model_name = "somq/fantassified_icons_v2"
+# model_name = "updated_model"
+
+
+# model = AutoModelForSequenceClassification.from_pretrained(model_name)
+# print(model)
+
+# config = AutoConfig.from_pretrained(model_name)
+# print(config)
+
+model = StableDiffusionPipeline.from_pretrained(model_name)
+# image = model("a photograph of an astronaut riding a horse").images[0]
+# print(image)
+
+
+# Load the existing configuration and model
+# existing_config = GPT2Config.from_pretrained("your_model_name_or_path")
+# existing_model = GPT2ForTextToImage.from_pretrained("your_model_name_or_path")
+
+# Update the configuration as needed
+# updated_config = existing_config  # Modify as needed
+
+# Save the updated model and configuration
+# updated_model_path = "./updated_model"
+updated_model_path = "./"
+# updated_config.save_pretrained(updated_model_path)
+model.save_pretrained(updated_model_path)
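update_model.py pulls the somq/fantassified_icons_v2 pipeline from the Hub and re-saves it into the repository root ("./"). A minimal sketch, not part of the commit, of how the re-saved pipeline could be loaded back and exercised; the prompt and output filename are illustrative only:

from diffusers import StableDiffusionPipeline

# Load the pipeline from the local copy written by update_model.py ("./")
pipe = StableDiffusionPipeline.from_pretrained("./")

# Generate a sample image (prompt is illustrative) and write it to disk
image = pipe("a flat icon of a treasure chest").images[0]
image.save("sample_icon.png")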