Granther committed on
Commit 8204c34 • 1 Parent(s): daccf71

Upload sft_phi3_2.ipynb with huggingface_hub

Files changed (1)
sft_phi3_2.ipynb +123 -44
sft_phi3_2.ipynb CHANGED
@@ -69,6 +69,63 @@
  "from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig"
  ]
  },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "d2e94000-8097-4f09-a8dc-506801bb9f12",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "8c9a7b9e2bb5463c979a2659118c4912",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "VBox(children=(HTML(value='<center> <img\\nsrc=https://huggingface.co/front/assets/huggingface_logo-noborder.sv…"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "from huggingface_hub import HfApi, notebook_login\n",
+ "\n",
+ "notebook_login()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "8b994f91-1973-405d-8275-07948eadab99",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "CommitInfo(commit_url='https://huggingface.co/datasets/Granther/assorted-notebooks-bin/commit/daccf7107bc7eac6717dea879fde2ba3a51cccf4', commit_message='Upload sft_phi3_2.ipynb with huggingface_hub', commit_description='', oid='daccf7107bc7eac6717dea879fde2ba3a51cccf4', pr_url=None, pr_revision=None, pr_num=None)"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "api = HfApi()\n",
+ "\n",
+ "quant_repo = \"Granther/assorted-notebooks-bin\"\n",
+ "\n",
+ "api.upload_file(\n",
+ " path_or_fileobj=\"sft_phi3_2.ipynb\",\n",
+ " path_in_repo=\"sft_phi3_2.ipynb\",\n",
+ " repo_id=quant_repo,\n",
+ " repo_type=\"dataset\",\n",
+ ")"
+ ]
+ },
  {
  "cell_type": "code",
  "execution_count": 4,
@@ -574,63 +631,85 @@
  },
  {
  "cell_type": "code",
- "execution_count": 93,
+ "execution_count": 1,
  "id": "7bdb1138-7da5-44dc-9549-df7d40ec68e1",
  "metadata": {},
  "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
- "To disable this warning, you can either:\n",
- "\t- Avoid using `tokenizers` before the fork if possible\n",
- "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
- ]
- },
  {
  "name": "stdout",
  "output_type": "stream",
  "text": [
- "2024-07-04:16:44:18,903 INFO [__main__.py:272] Verbosity set to INFO\n",
- "2024-07-04:16:44:24,067 INFO [__main__.py:369] Selected Tasks: ['hellaswag']\n",
- "2024-07-04:16:44:24,115 INFO [evaluator.py:152] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234\n",
- "2024-07-04:16:44:24,115 INFO [evaluator.py:189] Initializing hf model, with arguments: {'pretrained': 'EleutherAI/gpt-j-6B'}\n",
- "2024-07-04:16:44:24,156 INFO [huggingface.py:170] Using device 'cuda:0'\n",
- "config.json: 100%|█████████████████████████████| 930/930 [00:00<00:00, 7.62MB/s]\n",
- "tokenizer_config.json: 100%|███████████████████| 619/619 [00:00<00:00, 5.78MB/s]\n",
- "vocab.json: 100%|████████████████████████████| 798k/798k [00:00<00:00, 20.4MB/s]\n",
- "merges.txt: 100%|████████████████████████████| 456k/456k [00:00<00:00, 6.58MB/s]\n",
- "tokenizer.json: 100%|██████████████████████| 1.37M/1.37M [00:00<00:00, 14.4MB/s]\n",
- "added_tokens.json: 100%|███████████████████| 4.04k/4.04k [00:00<00:00, 44.5MB/s]\n",
- "special_tokens_map.json: 100%|█████████████████| 357/357 [00:00<00:00, 4.00MB/s]\n",
- "pytorch_model.bin: 100%|████████████████████| 24.2G/24.2G [02:04<00:00, 195MB/s]\n",
- "Traceback (most recent call last):\n",
+ "2024-07-04:16:55:06,590 INFO [__main__.py:272] Verbosity set to INFO\n",
+ "2024-07-04:16:55:11,201 INFO [__main__.py:369] Selected Tasks: ['hellaswag']\n",
+ "2024-07-04:16:55:11,202 INFO [evaluator.py:152] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234\n",
+ "2024-07-04:16:55:11,202 INFO [evaluator.py:189] Initializing hf model, with arguments: {'pretrained': 'EleutherAI/gpt-j-6B'}\n",
+ "2024-07-04:16:55:11,241 INFO [huggingface.py:170] Using device 'cuda:0'\n",
+ "Downloading builder script: 100%|██████████| 4.36k/4.36k [00:00<00:00, 19.3MB/s]\n",
+ "Downloading metadata: 100%|████████████████| 2.53k/2.53k [00:00<00:00, 17.6MB/s]\n",
+ "Downloading readme: 100%|██████████████████| 6.84k/6.84k [00:00<00:00, 24.9MB/s]\n",
+ "Downloading data: 47.5MB [00:00, 137MB/s] \n",
+ "Downloading data: 11.8MB [00:00, 92.0MB/s] \n",
+ "Downloading data: 12.2MB [00:00, 93.1MB/s] \n",
+ "Generating train split: 100%|███| 39905/39905 [00:02<00:00, 17573.67 examples/s]\n",
+ "Generating test split: 100%|████| 10003/10003 [00:00<00:00, 17738.73 examples/s]\n",
+ "Generating validation split: 100%|█| 10042/10042 [00:00<00:00, 17489.21 examples\n",
+ "Map: 100%|██████████████████████| 39905/39905 [00:03<00:00, 10310.72 examples/s]\n",
+ "Map: 100%|███████████████████████| 10042/10042 [00:01<00:00, 9523.34 examples/s]\n",
+ "2024-07-04:16:56:20,566 INFO [evaluator.py:261] Setting fewshot random generator seed to 1234\n",
+ "2024-07-04:16:56:20,567 INFO [task.py:411] Building contexts for hellaswag on rank 0...\n",
+ "100%|███████████████████████████████████| 10042/10042 [00:02<00:00, 3806.50it/s]\n",
+ "2024-07-04:16:56:23,924 INFO [evaluator.py:438] Running loglikelihood requests\n",
+ "Running loglikelihood requests: 0%| | 0/40168 [00:00<?, ?it/s]Traceback (most recent call last):\n",
  " File \"/opt/conda/bin/lm_eval\", line 8, in <module>\n",
  " sys.exit(cli_evaluate())\n",
  " File \"/lm-evaluation-harness/lm_eval/__main__.py\", line 375, in cli_evaluate\n",
  " results = evaluator.simple_evaluate(\n",
  " File \"/lm-evaluation-harness/lm_eval/utils.py\", line 395, in _wrapper\n",
  " return fn(*args, **kwargs)\n",
- " File \"/lm-evaluation-harness/lm_eval/evaluator.py\", line 192, in simple_evaluate\n",
- " lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n",
- " File \"/lm-evaluation-harness/lm_eval/api/model.py\", line 148, in create_from_arg_string\n",
- " return cls(**args, **args2)\n",
- " File \"/lm-evaluation-harness/lm_eval/models/huggingface.py\", line 218, in __init__\n",
- " self._create_model(\n",
- " File \"/lm-evaluation-harness/lm_eval/models/huggingface.py\", line 541, in _create_model\n",
- " self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n",
- " File \"/opt/conda/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n",
- " return model_class.from_pretrained(\n",
- " File \"/opt/conda/lib/python3.10/site-packages/transformers/modeling_utils.py\", line 3838, in from_pretrained\n",
- " ) = cls._load_pretrained_model(\n",
- " File \"/opt/conda/lib/python3.10/site-packages/transformers/modeling_utils.py\", line 4298, in _load_pretrained_model\n",
- " new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n",
- " File \"/opt/conda/lib/python3.10/site-packages/transformers/modeling_utils.py\", line 895, in _load_state_dict_into_meta_model\n",
- " set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n",
- " File \"/opt/conda/lib/python3.10/site-packages/accelerate/utils/modeling.py\", line 404, in set_module_tensor_to_device\n",
- " new_value = value.to(device)\n",
- "torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 MiB. GPU 0 has a total capacity of 23.65 GiB of which 216.25 MiB is free. Process 1653457 has 14.78 GiB memory in use. Process 1705644 has 8.65 GiB memory in use. Of the allocated memory 8.27 GiB is allocated by PyTorch, and 667.00 KiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n"
+ " File \"/lm-evaluation-harness/lm_eval/evaluator.py\", line 277, in simple_evaluate\n",
+ " results = evaluate(\n",
+ " File \"/lm-evaluation-harness/lm_eval/utils.py\", line 395, in _wrapper\n",
+ " return fn(*args, **kwargs)\n",
+ " File \"/lm-evaluation-harness/lm_eval/evaluator.py\", line 449, in evaluate\n",
+ " resps = getattr(lm, reqtype)(cloned_reqs)\n",
+ " File \"/lm-evaluation-harness/lm_eval/api/model.py\", line 371, in loglikelihood\n",
+ " return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm)\n",
+ " File \"/lm-evaluation-harness/lm_eval/models/huggingface.py\", line 1086, in _loglikelihood_tokens\n",
+ " self._model_call(batched_inps, **call_kwargs), dim=-1\n",
+ " File \"/lm-evaluation-harness/lm_eval/models/huggingface.py\", line 801, in _model_call\n",
+ " return self.model(inps).logits\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n",
+ " return self._call_impl(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n",
+ " return forward_call(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py\", line 1124, in forward\n",
+ " transformer_outputs = self.transformer(\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n",
+ " return self._call_impl(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n",
+ " return forward_call(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py\", line 950, in forward\n",
+ " outputs = block(\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n",
+ " return self._call_impl(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n",
+ " return forward_call(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py\", line 590, in forward\n",
+ " feed_forward_hidden_states = self.mlp(hidden_states)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n",
+ " return self._call_impl(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n",
+ " return forward_call(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py\", line 552, in forward\n",
+ " hidden_states = self.act(hidden_states)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n",
+ " return self._call_impl(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n",
+ " return forward_call(*args, **kwargs)\n",
+ " File \"/opt/conda/lib/python3.10/site-packages/transformers/activations.py\", line 56, in forward\n",
+ " return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))\n",
+ "torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 76.00 MiB. GPU 0 has a total capacity of 23.65 GiB of which 27.81 MiB is free. Process 1711577 has 23.62 GiB memory in use. Of the allocated memory 23.07 GiB is allocated by PyTorch, and 103.80 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n",
+ "Running loglikelihood requests: 0%| | 0/40168 [00:00<?, ?it/s]\n"
  ]
  }
  ],
 
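Note on the changed output: the previous run (execution_count 93) hit CUDA OOM while loading the fp32 EleutherAI/gpt-j-6B checkpoint, a 24.2 GB pytorch_model.bin on a 23.65 GiB card already shared with two other processes; the re-run (execution_count 1) loads the model and the hellaswag data, then OOMs during the loglikelihood pass with the GPU fully occupied by the evaluation process itself. One plausible retry is sketched below, under the assumption that half-precision weights (roughly 12 GB for 6B parameters) plus batch size 1 fit the card; the flags are standard lm-evaluation-harness CLI options, and the allocator setting is the one the error message itself recommends:

import os
import subprocess

env = os.environ.copy()
# Recommended by the OOM message to reduce fragmentation
env["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
# Silences the tokenizers fork warning seen in the old stderr output
env["TOKENIZERS_PARALLELISM"] = "false"

subprocess.run(
    [
        "lm_eval",
        "--model", "hf",
        # float16 roughly halves the resident weight memory of gpt-j-6B
        "--model_args", "pretrained=EleutherAI/gpt-j-6B,dtype=float16",
        "--tasks", "hellaswag",
        "--batch_size", "1",
        "--device", "cuda:0",
    ],
    env=env,
    check=True,
)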