{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/kyrylo/Sem-7/Anlp/Grokking/Minimal/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] } ], "source": [ "import torch\n", "from transformers import AutoModelForCausalLM, AutoTokenizer\n", "from huggingface_hub import hf_hub_download, upload_folder\n", "from pathlib import Path" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'tokenizer.json'" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "\n", "model_name = \"gpt2\" # Replace with the Hugging Face model name you want to convert\n", "local_dir = \"./\" # to store the GGML model\n", "\n", "model_path = hf_hub_download(repo_id='openai-community/gpt2', filename=\"pytorch_model.bin\", local_dir=local_dir) \n", "model_path = hf_hub_download(repo_id='openai-community/gpt2', filename=\"config.json\", local_dir=local_dir) \n", "model_path = hf_hub_download(repo_id='openai-community/gpt2', filename=\"tokenizer.json\", local_dir=local_dir) \n", "model_path" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/tmp/ipykernel_96016/408161957.py:1: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n", " model =torch.load(f'./pytorch_model.bin')\n" ] } ], "source": [ "model =torch.load(f'./pytorch_model.bin')\n", "torch.save(model, './pytorch_model.bin', _use_new_zipfile_serialization=True)\n", "# ! rm pytorch_model.bin" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "INFO:hf-to-gguf:Loading model: \n", "INFO:gguf.gguf_writer:gguf: This GGUF file is for Little Endian only\n", "INFO:hf-to-gguf:Exporting model...\n", "INFO:hf-to-gguf:gguf: loading model part 'pytorch_model.bin'\n", "INFO:hf-to-gguf:token_embd.weight, torch.float32 --> Q8_0, shape = {768, 50257}\n", "INFO:hf-to-gguf:output.weight, torch.float32 --> Q8_0, shape = {768, 50257}\n", "INFO:hf-to-gguf:position_embd.weight, torch.float32 --> F32, shape = {768, 1024}\n", "INFO:hf-to-gguf:blk.0.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.0.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.0.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.0.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.0.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.0.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.0.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.0.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.0.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.0.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.0.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.0.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.1.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.1.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.1.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.1.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.1.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.1.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.1.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.1.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.1.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.1.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.1.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.1.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.2.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.2.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.2.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.2.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.2.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.2.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.2.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.2.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.2.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.2.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.2.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.2.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.3.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.3.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.3.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.3.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.3.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.3.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.3.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.3.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.3.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.3.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.3.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.3.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.4.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.4.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.4.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.4.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.4.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.4.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.4.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.4.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.4.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.4.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.4.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.4.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.5.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.5.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.5.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.5.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.5.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.5.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.5.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.5.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.5.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.5.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.5.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.5.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.6.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.6.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.6.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.6.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.6.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.6.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.6.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.6.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.6.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.6.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.6.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.6.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.7.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.7.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.7.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.7.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.7.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.7.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.7.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.7.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.7.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.7.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.7.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.7.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.8.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.8.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.8.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.8.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.8.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.8.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.8.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.8.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.8.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.8.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.8.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.8.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.9.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.9.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.9.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.9.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.9.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.9.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.9.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.9.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.9.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.9.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.9.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.9.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.10.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.10.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.10.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.10.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.10.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.10.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.10.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.10.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.10.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.10.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.10.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.10.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.11.attn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.11.attn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.11.attn_qkv.weight, torch.float32 --> Q8_0, shape = {768, 2304}\n", "INFO:hf-to-gguf:blk.11.attn_qkv.bias, torch.float32 --> F32, shape = {2304}\n", "INFO:hf-to-gguf:blk.11.attn_output.weight, torch.float32 --> Q8_0, shape = {768, 768}\n", "INFO:hf-to-gguf:blk.11.attn_output.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.11.ffn_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.11.ffn_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:blk.11.ffn_up.weight, torch.float32 --> Q8_0, shape = {768, 3072}\n", "INFO:hf-to-gguf:blk.11.ffn_up.bias, torch.float32 --> F32, shape = {3072}\n", "INFO:hf-to-gguf:blk.11.ffn_down.weight, torch.float32 --> Q8_0, shape = {3072, 768}\n", "INFO:hf-to-gguf:blk.11.ffn_down.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:output_norm.weight, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:output_norm.bias, torch.float32 --> F32, shape = {768}\n", "INFO:hf-to-gguf:Set meta model\n", "INFO:hf-to-gguf:Set model parameters\n", "INFO:hf-to-gguf:Set model tokenizer\n", "DEBUG:hf-to-gguf:chktok: [198, 220, 628, 220, 628, 198, 220, 197, 220, 197, 197, 220, 197, 198, 220, 220, 198, 220, 220, 220, 198, 220, 220, 220, 220, 198, 220, 220, 220, 220, 220, 198, 8582, 248, 222, 357, 11265, 8, 30325, 114, 447, 235, 8582, 234, 104, 37929, 357, 48101, 795, 13210, 271, 1673, 36686, 515, 8, 14519, 227, 12520, 99, 247, 8582, 99, 247, 513, 4747, 23460, 513, 20370, 23460, 2091, 23460, 20370, 23460, 24840, 23460, 2091, 20370, 513, 13, 18, 513, 492, 18, 513, 986, 18, 28053, 252, 222, 157, 252, 114, 157, 252, 241, 157, 253, 233, 157, 252, 237, 157, 253, 224, 157, 252, 244, 157, 252, 115, 157, 252, 253, 157, 253, 223, 157, 252, 253, 157, 252, 95, 157, 252, 114, 157, 252, 227, 47249, 223, 5633, 22755, 239, 46349, 111, 28839, 101, 18040, 32432, 98, 43291, 1485, 1415, 24309, 25465, 171, 121, 252, 40103, 1421, 18604, 12466, 121, 16843, 141, 231, 15166, 12466, 121, 16142, 12466, 239, 141, 232, 30143, 140, 111, 16142, 21169, 21727, 31583, 18849, 705, 39115, 6, 33153, 15506, 63, 15931, 15931, 16317, 13896, 3228, 9805, 3548, 314, 1053, 587, 705, 44040, 339, 338, 612, 11, 705, 2200, 345, 1654, 30, 705, 44, 407, 1654, 314, 1183, 787, 340, 11, 705, 35, 345, 588, 617, 8887, 30, 775, 6, 26979, 257, 6, 75, 43]\n", "DEBUG:hf-to-gguf:chkhsh: 3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454\n", "DEBUG:hf-to-gguf:tokenizer.ggml.pre: 'gpt-2'\n", "DEBUG:hf-to-gguf:chkhsh: 3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454\n", "INFO:gguf.vocab:Adding 50000 merge(s).\n", "INFO:gguf.vocab:Setting special token type bos to 50256\n", "INFO:gguf.vocab:Setting special token type eos to 50256\n", "INFO:hf-to-gguf:Set model quantization version\n", "INFO:gguf.gguf_writer:Writing the following files:\n", "INFO:gguf.gguf_writer:gpt2.ggml: n_tensors = 149, total_size = 175.9M\n", "Writing: 100%|█████████████████████████████| 176M/176M [00:01<00:00, 129Mbyte/s]\n", "INFO:hf-to-gguf:Model successfully exported to gpt2.ggml\n" ] } ], "source": [ "!../llama.cpp/convert_hf_to_gguf.py --outfile {local_dir}/{model_name}.ggml --outtype q8_0 --verbose ./" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from huggingface_hub import create_repo, upload_folder\n", "\n", "\n", "repo_id = \"kyrylokumar/gpt2-quantzed-gguf\" \n", "create_repo(repo_id=repo_id, exist_ok=True) # exist_ok=True avoids errors if the repo already exists\n", "\n", "# Upload the folder\n", "local_dir = \"./\" # Path to the directory you want to upload\n", "upload_folder(\n", " repo_id=repo_id,\n", " folder_path=local_dir,\n", " commit_message=\"Added extra files\", # Optional commit message\n", " ignore_patterns=\".git*\", # Optional: ignore .git files and other patterns\n", ")\n", "\n", "print(f\"Directory '{local_dir}' pushed to: {repo_id}\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Minimal", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" } }, "nbformat": 4, "nbformat_minor": 2 }