{ "_name_or_path": "model/falcon/falcon-rw-1b-bf16-train_batch=4-train=rw_code_falcon_dataset", "alibi": true, "apply_residual_connection_post_layernorm": false, "architectures": [ "FalconForDistill" ], "attention_dropout": 0.0, "auto_map": { "AutoConfig": "tiiuae/falcon-rw-1b--configuration_falcon.FalconConfig", "AutoModel": "tiiuae/falcon-rw-1b--modeling_falcon.FalconModel", "AutoModelForCausalLM": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForCausalLM", "AutoModelForQuestionAnswering": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForQuestionAnswering", "AutoModelForSequenceClassification": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForSequenceClassification", "AutoModelForTokenClassification": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForTokenClassification" }, "bias": true, "bos_token_id": 1, "eos_token_id": null, "hidden_dropout": 0.0, "hidden_size": 2048, "initializer_range": 0.02, "layer_norm_epsilon": 1e-05, "max_position_embeddings": 2048, "model_type": "falcon", "multi_query": false, "new_decoder_architecture": false, "num_attention_heads": 32, "num_hidden_layers": 24, "num_kv_heads": 32, "parallel_attn": false, "rope_scaling": null, "rope_theta": 10000.0, "torch_dtype": "bfloat16", "transformers_version": "4.34.1", "use_cache": true, "vocab_size": 50304 }