Dataset schema (one record per model; fields appear in this order for every row below):
- modelId: string, length 5 to 122
- author: string, length 2 to 42
- last_modified: unknown dtype (values are ISO 8601 timestamps)
- downloads: int64, 0 to 738M
- likes: int64, 0 to 11k
- library_name: string, 245 classes
- tags: sequence, length 1 to 4.05k
- pipeline_tag: string, 48 classes
- createdAt: unknown dtype (values are ISO 8601 timestamps)
- card: string, length 1 to 901k
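A minimal sketch for reading records with the schema above, assuming they are exported as JSON Lines with one object per model; the file name "models.jsonl" and the export format are assumptions, not part of the original dump.

```python
import json

# Iterate over the exported rows and print a few metadata fields.
# Each row carries Hub metadata plus the raw README text in "card".
with open("models.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        print(row["modelId"], row["pipeline_tag"], row["downloads"], row["likes"])
```

The records that follow are shown field by field in the column order listed above; placeholder cards ("Entry not found", auto-generated templates) are the literal stored values.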
ILKT/2024-06-24_00-11-56_epoch_63
ILKT
"2024-06-28T19:41:57Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:41:55Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_64
ILKT
"2024-06-28T19:42:14Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:42:14Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_65
ILKT
"2024-06-28T19:42:32Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:42:31Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_66
ILKT
"2024-06-28T19:42:50Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:42:49Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_67
ILKT
"2024-06-28T19:43:08Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:43:07Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_68
ILKT
"2024-06-28T19:43:26Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:43:25Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_69
ILKT
"2024-06-28T19:43:44Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:43:43Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_70
ILKT
"2024-06-28T19:44:01Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:44:00Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_71
ILKT
"2024-06-28T19:44:19Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:44:18Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
habulaj/1543115210
habulaj
"2024-06-28T19:44:32Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T19:44:25Z"
Entry not found
ILKT/2024-06-24_00-11-56_epoch_72
ILKT
"2024-06-28T19:44:37Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:44:36Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_73
ILKT
"2024-06-28T19:44:54Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:44:54Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_74
ILKT
"2024-06-28T19:45:12Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:45:12Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
ILKT/2024-06-24_00-11-56_epoch_75
ILKT
"2024-06-28T19:45:31Z"
0
0
sentence-transformers
[ "sentence-transformers", "sentence-similarity", "mteb", "feature-extraction", "en", "pl", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-06-28T19:45:30Z"
--- language: - en - pl model-index: - name: PLACEHOLDER results: [] pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - mteb - feature-extraction ---
Hoodg/Bonbong_2
Hoodg
"2024-06-28T19:48:20Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T19:48:20Z"
Entry not found
TheFinAI/finllm-8B-closed-raw
TheFinAI
"2024-06-28T19:48:47Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T19:48:47Z"
Entry not found
vitoriapope/Srgarrison
vitoriapope
"2024-06-28T19:52:22Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T19:50:06Z"
Entry not found
Cringe1324/Testing
Cringe1324
"2024-06-28T19:52:17Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T19:52:17Z"
Entry not found
bdsaglam/llama-3-8b-jerx-rltf-peft-na1862fl
bdsaglam
"2024-06-28T19:55:15Z"
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-28T19:55:03Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
sert121/defog-sqlcoder3-dpo
sert121
"2024-06-28T19:55:44Z"
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-28T19:55:34Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Imohsinali/code-search-net-tokenizer
Imohsinali
"2024-06-28T19:55:56Z"
0
0
transformers
[ "transformers", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-28T19:55:53Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
mikmak2/matmikmiak
mikmak2
"2024-06-28T19:59:26Z"
0
0
null
[ "license:apache-2.0", "region:us" ]
null
"2024-06-28T19:59:26Z"
--- license: apache-2.0 ---
LowFace/test
LowFace
"2024-06-30T15:57:31Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:03:32Z"
Hello
RichardErkhov/Steelskull_-_Etheria-55b-v0.1-gguf
RichardErkhov
"2024-06-28T20:04:59Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:04:59Z"
Entry not found
lashao/miewid-msv2-v3-imagenet
lashao
"2024-06-28T20:08:01Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:08:01Z"
Entry not found
habulaj/6068245732
habulaj
"2024-06-28T20:15:21Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:15:13Z"
Entry not found
habulaj/391516357306
habulaj
"2024-06-28T20:16:13Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:15:52Z"
Entry not found
habulaj/202285175643
habulaj
"2024-06-28T20:17:37Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:17:35Z"
Entry not found
habulaj/150140127330
habulaj
"2024-06-28T20:20:34Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:20:28Z"
Entry not found
hannesthu/make_one_hot_first
hannesthu
"2024-06-28T20:27:08Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:12Z"
# hannesthu/make_one_hot_first This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_absolute
hannesthu
"2024-06-28T20:27:10Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:16Z"
# hannesthu/make_absolute This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_swap_first_last
hannesthu
"2024-06-28T20:27:12Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:19Z"
# hannesthu/make_swap_first_last This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_swap_min_max
hannesthu
"2024-06-28T20:27:14Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:23Z"
# hannesthu/make_swap_min_max This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_rank
hannesthu
"2024-06-28T20:27:18Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:27Z"
# hannesthu/make_rank This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_hyperbolic_cosine
hannesthu
"2024-06-28T20:27:20Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:30Z"
# hannesthu/make_hyperbolic_cosine This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_index_parity
hannesthu
"2024-06-28T20:27:23Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:34Z"
# hannesthu/make_index_parity This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_reverse
hannesthu
"2024-06-28T20:27:25Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:37Z"
# hannesthu/make_reverse This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_sort_freq
hannesthu
"2024-06-28T20:27:28Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:40Z"
# hannesthu/make_sort_freq This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_check_square
hannesthu
"2024-06-28T20:27:30Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:44Z"
# hannesthu/make_check_square This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_polynomial
hannesthu
"2024-06-28T20:27:32Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:47Z"
# hannesthu/make_polynomial This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_element_divide
hannesthu
"2024-06-28T20:27:34Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:51Z"
# hannesthu/make_element_divide This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_count_prime_factors
hannesthu
"2024-06-28T20:27:36Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:54Z"
# hannesthu/make_count_prime_factors This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_factorial
hannesthu
"2024-06-28T20:27:38Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:23:57Z"
# hannesthu/make_factorial This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_element_second
hannesthu
"2024-06-28T20:27:40Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:24:01Z"
# hannesthu/make_element_second This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_next_prime
hannesthu
"2024-06-28T20:27:42Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:24:04Z"
# hannesthu/make_next_prime This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_triple
hannesthu
"2024-06-28T20:27:44Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:24:07Z"
# hannesthu/make_triple This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_hyperbolic_tangent
hannesthu
"2024-06-28T20:27:46Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:24:11Z"
# hannesthu/make_hyperbolic_tangent This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_swap_consecutive
hannesthu
"2024-06-28T20:27:48Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:24:14Z"
# hannesthu/make_swap_consecutive This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
habulaj/541179516695
habulaj
"2024-06-28T20:24:28Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:24:24Z"
Entry not found
habulaj/6340247536
habulaj
"2024-06-28T20:26:54Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:26:50Z"
Entry not found
hannesthu/make_cube_each_element
hannesthu
"2024-06-28T20:27:52Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:27:49Z"
# hannesthu/make_cube_each_element This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_halve_second_half
hannesthu
"2024-06-28T20:27:55Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:27:52Z"
# hannesthu/make_halve_second_half This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_invert_if_sorted
hannesthu
"2024-06-28T20:27:59Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:27:56Z"
# hannesthu/make_invert_if_sorted This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_reflect
hannesthu
"2024-06-28T20:28:03Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:00Z"
# hannesthu/make_reflect This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_check_multiple_of_n
hannesthu
"2024-06-28T20:28:07Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:03Z"
# hannesthu/make_check_multiple_of_n This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_flip_halves
hannesthu
"2024-06-28T20:28:10Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:07Z"
# hannesthu/make_flip_halves This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_set_to_index
hannesthu
"2024-06-28T20:28:13Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:10Z"
# hannesthu/make_set_to_index This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_check_last_two_equal
hannesthu
"2024-06-28T20:28:17Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:14Z"
# hannesthu/make_check_last_two_equal This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_sign
hannesthu
"2024-06-28T20:28:20Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:18Z"
# hannesthu/make_sign This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_count
hannesthu
"2024-06-28T20:28:24Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:21Z"
# hannesthu/make_count This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_compute_median
hannesthu
"2024-06-28T20:28:28Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:24Z"
# hannesthu/make_compute_median This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_zero_if_less_than_previous
hannesthu
"2024-06-28T20:28:33Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:29Z"
# hannesthu/make_zero_if_less_than_previous This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_remove_duplicates
hannesthu
"2024-06-28T20:28:40Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:34Z"
# hannesthu/make_remove_duplicates This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_cube_root
hannesthu
"2024-06-28T20:28:44Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:41Z"
# hannesthu/make_cube_root This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_sum_of_last_two
hannesthu
"2024-06-28T20:28:50Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:45Z"
# hannesthu/make_sum_of_last_two This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_sort_unique
hannesthu
"2024-06-28T20:28:59Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:51Z"
# hannesthu/make_sort_unique This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_arccosine
hannesthu
"2024-06-28T20:29:02Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:28:59Z"
# hannesthu/make_arccosine This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_swap_odd_index
hannesthu
"2024-06-28T20:29:06Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:03Z"
# hannesthu/make_swap_odd_index This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_rescale
hannesthu
"2024-06-28T20:29:10Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:06Z"
# hannesthu/make_rescale This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_check_multiple_of_first
hannesthu
"2024-06-28T20:29:13Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:10Z"
# hannesthu/make_check_multiple_of_first This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_tangent
hannesthu
"2024-06-28T20:29:16Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:14Z"
# hannesthu/make_tangent This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_check_prime
hannesthu
"2024-06-28T20:29:20Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:17Z"
# hannesthu/make_check_prime This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_increment_by_index
hannesthu
"2024-06-28T20:29:24Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:21Z"
# hannesthu/make_increment_by_index This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_min_element
hannesthu
"2024-06-28T20:29:27Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:24Z"
# hannesthu/make_min_element This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_wrap
hannesthu
"2024-06-28T20:29:31Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:27Z"
# hannesthu/make_wrap This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_sum_digits
hannesthu
"2024-06-28T20:29:34Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:31Z"
# hannesthu/make_sum_digits This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_replace_small_tokens
hannesthu
"2024-06-28T20:29:37Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:35Z"
# hannesthu/make_replace_small_tokens This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_rescale_by_max
hannesthu
"2024-06-28T20:29:41Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:38Z"
# hannesthu/make_rescale_by_max This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_difference_to_next
hannesthu
"2024-06-28T20:29:45Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:42Z"
# hannesthu/make_difference_to_next This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_count_occurrences
hannesthu
"2024-06-28T20:29:48Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:45Z"
# hannesthu/make_count_occurrences This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_check_alternating
hannesthu
"2024-06-28T20:29:52Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:49Z"
# hannesthu/make_check_alternating This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_exponential
hannesthu
"2024-06-28T20:29:55Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:52Z"
# hannesthu/make_exponential This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_check_fibonacci
hannesthu
"2024-06-28T20:29:59Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:56Z"
# hannesthu/make_check_fibonacci This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_hist
hannesthu
"2024-06-28T20:30:02Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:29:59Z"
# hannesthu/make_hist This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_square_root
hannesthu
"2024-06-28T20:30:06Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:03Z"
# hannesthu/make_square_root This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_test_at_least_two_equal
hannesthu
"2024-06-28T20:30:09Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:06Z"
# hannesthu/make_test_at_least_two_equal This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_check_descending
hannesthu
"2024-06-28T20:30:12Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:09Z"
# hannesthu/make_check_descending This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_negation
hannesthu
"2024-06-28T20:30:15Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:13Z"
# hannesthu/make_negation This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_round
hannesthu
"2024-06-28T20:30:19Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:16Z"
# hannesthu/make_round This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_count_less_freq
hannesthu
"2024-06-28T20:30:22Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:19Z"
# hannesthu/make_count_less_freq This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_length
hannesthu
"2024-06-28T20:30:25Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:22Z"
# hannesthu/make_length This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_one_hot_decode
hannesthu
"2024-06-28T20:30:28Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:26Z"
# hannesthu/make_one_hot_decode This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_identity
hannesthu
"2024-06-28T20:30:31Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:29Z"
# hannesthu/make_identity This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
jamking/dqn-SpaceInvadersNoFrameskip-v4
jamking
"2024-06-28T20:30:29Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:29Z"
Entry not found
hannesthu/make_check_divisibility
hannesthu
"2024-06-28T20:30:35Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:32Z"
# hannesthu/make_check_divisibility This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_logarithm
hannesthu
"2024-06-28T20:30:38Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:35Z"
# hannesthu/make_logarithm This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_double_first_half
hannesthu
"2024-06-28T20:30:41Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:38Z"
# hannesthu/make_double_first_half This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_check_palindrome
hannesthu
"2024-06-28T20:30:45Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:42Z"
# hannesthu/make_check_palindrome This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_arcsine
hannesthu
"2024-06-28T20:30:48Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:45Z"
# hannesthu/make_arcsine This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder
hannesthu/make_count_less_than
hannesthu
"2024-06-28T20:30:51Z"
0
0
null
[ "region:us" ]
null
"2024-06-28T20:30:48Z"
# hannesthu/make_count_less_than This is a custom model created using TransformerLens. Files: - tl_model.pt: The PyTorch model file - input&output_encoder.pkl: The input and output encoder