modelId (stringlengths 4–81) · tags (sequence) · pipeline_tag (stringclasses, 17 values) · config (dict) · downloads (int64, 0–59.7M) · first_commit (unknown) · card (stringlengths 51–438k)
Bharathdamu/wav2vec2-large-xls-r-300m-hindi-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-hindi-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-hindi-colab This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu111 - Datasets 1.13.3 - Tokenizers 0.10.3
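Since the card's usage sections are still blank, here is a minimal inference sketch for a Wav2Vec2ForCTC checkpoint such as this one. It assumes the repository ships a matching processor; the audio path and 16 kHz resampling are illustrative, not taken from the card.

```python
# Usage sketch (not from the original card): transcribe a 16 kHz Hindi clip
# with this fine-tuned Wav2Vec2ForCTC checkpoint.
import torch
import librosa
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

processor = Wav2Vec2Processor.from_pretrained("Bharathdamu/wav2vec2-large-xls-r-300m-hindi-colab")
model = Wav2Vec2ForCTC.from_pretrained("Bharathdamu/wav2vec2-large-xls-r-300m-hindi-colab")

# "sample.wav" is a placeholder path; XLS-R expects 16 kHz mono audio.
speech, _ = librosa.load("sample.wav", sr=16_000)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(inputs.input_values).logits

# CTC decoding: pick the most likely token per frame, then collapse repeats.
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids)[0])
```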
Bharathdamu/wav2vec2-large-xls-r-300m-hindi
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-hindi results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-hindi This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu111 - Datasets 1.13.3 - Tokenizers 0.10.3
Bhuvana/t5-base-spellchecker
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
93
null
--- widget: - text: "christmas is celbrated on decembr 25 evry ear" --- # Spell checker using T5 base transformer A simple spell checker built using the T5-base transformer. To use this model: ``` from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("Bhuvana/t5-base-spellchecker") model = AutoModelForSeq2SeqLM.from_pretrained("Bhuvana/t5-base-spellchecker") def correct(inputs): input_ids = tokenizer.encode(inputs, return_tensors='pt') sample_output = model.generate( input_ids, do_sample=True, max_length=50, top_p=0.99, num_return_sequences=1 ) res = tokenizer.decode(sample_output[0], skip_special_tokens=True) return res text = "christmas is celbrated on decembr 25 evry ear" print(correct(text)) ``` This should print the corrected statement: ``` christmas is celebrated on december 25 every year ``` You can also type text into the Hosted Inference API widget to get predictions online.
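The snippet above decodes with sampling (top_p=0.99), so corrections can vary from run to run. A deterministic variant, shown as a sketch against the same checkpoint, swaps sampling for beam search:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Bhuvana/t5-base-spellchecker")
model = AutoModelForSeq2SeqLM.from_pretrained("Bhuvana/t5-base-spellchecker")

def correct_deterministic(text: str) -> str:
    # Beam search with sampling disabled returns the same correction every run.
    input_ids = tokenizer.encode(text, return_tensors="pt")
    output = model.generate(input_ids, do_sample=False, num_beams=4, max_length=50)
    return tokenizer.decode(output[0], skip_special_tokens=True)

print(correct_deterministic("christmas is celbrated on decembr 25 evry ear"))
```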
BigSalmon/GPTNeo350MInformalToFormalLincoln2
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
Trained on this model: https://huggingface.co/xhyi/PT_GPTNEO350_ATG/tree/main ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. Text: failing to draw in the masses, the NBA has fallen into disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap solutions could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ```
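Unlike the later checkpoints in this series, this card does not include loading code. The sketch below follows the same pattern, pointed at this checkpoint; the prompt is taken from the card, while the decoding settings are illustrative assumptions.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln2")
model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln2")

# Build a prompt in the card's format and let the model complete the formal rewrite.
prompt = (
    "informal english: i am very ready to do that just that.\n"
    "Translated into the Style of Abraham Lincoln:"
)
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
output = model.generate(
    input_ids,
    do_sample=True,          # illustrative settings, not from the card
    top_p=0.9,
    max_new_tokens=40,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```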
BigSalmon/GPTNeo350MInformalToFormalLincoln3
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
Trained on this model: https://huggingface.co/xhyi/PT_GPTNEO350_ATG/tree/main ``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln3") model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln3") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. Text: failing to draw in the masses, the NBA has fallen into disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap solutions could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (California High-Speed Rail): built with an eye on the future, california's high-speed rail service resolves to change the face of travel. Essay Intro (YIMBY's Need To Win): home to the most expensive housing market in the united states, san francisco is the city in which the yimby and anti-yimby hordes wage an eternal battle. Essay Intro ( ```
BigSalmon/GPTNeo350MInformalToFormalLincoln4
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
Trained on this model: https://huggingface.co/xhyi/PT_GPTNEO350_ATG/tree/main ``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln4") model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln4") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. Text: failing to draw in the masses, the NBA has fallen into disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap solutions could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (California High-Speed Rail): built with an eye on the future, california's high-speed rail service resolves to change the face of travel. Essay Intro (YIMBY's Need To Win): home to the most expensive housing market in the united states, san francisco is the city in which the yimby and anti-yimby hordes wage an eternal battle. Essay Intro ( ```
BigSalmon/GPTNeo350MInformalToFormalLincoln5
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
Trained on this model: https://huggingface.co/xhyi/PT_GPTNEO350_ATG/tree/main ``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln5") model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln5") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. Text: failing to draw in the masses, the NBA has fallen into disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap solutions could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (California High-Speed Rail): built with an eye on the future, california's high-speed rail service resolves to change the face of travel. Essay Intro (YIMBY's Need To Win): home to the most expensive housing market in the united states, san francisco is the city in which the yimby and anti-yimby hordes wage an eternal battle. Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate.
https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ```
BigSalmon/InformalToFormalLincoln20
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
Informal to Formal: Wordy to Concise: Fill Missing Phrase: ``` from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln20") model = AutoModelWithLMHead.from_pretrained("BigSalmon/InformalToFormalLincoln20") ``` ``` https://huggingface.co/spaces/BigSalmon/GPT2 (The model for this space changes over time) ``` ``` https://huggingface.co/spaces/BigSalmon/GPT2_Most_Probable (The model for this space changes over time) ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ```` ``` infill: increasing the number of sidewalks in suburban areas will [MASK]. Translated into the Style of Abraham Lincoln: increasing the number of sidewalks in suburban areas will ( ( enhance / maximize ) community cohesion / facilitate ( communal ties / the formation of neighborhood camaraderie ) / forge neighborly relations / lend themselves to the advancement of neighborly ties / plant the seeds of community building / flower anew the bonds of friendship / invite the budding of neighborhood rapport / enrich neighborhood life ). infill: corn fields [MASK], [MASK] visibly as one ventures beyond chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), ( manifesting themselves ) visibly as one ventures beyond chicago. infill: the [MASK] the SAT will soon be [MASK]. [MASK] an examination undertaken on one's laptop. [MASK] will allow students to retrieve test results promptly. Translated into the Style of Abraham Lincoln: the ( conventional form of ) the SAT will soon be ( consigned to history ). ( replacing it will be ) an examination undertaken on one's laptop. ( so doing ) will allow students to retrieve test results promptly. infill: ``` ``` *** wordy: chancing upon a linux user is a rare occurrence in the present day. Translate into Concise Text: present-day linux users are rare. *** wordy: an interest in classical music is becoming more and more less popular. Translate into Concise Text: classical music appreciation is dwindling. Translate into Concise Text: waning interest in classic music persists. Translate into Concise Text: interest in classic music is fading. *** wordy: the ice cream was only one dollar, but it was not a good value for the size. Translate into Concise Text: the one dollar ice cream was overpriced for its size. Translate into Concise Text: overpriced, the one dollar ice cream was small. *** wordy: ```
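The card shows how to load the checkpoint but not how to decode from it. The following is a sketch, using the card's "Wordy to Concise" prompt format with illustrative sampling settings, that prints a few candidate rewrites:

```python
from transformers import AutoTokenizer, AutoModelWithLMHead

tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln20")
model = AutoModelWithLMHead.from_pretrained("BigSalmon/InformalToFormalLincoln20")

# "Wordy to Concise" prompt in the card's format; decoding settings are illustrative.
prompt = (
    "wordy: chancing upon a linux user is a rare occurrence in the present day.\n"
    "Translate into Concise Text:"
)
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
outputs = model.generate(
    input_ids,
    do_sample=True,
    top_p=0.9,
    max_new_tokens=30,
    num_return_sequences=3,   # sample a few candidate rewrites
    pad_token_id=tokenizer.eos_token_id,
)
for candidate in tokenizer.batch_decode(outputs, skip_special_tokens=True):
    print(candidate)
```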
BigSalmon/InformalToFormalLincoln21
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
Informal to Formal: Wordy to Concise: Fill Missing Phrase: ``` from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln21") model = AutoModelWithLMHead.from_pretrained("BigSalmon/InformalToFormalLincoln21") ``` ``` https://huggingface.co/spaces/BigSalmon/GPT2 (The model for this space changes over time) ``` ``` https://huggingface.co/spaces/BigSalmon/GPT2_Most_Probable (The model for this space changes over time) ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ```` ``` infill: increasing the number of sidewalks in suburban areas will [MASK]. Translated into the Style of Abraham Lincoln: increasing the number of sidewalks in suburban areas will ( ( enhance / maximize ) community cohesion / facilitate ( communal ties / the formation of neighborhood camaraderie ) / forge neighborly relations / lend themselves to the advancement of neighborly ties / plant the seeds of community building / flower anew the bonds of friendship / invite the budding of neighborhood rapport / enrich neighborhood life ). infill: corn fields [MASK], [MASK] visibly as one ventures beyond chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), ( manifesting themselves ) visibly as one ventures beyond chicago. infill: the [MASK] the SAT will soon be [MASK]. [MASK] an examination undertaken on one's laptop. [MASK] will allow students to retrieve test results promptly. Translated into the Style of Abraham Lincoln: the ( conventional form of ) the SAT will soon be ( consigned to history ). ( replacing it will be ) an examination undertaken on one's laptop. ( so doing ) will allow students to retrieve test results promptly. infill: ``` ``` *** wordy: chancing upon a linux user is a rare occurrence in the present day. Translate into Concise Text: present-day linux users are rare. *** wordy: an interest in classical music is becoming more and more less popular. Translate into Concise Text: classical music appreciation is dwindling. Translate into Concise Text: waning interest in classic music persists. Translate into Concise Text: interest in classic music is fading. *** wordy: the ice cream was only one dollar, but it was not a good value for the size. Translate into Concise Text: the one dollar ice cream was overpriced for its size. Translate into Concise Text: overpriced, the one dollar ice cream was small. *** wordy: ```
BigSalmon/InformalToFormalLincoln22
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln22") model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln22") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. Text: failing to draw in the masses, the NBA has fallen into disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap solutions could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (California High-Speed Rail): built with an eye on the future, california's high-speed rail service resolves to change the face of travel. Essay Intro (YIMBY's Need To Win): home to the most expensive housing market in the united states, san francisco is the city in which the yimby and anti-yimby hordes wage an eternal battle. Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? 
https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ```
BigSalmon/InformalToFormalLincoln24
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln24") model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln24") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. Text: failing to draw in the masses, the NBA has fallen into disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap solutions could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (California High-Speed Rail): built with an eye on the future, california's high-speed rail service resolves to change the face of travel. Essay Intro (YIMBY's Need To Win): home to the most expensive housing market in the united states, san francisco is the city in which the yimby and anti-yimby hordes wage an eternal battle. Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? 
https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ```
BigSalmon/InformalToFormalLincoln25
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln25") model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln25") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. Text: failing to draw in the masses, the NBA has fallen into disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap solutions could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (California High-Speed Rail): built with an eye on the future, california's high-speed rail service resolves to change the face of travel. Essay Intro (YIMBY's Need To Win): home to the most expensive housing market in the united states, san francisco is the city in which the yimby and anti-yimby hordes wage an eternal battle. Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? 
https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ``` ``` original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. *** original: ```
BigSalmon/MrLincoln11
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
Informal to Formal: ``` from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelWithLMHead.from_pretrained("BigSalmon/MrLincoln11") ``` ``` How To Make Prompt: Original: freedom of the press is a check against political corruption. Edited: fundamental to the spirit of democracy, freedom of the press is a check against political corruption. Edited 2: ever at odds with tyranny, freedom of the press is a check against political corruption. Edited 3: never to be neglected, freedom of the press is a check against political corruption. Original: solar is a beacon of achievement. Edited: central to decoupling from the perils of unsustainable energy, solar is a beacon of achievement. Edited 2: key to a future beyond fossil fuels, solar is a beacon of achievement. Original: milan is nevertheless ambivalent towards his costly terms. Edited: keen on contracting him, milan is nevertheless ambivalent towards his costly terms. Edited 2: intent on securing his services, milan is nevertheless ambivalent towards his costly terms. Original: ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. informal english: meteors are much harder to see, because they are only there for a fraction of a second. Translated into the Style of Abraham Lincoln: meteors are not ( easily / readily ) detectable, lasting for mere fractions of a second. informal english: ````
BigSalmon/MrLincoln12
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
Informal to Formal: ``` from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelWithLMHead.from_pretrained("BigSalmon/MrLincoln12") ``` ``` https://huggingface.co/spaces/BigSalmon/InformalToFormal ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. informal english: meteors are much harder to see, because they are only there for a fraction of a second. Translated into the Style of Abraham Lincoln: meteors are not ( easily / readily ) detectable, lasting for mere fractions of a second. informal english: ````
BigSalmon/MrLincoln7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Informal to Formal: ``` from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelWithLMHead.from_pretrained("BigSalmon/MrLincoln7") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. informal english: meteors are much harder to see, because they are only there for a fraction of a second. Translated into the Style of Abraham Lincoln: meteors are not ( easily / readily ) detectable, lasting for mere fractions of a second. informal english: ````
BigSalmon/MrLincoln8
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
Informal to Formal: ``` from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelWithLMHead.from_pretrained("BigSalmon/MrLincoln8") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. informal english: meteors are much harder to see, because they are only there for a fraction of a second. Translated into the Style of Abraham Lincoln: meteors are not ( easily / readily ) detectable, lasting for mere fractions of a second. informal english: ````
BigSalmon/MrLincolnBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
Example Prompt: ``` informal english: things are better when they are open source, because they are constantly being updated to enhance experience. Translated into the Style of Abraham Lincoln: in the open-source paradigm, code is ( ceaselessly / perpetually ) being ( reengineered / revamped / polished ), thereby ( advancing / enhancing / optimizing / <mask> ) the user experience. ``` Demo: https://huggingface.co/spaces/BigSalmon/MASK2
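A minimal loading sketch (not part of the original card), assuming the standard transformers fill-mask pipeline; the prompt reuses the example above:

```python
from transformers import pipeline

unmasker = pipeline("fill-mask", model="BigSalmon/MrLincolnBerta")

prompt = (
    "informal english: things are better when they are open source, because they are "
    "constantly being updated to enhance experience. Translated into the Style of Abraham "
    "Lincoln: in the open-source paradigm, code is ( ceaselessly / perpetually ) being "
    "( reengineered / revamped / polished ), thereby ( advancing / enhancing / optimizing / <mask> ) "
    "the user experience."
)

# Each candidate fill comes back with its token and score.
for candidate in unmasker(prompt):
    print(candidate["token_str"], round(candidate["score"], 4))
```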
BigTooth/DialoGPT-small-tohru
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - conversational --- # Tohru DialoGPT model
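A minimal chat-loop sketch (not part of the original card), following the standard DialoGPT usage pattern; the same pattern should apply to the other DialoGPT conversational models listed below:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("BigTooth/DialoGPT-small-tohru")
model = AutoModelForCausalLM.from_pretrained("BigTooth/DialoGPT-small-tohru")

chat_history_ids = None
for step in range(3):
    # Encode the user input and append the end-of-sentence token.
    new_input_ids = tokenizer.encode(input(">> User: ") + tokenizer.eos_token, return_tensors="pt")
    # Append the new user input to the running chat history.
    bot_input_ids = (
        torch.cat([chat_history_ids, new_input_ids], dim=-1)
        if chat_history_ids is not None
        else new_input_ids
    )
    # Generate a response while limiting the total history length.
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    print("Bot:", tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
```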
BigTooth/Megumin-v0.2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- tags: - conversational --- # Megumin-v0.2 model
BigeS/DialoGPT-small-Rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - conversational --- # Rick Sanchez DialoGPT Model
Blaine-Mason/hackMIT-finetuned-sst2
[ "pytorch", "tensorboard", "bert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- tags: - generated_from_trainer datasets: - glue metrics: - accuracy model_index: - name: hackMIT-finetuned-sst2 results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: sst2 metric: name: Accuracy type: accuracy value: 0.8027522935779816 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hackMIT-finetuned-sst2 This model is a fine-tuned version of [Blaine-Mason/hackMIT-finetuned-sst2](https://huggingface.co/Blaine-Mason/hackMIT-finetuned-sst2) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 1.1086 - Accuracy: 0.8028 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2.033238621168611e-06 - train_batch_size: 16 - eval_batch_size: 8 - seed: 30 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0674 | 1.0 | 4210 | 1.1086 | 0.8028 | ### Framework versions - Transformers 4.9.2 - Pytorch 1.9.0+cu102 - Datasets 1.11.0 - Tokenizers 0.10.3
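A minimal inference sketch (not part of the original card), assuming the standard transformers text-classification pipeline; the example sentence is illustrative and the label names depend on the fine-tuned config:

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="Blaine-Mason/hackMIT-finetuned-sst2")

# SST-2-style binary sentiment; label names (e.g. LABEL_0 / LABEL_1) come from the model config.
print(classifier("This movie was surprisingly good."))
```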
Broadus20/DialoGPT-small-joshua
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - conversational --- # Harry Potter DialoGPT Model
Brykee/DialoGPT-medium-Morty
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - conversational --- # Morty DialoGPT Model
CAMeL-Lab/bert-base-arabic-camelbert-ca-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
85
null
--- language: - ar license: apache-2.0 widget: - text: "إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع" --- # CAMeLBERT-CA NER Model ## Model description **CAMeLBERT-CA NER Model** is a Named Entity Recognition (NER) model that was built by fine-tuning the [CAMeLBERT Classical Arabic (CA)](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-ca/) model. For the fine-tuning, we used the [ANERcorp](https://camel.abudhabi.nyu.edu/anercorp/) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-CA NER model directly as part of our [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component (*recommended*) or as part of the transformers pipeline. #### How to use To use the model with the [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component: ```python >>> from camel_tools.ner import NERecognizer >>> from camel_tools.tokenizers.word import simple_word_tokenize >>> ner = NERecognizer('CAMeL-Lab/bert-base-arabic-camelbert-ca-ner') >>> sentence = simple_word_tokenize('إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع') >>> ner.predict_sentence(sentence) >>> ['O', 'B-LOC', 'O', 'O', 'O', 'O', 'B-LOC', 'I-LOC', 'I-LOC', 'O'] ``` You can also use the NER model directly with a transformers pipeline: ```python >>> from transformers import pipeline >>> ner = pipeline('ner', model='CAMeL-Lab/bert-base-arabic-camelbert-ca-ner') >>> ner("إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع") [{'word': 'أبوظبي', 'score': 0.9895730018615723, 'entity': 'B-LOC', 'index': 2, 'start': 6, 'end': 12}, {'word': 'الإمارات', 'score': 0.8156259655952454, 'entity': 'B-LOC', 'index': 8, 'start': 33, 'end': 41}, {'word': 'العربية', 'score': 0.890906810760498, 'entity': 'I-LOC', 'index': 9, 'start': 42, 'end': 49}, {'word': 'المتحدة', 'score': 0.8169114589691162, 'entity': 'I-LOC', 'index': 10, 'start': 50, 'end': 57}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. 
Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16,451
null
--- language: - ar license: apache-2.0 widget: - text: 'عامل ايه ؟' --- # CAMeLBERT-CA POS-EGY Model ## Model description **CAMeLBERT-CA POS-EGY Model** is a Egyptian Arabic POS tagging model that was built by fine-tuning the [CAMeLBERT-CA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-ca/) model. For the fine-tuning, we used the ARZTB dataset . Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-CA POS-EGY model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-egy') >>> text = 'عامل ايه ؟' >>> pos(text) [{'entity': 'adj', 'score': 0.9990943, 'index': 1, 'word': 'عامل', 'start': 0, 'end': 4}, {'entity': 'pron_interrog', 'score': 0.99863535, 'index': 2, 'word': 'ايه', 'start': 5, 'end': 8}, {'entity': 'punc', 'score': 0.99990875, 'index': 3, 'word': '؟', 'start': 9, 'end': 10}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
18
null
--- language: - ar license: apache-2.0 widget: - text: 'شلونك ؟ شخبارك ؟' --- # CAMeLBERT-CA POS-GLF Model ## Model description **CAMeLBERT-CA POS-GLF Model** is a Gulf Arabic POS tagging model that was built by fine-tuning the [CAMeLBERT-CA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-ca/) model. For the fine-tuning, we used the [Gumar](https://camel.abudhabi.nyu.edu/annotated-gumar-corpus/) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-CA POS-GLF model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-glf') >>> text = 'شلونك ؟ شخبارك ؟' >>> pos(text) [{'entity': 'noun', 'score': 0.99572617, 'index': 1, 'word': 'شلون', 'start': 0, 'end': 4}, {'entity': 'noun', 'score': 0.9411187, 'index': 2, 'word': '##ك', 'start': 4, 'end': 5}, {'entity': 'punc', 'score': 0.9999661, 'index': 3, 'word': '؟', 'start': 6, 'end': 7}, {'entity': 'noun', 'score': 0.99286526, 'index': 4, 'word': 'ش', 'start': 8, 'end': 9}, {'entity': 'noun', 'score': 0.9983397, 'index': 5, 'word': '##خبار', 'start': 9, 'end': 13}, {'entity': 'noun', 'score': 0.9609381, 'index': 6, 'word': '##ك', 'start': 13, 'end': 14}, {'entity': 'punc', 'score': 0.9999668, 'index': 7, 'word': '؟', 'start': 15, 'end': 16}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
71
null
--- language: - ar license: apache-2.0 widget: - text: 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع' --- # CAMeLBERT-CA POS-MSA Model ## Model description **CAMeLBERT-CA POS-MSA Model** is a Modern Standard Arabic (MSA) POS tagging model that was built by fine-tuning the [CAMeLBERT-CA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-ca/) model. For the fine-tuning, we used the [PATB](https://dl.acm.org/doi/pdf/10.5555/1621804.1621808) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-CA POS-MSA model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-msa') >>> text = 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع' >>> pos(text) [{'entity': 'noun', 'score': 0.9999758, 'index': 1, 'word': 'إمارة', 'start': 0, 'end': 5}, {'entity': 'noun_prop', 'score': 0.9997559, 'index': 2, 'word': 'أبوظبي', 'start': 6, 'end': 12}, {'entity': 'pron', 'score': 0.99996257, 'index': 3, 'word': 'هي', 'start': 13, 'end': 15}, {'entity': 'noun', 'score': 0.9958452, 'index': 4, 'word': 'إحدى', 'start': 16, 'end': 20}, {'entity': 'noun', 'score': 0.9999635, 'index': 5, 'word': 'إما', 'start': 21, 'end': 24}, {'entity': 'noun', 'score': 0.99991685, 'index': 6, 'word': '##رات', 'start': 24, 'end': 27}, {'entity': 'noun', 'score': 0.99997497, 'index': 7, 'word': 'دولة', 'start': 28, 'end': 32}, {'entity': 'noun', 'score': 0.9999795, 'index': 8, 'word': 'الإمارات', 'start': 33, 'end': 41}, {'entity': 'adj', 'score': 0.99924207, 'index': 9, 'word': 'العربية', 'start': 42, 'end': 49}, {'entity': 'adj', 'score': 0.99994195, 'index': 10, 'word': 'المتحدة', 'start': 50, 'end': 57}, {'entity': 'noun_num', 'score': 0.9997414, 'index': 11, 'word': 'السبع', 'start': 58, 'end': 63}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. 
We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-ca-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
73
null
--- language: - ar license: apache-2.0 widget: - text: "أنا بخير" --- # CAMeLBERT-CA SA Model ## Model description **CAMeLBERT-CA SA Model** is a Sentiment Analysis (SA) model that was built by fine-tuning the [CAMeLBERT Classical Arabic (CA)](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-ca/) model. For the fine-tuning, we used the [ASTD](https://aclanthology.org/D15-1299.pdf), [ArSAS](http://lrec-conf.org/workshops/lrec2018/W30/pdf/22_W30.pdf), and [SemEval](https://aclanthology.org/S17-2088.pdf) datasets. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-CA SA model directly as part of our [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) SA component (*recommended*) or as part of the transformers pipeline. #### How to use To use the model with the [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) SA component: ```python >>> from camel_tools.sentiment import SentimentAnalyzer >>> sa = SentimentAnalyzer("CAMeL-Lab/bert-base-arabic-camelbert-ca-sentiment") >>> sentences = ['أنا بخير', 'أنا لست بخير'] >>> sa.predict(sentences) >>> ['positive', 'negative'] ``` You can also use the SA model directly with a transformers pipeline: ```python >>> from transformers import pipeline >>> sa = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-ca-sentiment') >>> sentences = ['أنا بخير', 'أنا لست بخير'] >>> sa(sentences) [{'label': 'positive', 'score': 0.9616648554801941}, {'label': 'negative', 'score': 0.9779177904129028}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-ca
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
580
null
--- language: - ar license: apache-2.0 widget: - text: "الهدف من الحياة هو [MASK] ." --- # CAMeLBERT: A collection of pre-trained models for Arabic NLP tasks ## Model description **CAMeLBERT** is a collection of BERT models pre-trained on Arabic texts with different sizes and variants. We release pre-trained language models for Modern Standard Arabic (MSA), dialectal Arabic (DA), and classical Arabic (CA), in addition to a model pre-trained on a mix of the three. We also provide additional models that are pre-trained on a scaled-down set of the MSA variant (half, quarter, eighth, and sixteenth). The details are described in the paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* This model card describes **CAMeLBERT-CA** (`bert-base-arabic-camelbert-ca`), a model pre-trained on the CA (classical Arabic) dataset. ||Model|Variant|Size|#Word| |-|-|:-:|-:|-:| ||`bert-base-arabic-camelbert-mix`|CA,DA,MSA|167GB|17.3B| |✔|`bert-base-arabic-camelbert-ca`|CA|6GB|847M| ||`bert-base-arabic-camelbert-da`|DA|54GB|5.8B| ||`bert-base-arabic-camelbert-msa`|MSA|107GB|12.6B| ||`bert-base-arabic-camelbert-msa-half`|MSA|53GB|6.3B| ||`bert-base-arabic-camelbert-msa-quarter`|MSA|27GB|3.1B| ||`bert-base-arabic-camelbert-msa-eighth`|MSA|14GB|1.6B| ||`bert-base-arabic-camelbert-msa-sixteenth`|MSA|6GB|746M| ## Intended uses You can use the released model for either masked language modeling or next sentence prediction. However, it is mostly intended to be fine-tuned on an NLP task, such as NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. We release our fine-tuninig code [here](https://github.com/CAMeL-Lab/CAMeLBERT). #### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='CAMeL-Lab/bert-base-arabic-camelbert-ca') >>> unmasker("الهدف من الحياة هو [MASK] .") [{'sequence': '[CLS] الهدف من الحياة هو الحياة. [SEP]', 'score': 0.11048116534948349, 'token': 3696, 'token_str': 'الحياة'}, {'sequence': '[CLS] الهدف من الحياة هو الإسلام. [SEP]', 'score': 0.03481195122003555, 'token': 4677, 'token_str': 'الإسلام'}, {'sequence': '[CLS] الهدف من الحياة هو الموت. [SEP]', 'score': 0.03402028977870941, 'token': 4295, 'token_str': 'الموت'}, {'sequence': '[CLS] الهدف من الحياة هو العلم. [SEP]', 'score': 0.027655426412820816, 'token': 2789, 'token_str': 'العلم'}, {'sequence': '[CLS] الهدف من الحياة هو هذا. [SEP]', 'score': 0.023059621453285217, 'token': 2085, 'token_str': 'هذا'}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-ca') model = AutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-ca') text = "مرحبا يا عالم." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AutoTokenizer, TFAutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-ca') model = TFAutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-ca') text = "مرحبا يا عالم." 
encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Training data - CA (classical Arabic) - [OpenITI (Version 2020.1.2)](https://zenodo.org/record/3891466#.YEX4-F0zbzc) ## Training procedure We use [the original implementation](https://github.com/google-research/bert) released by Google for pre-training. We follow the original English BERT model's hyperparameters for pre-training, unless otherwise specified. ### Preprocessing - After extracting the raw text from each corpus, we apply the following pre-processing. - We first remove invalid characters and normalize white spaces using the utilities provided by [the original BERT implementation](https://github.com/google-research/bert/blob/eedf5716ce1268e56f0a50264a88cafad334ac61/tokenization.py#L286-L297). - We also remove lines without any Arabic characters. - We then remove diacritics and kashida using [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools). - Finally, we split each line into sentences with a heuristics-based sentence segmenter. - We train a WordPiece tokenizer on the entire dataset (167 GB text) with a vocabulary size of 30,000 using [HuggingFace's tokenizers](https://github.com/huggingface/tokenizers). - We do not lowercase letters nor strip accents. ### Pre-training - The model was trained on a single cloud TPU (`v3-8`) for one million steps in total. - The first 90,000 steps were trained with a batch size of 1,024 and the rest was trained with a batch size of 256. - The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. - We use whole word masking and a duplicate factor of 10. - We set max predictions per sequence to 20 for the dataset with max sequence length of 128 tokens and 80 for the dataset with max sequence length of 512 tokens. - We use a random seed of 12345, masked language model probability of 0.15, and short sequence probability of 0.1. - The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after. ## Evaluation results - We evaluate our pre-trained language models on five NLP tasks: NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. - We fine-tune and evaluate the models using 12 dataset. - We used Hugging Face's transformers to fine-tune our CAMeLBERT models. - We used transformers `v3.1.0` along with PyTorch `v1.5.1`. - The fine-tuning was done by adding a fully connected linear layer to the last hidden state. - We use \\(F_{1}\\) score as a metric for all tasks. - Code used for fine-tuning is available [here](https://github.com/CAMeL-Lab/CAMeLBERT). 
### Results | Task | Dataset | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | --------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | NER | ANERcorp | MSA | 80.8% | 67.9% | 74.1% | 82.4% | 82.0% | 82.1% | 82.6% | 80.8% | | POS | PATB (MSA) | MSA | 98.1% | 97.8% | 97.7% | 98.3% | 98.2% | 98.3% | 98.2% | 98.2% | | | ARZTB (EGY) | DA | 93.6% | 92.3% | 92.7% | 93.6% | 93.6% | 93.7% | 93.6% | 93.6% | | | Gumar (GLF) | DA | 97.3% | 97.7% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | | SA | ASTD | MSA | 76.3% | 69.4% | 74.6% | 76.9% | 76.0% | 76.8% | 76.7% | 75.3% | | | ArSAS | MSA | 92.7% | 89.4% | 91.8% | 93.0% | 92.6% | 92.5% | 92.5% | 92.3% | | | SemEval | MSA | 69.0% | 58.5% | 68.4% | 72.1% | 70.7% | 72.8% | 71.6% | 71.2% | | DID | MADAR-26 | DA | 62.9% | 61.9% | 61.8% | 62.6% | 62.0% | 62.8% | 62.0% | 62.2% | | | MADAR-6 | DA | 92.5% | 91.5% | 92.2% | 91.9% | 91.8% | 92.2% | 92.1% | 92.0% | | | MADAR-Twitter-5 | MSA | 75.7% | 71.4% | 74.2% | 77.6% | 78.5% | 77.3% | 77.7% | 76.2% | | | NADI | DA | 24.7% | 17.3% | 20.1% | 24.9% | 24.6% | 24.6% | 24.9% | 23.8% | | Poetry | APCD | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | ### Results (Average) | | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | Variant-wise-average<sup>[[1]](#footnote-1)</sup> | MSA | 82.1% | 75.7% | 80.1% | 83.4% | 83.0% | 83.3% | 83.2% | 82.3% | | | DA | 74.4% | 72.1% | 72.9% | 74.2% | 74.0% | 74.3% | 74.1% | 73.9% | | | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | | Macro-Average | ALL | 78.7% | 74.7% | 77.1% | 79.2% | 79.0% | 79.2% | 79.1% | 78.6% | <a name="footnote-1">[1]</a>: Variant-wise-average refers to average over a group of tasks in the same language variant. ## Acknowledgements This research was supported with Cloud TPUs from Google’s TensorFlow Research Cloud (TFRC). ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-da-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
null
--- language: - ar license: apache-2.0 widget: - text: "إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع" --- # CAMeLBERT-DA NER Model ## Model description **CAMeLBERT-DA NER Model** is a Named Entity Recognition (NER) model that was built by fine-tuning the [CAMeLBERT Dialectal Arabic (DA)](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-da/) model. For the fine-tuning, we used the [ANERcorp](https://camel.abudhabi.nyu.edu/anercorp/) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-DA NER model directly as part of our [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component (*recommended*) or as part of the transformers pipeline. #### How to use To use the model with the [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component: ```python >>> from camel_tools.ner import NERecognizer >>> from camel_tools.tokenizers.word import simple_word_tokenize >>> ner = NERecognizer('CAMeL-Lab/bert-base-arabic-camelbert-da-ner') >>> sentence = simple_word_tokenize('إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع') >>> ner.predict_sentence(sentence) >>> ['O', 'B-LOC', 'O', 'O', 'O', 'O', 'B-LOC', 'I-LOC', 'I-LOC', 'O'] ``` You can also use the NER model directly with a transformers pipeline: ```python >>> from transformers import pipeline >>> ner = pipeline('ner', model='CAMeL-Lab/bert-base-arabic-camelbert-da-ner') >>> ner("إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع") [{'word': 'أبوظبي', 'score': 0.9895730018615723, 'entity': 'B-LOC', 'index': 2, 'start': 6, 'end': 12}, {'word': 'الإمارات', 'score': 0.8156259655952454, 'entity': 'B-LOC', 'index': 8, 'start': 33, 'end': 41}, {'word': 'العربية', 'score': 0.890906810760498, 'entity': 'I-LOC', 'index': 9, 'start': 42, 'end': 49}, {'word': 'المتحدة', 'score': 0.8169114589691162, 'entity': 'I-LOC', 'index': 10, 'start': 50, 'end': 57}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. 
Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-da-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
--- language: - ar license: apache-2.0 widget: - text: 'الخيل والليل والبيداء تعرفني [SEP] والسيف والرمح والقرطاس والقلم' --- # CAMeLBERT-DA Poetry Classification Model ## Model description **CAMeLBERT-DA Poetry Classification Model** is a poetry classification model that was built by fine-tuning the [CAMeLBERT Dialectal Arabic (DA)](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-da/) model. For the fine-tuning, we used the [APCD](https://arxiv.org/pdf/1905.05700.pdf) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-DA Poetry Classification model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> poetry = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-da-poetry') >>> # A list of verses where each verse consists of two parts. >>> verses = [ ['الخيل والليل والبيداء تعرفني' ,'والسيف والرمح والقرطاس والقلم'], ['قم للمعلم وفه التبجيلا' ,'كاد المعلم ان يكون رسولا'] ] >>> # A function that concatenates the halves of each verse by using the [SEP] token. >>> join_verse = lambda half: ' [SEP] '.join(half) >>> # Apply this to all the verses in the list. >>> verses = [join_verse(verse) for verse in verses] >>> poetry(verses) [{'label': 'البسيط', 'score': 0.9874765276908875}, {'label': 'السلسلة', 'score': 0.6877778172492981}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- language: - ar license: apache-2.0 widget: - text: 'عامل ايه ؟' --- # CAMeLBERT-DA POS-EGY Model ## Model description **CAMeLBERT-DA POS-EGY Model** is a Egyptian Arabic POS tagging model that was built by fine-tuning the [CAMeLBERT-DA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-da/) model. For the fine-tuning, we used the ARZTB dataset . Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-DA POS-EGY model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-da-pos-egy') >>> text = 'عامل ايه ؟' >>> pos(text) [{'entity': 'adj', 'score': 0.99843216, 'index': 1, 'word': 'عامل', 'start': 0, 'end': 4}, {'entity': 'pron_interrog', 'score': 0.9990083, 'index': 2, 'word': 'ايه', 'start': 5, 'end': 8}, {'entity': 'punc', 'score': 0.82973784, 'index': 3, 'word': '؟', 'start': 9, 'end': 10}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
54
"2021-10-18T05:53:49Z"
--- language: - ar license: apache-2.0 widget: - text: 'شلونك ؟ شخبارك ؟' --- # CAMeLBERT-DA POS-GLF Model ## Model description **CAMeLBERT-DA POS-GLF Model** is a Gulf Arabic POS tagging model that was built by fine-tuning the [CAMeLBERT-DA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-da/) model. For the fine-tuning, we used the [Gumar](https://camel.abudhabi.nyu.edu/annotated-gumar-corpus/) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-DA POS-GLF model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-da-pos-glf') >>> text = 'شلونك ؟ شخبارك ؟' >>> pos(text) [{'entity': 'noun', 'score': 0.84596395, 'index': 1, 'word': 'شلون', 'start': 0, 'end': 4}, {'entity': 'prep', 'score': 0.7230489, 'index': 2, 'word': '##ك', 'start': 4, 'end': 5}, {'entity': 'punc', 'score': 0.99996364, 'index': 3, 'word': '؟', 'start': 6, 'end': 7}, {'entity': 'noun', 'score': 0.9990874, 'index': 4, 'word': 'ش', 'start': 8, 'end': 9}, {'entity': 'noun', 'score': 0.99985224, 'index': 5, 'word': '##خبار', 'start': 9, 'end': 13}, {'entity': 'noun', 'score': 0.9988868, 'index': 6, 'word': '##ك', 'start': 13, 'end': 14}, {'entity': 'punc', 'score': 0.9999683, 'index': 7, 'word': '؟', 'start': 15, 'end': 16}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- language: - ar license: apache-2.0 widget: - text: 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع' --- # CAMeLBERT-DA POS-MSA Model ## Model description **CAMeLBERT-DA POS-MSA Model** is a Modern Standard Arabic (MSA) POS tagging model that was built by fine-tuning the [CAMeLBERT-DA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-da/) model. For the fine-tuning, we used the [PATB](https://dl.acm.org/doi/pdf/10.5555/1621804.1621808) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-DA POS-MSA model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-da-pos-msa') >>> text = 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع' >>> pos(text) [{'entity': 'noun', 'score': 0.9999913, 'index': 1, 'word': 'إمارة', 'start': 0, 'end': 5}, {'entity': 'noun_prop', 'score': 0.9992475, 'index': 2, 'word': 'أبوظبي', 'start': 6, 'end': 12}, {'entity': 'pron', 'score': 0.999919, 'index': 3, 'word': 'هي', 'start': 13, 'end': 15}, {'entity': 'noun', 'score': 0.99993193, 'index': 4, 'word': 'إحدى', 'start': 16, 'end': 20}, {'entity': 'noun', 'score': 0.99999106, 'index': 5, 'word': 'إما', 'start': 21, 'end': 24}, {'entity': 'noun', 'score': 0.99998987, 'index': 6, 'word': '##رات', 'start': 24, 'end': 27}, {'entity': 'noun', 'score': 0.9999933, 'index': 7, 'word': 'دولة', 'start': 28, 'end': 32}, {'entity': 'noun', 'score': 0.9999899, 'index': 8, 'word': 'الإمارات', 'start': 33, 'end': 41}, {'entity': 'adj', 'score': 0.99990565, 'index': 9, 'word': 'العربية', 'start': 42, 'end': 49}, {'entity': 'adj', 'score': 0.99997944, 'index': 10, 'word': 'المتحدة', 'start': 50, 'end': 57}, {'entity': 'noun_num', 'score': 0.99938935, 'index': 11, 'word': 'السبع', 'start': 58, 'end': 63}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. 
We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-da-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "has_space" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19,850
null
--- language: - ar license: apache-2.0 widget: - text: "أنا بخير" --- # CAMeLBERT-DA SA Model ## Model description **CAMeLBERT-DA SA Model** is a Sentiment Analysis (SA) model that was built by fine-tuning the [CAMeLBERT Dialectal Arabic (DA)](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-da/) model. For the fine-tuning, we used the [ASTD](https://aclanthology.org/D15-1299.pdf), [ArSAS](http://lrec-conf.org/workshops/lrec2018/W30/pdf/22_W30.pdf), and [SemEval](https://aclanthology.org/S17-2088.pdf) datasets. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)." * Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-DA SA model directly as part of our [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) SA component (*recommended*) or as part of the transformers pipeline. #### How to use To use the model with the [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) SA component: ```python >>> from camel_tools.sentiment import SentimentAnalyzer >>> sa = SentimentAnalyzer("CAMeL-Lab/bert-base-arabic-camelbert-da-sentiment") >>> sentences = ['أنا بخير', 'أنا لست بخير'] >>> sa.predict(sentences) >>> ['positive', 'negative'] ``` You can also use the SA model directly with a transformers pipeline: ```python >>> from transformers import pipeline >>> sa = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-da-sentiment') >>> sentences = ['أنا بخير', 'أنا لست بخير'] >>> sa(sentences) [{'label': 'positive', 'score': 0.9616648554801941}, {'label': 'negative', 'score': 0.9779177904129028}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-da
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
449
null
--- language: - ar license: apache-2.0 widget: - text: "الهدف من الحياة هو [MASK] ." --- # CAMeLBERT: A collection of pre-trained models for Arabic NLP tasks ## Model description **CAMeLBERT** is a collection of BERT models pre-trained on Arabic texts with different sizes and variants. We release pre-trained language models for Modern Standard Arabic (MSA), dialectal Arabic (DA), and classical Arabic (CA), in addition to a model pre-trained on a mix of the three. We also provide additional models that are pre-trained on a scaled-down set of the MSA variant (half, quarter, eighth, and sixteenth). The details are described in the paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* This model card describes **CAMeLBERT-DA** (`bert-base-arabic-camelbert-da`), a model pre-trained on the DA (dialectal Arabic) dataset. ||Model|Variant|Size|#Word| |-|-|:-:|-:|-:| ||`bert-base-arabic-camelbert-mix`|CA,DA,MSA|167GB|17.3B| ||`bert-base-arabic-camelbert-ca`|CA|6GB|847M| |✔|`bert-base-arabic-camelbert-da`|DA|54GB|5.8B| ||`bert-base-arabic-camelbert-msa`|MSA|107GB|12.6B| ||`bert-base-arabic-camelbert-msa-half`|MSA|53GB|6.3B| ||`bert-base-arabic-camelbert-msa-quarter`|MSA|27GB|3.1B| ||`bert-base-arabic-camelbert-msa-eighth`|MSA|14GB|1.6B| ||`bert-base-arabic-camelbert-msa-sixteenth`|MSA|6GB|746M| ## Intended uses You can use the released model for either masked language modeling or next sentence prediction. However, it is mostly intended to be fine-tuned on an NLP task, such as NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. We release our fine-tuning code [here](https://github.com/CAMeL-Lab/CAMeLBERT). #### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='CAMeL-Lab/bert-base-arabic-camelbert-da') >>> unmasker("الهدف من الحياة هو [MASK] .") [{'sequence': '[CLS] الهدف من الحياة هو.. [SEP]', 'score': 0.062508225440979, 'token': 18, 'token_str': '.'}, {'sequence': '[CLS] الهدف من الحياة هو الموت. [SEP]', 'score': 0.033172328025102615, 'token': 4295, 'token_str': 'الموت'}, {'sequence': '[CLS] الهدف من الحياة هو الحياة. [SEP]', 'score': 0.029575437307357788, 'token': 3696, 'token_str': 'الحياة'}, {'sequence': '[CLS] الهدف من الحياة هو الرحيل. [SEP]', 'score': 0.02724040113389492, 'token': 11449, 'token_str': 'الرحيل'}, {'sequence': '[CLS] الهدف من الحياة هو الحب. [SEP]', 'score': 0.01564178802073002, 'token': 3088, 'token_str': 'الحب'}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-da') model = AutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-da') text = "مرحبا يا عالم." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AutoTokenizer, TFAutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-da') model = TFAutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-da') text = "مرحبا يا عالم." 
encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Training data - DA (dialectal Arabic) - A collection of dialectal Arabic data described in [our paper](https://arxiv.org/abs/2103.06678). ## Training procedure We use [the original implementation](https://github.com/google-research/bert) released by Google for pre-training. We follow the original English BERT model's hyperparameters for pre-training, unless otherwise specified. ### Preprocessing - After extracting the raw text from each corpus, we apply the following pre-processing. - We first remove invalid characters and normalize white spaces using the utilities provided by [the original BERT implementation](https://github.com/google-research/bert/blob/eedf5716ce1268e56f0a50264a88cafad334ac61/tokenization.py#L286-L297). - We also remove lines without any Arabic characters. - We then remove diacritics and kashida using [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools). - Finally, we split each line into sentences with a heuristics-based sentence segmenter. - We train a WordPiece tokenizer on the entire dataset (167 GB text) with a vocabulary size of 30,000 using [HuggingFace's tokenizers](https://github.com/huggingface/tokenizers). - We do not lowercase letters nor strip accents. ### Pre-training - The model was trained on a single cloud TPU (`v3-8`) for one million steps in total. - The first 90,000 steps were trained with a batch size of 1,024 and the rest was trained with a batch size of 256. - The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. - We use whole word masking and a duplicate factor of 10. - We set max predictions per sequence to 20 for the dataset with max sequence length of 128 tokens and 80 for the dataset with max sequence length of 512 tokens. - We use a random seed of 12345, masked language model probability of 0.15, and short sequence probability of 0.1. - The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after. ## Evaluation results - We evaluate our pre-trained language models on five NLP tasks: NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. - We fine-tune and evaluate the models using 12 datasets. - We used Hugging Face's transformers to fine-tune our CAMeLBERT models. - We used transformers `v3.1.0` along with PyTorch `v1.5.1`. - The fine-tuning was done by adding a fully connected linear layer to the last hidden state (a minimal sketch of this setup is shown after this list). - We use \\(F_{1}\\) score as a metric for all tasks. - Code used for fine-tuning is available [here](https://github.com/CAMeL-Lab/CAMeLBERT).
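The following is a rough, illustrative sketch of that fine-tuning idea (it is not the authors' released fine-tuning code, which is linked above, and `num_labels` is a hypothetical placeholder): a fully connected linear layer placed over the encoder's last hidden state.

```python
# Illustrative sketch only: a linear classification head over the last hidden
# state, mirroring the setup described above. Not the authors' released code;
# `num_labels` is a hypothetical placeholder.
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-da')
encoder = AutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-da')

num_labels = 16  # hypothetical label count (e.g., a POS tag set)
classifier = torch.nn.Linear(encoder.config.hidden_size, num_labels)

text = "مرحبا يا عالم."
encoded = tokenizer(text, return_tensors='pt')
last_hidden_state = encoder(**encoded)[0]   # (batch, seq_len, hidden_size)
logits = classifier(last_hidden_state)      # per-token logits
# During fine-tuning, these logits are trained with a cross-entropy loss
# against the task labels (e.g., POS tags); sentence-level tasks typically
# apply the same kind of head to a pooled sentence representation instead.
```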
### Results | Task | Dataset | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | --------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | NER | ANERcorp | MSA | 80.8% | 67.9% | 74.1% | 82.4% | 82.0% | 82.1% | 82.6% | 80.8% | | POS | PATB (MSA) | MSA | 98.1% | 97.8% | 97.7% | 98.3% | 98.2% | 98.3% | 98.2% | 98.2% | | | ARZTB (EGY) | DA | 93.6% | 92.3% | 92.7% | 93.6% | 93.6% | 93.7% | 93.6% | 93.6% | | | Gumar (GLF) | DA | 97.3% | 97.7% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | | SA | ASTD | MSA | 76.3% | 69.4% | 74.6% | 76.9% | 76.0% | 76.8% | 76.7% | 75.3% | | | ArSAS | MSA | 92.7% | 89.4% | 91.8% | 93.0% | 92.6% | 92.5% | 92.5% | 92.3% | | | SemEval | MSA | 69.0% | 58.5% | 68.4% | 72.1% | 70.7% | 72.8% | 71.6% | 71.2% | | DID | MADAR-26 | DA | 62.9% | 61.9% | 61.8% | 62.6% | 62.0% | 62.8% | 62.0% | 62.2% | | | MADAR-6 | DA | 92.5% | 91.5% | 92.2% | 91.9% | 91.8% | 92.2% | 92.1% | 92.0% | | | MADAR-Twitter-5 | MSA | 75.7% | 71.4% | 74.2% | 77.6% | 78.5% | 77.3% | 77.7% | 76.2% | | | NADI | DA | 24.7% | 17.3% | 20.1% | 24.9% | 24.6% | 24.6% | 24.9% | 23.8% | | Poetry | APCD | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | ### Results (Average) | | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | Variant-wise-average<sup>[[1]](#footnote-1)</sup> | MSA | 82.1% | 75.7% | 80.1% | 83.4% | 83.0% | 83.3% | 83.2% | 82.3% | | | DA | 74.4% | 72.1% | 72.9% | 74.2% | 74.0% | 74.3% | 74.1% | 73.9% | | | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | | Macro-Average | ALL | 78.7% | 74.7% | 77.1% | 79.2% | 79.0% | 79.2% | 79.1% | 78.6% | <a name="footnote-1">[1]</a>: Variant-wise-average refers to average over a group of tasks in the same language variant. ## Acknowledgements This research was supported with Cloud TPUs from Google’s TensorFlow Research Cloud (TFRC). ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar-corpus26
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
45
null
--- language: - ar license: apache-2.0 widget: - text: "عامل ايه ؟" --- # CAMeLBERT-Mix DID Madar Corpus26 Model ## Model description **CAMeLBERT-Mix DID Madar Corpus26 Model** is a dialect identification (DID) model that was built by fine-tuning the [CAMeLBERT-Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the [MADAR Corpus 26](https://camel.abudhabi.nyu.edu/madar-shared-task-2019/) dataset, which includes 26 labels. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-Mix DID Madar Corpus26 model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> did = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar26') >>> sentences = ['عامل ايه ؟', 'شلونك ؟ شخبارك ؟'] >>> did(sentences) [{'label': 'CAI', 'score': 0.8751305937767029}, {'label': 'DOH', 'score': 0.9867215156555176}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar-corpus6
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
null
--- language: - ar license: apache-2.0 widget: - text: "عامل ايه ؟" --- # CAMeLBERT-Mix DID MADAR Corpus6 Model ## Model description **CAMeLBERT-Mix DID MADAR Corpus6 Model** is a dialect identification (DID) model that was built by fine-tuning the [CAMeLBERT-Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the [MADAR Corpus 6](https://camel.abudhabi.nyu.edu/madar-shared-task-2019/) dataset, which includes 6 labels. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-Mix DID MADAR Corpus6 model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> did = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar6') >>> sentences = ['عامل ايه ؟', 'شلونك ؟ شخبارك ؟'] >>> did(sentences) [{'label': 'CAI', 'score': 0.9996405839920044}, {'label': 'DOH', 'score': 0.9997853636741638}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-did-nadi
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
63
null
--- language: - ar license: apache-2.0 widget: - text: "عامل ايه ؟" --- # CAMeLBERT-Mix DID NADI Model ## Model description **CAMeLBERT-Mix DID NADI Model** is a dialect identification (DID) model that was built by fine-tuning the [CAMeLBERT-Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the [NADI Country-level](https://sites.google.com/view/nadi-shared-task) dataset, which includes 21 labels. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-Mix DID NADI model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> did = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-did-nadi') >>> sentences = ['عامل ايه ؟', 'شلونك ؟ شخبارك ؟'] >>> did(sentences) [{'label': 'Egypt', 'score': 0.920274019241333}, {'label': 'Saudi_Arabia', 'score': 0.26750022172927856}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,860
null
--- language: - ar license: apache-2.0 widget: - text: "إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع" --- # CAMeLBERT-Mix NER Model ## Model description **CAMeLBERT-Mix NER Model** is a Named Entity Recognition (NER) model that was built by fine-tuning the [CAMeLBERT Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the [ANERcorp](https://camel.abudhabi.nyu.edu/anercorp/) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678). "* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-Mix NER model directly as part of our [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component (*recommended*) or as part of the transformers pipeline. #### How to use To use the model with the [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component: ```python >>> from camel_tools.ner import NERecognizer >>> from camel_tools.tokenizers.word import simple_word_tokenize >>> ner = NERecognizer('CAMeL-Lab/bert-base-arabic-camelbert-mix-ner') >>> sentence = simple_word_tokenize('إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع') >>> ner.predict_sentence(sentence) >>> ['O', 'B-LOC', 'O', 'O', 'O', 'O', 'B-LOC', 'I-LOC', 'I-LOC', 'O'] ``` You can also use the NER model directly with a transformers pipeline: ```python >>> from transformers import pipeline >>> ner = pipeline('ner', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-ner') >>> ner("إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع") [{'word': 'أبوظبي', 'score': 0.9895730018615723, 'entity': 'B-LOC', 'index': 2, 'start': 6, 'end': 12}, {'word': 'الإمارات', 'score': 0.8156259655952454, 'entity': 'B-LOC', 'index': 8, 'start': 33, 'end': 41}, {'word': 'العربية', 'score': 0.890906810760498, 'entity': 'I-LOC', 'index': 9, 'start': 42, 'end': 49}, {'word': 'المتحدة', 'score': 0.8169114589691162, 'entity': 'I-LOC', 'index': 10, 'start': 50, 'end': 57}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. 
Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- language: - ar license: apache-2.0 widget: - text: 'الخيل والليل والبيداء تعرفني [SEP] والسيف والرمح والقرطاس والقلم' --- # CAMeLBERT-Mix Poetry Classification Model ## Model description **CAMeLBERT-Mix Poetry Classification Model** is a poetry classification model that was built by fine-tuning the [CAMeLBERT Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the [APCD](https://arxiv.org/pdf/1905.05700.pdf) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-Mix Poetry Classification model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> poetry = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-poetry') >>> # A list of verses where each verse consists of two parts. >>> verses = [ ['الخيل والليل والبيداء تعرفني' ,'والسيف والرمح والقرطاس والقلم'], ['قم للمعلم وفه التبجيلا' ,'كاد المعلم ان يكون رسولا'] ] >>> # A function that concatenates the halves of each verse by using the [SEP] token. >>> join_verse = lambda half: ' [SEP] '.join(half) >>> # Apply this to all the verses in the list. >>> verses = [join_verse(verse) for verse in verses] >>> poetry(verses) [{'label': 'البسيط', 'score': 0.9937475919723511}, {'label': 'الكامل', 'score': 0.971284031867981}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
62
null
--- language: - ar license: apache-2.0 widget: - text: 'عامل ايه ؟' --- # CAMeLBERT-Mix POS-EGY Model ## Model description **CAMeLBERT-Mix POS-EGY Model** is an Egyptian Arabic POS tagging model that was built by fine-tuning the [CAMeLBERT-Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the ARZTB dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-Mix POS-EGY model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-egy') >>> text = 'عامل ايه ؟' >>> pos(text) [{'entity': 'adj', 'score': 0.9972628, 'index': 1, 'word': 'عامل', 'start': 0, 'end': 4}, {'entity': 'pron_interrog', 'score': 0.9525163, 'index': 2, 'word': 'ايه', 'start': 5, 'end': 8}, {'entity': 'punc', 'score': 0.99869114, 'index': 3, 'word': '؟', 'start': 9, 'end': 10}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
132
null
--- language: - ar license: apache-2.0 widget: - text: 'شلونك ؟ شخبارك ؟' --- # CAMeLBERT-Mix POS-GLF Model ## Model description **CAMeLBERT-Mix POS-GLF Model** is a Gulf Arabic POS tagging model that was built by fine-tuning the [CAMeLBERT-Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the [Gumar](https://camel.abudhabi.nyu.edu/annotated-gumar-corpus/) dataset . Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-Mix POS-GLF model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-glf') >>> text = 'شلونك ؟ شخبارك ؟' >>> pos(text) [{'entity': 'pron_interrog', 'score': 0.82657206, 'index': 1, 'word': 'شلون', 'start': 0, 'end': 4}, {'entity': 'prep', 'score': 0.9771731, 'index': 2, 'word': '##ك', 'start': 4, 'end': 5}, {'entity': 'punc', 'score': 0.9999568, 'index': 3, 'word': '؟', 'start': 6, 'end': 7}, {'entity': 'noun', 'score': 0.9977217, 'index': 4, 'word': 'ش', 'start': 8, 'end': 9}, {'entity': 'noun', 'score': 0.99993783, 'index': 5, 'word': '##خبار', 'start': 9, 'end': 13}, {'entity': 'prep', 'score': 0.5309442, 'index': 6, 'word': '##ك', 'start': 13, 'end': 14}, {'entity': 'punc', 'score': 0.9999575, 'index': 7, 'word': '؟', 'start': 15, 'end': 16}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,862
null
--- language: - ar license: apache-2.0 widget: - text: 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع' --- # CAMeLBERT-Mix POS-MSA Model ## Model description **CAMeLBERT-Mix POS-MSA Model** is a Modern Standard Arabic (MSA) POS tagging model that was built by fine-tuning the [CAMeLBERT-Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the [PATB](https://dl.acm.org/doi/pdf/10.5555/1621804.1621808) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-Mix POS-MSA model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-msa') >>> text = 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع' >>> pos(text) [{'entity': 'noun', 'score': 0.9999592, 'index': 1, 'word': 'إمارة', 'start': 0, 'end': 5}, {'entity': 'noun_prop', 'score': 0.9997877, 'index': 2, 'word': 'أبوظبي', 'start': 6, 'end': 12}, {'entity': 'pron', 'score': 0.9998405, 'index': 3, 'word': 'هي', 'start': 13, 'end': 15}, {'entity': 'noun', 'score': 0.9697179, 'index': 4, 'word': 'إحدى', 'start': 16, 'end': 20}, {'entity': 'noun', 'score': 0.99967164, 'index': 5, 'word': 'إما', 'start': 21, 'end': 24}, {'entity': 'noun', 'score': 0.99980617, 'index': 6, 'word': '##رات', 'start': 24, 'end': 27}, {'entity': 'noun', 'score': 0.99997973, 'index': 7, 'word': 'دولة', 'start': 28, 'end': 32}, {'entity': 'noun', 'score': 0.99995637, 'index': 8, 'word': 'الإمارات', 'start': 33, 'end': 41}, {'entity': 'adj', 'score': 0.9983974, 'index': 9, 'word': 'العربية', 'start': 42, 'end': 49}, {'entity': 'adj', 'score': 0.9999469, 'index': 10, 'word': 'المتحدة', 'start': 50, 'end': 57}, {'entity': 'noun_num', 'score': 0.9993273, 'index': 11, 'word': 'السبع', 'start': 58, 'end': 63}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. 
We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
855
null
--- language: - ar license: apache-2.0 widget: - text: "أنا بخير" --- # CAMeLBERT Mix SA Model ## Model description **CAMeLBERT Mix SA Model** is a Sentiment Analysis (SA) model that was built by fine-tuning the [CAMeLBERT Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the [ASTD](https://aclanthology.org/D15-1299.pdf), [ArSAS](http://lrec-conf.org/workshops/lrec2018/W30/pdf/22_W30.pdf), and [SemEval](https://aclanthology.org/S17-2088.pdf) datasets. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT Mix SA model directly as part of our [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) SA component (*recommended*) or as part of the transformers pipeline. #### How to use To use the model with the [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) SA component: ```python >>> from camel_tools.sentiment import SentimentAnalyzer >>> sa = SentimentAnalyzer("CAMeL-Lab/bert-base-arabic-camelbert-mix-sentiment") >>> sentences = ['أنا بخير', 'أنا لست بخير'] >>> sa.predict(sentences) >>> ['positive', 'negative'] ``` You can also use the SA model directly with a transformers pipeline: ```python >>> from transformers import pipeline >>> sa = pipeline('sentiment-analysis', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-sentiment') >>> sentences = ['أنا بخير', 'أنا لست بخير'] >>> sa(sentences) [{'label': 'positive', 'score': 0.9616648554801941}, {'label': 'negative', 'score': 0.9779177904129028}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-mix
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "Arabic", "Dialect", "Egyptian", "Gulf", "Levantine", "Classical Arabic", "MSA", "Modern Standard Arabic", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20,880
null
--- language: - ar license: apache-2.0 tags: - Arabic - Dialect - Egyptian - Gulf - Levantine - Classical Arabic - MSA - Modern Standard Arabic widget: - text: "الهدف من الحياة هو [MASK] ." --- # CAMeLBERT: A collection of pre-trained models for Arabic NLP tasks ## Model description **CAMeLBERT** is a collection of BERT models pre-trained on Arabic texts with different sizes and variants. We release pre-trained language models for Modern Standard Arabic (MSA), dialectal Arabic (DA), and classical Arabic (CA), in addition to a model pre-trained on a mix of the three. We also provide additional models that are pre-trained on a scaled-down set of the MSA variant (half, quarter, eighth, and sixteenth). The details are described in the paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* This model card describes **CAMeLBERT-Mix** (`bert-base-arabic-camelbert-mix`), a model pre-trained on a mixture of these variants: MSA, DA, and CA. ||Model|Variant|Size|#Word| |-|-|:-:|-:|-:| |✔|`bert-base-arabic-camelbert-mix`|CA,DA,MSA|167GB|17.3B| ||`bert-base-arabic-camelbert-ca`|CA|6GB|847M| ||`bert-base-arabic-camelbert-da`|DA|54GB|5.8B| ||`bert-base-arabic-camelbert-msa`|MSA|107GB|12.6B| ||`bert-base-arabic-camelbert-msa-half`|MSA|53GB|6.3B| ||`bert-base-arabic-camelbert-msa-quarter`|MSA|27GB|3.1B| ||`bert-base-arabic-camelbert-msa-eighth`|MSA|14GB|1.6B| ||`bert-base-arabic-camelbert-msa-sixteenth`|MSA|6GB|746M| ## Intended uses You can use the released model for either masked language modeling or next sentence prediction. However, it is mostly intended to be fine-tuned on an NLP task, such as NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. We release our fine-tuning code [here](https://github.com/CAMeL-Lab/CAMeLBERT). #### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='CAMeL-Lab/bert-base-arabic-camelbert-mix') >>> unmasker("الهدف من الحياة هو [MASK] .") [{'sequence': '[CLS] الهدف من الحياة هو النجاح. [SEP]', 'score': 0.10861027985811234, 'token': 6232, 'token_str': 'النجاح'}, {'sequence': '[CLS] الهدف من الحياة هو.. [SEP]', 'score': 0.07626965641975403, 'token': 18, 'token_str': '.'}, {'sequence': '[CLS] الهدف من الحياة هو الحياة. [SEP]', 'score': 0.05131986364722252, 'token': 3696, 'token_str': 'الحياة'}, {'sequence': '[CLS] الهدف من الحياة هو الموت. [SEP]', 'score': 0.03734956309199333, 'token': 4295, 'token_str': 'الموت'}, {'sequence': '[CLS] الهدف من الحياة هو العمل. [SEP]', 'score': 0.027189988642930984, 'token': 2854, 'token_str': 'العمل'}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-mix') model = AutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-mix') text = "مرحبا يا عالم." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AutoTokenizer, TFAutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-mix') model = TFAutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-mix') text = "مرحبا يا عالم." 
encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Training data - MSA (Modern Standard Arabic) - [The Arabic Gigaword Fifth Edition](https://catalog.ldc.upenn.edu/LDC2011T11) - [Abu El-Khair Corpus](http://www.abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus) - [OSIAN corpus](https://vlo.clarin.eu/search;jsessionid=31066390B2C9E8C6304845BA79869AC1?1&q=osian) - [Arabic Wikipedia](https://archive.org/details/arwiki-20190201) - The unshuffled version of the Arabic [OSCAR corpus](https://oscar-corpus.com/) - DA (dialectal Arabic) - A collection of dialectal Arabic data described in [our paper](https://arxiv.org/abs/2103.06678). - CA (classical Arabic) - [OpenITI (Version 2020.1.2)](https://zenodo.org/record/3891466#.YEX4-F0zbzc) ## Training procedure We use [the original implementation](https://github.com/google-research/bert) released by Google for pre-training. We follow the original English BERT model's hyperparameters for pre-training, unless otherwise specified. ### Preprocessing - After extracting the raw text from each corpus, we apply the following pre-processing. - We first remove invalid characters and normalize white spaces using the utilities provided by [the original BERT implementation](https://github.com/google-research/bert/blob/eedf5716ce1268e56f0a50264a88cafad334ac61/tokenization.py#L286-L297). - We also remove lines without any Arabic characters. - We then remove diacritics and kashida using [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) (an illustrative sketch follows the evaluation notes below). - Finally, we split each line into sentences with a heuristics-based sentence segmenter. - We train a WordPiece tokenizer on the entire dataset (167 GB text) with a vocabulary size of 30,000 using [HuggingFace's tokenizers](https://github.com/huggingface/tokenizers). - We do not lowercase letters nor strip accents. ### Pre-training - The model was trained on a single cloud TPU (`v3-8`) for one million steps in total. - The first 90,000 steps were trained with a batch size of 1,024 and the rest was trained with a batch size of 256. - The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. - We use whole word masking and a duplicate factor of 10. - We set max predictions per sequence to 20 for the dataset with max sequence length of 128 tokens and 80 for the dataset with max sequence length of 512 tokens. - We use a random seed of 12345, masked language model probability of 0.15, and short sequence probability of 0.1. - The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after. ## Evaluation results - We evaluate our pre-trained language models on five NLP tasks: NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. - We fine-tune and evaluate the models using 12 datasets. - We used Hugging Face's transformers to fine-tune our CAMeLBERT models. - We used transformers `v3.1.0` along with PyTorch `v1.5.1`. - The fine-tuning was done by adding a fully connected linear layer to the last hidden state. - We use \\(F_{1}\\) score as a metric for all tasks. - Code used for fine-tuning is available [here](https://github.com/CAMeL-Lab/CAMeLBERT). 
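The dediacritization and kashida removal described in the preprocessing list can be approximated with CAMeL Tools. The sketch below is illustrative only and is not the exact pre-processing pipeline (which also relied on the original BERT implementation's character cleanup and a heuristics-based sentence segmenter); `dediac_ar` is the CAMeL Tools dediacritization helper.

```python
import re
from camel_tools.utils.dediac import dediac_ar

def preprocess_line(line):
    # Drop lines that contain no Arabic letters at all.
    if not re.search(r'[\u0621-\u064A]', line):
        return ''
    line = dediac_ar(line)              # remove diacritics
    line = line.replace('\u0640', '')   # remove kashida (tatweel)
    return re.sub(r'\s+', ' ', line).strip()

print(preprocess_line('العربيّة لغةٌ ـــ جميلة'))
```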
### Results | Task | Dataset | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | --------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | NER | ANERcorp | MSA | 80.8% | 67.9% | 74.1% | 82.4% | 82.0% | 82.1% | 82.6% | 80.8% | | POS | PATB (MSA) | MSA | 98.1% | 97.8% | 97.7% | 98.3% | 98.2% | 98.3% | 98.2% | 98.2% | | | ARZTB (EGY) | DA | 93.6% | 92.3% | 92.7% | 93.6% | 93.6% | 93.7% | 93.6% | 93.6% | | | Gumar (GLF) | DA | 97.3% | 97.7% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | | SA | ASTD | MSA | 76.3% | 69.4% | 74.6% | 76.9% | 76.0% | 76.8% | 76.7% | 75.3% | | | ArSAS | MSA | 92.7% | 89.4% | 91.8% | 93.0% | 92.6% | 92.5% | 92.5% | 92.3% | | | SemEval | MSA | 69.0% | 58.5% | 68.4% | 72.1% | 70.7% | 72.8% | 71.6% | 71.2% | | DID | MADAR-26 | DA | 62.9% | 61.9% | 61.8% | 62.6% | 62.0% | 62.8% | 62.0% | 62.2% | | | MADAR-6 | DA | 92.5% | 91.5% | 92.2% | 91.9% | 91.8% | 92.2% | 92.1% | 92.0% | | | MADAR-Twitter-5 | MSA | 75.7% | 71.4% | 74.2% | 77.6% | 78.5% | 77.3% | 77.7% | 76.2% | | | NADI | DA | 24.7% | 17.3% | 20.1% | 24.9% | 24.6% | 24.6% | 24.9% | 23.8% | | Poetry | APCD | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | ### Results (Average) | | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | Variant-wise-average<sup>[[1]](#footnote-1)</sup> | MSA | 82.1% | 75.7% | 80.1% | 83.4% | 83.0% | 83.3% | 83.2% | 82.3% | | | DA | 74.4% | 72.1% | 72.9% | 74.2% | 74.0% | 74.3% | 74.1% | 73.9% | | | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | | Macro-Average | ALL | 78.7% | 74.7% | 77.1% | 79.2% | 79.0% | 79.2% | 79.1% | 78.6% | <a name="footnote-1">[1]</a>: Variant-wise-average refers to average over a group of tasks in the same language variant. ## Acknowledgements This research was supported with Cloud TPUs from Google’s TensorFlow Research Cloud (TFRC). ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-madar-twitter5
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
75
null
--- language: - ar license: apache-2.0 widget: - text: "عامل ايه ؟" --- # CAMeLBERT-MSA DID MADAR Twitter-5 Model ## Model description **CAMeLBERT-MSA DID MADAR Twitter-5 Model** is a dialect identification (DID) model that was built by fine-tuning the [CAMeLBERT-MSA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-msa/) model. For the fine-tuning, we used the [MADAR Twitter-5](https://camel.abudhabi.nyu.edu/madar-shared-task-2019/) dataset, which includes 21 labels. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-MSA DID MADAR Twitter-5 model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> did = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-did-madar-twitter5') >>> sentences = ['عامل ايه ؟', 'شلونك ؟ شخبارك ؟'] >>> did(sentences) [{'label': 'Egypt', 'score': 0.5741344094276428}, {'label': 'Kuwait', 'score': 0.5225679278373718}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-nadi
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
71
null
--- language: - ar license: apache-2.0 widget: - text: "عامل ايه ؟" --- # CAMeLBERT-MSA DID NADI Model ## Model description **CAMeLBERT-MSA DID NADI Model** is a dialect identification (DID) model that was built by fine-tuning the [CAMeLBERT Modern Standard Arabic (MSA)](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-msa/) model. For the fine-tuning, we used the [NADI Coountry-level](https://sites.google.com/view/nadi-shared-task) dataset, which includes 21 labels. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-MSA DID NADI model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> did = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-did-nadi') >>> sentences = ['عامل ايه ؟', 'شلونك ؟ شخبارك ؟'] >>> did(sentences) [{'label': 'Egypt', 'score': 0.9242768287658691}, {'label': 'Saudi_Arabia', 'score': 0.3400847613811493}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
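To inspect the full per-country distribution instead of only the top label, the classification pipeline can be asked to return all scores. This sketch is not part of the original card; `return_all_scores` is assumed to be available in the transformers versions the card targets (newer releases expose the same behaviour through `top_k`).

```python
from transformers import pipeline

did = pipeline(
    'text-classification',
    model='CAMeL-Lab/bert-base-arabic-camelbert-msa-did-nadi',
    return_all_scores=True,  # one score per country label instead of the argmax only
)

scores = did('عامل ايه ؟')[0]
# Sort the 21 country labels by model confidence, highest first.
for entry in sorted(scores, key=lambda s: s['score'], reverse=True)[:5]:
    print(entry['label'], round(entry['score'], 3))
```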
CAMeL-Lab/bert-base-arabic-camelbert-msa-eighth
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- language: - ar license: apache-2.0 widget: - text: "الهدف من الحياة هو [MASK] ." --- # CAMeLBERT: A collection of pre-trained models for Arabic NLP tasks ## Model description **CAMeLBERT** is a collection of BERT models pre-trained on Arabic texts with different sizes and variants. We release pre-trained language models for Modern Standard Arabic (MSA), dialectal Arabic (DA), and classical Arabic (CA), in addition to a model pre-trained on a mix of the three. We also provide additional models that are pre-trained on a scaled-down set of the MSA variant (half, quarter, eighth, and sixteenth). The details are described in the paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* This model card describes **CAMeLBERT-MSA-eighth** (`bert-base-arabic-camelbert-msa-eighth`), a model pre-trained on an eighth of the full MSA dataset. ||Model|Variant|Size|#Word| |-|-|:-:|-:|-:| ||`bert-base-arabic-camelbert-mix`|CA,DA,MSA|167GB|17.3B| ||`bert-base-arabic-camelbert-ca`|CA|6GB|847M| ||`bert-base-arabic-camelbert-da`|DA|54GB|5.8B| ||`bert-base-arabic-camelbert-msa`|MSA|107GB|12.6B| ||`bert-base-arabic-camelbert-msa-half`|MSA|53GB|6.3B| ||`bert-base-arabic-camelbert-msa-quarter`|MSA|27GB|3.1B| |✔|`bert-base-arabic-camelbert-msa-eighth`|MSA|14GB|1.6B| ||`bert-base-arabic-camelbert-msa-sixteenth`|MSA|6GB|746M| ## Intended uses You can use the released model for either masked language modeling or next sentence prediction. However, it is mostly intended to be fine-tuned on an NLP task, such as NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. We release our fine-tuninig code [here](https://github.com/CAMeL-Lab/CAMeLBERT). #### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-eighth') >>> unmasker("الهدف من الحياة هو [MASK] .") [{'sequence': '[CLS] الهدف من الحياة هو الحياة. [SEP]', 'score': 0.057812128216028214, 'token': 3696, 'token_str': 'الحياة'}, {'sequence': '[CLS] الهدف من الحياة هو النجاح. [SEP]', 'score': 0.05573025345802307, 'token': 6232, 'token_str': 'النجاح'}, {'sequence': '[CLS] الهدف من الحياة هو الكمال. [SEP]', 'score': 0.035942986607551575, 'token': 17188, 'token_str': 'الكمال'}, {'sequence': '[CLS] الهدف من الحياة هو التعلم. [SEP]', 'score': 0.03375256434082985, 'token': 12554, 'token_str': 'التعلم'}, {'sequence': '[CLS] الهدف من الحياة هو العمل. [SEP]', 'score': 0.030303971841931343, 'token': 2854, 'token_str': 'العمل'}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-eighth') model = AutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-eighth') text = "مرحبا يا عالم." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AutoTokenizer, TFAutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-eighth') model = TFAutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-eighth') text = "مرحبا يا عالم." 
encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Training data - MSA (Modern Standard Arabic) - [The Arabic Gigaword Fifth Edition](https://catalog.ldc.upenn.edu/LDC2011T11) - [Abu El-Khair Corpus](http://www.abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus) - [OSIAN corpus](https://vlo.clarin.eu/search;jsessionid=31066390B2C9E8C6304845BA79869AC1?1&q=osian) - [Arabic Wikipedia](https://archive.org/details/arwiki-20190201) - The unshuffled version of the Arabic [OSCAR corpus](https://oscar-corpus.com/) ## Training procedure We use [the original implementation](https://github.com/google-research/bert) released by Google for pre-training. We follow the original English BERT model's hyperparameters for pre-training, unless otherwise specified. ### Preprocessing - After extracting the raw text from each corpus, we apply the following pre-processing. - We first remove invalid characters and normalize white spaces using the utilities provided by [the original BERT implementation](https://github.com/google-research/bert/blob/eedf5716ce1268e56f0a50264a88cafad334ac61/tokenization.py#L286-L297). - We also remove lines without any Arabic characters. - We then remove diacritics and kashida using [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools). - Finally, we split each line into sentences with a heuristics-based sentence segmenter. - We train a WordPiece tokenizer on the entire dataset (167 GB text) with a vocabulary size of 30,000 using [HuggingFace's tokenizers](https://github.com/huggingface/tokenizers). - We do not lowercase letters nor strip accents. ### Pre-training - The model was trained on a single cloud TPU (`v3-8`) for one million steps in total. - The first 90,000 steps were trained with a batch size of 1,024 and the rest was trained with a batch size of 256. - The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. - We use whole word masking and a duplicate factor of 10. - We set max predictions per sequence to 20 for the dataset with max sequence length of 128 tokens and 80 for the dataset with max sequence length of 512 tokens. - We use a random seed of 12345, masked language model probability of 0.15, and short sequence probability of 0.1. - The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after (an illustrative PyTorch equivalent is sketched after the evaluation notes below). ## Evaluation results - We evaluate our pre-trained language models on five NLP tasks: NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. - We fine-tune and evaluate the models using 12 datasets. - We used Hugging Face's transformers to fine-tune our CAMeLBERT models. - We used transformers `v3.1.0` along with PyTorch `v1.5.1`. - The fine-tuning was done by adding a fully connected linear layer to the last hidden state. - We use \\(F_{1}\\) score as a metric for all tasks. - Code used for fine-tuning is available [here](https://github.com/CAMeL-Lab/CAMeLBERT). 
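For readers more familiar with PyTorch, the Adam settings listed above map onto the following sketch. It is illustrative only and is not the pre-training code (which used Google's original TensorFlow implementation); the model parameters are replaced by a stand-in tensor.

```python
import torch
from transformers import get_linear_schedule_with_warmup

# Stand-in parameters; the real pre-training optimized the full BERT model.
params = [torch.nn.Parameter(torch.zeros(10))]

# Adam with lr 1e-4, betas (0.9, 0.999), and weight decay 0.01, as listed above.
optimizer = torch.optim.AdamW(params, lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01)

# 10,000 warmup steps, then linear decay across the one-million-step schedule.
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=10_000, num_training_steps=1_000_000
)

for step in range(5):   # stand-in training loop; loss.backward() would precede step()
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())
```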
### Results | Task | Dataset | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | --------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | NER | ANERcorp | MSA | 80.8% | 67.9% | 74.1% | 82.4% | 82.0% | 82.1% | 82.6% | 80.8% | | POS | PATB (MSA) | MSA | 98.1% | 97.8% | 97.7% | 98.3% | 98.2% | 98.3% | 98.2% | 98.2% | | | ARZTB (EGY) | DA | 93.6% | 92.3% | 92.7% | 93.6% | 93.6% | 93.7% | 93.6% | 93.6% | | | Gumar (GLF) | DA | 97.3% | 97.7% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | | SA | ASTD | MSA | 76.3% | 69.4% | 74.6% | 76.9% | 76.0% | 76.8% | 76.7% | 75.3% | | | ArSAS | MSA | 92.7% | 89.4% | 91.8% | 93.0% | 92.6% | 92.5% | 92.5% | 92.3% | | | SemEval | MSA | 69.0% | 58.5% | 68.4% | 72.1% | 70.7% | 72.8% | 71.6% | 71.2% | | DID | MADAR-26 | DA | 62.9% | 61.9% | 61.8% | 62.6% | 62.0% | 62.8% | 62.0% | 62.2% | | | MADAR-6 | DA | 92.5% | 91.5% | 92.2% | 91.9% | 91.8% | 92.2% | 92.1% | 92.0% | | | MADAR-Twitter-5 | MSA | 75.7% | 71.4% | 74.2% | 77.6% | 78.5% | 77.3% | 77.7% | 76.2% | | | NADI | DA | 24.7% | 17.3% | 20.1% | 24.9% | 24.6% | 24.6% | 24.9% | 23.8% | | Poetry | APCD | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | ### Results (Average) | | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | Variant-wise-average<sup>[[1]](#footnote-1)</sup> | MSA | 82.1% | 75.7% | 80.1% | 83.4% | 83.0% | 83.3% | 83.2% | 82.3% | | | DA | 74.4% | 72.1% | 72.9% | 74.2% | 74.0% | 74.3% | 74.1% | 73.9% | | | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | | Macro-Average | ALL | 78.7% | 74.7% | 77.1% | 79.2% | 79.0% | 79.2% | 79.1% | 78.6% | <a name="footnote-1">[1]</a>: Variant-wise-average refers to average over a group of tasks in the same language variant. ## Acknowledgements This research was supported with Cloud TPUs from Google’s TensorFlow Research Cloud (TFRC). ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-msa-half
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- language: - ar license: apache-2.0 widget: - text: "الهدف من الحياة هو [MASK] ." --- # CAMeLBERT: A collection of pre-trained models for Arabic NLP tasks ## Model description **CAMeLBERT** is a collection of BERT models pre-trained on Arabic texts with different sizes and variants. We release pre-trained language models for Modern Standard Arabic (MSA), dialectal Arabic (DA), and classical Arabic (CA), in addition to a model pre-trained on a mix of the three. We also provide additional models that are pre-trained on a scaled-down set of the MSA variant (half, quarter, eighth, and sixteenth). The details are described in the paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* This model card describes **CAMeLBERT-MSA-half** (`bert-base-arabic-camelbert-msa-half`), a model pre-trained on a half of the full MSA dataset. ||Model|Variant|Size|#Word| |-|-|:-:|-:|-:| ||`bert-base-arabic-camelbert-mix`|CA,DA,MSA|167GB|17.3B| ||`bert-base-arabic-camelbert-ca`|CA|6GB|847M| ||`bert-base-arabic-camelbert-da`|DA|54GB|5.8B| ||`bert-base-arabic-camelbert-msa`|MSA|107GB|12.6B| |✔|`bert-base-arabic-camelbert-msa-half`|MSA|53GB|6.3B| ||`bert-base-arabic-camelbert-msa-quarter`|MSA|27GB|3.1B| ||`bert-base-arabic-camelbert-msa-eighth`|MSA|14GB|1.6B| ||`bert-base-arabic-camelbert-msa-sixteenth`|MSA|6GB|746M| ## Intended uses You can use the released model for either masked language modeling or next sentence prediction. However, it is mostly intended to be fine-tuned on an NLP task, such as NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. We release our fine-tuninig code [here](https://github.com/CAMeL-Lab/CAMeLBERT). #### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-half') >>> unmasker("الهدف من الحياة هو [MASK] .") [{'sequence': '[CLS] الهدف من الحياة هو الحياة. [SEP]', 'score': 0.09132730215787888, 'token': 3696, 'token_str': 'الحياة'}, {'sequence': '[CLS] الهدف من الحياة هو.. [SEP]', 'score': 0.08282623440027237, 'token': 18, 'token_str': '.'}, {'sequence': '[CLS] الهدف من الحياة هو البقاء. [SEP]', 'score': 0.04031957685947418, 'token': 9331, 'token_str': 'البقاء'}, {'sequence': '[CLS] الهدف من الحياة هو النجاح. [SEP]', 'score': 0.032019514590501785, 'token': 6232, 'token_str': 'النجاح'}, {'sequence': '[CLS] الهدف من الحياة هو الحب. [SEP]', 'score': 0.028731243684887886, 'token': 3088, 'token_str': 'الحب'}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-half') model = AutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-half') text = "مرحبا يا عالم." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AutoTokenizer, TFAutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-half') model = TFAutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-half') text = "مرحبا يا عالم." 
encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Training data - MSA (Modern Standard Arabic) - [The Arabic Gigaword Fifth Edition](https://catalog.ldc.upenn.edu/LDC2011T11) - [Abu El-Khair Corpus](http://www.abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus) - [OSIAN corpus](https://vlo.clarin.eu/search;jsessionid=31066390B2C9E8C6304845BA79869AC1?1&q=osian) - [Arabic Wikipedia](https://archive.org/details/arwiki-20190201) - The unshuffled version of the Arabic [OSCAR corpus](https://oscar-corpus.com/) ## Training procedure We use [the original implementation](https://github.com/google-research/bert) released by Google for pre-training. We follow the original English BERT model's hyperparameters for pre-training, unless otherwise specified. ### Preprocessing - After extracting the raw text from each corpus, we apply the following pre-processing. - We first remove invalid characters and normalize white spaces using the utilities provided by [the original BERT implementation](https://github.com/google-research/bert/blob/eedf5716ce1268e56f0a50264a88cafad334ac61/tokenization.py#L286-L297). - We also remove lines without any Arabic characters. - We then remove diacritics and kashida using [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools). - Finally, we split each line into sentences with a heuristics-based sentence segmenter. - We train a WordPiece tokenizer on the entire dataset (167 GB text) with a vocabulary size of 30,000 using [HuggingFace's tokenizers](https://github.com/huggingface/tokenizers). - We do not lowercase letters nor strip accents. ### Pre-training - The model was trained on a single cloud TPU (`v3-8`) for one million steps in total. - The first 90,000 steps were trained with a batch size of 1,024 and the rest was trained with a batch size of 256. - The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. - We use whole word masking and a duplicate factor of 10. - We set max predictions per sequence to 20 for the dataset with max sequence length of 128 tokens and 80 for the dataset with max sequence length of 512 tokens. - We use a random seed of 12345, masked language model probability of 0.15, and short sequence probability of 0.1. - The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after. ## Evaluation results - We evaluate our pre-trained language models on five NLP tasks: NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. - We fine-tune and evaluate the models using 12 datasets. - We used Hugging Face's transformers to fine-tune our CAMeLBERT models. - We used transformers `v3.1.0` along with PyTorch `v1.5.1`. - The fine-tuning was done by adding a fully connected linear layer to the last hidden state. - We use \\(F_{1}\\) score as a metric for all tasks. - Code used for fine-tuning is available [here](https://github.com/CAMeL-Lab/CAMeLBERT). 
### Results | Task | Dataset | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | --------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | NER | ANERcorp | MSA | 80.8% | 67.9% | 74.1% | 82.4% | 82.0% | 82.1% | 82.6% | 80.8% | | POS | PATB (MSA) | MSA | 98.1% | 97.8% | 97.7% | 98.3% | 98.2% | 98.3% | 98.2% | 98.2% | | | ARZTB (EGY) | DA | 93.6% | 92.3% | 92.7% | 93.6% | 93.6% | 93.7% | 93.6% | 93.6% | | | Gumar (GLF) | DA | 97.3% | 97.7% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | | SA | ASTD | MSA | 76.3% | 69.4% | 74.6% | 76.9% | 76.0% | 76.8% | 76.7% | 75.3% | | | ArSAS | MSA | 92.7% | 89.4% | 91.8% | 93.0% | 92.6% | 92.5% | 92.5% | 92.3% | | | SemEval | MSA | 69.0% | 58.5% | 68.4% | 72.1% | 70.7% | 72.8% | 71.6% | 71.2% | | DID | MADAR-26 | DA | 62.9% | 61.9% | 61.8% | 62.6% | 62.0% | 62.8% | 62.0% | 62.2% | | | MADAR-6 | DA | 92.5% | 91.5% | 92.2% | 91.9% | 91.8% | 92.2% | 92.1% | 92.0% | | | MADAR-Twitter-5 | MSA | 75.7% | 71.4% | 74.2% | 77.6% | 78.5% | 77.3% | 77.7% | 76.2% | | | NADI | DA | 24.7% | 17.3% | 20.1% | 24.9% | 24.6% | 24.6% | 24.9% | 23.8% | | Poetry | APCD | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | ### Results (Average) | | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | Variant-wise-average<sup>[[1]](#footnote-1)</sup> | MSA | 82.1% | 75.7% | 80.1% | 83.4% | 83.0% | 83.3% | 83.2% | 82.3% | | | DA | 74.4% | 72.1% | 72.9% | 74.2% | 74.0% | 74.3% | 74.1% | 73.9% | | | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | | Macro-Average | ALL | 78.7% | 74.7% | 77.1% | 79.2% | 79.0% | 79.2% | 79.1% | 78.6% | <a name="footnote-1">[1]</a>: Variant-wise-average refers to average over a group of tasks in the same language variant. ## Acknowledgements This research was supported with Cloud TPUs from Google’s TensorFlow Research Cloud (TFRC). ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-msa-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
229
null
--- language: - ar license: apache-2.0 widget: - text: "إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع" --- # CAMeLBERT MSA NER Model ## Model description **CAMeLBERT MSA NER Model** is a Named Entity Recognition (NER) model that was built by fine-tuning the [CAMeLBERT Modern Standard Arabic (MSA)](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-msa/) model. For the fine-tuning, we used the [ANERcorp](https://camel.abudhabi.nyu.edu/anercorp/) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678). "* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT MSA NER model directly as part of our [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component (*recommended*) or as part of the transformers pipeline. #### How to use To use the model with the [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component: ```python >>> from camel_tools.ner import NERecognizer >>> from camel_tools.tokenizers.word import simple_word_tokenize >>> ner = NERecognizer('CAMeL-Lab/bert-base-arabic-camelbert-msa-ner') >>> sentence = simple_word_tokenize('إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع') >>> ner.predict_sentence(sentence) >>> ['O', 'B-LOC', 'O', 'O', 'O', 'O', 'B-LOC', 'I-LOC', 'I-LOC', 'O'] ``` You can also use the NER model directly with a transformers pipeline: ```python >>> from transformers import pipeline >>> ner = pipeline('ner', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-ner') >>> ner("إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع") [{'word': 'أبوظبي', 'score': 0.9895730018615723, 'entity': 'B-LOC', 'index': 2, 'start': 6, 'end': 12}, {'word': 'الإمارات', 'score': 0.8156259655952454, 'entity': 'B-LOC', 'index': 8, 'start': 33, 'end': 41}, {'word': 'العربية', 'score': 0.890906810760498, 'entity': 'I-LOC', 'index': 9, 'start': 42, 'end': 49}, {'word': 'المتحدة', 'score': 0.8169114589691162, 'entity': 'I-LOC', 'index': 10, 'start': 50, 'end': 57}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. 
Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
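The pipeline output above is per token. A small post-processing step is enough to merge consecutive `B-`/`I-` predictions into entity spans using the character offsets shown in the example; the sketch below is illustrative and not part of the original card (newer transformers releases can also aggregate inside the pipeline itself).

```python
from transformers import pipeline

ner = pipeline('ner', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-ner')
text = 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع'
predictions = ner(text)

# Merge consecutive B-/I- predictions of the same type into character-level spans.
spans = []
for pred in predictions:
    if '-' not in pred['entity']:
        continue  # skip non-entity ('O') predictions if the pipeline returns them
    tag, ent_type = pred['entity'].split('-', 1)
    if tag == 'B' or not spans or spans[-1]['type'] != ent_type:
        spans.append({'type': ent_type, 'start': pred['start'], 'end': pred['end']})
    else:
        spans[-1]['end'] = pred['end']

for span in spans:
    print(span['type'], text[span['start']:span['end']])
```

With the example sentence above, this yields two LOC spans: one for أبوظبي and one for the full الإمارات العربية المتحدة phrase.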
CAMeL-Lab/bert-base-arabic-camelbert-msa-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- language: - ar license: apache-2.0 widget: - text: 'الخيل والليل والبيداء تعرفني [SEP] والسيف والرمح والقرطاس والقلم' --- # CAMeLBERT-MSA Poetry Classification Model ## Model description **CAMeLBERT-MSA Poetry Classification Model** is a poetry classification model that was built by fine-tuning the [CAMeLBERT Modern Standard Arabic (MSA)](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-msa/) model. For the fine-tuning, we used the [APCD](https://arxiv.org/pdf/1905.05700.pdf) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-MSA Poetry Classification model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> poetry = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-poetry') >>> # A list of verses where each verse consists of two parts. >>> verses = [ ['الخيل والليل والبيداء تعرفني' ,'والسيف والرمح والقرطاس والقلم'], ['قم للمعلم وفه التبجيلا' ,'كاد المعلم ان يكون رسولا'] ] >>> # A function that concatenates the halves of each verse by using the [SEP] token. >>> join_verse = lambda half: ' [SEP] '.join(half) >>> # Apply this to all the verses in the list. >>> verses = [join_verse(verse) for verse in verses] >>> poetry(verses) [{'label': 'البسيط', 'score': 0.9914996027946472}, {'label': 'الكامل', 'score': 0.917242169380188}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
52
null
--- language: - ar license: apache-2.0 widget: - text: 'عامل ايه ؟' --- # CAMeLBERT-MSA POS-EGY Model ## Model description **CAMeLBERT-MSA POS-EGY Model** is an Egyptian Arabic POS tagging model that was built by fine-tuning the [CAMeLBERT-MSA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-msa/) model. For the fine-tuning, we used the ARZTB dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-MSA POS-EGY model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-egy') >>> text = 'عامل ايه ؟' >>> pos(text) [{'entity': 'adj', 'score': 0.99979395, 'index': 1, 'word': 'عامل', 'start': 0, 'end': 4}, {'entity': 'pron_interrog', 'score': 0.998192, 'index': 2, 'word': 'ايه', 'start': 5, 'end': 8}, {'entity': 'punc', 'score': 0.99929804, 'index': 3, 'word': '؟', 'start': 9, 'end': 10}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- language: - ar license: apache-2.0 widget: - text: 'شلونك ؟ شخبارك ؟' --- # CAMeLBERT-MSA POS-GLF Model ## Model description **CAMeLBERT-MSA POS-GLF Model** is a Gulf Arabic POS tagging model that was built by fine-tuning the [CAMeLBERT-MSA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-msa/) model. For the fine-tuning, we used the [Gumar](https://camel.abudhabi.nyu.edu/annotated-gumar-corpus/) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-MSA POS-GLF model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-glf') >>> text = 'شلونك ؟ شخبارك ؟' >>> pos(text) [{'entity': 'adv_interrog', 'score': 0.5622676, 'index': 1, 'word': 'شلون', 'start': 0, 'end': 4}, {'entity': 'prep', 'score': 0.99969727, 'index': 2, 'word': '##ك', 'start': 4, 'end': 5}, {'entity': 'punc', 'score': 0.9999299, 'index': 3, 'word': '؟', 'start': 6, 'end': 7}, {'entity': 'noun', 'score': 0.9843815, 'index': 4, 'word': 'ش', 'start': 8, 'end': 9}, {'entity': 'noun', 'score': 0.9998467, 'index': 5, 'word': '##خبار', 'start': 9, 'end': 13}, {'entity': 'prep', 'score': 0.9993611, 'index': 6, 'word': '##ك', 'start': 13, 'end': 14}, {'entity': 'punc', 'score': 0.99993765, 'index': 7, 'word': '؟', 'start': 15, 'end': 16}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
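The Gulf Arabic example in the card above tags WordPiece pieces, with `##` marking continuation pieces. If you want one tag per whitespace word, a simple (and admittedly lossy) post-processing step is to glue `##` pieces onto the preceding piece and keep the first piece's tag. The sketch below is not part of the original card.

```python
from transformers import pipeline

pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-glf')
predictions = pos('شلونك ؟ شخبارك ؟')

# Glue '##' continuation pieces onto the previous piece, keeping the first piece's tag.
words = []
for pred in predictions:
    piece = pred['word']
    if piece.startswith('##') and words:
        words[-1]['word'] += piece[2:]
    else:
        words.append({'word': piece, 'tag': pred['entity'], 'score': pred['score']})

for w in words:
    print(w['word'], w['tag'], round(float(w['score']), 3))
```

Note that clitic pieces such as `##ك` carry their own tags in the raw output, so collapsing to the first piece's tag discards that information; keep the raw piece-level output if you need clitic-level tags.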
CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
133
null
--- language: - ar license: apache-2.0 widget: - text: 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع' --- # CAMeLBERT-MSA POS-MSA Model ## Model description **CAMeLBERT-MSA POS-MSA Model** is a Modern Standard Arabic (MSA) POS tagging model that was built by fine-tuning the [CAMeLBERT-MSA](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-msa/) model. For the fine-tuning, we used the [PATB](https://dl.acm.org/doi/pdf/10.5555/1621804.1621808) dataset . Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT-MSA POS-MSA model as part of the transformers pipeline. This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon. #### How to use To use the model with a transformers pipeline: ```python >>> from transformers import pipeline >>> pos = pipeline('token-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-msa') >>> text = 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع' >>> pos(text) [{'entity': 'noun', 'score': 0.9999764, 'index': 1, 'word': 'إمارة', 'start': 0, 'end': 5}, {'entity': 'noun_prop', 'score': 0.99991846, 'index': 2, 'word': 'أبوظبي', 'start': 6, 'end': 12}, {'entity': 'pron', 'score': 0.9998356, 'index': 3, 'word': 'هي', 'start': 13, 'end': 15}, {'entity': 'noun', 'score': 0.99368894, 'index': 4, 'word': 'إحدى', 'start': 16, 'end': 20}, {'entity': 'noun', 'score': 0.9999426, 'index': 5, 'word': 'إما', 'start': 21, 'end': 24}, {'entity': 'noun', 'score': 0.9999339, 'index': 6, 'word': '##رات', 'start': 24, 'end': 27}, {'entity': 'noun', 'score': 0.99996775, 'index': 7, 'word': 'دولة', 'start': 28, 'end': 32}, {'entity': 'noun', 'score': 0.99996895, 'index': 8, 'word': 'الإمارات', 'start': 33, 'end': 41}, {'entity': 'adj', 'score': 0.99990183, 'index': 9, 'word': 'العربية', 'start': 42, 'end': 49}, {'entity': 'adj', 'score': 0.9999347, 'index': 10, 'word': 'المتحدة', 'start': 50, 'end': 57}, {'entity': 'noun_num', 'score': 0.99931145, 'index': 11, 'word': 'السبع', 'start': 58, 'end': 63}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. 
We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
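The note above mentions that the models can also be downloaded and run without the pipeline. A minimal sketch of that route is shown below, assuming a recent `transformers` version; it loads the checkpoint with the generic Auto classes and reads the tag names from the label map stored in the model config.

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

model_id = 'CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-msa'
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForTokenClassification.from_pretrained(model_id)

text = 'إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع'
encoded = tokenizer(text, return_tensors='pt')

with torch.no_grad():
    logits = model(**encoded).logits

# map the highest-scoring class of each sub-word token to its tag name
label_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(encoded['input_ids'][0])
for token, label_id in zip(tokens, label_ids):
    if token not in tokenizer.all_special_tokens:
        print(token, model.config.id2label[label_id])
```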
CAMeL-Lab/bert-base-arabic-camelbert-msa-quarter
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- language: - ar license: apache-2.0 widget: - text: "الهدف من الحياة هو [MASK] ." --- # CAMeLBERT: A collection of pre-trained models for Arabic NLP tasks ## Model description **CAMeLBERT** is a collection of BERT models pre-trained on Arabic texts with different sizes and variants. We release pre-trained language models for Modern Standard Arabic (MSA), dialectal Arabic (DA), and classical Arabic (CA), in addition to a model pre-trained on a mix of the three. We also provide additional models that are pre-trained on a scaled-down set of the MSA variant (half, quarter, eighth, and sixteenth). The details are described in the paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* This model card describes **CAMeLBERT-MSA-quarter** (`bert-base-arabic-camelbert-msa-quarter`), a model pre-trained on a quarter of the full MSA dataset. ||Model|Variant|Size|#Word| |-|-|:-:|-:|-:| ||`bert-base-arabic-camelbert-mix`|CA,DA,MSA|167GB|17.3B| ||`bert-base-arabic-camelbert-ca`|CA|6GB|847M| ||`bert-base-arabic-camelbert-da`|DA|54GB|5.8B| ||`bert-base-arabic-camelbert-msa`|MSA|107GB|12.6B| ||`bert-base-arabic-camelbert-msa-half`|MSA|53GB|6.3B| |✔|`bert-base-arabic-camelbert-msa-quarter`|MSA|27GB|3.1B| ||`bert-base-arabic-camelbert-msa-eighth`|MSA|14GB|1.6B| ||`bert-base-arabic-camelbert-msa-sixteenth`|MSA|6GB|746M| ## Intended uses You can use the released model for either masked language modeling or next sentence prediction. However, it is mostly intended to be fine-tuned on an NLP task, such as NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. We release our fine-tuninig code [here](https://github.com/CAMeL-Lab/CAMeLBERT). #### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-quarter') >>> unmasker("الهدف من الحياة هو [MASK] .") [{'sequence': '[CLS] الهدف من الحياة هو الحياة. [SEP]', 'score': 0.17437894642353058, 'token': 3696, 'token_str': 'الحياة'}, {'sequence': '[CLS] الهدف من الحياة هو النجاح. [SEP]', 'score': 0.042852893471717834, 'token': 6232, 'token_str': 'النجاح'}, {'sequence': '[CLS] الهدف من الحياة هو البقاء. [SEP]', 'score': 0.030925093218684196, 'token': 9331, 'token_str': 'البقاء'}, {'sequence': '[CLS] الهدف من الحياة هو الحب. [SEP]', 'score': 0.02964409440755844, 'token': 3088, 'token_str': 'الحب'}, {'sequence': '[CLS] الهدف من الحياة هو الكمال. [SEP]', 'score': 0.028030086308717728, 'token': 17188, 'token_str': 'الكمال'}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-quarter') model = AutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-quarter') text = "مرحبا يا عالم." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AutoTokenizer, TFAutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-quarter') model = TFAutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-quarter') text = "مرحبا يا عالم." 
encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Training data - MSA (Modern Standard Arabic) - [The Arabic Gigaword Fifth Edition](https://catalog.ldc.upenn.edu/LDC2011T11) - [Abu El-Khair Corpus](http://www.abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus) - [OSIAN corpus](https://vlo.clarin.eu/search;jsessionid=31066390B2C9E8C6304845BA79869AC1?1&q=osian) - [Arabic Wikipedia](https://archive.org/details/arwiki-20190201) - The unshuffled version of the Arabic [OSCAR corpus](https://oscar-corpus.com/) ## Training procedure We use [the original implementation](https://github.com/google-research/bert) released by Google for pre-training. We follow the original English BERT model's hyperparameters for pre-training, unless otherwise specified. ### Preprocessing - After extracting the raw text from each corpus, we apply the following pre-processing. - We first remove invalid characters and normalize white spaces using the utilities provided by [the original BERT implementation](https://github.com/google-research/bert/blob/eedf5716ce1268e56f0a50264a88cafad334ac61/tokenization.py#L286-L297). - We also remove lines without any Arabic characters. - We then remove diacritics and kashida using [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools). - Finally, we split each line into sentences with a heuristics-based sentence segmenter. - We train a WordPiece tokenizer on the entire dataset (167 GB text) with a vocabulary size of 30,000 using [HuggingFace's tokenizers](https://github.com/huggingface/tokenizers). - We do not lowercase letters nor strip accents. ### Pre-training - The model was trained on a single cloud TPU (`v3-8`) for one million steps in total. - The first 90,000 steps were trained with a batch size of 1,024 and the rest was trained with a batch size of 256. - The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. - We use whole word masking and a duplicate factor of 10. - We set max predictions per sequence to 20 for the dataset with max sequence length of 128 tokens and 80 for the dataset with max sequence length of 512 tokens. - We use a random seed of 12345, masked language model probability of 0.15, and short sequence probability of 0.1. - The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after. ## Evaluation results - We evaluate our pre-trained language models on five NLP tasks: NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. - We fine-tune and evaluate the models using 12 dataset. - We used Hugging Face's transformers to fine-tune our CAMeLBERT models. - We used transformers `v3.1.0` along with PyTorch `v1.5.1`. - The fine-tuning was done by adding a fully connected linear layer to the last hidden state. - We use \\(F_{1}\\) score as a metric for all tasks. - Code used for fine-tuning is available [here](https://github.com/CAMeL-Lab/CAMeLBERT). 
### Results | Task | Dataset | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | --------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | NER | ANERcorp | MSA | 80.8% | 67.9% | 74.1% | 82.4% | 82.0% | 82.1% | 82.6% | 80.8% | | POS | PATB (MSA) | MSA | 98.1% | 97.8% | 97.7% | 98.3% | 98.2% | 98.3% | 98.2% | 98.2% | | | ARZTB (EGY) | DA | 93.6% | 92.3% | 92.7% | 93.6% | 93.6% | 93.7% | 93.6% | 93.6% | | | Gumar (GLF) | DA | 97.3% | 97.7% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | | SA | ASTD | MSA | 76.3% | 69.4% | 74.6% | 76.9% | 76.0% | 76.8% | 76.7% | 75.3% | | | ArSAS | MSA | 92.7% | 89.4% | 91.8% | 93.0% | 92.6% | 92.5% | 92.5% | 92.3% | | | SemEval | MSA | 69.0% | 58.5% | 68.4% | 72.1% | 70.7% | 72.8% | 71.6% | 71.2% | | DID | MADAR-26 | DA | 62.9% | 61.9% | 61.8% | 62.6% | 62.0% | 62.8% | 62.0% | 62.2% | | | MADAR-6 | DA | 92.5% | 91.5% | 92.2% | 91.9% | 91.8% | 92.2% | 92.1% | 92.0% | | | MADAR-Twitter-5 | MSA | 75.7% | 71.4% | 74.2% | 77.6% | 78.5% | 77.3% | 77.7% | 76.2% | | | NADI | DA | 24.7% | 17.3% | 20.1% | 24.9% | 24.6% | 24.6% | 24.9% | 23.8% | | Poetry | APCD | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | ### Results (Average) | | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | Variant-wise-average<sup>[[1]](#footnote-1)</sup> | MSA | 82.1% | 75.7% | 80.1% | 83.4% | 83.0% | 83.3% | 83.2% | 82.3% | | | DA | 74.4% | 72.1% | 72.9% | 74.2% | 74.0% | 74.3% | 74.1% | 73.9% | | | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | | Macro-Average | ALL | 78.7% | 74.7% | 77.1% | 79.2% | 79.0% | 79.2% | 79.1% | 78.6% | <a name="footnote-1">[1]</a>: Variant-wise-average refers to average over a group of tasks in the same language variant. ## Acknowledgements This research was supported with Cloud TPUs from Google’s TensorFlow Research Cloud (TFRC). ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
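The pre-training section above describes Adam with a peak learning rate of 1e-4, 10,000 warmup steps, and linear decay over one million steps. The snippet below sketches that schedule with the `get_linear_schedule_with_warmup` helper from `transformers`; the toy parameter and `AdamW` optimizer are stand-ins (the actual pre-training used Google's BERT implementation, not this code).

```python
import torch
from transformers import get_linear_schedule_with_warmup

# A single dummy parameter stands in for the real model; the schedule shape is the point.
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.AdamW(params, lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01)

total_steps = 1_000_000   # one million pre-training steps, as described above
warmup_steps = 10_000     # linear warmup, then linear decay to zero

scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps
)

for step in range(5):     # a few steps just to show the learning rate moving
    optimizer.step()
    scheduler.step()
    print(step, scheduler.get_last_lr()[0])
```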
CAMeL-Lab/bert-base-arabic-camelbert-msa-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
574
null
--- language: - ar license: apache-2.0 widget: - text: "أنا بخير" --- # CAMeLBERT MSA SA Model ## Model description **CAMeLBERT MSA SA Model** is a Sentiment Analysis (SA) model that was built by fine-tuning the [CAMeLBERT Modern Standard Arabic (MSA)](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-msa/) model. For the fine-tuning, we used the [ASTD](https://aclanthology.org/D15-1299.pdf), [ArSAS](http://lrec-conf.org/workshops/lrec2018/W30/pdf/22_W30.pdf), and [SemEval](https://aclanthology.org/S17-2088.pdf) datasets. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT). ## Intended uses You can use the CAMeLBERT MSA SA model directly as part of our [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) SA component (*recommended*) or as part of the transformers pipeline. #### How to use To use the model with the [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) SA component: ```python >>> from camel_tools.sentiment import SentimentAnalyzer >>> sa = SentimentAnalyzer("CAMeL-Lab/bert-base-arabic-camelbert-msa-sentiment") >>> sentences = ['أنا بخير', 'أنا لست بخير'] >>> sa.predict(sentences) >>> ['positive', 'negative'] ``` You can also use the SA model directly with a transformers pipeline: ```python >>> from transformers import pipeline >>> sa = pipeline('sentiment-analysis', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-sentiment') >>> sentences = ['أنا بخير', 'أنا لست بخير'] >>> sa(sentences) [{'label': 'positive', 'score': 0.9616648554801941}, {'label': 'negative', 'score': 0.9779177904129028}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
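Beyond the two interfaces shown above, the per-class scores can also be read directly from the raw model. The sketch below assumes a recent `transformers` version and that the label names are stored in the checkpoint's config (as the pipeline output above suggests).

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = 'CAMeL-Lab/bert-base-arabic-camelbert-msa-sentiment'
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

sentences = ['أنا بخير', 'أنا لست بخير']
encoded = tokenizer(sentences, return_tensors='pt', padding=True)

with torch.no_grad():
    probs = torch.softmax(model(**encoded).logits, dim=-1)

# print the probability of every label, not only the top-scoring one
for sentence, row in zip(sentences, probs):
    scores = {model.config.id2label[i]: round(p.item(), 3) for i, p in enumerate(row)}
    print(sentence, scores)
```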
CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- language: - ar license: apache-2.0 widget: - text: "الهدف من الحياة هو [MASK] ." --- # CAMeLBERT: A collection of pre-trained models for Arabic NLP tasks ## Model description **CAMeLBERT** is a collection of BERT models pre-trained on Arabic texts with different sizes and variants. We release pre-trained language models for Modern Standard Arabic (MSA), dialectal Arabic (DA), and classical Arabic (CA), in addition to a model pre-trained on a mix of the three. We also provide additional models that are pre-trained on a scaled-down set of the MSA variant (half, quarter, eighth, and sixteenth). The details are described in the paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* This model card describes **CAMeLBERT-MSA-sixteenth** (`bert-base-arabic-camelbert-msa-sixteenth`), a model pre-trained on a sixteenth of the full MSA dataset. ||Model|Variant|Size|#Word| |-|-|:-:|-:|-:| ||`bert-base-arabic-camelbert-mix`|CA,DA,MSA|167GB|17.3B| ||`bert-base-arabic-camelbert-ca`|CA|6GB|847M| ||`bert-base-arabic-camelbert-da`|DA|54GB|5.8B| ||`bert-base-arabic-camelbert-msa`|MSA|107GB|12.6B| ||`bert-base-arabic-camelbert-msa-half`|MSA|53GB|6.3B| ||`bert-base-arabic-camelbert-msa-quarter`|MSA|27GB|3.1B| ||`bert-base-arabic-camelbert-msa-eighth`|MSA|14GB|1.6B| |✔|`bert-base-arabic-camelbert-msa-sixteenth`|MSA|6GB|746M| ## Intended uses You can use the released model for either masked language modeling or next sentence prediction. However, it is mostly intended to be fine-tuned on an NLP task, such as NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. We release our fine-tuninig code [here](https://github.com/CAMeL-Lab/CAMeLBERT). #### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth') >>> unmasker("الهدف من الحياة هو [MASK] .") [{'sequence': '[CLS] الهدف من الحياة هو التغيير. [SEP]', 'score': 0.08320745080709457, 'token': 7946, 'token_str': 'التغيير'}, {'sequence': '[CLS] الهدف من الحياة هو التعلم. [SEP]', 'score': 0.04305094853043556, 'token': 12554, 'token_str': 'التعلم'}, {'sequence': '[CLS] الهدف من الحياة هو العمل. [SEP]', 'score': 0.0417640283703804, 'token': 2854, 'token_str': 'العمل'}, {'sequence': '[CLS] الهدف من الحياة هو الحياة. [SEP]', 'score': 0.041371218860149384, 'token': 3696, 'token_str': 'الحياة'}, {'sequence': '[CLS] الهدف من الحياة هو المعرفة. [SEP]', 'score': 0.039794355630874634, 'token': 7344, 'token_str': 'المعرفة'}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth') model = AutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth') text = "مرحبا يا عالم." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AutoTokenizer, TFAutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth') model = TFAutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth') text = "مرحبا يا عالم." 
encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Training data - MSA (Modern Standard Arabic) - [The Arabic Gigaword Fifth Edition](https://catalog.ldc.upenn.edu/LDC2011T11) - [Abu El-Khair Corpus](http://www.abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus) - [OSIAN corpus](https://vlo.clarin.eu/search;jsessionid=31066390B2C9E8C6304845BA79869AC1?1&q=osian) - [Arabic Wikipedia](https://archive.org/details/arwiki-20190201) - The unshuffled version of the Arabic [OSCAR corpus](https://oscar-corpus.com/) ## Training procedure We use [the original implementation](https://github.com/google-research/bert) released by Google for pre-training. We follow the original English BERT model's hyperparameters for pre-training, unless otherwise specified. ### Preprocessing - After extracting the raw text from each corpus, we apply the following pre-processing. - We first remove invalid characters and normalize white spaces using the utilities provided by [the original BERT implementation](https://github.com/google-research/bert/blob/eedf5716ce1268e56f0a50264a88cafad334ac61/tokenization.py#L286-L297). - We also remove lines without any Arabic characters. - We then remove diacritics and kashida using [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools). - Finally, we split each line into sentences with a heuristics-based sentence segmenter. - We train a WordPiece tokenizer on the entire dataset (167 GB text) with a vocabulary size of 30,000 using [HuggingFace's tokenizers](https://github.com/huggingface/tokenizers). - We do not lowercase letters nor strip accents. ### Pre-training - The model was trained on a single cloud TPU (`v3-8`) for one million steps in total. - The first 90,000 steps were trained with a batch size of 1,024 and the rest was trained with a batch size of 256. - The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. - We use whole word masking and a duplicate factor of 10. - We set max predictions per sequence to 20 for the dataset with max sequence length of 128 tokens and 80 for the dataset with max sequence length of 512 tokens. - We use a random seed of 12345, masked language model probability of 0.15, and short sequence probability of 0.1. - The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after. ## Evaluation results - We evaluate our pre-trained language models on five NLP tasks: NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. - We fine-tune and evaluate the models using 12 dataset. - We used Hugging Face's transformers to fine-tune our CAMeLBERT models. - We used transformers `v3.1.0` along with PyTorch `v1.5.1`. - The fine-tuning was done by adding a fully connected linear layer to the last hidden state. - We use \\(F_{1}\\) score as a metric for all tasks. - Code used for fine-tuning is available [here](https://github.com/CAMeL-Lab/CAMeLBERT). 
### Results | Task | Dataset | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | --------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | NER | ANERcorp | MSA | 80.8% | 67.9% | 74.1% | 82.4% | 82.0% | 82.1% | 82.6% | 80.8% | | POS | PATB (MSA) | MSA | 98.1% | 97.8% | 97.7% | 98.3% | 98.2% | 98.3% | 98.2% | 98.2% | | | ARZTB (EGY) | DA | 93.6% | 92.3% | 92.7% | 93.6% | 93.6% | 93.7% | 93.6% | 93.6% | | | Gumar (GLF) | DA | 97.3% | 97.7% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | | SA | ASTD | MSA | 76.3% | 69.4% | 74.6% | 76.9% | 76.0% | 76.8% | 76.7% | 75.3% | | | ArSAS | MSA | 92.7% | 89.4% | 91.8% | 93.0% | 92.6% | 92.5% | 92.5% | 92.3% | | | SemEval | MSA | 69.0% | 58.5% | 68.4% | 72.1% | 70.7% | 72.8% | 71.6% | 71.2% | | DID | MADAR-26 | DA | 62.9% | 61.9% | 61.8% | 62.6% | 62.0% | 62.8% | 62.0% | 62.2% | | | MADAR-6 | DA | 92.5% | 91.5% | 92.2% | 91.9% | 91.8% | 92.2% | 92.1% | 92.0% | | | MADAR-Twitter-5 | MSA | 75.7% | 71.4% | 74.2% | 77.6% | 78.5% | 77.3% | 77.7% | 76.2% | | | NADI | DA | 24.7% | 17.3% | 20.1% | 24.9% | 24.6% | 24.6% | 24.9% | 23.8% | | Poetry | APCD | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | ### Results (Average) | | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | Variant-wise-average<sup>[[1]](#footnote-1)</sup> | MSA | 82.1% | 75.7% | 80.1% | 83.4% | 83.0% | 83.3% | 83.2% | 82.3% | | | DA | 74.4% | 72.1% | 72.9% | 74.2% | 74.0% | 74.3% | 74.1% | 73.9% | | | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | | Macro-Average | ALL | 78.7% | 74.7% | 77.1% | 79.2% | 79.0% | 79.2% | 79.1% | 78.6% | <a name="footnote-1">[1]</a>: Variant-wise-average refers to average over a group of tasks in the same language variant. ## Acknowledgements This research was supported with Cloud TPUs from Google’s TensorFlow Research Cloud (TFRC). ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
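The preprocessing section above mentions removing diacritics and kashida before training. The snippet below is only a rough approximation of that step for illustration: the original pipeline used CAMeL Tools, whereas this sketch strips just the common harakat range (U+064B–U+0652) and the tatweel character (U+0640) with plain regular expressions, then normalizes white space.

```python
import re

# Illustrative approximation of the dediacritization step described above.
DIACRITICS = re.compile(r'[\u064B-\u0652]')  # common harakat only
KASHIDA = re.compile(r'\u0640')              # tatweel / kashida

def normalize_line(line: str) -> str:
    line = DIACRITICS.sub('', line)
    line = KASHIDA.sub('', line)
    return re.sub(r'\s+', ' ', line).strip()  # normalize white spaces

print(normalize_line('العَرَبِيَّة ـــ جميلة'))
```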
CAMeL-Lab/bert-base-arabic-camelbert-msa
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,967
"2021-03-11T11:20:49Z"
--- language: - ar license: apache-2.0 widget: - text: "الهدف من الحياة هو [MASK] ." --- # CAMeLBERT: A collection of pre-trained models for Arabic NLP tasks ## Model description **CAMeLBERT** is a collection of BERT models pre-trained on Arabic texts with different sizes and variants. We release pre-trained language models for Modern Standard Arabic (MSA), dialectal Arabic (DA), and classical Arabic (CA), in addition to a model pre-trained on a mix of the three. We also provide additional models that are pre-trained on a scaled-down set of the MSA variant (half, quarter, eighth, and sixteenth). The details are described in the paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* This model card describes **CAMeLBERT-MSA** (`bert-base-arabic-camelbert-msa`), a model pre-trained on the entire MSA dataset. ||Model|Variant|Size|#Word| |-|-|:-:|-:|-:| ||`bert-base-arabic-camelbert-mix`|CA,DA,MSA|167GB|17.3B| ||`bert-base-arabic-camelbert-ca`|CA|6GB|847M| ||`bert-base-arabic-camelbert-da`|DA|54GB|5.8B| |✔|`bert-base-arabic-camelbert-msa`|MSA|107GB|12.6B| ||`bert-base-arabic-camelbert-msa-half`|MSA|53GB|6.3B| ||`bert-base-arabic-camelbert-msa-quarter`|MSA|27GB|3.1B| ||`bert-base-arabic-camelbert-msa-eighth`|MSA|14GB|1.6B| ||`bert-base-arabic-camelbert-msa-sixteenth`|MSA|6GB|746M| ## Intended uses You can use the released model for either masked language modeling or next sentence prediction. However, it is mostly intended to be fine-tuned on an NLP task, such as NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. We release our fine-tuninig code [here](https://github.com/CAMeL-Lab/CAMeLBERT). #### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='CAMeL-Lab/bert-base-arabic-camelbert-msa') >>> unmasker("الهدف من الحياة هو [MASK] .") [{'sequence': '[CLS] الهدف من الحياة هو العمل. [SEP]', 'score': 0.08507660031318665, 'token': 2854, 'token_str': 'العمل'}, {'sequence': '[CLS] الهدف من الحياة هو الحياة. [SEP]', 'score': 0.058905381709337234, 'token': 3696, 'token_str': 'الحياة'}, {'sequence': '[CLS] الهدف من الحياة هو النجاح. [SEP]', 'score': 0.04660581797361374, 'token': 6232, 'token_str': 'النجاح'}, {'sequence': '[CLS] الهدف من الحياة هو الربح. [SEP]', 'score': 0.04156001657247543, 'token': 12413, 'token_str': 'الربح'}, {'sequence': '[CLS] الهدف من الحياة هو الحب. [SEP]', 'score': 0.03534102067351341, 'token': 3088, 'token_str': 'الحب'}] ``` *Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually. Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa') model = AutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa') text = "مرحبا يا عالم." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AutoTokenizer, TFAutoModel tokenizer = AutoTokenizer.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa') model = TFAutoModel.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-msa') text = "مرحبا يا عالم." 
encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Training data - MSA (Modern Standard Arabic) - [The Arabic Gigaword Fifth Edition](https://catalog.ldc.upenn.edu/LDC2011T11) - [Abu El-Khair Corpus](http://www.abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus) - [OSIAN corpus](https://vlo.clarin.eu/search;jsessionid=31066390B2C9E8C6304845BA79869AC1?1&q=osian) - [Arabic Wikipedia](https://archive.org/details/arwiki-20190201) - The unshuffled version of the Arabic [OSCAR corpus](https://oscar-corpus.com/) ## Training procedure We use [the original implementation](https://github.com/google-research/bert) released by Google for pre-training. We follow the original English BERT model's hyperparameters for pre-training, unless otherwise specified. ### Preprocessing - After extracting the raw text from each corpus, we apply the following pre-processing. - We first remove invalid characters and normalize white spaces using the utilities provided by [the original BERT implementation](https://github.com/google-research/bert/blob/eedf5716ce1268e56f0a50264a88cafad334ac61/tokenization.py#L286-L297). - We also remove lines without any Arabic characters. - We then remove diacritics and kashida using [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools). - Finally, we split each line into sentences with a heuristics-based sentence segmenter. - We train a WordPiece tokenizer on the entire dataset (167 GB text) with a vocabulary size of 30,000 using [HuggingFace's tokenizers](https://github.com/huggingface/tokenizers). - We do not lowercase letters nor strip accents. ### Pre-training - The model was trained on a single cloud TPU (`v3-8`) for one million steps in total. - The first 90,000 steps were trained with a batch size of 1,024 and the rest was trained with a batch size of 256. - The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. - We use whole word masking and a duplicate factor of 10. - We set max predictions per sequence to 20 for the dataset with max sequence length of 128 tokens and 80 for the dataset with max sequence length of 512 tokens. - We use a random seed of 12345, masked language model probability of 0.15, and short sequence probability of 0.1. - The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after. ## Evaluation results - We evaluate our pre-trained language models on five NLP tasks: NER, POS tagging, sentiment analysis, dialect identification, and poetry classification. - We fine-tune and evaluate the models using 12 dataset. - We used Hugging Face's transformers to fine-tune our CAMeLBERT models. - We used transformers `v3.1.0` along with PyTorch `v1.5.1`. - The fine-tuning was done by adding a fully connected linear layer to the last hidden state. - We use \\(F_{1}\\) score as a metric for all tasks. - Code used for fine-tuning is available [here](https://github.com/CAMeL-Lab/CAMeLBERT). 
### Results | Task | Dataset | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | --------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | NER | ANERcorp | MSA | 80.8% | 67.9% | 74.1% | 82.4% | 82.0% | 82.1% | 82.6% | 80.8% | | POS | PATB (MSA) | MSA | 98.1% | 97.8% | 97.7% | 98.3% | 98.2% | 98.3% | 98.2% | 98.2% | | | ARZTB (EGY) | DA | 93.6% | 92.3% | 92.7% | 93.6% | 93.6% | 93.7% | 93.6% | 93.6% | | | Gumar (GLF) | DA | 97.3% | 97.7% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | 97.9% | | SA | ASTD | MSA | 76.3% | 69.4% | 74.6% | 76.9% | 76.0% | 76.8% | 76.7% | 75.3% | | | ArSAS | MSA | 92.7% | 89.4% | 91.8% | 93.0% | 92.6% | 92.5% | 92.5% | 92.3% | | | SemEval | MSA | 69.0% | 58.5% | 68.4% | 72.1% | 70.7% | 72.8% | 71.6% | 71.2% | | DID | MADAR-26 | DA | 62.9% | 61.9% | 61.8% | 62.6% | 62.0% | 62.8% | 62.0% | 62.2% | | | MADAR-6 | DA | 92.5% | 91.5% | 92.2% | 91.9% | 91.8% | 92.2% | 92.1% | 92.0% | | | MADAR-Twitter-5 | MSA | 75.7% | 71.4% | 74.2% | 77.6% | 78.5% | 77.3% | 77.7% | 76.2% | | | NADI | DA | 24.7% | 17.3% | 20.1% | 24.9% | 24.6% | 24.6% | 24.9% | 23.8% | | Poetry | APCD | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | ### Results (Average) | | Variant | Mix | CA | DA | MSA | MSA-1/2 | MSA-1/4 | MSA-1/8 | MSA-1/16 | | -------------------- | ------- | ----- | ----- | ----- | ----- | ------- | ------- | ------- | -------- | | Variant-wise-average<sup>[[1]](#footnote-1)</sup> | MSA | 82.1% | 75.7% | 80.1% | 83.4% | 83.0% | 83.3% | 83.2% | 82.3% | | | DA | 74.4% | 72.1% | 72.9% | 74.2% | 74.0% | 74.3% | 74.1% | 73.9% | | | CA | 79.8% | 80.9% | 79.6% | 79.7% | 79.9% | 80.0% | 79.7% | 79.8% | | Macro-Average | ALL | 78.7% | 74.7% | 77.1% | 79.2% | 79.0% | 79.2% | 79.1% | 78.6% | <a name="footnote-1">[1]</a>: Variant-wise-average refers to average over a group of tasks in the same language variant. ## Acknowledgements This research was supported with Cloud TPUs from Google’s TensorFlow Research Cloud (TFRC). ## Citation ```bibtex @inproceedings{inoue-etal-2021-interplay, title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models", author = "Inoue, Go and Alhafni, Bashar and Baimukan, Nurpeiis and Bouamor, Houda and Habash, Nizar", booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop", month = apr, year = "2021", address = "Kyiv, Ukraine (Online)", publisher = "Association for Computational Linguistics", abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.", } ```
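The evaluation section above notes that fine-tuning adds a fully connected linear layer on top of the last hidden state. The sketch below shows what such a head looks like in PyTorch; the three-way label set and the use of the first ([CLS]) position are illustrative choices, not a reproduction of the original fine-tuning code.

```python
import torch
from torch import nn
from transformers import AutoTokenizer, AutoModel

model_id = 'CAMeL-Lab/bert-base-arabic-camelbert-msa'
tokenizer = AutoTokenizer.from_pretrained(model_id)
encoder = AutoModel.from_pretrained(model_id)

num_labels = 3  # e.g. a three-way sentiment task; purely illustrative
classifier = nn.Linear(encoder.config.hidden_size, num_labels)

encoded = tokenizer('مرحبا يا عالم.', return_tensors='pt')
with torch.no_grad():
    hidden = encoder(**encoded).last_hidden_state  # (batch, seq_len, hidden)

# a fully connected layer over the [CLS] position of the last hidden state
logits = classifier(hidden[:, 0, :])
print(logits.shape)  # torch.Size([1, 3])
```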
CAUKiel/JavaBERT-uncased
[ "pytorch", "safetensors", "bert", "fill-mask", "java", "code", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - java - code license: apache-2.0 widget: - text: 'public [MASK] isOdd(Integer num){if (num % 2 == 0) {return "even";} else {return "odd";}}' --- ## JavaBERT A BERT-like model pretrained on Java software code. ### Training Data The model was trained on 2,998,345 Java files retrieved from open source projects on GitHub. A ```bert-base-uncased``` tokenizer is used by this model. ### Training Objective A MLM (Masked Language Model) objective was used to train this model. ### Usage ```python from transformers import pipeline pipe = pipeline('fill-mask', model='CAUKiel/JavaBERT-uncased') output = pipe(CODE) # Replace with Java code; Use '[MASK]' to mask tokens/words in the code. ```
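Since `CODE` above is only a placeholder, a concrete call using the widget example from this card might look as follows; the `top_k` argument assumes a recent `transformers` release.

```python
from transformers import pipeline

pipe = pipeline('fill-mask', model='CAUKiel/JavaBERT-uncased')

# the widget example from this card, with the return type masked
code = 'public [MASK] isOdd(Integer num){if (num % 2 == 0) {return "even";} else {return "odd";}}'

for candidate in pipe(code, top_k=3):
    # each candidate carries a proposed token and its score
    print(candidate['token_str'], round(candidate['score'], 3))
```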
CAUKiel/JavaBERT
[ "pytorch", "safetensors", "bert", "fill-mask", "code", "arxiv:2110.10404", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
388
null
--- language: - code license: apache-2.0 widget: - text: public [MASK] isOdd(Integer num) {if (num % 2 == 0) {return "even";} else {return "odd";}} --- # Model Card for JavaBERT A BERT-like model pretrained on Java software code. # Model Details ## Model Description A BERT-like model pretrained on Java software code. - **Developed by:** Christian-Albrechts-University of Kiel (CAUKiel) - **Shared by [Optional]:** Hugging Face - **Model type:** Fill-Mask - **Language(s) (NLP):** en - **License:** Apache-2.0 - **Related Models:** A version of this model using an uncased tokenizer is available at [CAUKiel/JavaBERT-uncased](https://huggingface.co/CAUKiel/JavaBERT-uncased). - **Parent Model:** BERT - **Resources for more information:** - [Associated Paper](https://arxiv.org/pdf/2110.10404.pdf) # Uses ## Direct Use Fill-Mask ## Downstream Use [Optional] More information needed. ## Out-of-Scope Use The model should not be used to intentionally create hostile or alienating environments for people. # Bias, Risks, and Limitations Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). Predictions generated by the model may include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups. ## Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. # Training Details ## Training Data The model was trained on 2,998,345 Java files retrieved from open source projects on GitHub. A ```bert-base-cased``` tokenizer is used by this model. ## Training Procedure ### Training Objective A MLM (Masked Language Model) objective was used to train this model. ### Preprocessing More information needed. ### Speeds, Sizes, Times More information needed. # Evaluation ## Testing Data, Factors & Metrics ### Testing Data More information needed. ### Factors ### Metrics More information needed. ## Results More information needed. # Model Examination More information needed. # Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** More information needed. - **Hours used:** More information needed. - **Cloud Provider:** More information needed. - **Compute Region:** More information needed. - **Carbon Emitted:** More information needed. # Technical Specifications [optional] ## Model Architecture and Objective More information needed. ## Compute Infrastructure More information needed. ### Hardware More information needed. ### Software More information needed. # Citation **BibTeX:** More information needed. **APA:** More information needed. # Glossary [optional] More information needed. # More Information [optional] More information needed. # Model Card Authors [optional] Christian-Albrechts-University of Kiel (CAUKiel) in collaboration with Ezi Ozoani and the team at Hugging Face # Model Card Contact More information needed. # How to Get Started with the Model Use the code below to get started with the model.
<details> <summary> Click to expand </summary> ```python from transformers import pipeline pipe = pipeline('fill-mask', model='CAUKiel/JavaBERT') output = pipe(CODE) # Replace with Java code; Use '[MASK]' to mask tokens/words in the code. ``` </details>
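Besides fill-mask, a common downstream pattern for a model like this is to reuse its hidden states as code embeddings. The sketch below assumes the checkpoint loads with the generic `AutoModel` class and mean-pools the sub-token vectors; whether such embeddings suit a given downstream task is not evaluated in this card.

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained('CAUKiel/JavaBERT')
model = AutoModel.from_pretrained('CAUKiel/JavaBERT')

code = 'public boolean isOdd(Integer num) { return num % 2 != 0; }'
encoded = tokenizer(code, return_tensors='pt', truncation=True)

with torch.no_grad():
    hidden = model(**encoded).last_hidden_state

# mean-pool the sub-token vectors into a single fixed-size code embedding
embedding = hidden.mean(dim=1)
print(embedding.shape)
```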
CLAck/en-vi
[ "pytorch", "marian", "text2text-generation", "en", "vi", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
"2022-02-12T17:04:02Z"
--- language: - en - vi tags: - translation license: apache-2.0 datasets: - ALT metrics: - sacrebleu --- This is a finetuning of a MarianMT pretrained on English-Chinese. The target language pair is English-Vietnamese. The first phase of training (mixed) is performed on a dataset containing both English-Chinese and English-Vietnamese sentences. The second phase of training (pure) is performed on a dataset containing only English-Vietnamese sentences. ### Example ``` %%capture !pip install transformers transformers[sentencepiece] from transformers import AutoModelForSeq2SeqLM, AutoTokenizer # Download the pretrained model for English-Vietnamese available on the hub model = AutoModelForSeq2SeqLM.from_pretrained("CLAck/en-vi") tokenizer = AutoTokenizer.from_pretrained("CLAck/en-vi") # Download a tokenizer that can tokenize English since the model Tokenizer doesn't know anymore how to do it # We used the one coming from the initial model # This tokenizer is used to tokenize the input sentence tokenizer_en = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-zh') # These special tokens are needed to reproduce the original tokenizer tokenizer_en.add_tokens(["<2zh>", "<2vi>"], special_tokens=True) sentence = "The cat is on the table" # This token is needed to identify the target language input_sentence = "<2vi> " + sentence translated = model.generate(**tokenizer_en(input_sentence, return_tensors="pt", padding=True)) output_sentence = [tokenizer.decode(t, skip_special_tokens=True) for t in translated] ``` ### Training results MIXED | Epoch | Bleu | |:-----:|:-------:| | 1.0 | 26.2407 | | 2.0 | 32.6016 | | 3.0 | 35.4060 | | 4.0 | 36.6737 | | 5.0 | 37.3774 | PURE | Epoch | Bleu | |:-----:|:-------:| | 1.0 | 37.3169 | | 2.0 | 37.4407 | | 3.0 | 37.6696 | | 4.0 | 37.8765 | | 5.0 | 38.0105 |
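The example above translates a single sentence; the same pattern extends to a batch, and decoding options such as beam search can be passed to `generate`. The beam size and maximum length below are arbitrary illustrations, not the settings used to obtain the BLEU scores above.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model = AutoModelForSeq2SeqLM.from_pretrained("CLAck/en-vi")
tokenizer = AutoTokenizer.from_pretrained("CLAck/en-vi")

# English tokenizer from the initial model, as in the example above
tokenizer_en = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-zh')
tokenizer_en.add_tokens(["<2zh>", "<2vi>"], special_tokens=True)

sentences = ["The cat is on the table", "I like coffee"]
batch = ["<2vi> " + s for s in sentences]   # prepend the target-language token

encoded = tokenizer_en(batch, return_tensors="pt", padding=True)
translated = model.generate(**encoded, num_beams=5, max_length=128)
print([tokenizer.decode(t, skip_special_tokens=True) for t in translated])
```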
CLAck/indo-pure
[ "pytorch", "marian", "text2text-generation", "en", "id", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
"2022-02-02T09:13:34Z"
--- language: - en - id tags: - translation license: apache-2.0 datasets: - ALT metrics: - sacrebleu --- This is a pure fine-tuning of a MarianMT model pretrained on English-Chinese (en-zh). The target language pair is English-Indonesian. ### Example ``` %%capture !pip install transformers transformers[sentencepiece] from transformers import AutoModelForSeq2SeqLM, AutoTokenizer # Download the pretrained model for English-Indonesian available on the hub model = AutoModelForSeq2SeqLM.from_pretrained("CLAck/indo-pure") tokenizer = AutoTokenizer.from_pretrained("CLAck/indo-pure") # Download a tokenizer that can tokenize English since the model Tokenizer doesn't know anymore how to do it # We used the one coming from the initial model # This tokenizer is used to tokenize the input sentence tokenizer_en = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-zh') # These special tokens are needed to reproduce the original tokenizer tokenizer_en.add_tokens(["<2zh>", "<2indo>"], special_tokens=True) sentence = "The cat is on the table" # This token is needed to identify the target language input_sentence = "<2indo> " + sentence translated = model.generate(**tokenizer_en(input_sentence, return_tensors="pt", padding=True)) output_sentence = [tokenizer.decode(t, skip_special_tokens=True) for t in translated] ``` ### Training results | Epoch | Bleu | |:-----:|:-------:| | 1.0 | 15.9336 | | 2.0 | 28.0175 | | 3.0 | 31.6603 | | 4.0 | 33.9151 | | 5.0 | 35.0472 | | 6.0 | 35.8469 | | 7.0 | 36.1180 | | 8.0 | 36.6018 | | 9.0 | 37.1973 | | 10.0 | 37.2738 |
CLAck/vi-en
[ "pytorch", "marian", "text2text-generation", "en", "vi", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
"2022-02-12T16:50:15Z"
--- language: - en - vi tags: - translation license: apache-2.0 datasets: - ALT metrics: - sacrebleu --- This is a finetuning of a MarianMT pretrained on Chinese-English. The target language pair is Vietnamese-English. ### Example ``` %%capture !pip install transformers transformers[sentencepiece] from transformers import AutoModelForSeq2SeqLM, AutoTokenizer # Download the pretrained model for Vietnamese-English available on the hub model = AutoModelForSeq2SeqLM.from_pretrained("CLAck/vi-en") tokenizer = AutoTokenizer.from_pretrained("CLAck/vi-en") sentence = your_vietnamese_sentence # This token is needed to identify the source language input_sentence = "<2vi> " + sentence translated = model.generate(**tokenizer(input_sentence, return_tensors="pt", padding=True)) output_sentence = [tokenizer.decode(t, skip_special_tokens=True) for t in translated] ``` ### Training results | Epoch | Bleu | |:-----:|:-------:| | 1.0 | 21.3180 | | 2.0 | 26.8012 | | 3.0 | 29.3578 | | 4.0 | 31.5178 | | 5.0 | 32.8740 |
CLTL/gm-ner-xlmrbase
[ "pytorch", "tf", "xlm-roberta", "token-classification", "nl", "transformers", "dighum", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
"2021-10-27T11:16:42Z"
--- language: nl license: apache-2.0 tags: - dighum pipeline_tag: token-classification --- # Early-modern Dutch NER (General Letters) ## Description This is a fine-tuned NER model for early-modern Dutch United East India Company (VOC) letters based on XLM-R_base [(Conneau et al., 2020)](https://aclanthology.org/2020.acl-main.747/). The model identifies *locations*, *persons*, *organisations*, but also *ships* as well as derived forms of locations and religions. ## Intended uses and limitations This model was fine-tuned (trained, validated and tested) on a single source of data, the General Letters (Generale Missiven). These letters span a large variety of Dutch, as they cover the largest part of the 17th and 18th centuries, and have been extended with editorial notes between 1960 and 2017. As the model was only fine-tuned on this data however, it may perform less well on other texts from the same period. ## How to use The model can run on raw text through the *token-classification* pipeline: ``` from transformers import AutoTokenizer, AutoModelForTokenClassification from transformers import pipeline tokenizer = AutoTokenizer.from_pretrained("CLTL/gm-ner-xlmrbase") model = AutoModelForTokenClassification.from_pretrained("CLTL/gm-ner-xlmrbase") nlp = pipeline("ner", model=model, tokenizer=tokenizer) example = "Batavia heeft om advies gevraagd." ner_results = nlp(example) print(ner_results) ``` This outputs a list of entities with their character offsets in the input text: ``` [{'entity': 'B-LOC', 'score': 0.99739265, 'index': 1, 'word': '▁Bata', 'start': 0, 'end': 4}, {'entity': 'I-LOC', 'score': 0.5373179, 'index': 2, 'word': 'via', 'start': 4, 'end': 7}] ``` ## Training data and tagset The model was fine-tuned on the General Letters [GM-NER](https://github.com/cltl/voc-missives/tree/master/data/ner/datasplit_all_standard) dataset, with the following tagset: | tag | description | notes | | --- | ----------- | ----- | | LOC | locations | | | LOCderiv | derived forms of locations | by derivation, e.g. *Bandanezen*, or composition, e.g. *Javakoffie* | | ORG | organisations | includes forms derived by composition, e.g. *Compagnieszaken* | PER | persons | | RELderiv | forms related to religion | merges religion names (*Christendom*), derived forms (*christenen*) and composed forms (*Christen-orangkay*) | | SHP | ships | The base text for this dataset is OCR text that has been partially corrected. The text is clean overall but errors remain. ## Training procedure The model was fine-tuned with [xlm-roberta-base](https://huggingface.co/xlm-roberta-base), using [this script](https://github.com/huggingface/transformers/blob/master/examples/legacy/token-classification/run_ner.py). Non-default training parameters are: * training batch size: 16 * max sequence length: 256 * number of epochs: 4 -- loading the best checkpoint model by loss at the end, with checkpoints every 200 steps * (seed: 1) ## Evaluation ### Metric * entity-level F1 ### Results | overall | 92.7 | | --- | ----------- | | LOC | 95.8 | | LOCderiv | 92.7 | | ORG | 92.5 | | PER | 86.2 | | RELderiv | 90.7 | | SHP | 81.6 | ## Reference The model and fine-tuning data presented here were developed as part of: ```bibtex @inproceedings{arnoult-etal-2021-batavia, title = "Batavia asked for advice. Pretrained language models for Named Entity Recognition in historical texts.", author = "Arnoult, Sophie I. 
and Petram, Lodewijk and Vossen, Piek", booktitle = "Proceedings of the 5th Joint SIGHUM Workshop on Computational Linguistics for Cultural Heritage, Social Sciences, Humanities and Literature", month = nov, year = "2021", address = "Punta Cana, Dominican Republic (online)", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2021.latechclfl-1.3", pages = "21--30" } ```
CLTL/icf-domains
[ "pytorch", "roberta", "nl", "transformers", "license:mit", "text-classification" ]
text-classification
{ "architectures": [ "RobertaForMultiLabelSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
--- language: nl license: mit pipeline_tag: text-classification inference: false --- # A-PROOF ICF-domains Classification ## Description A fine-tuned multi-label classification model that detects 9 [WHO-ICF](https://www.who.int/standards/classifications/international-classification-of-functioning-disability-and-health) domains in clinical text in Dutch. The model is based on a pre-trained Dutch medical language model ([link to be added]()), a RoBERTa model, trained from scratch on clinical notes of the Amsterdam UMC. ## ICF domains The model can detect 9 domains, which were chosen due to their relevance to recovery from COVID-19: ICF code | Domain | name in repo ---|---|--- b440 | Respiration functions | ADM b140 | Attention functions | ATT d840-d859 | Work and employment | BER b1300 | Energy level | ENR d550 | Eating | ETN d450 | Walking | FAC b455 | Exercise tolerance functions | INS b530 | Weight maintenance functions | MBW b152 | Emotional functions | STM ## Intended uses and limitations - The model was fine-tuned (trained, validated and tested) on medical records from the Amsterdam UMC (the two academic medical centers of Amsterdam). It might perform differently on text from a different hospital or text from non-hospital sources (e.g. GP records). - The model was fine-tuned with the [Simple Transformers](https://simpletransformers.ai/) library. This library is based on Transformers but the model cannot be used directly with Transformers `pipeline` and classes; doing so would generate incorrect outputs. For this reason, the API on this page is disabled. ## How to use To generate predictions with the model, use the [Simple Transformers](https://simpletransformers.ai/) library: ``` from simpletransformers.classification import MultiLabelClassificationModel model = MultiLabelClassificationModel( 'roberta', 'CLTL/icf-domains', use_cuda=False, ) example = 'Nu sinds 5-6 dagen progressieve benauwdheidsklachten (bij korte stukken lopen al kortademig), terwijl dit eerder niet zo was.' predictions, raw_outputs = model.predict([example]) ``` The predictions look like this: ``` [[1, 0, 0, 0, 0, 1, 1, 0, 0]] ``` The indices of the multi-label stand for: ``` [ADM, ATT, BER, ENR, ETN, FAC, INS, MBW, STM] ``` In other words, the above prediction corresponds to assigning the labels ADM, FAC and INS to the example sentence. The raw outputs look like this: ``` [[0.51907885 0.00268032 0.0030862 0.03066113 0.00616694 0.64720929 0.67348498 0.0118863 0.0046311 ]] ``` For this model, the threshold at which the prediction for a label flips from 0 to 1 is **0.5**. ## Training data - The training data consists of clinical notes from medical records (in Dutch) of the Amsterdam UMC. Due to privacy constraints, the data cannot be released. - The annotation guidelines used for the project can be found [here](https://github.com/cltl/a-proof-zonmw/tree/main/resources/annotation_guidelines). ## Training procedure The default training parameters of Simple Transformers were used, including: - Optimizer: AdamW - Learning rate: 4e-5 - Num train epochs: 1 - Train batch size: 8 - Threshold: 0.5 ## Evaluation results The evaluation is done on a sentence-level (the classification unit) and on a note-level (the aggregated unit which is meaningful for the healthcare professionals). 
### Sentence-level | | ADM | ATT | BER | ENR | ETN | FAC | INS | MBW | STM |---|---|---|---|---|---|---|---|---|--- precision | 0.98 | 0.98 | 0.56 | 0.96 | 0.92 | 0.84 | 0.89 | 0.79 | 0.70 recall | 0.49 | 0.41 | 0.29 | 0.57 | 0.49 | 0.71 | 0.26 | 0.62 | 0.75 F1-score | 0.66 | 0.58 | 0.35 | 0.72 | 0.63 | 0.76 | 0.41 | 0.70 | 0.72 support | 775 | 39 | 54 | 160 | 382 | 253 | 287 | 125 | 181 ### Note-level | | ADM | ATT | BER | ENR | ETN | FAC | INS | MBW | STM |---|---|---|---|---|---|---|---|---|--- precision | 1.0 | 1.0 | 0.66 | 0.96 | 0.95 | 0.84 | 0.95 | 0.87 | 0.80 recall | 0.89 | 0.56 | 0.44 | 0.70 | 0.72 | 0.89 | 0.46 | 0.87 | 0.87 F1-score | 0.94 | 0.71 | 0.50 | 0.81 | 0.82 | 0.86 | 0.61 | 0.87 | 0.84 support | 231 | 27 | 34 | 92 | 165 | 95 | 116 | 64 | 94 ## Authors and references ### Authors Jenia Kim, Piek Vossen ### References TBD
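A minimal sketch of turning the raw outputs into label names, reusing the label order and the 0.5 threshold documented in the usage section of this card (the expected result comes from the card's own example):

```python
import numpy as np
from simpletransformers.classification import MultiLabelClassificationModel

# Label order as documented in this card
LABELS = ["ADM", "ATT", "BER", "ENR", "ETN", "FAC", "INS", "MBW", "STM"]

model = MultiLabelClassificationModel("roberta", "CLTL/icf-domains", use_cuda=False)

example = "Nu sinds 5-6 dagen progressieve benauwdheidsklachten (bij korte stukken lopen al kortademig), terwijl dit eerder niet zo was."
_, raw_outputs = model.predict([example])

# Apply the documented 0.5 threshold and map indices to domain names
scores = np.asarray(raw_outputs)[0]
predicted_domains = [label for label, score in zip(LABELS, scores) if score >= 0.5]
print(predicted_domains)  # expected: ['ADM', 'FAC', 'INS']
```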
CLTL/icf-levels-att
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- language: nl license: mit pipeline_tag: text-classification inference: false --- # Regression Model for Attention Functioning Levels (ICF b140) ## Description A fine-tuned regression model that assigns a functioning level to Dutch sentences describing attention functions. The model is based on a pre-trained Dutch medical language model ([link to be added]()): a RoBERTa model, trained from scratch on clinical notes of the Amsterdam UMC. To detect sentences about attention functions in clinical text in Dutch, use the [icf-domains](https://huggingface.co/CLTL/icf-domains) classification model. ## Functioning levels Level | Meaning ---|--- 4 | No problem with concentrating / directing / holding / dividing attention. 3 | Slight problem with concentrating / directing / holding / dividing attention for a longer period of time or for complex tasks. 2 | Can concentrate / direct / hold / divide attention only for a short time. 1 | Can barely concentrate / direct / hold / divide attention. 0 | Unable to concentrate / direct / hold / divide attention. The predictions generated by the model might sometimes be outside of the scale (e.g. 4.2); this is normal in a regression model. ## Intended uses and limitations - The model was fine-tuned (trained, validated and tested) on medical records from the Amsterdam UMC (the two academic medical centers of Amsterdam). It might perform differently on text from a different hospital or text from non-hospital sources (e.g. GP records). - The model was fine-tuned with the [Simple Transformers](https://simpletransformers.ai/) library. This library is based on Transformers but the model cannot be used directly with Transformers `pipeline` and classes; doing so would generate incorrect outputs. For this reason, the API on this page is disabled. ## How to use To generate predictions with the model, use the [Simple Transformers](https://simpletransformers.ai/) library: ``` from simpletransformers.classification import ClassificationModel model = ClassificationModel( 'roberta', 'CLTL/icf-levels-att', use_cuda=False, ) example = 'Snel afgeleid, moeite aandacht te behouden.' _, raw_outputs = model.predict([example]) predictions = np.squeeze(raw_outputs) ``` The prediction on the example is: ``` 2.89 ``` The raw outputs look like this: ``` [[2.89226103]] ``` ## Training data - The training data consists of clinical notes from medical records (in Dutch) of the Amsterdam UMC. Due to privacy constraints, the data cannot be released. - The annotation guidelines used for the project can be found [here](https://github.com/cltl/a-proof-zonmw/tree/main/resources/annotation_guidelines). ## Training procedure The default training parameters of Simple Transformers were used, including: - Optimizer: AdamW - Learning rate: 4e-5 - Num train epochs: 1 - Train batch size: 8 ## Evaluation results The evaluation is done on a sentence-level (the classification unit) and on a note-level (the aggregated unit which is meaningful for the healthcare professionals). | | Sentence-level | Note-level |---|---|--- mean absolute error | 0.99 | 1.03 mean squared error | 1.35 | 1.47 root mean squared error | 1.16 | 1.21 ## Authors and references ### Authors Jenia Kim, Piek Vossen ### References TBD
CLTL/icf-levels-ber
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
"2021-10-06T12:23:03Z"
--- language: nl license: mit pipeline_tag: text-classification inference: false --- # Regression Model for Work and Employment Functioning Levels (ICF d840-d859) ## Description A fine-tuned regression model that assigns a functioning level to Dutch sentences describing work and employment functions. The model is based on a pre-trained Dutch medical language model ([link to be added]()): a RoBERTa model, trained from scratch on clinical notes of the Amsterdam UMC. To detect sentences about work and employment functions in clinical text in Dutch, use the [icf-domains](https://huggingface.co/CLTL/icf-domains) classification model. ## Functioning levels Level | Meaning ---|--- 4 | Can work/study fully (like when healthy). 3 | Can work/study almost fully. 2 | Can work/study only for about 50\%, or can only work at home and cannot go to school / office. 1 | Work/study is severely limited. 0 | Cannot work/study. The predictions generated by the model might sometimes be outside of the scale (e.g. 4.2); this is normal in a regression model. ## Intended uses and limitations - The model was fine-tuned (trained, validated and tested) on medical records from the Amsterdam UMC (the two academic medical centers of Amsterdam). It might perform differently on text from a different hospital or text from non-hospital sources (e.g. GP records). - The model was fine-tuned with the [Simple Transformers](https://simpletransformers.ai/) library. This library is based on Transformers but the model cannot be used directly with Transformers `pipeline` and classes; doing so would generate incorrect outputs. For this reason, the API on this page is disabled. ## How to use To generate predictions with the model, use the [Simple Transformers](https://simpletransformers.ai/) library: ``` from simpletransformers.classification import ClassificationModel model = ClassificationModel( 'roberta', 'CLTL/icf-levels-ber', use_cuda=False, ) example = 'Fysiek zwaar werk is niet mogelijk, maar administrative taken zou zij wel aan moeten kunnen.' _, raw_outputs = model.predict([example]) predictions = np.squeeze(raw_outputs) ``` The prediction on the example is: ``` 2.41 ``` The raw outputs look like this: ``` [[2.40793037]] ``` ## Training data - The training data consists of clinical notes from medical records (in Dutch) of the Amsterdam UMC. Due to privacy constraints, the data cannot be released. - The annotation guidelines used for the project can be found [here](https://github.com/cltl/a-proof-zonmw/tree/main/resources/annotation_guidelines). ## Training procedure The default training parameters of Simple Transformers were used, including: - Optimizer: AdamW - Learning rate: 4e-5 - Num train epochs: 1 - Train batch size: 8 ## Evaluation results The evaluation is done on a sentence-level (the classification unit) and on a note-level (the aggregated unit which is meaningful for the healthcare professionals). | | Sentence-level | Note-level |---|---|--- mean absolute error | 1.56 | 1.49 mean squared error | 3.06 | 2.85 root mean squared error | 1.75 | 1.69 ## Authors and references ### Authors Jenia Kim, Piek Vossen ### References TBD
CLTL/icf-levels-fac
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
"2021-10-06T12:23:36Z"
--- language: nl license: mit pipeline_tag: text-classification inference: false --- # Regression Model for Walking Functioning Levels (ICF d550) ## Description A fine-tuned regression model that assigns a functioning level to Dutch sentences describing walking functions. The model is based on a pre-trained Dutch medical language model ([link to be added]()): a RoBERTa model, trained from scratch on clinical notes of the Amsterdam UMC. To detect sentences about walking functions in clinical text in Dutch, use the [icf-domains](https://huggingface.co/CLTL/icf-domains) classification model. ## Functioning levels Level | Meaning ---|--- 5 | Patient can walk independently anywhere: level surface, uneven surface, slopes, stairs. 4 | Patient can walk independently on level surface but requires help on stairs, inclines, uneven surface; or, patient can walk independently, but the walking is not fully normal. 3 | Patient requires verbal supervision for walking, without physical contact. 2 | Patient needs continuous or intermittent support of one person to help with balance and coordination. 1 | Patient needs firm continuous support from one person who helps carrying weight and with balance. 0 | Patient cannot walk or needs help from two or more people; or, patient walks on a treadmill. The predictions generated by the model might sometimes be outside of the scale (e.g. 5.2); this is normal in a regression model. ## Intended uses and limitations - The model was fine-tuned (trained, validated and tested) on medical records from the Amsterdam UMC (the two academic medical centers of Amsterdam). It might perform differently on text from a different hospital or text from non-hospital sources (e.g. GP records). - The model was fine-tuned with the [Simple Transformers](https://simpletransformers.ai/) library. This library is based on Transformers but the model cannot be used directly with Transformers `pipeline` and classes; doing so would generate incorrect outputs. For this reason, the API on this page is disabled. ## How to use To generate predictions with the model, use the [Simple Transformers](https://simpletransformers.ai/) library: ``` from simpletransformers.classification import ClassificationModel model = ClassificationModel( 'roberta', 'CLTL/icf-levels-fac', use_cuda=False, ) example = 'kan nog goed traplopen, maar flink ingeleverd aan conditie na Corona' _, raw_outputs = model.predict([example]) predictions = np.squeeze(raw_outputs) ``` The prediction on the example is: ``` 4.2 ``` The raw outputs look like this: ``` [[4.20903111]] ``` ## Training data - The training data consists of clinical notes from medical records (in Dutch) of the Amsterdam UMC. Due to privacy constraints, the data cannot be released. - The annotation guidelines used for the project can be found [here](https://github.com/cltl/a-proof-zonmw/tree/main/resources/annotation_guidelines). ## Training procedure The default training parameters of Simple Transformers were used, including: - Optimizer: AdamW - Learning rate: 4e-5 - Num train epochs: 1 - Train batch size: 8 ## Evaluation results The evaluation is done on a sentence-level (the classification unit) and on a note-level (the aggregated unit which is meaningful for the healthcare professionals). | | Sentence-level | Note-level |---|---|--- mean absolute error | 0.70 | 0.66 mean squared error | 0.91 | 0.93 root mean squared error | 0.95 | 0.96 ## Authors and references ### Authors Jenia Kim, Piek Vossen ### References TBD
CLTL/icf-levels-mbw
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- language: nl license: mit pipeline_tag: text-classification inference: false --- # Regression Model for Weight Maintenance Functioning Levels (ICF b530) ## Description A fine-tuned regression model that assigns a functioning level to Dutch sentences describing weight maintenance functions. The model is based on a pre-trained Dutch medical language model ([link to be added]()): a RoBERTa model, trained from scratch on clinical notes of the Amsterdam UMC. To detect sentences about weight maintenance functions in clinical text in Dutch, use the [icf-domains](https://huggingface.co/CLTL/icf-domains) classification model. ## Functioning levels Level | Meaning ---|--- 4 | Healthy weight, no unintentional weight loss or gain, SNAQ 0 or 1. 3 | Some unintentional weight loss or gain, or lost a lot of weight but gained some of it back afterwards. 2 | Moderate unintentional weight loss or gain (more than 3 kg in the last month), SNAQ 2. 1 | Severe unintentional weight loss or gain (more than 6 kg in the last 6 months), SNAQ &ge; 3. 0 | Severe unintentional weight loss or gain (more than 6 kg in the last 6 months) and admitted to ICU. The predictions generated by the model might sometimes be outside of the scale (e.g. 4.2); this is normal in a regression model. ## Intended uses and limitations - The model was fine-tuned (trained, validated and tested) on medical records from the Amsterdam UMC (the two academic medical centers of Amsterdam). It might perform differently on text from a different hospital or text from non-hospital sources (e.g. GP records). - The model was fine-tuned with the [Simple Transformers](https://simpletransformers.ai/) library. This library is based on Transformers but the model cannot be used directly with Transformers `pipeline` and classes; doing so would generate incorrect outputs. For this reason, the API on this page is disabled. ## How to use To generate predictions with the model, use the [Simple Transformers](https://simpletransformers.ai/) library: ``` from simpletransformers.classification import ClassificationModel model = ClassificationModel( 'roberta', 'CLTL/icf-levels-mbw', use_cuda=False, ) example = 'Tijdens opname >10 kg afgevallen.' _, raw_outputs = model.predict([example]) predictions = np.squeeze(raw_outputs) ``` The prediction on the example is: ``` 1.95 ``` The raw outputs look like this: ``` [[1.95429301]] ``` ## Training data - The training data consists of clinical notes from medical records (in Dutch) of the Amsterdam UMC. Due to privacy constraints, the data cannot be released. - The annotation guidelines used for the project can be found [here](https://github.com/cltl/a-proof-zonmw/tree/main/resources/annotation_guidelines). ## Training procedure The default training parameters of Simple Transformers were used, including: - Optimizer: AdamW - Learning rate: 4e-5 - Num train epochs: 1 - Train batch size: 8 ## Evaluation results The evaluation is done on a sentence-level (the classification unit) and on a note-level (the aggregated unit which is meaningful for the healthcare professionals). | | Sentence-level | Note-level |---|---|--- mean absolute error | 0.81 | 0.60 mean squared error | 0.83 | 0.56 root mean squared error | 0.91 | 0.75 ## Authors and references ### Authors Jenia Kim, Piek Vossen ### References TBD
Canadiancaleb/DialoGPT-small-jesse
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - conversational --- # Jesse (Breaking Bad) DialoGPT Model
Captain-1337/CrudeBERT
[ "pytorch", "bert", "text-classification", "arxiv:1908.10063", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
# Master Thesis ## Predictive Value of Sentiment Analysis from Headlines for Crude Oil Prices ### Understanding and Exploiting Deep Learning-based Sentiment Analysis from News Headlines for Predicting Price Movements of WTI Crude Oil This thesis focuses on researching and developing state-of-the-art sentiment analysis methods that can provide a useful quantification of news for assessing future price movements of crude oil. CrudeBERT is a pre-trained NLP model for analyzing the sentiment of news headlines relevant to crude oil. It was developed by fine-tuning [FinBERT: Financial Sentiment Analysis with Pre-trained Language Models](https://arxiv.org/pdf/1908.10063.pdf). ![CrudeBERT comparison_white_2](https://user-images.githubusercontent.com/42164041/135273552-4a9c4457-70e4-48d0-ac97-169daefab79e.png) Performing sentiment analysis on news about a specific asset requires domain adaptation, and domain adaptation requires training data made up of example texts with their associated sentiment polarity. The experiments show that pre-trained deep learning-based sentiment analysis models can be further fine-tuned, and the conclusions of these experiments are as follows: * Deep learning-based sentiment analysis models from the general financial domain, such as FinBERT, have little or no significance for the price development of crude oil. The reason is a lack of domain adaptation of the sentiment; the polarity of sentiment cannot be generalized and is highly dependent on the properties of its target. * According to the literature, crude oil prices are determined by changes in supply and demand. News can convey information about these changes, can broadly be identified through query searches, and can serve as a foundation for creating a training dataset for domain adaptation. For this purpose, news headlines tend to be rich enough in content to provide insights into supply and demand changes, even when the number of headlines is significantly reduced to more reputable sources. * Domain adaptation can be achieved to some extent by analyzing the properties of the target through a literature review and creating a corresponding training dataset to fine-tune the model. For example, considering supply and demand changes regarding crude oil appears to be a suitable basis for domain adaptation. To advance sentiment analysis applications in the domain of crude oil, this thesis presents CrudeBERT. In general, sentiment analysis of crude oil headlines with CrudeBERT could be a viable source of insight into the price behaviour of WTI crude oil. However, further research is required to determine whether CrudeBERT is beneficial for predicting oil prices. For this purpose, the code and the thesis are made publicly available on [GitHub](https://github.com/Captain-1337/Master-Thesis).
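The card does not include a usage snippet, so here is a hedged sketch using the standard Transformers sequence-classification API. The label names and their order are not documented here, so the code reads them from the model config rather than assuming a mapping, and the headline is only an illustrative input:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("Captain-1337/CrudeBERT")
model = AutoModelForSequenceClassification.from_pretrained("Captain-1337/CrudeBERT")

headline = "Major oil producers agree to extend output cuts"  # illustrative input
inputs = tokenizer(headline, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

probs = torch.softmax(logits, dim=-1)[0]
# Label names come from the model config; if they are generic LABEL_i entries,
# consult the linked thesis repository for the intended sentiment mapping.
for idx, prob in enumerate(probs):
    print(model.config.id2label[idx], round(prob.item(), 3))
```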
CarlosPR/mt5-spanish-memmories-analysis
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
**mt5-spanish-memmories-analysis** **// ES** Este es un trabajo en proceso. Este modelo aún es solo un punto de control inicial que mejoraré en los próximos meses. El objetivo es proporcionar un modelo capaz de, utilizando una combinación de tareas del modelo mT5, comprender los recuerdos y proporcionar una interacción útil para las personas con alzeimer o personas como mi propio abuelo que escribió sus recuerdos, pero ahora es solo un libro en la estantería. por lo que este modelo puede hacer que esos recuerdos parezcan "vivos". Pronto (si aún no está cargado) cargaré un cuaderno de **Google Colaboratory con una aplicación visual** que al usar este modelo proporcionará toda la interacción necesaria y deseada con una interfaz fácil de usar. **LINK APLICACIÓN (sobre él se actualizará la versión):** https://drive.google.com/drive/folders/1ewGcxxCYHHwhHhWtGlLiryZfV8wEAaBa?usp=sharing -> Debe descargarse la carpeta "memorium" del enlace y subirse a Google Drive sin incluir en ninguna otra carpeta (directamente en "Mi unidad"). -> A continuación se podrá abrir la app, encontrada dentro de dicha carpeta "memorium" con nombre "APP-Memorium" (el nombre puede incluir además un indicador de versión). -> Si haciendo doble click en el archivo de la app no permite abrirla, debe hacerse pulsando el botón derecho sobre el archivo y seleccionar "Abrir con", "Conectar más aplicaciones", y a continuación escoger Colaboratory (se pedirá instalar). Completada la instalación (tiempo aproximado: 2 minutos) se podrá cerrar la ventana de instalación para volver a visualizar la carpeta donde se encuentra el fichero de la app, que de ahora en adelante se podrá abrir haciendo doble click. -> Se podrán añadir memorias en la carpeta "perfiles" como se indica en la aplicación en el apartado "crear perfil". **// EN** This is a work in process. This model is just an initial checkpoint yet that I will be improving the following months. **APP LINK (it will contain the latest version):** https://drive.google.com/drive/folders/1ewGcxxCYHHwhHhWtGlLiryZfV8wEAaBa?usp=sharing -> The folder "memorium" must be downloaded and then uploaded to Google Drive at "My Drive", NOT inside any other folder. The aim is to provide a model able to, using a mixture of mT5 model's tasks, understand memories and provide an interaction useful for people with alzeimer or people like my own grandfather who wrote his memories but it is now just a book in the shelf, so this model can make those memories seem 'alive'. I will soon (if it is´t uploaded by now) upload a **Google Colaboratory notebook with a visual App** that using this model will provide all the needed and wanted interaction with an easy-to-use Interface.
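No usage snippet is provided yet; the sketch below only shows how the checkpoint could be loaded and queried with the standard mT5 text-to-text API. The task prefix and the example memory text are illustrative assumptions, not part of the model's documentation — the real prompting scheme is defined by the accompanying Colaboratory app.

```python
from transformers import AutoTokenizer, MT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("CarlosPR/mt5-spanish-memmories-analysis")
model = MT5ForConditionalGeneration.from_pretrained("CarlosPR/mt5-spanish-memmories-analysis")

# Hypothetical memory text and task prefix, used only to exercise the model.
memory = "En 1950 mi familia se mudó a un pequeño pueblo cerca del mar."
inputs = tokenizer("summarize: " + memory, return_tensors="pt")

outputs = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```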
CennetOguz/distilbert-base-uncased-finetuned-recipe
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-recipe results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-recipe This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.9488 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 3 | 3.2689 | | No log | 2.0 | 6 | 3.0913 | | No log | 3.0 | 9 | 3.0641 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.2+cu102 - Datasets 1.18.3 - Tokenizers 0.11.0
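Intended uses are not documented above; as a minimal, hedged sketch, the fine-tuned checkpoint can be exercised with the standard fill-mask pipeline (the recipe-style example sentence is an illustrative assumption):

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="CennetOguz/distilbert-base-uncased-finetuned-recipe")

# DistilBERT (uncased) uses the [MASK] token; print the top completions with their scores.
for prediction in fill_mask("chop the onions and [MASK] them in olive oil."):
    print(prediction["token_str"], round(prediction["score"], 3))
```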
Chae/botman
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - conversational --- # Lego Batman DialoGPT Model
Chakita/KannadaBERT
[ "pytorch", "roberta", "fill-mask", "transformers", "masked-lm", "fill-in-the-blanks", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - masked-lm - fill-in-the-blanks --- A RoBERTa masked language model trained on the Kannada portion of the OSCAR corpus.
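A hedged usage sketch with the fill-mask pipeline; the Kannada example sentence is only illustrative, and the mask token is read from the tokenizer rather than hard-coded:

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="Chakita/KannadaBERT")

# Build an illustrative Kannada sentence around the tokenizer's own mask token.
mask_token = fill_mask.tokenizer.mask_token
sentence = f"ಬೆಂಗಳೂರು ಕರ್ನಾಟಕದ {mask_token} ಆಗಿದೆ."

for prediction in fill_mask(sentence):
    print(prediction["token_str"], round(prediction["score"], 3))
```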
ChristianOrr/madnet_keras
[ "tensorboard", "dataset:flyingthings-3d", "dataset:kitti", "arxiv:1810.05424", "vision", "deep-stereo", "depth-estimation", "Tensorflow2", "Keras", "license:apache-2.0" ]
depth-estimation
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - vision - deep-stereo - depth-estimation - Tensorflow2 - Keras datasets: - flyingthings-3d - kitti --- # MADNet Keras MADNet is a deep stereo depth estimation model. Its key defining features are: 1. It has a light-weight architecture which means it has low latency. 2. It supports self-supervised training, so it can be conveniently adapted in the field with no training data. 3. It's a stereo depth model, which means it's capable of high accuracy. The MADNet weights in this repository were trained using a Tensorflow 2 / Keras implementation of the original code. The model was created using the Keras Functional API, which enables the following features: 1. Good optimization. 2. High level Keras methods (.fit, .predict and .evaluate). 3. Little boilerplate code. 4. Decent support from external packages (like Weights and Biases). 5. Callbacks. The weights provided were either trained on the 2012 / 2015 kitti stereo dataset or flyingthings-3d dataset. The weights of the pretrained models from the original paper (tf1_conversion_kitti.h5 and tf1_conversion_synthetic.h5) are provided in tensorflow 2 format. The TF1 weights help speed up fine-tuning, but its recommended to use either synthetic.h5 (trained on flyingthings-3d) or kitti.h5 (trained on 2012 and 2015 kitti stereo datasets). **Abstract**: Deep convolutional neural networks trained end-to-end are the undisputed state-of-the-art methods to regress dense disparity maps directly from stereo pairs. However, such methods suffer from notable accuracy drops when exposed to scenarios significantly different from those seen in the training phase (e.g.real vs synthetic images, indoor vs outdoor, etc). As it is unlikely to be able to gather enough samples to achieve effective training/ tuning in any target domain, we propose to perform unsupervised and continuous online adaptation of a deep stereo network in order to preserve its accuracy independently of the sensed environment. However, such a strategy can be extremely demanding regarding computational resources and thus not enabling real-time performance. Therefore, we address this side effect by introducing a new lightweight, yet effective, deep stereo architecture Modularly ADaptive Network (MADNet) and by developing Modular ADaptation (MAD), an algorithm to train independently only sub-portions of our model. By deploying MADNet together with MAD we propose the first ever realtime self-adaptive deep stereo system. ## Usage Instructions See the accompanying codes readme for details on how to perform training and inferencing with the model: [madnet-deep-stereo-with-keras](https://github.com/ChristianOrr/madnet-deep-stereo-with-keras). ## Training ### TF1 Kitti and TF1 Synthetic Training details for the TF1 weights are available in the supplementary material (at the end) of this paper: [Real-time self-adaptive deep stereo](https://arxiv.org/abs/1810.05424) ### Synthetic The synthetic model was finetuned using the tf1 synthetic weights. It was trained on the flyingthings-3d dataset with the following parameters: - Steps: 1.5 million - Learning Rate: 0.0001 - Decay Rate: 0.999 - Minimum Learning Rate Cap: 0.000001 - Batch Size: 1 - Optimizer: Adam - Image Height: 480 - Image Width: 640 ### Kitti The kitti model was finetuned using the synthetic weights. Tensorboard events file is available in the logs directory. 
It was trained on the 2012 and 2015 kitti stereo dataset with the following parameters: - Steps: 0.5 million - Learning Rate: 0.0001 - Decay Rate: 0.999 - Minimum Learning Rate Cap: 0.0000001 - Batch Size: 1 - Optimizer: Adam - Image Height: 480 - Image Width: 640 ## BibTeX entry and citation info ```bibtex @InProceedings{Tonioni_2019_CVPR, author = {Tonioni, Alessio and Tosi, Fabio and Poggi, Matteo and Mattoccia, Stefano and Di Stefano, Luigi}, title = {Real-time self-adaptive deep stereo}, booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2019} } ``` ```bibtex @article{Poggi2021continual, author={Poggi, Matteo and Tonioni, Alessio and Tosi, Fabio and Mattoccia, Stefano and Di Stefano, Luigi}, title={Continual Adaptation for Deep Stereo}, journal={IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)}, year={2021} } ``` ```bibtex @InProceedings{MIFDB16, author = "N. Mayer and E. Ilg and P. Hausser and P. Fischer and D. Cremers and A. Dosovitskiy and T. Brox", title = "A Large Dataset to Train Convolutional Networks for Disparity, Optical Flow, and Scene Flow Estimation", booktitle = "IEEE International Conference on Computer Vision and Pattern Recognition (CVPR)", year = "2016", note = "arXiv:1512.02134", url = "http://lmb.informatik.uni-freiburg.de/Publications/2016/MIFDB16" } ``` ```bibtex @INPROCEEDINGS{Geiger2012CVPR, author = {Andreas Geiger and Philip Lenz and Raquel Urtasun}, title = {Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite}, booktitle = {Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2012} } ``` ```bibtex @INPROCEEDINGS{Menze2015CVPR, author = {Moritz Menze and Andreas Geiger}, title = {Object Scene Flow for Autonomous Vehicles}, booktitle = {Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2015} } ```
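As a small, hedged supplement to the usage instructions linked above: the weight files named in this card can be fetched programmatically from this repository with `huggingface_hub` before being loaded into the Keras implementation (the loading itself is covered in the linked GitHub readme).

```python
from huggingface_hub import hf_hub_download

# Download one of the pretrained weight files listed in this card,
# e.g. kitti.h5 (trained on the 2012/2015 KITTI stereo datasets) or synthetic.h5.
weights_path = hf_hub_download(repo_id="ChristianOrr/madnet_keras", filename="kitti.h5")
print(weights_path)

# The weights can then be loaded into the Keras MADNet model from
# https://github.com/ChristianOrr/madnet-deep-stereo-with-keras,
# e.g. via model.load_weights(weights_path).
```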
ChristopherA08/IndoELECTRA
[ "pytorch", "electra", "pretraining", "id", "dataset:oscar", "transformers" ]
null
{ "architectures": [ "ElectraForPreTraining" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: id datasets: - oscar --- # IndoELECTRA (Indonesian ELECTRA Model) ## Model description ELECTRA is a new method for self-supervised language representation learning. This repository contains the pre-trained ELECTRA base model (TensorFlow 1.15.0) trained on a large Indonesian corpus (~16GB of raw text | ~2B Indonesian words). IndoELECTRA is a pre-trained language model based on the ELECTRA architecture for the Indonesian language; this is the base version, which uses the electra-base config. ## Intended uses & limitations #### How to use ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("ChristopherA08/IndoELECTRA") model = AutoModel.from_pretrained("ChristopherA08/IndoELECTRA") tokenizer.encode("hai aku mau makan.") [2, 8078, 1785, 2318, 1946, 18, 4] ``` ## Training procedure The model was trained using Google's original TensorFlow code on an eight-core Google Cloud TPU v2. A Google Cloud Storage bucket was used for persistent storage of training data and models.
ChukSamuels/DialoGPT-small-Dr.FauciBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- tags: - conversational --- # Dr. Fauci DialoGPT Model
Cinnamon/electra-small-japanese-discriminator
[ "pytorch", "electra", "pretraining", "ja", "transformers", "license:apache-2.0" ]
null
{ "architectures": [ "ElectraForPreTraining" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
419
null
--- language: ja license: apache-2.0 --- ## Japanese ELECTRA-small We provide a Japanese **ELECTRA-Small** model, as described in [ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators](https://openreview.net/pdf?id=r1xMH1BtvB). Our pretraining process employs subword units derived from the [Japanese Wikipedia](https://dumps.wikimedia.org/jawiki/latest), using the [Byte-Pair Encoding](https://www.aclweb.org/anthology/P16-1162.pdf) method and building on an initial tokenization with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd). For optimal performance, please take care to set your MeCab dictionary appropriately. ## How to use the discriminator in `transformers` ``` from transformers import BertJapaneseTokenizer, ElectraForPreTraining tokenizer = BertJapaneseTokenizer.from_pretrained('Cinnamon/electra-small-japanese-discriminator', mecab_kwargs={"mecab_option": "-d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd"}) model = ElectraForPreTraining.from_pretrained('Cinnamon/electra-small-japanese-discriminator') ```
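A hedged sketch extending the snippet above to score tokens with the discriminator head; positive logits indicate tokens the model judges as replaced, and the Japanese example sentence is only illustrative:

```python
import torch
from transformers import BertJapaneseTokenizer, ElectraForPreTraining

tokenizer = BertJapaneseTokenizer.from_pretrained(
    "Cinnamon/electra-small-japanese-discriminator",
    mecab_kwargs={"mecab_option": "-d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd"},
)
model = ElectraForPreTraining.from_pretrained("Cinnamon/electra-small-japanese-discriminator")

sentence = "東京は日本の首都です。"  # illustrative input
inputs = tokenizer(sentence, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits[0]

tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, score in zip(tokens, logits):
    # Positive scores mean the discriminator considers the token replaced.
    print(token, round(score.item(), 3))
```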
Cinnamon/electra-small-japanese-generator
[ "pytorch", "electra", "fill-mask", "ja", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19
null
--- language: ja --- ## Japanese ELECTRA-small We provide a Japanese **ELECTRA-Small** model, as described in [ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators](https://openreview.net/pdf?id=r1xMH1BtvB). Our pretraining process employs subword units derived from the [Japanese Wikipedia](https://dumps.wikimedia.org/jawiki/latest), using the [Byte-Pair Encoding](https://www.aclweb.org/anthology/P16-1162.pdf) method and building on an initial tokenization with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd). For optimal performance, please take care to set your MeCab dictionary appropriately. ``` # ELECTRA-small generator usage from transformers import BertJapaneseTokenizer, ElectraForMaskedLM tokenizer = BertJapaneseTokenizer.from_pretrained('Cinnamon/electra-small-japanese-generator', mecab_kwargs={"mecab_option": "-d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd"}) model = ElectraForMaskedLM.from_pretrained('Cinnamon/electra-small-japanese-generator') ```
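As a hedged extension of the snippet above, the generator can be queried through the fill-mask pipeline; the Japanese example sentence is illustrative and the mask token comes from the tokenizer:

```python
from transformers import BertJapaneseTokenizer, ElectraForMaskedLM, pipeline

tokenizer = BertJapaneseTokenizer.from_pretrained(
    "Cinnamon/electra-small-japanese-generator",
    mecab_kwargs={"mecab_option": "-d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd"},
)
model = ElectraForMaskedLM.from_pretrained("Cinnamon/electra-small-japanese-generator")

fill_mask = pipeline("fill-mask", model=model, tokenizer=tokenizer)

# Illustrative sentence: "Tokyo is the [MASK] of Japan."
sentence = f"東京は日本の{tokenizer.mask_token}です。"
for prediction in fill_mask(sentence):
    print(prediction["token_str"], round(prediction["score"], 3))
```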
Ciruzzo/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - conversational --- # Harry Potter DialoGPT Model
ClaudeCOULOMBE/RickBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - conversational --- # RickBot built for [Chai](https://chai.ml/) Make your own [here](https://colab.research.google.com/drive/1o5LxBspm-C28HQvXN-PRQavapDbm5WjG?usp=sharing)
ClaudeYang/awesome_fb_model
[ "pytorch", "bart", "text-classification", "dataset:multi_nli", "transformers", "zero-shot-classification" ]
zero-shot-classification
{ "architectures": [ "BartForSequenceClassification" ], "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- pipeline_tag: zero-shot-classification datasets: - multi_nli widget: - text: "ETH" candidate_labels: "Location & Address, Employment, Organizational, Name, Service, Studies, Science" hypothesis_template: "This is {}." --- ETH Zeroshot
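A hedged usage sketch with the zero-shot classification pipeline, reusing the candidate labels and hypothesis template from the widget configuration above:

```python
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="ClaudeYang/awesome_fb_model")

candidate_labels = [
    "Location & Address", "Employment", "Organizational",
    "Name", "Service", "Studies", "Science",
]

result = classifier("ETH", candidate_labels, hypothesis_template="This is {}.")
for label, score in zip(result["labels"], result["scores"]):
    print(label, round(score, 3))
```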
CogComp/bart-faithful-summary-detector
[ "pytorch", "jax", "bart", "text-classification", "en", "dataset:xsum", "transformers", "xsum", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BartForSequenceClassification" ], "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": 1, "max_length": 128, "min_length": 12, "no_repeat_ngram_size": null, "num_beams": 4, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
234
null
--- language: - en thumbnail: https://cogcomp.seas.upenn.edu/images/logo.png tags: - text-classification - bart - xsum license: cc-by-sa-4.0 datasets: - xsum widget: - text: "<s> Ban Ki-moon was elected for a second term in 2007. </s></s> Ban Ki-Moon was re-elected for a second term by the UN General Assembly, unopposed and unanimously, on 21 June 2011." - text: "<s> Ban Ki-moon was elected for a second term in 2011. </s></s> Ban Ki-Moon was re-elected for a second term by the UN General Assembly, unopposed and unanimously, on 21 June 2011." --- # bart-faithful-summary-detector ## Model description A BART (base) model trained to classify whether a summary is *faithful* to the original article. See our [paper in NAACL'21](https://www.seas.upenn.edu/~sihaoc/static/pdf/CZSR21.pdf) for details. ## Usage Concatenate a summary and a source document as input (note that the summary needs to be the **first** sentence). Here's an example usage (with PyTorch) ```python from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("CogComp/bart-faithful-summary-detector") model = AutoModelForSequenceClassification.from_pretrained("CogComp/bart-faithful-summary-detector") article = "Ban Ki-Moon was re-elected for a second term by the UN General Assembly, unopposed and unanimously, on 21 June 2011." bad_summary = "Ban Ki-moon was elected for a second term in 2007." good_summary = "Ban Ki-moon was elected for a second term in 2011." bad_pair = tokenizer(text=bad_summary, text_pair=article, return_tensors='pt') good_pair = tokenizer(text=good_summary, text_pair=article, return_tensors='pt') bad_score = model(**bad_pair) good_score = model(**good_pair) print(good_score[0][:, 1] > bad_score[0][:, 1]) # True, label mapping: "0" -> "Hallucinated" "1" -> "Faithful" ``` ### BibTeX entry and citation info ```bibtex @inproceedings{CZSR21, author = {Sihao Chen and Fan Zhang and Kazoo Sone and Dan Roth}, title = {{Improving Faithfulness in Abstractive Summarization with Contrast Candidate Generation and Selection}}, booktitle = {NAACL}, year = {2021} } ```
Coolhand/Abuela
[ "en", "image_restoration", "superresolution", "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en thumbnail: https://github.com/Nick-Harvey/for_my_abuela/blob/master/cuban_large.jpg tags: - image_restoration - superresolution license: mit metrics: --- @inproceedings{wan2020bringing, title={Bringing Old Photos Back to Life}, author={Wan, Ziyu and Zhang, Bo and Chen, Dongdong and Zhang, Pan and Chen, Dong and Liao, Jing and Wen, Fang}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={2747--2757}, year={2020} } @article{wan2020old, title={Old Photo Restoration via Deep Latent Space Translation}, author={Wan, Ziyu and Zhang, Bo and Chen, Dongdong and Zhang, Pan and Chen, Dong and Liao, Jing and Wen, Fang}, journal={arXiv preprint arXiv:2009.07047}, year={2020} }
CopymySkill/DialoGPT-medium-atakan
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
"2021-09-23T17:08:19Z"
--- tags: - conversational --- # Atakan DialoGPT Model
Corvus/DialoGPT-medium-CaptainPrice-Extended
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - conversational --- # DialoGPT Captain Price (Extended)
Corvus/DialoGPT-medium-CaptainPrice
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
tags:
- conversational
---

# Captain Price DialoGPT Model
CouchCat/ma_mlc_v7_distil
[ "pytorch", "distilbert", "text-classification", "en", "transformers", "multi-label", "license:mit" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
"2021-02-14T11:20:32Z"
---
language: en
license: mit
tags:
- multi-label
widget:
- text: "I would like to return these pants and shoes"
---

### Description

A multi-label text classification model trained on customer feedback data using DistilBert.
Possible labels are:
- Delivery (delivery status, time of arrival, etc.)
- Return (return confirmation, return label requests, etc.)
- Product (quality, complaint, etc.)
- Monetary (pending transactions, refund, etc.)

### Usage

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("CouchCat/ma_mlc_v7_distil")
model = AutoModelForSequenceClassification.from_pretrained("CouchCat/ma_mlc_v7_distil")
```
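The snippet above only loads the checkpoint. A minimal inference sketch follows; it assumes the checkpoint's `id2label` mapping covers the four categories listed above and uses an arbitrary 0.5 sigmoid threshold, which is not specified in the original card:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("CouchCat/ma_mlc_v7_distil")
model = AutoModelForSequenceClassification.from_pretrained("CouchCat/ma_mlc_v7_distil")

text = "I would like to return these pants and shoes"
inputs = tokenizer(text, return_tensors="pt", truncation=True)

with torch.no_grad():
    logits = model(**inputs).logits

# Multi-label setup: apply a sigmoid per label instead of a softmax over labels,
# so several categories can fire for the same message.
probs = torch.sigmoid(logits)[0]
threshold = 0.5  # assumed cut-off, tune on validation data
predicted = [model.config.id2label[i] for i, p in enumerate(probs) if p >= threshold]
print(predicted)
```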
CouchCat/ma_ner_v6_distil
[ "pytorch", "distilbert", "token-classification", "en", "transformers", "ner", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
"2021-02-14T11:34:25Z"
---
language: en
license: mit
tags:
- ner
widget:
- text: "These shoes from Adidas fit quite well"
---

### Description

A Named Entity Recognition model trained on customer feedback data using DistilBert.
Possible labels are:
- PRD: for certain products
- BRND: for brands

### Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("CouchCat/ma_ner_v6_distil")
model = AutoModelForTokenClassification.from_pretrained("CouchCat/ma_ner_v6_distil")
```
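As a rough sketch (not part of the original card), the checkpoint can also be wrapped in a token-classification pipeline; the `aggregation_strategy` argument assumes a reasonably recent transformers release:

```python
from transformers import pipeline

# Groups word pieces back into whole entity spans (e.g. PRD, BRND).
ner = pipeline(
    "token-classification",
    model="CouchCat/ma_ner_v6_distil",
    tokenizer="CouchCat/ma_ner_v6_distil",
    aggregation_strategy="simple",
)

print(ner("These shoes from Adidas fit quite well"))
```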
CouchCat/ma_ner_v7_distil
[ "pytorch", "distilbert", "token-classification", "en", "transformers", "ner", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
---
language: en
license: mit
tags:
- ner
widget:
- text: "These shoes I recently bought from Tommy Hilfiger fit quite well. The shirt, however, has got a hole"
---

### Description

A Named Entity Recognition model trained on customer feedback data using DistilBert.
Possible labels are in BIO notation. Performance of the PERS tag could be better because of the low number of samples:
- PROD: for certain products
- BRND: for brands
- PERS: people names

The following tags are simply in place to help better categorize the previous tags:
- MATR: relating to materials, e.g. cloth, leather, seam, etc.
- TIME: time-related entities
- MISC: any other entity that might skew the results

### Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("CouchCat/ma_ner_v7_distil")
model = AutoModelForTokenClassification.from_pretrained("CouchCat/ma_ner_v7_distil")
```
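For a lower-level view of the BIO tags, a sketch such as the following can decode predictions token by token; it assumes the label names above are stored in the checkpoint's `id2label` config:

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("CouchCat/ma_ner_v7_distil")
model = AutoModelForTokenClassification.from_pretrained("CouchCat/ma_ner_v7_distil")

text = "These shoes I recently bought from Tommy Hilfiger fit quite well"
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

pred_ids = logits.argmax(dim=-1)[0]
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])

# Print each word piece next to its predicted BIO tag.
for token, pred_id in zip(tokens, pred_ids):
    print(token, model.config.id2label[int(pred_id)])
```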
CouchCat/ma_sa_v7_distil
[ "pytorch", "distilbert", "text-classification", "en", "transformers", "sentiment-analysis", "license:mit" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38
"2021-02-12T17:16:43Z"
---
language: en
license: mit
tags:
- sentiment-analysis
widget:
- text: "I am disappointed in the terrible quality of my dress"
---

### Description

A Sentiment Analysis model trained on customer feedback data using DistilBert.
Possible sentiments are:
* negative
* neutral
* positive

### Usage

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("CouchCat/ma_sa_v7_distil")
model = AutoModelForSequenceClassification.from_pretrained("CouchCat/ma_sa_v7_distil")
```
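A minimal inference sketch (not part of the original card; it assumes the three sentiments above are stored in the checkpoint's `id2label` config):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("CouchCat/ma_sa_v7_distil")
model = AutoModelForSequenceClassification.from_pretrained("CouchCat/ma_sa_v7_distil")

inputs = tokenizer("I am disappointed in the terrible quality of my dress", return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Single-label classification: softmax over the three sentiment classes.
probs = torch.softmax(logits, dim=-1)[0]
print(model.config.id2label[int(probs.argmax())], probs.tolist())
```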
Cryptikdw/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
tags:
- conversational
---

# Rick DialoGPT Model
CuongLD/wav2vec2-large-xlsr-vietnamese
[ "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "vi", "dataset:common_voice, infore_25h", "arxiv:2006.11477", "arxiv:2006.13979", "transformers", "audio", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
language: vi
datasets:
- common_voice, infore_25h
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: Cuong-Cong XLSR Wav2Vec2 Large 53
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice vi
      type: common_voice
      args: vi
    metrics:
    - name: Test WER
      type: wer
      value: 58.63
---

# Wav2Vec2-Large-XLSR-53-Vietnamese

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Vietnamese using the [Common Voice](https://huggingface.co/datasets/common_voice) and [Infore_25h](https://files.huylenguyen.com/25hours.zip) datasets (password: BroughtToYouByInfoRe).
When using this model, make sure that your speech input is sampled at 16kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "vi", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("CuongLD/wav2vec2-large-xlsr-vietnamese")
model = Wav2Vec2ForCTC.from_pretrained("CuongLD/wav2vec2-large-xlsr-vietnamese")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

The model can be evaluated as follows on the Vietnamese test data of Common Voice.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "vi", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("CuongLD/wav2vec2-large-xlsr-vietnamese")
model = Wav2Vec2ForCTC.from_pretrained("CuongLD/wav2vec2-large-xlsr-vietnamese")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits

    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)

print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 58.63 %

## Training

The Common Voice `train`, `validation`, and `Infore_25h` datasets were used for training.

The script used for training can be found [here](https://drive.google.com/file/d/1AW9R8IlsapiSGh9n3aECf23t-zhk3wUh/view?usp=sharing)

Your model is then available under *huggingface.co/CuongLD/wav2vec2-large-xlsr-vietnamese* for everybody to use 🎉.

## How to evaluate my trained checkpoint

Having uploaded your model, you should now evaluate your model in a final step. This should be as simple as copying the evaluation code of your model card into a python script and running it. Make sure to note the final result on the model card **both** under the YAML tags at the very top **and** below your evaluation code under "Test Results".

## Rules of training and evaluation

In this section, we will quickly go over what data is allowed to be used as training data, what kind of data preprocessing is allowed to be used, and how the model should be evaluated.

To make it very simple regarding the first point: **All data except the official common voice `test` data set can be used as training data**. For models trained in a language that is not included in Common Voice, the author of the model is responsible to leave a reasonable amount of data for evaluation.

Second, the rules regarding the preprocessing are not as straightforward. It is allowed (and recommended) to normalize the data to only have lower-case characters. It is also allowed (and recommended) to remove typographical symbols and punctuation marks. A list of such symbols can *e.g.* be found [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks) - however here we already must be careful. We should **not** remove a symbol that would change the meaning of the words, *e.g.* in English, we should not remove the single quotation mark `'` since it would change the meaning of the word `"it's"` to `"its"` which would then be incorrect. So the golden rule here is to not remove any characters that could change the meaning of a word into another word. This is not always obvious and should be given some consideration. As another example, it is fine to remove the "hyphen-minus" sign "`-`" since it doesn't change the meaning of a word to another one. *E.g.* "`fine-tuning`" would be changed to "`finetuning`" which still has the same meaning.

Since those choices are not always obvious, when in doubt feel free to ask on Slack or even better post on the forum, as was done, *e.g.* [here](https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586).

## Tips and tricks

This section summarizes a couple of tips and tricks across various topics. It will continuously be updated during the week.

### How to combine multiple datasets into one

Check out [this](https://discuss.huggingface.co/t/how-to-combine-local-data-files-with-an-official-dataset/4685) post.
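As a rough sketch of the idea behind the linked post (the column names and the CSV file below are illustrative assumptions, not from the original write-up), two datasets with matching columns can be merged with `datasets.concatenate_datasets`:

```python
from datasets import load_dataset, concatenate_datasets

# Illustrative only: keep just the columns both sources share,
# then concatenate them into a single training set.
common_voice = load_dataset("common_voice", "vi", split="train+validation")
extra = load_dataset("csv", data_files={"train": "infore_25h.csv"})["train"]

keep = ["path", "sentence"]
common_voice = common_voice.remove_columns([c for c in common_voice.column_names if c not in keep])
extra = extra.remove_columns([c for c in extra.column_names if c not in keep])

train_dataset = concatenate_datasets([common_voice, extra])
```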
### How to effectively preprocess the data

### How to efficiently load datasets with limited RAM and hard drive space

Check out [this](https://discuss.huggingface.co/t/german-asr-fine-tuning-wav2vec2/4558/8?u=patrickvonplaten) post.

### How to do hyperparameter tuning

### How to preprocess and evaluate character based languages

## Further reading material

It is recommended that you take some time to read up on how Wav2vec2 works in theory. Getting a better understanding of the theory and the inner mechanisms of the model often helps when fine-tuning the model. **However**, if you don't like reading blog posts/papers, don't worry - it is by no means necessary to go through the theory to fine-tune Wav2Vec2 on your language of choice.

If you are interested in learning more about the model though, here are a couple of resources that are important to better understand Wav2Vec2:

- [Facebook's Wav2Vec2 blog post](https://ai.facebook.com/blog/wav2vec-state-of-the-art-speech-recognition-through-self-supervision/)
- [Official Wav2Vec2 paper](https://arxiv.org/abs/2006.11477)
- [Official XLSR Wav2vec2 paper](https://arxiv.org/pdf/2006.13979.pdf)
- [Hugging Face Blog](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2)
- [How does CTC (Connectionist Temporal Classification) work](https://distill.pub/2017/ctc/)

It helps to have a good understanding of the following points:

- How was XLSR-Wav2Vec2 pretrained? -> Feature vectors were masked and had to be predicted by the model; very similar in spirit to the masked language modeling of BERT.
- What parts of XLSR-Wav2Vec2 are responsible for what? What is the feature extractor part used for? -> extract feature vectors from the 1D raw audio waveform; What is the transformer part doing? -> mapping feature vectors to contextualized feature vectors; ...
- What part of the model needs to be fine-tuned? -> The pretrained model **does not** include a language head to classify the contextualized features to letters. This is randomly initialized when loading the pretrained checkpoint and has to be fine-tuned. Also, note that the authors recommend **not** further fine-tuning the feature extractor.
- What data was used to pretrain XLSR-Wav2Vec2? The checkpoint we will use for further fine-tuning was pretrained on **53** languages.
- What languages are considered to be similar by XLSR-Wav2Vec2? In the official [XLSR Wav2Vec2 paper](https://arxiv.org/pdf/2006.13979.pdf), the authors show nicely which languages share a common contextualized latent space. It might be useful for you to extend your training data with data of other languages that are considered to be very similar by the model (or you).

## FAQ

- Can a participant fine-tune models for more than one language?
  Yes! A participant can fine-tune models in as many languages she/he likes.
- Can a participant use extra data (apart from the common voice data)?
  Yes! All data except the official common voice `test data` can be used for training. If a participant wants to train a model on a language that is not part of Common Voice (which is very much encouraged!), the participant should make sure that some test data is held out to make sure the model is not overfitting.
- Can we fine-tune for high-resource languages?
  Yes! We do not really recommend fine-tuning models in English since there are already so many fine-tuned speech recognition models in English. However, it is very much appreciated if participants want to fine-tune models in other "high-resource" languages, such as French, Spanish, or German.
For such cases, one probably needs to train locally and might have to apply tricks such as lazy data loading (check the ["Lazy data loading"](#how-to-do-lazy-data-loading) section for more details). A hedged sketch of this idea is shown below.
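The sketch assumes a `datasets` release that supports streaming; it is not part of the original instructions:

```python
from datasets import load_dataset

# Streaming mode never materializes the whole corpus on disk or in RAM;
# examples are yielded one at a time as the loop asks for them.
stream = load_dataset("common_voice", "fr", split="train", streaming=True)

for i, example in enumerate(stream):
    if i >= 3:
        break
    print(example["sentence"])
```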
CurtisBowser/DialoGPT-medium-sora-two
[ "pytorch", "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2021-11-04T03:17:16Z"
---
tags:
- conversational
---

# Sora DialoGPT Model
CurtisBowser/DialoGPT-medium-sora
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
"2021-10-20T23:52:19Z"
---
tags:
- conversational
---

# Sora DialoGPT Model
CurtisBowser/DialoGPT-small-sora
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
"2021-10-20T20:22:23Z"
---
tags:
- conversational
---

# Sora DialoGPT Model
CyberMuffin/DialoGPT-small-ChandlerBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- conversational
---

# Chandler Bot DialoGPT model
D3xter1922/electra-base-discriminator-finetuned-cola
[ "pytorch", "tensorboard", "electra", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
null
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: electra-base-discriminator-finetuned-cola
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: glue
      type: glue
      args: cola
    metrics:
    - name: Matthews Correlation
      type: matthews_correlation
      value: 0.6824089073723449
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# electra-base-discriminator-finetuned-cola

This model is a fine-tuned version of [google/electra-base-discriminator](https://huggingface.co/google/electra-base-discriminator) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6367
- Matthews Correlation: 0.6824

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.4139        | 1.0   | 535  | 0.4137          | 0.6381               |
| 0.2452        | 2.0   | 1070 | 0.4887          | 0.6504               |
| 0.17          | 3.0   | 1605 | 0.5335          | 0.6757               |
| 0.1135        | 4.0   | 2140 | 0.6367          | 0.6824               |
| 0.0817        | 5.0   | 2675 | 0.6742          | 0.6755               |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
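For reference, the hyperparameters listed above map roughly onto a `TrainingArguments` configuration such as the sketch below; the output directory name is an arbitrary assumption, and the Adam betas/epsilon are simply the defaults quoted in the card:

```python
from transformers import TrainingArguments

# Rough reconstruction of the reported training settings.
training_args = TrainingArguments(
    output_dir="electra-base-discriminator-finetuned-cola",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    num_train_epochs=5,
    lr_scheduler_type="linear",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```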