Update with commit 3fcfbe7549d9694f96e1f19630add4adf99dd421
Browse files. See: https://github.com/huggingface/transformers/commit/3fcfbe7549d9694f96e1f19630add4adf99dd421
- frameworks.json +1 -0
- pipeline_tags.json +1 -0
frameworks.json
CHANGED
@@ -166,6 +166,7 @@
|
|
166 |
{"model_type":"seamless_m4t","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
167 |
{"model_type":"seamless_m4t_v2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
168 |
{"model_type":"segformer","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoImageProcessor"}
|
|
|
169 |
{"model_type":"sew","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
170 |
{"model_type":"sew-d","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
171 |
{"model_type":"siglip","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
|
|
166 |
{"model_type":"seamless_m4t","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
167 |
{"model_type":"seamless_m4t_v2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
168 |
{"model_type":"segformer","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoImageProcessor"}
|
169 |
+
{"model_type":"seggpt","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
170 |
{"model_type":"sew","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
171 |
{"model_type":"sew-d","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
172 |
{"model_type":"siglip","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
pipeline_tags.json
CHANGED
@@ -674,6 +674,7 @@
|
|
674 |
{"model_class":"SeamlessM4Tv2ForTextToSpeech","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
|
675 |
{"model_class":"SeamlessM4Tv2ForTextToText","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
|
676 |
{"model_class":"SeamlessM4Tv2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
|
|
677 |
{"model_class":"SegformerForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
678 |
{"model_class":"SegformerModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
679 |
{"model_class":"SiglipForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
|
|
674 |
{"model_class":"SeamlessM4Tv2ForTextToSpeech","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
|
675 |
{"model_class":"SeamlessM4Tv2ForTextToText","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
|
676 |
{"model_class":"SeamlessM4Tv2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
677 |
+
{"model_class":"SegGptModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
678 |
{"model_class":"SegformerForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
679 |
{"model_class":"SegformerModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
680 |
{"model_class":"SiglipForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|