lysandre (HF Staff) committed · verified
Commit 5e7b78f · 1 parent: 277f2f2

Update with commit 967045082faaaaf3d653bfe665080fd746b2bb60


See: https://github.com/huggingface/transformers/commit/967045082faaaaf3d653bfe665080fd746b2bb60

Files changed (2)
  1. frameworks.json +1 -0
  2. pipeline_tags.json +2 -0
frameworks.json CHANGED
@@ -337,6 +337,7 @@
  {"model_type":"vits","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"vivit","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"vjepa2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
+ {"model_type":"voxtral","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
  {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
  {"model_type":"wav2vec2-bert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
  {"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
pipeline_tags.json CHANGED
@@ -1316,6 +1316,8 @@
  {"model_class":"VitsModel","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
  {"model_class":"VivitForVideoClassification","pipeline_tag":"video-classification","auto_class":"AutoModelForVideoClassification"}
  {"model_class":"VivitModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
+ {"model_class":"VoxtralEncoder","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+ {"model_class":"VoxtralForConditionalGeneration","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
  {"model_class":"Wav2Vec2BertForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
  {"model_class":"Wav2Vec2BertForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
  {"model_class":"Wav2Vec2BertForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}