kvaishnavi committed
Commit 2dbd23b · 1 Parent(s): 7a8fd12

Fix capitalization issues

Files changed (1):
  onnx/modeling_phi4mm.py  +7 -7

onnx/modeling_phi4mm.py CHANGED
@@ -1412,7 +1412,7 @@ class Phi4MMAttention(nn.Module):
 
 class Phi4MMFlashAttention2(Phi4MMAttention):
     """
-    Phi-O flash attention module. This module inherits from `Phi4MMAttention` as the weights of the module stays
+    Phi-4-MM flash attention module. This module inherits from `Phi4MMAttention` as the weights of the module stays
     untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
     flash attention and deal with padding tokens in case the input contains any of them.
     """
@@ -1736,7 +1736,7 @@ PHI4MM_START_DOCSTRING = r"""
 
 
 @add_start_docstrings(
-    "The bare Phi-O model outputting raw hidden-states without any specific head on top.",
+    "The bare Phi-4-MM model outputting raw hidden-states without any specific head on top.",
     PHI4MM_START_DOCSTRING,
 )
 class Phi4MMPreTrainedModel(PreTrainedModel):
@@ -1839,7 +1839,7 @@ PHI4MM_INPUTS_DOCSTRING = r"""
 
 
 @add_start_docstrings(
-    "The bare Phi-O model outputting raw hidden-states without any specific head on top.",
+    "The bare Phi-4-MM model outputting raw hidden-states without any specific head on top.",
     PHI4MM_START_DOCSTRING,
 )
 class Phi4MMModel(Phi4MMPreTrainedModel):
@@ -1883,7 +1883,7 @@ class Phi4MMModel(Phi4MMPreTrainedModel):
     def set_input_embeddings(self, value):
         self.embed_tokens = value
 
-    @add_start_docstrings_to_model_forward(Phi4MM_INPUTS_DOCSTRING)
+    @add_start_docstrings_to_model_forward(PHI4MM_INPUTS_DOCSTRING)
     def forward(
         self,
         input_ids: torch.LongTensor = None,
@@ -2260,7 +2260,7 @@ class Phi4MMForCausalLM(Phi4MMPreTrainedModel, GenerationMixin):
         return self.model
 
     # Ignore copy
-    @add_start_docstrings_to_model_forward(Phi4MM_INPUTS_DOCSTRING)
+    @add_start_docstrings_to_model_forward(PHI4MM_INPUTS_DOCSTRING)
     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
@@ -2473,7 +2473,7 @@ class Phi4MMForSequenceClassification(Phi4MMPreTrainedModel):
     def set_input_embeddings(self, value):
        self.model.embed_tokens = value
 
-    @add_start_docstrings_to_model_forward(Phi4MM_INPUTS_DOCSTRING)
+    @add_start_docstrings_to_model_forward(PHI4MM_INPUTS_DOCSTRING)
     def forward(
         self,
         input_ids: torch.LongTensor = None,
@@ -2572,7 +2572,7 @@ class Phi4MMForTokenClassification(Phi4MMPreTrainedModel):
         # Initialize weights and apply final processing
         self.post_init()
 
-    @add_start_docstrings_to_model_forward(Phi4MM_INPUTS_DOCSTRING)
+    @add_start_docstrings_to_model_forward(PHI4MM_INPUTS_DOCSTRING)
     @add_code_sample_docstrings(
         checkpoint=_CHECKPOINT_FOR_DOC,
         output_type=TokenClassifierOutput,
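
Note on the decorator-argument change: it is more than cosmetic. The module defines the constant as PHI4MM_INPUTS_DOCSTRING (visible in the `@@ -1839` hunk context), and Python names are case-sensitive, so the old spelling `Phi4MM_INPUTS_DOCSTRING` refers to an undefined name. Because decorator arguments are evaluated when the class body runs, that would raise a NameError as soon as the module is imported. A minimal sketch of the failure mode, using a simplified stand-in for the transformers docstring decorator (not the library's actual implementation):

# Minimal sketch (hypothetical, simplified): why the constant's capitalization matters.
# Decorator arguments are evaluated at class-definition time, so a misspelled
# constant name fails at import with a NameError.

PHI4MM_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor`): ...
"""

def add_start_docstrings_to_model_forward(docstring):
    # Simplified stand-in for the transformers utility of the same name:
    # it prepends the shared inputs docstring to the decorated forward method.
    def decorator(fn):
        fn.__doc__ = docstring + (fn.__doc__ or "")
        return fn
    return decorator

class Phi4MMModel:
    # Writing Phi4MM_INPUTS_DOCSTRING here (the pre-fix spelling) would raise
    # NameError on import, because no such name exists; PHI4MM_INPUTS_DOCSTRING
    # is the defined constant.
    @add_start_docstrings_to_model_forward(PHI4MM_INPUTS_DOCSTRING)
    def forward(self, input_ids=None):
        ...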