LPX55 committed on
Commit 43b5c70 · 1 Parent(s): 75827f2

minor: undo

Files changed (1): app.py (+3 -4)
app.py CHANGED

@@ -3,7 +3,6 @@ import spaces
 import gradio as gr
 import torch
 import logging
-from diffusers import DiffusionPipeline
 from transformers import LlamaForCausalLM, PreTrainedTokenizerFast, BitsAndBytesConfig
 from transformer_hidream_image import HiDreamImageTransformer2DModel
 from pipeline_hidream_image import HiDreamImagePipeline
@@ -34,7 +33,7 @@ RESOLUTION_OPTIONS = [
     "1248 × 832 (Landscape)",
     "832 × 1248 (Portrait)"
 ]
-quantization_config = BitsAndBytesConfig(load_in_4bit=True)
+# quantization_config = BitsAndBytesConfig(load_in_4bit=True)
 
 MODEL_PREFIX = "azaneko"
 LLAMA_MODEL_NAME = "hugging-quants/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4"
@@ -72,7 +71,7 @@ pipe = HiDreamImagePipeline.from_pretrained(
     tokenizer_4=tokenizer_4,
     text_encoder_4=text_encoder_4,
     torch_dtype=torch.bfloat16,
-    quantization_config=quantization_config
+    # quantization_config=quantization_config
 )
 pipe.transformer = transformer
 log_vram("✅ Pipeline loaded!")
@@ -149,7 +148,7 @@ def parse_resolution(resolution_str):
 @spaces.GPU()
 def generate_image(pipe: HiDreamImagePipeline, model_type: str, prompt: str, resolution: tuple[int, int], seed: int):
     # Get configuration for current model
-    config = MODEL_CONFIGS[model_type]
+    # config = MODEL_CONFIGS[model_type]
     guidance_scale = 0.0
     num_inference_steps = 16
 
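In short, the commit backs out an in-progress bitsandbytes 4-bit setup: the now-unused DiffusionPipeline import is removed, and the BitsAndBytesConfig(load_in_4bit=True) object, the quantization_config= kwarg on the pipeline call, and the MODEL_CONFIGS lookup are commented out, so the app again loads plain bfloat16 pipeline weights alongside the pre-quantized GPTQ-INT4 Llama text encoder. For reference, below is a minimal sketch of how such a config is ordinarily consumed by a transformers from_pretrained call; the unquantized checkpoint id is illustrative only and is not part of this repo, and whether the custom HiDreamImagePipeline forwards the kwarg to its sub-models is not shown in the diff, so the sketch sticks to the plain transformers path.

# Sketch, not the repo's code: how BitsAndBytesConfig(load_in_4bit=True)
# is normally wired into a transformers model load.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True)

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3.1-8B-Instruct",  # illustrative id, not the GPTQ-INT4 checkpoint from app.py
    torch_dtype=torch.bfloat16,
    quantization_config=quantization_config,  # quantizes eligible linear layers to 4-bit on load
    device_map="auto",
)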