LPX55 committed on
Commit
93b5fa7
·
verified ·
1 Parent(s): b6ffc30

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -34
app.py CHANGED
@@ -1,38 +1,10 @@
1
  import spaces
2
  import gradio as gr
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
- from transformers import BitsAndBytesConfig
 
5
 
6
- quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
7
 
8
- model_name = "unsloth/Llama-4-Scout-17B-16E-Instruct-unsloth-bnb-4bit"
9
-
10
- @spaces.GPU(duration=180)
11
- def load_model():
12
- model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config)
13
- tokenizer = AutoTokenizer.from_pretrained(model_name)
14
- return model, tokenizer
15
-
16
- @spaces.GPU
17
- def generate_text(prompt, model, tokenizer):
18
- inputs = tokenizer(prompt, return_tensors="pt")
19
- outputs = model.generate(**inputs, max_length=100)
20
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
21
-
22
- def gradio_interface():
23
- model, tokenizer = load_model()
24
-
25
- def wrapped_generate(prompt):
26
- return generate_text(prompt, model, tokenizer)
27
-
28
- iface = gr.Interface(
29
- fn=wrapped_generate,
30
- inputs="text",
31
- outputs="text",
32
- title="Meta-Llama 4 Scout 17B Instruct 4bit bnb"
33
- )
34
- return iface
35
-
36
- if __name__ == "__main__":
37
- demo = gradio_interface()
38
- demo.launch()
 
import spaces
import gradio as gr
import torch
import logging
from diffusers import DiffusionPipeline

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load the nf4-quantized HiDream pipeline once at startup. Downloading and
# CPU-side initialization are safe outside a GPU context; actual inference
# must happen inside a @spaces.GPU-decorated function on ZeroGPU Spaces.
pipe = DiffusionPipeline.from_pretrained("azaneko/HiDream-I1-Fast-nf4")

# Default prompt, preserved byte-for-byte from the original script.
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"


@spaces.GPU(duration=120)
def generate(user_prompt: str = prompt):
    """Run the diffusion pipeline on *user_prompt* and return the first image.

    Decorated with @spaces.GPU so CUDA is available during inference on a
    ZeroGPU Space — running ``pipe(...)`` at module import time would fail
    there, since the main process has no GPU attached.
    """
    logger.info("Generating image for prompt: %s", user_prompt)
    # .images is a list; the pipeline returns one image per prompt by default.
    return pipe(user_prompt).images[0]


if __name__ == "__main__":
    # Minimal text-to-image UI; mirrors the gr.Interface structure this file
    # used before the rewrite (text in, image out, launched at script entry).
    demo = gr.Interface(
        fn=generate,
        inputs=gr.Textbox(value=prompt, label="Prompt"),
        outputs=gr.Image(label="Generated image"),
        title="HiDream-I1 Fast (nf4)",
    )
    demo.launch()