import { AutoTokenizer, AutoModelForCausalLM } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.6.0';
import { prompt_cluster } from "./prompt_cluster.js";

// Load the tokenizer and a 4-bit (q4f16) quantized Qwen3-0.6B, running on WebGPU.
const tokenizer = await AutoTokenizer.from_pretrained("onnx-community/Qwen3-0.6B-ONNX");
const model = await AutoModelForCausalLM.from_pretrained("onnx-community/Qwen3-0.6B-ONNX", { device: "webgpu", dtype: "q4f16" });
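// Note: device: "webgpu" requires a WebGPU-capable browser. A hedged fallback
// sketch, assuming the "wasm" backend with a plain "q4" dtype is an acceptable
// substitute on machines without WebGPU:
//
//   const hasWebGPU = !!navigator.gpu;
//   const model = await AutoModelForCausalLM.from_pretrained(
//       "onnx-community/Qwen3-0.6B-ONNX",
//       hasWebGPU ? { device: "webgpu", dtype: "q4f16" } : { device: "wasm", dtype: "q4" }
//   );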

export async function nameCluster(lines) {
    // Cap the prompt size: if there are more than 5 lines, sample 5 distinct
    // lines at random (rejection sampling over indices, without replacement).
    let selected = lines;
    if (lines.length > 5) {
        selected = [];
        const used = new Set();
        while (selected.length < 5) {
            const idx = Math.floor(Math.random() * lines.length);
            if (!used.has(idx)) {
                used.add(idx);
                selected.push(lines[idx]);
            }
        }
    }
    const joined = selected.join("\n");
    const messages = [
        { role: "system", content: prompt_cluster },
        { role: "user", content: `Input:\n${joined}\nOutput:` }
    ];
    // Tokenize via the chat template. enable_thinking: false makes the Qwen3
    // template emit an empty, already-closed <think></think> block, so the
    // model skips its reasoning phase and answers directly.
    const inputs = tokenizer.apply_chat_template(messages, {
        add_generation_prompt: true,
        return_dict: true,
        enable_thinking: false,
    });
    // Sample with a moderate temperature; cluster names are short, so 1024
    // new tokens is a generous ceiling.
    const outputTokens = await model.generate({
        ...inputs,
        max_new_tokens: 1024,
        do_sample: true,
        temperature: 0.6
    });
    // Decode the full sequence with special tokens kept, so the tag boundaries
    // below can be located in the text.
    let rawName = tokenizer.decode(outputTokens[0], { skip_special_tokens: false }).trim();
    const THINK_TAG = "</think>";
    const END_TAG = "<|im_end|>";
    // The answer starts after the last </think> (the template's empty think
    // block, or the model's own reasoning if it emitted one)...
    if (rawName.includes(THINK_TAG)) {
        rawName = rawName.substring(rawName.lastIndexOf(THINK_TAG) + THINK_TAG.length).trim();
    }
    // ...and ends at <|im_end|>, Qwen's end-of-turn token.
    if (rawName.includes(END_TAG)) {
        rawName = rawName.substring(0, rawName.indexOf(END_TAG)).trim();
    }
    return rawName;
}
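
// Example usage (a sketch; assumes prompt_cluster instructs the model to emit
// a short name for the given lines, and that this module runs in a browser):
//
//   const name = await nameCluster([
//       "error: connection refused",
//       "error: connection reset by peer",
//       "error: connection timed out",
//   ]);
//   console.log(name); // a short label, e.g. "Connection errors"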