jrsimuix committed on
Commit
86f89a6
1 Parent(s): 102748f

Experiment and look at fine-tuning with MS Florence.

Browse files
Files changed (1) hide show
  1. nodejs/customVision.js +85 -0
nodejs/customVision.js ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ AutoModelForImageClassification,
3
+ AutoProcessor,
4
+ AutoTokenizer,
5
+ env,
6
+ RawImage,
7
+ } from '@huggingface/transformers';
8
+
9
+ // Configure environment
10
+ env.localModelPath = './'; // Path to your ONNX model
11
+ env.allowRemoteModels = false; // Disable remote models
12
+
13
/**
 * Detect whether the current WebGPU adapter supports half-precision shaders.
 *
 * Uses optional chaining instead of relying on a thrown TypeError when
 * `navigator.gpu` is missing or `requestAdapter()` resolves to null; the
 * try/catch remains only as a safety net (e.g. `navigator` undefined in
 * non-browser contexts, or a rejected adapter request).
 *
 * @returns {Promise<boolean>} true iff a GPU adapter exposes 'shader-f16'.
 */
async function hasFp16() {
  try {
    const adapter = await navigator.gpu?.requestAdapter();
    return adapter?.features.has('shader-f16') ?? false;
  } catch {
    // No WebGPU available at all — treat as no fp16 support.
    return false;
  }
}
21
+
22
class CustomModelSingleton {
  // Directory (under env.localModelPath) holding the custom ONNX model.
  static model_id = 'saved-model/';

  /**
   * Lazily load and cache the processor, tokenizer and model.
   *
   * The in-flight promises — not the resolved values — are cached. The
   * original `this.x ??= await ...` pattern assigned only after the await
   * completed, so overlapping calls all observed `undefined` and each
   * started a duplicate load. Caching the promise makes concurrent callers
   * share a single load, and lets the three loads proceed in parallel.
   *
   * @param {Function|null} progress_callback - forwarded to model loading
   *   so the caller can report download/initialization progress.
   * @returns {Promise<[object, object, object]>} resolves to
   *   [model, tokenizer, processor].
   */
  static async getInstance(progress_callback = null) {
    this.processor ??= AutoProcessor.from_pretrained(this.model_id);
    this.tokenizer ??= AutoTokenizer.from_pretrained(this.model_id);
    this.supports_fp16 ??= hasFp16();

    this.model ??= (async () =>
      AutoModelForImageClassification.from_pretrained(this.model_id, {
        // fp16 halves memory/bandwidth when the adapter supports shader-f16.
        dtype: (await this.supports_fp16) ? 'fp16' : 'fp32',
        device: 'webgpu', // Change as per your hardware
        progress_callback,
      }))();

    // Promise.all unwraps the cached promises into the resolved instances,
    // preserving the original [model, tokenizer, processor] return shape.
    return Promise.all([this.model, this.tokenizer, this.processor]);
  }
}
39
+
40
/**
 * Initialize the model inside the worker and report status to the page.
 *
 * Posts { status: 'loading' }, forwards per-file progress events from the
 * singleton, then posts { status: 'ready' } — or { status: 'error' } if
 * initialization throws, instead of failing silently and leaving the page
 * waiting forever (the original had no error path).
 */
async function load() {
  self.postMessage({
    status: 'loading',
    data: 'Loading custom model...',
  });

  try {
    // Warm the singleton cache; run() re-fetches the instances itself, so
    // the return value is intentionally discarded here (the original bound
    // three locals it never used).
    await CustomModelSingleton.getInstance((x) => {
      // Forward download/initialization progress events to the main thread.
      self.postMessage(x);
    });

    self.postMessage({
      status: 'ready',
      data: 'Model loaded successfully.',
    });
  } catch (err) {
    self.postMessage({ status: 'error', data: String(err) });
  }
}
55
+
56
/**
 * Classify one image and post the result back to the main thread.
 *
 * @param {{ imagePath: string, task?: string }} request - `task` is kept in
 *   the signature for caller compatibility but is currently unused.
 */
async function run({ imagePath, task }) {
  try {
    // Tokenizer is not needed for image classification; skip it.
    const [model, , processor] = await CustomModelSingleton.getInstance();

    // Read and preprocess image
    const image = await RawImage.fromURL(imagePath); // Or use fromBlob if required
    const vision_inputs = await processor(image);

    // Run inference. transformers.js models are invoked as functions; the
    // original called `model.predict(...)`, which is not part of the
    // @huggingface/transformers API and would throw at runtime.
    const results = await model(vision_inputs);

    self.postMessage({ status: 'complete', result: results });
  } catch (err) {
    // Report failures (bad path, decode error, inference error) to the page
    // instead of dying as an unhandled rejection.
    self.postMessage({ status: 'error', data: String(err) });
  }
}
68
+
69
// Worker message protocol: { type: 'load' } initializes the model;
// { type: 'run', data: { imagePath } } classifies one image;
// { type: 'reset' } is accepted but is a no-op (see below).
self.addEventListener('message', async (e) => {
  const { type, data } = e.data;

  try {
    switch (type) {
      case 'load':
        // Await so rejections are caught here rather than left as floating
        // promises (the original fired load()/run() and ignored failures).
        await load();
        break;

      case 'run':
        await run(data);
        break;

      case 'reset':
        // The original assigned `vision_inputs = null`, but `vision_inputs`
        // only exists as a local inside run(); in a strict-mode ES module
        // that assignment throws a ReferenceError. There is no module-level
        // per-request state to clear, so reset is intentionally a no-op.
        break;
    }
  } catch (err) {
    self.postMessage({ status: 'error', data: String(err) });
  }
});