import {
  AutoModelForImageClassification,
  AutoProcessor,
  AutoTokenizer,
  env,
  RawImage,
} from '@huggingface/transformers';

// Load model files from the local path only; never fetch from the Hugging Face Hub.
env.localModelPath = './';
env.allowRemoteModels = false;

// fp16 inference requires the WebGPU adapter to expose the 'shader-f16' feature.
async function hasFp16() {
  try {
    const adapter = await navigator.gpu.requestAdapter();
    return adapter?.features.has('shader-f16') ?? false;
  } catch (e) {
    return false;
  }
}

// Lazily creates and caches a single model/tokenizer/processor triple so that
// repeated calls reuse the same instances.
class CustomModelSingleton {
  static model_id = 'saved-model/';

  static async getInstance(progress_callback = null) {
    this.processor ??= await AutoProcessor.from_pretrained(this.model_id);
    this.tokenizer ??= await AutoTokenizer.from_pretrained(this.model_id);

    // Prefer fp16 weights when the GPU supports them; otherwise fall back to fp32.
    this.supports_fp16 ??= await hasFp16();
    this.model ??= await AutoModelForImageClassification.from_pretrained(this.model_id, {
      dtype: this.supports_fp16 ? 'fp16' : 'fp32',
      device: 'webgpu',
      progress_callback,
    });

    return Promise.all([this.model, this.tokenizer, this.processor]);
  }
}

async function load() {
  self.postMessage({
    status: 'loading',
    data: 'Loading custom model...',
  });

  // Warm the singleton cache, forwarding progress events to the main thread.
  await CustomModelSingleton.getInstance((x) => {
    self.postMessage(x);
  });

  self.postMessage({
    status: 'ready',
    data: 'Model loaded successfully.',
  });
}

async function run({ imagePath, task }) {
  const [model, tokenizer, processor] = await CustomModelSingleton.getInstance();

  // Fetch the image and convert it into the tensor inputs the model expects.
  const image = await RawImage.fromURL(imagePath);
  const vision_inputs = await processor(image);

  // Transformers.js models are invoked directly; they have no .predict() method.
  const results = await model(vision_inputs);

  self.postMessage({ status: 'complete', result: results });
}
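
// A sketch of optional post-processing (not wired into run() above): pick the
// highest-scoring class from the raw logits and map it to a name via the model
// config's id2label table. The helper name `topLabel` is illustrative, e.g.
// topLabel(results.logits, model.config).
function topLabel(logits, config) {
  const scores = logits.data;
  let best = 0;
  for (let i = 1; i < scores.length; ++i) {
    if (scores[i] > scores[best]) best = i;
  }
  return config.id2label[best];
}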

self.addEventListener('message', async (e) => {
  const { type, data } = e.data;

  switch (type) {
    case 'load':
      load();
      break;

    case 'run':
      run(data);
      break;

    case 'reset':
      // Drop the cached instances so the next 'load' reinitializes them.
      CustomModelSingleton.model = null;
      CustomModelSingleton.tokenizer = null;
      CustomModelSingleton.processor = null;
      break;
  }
});
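
// Example main-thread wiring (a sketch; assumes this script is served as
// `worker.js` and that `./saved-model/` holds the converted weights):
//
//   const worker = new Worker('./worker.js', { type: 'module' });
//   worker.addEventListener('message', (e) => console.log(e.data));
//   worker.postMessage({ type: 'load' });
//   worker.postMessage({ type: 'run', data: { imagePath: './example.jpg', task: 'image-classification' } });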