Transformers.js

#3 opened by ninini1

Hello,

Thank you for your work. Do you think you could make the model usable in JavaScript with Transformers.js, or in any other way?

    <script type="module">
        // Transformers.js from the jsDelivr CDN; pinning a specific version of
        // @xenova/transformers is recommended.
        import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

        document.getElementById('runModel').addEventListener('click', async () => {
            const fileInput = document.getElementById('imageInput');
            if (fileInput.files.length === 0) {
                alert('Please select an image.');
                return;
            }

            // Hand the selected file to the pipeline as an object URL.
            const imageFile = fileInput.files[0];
            const imageUrl = URL.createObjectURL(imageFile);

            const classifier = await pipeline('image-classification', 'umm-maybe/AI-image-detector');

            const results = await classifier(imageUrl);

            document.getElementById('outputText').innerText = `Result: ${JSON.stringify(results, null, 2)}`;
        });
    </script>

Results in

Uncaught (in promise) Error: Could not locate file: "https://huggingface.co/umm-maybe/AI-image-detector/resolve/main/onnx/model_quantized.onnx".
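From the URL in the error, Transformers.js is looking for ONNX weights under an `onnx/` folder in the repo (by default `onnx/model_quantized.onnx`), which don't exist there yet. As a rough sketch of a workaround, assuming I first convert the model to ONNX myself (for example with Hugging Face Optimum) and serve the files next to the page, I could point the pipeline at local files like this; `./models/` and `./test.jpg` are placeholder paths:

    <script type="module">
        import { env, pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

        // Look for model files on this server instead of the Hugging Face Hub.
        // Expected layout: ./models/umm-maybe/AI-image-detector/{config.json, preprocessor_config.json, onnx/model.onnx}
        env.allowRemoteModels = false;
        env.localModelPath = './models/';

        // quantized: false makes the pipeline load onnx/model.onnx instead of onnx/model_quantized.onnx.
        const classifier = await pipeline('image-classification', 'umm-maybe/AI-image-detector', { quantized: false });

        const results = await classifier('./test.jpg'); // placeholder image path
        console.log(results);
    </script>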

Thank you!

By the way, I tried to generate an ONNX file from your network, but it gives me this error when I run it:

async function runExample() {
  // Create an ONNX.js inference session with the WebGL backend.
  const session = new onnx.InferenceSession({ backendHint: 'webgl' });

  // Load the exported ONNX model, which takes a 1*3*224*224 image and classifies it.
  await session.loadModel("./models/AI-image-detector.onnx");

  // Load the image (ImageLoader, preprocess and printMatches are helpers defined elsewhere on the page).
  const imageSize = 224;
  const imageLoader = new ImageLoader(imageSize, imageSize);
  const imageData = await imageLoader.getImageData('./resnet-cat.jpg');

  // Preprocess the image data to match the 1*3*224*224 input dimension requirement.
  const width = imageSize;
  const height = imageSize;
  const preprocessedData = preprocess(imageData.data, width, height);

  const inputTensor = new onnx.Tensor(preprocessedData, 'float32', [1, 3, width, height]);
  // Run the model with the input tensor and get the result.
  const outputMap = await session.run([inputTensor]);
  const outputData = outputMap.values().next().value.data;

  // Render the output result in HTML.
  printMatches(outputData);
}

Uncaught (in promise) TypeError: int64 is not supported
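From what I understand, this error comes from the old ONNX.js library, whose WebGL backend cannot handle the int64 tensors that PyTorch ONNX exports usually contain. A rough sketch of a workaround, assuming the exported file itself is fine, would be to switch to onnxruntime-web and its wasm backend; preprocessedData here is the same Float32Array as above, and the model path is a placeholder:

    <script src="https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.min.js"></script>
    <script>
        async function runWithOrt(preprocessedData) {
            // The wasm execution provider supports int64, unlike the ONNX.js WebGL backend.
            const session = await ort.InferenceSession.create('./models/AI-image-detector.onnx', {
                executionProviders: ['wasm'],
            });

            // preprocessedData: Float32Array of length 1*3*224*224 from the preprocess() helper above.
            const inputTensor = new ort.Tensor('float32', preprocessedData, [1, 3, 224, 224]);

            // Feed the tensor under the model's actual input name and read the first output.
            const outputMap = await session.run({ [session.inputNames[0]]: inputTensor });
            return outputMap[session.outputNames[0]].data;
        }
    </script>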
