Add layer norm usage for Transformers.js
Relevant discussion: https://huggingface.co/nomic-ai/nomic-embed-text-v1.5/discussions/4#65cce8d0c52afc14ceac26c2
README.md
````diff
@@ -2730,7 +2730,7 @@ The model natively supports scaling of the sequence length past 2048 tokens. To
 ### Transformers.js
 
 ```js
-import { pipeline } from '@xenova/transformers';
+import { pipeline, layer_norm } from '@xenova/transformers';
 
 // Create a feature extraction pipeline
 const extractor = await pipeline('feature-extraction', 'nomic-ai/nomic-embed-text-v1.5', {
@@ -2745,8 +2745,10 @@ let embeddings = await extractor(texts, { pooling: 'mean' });
 console.log(embeddings); // Tensor of shape [2, 768]
 
 const matryoshka_dim = 512;
-embeddings = embeddings
-    .slice(null, [0, matryoshka_dim]);
+embeddings = layer_norm(embeddings, [embeddings.dims[1]])
+    .slice(null, [0, matryoshka_dim])
+    .normalize(2, -1);
+console.log(embeddings.tolist());
 ```
 
 # Join the Nomic Community
````
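For context on what the new snippet chains together: `layer_norm(embeddings, [embeddings.dims[1]])` normalizes each embedding across its full 768-dimensional hidden axis, `.slice(null, [0, matryoshka_dim])` then keeps only the first 512 dimensions of every row, and `.normalize(2, -1)` L2-normalizes the truncated vectors so they can be compared by dot product. Below is a minimal, dependency-free sketch of the same recipe for a single embedding; the helper name `resizeEmbedding` and the `eps` default are illustrative assumptions, not part of the `@xenova/transformers` API.

```js
// Illustrative sketch only: `resizeEmbedding` and `eps` are assumed names/values,
// not from the model card or library. `embedding` is a plain Array of 768 numbers.
function resizeEmbedding(embedding, matryoshkaDim, eps = 1e-5) {
  // 1) Layer norm over the full vector (no learned scale/shift)
  const mean = embedding.reduce((a, b) => a + b, 0) / embedding.length;
  const variance = embedding.reduce((a, b) => a + (b - mean) ** 2, 0) / embedding.length;
  const std = Math.sqrt(variance + eps);
  const normed = embedding.map((x) => (x - mean) / std);

  // 2) Truncate to the first `matryoshkaDim` dimensions
  const truncated = normed.slice(0, matryoshkaDim);

  // 3) L2-normalize the truncated vector so dot product equals cosine similarity
  const l2 = Math.sqrt(truncated.reduce((a, b) => a + b * b, 0));
  return truncated.map((x) => x / l2);
}

// Example: shrink a 768-dim embedding to 512 dims
// const resized = resizeEmbedding(fullEmbedding, 512);
```

The ordering is the point of the fix: layer norm is applied to the full 768-dimensional vector before any dimensions are dropped, and only then is the truncated result re-normalized.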