zpn and Xenova (HF staff) committed on
Commit 7a5549b
1 Parent(s): 1c45008

Add layer norm usage for Transformers.js (#11)


- Add layer norm usage for Transformers.js (0351d4f49f2b7816a4cbbe488dc3d4aaab8296c9)


Co-authored-by: Joshua <[email protected]>

Files changed (1)
  1. README.md +5 -3
README.md CHANGED

@@ -2730,7 +2730,7 @@ The model natively supports scaling of the sequence length past 2048 tokens. To
 ### Transformers.js
 
 ```js
-import { pipeline } from '@xenova/transformers';
+import { pipeline, layer_norm } from '@xenova/transformers';
 
 // Create a feature extraction pipeline
 const extractor = await pipeline('feature-extraction', 'nomic-ai/nomic-embed-text-v1.5', {
@@ -2745,8 +2745,10 @@ let embeddings = await extractor(texts, { pooling: 'mean' });
 console.log(embeddings); // Tensor of shape [2, 768]
 
 const matryoshka_dim = 512;
-embeddings = embeddings.slice(null, [0, matryoshka_dim]).normalize(2, -1);
-console.log(embeddings); // Tensor of shape [2, 512]
+embeddings = layer_norm(embeddings, [embeddings.dims[1]])
+    .slice(null, [0, matryoshka_dim])
+    .normalize(2, -1);
+console.log(embeddings.tolist());
 ```
 
 # Join the Nomic Community
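
For reference, below is a minimal sketch of what the updated Transformers.js snippet looks like once this commit is applied, with the surrounding context the hunks above omit. The example input texts and the `quantized: false` option are illustrative assumptions and are not part of this diff; only the import, pooling call, and the `layer_norm` → `slice` → `normalize` chain come from the changed lines.

```js
// Sketch of the full snippet after this commit (assumed context marked in comments).
import { pipeline, layer_norm } from '@xenova/transformers';

// Create a feature extraction pipeline
const extractor = await pipeline('feature-extraction', 'nomic-ai/nomic-embed-text-v1.5', {
    quantized: false, // assumption: option not shown in the hunks above
});

// Illustrative inputs; the diff only shows that a `texts` array is passed to the extractor
const texts = ['search_query: What is TSNE?', 'search_query: Who is Laurens van der Maaten?'];

// Compute mean-pooled sentence embeddings
let embeddings = await extractor(texts, { pooling: 'mean' });
console.log(embeddings); // Tensor of shape [2, 768]

// Matryoshka truncation: layer-normalize the full embedding, slice to the
// target dimension, then L2-normalize the truncated vectors
const matryoshka_dim = 512;
embeddings = layer_norm(embeddings, [embeddings.dims[1]])
    .slice(null, [0, matryoshka_dim])
    .normalize(2, -1);
console.log(embeddings.tolist());
```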