import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';

// Disable local models to fetch models from Hugging Face Hub
env.allowLocalModels = false;
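
// The element IDs referenced below must exist in the host page. A minimal
// markup sketch (assumed; only the IDs come from this script):
//
//   <p id="status"></p>
//   <textarea id="user-input"></textarea>
//   <button id="submit-button">Send</button>
//   <div id="output"></div>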

// Reference the DOM elements
const status = document.getElementById('status');
const userInput = document.getElementById('user-input');
const outputContainer = document.getElementById('output');
const submitButton = document.getElementById('submit-button');

// Holds the text-generation pipeline once the model has loaded
let generator;

// Function to load the model
async function loadModel() {
    try {
        status.textContent = 'Loading model...';
        
        // Load a text-generation model. Models must be published in ONNX
        // format to run with Transformers.js; a gated, full-precision
        // checkpoint like meta-llama/Llama-2-7b-chat-hf will not load in the
        // browser. 'Xenova/distilgpt2' is a small, known-compatible
        // placeholder; replace it with your own converted model.
        generator = await pipeline('text-generation', 'Xenova/distilgpt2');
        status.textContent = 'Model loaded. Ready to chat!';
    } catch (error) {
        console.error('Error loading model:', error);
        status.textContent = 'Failed to load model. Check the console for details.';
    }
}
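
// Optional: pipeline() also accepts an options object with a progress_callback,
// which can surface download progress while the weights are fetched.
// A minimal sketch (field names follow Transformers.js progress events):
//
//   generator = await pipeline('text-generation', 'Xenova/distilgpt2', {
//       progress_callback: (info) => {
//           if (info.status === 'progress') {
//               status.textContent = `Downloading ${info.file}: ${Math.round(info.progress)}%`;
//           }
//       },
//   });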

// Load the model at startup. Top-level await works here because this file is
// loaded as an ES module (e.g. <script type="module">).
await loadModel();

// Add event listener for button clicks
submitButton.addEventListener('click', async () => {
    const inputText = userInput.value.trim();

    // Guard against clicks before the model has finished loading (or failed to load)
    if (!generator) {
        outputContainer.innerText = 'The model is not loaded yet.';
        return;
    }

    if (!inputText) {
        outputContainer.innerText = 'Please enter a prompt.';
        return;
    }

    // Show the user that the model is processing
    status.textContent = 'Generating response...';

    try {
        // Generate a response using the pipeline
        const response = await generator(inputText, {
            max_new_tokens: 100, // Upper bound on the length of the reply
            do_sample: true,     // Enable sampling so temperature/top_p take effect
            temperature: 0.7,    // Controls randomness
            top_p: 0.95,         // Nucleus sampling
        });

        // Display the generated response
        outputContainer.innerText = response[0].generated_text;
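
        // Note: for decoder-only models, generated_text usually includes the
        // prompt itself. To display only the continuation, you could strip
        // the prompt prefix instead, e.g.:
        //   outputContainer.innerText = response[0].generated_text.slice(inputText.length);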
    } catch (error) {
        console.error('Error generating response:', error);
        outputContainer.innerText = 'Error generating response. Please try again.';
    } finally {
        // Reset status to indicate the model is ready again
        status.textContent = 'Model loaded. Ready to chat!';
    }
});