import numpy as np
import matplotlib.pyplot as plt
import gradio as gr

description = """## Token Probability Distribution Explorer

This interactive tool lets you visualize how different sampling parameters reshape the probability distribution over tokens.

- **Temperature**: Controls the randomness of predictions. Higher values (e.g., 2.0) flatten the distribution toward uniform, while lower values (e.g., 0.1) concentrate it on the most likely tokens; a temperature of 0 reduces to greedy decoding.
- **Top-k**: Limits the number of most likely tokens to consider. For example, `top_k=5` means only the top 5 tokens are kept, and all others are set to zero probability.
- **Top-p (nucleus sampling)**: Keeps the smallest set of most likely tokens whose cumulative probability reaches a threshold. For instance, `top_p=0.9` means only the tokens that together account for the top 90% of the probability mass are considered.

Adjust the sliders to see how each parameter influences the token probabilities. All tokens will always have some non-zero probability in the initial distribution.
To learn more about LLM generation, check out the early release of [Hands-On Generative AI with Transformers and Diffusion Models](https://learning.oreilly.com/library/view/hands-on-generative-ai/9781098149239/).
"""

def get_initial_distribution(num_tokens=10, min_prob=1e-3, seed=42):
    np.random.seed(seed)  # For reproducibility

    # Ensure each token has at least `min_prob`
    baseline_probs = np.full(num_tokens, min_prob)
    remaining_prob = 1.0 - num_tokens * min_prob
    
    # Distribute the remaining probability randomly
    if remaining_prob > 0:
        random_probs = np.random.rand(num_tokens)
        random_probs /= np.sum(random_probs)  # Normalize to sum to 1
        token_probs = baseline_probs + remaining_prob * random_probs
    else:
        # If min_prob is too high, adjust probabilities to sum to 1
        token_probs = baseline_probs
        token_probs /= np.sum(token_probs)
    
    return token_probs
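
# Quick sanity check (illustrative, safe to remove): the returned distribution
# should sum to 1 and give every token at least `min_prob` of mass.
_probs = get_initial_distribution()
assert np.isclose(_probs.sum(), 1.0)
assert _probs.min() >= 1e-3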

def adjust_distribution(temperature, top_k, top_p, initial_probs):
    if temperature == 0:
        # Greedy sampling: pick the token with the highest probability
        max_index = np.argmax(initial_probs)
        token_probs = np.zeros_like(initial_probs)
        token_probs[max_index] = 1.0
    else:
        # Apply temperature scaling
        token_probs = np.exp(np.log(initial_probs) / temperature)
        token_probs /= np.sum(token_probs)
    
        # Apply Top-K filtering
        if top_k > 0:
            top_k_indices = np.argsort(token_probs)[-top_k:]
            top_k_probs = np.zeros_like(token_probs)
            top_k_probs[top_k_indices] = token_probs[top_k_indices]
            top_k_probs /= np.sum(top_k_probs) # Normalize after filtering
            token_probs = top_k_probs
    
        # Apply top_p (nucleus) filtering
        if top_p < 1.0:
            # Sort probabilities in descending order and compute cumulative sum
            sorted_indices = np.argsort(token_probs)[::-1]
            cumulative_probs = np.cumsum(token_probs[sorted_indices])
    
            # Find the cutoff index for nucleus sampling
            cutoff_index = np.searchsorted(cumulative_probs, top_p) + 1
    
            # Get the indices that meet the threshold
            top_p_indices = sorted_indices[:cutoff_index]
            top_p_probs = np.zeros_like(token_probs)
            top_p_probs[top_p_indices] = token_probs[top_p_indices]
            top_p_probs /= np.sum(top_p_probs) # Normalize after filtering
            token_probs = top_p_probs

    # Plot the adjusted probabilities
    num_tokens = len(token_probs)
    fig = plt.figure(figsize=(10, 6))
    plt.bar(range(num_tokens), token_probs, tick_label=[f'Token {i}' for i in range(num_tokens)])
    plt.xlabel('Tokens')
    plt.ylabel('Probability')
    plt.title('Token Probability Distribution')
    plt.ylim(0, 1)
    plt.grid(True)
    plt.tight_layout()

    return fig
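
# For reference (standalone sketch, not used by the app): dividing the
# log-probabilities by T and renormalizing, as in adjust_distribution above,
# is equivalent to a softmax over logits scaled by 1/T.
def softmax_with_temperature(logits, temperature=1.0):
    scaled = logits / temperature
    exps = np.exp(scaled - np.max(scaled))  # subtract the max for numerical stability
    return exps / exps.sum()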

initial_probs = get_initial_distribution()

def update_plot(temperature=1.0, top_k=8, top_p=0.9):
    return adjust_distribution(temperature, top_k, top_p, initial_probs)

# Generate an initial plot with default values
initial_plot = update_plot()


interface = gr.Interface(
    fn=update_plot,
    inputs=[
        gr.Slider(0, 5.0, step=0.1, value=1.0, label="Temperature"),
        gr.Slider(0, 10, step=1, value=8, label="Top-k"),
        gr.Slider(0.0, 1.0, step=0.01, value=0.9, label="Top-p"),
    ],
    outputs=gr.Plot(value=initial_plot, label="Token Probability Distribution"),
    live=True,
    title="Explore generation parameters of LLMs",
    description=description,
)

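# launch() starts a local server that blocks until the app is stopped; passing
# share=True (a standard Gradio option) would also create a temporary public link.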
interface.launch()