File size: 4,952 Bytes
5861145
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import os
import re
import io
import requests
from zipfile import ZipFile
from tqdm import tqdm
import chess.pgn as pgn
import pandas as pd
from datasets import Dataset, DatasetInfo

# Register tqdm's .progress_apply/.progress_map methods on pandas objects.
tqdm.pandas()

# Monthly archives of the Lichess Elite database (nikonoel.fr mirror),
# one ZIP per month of 2023; each expands to a .pgn file of games.
ZIP_URLS = [
    "https://database.nikonoel.fr/lichess_elite_2023-01.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-02.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-03.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-04.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-05.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-06.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-07.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-08.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-09.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-10.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-11.zip",
    "https://database.nikonoel.fr/lichess_elite_2023-12.zip",
]


def download_and_unzip(url, save_directory, force_download=False):
    """Download a ZIP archive from *url* into *save_directory* and extract it.

    The download is skipped when the archive already exists locally unless
    ``force_download`` is True. The archive is always (re-)extracted, so a
    previously downloaded file still gets unzipped.
    """
    # Derive the local archive name from the last URL path segment.
    filename = url.split("/")[-1]
    file_path = os.path.join(save_directory, filename)

    if force_download or not os.path.exists(file_path):
        print(f"Downloading {filename}...")
        # Stream the body so multi-hundred-MB archives are not held fully in
        # memory, and fail loudly on HTTP errors instead of saving an error
        # page as a corrupt .zip.
        response = requests.get(url, stream=True)
        response.raise_for_status()
        with open(file_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=1 << 20):
                file.write(chunk)
        print(f"Downloaded {filename}.")
    else:
        print(f"{filename} already exists. Skipping download.")

    # Unzip the file.
    with ZipFile(file_path, "r") as zip_ref:
        print(f"Unzipping {filename}...")
        zip_ref.extractall(save_directory)
        print(f"Unzipped {filename}.")


def parse_pgn_dataset_to_dataframe(pgn_file_path):
    """Parse a multi-game PGN file into a pandas DataFrame.

    Each game becomes one row: one column per PGN header tag (Event, Result,
    ...) plus a ``transcript`` column holding the movetext joined into a
    single space-separated string. Blank lines and lines that start with a
    game result ("1-0", "0-1", "1/2-1/2") are excluded from the transcript.
    """
    # Matches PGN header lines such as: [Event "Rated Rapid game"]
    header_pattern = re.compile(r"\[([A-Za-z0-9]+) \"(.+?)\"\]")

    games_list = []
    current_game = {}
    transcript = []

    # Open explicitly as UTF-8 so parsing does not depend on the platform's
    # locale default encoding (which could raise UnicodeDecodeError before
    # the ASCII-stripping below ever runs); undecodable bytes are dropped.
    with open(pgn_file_path, "r", encoding="utf-8", errors="ignore") as file:
        for line in file:
            # Strip remaining non-ASCII characters (e.g. in player names).
            line = line.encode("utf-8").decode("ascii", "ignore")
            header_match = header_pattern.match(line)
            if header_match:
                # An "Event" header marks the start of a new game: flush the
                # previous game before collecting the new one's headers.
                if header_match.group(1) == "Event" and current_game:
                    current_game["transcript"] = " ".join(transcript).strip()
                    games_list.append(current_game)
                    current_game = {}
                    transcript = []
                current_game[header_match.group(1)] = header_match.group(2)
            else:
                # Accumulate movetext, skipping blanks and result-only lines.
                clean_line = line.strip()
                if (
                    clean_line
                    and not clean_line.startswith("1-0")
                    and not clean_line.startswith("1/2-1/2")
                    and not clean_line.startswith("0-1")
                ):
                    transcript.append(clean_line)

        # Flush the final game (no trailing "Event" header triggers it).
        if current_game:
            current_game["transcript"] = " ".join(transcript).strip()
            games_list.append(current_game)

    return pd.DataFrame(games_list)


def pgn_to_uci_transcript(pgn_transcript):
    """Convert PGN movetext into a space-separated string of UCI moves.

    Returns None when the transcript cannot be parsed as a game.
    """
    parsed = pgn.read_game(io.StringIO(pgn_transcript))
    if parsed is None:
        return

    # Replay the mainline on a board; UCI notation for each move must be
    # produced from the position *before* that move is pushed.
    position = parsed.board()
    uci_moves = []
    for mv in parsed.mainline_moves():
        uci_moves.append(position.uci(mv))
        position.push(mv)

    return " ".join(uci_moves)


if __name__ == "__main__":
    save_directory = "."

    # exist_ok avoids the check-then-create race of the old exists() guard.
    os.makedirs(save_directory, exist_ok=True)

    for url in ZIP_URLS:
        download_and_unzip(url, save_directory)

    pgn_files = [file for file in os.listdir(
        save_directory) if file.endswith(".pgn")]

    file_dfs = []

    for pgn_file in pgn_files:
        print(f"Parsing PGN from: {pgn_file}")
        df = parse_pgn_dataset_to_dataframe(pgn_file)
        # Keep only rapid games.
        df = df[df["EventType"] == "rapid"]
        file_dfs.append(df)

    # ignore_index restores a default RangeIndex across the monthly frames.
    df = pd.concat(file_dfs, ignore_index=True)

    # Drop unresolved games ("*") BEFORE the expensive SAN->UCI conversion so
    # no work is spent on rows we are about to discard; reset_index keeps the
    # index feather-compatible after the boolean filter.
    df = df[df["Result"] != "*"].reset_index(drop=True)

    # Cast all-numeric string columns (Elo ratings, ply counts, ...) to int.
    # fillna(False) keeps columns with missing values as strings instead of
    # letting NaN slip through .all() and crash astype(int).
    for column in df.columns:
        if df[column].str.isnumeric().fillna(False).all():
            df[column] = df[column].astype(int)

    df["transcript"] = df["transcript"].progress_apply(pgn_to_uci_transcript)

    # Feather cannot serialize a non-default index; the ignore_index /
    # reset_index calls above guarantee a RangeIndex here.
    df.to_feather("elite_dataset.feather")
    ds_info = DatasetInfo(
        description="The Lichess Elite Dataset includes all (rapid) games from Lichess by players rated 2500+ against players rated 2300+ played during the year 2023. Only games with an outcome of 1/2-1/2, 1-0, or 0-1 are included."
    )
    ds = Dataset.from_pandas(df, info=ds_info)
    ds.push_to_hub("austindavis/chess_world_lichess_elite")