inference_api | model_name | prompt_format | custom_prompt | timestamp | easy_count | easy_execution_accuracy | medium_count | medium_execution_accuracy | hard_count | hard_execution_accuracy | duckdb_count | duckdb_execution_accuracy | ddl_count | ddl_execution_accuracy | all_count | all_execution_accuracy |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
openrouter | qwen/qwen-2.5-72b-instruct | duckdbinst | 2024-10-25T19:49:45.219766 | 5 | 0.6 | 14 | 0.785714 | 6 | 0.166667 | 48 | 0.104167 | 2 | 1 | 75 | 0.293333 |
|
openrouter | qwen/qwen-2.5-72b-instruct | duckdbinstgraniteshort | 2024-10-25T19:57:44.353714 | 5 | 0.6 | 14 | 0.857143 | 6 | 0.166667 | 48 | 0.604167 | 2 | 1 | 75 | 0.626667 |
|
openrouter | motherduckdb/duckdb-nsql-7b | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.571 | 6 | 0.333 | 48 | 0.562 | 2 | 0.5 | 75 | 0.547 |
|
openrouter | meta-llama/Meta-Llama-3-8B-Instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.714 | 6 | 0 | 48 | 0.167 | 2 | 1 | 75 | 0.307 |
|
openrouter | meta-llama/llama-3-70b-instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.929 | 6 | 0 | 48 | 0.271 | 2 | 0.5 | 75 | 0.4 |
|
openrouter | meta-llama/Meta-Llama-3.1-70B | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.4 | 14 | 0.786 | 6 | 0 | 48 | 0.438 | 2 | 1 | 75 | 0.48 |
|
openrouter | meta-llama/CodeLlama-7b-Instruct-hf | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.4 | 14 | 0.286 | 6 | 0.167 | 48 | 0.104 | 2 | 0.5 | 75 | 0.173 |
|
openrouter | meta-llama/CodeLlama-13b-Instruct-hf | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.429 | 6 | 0.167 | 48 | 0.271 | 2 | 1 | 75 | 0.347 |
|
openrouter | meta-llama/CodeLlama-34b-Instruct-hf | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.571 | 6 | 0.167 | 48 | 0.333 | 2 | 0.5 | 75 | 0.387 |
|
openrouter | meta-llama/CodeLlama-70b-Instruct-hf | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.714 | 6 | 0.5 | 48 | 0.375 | 2 | 1 | 75 | 0.48 |
|
openrouter | mattshumer/reflection-70b | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.5 | 6 | 0.333 | 48 | 0.312 | 2 | 0.5 | 75 | 0.373 |
|
openrouter | phind/phind-codellama-34b | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.857 | 6 | 0.167 | 48 | 0.375 | 2 | 1 | 75 | 0.48 |
|
openrouter | deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.2 | 14 | 0.357 | 6 | 0.167 | 48 | 0.312 | 2 | 1 | 75 | 0.32 |
|
openrouter | deepseek/deepseek-coder-V2-Instruct-236B | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.786 | 6 | 0.167 | 48 | 0.542 | 2 | 1 | 75 | 0.587 |
|
openrouter | mistralai/mamba-codestral-7B-v0.1 | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.786 | 6 | 0.167 | 48 | 0.458 | 2 | 1 | 75 | 0.52 |
|
openrouter | mistralai/Codestral-22B-v0.1 | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.714 | 6 | 0.333 | 48 | 0.5 | 2 | 1 | 75 | 0.547 |
|
openrouter | bigcode/starcoder2-15b | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0 | 14 | 0 | 6 | 0 | 48 | 0 | 2 | 0 | 75 | 0 |
|
openrouter | bigcode/starcoder2-15b | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.2 | 14 | 0.571 | 6 | 0 | 48 | 0.083 | 2 | 1 | 75 | 0.2 |
|
openrouter | microsoft/Phi-3-mini-128k-instruct-finetune | duckdbinstphiazure | 2024-10-25T13:56:52.185852 | 5 | 0.4 | 14 | 0.214 | 6 | 0 | 48 | 0.25 | 2 | 1 | 75 | 0.253 |
|
openrouter | microsoft/Phi-3-mini-128k-instruct | duckdbinstphi | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0.167 | 48 | 0.396 | 2 | 1 | 75 | 0.453 |
|
openrouter | microsoft/Phi-3-medium-128k-instruct-finetune | duckdbinstphiazure | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0.333 | 48 | 0.5 | 2 | 0.5 | 75 | 0.52 |
|
openrouter | microsoft/Phi-3-medium-128k-instruct | duckdbinstphi | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.714 | 6 | 0.333 | 48 | 0.354 | 2 | 1 | 75 | 0.467 |
|
openrouter | google/gemma-2-27b-it | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.786 | 6 | 0.167 | 48 | 0.5 | 2 | 1 | 75 | 0.547 |
|
openrouter | google/gemma-2 | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.214 | 6 | 0.167 | 48 | 0.188 | 2 | 0.5 | 75 | 0.227 |
|
openrouter | google/codegemma-1.1-2b | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0 | 14 | 0.357 | 6 | 0.167 | 48 | 0.042 | 2 | 0.5 | 75 | 0.12 |
|
openrouter | google/codegemma-1.1-2b | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.2 | 14 | 0.214 | 6 | 0 | 48 | 0.083 | 2 | 0.5 | 75 | 0.12 |
|
openrouter | google/codegemma-1.1-7b-it | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0.167 | 48 | 0.333 | 2 | 1 | 75 | 0.413 |
|
openrouter | google/codegemma-1.1-7b-it | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.643 | 6 | 0.167 | 48 | 0.479 | 2 | 1 | 75 | 0.52 |
|
openrouter | google/codegemma-1.1-7b | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.5 | 6 | 0 | 48 | 0.021 | 2 | 0 | 75 | 0.147 |
|
openrouter | ibm-granite/granite-3b-code-instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.429 | 6 | 0.167 | 48 | 0.271 | 2 | 1 | 75 | 0.347 |
|
openrouter | ibm-granite/granite-3b-code-base | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.429 | 6 | 0 | 48 | 0.333 | 2 | 1 | 75 | 0.373 |
|
openrouter | ibm-granite/granite-3b-code-base | duckdbinstgranite | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.429 | 6 | 0 | 48 | 0.292 | 2 | 1 | 75 | 0.333 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.5 | 6 | 0 | 48 | 0.292 | 2 | 1 | 75 | 0.347 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B | duckdbinstllamashort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0.333 | 48 | 0.333 | 2 | 0.5 | 75 | 0.413 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base | duckdbinstllamashort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.5 | 6 | 0 | 48 | 0.208 | 2 | 1 | 75 | 0.293 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base-finetune3-checkpoint-0 | duckdbinstllamasyntax | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.571 | 6 | 0 | 48 | 0.521 | 2 | 1 | 75 | 0.507 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base-finetune3-checkpoint-1 | duckdbinstllamasyntax | 2024-10-25T13:56:52.185852 | 5 | 0.2 | 14 | 0.643 | 6 | 0.167 | 48 | 0.542 | 2 | 0.5 | 75 | 0.507 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base-finetune3-checkpoint-2 | duckdbinstllamasyntax | 2024-10-25T13:56:52.185852 | 5 | 0.2 | 14 | 0.714 | 6 | 0.167 | 48 | 0.479 | 2 | 1 | 75 | 0.493 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base-finetune2-checkpoint-0 | duckdbinstllamabasic | 2024-10-25T13:56:52.185852 | 5 | 0.4 | 14 | 0.571 | 6 | 0 | 48 | 0.479 | 2 | 1 | 75 | 0.467 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base-finetune2-checkpoint-1 | duckdbinstllamabasic | 2024-10-25T13:56:52.185852 | 5 | 0.4 | 14 | 0.714 | 6 | 0 | 48 | 0.521 | 2 | 0.5 | 75 | 0.507 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base-finetune2-checkpoint-2 | duckdbinstllamabasic | 2024-10-25T13:56:52.185852 | 5 | 0.4 | 14 | 0.571 | 6 | 0.167 | 48 | 0.438 | 2 | 1 | 75 | 0.453 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base-finetune2-checkpoint-0 | duckdbinstllamasyntax | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.429 | 6 | 0 | 48 | 0.521 | 2 | 1 | 75 | 0.48 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base-finetune2-checkpoint-1 | duckdbinstllamasyntax | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0 | 48 | 0.583 | 2 | 1 | 75 | 0.56 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base-finetune2-checkpoint-2 | duckdbinstllamasyntax | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0.167 | 48 | 0.542 | 2 | 1 | 75 | 0.547 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.2 | 14 | 0.357 | 6 | 0.167 | 48 | 0.188 | 2 | 0.5 | 75 | 0.227 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base | duckdbinstllamashort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.571 | 6 | 0.167 | 48 | 0.229 | 2 | 0.5 | 75 | 0.36 |
|
openrouter | meta-llama/Meta-Llama-3.1-8B-Base | duckdbinstllama | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0 | 48 | 0.271 | 2 | 1 | 75 | 0.36 |
|
openrouter | ibm-granite/granite-8b-code-instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.714 | 6 | 0 | 48 | 0.333 | 2 | 1 | 75 | 0.413 |
|
openrouter | ibm-granite/granite-8b-code-instruct | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.643 | 6 | 0 | 48 | 0.333 | 2 | 1 | 75 | 0.413 |
|
openrouter | ibm-granite/granite-8b-code-instruct | duckdbinstgranite | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.571 | 6 | 0 | 48 | 0.417 | 2 | 1 | 75 | 0.453 |
|
openrouter | ibm-granite/granite-8b-code-base | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.643 | 6 | 0 | 48 | 0.333 | 2 | 1 | 75 | 0.413 |
|
openrouter | ibm-granite/granite-8b-code-base | duckdbinstgranite | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0.167 | 48 | 0.396 | 2 | 1 | 75 | 0.453 |
|
openrouter | ibm-granite/granite-20b-code-instruct-finetune-0 | duckdbinstgranite | 2024-10-25T13:56:52.185852 | 5 | 0.4 | 14 | 0.643 | 6 | 0 | 48 | 0.562 | 2 | 0.5 | 75 | 0.52 |
|
openrouter | ibm-granite/granite-20b-code-instruct-finetune-0 | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.5 | 6 | 0.167 | 48 | 0.562 | 2 | 0.5 | 75 | 0.52 |
|
openrouter | ibm-granite/granite-20b-code-instruct-finetune-1 | duckdbinstgranite | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.5 | 6 | 0 | 48 | 0.542 | 2 | 0.5 | 75 | 0.493 |
|
openrouter | ibm-granite/granite-20b-code-instruct-finetune-1 | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.571 | 6 | 0 | 48 | 0.604 | 2 | 0.5 | 75 | 0.547 |
|
openrouter | ibm-granite/granite-20b-code-instruct-finetune-2 | duckdbinstgranite | 2024-10-25T13:56:52.185852 | 5 | 0.2 | 14 | 0.5 | 6 | 0 | 48 | 0.542 | 2 | 1 | 75 | 0.48 |
|
openrouter | ibm-granite/granite-20b-code-instruct-finetune-2 | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.4 | 14 | 0.643 | 6 | 0.167 | 48 | 0.583 | 2 | 0.5 | 75 | 0.547 |
|
openrouter | ibm-granite/granite-20b-code-instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.2 | 14 | 0.571 | 6 | 0.167 | 48 | 0.417 | 2 | 1 | 75 | 0.427 |
|
openrouter | ibm-granite/granite-20b-code-instruct | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.643 | 6 | 0.167 | 48 | 0.396 | 2 | 1 | 75 | 0.467 |
|
openrouter | ibm-granite/granite-20b-code-base | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.786 | 6 | 0.333 | 48 | 0.438 | 2 | 1 | 75 | 0.52 |
|
openrouter | ibm-granite/granite-20b-code-base | duckdbinstgranite | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.571 | 6 | 0.167 | 48 | 0.292 | 2 | 1 | 75 | 0.373 |
|
openrouter | ibm-granite/granite-34b-code-instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.2 | 14 | 0.571 | 6 | 0.167 | 48 | 0.333 | 2 | 0.5 | 75 | 0.36 |
|
openrouter | nvidia/nemotron-4-340b-instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.4 | 14 | 0.786 | 6 | 0 | 48 | 0.354 | 2 | 1 | 75 | 0.427 |
|
openrouter | Qwen/Qwen2.5-Coder-1.5B-Instruct | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.5 | 6 | 0.167 | 48 | 0.25 | 2 | 1 | 75 | 0.333 |
|
openrouter | Qwen/Qwen2.5-Coder-7B | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.786 | 6 | 0.5 | 48 | 0.354 | 2 | 1 | 75 | 0.493 |
|
openrouter | Qwen/Qwen2.5-Coder-7B | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.786 | 6 | 0.333 | 48 | 0.521 | 2 | 1 | 75 | 0.587 |
|
openrouter | Qwen/Qwen2.5-Coder-7B-Instruct | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.857 | 6 | 0.333 | 48 | 0.479 | 2 | 1 | 75 | 0.573 |
|
openrouter | qwen/qwen-2.5-72b-instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.786 | 6 | 0.167 | 48 | 0.146 | 2 | 1 | 75 | 0.32 |
|
openrouter | qwen/qwen-2.5-72b-instruct | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.786 | 6 | 0.167 | 48 | 0.625 | 2 | 1 | 75 | 0.627 |
|
openrouter | meta-llama/Meta-Llama-3.1-405B | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.714 | 6 | 0.167 | 48 | 0.583 | 2 | 1 | 75 | 0.587 |
|
openrouter | meta-llama/Meta-Llama-3.1-405B | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.714 | 6 | 0.167 | 48 | 0.583 | 2 | 1 | 75 | 0.587 |
|
openrouter | google/gemini-1.5-pro | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.786 | 6 | 0 | 48 | 0.542 | 2 | 1 | 75 | 0.56 |
|
openrouter | anthropic/claude-3.5-sonnet | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.714 | 6 | 0.167 | 48 | 0.646 | 2 | 1 | 75 | 0.627 |
|
openrouter | anthropic/claude-3.5-sonnet | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0.333 | 48 | 0.646 | 2 | 1 | 75 | 0.627 |
|
openrouter | openai/gpt-4o | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.857 | 6 | 0.333 | 48 | 0.562 | 2 | 1 | 75 | 0.613 |
|
openrouter | openai/o1-preview | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0.5 | 48 | 0.75 | 2 | 1 | 75 | 0.707 |
|
openrouter | openai/o1-preview | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.714 | 6 | 0.5 | 48 | 0.667 | 2 | 1 | 75 | 0.667 |
|
openrouter | openai/o1-mini | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.714 | 6 | 0.333 | 48 | 0.521 | 2 | 1 | 75 | 0.56 |
|
openrouter | openai/gpt-4o-2024-08-06 | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.857 | 6 | 0.333 | 48 | 0.583 | 2 | 1 | 75 | 0.627 |
|
openrouter | openai/gpt-4o-2024-08-06 | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.857 | 6 | 0.167 | 48 | 0.625 | 2 | 1 | 75 | 0.64 |
|
openrouter | openai/gpt-4o-mini-2024-07-18 | duckdbinstgraniteshort | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.929 | 6 | 0.333 | 48 | 0.5 | 2 | 1 | 75 | 0.587 |
|
openrouter | openai/gpt-4o-mini-2024-07-18 | duckdbinstgptmini | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.857 | 6 | 0.333 | 48 | 0.562 | 2 | 1 | 75 | 0.613 |
|
openrouter | openai/gpt-4 | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.8 | 14 | 0.786 | 6 | 0.167 | 48 | 0.479 | 2 | 1 | 75 | 0.547 |
|
openrouter | openai/gpt-4--1106-preview | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.643 | 6 | 0.167 | 48 | 0.5 | 2 | 1 | 75 | 0.52 |
|
openrouter | google/palm-2-codechat-bison-32k | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.5 | 6 | 0.167 | 48 | 0.292 | 2 | 1 | 75 | 0.36 |
|
openrouter | mistralai/mistral-large-2 | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.857 | 6 | 0 | 48 | 0.479 | 2 | 1 | 75 | 0.533 |
|
openrouter | mistralai/mixtral-8x22b-instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.786 | 6 | 0.167 | 48 | 0.333 | 2 | 1 | 75 | 0.44 |
|
openrouter | mistralai/mixtral-8x7b-instruct | duckdbinst | 2024-10-25T13:56:52.185852 | 5 | 0.6 | 14 | 0.214 | 6 | 0.167 | 48 | 0.167 | 2 | 1 | 75 | 0.227 |
|
openrouter | meta-llama/llama-3.2-3b-instruct:free | duckdbinstgraniteshort | 2024-10-25T19:38:52.788031 | 5 | 0.4 | 14 | 0.357143 | 6 | 0 | 48 | 0.270833 | 2 | 1 | 75 | 0.293333 |
|
openrouter | nvidia/llama-3.1-nemotron-70b-instruct | duckdbinstgraniteshort | 2024-11-02T02:47:52.524310 | 5 | 0.6 | 14 | 0.642857 | 6 | 0 | 48 | 0.541667 | 2 | 1 | 75 | 0.533333 |
|
openrouter | anthropic/claude-3.5-sonnet | custom_32428570 | ### Instruction:
Your task is to generate valid duckdb SQL to answer the following question, given a duckdb database schema.
Here is the database schema that the SQL query will run on:
{schema}
### Question:
{question}
### Response (use duckdb shorthand if possible):
```sql
| 2024-10-27T22:55:01.139181 | 5 | 0.6 | 14 | 0.785714 | 6 | 0.333333 | 48 | 0.625 | 2 | 1 | 75 | 0.64 |
openrouter | anthropic/claude-3.5-sonnet | custom_32428570 | ### Instruction:
Your task is to generate valid duckdb SQL to answer the following question, given a duckdb database schema.
Here is the database schema that the SQL query will run on:
{schema}
### Question:
{question}
### Response (use duckdb shorthand if possible):
```sql
| 2024-10-27T22:56:53.164026 | 5 | 0.6 | 14 | 0.785714 | 6 | 0.333333 | 48 | 0.625 | 2 | 1 | 75 | 0.64 |
openrouter | anthropic/claude-3.5-sonnet | duckdbinstgraniteshort | 2024-10-27T23:04:39.561642 | 5 | 0.6 | 14 | 0.714286 | 6 | 0.166667 | 48 | 0.729167 | 2 | 1 | 75 | 0.68 |
|
openrouter | qwen/qwen-2.5-coder-32b-instruct | duckdbinstgraniteshort | 2024-11-12T04:18:20.511844 | 5 | 0.8 | 14 | 0.857143 | 6 | 0.166667 | 48 | 0.604167 | 2 | 1 | 75 | 0.64 |
|
openrouter | qwen/qwen-2.5-coder-32b-instruct | custom_85307618 | Generate a valid DuckDB SQL query to accurately answer the user's question. Only respond with the SQL code.
DuckDB SQL Specifics:
Identifiers and Literals: Use double quotes (") for identifiers (tables, columns) with spaces or special characters, and single quotes (') for string literals.
Direct File Querying: DuckDB supports querying directly from CSV, Parquet, and JSON files, e.g., SELECT * FROM 'data.csv';
CTAS: Supports CREATE TABLE AS (CTAS) syntax, e.g., CREATE TABLE new_table AS SELECT * FROM old_table;
FROM Clauses: Queries can start with FROM, omitting SELECT * (e.g., FROM my_table WHERE condition;).
Expression Evaluation: Use SELECT without FROM to generate expressions or single results, e.g., SELECT 1 + 1;.
Multiple Database Support: Attach databases using ATTACH and refer with dot notation, e.g., ATTACH 'db.duckdb' AS mydb; then mydb.table_name.
Implicit Type Conversion: DuckDB allows implicit type conversions, or make them explicit with :: (e.g., '42'::INTEGER + 1;).
String and Array Slicing: Use [start:end] or [start:end:step] for slicing (strings, arrays), with 1-based indexes.
Patterned Column Selection: Select columns with patterns, e.g., SELECT COLUMNS('sales_.*') FROM sales_data;.
Exclusion/Inclusion with Wildcards: Exclude/include or transform columns with SELECT * EXCLUDE (col); and SELECT * REPLACE (col AS new_col);.
Grouping/Ordering: GROUP BY ALL or ORDER BY ALL to group/order by non-aggregated columns.
Union by Column Name: Combine tables by matching column names, e.g., table1 UNION BY NAME table2;.
Complex Types: Support for List, Struct, Map, and Array types with intuitive syntax ([1, 2, 3] AS my_list;).
Field Access: Access struct fields with dot notation, and maps with brackets.
Date and Timestamp Conversion: strftime(NOW(), '%Y-%m-%d'); for the current date, and strptime('YYYY-MM-DD', '%Y-%m-%d')::TIMESTAMP; for string-to-timestamp.
Alias Usage: Column aliases in the WHERE, GROUP BY, and HAVING clauses, e.g., SELECT a + b AS total FROM my_table WHERE total > 10 GROUP BY total;.
List Comprehensions: Generate lists with expressions, e.g., SELECT [x*2 FOR x IN [1, 2, 3]];.
Function Chaining: Chain functions with . (dot operator), e.g., SELECT 'DuckDB'.replace('Duck', 'Goose');.
JSON Handling: Use -> and ->> for JSON fields, e.g., data->'$.user.id' AS user_id;.
Regex Functions: regexp_matches(), regexp_replace(), and regexp_extract() are supported.
Data Sampling: Retrieve a data subset with USING SAMPLE, e.g., SELECT * FROM large_table USING SAMPLE 10%;.
Database Schema: {schema}
Question: {question}
```
| 2024-11-12T04:40:38.781499 | 5 | 0.6 | 14 | 0.857143 | 6 | 0.166667 | 48 | 0.583333 | 2 | 1 | 75 | 0.613333 |
openrouter | openai/gpt-4o-2024-08-06 | custom_8348795 | You are a DuckDB SQL Query Writing Assistant. You only respond with a DuckDB SQL query that answers the users's question.
Here are some DuckDB SQL syntax specifics you should be aware of:
- DuckDB use double quotes (") for identifiers that contain spaces or special characters, or to force case-sensitivity and single quotes (') to define string literals
- DuckDB can query CSV, Parquet, and JSON directly without loading them first, e.g. `SELECT * FROM 'data.csv';`
- DuckDB supports CREATE TABLE AS (CTAS): `CREATE TABLE new_table AS SELECT * FROM old_table;`
- DuckDB queries can start with FROM, and optionally omit SELECT *, e.g. `FROM my_table WHERE condition;` is equivalent to `SELECT * FROM my_table WHERE condition;`
- DuckDB allows you to use SELECT without a FROM clause to generate a single row of results or to work with expressions directly, e.g. `SELECT 1 + 1 AS result;`
- DuckDB supports attaching multiple databases, unsing the ATTACH statement: `ATTACH 'my_database.duckdb' AS mydb;`. Tables within attached databases can be accessed using the dot notation (.), e.g. `SELECT * FROM mydb.table_name syntax`. The default databases doesn't require the do notation to access tables. The default database can be changed with the USE statement, e.g. `USE my_db;`.
- DuckDB is generally more lenient with implicit type conversions (e.g. `SELECT '42' + 1;` - Implicit cast, result is 43), but you can always be explicit using `::`, e.g. `SELECT '42'::INTEGER + 1;`
- DuckDB can extract parts of strings and lists using [start:end] or [start:end:step] syntax. Indexes start at 1. String slicing: `SELECT 'DuckDB'[1:4];`. Array/List slicing: `SELECT [1, 2, 3, 4][1:3];`
- DuckDB has a powerful way to select or transform multiple columns using patterns or functions. You can select columns matching a pattern: `SELECT COLUMNS('sales_.*') FROM sales_data;` or transform multiple columns with a function: `SELECT AVG(COLUMNS('sales_.*')) FROM sales_data;`
- DuckDB an easy way to include/exclude or modify columns when selecting all: e.g. Exclude: `SELECT * EXCLUDE (sensitive_data) FROM users;` Replace: `SELECT * REPLACE (UPPER(name) AS name) FROM users;`
- DuckDB has a shorthand for grouping/ordering by all non-aggregated/all columns. e.g `SELECT category, SUM(sales) FROM sales_data GROUP BY ALL;` and `SELECT * FROM my_table ORDER BY ALL;`
- DuckDB can combine tables by matching column names, not just their positions using UNION BY NAME. E.g. `SELECT * FROM table1 UNION BY NAME SELECT * FROM table2;`
- DuckDB has an inutitive syntax to create List/Struct/Map and Array types. Create complex types using intuitive syntax. List: `SELECT [1, 2, 3] AS my_list;`, Struct: `{{{{'a': 1, 'b': 'text'}}}} AS my_struct;`, Map: `MAP([1,2],['one','two']) as my_map;`. All types can also be nested into each other. Array types are fixed size, while list types have variable size. Compared to Structs, MAPs do not need to have the same keys present for each row, but keys can only be of type Integer or Varchar. Example: `CREATE TABLE example (my_list INTEGER[], my_struct STRUCT(a INTEGER, b TEXT), my_map MAP(INTEGER, VARCHAR), my_array INTEGER[3], my_nested_struct STRUCT(a INTEGER, b Integer[3]));`
- DuckDB has an inutive syntax to access struct fields using dot notation (.) or brackets ([]) with the field name. Maps fields can be accessed by brackets ([]).
- DuckDB's way of converting between text and timestamps, and extract date parts. Current date as 'YYYY-MM-DD': `SELECT strftime(NOW(), '%Y-%m-%d');` String to timestamp: `SELECT strptime('2023-07-23', '%Y-%m-%d')::TIMESTAMP;`, Extract Year from date: `SELECT EXTRACT(YEAR FROM DATE '2023-07-23');`
- Column Aliases in WHERE/GROUP BY/HAVING: You can use column aliases defined in the SELECT clause within the WHERE, GROUP BY, and HAVING clauses. E.g.: `SELECT a + b AS total FROM my_table WHERE total > 10 GROUP BY total HAVING total < 20;`
- DuckDB allows generating lists using expressions similar to Python list comprehensions. E.g. `SELECT [x*2 FOR x IN [1, 2, 3]];` Returns [2, 4, 6].
- DuckDB allows chaining multiple function calls together using the dot (.) operator. E.g.: `SELECT 'DuckDB'.replace('Duck', 'Goose').upper(); -- Returns 'GOOSEDB';`
- DuckDB has a JSON data type. It supports selecting fields from the JSON with a JSON-Path expression using the arrow operator, -> (returns JSON) or ->> (returns text) with JSONPath expressions. For example: `SELECT data->'$.user.id' AS user_id, data->>'$.event_type' AS event_type FROM events;`
- DuckDB has built-in functions for regex regexp_matches(column, regex), regexp_replace(column, regex), and regexp_extract(column, regex).
- DuckDB has a way to quickly get a subset of your data with `SELECT * FROM large_table USING SAMPLE 10%;`
DuckDB Functions:
`count`: Calculates the total number of rows returned by a SQL query result. This function is commonly used to determine the row count of a SELECT operation., Parameters: ['result: The result object']
`sum`: Calculates the total of all non-null values from the given input., Parameters: ['arg: Values to be summed up.']
`sum`: Calculates the total of all non-null values in a specified column or expression across rows., Parameters: ['arg: Values to be aggregated']
`max`: Returns the maximum value from the input data., Parameters: ['arg: The column or expression to evaluate', 'n: Number of top values to return(optional)', 'ORDER BY: Specifies sort order before function(optional)']
`max`: Returns the largest value from all values in a specified column or expression., Parameters: ['arg: expression to evaluate maximum', "n: top 'n' value list size(optional)"]
`coalesce`: This function evaluates provided expressions in order and returns the first non-NULL value found. If all expressions evaluate to NULL, then the result is NULL., Parameters: ['expr: An expression to evaluate', '...: Additional expressions to evaluate(optional)']
`trunc`: Truncates a number by removing the fractional part, essentially returning the integer part of the number without rounding., Parameters: ['x: The number to truncate.']
`date_trunc`: Truncates a date or timestamp to the specified precision, effectively setting smaller units to zero or to the first value of that unit (e.g., the first day of the month)., Parameters: ['part: Specifies the truncation precision', 'date: The date or timestamp value']
`row_number`: Generates a unique incrementing number for each row within a partition, starting from 1., Parameters: ['ORDER BY: Specify sort order for numbers.(optional)', 'PARTITION BY: Define groups for numbering.(optional)', 'RANGE/ROWS: Define rows for frame.(optional)', 'EXCLUDE: Exclude specific rows from frame.(optional)', 'WINDOW: Reuse a window definition.(optional)']
`unnest`: The function expands lists or structs into separate rows or columns, reducing nesting by one level., Parameters: ['list_or_struct: The list or struct to unnest.', 'recursive: Unnest multiple levels or not.(optional)', 'max_depth: Limit depth of unnesting.(optional)']
`prompt`: This function allows you to prompt large language models to generate text or structured data as output., Parameters: ['prompt_text: Text input for the model.', 'model: Model to use for prompt.(optional)', 'temperature: Model temperature value setting.(optional)', 'struct: Output schema for struct result.(optional)', 'struct_descr: Field descriptions for struct.(optional)', 'json_schema: Schema for JSON output format.(optional)']
`min`: Returns the minimum value from a set of numeric values., Parameters: ['value_column: Column containing numeric values.', 'ignore_nulls: Ignore NULL values if true.(optional)', 'filter_condition: Condition to filter rows.(optional)']
`min`: Finds the smallest value in a group of input values., Parameters: ['expression: The input value to consider']
`concat`: Concatenates multiple strings together into a single string., Parameters: ['string: String to concatenate']
`avg`: Calculates the average of non-null values., Parameters: ['arg: Data to be averaged']
`lower`: Converts a given string to lower case, commonly used for normalization in text processing., Parameters: ['string: String to be converted']
`read_csv_auto`: Automatically reads a CSV file and infers the data types of its columns., Parameters: ['file_path: Path to the CSV file', 'MD_RUN: Execution control parameter(optional)']
`read_parquet`: Reads Parquet files and treats them as a single table, supports reading multiple files via a list or glob pattern., Parameters: ['path_or_list_of_paths: Path(s) to Parquet file(s)', 'binary_as_string: Load binary as strings(optional)', 'encryption_config: Encryption configuration settings(optional)', 'filename: Include filename column result(optional)', 'file_row_number: Include file row number(optional)', 'hive_partitioning: Interprets Hive partition paths(optional)', 'union_by_name: Unify columns by name(optional)']
`strftime`: Converts timestamps or dates to strings based on a specified format pattern., Parameters: ['timestamp: Input date or timestamp value', 'format: Pattern for string conversion']
`array_agg`: Returns a list containing all values of a column, affected by ordering., Parameters: ['arg: Column to aggregate values']
`regexp_matches`: The function checks if a given string contains a specified regular expression pattern and returns `true` if it does, and `false` otherwise., Parameters: ['string: The input string to search', 'pattern: The regex pattern to match', 'options: Regex matching options string(optional)']
`replace`: Replacement scans in DuckDB allow users to register a callback that gets triggered when a query references a non-existent table. The callback can replace this table with a custom table function, effectively 'replacing' the non-existent table in the query execution process., Parameters: ['db: Database object where replacement applies', 'replacement: Handler for when table is missing', 'extra_data: Extra data given to callback(optional)', 'delete_callback: Cleanup for extra data provided(optional)']
`round`: Rounds a numeric value to a specified number of decimal places., Parameters: ['v: The number to round', 's: Decimal places to round to']
`length`: Returns the length of a string, Parameters: ['value: String to measure length of']
`query`: Table function query extracts statements from a SQL query string and outputs them as `duckdb_extracted_statements` objects. It is utilized to dissect SQL queries and obtain individual statements for further processing, enabling preparation or analysis of each separate statement., Parameters: ['connection: Database connection object', 'query: SQL query to extract from', 'out_extracted_statements: Object for extracted statements']
`read_json_auto`: Automatically infers the schema from JSON data and reads it into a table format., Parameters: ['filename: Path to the JSON file.', 'compression: File compression type.(optional)', 'auto_detect: Auto-detect key names/types.(optional)', 'columns: Manual specification of keys/types.(optional)', 'dateformat: Date format for parsing dates.(optional)', 'format: JSON file format.(optional)', 'hive_partitioning: Hive partitioned path interpretation.(optional)', 'ignore_errors: Ignore parse errors option.(optional)', 'maximum_depth: Max depth for schema detection.(optional)', 'maximum_object_size: Max size of JSON object.(optional)', 'records: JSON record unpacking option.(optional)', 'sample_size: Number of objects for sampling.(optional)', 'timestampformat: Timestamp parsing format.(optional)', 'union_by_name: Unify schemas of files.(optional)']
`range`: Creates a list of values within a specified numeric range, starting inclusively from 'start' and stopping exclusively before 'stop', with an optional step interval., Parameters: ['start: The inclusive start point.(optional)', 'stop: The exclusive end point.', 'step: Interval between each number.(optional)']
`range`: The table function generates a sequential list of values starting from a specified number, incrementing by a given step, up to but not including an end number., Parameters: ['start: Start of the range(optional)', 'stop: End of the range (exclusive)', 'step: Increment between values(optional)']
`date_diff`: Computes the number of specified partition boundaries between two dates (or timestamps)., Parameters: ['part: Specifies the date/timestamp partition', 'startdate: The start date or timestamp', 'enddate: The end date or timestamp']
`lag`: The window function provides the value from a prior row within the same result set partition., Parameters: ['expression: Column or expression to evaluate', 'offset: Number of rows back(optional)', 'default_value: Default value if no offset(optional)']
`year`: Extracts the year component from a date or timestamp value., Parameters: ['date: Date from which to extract year', 'timestamp: Timestamp from which to extract year']
`now`: Obtains the current date and time at the start of the current transaction, using the system's time zone., Parameters: ['None: No parameters required(optional)']
`group_concat`: Concatenates column string values using a specified separator, respecting the provided order., Parameters: ['arg: The column to concatenate', 'sep: Separator between concatenated values(optional)', 'ORDER BY: Specifies order of concatenation(optional)']
`regexp_extract`: If a string matches a given regular expression pattern, it returns the specified capturing group or groups with optional capture group names., Parameters: ['string: Input string to search in.', 'pattern: Regex pattern to match.', 'group: Specifies which group to capture.(optional)', 'name_list: Named capture groups struct.(optional)', 'options: Regex matching options.(optional)']
`upper`: Converts a given string to uppercase characters., Parameters: ['string: String to make uppercase']
`greatest`: Selects the largest value from a list of input values using lexicographical ordering., Parameters: ['x1: The first value to compare', 'x2: The second value to compare', '...: Additional values to compare(optional)', 'xn: Nth value to compare(optional)']
`row`: The function initiates the creation of a row in an appender by signaling the start of adding values for a new row., Parameters: ['appender: Appender to start new row']
`getvariable`: The function retrieves the value of a previously set SQL-level variable, returning NULL if the variable is not defined., Parameters: ['variable_name: The name of the variable']
`quarter`: Extracts the quarter (1 to 4) from a date value., Parameters: ['date: The input date to evaluate.']
`strptime`: Converts a string to a timestamp according to a specified format string, throwing an error on failure., Parameters: ['text: Input string to convert', 'format: String format to parse']
`substring`: Extracts a substring from a given string starting at a specified position and with a specified length., Parameters: ['string: The original string to extract from', 'start: Starting position for extraction', 'length: Number of characters to extract']
`add`: Adds two integer values and returns the sum., Parameters: ['a: First integer to add', 'b: Second integer to add', 'result: Sum of a and b']
`date_part`: Extracts a specified subfield from a timestamp and returns its numeric value, equivalent to the SQL keyword 'extract'., Parameters: ['part: The subfield to extract from the timestamp or timestamptz.', 'timestamp: The input timestamp value to extract the subfield from.', 'interval: Extracts date part from interval.(optional)']
`json_extract`: Extracts JSON from a specified path within a JSON object or array., Parameters: ['json: The JSON object or array.', 'path: Path to extract data from.']
`json_extract_string`: Extracts a string (VARCHAR) value from a JSON object at a specified path, converting JSON data to text if possible., Parameters: ['json: The JSON object to extract from', 'path: The path to the desired value']
`rank`: The rank function assigns a rank to each row within a partition of a result set, allowing for potential gaps in the ranking when there are ties., Parameters: ['order_column: Column or expression for sorting', 'partition_column: Column to partition data by(optional)', 'alias: Alias name for result column(optional)']
`day`: The function extracts the day of the month from a given date., Parameters: ['date: Date value to extract from']
`list`: DuckDB provides an aggregate function that executes an aggregate operation over the elements within a list. This function can be utilized to apply any existing aggregate function, like `min`, `sum`, or `histogram`, across the elements of a list. This allows the aggregation of list data in a flexible manner., Parameters: ['list: List to aggregate values.', "name: Aggregate function's name to apply.", 'value: Optional extra parameters needed.(optional)']
`generate_series`: This function creates a list of values within a specified range where both endpoints are inclusive., Parameters: ['start: Inclusive start of range(optional)', 'stop: Inclusive stop of range', 'step: Difference between successive values(optional)']
`generate_series`: Creates a list of values from start to stop inclusively, with a specified step., Parameters: ['start: Inclusive start of the series(optional)', 'stop: Inclusive end of the series', 'step: Step increment between each value(optional)']
`datediff`: Calculates the number of specified partition boundaries between two dates., Parameters: ['part: Time unit to measure', 'startdate: The starting date', 'enddate: The ending date']
`left`: Extracts left-most characters from a string., Parameters: ['string: String to extract characters from', 'count: Number of left-most characters']
`trim`: Removes specified characters from both sides of a string, or spaces if no characters are specified., Parameters: ['string: The input string to trim', 'characters: Characters to remove from string(optional)']
`array_has_any`: Returns true if any element is present in both input lists., Parameters: ['list1: First list to compare.', 'list2: Second list to compare.']
`datetrunc`: Truncates a date or timestamp to a specified precision part, such as year, month, or day., Parameters: ['part: The precision to truncate to.', 'date: The date to truncate.', 'timestamp: The timestamp to truncate.']
`split_part`: Splits a string by a specified separator and returns the part at a given index., Parameters: ['string: The string to be split', 'separator: The delimiter to split by', 'index: 1-based index to retrieve']
`read_json`: Reads JSON files, inferring schema and format automatically from the data., Parameters: ['filename: Path to JSON file(s).', 'auto_detect: Auto-detect schema from data.(optional)', 'columns: Specified columns and types.(optional)', 'compression: File compression type detected.(optional)', 'format: Format of JSON data.(optional)', 'hive_partitioning: Choose Hive partitioning method.(optional)', 'ignore_errors: Ignore errors during parsing.(optional)', 'maximum_depth: Maximum schema detection depth.(optional)', 'maximum_object_size: Limit JSON object size bytes.(optional)', 'records: Read JSON as records.(optional)', 'sample_size: Sample objects for detection.(optional)', 'timestampformat: Format for parsing timestamps.(optional)', 'union_by_name: Unify multiple file schema types.(optional)']
`read_csv`: Reads CSV files into a DuckDB relation, automatically inferring configurations such as delimiters, headers, and column types unless specified otherwise., Parameters: ['all_varchar: Assume all columns as VARCHAR(optional)', 'allow_quoted_nulls: Allow quoted nulls conversion(optional)', 'auto_detect: Enable auto detection of parameters(optional)', 'auto_type_candidates: Types considered for auto detection(optional)', 'columns: Specify column names and types(optional)', 'compression: File compression type(optional)', 'dateformat: Date format for parsing dates(optional)', 'decimal_separator: Decimal separator of numbers(optional)', 'delimiter: Character separating columns in rows(optional)', 'delim: Character separating columns in rows(optional)', 'escape: String for escaping data chars(optional)', 'filename: Include filename in result(optional)', 'force_not_null: Do not match null string(optional)', 'header: File contains a header line(optional)', 'hive_partitioning: Interpret path as Hive partitioned(optional)', 'ignore_errors: Ignore rows with parsing errors(optional)', 'max_line_size: Maximum line size in bytes(optional)', 'names: Column names as a list(optional)', 'new_line: New line characters in file(optional)', 'normalize_names: Normalize column names(optional)', 'null_padding: Pad remaining columns with nulls(optional)', 'nullstr: String representing null value(optional)', 'parallel: Use parallel CSV reader(optional)', 'quote: Use quoting for data values(optional)', 'sample_size: Number of rows for sampling(optional)', 'sep: Delimiter character between columns(optional)', 'skip: Lines to skip at top(optional)', 'timestampformat: Format for parsing timestamps(optional)', 'types or dtypes: Column types by position/name(optional)', 'union_by_name: Unify schemas by column name(optional)', 'store_rejects: Store errors in reject tables(optional)', 'rejects_scan: Name for rejects scan table(optional)', 'rejects_table: Name for rejects errors table(optional)', 'rejects_limit: Limit faulty records stored(optional)']
`today`: Returns the current date at the start of the transaction., Parameters: []
`floor`: Rounds down a numeric value to the nearest integer., Parameters: ['x: Value to be rounded down']
`ends_with`: Checks if a string ends with a specified substring, returning true if it does and false otherwise., Parameters: ['string: The string to check', 'search_string: The ending substring']
`regexp_replace`: Replaces portions of a string matching a regular expression with a specified replacement string. Can replace globally with the 'g' option., Parameters: ['string: The string to search in.', 'pattern: The regular expression to match.', 'replacement: The string to replace with.', 'options: Options to modify behavior.(optional)']
`list_distinct`: Removes duplicates and NULL values from a list., Parameters: ['list: Input list to process']
`abs`: Calculates the absolute value of a given numeric input., Parameters: ['x: Input value for operation']
`len`: Calculates the length of a specified input, returning the number of elements or characters it contains., Parameters: ['input: The input whose length is calculated.', 'length_type: Type of length to compute.(optional)', 'ignore_nulls: Whether to ignore null values.(optional)']
`substr`: Extracts a substring from a string starting at a specified position and continuing for a specified length., Parameters: ['string: The string to extract from', 'start: Starting position of extract', 'length: Number of characters to extract']
`last_value`: Evaluates an expression at the last row of the current window frame., Parameters: ['expr: Expression to evaluate at last row', 'IGNORE NULLS: Skip nulls in evaluation(optional)']
`time_bucket`: Truncates the provided timestamp by the specified interval, allowing for optional offsets or origins to alter the bucketing alignment., Parameters: ['bucket_width: Interval to truncate by', 'timestamptz or date: Timestamp or date value', 'offset: Offset interval for buckets(optional)', 'origin: Origin timestamp for alignment(optional)', 'timezone: Time zone for calculation(optional)']
`read_json_objects`: Reads JSON objects from the given file(s), allowing for various formats and compressed files., Parameters: ['filename: Path to JSON file(s)', 'compression: Type of file compression utilized(optional)', 'format: Format of the JSON data(optional)', 'hive_partitioning: Enable Hive partitioning path(optional)', 'ignore_errors: Ignore JSON parsing errors(optional)', 'maximum_sample_files: Max sampled files for detection(optional)', 'maximum_object_size: Max size of JSON object(optional)', 'filename: Add extra filename column(optional)']
`duckdb_functions`: This table function lists all functions, including macros, within the DuckDB instance providing details such as their type, return type, parameters, and other relevant metadata., Parameters: ['database_name: Database holding this function', 'schema_name: Schema where function resides', 'function_name: SQL name of the function', 'function_type: Kind of function (e.g. scalar)', 'description: Description of this function(optional)', 'return_type: Data type name of return(optional)', "parameters: Function's parameter names(optional)", 'parameter_types: Data type names of parameters(optional)', 'varargs: Data type for variable arguments(optional)', 'macro_definition: SQL expression defining macro(optional)', 'has_side_effects: Indicates if function is pure', 'function_oid: Internal identifier for function']
`histogram`: Produces a map of keys as histogram buckets with corresponding counts based on input values., Parameters: ['arg: Input values to aggregate.']
`md5`: Computes the MD5 hash of a given string and returns it as a VARCHAR., Parameters: ['string: The input string value.']
`format`: Formats a string using specified parameters following the fmt syntax., Parameters: ['format: The format string used.', 'parameters: Values to replace placeholders.(optional)']
`array_length`: Returns the number of elements in a JSON array. If provided, the path specifies a location within the JSON structure where the array's length is determined., Parameters: ['json: The JSON string to evaluate', 'path: The path to the JSON array(optional)']
`duckdb_tables`: Provides metadata about base tables in DuckDB instance., Parameters: ['database_name: Name of the database containing this table.', 'database_oid: Internal identifier of the database.', 'schema_name: Name of the schema containing this table.', 'schema_oid: Internal identifier of the schema.', 'table_name: Name of the base table.', 'table_oid: Internal identifier of the table object.', 'internal: False if user-defined table.', 'temporary: Whether it is a temporary table.', 'has_primary_key: True if table defines PRIMARY KEY.', 'estimated_size: Estimated number of rows in table.', 'column_count: Number of columns in the table.', 'index_count: Number of associated indexes.', 'check_constraint_count: Number of active check constraints.', 'sql: SQL definition for the table.']
`to_json`: Converts a value to JSON format., Parameters: ['any: Value to convert to JSON']
`month`: Returns the month as an integer from a given date or timestamp., Parameters: ['date_or_timestamp: Input date or timestamp value']
`stddev`: Calculates the sample standard deviation of a set of non-null values., Parameters: ['x: Values to calculate deviation']
`first_value`: The function returns the value of the specified expression evaluated at the first row of the window frame., Parameters: ['expr: The expression to evaluate.', 'IGNORE NULLS: Ignore NULL values in frame.(optional)']
`parquet_schema`: The function queries the internal schema of a Parquet file, revealing details such as column names, types, and other metadata., Parameters: []
`string_agg`: Concatenates string values from a column with a specified separator in order, optionally sorted by a criterion., Parameters: ['arg: Column of string values.', 'sep: Separator between concatenated strings.', 'ORDER BY: Optional sorting criteria.(optional)']
`flatten`: Flatten concatenates elements of a list of lists into a single list, flattening one level., Parameters: ['list_of_lists: A list containing lists']
`hash`: Computes a UBIGINT hash value for a given input, useful for operations like joins, grouping or checking data equality across different systems., Parameters: ['value: Input to compute hash from']
`current_date`: Returns the current date at the start of the current transaction., Parameters: ['transaction: Start of current transaction(optional)', 'current: Current session or scope(optional)']
`position`: Locates the position of the first occurrence of "search_string" within the provided "string", counting from 1. It returns 0 if "search_string" is not found., Parameters: ['search_string: The substring to find.', 'string: The string to search in.']
`row_to_json`: Converts a STRUCT type into a JSON object format, facilitating the transformation of complex data structures into JSON format for further processing or output., Parameters: ['list: A structure to convert']
`duckdb_columns`: This function provides metadata about columns in the DuckDB instance, including details on data type, default values, etc., Parameters: ['database_name: Name of the database containing column', 'database_oid: Internal database identifier', 'schema_name: Name of schema containing table', 'schema_oid: Internal schema identifier', 'table_name: Name of table containing column', 'table_oid: Internal table object identifier', 'column_name: SQL name of the column', 'column_index: Position of column in table', 'internal: True if column is built-in', 'column_default: Column default value in SQL(optional)', 'is_nullable: True if column accepts NULL', 'data_type: Column datatype name', 'data_type_id: Internal data type identifier', 'character_maximum_length: Always NULL, no length restrictions', 'numeric_precision: Storage precision of column values(optional)', 'numeric_precision_radix: Precision number-base in bits/positions(optional)', 'numeric_scale: Fractional digits for decimal type(optional)', 'comment: User-defined comment on column(optional)']
`contains`: Checks if a map contains a given key and returns true or false., Parameters: ['map: The map to search', 'key: The key to search']
`week`: The function extracts the ISO week number from a date or timestamp, starting with Monday as the first day of the week., Parameters: ['date: Input date to process']
`duckdb_secrets`: Provides metadata about the secrets available in the DuckDB instance., Parameters: ['redact: Controls if sensitive data is redacted.(optional)']
`max_by`: The function finds the row with the maximum value in a specified column and returns a different column's value from that row, allowing for an ordered result based on the specified column., Parameters: ['arg: Value to return from row.', 'val: Column to determine maximum.', 'n: Number of top rows.(optional)']
`alias`: A scalar function alias provides an alternative name for a function to improve readability or conform to conventions. For instance, 'uppercase' could be used to call 'UPPER'., Parameters: ['alias: The alternative function name', 'function_name: The actual function name', 'parameters: Parameters of the function(optional)']
`json_structure`: Returns the structure of a given JSON, defaulting to JSON if types are inconsistent., Parameters: ['json: Input JSON value to process.']
`first`: Returns the first value (null or non-null) from the given column, and is affected by specifying an order using ORDER BY to determine which value is first., Parameters: ['column: Target column to aggregate.', 'ORDER BY (optional): Order used to determine first.(optional)', 'FILTER (optional): Condition to filter rows.(optional)']
`percent_rank`: Calculates the relative rank of a row within its partition as `(rank() - 1) / (total partition rows - 1)`, outputting a value between 0 and 1., Parameters: ['window_specification: Defines row partition and order.(optional)', 'ORDER BY: Specifies the row order.(optional)']
`json_transform`: Transforms a JSON object into a specified nested type structure, enabling efficient extraction and type conversion., Parameters: ['json: The JSON data to transform.', 'structure: Desired structure for transformation.']
`random`: Generates a random floating-point number between 0.0 and 1.0., Parameters: ['none: No parameters are needed.']
`any_value`: This aggregate function returns the first non-null value from a column, particularly useful to obtain any non-null entry when the specific order is not crucial yet needs to handle initial null values., Parameters: ['arg: Input column with values']
`reverse`: Reverses the order of the characters in a given string., Parameters: ['string: The string to reverse']
`list_aggregate`: Executes a specified aggregate function on the elements within a list., Parameters: ['list: The input list to aggregate', 'name: Name of the aggregate function', 'additional_arguments: Arguments passed to aggregate(optional)']
`epoch_ms`: The function converts either a given timestamp to milliseconds since the epoch or milliseconds since the epoch to a timestamp., Parameters: ['ms: Milliseconds since epoch(optional)', 'timestamp: Timestamp to convert to ms(optional)']
`aggregate`: The scalar function for aggregate in DuckDB is designed to create a custom aggregate function. It facilitates aggregation of data over a column in a database and involves setting parameters, return types, and function operations such as state initialization, state updates, and finalization., Parameters: ['aggregate_function: Pointer to aggregate function', 'name: Name of the aggregate function(optional)', 'type: Logical type of parameter(optional)', 'state_size: Size of aggregate state(optional)', 'state_init: Initializes the state(optional)', 'update: Updates the aggregate state(optional)', 'combine: Merges two aggregation states(optional)', 'finalize: Produces final result from state(optional)', 'destroy: Destructs the aggregate state(optional)', 'extra_info: Stores additional information(optional)', 'error: Aggregate function error message(optional)', 'set: Set of aggregate functions(optional)', 'info: Retrieves extra info from info(optional)', 'con: Connection to database(optional)', 'function: Aggregate function to add(optional)', 'out_database: The result database object', 'out_error: Output error on failure(optional)', 'config: Optional configuration details(optional)']
`read_json_objects_auto`: Reads JSON objects from a file or files using automatically detected format settings., Parameters: ['filename: Path to JSON file or files', 'compression: Type for file compression(optional)', 'filename: Include filename in result(optional)', 'format: Format for JSON data(optional)', 'hive_partitioning: Use Hive partitioned paths(optional)', 'ignore_errors: Continue ignoring parse errors(optional)', 'maximum_sample_files: Max files for auto-detection(optional)', 'maximum_object_size: Max bytes per JSON object(optional)']
`duckdb_constraints`: Provides metadata about constraints in the DuckDB instance., Parameters: []
`cos`: Computes the cosine of a given number, returning its trigonometric value., Parameters: ['x: Input number for calculation']
`sin`: Calculates the sine of a given angle expressed in radians., Parameters: ['value: Angle in radians to calculate sine']
`array_transform`: Transforms each element of the input list using a lambda function, returning a new list with the results., Parameters: ['list: The input list to transform', 'lambda: Function to apply to elements']
`datepart`: Extracts specified subfields from a TIMESTAMPTZ and returns them as a struct., Parameters: ['part: Subfield to extract', 'timestamptz: Input timestamp with time zone', '[part, ...]: List of subfields to extract(optional)']
`map`: The function returns an empty map., Parameters: ['(none): No parameters are required']
`least`: Selects the smallest value from a list of inputs., Parameters: ['x1, x2, ...: A list of numeric values.']
`epoch`: Converts a timestamp to seconds since the epoch (1970-01-01)., Parameters: ['timestamp: Timestamp to convert to seconds.']
`nextval`: Retrieves the next value from a specified sequence., Parameters: ['sequence_name: The name of the sequence']
`pragma_storage_info`: The function returns detailed storage information for a specified table, including metrics like compression type and storage chunk details., Parameters: ['table_name: Name of the table.']
`ceil`: Rounds a numeric value upward to the nearest integer., Parameters: ['x: The number to round up']
`list_concat`: Concatenates two lists into one., Parameters: ['list1: The first list to concatenate.', 'list2: The second list to concatenate.']
`median`: Finds the middle value of a dataset, averaging the two middle values for an even-sized array., Parameters: ['x: Values to find middle value']
`uuid`: Generates a random UUID as a string., Parameters: []
`radians`: Converts an angle measured in degrees to an equivalent angle in radians., Parameters: ['x: Angle in degrees to convert.']
`dayname`: Returns the English name of the weekday for a given date or timestamp., Parameters: ['date: A date to extract weekday.', 'timestamp: A timestamp to extract weekday.(optional)']
`embedding`: The function generates text embeddings using OpenAI's models., Parameters: ['my_text_column: Column containing text for embedding', 'model: Model type for embeddings(optional)']
`levenshtein`: Calculates the minimum number of single-character edits required to change one string into another, considering characters of different cases as distinct., Parameters: ['s1: The first string to compare', 's2: The second string to compare']
`acos`: Computes the arccosine of the input value., Parameters: ['x: Input value for arccosine.']
`timezone`: The function retrieves or sets a timestamp within a specified time zone, effectively converting between "local" and UTC times., Parameters: ['text: Specified time zone name or abbreviation', 'timestamp: The date and time to convert', 'timestamptz: Timestamp with time zone to convert']
`duckdb_views`: The function provides metadata about database views, including information on view names, schemas, and definitions., Parameters: []
`json_object`: Creates a JSON object from key-value pairs., Parameters: ['key: Key for the JSON object.', 'value: Value for the JSON object.']
`decode`: Converts a BLOB to a VARCHAR, failing if the BLOB is not valid UTF-8., Parameters: ['blob: The BLOB to convert']
`array_contains`: Checks if a given element exists in a list and returns true if it does., Parameters: ['list: The list to search', 'element: Element to search in list']
`hour`: Extracts the hour component from a given temporal value., Parameters: ['date: The date or timestamp value']
`array_cosine_similarity`: Computes the cosine similarity between two arrays of the same size, with elements that cannot be NULL., Parameters: ['array1: First array of values', 'array2: Second array of values']
`minute`: Extracts the minute part from a timestamp or interval., Parameters: ['timestamp: Extract minute from this timestamp']
`filter`: Constructs a list from elements of the input list for which a lambda function returns true., Parameters: ['list: Input list to be filtered.', 'lambda: Condition for filtering elements.']
`glob`: The function returns filenames located at the specified path using glob pattern syntax., Parameters: ['search_path: Specifies path using glob patterns']
`instr`: Returns the position of the first occurrence of the search string in another string, returning 0 if not found., Parameters: ['string: Input string to search within', 'search_string: String to find in input']
`string_to_array`: Splits a string into an array using the specified separator., Parameters: ['string: The input text to split.', 'separator: Character(s) defining split points.']
`concat_ws`: Concatenates multiple strings together with a specified separator in between each string., Parameters: ['separator: Separator placed between strings.', 'string: Strings to be concatenated together.']
`to_timestamp`: Converts a string into a timestamp using a specified format., Parameters: ['string: Input string to convert', 'format: Date format of the string']
`split`: Splits a string into a list of substrings based on a specified separator., Parameters: ['string: Input string to be split', 'separator: Character or string delimiter']
`power`: Calculates the result of raising a given number to an exponent value., Parameters: ['base: The number to raise', 'exponent: The power to raise by']
`last_day`: Calculates the last day of the month for a given date., Parameters: ['date: Input date to evaluate']
`json_merge_patch`: Merges two JSON documents together, updating the first document with keys and values from the second., Parameters: ['json1: First JSON document to merge', 'json2: Second JSON document to merge']
`lead`: Evaluates the expression at the row offset rows after the current row within the window frame. If there is no such row, a default value is returned., Parameters: ['expr: Expression evaluated on the row', 'offset: Number of rows to offset(optional)', 'default: Value to return if no row(optional)', 'IGNORE NULLS: Ignore nulls when offsetting(optional)']
`struct_pack`: Creates a STRUCT with specified keys and values., Parameters: ['name: Name of the struct entry', 'any: Value of the struct entry']
`array_filter`: Constructs a list from elements of the input list for which a specified condition returns true., Parameters: ['list: Input list to be filtered', 'lambda: Function returning boolean condition']
`list_aggr`: Executes a specified aggregate function on elements of a list., Parameters: ['list: The list of elements.', 'name: Aggregate function name.']
`date_sub`: Calculates the number of complete date part intervals between two date values., Parameters: ['part: type of interval to calculate', 'startdate: starting date for calculation', 'enddate: ending date for calculation']
`lpad`: Pads the input string with a specified character from the left until it reaches a desired length., Parameters: ['string: The input string to modify', 'count: The total length desired', 'character: Character used for padding']
`regexp_split_to_array`: This function splits a string at each occurrence of the regular expression, returning an array of substrings., Parameters: ['string: String to be split into array.', 'regex: Regular expression delimiter pattern.', 'options: Regular expression matching options.(optional)']
`map_from_entries`: Returns a map created from an array of key-value struct entries., Parameters: ['entries: Array of key-value entries.']
`duckdb_schemas`: Provides metadata about available schemas in the DuckDB instance., Parameters: ["oid: Schema object's internal identifier.", 'database_name: Database containing this schema name.', "database_oid: Database's internal identifier.", 'schema_name: SQL name of the schema.', 'internal: True if internal schema.', 'sql: Always NULL.']
`duckdb_settings`: The function provides metadata about current DuckDB settings., Parameters: ['name: Name of the setting', 'value: Current value of the setting', 'description: Description of the setting', "input_type: Logical datatype of setting's value"]
`str_split`: Splits a given string into parts based on a specified separator, returning an array of the split segments., Parameters: ['string: The text input to split.', 'separator: Delimiter to split the string.']
`bar`: Calculates and displays a progress bar during execution of long-running queries., Parameters: ['enable_progress_bar: Enable or disable progress bar(optional)']
`age`: Calculates the age from the birthdate by subtracting the year part and adding one if the birth month and day are ahead in the current year., Parameters: ['birthdate: DATE of birth for calculation']
`query_table`: The function returns a table or the union of tables specified by their names., Parameters: ['tbl_names: Names of tables to use', 'by_name: Union tables by name(optional)']
`duckdb_indexes`: The function provides metadata about secondary indexes, including their names, uniqueness, and associated tables, within a DuckDB instance., Parameters: ['database_name: Name of the database', 'database_oid: Database internal identifier', 'schema_name: SQL name of the schema', 'schema_oid: Schema internal identifier', 'index_name: SQL name of the index', 'index_oid: Object identifier of the index', 'table_name: Name of the table', 'table_oid: Table object internal identifier', 'is_unique: Indicates uniqueness of index', 'is_primary: Always false for secondary', 'expressions: Always null', 'sql: Index SQL definition']
`regr_intercept`: Calculates the intercept of the linear regression line in a dataset, given an independent and dependent variable., Parameters: ['y: Dependent variable data', 'x: Independent variable data']
`regr_slope`: Returns the slope of the linear regression line, where the independent variable is used to calculate its change with the dependent variable., Parameters: ["y: The dependent variable's values.", "x: The independent variable's values."]
`log`: Calculates the natural logarithm of a given input value, providing an essential function for mathematical and statistical computations., Parameters: ['value: The number to compute']
`version`: Returns the currently active version of DuckDB, in the format v0.0.0., Parameters: []
`duckdb_keywords`: Retrieves DuckDB's keywords and reserved words, including their categories., Parameters: []
`list_unique`: Counts the number of unique elements in a list., Parameters: ['list: Input list to count uniqueness.', 'element: Element type within the list.(optional)']
`read_ndjson_objects`: Reads newline-delimited JSON objects from a specified file or set of files., Parameters: ['compression: The compression type for file.(optional)', 'filename: Include filename in result.(optional)', 'format: Specify JSON format to use.(optional)', 'hive_partitioning: Use Hive partitioned path.(optional)', 'ignore_errors: Ignore parse errors if possible.(optional)', 'maximum_sample_files: Max JSON files sampled.(optional)', 'maximum_object_size: Max JSON object size (bytes).(optional)']
`current_setting`: Returns the current value of a specified configuration setting in DuckDB., Parameters: ['setting_name: Name of the configuration setting']
`array_distinct`: Removes duplicates and NULL values from a list, but does not preserve the original order., Parameters: ['list: The list to process.']
`duckdb_databases`: The table function returns a list of databases accessible from the current DuckDB process, including both the startup database and any attached databases., Parameters: []
`list_value`: Creates a list value from a specified logical type and an array of values. This list value can be used within DuckDB for handling columnar data that involves a list or array structure., Parameters: ['type: Logical data type for elements.', 'values: Array of values to list.', 'value_count: Number of values in array.']
`to_base`: Converts an integer to a string representation in a specified base., Parameters: ['value: Integer value to convert', 'base: Base for number conversion']
`list_contains`: Returns true if a specified element is found within the given list., Parameters: ['list: The list to search in', 'element: Element to locate in list']
`from_json`: Transforms JSON into a specified nested structure., Parameters: ['json: The JSON input data.', 'structure: Specifies desired output structure.']
`pi`: The function returns the mathematical constant pi., Parameters: []
`dense_rank`: Ranks the current row without creating gaps in the ranking, counting peer groups all having the same rank., Parameters: ['partition_by_clause: Defines partitioning of result set(optional)', 'order_by_clause: Specifies attributes for ordering', 'frame_clause: Limits window frame range(optional)']
`repeat`: Repeats a given string a specified number of times, creating a new concatenated string as the result., Parameters: ['string: The input string to repeat.', 'count: The number of repetitions wanted.']
`current_schema`: Returns the name of the currently active schema, which defaults to 'main'., Parameters: []
`struct_extract`: This function extracts a specific entry from a STRUCT using either a name or an index., Parameters: ['struct: The struct to extract from', 'entry: The name of the entry(optional)']
`get_current_timestamp`: Returns the current date and time at the start of the current transaction., Parameters: []
`regexp_extract_all`: Splits the input string using the specified regex and retrieves all matches for the specified capturing group., Parameters: ['string: Input string to process', 'regex: Regular expression pattern', 'group: Match group to extract(optional)', 'options: Regular expression options(optional)']
`repeat`: The function generates a table with repeated rows of specified data values for a given number of times., Parameters: ['repeat_row: Values for the repeated rows.', 'num_rows: Number of rows to generate.']
`read_text`: Reads the content of specified files or patterns as a `VARCHAR`, validating for UTF-8 encoding., Parameters: ['source: File path or glob pattern']
`last`: Returns the last value of a column within a group of rows ordered by an expression., Parameters: ['column: The column to evaluate.', 'order by expression: Column or expression for sorting.(optional)', 'partition by expression: Column or expression for partitioning.(optional)', 'frame: Specifies window frame for function.(optional)']
`encode`: Converts a STRING to a BLOB, transforming UTF-8 characters into literal encoding., Parameters: ['string: The input string to encode.']
`dayofweek`: Extracts the numeric representation of the day of the week from a given date, where Sunday is represented as 0 and Saturday as 6., Parameters: ['date: The date to evaluate.']
`enum_range`: Returns all values of the given ENUM type as an array, allowing easy access to the possible values., Parameters: ['enum: Input enum type reference']
`json_extract_path`: Extracts JSON from a JSON object at a specified path, returning a result in JSON format., Parameters: ['json: The source JSON object.', 'path: The JSON path to extract.']
`array_slice`: Extracts a sublist from an array using specified start, end, and optional step values, similar to Python slicing. Handles negative indices., Parameters: ['list: The list to be sliced', 'begin: Index to start slicing from', 'end: Index to stop slicing at', 'step: Step size for slicing(optional)']
`pragma_table_info`: Returns information about the columns in a table including details such as column name, type, nullability, default value, and if it's part of the primary key., Parameters: ['table_name: Name of the target table']
`arg_max`: Finds the values associated with the maximum criterion in a dataset, optionally returning the top-N values in descending order., Parameters: ['arg: Expression to evaluate at max', 'val: Criterion for determining maximum value', 'n: Top n values to return(optional)']
`typeof`: The function returns the data type of the given expression's result., Parameters: ['expression: Expression to determine data type']
`strip_accents`: Removes accents from a string., Parameters: ['string: Input string to process.']
`gen_random_uuid`: Generates and returns a random UUID similar to `eeccb8c5-9943-b2bb-bb5e-222f4e14b687`., Parameters: []
`starts_with`: Checks if a string begins with a specified substring., Parameters: ['string: The string to search in.', 'search_string: The string to search for.']
`damerau_levenshtein`: The function calculates the minimum number of edit operations needed to transform one string into another, allowing insertions, deletions, substitutions, or transpositions of adjacent characters, with case-sensitive comparison., Parameters: ['s1: First string input to compare', 's2: Second string input to compare']
`cardinality`: Returns the number of key-value entries contained in a map., Parameters: ['map: The map to count entries of']
`which_secret`: Determines and returns the secret being used based on a file path and secret type., Parameters: ['path: File path to check secret', 'secret_type: Type of the secret service']
`corr`: The correlation coefficient is calculated between two sets of data to measure the strength and direction of a linear relationship between them., Parameters: ['y: First variable for correlation', 'x: Second variable for correlation']
`translate`: Converts characters in a string based on specified mappings from one set of characters to another., Parameters: ['source: Input string to be modified', 'from: Characters to be replaced', 'to: Replacement characters']
`array_unique`: This function counts the unique elements in a list., Parameters: ['list: The list to evaluate']
`json_keys`: Returns the keys of a JSON object as a list of strings. If a path is specified, it returns keys of the JSON object at that path., Parameters: ['json: JSON object to extract keys', 'path: path within the JSON object(optional)']
`list_has_any`: Returns true if any elements exist in both given lists., Parameters: ['list1: First list to compare elements', 'list2: Second list to compare elements']
`map_extract`: Returns a list with the value corresponding to a specified key from the map or an empty list if the key is not present., Parameters: ['map: Input map to search within.', 'key: Key to find in map.']
`try_strptime`: Converts a string into a timestamp using specified format strings, returning NULL on failure., Parameters: ['text: String to be converted', 'format: Format to parse the string']
`array_position`: Returns the index of an element in the list, or NULL if it is not found., Parameters: ['list: The list to search through', 'element: The element to find']
`str_split_regex`: Splits a string into parts based on a specified regular expression pattern., Parameters: ['string: Input string to split', 'regex: Regular expression for splitting']
`to_date`: Converts a string representation of a date into a date object., Parameters: ['date_text: String representation of date', 'format: Date format for parsing']
`strpos`: Returns the location of the first occurrence of a substring within a string, counting from 1. Returns 0 if no match is found., Parameters: ['string: The main string to search.', 'search_string: Substring to search for.']
`dbgen`: The table function generates TPC-H benchmark data according to a specified scale factor., Parameters: ['catalog: Target catalog for data generation(optional)', 'children: Number of partitions for data(optional)', 'overwrite: Unused parameter for overwrite(optional)', 'sf: Scale factor for data size', 'step: Define partition generation step(optional)', 'suffix: Append suffix to table names(optional)']
`string_split`: Splits a given string using a specified separator and returns an array of the resulting substrings., Parameters: ['string: The string to be split', 'separator: Separator to split the string']
`struct_insert`: The function adds new field(s) or value(s) to an existing STRUCT with the given argument values, using bound variable names as entry names., Parameters: ['struct: The initial struct object.', 'name := any, ...: Name-value pairs to add.']
`truncate`: Deletes all rows from a specified table without using a WHERE clause., Parameters: ['table_name: Name of the table.']
`list_sort`: Sorts the elements of a list based on specified ordering and null placement options., Parameters: ['list: The list to be sorted.', 'order: Sort order: ASC or DESC.(optional)', 'null_order: NULL placement: FIRST or LAST.(optional)']
`epoch_ns`: Returns the total number of nanoseconds since the epoch for a given timestamp., Parameters: ['timestamp: The input timestamp to convert']
`sqrt`: Computes the square root of a given numerical value., Parameters: ['x: A number to find the root']
`current_localtimestamp`: Returns a `TIMESTAMP` representing the current local date and time, as determined by the current time zone settings., Parameters: []
`map_entries`: Returns a list of structs containing key-value pairs from the map., Parameters: ['map: Map containing key-value pairs']
`duckdb_extensions`: Provides metadata about installed and loaded DuckDB extensions, including their name, status, and location., Parameters: ['extension_name: Name of the extension(optional)', 'loaded: Extension is currently loaded(optional)', 'installed: Extension is currently installed(optional)', 'install_path: Path of extension binary(optional)', 'description: Description of the extension(optional)', 'aliases: Alternative names for extension(optional)']
`seq_scan`: Performs a sequential scan on a specified table, returning all the rows without using an index., Parameters: ['table_name: Name of the table to scan.', 'columns: Columns to select from table.(optional)']
`duckdb_dependencies`: Provides metadata on dependencies between objects in the DuckDB instance., Parameters: ['classid: Always zero for this function.(optional)', 'objid: Internal id of the object.', 'objsubid: Always zero for this function.(optional)', 'refclassid: Always zero for this function.(optional)', 'refobjid: Internal id of the dependency.', 'refobjsubid: Always zero for this function.(optional)', 'deptype: Type of dependency (n/a).']
`test_all_types`: Generates a table with columns for various data types, displaying their minimum, maximum, and null values for testing purposes., Parameters: []
`duckdb_memory`: Provides metadata about DuckDB's buffer manager, detailing memory and disk usage for various components., Parameters: []
`stddev_samp`: Calculates the sample standard deviation., Parameters: ['x: Input data column for function']
`ntile`: Divides a dataset into a specified number of equally-sized buckets, assigning each row a bucket number ranging from 1 to the number of buckets., Parameters: ['num_buckets: Number of partitions for data distribution']
`isodow`: Returns the ISO numeric day of the week, where Monday is represented as 1 and Sunday as 7., Parameters: ['date: Date to calculate ISO weekday']
`monthname`: Returns the English name of the month for a given date or timestamp., Parameters: ['date: The date or timestamp input.']
`array_to_json`: Converts a LIST into a JSON array., Parameters: ['list: A list to convert']
`to_base64`: Converts a BLOB (binary large object) to a base64 encoded string., Parameters: ['blob: Input binary large object']
`array_extract`: Extracts a single element from a list based on a specified 1-based index position., Parameters: ['list: The list to extract from', 'index: The 1-based position index']
`map_keys`: Returns a list of all keys in the specified map., Parameters: ['map: The input map to query']
`dayofmonth`: Extracts the day part from a given date, representing the day of the month as an integer., Parameters: ['date: Date value to extract from']
`like_escape`: Returns true if the specified string matches the provided like pattern using case-sensitive matching, where an escape character is used to treat wildcard characters as literals., Parameters: ['string: The input string to match', 'like_specifier: Pattern to match the string', 'escape_character: Character to escape wildcards(optional)']
`weekofyear`: Calculates the ISO Week number from a given date., Parameters: ['date: Input date to evaluate', 'timestamp: Input timestamp to evaluate(optional)']
`dayofyear`: The function retrieves the day of the year from a given date, starting from 1 for January 1st., Parameters: ['date: The date to evaluate.']
`base64`: Converts a blob to a base64 encoded string., Parameters: ['blob: The input binary data.']
`yearweek`: The function returns the ISO year and 2-digit week number combined as a BIGINT in the form YYYYWW., Parameters: ['date: The input date to compute']
`map_values`: Returns a list of all values in a map., Parameters: ['map: The map input parameter']
`rtrim`: Removes specified characters from the right side of a string., Parameters: ['string: The string to trim', 'characters: Characters to remove from end(optional)']
`ltrim`: This function removes specified characters (or spaces by default) from the left side of a string., Parameters: ['string: Input string to process', 'characters: Characters to trim from left(optional)']
`to_days`: Constructs a day interval from an integer representing the number of days., Parameters: ['integer: Number of days as input']
`array_concat`: Concatenates two lists into a single list without modifying the original lists., Parameters: ['list1: The first list to concatenate', 'list2: The second list to concatenate']
`right`: Extracts a specified number of characters from the end of a string., Parameters: ['string: The input string', 'count: Number of characters to extract']
`to_minutes`: Constructs an interval representing a specified number of minutes., Parameters: ['integer: Number of minutes to construct']
`tpch_queries`: The table function returns all 22 predefined TPC-H queries with their respective identifiers., Parameters: []
`json_execute_serialized_sql`: Executes JSON serialized SQL statements and returns the resulting rows. Only one statement is executed at a time., Parameters: ['serialized_sql: JSON serialized SQL statement.']
`dsdgen`: Generates TPC-DS benchmark data based on specified scale factor., Parameters: ['sf: Set scale factor for data', 'overwrite: Overwrite existing data when true(optional)', 'suffix: Set file suffix for output(optional)']
`parquet_kv_metadata`: Returns custom key-value metadata defined in a Parquet file., Parameters: ['file_name: Path to the Parquet file', 'key: Metadata keys in BLOB format(optional)', 'value: Metadata values in BLOB format(optional)']
`pragma_version`: Retrieves the current version of DuckDB., Parameters: []
`listagg`: Concatenates string values from a specified column into a single string with a specified separator, ordered based on optional criteria., Parameters: ['arg: Column to concatenate values from', 'sep: Separator string for concatenation(optional)', 'ORDER BY: Optional ordering criteria for aggregation(optional)']
`decade`: Calculates the decade from a given date using the formula (year / 10)., Parameters: ['date: The input date value']
`list_pack`: Creates a `LIST` containing the provided argument values., Parameters: ['any: Values to include in list']
`hex`: Converts a blob to a VARCHAR using hexadecimal encoding., Parameters: ['blob: Blob to be converted to hex']
`list_slice`: Extracts a sublist from a list based on specified begin, end, and optional step indices, supporting negative values., Parameters: ['list: The list to be sliced', 'begin: Index to start slice from', 'end: Index to end slice at', 'step: Step size between elements(optional)']
`greatest_common_divisor`: Computes the greatest common divisor of two numbers., Parameters: ['x: First integer for GCD calculation', 'y: Second integer for GCD calculation']
`array_aggr`: Executes an aggregate function on the elements of a list., Parameters: ['list: The list of elements.', 'name: Aggregate function to apply.', 'additional_args: Additional arguments for function.(optional)']
`array_reduce`: Reduces elements of a list to a single value using a lambda function starting from the first element., Parameters: ['list: List to be reduced', 'lambda: Function applied to elements']
`regexp_escape`: Escapes special characters in a string to make it suitable for use in a regular expression, similar to Python's `re.escape`., Parameters: ['string: The input string to escape.']
`constant_or_null`: Returns `NULL` if the second argument is `NULL`, otherwise it returns the first argument., Parameters: ['arg1: The value to return.', 'arg2: Conditional check for NULL.']
`json_deserialize_sql`: Deserializes JSON serialized SQL statements back into SQL strings., Parameters: ['json: The JSON object to deserialize']
`datesub`: Calculates the number of complete partitions (e.g., months) between two dates or timestamps based on the specified part., Parameters: ['part: Date part to evaluate', 'startdate: Start date or timestamp', 'enddate: End date or timestamp']
`json_transform_strict`: Transforms JSON according to a specified structure, ensuring type casting is strictly followed and throwing an error on failure., Parameters: ['json: The JSON object to transform.', 'structure: Structure for transformation casting.']
`array_indexof`: Returns the index of the specified element in the list and returns NULL if not found., Parameters: ['list: List to search within', 'element: Element to find in list']
`millisecond`: Extracts the sub-minute millisecond component from a timestamp., Parameters: ['timestamp: Timestamp to extract from.']
`union_tag`: Retrieve the currently selected tag of a union as an Enum value., Parameters: ['union: The union to inspect']
`json_array_length`: Returns the number of elements in a JSON array, or 0 if it's not a JSON array. If a path is specified, it returns the number of elements at that path., Parameters: ['json: The JSON array to evaluate.', 'path: Path in JSON to evaluate.(optional)']
`array_reverse_sort`: Sorts a list in reverse order., Parameters: ['list: The list to sort', 'null_order: Order for null values(optional)']
`list_filter`: The function constructs a list from elements of the input list for which a given lambda function returns true., Parameters: ['list: The input list to filter', 'lambda: Function to test elements']
`rpad`: Pads a string with a specified character from the right until it reaches a certain length., Parameters: ['string: The input string to pad', 'count: Target length of padded string', 'character: Character to use for padding']
`transaction_timestamp`: Returns the current date and time at the start of the current transaction., Parameters: []
`enum_last`: Returns the last value of the input enum type., Parameters: ['enum: The enum type to examine']
`array_dot_product`: Alias for computing the inner product of two arrays., Parameters: ['array1: First array for calculation', 'array2: Second array for calculation']
`list_element`: The function extracts the nth (1-based) value from a list., Parameters: ['list: The list to be indexed', 'index: Position to extract element from']
`isfinite`: Checks if a floating point value is finite, returning true for finite numbers and false for infinity or NaN values., Parameters: ['x: The value to be checked.']
`to_milliseconds`: Constructs an interval of milliseconds based on the provided integer value., Parameters: ['integer: Number of milliseconds interval']
`regexp_full_match`: The function checks if the entire string matches the given regular expression and returns `true` if it does., Parameters: ['string: The input string to match', 'regex: The regular expression pattern', 'options: Options for regex; controls behavior(optional)']
`map_contains`: Determines if a map has a specified key., Parameters: ['map: The map to check.', 'key: The key to search.']
`to_centuries`: Constructs an interval representing a duration of centuries based on the integer input., Parameters: ['integer: Number of centuries to construct']
`epoch_us`: Converts a timestamp to the total number of microseconds since the epoch., Parameters: ['timestamp: Timestamp to convert to microseconds', 'time zone (for Timestamptz): Time zone for this timestamp(optional)']
`to_years`: Constructs an interval of years from an integer value., Parameters: ['integer: Number of years to construct']
`array_inner_product`: Computes the inner product between two non-null arrays of the same size., Parameters: ['array1: First array for computation', 'array2: Second array for computation']
`currval`: Returns the current value of a specified sequence after it has been incremented at least once via the `nextval` function., Parameters: ['sequence_name: Name of the sequence.']
`list_extract`: Extracts the nth element from a list, indexing from 1., Parameters: ['list: The list to extract from', 'index: The element position to retrieve']
`enum_range_boundary`: Returns an array representing the range between two enum values, allowing nulls to extend the range to the enum's boundaries., Parameters: ['enum1: Start value of the range.(optional)', 'enum2: End value of the range.(optional)']
`signbit`: Determines if the sign bit of a number is set, indicating a negative value., Parameters: ['x: Value to check sign bit']
`array_cross_product`: Computes the cross product of two non-NULL arrays, each containing exactly three elements., Parameters: ['array1: First array, non-NULL, three elements', 'array2: Second array, non-NULL, three elements']
`bitstring`: The function creates a zero-padded bitstring of a specified length based on the input bitstring., Parameters: ['bitstring: Input bitstring to be padded', 'length: Desired length of bitstring']
`length_grapheme`: Calculates the number of grapheme clusters in a given string, which may differ from the number of characters if the string includes combined emojis or accented characters., Parameters: ['string: Input string for processing']
`apply`: Applies a given lambda function to each element of a list, resulting in a transformed list., Parameters: ['list: A list of elements to transform.', 'lambda: The transformation function.', 'index: Optional parameter for index.(optional)']
`sign`: Computes the sign of a number, returning -1 for negative numbers, 0 for zero, and 1 for positive numbers., Parameters: ['value: Value to find sign of']
`array_aggregate`: Executes an aggregate function on list elements., Parameters: ['list: List of elements to aggregate.', 'name: Name of the aggregate function.']
`md5_number`: Computes the MD5 hash of a string, returning it as a HUGEINT., Parameters: ['string: Input string for hashing']
`error`: Sets an error message for a scalar function during its execution, indicating a failure in processing due to some condition., Parameters: ['info: Information about the function.', 'error: Error message to set.']
`parse_filename`: This function returns the last component of a file path, similar to `os.path.basename` in Python. It can optionally remove the file extension from the component name., Parameters: ['path: The file path to parse.', 'trim_extension: Remove file extension if true.(optional)', 'separator: Type of path separator used.(optional)']
`json_extract_path_text`: Extracts a VARCHAR string from a JSON object at a specified path., Parameters: ['json: The JSON object to query.', 'path: The path in the JSON.']
`nanosecond`: The function converts a timestamp to nanoseconds since the epoch., Parameters: ['timestamp: The input timestamp to convert']
`ucase`: Converts a given string to upper case., Parameters: ['string: The string to convert.']
`isoyear`: Extracts the ISO year number from a date, where the ISO year starts on the Monday of the week containing January 4th., Parameters: ['date: Date to extract ISO year']
`array_grade_up`: Returns the indexes corresponding to the positions in the original list, similar to sorting but for indices., Parameters: ['list: Input list to process']
`parse_dirname`: Extracts and returns the top-level directory name from a given path string, based on the specified path separator type., Parameters: ['path: The path input as string', 'separator: Separator type for the path(optional)']
`enum_first`: Returns the first value of the input enum type., Parameters: ['enum: An enumerated data type.']
`to_decades`: Constructs a decade interval from an integer value representing decades., Parameters: ['integer: Number of decades to construct']
`json_value`: Extracts a JSON scalar value from the specified path in the JSON object, returning NULL if the target is not a scalar., Parameters: ['json: The JSON object to query', 'path: The path to extract value']
`weekday`: Returns a numeric representation of the weekday, where Sunday is 0 and Saturday is 6., Parameters: ['date: The date to evaluate.']
`list_cosine_similarity`: Computes cosine similarity between two lists., Parameters: ['list1: First input list of numbers', 'list2: Second input list of numbers']
`array_apply`: Applies a lambda expression to each element in a list, returning a new list with the transformed elements., Parameters: ['list: The input list to process', 'lambda: Function applied to elements']
`jaccard`: Calculates the Jaccard similarity between two strings, considering characters of different cases as different and returning a similarity score between 0 and 1., Parameters: ['s1: The first input string', 's2: The second input string']
`gcd`: Calculates the largest integer that divides two numbers without leaving a remainder., Parameters: ['x: First number for calculation', 'y: Second number for calculation']
`millennium`: Extracts the millennium part from a date., Parameters: ['date: The date to evaluate']
`json_serialize_sql`: Converts SQL SELECT statements into a JSON format, handling multiple statements and formatting options., Parameters: ['statements: Semicolon-separated SQL SELECT statements.', 'skip_empty: Skip fields that are empty.(optional)', 'skip_null: Skip fields that are null.(optional)', 'format: Format output for readability.(optional)']
`grade_up`: The function returns the positions of elements in an ascending order from the original list, representing their index in the sorted sequence., Parameters: ['list: The input list for sorting']
`cot`: Computes the cotangent of a given number `x`., Parameters: ['x: The input angle in radians']
`array_sort`: Sorts the elements of a given list in ascending order by default, allowing optional configurations for sort order and NULL handling., Parameters: ['list: Elements to be sorted', "sort_order: Order: 'ASC' or 'DESC'(optional)", "nulls_order: 'NULLS FIRST' or 'LAST'(optional)"]
`parse_path`: Returns a list of the components (directories and filename) in a path., Parameters: ['path: The file path to parse', 'separator: Separator for parsing path(optional)']
`suffix`: Appends the specified suffix to the names of the TPC-H tables generated by the data generator function `dbgen`., Parameters: ['catalog: Target catalog for dbgen.(optional)', 'children: Number of partitions for generation.(optional)', 'overwrite: Not used currently.(optional)', 'sf: Scale factor for data generation.', 'step: Defines partition to generate.(optional)', 'suffix: Append suffix to table names.(optional)']
`array_has`: Checks if a list contains a specific element, returning true if the element exists., Parameters: ['list: The list to search in.', 'element: The element to search for.']
`array_cosine_distance`: Computes the cosine distance between two arrays of the same size, where the elements cannot be NULL., Parameters: ['array1: First input array', 'array2: Second input array']
`timezone_hour`: Extracts the hour portion of the time zone offset from a given temporal value., Parameters: ['value: Temporal input value to process']
`not_like_escape`: The function checks if a string doesn't match a given pattern using case-sensitive matching, with an escape character to treat wildcards as regular characters., Parameters: ['string: Input text to be checked.', 'like_specifier: Pattern to be matched against.', 'escape_character: Char used to escape wildcards.']
`make_time`: The function creates a time using specified hour, minute, and second values., Parameters: ['hour: Hour of the time', 'minute: Minute of the time', 'second: Second and fraction of time']
`degrees`: Converts a given angle in radians to its equivalent in degrees., Parameters: ['x: Angle in radians to convert.']
`array_value`: Creates an ARRAY containing the given argument values., Parameters: ['expr: Values for the ARRAY.']
`atan2`: Computes the arctangent based on the coordinates (y, x) and returns the angle in radians., Parameters: ['y: Numerator for the arctangent', 'x: Denominator for the arctangent']
`parse_dirpath`: The function returns the head of a file path, which is the pathname until the last slash, similar to Python's os.path.dirname function., Parameters: ['path: The path to process.', 'separator: Separators for path components.(optional)']
`from_json_strict`: Transforms a JSON string into a specified nested structure and throws an error if type casting fails., Parameters: ['json: The JSON to transform', 'structure: Specifies the desired structure']
`bit_count`: Returns the number of bits that are set in the given input., Parameters: ['bitstring: The bitstring to evaluate.', 'x: The integer to evaluate.']
`ilike_escape`: Performs case-insensitive pattern matching on a string, allowing search for wildcard characters using a defined escape character., Parameters: ['string: The input string to match', 'like_specifier: The pattern to match', 'escape_character: Character for escaping wildcards']
`vector_type`: Generates a table with columns containing values for specified types and an optional argument to affect vector representation., Parameters: ['col1, ..., coln: Types of the columns', "all_flat: Affects vector's internal representation(optional)"]
`format_bytes`: Converts bytes into a human-readable format using binary units such as KiB, MiB, GiB, etc., Parameters: ['bytes: Number of bytes to convert']
`timezone_minute`: Extracts the minute portion of the time zone offset from a date or timestamp., Parameters: ['date: Date or timestamp value input', 'timestamp: Date or timestamp value input(optional)']
`prefix`: The function finds secrets with a specified prefix and returns their matching ones based on the longest prefix rule., Parameters: ['path: File path to match secret', 'type: Service secret type (e.g., S3)']
`list_cosine_distance`: Computes the cosine distance between two equal-length lists, equivalent to `1.0 - list_cosine_similarity`., Parameters: ['list1: First input list of numbers', 'list2: Second input list of numbers']
`to_millennia`: Constructs an interval representing the specified number of millennia., Parameters: ['integer: Number of millennia to construct']
`bin`: Converts an integer into its binary representation as a string., Parameters: ['value: The integer to be converted']
`list_grade_up`: Returns the indexes in sorted order based on the input list values, instead of the values themselves., Parameters: ['list: List to be sorted']
`microsecond`: The microsecond function extracts the sub-minute microsecond portion from a temporal type, such as a timestamp, returning the number of microseconds past the second., Parameters: ['date: The temporal input value.']
`list_negative_inner_product`: Computes the negative dot product of two same-sized lists of numbers, equivalent to `- list_dot_product`., Parameters: ['list1: First list of numbers', 'list2: Second list of numbers']
`century`: The century function extracts the century information from a given date., Parameters: ['date_or_timestamp: Temporal value to extract century']
`get_current_time`: This function returns the current time at the start of the current transaction., Parameters: []
`jaro_winkler_similarity`: Measures the similarity between two strings using the Jaro-Winkler method, returning a similarity score between 0 and 1, with characters of different cases treated as different., Parameters: ['s1: First string for comparison', 's2: Second string for comparison']
`list_has_all`: Checks if all elements in a sublist exist in a given list., Parameters: ['list: The list to search within', 'sub-list: The list to check for']
`asin`: Computes the arcsine of a number., Parameters: ['x: The input value.']
`json_exists`: Returns `true` if a specified path exists in a given JSON object, otherwise returns `false`., Parameters: ['json: JSON object to search', 'path: Path to check within JSON']
`from_base64`: Converts a base64 encoded string to its original character string representation., Parameters: ['string: base64 encoded input string']
`string_split_regex`: Splits a string into an array based on a regular expression delimiter., Parameters: ['string: Input string to be split.', 'regex: Delimiter expression for splitting.']
`multiply`: Performs multiplication on two numeric inputs, returning the product., Parameters: ['x: First input to multiply', 'y: Second input to multiply']
`list_transform`: Transforms each element of a list using a specified lambda function and returns the resulting list., Parameters: ['list: The input list of elements', 'lambda: Function applied to elements']
`list_resize`: Resizes a list to a specified number of elements, initializing new ones with a given value or NULL., Parameters: ['list: The list to resize.', 'size: Number of elements to resize to.', 'value: Value for new elements.(optional)']
`pow`: Computes one number raised to the power of another., Parameters: ['x: Base number to be raised', 'y: Exponent to apply to base']
`gamma`: Interpolates factorial of input minus one, allowing fractional inputs., Parameters: ['x: Input value for computation']
`to_hours`: Constructs an hour interval based on an integer input., Parameters: ['integer: Number of hours to construct']
`divide`: Performs integer division of two numbers., Parameters: ['x: dividend for the division', 'y: divisor for the division']
`array_resize`: Resizes a list to a specified size, filling added slots with a given value or NULL by default., Parameters: ['list: The list to resize.', 'size: Desired size of the list.', 'value: Fill value for added slots.(optional)']
`array_cat`: Concatenates two lists into one., Parameters: ['list1: First list to concatenate', 'list2: Second list to concatenate']
`list_indexof`: Returns the index of an element within a list or NULL if not found., Parameters: ['list: The list to search in', 'element: The element to find']
`combine`: This function is used to combine intermediate state from multiple groups in a batch, forming a result for a scalar aggregation function., Parameters: ['duckdb_aggregate_function: Represents an aggregate function object.', 'state: Current state being processed.', 'state_pointers: Array of state pointers.', 'count: Number of state pointers.']
`not_ilike_escape`: Determines if a string does not match a specified pattern using case-insensitive matching, allowing an escape character to define wildcards., Parameters: ['string: The source string to check.', 'like_specifier: The pattern for matching.', 'escape_character: Character to escape wildcards.(optional)']
`current_schemas`: Returns a list of schemas, optionally including implicit schemas when true is passed as a parameter., Parameters: ['include_implicit: Include implicit schemas when true']
`list_distance`: Calculates the Euclidean distance between two lists of coordinates with equal length., Parameters: ['list1: First list of coordinates.', 'list2: Second list of coordinates.']
`list_apply`: Returns a list from applying a lambda to each list element., Parameters: ['list: The input list to transform', 'lambda: Function to apply to elements']
`list_inner_product`: Computes the dot product of two same-sized lists of numbers., Parameters: ['list1: First list of numbers', 'list2: Second list of numbers']
`atan`: Computes the arctangent of a given numeric input., Parameters: ['x: Value for arctangent computation']
`array_negative_inner_product`: Computes the negative inner product of two arrays of the same size and containing non-NULL elements., Parameters: ['array1: First input array of numbers.', 'array2: Second input array of numbers.']
`mod`: Performs a modulo operation to return the remainder of one numeric expression divided by another., Parameters: ['dividend: The number being divided.', 'divisor: The number to divide by.']
`list_position`: Returns the index of an element in a list or NULL if the element is not found., Parameters: ['list: The list to search in', 'element: Element to find index of']
`array_has_all`: Checks if all elements of a sublist are present in a main list., Parameters: ['list: The main list to check', 'sub-list: The sublist elements checked']
`list_zip`: Combines multiple lists into a single list of structs, matching elements by position, with optional truncation., Parameters: ['list_1: First list to zip', 'list_2: Second list to zip', '...: Additional lists to zip(optional)', 'truncate: Truncate to smallest list length(optional)']
`list_has`: Returns true if the list contains the specified element., Parameters: ['list: The list to search in', 'element: An element to find']
`ord`: It returns the ASCII value of the leftmost character of a string., Parameters: ['string_expression: The string to evaluate']
`to_microseconds`: Constructs an interval representing a specified number of microseconds., Parameters: ['integer: Number of microseconds to convert']
`mismatches`: Calculates the number of positions with different characters between two strings of equal length., Parameters: ['s1: First input string to compare.', 's2: Second input string to compare.']
`make_timestamp`: The function constructs a timestamp from individual parts, including year, month, day, hour, minute, and second., Parameters: ['year: Year component', 'month: Month component', 'day: Day component', 'hour: Hour component', 'minute: Minute component', 'second: Second component']
`ascii`: Returns the Unicode code point of the first character of a given string., Parameters: ['string: Input string for conversion.']
`log10`: Computes the base-10 logarithm of a number., Parameters: ['x: Number to compute log base 10']
`json_contains`: Returns true if a specified JSON value or structure is contained within another JSON object or array., Parameters: ['json_haystack: The JSON object or array', 'json_needle: The value to find']
`list_select`: Returns a list using specified indices., Parameters: ['value_list: The list of values.', 'index_list: Indices of selected elements.']
`enum_code`: Returns the numeric value associated with a specific ENUM value, providing its backing integer representation., Parameters: ['enum_value: The ENUM value to process']
`ln`: Computes the natural logarithm of a given number., Parameters: ['x: Number to compute the logarithm']
`printf`: The function formats a string using the printf syntax., Parameters: ['format: String format specifying placeholders.', 'parameters: Values to replace format specifiers.(optional)']
`octet_length`: Calculates the number of bytes in the binary representation., Parameters: ['blob: A binary large object']
`json_quote`: Creates a JSON representation from any type of value, interpreting LISTs as JSON arrays and STRUCTs or MAPs as JSON objects., Parameters: ['any: Value to convert to JSON']
`isnan`: Checks if the floating-point value is not a number and returns true if so, false otherwise., Parameters: ['x: Value to check if NaN']
`editdist3`: Calculates the minimum number of single-character edits (insertions, deletions, or substitutions) needed to change one string into another. It's case-sensitive and treats characters of different cases as distinct., Parameters: ['s1: The first input string', 's2: The second input string']
`set_bit`: Sets a specific bit at a given index in a bitstring to a new value, returning a new bitstring., Parameters: ['bitstring: The input bitstring value.', 'index: Position to set the bit.', 'new_value: New bit value to set.']
`to_weeks`: Constructs a week interval based on the given number of weeks., Parameters: ['integer: Number of weeks to convert']
`array_select`: Returns a list based on elements selected by indices from the index list., Parameters: ['value_list: The list of values.', 'index_list: List of indices to select.']
`lcase`: Converts a string to lower case., Parameters: ['string: The string to convert.']
`cbrt`: Calculates the cube root of a given number., Parameters: ['x: The number to cube root']
`element_at`: The function retrieves the value for a given key from a map, returning a list with the value or an empty list if the key is absent., Parameters: ['map: The map from which to retrieve', 'key: Key to retrieve value for']
`list_reduce`: Reduces elements of a list into a single value using a lambda function applied sequentially from the first element., Parameters: ['list: Input list of elements', 'lambda: Function applied to elements']
`json_array`: Creates a JSON array from one or more values., Parameters: ['value1: First value for JSON array', 'value2: Additional values for JSON array(optional)', '...: Additional values for JSON array(optional)']
`isinf`: This function checks if a floating point number is infinite and returns true or false accordingly., Parameters: ['x: Value to check for infinity']
`factorial`: Computes the product of an integer and all positive integers below it., Parameters: ['x: The integer to compute factorial']
`make_date`: Constructs a date from the specified year, month, and day components., Parameters: ['year: The value of the year.', 'month: The value of the month.', 'day: The value of the day.']
`log2`: Computes the logarithm of a number to base 2., Parameters: ['x: Number to compute logarithm.']
`ceiling`: Rounds a given number up to the nearest integer., Parameters: ['x: The input number to round']
`setseed`: Sets the seed for the random function., Parameters: ['x: Seed value for randomness']
`bit_position`: Returns the first starting index of a given substring within a bitstring, indexed from 1, or zero if the substring isn't present., Parameters: ['substring: Substring to search for', 'bitstring: Bitstring to be searched']
`even`: Rounds a numeric value to the nearest even integer by rounding away from zero., Parameters: ['x: The numeric value to round']
`least_common_multiple`: Computes the least common multiple of two numbers., Parameters: ['x: First number for LCM computation', 'y: Second number for LCM computation']
`stats`: This function provides statistics about a given expression, including minimum and maximum values, and null presence., Parameters: ['expression: The expression to evaluate']
`icu_sort_key`: Generates a surrogate key for sorting characters according to locale., Parameters: ['string: Characters to sort by locale', 'collator: Locale specifier for sorting(optional)']
`array_distance`: Computes the Euclidean distance between two arrays of equal size, which cannot contain NULL values., Parameters: ['array1: First array of floats', 'array2: Second array of floats']
`hamming`: Calculates the number of differing positions between two equally long strings, considering case sensitivity., Parameters: ['s1: First string to compare', 's2: Second string to compare']
`second`: Extracts the seconds part from a timestamp or an interval., Parameters: ['input: The timestamp or interval value']
`to_months`: Constructs a month interval from an integer value., Parameters: ['integer: Number of months to construct']
`left_grapheme`: This function extracts a specified number of grapheme clusters from the beginning of a string., Parameters: ['string: Input string to extract from', 'count: Number of graphemes to extract']
`substring_grapheme`: Extracts a substring composed of a specified number of grapheme clusters starting from a given position., Parameters: ['string: The input string to operate on.', 'start: Starting position of extraction.', 'length: Number of grapheme clusters to extract.']
`jaro_similarity`: Calculates the Jaro similarity between two strings, returning a value between 0 and 1 that indicates how similar the strings are. Characters of different cases are considered different., Parameters: ['s1: First input string', 's2: Second input string']
`json_type`: Returns the type of a JSON element or a specified path within a JSON object., Parameters: ['json: The JSON data input', 'path: Path within the JSON(optional)']
`json_valid`: Checks if the input is valid JSON, returning `true` if it is valid and `false` otherwise., Parameters: ['json: The string to validate as JSON.']
`lgamma`: Computes the logarithm of the Gamma function, which is useful for situations where you need to handle large scale factorials and avoid overflow issues by using their logarithm instead., Parameters: ['x: Input number for computation']
`array_where`: Applies a Boolean mask to a list, returning only the elements that correspond to true values in the mask., Parameters: ['value_list: The list to be filtered.', 'mask_list: The Boolean mask list.']
`list_reverse_sort`: Sorts the elements of a list in reverse order., Parameters: ['list: The list to be sorted', 'null_order: Order for NULL values(optional)']
`unicode`: Returns the Unicode code of the first character of a given string, or -1 if the string is empty. Returns NULL if the input is NULL., Parameters: ['string: Input string to analyze']
`get_bit`: Extracts the nth bit from a bitstring, with the first (leftmost) bit indexed at 0., Parameters: ['bitstring: The bitstring to examine.', 'index: Zero-based bit index.']
`right_grapheme`: Extracts the right-most specified number of grapheme clusters from a given string., Parameters: ['string: Input string to extract from', 'count: Number of graphemes to extract']
`lcm`: Computes the least common multiple of two numeric values., Parameters: ['x: First number for LCM computation', 'y: Second number for LCM computation']
`list_where`: Applies a boolean mask to a list to filter elements based on the mask's true values., Parameters: ['value_list: List to mask elements from', 'mask_list: Boolean mask for value_list']
`sha256`: Computes the SHA-256 hash of a given value and returns it as a VARCHAR., Parameters: ['value: Value to hash with SHA-256.']
`era`: The scalar function extracts the era component (e.g., the BC/AD indicator) from a given date or timestamp value., Parameters: ['value: The date/timestamp value to extract the era from']
`strlen`: The function returns the number of bytes in a given string., Parameters: ['string: The input string to measure']
`to_seconds`: Converts an integer into a second interval., Parameters: ['integer: Number of seconds to construct']
`array_zip`: Combines multiple lists into one, creating a list of structs based on elements from each input list. Missing values are replaced with NULL when lists have different lengths., Parameters: ['list1: First list to combine.', 'list2: Second list to combine.', '...: Additional lists to combine.(optional)', 'truncate: Indicates whether to truncate.(optional)']
`list_negative_dot_product`: Computes the negative dot product of two same-sized lists of numbers., Parameters: ['list1: First list of numbers', 'list2: Second list of numbers']
`tan`: Computes the tangent of a given angle., Parameters: ['x: Angle for tangent calculation']
`bit_length`: Calculates the total number of bits in a bitstring value., Parameters: ['bitstring: The input bitstring value.']
`list_cat`: Concatenates two lists into a single list., Parameters: ['list1: First list to concatenate', 'list2: Second list to concatenate']
`union_extract`: Extracts the value with the specified tag from a union; returns NULL if the tag is not currently selected., Parameters: ['union: The union object to extract from.', 'tag: The tag value to extract.']
`union_value`: Creates a "UNION" holding a single value, tagged by the parameter name., Parameters: ['tag: The name for the tagged value.', 'expr: The value to be tagged.']
`make_timestamptz`: Creates a TIMESTAMP WITH TIME ZONE based on specified date-time components and, optionally, a time zone., Parameters: ['year: Year component of date', 'month: Month component of date', 'day: Day component of date', 'hour: Hour component of time', 'minute: Minute component of time', 'second: Second component of time', 'timezone: Time zone of timestamp(optional)']
`nfc_normalize`: Converts a string into its Unicode Normalization Form C (NFC), which is useful for string comparisons and ordering when dealing with mixed normalization forms., Parameters: ['string: The string to normalize']
`txid_current`: Returns the current transaction's identifier, a BIGINT value, creating a new one if necessary., Parameters: []
`nextafter`: Returns the next floating point value after one number in the direction of another., Parameters: ['x: Starting floating point number.', 'y: Direction towards this number.']
`subtract`: Subtracts two values, resulting in their difference., Parameters: ['x: The first numerical operand', 'y: The second numerical operand']
`chr`: Converts an ASCII code value into its corresponding character., Parameters: ['x: ASCII code value to convert']
`array_negative_dot_product`: Computes the negative inner product of two arrays of the same size and whose elements cannot be NULL., Parameters: ['array1: First array for computation.', 'array2: Second array for computation.']
`list_dot_product`: Computes the dot product of two lists of numbers of the same size., Parameters: ['list1: First list of numbers.', 'list2: Second list of numbers.']
`current_localtime`: Returns the current local time in the time zone setting of the database., Parameters: []
`xor`: Performs a bitwise exclusive OR operation between two bitstring values., Parameters: ['x: First bitstring to be XORed', 'y: Second bitstring to be XORed']
`reduce`: The function applies a lambda expression to each element of a list to produce a single cumulative result., Parameters: ['list: The input list of elements', 'lambda: Function applied to elements']
`finalize`: Finalizes the execution of a prepared statement, ensuring that any allocated resources are released., Parameters: ['sql: SQL statement to finalize(optional)', 'params: Parameters for the SQL statement(optional)', 'callback: Function called upon completion(optional)']
`exp`: Computes the exponential of a given input number, which is denoted as 'e' raised to the power of the input number., Parameters: ['input_number: Number to calculate the exponential']
`read_ndjson_auto`: The function reads newline-delimited JSON (NDJSON) files and automatically infers JSON schema and types., Parameters: ['filename: File or list of files', 'auto_detect: Auto-detect key names and types(optional)', 'columns: Specifies key names and types(optional)', 'compression: File compression type detection(optional)', 'convert_strings_to_integers: Convert strings to numerical types(optional)', 'dateformat: Date parsing format specification(optional)', 'filename: Include extra filename column(optional)', 'format: Format of JSON to read(optional)', 'hive_partitioning: Interpret as Hive partitioned(optional)', 'ignore_errors: Ignore parse errors in files(optional)', 'maximum_depth: Max depth for schema detection(optional)', 'maximum_object_size: Max size of JSON object(optional)', 'records: Whether JSON contains records(optional)', 'sample_size: Sample objects for type detection(optional)', 'timestampformat: Parsing format for timestamps(optional)', 'union_by_name: Unify schemas of files(optional)']
`arrow_scan`: The "arrow_scan" table function allows DuckDB to query data directly from an Arrow dataset. Users can provide a connection to the database and the Arrow stream containing the data, and DuckDB will interface with the Arrow stream to perform SQL queries., Parameters: ['connection: The database connection to use', 'table_name: Name for the Arrow table', 'arrow: The Arrow stream with data']
`parquet_metadata`: Queries the metadata of a Parquet file, providing details about row groups, columns, and basic statistics., Parameters: ['file_name: Name of the Parquet file.', 'row_group_id: ID of each row group.', 'row_group_num_rows: Number of rows in group.', 'row_group_num_columns: Columns present in row group.', 'row_group_bytes: Size in bytes of group.', 'column_id: ID of each column.', 'file_offset: Offset position in file.', 'num_values: Number of values in column.', 'path_in_schema: Column path in schema.', 'type: Data type of column.', 'stats_min: Minimum value statistic.', 'stats_max: Maximum value statistic.', 'stats_null_count: Count of null values.', 'stats_distinct_count: Count of distinct values.', 'stats_min_value: Actual minimum value found.', 'stats_max_value: Actual maximum value found.', 'compression: Compression algorithm used.', 'encodings: Encodings applied to column.', 'index_page_offset: Offset to index page.', 'dictionary_page_offset: Offset to dictionary page.', 'data_page_offset: Offset to data page.', 'total_compressed_size: Size after compression.', 'total_uncompressed_size: Size before compression.', 'key_value_metadata: Custom key-value metadata pairs.']
`parquet_file_metadata`: Queries file-level metadata of Parquet files, including format version and encryption details., Parameters: ['file_name: Path to the Parquet file', 'created_by: Creator of the Parquet file(optional)', 'num_rows: Number of rows in file', 'num_row_groups: Number of row groups', 'format_version: Format version used', 'encryption_algorithm: Encryption algorithm used(optional)', 'footer_signing_key_metadata: Metadata of signing key(optional)']
`sniff_csv`: The function identifies CSV properties from a file, returning details such as delimiters, quoting rules, and column types., Parameters: ['filename: Path to the CSV file.', 'sample_size: Rows considered for detection.(optional)']
`duckdb_types`: The function provides metadata about data types available in a DuckDB instance, including type name, type size, and logical type information., Parameters: ['database_name: Database containing the type', 'database_oid: Internal ID of the database', 'schema_name: Schema containing the type', 'schema_oid: Internal ID of the schema', 'type_name: Name or alias of the type', 'type_oid: Internal ID of the type(optional)', 'type_size: Bytes required to represent', 'logical_type: Canonical name of the type', 'type_category: Category of the data type', 'internal: Whether type is built-in']
`index_scan`: Performs an index scan on a specified table and column, returning the row IDs that match the scan condition., Parameters: ['index_name: Name of the index to scan', 'scan_condition: Condition determining rows for scan(optional)']
`repeat_row`: Generates a table with multiple rows, each containing specified fields., Parameters: ['varargs: Fields for each table row', 'num_rows: Number of rows to generate']
`read_ndjson`: Reads newline-delimited JSON (NDJSON) directly, interpreting each line as a separate JSON object., Parameters: ['compression: The compression type for the file(optional)', 'filename: Include extra filename column(optional)', 'format: Specifies JSON read format(optional)', 'hive_partitioning: Interpret path as Hive partitioned(optional)', 'ignore_errors: Ignore parse errors if newline(optional)', 'maximum_sample_files: Maximum JSON files sampled(optional)', 'maximum_object_size: Maximum size of JSON object(optional)']
`checkpoint`: Synchronizes the write-ahead log (WAL) with the database file without interrupting transactions., Parameters: ['database: Name of the database to be checkpointed(optional)']
`duckdb_optimizers`: Returns metadata about DuckDB's optimization rules, which can be selectively disabled for debugging., Parameters: []
`duckdb_temporary_files`: This function provides metadata about the temporary files DuckDB has written to disk, including their path and size., Parameters: []
`force_checkpoint`: Synchronizes the write-ahead log (WAL) with the file of the specified database, interrupting transactions., Parameters: ['database: Target database for checkpoint(optional)']
`pg_timezone_names`: The table function retrieves a list of available time zones and their respective abbreviations and UTC offsets., Parameters: ['name: Time zone full name', 'abbrev: Time zone abbreviation(optional)', 'utc_offset: Time zone UTC offset value(optional)']
`duckdb_variables`: The table function provides metadata about the variables available in the DuckDB instance, including their name, value, and type., Parameters: []
`tpch_answers`: Produces expected results for TPC-H queries for specified scale factors., Parameters: []
`pragma_collations`: Returns a list of all available collation names including both built-in and ICU extension collations., Parameters: []
`test_vector_types`: Generates a table with columns containing values conforming to the types of the input arguments., Parameters: ['coln: Columns with type-conforming values.', 'all_flat: Affects internal vector representation.(optional)']
`read_blob`: Reads content from a specified source as a BLOB, supporting file names, lists, or glob patterns., Parameters: ['source: Specifies the data source.']
`pragma_platform`: Returns an identifier for the platform DuckDB was compiled for., Parameters: []
`icu_calendar_names`: Retrieves and lists available non-Gregorian calendars supported by the ICU extension., Parameters: []
`summary`: Computes aggregates over all columns of a table or query, including min, max, average, and more, and returns these along with column names and types., Parameters: ['table_name: Name of the table to summarize', 'query: SQL query to summarize']
`parquet_scan`: Reads one or more Parquet files as table-like structures, supporting various configurations for file reading and processing., Parameters: ['path_or_list_of_paths: Paths to Parquet file(s)', 'binary_as_string: Load binary columns as strings(optional)', 'encryption_config: Configuration for Parquet encryption(optional)', 'filename: Include filename column result(optional)', 'file_row_number: Include file row number column(optional)', 'hive_partitioning: Interpret as Hive partitioned path(optional)', 'union_by_name: Unify columns of multiple schemas(optional)', 'MD_RUN: Control remote/local query execution(optional)']
`count_star`: The aggregate function calculates the total number of rows in a selected column or set; unlike `count(column)`, it counts rows regardless of NULL values.
Example: `SELECT count(*) FROM students;`, Parameters: ['expression: Column or expression to evaluate(optional)']
`approx_count_distinct`: Provides an approximate count of distinct elements using HyperLogLog., Parameters: ['x: Input to count distinct elements.', 'accurate_value_count: Accuracy level for the estimation.(optional)', 'debug: Debugging mode for output.(optional)']
`argmax`: Finds the row with the maximum value in a specified column and evaluates another column's expression at that row., Parameters: ['arg: Expression to evaluate at maximum', 'val: Column to find maximum value', 'n: Number of top rows to return(optional)']
`skewness`: Calculates the skewness, measuring asymmetry of a distribution., Parameters: ['x: Data values for skewness calculation']
`regr_sxy`: Calculates the sample covariance with Bessel's bias correction for pairs of non-null values., Parameters: ['y: dependent variable values', 'x: independent variable values']
`entropy`: Calculates the log-2 entropy of a given dataset, measuring information or uncertainty within the data., Parameters: ['x: Data for entropy calculation.']
`regr_syy`: Calculates the sample variance of the dependent variable, including Bessel's bias correction, for non-null pairs where x is the independent variable and y is the dependent variable., Parameters: ['y: Dependent variable data', 'x: Independent variable data']
`argmin`: The function finds the row with the minimum value of a specified column and returns the value of another specified column at that row., Parameters: ['arg: Value to be returned.', 'val: Value to be minimized.', 'n: Number of rows returned.(optional)']
`regr_count`: Returns the number of non-NULL pairs., Parameters: ['y: Dependent variable in pairs', 'x: Independent variable in pairs']
`arbitrary`: Returns the first value (null or non-null) from the input argument, useful when an arbitrary value from a group is needed without specifying an order., Parameters: ['arg: The column or expression to retrieve an arbitrary value from.']
`mean`: Calculates the average of all non-null values in a given column., Parameters: ['arg: Column or expression to average']
`approx_quantile`: This function provides an approximate quantile using the T-Digest algorithm., Parameters: ['x: Dataset column to analyze', 'pos: Quantile position from 0-1']
`kurtosis`: Calculates the excess kurtosis with bias correction according to the sample size, providing a measure of the tailedness of the distribution of data values., Parameters: ['x: Input numeric column or expression']
`quantile_cont`: Calculates the interpolated quantile for a given position within an array of values, resulting in a smooth estimate between elements., Parameters: ['x: Input data to aggregate', 'pos: Position(s) for quantile calculation']
`variance`: Calculates the variance of all non-null input values using Bessel's correction by default., Parameters: ['column: Column to calculate variance on']
`min_by`: Finds the row with the minimum value calculated from a specified expression and computes another expression from that row., Parameters: ['arg: Expression evaluated for each row.', 'val: Value used to order rows.', 'n: Number of top results.(optional)']
`bit_and`: Performs a bitwise AND on all bits in a given expression., Parameters: ['arg: Input expression for bitwise AND']
`var_pop`: Calculates the population variance of a set of values without bias correction., Parameters: ['x: Input values for variance calculation.']
`fsum`: Calculates the sum using a more accurate floating point summation (Kahan Sum) for increased precision in floating point arithmetic., Parameters: ['arg: Argument to be summed accurately']
`regr_r2`: Calculates the squared Pearson correlation coefficient between two variables in a linear regression, indicating the proportion of variance in the dependent variable that can be predicted from the independent variable., Parameters: ['y: Dependent variable in regression', 'x: Independent variable in regression']
`product`: Calculates the product of all non-null values in the specified column or expression., Parameters: ['expr: The values to multiply together.']
`mad`: Calculates the median absolute deviation of a dataset, with temporal types returning a positive `INTERVAL`., Parameters: ['x: Column containing the dataset', 'return_type: Expected return data type(optional)']
`bool_or`: Returns true if any input value is true, otherwise false., Parameters: ['arg: The input values to aggregate']
`regr_avgy`: Calculates the average of the dependent variable for non-NULL pairs, where x is the independent variable and y is the dependent variable., Parameters: ['y: Dependent variable in the function', 'x: Independent variable in the function']
`mode`: The mode function calculates the most frequently occurring value in a set of values., Parameters: ['value_column: Column containing values to analyze']
`reservoir_quantile`: Gives an approximate quantile using reservoir sampling., Parameters: ['x: Values to calculate quantile for.', 'quantile: Quantile position between 0-1.', 'sample_size: Number of samples for estimation.(optional)']
`sumkahan`: Calculates the sum of all non-null values in a column using a more accurate floating point summation to reduce numerical errors., Parameters: ['arg: Values to be summed']
`quantile`: Calculates the interpolated or discrete quantile of a set of values, determining the specific value or range at a given percentage position., Parameters: ['x: Values to aggregate for quantile', 'pos: Quantile position fraction (0-1)', 'method: Method of interpolation (for continuous quantile)(optional)']
`bool_and`: Returns `true` if every input value is `true`, otherwise `false`., Parameters: ['arg: A column or expression']
`kurtosis_pop`: Calculates the excess kurtosis of a data set (Fisher’s definition) without bias correction., Parameters: ['x: The input data values']
`regr_sxx`: Calculates the sample variance, with Bessel's bias correction, of the independent variable for non-NULL pairs., Parameters: ['y: Dependent variable values.', 'x: Independent variable values.']
`bitstring_agg`: The function returns a bitstring with bits set for each distinct position defined in the input argument., Parameters: ['arg: List of values for processing', 'min: Minimum range for positions(optional)', 'max: Maximum range for positions(optional)']
`bit_xor`: Performs a bitwise XOR on all bits in a given expression., Parameters: ['arg: Expression of bits to XOR.']
`quantile_disc`: Calculates the discrete quantile of a sorted set of values by selecting the greatest indexed element corresponding to the given position within the set., Parameters: ['x: The value set to quantify', 'pos: The quantile position(s) to return']
`kahan_sum`: Calculates the sum using an accurate floating-point summation technique (Kahan Sum) to minimize errors., Parameters: ['arg: Values to be summed accurately.']
`favg`: Calculates the average using a more accurate floating point summation technique known as Kahan Sum., Parameters: ['arg: Input values for averaging']
`regr_avgx`: Computes the average of the independent variable for non-NULL data pairs., Parameters: ['y: Dependent variable in regression', 'x: Independent variable in regression']
`covar_pop`: Computes the population covariance without bias correction., Parameters: ['y: Dependent variable data', 'x: Independent variable data']
`sem`: Calculates the population standard error of the mean from input values, which measures how far the sample mean of the data is likely to be from the true mean of the population., Parameters: ['input_values: Values to calculate SEM from', 'weight_column: Optional weights for each value(optional)']
`covar_samp`: The sample covariance is calculated, which includes Bessel's bias correction., Parameters: ['y: Dependent variable column values', 'x: Independent variable column values']
`stddev_pop`: Calculates the population standard deviation of a given dataset, ignoring NULL values., Parameters: ['x: Column for standard deviation']
`var_samp`: Calculates the sample variance using Bessel's correction, which adjusts for bias by dividing by (n-1) instead of n., Parameters: ['x: Input values to calculate variance.', 'order_clause: Optional order by clause.(optional)']
`bit_or`: Performs a bitwise OR operation across all bits of the input values., Parameters: ['arg: Values to aggregate with OR.']
`arg_min`: Finds the row with the minimum value in a specified column and evaluates another expression for that row., Parameters: ['arg: Expression to evaluate at minimum', 'val: Column to find minimum value']
`rank_dense`: The rank of the current row without gaps; peer rows receive the same rank and the rank counts peer groups., Parameters: []
`cume_dist`: Calculates the cumulative distribution of a row within its partition., Parameters: []
`nth_value`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Parameters: ['expr: Expression to evaluate at row', 'nth: Row position to evaluate at', 'ignore_nulls: Ignore nulls in evaluation(optional)']
DuckDB Statements:
`SELECT`: Retrieves rows from the database, evaluating a list of expressions and optionally deduplicating, excluding, replacing, or pattern-matching columns., Examples: ['SELECT * FROM table_name;', 'SELECT col1 + col2 AS res, sqrt(col1) AS root FROM table_name;', 'SELECT DISTINCT city FROM addresses;', 'SELECT count(*) FROM addresses;', 'SELECT * EXCLUDE (city) FROM addresses;', 'SELECT * REPLACE (lower(city) AS city) FROM addresses;', "SELECT COLUMNS('number\\d+') FROM addresses;", 'SELECT min(COLUMNS(*)) FROM addresses;', 'SELECT "Some Column Name" FROM tbl;']
`WHERE`: Filters rows based on a boolean predicate, keeping only rows for which the condition evaluates to true., Examples: ['SELECT * FROM table_name WHERE id = 3;', "SELECT * FROM table_name WHERE name ILIKE '%mark%';", 'SELECT * FROM table_name WHERE id = 3 OR id = 7;']
`ORDER BY`: Sorts the result set by one or more expressions, optionally specifying sort direction, NULL ordering, and collation., Examples: ['SELECT * FROM addresses ORDER BY city;', 'SELECT * FROM addresses ORDER BY city DESC NULLS LAST;', 'SELECT * FROM addresses ORDER BY city, zip;', 'SELECT * FROM addresses ORDER BY city COLLATE DE;', 'SELECT * FROM addresses ORDER BY ALL;', 'SELECT * FROM addresses ORDER BY ALL DESC;']
`GROUP BY`: Groups rows that share values in the specified columns so aggregate functions can be computed per group; GROUP BY ALL groups by all non-aggregated columns., Examples: ['SELECT city, count(*) FROM addresses GROUP BY city;', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY city, street_name;', 'SELECT city, street_name FROM addresses GROUP BY ALL;', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ALL;']
`WITH`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['WITH cte AS (SELECT 42 AS x) SELECT * FROM cte;', 'WITH cte1 AS (SELECT 42 AS i), cte2 AS (SELECT i * 100 AS x FROM cte1) SELECT * FROM cte2;', 'WITH t(x) AS (⟨complex_query⟩) SELECT * FROM t AS t1, t AS t2, t AS t3;', 'WITH t(x) AS MATERIALIZED (⟨complex_query⟩) SELECT * FROM t AS t1, t AS t2, t AS t3;', 'WITH RECURSIVE FibonacciNumbers (RecursionDepth, FibonacciNumber, NextNumber) AS (SELECT 0 AS RecursionDepth, 0 AS FibonacciNumber, 1 AS NextNumber UNION ALL SELECT fib.RecursionDepth + 1 AS RecursionDepth, fib.NextNumber AS FibonacciNumber, fib.FibonacciNumber + fib.NextNumber AS NextNumber FROM FibonacciNumbers fib WHERE fib.RecursionDepth + 1 < 10) SELECT fn.RecursionDepth AS FibonacciNumberIndex, fn.FibonacciNumber FROM FibonacciNumbers fn;']
`JOIN`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['SELECT * FROM table_name;', 'FROM table_name SELECT *;', 'FROM table_name;', 'SELECT tn.* FROM table_name tn;', 'SELECT * FROM schema_name.table_name;', 'SELECT t.i FROM range(100) AS t(i);', "SELECT * FROM 'test.csv';", 'SELECT * FROM (SELECT * FROM table_name);', 'SELECT t FROM t;', "SELECT t FROM (SELECT unnest(generate_series(41, 43)) AS x, 'hello' AS y) t;", 'SELECT * FROM table_name JOIN other_table ON table_name.key = other_table.key;', 'SELECT * FROM table_name TABLESAMPLE 10%;', 'SELECT * FROM table_name TABLESAMPLE 10 ROWS;', 'FROM range(100) AS t(i) SELECT sum(t.i) WHERE i % 2 = 0;', 'SELECT a.*, b.* FROM a CROSS JOIN b;', 'SELECT a.*, b.* FROM a, b;', 'SELECT n.*, r.* FROM l_nations n JOIN l_regions r ON (n_regionkey = r_regionkey);', 'SELECT * FROM city_airport NATURAL JOIN airport_names;', 'SELECT * FROM city_airport JOIN airport_names USING (iata);', 'SELECT * FROM city_airport SEMI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata IN (SELECT iata FROM airport_names);', 'SELECT * FROM city_airport ANTI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata NOT IN (SELECT iata FROM airport_names WHERE iata IS NOT NULL);', 'SELECT * FROM range(3) t(i), LATERAL (SELECT i + 1) t2(j);', 'SELECT * FROM generate_series(0, 1) t(i), LATERAL (SELECT i + 10 UNION ALL SELECT i + 100) t2(j);', 'SELECT * FROM trades t ASOF JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF LEFT JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF JOIN prices p USING (symbol, "when");', 'SELECT t.symbol, t.when AS trade_when, p.when AS price_when, price FROM trades t ASOF LEFT JOIN prices p USING (symbol, "when");', 'SELECT * FROM t AS t t1 JOIN t t2 USING(x);', 'FROM tbl SELECT i, s;', 'FROM tbl;']
`JOIN`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['SELECT * FROM table_name;', 'FROM table_name SELECT *;', 'FROM table_name;', 'SELECT tn.* FROM table_name tn;', 'SELECT * FROM schema_name.table_name;', 'SELECT t.i FROM range(100) AS t(i);', "SELECT * FROM 'test.csv';", 'SELECT * FROM (SELECT * FROM table_name);', 'SELECT t FROM t;', "SELECT t FROM (SELECT unnest(generate_series(41, 43)) AS x, 'hello' AS y) t;", 'SELECT * FROM table_name JOIN other_table ON table_name.key = other_table.key;', 'SELECT * FROM table_name TABLESAMPLE 10%;', 'SELECT * FROM table_name TABLESAMPLE 10 ROWS;', 'FROM range(100) AS t(i) SELECT sum(t.i) WHERE i % 2 = 0;', 'SELECT a.*, b.* FROM a CROSS JOIN b;', 'SELECT a.*, b.* FROM a, b;', 'SELECT n.*, r.* FROM l_nations n JOIN l_regions r ON (n_regionkey = r_regionkey);', 'SELECT * FROM city_airport NATURAL JOIN airport_names;', 'SELECT * FROM city_airport JOIN airport_names USING (iata);', 'SELECT * FROM city_airport SEMI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata IN (SELECT iata FROM airport_names);', 'SELECT * FROM city_airport ANTI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata NOT IN (SELECT iata FROM airport_names WHERE iata IS NOT NULL);', 'SELECT * FROM range(3) t(i), LATERAL (SELECT i + 1) t2(j);', 'SELECT * FROM generate_series(0, 1) t(i), LATERAL (SELECT i + 10 UNION ALL SELECT i + 100) t2(j);', 'SELECT * FROM trades t ASOF JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF LEFT JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF JOIN prices p USING (symbol, "when");', 'SELECT t.symbol, t.when AS trade_when, p.when AS price_when, price FROM trades t ASOF LEFT JOIN prices p USING (symbol, "when");', 'SELECT * FROM t AS t t1 JOIN t t2 USING(x);', 'FROM tbl SELECT i, s;', 'FROM tbl;']
`CASE`: Performs conditional evaluation, returning the result of the first matching WHEN branch, an optional ELSE value, or NULL if nothing matches., Examples: ['SELECT i, CASE WHEN i > 2 THEN 1 ELSE 0 END AS test FROM integers;', 'SELECT i, CASE WHEN i = 1 THEN 10 WHEN i = 2 THEN 20 ELSE 0 END AS test FROM integers;', 'SELECT i, CASE WHEN i = 1 THEN 10 END AS test FROM integers;', 'SELECT i, CASE i WHEN 1 THEN 10 WHEN 2 THEN 20 WHEN 3 THEN 30 END AS test FROM integers;']
`USE`: Sets the default database and/or schema that unqualified object names resolve to in subsequent statements., Examples: ['USE memory;', 'USE duck.main;']
`CREATE TABLE`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['CREATE TABLE t1 (i INTEGER, j INTEGER);', 'CREATE TABLE t1 (id INTEGER PRIMARY KEY, j VARCHAR);', 'CREATE TABLE t1 (id INTEGER, j VARCHAR, PRIMARY KEY (id, j));', 'CREATE TABLE t1 (\n i INTEGER NOT NULL,\n decimalnr DOUBLE CHECK (decimalnr < 10),\n date DATE UNIQUE,\n time TIMESTAMP\n);', 'CREATE TABLE t1 AS SELECT 42 AS i, 84 AS j;', "CREATE TEMP TABLE t1 AS SELECT * FROM read_csv('path/file.csv');", 'CREATE OR REPLACE TABLE t1 (i INTEGER, j INTEGER);', 'CREATE TABLE IF NOT EXISTS t1 (i INTEGER, j INTEGER);', 'CREATE TABLE nums AS SELECT i FROM range(0, 3) t(i);', 'CREATE TABLE t1 (id INTEGER PRIMARY KEY, percentage INTEGER CHECK (0 <= percentage AND percentage <= 100));', 'CREATE TABLE t1 (id INTEGER PRIMARY KEY, j VARCHAR);\nCREATE TABLE t2 (\n id INTEGER PRIMARY KEY,\n t1_id INTEGER,\n FOREIGN KEY (t1_id) REFERENCES t1 (id)\n);', 'CREATE TABLE t1 (x FLOAT, two_x AS (2 * x));']
`UPDATE`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['UPDATE tbl SET i = 0 WHERE i IS NULL;', 'UPDATE tbl SET i = 1, j = 2;', 'UPDATE original SET value = new.value FROM new WHERE original.key = new.key;', 'UPDATE original SET value = (SELECT new.value FROM new WHERE original.key = new.key);', "UPDATE original AS true_original SET value = (SELECT new.value || ' a change!' AS value FROM original AS new WHERE true_original.key = new.key);", "UPDATE city SET revenue = revenue + 100 FROM country WHERE city.country_code = country.code AND country.name = 'France';"]
`DROP`: Removes a catalog entry (table, view, function, index, schema, sequence, macro, or type) from the database, optionally cascading to dependents., Examples: ['DROP TABLE tbl;', 'DROP VIEW IF EXISTS v1;', 'DROP FUNCTION fn;', 'DROP INDEX idx;', 'DROP SCHEMA sch;', 'DROP SEQUENCE seq;', 'DROP MACRO mcr;', 'DROP MACRO TABLE mt;', 'DROP TYPE typ;', 'DROP SCHEMA myschema CASCADE;']
`ALTER TABLE`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['ALTER TABLE integers ADD COLUMN k INTEGER;', 'ALTER TABLE integers ADD COLUMN l INTEGER DEFAULT 10;', 'ALTER TABLE integers DROP k;', 'ALTER TABLE integers ALTER i TYPE VARCHAR;', "ALTER TABLE integers ALTER i SET DATA TYPE VARCHAR USING concat(i, '_', j);", 'ALTER TABLE integers ALTER COLUMN i SET DEFAULT 10;', 'ALTER TABLE integers ALTER COLUMN i DROP DEFAULT;', 'ALTER TABLE t ALTER COLUMN x SET NOT NULL;', 'ALTER TABLE t ALTER COLUMN x DROP NOT NULL;', 'ALTER TABLE integers RENAME TO integers_old;', 'ALTER TABLE integers RENAME i TO j;']
`FILTER`: Applies a predicate to the input of an aggregate function so that only rows matching the condition are aggregated., Examples: ['SELECT count(*) FILTER (i <= 5) AS lte_five FROM generate_series(1, 10) tbl(i);', 'SELECT sum(i) FILTER (i <= 5) AS lte_five_sum FROM generate_series(1, 10) tbl(i);', 'SELECT count(i) FILTER (year = 2022) AS "2022" FROM stacked_data;', 'SELECT first(i) FILTER (year = 2022) AS "2022" FROM stacked_data;']
`HAVING`: Filters groups produced by GROUP BY based on conditions over aggregate values., Examples: ['SELECT city, count(*) FROM addresses GROUP BY city HAVING count(*) >= 50;', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY city, street_name HAVING avg(income) > 2 * median(income);']
`DESCRIBE`: Shows the schema (column names, types, and nullability) of a table or of a query's result., Examples: ['DESCRIBE tbl;', 'DESCRIBE SELECT * FROM tbl;']
`INSERT`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['INSERT INTO tbl VALUES (1), (2), (3);', 'INSERT INTO tbl SELECT * FROM other_tbl;', 'INSERT INTO tbl (i) VALUES (1), (2), (3);', 'INSERT INTO tbl (i) VALUES (1), (DEFAULT), (3);', 'INSERT OR IGNORE INTO tbl (i) VALUES (1);', 'INSERT OR REPLACE INTO tbl (i) VALUES (1);', 'INSERT INTO tbl BY POSITION VALUES (5, 42);', 'INSERT INTO tbl BY NAME (SELECT 42 AS b, 32 AS a);', 'INSERT INTO tbl VALUES (1, 84) ON CONFLICT DO NOTHING;', 'INSERT INTO tbl VALUES (1, 84) ON CONFLICT DO UPDATE SET j = EXCLUDED.j;', 'INSERT INTO tbl (j, i) VALUES (168, 1) ON CONFLICT DO UPDATE SET j = EXCLUDED.j;', 'INSERT INTO tbl BY NAME (SELECT 84 AS j, 1 AS i) ON CONFLICT DO UPDATE SET j = EXCLUDED.j;', 'INSERT INTO t1 SELECT 42 RETURNING *;', 'INSERT INTO t2 SELECT 2 AS i, 3 AS j RETURNING *, i * j AS i_times_j;', "CREATE TABLE t3 (i INTEGER PRIMARY KEY, j INTEGER); CREATE SEQUENCE 't3_key'; INSERT INTO t3 SELECT nextval('t3_key') AS i, 42 AS j UNION ALL SELECT nextval('t3_key') AS i, 43 AS j RETURNING *;"]
`VALUES`: Specifies literal rows that can be used as an inline table in SELECT, INSERT, or CREATE TABLE AS statements., Examples: ["VALUES ('Amsterdam', 1), ('London', 2);", "SELECT * FROM (VALUES ('Amsterdam', 1), ('London', 2)) cities(name, id);", "INSERT INTO cities VALUES ('Amsterdam', 1), ('London', 2);", "CREATE TABLE cities AS SELECT * FROM (VALUES ('Amsterdam', 1), ('London', 2)) cities(name, id);"]
`DELETE`: Removes rows from a table that match an optional WHERE condition; without a condition all rows are removed (TRUNCATE is equivalent)., Examples: ['DELETE FROM tbl WHERE i = 2;', 'DELETE FROM tbl;', 'TRUNCATE tbl;']
`CALL`: Invokes a table function and returns its result set., Examples: ['CALL duckdb_functions();', "CALL pragma_table_info('pg_am');"]
`CREATE SCHEMA`: Creates a new schema in the catalog, optionally only if it does not already exist; objects can then be created inside it using qualified names., Examples: ['CREATE SCHEMA s1;', 'CREATE SCHEMA IF NOT EXISTS s2;', 'CREATE TABLE s1.t (id INTEGER PRIMARY KEY, other_id INTEGER);', 'CREATE TABLE s2.t (id INTEGER PRIMARY KEY, j VARCHAR);', 'SELECT * FROM s1.t s1t, s2.t s2t WHERE s1t.other_id = s2t.id;']
`SAMPLE`: Returns a random subset of rows from a query, specified as a percentage or a fixed row count, optionally with a sampling method., Examples: ['SELECT * FROM addresses USING SAMPLE 1%;', 'SELECT * FROM addresses USING SAMPLE 1% (bernoulli);', 'SELECT * FROM (SELECT * FROM addresses) USING SAMPLE 10 ROWS;']
`CREATE VIEW`: Defines a named view over a query; the underlying query is re-evaluated whenever the view is referenced., Examples: ['CREATE VIEW v1 AS SELECT * FROM tbl;', 'CREATE OR REPLACE VIEW v1 AS SELECT 42;', 'CREATE VIEW v1(a) AS SELECT 42;']
`COPY`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ["COPY lineitem FROM 'lineitem.csv';", "COPY lineitem FROM 'lineitem.csv' (DELIMITER '|');", "COPY lineitem FROM 'lineitem.pq' (FORMAT PARQUET);", "COPY lineitem FROM 'lineitem.json' (FORMAT JSON, AUTO_DETECT true);", "COPY lineitem TO 'lineitem.csv' (FORMAT CSV, DELIMITER '|', HEADER);", "COPY (SELECT l_orderkey, l_partkey FROM lineitem) TO 'lineitem.parquet' (COMPRESSION ZSTD);", 'COPY FROM DATABASE db1 TO db2;', 'COPY FROM DATABASE db1 TO db2 (SCHEMA);']
`QUALIFY`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['SELECT schema_name, function_name, row_number() OVER (PARTITION BY schema_name ORDER BY function_name) AS function_rank FROM duckdb_functions() QUALIFY row_number() OVER (PARTITION BY schema_name ORDER BY function_name) < 3;', 'SELECT schema_name, function_name, row_number() OVER (PARTITION BY schema_name ORDER BY function_name) AS function_rank FROM duckdb_functions() QUALIFY function_rank < 3;', 'SELECT schema_name, function_name, row_number() OVER my_window AS function_rank FROM duckdb_functions() WINDOW my_window AS (PARTITION BY schema_name ORDER BY function_name) QUALIFY row_number() OVER my_window < 3;', 'SELECT schema_name, function_name, row_number() OVER my_window AS function_rank FROM duckdb_functions() WINDOW my_window AS (PARTITION BY schema_name ORDER BY function_name) QUALIFY function_rank < 3;']
`SET VARIABLE`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['SET VARIABLE my_var = 30;', "SELECT 20 + getvariable('my_var') AS total;", 'SET VARIABLE my_var = 100;', "SET VARIABLE my_date = DATE '2018-07-13';", "SET VARIABLE my_string = 'Hello world';", "SET VARIABLE my_map = MAP {{'k1': 10, 'k2': 20}};", "SELECT getvariable('undefined_var') AS result;", "SET VARIABLE column_to_exclude = 'col1';", 'CREATE TABLE tbl AS SELECT 12 AS col0, 34 AS col1, 56 AS col2;', "SELECT COLUMNS(c -> c != getvariable('column_to_exclude')) FROM tbl;"]
`PIVOT`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['PIVOT Cities ON Year USING sum(Population);', 'PIVOT Cities ON Year USING first(Population);', 'PIVOT Cities ON Year USING sum(Population) GROUP BY Country;', 'PIVOT Cities ON Year IN (2000, 2010) USING sum(Population) GROUP BY Country;', 'PIVOT Cities ON Country, Name USING sum(Population);', "PIVOT Cities ON Country || '_' || Name USING sum(Population);", 'PIVOT Cities ON Year USING sum(Population) AS total, max(Population) AS max GROUP BY Country;', 'PIVOT Cities ON Year USING sum(Population) GROUP BY Country, Name;', 'SELECT * FROM (PIVOT Cities ON Year USING sum(Population) GROUP BY Country) pivot_alias;']
`INSTALL`: Downloads an extension so it can subsequently be loaded, optionally from an alternative repository such as community., Examples: ['INSTALL httpfs;', 'INSTALL h3 FROM community;']
`ANALYZE`: Recomputes table statistics used by the query optimizer., Examples: ['ANALYZE;']
`SUMMARIZE`: Computes summary statistics (such as min, max, approximate unique count, and averages) for every column of a table or query result., Examples: ['SUMMARIZE tbl;', 'SUMMARIZE SELECT * FROM tbl;']
`UNPIVOT`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['UNPIVOT monthly_sales ON jan, feb, mar, apr, may, jun INTO NAME month VALUE sales;', 'UNPIVOT monthly_sales ON COLUMNS(* EXCLUDE (empid, dept)) INTO NAME month VALUE sales;', 'UNPIVOT monthly_sales ON (jan, feb, mar) AS q1, (apr, may, jun) AS q2 INTO NAME quarter VALUE month_1_sales, month_2_sales, month_3_sales;', 'WITH unpivot_alias AS ( UNPIVOT monthly_sales ON COLUMNS(* EXCLUDE (empid, dept)) INTO NAME month VALUE sales ) SELECT * FROM unpivot_alias;', 'FROM monthly_sales UNPIVOT ( sales FOR month IN (jan, feb, mar, apr, may, jun) );', 'FROM monthly_sales UNPIVOT ( (month_1_sales, month_2_sales, month_3_sales) FOR quarter IN ((jan, feb, mar) AS q1, (apr, may, jun) AS q2) );']
`WINDOW`: Defines named window specifications that can be referenced by multiple window functions via OVER, avoiding repetition of the partition/order clause., Examples: ['SELECT SUM(salary) OVER my_window, AVG(salary) OVER my_window FROM employees WINDOW my_window AS (PARTITION BY department ORDER BY hire_date);', 'SELECT employee_id, first_value(name) OVER recent_hires FROM employees WINDOW recent_hires AS (ORDER BY hire_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW);']
`OFFSET`: Skips a given number of rows in the result set, typically combined with LIMIT to restrict how many rows are returned., Examples: ['SELECT * FROM addresses LIMIT 5;', 'SELECT * FROM addresses LIMIT 5 OFFSET 5;', 'SELECT city, count(*) AS population FROM addresses GROUP BY city ORDER BY population DESC LIMIT 5;']
`OFFSET`: Skips a given number of rows in the result set, typically combined with LIMIT to restrict how many rows are returned., Examples: ['SELECT * FROM addresses LIMIT 5;', 'SELECT * FROM addresses LIMIT 5 OFFSET 5;', 'SELECT city, count(*) AS population FROM addresses GROUP BY city ORDER BY population DESC LIMIT 5;']
`CREATE INDEX`: Creates an index on one or more columns or expressions of a table to speed up lookups; UNIQUE additionally enforces uniqueness., Examples: ['CREATE UNIQUE INDEX films_id_idx ON films (id);', 'CREATE INDEX s_idx ON films (revenue);', 'CREATE INDEX gy_idx ON films (genre, year);', 'CREATE INDEX i_index ON integers ((j + k));']
`CREATE TYPE`: Defines a new user-defined type such as an ENUM, STRUCT, UNION, or an alias for an existing type., Examples: ["CREATE TYPE mood AS ENUM ('happy', 'sad', 'curious');", 'CREATE TYPE many_things AS STRUCT(k INTEGER, l VARCHAR);', 'CREATE TYPE one_thing AS UNION(number INTEGER, string VARCHAR);', 'CREATE TYPE x_index AS INTEGER;']
`COLLATE`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ["SELECT 'hello' = 'hElLO'; -- Default collation", "SELECT 'hello' COLLATE NOCASE = 'hElLO'; -- Case insensitive collation", "SELECT 'hello' = 'hëllo'; -- Default collation", "SELECT 'hello' COLLATE NOACCENT = 'hëllo'; -- Accent insensitive collation", "SELECT 'hello' COLLATE NOCASE.NOACCENT = 'hElLÖ'; -- Both case and accent insensitive", "SET default_collation = NOCASE; SELECT 'hello' = 'HeLlo'; -- Setting global collation", "CREATE TABLE names (name VARCHAR COLLATE NOACCENT); INSERT INTO names VALUES ('hännes'); SELECT name FROM names WHERE name = 'hannes'; -- Column-specific collation", 'SELECT names.name AS name, other_names.name AS other_name FROM names, other_names WHERE names.name COLLATE NOACCENT.NOCASE = other_names.name COLLATE NOACCENT.NOCASE; -- Combine collations for comparison', "CREATE TABLE strings (s VARCHAR COLLATE DE); INSERT INTO strings VALUES ('Gabel'), ('Göbel'), ('Goethe'), ('Goldmann'), ('Göthe'), ('Götz'); SELECT * FROM strings ORDER BY s; -- Using ICU collation"]
`BEGIN TRANSACTION`: Starts a new explicit transaction whose changes are applied atomically on COMMIT or discarded on ROLLBACK., Examples: ['BEGIN TRANSACTION;']
`CREATE SEQUENCE`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['CREATE SEQUENCE serial;', 'CREATE SEQUENCE serial START 101;', 'CREATE SEQUENCE serial START WITH 1 INCREMENT BY 2;', 'CREATE SEQUENCE serial START WITH 99 INCREMENT BY -1 MAXVALUE 99;', 'CREATE SEQUENCE serial START WITH 1 MAXVALUE 10;', 'CREATE SEQUENCE serial START WITH 1 MAXVALUE 10 CYCLE;', 'CREATE OR REPLACE SEQUENCE serial;', 'CREATE SEQUENCE IF NOT EXISTS serial;', 'CREATE SEQUENCE id_sequence START 1;', "SELECT nextval('serial') AS nextval;", "SELECT currval('serial') AS currval;"]
`CREATE MACRO`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['CREATE MACRO add(a, b) AS a + b;', 'CREATE MACRO ifelse(a, b, c) AS CASE WHEN a THEN b ELSE c END;', 'CREATE MACRO one() AS (SELECT 1);', 'CREATE MACRO plus_one(a) AS (WITH cte AS (SELECT 1 AS a) SELECT cte.a + a FROM cte);', 'CREATE FUNCTION main.my_avg(x) AS sum(x) / count(x);', 'CREATE MACRO add_default(a, b := 5) AS a + b;', 'CREATE MACRO arr_append(l, e) AS list_concat(l, list_value(e));', "CREATE MACRO static_table() AS TABLE SELECT 'Hello' AS column1, 'World' AS column2;", 'CREATE MACRO dynamic_table(col1_value, col2_value) AS TABLE SELECT col1_value AS column1, col2_value AS column2;', "CREATE OR REPLACE TEMP MACRO dynamic_table(col1_value, col2_value) AS TABLE SELECT col1_value AS column1, col2_value AS column2 UNION ALL SELECT 'Hello' AS col1_value, 456 AS col2_value;", 'CREATE MACRO get_users(i) AS TABLE SELECT * FROM users WHERE uid IN (SELECT unnest(i));', 'SELECT * FROM get_users([1, 5]);', 'CREATE MACRO checksum(table_name) AS TABLE SELECT bit_xor(md5_number(COLUMNS(*)::VARCHAR)) FROM query_table(table_name);', "SELECT * FROM checksum('tbl');", 'CREATE MACRO add_x (a, b) AS a + b, (a, b, c) AS a + b + c;', 'SELECT add_x(21, 42) AS two_args, add_x(21, 42, 21) AS three_args;', 'CREATE MACRO add(a, b) AS a + b;', 'SELECT add(1, 2) AS x;', 'SELECT add_default(37);', 'SELECT add_default(40, b := 2) AS x;', 'CREATE MACRO triple_add(a, b := 5, c := 10) AS a + b + c;', 'SELECT triple_add(40, c := 1, b := 1) AS x;']
`VACUUM`: Provided largely for PostgreSQL compatibility; VACUUM ANALYZE recomputes table statistics, while VACUUM FULL is not supported., Examples: ['VACUUM;', 'VACUUM ANALYZE;', 'VACUUM ANALYZE memory.main.my_table(my_column);', 'VACUUM FULL; -- error']
`RESET`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ["SET memory_limit = '10GB';", 'SET threads = 1;', 'SET threads TO 1;', 'RESET threads;', "SELECT current_setting('threads');", "SET GLOBAL search_path = 'db1,db2'", "SET SESSION default_collation = 'nocase';"]
`RESET`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ["SET memory_limit = '10GB';", 'SET threads = 1;', 'SET threads TO 1;', 'RESET threads;', "SELECT current_setting('threads');", "SET GLOBAL search_path = 'db1,db2'", "SET SESSION default_collation = 'nocase';"]
`EXPLAIN ANALYZE`: Shows the query plan; with ANALYZE the query is actually executed and per-operator run times and row counts are reported., Examples: ['EXPLAIN SELECT * FROM table_name;', 'EXPLAIN ANALYZE SELECT * FROM table_name;']
`EXPLAIN ANALYZE`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Examples: ['EXPLAIN SELECT * FROM table_name;', 'EXPLAIN ANALYZE SELECT * FROM table_name;']
`CUBE`: `CUBE`, `ROLLUP`, and `GROUPING SETS` in the GROUP BY clause compute aggregates over multiple groupings of columns within a single query; `CUBE` produces groupings for every subset of the listed columns., Examples: ['SELECT city, street_name, avg(income) FROM addresses GROUP BY GROUPING SETS ((city, street_name), (city), (street_name), ());', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY CUBE (city, street_name);', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ROLLUP (city, street_name);', 'SELECT course, type, count(*) FROM students GROUP BY GROUPING SETS ((course, type), course, type, ());', 'WITH days AS ( SELECT year("generate_series") AS y, quarter("generate_series") AS q, month("generate_series") AS m FROM generate_series(DATE \'2023-01-01\', DATE \'2023-12-31\', INTERVAL 1 DAY) ) SELECT y, q, m, GROUPING_ID(y, q, m) AS "grouping_id()" FROM days GROUP BY GROUPING SETS ((y, q, m), (y, q), (y), ()) ORDER BY y, q, m;']
`CUBE`: `CUBE`, `ROLLUP`, and `GROUPING SETS` in the GROUP BY clause compute aggregates over multiple groupings of columns within a single query; `CUBE` produces groupings for every subset of the listed columns., Examples: ['SELECT city, street_name, avg(income) FROM addresses GROUP BY GROUPING SETS ((city, street_name), (city), (street_name), ());', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY CUBE (city, street_name);', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ROLLUP (city, street_name);', 'SELECT course, type, count(*) FROM students GROUP BY GROUPING SETS ((course, type), course, type, ());', 'WITH days AS ( SELECT year("generate_series") AS y, quarter("generate_series") AS q, month("generate_series") AS m FROM generate_series(DATE \'2023-01-01\', DATE \'2023-12-31\', INTERVAL 1 DAY) ) SELECT y, q, m, GROUPING_ID(y, q, m) AS "grouping_id()" FROM days GROUP BY GROUPING SETS ((y, q, m), (y, q), (y), ()) ORDER BY y, q, m;']
`CUBE`: `CUBE`, `ROLLUP`, and `GROUPING SETS` in the GROUP BY clause compute aggregates over multiple groupings of columns within a single query; `CUBE` produces groupings for every subset of the listed columns., Examples: ['SELECT city, street_name, avg(income) FROM addresses GROUP BY GROUPING SETS ((city, street_name), (city), (street_name), ());', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY CUBE (city, street_name);', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ROLLUP (city, street_name);', 'SELECT course, type, count(*) FROM students GROUP BY GROUPING SETS ((course, type), course, type, ());', 'WITH days AS ( SELECT year("generate_series") AS y, quarter("generate_series") AS q, month("generate_series") AS m FROM generate_series(DATE \'2023-01-01\', DATE \'2023-12-31\', INTERVAL 1 DAY) ) SELECT y, q, m, GROUPING_ID(y, q, m) AS "grouping_id()" FROM days GROUP BY GROUPING SETS ((y, q, m), (y, q), (y), ()) ORDER BY y, q, m;']
`ALTER VIEW`: The `ALTER VIEW` statement modifies the catalog entry of an existing view, for example renaming it., Examples: ['ALTER VIEW v1 RENAME TO v2;']
`UPDATE EXTENSIONS`: The `UPDATE EXTENSIONS` statement synchronizes installed extensions with their repositories, updating either all installed extensions or only the named ones., Examples: ['UPDATE EXTENSIONS;', 'UPDATE EXTENSIONS (name_a, name_b, name_c);']
`CHECKPOINT`: The `CHECKPOINT` statement synchronizes data in the write-ahead log (WAL) to the database data file; `FORCE CHECKPOINT` aborts any in-progress transactions in order to do so., Examples: ['CHECKPOINT;', 'CHECKPOINT file_db;', 'FORCE CHECKPOINT;']
`COMMENT ON`: The `COMMENT ON` statement attaches a comment to a catalog entry such as a table, column, view, index, sequence, type, or macro; setting the comment to NULL removes it., Examples: ["COMMENT ON TABLE test_table IS 'very nice table';", "COMMENT ON COLUMN test_table.test_table_column IS 'very nice column';", "COMMENT ON VIEW test_view IS 'very nice view';", "COMMENT ON INDEX test_index IS 'very nice index';", "COMMENT ON SEQUENCE test_sequence IS 'very nice sequence';", "COMMENT ON TYPE test_type IS 'very nice type';", "COMMENT ON MACRO test_macro IS 'very nice macro';", "COMMENT ON MACRO TABLE test_table_macro IS 'very nice table macro';", 'COMMENT ON TABLE test_table IS NULL;']
`IMPORT DATABASE`: The `EXPORT DATABASE` command exports the full contents of the database — schema definitions and table data — to a target directory in CSV or Parquet format, and the `IMPORT DATABASE` command loads such an export back into a database., Examples: ["EXPORT DATABASE 'target_directory';", "EXPORT DATABASE 'target_directory' (FORMAT CSV, DELIMITER '|');", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET);", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET, COMPRESSION ZSTD, ROW_GROUP_SIZE 100_000);", "IMPORT DATABASE 'source_directory';", "PRAGMA import_database('source_directory');"]
`IMPORT DATABASE`: The `EXPORT DATABASE` command exports the full contents of the database — schema definitions and table data — to a target directory in CSV or Parquet format, and the `IMPORT DATABASE` command loads such an export back into a database., Examples: ["EXPORT DATABASE 'target_directory';", "EXPORT DATABASE 'target_directory' (FORMAT CSV, DELIMITER '|');", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET);", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET, COMPRESSION ZSTD, ROW_GROUP_SIZE 100_000);", "IMPORT DATABASE 'source_directory';", "PRAGMA import_database('source_directory');"]
DuckDB Types:
`BOOLEAN`: The `BOOLEAN` type represents a statement of truth, "true" or "false", with the possibility of being "unknown", represented by `NULL` in SQL., Examples: ['> SELECT true, false, NULL::BOOLEAN;', '-- Outputs the three possible values for BOOLEAN: true, false, NULL.', 'CREATE TABLE example (is_active BOOLEAN);', '-- Create a table with a BOOLEAN column.', 'INSERT INTO example VALUES (true), (false), (NULL);', '-- Insert BOOLEAN values, including NULL.', 'SELECT * FROM example WHERE is_active AND is_verified;', '-- Filters rows where both conditions are true.', 'UPDATE example SET is_active = false WHERE condition;', '-- Update rows to set the BOOLEAN field to false.']
`STRUCT`: The `STRUCT` data type in SQL is used to create a column that contains an ordered list of columns, referred to as entries, which are accessed using named keys. This type is ideal for nesting multiple columns into a single column, allowing a structured and consistent data schema across all rows., Examples: ["SELECT struct_pack(key1 := 'value1', key2 := 42) AS s;", "SELECT {{'key1': 'value1', 'key2': 42}} AS s;", "SELECT a.x FROM (SELECT {{'x': 1, 'y': 2, 'z': 3}} AS a);", "SELECT struct_insert({{'a': 1, 'b': 2, 'c': 3}}, d := 4) AS s;", 'CREATE TABLE t1 (s STRUCT(v VARCHAR, i INTEGER));', "INSERT INTO t1 VALUES (row('a', 42));", "SELECT a.* FROM (SELECT {{'x': 1, 'y': 2, 'z': 3}} AS a);", "SELECT struct_extract({{'x space': 1, 'y': 2, 'z': 3}}, 'x space');"]
`FLOAT`: The FLOAT data type, also known by aliases FLOAT4, REAL, or float, represents a single precision floating-point number, facilitating approximate calculations and efficient handling of numerical data with precision typically up to 6 decimal digits and a range of at least 1E-37 to 1E+37., Examples: ['-- Example: Creating a table with a FLOAT column\nCREATE TABLE example_table (id INTEGER, value FLOAT);', '-- Example: Inserting values into a FLOAT column\nINSERT INTO example_table VALUES (1, 3.14), (2, 2.718);', '-- Example: Performing arithmetic operations with FLOAT values\nSELECT id, value * 2.0::FLOAT AS doubled_value FROM example_table;', '-- Example: Casting a numeric value to FLOAT\nSELECT CAST(100 AS FLOAT) AS float_value;', '-- Example: Using FLOAT values in a mathematical function\nSELECT SQRT(value) FROM example_table WHERE value > 0;', '-- Example: Comparing FLOAT values\nSELECT * FROM example_table WHERE value > 3.0::FLOAT;']
`DATE`: The `DATE` type in SQL is used to store calendar dates without time components, representing a year, month, and day as accurate information for querying and managing date-related data., Examples: ["-- Add 5 days to a specific date\\nSELECT DATE '1992-03-22' + 5; -- Result: 1992-03-27\\n", "-- Subtract one date from another to get the number of days between them\\nSELECT DATE '1992-03-27' - DATE '1992-03-22'; -- Result: 5\\n", '-- Get the current date at the start of the transaction\\nSELECT current_date; -- Example result: 2022-10-08\\n', "-- Add an interval of 2 months to a specific date\\nSELECT date_add(DATE '1992-09-15', INTERVAL 2 MONTH); -- Result: 1992-11-15\\n", "-- Find the difference in months between two dates\\nSELECT date_diff('month', DATE '1992-09-15', DATE '1992-11-14'); -- Result: 2\\n", "-- Extract the year from a specific date\\nSELECT date_part('year', DATE '1992-09-20'); -- Result: 1992\\n", "-- Get the (English) name of the weekday from a specific date\\nSELECT dayname(DATE '1992-09-20'); -- Result: Sunday\\n", "-- Convert a date to a string format\\nSELECT strftime(date '1992-01-01', '%a, %-d %B %Y'); -- Result: Wed, 1 January 1992"]
`TIMESTAMP_S`: The TIMESTAMP_S data type represents a timestamp with second precision, ignoring any sub-second parts and time zones., Examples: ["SELECT TIMESTAMP_S '1992-09-20 11:30:00.123456789'; -- Output: 1992-09-20 11:30:00", "SELECT TIMESTAMP_S '2000-01-01 00:00:00'; -- Output: 2000-01-01 00:00:00", "SELECT TIMESTAMP_S '2023-10-05 18:44:03.987654321'; -- Output: 2023-10-05 18:44:03"]
`DECIMAL`: The DECIMAL data type, also known as NUMERIC or DEC, allows for the representation of exact fixed-point decimal numbers, providing precise control over the number of digits and the digits after the decimal point., Examples: ['CREATE TABLE salaries (\\n employee_id INTEGER,\\n base_salary DECIMAL(10, 2)\\n);', 'INSERT INTO salaries (employee_id, base_salary) VALUES\\n (1, 50000.00),\\n (2, 65000.50);', 'SELECT employee_id, base_salary\\nFROM salaries\\nWHERE base_salary > DECIMAL(60000, 2);', 'UPDATE salaries\\nSET base_salary = base_salary + DECIMAL(5000.00, 2)\\nWHERE employee_id = 1;', 'SELECT CAST(99 AS DECIMAL(10, 2));']
`BIGINT`: The `BIGINT` data type is an 8-byte integer that can store large integer values suitable for handling significant quantities or high precision integer data., Examples: ['CREATE TABLE example_table (id BIGINT PRIMARY KEY, count BIGINT, reference_id BIGINT);', "SELECT * FROM parquet_metadata('file.parquet') WHERE row_group_id = 1;", 'ALTER TABLE orders ADD COLUMN order_count BIGINT DEFAULT 0;', 'UPDATE employee SET salary = salary + 1000 WHERE employee_id = 1001;', 'SELECT store_id, SUM(sales) AS total_sales FROM transactions GROUP BY store_id;', 'CREATE SEQUENCE order_sequence START WITH 1000 INCREMENT BY 1 MINVALUE 100 MAXVALUE 10000 NO CYCLE;']
`LIST`: A `LIST` column is a flexible, ordered sequence of data values of the same type, which can vary in length among rows and can include any uniform data type, allowing for complex nested data structures., Examples: ['SELECT [1, 2, 3]; -- Creates a static list of integers', "SELECT ['duck', 'goose', NULL, 'heron']; -- Creates a list of strings containing a NULL value", 'SELECT list_value(1, 2, 3); -- Uses the list_value function to create a list of integers', 'CREATE TABLE list_table (int_list INTEGER[], varchar_list VARCHAR[]); -- Defines a table with integer and varchar lists', "SELECT (['a', 'b', 'c'])[3]; -- Retrieves the third element from a list", 'SELECT list_slice([1, 2, 3, 4, 5], 2, 4); -- Extracts a sublist from the main list']
`SMALLINT`: The SMALLINT type, with aliases such as short, int2, smallint, and int16, represents a signed two-byte integer that can store whole numbers ranging from -32768 to 32767., Examples: ['CREATE TABLE test_table (id SMALLINT);', 'INSERT INTO test_table (id) VALUES (100);', 'SELECT * FROM test_table WHERE id BETWEEN -100 AND 100;', 'ALTER TABLE test_table ADD COLUMN new_column SMALLINT;', 'UPDATE test_table SET id = id + 1 WHERE id < 32767;']
`INTERVAL`: The INTERVAL data type represents a period of time that can be measured in months, days, microseconds, or a combination of these units, and is typically used to add or subtract to DATE, TIMESTAMP, TIMESTAMPTZ, or TIME values., Examples: ["SELECT INTERVAL '1 month 1 day'; -- Returns an interval representing 1 month and 1 day", "SELECT DATE '2000-01-01' + INTERVAL 1 YEAR; -- Adds 1 year to the specified date", "SELECT TIMESTAMP '2000-02-06 12:00:00' - TIMESTAMP '2000-01-01 11:00:00'; -- Returns interval of 36 days 1 hour", "SELECT INTERVAL '48:00:00'::INTERVAL; -- Converts a time string to microseconds interval representing 48 hours", "SELECT (DATE '2020-01-01' + INTERVAL 30 DAYS) = (DATE '2020-01-01' + INTERVAL 1 MONTH); -- Compares intervals by their conversion to microseconds"]
`VARCHAR`: `VARCHAR` is a versatile data type used to store variable-length character strings, accommodating a wide range of text and string data without enforcing a specific length., Examples: ['CREATE TABLE people (name VARCHAR, age INTEGER);', "INSERT INTO documents (text) VALUES ('This is a VARCHAR example text.');", "SELECT * FROM employees WHERE department = 'Engineering';", 'ALTER TABLE students ADD COLUMN email VARCHAR;', "UPDATE orders SET status = 'Shipped' WHERE order_id = 102;", "COPY products TO 'products.csv' DELIMITER ',' HEADER;"]
`VARINT`: VARINT is an arbitrary-precision integer data type capable of storing very large numbers beyond the limits of standard integer types., Examples: ['CREATE TABLE example_table (id VARINT);', 'INSERT INTO example_table (id) VALUES (123456789123456789123456789);', 'SELECT id FROM example_table WHERE id < 999999999999999999999999999;']
`TINYINT`: TINYINT is a signed one-byte integer type that can store whole numbers ranging from -128 to 127, often used to save storage space when values are known to fall within this small range., Examples: ["SELECT CAST('123' AS TINYINT);", 'INSERT INTO my_table (x) VALUES (CAST(100 AS TINYINT));', 'UPDATE my_table SET x = CAST(50 AS TINYINT) WHERE id = 1;', 'SELECT * FROM my_table WHERE x = CAST(-50 AS TINYINT);', 'CREATE TABLE example (id TINYINT);']
`INTEGER`: The INTEGER data type, with aliases such as int, signed, int4, int32, integer, and integral, represents whole numbers and is commonly used to store numeric data without fractional components., Examples: ['-- Assigning integer values to columns in a CREATE TABLE statement\nCREATE TABLE my_table (id INTEGER, age INTEGER);', '-- Inserting integer values as literals within an INSERT statement\nINSERT INTO my_table VALUES (1, 25);', '-- Using integer operations in a SELECT statement\nSELECT id + 10 AS new_id FROM my_table;', '-- Casting a float to an integer\nSELECT CAST(3.7 AS INTEGER) AS whole_number;', '-- Defining a column to only accept non-negative integers using a CHECK constraint\nCREATE TABLE my_table (id INTEGER CHECK (id >= 0));', '-- Using the INTEGER type in a primary key definition\nCREATE TABLE users (user_id INTEGER PRIMARY KEY, username VARCHAR);', '-- Updating integer columns\nUPDATE my_table SET age = age + 1 WHERE id = 1;', '-- Comparing integer values in a WHERE clause\nSELECT * FROM my_table WHERE age > 20;']
`ENUM`: The Enum data type represents a dictionary encoding structure that enumerates all possible unique string values of a column, allowing for efficient storage and query execution by storing only numerical references to the strings., Examples: ["CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');", 'CREATE TYPE birds AS ENUM (SELECT my_varchar FROM my_inputs);', 'CREATE TABLE person (name TEXT, current_mood mood);', "INSERT INTO person VALUES ('Pedro', 'happy'), ('Pagliacci', 'sad');", 'SELECT enum_range(NULL::mood) AS mood_values;', 'DROP TYPE mood;']
`UBIGINT`: UBIGINT, also known as 'uint64' or 'ubigint', is an unsigned 64-bit integer data type that can store large whole numbers from 0 to 18,446,744,073,709,551,615. It is commonly used for columns that require large non-negative integer values, especially where negative values are not applicable., Examples: ['CREATE TABLE huge_numbers (id UBIGINT);', 'INSERT INTO huge_numbers VALUES (4294967296);', 'SELECT id FROM huge_numbers WHERE id > 1000000;', 'ALTER TABLE huge_numbers ADD COLUMN new_value UBIGINT;', 'CREATE VIEW large_ids AS SELECT id FROM huge_numbers WHERE id > 100000000;']
`BLOB`: The BLOB (Binary Large Object) type represents a variable-length binary data object, used for storing arbitrary binary data in the database, such as images or files, without any interpretation of its contents., Examples: ["-- Create a BLOB with a single byte\\nSELECT '\\xAA'::BLOB;\\n-- Result: \\xAA\\n\\n-- Create a BLOB with multiple bytes\\nSELECT '\\xAA\\xAB\\xAC'::BLOB;\\n-- Result: \\xAA\\xAB\\xAC\\n\\n-- Concatenate two BLOB values\\nSELECT '\\xAA'::BLOB || '\\xBB'::BLOB;\\n-- Result: \\xAABB\\n\\n-- Convert a BLOB to a hexadecimal string\\nSELECT hex('\\xAA\\xBB'::BLOB);\\n-- Result: AABB\\n\\n-- Decode a BLOB to a string, ensuring it is valid UTF-8\\nSELECT decode('\\xC3\\xBC'::BLOB);\\n-- Result: ü\\n\\n-- Read a BLOB from a file\\nSELECT read_blob('myfile.bin');\\n-- Result: Contents of 'myfile.bin' as a BLOB"]
`HUGEINT`: The `HUGEINT` data type, also known as `INT128`, is a signed sixteen-byte integer that stores whole numbers ranging from -170141183460469231731687303715884105728 to 170141183460469231731687303715884105727, providing a broad range suitable for large numerical computations., Examples: ['-- Creating a table with a HUGEINT column\\nCREATE TABLE example_table (id HUGEINT, value HUGEINT);', '-- Inserting values into a HUGEINT column\\nINSERT INTO example_table (id, value) VALUES (1, 170141183460469231731687303715884105727);', '-- Performing arithmetic operations on HUGEINT\\nSELECT value + 10 FROM example_table WHERE id = 1;', "-- Using HUGEINT in a function\\nSELECT md5_number('12345')::HUGEINT;", '-- Comparing HUGEINT values\\nSELECT * FROM example_table WHERE value > 1000000000000000000;']
`TIMESTAMP`: A TIMESTAMP value represents an instant in time, composed of a combination of a date (year, month, day) and a time (hour, minute, second, microsecond), stored with microsecond precision, and it can be manipulated using various functions and operators., Examples: ["SELECT TIMESTAMP '1992-09-20 11:30:00.123456';", "SELECT TIMESTAMP '1992-09-20 11:30:00' + INTERVAL 10 DAYS;", "SELECT TIMESTAMP '2023-07-18 17:45:00' - TIMESTAMP '2023-07-10 15:30:00';", "SELECT age(TIMESTAMP '2023-07-18 17:45:00', TIMESTAMP '2022-07-18 17:45:00');", "SELECT strftime(TIMESTAMP '2023-07-18 17:45:00', '%Y-%m-%d %H:%M:%S');", "SELECT extract('hour' FROM TIMESTAMP '2023-07-18 17:45:00');"]
`UNION`: The UNION data type is a nested type that holds one of multiple distinct values with a "tag" to identify the active type and can contain multiple uniquely tagged members of various types, akin to C++ std::variant or Rust's Enum., Examples: ["```sql\nCREATE TABLE tbl1 (u UNION(num INTEGER, str VARCHAR));\nINSERT INTO tbl1 VALUES (1), ('two'), (union_value(str := 'three'));\n```", "```sql\nSELECT union_extract(u, 'str') AS str\nFROM tbl1;\n```", '```sql\nSELECT u.str\nFROM tbl1;\n```', '```sql\nSELECT union_tag(u) AS t\nFROM tbl1;\n```']
`TIMESTAMP_MS`: The "TIMESTAMP_MS" data type represents timestamps with millisecond precision, defined without considering time zones., Examples: ["SELECT TIMESTAMP_MS '1992-09-20 11:30:00.123456789'; -- Produces output: 1992-09-20 11:30:00.123"]
`TIMESTAMP_NS`: `TIMESTAMP_NS` represents a timestamp with nanosecond precision, useful for high-resolution time data but ignores time zone information., Examples: ["SELECT TIMESTAMP_NS '1992-09-20 11:30:00.123456789';"]
`USMALLINT`: USMALLINT is an unsigned two-byte integer type with a range from 0 to 65535, used for storing non-negative whole numbers within this range., Examples: ['CREATE TABLE example_table (id USMALLINT, age USMALLINT);', 'INSERT INTO example_table (id, age) VALUES (100, 25);', 'SELECT * FROM example_table WHERE age < 30;']
`UINTEGER`: The `UINTEGER` data type is used to store unsigned 32-bit integer values, allowing for a range from 0 to 4,294,967,295 and is particularly useful when negative values are not needed and memory efficiency is a concern for large datasets., Examples: ['CREATE TABLE example_table (count UINTEGER);', 'INSERT INTO example_table VALUES (150), (2750), (4294967295);', 'SELECT * FROM example_table WHERE count > 1000;', 'ALTER TABLE example_table ADD COLUMN new_count UINTEGER DEFAULT 0;', 'UPDATE example_table SET count = count + 100 WHERE count < 4294967295;']
`UHUGEINT`: UHUGEINT, also known as uint128, is an unsigned 128-bit integer data type used for storing large non-negative whole numbers ranging from 0 to approximately 3.4 x 10^38., Examples: ['CREATE TABLE numbers (id UHUGEINT); -- Creates a table with a UHUGEINT column.', "INSERT INTO numbers VALUES (340282366920938463463374607431768211455); -- Inserts the maximum valid UHUGEINT value into the 'numbers' table.", "SELECT id FROM numbers WHERE id > 1000000; -- Selects records from the 'numbers' table where the UHUGEINT value is greater than 1,000,000."]
`TIME`: The `TIME` type represents a time of day, independent of a specific date, and is used to store and manipulate values consisting of hours, minutes, seconds, and fractional seconds., Examples: ["SELECT TIME '14:21:13';", "SELECT TIME '08:30:00' + INTERVAL 5 MINUTE;", "SELECT EXTRACT(HOUR FROM TIME '23:45:12');", 'SELECT MAKE_TIME(13, 30, 59.999);', 'SELECT CURRENT_TIME;']
`TIMESTAMP WITH TIME ZONE`: `TIMESTAMP WITH TIME ZONE` (or `TIMESTAMPTZ`) represents a point in time using a calendar date and time of day along with a time zone offset, allowing for time zone sensitive operations such as conversions and comparisons., Examples: ["SELECT TIMESTAMPTZ '2023-10-17 12:00:00+01';", "SELECT now() AT TIME ZONE 'UTC';", "SELECT TIMESTAMP '2023-10-17 10:00:00-07' AT TIME ZONE 'America/New_York';", "SELECT age(TIMESTAMPTZ '2005-10-17 12:00:00-07');", "SELECT TIMESTAMPTZ '2023-10-17 15:00:00+00' - TIMESTAMPTZ '2023-10-16 15:00:00+00';"]
`UUID`: The UUID data type is used to store universally unique identifiers as 128-bit values, formatted as 36-character strings with hexadecimal characters and dashes arranged in the pattern ⟨8 characters⟩-⟨4 characters⟩-⟨4 characters⟩-⟨4 characters⟩-⟨12 characters⟩., Examples: ['-- Create a table with a UUID column\nCREATE TABLE users (id UUID, name VARCHAR);', "-- Insert a new UUID value into the table\nINSERT INTO users (id, name) VALUES (gen_random_uuid(), 'Alice');", "-- Retrieve UUID values from a table\nSELECT id FROM users WHERE name = 'Alice';", '-- Generate and display a random UUID\nSELECT uuid();']
`DOUBLE`: The `DOUBLE` type, also known as `FLOAT8`, is a double-precision floating point number data type commonly used for storing large or precise decimal values in SQL queries., Examples: ['```sql\n-- Using DOUBLE to store and manipulate high-precision values\nCREATE TABLE sales_data (\n transaction_id INTEGER,\n sale_amount DOUBLE\n);\n\nINSERT INTO sales_data (transaction_id, sale_amount) VALUES (1, 1999.99);\nSELECT sale_amount * 1.05 AS total_after_tax FROM sales_data WHERE transaction_id = 1;\n```', '```sql\n-- Calculating the square root of a DOUBLE value\nSELECT sqrt(column_value) FROM my_table WHERE column_value > 0;\n```', '```sql\n-- Using DOUBLE in mathematical functions\nSELECT sin(column1), cos(column2) FROM my_numeric_table;\n```', '```sql\n-- Explicit casting of an INTEGER to DOUBLE for precision in arithmetic operations\nSELECT cast(my_integer_column AS DOUBLE) / 2 FROM my_table;\n```', '```sql\n-- Working with DOUBLE in spatial functions\nDOUBLE ST_Area (geometry) -- Computes the area of a geometry, returning a DOUBLE value as the area\n```', "```sql\n-- Using the DOUBLE type in JSON processing\nSELECT json_extract(my_json_column, '$.key')::DOUBLE FROM my_json_table;\n```"]
`UTINYINT`: An unsigned 8-bit integer type used to store whole numbers in the range of 0 to 255., Examples: ['CREATE TABLE example_table (column1 UTINYINT);', 'INSERT INTO example_table (column1) VALUES (200);', 'SELECT * FROM example_table WHERE column1 < 100;', '-- Attempting to store a negative number or a number greater than 255 will result in an error.', 'UPDATE example_table SET column1 = 255 WHERE column1 < 50;']
`NULL`: The `NULL` type in SQL represents a missing or unknown value, allowing for fields within a table to be uninitialized or absent in data., Examples: ['SELECT NULL = NULL;', 'SELECT NULL IS NULL;', "INSERT INTO table_name (column1, column2) VALUES (NULL, 'data');", "SELECT coalesce(NULL, 'default_value');", 'UPDATE table_name SET column1 = NULL WHERE condition;', "SELECT CASE WHEN column IS NULL THEN 'Value is NULL' ELSE column END FROM table_name;"]
`TIME WITH TIME ZONE`: The TIME WITH TIME ZONE (alias: TIMETZ) type represents the time of day with respect to a specific time zone, following the ISO 8601 format and allowing for time zone offsets., Examples: ["SELECT TIMETZ '1992-09-20 11:30:00.123456';", "SELECT TIMETZ '1992-09-20 11:30:00.123456-02:00';", "SELECT TIMETZ '1992-09-20 11:30:00.123456+05:30';"]
`BIT`: The `BIT` data type, also known as `BITSTRING`, represents variable-length strings consisting of 1s and 0s, suitable for operations such as bitwise manipulation., Examples: ["SELECT '10101'::BITSTRING & '10001'::BITSTRING AS result;", "SELECT bit_count('1101011'::BITSTRING) AS set_bits_count;", "SELECT bit_length('10101011'::BITSTRING) AS length_in_bits;", "SELECT octet_length('1101011'::BITSTRING) AS length_in_bytes;", "SELECT set_bit('0110010'::BITSTRING, 2, 0) AS updated_bitstring;"]
`MAP`: The MAP type is an ordered collection of key-value pairs, where keys are unique and can be of any type, allowing for diverse and flexible schema structures in databases., Examples: ["SELECT MAP {{'key1': 10, 'key2': 20, 'key3': 30}};", "SELECT map_from_entries([('key1', 10), ('key2', 20), ('key3', 30)]);", "SELECT MAP(['key1', 'key2', 'key3'], [10, 20, 30]);", 'SELECT MAP {{1: 42.001, 5: -32.1}};', "SELECT MAP {{['a', 'b']: [1.1, 2.2], ['c', 'd']: [3.3, 4.4]}};", 'CREATE TABLE tbl (col MAP(INTEGER, DOUBLE));', "SELECT MAP {{'key1': 5, 'key2': 43}}['key1'];", "SELECT MAP {{'key1': 5, 'key2': 43}}['key1'][1];", "SELECT MAP {{'key1': 5, 'key2': 43}}['key3'];", "SELECT element_at(MAP {{'key1': 5, 'key2': 43}}, 'key1');"]
`ARRAY`: The ARRAY data type stores fixed-size arrays where each element is of the same type, and it is suitable for representing ordered sequences of elements such as numerical vectors or nested arrays., Examples: ['SELECT array_value(1, 2, 3); -- Creates an array with elements 1, 2, and 3', 'CREATE TABLE example_table (id INTEGER, arr INTEGER[3]); -- Declares an array of three integers', 'SELECT id, arr[1] AS element FROM example_table; -- Retrieves the first element of the array', 'SELECT array_value(array_value(1, 2), array_value(3, 4), array_value(5, 6)); -- Creates a nested array using arrays as elements', 'INSERT INTO example_table VALUES (1, [1, 2, 3]), (2, [4, 5, 6]); -- Inserts rows with array values into a table', 'SELECT array_cosine_similarity(array_value(1.0, 2.0, 3.0), array_value(2.0, 3.0, 4.0)); -- Computes cosine similarity between two arrays of the same size', 'SELECT array_cross_product(array_value(1.0, 2.0, 3.0), array_value(2.0, 3.0, 4.0)); -- Computes the cross product of two 3-element arrays']
`JSON`: The JSON data type allows for the storage and querying of JSON formatted data, supporting functions for extracting, manipulating, and transforming JSON content within the database., Examples: ['CREATE TABLE example (j JSON);', 'INSERT INTO example VALUES (\'{{ "family": "anatidae", "species": [ "duck", "goose", "swan", null ] }}\');', "SELECT j->'$.family' FROM example;", "SELECT json_extract(j, '$.species[0]') FROM example;", "SELECT json_extract_string(j, '$.family') FROM example;"]
Here is the schema of the DuckDB database that the SQL query will run on:
{schema}
Question:
Here is the question or an instruction the user provided:
{question}
Write a DuckDB SQL query for the given question!
Answer:
```
| 2024-11-13T01:12:41.737112 | 5 | 0.6 | 14 | 0.857143 | 6 | 0.166667 | 48 | 0.770833 | 2 | 1 | 75 | 0.733333 |
openrouter | anthropic/claude-3.5-sonnet | custom_8348795 | You are a DuckDB SQL Query Writing Assistant. You only respond with a DuckDB SQL query that answers the users's question.
Here are some DuckDB SQL syntax specifics you should be aware of:
- DuckDB use double quotes (") for identifiers that contain spaces or special characters, or to force case-sensitivity and single quotes (') to define string literals
- DuckDB can query CSV, Parquet, and JSON directly without loading them first, e.g. `SELECT * FROM 'data.csv';`
- DuckDB supports CREATE TABLE AS (CTAS): `CREATE TABLE new_table AS SELECT * FROM old_table;`
- DuckDB queries can start with FROM, and optionally omit SELECT *, e.g. `FROM my_table WHERE condition;` is equivalent to `SELECT * FROM my_table WHERE condition;`
- DuckDB allows you to use SELECT without a FROM clause to generate a single row of results or to work with expressions directly, e.g. `SELECT 1 + 1 AS result;`
- DuckDB supports attaching multiple databases, using the ATTACH statement: `ATTACH 'my_database.duckdb' AS mydb;`. Tables within attached databases can be accessed using the dot notation (.), e.g. `SELECT * FROM mydb.table_name` syntax. The default database doesn't require the dot notation to access tables. The default database can be changed with the USE statement, e.g. `USE my_db;`.
- DuckDB is generally more lenient with implicit type conversions (e.g. `SELECT '42' + 1;` - Implicit cast, result is 43), but you can always be explicit using `::`, e.g. `SELECT '42'::INTEGER + 1;`
- DuckDB can extract parts of strings and lists using [start:end] or [start:end:step] syntax. Indexes start at 1. String slicing: `SELECT 'DuckDB'[1:4];`. Array/List slicing: `SELECT [1, 2, 3, 4][1:3];`
- DuckDB has a powerful way to select or transform multiple columns using patterns or functions. You can select columns matching a pattern: `SELECT COLUMNS('sales_.*') FROM sales_data;` or transform multiple columns with a function: `SELECT AVG(COLUMNS('sales_.*')) FROM sales_data;`
- DuckDB has an easy way to include/exclude or modify columns when selecting all: e.g. Exclude: `SELECT * EXCLUDE (sensitive_data) FROM users;` Replace: `SELECT * REPLACE (UPPER(name) AS name) FROM users;`
- DuckDB has a shorthand for grouping/ordering by all non-aggregated/all columns. e.g `SELECT category, SUM(sales) FROM sales_data GROUP BY ALL;` and `SELECT * FROM my_table ORDER BY ALL;`
- DuckDB can combine tables by matching column names, not just their positions using UNION BY NAME. E.g. `SELECT * FROM table1 UNION BY NAME SELECT * FROM table2;`
- DuckDB has an intuitive syntax to create List/Struct/Map and Array types. Create complex types using intuitive syntax. List: `SELECT [1, 2, 3] AS my_list;`, Struct: `{{{{'a': 1, 'b': 'text'}}}} AS my_struct;`, Map: `MAP([1,2],['one','two']) as my_map;`. All types can also be nested into each other. Array types are fixed size, while list types have variable size. Compared to Structs, MAPs do not need to have the same keys present for each row, but keys can only be of type Integer or Varchar. Example: `CREATE TABLE example (my_list INTEGER[], my_struct STRUCT(a INTEGER, b TEXT), my_map MAP(INTEGER, VARCHAR), my_array INTEGER[3], my_nested_struct STRUCT(a INTEGER, b Integer[3]));`
- DuckDB has an intuitive syntax to access struct fields using dot notation (.) or brackets ([]) with the field name. Map fields can be accessed by brackets ([]).
- DuckDB's way of converting between text and timestamps, and extract date parts. Current date as 'YYYY-MM-DD': `SELECT strftime(NOW(), '%Y-%m-%d');` String to timestamp: `SELECT strptime('2023-07-23', '%Y-%m-%d')::TIMESTAMP;`, Extract Year from date: `SELECT EXTRACT(YEAR FROM DATE '2023-07-23');`
- Column Aliases in WHERE/GROUP BY/HAVING: You can use column aliases defined in the SELECT clause within the WHERE, GROUP BY, and HAVING clauses. E.g.: `SELECT a + b AS total FROM my_table WHERE total > 10 GROUP BY total HAVING total < 20;`
- DuckDB allows generating lists using expressions similar to Python list comprehensions. E.g. `SELECT [x*2 FOR x IN [1, 2, 3]];` Returns [2, 4, 6].
- DuckDB allows chaining multiple function calls together using the dot (.) operator. E.g.: `SELECT 'DuckDB'.replace('Duck', 'Goose').upper(); -- Returns 'GOOSEDB';`
- DuckDB has a JSON data type. It supports selecting fields from the JSON with a JSON-Path expression using the arrow operator, -> (returns JSON) or ->> (returns text) with JSONPath expressions. For example: `SELECT data->'$.user.id' AS user_id, data->>'$.event_type' AS event_type FROM events;`
- DuckDB has built-in functions for regex: regexp_matches(column, regex), regexp_replace(column, regex, replacement), and regexp_extract(column, regex).
- DuckDB has a way to quickly get a subset of your data with `SELECT * FROM large_table USING SAMPLE 10%;`
DuckDB Functions:
`count`: Calculates the total number of rows returned by a SQL query result. This function is commonly used to determine the row count of a SELECT operation., Parameters: ['result: The result object']
`sum`: Calculates the total of all non-null values from the given input., Parameters: ['arg: Values to be summed up.']
`sum`: Calculates the total of all non-null values in a specified column or expression across rows., Parameters: ['arg: Values to be aggregated']
`max`: Returns the maximum value from the input data., Parameters: ['arg: The column or expression to evaluate', 'n: Number of top values to return(optional)', 'ORDER BY: Specifies sort order before function(optional)']
`max`: Returns the largest value from all values in a specified column or expression., Parameters: ['arg: expression to evaluate maximum', "n: top 'n' value list size(optional)"]
`coalesce`: This function evaluates provided expressions in order and returns the first non-NULL value found. If all expressions evaluate to NULL, then the result is NULL., Parameters: ['expr: An expression to evaluate', '...: Additional expressions to evaluate(optional)']
`trunc`: Truncates a number by removing the fractional part, essentially returning the integer part of the number without rounding., Parameters: ['x: The number to truncate.']
`date_trunc`: Truncates a date or timestamp to the specified precision, effectively setting smaller units to zero or to the first value of that unit (e.g., the first day of the month)., Parameters: ['part: Specifies the truncation precision', 'date: The date or timestamp value']
`row_number`: Generates a unique incrementing number for each row within a partition, starting from 1., Parameters: ['ORDER BY: Specify sort order for numbers.(optional)', 'PARTITION BY: Define groups for numbering.(optional)', 'RANGE/ROWS: Define rows for frame.(optional)', 'EXCLUDE: Exclude specific rows from frame.(optional)', 'WINDOW: Reuse a window definition.(optional)']
`unnest`: The function expands lists or structs into separate rows or columns, reducing nesting by one level., Parameters: ['list_or_struct: The list or struct to unnest.', 'recursive: Unnest multiple levels or not.(optional)', 'max_depth: Limit depth of unnesting.(optional)']
`prompt`: This function allows you to prompt large language models to generate text or structured data as output., Parameters: ['prompt_text: Text input for the model.', 'model: Model to use for prompt.(optional)', 'temperature: Model temperature value setting.(optional)', 'struct: Output schema for struct result.(optional)', 'struct_descr: Field descriptions for struct.(optional)', 'json_schema: Schema for JSON output format.(optional)']
`min`: Returns the minimum value from a set of numeric values., Parameters: ['value_column: Column containing numeric values.', 'ignore_nulls: Ignore NULL values if true.(optional)', 'filter_condition: Condition to filter rows.(optional)']
`min`: Finds the smallest value in a group of input values., Parameters: ['expression: The input value to consider']
`concat`: Concatenates multiple strings together into a single string., Parameters: ['string: String to concatenate']
`avg`: Calculates the average of non-null values., Parameters: ['arg: Data to be averaged']
`lower`: Converts a given string to lower case, commonly used for normalization in text processing., Parameters: ['string: String to be converted']
`read_csv_auto`: Automatically reads a CSV file and infers the data types of its columns., Parameters: ['file_path: Path to the CSV file', 'MD_RUN: Execution control parameter(optional)']
`read_parquet`: Reads Parquet files and treats them as a single table, supports reading multiple files via a list or glob pattern., Parameters: ['path_or_list_of_paths: Path(s) to Parquet file(s)', 'binary_as_string: Load binary as strings(optional)', 'encryption_config: Encryption configuration settings(optional)', 'filename: Include filename column result(optional)', 'file_row_number: Include file row number(optional)', 'hive_partitioning: Interprets Hive partition paths(optional)', 'union_by_name: Unify columns by name(optional)']
`strftime`: Converts timestamps or dates to strings based on a specified format pattern., Parameters: ['timestamp: Input date or timestamp value', 'format: Pattern for string conversion']
`array_agg`: Returns a list containing all values of a column, affected by ordering., Parameters: ['arg: Column to aggregate values']
`regexp_matches`: The function checks if a given string contains a specified regular expression pattern and returns `true` if it does, and `false` otherwise., Parameters: ['string: The input string to search', 'pattern: The regex pattern to match', 'options: Regex matching options string(optional)']
`replace`: Replacement scans in DuckDB allow users to register a callback that gets triggered when a query references a non-existent table. The callback can replace this table with a custom table function, effectively 'replacing' the non-existent table in the query execution process., Parameters: ['db: Database object where replacement applies', 'replacement: Handler for when table is missing', 'extra_data: Extra data given to callback(optional)', 'delete_callback: Cleanup for extra data provided(optional)']
`round`: Rounds a numeric value to a specified number of decimal places., Parameters: ['v: The number to round', 's: Decimal places to round to']
`length`: Returns the length of a string, Parameters: ['value: String to measure length of']
`query`: Table function query extracts statements from a SQL query string and outputs them as `duckdb_extracted_statements` objects. It is utilized to dissect SQL queries and obtain individual statements for further processing, enabling preparation or analysis of each separate statement., Parameters: ['connection: Database connection object', 'query: SQL query to extract from', 'out_extracted_statements: Object for extracted statements']
`read_json_auto`: Automatically infers the schema from JSON data and reads it into a table format., Parameters: ['filename: Path to the JSON file.', 'compression: File compression type.(optional)', 'auto_detect: Auto-detect key names/types.(optional)', 'columns: Manual specification of keys/types.(optional)', 'dateformat: Date format for parsing dates.(optional)', 'format: JSON file format.(optional)', 'hive_partitioning: Hive partitioned path interpretation.(optional)', 'ignore_errors: Ignore parse errors option.(optional)', 'maximum_depth: Max depth for schema detection.(optional)', 'maximum_object_size: Max size of JSON object.(optional)', 'records: JSON record unpacking option.(optional)', 'sample_size: Number of objects for sampling.(optional)', 'timestampformat: Timestamp parsing format.(optional)', 'union_by_name: Unify schemas of files.(optional)']
`range`: Creates a list of values within a specified numeric range, starting inclusively from 'start' and stopping exclusively before 'stop', with an optional step interval., Parameters: ['start: The inclusive start point.(optional)', 'stop: The exclusive end point.', 'step: Interval between each number.(optional)']
`range`: The table function generates a sequential list of values starting from a specified number, incrementing by a given step, up to but not including an end number., Parameters: ['start: Start of the range(optional)', 'stop: End of the range (exclusive)', 'step: Increment between values(optional)']
`date_diff`: Computes the number of specified partition boundaries between two dates (or timestamps)., Parameters: ['part: Specifies the date/timestamp partition', 'startdate: The start date or timestamp', 'enddate: The end date or timestamp']
`lag`: The window function provides the value from a prior row within the same result set partition., Parameters: ['expression: Column or expression to evaluate', 'offset: Number of rows back(optional)', 'default_value: Default value if no offset(optional)']
`year`: Extracts the year component from a date or timestamp value., Parameters: ['date: Date from which to extract year', 'timestamp: Timestamp from which to extract year']
`now`: Obtains the current date and time at the start of the current transaction, using the system's time zone., Parameters: ['None: No parameters required(optional)']
`group_concat`: Concatenates column string values using a specified separator, respecting the provided order., Parameters: ['arg: The column to concatenate', 'sep: Separator between concatenated values(optional)', 'ORDER BY: Specifies order of concatenation(optional)']
`regexp_extract`: If a string matches a given regular expression pattern, it returns the specified capturing group or groups with optional capture group names., Parameters: ['string: Input string to search in.', 'pattern: Regex pattern to match.', 'group: Specifies which group to capture.(optional)', 'name_list: Named capture groups struct.(optional)', 'options: Regex matching options.(optional)']
`upper`: Converts a given string to uppercase characters., Parameters: ['string: String to make uppercase']
`greatest`: Selects the largest value from a list of input values using lexicographical ordering., Parameters: ['x1: The first value to compare', 'x2: The second value to compare', '...: Additional values to compare(optional)', 'xn: Nth value to compare(optional)']
`row`: The function initiates the creation of a row in an appender by signaling the start of adding values for a new row., Parameters: ['appender: Appender to start new row']
`getvariable`: The function retrieves the value of a previously set SQL-level variable, returning NULL if the variable is not defined., Parameters: ['variable_name: The name of the variable']
`quarter`: Extracts the quarter (1 to 4) from a date value., Parameters: ['date: The input date to evaluate.']
`strptime`: Converts a string to a timestamp according to a specified format string, throwing an error on failure., Parameters: ['text: Input string to convert', 'format: String format to parse']
`substring`: Extracts a substring from a given string starting at a specified position and with a specified length., Parameters: ['string: The original string to extract from', 'start: Starting position for extraction', 'length: Number of characters to extract']
`add`: Adds two integer values and returns the sum., Parameters: ['a: First integer to add', 'b: Second integer to add', 'result: Sum of a and b']
`date_part`: Extracts a specified subfield from a timestamp and returns its numeric value, equivalent to the SQL keyword 'extract'., Parameters: ['part: The subfield to extract from the timestamp or timestamptz.', 'timestamp: The input timestamp value to extract the subfield from.', 'interval: Extracts date part from interval.(optional)']
`json_extract`: Extracts JSON from a specified path within a JSON object or array., Parameters: ['json: The JSON object or array.', 'path: Path to extract data from.']
`json_extract_string`: Extracts a string (VARCHAR) value from a JSON object at a specified path, converting JSON data to text if possible., Parameters: ['json: The JSON object to extract from', 'path: The path to the desired value']
`rank`: The rank function assigns a rank to each row within a partition of a result set, allowing for potential gaps in the ranking when there are ties., Parameters: ['order_column: Column or expression for sorting', 'partition_column: Column to partition data by(optional)', 'alias: Alias name for result column(optional)']
`day`: The function extracts the day of the month from a given date., Parameters: ['date: Date value to extract from']
`list`: DuckDB provides an aggregate function that executes an aggregate operation over the elements within a list. This function can be utilized to apply any existing aggregate function, like `min`, `sum`, or `histogram`, across the elements of a list. This allows the aggregation of list data in a flexible manner., Parameters: ['list: List to aggregate values.', "name: Aggregate function's name to apply.", 'value: Optional extra parameters needed.(optional)']
`generate_series`: This function creates a list of values within a specified range where both endpoints are inclusive., Parameters: ['start: Inclusive start of range(optional)', 'stop: Inclusive stop of range', 'step: Difference between successive values(optional)']
`generate_series`: Creates a list of values from start to stop inclusively, with a specified step., Parameters: ['start: Inclusive start of the series(optional)', 'stop: Inclusive end of the series', 'step: Step increment between each value(optional)']
`datediff`: Calculates the number of specified partition boundaries between two dates., Parameters: ['part: Time unit to measure', 'startdate: The starting date', 'enddate: The ending date']
`left`: Extracts left-most characters from a string., Parameters: ['string: String to extract characters from', 'count: Number of left-most characters']
`trim`: Removes specified characters from both sides of a string, or spaces if no characters are specified., Parameters: ['string: The input string to trim', 'characters: Characters to remove from string(optional)']
`array_has_any`: Returns true if any element is present in both input lists., Parameters: ['list1: First list to compare.', 'list2: Second list to compare.']
`datetrunc`: Truncates a date or timestamp to a specified precision part, such as year, month, or day., Parameters: ['part: The precision to truncate to.', 'date: The date to truncate.', 'timestamp: The timestamp to truncate.']
`split_part`: Splits a string by a specified separator and returns the part at a given index., Parameters: ['string: The string to be split', 'separator: The delimiter to split by', 'index: 1-based index to retrieve']
`read_json`: Reads JSON files, inferring schema and format automatically from the data., Parameters: ['filename: Path to JSON file(s).', 'auto_detect: Auto-detect schema from data.(optional)', 'columns: Specified columns and types.(optional)', 'compression: File compression type detected.(optional)', 'format: Format of JSON data.(optional)', 'hive_partitioning: Choose Hive partitioning method.(optional)', 'ignore_errors: Ignore errors during parsing.(optional)', 'maximum_depth: Maximum schema detection depth.(optional)', 'maximum_object_size: Limit JSON object size bytes.(optional)', 'records: Read JSON as records.(optional)', 'sample_size: Sample objects for detection.(optional)', 'timestampformat: Format for parsing timestamps.(optional)', 'union_by_name: Unify multiple file schema types.(optional)']
`read_csv`: Reads CSV files into a DuckDB relation, automatically inferring configurations such as delimiters, headers, and column types unless specified otherwise., Parameters: ['all_varchar: Assume all columns as VARCHAR(optional)', 'allow_quoted_nulls: Allow quoted nulls conversion(optional)', 'auto_detect: Enable auto detection of parameters(optional)', 'auto_type_candidates: Types considered for auto detection(optional)', 'columns: Specify column names and types(optional)', 'compression: File compression type(optional)', 'dateformat: Date format for parsing dates(optional)', 'decimal_separator: Decimal separator of numbers(optional)', 'delimiter: Character separating columns in rows(optional)', 'delim: Character separating columns in rows(optional)', 'escape: String for escaping data chars(optional)', 'filename: Include filename in result(optional)', 'force_not_null: Do not match null string(optional)', 'header: File contains a header line(optional)', 'hive_partitioning: Interpret path as Hive partitioned(optional)', 'ignore_errors: Ignore rows with parsing errors(optional)', 'max_line_size: Maximum line size in bytes(optional)', 'names: Column names as a list(optional)', 'new_line: New line characters in file(optional)', 'normalize_names: Normalize column names(optional)', 'null_padding: Pad remaining columns with nulls(optional)', 'nullstr: String representing null value(optional)', 'parallel: Use parallel CSV reader(optional)', 'quote: Use quoting for data values(optional)', 'sample_size: Number of rows for sampling(optional)', 'sep: Delimiter character between columns(optional)', 'skip: Lines to skip at top(optional)', 'timestampformat: Format for parsing timestamps(optional)', 'types or dtypes: Column types by position/name(optional)', 'union_by_name: Unify schemas by column name(optional)', 'store_rejects: Store errors in reject tables(optional)', 'rejects_scan: Name for rejects scan table(optional)', 'rejects_table: Name for rejects errors table(optional)', 'rejects_limit: Limit faulty records stored(optional)', 'delim: Specifies column delimiter character(optional)']
`today`: Returns the current date at the start of the transaction., Parameters: []
`floor`: Rounds down a numeric value to the nearest integer., Parameters: ['x: Value to be rounded down']
`ends_with`: Checks if a string ends with a specified substring, returning true if it does and false otherwise., Parameters: ['string: The string to check', 'search_string: The ending substring']
`regexp_replace`: Replaces portions of a string matching a regular expression with a specified replacement string. Can replace globally with the 'g' option., Parameters: ['string: The string to search in.', 'pattern: The regular expression to match.', 'replacement: The string to replace with.', 'options: Options to modify behavior.(optional)']
`list_distinct`: Removes duplicates and NULL values from a list., Parameters: ['list: Input list to process']
`abs`: Calculates the absolute value of a given numeric input., Parameters: ['x: Input value for operation']
`len`: Calculates the length of a specified input, returning the number of elements or characters it contains., Parameters: ['input: The input whose length is calculated.', 'length_type: Type of length to compute.(optional)', 'ignore_nulls: Whether to ignore null values.(optional)']
`substr`: Extracts a substring from a string starting at a specified position and continuing for a specified length., Parameters: ['string: The string to extract from', 'start: Starting position of extract', 'length: Number of characters to extract']
`last_value`: Evaluates an expression at the last row of the current window frame., Parameters: ['expr: Expression to evaluate at last row', 'IGNORE NULLS: Skip nulls in evaluation(optional)']
`time_bucket`: Truncates the provided timestamp by the specified interval, allowing for optional offsets or origins to alter the bucketing alignment., Parameters: ['bucket_width: Interval to truncate by', 'timestamptz or date: Timestamp or date value', 'offset: Offset interval for buckets(optional)', 'origin: Origin timestamp for alignment(optional)', 'timezone: Time zone for calculation(optional)']
`read_json_objects`: Reads JSON objects from the given file(s), allowing for various formats and compressed files., Parameters: ['filename: Path to JSON file(s)', 'compression: Type of file compression utilized(optional)', 'format: Format of the JSON data(optional)', 'hive_partitioning: Enable Hive partitioning path(optional)', 'ignore_errors: Ignore JSON parsing errors(optional)', 'maximum_sample_files: Max sampled files for detection(optional)', 'maximum_object_size: Max size of JSON object(optional)', 'filename: Add extra filename column(optional)']
`duckdb_functions`: This table function lists all functions, including macros, within the DuckDB instance providing details such as their type, return type, parameters, and other relevant metadata., Parameters: ['database_name: Database holding this function', 'schema_name: Schema where function resides', 'function_name: SQL name of the function', 'function_type: Kind of function (e.g. scalar)', 'description: Description of this function(optional)', 'return_type: Data type name of return(optional)', "parameters: Function's parameter names(optional)", 'parameter_types: Data type names of parameters(optional)', 'varargs: Data type for variable arguments(optional)', 'macro_definition: SQL expression defining macro(optional)', 'has_side_effects: Indicates if function is pure', 'function_oid: Internal identifier for function']
`histogram`: Produces a map of keys as histogram buckets with corresponding counts based on input values., Parameters: ['arg: Input values to aggregate.']
`md5`: Computes the MD5 hash of a given string and returns it as a VARCHAR., Parameters: ['string: The input string value.']
`format`: Formats a string using specified parameters following the fmt syntax., Parameters: ['format: The format string used.', 'parameters: Values to replace placeholders.(optional)']
`array_length`: Returns the number of elements in a JSON array. If provided, the path specifies a location within the JSON structure where the array's length is determined., Parameters: ['json: The JSON string to evaluate', 'path: The path to the JSON array(optional)']
`duckdb_tables`: Provides metadata about base tables in DuckDB instance., Parameters: ['database_name: Name of the database containing this table.', 'database_oid: Internal identifier of the database.', 'schema_name: Name of the schema containing this table.', 'schema_oid: Internal identifier of the schema.', 'table_name: Name of the base table.', 'table_oid: Internal identifier of the table object.', 'internal: False if user-defined table.', 'temporary: Whether it is a temporary table.', 'has_primary_key: True if table defines PRIMARY KEY.', 'estimated_size: Estimated number of rows in table.', 'column_count: Number of columns in the table.', 'index_count: Number of associated indexes.', 'check_constraint_count: Number of active check constraints.', 'sql: SQL definition for the table.']
`to_json`: Converts a value to JSON format., Parameters: ['any: Value to convert to JSON']
`month`: Returns the month as an integer from a given date or timestamp., Parameters: ['date_or_timestamp: Input date or timestamp value']
`stddev`: Calculates the sample standard deviation of a set of non-null values., Parameters: ['x: Values to calculate deviation']
`first_value`: The function returns the value of the specified expression evaluated at the first row of the window frame., Parameters: ['expr: The expression to evaluate.', 'IGNORE NULLS: Ignore NULL values in frame.(optional)']
`parquet_schema`: The function queries the internal schema of a Parquet file, revealing details such as column names, types, and other metadata., Parameters: []
`string_agg`: Concatenates string values from a column with a specified separator in order, optionally sorted by a criterion., Parameters: ['arg: Column of string values.', 'sep: Separator between concatenated strings.', 'ORDER BY: Optional sorting criteria.(optional)']
`flatten`: Flatten concatenates elements of a list of lists into a single list, flattening one level., Parameters: ['list_of_lists: A list containing lists']
`hash`: Computes a UBIGINT hash value for a given input, useful for operations like joins, grouping or checking data equality across different systems., Parameters: ['value: Input to compute hash from']
`current_date`: Returns the current date at the start of the current transaction., Parameters: ['transaction: Start of current transaction(optional)', 'current: Current session or scope(optional)']
`position`: Locates the position of the first occurrence of "search_string" after position 1 in the provided "string". It returns 0 if "search_string" is not found., Parameters: ['search_string: The substring to find.', 'string: The string to search in.']
`row_to_json`: Converts a STRUCT type into a JSON object format, facilitating the transformation of complex data structures into JSON format for further processing or output., Parameters: ['list: A structure to convert']
`duckdb_columns`: This function provides metadata about columns in the DuckDB instance, including details on data type, default values, etc., Parameters: ['database_name: Name of the database containing column', 'database_oid: Internal database identifier', 'schema_name: Name of schema containing table', 'schema_oid: Internal schema identifier', 'table_name: Name of table containing column', 'table_oid: Internal table object identifier', 'column_name: SQL name of the column', 'column_index: Position of column in table', 'internal: True if column is built-in', 'column_default: Column default value in SQL(optional)', 'is_nullable: True if column accepts NULL', 'data_type: Column datatype name', 'data_type_id: Internal data type identifier', 'character_maximum_length: Always NULL, no length restrictions', 'numeric_precision: Storage precision of column values(optional)', 'numeric_precision_radix: Precision number-base in bits/positions(optional)', 'numeric_scale: Fractional digits for decimal type(optional)', 'comment: User-defined comment on column(optional)']
`contains`: Checks if a map contains a given key and returns true or false., Parameters: ['map: The map to search', 'key: The key to search']
`week`: The function extracts the ISO week number from a date or timestamp, starting with Monday as the first day of the week., Parameters: ['date: Input date to process']
`duckdb_secrets`: Provides metadata about the secrets available in the DuckDB instance., Parameters: ['redact: Controls if sensitive data is redacted.(optional)']
`max_by`: The function finds the row with the maximum value in a specified column and returns a different column's value from that row, allowing for an ordered result based on the specified column., Parameters: ['arg: Value to return from row.', 'val: Column to determine maximum.', 'n: Number of top rows.(optional)']
`alias`: A scalar function alias provides an alternative name for a function to improve readability or conform to conventions. For instance, 'uppercase' could be used to call 'UPPER'., Parameters: ['alias: The alternative function name', 'function_name: The actual function name', 'parameters: Parameters of the function(optional)']
`json_structure`: Returns the structure of a given JSON, defaulting to JSON if types are inconsistent., Parameters: ['json: Input JSON value to process.']
`first`: Returns the first value (null or non-null) from the given column, and is affected by specifying an order using ORDER BY to determine which value is first., Parameters: ['column: Target column to aggregate.', 'ORDER BY (optional): Order used to determine first.(optional)', 'FILTER (optional): Condition to filter rows.(optional)']
`percent_rank`: Calculates the relative rank of a row within its partition as `(rank() - 1) / (total partition rows - 1)`, outputting a value between 0 and 1., Parameters: ['window_specification: Defines row partition and order.(optional)', 'ORDER BY: Specifies the row order.(optional)']
`json_transform`: Transforms a JSON object into a specified nested type structure, enabling efficient extraction and type conversion., Parameters: ['json: The JSON data to transform.', 'structure: Desired structure for transformation.']
`random`: Generates a random floating-point number between 0.0 and 1.0., Parameters: ['none: No parameters are needed.']
`any_value`: This aggregate function returns the first non-null value from a column, particularly useful to obtain any non-null entry when the specific order is not crucial yet needs to handle initial null values., Parameters: ['arg: Input column with values']
`reverse`: Reverses the order of the characters in a given string., Parameters: ['string: The string to reverse']
`list_aggregate`: Executes a specified aggregate function on the elements within a list., Parameters: ['list: The input list to aggregate', 'name: Name of the aggregate function', 'additional_arguments: Arguments passed to aggregate(optional)']
`epoch_ms`: The function converts either a given timestamp to milliseconds since the epoch or milliseconds since the epoch to a timestamp., Parameters: ['ms: Milliseconds since epoch(optional)', 'timestamp: Timestamp to convert to ms(optional)']
`aggregate`: The scalar function for aggregate in DuckDB is designed to create a custom aggregate function. It facilitates aggregation of data over a column in a database and involves setting parameters, return types, and function operations such as state initialization, state updates, and finalization., Parameters: ['aggregate_function: Pointer to aggregate function', 'name: Name of the aggregate function(optional)', 'type: Logical type of parameter(optional)', 'state_size: Size of aggregate state(optional)', 'state_init: Initializes the state(optional)', 'update: Updates the aggregate state(optional)', 'combine: Merges two aggregation states(optional)', 'finalize: Produces final result from state(optional)', 'destroy: Destructs the aggregate state(optional)', 'extra_info: Stores additional information(optional)', 'error: Aggregate function error message(optional)', 'set: Set of aggregate functions(optional)', 'info: Retrieves extra info from info(optional)', 'con: Connection to database(optional)', 'function: Aggregate function to add(optional)', 'out_database: The result database object', 'out_error: Output error on failure(optional)', 'config: Optional configuration details(optional)']
`read_json_objects_auto`: Reads JSON objects from a file or files using automatically detected format settings., Parameters: ['filename: Path to JSON file or files', 'compression: Type for file compression(optional)', 'filename: Include filename in result(optional)', 'format: Format for JSON data(optional)', 'hive_partitioning: Use Hive partitioned paths(optional)', 'ignore_errors: Continue ignoring parse errors(optional)', 'maximum_sample_files: Max files for auto-detection(optional)', 'maximum_object_size: Max bytes per JSON object(optional)']
`duckdb_constraints`: Provides metadata about constraints in the DuckDB instance., Parameters: []
`cos`: Computes the cosine of a given number, returning its trigonometric value., Parameters: ['x: Input number for calculation']
`sin`: Calculates the sine of a given angle expressed in radians., Parameters: ['value: Angle in radians to calculate sine']
`array_transform`: Transforms each element of the input list using a lambda function, returning a new list with the results., Parameters: ['list: The input list to transform', 'lambda: Function to apply to elements']
`datepart`: Extracts specified subfields from a TIMESTAMPTZ and returns them as a struct., Parameters: ['part: Subfield to extract', 'timestamptz: Input timestamp with time zone', '[part, ...]: List of subfields to extract(optional)']
`map`: The function returns an empty map., Parameters: ['(none): No parameters are required']
`least`: Selects the smallest value from a list of inputs., Parameters: ['x1, x2, ...: A list of numeric values.']
`epoch`: Converts a timestamp to seconds since the epoch (1970-01-01)., Parameters: ['timestamp: Timestamp to convert to seconds.']
`nextval`: Retrieves the next value from a specified sequence., Parameters: ['sequence_name: The name of the sequence']
`pragma_storage_info`: The function returns detailed storage information for a specified table, including metrics like compression type and storage chunk details., Parameters: ['table_name: Name of the table.']
`ceil`: Rounds a numeric value upward to the nearest integer., Parameters: ['x: The number to round up']
`list_concat`: Concatenates two lists into one., Parameters: ['list1: The first list to concatenate.', 'list2: The second list to concatenate.']
`median`: Finds the middle value of a dataset, averaging the two middle values for an even-sized array., Parameters: ['x: Values to find middle value']
`uuid`: Generates a random UUID as a string., Parameters: []
`radians`: Converts an angle measured in degrees to an equivalent angle in radians., Parameters: ['x: Angle in degrees to convert.']
`dayname`: Returns the English name of the weekday for a given date or timestamp., Parameters: ['date: A date to extract weekday.', 'timestamp: A timestamp to extract weekday.(optional)']
`embedding`: The function generates text embeddings using OpenAI's models., Parameters: ['my_text_column: Column containing text for embedding', 'model: Model type for embeddings(optional)']
`levenshtein`: Calculates the minimum number of single-character edits required to change one string into another, considering characters of different cases as distinct., Parameters: ['s1: The first string to compare', 's2: The second string to compare']
`acos`: Computes the arccosine of the input value., Parameters: ['x: Input value for arccosine.']
`timezone`: The function retrieves or sets a timestamp within a specified time zone, effectively converting between "local" and UTC times., Parameters: ['text: Specified time zone name or abbreviation', 'timestamp: The date and time to convert', 'timestamptz: Timestamp with time zone to convert']
`duckdb_views`: The function provides metadata about database views, including information on view names, schemas, and definitions., Parameters: []
`json_object`: Creates a JSON object from key-value pairs., Parameters: ['key: Key for the JSON object.', 'value: Value for the JSON object.']
`decode`: Converts a BLOB to a VARCHAR, failing if the BLOB is not valid UTF-8., Parameters: ['blob: The BLOB to convert']
`array_contains`: Checks if a given element exists in a list and returns true if it does., Parameters: ['list: The list to search', 'element: Element to search in list']
`hour`: Extracts the hour component from a given temporal value., Parameters: ['date: The date or timestamp value']
`array_cosine_similarity`: Computes the cosine similarity between two arrays of the same size, with elements that cannot be NULL., Parameters: ['array1: First array of values', 'array2: Second array of values']
`minute`: Extracts the minute part from a timestamp or interval., Parameters: ['timestamp: Extract minute from this timestamp']
`filter`: Constructs a list from elements of the input list for which a lambda function returns true., Parameters: ['list: Input list to be filtered.', 'lambda: Condition for filtering elements.']
`glob`: The function returns filenames located at the specified path using glob pattern syntax., Parameters: ['search_path: Specifies path using glob patterns']
`instr`: Returns the position of the first occurrence of the search string in another string, returning 0 if not found., Parameters: ['string: Input string to search within', 'search_string: String to find in input']
`string_to_array`: Splits a string into an array using the specified separator., Parameters: ['string: The input text to split.', 'separator: Character(s) defining split points.']
`concat_ws`: Concatenates multiple strings together with a specified separator in between each string., Parameters: ['separator: Separator placed between strings.', 'string: Strings to be concatenated together.']
`to_timestamp`: Converts a string into a timestamp using a specified format., Parameters: ['string: Input string to convert', 'format: Date format of the string']
`split`: Splits a string into a list of substrings based on a specified separator., Parameters: ['string: Input string to be split', 'separator: Character or string delimiter']
`power`: Calculates the result of raising a given number to an exponent value., Parameters: ['base: The number to raise', 'exponent: The power to raise by']
`last_day`: Calculates the last day of the month for a given date., Parameters: ['date: Input date to evaluate']
`json_merge_patch`: Merges two JSON documents together, updating the first document with keys and values from the second., Parameters: ['json1: First JSON document to merge', 'json2: Second JSON document to merge']
`lead`: Evaluates the expression at the row offset rows after the current row within the window frame. If there is no such row, a default value is returned., Parameters: ['expr: Expression evaluated on the row', 'offset: Number of rows to offset(optional)', 'default: Value to return if no row(optional)', 'IGNORE NULLS: Ignore nulls when offsetting(optional)']
`struct_pack`: Creates a STRUCT with specified keys and values., Parameters: ['name: Name of the struct entry', 'any: Value of the struct entry']
`array_filter`: Constructs a list from elements of the input list for which a specified condition returns true., Parameters: ['list: Input list to be filtered', 'lambda: Function returning boolean condition']
`list_aggr`: Executes a specified aggregate function on elements of a list., Parameters: ['list: The list of elements.', 'name: Aggregate function name.']
`date_sub`: Calculates the number of complete date part intervals between two date values., Parameters: ['part: type of interval to calculate', 'startdate: starting date for calculation', 'enddate: ending date for calculation']
`lpad`: Pads the input string with a specified character from the left until it reaches a desired length., Parameters: ['string: The input string to modify', 'count: The total length desired', 'character: Character used for padding']
`regexp_split_to_array`: This function splits a string at each occurrence of the regular expression, returning an array of substrings., Parameters: ['string: String to be split into array.', 'regex: Regular expression delimiter pattern.', 'options: Regular expression matching options.(optional)']
`map_from_entries`: Returns a map created from an array of key-value struct entries., Parameters: ['entries: Array of key-value entries.']
`duckdb_schemas`: Provides metadata about available schemas in the DuckDB instance., Parameters: ["oid: Schema object's internal identifier.", 'database_name: Database containing this schema name.', "database_oid: Database's internal identifier.", 'schema_name: SQL name of the schema.', 'internal: True if internal schema.', 'sql: Always NULL.']
`duckdb_settings`: The function provides metadata about current DuckDB settings., Parameters: ['name: Name of the setting', 'value: Current value of the setting', 'description: Description of the setting', "input_type: Logical datatype of setting's value"]
`str_split`: Splits a given string into parts based on a specified separator, returning an array of the split segments., Parameters: ['string: The text input to split.', 'separator: Delimiter to split the string.']
`bar`: Draws a band of characters whose width is proportional to (x - min) and equal to the specified width when x = max., Parameters: ['x: Value to represent as a band', 'min: Minimum value of the range', 'max: Maximum value of the range', 'width: Band width in characters(optional)']
`age`: Returns the interval between two timestamps, or between the current date and a single given timestamp when only one argument is provided., Parameters: ['timestamp: Timestamp to subtract from (or to subtract from current date)', 'timestamp2: Timestamp to subtract(optional)']
`query_table`: The function returns a table or the union of tables specified by their names., Parameters: ['tbl_names: Names of tables to use', 'by_name: Union tables by name(optional)']
`duckdb_indexes`: The function provides metadata about secondary indexes, including their names, uniqueness, and associated tables, within a DuckDB instance., Parameters: ['database_name: Name of the database', 'database_oid: Database internal identifier', 'schema_name: SQL name of the schema', 'schema_oid: Schema internal identifier', 'index_name: SQL name of the index', 'index_oid: Object identifier of the index', 'table_name: Name of the table', 'table_oid: Table object internal identifier', 'is_unique: Indicates uniqueness of index', 'is_primary: Always false for secondary', 'expressions: Always null', 'sql: Index SQL definition']
`regr_intercept`: Calculates the intercept of the linear regression line in a dataset, given an independent and dependent variable., Parameters: ['y: Dependent variable data', 'x: Independent variable data']
`regr_slope`: Returns the slope of the linear regression line, where the independent variable is used to calculate its change with the dependent variable., Parameters: ["y: The dependent variable's values.", "x: The independent variable's values."]
`log`: Calculates the natural logarithm of a given input value, providing an essential function for mathematical and statistical computations., Parameters: ['value: The number to compute']
`version`: Returns the currently active version of DuckDB as a string (e.g., v0.10.0)., Parameters: []
`duckdb_keywords`: Retrieves DuckDB's keywords and reserved words, including their categories., Parameters: []
`list_unique`: Counts the number of unique elements in a list., Parameters: ['list: Input list to count uniqueness.', 'element: Element type within the list.(optional)']
`read_ndjson_objects`: Reads newline-delimited JSON objects from a specified file or set of files., Parameters: ['compression: The compression type for file.(optional)', 'filename: Include filename in result.(optional)', 'format: Specify JSON format to use.(optional)', 'hive_partitioning: Use Hive partitioned path.(optional)', 'ignore_errors: Ignore parse errors if possible.(optional)', 'maximum_sample_files: Max JSON files sampled.(optional)', 'maximum_object_size: Max JSON object size (bytes).(optional)']
`current_setting`: Returns the current value of a specified configuration setting in DuckDB., Parameters: ['setting_name: Name of the configuration setting']
`array_distinct`: Removes duplicates and NULL values from a list, but does not preserve the original order., Parameters: ['list: The list to process.']
`duckdb_databases`: The table function returns a list of databases accessible from the current DuckDB process, including both the startup database and any attached databases., Parameters: []
`list_value`: Creates a `LIST` containing the provided argument values; alias of `list_pack`., Parameters: ['any: Values to include in list']
`to_base`: Converts an integer to a string representation in a specified base., Parameters: ['value: Integer value to convert', 'base: Base for number conversion']
`list_contains`: Returns true if a specified element is found within the given list., Parameters: ['list: The list to search in', 'element: Element to locate in list']
`from_json`: Transforms JSON into a specified nested structure., Parameters: ['json: The JSON input data.', 'structure: Specifies desired output structure.']
`pi`: The function returns the mathematical constant pi., Parameters: []
`dense_rank`: Ranks the current row without creating gaps in the ranking, counting peer groups all having the same rank., Parameters: ['partition_by_clause: Defines partitioning of result set(optional)', 'order_by_clause: Specifies attributes for ordering', 'frame_clause: Limits window frame range(optional)']
`repeat`: Repeats a given string a specified number of times, creating a new concatenated string as the result., Parameters: ['string: The input string to repeat.', 'count: The number of repetitions wanted.']
`current_schema`: Returns the name of the currently active schema, which defaults to 'main'., Parameters: []
`struct_extract`: This function extracts a specific entry from a STRUCT using either a name or an index., Parameters: ['struct: The struct to extract from', 'entry: The name of the entry(optional)']
`get_current_timestamp`: Returns the current date and time at the start of the current transaction., Parameters: []
`regexp_extract_all`: Splits the input string using the specified regex and retrieves all matches for the specified capturing group., Parameters: ['string: Input string to process', 'regex: Regular expression pattern', 'group: Match group to extract(optional)', 'options: Regular expression options(optional)']
`repeat`: The function generates a table with repeated rows of specified data values for a given number of times., Parameters: ['repeat_row: Values for the repeated rows.', 'num_rows: Number of rows to generate.']
`read_text`: Reads the content of specified files or patterns as a `VARCHAR`, validating for UTF-8 encoding., Parameters: ['source: File path or glob pattern']
`last`: Returns the last value of a column within a group of rows ordered by an expression., Parameters: ['column: The column to evaluate.', 'order by expression: Column or expression for sorting.(optional)', 'partition by expression: Column or expression for partitioning.(optional)', 'frame: Specifies window frame for function.(optional)']
`encode`: Converts a STRING to a BLOB, transforming UTF-8 characters into literal encoding., Parameters: ['string: The input string to encode.']
`dayofweek`: Extracts the numeric representation of the day of the week from a given date, where Sunday is represented as 0 and Saturday as 6., Parameters: ['date: The date to evaluate.']
`enum_range`: Returns all values of the given ENUM type as an array, allowing easy access to the possible values., Parameters: ['enum: Input enum type reference']
`json_extract_path`: Extracts JSON from a JSON object at a specified path, returning a result in JSON format., Parameters: ['json: The source JSON object.', 'path: The JSON path to extract.']
`array_slice`: Extracts a sublist from an array using specified start, end, and optional step values, similar to Python slicing. Handles negative indices., Parameters: ['list: The list to be sliced', 'begin: Index to start slicing from', 'end: Index to stop slicing at', 'step: Step size for slicing(optional)']
`pragma_table_info`: Returns information about the columns in a table including details such as column name, type, nullability, default value, and if it's part of the primary key., Parameters: ['table_name: Name of the target table']
`arg_max`: Finds the values associated with the maximum criterion in a dataset, optionally returning the top-N values in descending order., Parameters: ['arg: Expression to evaluate at max', 'val: Criterion for determining maximum value', 'n: Top n values to return(optional)']
`typeof`: The function returns the data type of the given expression's result., Parameters: ['expression: Expression to determine data type']
`strip_accents`: Removes accents from a string., Parameters: ['string: Input string to process.']
`gen_random_uuid`: Generates and returns a random UUID similar to `eeccb8c5-9943-b2bb-bb5e-222f4e14b687`., Parameters: []
`starts_with`: Checks if a string begins with a specified substring., Parameters: ['string: The string to search in.', 'search_string: The string to search for.']
`damerau_levenshtein`: The function calculates the minimum number of edit operations needed to transform one string into another, allowing insertions, deletions, substitutions, or transpositions of adjacent characters, with case-sensitive comparison., Parameters: ['s1: First string input to compare', 's2: Second string input to compare']
`cardinality`: Returns the number of key-value entries contained in a map., Parameters: ['map: The map to measure']
`which_secret`: Determines and returns the secret being used based on a file path and secret type., Parameters: ['path: File path to check secret', 'secret_type: Type of the secret service']
`corr`: The correlation coefficient is calculated between two sets of data to measure the strength and direction of a linear relationship between them., Parameters: ['y: First variable for correlation', 'x: Second variable for correlation']
`translate`: Converts characters in a string based on specified mappings from one set of characters to another., Parameters: ['source: Input string to be modified', 'from: Characters to be replaced', 'to: Replacement characters']
`array_unique`: This function counts the unique elements in a list., Parameters: ['list: The list to evaluate']
`json_keys`: Returns the keys of a JSON object as a list of strings. If a path is specified, it returns keys of the JSON object at that path., Parameters: ['json: JSON object to extract keys', 'path: path within the JSON object(optional)']
`list_has_any`: Returns true if any elements exist in both given lists., Parameters: ['list1: First list to compare elements', 'list2: Second list to compare elements']
`map_extract`: Returns a list with the value corresponding to a specified key from the map or an empty list if the key is not present., Parameters: ['map: Input map to search within.', 'key: Key to find in map.']
`try_strptime`: Converts a string into a timestamp using specified format strings, returning NULL on failure., Parameters: ['text: String to be converted', 'format: Format to parse the string']
`array_position`: Returns the index of an element in the list, or NULL if it is not found., Parameters: ['list: The list to search through', 'element: The element to find']
`str_split_regex`: Splits a string into parts based on a specified regular expression pattern., Parameters: ['string: Input string to split', 'regex: Regular expression for splitting']
`to_date`: Converts a string representation of a date into a date object., Parameters: ['date_text: String representation of date', 'format: Date format for parsing']
`strpos`: Returns the location of the first occurrence of a substring within a string, counting from 1. Returns 0 if no match is found., Parameters: ['string: The main string to search.', 'search_string: Substring to search for.']
`dbgen`: The table function generates TPC-H benchmark data according to a specified scale factor., Parameters: ['catalog: Target catalog for data generation(optional)', 'children: Number of partitions for data(optional)', 'overwrite: Unused parameter for overwrite(optional)', 'sf: Scale factor for data size', 'step: Define partition generation step(optional)', 'suffix: Append suffix to table names(optional)']
`string_split`: Splits a given string using a specified separator and returns an array of the resulting substrings., Parameters: ['string: The string to be split', 'separator: Separator to split the string']
`struct_insert`: The function adds new field(s) or value(s) to an existing STRUCT with the given argument values, using bound variable names as entry names., Parameters: ['struct: The initial struct object.', 'name := any, ...: Name-value pairs to add.']
`truncate`: Deletes all rows from a specified table without using a WHERE clause., Parameters: ['table_name: Name of the table.']
`list_sort`: Sorts the elements of a list based on specified ordering and null placement options., Parameters: ['list: The list to be sorted.', 'order: Sort order: ASC or DESC.(optional)', 'null_order: NULL placement: FIRST or LAST.(optional)']
`epoch_ns`: Returns the total number of nanoseconds since the epoch for a given timestamp., Parameters: ['timestamp: The input timestamp to convert']
`sqrt`: Computes the square root of a given numerical value., Parameters: ['x: A number to find the root']
`current_localtimestamp`: Returns a `TIMESTAMP` representing the current local date and time, as determined by the current time zone settings., Parameters: []
`map_entries`: Returns a list of structs containing key-value pairs from the map., Parameters: ['map: Map containing key-value pairs']
`duckdb_extensions`: Provides metadata about installed and loaded DuckDB extensions, including their name, status, and location., Parameters: ['extension_name: Name of the extension(optional)', 'loaded: Extension is currently loaded(optional)', 'installed: Extension is currently installed(optional)', 'install_path: Path of extension binary(optional)', 'description: Description of the extension(optional)', 'aliases: Alternative names for extension(optional)']
`seq_scan`: Performs a sequential scan on a specified table, returning all the rows without using an index., Parameters: ['table_name: Name of the table to scan.', 'columns: Columns to select from table.(optional)']
`duckdb_dependencies`: Provides metadata on dependencies between objects in the DuckDB instance., Parameters: ['classid: Always zero for this function.(optional)', 'objid: Internal id of the object.', 'objsubid: Always zero for this function.(optional)', 'refclassid: Always zero for this function.(optional)', 'refobjid: Internal id of the dependency.', 'refobjsubid: Always zero for this function.(optional)', 'deptype: Type of dependency (n/a).']
`test_all_types`: Generates a table with columns for various data types, displaying their minimum, maximum, and null values for testing purposes., Parameters: []
`duckdb_memory`: Provides metadata about DuckDB's buffer manager, detailing memory and disk usage for various components., Parameters: []
`stddev_samp`: Calculates the sample standard deviation., Parameters: ['x: Input data column for function']
`ntile`: Divides a dataset into a specified number of equally-sized buckets, assigning each row a bucket number ranging from 1 to the number of buckets., Parameters: ['num_buckets: Number of partitions for data distribution']
`isodow`: Returns the ISO numeric day of the week, where Monday is represented as 1 and Sunday as 7., Parameters: ['date: Date to calculate ISO weekday']
`monthname`: Returns the English name of the month for a given date or timestamp., Parameters: ['date: The date or timestamp input.']
`array_to_json`: Converts a LIST into a JSON array., Parameters: ['list: A list to convert']
`to_base64`: Converts a BLOB (binary large object) to a base64 encoded string., Parameters: ['blob: Input binary large object']
`array_extract`: Extracts a single element from a list based on a specified 1-based index position., Parameters: ['list: The list to extract from', 'index: The 1-based position index']
`map_keys`: Returns a list of all keys in the specified map., Parameters: ['map: The input map to query']
`dayofmonth`: Extracts the day part from a given date, representing the day of the month as an integer., Parameters: ['date: Date value to extract from']
`like_escape`: Returns true if the specified string matches the provided like pattern using case-sensitive matching, where an escape character is used to treat wildcard characters as literals., Parameters: ['string: The input string to match', 'like_specifier: Pattern to match the string', 'escape_character: Character to escape wildcards(optional)']
`weekofyear`: Calculates the ISO Week number from a given date., Parameters: ['date: Input date to evaluate', 'timestamp: Input timestamp to evaluate(optional)']
`dayofyear`: The function retrieves the day of the year from a given date, starting from 1 for January 1st., Parameters: ['date: The date to evaluate.']
`base64`: Converts a blob to a base64 encoded string., Parameters: ['blob: The input binary data.']
`yearweek`: The function returns the ISO year and 2-digit week number combined as a BIGINT in the form YYYYWW., Parameters: ['date: The input date to compute']
`map_values`: Returns a list of all values in a map., Parameters: ['map: The map input parameter']
`rtrim`: Removes specified characters from the right side of a string., Parameters: ['string: The string to trim', 'characters: Characters to remove from end(optional)']
`ltrim`: This function removes specified characters (or spaces by default) from the left side of a string., Parameters: ['string: Input string to process', 'characters: Characters to trim from left(optional)']
`to_days`: Constructs a day interval from an integer representing the number of days., Parameters: ['integer: Number of days as input']
`array_concat`: Concatenates two lists into a single list without modifying the original lists., Parameters: ['list1: The first list to concatenate', 'list2: The second list to concatenate']
`right`: Extracts a specified number of characters from the end of a string., Parameters: ['string: The input string', 'count: Number of characters to extract']
`to_minutes`: Constructs an interval representing a specified number of minutes., Parameters: ['integer: Number of minutes to construct']
`tpch_queries`: The table function returns all 22 predefined TPC-H queries with their respective identifiers., Parameters: []
`json_execute_serialized_sql`: Executes JSON serialized SQL statements and returns the resulting rows. Only one statement is executed at a time., Parameters: ['serialized_sql: JSON serialized SQL statement.']
`dsdgen`: Generates TPC-DS benchmark data based on specified scale factor., Parameters: ['sf: Set scale factor for data', 'overwrite: Overwrite existing data when true(optional)', 'suffix: Set file suffix for output(optional)']
`parquet_kv_metadata`: Returns custom key-value metadata defined in a Parquet file., Parameters: ['file_name: Path to the Parquet file', 'key: Metadata keys in BLOB format(optional)', 'value: Metadata values in BLOB format(optional)']
`pragma_version`: Retrieves the current version of DuckDB., Parameters: []
`listagg`: Concatenates string values from a specified column into a single string with a specified separator, ordered based on optional criteria., Parameters: ['arg: Column to concatenate values from', 'sep: Separator string for concatenation(optional)', 'ORDER BY: Optional ordering criteria for aggregation(optional)']
`decade`: Calculates the decade from a given date using the formula (year / 10)., Parameters: ['date: The input date value']
`list_pack`: Creates a `LIST` containing the provided argument values., Parameters: ['any: Values to include in list']
`hex`: Converts a blob to a VARCHAR using hexadecimal encoding., Parameters: ['blob: Blob to be converted to hex']
`list_slice`: Extracts a sublist from a list based on specified begin, end, and optional step indices, supporting negative values., Parameters: ['list: The list to be sliced', 'begin: Index to start slice from', 'end: Index to end slice at', 'step: Step size between elements(optional)']
`greatest_common_divisor`: Computes the greatest common divisor of two numbers., Parameters: ['x: First integer for GCD calculation', 'y: Second integer for GCD calculation']
`array_aggr`: Executes an aggregate function on the elements of a list., Parameters: ['list: The list of elements.', 'name: Aggregate function to apply.', 'additional_args: Additional arguments for function.(optional)']
`array_reduce`: Reduces elements of a list to a single value using a lambda function starting from the first element., Parameters: ['list: List to be reduced', 'lambda: Function applied to elements']
`regexp_escape`: Escapes special characters in a string to make it suitable for use in a regular expression, similar to Python's `re.escape`., Parameters: ['string: The input string to escape.']
`constant_or_null`: Returns `NULL` if the second argument is `NULL`, otherwise it returns the first argument., Parameters: ['arg1: The value to return.', 'arg2: Conditional check for NULL.']
`json_deserialize_sql`: Deserializes JSON serialized SQL statements back into SQL strings., Parameters: ['json: The JSON object to deserialize']
`datesub`: Calculates the number of complete partitions (e.g., months) between two dates or timestamps based on the specified part., Parameters: ['part: Date part to evaluate', 'startdate: Start date or timestamp', 'enddate: End date or timestamp']
`json_transform_strict`: Transforms JSON according to a specified structure, ensuring type casting is strictly followed and throwing an error on failure., Parameters: ['json: The JSON object to transform.', 'structure: Structure for transformation casting.']
`array_indexof`: Returns the index of the specified element in the list and returns NULL if not found., Parameters: ['list: List to search within', 'element: Element to find in list']
`millisecond`: Extracts the sub-minute millisecond component from a timestamp., Parameters: ['timestamp: Timestamp to extract from.']
`union_tag`: Retrieve the currently selected tag of a union as an Enum value., Parameters: ['union: The union to inspect']
`json_array_length`: Returns the number of elements in a JSON array, or 0 if it's not a JSON array. If a path is specified, it returns the number of elements at that path., Parameters: ['json: The JSON array to evaluate.', 'path: Path in JSON to evaluate.(optional)']
`array_reverse_sort`: Sorts a list in reverse order., Parameters: ['list: The list to sort', 'null_order: Order for null values(optional)']
`list_filter`: The function constructs a list from elements of the input list for which a given lambda function returns true., Parameters: ['list: The input list to filter', 'lambda: Function to test elements']
`rpad`: Pads a string with a specified character from the right until it reaches a certain length., Parameters: ['string: The input string to pad', 'count: Target length of padded string', 'character: Character to use for padding']
`transaction_timestamp`: Returns the current date and time at the start of the current transaction., Parameters: []
`enum_last`: Returns the last value of the input enum type., Parameters: ['enum: The enum type to examine']
`array_dot_product`: Alias for computing the inner product of two arrays., Parameters: ['array1: First array for calculation', 'array2: Second array for calculation']
`list_element`: The function extracts the nth (1-based) value from a list., Parameters: ['list: The list to be indexed', 'index: Position to extract element from']
`isfinite`: Checks if a floating point value is finite, returning true for finite numbers and false for infinity or NaN values., Parameters: ['x: The value to be checked.']
`to_milliseconds`: Constructs an interval of milliseconds based on the provided integer value., Parameters: ['integer: Number of milliseconds interval']
`regexp_full_match`: The function checks if the entire string matches the given regular expression and returns `true` if it does., Parameters: ['string: The input string to match', 'regex: The regular expression pattern', 'options: Options for regex; controls behavior(optional)']
`map_contains`: Determines if a map has a specified key., Parameters: ['map: The map to check.', 'key: The key to search.']
`to_centuries`: Constructs an interval representing a duration of centuries based on the integer input., Parameters: ['integer: Number of centuries to construct']
`epoch_us`: Converts a timestamp to the total number of microseconds since the epoch., Parameters: ['timestamp: Timestamp to convert to microseconds', 'time zone (for Timestamptz): Time zone for this timestamp(optional)']
`to_years`: Constructs an interval of years from an integer value., Parameters: ['integer: Number of years to construct']
`array_inner_product`: Computes the inner product between two non-null arrays of the same size., Parameters: ['array1: First array for computation', 'array2: Second array for computation']
`currval`: Returns the current value of a specified sequence after it has been incremented at least once via the `nextval` function., Parameters: ['sequence_name: Name of the sequence.']
`list_extract`: Extracts the nth element from a list, indexing from 1., Parameters: ['list: The list to extract from', 'index: The element position to retrieve']
`enum_range_boundary`: Returns an array representing the range between two enum values, allowing nulls to extend the range to the enum's boundaries., Parameters: ['enum1: Start value of the range.(optional)', 'enum2: End value of the range.(optional)']
`signbit`: Determines if the sign bit of a number is set, indicating a negative value., Parameters: ['x: Value to check sign bit']
`array_cross_product`: Computes the cross product of two non-NULL arrays, each containing exactly three elements., Parameters: ['array1: First array, non-NULL, three elements', 'array2: Second array, non-NULL, three elements']
`bitstring`: The function creates a zero-padded bitstring of a specified length based on the input bitstring., Parameters: ['bitstring: Input bitstring to be padded', 'length: Desired length of bitstring']
`length_grapheme`: Calculates the number of grapheme clusters in a given string, which may differ from the number of characters if the string includes combined emojis or accented characters., Parameters: ['string: Input string for processing']
`apply`: Applies a given lambda function to each element of a list, resulting in a transformed list., Parameters: ['list: A list of elements to transform.', 'lambda: The transformation function.', 'index: Optional parameter for index.(optional)']
`sign`: Computes the sign of a number, returning -1 for negative numbers, 0 for zero, and 1 for positive numbers., Parameters: ['value: Value to find sign of']
`array_aggregate`: Executes an aggregate function on list elements., Parameters: ['list: List of elements to aggregate.', 'name: Name of the aggregate function.']
`md5_number`: Computes the MD5 hash of a string, returning it as a HUGEINT., Parameters: ['string: Input string for hashing']
`error`: Sets an error message for a scalar function during its execution, indicating a failure in processing due to some condition., Parameters: ['info: Information about the function.', 'error: Error message to set.']
`parse_filename`: This function returns the last component of a file path, similar to `os.path.basename` in Python. It can optionally remove the file extension from the component name., Parameters: ['path: The file path to parse.', 'trim_extension: Remove file extension if true.(optional)', 'separator: Type of path separator used.(optional)']
`json_extract_path_text`: Extracts a VARCHAR string from a JSON object at a specified path., Parameters: ['json: The JSON object to query.', 'path: The path in the JSON.']
`nanosecond`: The function extracts the sub-minute nanosecond portion from a temporal type, such as a timestamp, returning the number of nanoseconds past the second., Parameters: ['timestamp: The input timestamp to evaluate']
`ucase`: Converts a given string to upper case., Parameters: ['string: The string to convert.']
`isoyear`: Extracts the ISO year number from a date, where the ISO year starts on the Monday of the week containing January 4th., Parameters: ['date: Date to extract ISO year']
`array_grade_up`: Returns the indexes corresponding to the positions in the original list, similar to sorting but for indices., Parameters: ['list: Input list to process']
`parse_dirname`: Extracts and returns the top-level directory name from a given path string, based on the specified path separator type., Parameters: ['path: The path input as string', 'separator: Separator type for the path(optional)']
`enum_first`: Returns the first value of the input enum type., Parameters: ['enum: An enumerated data type.']
`to_decades`: Constructs a decade interval from an integer value representing decades., Parameters: ['integer: Number of decades to construct']
`json_value`: Extracts a JSON scalar value from the specified path in the JSON object, returning NULL if the target is not a scalar., Parameters: ['json: The JSON object to query', 'path: The path to extract value']
`weekday`: Returns a numeric representation of the weekday, where Sunday is 0 and Saturday is 6., Parameters: ['date: The date to evaluate.']
`list_cosine_similarity`: Computes cosine similarity between two lists., Parameters: ['list1: First input list of numbers', 'list2: Second input list of numbers']
`array_apply`: Applies a lambda expression to each element in a list, returning a new list with the transformed elements., Parameters: ['list: The input list to process', 'lambda: Function applied to elements']
`jaccard`: Calculates the Jaccard similarity between two strings, considering characters of different cases as different and returning a similarity score between 0 and 1., Parameters: ['s1: The first input string', 's2: The second input string']
`gcd`: Calculates the largest integer that divides two numbers without leaving a remainder., Parameters: ['x: First number for calculation', 'y: Second number for calculation']
`millennium`: Extracts the millennium part from a date., Parameters: ['date: The date to evaluate']
`json_serialize_sql`: Converts SQL SELECT statements into a JSON format, handling multiple statements and formatting options., Parameters: ['statements: Semicolon-separated SQL SELECT statements.', 'skip_empty: Skip fields that are empty.(optional)', 'skip_null: Skip fields that are null.(optional)', 'format: Format output for readability.(optional)']
`grade_up`: The function returns the positions of elements in an ascending order from the original list, representing their index in the sorted sequence., Parameters: ['list: The input list for sorting']
`cot`: Computes the cotangent of a given number `x`., Parameters: ['x: The input angle in radians']
`array_sort`: Sorts the elements of a given list in ascending order by default, allowing optional configurations for sort order and NULL handling., Parameters: ['list: Elements to be sorted', "sort_order: Order: 'ASC' or 'DESC'(optional)", "nulls_order: 'NULLS FIRST' or 'LAST'(optional)"]
`parse_path`: Returns a list of the components (directories and filename) in a path., Parameters: ['path: The file path to parse', 'separator: Separator for parsing path(optional)']
`suffix`: Appends the specified suffix to the names of the TPC-H tables generated by the data generator function `dbgen`., Parameters: ['catalog: Target catalog for dbgen.(optional)', 'children: Number of partitions for generation.(optional)', 'overwrite: Not used currently.(optional)', 'sf: Scale factor for data generation.', 'step: Defines partition to generate.(optional)', 'suffix: Append suffix to table names.(optional)']
`array_has`: Checks if a list contains a specific element, returning true if the element exists., Parameters: ['list: The list to search in.', 'element: The element to search for.']
`array_cosine_distance`: Computes the cosine distance between two arrays of the same size, where the elements cannot be NULL., Parameters: ['array1: First input array', 'array2: Second input array']
`timezone_hour`: Extracts the hour portion of the time zone offset from a given temporal value., Parameters: ['value: Temporal input value to process']
`not_like_escape`: The function checks if a string doesn't match a given pattern using case-sensitive matching, with an escape character to treat wildcards as regular characters., Parameters: ['string: Input text to be checked.', 'like_specifier: Pattern to be matched against.', 'escape_character: Char used to escape wildcards.']
`make_time`: The function creates a time using specified hour, minute, and second values., Parameters: ['hour: Hour of the time', 'minute: Minute of the time', 'second: Second and fraction of time']
`degrees`: Converts a given angle in radians to its equivalent in degrees., Parameters: ['x: Angle in radians to convert.']
`array_value`: Creates an ARRAY containing the given argument values., Parameters: ['expr: Values for the ARRAY.']
`atan2`: Computes the arctangent based on the coordinates (y, x) and returns the angle in radians., Parameters: ['y: Numerator for the arctangent', 'x: Denominator for the arctangent']
`parse_dirpath`: The function returns the head of a file path, which is the pathname until the last slash, similar to Python's os.path.dirname function., Parameters: ['path: The path to process.', 'separator: Separators for path components.(optional)']
`from_json_strict`: Transforms a JSON string into a specified nested structure and throws an error if type casting fails., Parameters: ['json: The JSON to transform', 'structure: Specifies the desired structure']
`bit_count`: Returns the number of bits that are set in the given input., Parameters: ['bitstring: The bitstring to evaluate.', 'x: The integer to evaluate.']
`ilike_escape`: Performs case-insensitive pattern matching on a string, allowing search for wildcard characters using a defined escape character., Parameters: ['string: The input string to match', 'like_specifier: The pattern to match', 'escape_character: Character for escaping wildcards']
`vector_type`: Generates a table with columns containing values for specified types and an optional argument to affect vector representation., Parameters: ['col1, ..., coln: Types of the columns', "all_flat: Affects vector's internal representation(optional)"]
`format_bytes`: Converts bytes into a human-readable format using binary units such as KiB, MiB, GiB, etc., Parameters: ['bytes: Number of bytes to convert']
`timezone_minute`: Extracts the minute portion of the time zone offset from a date or timestamp., Parameters: ['date: Date or timestamp value input', 'timestamp: Date or timestamp value input(optional)']
`prefix`: The function finds secrets with a specified prefix and returns their matching ones based on the longest prefix rule., Parameters: ['path: File path to match secret', 'type: Service secret type (e.g., S3)']
`list_cosine_distance`: Computes the cosine distance between two equal-length lists, equivalent to `1.0 - list_cosine_similarity`., Parameters: ['list1: First input list of numbers', 'list2: Second input list of numbers']
`to_millennia`: Constructs an interval representing the specified number of millennia., Parameters: ['integer: Number of millennia to construct']
`bin`: Converts an integer into its binary representation as a string., Parameters: ['value: The integer to be converted']
`list_grade_up`: Returns the indexes in sorted order based on the input list values, instead of the values themselves., Parameters: ['list: List to be sorted']
`microsecond`: The microsecond function extracts the sub-minute microsecond portion from a temporal type, such as a timestamp, returning the number of microseconds past the second., Parameters: ['date: The temporal input value.']
`list_negative_inner_product`: Computes the negative dot product of two same-sized lists of numbers, equivalent to `- list_dot_product`., Parameters: ['list1: First list of numbers', 'list2: Second list of numbers']
`century`: The century function extracts the century information from a given date., Parameters: ['date_or_timestamp: Temporal value to extract century']
`get_current_time`: This function returns the current time at the start of the current transaction., Parameters: []
`jaro_winkler_similarity`: Measures the similarity between two strings using the Jaro-Winkler method, returning a similarity score between 0 and 1, with characters of different cases treated as different., Parameters: ['s1: First string for comparison', 's2: Second string for comparison']
`list_has_all`: Checks if all elements in a sublist exist in a given list., Parameters: ['list: The list to search within', 'sub-list: The list to check for']
`asin`: Computes the arcsine of a number., Parameters: ['x: The input value.']
`json_exists`: Returns `true` if a specified path exists in a given JSON object, otherwise returns `false`., Parameters: ['json: JSON object to search', 'path: Path to check within JSON']
`from_base64`: Converts a base64 encoded string to its original character string representation., Parameters: ['string: base64 encoded input string']
`string_split_regex`: Splits a string into an array based on a regular expression delimiter., Parameters: ['string: Input string to be split.', 'regex: Delimiter expression for splitting.']
`multiply`: Performs multiplication on two numeric inputs, returning the product., Parameters: ['x: First input to multiply', 'y: Second input to multiply']
`list_transform`: Transforms each element of a list using a specified lambda function and returns the resulting list., Parameters: ['list: The input list of elements', 'lambda: Function applied to elements']
`list_resize`: Resizes a list to a specified number of elements, initializing new ones with a given value or NULL., Parameters: ['list: The list to resize.', 'size: Number of elements to resize to.', 'value: Value for new elements.(optional)']
`pow`: Computes one number raised to the power of another., Parameters: ['x: Base number to be raised', 'y: Exponent to apply to base']
`gamma`: Interpolates factorial of input minus one, allowing fractional inputs., Parameters: ['x: Input value for computation']
`to_hours`: Constructs an hour interval based on an integer input., Parameters: ['integer: Number of hours to construct']
`divide`: Performs integer division of two numbers., Parameters: ['x: dividend for the division', 'y: divisor for the division']
`array_resize`: Resizes a list to a specified size, filling added slots with a given value or NULL by default., Parameters: ['list: The list to resize.', 'size: Desired size of the list.', 'value: Fill value for added slots.(optional)']
`array_cat`: Concatenates two lists into one., Parameters: ['list1: First list to concatenate', 'list2: Second list to concatenate']
`list_indexof`: Returns the index of an element within a list or NULL if not found., Parameters: ['list: The list to search in', 'element: The element to find']
`combine`: This function is used to combine intermediate state from multiple groups in a batch, forming a result for a scalar aggregation function., Parameters: ['duckdb_aggregate_function: Represents an aggregate function object.', 'state: Current state being processed.', 'state_pointers: Array of state pointers.', 'count: Number of state pointers.']
`not_ilike_escape`: Determines if a string does not match a specified pattern using case-insensitive matching, allowing an escape character to define wildcards., Parameters: ['string: The source string to check.', 'like_specifier: The pattern for matching.', 'escape_character: Character to escape wildcards.(optional)']
`current_schemas`: Returns a list of schemas, optionally including implicit schemas when true is passed as a parameter., Parameters: ['include_implicit: Include implicit schemas when true']
`list_distance`: Calculates the Euclidean distance between two lists of coordinates with equal length., Parameters: ['list1: First list of coordinates.', 'list2: Second list of coordinates.']
`list_apply`: Returns a list from applying a lambda to each list element., Parameters: ['list: The input list to transform', 'lambda: Function to apply to elements']
`list_inner_product`: Computes the dot product of two same-sized lists of numbers., Parameters: ['list1: First list of numbers', 'list2: Second list of numbers']
`atan`: Computes the arctangent of a given numeric input., Parameters: ['x: Value for arctangent computation']
`array_negative_inner_product`: Computes the negative inner product of two arrays of the same size and containing non-NULL elements., Parameters: ['array1: First input array of numbers.', 'array2: Second input array of numbers.']
`mod`: Performs a modulo operation to return the remainder of one numeric expression divided by another., Parameters: ['dividend: The number being divided.', 'divisor: The number to divide by.']
`list_position`: Returns the index of an element in a list or NULL if the element is not found., Parameters: ['list: The list to search in', 'element: Element to find index of']
`array_has_all`: Checks if all elements of a sublist are present in a main list., Parameters: ['list: The main list to check', 'sub-list: The sublist elements checked']
`list_zip`: Combines multiple lists into a single list of structs, matching elements by position, with optional truncation., Parameters: ['list_1: First list to zip', 'list_2: Second list to zip', '...: Additional lists to zip(optional)', 'truncate: Truncate to smallest list length(optional)']
`list_has`: Returns true if the list contains the specified element., Parameters: ['list: The list to search in', 'element: An element to find']
`ord`: It returns the ASCII value of the leftmost character of a string., Parameters: ['string_expression: The string to evaluate']
`to_microseconds`: Constructs an interval representing a specified number of microseconds., Parameters: ['integer: Number of microseconds to convert']
`mismatches`: Calculates the number of positions with different characters between two strings of equal length., Parameters: ['s1: First input string to compare.', 's2: Second input string to compare.']
`make_timestamp`: The function constructs a timestamp from individual parts, including year, month, day, hour, minute, and second., Parameters: ['year: Year component', 'month: Month component', 'day: Day component', 'hour: Hour component', 'minute: Minute component', 'second: Second component']
`ascii`: Returns the Unicode code point of the first character of a given string., Parameters: ['string: Input string for conversion.']
`log10`: Computes the base-10 logarithm of a number., Parameters: ['x: Number to compute log base 10']
`json_contains`: Returns true if a specified JSON value or structure is contained within another JSON object or array., Parameters: ['json_haystack: The JSON object or array', 'json_needle: The value to find']
`list_select`: Returns a list using specified indices., Parameters: ['value_list: The list of values.', 'index_list: Indices of selected elements.']
`enum_code`: Returns the numeric value associated with a specific ENUM value, providing its backing integer representation., Parameters: ['enum_value: The ENUM value to process']
`ln`: Computes the natural logarithm of a given number., Parameters: ['x: Number to compute the logarithm']
`printf`: The function formats a string using the printf syntax., Parameters: ['format: String format specifying placeholders.', 'parameters: Values to replace format specifiers.(optional)']
`octet_length`: Calculates the number of bytes in the binary representation., Parameters: ['blob: A binary large object']
`json_quote`: Creates a JSON representation from any type of value, interpreting LISTs as JSON arrays and STRUCTs or MAPs as JSON objects., Parameters: ['any: Value to convert to JSON']
`isnan`: Checks if the floating-point value is not a number and returns true if so, false otherwise., Parameters: ['x: Value to check if NaN']
`editdist3`: Calculates the minimum number of single-character edits (insertions, deletions, or substitutions) needed to change one string into another. It's case-sensitive and treats characters of different cases as distinct., Parameters: ['s1: The first input string', 's2: The second input string']
`set_bit`: Sets a specific bit at a given index in a bitstring to a new value, returning a new bitstring., Parameters: ['bitstring: The input bitstring value.', 'index: Position to set the bit.', 'new_value: New bit value to set.']
`to_weeks`: Constructs a week interval based on the given number of weeks., Parameters: ['integer: Number of weeks to convert']
`array_select`: Returns a list based on elements selected by indices from the index list., Parameters: ['value_list: The list of values.', 'index_list: List of indices to select.']
`lcase`: Converts a string to lower case., Parameters: ['string: The string to convert.']
`cbrt`: Calculates the cube root of a given number., Parameters: ['x: The number to cube root']
`element_at`: The function retrieves the value for a given key from a map, returning a list with the value or an empty list if the key is absent., Parameters: ['map: The map from which to retrieve', 'key: Key to retrieve value for']
`list_reduce`: Reduces elements of a list into a single value using a lambda function applied sequentially from the first element., Parameters: ['list: Input list of elements', 'lambda: Function applied to elements']
`json_array`: Creates a JSON array from one or more values., Parameters: ['value1: First value for JSON array', 'value2: Additional values for JSON array(optional)', '...: Additional values for JSON array(optional)']
`isinf`: This function checks if a floating point number is infinite and returns true or false accordingly., Parameters: ['x: Value to check for infinity']
`factorial`: Computes the product of an integer and all positive integers below it., Parameters: ['x: The integer to compute factorial']
`make_date`: Constructs a date from the specified year, month, and day components., Parameters: ['year: The value of the year.', 'month: The value of the month.', 'day: The value of the day.']
`log2`: Computes the logarithm of a number to base 2., Parameters: ['x: Number to compute logarithm.']
`ceiling`: Rounds a given number up to the nearest integer., Parameters: ['x: The input number to round']
`setseed`: Sets the seed for the random function., Parameters: ['x: Seed value for randomness']
`bit_position`: Returns the first starting index of a given substring within a bitstring, indexed from 1, or zero if the substring isn't present., Parameters: ['substring: Substring to search for', 'bitstring: Bitstring to be searched']
`even`: Rounds a numeric value to the nearest even integer by rounding away from zero., Parameters: ['x: The numeric value to round']
`least_common_multiple`: Computes the least common multiple of two numbers., Parameters: ['x: First number for LCM computation', 'y: Second number for LCM computation']
`stats`: This function provides statistics about a given expression, including minimum and maximum values, and null presence., Parameters: ['expression: The expression to evaluate']
`icu_sort_key`: Generates a surrogate key for sorting characters according to locale., Parameters: ['string: Characters to sort by locale', 'collator: Locale specifier for sorting(optional)']
`array_distance`: Computes the Euclidean distance between two arrays of equal size, which cannot contain NULL values., Parameters: ['array1: First array of floats', 'array2: Second array of floats']
`hamming`: Calculates the number of differing positions between two equally long strings, considering case sensitivity., Parameters: ['s1: First string to compare', 's2: Second string to compare']
`second`: Extracts the seconds part from a timestamp or an interval., Parameters: ['input: The timestamp or interval value']
`to_months`: Constructs a month interval from an integer value., Parameters: ['integer: Number of months to construct']
`left_grapheme`: This function extracts a specified number of grapheme clusters from the beginning of a string., Parameters: ['string: Input string to extract from', 'count: Number of graphemes to extract']
`substring_grapheme`: Extracts a substring composed of a specified number of grapheme clusters starting from a given position., Parameters: ['string: The input string to operate on.', 'start: Starting position of extraction.', 'length: Number of grapheme clusters to extract.']
`jaro_similarity`: Calculates the Jaro similarity between two strings, returning a value between 0 and 1 that indicates how similar the strings are. Characters of different cases are considered different., Parameters: ['s1: First input string', 's2: Second input string']
`json_type`: Returns the type of a JSON element or a specified path within a JSON object., Parameters: ['json: The JSON data input', 'path: Path within the JSON(optional)']
`json_valid`: Checks if the input is valid JSON, returning `true` if it is valid and `false` otherwise., Parameters: ['json: The string to validate as JSON.']
`lgamma`: Computes the logarithm of the Gamma function, which is useful for situations where you need to handle large scale factorials and avoid overflow issues by using their logarithm instead., Parameters: ['x: Input number for computation']
`array_where`: Applies a Boolean mask to a list, returning only the elements that correspond to true values in the mask., Parameters: ['value_list: The list to be filtered.', 'mask_list: The Boolean mask list.']
`list_reverse_sort`: Sorts the elements of a list in reverse order., Parameters: ['list: The list to be sorted', 'null_order: Order for NULL values(optional)']
`unicode`: Returns the Unicode code of the first character of a given string, or -1 if the string is empty. Returns NULL if the input is NULL., Parameters: ['string: Input string to analyze']
`get_bit`: Extracts the nth bit from a bitstring, with the first (leftmost) bit indexed at 0., Parameters: ['bitstring: The bitstring to examine.', 'index: Zero-based bit index.']
`right_grapheme`: Extracts the right-most specified number of grapheme clusters from a given string., Parameters: ['string: Input string to extract from', 'count: Number of graphemes to extract']
`lcm`: Computes the least common multiple of two numeric values., Parameters: ['x: First number for LCM computation', 'y: Second number for LCM computation']
`list_where`: Applies a boolean mask to a list to filter elements based on the mask's true values., Parameters: ['value_list: List to mask elements from', 'mask_list: Boolean mask for value_list']
`sha256`: Computes the SHA-256 hash of a given value and returns it as a VARCHAR., Parameters: ['value: Value to hash with SHA-256.']
`era`: The scalar function extracts the era component (e.g., BC/AD) from a given date or timestamp value., Parameters: ['date_or_timestamp: Temporal value to extract era from']
`strlen`: The function returns the number of bytes in a given string., Parameters: ['string: The input string to measure']
`to_seconds`: Converts an integer into a second interval., Parameters: ['integer: Number of seconds to construct']
`array_zip`: Combines multiple lists into one, creating a list of structs based on elements from each input list. Missing values are replaced with NULL when lists have different lengths., Parameters: ['list1: First list to combine.', 'list2: Second list to combine.', '...: Additional lists to combine.(optional)', 'truncate: Indicates whether to truncate.(optional)']
`list_negative_dot_product`: Computes the negative dot product of two same-sized lists of numbers., Parameters: ['list1: First list of numbers', 'list2: Second list of numbers']
`tan`: Computes the tangent of a given angle., Parameters: ['x: Angle for tangent calculation']
`bit_length`: Calculates the total number of bits in a bitstring value., Parameters: ['bitstring: The input bitstring value.']
`list_cat`: Concatenates two lists into a single list., Parameters: ['list1: First list to concatenate', 'list2: Second list to concatenate']
`union_extract`: Extracts the value with the specified tag from a union; returns NULL if the tag is not currently selected., Parameters: ['union: The union object to extract from.', 'tag: The tag value to extract.']
`union_value`: Creates a "UNION" holding a single value, tagged by the parameter name., Parameters: ['tag: The name for the tagged value.', 'expr: The value to be tagged.']
`make_timestamptz`: Creates a TIMESTAMP WITH TIME ZONE based on specified date-time components and, optionally, a time zone., Parameters: ['year: Year component of date', 'month: Month component of date', 'day: Day component of date', 'hour: Hour component of time', 'minute: Minute component of time', 'second: Second component of time', 'timezone: Time zone of timestamp(optional)']
`nfc_normalize`: Converts a string into its Unicode Normalization Form C (NFC), which is useful for string comparisons and ordering when dealing with mixed normalization forms., Parameters: ['string: The string to normalize']
`txid_current`: Returns the current transaction's identifier, a BIGINT value, creating a new one if necessary., Parameters: []
`nextafter`: Returns the next floating point value after one number in the direction of another., Parameters: ['x: Starting floating point number.', 'y: Direction towards this number.']
`subtract`: Subtracts two values, resulting in their difference., Parameters: ['x: The first numerical operand', 'y: The second numerical operand']
`chr`: Converts an ASCII code value into its corresponding character., Parameters: ['x: ASCII code value to convert']
`array_negative_dot_product`: Computes the negative inner product of two arrays of the same size and whose elements cannot be NULL., Parameters: ['array1: First array for computation.', 'array2: Second array for computation.']
`list_dot_product`: Computes the dot product of two lists of numbers of the same size., Parameters: ['list1: First list of numbers.', 'list2: Second list of numbers.']
`current_localtime`: Returns the current local time in the time zone setting of the database., Parameters: []
`xor`: Performs a bitwise exclusive OR operation between two bitstring values., Parameters: ['x: First bitstring to be XORed', 'y: Second bitstring to be XORed']
`reduce`: The function applies a lambda expression to each element of a list to produce a single cumulative result., Parameters: ['list: The input list of elements', 'lambda: Function applied to elements']
`finalize`: Finalizes the execution of a prepared statement, ensuring that any allocated resources are released., Parameters: ['sql: SQL statement to finalize(optional)', 'params: Parameters for the SQL statement(optional)', 'callback: Function called upon completion(optional)']
`exp`: Computes the exponential of a given input number, which is denoted as 'e' raised to the power of the input number., Parameters: ['input_number: Number to calculate the exponential']
`read_ndjson_auto`: The function reads newline-delimited JSON (NDJSON) files and automatically infers JSON schema and types., Parameters: ['filename: File or list of files', 'auto_detect: Auto-detect key names and types(optional)', 'columns: Specifies key names and types(optional)', 'compression: File compression type detection(optional)', 'convert_strings_to_integers: Convert strings to numerical types(optional)', 'dateformat: Date parsing format specification(optional)', 'filename: Include extra filename column(optional)', 'format: Format of JSON to read(optional)', 'hive_partitioning: Interpret as Hive partitioned(optional)', 'ignore_errors: Ignore parse errors in files(optional)', 'maximum_depth: Max depth for schema detection(optional)', 'maximum_object_size: Max size of JSON object(optional)', 'records: Whether JSON contains records(optional)', 'sample_size: Sample objects for type detection(optional)', 'timestampformat: Parsing format for timestamps(optional)', 'union_by_name: Unify schemas of files(optional)']
`arrow_scan`: The "arrow_scan" table function allows DuckDB to query data directly from an Arrow dataset. Users can provide a connection to the database and the Arrow stream containing the data, and DuckDB will interface with the Arrow stream to perform SQL queries., Parameters: ['connection: The database connection to use', 'table_name: Name for the Arrow table', 'arrow: The Arrow stream with data']
`parquet_metadata`: Queries the metadata of a Parquet file, providing details about row groups, columns, and basic statistics., Parameters: ['file_name: Name of the Parquet file.', 'row_group_id: ID of each row group.', 'row_group_num_rows: Number of rows in group.', 'row_group_num_columns: Columns present in row group.', 'row_group_bytes: Size in bytes of group.', 'column_id: ID of each column.', 'file_offset: Offset position in file.', 'num_values: Number of values in column.', 'path_in_schema: Column path in schema.', 'type: Data type of column.', 'stats_min: Minimum value statistic.', 'stats_max: Maximum value statistic.', 'stats_null_count: Count of null values.', 'stats_distinct_count: Count of distinct values.', 'stats_min_value: Actual minimum value found.', 'stats_max_value: Actual maximum value found.', 'compression: Compression algorithm used.', 'encodings: Encodings applied to column.', 'index_page_offset: Offset to index page.', 'dictionary_page_offset: Offset to dictionary page.', 'data_page_offset: Offset to data page.', 'total_compressed_size: Size after compression.', 'total_uncompressed_size: Size before compression.', 'key_value_metadata: Custom key-value metadata pairs.']
`parquet_file_metadata`: Queries file-level metadata of Parquet files, including format version and encryption details., Parameters: ['file_name: Path to the Parquet file', 'created_by: Creator of the Parquet file(optional)', 'num_rows: Number of rows in file', 'num_row_groups: Number of row groups', 'format_version: Format version used', 'encryption_algorithm: Encryption algorithm used(optional)', 'footer_signing_key_metadata: Metadata of signing key(optional)', 'format_version: Format version of file(optional)']
`sniff_csv`: The function identifies CSV properties from a file, returning details such as delimiters, quoting rules, and column types., Parameters: ['filename: Path to the CSV file.', 'sample_size: Rows considered for detection.(optional)']
`duckdb_types`: The function provides metadata about data types available in a DuckDB instance, including type name, type size, and logical type information., Parameters: ['database_name: Database containing the type', 'database_oid: Internal ID of the database', 'schema_name: Schema containing the type', 'schema_oid: Internal ID of the schema', 'type_name: Name or alias of the type', 'type_oid: Internal ID of the type(optional)', 'type_size: Bytes required to represent', 'logical_type: Canonical name of the type', 'type_category: Category of the data type', 'internal: Whether type is built-in']
`index_scan`: Performs an index scan on a specified table and column, returning the row IDs that match the scan condition., Parameters: ['index_name: Name of the index to scan', 'scan_condition: Condition determining rows for scan(optional)']
`repeat_row`: Generates a table with multiple rows, each containing specified fields., Parameters: ['varargs: Fields for each table row', 'num_rows: Number of rows to generate']
`read_ndjson`: Reads newline-delimited JSON (NDJSON) directly, interpreting each line as a separate JSON object., Parameters: ['compression: The compression type for the file(optional)', 'filename: Include extra filename column(optional)', 'format: Specifies JSON read format(optional)', 'hive_partitioning: Interpret path as Hive partitioned(optional)', 'ignore_errors: Ignore parse errors if newline(optional)', 'maximum_sample_files: Maximum JSON files sampled(optional)', 'maximum_object_size: Maximum size of JSON object(optional)']
`checkpoint`: Synchronizes the write-ahead log (WAL) with the database file without interrupting transactions., Parameters: ['database: Name of the database to be checkpointed(optional)']
`duckdb_optimizers`: Returns metadata about DuckDB's optimization rules, which can be selectively disabled for debugging., Parameters: []
`duckdb_temporary_files`: This function provides metadata about the temporary files DuckDB has written to disk, including their path and size., Parameters: []
`force_checkpoint`: Synchronizes the write-ahead log (WAL) with the file of the specified database, interrupting transactions., Parameters: ['database: Target database for checkpoint(optional)']
`pg_timezone_names`: The table function retrieves a list of available time zones and their respective abbreviations and UTC offsets., Parameters: ['name: Time zone full name', 'abbrev: Time zone abbreviation(optional)', 'utc_offset: Time zone UTC offset value(optional)']
`duckdb_variables`: The table function provides metadata about the variables available in the DuckDB instance, including their name, value, and type., Parameters: []
`tpch_answers`: Produces expected results for TPC-H queries for specified scale factors., Parameters: []
`pragma_collations`: Returns a list of all available collation names including both built-in and ICU extension collations., Parameters: []
`test_vector_types`: Generates a table with columns containing values conforming to the types of the input arguments., Parameters: ['coln: Columns with type-conforming values.', 'all_flat: Affects internal vector representation.(optional)']
`read_blob`: Reads content from a specified source as a BLOB, supporting file names, lists, or glob patterns., Parameters: ['source: Specifies the data source.']
`pragma_platform`: Returns an identifier for the platform DuckDB was compiled for., Parameters: []
`icu_calendar_names`: Retrieves and lists available non-Gregorian calendars supported by the ICU extension., Parameters: []
`summary`: Computes aggregates over all columns of a table or query, including min, max, average, and more, and returns these along with column names and types., Parameters: ['table_name: Name of the table to summarize', 'query: SQL query to summarize']
`parquet_scan`: Reads one or more Parquet files as table-like structures, supporting various configurations for file reading and processing., Parameters: ['path_or_list_of_paths: Paths to Parquet file(s)', 'binary_as_string: Load binary columns as strings(optional)', 'encryption_config: Configuration for Parquet encryption(optional)', 'filename: Include filename column result(optional)', 'file_row_number: Include file row number column(optional)', 'hive_partitioning: Interpret as Hive partitioned path(optional)', 'union_by_name: Unify columns of multiple schemas(optional)', 'MD_RUN: Control remote/local query execution(optional)']
`count_star`: The aggregate function `count(*)` calculates the total number of rows in a table or group (including rows with NULLs); given an expression, `count` counts only the non-NULL values.
Cool example: `SELECT count(*) FROM students;`, Parameters: ['expression: Column or expression to evaluate(optional)']
`approx_count_distinct`: Provides an approximate count of distinct elements using HyperLogLog., Parameters: ['x: Input to count distinct elements.', 'accurate_value_count: Accuracy level for the estimation.(optional)', 'debug: Debugging mode for output.(optional)']
`argmax`: Finds the row with the maximum value in a specified column and evaluates another column's expression at that row., Parameters: ['arg: Expression to evaluate at maximum', 'val: Column to find maximum value', 'n: Number of top rows to return(optional)']
`skewness`: Calculates the skewness, measuring asymmetry of a distribution., Parameters: ['x: Data values for skewness calculation']
`regr_sxy`: Calculates the sample covariance with Bessel's bias correction for pairs of non-null values., Parameters: ['y: dependent variable values', 'x: independent variable values']
`entropy`: Calculates the log-2 entropy of a given dataset, measuring information or uncertainty within the data., Parameters: ['x: Data for entropy calculation.']
`regr_syy`: Calculates the sample variance of the dependent variable, including Bessel's bias correction, for non-null pairs where x is the independent variable and y is the dependent variable., Parameters: ['y: Dependent variable data', 'x: Independent variable data']
`argmin`: The function finds the row with the minimum value of a specified column and returns the value of another specified column at that row., Parameters: ['arg: Value to be returned.', 'val: Value to be minimized.', 'n: Number of rows returned.(optional)']
`regr_count`: Returns the number of non-NULL pairs., Parameters: ['y: Dependent variable in pairs', 'x: Independent variable in pairs']
`arbitrary`: Returns the first value (null or non-null) from the input argument, useful when an arbitrary value from a group is needed without specifying an order., Parameters: ['arg: The column or expression to retrieve an arbitrary value from.']
`mean`: Calculates the average of all non-null values in a given column., Parameters: ['arg: Column or expression to average']
`approx_quantile`: This function provides an approximate quantile using the T-Digest algorithm., Parameters: ['x: Dataset column to analyze', 'pos: Quantile position from 0-1']
`kurtosis`: Calculates the excess kurtosis with bias correction according to the sample size, providing a measure of the tailedness of the distribution of data values., Parameters: ['x: Input numeric column or expression']
`quantile_cont`: Calculates the interpolated quantile for a given position within an array of values, resulting in a smooth estimate between elements., Parameters: ['x: Input data to aggregate', 'pos: Position(s) for quantile calculation']
`variance`: Calculates the variance of all non-null input values using Bessel's correction by default., Parameters: ['column: Column to calculate variance on']
`min_by`: Finds the row with the minimum value calculated from a specified expression and computes another expression from that row., Parameters: ['arg: Expression evaluated for each row.', 'val: Value used to order rows.', 'n: Number of top results.(optional)']
`bit_and`: Performs a bitwise AND on all bits in a given expression., Parameters: ['arg: Input expression for bitwise AND']
`var_pop`: Calculates the population variance of a set of values without bias correction., Parameters: ['x: Input values for variance calculation.']
`fsum`: Calculates the sum using a more accurate floating point summation (Kahan Sum) for increased precision in floating point arithmetic., Parameters: ['arg: Argument to be summed accurately']
`regr_r2`: Calculates the squared Pearson correlation coefficient between two variables in a linear regression, indicating the proportion of variance in the dependent variable that can be predicted from the independent variable., Parameters: ['y: Dependent variable in regression', 'x: Independent variable in regression']
`product`: Calculates the product of all non-null values in the specified column or expression., Parameters: ['expr: The values to multiply together.']
`mad`: Calculates the median absolute deviation of a dataset, with temporal types returning a positive `INTERVAL`., Parameters: ['x: Column containing the dataset', 'return_type: Expected return data type(optional)']
`bool_or`: Returns true if any input value is true, otherwise false., Parameters: ['arg: The input values to aggregate']
`regr_avgy`: Calculates the average of the dependent variable for non-NULL pairs, where x is the independent variable and y is the dependent variable., Parameters: ['y: Dependent variable in the function', 'x: Independent variable in the function']
`mode`: The mode function calculates the most frequently occurring value in a set of values., Parameters: ['value_column: Column containing values to analyze']
`reservoir_quantile`: Gives an approximate quantile using reservoir sampling., Parameters: ['x: Values to calculate quantile for.', 'quantile: Quantile position between 0-1.', 'sample_size: Number of samples for estimation.(optional)']
`sumkahan`: Calculates the sum of all non-null values in a column using a more accurate floating point summation to reduce numerical errors., Parameters: ['arg: Values to be summed']
`quantile`: Calculates the interpolated or discrete quantile of a set of values, determining the specific value or range at a given percentage position., Parameters: ['x: Values to aggregate for quantile', 'pos: Quantile position fraction (0-1)', 'method: Method of interpolation (for continuous quantile)(optional)']
`bool_and`: Returns `true` if every input value is `true`, otherwise `false`., Parameters: ['arg: A column or expression']
`kurtosis_pop`: Calculates the excess kurtosis of a data set (Fisher’s definition) without bias correction., Parameters: ['x: The input data values']
`regr_sxx`: Calculates the sample variance, with Bessel's bias correction, of the independent variable for non-NULL pairs., Parameters: ['y: Dependent variable values.', 'x: Independent variable values.']
`bitstring_agg`: The function returns a bitstring with bits set for each distinct position defined in the input argument., Parameters: ['arg: List of values for processing', 'min: Minimum range for positions(optional)', 'max: Maximum range for positions(optional)']
`bit_xor`: Performs a bitwise XOR on all bits in a given expression., Parameters: ['arg: Expression of bits to XOR.']
`quantile_disc`: Calculates the discrete quantile of a sorted set of values by selecting the greatest indexed element corresponding to the given position within the set., Parameters: ['x: The value set to quantify', 'pos: The quantile position(s) to return']
`kahan_sum`: Calculates the sum using an accurate floating-point summation technique (Kahan Sum) to minimize errors., Parameters: ['arg: Values to be summed accurately.']
`favg`: Calculates the average using a more accurate floating point summation technique known as Kahan Sum., Parameters: ['arg: Input values for averaging']
`regr_avgx`: Computes the average of the independent variable for non-NULL data pairs., Parameters: ['y: Dependent variable in regression', 'x: Independent variable in regression']
`covar_pop`: Computes the population covariance without bias correction., Parameters: ['y: Dependent variable data', 'x: Independent variable data']
`sem`: Calculates the population standard error of the mean from input values, which measures how far the sample mean of the data is likely to be from the true mean of the population., Parameters: ['input_values: Values to calculate SEM from', 'weight_column: Optional weights for each value(optional)']
`covar_samp`: The sample covariance is calculated, which includes Bessel's bias correction., Parameters: ['y: Dependent variable column values', 'x: Independent variable column values']
`stddev_pop`: Calculates the population standard deviation of a given dataset, ignoring NULL values., Parameters: ['x: Column for standard deviation']
`var_samp`: Calculates the sample variance using Bessel's correction, which adjusts for bias by dividing by (n-1) instead of n., Parameters: ['x: Input values to calculate variance.', 'order_clause: Optional order by clause.(optional)']
`bit_or`: Performs a bitwise OR operation across all bits of the input values., Parameters: ['arg: Values to aggregate with OR.']
`arg_min`: Finds the row with the minimum value in a specified column and evaluates another expression for that row., Parameters: ['arg: Expression to evaluate at minimum', 'val: Column to find minimum value']
`rank_dense`: The rank of the current row is determined with gaps, aligning with the row number of its first peer., Parameters: []
`cume_dist`: Calculates the cumulative distribution of a row within its partition., Parameters: []
`nth_value`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Parameters: ['expr: Expression to evaluate at row', 'nth: Row position to evaluate at', 'ignore_nulls: Ignore nulls in evaluation(optional)']
DuckDB Statements:
`SELECT`: Retrieves rows from the database, supporting expressions, DISTINCT, EXCLUDE/REPLACE star modifiers, and pattern-based column selection via COLUMNS., Examples: ['SELECT * FROM table_name;', 'SELECT col1 + col2 AS res, sqrt(col1) AS root FROM table_name;', 'SELECT DISTINCT city FROM addresses;', 'SELECT count(*) FROM addresses;', 'SELECT * EXCLUDE (city) FROM addresses;', 'SELECT * REPLACE (lower(city) AS city) FROM addresses;', "SELECT COLUMNS('number\\d+') FROM addresses;", 'SELECT min(COLUMNS(*)) FROM addresses;', 'SELECT "Some Column Name" FROM tbl;']
`WHERE`: Filters rows based on a boolean condition, returning only the rows for which the condition evaluates to true., Examples: ['SELECT * FROM table_name WHERE id = 3;', "SELECT * FROM table_name WHERE name ILIKE '%mark%';", 'SELECT * FROM table_name WHERE id = 3 OR id = 7;']
`ORDER BY`: Sorts the rows of the result set by one or more expressions, optionally specifying sort direction, NULL ordering, and collation., Examples: ['SELECT * FROM addresses ORDER BY city;', 'SELECT * FROM addresses ORDER BY city DESC NULLS LAST;', 'SELECT * FROM addresses ORDER BY city, zip;', 'SELECT * FROM addresses ORDER BY city COLLATE DE;', 'SELECT * FROM addresses ORDER BY ALL;', 'SELECT * FROM addresses ORDER BY ALL DESC;']
`GROUP BY`: Groups rows that share values in the specified columns so that aggregate functions can be computed per group; GROUP BY ALL groups by all non-aggregated columns., Examples: ['SELECT city, count(*) FROM addresses GROUP BY city;', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY city, street_name;', 'SELECT city, street_name FROM addresses GROUP BY ALL;', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ALL;']
`WITH`: Defines common table expressions (CTEs) — named temporary result sets referenced later in the query — optionally materialized or recursive., Examples: ['WITH cte AS (SELECT 42 AS x) SELECT * FROM cte;', 'WITH cte1 AS (SELECT 42 AS i), cte2 AS (SELECT i * 100 AS x FROM cte1) SELECT * FROM cte2;', 'WITH t(x) AS (⟨complex_query⟩) SELECT * FROM t AS t1, t AS t2, t AS t3;', 'WITH t(x) AS MATERIALIZED (⟨complex_query⟩) SELECT * FROM t AS t1, t AS t2, t AS t3;', 'WITH RECURSIVE FibonacciNumbers (RecursionDepth, FibonacciNumber, NextNumber) AS (SELECT 0 AS RecursionDepth, 0 AS FibonacciNumber, 1 AS NextNumber UNION ALL SELECT fib.RecursionDepth + 1 AS RecursionDepth, fib.NextNumber AS FibonacciNumber, fib.FibonacciNumber + fib.NextNumber AS NextNumber FROM FibonacciNumbers fib WHERE fib.RecursionDepth + 1 < 10) SELECT fn.RecursionDepth AS FibonacciNumberIndex, fn.FibonacciNumber FROM FibonacciNumbers fn;']
`JOIN`: Combines rows from two or more tables based on related columns, supporting inner, outer, cross, natural, semi, anti, lateral, and ASOF joins., Examples: ['SELECT * FROM table_name;', 'FROM table_name SELECT *;', 'FROM table_name;', 'SELECT tn.* FROM table_name tn;', 'SELECT * FROM schema_name.table_name;', 'SELECT t.i FROM range(100) AS t(i);', "SELECT * FROM 'test.csv';", 'SELECT * FROM (SELECT * FROM table_name);', 'SELECT t FROM t;', "SELECT t FROM (SELECT unnest(generate_series(41, 43)) AS x, 'hello' AS y) t;", 'SELECT * FROM table_name JOIN other_table ON table_name.key = other_table.key;', 'SELECT * FROM table_name TABLESAMPLE 10%;', 'SELECT * FROM table_name TABLESAMPLE 10 ROWS;', 'FROM range(100) AS t(i) SELECT sum(t.i) WHERE i % 2 = 0;', 'SELECT a.*, b.* FROM a CROSS JOIN b;', 'SELECT a.*, b.* FROM a, b;', 'SELECT n.*, r.* FROM l_nations n JOIN l_regions r ON (n_regionkey = r_regionkey);', 'SELECT * FROM city_airport NATURAL JOIN airport_names;', 'SELECT * FROM city_airport JOIN airport_names USING (iata);', 'SELECT * FROM city_airport SEMI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata IN (SELECT iata FROM airport_names);', 'SELECT * FROM city_airport ANTI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata NOT IN (SELECT iata FROM airport_names WHERE iata IS NOT NULL);', 'SELECT * FROM range(3) t(i), LATERAL (SELECT i + 1) t2(j);', 'SELECT * FROM generate_series(0, 1) t(i), LATERAL (SELECT i + 10 UNION ALL SELECT i + 100) t2(j);', 'SELECT * FROM trades t ASOF JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF LEFT JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF JOIN prices p USING (symbol, "when");', 'SELECT t.symbol, t.when AS trade_when, p.when AS price_when, price FROM trades t ASOF LEFT JOIN prices p USING (symbol, "when");', 'SELECT * FROM t AS t t1 JOIN t t2 USING(x);', 'FROM tbl SELECT i, s;', 'FROM tbl;']
`JOIN`: Combines rows from two or more tables based on related columns, supporting inner, outer, cross, natural, semi, anti, lateral, and ASOF joins., Examples: ['SELECT * FROM table_name;', 'FROM table_name SELECT *;', 'FROM table_name;', 'SELECT tn.* FROM table_name tn;', 'SELECT * FROM schema_name.table_name;', 'SELECT t.i FROM range(100) AS t(i);', "SELECT * FROM 'test.csv';", 'SELECT * FROM (SELECT * FROM table_name);', 'SELECT t FROM t;', "SELECT t FROM (SELECT unnest(generate_series(41, 43)) AS x, 'hello' AS y) t;", 'SELECT * FROM table_name JOIN other_table ON table_name.key = other_table.key;', 'SELECT * FROM table_name TABLESAMPLE 10%;', 'SELECT * FROM table_name TABLESAMPLE 10 ROWS;', 'FROM range(100) AS t(i) SELECT sum(t.i) WHERE i % 2 = 0;', 'SELECT a.*, b.* FROM a CROSS JOIN b;', 'SELECT a.*, b.* FROM a, b;', 'SELECT n.*, r.* FROM l_nations n JOIN l_regions r ON (n_regionkey = r_regionkey);', 'SELECT * FROM city_airport NATURAL JOIN airport_names;', 'SELECT * FROM city_airport JOIN airport_names USING (iata);', 'SELECT * FROM city_airport SEMI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata IN (SELECT iata FROM airport_names);', 'SELECT * FROM city_airport ANTI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata NOT IN (SELECT iata FROM airport_names WHERE iata IS NOT NULL);', 'SELECT * FROM range(3) t(i), LATERAL (SELECT i + 1) t2(j);', 'SELECT * FROM generate_series(0, 1) t(i), LATERAL (SELECT i + 10 UNION ALL SELECT i + 100) t2(j);', 'SELECT * FROM trades t ASOF JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF LEFT JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF JOIN prices p USING (symbol, "when");', 'SELECT t.symbol, t.when AS trade_when, p.when AS price_when, price FROM trades t ASOF LEFT JOIN prices p USING (symbol, "when");', 'SELECT * FROM t AS t t1 JOIN t t2 USING(x);', 'FROM tbl SELECT i, s;', 'FROM tbl;']
`CASE`: Performs conditional logic within an expression, returning the value associated with the first matching WHEN condition, or the ELSE value (NULL if omitted)., Examples: ['SELECT i, CASE WHEN i > 2 THEN 1 ELSE 0 END AS test FROM integers;', 'SELECT i, CASE WHEN i = 1 THEN 10 WHEN i = 2 THEN 20 ELSE 0 END AS test FROM integers;', 'SELECT i, CASE WHEN i = 1 THEN 10 END AS test FROM integers;', 'SELECT i, CASE i WHEN 1 THEN 10 WHEN 2 THEN 20 WHEN 3 THEN 30 END AS test FROM integers;']
`USE`: Selects the default database and/or schema used to resolve unqualified object names in subsequent statements., Examples: ['USE memory;', 'USE duck.main;']
`CREATE TABLE`: Creates a new table in the catalog, optionally with constraints, defaults, generated columns, or populated from a query result (CTAS)., Examples: ['CREATE TABLE t1 (i INTEGER, j INTEGER);', 'CREATE TABLE t1 (id INTEGER PRIMARY KEY, j VARCHAR);', 'CREATE TABLE t1 (id INTEGER, j VARCHAR, PRIMARY KEY (id, j));', 'CREATE TABLE t1 (\n i INTEGER NOT NULL,\n decimalnr DOUBLE CHECK (decimalnr < 10),\n date DATE UNIQUE,\n time TIMESTAMP\n);', 'CREATE TABLE t1 AS SELECT 42 AS i, 84 AS j;', "CREATE TEMP TABLE t1 AS SELECT * FROM read_csv('path/file.csv');", 'CREATE OR REPLACE TABLE t1 (i INTEGER, j INTEGER);', 'CREATE TABLE IF NOT EXISTS t1 (i INTEGER, j INTEGER);', 'CREATE TABLE nums AS SELECT i FROM range(0, 3) t(i);', 'CREATE TABLE t1 (id INTEGER PRIMARY KEY, percentage INTEGER CHECK (0 <= percentage AND percentage <= 100));', 'CREATE TABLE t1 (id INTEGER PRIMARY KEY, j VARCHAR);\nCREATE TABLE t2 (\n id INTEGER PRIMARY KEY,\n t1_id INTEGER,\n FOREIGN KEY (t1_id) REFERENCES t1 (id)\n);', 'CREATE TABLE t1 (x FLOAT, two_x AS (2 * x));']
`UPDATE`: Modifies the values of existing rows in a table, optionally using data from other tables or subqueries., Examples: ['UPDATE tbl SET i = 0 WHERE i IS NULL;', 'UPDATE tbl SET i = 1, j = 2;', 'UPDATE original SET value = new.value FROM new WHERE original.key = new.key;', 'UPDATE original SET value = (SELECT new.value FROM new WHERE original.key = new.key);', "UPDATE original AS true_original SET value = (SELECT new.value || ' a change!' AS value FROM original AS new WHERE true_original.key = new.key);", "UPDATE city SET revenue = revenue + 100 FROM country WHERE city.country_code = country.code AND country.name = 'France';"]
`DROP`: Removes a catalog entry such as a table, view, function, index, schema, sequence, macro, or type; CASCADE also drops dependent objects., Examples: ['DROP TABLE tbl;', 'DROP VIEW IF EXISTS v1;', 'DROP FUNCTION fn;', 'DROP INDEX idx;', 'DROP SCHEMA sch;', 'DROP SEQUENCE seq;', 'DROP MACRO mcr;', 'DROP MACRO TABLE mt;', 'DROP TYPE typ;', 'DROP SCHEMA myschema CASCADE;']
`ALTER TABLE`: Changes the schema of an existing table, such as adding, dropping, renaming, or retyping columns, or modifying column defaults and constraints., Examples: ['ALTER TABLE integers ADD COLUMN k INTEGER;', 'ALTER TABLE integers ADD COLUMN l INTEGER DEFAULT 10;', 'ALTER TABLE integers DROP k;', 'ALTER TABLE integers ALTER i TYPE VARCHAR;', "ALTER TABLE integers ALTER i SET DATA TYPE VARCHAR USING concat(i, '_', j);", 'ALTER TABLE integers ALTER COLUMN i SET DEFAULT 10;', 'ALTER TABLE integers ALTER COLUMN i DROP DEFAULT;', 'ALTER TABLE t ALTER COLUMN x SET NOT NULL;', 'ALTER TABLE t ALTER COLUMN x DROP NOT NULL;', 'ALTER TABLE integers RENAME TO integers_old;', 'ALTER TABLE integers RENAME i TO j;']
`FILTER`: Applies a condition to an aggregate function so that only rows satisfying the condition are included in the aggregation., Examples: ['SELECT count(*) FILTER (i <= 5) AS lte_five FROM generate_series(1, 10) tbl(i);', 'SELECT sum(i) FILTER (i <= 5) AS lte_five_sum FROM generate_series(1, 10) tbl(i);', 'SELECT count(i) FILTER (year = 2022) AS "2022" FROM stacked_data;', 'SELECT first(i) FILTER (year = 2022) AS "2022" FROM stacked_data;']
`HAVING`: Filters groups produced by GROUP BY based on a condition evaluated over aggregated values., Examples: ['SELECT city, count(*) FROM addresses GROUP BY city HAVING count(*) >= 50;', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY city, street_name HAVING avg(income) > 2 * median(income);']
`DESCRIBE`: Shows the schema (column names, types, and nullability) of a table or of a query's result., Examples: ['DESCRIBE tbl;', 'DESCRIBE SELECT * FROM tbl;']
`INSERT`: Inserts new rows into a table from literal values or a query, supporting BY POSITION/BY NAME matching, ON CONFLICT handling, and RETURNING clauses., Examples: ['INSERT INTO tbl VALUES (1), (2), (3);', 'INSERT INTO tbl SELECT * FROM other_tbl;', 'INSERT INTO tbl (i) VALUES (1), (2), (3);', 'INSERT INTO tbl (i) VALUES (1), (DEFAULT), (3);', 'INSERT OR IGNORE INTO tbl (i) VALUES (1);', 'INSERT OR REPLACE INTO tbl (i) VALUES (1);', 'INSERT INTO tbl BY POSITION VALUES (5, 42);', 'INSERT INTO tbl BY NAME (SELECT 42 AS b, 32 AS a);', 'INSERT INTO tbl VALUES (1, 84) ON CONFLICT DO NOTHING;', 'INSERT INTO tbl VALUES (1, 84) ON CONFLICT DO UPDATE SET j = EXCLUDED.j;', 'INSERT INTO tbl (j, i) VALUES (168, 1) ON CONFLICT DO UPDATE SET j = EXCLUDED.j;', 'INSERT INTO tbl BY NAME (SELECT 84 AS j, 1 AS i) ON CONFLICT DO UPDATE SET j = EXCLUDED.j;', 'INSERT INTO t1 SELECT 42 RETURNING *;', 'INSERT INTO t2 SELECT 2 AS i, 3 AS j RETURNING *, i * j AS i_times_j;', "CREATE TABLE t3 (i INTEGER PRIMARY KEY, j INTEGER); CREATE SEQUENCE 't3_key'; INSERT INTO t3 SELECT nextval('t3_key') AS i, 42 AS j UNION ALL SELECT nextval('t3_key') AS i, 43 AS j RETURNING *;"]
`VALUES`: Specifies a set of literal rows usable as a standalone result set, in a FROM clause, or as input to INSERT or CREATE TABLE AS., Examples: ["VALUES ('Amsterdam', 1), ('London', 2);", "SELECT * FROM (VALUES ('Amsterdam', 1), ('London', 2)) cities(name, id);", "INSERT INTO cities VALUES ('Amsterdam', 1), ('London', 2);", "CREATE TABLE cities AS SELECT * FROM (VALUES ('Amsterdam', 1), ('London', 2)) cities(name, id);"]
`DELETE`: Removes rows from a table that match an optional condition; without a condition all rows are removed (TRUNCATE is an equivalent shorthand)., Examples: ['DELETE FROM tbl WHERE i = 2;', 'DELETE FROM tbl;', 'TRUNCATE tbl;']
`CALL`: Invokes a table function and returns its result set., Examples: ['CALL duckdb_functions();', "CALL pragma_table_info('pg_am');"]
`CREATE SCHEMA`: Creates a new schema in the catalog to namespace tables and other objects., Examples: ['CREATE SCHEMA s1;', 'CREATE SCHEMA IF NOT EXISTS s2;', 'CREATE TABLE s1.t (id INTEGER PRIMARY KEY, other_id INTEGER);', 'CREATE TABLE s2.t (id INTEGER PRIMARY KEY, j VARCHAR);', 'SELECT * FROM s1.t s1t, s2.t s2t WHERE s1t.other_id = s2t.id;']
`SAMPLE`: Selects a random subset of rows from a query, specified as a percentage or a fixed number of rows, optionally with a sampling method., Examples: ['SELECT * FROM addresses USING SAMPLE 1%;', 'SELECT * FROM addresses USING SAMPLE 1% (bernoulli);', 'SELECT * FROM (SELECT * FROM addresses) USING SAMPLE 10 ROWS;']
`CREATE VIEW`: Creates a named view over a query; the underlying query is re-evaluated whenever the view is referenced., Examples: ['CREATE VIEW v1 AS SELECT * FROM tbl;', 'CREATE OR REPLACE VIEW v1 AS SELECT 42;', 'CREATE VIEW v1(a) AS SELECT 42;']
`COPY`: Transfers data between tables and external files (CSV, Parquet, JSON), or copies the contents of one attached database to another., Examples: ["COPY lineitem FROM 'lineitem.csv';", "COPY lineitem FROM 'lineitem.csv' (DELIMITER '|');", "COPY lineitem FROM 'lineitem.pq' (FORMAT PARQUET);", "COPY lineitem FROM 'lineitem.json' (FORMAT JSON, AUTO_DETECT true);", "COPY lineitem TO 'lineitem.csv' (FORMAT CSV, DELIMITER '|', HEADER);", "COPY (SELECT l_orderkey, l_partkey FROM lineitem) TO 'lineitem.parquet' (COMPRESSION ZSTD);", 'COPY FROM DATABASE db1 TO db2;', 'COPY FROM DATABASE db1 TO db2 (SCHEMA);']
`QUALIFY`: Filters rows based on the results of window functions, analogous to HAVING for aggregate functions., Examples: ['SELECT schema_name, function_name, row_number() OVER (PARTITION BY schema_name ORDER BY function_name) AS function_rank FROM duckdb_functions() QUALIFY row_number() OVER (PARTITION BY schema_name ORDER BY function_name) < 3;', 'SELECT schema_name, function_name, row_number() OVER (PARTITION BY schema_name ORDER BY function_name) AS function_rank FROM duckdb_functions() QUALIFY function_rank < 3;', 'SELECT schema_name, function_name, row_number() OVER my_window AS function_rank FROM duckdb_functions() WINDOW my_window AS (PARTITION BY schema_name ORDER BY function_name) QUALIFY row_number() OVER my_window < 3;', 'SELECT schema_name, function_name, row_number() OVER my_window AS function_rank FROM duckdb_functions() WINDOW my_window AS (PARTITION BY schema_name ORDER BY function_name) QUALIFY function_rank < 3;']
`SET VARIABLE`: Assigns a value to a session-level SQL variable that can later be read with getvariable; reading an undefined variable yields NULL., Examples: ['SET VARIABLE my_var = 30;', "SELECT 20 + getvariable('my_var') AS total;", 'SET VARIABLE my_var = 100;', "SET VARIABLE my_date = DATE '2018-07-13';", "SET VARIABLE my_string = 'Hello world';", "SET VARIABLE my_map = MAP {{'k1': 10, 'k2': 20}};", "SELECT getvariable('undefined_var') AS result;", "SET VARIABLE column_to_exclude = 'col1';", 'CREATE TABLE tbl AS SELECT 12 AS col0, 34 AS col1, 56 AS col2;', "SELECT COLUMNS(c -> c != getvariable('column_to_exclude')) FROM tbl;"]
`PIVOT`: Rotates rows into columns, producing one column per distinct value of the pivot expression with an aggregate applied to each., Examples: ['PIVOT Cities ON Year USING sum(Population);', 'PIVOT Cities ON Year USING first(Population);', 'PIVOT Cities ON Year USING sum(Population) GROUP BY Country;', 'PIVOT Cities ON Year IN (2000, 2010) USING sum(Population) GROUP BY Country;', 'PIVOT Cities ON Country, Name USING sum(Population);', "PIVOT Cities ON Country || '_' || Name USING sum(Population);", 'PIVOT Cities ON Year USING sum(Population) AS total, max(Population) AS max GROUP BY Country;', 'PIVOT Cities ON Year USING sum(Population) GROUP BY Country, Name;', 'SELECT * FROM (PIVOT Cities ON Year USING sum(Population) GROUP BY Country) pivot_alias;']
`INSTALL`: Downloads an extension so it can subsequently be loaded, optionally from the community extension repository., Examples: ['INSTALL httpfs;', 'INSTALL h3 FROM community;']
`ANALYZE`: Recomputes table statistics used by the query optimizer., Examples: ['ANALYZE;']
`SUMMARIZE`: Returns summary statistics (such as min, max, approximate distinct count, and null percentage) for each column of a table or query., Examples: ['SUMMARIZE tbl;', 'SUMMARIZE SELECT * FROM tbl;']
`UNPIVOT`: Transforms columns into rows, stacking the chosen column names and their values into name/value pairs (the inverse of PIVOT)., Examples: ['UNPIVOT monthly_sales ON jan, feb, mar, apr, may, jun INTO NAME month VALUE sales;', 'UNPIVOT monthly_sales ON COLUMNS(* EXCLUDE (empid, dept)) INTO NAME month VALUE sales;', 'UNPIVOT monthly_sales ON (jan, feb, mar) AS q1, (apr, may, jun) AS q2 INTO NAME quarter VALUE month_1_sales, month_2_sales, month_3_sales;', 'WITH unpivot_alias AS ( UNPIVOT monthly_sales ON COLUMNS(* EXCLUDE (empid, dept)) INTO NAME month VALUE sales ) SELECT * FROM unpivot_alias;', 'FROM monthly_sales UNPIVOT ( sales FOR month IN (jan, feb, mar, apr, may, jun) );', 'FROM monthly_sales UNPIVOT ( (month_1_sales, month_2_sales, month_3_sales) FOR quarter IN ((jan, feb, mar) AS q1, (apr, may, jun) AS q2) );']
`WINDOW`: Defines named window specifications in a SELECT statement that can be reused by multiple window functions via the OVER clause., Examples: ['SELECT SUM(salary) OVER my_window, AVG(salary) OVER my_window FROM employees WINDOW my_window AS (PARTITION BY department ORDER BY hire_date);', 'SELECT employee_id, first_value(name) OVER recent_hires FROM employees WINDOW recent_hires AS (ORDER BY hire_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW);']
`OFFSET`: Skips the specified number of rows before rows are returned, typically combined with LIMIT to page through query results., Examples: ['SELECT * FROM addresses LIMIT 5;', 'SELECT * FROM addresses LIMIT 5 OFFSET 5;', 'SELECT city, count(*) AS population FROM addresses GROUP BY city ORDER BY population DESC LIMIT 5;']
`OFFSET`: Skips the specified number of rows before rows are returned, typically combined with LIMIT to page through query results., Examples: ['SELECT * FROM addresses LIMIT 5;', 'SELECT * FROM addresses LIMIT 5 OFFSET 5;', 'SELECT city, count(*) AS population FROM addresses GROUP BY city ORDER BY population DESC LIMIT 5;']
`CREATE INDEX`: Creates an index on one or more columns (or expressions) of a table to accelerate lookups; UNIQUE indexes additionally enforce uniqueness of the indexed values., Examples: ['CREATE UNIQUE INDEX films_id_idx ON films (id);', 'CREATE INDEX s_idx ON films (revenue);', 'CREATE INDEX gy_idx ON films (genre, year);', 'CREATE INDEX i_index ON integers ((j + k));']
`CREATE TYPE`: Defines a new user-defined data type in the catalog, such as an ENUM, STRUCT, UNION, or an alias for an existing type., Examples: ["CREATE TYPE mood AS ENUM ('happy', 'sad', 'curious');", 'CREATE TYPE many_things AS STRUCT(k INTEGER, l VARCHAR);', 'CREATE TYPE one_thing AS UNION(number INTEGER, string VARCHAR);', 'CREATE TYPE x_index AS INTEGER;']
`COLLATE`: Specifies the collation rules (for example case insensitivity, accent insensitivity, or locale-specific ordering) used when comparing or sorting text values., Examples: ["SELECT 'hello' = 'hElLO'; -- Default collation", "SELECT 'hello' COLLATE NOCASE = 'hElLO'; -- Case insensitive collation", "SELECT 'hello' = 'hëllo'; -- Default collation", "SELECT 'hello' COLLATE NOACCENT = 'hëllo'; -- Accent insensitive collation", "SELECT 'hello' COLLATE NOCASE.NOACCENT = 'hElLÖ'; -- Both case and accent insensitive", "SET default_collation = NOCASE; SELECT 'hello' = 'HeLlo'; -- Setting global collation", "CREATE TABLE names (name VARCHAR COLLATE NOACCENT); INSERT INTO names VALUES ('hännes'); SELECT name FROM names WHERE name = 'hannes'; -- Column-specific collation", 'SELECT names.name AS name, other_names.name AS other_name FROM names, other_names WHERE names.name COLLATE NOACCENT.NOCASE = other_names.name COLLATE NOACCENT.NOCASE; -- Combine collations for comparison', "CREATE TABLE strings (s VARCHAR COLLATE DE); INSERT INTO strings VALUES ('Gabel'), ('Göbel'), ('Goethe'), ('Goldmann'), ('Göthe'), ('Götz'); SELECT * FROM strings ORDER BY s; -- Using ICU collation"]
`BEGIN TRANSACTION`: Starts an explicit transaction that must later be finished with COMMIT or ROLLBACK., Examples: ['BEGIN TRANSACTION;']
`CREATE SEQUENCE`: Creates a sequence object that generates numbers according to the given start, increment, bounds, and cycling behavior; values are obtained with nextval and currval., Examples: ['CREATE SEQUENCE serial;', 'CREATE SEQUENCE serial START 101;', 'CREATE SEQUENCE serial START WITH 1 INCREMENT BY 2;', 'CREATE SEQUENCE serial START WITH 99 INCREMENT BY -1 MAXVALUE 99;', 'CREATE SEQUENCE serial START WITH 1 MAXVALUE 10;', 'CREATE SEQUENCE serial START WITH 1 MAXVALUE 10 CYCLE;', 'CREATE OR REPLACE SEQUENCE serial;', 'CREATE SEQUENCE IF NOT EXISTS serial;', 'CREATE SEQUENCE id_sequence START 1;', "SELECT nextval('serial') AS nextval;", "SELECT currval('serial') AS currval;"]
`CREATE MACRO`: Defines a scalar or table macro (also creatable via CREATE FUNCTION) whose body is expanded into the calling query; macros support default parameters and overloaded parameter sets., Examples: ['CREATE MACRO add(a, b) AS a + b;', 'CREATE MACRO ifelse(a, b, c) AS CASE WHEN a THEN b ELSE c END;', 'CREATE MACRO one() AS (SELECT 1);', 'CREATE MACRO plus_one(a) AS (WITH cte AS (SELECT 1 AS a) SELECT cte.a + a FROM cte);', 'CREATE FUNCTION main.my_avg(x) AS sum(x) / count(x);', 'CREATE MACRO add_default(a, b := 5) AS a + b;', 'CREATE MACRO arr_append(l, e) AS list_concat(l, list_value(e));', "CREATE MACRO static_table() AS TABLE SELECT 'Hello' AS column1, 'World' AS column2;", 'CREATE MACRO dynamic_table(col1_value, col2_value) AS TABLE SELECT col1_value AS column1, col2_value AS column2;', "CREATE OR REPLACE TEMP MACRO dynamic_table(col1_value, col2_value) AS TABLE SELECT col1_value AS column1, col2_value AS column2 UNION ALL SELECT 'Hello' AS col1_value, 456 AS col2_value;", 'CREATE MACRO get_users(i) AS TABLE SELECT * FROM users WHERE uid IN (SELECT unnest(i));', 'SELECT * FROM get_users([1, 5]);', 'CREATE MACRO checksum(table_name) AS TABLE SELECT bit_xor(md5_number(COLUMNS(*)::VARCHAR)) FROM query_table(table_name);', "SELECT * FROM checksum('tbl');", 'CREATE MACRO add_x (a, b) AS a + b, (a, b, c) AS a + b + c;', 'SELECT add_x(21, 42) AS two_args, add_x(21, 42, 21) AS three_args;', 'CREATE MACRO add(a, b) AS a + b;', 'SELECT add(1, 2) AS x;', 'SELECT add_default(37);', 'SELECT add_default(40, b := 2) AS x;', 'CREATE MACRO triple_add(a, b := 5, c := 10) AS a + b + c;', 'SELECT triple_add(40, c := 1, b := 1) AS x;']
`VACUUM`: Reclaims storage and/or recomputes statistics; provided largely for PostgreSQL compatibility, with VACUUM ANALYZE updating table statistics and VACUUM FULL unsupported., Examples: ['VACUUM;', 'VACUUM ANALYZE;', 'VACUUM ANALYZE memory.main.my_table(my_column);', 'VACUUM FULL; -- error']
`RESET`: Restores a configuration option to its default value, undoing a previous SET at the appropriate (global or session) scope., Examples: ["SET memory_limit = '10GB';", 'SET threads = 1;', 'SET threads TO 1;', 'RESET threads;', "SELECT current_setting('threads');", "SET GLOBAL search_path = 'db1,db2'", "SET SESSION default_collation = 'nocase';"]
`RESET`: Restores a configuration option to its default value, undoing a previous SET at the appropriate (global or session) scope., Examples: ["SET memory_limit = '10GB';", 'SET threads = 1;', 'SET threads TO 1;', 'RESET threads;', "SELECT current_setting('threads');", "SET GLOBAL search_path = 'db1,db2'", "SET SESSION default_collation = 'nocase';"]
`EXPLAIN ANALYZE`: Executes the query and displays the physical plan annotated with actual run times and row counts; plain EXPLAIN shows the plan without executing the query., Examples: ['EXPLAIN SELECT * FROM table_name;', 'EXPLAIN ANALYZE SELECT * FROM table_name;']
`EXPLAIN ANALYZE`: Executes the query and displays the physical plan annotated with actual run times and row counts; plain EXPLAIN shows the plan without executing the query., Examples: ['EXPLAIN SELECT * FROM table_name;', 'EXPLAIN ANALYZE SELECT * FROM table_name;']
`CUBE`: Generates grouping sets for all possible combinations of the listed columns inside GROUP BY, producing aggregates at every level of detail (related to ROLLUP and GROUPING SETS)., Examples: ['SELECT city, street_name, avg(income) FROM addresses GROUP BY GROUPING SETS ((city, street_name), (city), (street_name), ());', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY CUBE (city, street_name);', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ROLLUP (city, street_name);', 'SELECT course, type, count(*) FROM students GROUP BY GROUPING SETS ((course, type), course, type, ());', 'WITH days AS ( SELECT year("generate_series") AS y, quarter("generate_series") AS q, month("generate_series") AS m FROM generate_series(DATE \'2023-01-01\', DATE \'2023-12-31\', INTERVAL 1 DAY) ) SELECT y, q, m, GROUPING_ID(y, q, m) AS "grouping_id()" FROM days GROUP BY GROUPING SETS ((y, q, m), (y, q), (y), ()) ORDER BY y, q, m;']
`CUBE`: Generates grouping sets for all possible combinations of the listed columns inside GROUP BY, producing aggregates at every level of detail (related to ROLLUP and GROUPING SETS)., Examples: ['SELECT city, street_name, avg(income) FROM addresses GROUP BY GROUPING SETS ((city, street_name), (city), (street_name), ());', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY CUBE (city, street_name);', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ROLLUP (city, street_name);', 'SELECT course, type, count(*) FROM students GROUP BY GROUPING SETS ((course, type), course, type, ());', 'WITH days AS ( SELECT year("generate_series") AS y, quarter("generate_series") AS q, month("generate_series") AS m FROM generate_series(DATE \'2023-01-01\', DATE \'2023-12-31\', INTERVAL 1 DAY) ) SELECT y, q, m, GROUPING_ID(y, q, m) AS "grouping_id()" FROM days GROUP BY GROUPING SETS ((y, q, m), (y, q), (y), ()) ORDER BY y, q, m;']
`CUBE`: Generates grouping sets for all possible combinations of the listed columns inside GROUP BY, producing aggregates at every level of detail (related to ROLLUP and GROUPING SETS)., Examples: ['SELECT city, street_name, avg(income) FROM addresses GROUP BY GROUPING SETS ((city, street_name), (city), (street_name), ());', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY CUBE (city, street_name);', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ROLLUP (city, street_name);', 'SELECT course, type, count(*) FROM students GROUP BY GROUPING SETS ((course, type), course, type, ());', 'WITH days AS ( SELECT year("generate_series") AS y, quarter("generate_series") AS q, month("generate_series") AS m FROM generate_series(DATE \'2023-01-01\', DATE \'2023-12-31\', INTERVAL 1 DAY) ) SELECT y, q, m, GROUPING_ID(y, q, m) AS "grouping_id()" FROM days GROUP BY GROUPING SETS ((y, q, m), (y, q), (y), ()) ORDER BY y, q, m;']
`ALTER VIEW`: Modifies the catalog entry of an existing view, for example renaming it., Examples: ['ALTER VIEW v1 RENAME TO v2;']
`UPDATE EXTENSIONS`: Updates installed extensions to their latest available versions, either for all extensions or for an explicitly listed subset., Examples: ['UPDATE EXTENSIONS;', 'UPDATE EXTENSIONS (name_a, name_b, name_c);']
`CHECKPOINT`: Synchronizes the write-ahead log (WAL) with the database file, flushing outstanding changes to disk; FORCE CHECKPOINT aborts in-flight transactions to do so., Examples: ['CHECKPOINT;', 'CHECKPOINT file_db;', 'FORCE CHECKPOINT;']
`COMMENT ON`: Attaches a comment to a catalog entry such as a table, column, view, index, sequence, type, or macro; setting the comment to NULL removes it., Examples: ["COMMENT ON TABLE test_table IS 'very nice table';", "COMMENT ON COLUMN test_table.test_table_column IS 'very nice column';", "COMMENT ON VIEW test_view IS 'very nice view';", "COMMENT ON INDEX test_index IS 'very nice index';", "COMMENT ON SEQUENCE test_sequence IS 'very nice sequence';", "COMMENT ON TYPE test_type IS 'very nice type';", "COMMENT ON MACRO test_macro IS 'very nice macro';", "COMMENT ON MACRO TABLE test_table_macro IS 'very nice table macro';", 'COMMENT ON TABLE test_table IS NULL;']
`IMPORT DATABASE`: Imports a database from a directory previously produced by EXPORT DATABASE, recreating its schema and data., Examples: ["EXPORT DATABASE 'target_directory';", "EXPORT DATABASE 'target_directory' (FORMAT CSV, DELIMITER '|');", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET);", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET, COMPRESSION ZSTD, ROW_GROUP_SIZE 100_000);", "IMPORT DATABASE 'source_directory';", "PRAGMA import_database('source_directory');"]
`IMPORT DATABASE`: Imports a database from a directory previously produced by EXPORT DATABASE, recreating its schema and data., Examples: ["EXPORT DATABASE 'target_directory';", "EXPORT DATABASE 'target_directory' (FORMAT CSV, DELIMITER '|');", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET);", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET, COMPRESSION ZSTD, ROW_GROUP_SIZE 100_000);", "IMPORT DATABASE 'source_directory';", "PRAGMA import_database('source_directory');"]
DuckDB Types:
`BOOLEAN`: The `BOOLEAN` type represents a statement of truth, "true" or "false", with the possibility of being "unknown", represented by `NULL` in SQL., Examples: ['> SELECT true, false, NULL::BOOLEAN;', '-- Outputs the three possible values for BOOLEAN: true, false, NULL.', 'CREATE TABLE example (is_active BOOLEAN);', '-- Create a table with a BOOLEAN column.', 'INSERT INTO example VALUES (true), (false), (NULL);', '-- Insert BOOLEAN values, including NULL.', 'SELECT * FROM example WHERE is_active AND is_verified;', '-- Filters rows where both conditions are true.', 'UPDATE example SET is_active = false WHERE condition;', '-- Update rows to set the BOOLEAN field to false.']
`STRUCT`: The `STRUCT` data type in SQL is used to create a column that contains an ordered list of columns, referred to as entries, which are accessed using named keys. This type is ideal for nesting multiple columns into a single column, allowing a structured and consistent data schema across all rows., Examples: ["SELECT struct_pack(key1 := 'value1', key2 := 42) AS s;", "SELECT {{'key1': 'value1', 'key2': 42}} AS s;", "SELECT a.x FROM (SELECT {{'x': 1, 'y': 2, 'z': 3}} AS a);", "SELECT struct_insert({{'a': 1, 'b': 2, 'c': 3}}, d := 4) AS s;", 'CREATE TABLE t1 (s STRUCT(v VARCHAR, i INTEGER));', "INSERT INTO t1 VALUES (row('a', 42));", "SELECT a.* FROM (SELECT {{'x': 1, 'y': 2, 'z': 3}} AS a);", "SELECT struct_extract({{'x space': 1, 'y': 2, 'z': 3}}, 'x space');"]
`FLOAT`: The FLOAT data type, also known by aliases FLOAT4, REAL, or float, represents a single precision floating-point number, facilitating approximate calculations and efficient handling of numerical data with precision typically up to 6 decimal digits and a range of at least 1E-37 to 1E+37., Examples: ['-- Example: Creating a table with a FLOAT column\nCREATE TABLE example_table (id INTEGER, value FLOAT);', '-- Example: Inserting values into a FLOAT column\nINSERT INTO example_table VALUES (1, 3.14), (2, 2.718);', '-- Example: Performing arithmetic operations with FLOAT values\nSELECT id, value * 2.0::FLOAT AS doubled_value FROM example_table;', '-- Example: Casting a numeric value to FLOAT\nSELECT CAST(100 AS FLOAT) AS float_value;', '-- Example: Using FLOAT values in a mathematical function\nSELECT SQRT(value) FROM example_table WHERE value > 0;', '-- Example: Comparing FLOAT values\nSELECT * FROM example_table WHERE value > 3.0::FLOAT;']
`DATE`: The `DATE` type in SQL is used to store calendar dates without time components, representing a year, month, and day as accurate information for querying and managing date-related data., Examples: ["-- Add 5 days to a specific date\\nSELECT DATE '1992-03-22' + 5; -- Result: 1992-03-27\\n", "-- Subtract one date from another to get the number of days between them\\nSELECT DATE '1992-03-27' - DATE '1992-03-22'; -- Result: 5\\n", '-- Get the current date at the start of the transaction\\nSELECT current_date; -- Example result: 2022-10-08\\n', "-- Add an interval of 2 months to a specific date\\nSELECT date_add(DATE '1992-09-15', INTERVAL 2 MONTH); -- Result: 1992-11-15\\n", "-- Find the difference in months between two dates\\nSELECT date_diff('month', DATE '1992-09-15', DATE '1992-11-14'); -- Result: 2\\n", "-- Extract the year from a specific date\\nSELECT date_part('year', DATE '1992-09-20'); -- Result: 1992\\n", "-- Get the (English) name of the weekday from a specific date\\nSELECT dayname(DATE '1992-09-20'); -- Result: Sunday\\n", "-- Convert a date to a string format\\nSELECT strftime(date '1992-01-01', '%a, %-d %B %Y'); -- Result: Wed, 1 January 1992"]
`TIMESTAMP_S`: The TIMESTAMP_S data type represents a timestamp with second precision, ignoring any sub-second parts and time zones., Examples: ["SELECT TIMESTAMP_S '1992-09-20 11:30:00.123456789'; -- Output: 1992-09-20 11:30:00", "SELECT TIMESTAMP_S '2000-01-01 00:00:00'; -- Output: 2000-01-01 00:00:00", "SELECT TIMESTAMP_S '2023-10-05 18:44:03.987654321'; -- Output: 2023-10-05 18:44:03"]
`DECIMAL`: The DECIMAL data type, also known as NUMERIC or DEC, allows for the representation of exact fixed-point decimal numbers, providing precise control over the number of digits and the digits after the decimal point., Examples: ['CREATE TABLE salaries (\\n employee_id INTEGER,\\n base_salary DECIMAL(10, 2)\\n);', 'INSERT INTO salaries (employee_id, base_salary) VALUES\\n (1, 50000.00),\\n (2, 65000.50);', 'SELECT employee_id, base_salary\\nFROM salaries\\nWHERE base_salary > DECIMAL(60000, 2);', 'UPDATE salaries\\nSET base_salary = base_salary + DECIMAL(5000.00, 2)\\nWHERE employee_id = 1;', 'SELECT CAST(99 AS DECIMAL(10, 2));']
`BIGINT`: The `BIGINT` data type is an 8-byte integer that can store large integer values suitable for handling significant quantities or high precision integer data., Examples: ['CREATE TABLE example_table (id BIGINT PRIMARY KEY, count BIGINT, reference_id BIGINT);', "SELECT * FROM parquet_metadata('file.parquet') WHERE row_group_id = 1;", 'ALTER TABLE orders ADD COLUMN order_count BIGINT DEFAULT 0;', 'UPDATE employee SET salary = salary + 1000 WHERE employee_id = 1001;', 'SELECT store_id, SUM(sales) AS total_sales FROM transactions GROUP BY store_id;', 'CREATE SEQUENCE order_sequence START WITH 1000 INCREMENT BY 1 MINVALUE 100 MAXVALUE 10000 NO CYCLE;']
`LIST`: A `LIST` column is a flexible, ordered sequence of data values of the same type, which can vary in length among rows and can include any uniform data type, allowing for complex nested data structures., Examples: ['SELECT [1, 2, 3]; -- Creates a static list of integers', "SELECT ['duck', 'goose', NULL, 'heron']; -- Creates a list of strings containing a NULL value", 'SELECT list_value(1, 2, 3); -- Uses the list_value function to create a list of integers', 'CREATE TABLE list_table (int_list INTEGER[], varchar_list VARCHAR[]); -- Defines a table with integer and varchar lists', "SELECT (['a', 'b', 'c'])[3]; -- Retrieves the third element from a list", 'SELECT list_slice([1, 2, 3, 4, 5], 2, 4); -- Extracts a sublist from the main list']
`SMALLINT`: The SMALLINT type, with aliases such as short, int2, smallint, and int16, represents a signed two-byte integer that can store whole numbers ranging from -32768 to 32767., Examples: ['CREATE TABLE test_table (id SMALLINT);', 'INSERT INTO test_table (id) VALUES (100);', 'SELECT * FROM test_table WHERE id BETWEEN -100 AND 100;', 'ALTER TABLE test_table ADD COLUMN new_column SMALLINT;', 'UPDATE test_table SET id = id + 1 WHERE id < 32767;']
`INTERVAL`: The INTERVAL data type represents a period of time that can be measured in months, days, microseconds, or a combination of these units, and is typically used to add or subtract to DATE, TIMESTAMP, TIMESTAMPTZ, or TIME values., Examples: ["SELECT INTERVAL '1 month 1 day'; -- Returns an interval representing 1 month and 1 day", "SELECT DATE '2000-01-01' + INTERVAL 1 YEAR; -- Adds 1 year to the specified date", "SELECT TIMESTAMP '2000-02-06 12:00:00' - TIMESTAMP '2000-01-01 11:00:00'; -- Returns interval of 36 days 1 hour", "SELECT INTERVAL '48:00:00'::INTERVAL; -- Converts a time string to microseconds interval representing 48 hours", "SELECT (DATE '2020-01-01' + INTERVAL 30 DAYS) = (DATE '2020-01-01' + INTERVAL 1 MONTH); -- Compares intervals by their conversion to microseconds"]
`VARCHAR`: `VARCHAR` is a versatile data type used to store variable-length character strings, accommodating a wide range of text and string data without enforcing a specific length., Examples: ['CREATE TABLE people (name VARCHAR, age INTEGER);', "INSERT INTO documents (text) VALUES ('This is a VARCHAR example text.');", "SELECT * FROM employees WHERE department = 'Engineering';", 'ALTER TABLE students ADD COLUMN email VARCHAR;', "UPDATE orders SET status = 'Shipped' WHERE order_id = 102;", "COPY products TO 'products.csv' DELIMITER ',' HEADER;"]
`VARINT`: VARINT is an arbitrary-precision integer data type capable of storing very large numbers beyond the limits of standard integer types., Examples: ['CREATE TABLE example_table (id VARINT);', 'INSERT INTO example_table (id) VALUES (123456789123456789123456789);', 'SELECT id FROM example_table WHERE id < 999999999999999999999999999;']
`TINYINT`: TINYINT is a signed one-byte integer type that can store whole numbers ranging from -128 to 127, often used to save storage space when values are known to fall within this small range., Examples: ["SELECT CAST('123' AS TINYINT);", 'INSERT INTO my_table (x) VALUES (CAST(100 AS TINYINT));', 'UPDATE my_table SET x = CAST(50 AS TINYINT) WHERE id = 1;', 'SELECT * FROM my_table WHERE x = CAST(-50 AS TINYINT);', 'CREATE TABLE example (id TINYINT);']
`INTEGER`: The INTEGER data type, with aliases such as int, signed, int4, int32, integer, and integral, represents whole numbers and is commonly used to store numeric data without fractional components., Examples: ['-- Assigning integer values to columns in a CREATE TABLE statement\nCREATE TABLE my_table (id INTEGER, age INTEGER);', '-- Inserting integer values as literals within an INSERT statement\nINSERT INTO my_table VALUES (1, 25);', '-- Using integer operations in a SELECT statement\nSELECT id + 10 AS new_id FROM my_table;', '-- Casting a float to an integer\nSELECT CAST(3.7 AS INTEGER) AS whole_number;', '-- Defining a column to only accept non-negative integers using a CHECK constraint\nCREATE TABLE my_table (id INTEGER CHECK (id >= 0));', '-- Using the INTEGER type in a primary key definition\nCREATE TABLE users (user_id INTEGER PRIMARY KEY, username VARCHAR);', '-- Updating integer columns\nUPDATE my_table SET age = age + 1 WHERE id = 1;', '-- Comparing integer values in a WHERE clause\nSELECT * FROM my_table WHERE age > 20;']
`ENUM`: The Enum data type represents a dictionary encoding structure that enumerates all possible unique string values of a column, allowing for efficient storage and query execution by storing only numerical references to the strings., Examples: ["CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');", 'CREATE TYPE birds AS ENUM (SELECT my_varchar FROM my_inputs);', 'CREATE TABLE person (name TEXT, current_mood mood);', "INSERT INTO person VALUES ('Pedro', 'happy'), ('Pagliacci', 'sad');", 'SELECT enum_range(NULL::mood) AS mood_values;', 'DROP TYPE mood;']
`UBIGINT`: UBIGINT, also known as 'uint64' or 'ubigint', is an unsigned 64-bit integer data type that can store large whole numbers from 0 to 18,446,744,073,709,551,615. It is commonly used for columns that require large non-negative integer values, especially where negative values are not applicable., Examples: ['CREATE TABLE huge_numbers (id UBIGINT);', 'INSERT INTO huge_numbers VALUES (4294967296);', 'SELECT id FROM huge_numbers WHERE id > 1000000;', 'ALTER TABLE huge_numbers ADD COLUMN new_value UBIGINT;', 'CREATE VIEW large_ids AS SELECT id FROM huge_numbers WHERE id > 100000000;']
`BLOB`: The BLOB (Binary Large Object) type represents a variable-length binary data object, used for storing arbitrary binary data in the database, such as images or files, without any interpretation of its contents., Examples: ["-- Create a BLOB with a single byte\\nSELECT '\\xAA'::BLOB;\\n-- Result: \\xAA\\n\\n-- Create a BLOB with multiple bytes\\nSELECT '\\xAA\\xAB\\xAC'::BLOB;\\n-- Result: \\xAA\\xAB\\xAC\\n\\n-- Concatenate two BLOB values\\nSELECT '\\xAA'::BLOB || '\\xBB'::BLOB;\\n-- Result: \\xAABB\\n\\n-- Convert a BLOB to a hexadecimal string\\nSELECT hex('\\xAA\\xBB'::BLOB);\\n-- Result: AABB\\n\\n-- Decode a BLOB to a string, ensuring it is valid UTF-8\\nSELECT decode('\\xC3\\xBC'::BLOB);\\n-- Result: ü\\n\\n-- Read a BLOB from a file\\nSELECT read_blob('myfile.bin');\\n-- Result: Contents of 'myfile.bin' as a BLOB"]
`HUGEINT`: The `HUGEINT` data type, also known as `INT128`, is a signed sixteen-byte integer that stores whole numbers ranging from -170141183460469231731687303715884105728 to 170141183460469231731687303715884105727, providing a broad range suitable for large numerical computations., Examples: ['-- Creating a table with a HUGEINT column\\nCREATE TABLE example_table (id HUGEINT, value HUGEINT);', '-- Inserting values into a HUGEINT column\\nINSERT INTO example_table (id, value) VALUES (1, 170141183460469231731687303715884105727);', '-- Performing arithmetic operations on HUGEINT\\nSELECT value + 10 FROM example_table WHERE id = 1;', "-- Using HUGEINT in a function\\nSELECT md5_number('12345')::HUGEINT;", '-- Comparing HUGEINT values\\nSELECT * FROM example_table WHERE value > 1000000000000000000;']
`TIMESTAMP`: A TIMESTAMP value represents an instant in time, composed of a combination of a date (year, month, day) and a time (hour, minute, second, microsecond), stored with microsecond precision, and it can be manipulated using various functions and operators., Examples: ["SELECT TIMESTAMP '1992-09-20 11:30:00.123456';", "SELECT TIMESTAMP '1992-09-20 11:30:00' + INTERVAL 10 DAYS;", "SELECT TIMESTAMP '2023-07-18 17:45:00' - TIMESTAMP '2023-07-10 15:30:00';", "SELECT age(TIMESTAMP '2023-07-18 17:45:00', TIMESTAMP '2022-07-18 17:45:00');", "SELECT strftime(TIMESTAMP '2023-07-18 17:45:00', '%Y-%m-%d %H:%M:%S');", "SELECT extract('hour' FROM TIMESTAMP '2023-07-18 17:45:00');"]
`UNION`: The UNION data type is a nested type that holds one of multiple distinct values with a "tag" to identify the active type and can contain multiple uniquely tagged members of various types, akin to C++ std::variant or Rust's Enum., Examples: ["```sql\nCREATE TABLE tbl1 (u UNION(num INTEGER, str VARCHAR));\nINSERT INTO tbl1 VALUES (1), ('two'), (union_value(str := 'three'));\n```", "```sql\nSELECT union_extract(u, 'str') AS str\nFROM tbl1;\n```", '```sql\nSELECT u.str\nFROM tbl1;\n```', '```sql\nSELECT union_tag(u) AS t\nFROM tbl1;\n```']
`TIMESTAMP_MS`: The "TIMESTAMP_MS" data type represents timestamps with millisecond precision, defined without considering time zones., Examples: ["SELECT TIMESTAMP_MS '1992-09-20 11:30:00.123456789'; -- Produces output: 1992-09-20 11:30:00.123"]
`TIMESTAMP_NS`: `TIMESTAMP_NS` represents a timestamp with nanosecond precision, useful for high-resolution time data but ignores time zone information., Examples: ["SELECT TIMESTAMP_NS '1992-09-20 11:30:00.123456789';"]
`USMALLINT`: USMALLINT is an unsigned two-byte integer type with a range from 0 to 65535, used for storing non-negative whole numbers within this range., Examples: ['CREATE TABLE example_table (id USMALLINT, age USMALLINT);', 'INSERT INTO example_table (id, age) VALUES (100, 25);', 'SELECT * FROM example_table WHERE age < 30;']
`UINTEGER`: The `UINTEGER` data type is used to store unsigned 32-bit integer values, allowing for a range from 0 to 4,294,967,295 and is particularly useful when negative values are not needed and memory efficiency is a concern for large datasets., Examples: ['CREATE TABLE example_table (count UINTEGER);', 'INSERT INTO example_table VALUES (150), (2750), (4294967295);', 'SELECT * FROM example_table WHERE count > 1000;', 'ALTER TABLE example_table ADD COLUMN new_count UINTEGER DEFAULT 0;', 'UPDATE example_table SET count = count + 100 WHERE count < 4294967295;']
`UHUGEINT`: UHUGEINT, also known as uint128, is an unsigned 128-bit integer data type used for storing large non-negative whole numbers ranging from 0 to approximately 3.4 x 10^38., Examples: ['CREATE TABLE numbers (id UHUGEINT); -- creates a table with a UHUGEINT column', 'INSERT INTO numbers VALUES (340282366920938463463374607431768211455); -- inserts the maximum valid UHUGEINT value', 'SELECT id FROM numbers WHERE id > 1000000; -- selects rows where the UHUGEINT value exceeds 1,000,000']
`TIME`: The `TIME` type represents a time of day, independent of a specific date, and is used to store and manipulate values consisting of hours, minutes, seconds, and fractional seconds., Examples: ["SELECT TIME '14:21:13';", "SELECT TIME '08:30:00' + INTERVAL 5 MINUTE;", "SELECT EXTRACT(HOUR FROM TIME '23:45:12');", 'SELECT MAKE_TIME(13, 30, 59.999);', 'SELECT CURRENT_TIME;']
`TIMESTAMP WITH TIME ZONE`: `TIMESTAMP WITH TIME ZONE` (or `TIMESTAMPTZ`) represents a point in time using a calendar date and time of day along with a time zone offset, allowing for time zone sensitive operations such as conversions and comparisons., Examples: ["SELECT TIMESTAMPTZ '2023-10-17 12:00:00+01';", "SELECT now() AT TIME ZONE 'UTC';", "SELECT TIMESTAMP '2023-10-17 10:00:00-07' AT TIME ZONE 'America/New_York';", "SELECT age(TIMESTAMPTZ '2005-10-17 12:00:00-07');", "SELECT TIMESTAMPTZ '2023-10-17 15:00:00+00' - TIMESTAMPTZ '2023-10-16 15:00:00+00';"]
`UUID`: The UUID data type is used to store universally unique identifiers as 128-bit values, formatted as 36-character strings with hexadecimal characters and dashes arranged in the pattern ⟨8 characters⟩-⟨4 characters⟩-⟨4 characters⟩-⟨4 characters⟩-⟨12 characters⟩., Examples: ['-- Create a table with a UUID column\nCREATE TABLE users (id UUID, name VARCHAR);', "-- Insert a new UUID value into the table\nINSERT INTO users (id, name) VALUES (gen_random_uuid(), 'Alice');", "-- Retrieve UUID values from a table\nSELECT id FROM users WHERE name = 'Alice';", '-- Generate and display a random UUID\nSELECT uuid();']
`DOUBLE`: The `DOUBLE` type, also known as `FLOAT8`, is a double-precision floating point number data type commonly used for storing large or precise decimal values in SQL queries., Examples: ['```sql\n-- Using DOUBLE to store and manipulate high-precision values\nCREATE TABLE sales_data (\n transaction_id INTEGER,\n sale_amount DOUBLE\n);\n\nINSERT INTO sales_data (transaction_id, sale_amount) VALUES (1, 1999.99);\nSELECT sale_amount * 1.05 AS total_after_tax FROM sales_data WHERE transaction_id = 1;\n```', '```sql\n-- Calculating the square root of a DOUBLE value\nSELECT sqrt(column_value) FROM my_table WHERE column_value > 0;\n```', '```sql\n-- Using DOUBLE in mathematical functions\nSELECT sin(column1), cos(column2) FROM my_numeric_table;\n```', '```sql\n-- Explicit casting of an INTEGER to DOUBLE for precision in arithmetic operations\nSELECT cast(my_integer_column AS DOUBLE) / 2 FROM my_table;\n```', '```sql\n-- Working with DOUBLE in spatial functions\nDOUBLE ST_Area (geometry) -- Computes the area of a geometry, returning a DOUBLE value as the area\n```', "```sql\n-- Using the DOUBLE type in JSON processing\nSELECT json_extract(my_json_column, '$.key')::DOUBLE FROM my_json_table;\n```"]
`UTINYINT`: An unsigned 8-bit integer type used to store whole numbers in the range of 0 to 255., Examples: ['CREATE TABLE example_table (column1 UTINYINT);', 'INSERT INTO example_table (column1) VALUES (200);', 'SELECT * FROM example_table WHERE column1 < 100;', '-- Attempting to store a negative number or a number greater than 255 will result in an error.', 'UPDATE example_table SET column1 = 255 WHERE column1 < 50;']
`NULL`: The `NULL` type in SQL represents a missing or unknown value, allowing for fields within a table to be uninitialized or absent in data., Examples: ['SELECT NULL = NULL;', 'SELECT NULL IS NULL;', "INSERT INTO table_name (column1, column2) VALUES (NULL, 'data');", "SELECT coalesce(NULL, 'default_value');", 'UPDATE table_name SET column1 = NULL WHERE condition;', "SELECT CASE WHEN column IS NULL THEN 'Value is NULL' ELSE column END FROM table_name;"]
`TIME WITH TIME ZONE`: The TIME WITH TIME ZONE (alias: TIMETZ) type represents the time of day with respect to a specific time zone, following the ISO 8601 format and allowing for time zone offsets., Examples: ["SELECT TIMETZ '1992-09-20 11:30:00.123456';", "SELECT TIMETZ '1992-09-20 11:30:00.123456-02:00';", "SELECT TIMETZ '1992-09-20 11:30:00.123456+05:30';"]
`BIT`: The `BIT` data type, also known as `BITSTRING`, represents variable-length strings consisting of 1s and 0s, suitable for operations such as bitwise manipulation., Examples: ["SELECT '10101'::BITSTRING & '10001'::BITSTRING AS result;", "SELECT bit_count('1101011'::BITSTRING) AS set_bits_count;", "SELECT bit_length('10101011'::BITSTRING) AS length_in_bits;", "SELECT octet_length('1101011'::BITSTRING) AS length_in_bytes;", "SELECT set_bit('0110010'::BITSTRING, 2, 0) AS updated_bitstring;"]
`MAP`: The MAP type is an ordered collection of key-value pairs, where keys are unique and can be of any type, allowing for diverse and flexible schema structures in databases., Examples: ["SELECT MAP {{'key1': 10, 'key2': 20, 'key3': 30}};", "SELECT map_from_entries([('key1', 10), ('key2', 20), ('key3', 30)]);", "SELECT MAP(['key1', 'key2', 'key3'], [10, 20, 30]);", 'SELECT MAP {{1: 42.001, 5: -32.1}};', "SELECT MAP {{['a', 'b']: [1.1, 2.2], ['c', 'd']: [3.3, 4.4]}};", 'CREATE TABLE tbl (col MAP(INTEGER, DOUBLE));', "SELECT MAP {{'key1': 5, 'key2': 43}}['key1'];", "SELECT MAP {{'key1': 5, 'key2': 43}}['key1'][1];", "SELECT MAP {{'key1': 5, 'key2': 43}}['key3'];", "SELECT element_at(MAP {{'key1': 5, 'key2': 43}}, 'key1');"]
`ARRAY`: The ARRAY data type stores fixed-size arrays where each element is of the same type, and it is suitable for representing ordered sequences of elements such as numerical vectors or nested arrays., Examples: ['SELECT array_value(1, 2, 3); -- Creates an array with elements 1, 2, and 3', 'CREATE TABLE example_table (id INTEGER, arr INTEGER[3]); -- Declares an array of three integers', 'SELECT id, arr[1] AS element FROM example_table; -- Retrieves the first element of the array', 'SELECT array_value(array_value(1, 2), array_value(3, 4), array_value(5, 6)); -- Creates a nested array using arrays as elements', 'INSERT INTO example_table VALUES (1, [1, 2, 3]), (2, [4, 5, 6]); -- Inserts rows with array values into a table', 'SELECT array_cosine_similarity(array_value(1.0, 2.0, 3.0), array_value(2.0, 3.0, 4.0)); -- Computes cosine similarity between two arrays of the same size', 'SELECT array_cross_product(array_value(1.0, 2.0, 3.0), array_value(2.0, 3.0, 4.0)); -- Computes the cross product of two 3-element arrays']
`JSON`: The JSON data type allows for the storage and querying of JSON formatted data, supporting functions for extracting, manipulating, and transforming JSON content within the database., Examples: ['CREATE TABLE example (j JSON);', 'INSERT INTO example VALUES (\'{{ "family": "anatidae", "species": [ "duck", "goose", "swan", null ] }}\');', "SELECT j->'$.family' FROM example;", "SELECT json_extract(j, '$.species[0]') FROM example;", "SELECT json_extract_string(j, '$.family') FROM example;"]
Here is the schema of the DuckDB database that the SQL query will run on:
{schema}
Question:
Here is the question or an instruction the user provided:
{question}
Write a DuckDB SQL query for the given question!
Answer:
```
| 2024-11-13T01:28:32.284939 | 5 | 0.6 | 14 | 0.785714 | 6 | 0.333333 | 48 | 0.729167 | 2 | 1 | 75 | 0.706667 |
openrouter | qwen/qwen-2.5-coder-32b-instruct | custom_8348795 | You are a DuckDB SQL Query Writing Assistant. You only respond with a DuckDB SQL query that answers the user's question.
Here are some DuckDB SQL syntax specifics you should be aware of:
- DuckDB uses double quotes (") for identifiers that contain spaces or special characters, or to force case-sensitivity, and single quotes (') to define string literals
- DuckDB can query CSV, Parquet, and JSON directly without loading them first, e.g. `SELECT * FROM 'data.csv';`
- DuckDB supports CREATE TABLE AS (CTAS): `CREATE TABLE new_table AS SELECT * FROM old_table;`
- DuckDB queries can start with FROM, and optionally omit SELECT *, e.g. `FROM my_table WHERE condition;` is equivalent to `SELECT * FROM my_table WHERE condition;`
- DuckDB allows you to use SELECT without a FROM clause to generate a single row of results or to work with expressions directly, e.g. `SELECT 1 + 1 AS result;`
- DuckDB supports attaching multiple databases, using the ATTACH statement: `ATTACH 'my_database.duckdb' AS mydb;`. Tables within attached databases can be accessed using the dot notation (.), e.g. `SELECT * FROM mydb.table_name`. The default database doesn't require the dot notation to access tables. The default database can be changed with the USE statement, e.g. `USE my_db;`.
- DuckDB is generally more lenient with implicit type conversions (e.g. `SELECT '42' + 1;` - Implicit cast, result is 43), but you can always be explicit using `::`, e.g. `SELECT '42'::INTEGER + 1;`
- DuckDB can extract parts of strings and lists using [start:end] or [start:end:step] syntax. Indexes start at 1. String slicing: `SELECT 'DuckDB'[1:4];`. Array/List slicing: `SELECT [1, 2, 3, 4][1:3];`
- DuckDB has a powerful way to select or transform multiple columns using patterns or functions. You can select columns matching a pattern: `SELECT COLUMNS('sales_.*') FROM sales_data;` or transform multiple columns with a function: `SELECT AVG(COLUMNS('sales_.*')) FROM sales_data;`
- DuckDB has an easy way to include/exclude or modify columns when selecting all: e.g. Exclude: `SELECT * EXCLUDE (sensitive_data) FROM users;` Replace: `SELECT * REPLACE (UPPER(name) AS name) FROM users;`
- DuckDB has a shorthand for grouping/ordering by all non-aggregated/all columns. e.g `SELECT category, SUM(sales) FROM sales_data GROUP BY ALL;` and `SELECT * FROM my_table ORDER BY ALL;`
- DuckDB can combine tables by matching column names, not just their positions using UNION BY NAME. E.g. `SELECT * FROM table1 UNION BY NAME SELECT * FROM table2;`
- DuckDB has an intuitive syntax to create List/Struct/Map and Array types. Create complex types using intuitive syntax. List: `SELECT [1, 2, 3] AS my_list;`, Struct: `{{{{'a': 1, 'b': 'text'}}}} AS my_struct;`, Map: `MAP([1,2],['one','two']) as my_map;`. All types can also be nested into each other. Array types are fixed size, while list types have variable size. Compared to Structs, MAPs do not need to have the same keys present for each row, but keys can only be of type Integer or Varchar. Example: `CREATE TABLE example (my_list INTEGER[], my_struct STRUCT(a INTEGER, b TEXT), my_map MAP(INTEGER, VARCHAR), my_array INTEGER[3], my_nested_struct STRUCT(a INTEGER, b Integer[3]));`
- DuckDB has an intuitive syntax to access struct fields using dot notation (.) or brackets ([]) with the field name. Map fields can be accessed by brackets ([]).
- DuckDB's way of converting between text and timestamps, and extract date parts. Current date as 'YYYY-MM-DD': `SELECT strftime(NOW(), '%Y-%m-%d');` String to timestamp: `SELECT strptime('2023-07-23', '%Y-%m-%d')::TIMESTAMP;`, Extract Year from date: `SELECT EXTRACT(YEAR FROM DATE '2023-07-23');`
- Column Aliases in WHERE/GROUP BY/HAVING: You can use column aliases defined in the SELECT clause within the WHERE, GROUP BY, and HAVING clauses. E.g.: `SELECT a + b AS total FROM my_table WHERE total > 10 GROUP BY total HAVING total < 20;`
- DuckDB allows generating lists using expressions similar to Python list comprehensions. E.g. `SELECT [x*2 FOR x IN [1, 2, 3]];` Returns [2, 4, 6].
- DuckDB allows chaining multiple function calls together using the dot (.) operator. E.g.: `SELECT 'DuckDB'.replace('Duck', 'Goose').upper(); -- Returns 'GOOSEDB';`
- DuckDB has a JSON data type. It supports selecting fields from the JSON with a JSON-Path expression using the arrow operator, -> (returns JSON) or ->> (returns text) with JSONPath expressions. For example: `SELECT data->'$.user.id' AS user_id, data->>'$.event_type' AS event_type FROM events;`
- DuckDB has built-in functions for regex: regexp_matches(column, regex), regexp_replace(column, regex, replacement), and regexp_extract(column, regex).
- DuckDB has a way to quickly get a subset of your data with `SELECT * FROM large_table USING SAMPLE 10%;`
DuckDB Functions:
`count`: Calculates the total number of rows returned by a SQL query result. This function is commonly used to determine the row count of a SELECT operation., Parameters: ['result: The result object']
`sum`: Calculates the total of all non-null values from the given input., Parameters: ['arg: Values to be summed up.']
`sum`: Calculates the total of all non-null values in a specified column or expression across rows., Parameters: ['arg: Values to be aggregated']
`max`: Returns the maximum value from the input data., Parameters: ['arg: The column or expression to evaluate', 'n: Number of top values to return(optional)', 'ORDER BY: Specifies sort order before function(optional)']
`max`: Returns the largest value from all values in a specified column or expression., Parameters: ['arg: expression to evaluate maximum', "n: top 'n' value list size(optional)"]
`coalesce`: This function evaluates provided expressions in order and returns the first non-NULL value found. If all expressions evaluate to NULL, then the result is NULL., Parameters: ['expr: An expression to evaluate', '...: Additional expressions to evaluate(optional)']
`trunc`: Truncates a number by removing the fractional part, essentially returning the integer part of the number without rounding., Parameters: ['x: The number to truncate.']
`date_trunc`: Truncates a date or timestamp to the specified precision, effectively setting smaller units to zero or to the first value of that unit (e.g., the first day of the month)., Parameters: ['part: Specifies the truncation precision', 'date: The date or timestamp value']
`row_number`: Generates a unique incrementing number for each row within a partition, starting from 1., Parameters: ['ORDER BY: Specify sort order for numbers.(optional)', 'PARTITION BY: Define groups for numbering.(optional)', 'RANGE/ROWS: Define rows for frame.(optional)', 'EXCLUDE: Exclude specific rows from frame.(optional)', 'WINDOW: Reuse a window definition.(optional)']
`unnest`: The function expands lists or structs into separate rows or columns, reducing nesting by one level., Parameters: ['list_or_struct: The list or struct to unnest.', 'recursive: Unnest multiple levels or not.(optional)', 'max_depth: Limit depth of unnesting.(optional)']
`prompt`: This function allows you to prompt large language models to generate text or structured data as output., Parameters: ['prompt_text: Text input for the model.', 'model: Model to use for prompt.(optional)', 'temperature: Model temperature value setting.(optional)', 'struct: Output schema for struct result.(optional)', 'struct_descr: Field descriptions for struct.(optional)', 'json_schema: Schema for JSON output format.(optional)']
`min`: Returns the minimum value from a set of numeric values., Parameters: ['value_column: Column containing numeric values.', 'ignore_nulls: Ignore NULL values if true.(optional)', 'filter_condition: Condition to filter rows.(optional)']
`min`: Finds the smallest value in a group of input values., Parameters: ['expression: The input value to consider']
`concat`: Concatenates multiple strings together into a single string., Parameters: ['string: String to concatenate']
`avg`: Calculates the average of non-null values., Parameters: ['arg: Data to be averaged']
`lower`: Converts a given string to lower case, commonly used for normalization in text processing., Parameters: ['string: String to be converted']
`read_csv_auto`: Automatically reads a CSV file and infers the data types of its columns., Parameters: ['file_path: Path to the CSV file', 'MD_RUN: Execution control parameter(optional)']
`read_parquet`: Reads Parquet files and treats them as a single table, supports reading multiple files via a list or glob pattern., Parameters: ['path_or_list_of_paths: Path(s) to Parquet file(s)', 'binary_as_string: Load binary as strings(optional)', 'encryption_config: Encryption configuration settings(optional)', 'filename: Include filename column result(optional)', 'file_row_number: Include file row number(optional)', 'hive_partitioning: Interprets Hive partition paths(optional)', 'union_by_name: Unify columns by name(optional)']
`strftime`: Converts timestamps or dates to strings based on a specified format pattern., Parameters: ['timestamp: Input date or timestamp value', 'format: Pattern for string conversion']
`array_agg`: Returns a list containing all values of a column, affected by ordering., Parameters: ['arg: Column to aggregate values']
`regexp_matches`: The function checks if a given string contains a specified regular expression pattern and returns `true` if it does, and `false` otherwise., Parameters: ['string: The input string to search', 'pattern: The regex pattern to match', 'options: Regex matching options string(optional)']
`replace`: Replacement scans in DuckDB allow users to register a callback that gets triggered when a query references a non-existent table. The callback can replace this table with a custom table function, effectively 'replacing' the non-existent table in the query execution process., Parameters: ['db: Database object where replacement applies', 'replacement: Handler for when table is missing', 'extra_data: Extra data given to callback(optional)', 'delete_callback: Cleanup for extra data provided(optional)']
`round`: Rounds a numeric value to a specified number of decimal places., Parameters: ['v: The number to round', 's: Decimal places to round to']
`length`: Returns the length of a string, Parameters: ['value: String to measure length of']
`query`: Table function query extracts statements from a SQL query string and outputs them as `duckdb_extracted_statements` objects. It is utilized to dissect SQL queries and obtain individual statements for further processing, enabling preparation or analysis of each separate statement., Parameters: ['connection: Database connection object', 'query: SQL query to extract from', 'out_extracted_statements: Object for extracted statements']
`read_json_auto`: Automatically infers the schema from JSON data and reads it into a table format., Parameters: ['filename: Path to the JSON file.', 'compression: File compression type.(optional)', 'auto_detect: Auto-detect key names/types.(optional)', 'columns: Manual specification of keys/types.(optional)', 'dateformat: Date format for parsing dates.(optional)', 'format: JSON file format.(optional)', 'hive_partitioning: Hive partitioned path interpretation.(optional)', 'ignore_errors: Ignore parse errors option.(optional)', 'maximum_depth: Max depth for schema detection.(optional)', 'maximum_object_size: Max size of JSON object.(optional)', 'records: JSON record unpacking option.(optional)', 'sample_size: Number of objects for sampling.(optional)', 'timestampformat: Timestamp parsing format.(optional)', 'union_by_name: Unify schemas of files.(optional)']
`range`: Creates a list of values within a specified numeric range, starting inclusively from 'start' and stopping exclusively before 'stop', with an optional step interval., Parameters: ['start: The inclusive start point.(optional)', 'stop: The exclusive end point.', 'step: Interval between each number.(optional)']
`range`: The table function generates a sequential list of values starting from a specified number, incrementing by a given step, up to but not including an end number., Parameters: ['start: Start of the range(optional)', 'stop: End of the range (exclusive)', 'step: Increment between values(optional)']
`date_diff`: Computes the number of specified partition boundaries between two dates (or timestamps)., Parameters: ['part: Specifies the date/timestamp partition', 'startdate: The start date or timestamp', 'enddate: The end date or timestamp']
`lag`: The window function provides the value from a prior row within the same result set partition., Parameters: ['expression: Column or expression to evaluate', 'offset: Number of rows back(optional)', 'default_value: Default value if no offset(optional)']
`year`: Extracts the year component from a date or timestamp value., Parameters: ['date: Date from which to extract year', 'timestamp: Timestamp from which to extract year']
`now`: Obtains the current date and time at the start of the current transaction, using the system's time zone., Parameters: ['None: No parameters required(optional)']
`group_concat`: Concatenates column string values using a specified separator, respecting the provided order., Parameters: ['arg: The column to concatenate', 'sep: Separator between concatenated values(optional)', 'ORDER BY: Specifies order of concatenation(optional)']
`regexp_extract`: If a string matches a given regular expression pattern, it returns the specified capturing group or groups with optional capture group names., Parameters: ['string: Input string to search in.', 'pattern: Regex pattern to match.', 'group: Specifies which group to capture.(optional)', 'name_list: Named capture groups struct.(optional)', 'options: Regex matching options.(optional)']
`upper`: Converts a given string to uppercase characters., Parameters: ['string: String to make uppercase']
`greatest`: Selects the largest value from a list of input values using lexicographical ordering., Parameters: ['x1: The first value to compare', 'x2: The second value to compare', '...: Additional values to compare(optional)', 'xn: Nth value to compare(optional)']
`row`: The function initiates the creation of a row in an appender by signaling the start of adding values for a new row., Parameters: ['appender: Appender to start new row']
`getvariable`: The function retrieves the value of a previously set SQL-level variable, returning NULL if the variable is not defined., Parameters: ['variable_name: The name of the variable']
`quarter`: Extracts the quarter (1 to 4) from a date value., Parameters: ['date: The input date to evaluate.']
`strptime`: Converts a string to a timestamp according to a specified format string, throwing an error on failure., Parameters: ['text: Input string to convert', 'format: String format to parse']
`substring`: Extracts a substring from a given string starting at a specified position and with a specified length., Parameters: ['string: The original string to extract from', 'start: Starting position for extraction', 'length: Number of characters to extract']
`add`: Adds two integer values and returns the sum., Parameters: ['a: First integer to add', 'b: Second integer to add', 'result: Sum of a and b']
`date_part`: Extracts a specified subfield from a timestamp and returns its numeric value, equivalent to the SQL keyword 'extract'., Parameters: ['part: The subfield to extract from the timestamp or timestamptz.', 'timestamp: The input timestamp value to extract the subfield from.', 'interval: Extracts date part from interval.(optional)']
`json_extract`: Extracts JSON from a specified path within a JSON object or array., Parameters: ['json: The JSON object or array.', 'path: Path to extract data from.']
`json_extract_string`: Extracts a string (VARCHAR) value from a JSON object at a specified path, converting JSON data to text if possible., Parameters: ['json: The JSON object to extract from', 'path: The path to the desired value']
`rank`: The rank function assigns a rank to each row within a partition of a result set, allowing for potential gaps in the ranking when there are ties., Parameters: ['order_column: Column or expression for sorting', 'partition_column: Column to partition data by(optional)', 'alias: Alias name for result column(optional)']
`day`: The function extracts the day of the month from a given date., Parameters: ['date: Date value to extract from']
`list`: DuckDB provides an aggregate function that executes an aggregate operation over the elements within a list. This function can be utilized to apply any existing aggregate function, like `min`, `sum`, or `histogram`, across the elements of a list. This allows the aggregation of list data in a flexible manner., Parameters: ['list: List to aggregate values.', "name: Aggregate function's name to apply.", 'value: Optional extra parameters needed.(optional)']
`generate_series`: This function creates a list of values within a specified range where both endpoints are inclusive., Parameters: ['start: Inclusive start of range(optional)', 'stop: Inclusive stop of range', 'step: Difference between successive values(optional)']
`generate_series`: Creates a list of values from start to stop inclusively, with a specified step., Parameters: ['start: Inclusive start of the series(optional)', 'stop: Inclusive end of the series', 'step: Step increment between each value(optional)']
`datediff`: Calculates the number of specified partition boundaries between two dates., Parameters: ['part: Time unit to measure', 'startdate: The starting date', 'enddate: The ending date']
`left`: Extracts left-most characters from a string., Parameters: ['string: String to extract characters from', 'count: Number of left-most characters']
`trim`: Removes specified characters from both sides of a string, or spaces if no characters are specified., Parameters: ['string: The input string to trim', 'characters: Characters to remove from string(optional)']
`array_has_any`: Returns true if any element is present in both input lists., Parameters: ['list1: First list to compare.', 'list2: Second list to compare.']
`datetrunc`: Truncates a date or timestamp to a specified precision part, such as year, month, or day., Parameters: ['part: The precision to truncate to.', 'date: The date to truncate.', 'timestamp: The timestamp to truncate.']
`split_part`: Splits a string by a specified separator and returns the part at a given index., Parameters: ['string: The string to be split', 'separator: The delimiter to split by', 'index: 1-based index to retrieve']
`read_json`: Reads JSON files, inferring schema and format automatically from the data., Parameters: ['filename: Path to JSON file(s).', 'auto_detect: Auto-detect schema from data.(optional)', 'columns: Specified columns and types.(optional)', 'compression: File compression type detected.(optional)', 'format: Format of JSON data.(optional)', 'hive_partitioning: Choose Hive partitioning method.(optional)', 'ignore_errors: Ignore errors during parsing.(optional)', 'maximum_depth: Maximum schema detection depth.(optional)', 'maximum_object_size: Limit JSON object size bytes.(optional)', 'records: Read JSON as records.(optional)', 'sample_size: Sample objects for detection.(optional)', 'timestampformat: Format for parsing timestamps.(optional)', 'union_by_name: Unify multiple file schema types.(optional)']
`read_csv`: Reads CSV files into a DuckDB relation, automatically inferring configurations such as delimiters, headers, and column types unless specified otherwise., Parameters: ['all_varchar: Assume all columns as VARCHAR(optional)', 'allow_quoted_nulls: Allow quoted nulls conversion(optional)', 'auto_detect: Enable auto detection of parameters(optional)', 'auto_type_candidates: Types considered for auto detection(optional)', 'columns: Specify column names and types(optional)', 'compression: File compression type(optional)', 'dateformat: Date format for parsing dates(optional)', 'decimal_separator: Decimal separator of numbers(optional)', 'delimiter: Character separating columns in rows(optional)', 'delim: Character separating columns in rows(optional)', 'escape: String for escaping data chars(optional)', 'filename: Include filename in result(optional)', 'force_not_null: Do not match null string(optional)', 'header: File contains a header line(optional)', 'hive_partitioning: Interpret path as Hive partitioned(optional)', 'ignore_errors: Ignore rows with parsing errors(optional)', 'max_line_size: Maximum line size in bytes(optional)', 'names: Column names as a list(optional)', 'new_line: New line characters in file(optional)', 'normalize_names: Normalize column names(optional)', 'null_padding: Pad remaining columns with nulls(optional)', 'nullstr: String representing null value(optional)', 'parallel: Use parallel CSV reader(optional)', 'quote: Use quoting for data values(optional)', 'sample_size: Number of rows for sampling(optional)', 'sep: Delimiter character between columns(optional)', 'skip: Lines to skip at top(optional)', 'timestampformat: Format for parsing timestamps(optional)', 'types or dtypes: Column types by position/name(optional)', 'union_by_name: Unify schemas by column name(optional)', 'store_rejects: Store errors in reject tables(optional)', 'rejects_scan: Name for rejects scan table(optional)', 'rejects_table: Name for rejects errors table(optional)', 'rejects_limit: Limit faulty records stored(optional)', 'delim: Specifies column delimiter character(optional)']
`today`: Returns the current date at the start of the transaction., Parameters: []
`floor`: Rounds down a numeric value to the nearest integer., Parameters: ['x: Value to be rounded down']
`ends_with`: Checks if a string ends with a specified substring, returning true if it does and false otherwise., Parameters: ['string: The string to check', 'search_string: The ending substring']
`regexp_replace`: Replaces portions of a string matching a regular expression with a specified replacement string. Can replace globally with the 'g' option., Parameters: ['string: The string to search in.', 'pattern: The regular expression to match.', 'replacement: The string to replace with.', 'options: Options to modify behavior.(optional)']
`list_distinct`: Removes duplicates and NULL values from a list., Parameters: ['list: Input list to process']
`abs`: Calculates the absolute value of a given numeric input., Parameters: ['x: Input value for operation']
`len`: Calculates the length of a specified input, returning the number of elements or characters it contains., Parameters: ['input: The input whose length is calculated.', 'length_type: Type of length to compute.(optional)', 'ignore_nulls: Whether to ignore null values.(optional)']
`substr`: Extracts a substring from a string starting at a specified position and continuing for a specified length., Parameters: ['string: The string to extract from', 'start: Starting position of extract', 'length: Number of characters to extract']
`last_value`: Evaluates an expression at the last row of the current window frame., Parameters: ['expr: Expression to evaluate at last row', 'IGNORE NULLS: Skip nulls in evaluation(optional)']
`time_bucket`: Truncates the provided timestamp by the specified interval, allowing for optional offsets or origins to alter the bucketing alignment., Parameters: ['bucket_width: Interval to truncate by', 'timestamptz or date: Timestamp or date value', 'offset: Offset interval for buckets(optional)', 'origin: Origin timestamp for alignment(optional)', 'timezone: Time zone for calculation(optional)']
`read_json_objects`: Reads JSON objects from the given file(s), allowing for various formats and compressed files., Parameters: ['filename: Path to JSON file(s)', 'compression: Type of file compression utilized(optional)', 'format: Format of the JSON data(optional)', 'hive_partitioning: Enable Hive partitioning path(optional)', 'ignore_errors: Ignore JSON parsing errors(optional)', 'maximum_sample_files: Max sampled files for detection(optional)', 'maximum_object_size: Max size of JSON object(optional)', 'filename: Add extra filename column(optional)']
`duckdb_functions`: This table function lists all functions, including macros, within the DuckDB instance providing details such as their type, return type, parameters, and other relevant metadata., Parameters: ['database_name: Database holding this function', 'schema_name: Schema where function resides', 'function_name: SQL name of the function', 'function_type: Kind of function (e.g. scalar)', 'description: Description of this function(optional)', 'return_type: Data type name of return(optional)', "parameters: Function's parameter names(optional)", 'parameter_types: Data type names of parameters(optional)', 'varargs: Data type for variable arguments(optional)', 'macro_definition: SQL expression defining macro(optional)', 'has_side_effects: Indicates if function is pure', 'function_oid: Internal identifier for function']
`histogram`: Produces a map of keys as histogram buckets with corresponding counts based on input values., Parameters: ['arg: Input values to aggregate.']
`md5`: Computes the MD5 hash of a given string and returns it as a VARCHAR., Parameters: ['string: The input string value.']
`format`: Formats a string using specified parameters following the fmt syntax., Parameters: ['format: The format string used.', 'parameters: Values to replace placeholders.(optional)']
`array_length`: Returns the number of elements in a JSON array. If provided, the path specifies a location within the JSON structure where the array's length is determined., Parameters: ['json: The JSON string to evaluate', 'path: The path to the JSON array(optional)']
`duckdb_tables`: Provides metadata about base tables in DuckDB instance., Parameters: ['database_name: Name of the database containing this table.', 'database_oid: Internal identifier of the database.', 'schema_name: Name of the schema containing this table.', 'schema_oid: Internal identifier of the schema.', 'table_name: Name of the base table.', 'table_oid: Internal identifier of the table object.', 'internal: False if user-defined table.', 'temporary: Whether it is a temporary table.', 'has_primary_key: True if table defines PRIMARY KEY.', 'estimated_size: Estimated number of rows in table.', 'column_count: Number of columns in the table.', 'index_count: Number of associated indexes.', 'check_constraint_count: Number of active check constraints.', 'sql: SQL definition for the table.']
`to_json`: Converts a value to JSON format., Parameters: ['any: Value to convert to JSON']
`month`: Returns the month as an integer from a given date or timestamp., Parameters: ['date_or_timestamp: Input date or timestamp value']
`stddev`: Calculates the sample standard deviation of a set of non-null values., Parameters: ['x: Values to calculate deviation']
`first_value`: The function returns the value of the specified expression evaluated at the first row of the window frame., Parameters: ['expr: The expression to evaluate.', 'IGNORE NULLS: Ignore NULL values in frame.(optional)']
`parquet_schema`: The function queries the internal schema of a Parquet file, revealing details such as column names, types, and other metadata., Parameters: []
`string_agg`: Concatenates string values from a column with a specified separator in order, optionally sorted by a criterion., Parameters: ['arg: Column of string values.', 'sep: Separator between concatenated strings.', 'ORDER BY: Optional sorting criteria.(optional)']
`flatten`: Flatten concatenates elements of a list of lists into a single list, flattening one level., Parameters: ['list_of_lists: A list containing lists']
`hash`: Computes a UBIGINT hash value for a given input, useful for operations like joins, grouping or checking data equality across different systems., Parameters: ['value: Input to compute hash from']
`current_date`: Returns the current date at the start of the current transaction., Parameters: ['transaction: Start of current transaction(optional)', 'current: Current session or scope(optional)']
`position`: Locates the position of the first occurrence of "search_string" after position 1 in the provided "string". It returns 0 if "search_string" is not found., Parameters: ['search_string: The substring to find.', 'string: The string to search in.']
`row_to_json`: Converts a STRUCT type into a JSON object format, facilitating the transformation of complex data structures into JSON format for further processing or output., Parameters: ['list: A structure to convert']
`duckdb_columns`: This function provides metadata about columns in the DuckDB instance, including details on data type, default values, etc., Parameters: ['database_name: Name of the database containing column', 'database_oid: Internal database identifier', 'schema_name: Name of schema containing table', 'schema_oid: Internal schema identifier', 'table_name: Name of table containing column', 'table_oid: Internal table object identifier', 'column_name: SQL name of the column', 'column_index: Position of column in table', 'internal: True if column is built-in', 'column_default: Column default value in SQL(optional)', 'is_nullable: True if column accepts NULL', 'data_type: Column datatype name', 'data_type_id: Internal data type identifier', 'character_maximum_length: Always NULL, no length restrictions', 'numeric_precision: Storage precision of column values(optional)', 'numeric_precision_radix: Precision number-base in bits/positions(optional)', 'numeric_scale: Fractional digits for decimal type(optional)', 'comment: User-defined comment on column(optional)']
`contains`: Checks if a map contains a given key and returns true or false., Parameters: ['map: The map to search', 'key: The key to search']
`week`: The function extracts the ISO week number from a date or timestamp, starting with Monday as the first day of the week., Parameters: ['date: Input date to process']
`duckdb_secrets`: Provides metadata about the secrets available in the DuckDB instance., Parameters: ['redact: Controls if sensitive data is redacted.(optional)']
`max_by`: The function finds the row with the maximum value in a specified column and returns a different column's value from that row, allowing for an ordered result based on the specified column., Parameters: ['arg: Value to return from row.', 'val: Column to determine maximum.', 'n: Number of top rows.(optional)']
`alias`: A scalar function alias provides an alternative name for a function to improve readability or conform to conventions. For instance, 'uppercase' could be used to call 'UPPER'., Parameters: ['alias: The alternative function name', 'function_name: The actual function name', 'parameters: Parameters of the function(optional)']
`json_structure`: Returns the structure of a given JSON, defaulting to JSON if types are inconsistent., Parameters: ['json: Input JSON value to process.']
`first`: Returns the first value (null or non-null) from the given column, and is affected by specifying an order using ORDER BY to determine which value is first., Parameters: ['column: Target column to aggregate.', 'ORDER BY (optional): Order used to determine first.(optional)', 'FILTER (optional): Condition to filter rows.(optional)']
`percent_rank`: Calculates the relative rank of a row within its partition as `(rank() - 1) / (total partition rows - 1)`, outputting a value between 0 and 1., Parameters: ['window_specification: Defines row partition and order.(optional)', 'ORDER BY: Specifies the row order.(optional)']
`json_transform`: Transforms a JSON object into a specified nested type structure, enabling efficient extraction and type conversion., Parameters: ['json: The JSON data to transform.', 'structure: Desired structure for transformation.']
`random`: Generates a random floating-point number between 0.0 and 1.0., Parameters: ['none: No parameters are needed.']
`any_value`: This aggregate function returns the first non-null value from a column, particularly useful to obtain any non-null entry when the specific order is not crucial yet needs to handle initial null values., Parameters: ['arg: Input column with values']
`reverse`: Reverses the order of the characters in a given string., Parameters: ['string: The string to reverse']
`list_aggregate`: Executes a specified aggregate function on the elements within a list., Parameters: ['list: The input list to aggregate', 'name: Name of the aggregate function', 'additional_arguments: Arguments passed to aggregate(optional)']
`epoch_ms`: The function converts either a given timestamp to milliseconds since the epoch or milliseconds since the epoch to a timestamp., Parameters: ['ms: Milliseconds since epoch(optional)', 'timestamp: Timestamp to convert to ms(optional)']
`aggregate`: The scalar function for aggregate in DuckDB is designed to create a custom aggregate function. It facilitates aggregation of data over a column in a database and involves setting parameters, return types, and function operations such as state initialization, state updates, and finalization., Parameters: ['aggregate_function: Pointer to aggregate function', 'name: Name of the aggregate function(optional)', 'type: Logical type of parameter(optional)', 'state_size: Size of aggregate state(optional)', 'state_init: Initializes the state(optional)', 'update: Updates the aggregate state(optional)', 'combine: Merges two aggregation states(optional)', 'finalize: Produces final result from state(optional)', 'destroy: Destructs the aggregate state(optional)', 'extra_info: Stores additional information(optional)', 'error: Aggregate function error message(optional)', 'set: Set of aggregate functions(optional)', 'info: Retrieves extra info from info(optional)', 'con: Connection to database(optional)', 'function: Aggregate function to add(optional)', 'out_database: The result database object', 'out_error: Output error on failure(optional)', 'config: Optional configuration details(optional)']
`read_json_objects_auto`: Reads JSON objects from a file or files using automatically detected format settings., Parameters: ['filename: Path to JSON file or files', 'compression: Type for file compression(optional)', 'filename: Include filename in result(optional)', 'format: Format for JSON data(optional)', 'hive_partitioning: Use Hive partitioned paths(optional)', 'ignore_errors: Continue ignoring parse errors(optional)', 'maximum_sample_files: Max files for auto-detection(optional)', 'maximum_object_size: Max bytes per JSON object(optional)']
`duckdb_constraints`: Provides metadata about constraints in the DuckDB instance., Parameters: []
`cos`: Computes the cosine of a given number, returning its trigonometric value., Parameters: ['x: Input number for calculation']
`sin`: Calculates the sine of a given angle expressed in radians., Parameters: ['value: Angle in radians to calculate sine']
`array_transform`: Transforms each element of the input list using a lambda function, returning a new list with the results., Parameters: ['list: The input list to transform', 'lambda: Function to apply to elements']
`datepart`: Extracts specified subfields from a TIMESTAMPTZ and returns them as a struct., Parameters: ['part: Subfield to extract', 'timestamptz: Input timestamp with time zone', '[part, ...]: List of subfields to extract(optional)']
`map`: The function returns an empty map., Parameters: ['(none): No parameters are required']
`least`: Selects the smallest value from a list of inputs., Parameters: ['x1, x2, ...: A list of numeric values.']
`epoch`: Converts a timestamp to seconds since the epoch (1970-01-01)., Parameters: ['timestamp: Timestamp to convert to seconds.']
`nextval`: Retrieves the next value from a specified sequence., Parameters: ['sequence_name: The name of the sequence']
`pragma_storage_info`: The function returns detailed storage information for a specified table, including metrics like compression type and storage chunk details., Parameters: ['table_name: Name of the table.']
`ceil`: Rounds a numeric value upward to the nearest integer., Parameters: ['x: The number to round up']
`list_concat`: Concatenates two lists into one., Parameters: ['list1: The first list to concatenate.', 'list2: The second list to concatenate.']
`median`: Finds the middle value of a dataset, averaging the two middle values for an even-sized array., Parameters: ['x: Values to find middle value']
`uuid`: Generates a random UUID as a string., Parameters: []
`radians`: Converts an angle measured in degrees to an equivalent angle in radians., Parameters: ['x: Angle in degrees to convert.']
`dayname`: Returns the English name of the weekday for a given date or timestamp., Parameters: ['date: A date to extract weekday.', 'timestamp: A timestamp to extract weekday.(optional)']
`embedding`: The function generates text embeddings using OpenAI's models., Parameters: ['my_text_column: Column containing text for embedding', 'model: Model type for embeddings(optional)']
`levenshtein`: Calculates the minimum number of single-character edits required to change one string into another, considering characters of different cases as distinct., Parameters: ['s1: The first string to compare', 's2: The second string to compare']
`acos`: Computes the arccosine of the input value., Parameters: ['x: Input value for arccosine.']
`timezone`: The function retrieves or sets a timestamp within a specified time zone, effectively converting between "local" and UTC times., Parameters: ['text: Specified time zone name or abbreviation', 'timestamp: The date and time to convert', 'timestamptz: Timestamp with time zone to convert']
`duckdb_views`: The function provides metadata about database views, including information on view names, schemas, and definitions., Parameters: []
`json_object`: Creates a JSON object from key-value pairs., Parameters: ['key: Key for the JSON object.', 'value: Value for the JSON object.']
`decode`: Converts a BLOB to a VARCHAR, failing if the BLOB is not valid UTF-8., Parameters: ['blob: The BLOB to convert']
`array_contains`: Checks if a given element exists in a list and returns true if it does., Parameters: ['list: The list to search', 'element: Element to search in list']
`hour`: Extracts the hour component from a given temporal value., Parameters: ['date: The date or timestamp value']
`array_cosine_similarity`: Computes the cosine similarity between two arrays of the same size, with elements that cannot be NULL., Parameters: ['array1: First array of values', 'array2: Second array of values']
`minute`: Extracts the minute part from a timestamp or interval., Parameters: ['timestamp: Extract minute from this timestamp']
`filter`: Constructs a list from elements of the input list for which a lambda function returns true., Parameters: ['list: Input list to be filtered.', 'lambda: Condition for filtering elements.']
`glob`: The function returns filenames located at the specified path using glob pattern syntax., Parameters: ['search_path: Specifies path using glob patterns']
`instr`: Returns the position of the first occurrence of the search string in another string, returning 0 if not found., Parameters: ['string: Input string to search within', 'search_string: String to find in input']
`string_to_array`: Splits a string into an array using the specified separator., Parameters: ['string: The input text to split.', 'separator: Character(s) defining split points.']
`concat_ws`: Concatenates multiple strings together with a specified separator in between each string., Parameters: ['separator: Separator placed between strings.', 'string: Strings to be concatenated together.']
`to_timestamp`: Converts a string into a timestamp using a specified format., Parameters: ['string: Input string to convert', 'format: Date format of the string']
`split`: Splits a string into a list of substrings based on a specified separator., Parameters: ['string: Input string to be split', 'separator: Character or string delimiter']
`power`: Calculates the result of raising a given number to an exponent value., Parameters: ['base: The number to raise', 'exponent: The power to raise by']
`last_day`: Calculates the last day of the month for a given date., Parameters: ['date: Input date to evaluate']
`json_merge_patch`: Merges two JSON documents together, updating the first document with keys and values from the second., Parameters: ['json1: First JSON document to merge', 'json2: Second JSON document to merge']
`lead`: Evaluates the expression at the row offset rows after the current row within the window frame. If there is no such row, a default value is returned., Parameters: ['expr: Expression evaluated on the row', 'offset: Number of rows to offset(optional)', 'default: Value to return if no row(optional)', 'IGNORE NULLS: Ignore nulls when offsetting(optional)']
`struct_pack`: Creates a STRUCT with specified keys and values., Parameters: ['name: Name of the struct entry', 'any: Value of the struct entry']
`array_filter`: Constructs a list from elements of the input list for which a specified condition returns true., Parameters: ['list: Input list to be filtered', 'lambda: Function returning boolean condition']
`list_aggr`: Executes a specified aggregate function on elements of a list., Parameters: ['list: The list of elements.', 'name: Aggregate function name.']
`date_sub`: Calculates the number of complete date part intervals between two date values., Parameters: ['part: type of interval to calculate', 'startdate: starting date for calculation', 'enddate: ending date for calculation']
`lpad`: Pads the input string with a specified character from the left until it reaches a desired length., Parameters: ['string: The input string to modify', 'count: The total length desired', 'character: Character used for padding']
`regexp_split_to_array`: This function splits a string at each occurrence of the regular expression, returning an array of substrings., Parameters: ['string: String to be split into array.', 'regex: Regular expression delimiter pattern.', 'options: Regular expression matching options.(optional)']
`map_from_entries`: Returns a map created from an array of key-value struct entries., Parameters: ['entries: Array of key-value entries.']
`duckdb_schemas`: Provides metadata about available schemas in the DuckDB instance., Parameters: ["oid: Schema object's internal identifier.", 'database_name: Database containing this schema name.', "database_oid: Database's internal identifier.", 'schema_name: SQL name of the schema.', 'internal: True if internal schema.', 'sql: Always NULL.']
`duckdb_settings`: The function provides metadata about current DuckDB settings., Parameters: ['name: Name of the setting', 'value: Current value of the setting', 'description: Description of the setting', "input_type: Logical datatype of setting's value"]
`str_split`: Splits a given string into parts based on a specified separator, returning an array of the split segments., Parameters: ['string: The text input to split.', 'separator: Delimiter to split the string.']
`bar`: Calculates and displays a progress bar during execution of long-running queries., Parameters: ['enable_progress_bar: Enable or disable progress bar(optional)']
`age`: Calculates the interval between two timestamps, or between a given timestamp and the current date when only one argument is provided., Parameters: ['timestamp: Timestamp to subtract from current date, or the minuend when two arguments are given']
`query_table`: The function returns a table or the union of tables specified by their names., Parameters: ['tbl_names: Names of tables to use', 'by_name: Union tables by name(optional)']
`duckdb_indexes`: The function provides metadata about secondary indexes, including their names, uniqueness, and associated tables, within a DuckDB instance., Parameters: ['database_name: Name of the database', 'database_oid: Database internal identifier', 'schema_name: SQL name of the schema', 'schema_oid: Schema internal identifier', 'index_name: SQL name of the index', 'index_oid: Object identifier of the index', 'table_name: Name of the table', 'table_oid: Table object internal identifier', 'is_unique: Indicates uniqueness of index', 'is_primary: Always false for secondary', 'expressions: Always null', 'sql: Index SQL definition']
`regr_intercept`: Calculates the intercept of the linear regression line in a dataset, given an independent and dependent variable., Parameters: ['y: Dependent variable data', 'x: Independent variable data']
`regr_slope`: Returns the slope of the linear regression line, where the independent variable is used to calculate its change with the dependent variable., Parameters: ["y: The dependent variable's values.", "x: The independent variable's values."]
`log`: Calculates the natural logarithm of a given input value, providing an essential function for mathematical and statistical computations., Parameters: ['value: The number to compute']
`version`: Returns the currently active version of DuckDB as a string (e.g. `v1.0.0`)., Parameters: []
`duckdb_keywords`: Retrieves DuckDB's keywords and reserved words, including their categories., Parameters: []
`list_unique`: Counts the number of unique elements in a list., Parameters: ['list: Input list to count uniqueness.', 'element: Element type within the list.(optional)']
`read_ndjson_objects`: Reads newline-delimited JSON objects from a specified file or set of files., Parameters: ['compression: The compression type for file.(optional)', 'filename: Include filename in result.(optional)', 'format: Specify JSON format to use.(optional)', 'hive_partitioning: Use Hive partitioned path.(optional)', 'ignore_errors: Ignore parse errors if possible.(optional)', 'maximum_sample_files: Max JSON files sampled.(optional)', 'maximum_object_size: Max JSON object size (bytes).(optional)']
`current_setting`: Returns the current value of a specified configuration setting in DuckDB., Parameters: ['setting_name: Name of the configuration setting']
`array_distinct`: Removes duplicates and NULL values from a list, but does not preserve the original order., Parameters: ['list: The list to process.']
`duckdb_databases`: The table function returns a list of databases accessible from the current DuckDB process, including both the startup database and any attached databases., Parameters: []
`list_value`: Creates a list value from a specified logical type and an array of values. This list value can be used within DuckDB for handling columnar data that involves a list or array structure., Parameters: ['type: Logical data type for elements.', 'values: Array of values to list.', 'value_count: Number of values in array.']
`to_base`: Converts an integer to a string representation in a specified base., Parameters: ['value: Integer value to convert', 'base: Base for number conversion']
`list_contains`: Returns true if a specified element is found within the given list., Parameters: ['list: The list to search in', 'element: Element to locate in list']
`from_json`: Transforms JSON into a specified nested structure., Parameters: ['json: The JSON input data.', 'structure: Specifies desired output structure.']
`pi`: The function returns the mathematical constant pi., Parameters: []
`dense_rank`: Ranks the current row without creating gaps in the ranking, counting peer groups all having the same rank., Parameters: ['partition_by_clause: Defines partitioning of result set(optional)', 'order_by_clause: Specifies attributes for ordering', 'frame_clause: Limits window frame range(optional)']
`repeat`: Repeats a given string a specified number of times, creating a new concatenated string as the result., Parameters: ['string: The input string to repeat.', 'count: The number of repetitions wanted.']
`current_schema`: Returns the name of the currently active schema, which defaults to 'main'., Parameters: []
`struct_extract`: This function extracts a specific entry from a STRUCT using either a name or an index., Parameters: ['struct: The struct to extract from', 'entry: The name of the entry(optional)']
`get_current_timestamp`: Returns the current date and time at the start of the current transaction., Parameters: []
`regexp_extract_all`: Splits the input string using the specified regex and retrieves all matches for the specified capturing group., Parameters: ['string: Input string to process', 'regex: Regular expression pattern', 'group: Match group to extract(optional)', 'options: Regular expression options(optional)']
`repeat`: The function generates a table with repeated rows of specified data values for a given number of times., Parameters: ['repeat_row: Values for the repeated rows.', 'num_rows: Number of rows to generate.']
`read_text`: Reads the content of specified files or patterns as a `VARCHAR`, validating for UTF-8 encoding., Parameters: ['source: File path or glob pattern']
`last`: Returns the last value of a column within a group of rows ordered by an expression., Parameters: ['column: The column to evaluate.', 'order by expression: Column or expression for sorting.(optional)', 'partition by expression: Column or expression for partitioning.(optional)', 'frame: Specifies window frame for function.(optional)']
`encode`: Converts a STRING to a BLOB, transforming UTF-8 characters into literal encoding., Parameters: ['string: The input string to encode.']
`dayofweek`: Extracts the numeric representation of the day of the week from a given date, where Sunday is represented as 0 and Saturday as 6., Parameters: ['date: The date to evaluate.']
`enum_range`: Returns all values of the given ENUM type as an array, allowing easy access to the possible values., Parameters: ['enum: Input enum type reference']
`json_extract_path`: Extracts JSON from a JSON object at a specified path, returning a result in JSON format., Parameters: ['json: The source JSON object.', 'path: The JSON path to extract.']
`array_slice`: Extracts a sublist from an array using specified start, end, and optional step values, similar to Python slicing. Handles negative indices., Parameters: ['list: The list to be sliced', 'begin: Index to start slicing from', 'end: Index to stop slicing at', 'step: Step size for slicing(optional)']
`pragma_table_info`: Returns information about the columns in a table including details such as column name, type, nullability, default value, and if it's part of the primary key., Parameters: ['table_name: Name of the target table']
`arg_max`: Finds the values associated with the maximum criterion in a dataset, optionally returning the top-N values in descending order., Parameters: ['arg: Expression to evaluate at max', 'val: Criterion for determining maximum value', 'n: Top n values to return(optional)']
`typeof`: The function returns the data type of the given expression's result., Parameters: ['expression: Expression to determine data type']
`strip_accents`: Removes accents from a string., Parameters: ['string: Input string to process.']
`gen_random_uuid`: Generates and returns a random UUID similar to `eeccb8c5-9943-b2bb-bb5e-222f4e14b687`., Parameters: []
`starts_with`: Checks if a string begins with a specified substring., Parameters: ['string: The string to search in.', 'search_string: The string to search for.']
`damerau_levenshtein`: The function calculates the minimum number of edit operations needed to transform one string into another, allowing insertions, deletions, substitutions, or transpositions of adjacent characters, with case-sensitive comparison., Parameters: ['s1: First string input to compare', 's2: Second string input to compare']
`cardinality`: Returns the size of a map, i.e. the number of key-value entries it contains., Parameters: ['map: The map to measure']
`which_secret`: Determines and returns the secret being used based on a file path and secret type., Parameters: ['path: File path to check secret', 'secret_type: Type of the secret service']
`corr`: The correlation coefficient is calculated between two sets of data to measure the strength and direction of a linear relationship between them., Parameters: ['y: First variable for correlation', 'x: Second variable for correlation']
`translate`: Converts characters in a string based on specified mappings from one set of characters to another., Parameters: ['source: Input string to be modified', 'from: Characters to be replaced', 'to: Replacement characters']
`array_unique`: This function counts the unique elements in a list., Parameters: ['list: The list to evaluate']
`json_keys`: Returns the keys of a JSON object as a list of strings. If a path is specified, it returns keys of the JSON object at that path., Parameters: ['json: JSON object to extract keys', 'path: path within the JSON object(optional)']
`list_has_any`: Returns true if any elements exist in both given lists., Parameters: ['list1: First list to compare elements', 'list2: Second list to compare elements']
`map_extract`: Returns a list with the value corresponding to a specified key from the map or an empty list if the key is not present., Parameters: ['map: Input map to search within.', 'key: Key to find in map.']
`try_strptime`: Converts a string into a timestamp using specified format strings, returning NULL on failure., Parameters: ['text: String to be converted', 'format: Format to parse the string']
`array_position`: Returns the index of an element in the list, or NULL if it is not found., Parameters: ['list: The list to search through', 'element: The element to find']
`str_split_regex`: Splits a string into parts based on a specified regular expression pattern., Parameters: ['string: Input string to split', 'regex: Regular expression for splitting']
`to_date`: Converts a string representation of a date into a date object., Parameters: ['date_text: String representation of date', 'format: Date format for parsing']
`strpos`: Returns the location of the first occurrence of a substring within a string, counting from 1. Returns 0 if no match is found., Parameters: ['string: The main string to search.', 'search_string: Substring to search for.']
`dbgen`: The table function generates TPC-H benchmark data according to a specified scale factor., Parameters: ['catalog: Target catalog for data generation(optional)', 'children: Number of partitions for data(optional)', 'overwrite: Unused parameter for overwrite(optional)', 'sf: Scale factor for data size', 'step: Define partition generation step(optional)', 'suffix: Append suffix to table names(optional)']
`string_split`: Splits a given string using a specified separator and returns an array of the resulting substrings., Parameters: ['string: The string to be split', 'separator: Separator to split the string']
`struct_insert`: The function adds new field(s) or value(s) to an existing STRUCT with the given argument values, using bound variable names as entry names., Parameters: ['struct: The initial struct object.', 'name := any, ...: Name-value pairs to add.']
`truncate`: Deletes all rows from a specified table without using a WHERE clause., Parameters: ['table_name: Name of the table.']
`list_sort`: Sorts the elements of a list based on specified ordering and null placement options., Parameters: ['list: The list to be sorted.', 'order: Sort order: ASC or DESC.(optional)', 'null_order: NULL placement: FIRST or LAST.(optional)']
`epoch_ns`: Returns the total number of nanoseconds since the epoch for a given timestamp., Parameters: ['timestamp: The input timestamp to convert']
`sqrt`: Computes the square root of a given numerical value., Parameters: ['x: A number to find the root']
`current_localtimestamp`: Returns a `TIMESTAMP` representing the current local date and time in the local time zone as determined by the current time zone settings., Parameters: []
`map_entries`: Returns a list of structs containing key-value pairs from the map., Parameters: ['map: Map containing key-value pairs']
`duckdb_extensions`: Provides metadata about installed and loaded DuckDB extensions, including their name, status, and location., Parameters: ['extension_name: Name of the extension(optional)', 'loaded: Extension is currently loaded(optional)', 'installed: Extension is currently installed(optional)', 'install_path: Path of extension binary(optional)', 'description: Description of the extension(optional)', 'aliases: Alternative names for extension(optional)']
`seq_scan`: Performs a sequential scan on a specified table, returning all the rows without using an index., Parameters: ['table_name: Name of the table to scan.', 'columns: Columns to select from table.(optional)']
`duckdb_dependencies`: Provides metadata on dependencies between objects in the DuckDB instance., Parameters: ['classid: Always zero for this function.(optional)', 'objid: Internal id of the object.', 'objsubid: Always zero for this function.(optional)', 'refclassid: Always zero for this function.(optional)', 'refobjid: Internal id of the dependency.', 'refobjsubid: Always zero for this function.(optional)', 'deptype: Type of dependency (n/a).']
`test_all_types`: Generates a table with columns for various data types, displaying their minimum, maximum, and null values for testing purposes., Parameters: []
`duckdb_memory`: Provides metadata about DuckDB's buffer manager, detailing memory and disk usage for various components., Parameters: []
`stddev_samp`: Calculates the sample standard deviation., Parameters: ['x: Input data column for function']
`ntile`: Divides a dataset into a specified number of equally-sized buckets, assigning each row a bucket number ranging from 1 to the number of buckets., Parameters: ['num_buckets: Number of partitions for data distribution']
`isodow`: Returns the ISO numeric day of the week, where Monday is represented as 1 and Sunday as 7., Parameters: ['date: Date to calculate ISO weekday']
`monthname`: Returns the English name of the month for a given date or timestamp., Parameters: ['date: The date or timestamp input.']
`array_to_json`: Converts a LIST into a JSON array., Parameters: ['list: A list to convert']
`to_base64`: Converts a BLOB (binary large object) to a base64 encoded string., Parameters: ['blob: Input binary large object']
`array_extract`: Extracts a single element from a list based on a specified 1-based index position., Parameters: ['list: The list to extract from', 'index: The 1-based position index']
`map_keys`: Returns a list of all keys in the specified map., Parameters: ['map: The input map to query']
`dayofmonth`: Extracts the day part from a given date, representing the day of the month as an integer., Parameters: ['date: Date value to extract from']
`like_escape`: Returns true if the specified string matches the provided like pattern using case-sensitive matching, where an escape character is used to treat wildcard characters as literals., Parameters: ['string: The input string to match', 'like_specifier: Pattern to match the string', 'escape_character: Character to escape wildcards(optional)']
`weekofyear`: Calculates the ISO Week number from a given date., Parameters: ['date: Input date to evaluate', 'timestamp: Input timestamp to evaluate(optional)']
`dayofyear`: The function retrieves the day of the year from a given date, starting from 1 for January 1st., Parameters: ['date: The date to evaluate.']
`base64`: Converts a blob to a base64 encoded string., Parameters: ['blob: The input binary data.']
`yearweek`: The function returns the ISO year and 2-digit week number combined as a BIGINT in the form YYYYWW., Parameters: ['date: The input date to compute']
`map_values`: Returns a list of all values in a map., Parameters: ['map: The map input parameter']
`rtrim`: Removes specified characters from the right side of a string., Parameters: ['string: The string to trim', 'characters: Characters to remove from end(optional)']
`ltrim`: This function removes specified characters (or spaces by default) from the left side of a string., Parameters: ['string: Input string to process', 'characters: Characters to trim from left(optional)']
`to_days`: Constructs a day interval from an integer representing the number of days., Parameters: ['integer: Number of days as input']
`array_concat`: Concatenates two lists into a single list without modifying the original lists., Parameters: ['list1: The first list to concatenate', 'list2: The second list to concatenate']
`right`: Extracts a specified number of characters from the end of a string., Parameters: ['string: The input string', 'count: Number of characters to extract']
`to_minutes`: Constructs an interval representing a specified number of minutes., Parameters: ['integer: Number of minutes to construct']
`tpch_queries`: The table function returns all 22 predefined TPC-H queries with their respective identifiers., Parameters: []
`json_execute_serialized_sql`: Executes JSON serialized SQL statements and returns the resulting rows. Only one statement is executed at a time., Parameters: ['serialized_sql: JSON serialized SQL statement.']
`dsdgen`: Generates TPC-DS benchmark data based on specified scale factor., Parameters: ['sf: Set scale factor for data', 'overwrite: Overwrite existing data when true(optional)', 'suffix: Set file suffix for output(optional)']
`parquet_kv_metadata`: Returns custom key-value metadata defined in a Parquet file., Parameters: ['file_name: Path to the Parquet file', 'key: Metadata keys in BLOB format(optional)', 'value: Metadata values in BLOB format(optional)']
`pragma_version`: Retrieves the current version of DuckDB., Parameters: []
`listagg`: Concatenates string values from a specified column into a single string with a specified separator, ordered based on optional criteria., Parameters: ['arg: Column to concatenate values from', 'sep: Separator string for concatenation(optional)', 'ORDER BY: Optional ordering criteria for aggregation(optional)']
`decade`: Calculates the decade from a given date using the formula (year / 10)., Parameters: ['date: The input date value']
`list_pack`: Creates a `LIST` containing the provided argument values., Parameters: ['any: Values to include in list']
`hex`: Converts a blob to a VARCHAR using hexadecimal encoding., Parameters: ['blob: Blob to be converted to hex']
`list_slice`: Extracts a sublist from a list based on specified begin, end, and optional step indices, supporting negative values., Parameters: ['list: The list to be sliced', 'begin: Index to start slice from', 'end: Index to end slice at', 'step: Step size between elements(optional)']
`greatest_common_divisor`: Computes the greatest common divisor of two numbers., Parameters: ['x: First integer for GCD calculation', 'y: Second integer for GCD calculation']
`array_aggr`: Executes an aggregate function on the elements of a list., Parameters: ['list: The list of elements.', 'name: Aggregate function to apply.', 'additional_args: Additional arguments for function.(optional)']
`array_reduce`: Reduces elements of a list to a single value using a lambda function starting from the first element., Parameters: ['list: List to be reduced', 'lambda: Function applied to elements']
`regexp_escape`: Escapes special characters in a string to make it suitable for use in a regular expression, similar to Python's `re.escape`., Parameters: ['string: The input string to escape.']
`constant_or_null`: Returns `NULL` if the second argument is `NULL`, otherwise it returns the first argument., Parameters: ['arg1: The value to return.', 'arg2: Conditional check for NULL.']
`json_deserialize_sql`: Deserializes JSON serialized SQL statements back into SQL strings., Parameters: ['json: The JSON object to deserialize']
`datesub`: Calculates the number of complete partitions (e.g., months) between two dates or timestamps based on the specified part., Parameters: ['part: Date part to evaluate', 'startdate: Start date or timestamp', 'enddate: End date or timestamp']
`json_transform_strict`: Transforms JSON according to a specified structure, ensuring type casting is strictly followed and throwing an error on failure., Parameters: ['json: The JSON object to transform.', 'structure: Structure for transformation casting.']
`array_indexof`: Returns the index of the specified element in the list and returns NULL if not found., Parameters: ['list: List to search within', 'element: Element to find in list']
`millisecond`: Extracts the sub-minute millisecond component from a timestamp., Parameters: ['timestamp: Timestamp to extract from.']
`union_tag`: Retrieve the currently selected tag of a union as an Enum value., Parameters: ['union: The union to inspect']
`json_array_length`: Returns the number of elements in a JSON array, or 0 if it's not a JSON array. If a path is specified, it returns the number of elements at that path., Parameters: ['json: The JSON array to evaluate.', 'path: Path in JSON to evaluate.(optional)']
`array_reverse_sort`: Sorts a list in reverse order., Parameters: ['list: The list to sort', 'null_order: Order for null values(optional)']
`list_filter`: The function constructs a list from elements of the input list for which a given lambda function returns true., Parameters: ['list: The input list to filter', 'lambda: Function to test elements']
`rpad`: Pads a string with a specified character from the right until it reaches a certain length., Parameters: ['string: The input string to pad', 'count: Target length of padded string', 'character: Character to use for padding']
`transaction_timestamp`: Returns the current date and time at the start of the current transaction., Parameters: []
`enum_last`: Returns the last value of the input enum type., Parameters: ['enum: The enum type to examine']
`array_dot_product`: Alias for computing the inner product of two arrays., Parameters: ['array1: First array for calculation', 'array2: Second array for calculation']
`list_element`: The function extracts the nth (1-based) value from a list., Parameters: ['list: The list to be indexed', 'index: Position to extract element from']
`isfinite`: Checks if a floating point value is finite, returning true for finite numbers and false for infinity or NaN values., Parameters: ['x: The value to be checked.']
`to_milliseconds`: Constructs an interval of milliseconds based on the provided integer value., Parameters: ['integer: Number of milliseconds interval']
`regexp_full_match`: The function checks if the entire string matches the given regular expression and returns `true` if it does., Parameters: ['string: The input string to match', 'regex: The regular expression pattern', 'options: Options for regex; controls behavior(optional)']
`map_contains`: Determines if a map has a specified key., Parameters: ['map: The map to check.', 'key: The key to search.']
`to_centuries`: Constructs an interval representing a duration of centuries based on the integer input., Parameters: ['integer: Number of centuries to construct']
`epoch_us`: Converts a timestamp to the total number of microseconds since the epoch., Parameters: ['timestamp: Timestamp to convert to microseconds', 'time zone (for Timestamptz): Time zone for this timestamp(optional)']
`to_years`: Constructs an interval of years from an integer value., Parameters: ['integer: Number of years to construct']
`array_inner_product`: Computes the inner product between two non-null arrays of the same size., Parameters: ['array1: First array for computation', 'array2: Second array for computation']
`currval`: Returns the current value of a specified sequence after it has been incremented at least once via the `nextval` function., Parameters: ['sequence_name: Name of the sequence.']
`list_extract`: Extracts the nth element from a list, indexing from 1., Parameters: ['list: The list to extract from', 'index: The element position to retrieve']
`enum_range_boundary`: Returns an array representing the range between two enum values, allowing nulls to extend the range to the enum's boundaries., Parameters: ['enum1: Start value of the range.(optional)', 'enum2: End value of the range.(optional)']
`signbit`: Determines if the sign bit of a number is set, indicating a negative value., Parameters: ['x: Value to check sign bit']
`array_cross_product`: Computes the cross product of two non-NULL arrays, each containing exactly three elements., Parameters: ['array1: First array, non-NULL, three elements', 'array2: Second array, non-NULL, three elements']
`bitstring`: The function creates a zero-padded bitstring of a specified length based on the input bitstring., Parameters: ['bitstring: Input bitstring to be padded', 'length: Desired length of bitstring']
`length_grapheme`: Calculates the number of grapheme clusters in a given string, which may differ from the number of characters if the string includes combined emojis or accented characters., Parameters: ['string: Input string for processing']
`apply`: Applies a given lambda function to each element of a list, resulting in a transformed list., Parameters: ['list: A list of elements to transform.', 'lambda: The transformation function.', 'index: Optional parameter for index.(optional)']
`sign`: Computes the sign of a number, returning -1 for negative numbers, 0 for zero, and 1 for positive numbers., Parameters: ['value: Value to find sign of']
`array_aggregate`: Executes an aggregate function on list elements., Parameters: ['list: List of elements to aggregate.', 'name: Name of the aggregate function.']
`md5_number`: Computes the MD5 hash of a string, returning it as a HUGEINT., Parameters: ['string: Input string for hashing']
`error`: Sets an error message for a scalar function during its execution, indicating a failure in processing due to some condition., Parameters: ['info: Information about the function.', 'error: Error message to set.']
`parse_filename`: This function returns the last component of a file path, similar to `os.path.basename` in Python. It can optionally remove the file extension from the component name., Parameters: ['path: The file path to parse.', 'trim_extension: Remove file extension if true.(optional)', 'separator: Type of path separator used.(optional)']
`json_extract_path_text`: Extracts a VARCHAR string from a JSON object at a specified path., Parameters: ['json: The JSON object to query.', 'path: The path in the JSON.']
`nanosecond`: Extracts the sub-minute nanosecond component from a timestamp, returning the number of nanoseconds past the second., Parameters: ['timestamp: The input timestamp to extract from']
`ucase`: Converts a given string to upper case., Parameters: ['string: The string to convert.']
`isoyear`: Extracts the ISO year number from a date, where the ISO year starts on the Monday of the week containing January 4th., Parameters: ['date: Date to extract ISO year']
`array_grade_up`: Returns the indexes corresponding to the positions in the original list, similar to sorting but for indices., Parameters: ['list: Input list to process']
`parse_dirname`: Extracts and returns the top-level directory name from a given path string, based on the specified path separator type., Parameters: ['path: The path input as string', 'separator: Separator type for the path(optional)']
`enum_first`: Returns the first value of the input enum type., Parameters: ['enum: An enumerated data type.']
`to_decades`: Constructs a decade interval from an integer value representing decades., Parameters: ['integer: Number of decades to construct']
`json_value`: Extracts a JSON scalar value from the specified path in the JSON object, returning NULL if the target is not a scalar., Parameters: ['json: The JSON object to query', 'path: The path to extract value']
`weekday`: Returns a numeric representation of the weekday, where Sunday is 0 and Saturday is 6., Parameters: ['date: The date to evaluate.']
`list_cosine_similarity`: Computes cosine similarity between two lists., Parameters: ['list1: First input list of numbers', 'list2: Second input list of numbers']
`array_apply`: Applies a lambda expression to each element in a list, returning a new list with the transformed elements., Parameters: ['list: The input list to process', 'lambda: Function applied to elements']
`jaccard`: Calculates the Jaccard similarity between two strings, considering characters of different cases as different and returning a similarity score between 0 and 1., Parameters: ['s1: The first input string', 's2: The second input string']
`gcd`: Calculates the largest integer that divides two numbers without leaving a remainder., Parameters: ['x: First number for calculation', 'y: Second number for calculation']
`millennium`: Extracts the millennium part from a date., Parameters: ['date: The date to evaluate']
`json_serialize_sql`: Converts SQL SELECT statements into a JSON format, handling multiple statements and formatting options., Parameters: ['statements: Semicolon-separated SQL SELECT statements.', 'skip_empty: Skip fields that are empty.(optional)', 'skip_null: Skip fields that are null.(optional)', 'format: Format output for readability.(optional)']
`grade_up`: The function returns the positions of elements in an ascending order from the original list, representing their index in the sorted sequence., Parameters: ['list: The input list for sorting']
`cot`: Computes the cotangent of a given number `x`., Parameters: ['x: The input angle in radians']
`array_sort`: Sorts the elements of a given list in ascending order by default, allowing optional configurations for sort order and NULL handling., Parameters: ['list: Elements to be sorted', "sort_order: Order: 'ASC' or 'DESC'(optional)", "nulls_order: 'NULLS FIRST' or 'LAST'(optional)"]
`parse_path`: Returns a list of the components (directories and filename) in a path., Parameters: ['path: The file path to parse', 'separator: Separator for parsing path(optional)']
`suffix`: Returns true if the given string ends with the specified search string., Parameters: ['string: The string to check', 'search_string: The suffix to look for']
`array_has`: Checks if a list contains a specific element, returning true if the element exists., Parameters: ['list: The list to search in.', 'element: The element to search for.']
`array_cosine_distance`: Computes the cosine distance between two arrays of the same size, where the elements cannot be NULL., Parameters: ['array1: First input array', 'array2: Second input array']
`timezone_hour`: Extracts the hour portion of the time zone offset from a given temporal value., Parameters: ['value: Temporal input value to process']
`not_like_escape`: The function checks if a string doesn't match a given pattern using case-sensitive matching, with an escape character to treat wildcards as regular characters., Parameters: ['string: Input text to be checked.', 'like_specifier: Pattern to be matched against.', 'escape_character: Char used to escape wildcards.']
`make_time`: The function creates a time using specified hour, minute, and second values., Parameters: ['hour: Hour of the time', 'minute: Minute of the time', 'second: Second and fraction of time']
`degrees`: Converts a given angle in radians to its equivalent in degrees., Parameters: ['x: Angle in radians to convert.']
`array_value`: Creates an ARRAY containing the given argument values., Parameters: ['expr: Values for the ARRAY.']
`atan2`: Computes the arctangent based on the coordinates (y, x) and returns the angle in radians., Parameters: ['y: Numerator for the arctangent', 'x: Denominator for the arctangent']
`parse_dirpath`: The function returns the head of a file path, which is the pathname until the last slash, similar to Python's os.path.dirname function., Parameters: ['path: The path to process.', 'separator: Separators for path components.(optional)']
`from_json_strict`: Transforms a JSON string into a specified nested structure and throws an error if type casting fails., Parameters: ['json: The JSON to transform', 'structure: Specifies the desired structure']
`bit_count`: Returns the number of bits that are set in the given input., Parameters: ['bitstring: The bitstring to evaluate.', 'x: The integer to evaluate.']
`ilike_escape`: Performs case-insensitive pattern matching on a string, allowing search for wildcard characters using a defined escape character., Parameters: ['string: The input string to match', 'like_specifier: The pattern to match', 'escape_character: Character for escaping wildcards']
`vector_type`: Generates a table with columns containing values for specified types and an optional argument to affect vector representation., Parameters: ['col1, ..., coln: Types of the columns', "all_flat: Affects vector's internal representation(optional)"]
`format_bytes`: Converts bytes into a human-readable format using binary units such as KiB, MiB, GiB, etc., Parameters: ['bytes: Number of bytes to convert']
`timezone_minute`: Extracts the minute portion of the time zone offset from a date or timestamp., Parameters: ['date: Date or timestamp value input', 'timestamp: Date or timestamp value input(optional)']
`prefix`: Returns true if the given string starts with the specified search string., Parameters: ['string: The string to check', 'search_string: The prefix to look for']
`list_cosine_distance`: Computes the cosine distance between two equal-length lists, equivalent to `1.0 - list_cosine_similarity`., Parameters: ['list1: First input list of numbers', 'list2: Second input list of numbers']
`to_millennia`: Constructs an interval representing the specified number of millennia., Parameters: ['integer: Number of millennia to construct']
`bin`: Converts an integer into its binary representation as a string., Parameters: ['value: The integer to be converted']
`list_grade_up`: Returns the indexes in sorted order based on the input list values, instead of the values themselves., Parameters: ['list: List to be sorted']
`microsecond`: The microsecond function extracts the sub-minute microsecond portion from a temporal type, such as a timestamp, returning the number of microseconds past the second., Parameters: ['date: The temporal input value.']
`list_negative_inner_product`: Computes the negative dot product of two same-sized lists of numbers, equivalent to `- list_dot_product`., Parameters: ['list1: First list of numbers', 'list2: Second list of numbers']
`century`: The century function extracts the century information from a given date., Parameters: ['date_or_timestamp: Temporal value to extract century']
`get_current_time`: This function returns the current time at the start of the current transaction., Parameters: []
`jaro_winkler_similarity`: Measures the similarity between two strings using the Jaro-Winkler method, returning a similarity score between 0 and 1, with characters of different cases treated as different., Parameters: ['s1: First string for comparison', 's2: Second string for comparison']
`list_has_all`: Checks if all elements in a sublist exist in a given list., Parameters: ['list: The list to search within', 'sub-list: The list to check for']
`asin`: Computes the arcsine of a number., Parameters: ['x: The input value.']
`json_exists`: Returns `true` if a specified path exists in a given JSON object, otherwise returns `false`., Parameters: ['json: JSON object to search', 'path: Path to check within JSON']
`from_base64`: Converts a base64 encoded string to its original character string representation., Parameters: ['string: base64 encoded input string']
`string_split_regex`: Splits a string into an array based on a regular expression delimiter., Parameters: ['string: Input string to be split.', 'regex: Delimiter expression for splitting.']
`multiply`: Performs multiplication on two numeric inputs, returning the product., Parameters: ['x: First input to multiply', 'y: Second input to multiply']
`list_transform`: Transforms each element of a list using a specified lambda function and returns the resulting list., Parameters: ['list: The input list of elements', 'lambda: Function applied to elements']
`list_resize`: Resizes a list to a specified number of elements, initializing new ones with a given value or NULL., Parameters: ['list: The list to resize.', 'size: Number of elements to resize to.', 'value: Value for new elements.(optional)']
`pow`: Computes one number raised to the power of another., Parameters: ['x: Base number to be raised', 'y: Exponent to apply to base']
`gamma`: Interpolates factorial of input minus one, allowing fractional inputs., Parameters: ['x: Input value for computation']
`to_hours`: Constructs an hour interval based on an integer input., Parameters: ['integer: Number of hours to construct']
`divide`: Performs integer division of two numbers., Parameters: ['x: dividend for the division', 'y: divisor for the division']
`array_resize`: Resizes a list to a specified size, filling added slots with a given value or NULL by default., Parameters: ['list: The list to resize.', 'size: Desired size of the list.', 'value: Fill value for added slots.(optional)']
`array_cat`: Concatenates two lists into one., Parameters: ['list1: First list to concatenate', 'list2: Second list to concatenate']
`list_indexof`: Returns the index of an element within a list or NULL if not found., Parameters: ['list: The list to search in', 'element: The element to find']
`combine`: This function is used to combine intermediate state from multiple groups in a batch, forming a result for a scalar aggregation function., Parameters: ['duckdb_aggregate_function: Represents an aggregate function object.', 'state: Current state being processed.', 'state_pointers: Array of state pointers.', 'count: Number of state pointers.']
`not_ilike_escape`: Determines if a string does not match a specified pattern using case-insensitive matching, allowing an escape character to define wildcards., Parameters: ['string: The source string to check.', 'like_specifier: The pattern for matching.', 'escape_character: Character to escape wildcards.(optional)']
`current_schemas`: Returns a list of schemas, optionally including implicit schemas when true is passed as a parameter., Parameters: ['include_implicit: Include implicit schemas when true']
`list_distance`: Calculates the Euclidean distance between two lists of coordinates with equal length., Parameters: ['list1: First list of coordinates.', 'list2: Second list of coordinates.']
`list_apply`: Returns a list from applying a lambda to each list element., Parameters: ['list: The input list to transform', 'lambda: Function to apply to elements']
`list_inner_product`: Computes the dot product of two same-sized lists of numbers., Parameters: ['list1: First list of numbers', 'list2: Second list of numbers']
`atan`: Computes the arctangent of a given numeric input., Parameters: ['x: Value for arctangent computation']
`array_negative_inner_product`: Computes the negative inner product of two arrays of the same size and containing non-NULL elements., Parameters: ['array1: First input array of numbers.', 'array2: Second input array of numbers.']
`mod`: Performs a modulo operation to return the remainder of one numeric expression divided by another., Parameters: ['dividend: The number being divided.', 'divisor: The number to divide by.']
`list_position`: Returns the index of an element in a list or NULL if the element is not found., Parameters: ['list: The list to search in', 'element: Element to find index of']
`array_has_all`: Checks if all elements of a sublist are present in a main list., Parameters: ['list: The main list to check', 'sub-list: The sublist elements checked']
`list_zip`: Combines multiple lists into a single list of structs, matching elements by position, with optional truncation., Parameters: ['list_1: First list to zip', 'list_2: Second list to zip', '...: Additional lists to zip(optional)', 'truncate: Truncate to smallest list length(optional)']
`list_has`: Returns true if the list contains the specified element., Parameters: ['list: The list to search in', 'element: An element to find']
`ord`: It returns the ASCII value of the leftmost character of a string., Parameters: ['string_expression: The string to evaluate']
`to_microseconds`: Constructs an interval representing a specified number of microseconds., Parameters: ['integer: Number of microseconds to convert']
`mismatches`: Calculates the number of positions with different characters between two strings of equal length., Parameters: ['s1: First input string to compare.', 's2: Second input string to compare.']
`make_timestamp`: The function constructs a timestamp from individual parts, including year, month, day, hour, minute, and second., Parameters: ['year: Year component', 'month: Month component', 'day: Day component', 'hour: Hour component', 'minute: Minute component', 'second: Second component']
`ascii`: Returns the Unicode code point of the first character of a given string., Parameters: ['string: Input string for conversion.']
`log10`: Computes the base-10 logarithm of a number., Parameters: ['x: Number to compute log base 10']
`json_contains`: Returns true if a specified JSON value or structure is contained within another JSON object or array., Parameters: ['json_haystack: The JSON object or array', 'json_needle: The value to find']
`list_select`: Returns a list using specified indices., Parameters: ['value_list: The list of values.', 'index_list: Indices of selected elements.']
`enum_code`: Returns the numeric value associated with a specific ENUM value, providing its backing integer representation., Parameters: ['enum_value: The ENUM value to process']
`ln`: Computes the natural logarithm of a given number., Parameters: ['x: Number to compute the logarithm']
`printf`: The function formats a string using the printf syntax., Parameters: ['format: String format specifying placeholders.', 'parameters: Values to replace format specifiers.(optional)']
`octet_length`: Calculates the number of bytes in the binary representation., Parameters: ['blob: A binary large object']
`json_quote`: Creates a JSON representation from any type of value, interpreting LISTs as JSON arrays and STRUCTs or MAPs as JSON objects., Parameters: ['any: Value to convert to JSON']
`isnan`: Checks if the floating-point value is not a number and returns true if so, false otherwise., Parameters: ['x: Value to check if NaN']
`editdist3`: Calculates the minimum number of single-character edits (insertions, deletions, or substitutions) needed to change one string into another. It's case-sensitive and treats characters of different cases as distinct., Parameters: ['s1: The first input string', 's2: The second input string']
`set_bit`: Sets a specific bit at a given index in a bitstring to a new value, returning a new bitstring., Parameters: ['bitstring: The input bitstring value.', 'index: Position to set the bit.', 'new_value: New bit value to set.']
`to_weeks`: Constructs a week interval based on the given number of weeks., Parameters: ['integer: Number of weeks to convert']
`array_select`: Returns a list based on elements selected by indices from the index list., Parameters: ['value_list: The list of values.', 'index_list: List of indices to select.']
`lcase`: Converts a string to lower case., Parameters: ['string: The string to convert.']
`cbrt`: Calculates the cube root of a given number., Parameters: ['x: The number to cube root']
`element_at`: The function retrieves the value for a given key from a map, returning a list with the value or an empty list if the key is absent., Parameters: ['map: The map from which to retrieve', 'key: Key to retrieve value for']
`list_reduce`: Reduces elements of a list into a single value using a lambda function applied sequentially from the first element., Parameters: ['list: Input list of elements', 'lambda: Function applied to elements']
`json_array`: Creates a JSON array from one or more values., Parameters: ['value1: First value for JSON array', 'value2: Additional values for JSON array(optional)', '...: Additional values for JSON array(optional)']
`isinf`: This function checks if a floating point number is infinite and returns true or false accordingly., Parameters: ['x: Value to check for infinity']
`factorial`: Computes the product of an integer and all positive integers below it., Parameters: ['x: The integer to compute factorial']
`make_date`: Constructs a date from the specified year, month, and day components., Parameters: ['year: The value of the year.', 'month: The value of the month.', 'day: The value of the day.']
`log2`: Computes the logarithm of a number to base 2., Parameters: ['x: Number to compute logarithm.']
`ceiling`: Rounds a given number up to the nearest integer., Parameters: ['x: The input number to round']
`setseed`: Sets the seed for the random function., Parameters: ['x: Seed value for randomness']
`bit_position`: Returns the first starting index of a given substring within a bitstring, indexed from 1, or zero if the substring isn't present., Parameters: ['substring: Substring to search for', 'bitstring: Bitstring to be searched']
`even`: Rounds a numeric value to the nearest even integer by rounding away from zero., Parameters: ['x: The numeric value to round']
`least_common_multiple`: Computes the least common multiple of two numbers., Parameters: ['x: First number for LCM computation', 'y: Second number for LCM computation']
`stats`: This function provides statistics about a given expression, including minimum and maximum values, and null presence., Parameters: ['expression: The expression to evaluate']
`icu_sort_key`: Generates a surrogate key for sorting characters according to locale., Parameters: ['string: Characters to sort by locale', 'collator: Locale specifier for sorting(optional)']
`array_distance`: Computes the Euclidean distance between two arrays of equal size, which cannot contain NULL values., Parameters: ['array1: First array of floats', 'array2: Second array of floats']
`hamming`: Calculates the number of differing positions between two equally long strings, considering case sensitivity., Parameters: ['s1: First string to compare', 's2: Second string to compare']
`second`: Extracts the seconds part from a timestamp or an interval., Parameters: ['input: The timestamp or interval value']
`to_months`: Constructs a month interval from an integer value., Parameters: ['integer: Number of months to construct']
`left_grapheme`: This function extracts a specified number of grapheme clusters from the beginning of a string., Parameters: ['string: Input string to extract from', 'count: Number of graphemes to extract']
`substring_grapheme`: Extracts a substring composed of a specified number of grapheme clusters starting from a given position., Parameters: ['string: The input string to operate on.', 'start: Starting position of extraction.', 'length: Number of grapheme clusters to extract.']
`jaro_similarity`: Calculates the Jaro similarity between two strings, returning a value between 0 and 1 that indicates how similar the strings are. Characters of different cases are considered different., Parameters: ['s1: First input string', 's2: Second input string']
`json_type`: Returns the type of a JSON element or a specified path within a JSON object., Parameters: ['json: The JSON data input', 'path: Path within the JSON(optional)']
`json_valid`: Checks if the input is valid JSON, returning `true` if it is valid and `false` otherwise., Parameters: ['json: The string to validate as JSON.']
`lgamma`: Computes the logarithm of the Gamma function, which is useful for situations where you need to handle large scale factorials and avoid overflow issues by using their logarithm instead., Parameters: ['x: Input number for computation']
`array_where`: Applies a Boolean mask to a list, returning only the elements that correspond to true values in the mask., Parameters: ['value_list: The list to be filtered.', 'mask_list: The Boolean mask list.']
`list_reverse_sort`: Sorts the elements of a list in reverse order., Parameters: ['list: The list to be sorted', 'null_order: Order for NULL values(optional)']
`unicode`: Returns the Unicode code of the first character of a given string, or -1 if the string is empty. Returns NULL if the input is NULL., Parameters: ['string: Input string to analyze']
`get_bit`: Extracts the nth bit from a bitstring, with the first (leftmost) bit indexed at 0., Parameters: ['bitstring: The bitstring to examine.', 'index: Zero-based bit index.']
`right_grapheme`: Extracts the right-most specified number of grapheme clusters from a given string., Parameters: ['string: Input string to extract from', 'count: Number of graphemes to extract']
`lcm`: Computes the least common multiple of two numeric values., Parameters: ['x: First number for LCM computation', 'y: Second number for LCM computation']
`list_where`: Applies a boolean mask to a list to filter elements based on the mask's true values., Parameters: ['value_list: List to mask elements from', 'mask_list: Boolean mask for value_list']
`sha256`: Computes the SHA-256 hash of a given value and returns it as a VARCHAR., Parameters: ['value: Value to hash with SHA-256.']
`era`: The scalar function extracts the era from a date or timestamp value (e.g., distinguishing BC from AD in the Gregorian calendar)., Parameters: ['date: The date/timestamp value to extract the era from']
`strlen`: The function returns the number of bytes in a given string., Parameters: ['string: The input string to measure']
`to_seconds`: Converts an integer into a second interval., Parameters: ['integer: Number of seconds to construct']
`array_zip`: Combines multiple lists into one, creating a list of structs based on elements from each input list. Missing values are replaced with NULL when lists have different lengths., Parameters: ['list1: First list to combine.', 'list2: Second list to combine.', '...: Additional lists to combine.(optional)', 'truncate: Indicates whether to truncate.(optional)']
`list_negative_dot_product`: Computes the negative dot product of two same-sized lists of numbers., Parameters: ['list1: First list of numbers', 'list2: Second list of numbers']
`tan`: Computes the tangent of a given angle., Parameters: ['x: Angle for tangent calculation']
`bit_length`: Calculates the total number of bits in a bitstring value., Parameters: ['bitstring: The input bitstring value.']
`list_cat`: Concatenates two lists into a single list., Parameters: ['list1: First list to concatenate', 'list2: Second list to concatenate']
`union_extract`: Extracts the value with the specified tag from a union; returns NULL if the tag is not currently selected., Parameters: ['union: The union object to extract from.', 'tag: The tag value to extract.']
`union_value`: Creates a "UNION" holding a single value, tagged by the parameter name., Parameters: ['tag: The name for the tagged value.', 'expr: The value to be tagged.']
`make_timestamptz`: Creates a TIMESTAMP WITH TIME ZONE based on specified date-time components and, optionally, a time zone., Parameters: ['year: Year component of date', 'month: Month component of date', 'day: Day component of date', 'hour: Hour component of time', 'minute: Minute component of time', 'second: Second component of time', 'timezone: Time zone of timestamp(optional)']
`nfc_normalize`: Converts a string into its Unicode Normalization Form C (NFC), which is useful for string comparisons and ordering when dealing with mixed normalization forms., Parameters: ['string: The string to normalize']
`txid_current`: Returns the current transaction's identifier, a BIGINT value, creating a new one if necessary., Parameters: []
`nextafter`: Returns the next floating point value after one number in the direction of another., Parameters: ['x: Starting floating point number.', 'y: Direction towards this number.']
`subtract`: Subtracts two values, resulting in their difference., Parameters: ['x: The first numerical operand', 'y: The second numerical operand']
`chr`: Converts an ASCII code value into its corresponding character., Parameters: ['x: ASCII code value to convert']
`array_negative_dot_product`: Computes the negative inner product of two arrays of the same size and whose elements cannot be NULL., Parameters: ['array1: First array for computation.', 'array2: Second array for computation.']
`list_dot_product`: Computes the dot product of two lists of numbers of the same size., Parameters: ['list1: First list of numbers.', 'list2: Second list of numbers.']
`current_localtime`: Returns the current local time in the time zone setting of the database., Parameters: []
`xor`: Performs a bitwise exclusive OR operation between two bitstring values., Parameters: ['x: First bitstring to be XORed', 'y: Second bitstring to be XORed']
`reduce`: The function applies a lambda expression to each element of a list to produce a single cumulative result., Parameters: ['list: The input list of elements', 'lambda: Function applied to elements']
`finalize`: Finalizes the execution of a prepared statement, ensuring that any allocated resources are released., Parameters: ['sql: SQL statement to finalize(optional)', 'params: Parameters for the SQL statement(optional)', 'callback: Function called upon completion(optional)']
`exp`: Computes the exponential of a given input number, which is denoted as 'e' raised to the power of the input number., Parameters: ['input_number: Number to calculate the exponential']
`read_ndjson_auto`: The function reads newline-delimited JSON (NDJSON) files and automatically infers JSON schema and types., Parameters: ['filename: File or list of files', 'auto_detect: Auto-detect key names and types(optional)', 'columns: Specifies key names and types(optional)', 'compression: File compression type detection(optional)', 'convert_strings_to_integers: Convert strings to numerical types(optional)', 'dateformat: Date parsing format specification(optional)', 'filename: Include extra filename column(optional)', 'format: Format of JSON to read(optional)', 'hive_partitioning: Interpret as Hive partitioned(optional)', 'ignore_errors: Ignore parse errors in files(optional)', 'maximum_depth: Max depth for schema detection(optional)', 'maximum_object_size: Max size of JSON object(optional)', 'records: Whether JSON contains records(optional)', 'sample_size: Sample objects for type detection(optional)', 'timestampformat: Parsing format for timestamps(optional)', 'union_by_name: Unify schemas of files(optional)']
`arrow_scan`: The "arrow_scan" table function allows DuckDB to query data directly from an Arrow dataset. Users can provide a connection to the database and the Arrow stream containing the data, and DuckDB will interface with the Arrow stream to perform SQL queries., Parameters: ['connection: The database connection to use', 'table_name: Name for the Arrow table', 'arrow: The Arrow stream with data']
`parquet_metadata`: Queries the metadata of a Parquet file, providing details about row groups, columns, and basic statistics., Parameters: ['file_name: Name of the Parquet file.', 'row_group_id: ID of each row group.', 'row_group_num_rows: Number of rows in group.', 'row_group_num_columns: Columns present in row group.', 'row_group_bytes: Size in bytes of group.', 'column_id: ID of each column.', 'file_offset: Offset position in file.', 'num_values: Number of values in column.', 'path_in_schema: Column path in schema.', 'type: Data type of column.', 'stats_min: Minimum value statistic.', 'stats_max: Maximum value statistic.', 'stats_null_count: Count of null values.', 'stats_distinct_count: Count of distinct values.', 'stats_min_value: Actual minimum value found.', 'stats_max_value: Actual maximum value found.', 'compression: Compression algorithm used.', 'encodings: Encodings applied to column.', 'index_page_offset: Offset to index page.', 'dictionary_page_offset: Offset to dictionary page.', 'data_page_offset: Offset to data page.', 'total_compressed_size: Size after compression.', 'total_uncompressed_size: Size before compression.', 'key_value_metadata: Custom key-value metadata pairs.']
`parquet_file_metadata`: Queries file-level metadata of Parquet files, including format version and encryption details., Parameters: ['file_name: Path to the Parquet file', 'created_by: Creator of the Parquet file(optional)', 'num_rows: Number of rows in file', 'num_row_groups: Number of row groups', 'format_version: Format version used', 'encryption_algorithm: Encryption algorithm used(optional)', 'footer_signing_key_metadata: Metadata of signing key(optional)', 'format_version: Format version of file(optional)']
`sniff_csv`: The function identifies CSV properties from a file, returning details such as delimiters, quoting rules, and column types., Parameters: ['filename: Path to the CSV file.', 'sample_size: Rows considered for detection.(optional)']
`duckdb_types`: The function provides metadata about data types available in a DuckDB instance, including type name, type size, and logical type information., Parameters: ['database_name: Database containing the type', 'database_oid: Internal ID of the database', 'schema_name: Schema containing the type', 'schema_oid: Internal ID of the schema', 'type_name: Name or alias of the type', 'type_oid: Internal ID of the type(optional)', 'type_size: Bytes required to represent', 'logical_type: Canonical name of the type', 'type_category: Category of the data type', 'internal: Whether type is built-in']
`index_scan`: Performs an index scan on a specified table and column, returning the row IDs that match the scan condition., Parameters: ['index_name: Name of the index to scan', 'scan_condition: Condition determining rows for scan(optional)']
`repeat_row`: Generates a table with multiple rows, each containing specified fields., Parameters: ['varargs: Fields for each table row', 'num_rows: Number of rows to generate']
`read_ndjson`: Reads newline-delimited JSON (NDJSON) directly, interpreting each line as a separate JSON object., Parameters: ['compression: The compression type for the file(optional)', 'filename: Include extra filename column(optional)', 'format: Specifies JSON read format(optional)', 'hive_partitioning: Interpret path as Hive partitioned(optional)', 'ignore_errors: Ignore parse errors if newline(optional)', 'maximum_sample_files: Maximum JSON files sampled(optional)', 'maximum_object_size: Maximum size of JSON object(optional)']
`checkpoint`: Synchronizes the write-ahead log (WAL) with the database file without interrupting transactions., Parameters: ['database: Name of the database to be checkpointed(optional)']
`duckdb_optimizers`: Returns metadata about DuckDB's optimization rules, which can be selectively disabled for debugging., Parameters: []
`duckdb_temporary_files`: This function provides metadata about the temporary files DuckDB has written to disk, including their path and size., Parameters: []
`force_checkpoint`: Synchronizes the write-ahead log (WAL) with the file of the specified database, interrupting transactions., Parameters: ['database: Target database for checkpoint(optional)']
`pg_timezone_names`: The table function retrieves a list of available time zones and their respective abbreviations and UTC offsets., Parameters: ['name: Time zone full name', 'abbrev: Time zone abbreviation(optional)', 'utc_offset: Time zone UTC offset value(optional)']
`duckdb_variables`: The table function provides metadata about the variables available in the DuckDB instance, including their name, value, and type., Parameters: []
`tpch_answers`: Produces expected results for TPC-H queries for specified scale factors., Parameters: []
`pragma_collations`: Returns a list of all available collation names including both built-in and ICU extension collations., Parameters: []
`test_vector_types`: Generates a table with columns containing values conforming to the types of the input arguments., Parameters: ['coln: Columns with type-conforming values.', 'all_flat: Affects internal vector representation.(optional)']
`read_blob`: Reads content from a specified source as a BLOB, supporting file names, lists, or glob patterns., Parameters: ['source: Specifies the data source.']
`pragma_platform`: Returns an identifier for the platform DuckDB was compiled for., Parameters: []
`icu_calendar_names`: Retrieves and lists available non-Gregorian calendars supported by the ICU extension., Parameters: []
`summary`: Computes aggregates over all columns of a table or query, including min, max, average, and more, and returns these along with column names and types., Parameters: ['table_name: Name of the table to summarize', 'query: SQL query to summarize']
`parquet_scan`: Reads one or more Parquet files as table-like structures, supporting various configurations for file reading and processing., Parameters: ['path_or_list_of_paths: Paths to Parquet file(s)', 'binary_as_string: Load binary columns as strings(optional)', 'encryption_config: Configuration for Parquet encryption(optional)', 'filename: Include filename column result(optional)', 'file_row_number: Include file row number column(optional)', 'hive_partitioning: Interpret as Hive partitioned path(optional)', 'union_by_name: Unify columns of multiple schemas(optional)', 'MD_RUN: Control remote/local query execution(optional)']
`count_star`: The aggregate function calculates the total number of rows in a selected table or group; `count(*)` counts all rows regardless of NULL values, while `count(expression)` counts only rows where the expression is non-NULL.
Example: `SELECT count(*) FROM students;`, Parameters: ['expression: Column or expression to evaluate(optional)']
`approx_count_distinct`: Provides an approximate count of distinct elements using HyperLogLog., Parameters: ['x: Input to count distinct elements.', 'accurate_value_count: Accuracy level for the estimation.(optional)', 'debug: Debugging mode for output.(optional)']
`argmax`: Finds the row with the maximum value in a specified column and evaluates another column's expression at that row., Parameters: ['arg: Expression to evaluate at maximum', 'val: Column to find maximum value', 'n: Number of top rows to return(optional)']
`skewness`: Calculates the skewness, measuring asymmetry of a distribution., Parameters: ['x: Data values for skewness calculation']
`regr_sxy`: Calculates the sample covariance with Bessel's bias correction for pairs of non-null values., Parameters: ['y: dependent variable values', 'x: independent variable values']
`entropy`: Calculates the log-2 entropy of a given dataset, measuring information or uncertainty within the data., Parameters: ['x: Data for entropy calculation.']
`regr_syy`: Calculates the sample variance of the dependent variable, including Bessel's bias correction, for non-null pairs where x is the independent variable and y is the dependent variable., Parameters: ['y: Dependent variable data', 'x: Independent variable data']
`argmin`: The function finds the row with the minimum value of a specified column and returns the value of another specified column at that row., Parameters: ['arg: Value to be returned.', 'val: Value to be minimized.', 'n: Number of rows returned.(optional)']
`regr_count`: Returns the number of non-NULL pairs., Parameters: ['y: Dependent variable in pairs', 'x: Independent variable in pairs']
`arbitrary`: Returns the first value (null or non-null) from the input argument, useful when an arbitrary value from a group is needed without specifying an order., Parameters: ['arg: The column or expression to retrieve an arbitrary value from.']
`mean`: Calculates the average of all non-null values in a given column., Parameters: ['arg: Column or expression to average']
`approx_quantile`: This function provides an approximate quantile using the T-Digest algorithm., Parameters: ['x: Dataset column to analyze', 'pos: Quantile position from 0-1']
`kurtosis`: Calculates the excess kurtosis with bias correction according to the sample size, providing a measure of the tailedness of the distribution of data values., Parameters: ['x: Input numeric column or expression']
`quantile_cont`: Calculates the interpolated quantile for a given position within an array of values, resulting in a smooth estimate between elements., Parameters: ['x: Input data to aggregate', 'pos: Position(s) for quantile calculation']
`variance`: Calculates the variance of all non-null input values using Bessel's correction by default., Parameters: ['column: Column to calculate variance on']
`min_by`: Finds the row with the minimum value calculated from a specified expression and computes another expression from that row., Parameters: ['arg: Expression evaluated for each row.', 'val: Value used to order rows.', 'n: Number of top results.(optional)']
`bit_and`: Performs a bitwise AND on all bits in a given expression., Parameters: ['arg: Input expression for bitwise AND']
`var_pop`: Calculates the population variance of a set of values without bias correction., Parameters: ['x: Input values for variance calculation.']
`fsum`: Calculates the sum using a more accurate floating point summation (Kahan Sum) for increased precision in floating point arithmetic., Parameters: ['arg: Argument to be summed accurately']
`regr_r2`: Calculates the squared Pearson correlation coefficient between two variables in a linear regression, indicating the proportion of variance in the dependent variable that can be predicted from the independent variable., Parameters: ['y: Dependent variable in regression', 'x: Independent variable in regression']
`product`: Calculates the product of all non-null values in the specified column or expression., Parameters: ['expr: The values to multiply together.']
`mad`: Calculates the median absolute deviation of a dataset, with temporal types returning a positive `INTERVAL`., Parameters: ['x: Column containing the dataset', 'return_type: Expected return data type(optional)']
`bool_or`: Returns true if any input value is true, otherwise false., Parameters: ['arg: The input values to aggregate']
`regr_avgy`: Calculates the average of the dependent variable for non-NULL pairs, where x is the independent variable and y is the dependent variable., Parameters: ['y: Dependent variable in the function', 'x: Independent variable in the function']
`mode`: The mode function calculates the most frequently occurring value in a set of values., Parameters: ['value_column: Column containing values to analyze']
`reservoir_quantile`: Gives an approximate quantile using reservoir sampling., Parameters: ['x: Values to calculate quantile for.', 'quantile: Quantile position between 0-1.', 'sample_size: Number of samples for estimation.(optional)']
`sumkahan`: Calculates the sum of all non-null values in a column using a more accurate floating point summation to reduce numerical errors., Parameters: ['arg: Values to be summed']
`quantile`: Calculates the interpolated or discrete quantile of a set of values, determining the specific value or range at a given percentage position., Parameters: ['x: Values to aggregate for quantile', 'pos: Quantile position fraction (0-1)', 'method: Method of interpolation (for continuous quantile)(optional)']
`bool_and`: Returns `true` if every input value is `true`, otherwise `false`., Parameters: ['arg: A column or expression']
`kurtosis_pop`: Calculates the excess kurtosis of a data set (Fisher’s definition) without bias correction., Parameters: ['x: The input data values']
`regr_sxx`: Calculates the sample variance, with Bessel's bias correction, of the independent variable for non-NULL pairs., Parameters: ['y: Dependent variable values.', 'x: Independent variable values.']
`bitstring_agg`: The function returns a bitstring with bits set for each distinct position defined in the input argument., Parameters: ['arg: List of values for processing', 'min: Minimum range for positions(optional)', 'max: Maximum range for positions(optional)']
`bit_xor`: Performs a bitwise XOR on all bits in a given expression., Parameters: ['arg: Expression of bits to XOR.']
`quantile_disc`: Calculates the discrete quantile of a sorted set of values by selecting the greatest indexed element corresponding to the given position within the set., Parameters: ['x: The value set to quantify', 'pos: The quantile position(s) to return']
`kahan_sum`: Calculates the sum using an accurate floating-point summation technique (Kahan Sum) to minimize errors., Parameters: ['arg: Values to be summed accurately.']
`favg`: Calculates the average using a more accurate floating point summation technique known as Kahan Sum., Parameters: ['arg: Input values for averaging']
`regr_avgx`: Computes the average of the independent variable for non-NULL data pairs., Parameters: ['y: Dependent variable in regression', 'x: Independent variable in regression']
`covar_pop`: Computes the population covariance without bias correction., Parameters: ['y: Dependent variable data', 'x: Independent variable data']
`sem`: Calculates the population standard error of the mean from input values, which measures how far the sample mean of the data is likely to be from the true mean of the population., Parameters: ['input_values: Values to calculate SEM from', 'weight_column: Optional weights for each value(optional)']
`covar_samp`: The sample covariance is calculated, which includes Bessel's bias correction., Parameters: ['y: Dependent variable column values', 'x: Independent variable column values']
`stddev_pop`: Calculates the population standard deviation of a given dataset, ignoring NULL values., Parameters: ['x: Column for standard deviation']
`var_samp`: Calculates the sample variance using Bessel's correction, which adjusts for bias by dividing by (n-1) instead of n., Parameters: ['x: Input values to calculate variance.', 'order_clause: Optional order by clause.(optional)']
`bit_or`: Performs a bitwise OR operation across all bits of the input values., Parameters: ['arg: Values to aggregate with OR.']
`arg_min`: Finds the row with the minimum value in a specified column and evaluates another expression for that row., Parameters: ['arg: Expression to evaluate at minimum', 'val: Column to find minimum value']
`rank_dense`: The rank of the current row is determined without gaps, aligning with the row number of its first peer., Parameters: []
`cume_dist`: Calculates the cumulative distribution of a row within its partition., Parameters: []
`nth_value`: Evaluates an expression at the nth specified row within the window frame, considering only rows with non-null values if specified., Parameters: ['expr: Expression to evaluate at row', 'nth: Row position to evaluate at', 'ignore_nulls: Ignore nulls in evaluation(optional)']
DuckDB Statements:
`SELECT`: Retrieves rows from the database, evaluating a list of expressions to produce the columns of the result, with optional modifiers such as DISTINCT, EXCLUDE, and REPLACE., Examples: ['SELECT * FROM table_name;', 'SELECT col1 + col2 AS res, sqrt(col1) AS root FROM table_name;', 'SELECT DISTINCT city FROM addresses;', 'SELECT count(*) FROM addresses;', 'SELECT * EXCLUDE (city) FROM addresses;', 'SELECT * REPLACE (lower(city) AS city) FROM addresses;', "SELECT COLUMNS('number\\d+') FROM addresses;", 'SELECT min(COLUMNS(*)) FROM addresses;', 'SELECT "Some Column Name" FROM tbl;']
`WHERE`: Filters the rows of a query, keeping only those rows for which the given condition evaluates to true., Examples: ['SELECT * FROM table_name WHERE id = 3;', "SELECT * FROM table_name WHERE name ILIKE '%mark%';", 'SELECT * FROM table_name WHERE id = 3 OR id = 7;']
`ORDER BY`: Sorts the rows of the result set according to one or more sorting criteria, with optional direction, NULL ordering, and collation., Examples: ['SELECT * FROM addresses ORDER BY city;', 'SELECT * FROM addresses ORDER BY city DESC NULLS LAST;', 'SELECT * FROM addresses ORDER BY city, zip;', 'SELECT * FROM addresses ORDER BY city COLLATE DE;', 'SELECT * FROM addresses ORDER BY ALL;', 'SELECT * FROM addresses ORDER BY ALL DESC;']
`GROUP BY`: Groups rows that share values in the specified columns so that aggregate functions can be computed per group., Examples: ['SELECT city, count(*) FROM addresses GROUP BY city;', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY city, street_name;', 'SELECT city, street_name FROM addresses GROUP BY ALL;', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ALL;']
`WITH`: Defines common table expressions (CTEs) — named temporary result sets, optionally materialized or recursive, that can be referenced within the query., Examples: ['WITH cte AS (SELECT 42 AS x) SELECT * FROM cte;', 'WITH cte1 AS (SELECT 42 AS i), cte2 AS (SELECT i * 100 AS x FROM cte1) SELECT * FROM cte2;', 'WITH t(x) AS (⟨complex_query⟩) SELECT * FROM t AS t1, t AS t2, t AS t3;', 'WITH t(x) AS MATERIALIZED (⟨complex_query⟩) SELECT * FROM t AS t1, t AS t2, t AS t3;', 'WITH RECURSIVE FibonacciNumbers (RecursionDepth, FibonacciNumber, NextNumber) AS (SELECT 0 AS RecursionDepth, 0 AS FibonacciNumber, 1 AS NextNumber UNION ALL SELECT fib.RecursionDepth + 1 AS RecursionDepth, fib.NextNumber AS FibonacciNumber, fib.FibonacciNumber + fib.NextNumber AS NextNumber FROM FibonacciNumbers fib WHERE fib.RecursionDepth + 1 < 10) SELECT fn.RecursionDepth AS FibonacciNumberIndex, fn.FibonacciNumber FROM FibonacciNumbers fn;']
`JOIN`: Combines rows from two or more tables based on a related column or join condition, supporting inner, outer, natural, semi, anti, lateral, and ASOF joins., Examples: ['SELECT * FROM table_name;', 'FROM table_name SELECT *;', 'FROM table_name;', 'SELECT tn.* FROM table_name tn;', 'SELECT * FROM schema_name.table_name;', 'SELECT t.i FROM range(100) AS t(i);', "SELECT * FROM 'test.csv';", 'SELECT * FROM (SELECT * FROM table_name);', 'SELECT t FROM t;', "SELECT t FROM (SELECT unnest(generate_series(41, 43)) AS x, 'hello' AS y) t;", 'SELECT * FROM table_name JOIN other_table ON table_name.key = other_table.key;', 'SELECT * FROM table_name TABLESAMPLE 10%;', 'SELECT * FROM table_name TABLESAMPLE 10 ROWS;', 'FROM range(100) AS t(i) SELECT sum(t.i) WHERE i % 2 = 0;', 'SELECT a.*, b.* FROM a CROSS JOIN b;', 'SELECT a.*, b.* FROM a, b;', 'SELECT n.*, r.* FROM l_nations n JOIN l_regions r ON (n_regionkey = r_regionkey);', 'SELECT * FROM city_airport NATURAL JOIN airport_names;', 'SELECT * FROM city_airport JOIN airport_names USING (iata);', 'SELECT * FROM city_airport SEMI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata IN (SELECT iata FROM airport_names);', 'SELECT * FROM city_airport ANTI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata NOT IN (SELECT iata FROM airport_names WHERE iata IS NOT NULL);', 'SELECT * FROM range(3) t(i), LATERAL (SELECT i + 1) t2(j);', 'SELECT * FROM generate_series(0, 1) t(i), LATERAL (SELECT i + 10 UNION ALL SELECT i + 100) t2(j);', 'SELECT * FROM trades t ASOF JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF LEFT JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF JOIN prices p USING (symbol, "when");', 'SELECT t.symbol, t.when AS trade_when, p.when AS price_when, price FROM trades t ASOF LEFT JOIN prices p USING (symbol, "when");', 'SELECT * FROM t AS t t1 JOIN t t2 USING(x);', 'FROM tbl SELECT i, s;', 'FROM tbl;']
`JOIN`: Combines rows from two or more tables based on a related column or join condition, supporting inner, outer, natural, semi, anti, lateral, and ASOF joins., Examples: ['SELECT * FROM table_name;', 'FROM table_name SELECT *;', 'FROM table_name;', 'SELECT tn.* FROM table_name tn;', 'SELECT * FROM schema_name.table_name;', 'SELECT t.i FROM range(100) AS t(i);', "SELECT * FROM 'test.csv';", 'SELECT * FROM (SELECT * FROM table_name);', 'SELECT t FROM t;', "SELECT t FROM (SELECT unnest(generate_series(41, 43)) AS x, 'hello' AS y) t;", 'SELECT * FROM table_name JOIN other_table ON table_name.key = other_table.key;', 'SELECT * FROM table_name TABLESAMPLE 10%;', 'SELECT * FROM table_name TABLESAMPLE 10 ROWS;', 'FROM range(100) AS t(i) SELECT sum(t.i) WHERE i % 2 = 0;', 'SELECT a.*, b.* FROM a CROSS JOIN b;', 'SELECT a.*, b.* FROM a, b;', 'SELECT n.*, r.* FROM l_nations n JOIN l_regions r ON (n_regionkey = r_regionkey);', 'SELECT * FROM city_airport NATURAL JOIN airport_names;', 'SELECT * FROM city_airport JOIN airport_names USING (iata);', 'SELECT * FROM city_airport SEMI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata IN (SELECT iata FROM airport_names);', 'SELECT * FROM city_airport ANTI JOIN airport_names USING (iata);', 'SELECT * FROM city_airport WHERE iata NOT IN (SELECT iata FROM airport_names WHERE iata IS NOT NULL);', 'SELECT * FROM range(3) t(i), LATERAL (SELECT i + 1) t2(j);', 'SELECT * FROM generate_series(0, 1) t(i), LATERAL (SELECT i + 10 UNION ALL SELECT i + 100) t2(j);', 'SELECT * FROM trades t ASOF JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF LEFT JOIN prices p ON t.symbol = p.symbol AND t.when >= p.when;', 'SELECT * FROM trades t ASOF JOIN prices p USING (symbol, "when");', 'SELECT t.symbol, t.when AS trade_when, p.when AS price_when, price FROM trades t ASOF LEFT JOIN prices p USING (symbol, "when");', 'SELECT * FROM t AS t t1 JOIN t t2 USING(x);', 'FROM tbl SELECT i, s;', 'FROM tbl;']
`CASE`: Performs conditional logic in expressions, returning a value depending on which condition (or compared value) matches, similar to if/then/else., Examples: ['SELECT i, CASE WHEN i > 2 THEN 1 ELSE 0 END AS test FROM integers;', 'SELECT i, CASE WHEN i = 1 THEN 10 WHEN i = 2 THEN 20 ELSE 0 END AS test FROM integers;', 'SELECT i, CASE WHEN i = 1 THEN 10 END AS test FROM integers;', 'SELECT i, CASE i WHEN 1 THEN 10 WHEN 2 THEN 20 WHEN 3 THEN 30 END AS test FROM integers;']
`USE`: Selects the default database and/or schema to be used by subsequent statements., Examples: ['USE memory;', 'USE duck.main;']
`CREATE TABLE`: Creates a new table in the catalog, defining its columns, types, constraints, and optionally populating it from a query., Examples: ['CREATE TABLE t1 (i INTEGER, j INTEGER);', 'CREATE TABLE t1 (id INTEGER PRIMARY KEY, j VARCHAR);', 'CREATE TABLE t1 (id INTEGER, j VARCHAR, PRIMARY KEY (id, j));', 'CREATE TABLE t1 (\n i INTEGER NOT NULL,\n decimalnr DOUBLE CHECK (decimalnr < 10),\n date DATE UNIQUE,\n time TIMESTAMP\n);', 'CREATE TABLE t1 AS SELECT 42 AS i, 84 AS j;', "CREATE TEMP TABLE t1 AS SELECT * FROM read_csv('path/file.csv');", 'CREATE OR REPLACE TABLE t1 (i INTEGER, j INTEGER);', 'CREATE TABLE IF NOT EXISTS t1 (i INTEGER, j INTEGER);', 'CREATE TABLE nums AS SELECT i FROM range(0, 3) t(i);', 'CREATE TABLE t1 (id INTEGER PRIMARY KEY, percentage INTEGER CHECK (0 <= percentage AND percentage <= 100));', 'CREATE TABLE t1 (id INTEGER PRIMARY KEY, j VARCHAR);\nCREATE TABLE t2 (\n id INTEGER PRIMARY KEY,\n t1_id INTEGER,\n FOREIGN KEY (t1_id) REFERENCES t1 (id)\n);', 'CREATE TABLE t1 (x FLOAT, two_x AS (2 * x));']
`UPDATE`: Modifies the values of existing rows in a table, optionally restricted by a condition or joined against other tables., Examples: ['UPDATE tbl SET i = 0 WHERE i IS NULL;', 'UPDATE tbl SET i = 1, j = 2;', 'UPDATE original SET value = new.value FROM new WHERE original.key = new.key;', 'UPDATE original SET value = (SELECT new.value FROM new WHERE original.key = new.key);', "UPDATE original AS true_original SET value = (SELECT new.value || ' a change!' AS value FROM original AS new WHERE true_original.key = new.key);", "UPDATE city SET revenue = revenue + 100 FROM country WHERE city.country_code = country.code AND country.name = 'France';"]
`DROP`: Removes a previously created catalog entry such as a table, view, function, index, schema, sequence, macro, or type., Examples: ['DROP TABLE tbl;', 'DROP VIEW IF EXISTS v1;', 'DROP FUNCTION fn;', 'DROP INDEX idx;', 'DROP SCHEMA sch;', 'DROP SEQUENCE seq;', 'DROP MACRO mcr;', 'DROP MACRO TABLE mt;', 'DROP TYPE typ;', 'DROP SCHEMA myschema CASCADE;']
`ALTER TABLE`: Changes the schema of an existing table, e.g. adding, dropping, renaming, or changing the type of columns, setting or dropping defaults and NOT NULL constraints, or renaming the table itself., Examples: ['ALTER TABLE integers ADD COLUMN k INTEGER;', 'ALTER TABLE integers ADD COLUMN l INTEGER DEFAULT 10;', 'ALTER TABLE integers DROP k;', 'ALTER TABLE integers ALTER i TYPE VARCHAR;', "ALTER TABLE integers ALTER i SET DATA TYPE VARCHAR USING concat(i, '_', j);", 'ALTER TABLE integers ALTER COLUMN i SET DEFAULT 10;', 'ALTER TABLE integers ALTER COLUMN i DROP DEFAULT;', 'ALTER TABLE t ALTER COLUMN x SET NOT NULL;', 'ALTER TABLE t ALTER COLUMN x DROP NOT NULL;', 'ALTER TABLE integers RENAME TO integers_old;', 'ALTER TABLE integers RENAME i TO j;']
`FILTER`: Restricts an aggregate function so that it only aggregates the rows satisfying the given condition, allowing different filters per aggregate in the same SELECT., Examples: ['SELECT count(*) FILTER (i <= 5) AS lte_five FROM generate_series(1, 10) tbl(i);', 'SELECT sum(i) FILTER (i <= 5) AS lte_five_sum FROM generate_series(1, 10) tbl(i);', 'SELECT count(i) FILTER (year = 2022) AS "2022" FROM stacked_data;', 'SELECT first(i) FILTER (year = 2022) AS "2022" FROM stacked_data;']
`HAVING`: Filters the groups produced by GROUP BY based on conditions over aggregate values, applied after aggregation (unlike WHERE, which filters before)., Examples: ['SELECT city, count(*) FROM addresses GROUP BY city HAVING count(*) >= 50;', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY city, street_name HAVING avg(income) > 2 * median(income);']
`DESCRIBE`: Shows the schema of a table or of a query's result set, listing column names, types, and nullability., Examples: ['DESCRIBE tbl;', 'DESCRIBE SELECT * FROM tbl;']
`INSERT`: Inserts new rows into a table, either from literal VALUES or from a query; supports column lists, BY POSITION / BY NAME matching, ON CONFLICT handling, and RETURNING clauses., Examples: ['INSERT INTO tbl VALUES (1), (2), (3);', 'INSERT INTO tbl SELECT * FROM other_tbl;', 'INSERT INTO tbl (i) VALUES (1), (2), (3);', 'INSERT INTO tbl (i) VALUES (1), (DEFAULT), (3);', 'INSERT OR IGNORE INTO tbl (i) VALUES (1);', 'INSERT OR REPLACE INTO tbl (i) VALUES (1);', 'INSERT INTO tbl BY POSITION VALUES (5, 42);', 'INSERT INTO tbl BY NAME (SELECT 42 AS b, 32 AS a);', 'INSERT INTO tbl VALUES (1, 84) ON CONFLICT DO NOTHING;', 'INSERT INTO tbl VALUES (1, 84) ON CONFLICT DO UPDATE SET j = EXCLUDED.j;', 'INSERT INTO tbl (j, i) VALUES (168, 1) ON CONFLICT DO UPDATE SET j = EXCLUDED.j;', 'INSERT INTO tbl BY NAME (SELECT 84 AS j, 1 AS i) ON CONFLICT DO UPDATE SET j = EXCLUDED.j;', 'INSERT INTO t1 SELECT 42 RETURNING *;', 'INSERT INTO t2 SELECT 2 AS i, 3 AS j RETURNING *, i * j AS i_times_j;', "CREATE TABLE t3 (i INTEGER PRIMARY KEY, j INTEGER); CREATE SEQUENCE 't3_key'; INSERT INTO t3 SELECT nextval('t3_key') AS i, 42 AS j UNION ALL SELECT nextval('t3_key') AS i, 43 AS j RETURNING *;"]
`VALUES`: Specifies a set of literal rows that can be used as a stand-alone result, as a derived table in FROM, or as the source of an INSERT or CREATE TABLE ... AS., Examples: ["VALUES ('Amsterdam', 1), ('London', 2);", "SELECT * FROM (VALUES ('Amsterdam', 1), ('London', 2)) cities(name, id);", "INSERT INTO cities VALUES ('Amsterdam', 1), ('London', 2);", "CREATE TABLE cities AS SELECT * FROM (VALUES ('Amsterdam', 1), ('London', 2)) cities(name, id);"]
`DELETE`: Removes rows from a table that satisfy an optional WHERE condition; without a condition all rows are removed (TRUNCATE is an equivalent shorthand for deleting all rows)., Examples: ['DELETE FROM tbl WHERE i = 2;', 'DELETE FROM tbl;', 'TRUNCATE tbl;']
`CALL`: Invokes a table function and returns its result set, e.g. built-in metadata or pragma functions., Examples: ['CALL duckdb_functions();', "CALL pragma_table_info('pg_am');"]
`CREATE SCHEMA`: Creates a new schema in the catalog; entries within it are referenced with the schema name as a prefix., Examples: ['CREATE SCHEMA s1;', 'CREATE SCHEMA IF NOT EXISTS s2;', 'CREATE TABLE s1.t (id INTEGER PRIMARY KEY, other_id INTEGER);', 'CREATE TABLE s2.t (id INTEGER PRIMARY KEY, j VARCHAR);', 'SELECT * FROM s1.t s1t, s2.t s2t WHERE s1t.other_id = s2t.id;']
`SAMPLE`: Returns a random subset of rows from a query, specified either as a percentage or a fixed number of rows, optionally with a sampling method such as bernoulli., Examples: ['SELECT * FROM addresses USING SAMPLE 1%;', 'SELECT * FROM addresses USING SAMPLE 1% (bernoulli);', 'SELECT * FROM (SELECT * FROM addresses) USING SAMPLE 10 ROWS;']
`CREATE VIEW`: Defines a named view over a query; the query is re-evaluated whenever the view is referenced, and column names can optionally be supplied., Examples: ['CREATE VIEW v1 AS SELECT * FROM tbl;', 'CREATE OR REPLACE VIEW v1 AS SELECT 42;', 'CREATE VIEW v1(a) AS SELECT 42;']
`COPY`: Transfers data between tables (or query results) and files in formats such as CSV, Parquet, or JSON, and can also copy the contents of one attached database to another., Examples: ["COPY lineitem FROM 'lineitem.csv';", "COPY lineitem FROM 'lineitem.csv' (DELIMITER '|');", "COPY lineitem FROM 'lineitem.pq' (FORMAT PARQUET);", "COPY lineitem FROM 'lineitem.json' (FORMAT JSON, AUTO_DETECT true);", "COPY lineitem TO 'lineitem.csv' (FORMAT CSV, DELIMITER '|', HEADER);", "COPY (SELECT l_orderkey, l_partkey FROM lineitem) TO 'lineitem.parquet' (COMPRESSION ZSTD);", 'COPY FROM DATABASE db1 TO db2;', 'COPY FROM DATABASE db1 TO db2 (SCHEMA);']
`QUALIFY`: Filters the results of window functions, analogous to HAVING for aggregates, and may reference window function aliases directly., Examples: ['SELECT schema_name, function_name, row_number() OVER (PARTITION BY schema_name ORDER BY function_name) AS function_rank FROM duckdb_functions() QUALIFY row_number() OVER (PARTITION BY schema_name ORDER BY function_name) < 3;', 'SELECT schema_name, function_name, row_number() OVER (PARTITION BY schema_name ORDER BY function_name) AS function_rank FROM duckdb_functions() QUALIFY function_rank < 3;', 'SELECT schema_name, function_name, row_number() OVER my_window AS function_rank FROM duckdb_functions() WINDOW my_window AS (PARTITION BY schema_name ORDER BY function_name) QUALIFY row_number() OVER my_window < 3;', 'SELECT schema_name, function_name, row_number() OVER my_window AS function_rank FROM duckdb_functions() WINDOW my_window AS (PARTITION BY schema_name ORDER BY function_name) QUALIFY function_rank < 3;']
`SET VARIABLE`: Assigns a value to a SQL-level variable that can later be read with the getvariable function; reassignment overwrites the previous value and undefined variables read as NULL., Examples: ['SET VARIABLE my_var = 30;', "SELECT 20 + getvariable('my_var') AS total;", 'SET VARIABLE my_var = 100;', "SET VARIABLE my_date = DATE '2018-07-13';", "SET VARIABLE my_string = 'Hello world';", "SET VARIABLE my_map = MAP {{'k1': 10, 'k2': 20}};", "SELECT getvariable('undefined_var') AS result;", "SET VARIABLE column_to_exclude = 'col1';", 'CREATE TABLE tbl AS SELECT 12 AS col0, 34 AS col1, 56 AS col2;', "SELECT COLUMNS(c -> c != getvariable('column_to_exclude')) FROM tbl;"]
`PIVOT`: Turns distinct values of a column into new columns, aggregating the remaining data with the given USING expression and optionally grouping by other columns., Examples: ['PIVOT Cities ON Year USING sum(Population);', 'PIVOT Cities ON Year USING first(Population);', 'PIVOT Cities ON Year USING sum(Population) GROUP BY Country;', 'PIVOT Cities ON Year IN (2000, 2010) USING sum(Population) GROUP BY Country;', 'PIVOT Cities ON Country, Name USING sum(Population);', "PIVOT Cities ON Country || '_' || Name USING sum(Population);", 'PIVOT Cities ON Year USING sum(Population) AS total, max(Population) AS max GROUP BY Country;', 'PIVOT Cities ON Year USING sum(Population) GROUP BY Country, Name;', 'SELECT * FROM (PIVOT Cities ON Year USING sum(Population) GROUP BY Country) pivot_alias;']
`INSTALL`: Downloads an extension so that it can subsequently be loaded, optionally from an alternative repository such as community., Examples: ['INSTALL httpfs;', 'INSTALL h3 FROM community;']
`ANALYZE`: Recomputes table statistics used by the query optimizer, which can improve join order planning., Examples: ['ANALYZE;']
`SUMMARIZE`: Computes summary statistics (such as min, max, approximate unique count, and null percentage) for each column of a table or query result., Examples: ['SUMMARIZE tbl;', 'SUMMARIZE SELECT * FROM tbl;']
`UNPIVOT`: Stacks multiple columns into fewer columns of name/value pairs, turning columns into rows (the inverse of PIVOT)., Examples: ['UNPIVOT monthly_sales ON jan, feb, mar, apr, may, jun INTO NAME month VALUE sales;', 'UNPIVOT monthly_sales ON COLUMNS(* EXCLUDE (empid, dept)) INTO NAME month VALUE sales;', 'UNPIVOT monthly_sales ON (jan, feb, mar) AS q1, (apr, may, jun) AS q2 INTO NAME quarter VALUE month_1_sales, month_2_sales, month_3_sales;', 'WITH unpivot_alias AS ( UNPIVOT monthly_sales ON COLUMNS(* EXCLUDE (empid, dept)) INTO NAME month VALUE sales ) SELECT * FROM unpivot_alias;', 'FROM monthly_sales UNPIVOT ( sales FOR month IN (jan, feb, mar, apr, may, jun) );', 'FROM monthly_sales UNPIVOT ( (month_1_sales, month_2_sales, month_3_sales) FOR quarter IN ((jan, feb, mar) AS q1, (apr, may, jun) AS q2) );']
`WINDOW`: Defines one or more named window specifications in a SELECT statement, which multiple window functions can then reference via OVER., Examples: ['SELECT SUM(salary) OVER my_window, AVG(salary) OVER my_window FROM employees WINDOW my_window AS (PARTITION BY department ORDER BY hire_date);', 'SELECT employee_id, first_value(name) OVER recent_hires FROM employees WINDOW recent_hires AS (ORDER BY hire_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW);']
`OFFSET`: Skips a specified number of rows at the beginning of the result set, typically combined with LIMIT for pagination., Examples: ['SELECT * FROM addresses LIMIT 5;', 'SELECT * FROM addresses LIMIT 5 OFFSET 5;', 'SELECT city, count(*) AS population FROM addresses GROUP BY city ORDER BY population DESC LIMIT 5;']
`OFFSET`: Skips a specified number of rows at the beginning of the result set, typically combined with LIMIT for pagination., Examples: ['SELECT * FROM addresses LIMIT 5;', 'SELECT * FROM addresses LIMIT 5 OFFSET 5;', 'SELECT city, count(*) AS population FROM addresses GROUP BY city ORDER BY population DESC LIMIT 5;']
`CREATE INDEX`: Creates an index on one or more columns (or expressions) of a table to speed up lookups; UNIQUE additionally enforces uniqueness of the indexed values., Examples: ['CREATE UNIQUE INDEX films_id_idx ON films (id);', 'CREATE INDEX s_idx ON films (revenue);', 'CREATE INDEX gy_idx ON films (genre, year);', 'CREATE INDEX i_index ON integers ((j + k));']
`CREATE TYPE`: Defines a new user-defined type, such as an ENUM, STRUCT, UNION, or an alias for an existing type., Examples: ["CREATE TYPE mood AS ENUM ('happy', 'sad', 'curious');", 'CREATE TYPE many_things AS STRUCT(k INTEGER, l VARCHAR);', 'CREATE TYPE one_thing AS UNION(number INTEGER, string VARCHAR);', 'CREATE TYPE x_index AS INTEGER;']
`COLLATE`: Applies collation rules to string comparison and ordering, e.g. case-insensitive (NOCASE), accent-insensitive (NOACCENT), combined, or locale-specific collations; can be set per expression, per column, or globally., Examples: ["SELECT 'hello' = 'hElLO'; -- Default collation", "SELECT 'hello' COLLATE NOCASE = 'hElLO'; -- Case insensitive collation", "SELECT 'hello' = 'hëllo'; -- Default collation", "SELECT 'hello' COLLATE NOACCENT = 'hëllo'; -- Accent insensitive collation", "SELECT 'hello' COLLATE NOCASE.NOACCENT = 'hElLÖ'; -- Both case and accent insensitive", "SET default_collation = NOCASE; SELECT 'hello' = 'HeLlo'; -- Setting global collation", "CREATE TABLE names (name VARCHAR COLLATE NOACCENT); INSERT INTO names VALUES ('hännes'); SELECT name FROM names WHERE name = 'hannes'; -- Column-specific collation", 'SELECT names.name AS name, other_names.name AS other_name FROM names, other_names WHERE names.name COLLATE NOACCENT.NOCASE = other_names.name COLLATE NOACCENT.NOCASE; -- Combine collations for comparison', "CREATE TABLE strings (s VARCHAR COLLATE DE); INSERT INTO strings VALUES ('Gabel'), ('Göbel'), ('Goethe'), ('Goldmann'), ('Göthe'), ('Götz'); SELECT * FROM strings ORDER BY s; -- Using ICU collation"]
`BEGIN TRANSACTION`: Starts a new explicit transaction whose changes are applied atomically on COMMIT or discarded on ROLLBACK., Examples: ['BEGIN TRANSACTION;']
`CREATE SEQUENCE`: Creates a sequence generator that produces a series of numbers, with configurable start, increment, bounds, and cycling; values are obtained with nextval and currval., Examples: ['CREATE SEQUENCE serial;', 'CREATE SEQUENCE serial START 101;', 'CREATE SEQUENCE serial START WITH 1 INCREMENT BY 2;', 'CREATE SEQUENCE serial START WITH 99 INCREMENT BY -1 MAXVALUE 99;', 'CREATE SEQUENCE serial START WITH 1 MAXVALUE 10;', 'CREATE SEQUENCE serial START WITH 1 MAXVALUE 10 CYCLE;', 'CREATE OR REPLACE SEQUENCE serial;', 'CREATE SEQUENCE IF NOT EXISTS serial;', 'CREATE SEQUENCE id_sequence START 1;', "SELECT nextval('serial') AS nextval;", "SELECT currval('serial') AS currval;"]
`CREATE MACRO`: Defines a reusable scalar or table macro (function) from an expression or query, supporting default parameters, overloads, and CREATE FUNCTION as an alias., Examples: ['CREATE MACRO add(a, b) AS a + b;', 'CREATE MACRO ifelse(a, b, c) AS CASE WHEN a THEN b ELSE c END;', 'CREATE MACRO one() AS (SELECT 1);', 'CREATE MACRO plus_one(a) AS (WITH cte AS (SELECT 1 AS a) SELECT cte.a + a FROM cte);', 'CREATE FUNCTION main.my_avg(x) AS sum(x) / count(x);', 'CREATE MACRO add_default(a, b := 5) AS a + b;', 'CREATE MACRO arr_append(l, e) AS list_concat(l, list_value(e));', "CREATE MACRO static_table() AS TABLE SELECT 'Hello' AS column1, 'World' AS column2;", 'CREATE MACRO dynamic_table(col1_value, col2_value) AS TABLE SELECT col1_value AS column1, col2_value AS column2;', "CREATE OR REPLACE TEMP MACRO dynamic_table(col1_value, col2_value) AS TABLE SELECT col1_value AS column1, col2_value AS column2 UNION ALL SELECT 'Hello' AS col1_value, 456 AS col2_value;", 'CREATE MACRO get_users(i) AS TABLE SELECT * FROM users WHERE uid IN (SELECT unnest(i));', 'SELECT * FROM get_users([1, 5]);', 'CREATE MACRO checksum(table_name) AS TABLE SELECT bit_xor(md5_number(COLUMNS(*)::VARCHAR)) FROM query_table(table_name);', "SELECT * FROM checksum('tbl');", 'CREATE MACRO add_x (a, b) AS a + b, (a, b, c) AS a + b + c;', 'SELECT add_x(21, 42) AS two_args, add_x(21, 42, 21) AS three_args;', 'CREATE MACRO add(a, b) AS a + b;', 'SELECT add(1, 2) AS x;', 'SELECT add_default(37);', 'SELECT add_default(40, b := 2) AS x;', 'CREATE MACRO triple_add(a, b := 5, c := 10) AS a + b + c;', 'SELECT triple_add(40, c := 1, b := 1) AS x;']
`VACUUM`: Provided mostly for PostgreSQL compatibility; VACUUM ANALYZE recomputes table statistics, while plain VACUUM is a no-op and VACUUM FULL is not supported., Examples: ['VACUUM;', 'VACUUM ANALYZE;', 'VACUUM ANALYZE memory.main.my_table(my_column);', 'VACUUM FULL; -- error']
`RESET`: Restores a configuration option to its default value; SET changes an option's value, optionally at GLOBAL or SESSION scope, and current_setting reads it., Examples: ["SET memory_limit = '10GB';", 'SET threads = 1;', 'SET threads TO 1;', 'RESET threads;', "SELECT current_setting('threads');", "SET GLOBAL search_path = 'db1,db2'", "SET SESSION default_collation = 'nocase';"]
`RESET`: Restores a configuration option to its default value; SET changes an option's value, optionally at GLOBAL or SESSION scope, and current_setting reads it., Examples: ["SET memory_limit = '10GB';", 'SET threads = 1;', 'SET threads TO 1;', 'RESET threads;', "SELECT current_setting('threads');", "SET GLOBAL search_path = 'db1,db2'", "SET SESSION default_collation = 'nocase';"]
`EXPLAIN ANALYZE`: Executes the query and shows the physical query plan annotated with actual run times and row counts; plain EXPLAIN shows the plan without executing it., Examples: ['EXPLAIN SELECT * FROM table_name;', 'EXPLAIN ANALYZE SELECT * FROM table_name;']
`EXPLAIN ANALYZE`: Executes the query and shows the physical query plan annotated with actual run times and row counts; plain EXPLAIN shows the plan without executing it., Examples: ['EXPLAIN SELECT * FROM table_name;', 'EXPLAIN ANALYZE SELECT * FROM table_name;']
`CUBE`: Groups by all possible combinations of the listed columns within GROUP BY; related forms are ROLLUP (hierarchical prefixes) and GROUPING SETS (explicit combinations)., Examples: ['SELECT city, street_name, avg(income) FROM addresses GROUP BY GROUPING SETS ((city, street_name), (city), (street_name), ());', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY CUBE (city, street_name);', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ROLLUP (city, street_name);', 'SELECT course, type, count(*) FROM students GROUP BY GROUPING SETS ((course, type), course, type, ());', 'WITH days AS ( SELECT year("generate_series") AS y, quarter("generate_series") AS q, month("generate_series") AS m FROM generate_series(DATE \'2023-01-01\', DATE \'2023-12-31\', INTERVAL 1 DAY) ) SELECT y, q, m, GROUPING_ID(y, q, m) AS "grouping_id()" FROM days GROUP BY GROUPING SETS ((y, q, m), (y, q), (y), ()) ORDER BY y, q, m;']
`CUBE`: Groups by all possible combinations of the listed columns within GROUP BY; related forms are ROLLUP (hierarchical prefixes) and GROUPING SETS (explicit combinations)., Examples: ['SELECT city, street_name, avg(income) FROM addresses GROUP BY GROUPING SETS ((city, street_name), (city), (street_name), ());', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY CUBE (city, street_name);', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ROLLUP (city, street_name);', 'SELECT course, type, count(*) FROM students GROUP BY GROUPING SETS ((course, type), course, type, ());', 'WITH days AS ( SELECT year("generate_series") AS y, quarter("generate_series") AS q, month("generate_series") AS m FROM generate_series(DATE \'2023-01-01\', DATE \'2023-12-31\', INTERVAL 1 DAY) ) SELECT y, q, m, GROUPING_ID(y, q, m) AS "grouping_id()" FROM days GROUP BY GROUPING SETS ((y, q, m), (y, q), (y), ()) ORDER BY y, q, m;']
`CUBE`: Groups by all possible combinations of the listed columns within GROUP BY; related forms are ROLLUP (hierarchical prefixes) and GROUPING SETS (explicit combinations)., Examples: ['SELECT city, street_name, avg(income) FROM addresses GROUP BY GROUPING SETS ((city, street_name), (city), (street_name), ());', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY CUBE (city, street_name);', 'SELECT city, street_name, avg(income) FROM addresses GROUP BY ROLLUP (city, street_name);', 'SELECT course, type, count(*) FROM students GROUP BY GROUPING SETS ((course, type), course, type, ());', 'WITH days AS ( SELECT year("generate_series") AS y, quarter("generate_series") AS q, month("generate_series") AS m FROM generate_series(DATE \'2023-01-01\', DATE \'2023-12-31\', INTERVAL 1 DAY) ) SELECT y, q, m, GROUPING_ID(y, q, m) AS "grouping_id()" FROM days GROUP BY GROUPING SETS ((y, q, m), (y, q), (y), ()) ORDER BY y, q, m;']
`ALTER VIEW`: Modifies an existing view in the catalog, e.g. renaming it., Examples: ['ALTER VIEW v1 RENAME TO v2;']
`UPDATE EXTENSIONS`: Updates installed extensions to their latest available version, either all of them or only the named ones., Examples: ['UPDATE EXTENSIONS;', 'UPDATE EXTENSIONS (name_a, name_b, name_c);']
`CHECKPOINT`: Synchronizes the write-ahead log with the database file, persisting pending changes; FORCE CHECKPOINT aborts other transactions to do so immediately., Examples: ['CHECKPOINT;', 'CHECKPOINT file_db;', 'FORCE CHECKPOINT;']
`COMMENT ON`: Attaches a comment to a catalog entry (table, column, view, index, sequence, type, or macro); setting the comment to NULL removes it., Examples: ["COMMENT ON TABLE test_table IS 'very nice table';", "COMMENT ON COLUMN test_table.test_table_column IS 'very nice column';", "COMMENT ON VIEW test_view IS 'very nice view';", "COMMENT ON INDEX test_index IS 'very nice index';", "COMMENT ON SEQUENCE test_sequence IS 'very nice sequence';", "COMMENT ON TYPE test_type IS 'very nice type';", "COMMENT ON MACRO test_macro IS 'very nice macro';", "COMMENT ON MACRO TABLE test_table_macro IS 'very nice table macro';", 'COMMENT ON TABLE test_table IS NULL;']
`IMPORT DATABASE`: Loads the contents of a database previously written with EXPORT DATABASE from a directory; EXPORT DATABASE writes the schema and data out in CSV or Parquet format., Examples: ["EXPORT DATABASE 'target_directory';", "EXPORT DATABASE 'target_directory' (FORMAT CSV, DELIMITER '|');", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET);", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET, COMPRESSION ZSTD, ROW_GROUP_SIZE 100_000);", "IMPORT DATABASE 'source_directory';", "PRAGMA import_database('source_directory');"]
`IMPORT DATABASE`: Loads the contents of a database previously written with EXPORT DATABASE from a directory; EXPORT DATABASE writes the schema and data out in CSV or Parquet format., Examples: ["EXPORT DATABASE 'target_directory';", "EXPORT DATABASE 'target_directory' (FORMAT CSV, DELIMITER '|');", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET);", "EXPORT DATABASE 'target_directory' (FORMAT PARQUET, COMPRESSION ZSTD, ROW_GROUP_SIZE 100_000);", "IMPORT DATABASE 'source_directory';", "PRAGMA import_database('source_directory');"]
DuckDB Types:
`BOOLEAN`: The `BOOLEAN` type represents a statement of truth, "true" or "false", with the possibility of being "unknown", represented by `NULL` in SQL., Examples: ['> SELECT true, false, NULL::BOOLEAN;', '-- Outputs the three possible values for BOOLEAN: true, false, NULL.', 'CREATE TABLE example (is_active BOOLEAN);', '-- Create a table with a BOOLEAN column.', 'INSERT INTO example VALUES (true), (false), (NULL);', '-- Insert BOOLEAN values, including NULL.', 'SELECT * FROM example WHERE is_active AND is_verified;', '-- Filters rows where both conditions are true.', 'UPDATE example SET is_active = false WHERE condition;', '-- Update rows to set the BOOLEAN field to false.']
`STRUCT`: The `STRUCT` data type in SQL is used to create a column that contains an ordered list of columns, referred to as entries, which are accessed using named keys. This type is ideal for nesting multiple columns into a single column, allowing a structured and consistent data schema across all rows., Examples: ["SELECT struct_pack(key1 := 'value1', key2 := 42) AS s;", "SELECT {{'key1': 'value1', 'key2': 42}} AS s;", "SELECT a.x FROM (SELECT {{'x': 1, 'y': 2, 'z': 3}} AS a);", "SELECT struct_insert({{'a': 1, 'b': 2, 'c': 3}}, d := 4) AS s;", 'CREATE TABLE t1 (s STRUCT(v VARCHAR, i INTEGER));', "INSERT INTO t1 VALUES (row('a', 42));", "SELECT a.* FROM (SELECT {{'x': 1, 'y': 2, 'z': 3}} AS a);", "SELECT struct_extract({{'x space': 1, 'y': 2, 'z': 3}}, 'x space');"]
`FLOAT`: The FLOAT data type, also known by aliases FLOAT4, REAL, or float, represents a single precision floating-point number, facilitating approximate calculations and efficient handling of numerical data with precision typically up to 6 decimal digits and a range of at least 1E-37 to 1E+37., Examples: ['-- Example: Creating a table with a FLOAT column\nCREATE TABLE example_table (id INTEGER, value FLOAT);', '-- Example: Inserting values into a FLOAT column\nINSERT INTO example_table VALUES (1, 3.14), (2, 2.718);', '-- Example: Performing arithmetic operations with FLOAT values\nSELECT id, value * 2.0::FLOAT AS doubled_value FROM example_table;', '-- Example: Casting a numeric value to FLOAT\nSELECT CAST(100 AS FLOAT) AS float_value;', '-- Example: Using FLOAT values in a mathematical function\nSELECT SQRT(value) FROM example_table WHERE value > 0;', '-- Example: Comparing FLOAT values\nSELECT * FROM example_table WHERE value > 3.0::FLOAT;']
`DATE`: The `DATE` type in SQL is used to store calendar dates without time components, representing a year, month, and day as accurate information for querying and managing date-related data., Examples: ["-- Add 5 days to a specific date\\nSELECT DATE '1992-03-22' + 5; -- Result: 1992-03-27\\n", "-- Subtract one date from another to get the number of days between them\\nSELECT DATE '1992-03-27' - DATE '1992-03-22'; -- Result: 5\\n", '-- Get the current date at the start of the transaction\\nSELECT current_date; -- Example result: 2022-10-08\\n', "-- Add an interval of 2 months to a specific date\\nSELECT date_add(DATE '1992-09-15', INTERVAL 2 MONTH); -- Result: 1992-11-15\\n", "-- Find the difference in months between two dates\\nSELECT date_diff('month', DATE '1992-09-15', DATE '1992-11-14'); -- Result: 2\\n", "-- Extract the year from a specific date\\nSELECT date_part('year', DATE '1992-09-20'); -- Result: 1992\\n", "-- Get the (English) name of the weekday from a specific date\\nSELECT dayname(DATE '1992-09-20'); -- Result: Sunday\\n", "-- Convert a date to a string format\\nSELECT strftime(date '1992-01-01', '%a, %-d %B %Y'); -- Result: Wed, 1 January 1992"]
`TIMESTAMP_S`: The TIMESTAMP_S data type represents a timestamp with second precision, ignoring any sub-second parts and time zones., Examples: ["SELECT TIMESTAMP_S '1992-09-20 11:30:00.123456789'; -- Output: 1992-09-20 11:30:00", "SELECT TIMESTAMP_S '2000-01-01 00:00:00'; -- Output: 2000-01-01 00:00:00", "SELECT TIMESTAMP_S '2023-10-05 18:44:03.987654321'; -- Output: 2023-10-05 18:44:03"]
`DECIMAL`: The DECIMAL data type, also known as NUMERIC or DEC, allows for the representation of exact fixed-point decimal numbers, providing precise control over the number of digits and the digits after the decimal point., Examples: ['CREATE TABLE salaries (\\n employee_id INTEGER,\\n base_salary DECIMAL(10, 2)\\n);', 'INSERT INTO salaries (employee_id, base_salary) VALUES\\n (1, 50000.00),\\n (2, 65000.50);', 'SELECT employee_id, base_salary\\nFROM salaries\\nWHERE base_salary > DECIMAL(60000, 2);', 'UPDATE salaries\\nSET base_salary = base_salary + DECIMAL(5000.00, 2)\\nWHERE employee_id = 1;', 'SELECT CAST(99 AS DECIMAL(10, 2));']
`BIGINT`: The `BIGINT` data type is an 8-byte integer that can store large integer values suitable for handling significant quantities or high precision integer data., Examples: ['CREATE TABLE example_table (id BIGINT PRIMARY KEY, count BIGINT, reference_id BIGINT);', "SELECT * FROM parquet_metadata('file.parquet') WHERE row_group_id = 1;", 'ALTER TABLE orders ADD COLUMN order_count BIGINT DEFAULT 0;', 'UPDATE employee SET salary = salary + 1000 WHERE employee_id = 1001;', 'SELECT store_id, SUM(sales) AS total_sales FROM transactions GROUP BY store_id;', 'CREATE SEQUENCE order_sequence START WITH 1000 INCREMENT BY 1 MINVALUE 100 MAXVALUE 10000 NO CYCLE;']
`LIST`: A `LIST` column is a flexible, ordered sequence of data values of the same type, which can vary in length among rows and can include any uniform data type, allowing for complex nested data structures., Examples: ['SELECT [1, 2, 3]; -- Creates a static list of integers', "SELECT ['duck', 'goose', NULL, 'heron']; -- Creates a list of strings containing a NULL value", 'SELECT list_value(1, 2, 3); -- Uses the list_value function to create a list of integers', 'CREATE TABLE list_table (int_list INTEGER[], varchar_list VARCHAR[]); -- Defines a table with integer and varchar lists', "SELECT (['a', 'b', 'c'])[3]; -- Retrieves the third element from a list", 'SELECT list_slice([1, 2, 3, 4, 5], 2, 4); -- Extracts a sublist from the main list']
`SMALLINT`: The SMALLINT type, with aliases such as short, int2, smallint, and int16, represents a signed two-byte integer that can store whole numbers ranging from -32768 to 32767., Examples: ['CREATE TABLE test_table (id SMALLINT);', 'INSERT INTO test_table (id) VALUES (100);', 'SELECT * FROM test_table WHERE id BETWEEN -100 AND 100;', 'ALTER TABLE test_table ADD COLUMN new_column SMALLINT;', 'UPDATE test_table SET id = id + 1 WHERE id < 32767;']
`INTERVAL`: The INTERVAL data type represents a period of time that can be measured in months, days, microseconds, or a combination of these units, and is typically used to add or subtract to DATE, TIMESTAMP, TIMESTAMPTZ, or TIME values., Examples: ["SELECT INTERVAL '1 month 1 day'; -- Returns an interval representing 1 month and 1 day", "SELECT DATE '2000-01-01' + INTERVAL 1 YEAR; -- Adds 1 year to the specified date", "SELECT TIMESTAMP '2000-02-06 12:00:00' - TIMESTAMP '2000-01-01 11:00:00'; -- Returns interval of 36 days 1 hour", "SELECT INTERVAL '48:00:00'::INTERVAL; -- Converts a time string to microseconds interval representing 48 hours", "SELECT (DATE '2020-01-01' + INTERVAL 30 DAYS) = (DATE '2020-01-01' + INTERVAL 1 MONTH); -- Compares intervals by their conversion to microseconds"]
`VARCHAR`: `VARCHAR` is a versatile data type used to store variable-length character strings, accommodating a wide range of text and string data without enforcing a specific length., Examples: ['CREATE TABLE people (name VARCHAR, age INTEGER);', "INSERT INTO documents (text) VALUES ('This is a VARCHAR example text.');", "SELECT * FROM employees WHERE department = 'Engineering';", 'ALTER TABLE students ADD COLUMN email VARCHAR;', "UPDATE orders SET status = 'Shipped' WHERE order_id = 102;", "COPY products TO 'products.csv' DELIMITER ',' HEADER;"]
`VARINT`: VARINT is an arbitrary-precision integer data type capable of storing very large numbers beyond the limits of standard integer types., Examples: ['CREATE TABLE example_table (id VARINT);', 'INSERT INTO example_table (id) VALUES (123456789123456789123456789);', 'SELECT id FROM example_table WHERE id < 999999999999999999999999999;']
`TINYINT`: TINYINT is a signed one-byte integer type that can store whole numbers ranging from -128 to 127, often used to save storage space when values are known to fall within this small range., Examples: ["SELECT CAST('123' AS TINYINT);", 'INSERT INTO my_table (x) VALUES (CAST(100 AS TINYINT));', 'UPDATE my_table SET x = CAST(50 AS TINYINT) WHERE id = 1;', 'SELECT * FROM my_table WHERE x = CAST(-50 AS TINYINT);', 'CREATE TABLE example (id TINYINT);']
`INTEGER`: The INTEGER data type, with aliases such as int, signed, int4, int32, integer, and integral, represents whole numbers and is commonly used to store numeric data without fractional components., Examples: ['-- Assigning integer values to columns in a CREATE TABLE statement\nCREATE TABLE my_table (id INTEGER, age INTEGER);', '-- Inserting integer values as literals within an INSERT statement\nINSERT INTO my_table VALUES (1, 25);', '-- Using integer operations in a SELECT statement\nSELECT id + 10 AS new_id FROM my_table;', '-- Casting a float to an integer\nSELECT CAST(3.7 AS INTEGER) AS whole_number;', '-- Defining a column to only accept non-negative integers using a CHECK constraint\nCREATE TABLE my_table (id INTEGER CHECK (id >= 0));', '-- Using the INTEGER type in a primary key definition\nCREATE TABLE users (user_id INTEGER PRIMARY KEY, username VARCHAR);', '-- Updating integer columns\nUPDATE my_table SET age = age + 1 WHERE id = 1;', '-- Comparing integer values in a WHERE clause\nSELECT * FROM my_table WHERE age > 20;']
`ENUM`: The Enum data type represents a dictionary encoding structure that enumerates all possible unique string values of a column, allowing for efficient storage and query execution by storing only numerical references to the strings., Examples: ["CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');", 'CREATE TYPE birds AS ENUM (SELECT my_varchar FROM my_inputs);', 'CREATE TABLE person (name TEXT, current_mood mood);', "INSERT INTO person VALUES ('Pedro', 'happy'), ('Pagliacci', 'sad');", 'SELECT enum_range(NULL::mood) AS mood_values;', 'DROP TYPE mood;']
`UBIGINT`: UBIGINT, also known as 'uint64' or 'ubigint', is an unsigned 64-bit integer data type that can store large whole numbers from 0 to 18,446,744,073,709,551,615. It is commonly used for columns that require large non-negative integer values, especially where negative values are not applicable., Examples: ['CREATE TABLE huge_numbers (id UBIGINT);', 'INSERT INTO huge_numbers VALUES (4294967296);', 'SELECT id FROM huge_numbers WHERE id > 1000000;', 'ALTER TABLE huge_numbers ADD COLUMN new_value UBIGINT;', 'CREATE VIEW large_ids AS SELECT id FROM huge_numbers WHERE id > 100000000;']
`BLOB`: The BLOB (Binary Large Object) type represents a variable-length binary data object, used for storing arbitrary binary data in the database, such as images or files, without any interpretation of its contents., Examples: ["-- Create a BLOB with a single byte\\nSELECT '\\xAA'::BLOB;\\n-- Result: \\xAA\\n\\n-- Create a BLOB with multiple bytes\\nSELECT '\\xAA\\xAB\\xAC'::BLOB;\\n-- Result: \\xAA\\xAB\\xAC\\n\\n-- Concatenate two BLOB values\\nSELECT '\\xAA'::BLOB || '\\xBB'::BLOB;\\n-- Result: \\xAABB\\n\\n-- Convert a BLOB to a hexadecimal string\\nSELECT hex('\\xAA\\xBB'::BLOB);\\n-- Result: AABB\\n\\n-- Decode a BLOB to a string, ensuring it is valid UTF-8\\nSELECT decode('\\xC3\\xBC'::BLOB);\\n-- Result: ü\\n\\n-- Read a BLOB from a file\\nSELECT read_blob('myfile.bin');\\n-- Result: Contents of 'myfile.bin' as a BLOB"]
`HUGEINT`: The `HUGEINT` data type, also known as `INT128`, is a signed sixteen-byte integer that stores whole numbers ranging from -170141183460469231731687303715884105728 to 170141183460469231731687303715884105727, providing a broad range suitable for large numerical computations., Examples: ['-- Creating a table with a HUGEINT column\\nCREATE TABLE example_table (id HUGEINT, value HUGEINT);', '-- Inserting values into a HUGEINT column\\nINSERT INTO example_table (id, value) VALUES (1, 170141183460469231731687303715884105727);', '-- Performing arithmetic operations on HUGEINT\\nSELECT value + 10 FROM example_table WHERE id = 1;', "-- Using HUGEINT in a function\\nSELECT md5_number('12345')::HUGEINT;", '-- Comparing HUGEINT values\\nSELECT * FROM example_table WHERE value > 1000000000000000000;']
`TIMESTAMP`: A TIMESTAMP value represents an instant in time, composed of a combination of a date (year, month, day) and a time (hour, minute, second, microsecond), stored with microsecond precision, and it can be manipulated using various functions and operators., Examples: ["SELECT TIMESTAMP '1992-09-20 11:30:00.123456';", "SELECT TIMESTAMP '1992-09-20 11:30:00' + INTERVAL 10 DAYS;", "SELECT TIMESTAMP '2023-07-18 17:45:00' - TIMESTAMP '2023-07-10 15:30:00';", "SELECT age(TIMESTAMP '2023-07-18 17:45:00', TIMESTAMP '2022-07-18 17:45:00');", "SELECT strftime(TIMESTAMP '2023-07-18 17:45:00', '%Y-%m-%d %H:%M:%S');", "SELECT extract('hour' FROM TIMESTAMP '2023-07-18 17:45:00');"]
`UNION`: The UNION data type is a nested type that holds one of multiple distinct values with a "tag" to identify the active type and can contain multiple uniquely tagged members of various types, akin to C++ std::variant or Rust's Enum., Examples: ["```sql\nCREATE TABLE tbl1 (u UNION(num INTEGER, str VARCHAR));\nINSERT INTO tbl1 VALUES (1), ('two'), (union_value(str := 'three'));\n```", "```sql\nSELECT union_extract(u, 'str') AS str\nFROM tbl1;\n```", '```sql\nSELECT u.str\nFROM tbl1;\n```', '```sql\nSELECT union_tag(u) AS t\nFROM tbl1;\n```']
`TIMESTAMP_MS`: The "TIMESTAMP_MS" data type represents timestamps with millisecond precision, defined without considering time zones., Examples: ["SELECT TIMESTAMP_MS '1992-09-20 11:30:00.123456789'; -- Produces output: 1992-09-20 11:30:00.123"]
`TIMESTAMP_NS`: `TIMESTAMP_NS` represents a timestamp with nanosecond precision, useful for high-resolution time data but ignores time zone information., Examples: ["SELECT TIMESTAMP_NS '1992-09-20 11:30:00.123456789';"]
`USMALLINT`: USMALLINT is an unsigned two-byte integer type with a range from 0 to 65535, used for storing non-negative whole numbers within this range., Examples: ['CREATE TABLE example_table (id USMALLINT, age USMALLINT);', 'INSERT INTO example_table (id, age) VALUES (100, 25);', 'SELECT * FROM example_table WHERE age < 30;']
`UINTEGER`: The `UINTEGER` data type is used to store unsigned 32-bit integer values, allowing for a range from 0 to 4,294,967,295 and is particularly useful when negative values are not needed and memory efficiency is a concern for large datasets., Examples: ['CREATE TABLE example_table (count UINTEGER);', 'INSERT INTO example_table VALUES (150), (2750), (4294967295);', 'SELECT * FROM example_table WHERE count > 1000;', 'ALTER TABLE example_table ADD COLUMN new_count UINTEGER DEFAULT 0;', 'UPDATE example_table SET count = count + 100 WHERE count < 4294967295;']
`UHUGEINT`: UHUGEINT, also known as uint128, is an unsigned 128-bit integer data type used for storing large non-negative whole numbers ranging from 0 to approximately 3.4 x 10^38., Examples: ['CREATE TABLE numbers (id UHUGEINT); -- Creates a table with a UHUGEINT column.', "INSERT INTO numbers VALUES (340282366920938463463374607431768211455); -- Inserts the maximum valid UHUGEINT value into the 'numbers' table.", "SELECT id FROM numbers WHERE id > 1000000; -- Selects records from the 'numbers' table where the UHUGEINT value is greater than 1,000,000."]
`TIME`: The `TIME` type represents a time of day, independent of a specific date, and is used to store and manipulate values consisting of hours, minutes, seconds, and fractional seconds., Examples: ["SELECT TIME '14:21:13';", "SELECT TIME '08:30:00' + INTERVAL 5 MINUTE;", "SELECT EXTRACT(HOUR FROM TIME '23:45:12');", 'SELECT MAKE_TIME(13, 30, 59.999);', 'SELECT CURRENT_TIME;']
`TIMESTAMP WITH TIME ZONE`: `TIMESTAMP WITH TIME ZONE` (or `TIMESTAMPTZ`) represents a point in time using a calendar date and time of day along with a time zone offset, allowing for time zone sensitive operations such as conversions and comparisons., Examples: ["SELECT TIMESTAMPTZ '2023-10-17 12:00:00+01';", "SELECT now() AT TIME ZONE 'UTC';", "SELECT TIMESTAMP '2023-10-17 10:00:00-07' AT TIME ZONE 'America/New_York';", "SELECT age(TIMESTAMPTZ '2005-10-17 12:00:00-07');", "SELECT TIMESTAMPTZ '2023-10-17 15:00:00+00' - TIMESTAMPTZ '2023-10-16 15:00:00+00';"]
`UUID`: The UUID data type is used to store universally unique identifiers as 128-bit values, formatted as 36-character strings with hexadecimal characters and dashes arranged in the pattern ⟨8 characters⟩-⟨4 characters⟩-⟨4 characters⟩-⟨4 characters⟩-⟨12 characters⟩., Examples: ['-- Create a table with a UUID column\nCREATE TABLE users (id UUID, name VARCHAR);', "-- Insert a new UUID value into the table\nINSERT INTO users (id, name) VALUES (gen_random_uuid(), 'Alice');", "-- Retrieve UUID values from a table\nSELECT id FROM users WHERE name = 'Alice';", '-- Generate and display a random UUID\nSELECT uuid();']
`DOUBLE`: The `DOUBLE` type, also known as `FLOAT8`, is a double-precision floating point number data type commonly used for storing large or precise decimal values in SQL queries., Examples: ['```sql\n-- Using DOUBLE to store and manipulate high-precision values\nCREATE TABLE sales_data (\n transaction_id INTEGER,\n sale_amount DOUBLE\n);\n\nINSERT INTO sales_data (transaction_id, sale_amount) VALUES (1, 1999.99);\nSELECT sale_amount * 1.05 AS total_after_tax FROM sales_data WHERE transaction_id = 1;\n```', '```sql\n-- Calculating the square root of a DOUBLE value\nSELECT sqrt(column_value) FROM my_table WHERE column_value > 0;\n```', '```sql\n-- Using DOUBLE in mathematical functions\nSELECT sin(column1), cos(column2) FROM my_numeric_table;\n```', '```sql\n-- Explicit casting of an INTEGER to DOUBLE for precision in arithmetic operations\nSELECT cast(my_integer_column AS DOUBLE) / 2 FROM my_table;\n```', '```sql\n-- Working with DOUBLE in spatial functions\nDOUBLE ST_Area (geometry) -- Computes the area of a geometry, returning a DOUBLE value as the area\n```', "```sql\n-- Using the DOUBLE type in JSON processing\nSELECT json_extract(my_json_column, '$.key')::DOUBLE FROM my_json_table;\n```"]
`UTINYINT`: An unsigned 8-bit integer type used to store whole numbers in the range of 0 to 255., Examples: ['CREATE TABLE example_table (column1 UTINYINT);', 'INSERT INTO example_table (column1) VALUES (200);', 'SELECT * FROM example_table WHERE column1 < 100;', '-- Attempting to store a negative number or a number greater than 255 will result in an error.', 'UPDATE example_table SET column1 = 255 WHERE column1 < 50;']
`NULL`: The `NULL` type in SQL represents a missing or unknown value, allowing for fields within a table to be uninitialized or absent in data., Examples: ['SELECT NULL = NULL;', 'SELECT NULL IS NULL;', "INSERT INTO table_name (column1, column2) VALUES (NULL, 'data');", "SELECT coalesce(NULL, 'default_value');", 'UPDATE table_name SET column1 = NULL WHERE condition;', "SELECT CASE WHEN column IS NULL THEN 'Value is NULL' ELSE column END FROM table_name;"]
`TIME WITH TIME ZONE`: The TIME WITH TIME ZONE (alias: TIMETZ) type represents the time of day with respect to a specific time zone, following the ISO 8601 format and allowing for time zone offsets., Examples: ["SELECT TIMETZ '1992-09-20 11:30:00.123456';", "SELECT TIMETZ '1992-09-20 11:30:00.123456-02:00';", "SELECT TIMETZ '1992-09-20 11:30:00.123456+05:30';"]
`BIT`: The `BIT` data type, also known as `BITSTRING`, represents variable-length strings consisting of 1s and 0s, suitable for operations such as bitwise manipulation., Examples: ["SELECT '10101'::BITSTRING & '10001'::BITSTRING AS result;", "SELECT bit_count('1101011'::BITSTRING) AS set_bits_count;", "SELECT bit_length('10101011'::BITSTRING) AS length_in_bits;", "SELECT octet_length('1101011'::BITSTRING) AS length_in_bytes;", "SELECT set_bit('0110010'::BITSTRING, 2, 0) AS updated_bitstring;"]
`MAP`: The MAP type is an ordered collection of key-value pairs, where keys are unique and can be of any type, allowing for diverse and flexible schema structures in databases., Examples: ["SELECT MAP {{'key1': 10, 'key2': 20, 'key3': 30}};", "SELECT map_from_entries([('key1', 10), ('key2', 20), ('key3', 30)]);", "SELECT MAP(['key1', 'key2', 'key3'], [10, 20, 30]);", 'SELECT MAP {{1: 42.001, 5: -32.1}};', "SELECT MAP {{['a', 'b']: [1.1, 2.2], ['c', 'd']: [3.3, 4.4]}};", 'CREATE TABLE tbl (col MAP(INTEGER, DOUBLE));', "SELECT MAP {{'key1': 5, 'key2': 43}}['key1'];", "SELECT MAP {{'key1': 5, 'key2': 43}}['key1'][1];", "SELECT MAP {{'key1': 5, 'key2': 43}}['key3'];", "SELECT element_at(MAP {{'key1': 5, 'key2': 43}}, 'key1');"]
`ARRAY`: The ARRAY data type stores fixed-size arrays where each element is of the same type, and it is suitable for representing ordered sequences of elements such as numerical vectors or nested arrays., Examples: ['SELECT array_value(1, 2, 3); -- Creates an array with elements 1, 2, and 3', 'CREATE TABLE example_table (id INTEGER, arr INTEGER[3]); -- Declares an array of three integers', 'SELECT id, arr[1] AS element FROM example_table; -- Retrieves the first element of the array', 'SELECT array_value(array_value(1, 2), array_value(3, 4), array_value(5, 6)); -- Creates a nested array using arrays as elements', 'INSERT INTO example_table VALUES (1, [1, 2, 3]), (2, [4, 5, 6]); -- Inserts rows with array values into a table', 'SELECT array_cosine_similarity(array_value(1.0, 2.0, 3.0), array_value(2.0, 3.0, 4.0)); -- Computes cosine similarity between two arrays of the same size', 'SELECT array_cross_product(array_value(1.0, 2.0, 3.0), array_value(2.0, 3.0, 4.0)); -- Computes the cross product of two 3-element arrays']
`JSON`: The JSON data type allows for the storage and querying of JSON formatted data, supporting functions for extracting, manipulating, and transforming JSON content within the database., Examples: ['CREATE TABLE example (j JSON);', 'INSERT INTO example VALUES (\'{{ "family": "anatidae", "species": [ "duck", "goose", "swan", null ] }}\');', "SELECT j->'$.family' FROM example;", "SELECT json_extract(j, '$.species[0]') FROM example;", "SELECT json_extract_string(j, '$.family') FROM example;"]
Here is the schema of the DuckDB database that the SQL query will run on:
{schema}
Question:
Here is the question or an instruction the user provided:
{question}
Write a DuckDB SQL query for the given question!
Answer:
```
| 2024-11-13T01:34:19.936521 | 5 | 0 | 14 | 0 | 6 | 0 | 48 | 0 | 2 | 0 | 75 | 0 |