repo_name | repo_url | repo_description | repo_stars | repo_forks | repo_last_updated | repo_created_at | repo_size | repo_license | language | text | avg_line_length | max_line_length | alphnanum_fraction |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | 0 | 0 | 0 |
|
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
# CONFIG
config = configparser.ConfigParser()
config.read('dwh.cfg')
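# Expected layout of dwh.cfg (inferred from the keys referenced in this project):
#   [CLUSTER]  - five connection values consumed positionally as host, dbname, user, password, port
#   [IAM_ROLE] - ARN
#   [S3]       - LOG_DATA, LOG_JSONPATH, SONG_DATA
# The S3 paths and the role ARN are assumed to be stored already quoted, since they are
# interpolated as-is into the COPY statements below.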
# DROP TABLES
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events;"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs;"
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
staging_events_table_create= ("""
CREATE TABLE IF NOT EXISTS staging_events
(
artist VARCHAR,
auth VARCHAR,
firstName VARCHAR(50),
gender CHAR,
itemInSession INTEGER,
lastName VARCHAR(50),
length FLOAT,
level VARCHAR,
location VARCHAR,
method VARCHAR,
page VARCHAR,
registration FLOAT,
sessionId INTEGER,
song VARCHAR,
status INTEGER,
ts BIGINT,
userAgent VARCHAR,
userId INTEGER
);
""")
staging_songs_table_create = ("""
CREATE TABLE IF NOT EXISTS staging_songs
(
num_songs INTEGER,
artist_id VARCHAR,
artist_latitude FLOAT,
artist_longitude FLOAT,
artist_location VARCHAR,
artist_name VARCHAR,
song_id VARCHAR,
title VARCHAR,
duration FLOAT,
year FLOAT
);
""")
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays
(
songplay_id INTEGER IDENTITY (1, 1) PRIMARY KEY ,
start_time TIMESTAMP,
user_id INTEGER,
level VARCHAR,
song_id VARCHAR,
artist_id VARCHAR,
session_id INTEGER,
location VARCHAR,
user_agent VARCHAR
)
DISTSTYLE KEY
DISTKEY ( start_time )
SORTKEY ( start_time );
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users
(
userId INTEGER PRIMARY KEY,
    firstname VARCHAR(50),
lastname VARCHAR(50),
gender CHAR(1) ENCODE BYTEDICT,
level VARCHAR ENCODE BYTEDICT
)
SORTKEY (userId);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs
(
song_id VARCHAR PRIMARY KEY,
title VARCHAR,
artist_id VARCHAR,
year INTEGER ENCODE BYTEDICT,
duration FLOAT
)
SORTKEY (song_id);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists
(
artist_id VARCHAR PRIMARY KEY ,
name VARCHAR,
location VARCHAR,
latitude FLOAT,
longitude FLOAT
)
SORTKEY (artist_id);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time
(
start_time TIMESTAMP PRIMARY KEY ,
hour INTEGER,
day INTEGER,
week INTEGER,
month INTEGER,
year INTEGER ENCODE BYTEDICT ,
weekday VARCHAR(9) ENCODE BYTEDICT
)
DISTSTYLE KEY
DISTKEY ( start_time )
SORTKEY (start_time);
""")
# STAGING TABLES
staging_events_copy = ("""
COPY staging_events
FROM {}
iam_role {}
FORMAT AS json {};
""").format(config['S3']['LOG_DATA'], config['IAM_ROLE']['ARN'], config['S3']['LOG_JSONPATH'])
staging_songs_copy = ("""
COPY staging_songs
FROM {}
iam_role {}
FORMAT AS json 'auto';
""").format(config['S3']['SONG_DATA'], config['IAM_ROLE']['ARN'])
# FINAL TABLES
songplay_table_insert = ("""
INSERT INTO songplays (START_TIME, USER_ID, LEVEL, SONG_ID, ARTIST_ID, SESSION_ID, LOCATION, USER_AGENT)
SELECT DISTINCT
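    -- ts holds a Unix epoch timestamp in milliseconds; convert it to a TIMESTAMP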
TIMESTAMP 'epoch' + (se.ts / 1000) * INTERVAL '1 second' as start_time,
se.userId,
se.level,
ss.song_id,
ss.artist_id,
se.sessionId,
se.location,
se.userAgent
FROM staging_songs ss
INNER JOIN staging_events se
ON (ss.title = se.song AND se.artist = ss.artist_name)
AND se.page = 'NextSong';
""")
user_table_insert = ("""
INSERT INTO users
SELECT DISTINCT userId, firstName, lastName, gender, level
FROM staging_events
WHERE userId IS NOT NULL
AND page = 'NextSong';
""")
song_table_insert = ("""
INSERT INTO songs
SELECT
DISTINCT song_id, title, artist_id, year, duration
FROM staging_songs
WHERE song_id IS NOT NULL;
""")
artist_table_insert = ("""
INSERT INTO artists
SELECT
DISTINCT artist_id, artist_name, artist_location, artist_latitude, artist_longitude
FROM staging_songs;
""")
time_table_insert = ("""
insert into time
SELECT DISTINCT
TIMESTAMP 'epoch' + (ts/1000) * INTERVAL '1 second' as start_time,
EXTRACT(HOUR FROM start_time) AS hour,
EXTRACT(DAY FROM start_time) AS day,
EXTRACT(WEEKS FROM start_time) AS week,
EXTRACT(MONTH FROM start_time) AS month,
EXTRACT(YEAR FROM start_time) AS year,
to_char(start_time, 'Day') AS weekday
FROM staging_events;
""")
# QUERY LISTS
create_table_queries = [staging_events_table_create, staging_songs_table_create, songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [songplay_table_insert, user_table_insert, song_table_insert, artist_table_insert, time_table_insert]
| 23.429952 | 181 | 0.68038 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class CreateTableOperator(BaseOperator):
ui_color = '#358140'
@apply_defaults
def __init__(self, redshift_conn_id = "", *args, **kwargs):
super(CreateTableOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
def execute(self, context):
self.log.info('Creating Postgres SQL Hook')
redshift = PostgresHook(postgres_conn_id = self.redshift_conn_id)
self.log.info('Executing creating tables in Redshift.')
        with open('/home/workspace/airflow/create_tables.sql', 'r') as sql_file:
            queries = sql_file.read()
redshift.run(queries)
self.log.info("Tables created ")
| 26.7 | 80 | 0.651807 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataQualityOperator(BaseOperator):
ui_color = '#89DA59'
@apply_defaults
def __init__(self,
redshift_conn_id="",
tables = [],
*args, **kwargs):
super(DataQualityOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.tables = tables
def execute(self, context):
redshift_hook = PostgresHook(postgres_conn_id = self.redshift_conn_id)
for table in self.tables:
self.log.info(f"Starting data quality validation on table : {table}")
records = redshift_hook.get_records(f"select count(*) from {table};")
if len(records) < 1 or len(records[0]) < 1 or records[0][0] < 1:
self.log.error(f"Data Quality validation failed for table : {table}.")
raise ValueError(f"Data Quality validation failed for table : {table}")
self.log.info(f"Data Quality Validation Passed on table : {table}!!!")
        self.log.info("Data quality validation completed for all tables.") | 36.823529 | 94 | 0.595331 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadDimensionOperator(BaseOperator):
ui_color = '#80BD9E'
@apply_defaults
def __init__(self,
redshift_conn_id="",
sql_query = "",
delete_load = False,
table_name = "",
*args, **kwargs):
super(LoadDimensionOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.sql_query = sql_query
self.table_name = table_name
self.delete_load = delete_load
def execute(self, context):
redshift_hook = PostgresHook(postgres_conn_id = self.redshift_conn_id)
if self.delete_load:
self.log.info(f"Delete load operation set to TRUE. Running delete statement on table {self.table_name}")
redshift_hook.run(f"DELETE FROM {self.table_name}")
self.log.info(f"Running query to load data into Dimension Table {self.table_name}")
redshift_hook.run(self.sql_query)
self.log.info(f"Dimension Table {self.table_name} loaded.")
| 36.65625 | 116 | 0.622924 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadFactOperator(BaseOperator):
ui_color = '#F98866'
@apply_defaults
def __init__(self,
redshift_conn_id="",
sql_query = "",
*args, **kwargs):
super(LoadFactOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.sql_query = sql_query
def execute(self, context):
redshift_hook = PostgresHook(postgres_conn_id = self.redshift_conn_id)
redshift_hook.run(self.sql_query)
| 27.416667 | 78 | 0.621145 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_hook import AwsHook
class StageToRedshiftOperator(BaseOperator):
ui_color = '#358140'
copy_query = " COPY {} \
FROM '{}' \
ACCESS_KEY_ID '{}' \
SECRET_ACCESS_KEY '{}' \
FORMAT AS json '{}'; \
"
@apply_defaults
def __init__(self,
redshift_conn_id="",
aws_credential_id="",
table_name = "",
s3_bucket="",
s3_key = "",
file_format = "",
log_json_file = "",
*args, **kwargs):
super(StageToRedshiftOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.aws_credential_id = aws_credential_id
self.table_name = table_name
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.file_format = file_format
self.log_json_file = log_json_file
self.execution_date = kwargs.get('execution_date')
def execute(self, context):
aws_hook = AwsHook(self.aws_credential_id)
credentials = aws_hook.get_credentials()
s3_path = "s3://{}/{}".format(self.s3_bucket, self.s3_key)
self.log.info(f"Picking staging file for table {self.table_name} from location : {s3_path}")
if self.log_json_file != "":
self.log_json_file = "s3://{}/{}".format(self.s3_bucket, self.log_json_file)
copy_query = self.copy_query.format(self.table_name, s3_path, credentials.access_key, credentials.secret_key, self.log_json_file)
else:
copy_query = self.copy_query.format(self.table_name, s3_path, credentials.access_key, credentials.secret_key, 'auto')
self.log.info(f"Running copy query : {copy_query}")
redshift_hook = PostgresHook(postgres_conn_id = self.redshift_conn_id)
redshift_hook.run(copy_query)
self.log.info(f"Table {self.table_name} staged successfully!!")
| 37.280702 | 141 | 0.577258 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
from pathlib import Path
config = configparser.ConfigParser()
config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg"))
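# config.cfg is expected to provide a [KEYS] section with an API_KEY entry
# (the Yelp API key used in the Authorization header below).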
api_key = config['KEYS']['API_KEY']
headers = {'Authorization': 'Bearer %s' % api_key} | 28.625 | 65 | 0.711864 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | # This is the local request module of this project (not the external "requests" library)
from request import Request
from auth import headers
import json
class BusinessSearch:
def __init__(self, term, location, price=None):
self._param = {'term' : term, 'location' : location}
if price:
self._param['price'] = price
self._base_url = 'https://api.yelp.com/v3/businesses/search'
self._business_list = self._search_business()
def _search_business(self):
business_search_request = Request.get_content(url=self._base_url, param=self._param)
return business_search_request['businesses'] if business_search_request is not None else []
def _parse_results(self, data):
# Categories data : 'categories': [{'alias': 'bakeries', 'title': 'Bakeries'}]
categories = ' '.join([category['title'] for category in data['categories']])
# Longitude and latitude data : 'coordinates': {'latitude': 45.5232, 'longitude': -73.583459}
longitude = data['coordinates']['longitude']
latitude = data['coordinates']['latitude']
# Location example : 'location': { 'display_address': ['316 Avenue du Mont-Royal E', 'Montreal, QC H2T 1P7', 'Canada']}
location = ','.join(data['location']['display_address'])
return {"id" : data['id'], "name" : self._add_escape_character(data['name']), "image_url" : data['image_url'], "url" : data['url'],
"review_count" : data['review_count'], "categories" : categories, "rating" : data['rating'],
"latitude" : latitude, "longitude" : longitude, "price" : data['price'], "location" : location,
"display_phone" : data['display_phone']
}
def _add_escape_character(self, data):
return data.replace("'", "''")
def get_results(self):
return [self._parse_results(business) for business in self._business_list] | 47.512821 | 139 | 0.615547 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import psycopg2
import configparser
from pathlib import Path
from queries import create_business_schema, create_business_table
config = configparser.ConfigParser()
config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg"))
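# config.cfg is expected to provide a [DATABASE] section whose values are consumed
# positionally below as host, dbname, user, password and port.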
class DatabaseDriver:
def __init__(self):
self._conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['DATABASE'].values()))
self._cur = self._conn.cursor()
def execute_query(self, query):
self._cur.execute(query)
def setup(self):
self.execute_query(create_business_schema)
self.execute_query(create_business_table) | 30.8 | 123 | 0.685039 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
from pathlib import Path
from businesssearch import BusinessSearch
from queries import create_business_schema, create_business_table, insert_business_table
from databasedriver import DatabaseDriver
import argparse
config = configparser.ConfigParser()
config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg"))
parser = argparse.ArgumentParser(
description="A Example yelp business finder based on parameters such as term, location, price, ")
api_key = config['KEYS']['API_KEY']
headers = {'Authorization': 'Bearer %s' % api_key}
def to_string(data):
return [str(value) for value in data.values()]
def main():
args = parser.parse_args()
# Pricing levels to filter the search result with: 1 = $, 2 = $$, 3 = $$$, 4 = $$$$.
b = BusinessSearch(term=args.term, location=args.location, price=args.price)
db = DatabaseDriver()
db.setup()
queries = [insert_business_table.format(*to_string(result)) for result in b.get_results()]
query_to_execute = "BEGIN; \n" + '\n'.join(queries) + "\nCOMMIT;"
db.execute_query(query_to_execute)
if __name__ == "__main__":
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument("-t", "--term", metavar='', required=True,
help="Search term, for example \"food\" or \"restaurants\". The term may also be business names, such as \"Starbucks.\".")
required.add_argument("-l", "--location", metavar='', required=True,
help="This string indicates the geographic area to be used when searching for businesses. ")
optional.add_argument("-p", "--price", type=int, metavar='', required=False, default=1,
help="Pricing levels to filter the search result with: 1 = $, 2 = $$, 3 = $$$, 4 = $$$$.")
main() | 44.690476 | 148 | 0.657977 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | create_business_schema = """CREATE SCHEMA IF NOT EXISTS yelp;"""
create_business_table = """
CREATE TABLE IF NOT EXISTS yelp.business (
business_id varchar PRIMARY KEY,
business_name varchar,
image_url varchar,
url varchar,
review_count int,
categories varchar,
rating float,
latitude float,
longitude float,
price varchar,
location varchar,
phone varchar
);
"""
insert_business_table = """INSERT INTO yelp.business VALUES ('{}', '{}', '{}', '{}', {}, '{}', {}, {}, {}, '{}', '{}', '{}')
ON CONFLICT (business_id)
DO UPDATE SET
business_id = EXCLUDED.business_id,
business_name = EXCLUDED.business_name,
image_url = EXCLUDED.image_url,
url = EXCLUDED.url,
review_count = EXCLUDED.review_count,
categories = EXCLUDED.categories,
rating = EXCLUDED.rating,
latitude = EXCLUDED.latitude,
longitude = EXCLUDED.longitude,
price = EXCLUDED.price,
location = EXCLUDED.location,
phone = EXCLUDED.phone;
""" | 35.371429 | 124 | 0.505503 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import requests
from auth import headers
import json
class Request:
def __init__(self):
self._header = headers
@staticmethod
def get_content(url, param):
response = requests.get(url, headers=headers, params=param)
if response.status_code == 200:
return json.loads(response.content)
else:
print(f"Request completed with Error. Response Code : {response.status_code}")
return None | 27.875 | 90 | 0.635575 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
import psycopg2
from sql_queries import copy_table_queries, insert_table_queries
def load_staging_tables(cur, conn):
for query in copy_table_queries:
cur.execute(query)
conn.commit()
def insert_tables(cur, conn):
for query in insert_table_queries:
cur.execute(query)
conn.commit()
def main():
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main() | 20.625 | 112 | 0.638205 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def drop_tables(cur, conn):
for query in drop_table_queries:
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
for query in create_table_queries:
cur.execute(query)
conn.commit()
def main():
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main() | 20 | 112 | 0.636364 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from create_tables import main as create_table_main
from etl import main as etl_main
if __name__ == "__main__":
create_table_main()
etl_main()
| 20.857143 | 51 | 0.664474 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import boto3
import configparser
from botocore.exceptions import ClientError
import json
import logging
import logging.config
from pathlib import Path
import argparse
import time
# Setting up logger, Logger properties are defined in logging.ini file
logging.config.fileConfig(f"{Path(__file__).parents[0]}/logging.ini")
logger = logging.getLogger(__name__)
# Loading cluster configurations from cluster.config
config = configparser.ConfigParser()
config.read_file(open('cluster.config'))
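# Expected sections in cluster.config (inferred from the config.get() calls below):
#   [AWS]            KEY, SECRET
#   [DWH]            DWH_CLUSTER_TYPE, DWH_NODE_TYPE, DWH_NUM_NODES, DWH_CLUSTER_IDENTIFIER,
#                    DWH_DB, DWH_DB_USER, DWH_DB_PASSWORD, DWH_PORT
#   [IAM_ROLE]       NAME, DESCRIPTION, POLICY_ARN
#   [SECURITY_GROUP] NAME, DESCRIPTION
#   [INBOUND_RULE]   PORT_RANGE, CIDRIP, PROTOCOL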
def create_IAM_role(iam_client):
"""
    Create an IAM role; the configuration is defined in cluster.config.
:param iam_client: an IAM service client instance
:return: True if IAM role created and policy applied successfully.
"""
role_name = config.get('IAM_ROLE', 'NAME')
role_description = config.get('IAM_ROLE', 'DESCRIPTION')
role_policy_arn = config.get('IAM_ROLE','POLICY_ARN')
logging.info(f"Creating IAM role with name : {role_name}, description : {role_description} and policy : {role_policy_arn}")
# Creating Role.
# Policy Documentation reference - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#aws-resource-iam-role--examples
role_policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": [ "redshift.amazonaws.com" ] },
"Action": [ "sts:AssumeRole" ]
}
]
}
)
try:
create_response = iam_client.create_role(
Path='/',
RoleName=role_name,
Description=role_description,
AssumeRolePolicyDocument = role_policy_document
)
logger.debug(f"Got response from IAM client for creating role : {create_response}")
logger.info(f"Role create response code : {create_response['ResponseMetadata']['HTTPStatusCode']}")
except Exception as e:
logger.error(f"Error occured while creating role : {e}")
return False
try:
# Attaching policy using ARN's( Amazon Resource Names )
policy_response = iam_client.attach_role_policy(
RoleName=role_name,
PolicyArn=role_policy_arn
)
logger.debug(f"Got response from IAM client for applying policy to role : {policy_response}")
logger.info(f"Attach policy response code : {policy_response['ResponseMetadata']['HTTPStatusCode']}")
except Exception as e:
logger.error(f"Error occured while applying policy : {e}")
return False
return True if( (create_response['ResponseMetadata']['HTTPStatusCode'] == 200) and (policy_response['ResponseMetadata']['HTTPStatusCode'] == 200) ) else False
def delete_IAM_role(iam_client):
"""
    Delete an IAM role.
Make sure that you do not have any Amazon EC2 instances running with the role you are about to delete.
Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.
:param iam_client: an IAM service client instance
:return: True if role deleted successfully.
"""
role_name = config.get('IAM_ROLE', 'NAME')
existing_roles = [role['RoleName'] for role in iam_client.list_roles()['Roles']]
if(role_name not in existing_roles):
logger.info(f"Role {role_name} does not exist.")
return True
logger.info(f"Processing deleting IAM role : {role_name}")
try:
detach_response = iam_client.detach_role_policy(RoleName=role_name, PolicyArn=config.get('IAM_ROLE','POLICY_ARN'))
logger.debug(f"Response for policy detach from IAM role : {detach_response}")
logger.info(f"Detach policy response code : {detach_response['ResponseMetadata']['HTTPStatusCode']}")
delete_response = iam_client.delete_role(RoleName=role_name)
logger.debug(f"Response for deleting IAM role : {delete_response}")
logger.info(f"Delete role response code : {delete_response['ResponseMetadata']['HTTPStatusCode']}")
except Exception as e:
logger.error(f"Exception occured while deleting role : {e}")
return False
return True if( (detach_response['ResponseMetadata']['HTTPStatusCode'] == 200) and (delete_response['ResponseMetadata']['HTTPStatusCode'] == 200) ) else False
def create_cluster(redshift_client, iam_role_arn, vpc_security_group_id):
"""
Create a Redshift cluster using the IAM role and security group created.
:param redshift_client: a redshift client instance
:param iam_role_arn: IAM role arn to give permission to cluster to communicate with other AWS service
:param vpc_security_group_id: vpc group for network setting for cluster
:return: True if cluster created successfully.
"""
# Cluster Hardware config
cluster_type = config.get('DWH','DWH_CLUSTER_TYPE')
node_type = config.get('DWH', 'DWH_NODE_TYPE')
num_nodes = int(config.get('DWH', 'DWH_NUM_NODES'))
# Cluster identifiers and credentials
cluster_identifier = config.get('DWH','DWH_CLUSTER_IDENTIFIER')
db_name = config.get('DWH', 'DWH_DB')
database_port=int(config.get('DWH','DWH_PORT'))
master_username = config.get('DWH', 'DWH_DB_USER')
master_user_password = config.get('DWH', 'DWH_DB_PASSWORD')
# Cluster adding IAM role
iam_role = None
# Security settings
security_group = config.get('SECURITY_GROUP', 'NAME')
# Documentation - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/redshift.html?highlight=create_cluster#Redshift.Client.create_cluster
try:
response = redshift_client.create_cluster(
DBName=db_name,
ClusterIdentifier=cluster_identifier,
ClusterType=cluster_type,
NodeType=node_type,
NumberOfNodes=num_nodes,
MasterUsername=master_username,
MasterUserPassword=master_user_password,
VpcSecurityGroupIds=vpc_security_group_id,
IamRoles = [iam_role_arn]
)
logger.debug(f"Cluster creation response : {response}")
logger.info(f"Cluster creation response code : {response['ResponseMetadata']['HTTPStatusCode']} ")
except Exception as e:
logger.error(f"Exception occured while creating cluster : {e}")
return False
return (response['ResponseMetadata']['HTTPStatusCode'] == 200)
def get_cluster_status(redshift_client, cluster_identifier):
response = redshift_client.describe_clusters(ClusterIdentifier = cluster_identifier)
cluster_status = response['Clusters'][0]['ClusterStatus']
logger.info(f"Cluster status : {cluster_status.upper()}")
return True if(cluster_status.upper() in ('AVAILABLE','ACTIVE', 'INCOMPATIBLE_NETWORK', 'INCOMPATIBLE_HSM', 'INCOMPATIBLE_RESTORE', 'INSUFFICIENT_CAPACITY', 'HARDWARE_FAILURE')) else False
def delete_cluster(redshift_client):
"""
Deleting the redshift cluster
:param redshift_client: a redshift client instance
:return: True if cluster deleted successfully.
"""
cluster_identifier = config.get('DWH', 'DWH_CLUSTER_IDENTIFIER')
if(len(redshift_client.describe_clusters()['Clusters']) == 0):
logger.info(f"Cluster {cluster_identifier} does not exist.")
return True
try:
while(not get_cluster_status(redshift_client, cluster_identifier=cluster_identifier)):
logger.info("Can't delete cluster. Waiting for cluster to become ACTIVE")
time.sleep(10)
response = \
redshift_client.delete_cluster(ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=True)
logger.debug(f"Cluster deleted with response : {response}")
logger.info(f"Cluster deleted response code : {response['ResponseMetadata']['HTTPStatusCode']}")
except Exception as e:
logger.error(f"Exception occured while deleting cluster : {e}")
return False
    return (response['ResponseMetadata']['HTTPStatusCode'] == 200)
def get_group(ec2_client, group_name):
groups = \
ec2_client.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': [group_name]}])[
'SecurityGroups']
return None if(len(groups) == 0) else groups[0]
def create_ec2_security_group(ec2_client):
if(get_group(ec2_client, config.get('SECURITY_GROUP','NAME')) is not None):
logger.info("Group already exists!!")
return True
# Fetch VPC ID
vpc_id = ec2_client.describe_security_groups()['SecurityGroups'][0]['VpcId']
response = ec2_client.create_security_group(
Description=config.get('SECURITY_GROUP','DESCRIPTION'),
GroupName=config.get('SECURITY_GROUP','NAME'),
VpcId=vpc_id,
DryRun=False # Checks whether you have the required permissions for the action, without actually making the request, and provides an error response
)
logger.debug(f"Security group creation response : {response}")
logger.info(f"Group created!! Response code {response['ResponseMetadata']['HTTPStatusCode']}")
logger.info("Authorizing security group ingress")
ec2_client.authorize_security_group_ingress(
GroupId=response['GroupId'],
GroupName=config.get('SECURITY_GROUP','NAME'),
FromPort=int(config.get('INBOUND_RULE','PORT_RANGE')),
ToPort=int(config.get('INBOUND_RULE', 'PORT_RANGE')),
CidrIp=config.get('INBOUND_RULE','CIDRIP'),
IpProtocol=config.get('INBOUND_RULE','PROTOCOL'),
DryRun=False
)
return (response['ResponseMetadata']['HTTPStatusCode'] == 200)
def delete_ec2_security_group(ec2_client):
"""
Delete a security group
:param ec2_client: ec2 client instance
:return: True if security group deleted successfully
"""
group_name = config.get('SECURITY_GROUP','NAME')
group = get_group(ec2_client, group_name)
if(group is None):
logger.info(f"Group {group_name} does not exist")
return True
try:
response = ec2_client.delete_security_group(
GroupId=group['GroupId'],
GroupName=group_name,
DryRun=False
)
logger.debug(f"Deleting security group response : {response}")
logger.info(f"Delete response {response['ResponseMetadata']['HTTPStatusCode']}")
except Exception as e:
logger.error(f"Error occured while deleting group : {e}")
return False
return (response['ResponseMetadata']['HTTPStatusCode'] == 200)
def boolean_parser(val):
if val.upper() not in ['FALSE', 'TRUE']:
logging.error(f"Invalid arguemnt : {val}. Must be TRUE or FALSE")
raise ValueError('Not a valid boolean string')
return val.upper() == 'TRUE'
if __name__ == "__main__":
# Parsing arguments
    parser = argparse.ArgumentParser(description="A Redshift cluster IaC (Infrastructure as Code) script. It creates an IAM role for Redshift, creates a security group and sets up its ingress parameters."
                                     " Finally, it spins up a Redshift cluster.")
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument("-c", "--create", type=boolean_parser, metavar='', required=True,
help="True or False. Create IAM roles, security group and redshift cluster if ie does not exist.")
required.add_argument("-d", "--delete", type=boolean_parser, metavar='', required=True,
help="True or False. Delete the roles, securitygroup and cluster. WARNING: Deletes the Redshift cluster, IAM role and security group. ")
optional.add_argument("-v", "--verbosity", type=boolean_parser, metavar='', required=False, default=True,
help="Increase output verbosity. Default set to DEBUG.")
args = parser.parse_args()
logger.info(f"ARGS : {args}")
if(not args.verbosity):
logger.setLevel(logging.INFO)
logger.info("LOGGING LEVEL SET TO INFO.")
# print(boto3._get_default_session().get_available_services() ) # Getting aws services list
# Creating low-level service clients
ec2 = boto3.client(service_name = 'ec2', region_name = 'us-east-1', aws_access_key_id=config.get('AWS', 'Key'), aws_secret_access_key=config.get('AWS', 'SECRET'))
s3 = boto3.client(service_name = 's3', region_name = 'us-east-1', aws_access_key_id=config.get('AWS', 'Key'), aws_secret_access_key=config.get('AWS', 'SECRET'))
iam = boto3.client(service_name = 'iam', region_name = 'us-east-1', aws_access_key_id=config.get('AWS', 'Key'), aws_secret_access_key=config.get('AWS', 'SECRET'))
redshift = boto3.client(service_name = 'redshift', region_name = 'us-east-1', aws_access_key_id=config.get('AWS', 'Key'), aws_secret_access_key=config.get('AWS', 'SECRET'))
logger.info("Clients setup for all services.")
# Setting up IAM Role, security group and cluster
if(args.create):
if(create_IAM_role(iam)):
logger.info("IAM role created. Creating security group....")
if(create_ec2_security_group(ec2)):
logger.info("Security group created. Spinning redshift cluster....")
role_arn = iam.get_role(RoleName = config.get('IAM_ROLE', 'NAME'))['Role']['Arn']
vpc_security_group_id = get_group(ec2, config.get('SECURITY_GROUP', 'NAME'))['GroupId']
create_cluster(redshift, role_arn, [vpc_security_group_id])
else:
logger.error("Failed to create security group")
else:
logger.error("Failed to create IAM role")
else:
logger.info("Skipping Creation.")
# cleanup
if(args.delete):
delete_cluster(redshift)
delete_ec2_security_group(ec2)
delete_IAM_role(iam)
| 42.537267 | 192 | 0.657868 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import psycopg2
import configparser
# Loading cluster configurations from cluster.config
config = configparser.ConfigParser()
config.read_file(open('cluster.config'))
def test_connection(host):
dbname = config.get('DWH','DWH_DB')
port = config.get('DWH','DWH_PORT')
user = config.get('DWH','DWH_DB_USER')
password = config.get('DWH','DWH_DB_PASSWORD')
con=psycopg2.connect(dbname= dbname, host=host, port= port, user= user, password= password)
cur = con.cursor()
cur.execute("CREATE TABLE test (id int);")
cur.execute("INSERT INTO test VALUES (10);")
    cur.execute('SELECT * FROM test')
    print(cur.fetchall())
con.close() | 28.590909 | 95 | 0.683077 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | 0 | 0 | 0 |
|
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import json
from concurrent.futures import ThreadPoolExecutor
from retry import RetryOnException as retry
from proxypool import (
ProxyPoolValidator,
ProxyPoolScraper,
RedisProxyPoolClient
)
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
class ProxyPoolOperator(BaseOperator):
@apply_defaults
def __init__(
self,
proxy_webpage,
number_of_proxies,
testing_url,
max_workers,
redis_config,
redis_key,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.proxy_webpage = proxy_webpage
self.testing_url = testing_url
self.number_of_proxies = number_of_proxies
self.max_workers = max_workers
self.redis_config = redis_config
self.redis_key = redis_key
@retry(5)
def execute(self, context):
proxy_scraper = ProxyPoolScraper(self.proxy_webpage)
proxy_validator = ProxyPoolValidator(self.testing_url)
proxy_stream = proxy_scraper.get_proxy_stream(self.number_of_proxies)
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
results = executor.map(
proxy_validator.validate_proxy, proxy_stream
)
valid_proxies = filter(lambda x: x.is_valid is True, results)
sorted_valid_proxies = sorted(
valid_proxies, key=lambda x: x.health, reverse=True
)
with RedisProxyPoolClient(self.redis_key, self.redis_config) as client:
client.override_existing_proxies(
[
json.dumps(record.proxy)
for record in sorted_valid_proxies[:5]
]
)
| 31.160714 | 79 | 0.611111 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from log import log
from retry import RetryOnException as retry
from proxypool import RedisProxyPoolClient
from rss_news import (
NewsProducer,
NewsExporter,
NewsValidator
)
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
@log
class RSSNewsOperator(BaseOperator):
@apply_defaults
def __init__(
self,
validator_config,
rss_feed,
language,
redis_config,
redis_key,
bootstrap_servers,
topic,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.validator_config = validator_config
self.rss_feed = rss_feed
self.language = language
self.redis_config = redis_config
self.redis_key = redis_key
self.bootstrap_servers = bootstrap_servers
self.topic = topic
@retry(5)
def execute(self, context):
validator = NewsValidator(self.validator_config)
producer = NewsProducer(self.rss_feed, self.language)
redis = RedisProxyPoolClient(self.redis_key, self.redis_config)
with NewsExporter(self.bootstrap_servers) as exporter:
proxy = redis.get_proxy()
self.logger.info(proxy)
try:
for news in producer.get_news_stream(proxy):
self.logger.info(news)
validator.validate_news(news)
exporter.export_news_to_broker(
self.topic,
news.as_dict()
)
except Exception as err:
redis.lpop_proxy()
self.logger.error(f"Exception: {err}")
raise err
| 29.293103 | 71 | 0.573462 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python |
class Config:
PROXY_WEBPAGE = "https://free-proxy-list.net/"
TESTING_URL = "https://google.com"
REDIS_CONFIG = {
"host": "redis",
"port": "6379",
"db": 0
}
REDIS_KEY = "proxies"
MAX_WORKERS = 50
NUMBER_OF_PROXIES = 50
RSS_FEEDS = {
"en": [
"https://www.goal.com/feeds/en/news",
"https://www.eyefootball.com/football_news.xml",
"https://www.101greatgoals.com/feed/",
"https://sportslens.com/feed/",
"https://deadspin.com/rss"
],
"pl": [
"https://weszlo.com/feed/",
"https://sportowefakty.wp.pl/rss.xml",
"https://futbolnews.pl/feed",
"https://igol.pl/feed/"
],
"es": [
"https://as.com/rss/tags/ultimas_noticias.xml",
"https://e00-marca.uecdn.es/rss/futbol/mas-futbol.xml",
"https://www.futbolred.com/rss-news/liga-de-espana.xml",
"https://www.futbolya.com/rss/noticias.xml"
],
"de": [
"https://www.spox.com/pub/rss/sport-media.xml",
"https://www.dfb.de/news/rss/feed/"
]
}
BOOTSTRAP_SERVERS = ["kafka:9092"]
TOPIC = "rss_news"
VALIDATOR_CONFIG = {
"description_length": 10,
"languages": [
"en", "pl", "es", "de"
]
}
| 23.280702 | 68 | 0.484454 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from urllib.parse import urlparse
from datetime import datetime
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from dags_config import Config as config
from custom_operators import (
ProxyPoolOperator,
RSSNewsOperator
)
def extract_feed_name(url):
parsed_url = urlparse(url)
return parsed_url.netloc.replace("www.", "")
def dummy_callable(action):
return f"{datetime.now()}: {action} scrapping RSS feeds!"
def export_events(config, rss_feed, language, dag):
feed_name = extract_feed_name(rss_feed)
return RSSNewsOperator(
task_id=f"exporting_{feed_name}_news_to_broker",
validator_config=config.VALIDATOR_CONFIG,
rss_feed=rss_feed,
language=language,
redis_config=config.REDIS_CONFIG,
redis_key=config.REDIS_KEY,
bootstrap_servers=config.BOOTSTRAP_SERVERS,
topic=config.TOPIC,
dag=dag
)
def create_dag(dag_id, interval, config, language, rss_feeds):
with DAG(
dag_id=dag_id,
description=f"Scrape latest ({language}) sport RSS feeds",
schedule_interval=interval,
start_date=datetime(2020, 1, 1),
catchup=False,
is_paused_upon_creation=False
) as dag:
start = PythonOperator(
task_id="starting_pipeline",
python_callable=dummy_callable,
op_kwargs={"action": "starting"},
dag=dag
)
proxypool = ProxyPoolOperator(
task_id="updating_proxypoool",
proxy_webpage=config.PROXY_WEBPAGE,
number_of_proxies=config.NUMBER_OF_PROXIES,
testing_url=config.TESTING_URL,
max_workers=config.NUMBER_OF_PROXIES,
redis_config=config.REDIS_CONFIG,
redis_key=config.REDIS_KEY,
dag=dag
)
events = [
export_events(config, rss_feed, language, dag)
for rss_feed in rss_feeds
]
finish = PythonOperator(
task_id="finishing_pipeline",
python_callable=dummy_callable,
op_kwargs={"action": "finishing"},
dag=dag
)
start >> proxypool >> events >> finish
return dag
for n, item in enumerate(config.RSS_FEEDS.items()):
language, rss_feeds = item
dag_id = f"rss_news_{language}"
interval = f"{n*4}-59/10 * * * *"
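    # Every language gets its own DAG running every 10 minutes; the n*4 minute offset
    # staggers the schedules so the DAGs do not all start at the same minute.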
globals()[dag_id] = create_dag(
dag_id,
interval,
config,
language,
rss_feeds
)
| 25.861702 | 66 | 0.604596 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import logging
class Logger:
__register = False
def __init__(self):
if not self.__register:
self._init_default_register()
def _init_default_register(self):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
Logger.__register = True
logging.info("Logger initialized")
def get_logger(self, filename):
return logging.getLogger(filename)
def log(cls):
cls.logger = Logger().get_logger(cls.__name__)
return cls
| 20.083333 | 50 | 0.613861 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python |
headers_list = [
{
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"Referer": "https://www.google.com/",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1"
},
{
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"Referer": "https://www.google.com/",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1"
},
{
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
"Dnt": "1",
"Referer": "https://www.google.com/",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
"X-Amzn-Trace-Id": "Root=1-5ee7bae0-82260c065baf5ad7f0b3a3e3"
},
{
"User-Agent": 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7",
"Referer": "https://www.reddit.com/",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1"
}
] | 42.609756 | 146 | 0.564633 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python |
import re
import random
from contextlib import closing
from requests import get
from log import log
from parser.random_headers_list import headers_list
@log
class WebParser:
def __init__(self, website_url, rotate_header=True):
self.url = website_url
self._rotate_header = rotate_header
def get_random_header(self):
if self._rotate_header:
return random.choice(headers_list)
def get_content(self, timeout=30, proxies=None):
kwargs = {
"timeout": timeout,
"proxies": proxies,
"headers": self.get_random_header()
}
try:
with closing(get(self.url, **kwargs)) as response:
if self.is_good_response(response):
return (
response.content
)
except Exception as err:
self.logger.info(f"Error occurred: {err}")
@staticmethod
def is_good_response(response):
        content_type = response.headers.get('Content-Type')
return (
response.status_code == 200
and content_type is not None
)
def __str__(self):
        domain = re.sub(r"(https?://|www\.)", "", self.url)
return f"WebParser of {domain.upper()}"
| 26.934783 | 63 | 0.566199 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from bs4 import BeautifulSoup
from dataclasses import dataclass, field
from parser import WebParser
from log import log
@dataclass
class ProxyRecord:
ip_address: str
port: int
country_code: str
country: str
anonymity: str
google: str
https: str
last_checked: str
proxy: dict = field(init=False, default=None)
def __post_init__(self):
self.proxy = self.format_proxy()
def format_proxy(self):
protocol = "https" if self.https == "yes" else "http"
url = f"{protocol}://{self.ip_address}:{self.port}"
return {"http": url, "https": url}
@log
class ProxyPoolScraper:
def __init__(self, url, bs_parser="lxml"):
self.parser = WebParser(url)
self.bs_parser = bs_parser
def get_proxy_stream(self, limit):
raw_records = self.extract_table_raw_records()
clean_records = list(
map(self._clear_up_record, raw_records)
)
for record in clean_records[:limit]:
self.logger.info(f"Proxy record: {record}")
if record:
yield ProxyRecord(*record)
def extract_table_raw_records(self):
content = self.parser.get_content()
soup_object = BeautifulSoup(content, self.bs_parser)
return (
soup_object
.find(id="list")
.find_all("tr")
)
def _clear_up_record(self, raw_record):
return [
val.text for val
in raw_record.find_all("td")
]
| 25.103448 | 61 | 0.58427 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import time
from dataclasses import dataclass
from parser import WebParser
from log import log
@dataclass(frozen=True)
class ProxyStatus:
proxy: str
health: float
is_valid: bool
@log
class ProxyPoolValidator:
def __init__(self, url, timeout=10, checks=3, sleep_interval=0.1):
self.timeout = timeout
self.checks = checks
self.sleep_interval = sleep_interval
self.parser = WebParser(url, rotate_header=True)
def validate_proxy(self, proxy_record):
consecutive_checks = []
for _ in range(self.checks):
content = self.parser.get_content(
timeout=self.timeout,
proxies=proxy_record.proxy
)
time.sleep(self.sleep_interval)
consecutive_checks.append(int(content is not None))
health = sum(consecutive_checks) / self.checks
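        # health is the fraction of successful checks; a proxy counts as valid
        # only when more than ~2/3 of the checks succeeded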
proxy_status = ProxyStatus(
proxy=proxy_record.proxy,
health=health,
is_valid=health > 0.66
)
self.logger.info(f"Proxy status: {proxy_status}")
return proxy_status
| 26.775 | 70 | 0.616216 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import json
import redis
from log import log
@log
class RedisProxyPoolClient:
def __init__(self, key, redis_config):
self.key = key
self.redis = redis.StrictRedis(
**redis_config
)
def __enter__(self):
return self
def override_existing_proxies(self, proxies):
self.logger.info(f"Overriding existing proxies {proxies}")
self.redis.delete(self.key)
self.redis.lpush(self.key, *proxies)
def list_existing_proxies(self):
response = self.redis.lrange(self.key, 0, -1)
return [
json.loads(proxy) for proxy in response
]
def get_proxy(self):
existing_proxies = self.list_existing_proxies()
if len(existing_proxies) > 0:
return existing_proxies[0]
def lpop_proxy(self):
self.logger.info("Deleting proxy!")
self.redis.lpop(self.key)
def __exit__(self, type, value, traceback):
client_id = self.redis.client_id()
self.redis.client_kill_filter(
_id=client_id
)
| 24.5 | 66 | 0.594393 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import functools
from log import log
@log
class RetryOnException:
def __init__(self, retries):
self._retries = retries
def __call__(self, function):
functools.update_wrapper(self, function)
def wrapper(*args, **kwargs):
self.logger.info(f"Retries: {self._retries}")
while self._retries != 0:
try:
return function(*args, **kwargs)
except Exception as err:
self.logger.info(f"Error occured: {err}")
self._retries -= 1
self._raise_on_condition(self._retries, err)
return wrapper
def _raise_on_condition(self, retries, exception):
if retries == 0:
raise exception
else:
self.logger.info(f"Retries: {retries}")
| 28 | 64 | 0.527043 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import json
import time
from kafka import KafkaProducer
class NewsExporter:
def __init__(self, bootstrap_servers):
self._producer = self._connect_producer(
bootstrap_servers
)
def _connect_producer(self, bootstrap_servers):
def encode_news(value):
return json.dumps(value).encode("utf-8")
producer = KafkaProducer(
bootstrap_servers=bootstrap_servers,
value_serializer=lambda x: encode_news(x)
)
return producer
def __enter__(self):
return self
def export_news_to_broker(self, topic, record, sleep_time=0.01):
response = self._producer.send(
topic,
value=record
)
time.sleep(sleep_time)
return response.get(
timeout=60
)
def __exit__(self, type, value, traceback):
self._producer.close()
| 23.432432 | 68 | 0.581395 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import re
from dataclasses import dataclass
import atoma
from parser import WebParser
@dataclass(frozen=True)
class News:
_id: str
title: str
link: str
published: str
description: str
author: str
language: str
def as_dict(self):
return self.__dict__
class NewsProducer:
def __init__(self, rss_feed, language):
self.parser = WebParser(rss_feed, rotate_header=True)
self.formatter = NewsFormatter(language)
def _extract_news_feed_items(self, proxies):
content = self.parser.get_content(proxies=proxies)
news_feed = atoma.parse_rss_bytes(content)
return news_feed.items
def get_news_stream(self, proxies):
news_feed_items = self._extract_news_feed_items(proxies)
for entry in news_feed_items:
formatted_entry = self.formatter.format_entry(entry)
yield formatted_entry
class NewsFormatter:
def __init__(self, language):
self.language = language
self.date_format = "%Y-%m-%d %H:%M:%S"
self.id_regex = "[^0-9a-zA-Z_-]+"
self.default_author = "Unknown"
def format_entry(self, entry):
description = self.format_description(entry)
return News(
self.construct_id(entry.title),
entry.title,
entry.link,
self.unify_date(entry.pub_date),
description,
self.assign_author(entry.author),
self.language
)
def construct_id(self, title):
return re.sub(self.id_regex, "", title).lower()
def unify_date(self, date):
return date.strftime(self.date_format)
def assign_author(self, author):
return self.default_author if not author else author
def format_description(self, entry):
tmp_description = re.sub("<.*?>", "", entry.description[:1000])
index = tmp_description.rfind(".")
short_description = tmp_description[:index+1]
return (
short_description if short_description
else entry.title
)
| 26.945946 | 71 | 0.610547 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python |
class NewsValidator:
def __init__(self, config):
self._config = config
def validate_news(self, news):
news = news.as_dict()
assert self.check_languages(news), "Wrong language!"
assert self.check_null_values(news), "Null values!"
assert self.check_description_length(news), "Short description!"
def check_null_values(self, news):
news_values = list(news.values())
return all(news_values)
def check_description_length(self, news):
description_length = self._config.get("description_length")
return len(news.get("description")) >= description_length
def check_languages(self, news):
languages = self._config.get("languages")
lang = news.get("language")
return any(
filter(lambda x: x == lang, languages)
)
| 30.333333 | 72 | 0.620118 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from pkg_resources import resource_string
import pytest
import fakeredis
from parser import WebParser
from requests import Response
from rss_news import NewsProducer, NewsFormatter, NewsValidator, News
from proxypool import ProxyPoolScraper, ProxyRecord
from retry import RetryOnException as retry
TEST_URL = "https://test.com"
@pytest.fixture
def web_parser():
yield WebParser(TEST_URL)
@pytest.fixture
def scraper():
yield ProxyPoolScraper(TEST_URL)
@pytest.fixture
def proxies():
yield [
{
"http": "http://127.0.0.1:8080",
"https": "http://127.0.0.1:8080"
}
]
@pytest.fixture
def proxy_record():
yield ProxyRecord(
"127.0.0.1",
8080,
"PL",
"POLAND",
"gold",
"no",
"no",
"30 minutes ago"
)
@pytest.fixture
def producer():
yield NewsProducer(TEST_URL, "en")
@pytest.fixture
def formatter():
yield NewsFormatter("en")
@pytest.fixture
def validator():
yield NewsValidator(
{
"description_length": 10,
"languages": ["en"]
}
)
@pytest.fixture
def news_record():
yield News(
"test_id", "test_title", "test_link",
"test_pub", "test_desc", "test_author", "en"
)
@pytest.fixture
def redis_mock():
yield fakeredis.FakeStrictRedis()
@pytest.fixture
def redis_config():
yield {
"host": "redis",
"port": "6379",
"db": 0
}
@pytest.fixture
def response():
def helper(status_code):
response = Response()
response.status_code = status_code
response.headers['Content-Type'] = "text/html"
return response
yield helper
@pytest.fixture
def raw_content():
def helper(filename):
return resource_string(
"tests",
f"dataresources/{filename}"
)
yield helper
@pytest.fixture
def add_function():
@retry(5)
def func(a, b):
return a + b
yield func
| 15.708333 | 69 | 0.590818 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from proxypool import ProxyRecord
from unittest.mock import patch
from ..fixtures import web_parser, scraper, raw_content
@patch("parser.web_parser.WebParser.get_content")
def test_get_proxy_stream(get_content, raw_content, web_parser, scraper):
get_content.return_value = raw_content("proxy_list_file.txt")
scraper.parser = web_parser
stream = scraper.get_proxy_stream(5)
result = list(stream)[-1]
assert isinstance(result, ProxyRecord)
| 26.352941 | 73 | 0.741379 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from unittest.mock import patch
from proxypool import ProxyPoolValidator
from ..fixtures import web_parser, raw_content, proxy_record
@patch("parser.web_parser.WebParser.get_content")
def test_validate_proxy(get_content, raw_content, web_parser, proxy_record):
expected = True
get_content.return_value = raw_content("proxy_list_file.txt")
validator = ProxyPoolValidator("https://google.com", sleep_interval=0)
validator.parser = web_parser
proxy_record = validator.validate_proxy(proxy_record)
result = proxy_record.is_valid
assert result == expected
@patch("parser.web_parser.WebParser.get_content")
def test_invalid_proxy(get_content, raw_content, web_parser, proxy_record):
expected = False
get_content.return_value = None
validator = ProxyPoolValidator("https://google.com", sleep_interval=0)
validator.parser = web_parser
proxy_record = validator.validate_proxy(proxy_record)
result = proxy_record.is_valid
assert result == expected
@patch("parser.web_parser.WebParser.get_content")
def test_unstable_valid_proxy(get_content, raw_content, web_parser, proxy_record):
expected = True
valid_content = raw_content("proxy_list_file.txt")
get_content.side_effect = [valid_content, valid_content, None]
validator = ProxyPoolValidator("https://google.com", sleep_interval=0)
validator.parser = web_parser
proxy_record = validator.validate_proxy(proxy_record)
result = proxy_record.is_valid
assert result == expected
assert round(proxy_record.health, 2) == 0.67
@patch("parser.web_parser.WebParser.get_content")
def test_unstable_invalid_proxy(get_content, raw_content, web_parser, proxy_record):
expected = False
valid_content = raw_content("proxy_list_file.txt")
get_content.side_effect = [None, None, valid_content]
validator = ProxyPoolValidator("https://google.com", sleep_interval=0)
validator.parser = web_parser
proxy_record = validator.validate_proxy(proxy_record)
result = proxy_record.is_valid
assert result == expected
assert round(proxy_record.health, 2) == 0.33
| 33.177419 | 84 | 0.725685 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import json
from unittest.mock import patch
from proxypool import RedisProxyPoolClient
from ..fixtures import redis_config, redis_mock, proxies
@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_override_existing_proxies(redis, redis_config, redis_mock, proxies):
new_proxies = [{"http": "http://127.0.0.1:8081", "https": "http://127.0.0.1:8081"}]
key = "test"
redis_mock.lpush(key, *[json.dumps(_) for _ in proxies])
redis_client = RedisProxyPoolClient(key, redis_config)
redis_client.redis = redis_mock
redis_client.override_existing_proxies(
[json.dumps(_) for _ in new_proxies]
)
current_proxies = redis_mock.lrange(key, 0, -1)
result = [json.loads(_) for _ in current_proxies]
assert result != proxies
@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_list_existing_proxies(redis, redis_config, redis_mock, proxies):
key = "test"
redis_mock.lpush(key, *[json.dumps(_) for _ in proxies])
redis_client = RedisProxyPoolClient(key, redis_config)
redis_client.redis = redis_mock
result = redis_client.list_existing_proxies()
assert result == proxies
@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_lpop_proxy(redis, redis_config, redis_mock, proxies):
expected = 1
key = "test"
redis_mock.lpush(key, *[json.dumps(_) for _ in proxies])
redis_client = RedisProxyPoolClient(key, redis_config)
redis_client.redis = redis_mock
redis_client.lpop_proxy()
assert len(proxies) == expected
@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_get_proxy(redis, redis_config, redis_mock, proxies):
expected = proxies[0]
key = "test"
redis_mock.lpush(key, *[json.dumps(_) for _ in proxies])
redis_client = RedisProxyPoolClient(key, redis_config)
redis_client.redis = redis_mock
result = redis_client.get_proxy()
assert result == expected
@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_redis_client_context_manager(redis, redis_config):
key = "test"
with RedisProxyPoolClient(key, redis_config) as redis_client:
pass
| 28.486486 | 87 | 0.695552 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import pytest
from ..fixtures import add_function
def test_retry_on_exception_valid(add_function):
expected = 2
result = add_function(1, 1)
assert result == expected
def test_retry_on_exception_wrong(add_function):
with pytest.raises(TypeError):
add_function("Test", 0.0001)
| 16.111111 | 48 | 0.690554 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from unittest.mock import patch, Mock
import pytest
from rss_news import NewsExporter
@patch("rss_news.rss_news_exporter.KafkaProducer")
def test_connect_producer(mock_producer):
exporter = NewsExporter(["test_broker:9092"])
assert exporter._producer is not None
@patch("rss_news.NewsExporter")
def test_export_news_to_broker(exporter):
topic = "test_topic"
news = {
"_id": "test_id",
"title": "test_title",
"link": "www.test.com",
"date": "2020-01-01 00:00:00",
"description": "Test",
"author": "Test",
"language": "pl"
}
exporter.export_news_to_broker(topic, news)
exporter.export_news_to_broker.assert_called_once_with(
topic, news
)
@patch("rss_news.rss_news_exporter.KafkaProducer")
def test_export_news_to_broker_context_manager(mock_producer):
topic = "test_topic"
news = {
"_id": "test_id",
"title": "test_title",
"link": "www.test.com",
"date": "2020-01-01 00:00:00",
"description": "Test",
"author": "Test",
"language": "pl"
}
with NewsExporter(["test_broker:9092"]) as exporter:
exporter.export_news_to_broker(topic, news)
exporter._producer.send.assert_called_once_with(
topic, value=news
)
| 24.843137 | 62 | 0.601367 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import datetime
from unittest.mock import patch
import pytest
from rss_news import News
from ..fixtures import web_parser, raw_content, producer, proxies, formatter
@patch("parser.web_parser.WebParser.get_content")
def test_get_news_stream(get_content, web_parser, raw_content, producer, proxies):
get_content.return_value = raw_content("rss_news_file.txt")
producer.parser = web_parser
stream = producer.get_news_stream(proxies)
result = list(stream)[-1]
assert isinstance(result, News)
@pytest.mark.parametrize(
"title, expected_id",
[
("example////1 example", "example1example"),
("example%%%%%%%2 example", "example2example"),
("*******example-3_ xx example", "example-3_xxexample")]
)
def test_construct_id(formatter, title, expected_id):
result = formatter.construct_id(title)
assert result == expected_id
def test_unify_date(formatter):
expected = "2020-05-17 00:00:00"
date = datetime.datetime(2020, 5, 17)
result = formatter.unify_date(date)
assert result == expected
def test_format_description(formatter):
expected = """Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."""
class Entry:
description = """Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation"""
title = "Lorem ipsum"
class EmptyEntry:
description = ""
title = "Lorem ipsum"
result = formatter.format_description(Entry)
result_empty = formatter.format_description(EmptyEntry)
assert result == expected
assert result_empty == EmptyEntry.title
@pytest.mark.parametrize(
"author, expected",[(None, "Unknown"), ("Test", "Test")]
)
def test_assign_author(formatter, author, expected):
result = formatter.assign_author(author)
assert result == expected
| 26.657534 | 82 | 0.68335 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import pytest
from rss_news import News
from ..fixtures import validator, news_record
def test_check_null_values(validator, news_record):
expected = True
news = news_record.as_dict()
result = validator.check_null_values(news)
assert result is expected
def test_check_null_values_with_nones(validator, news_record):
expected = False
news = news_record.as_dict()
news["id"] = None
result = validator.check_null_values(news)
assert result is expected
def test_check_languages(validator, news_record):
expected = True
news = news_record.as_dict()
result = validator.check_languages(news)
assert result is expected
def test_validate_news_raises_error(validator, news_record):
with pytest.raises(AssertionError):
validator.validate_news(news_record)
| 19.095238 | 62 | 0.69395 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from unittest.mock import patch
import pytest
from pytest import fixture
from requests.exceptions import ConnectionError
from parser import WebParser
from ..fixtures import web_parser, response
@patch("parser.web_parser.get")
def test_get_content(mock_get, web_parser):
expected = "TEST CONTENT"
mock_get.return_value.content = "TEST CONTENT"
mock_get.return_value.status_code = 200
mock_get.return_value.headers['Content-Type'] = "text/html"
result = web_parser.get_content()
assert result == expected
@patch("parser.web_parser.get")
def test_get_content_silence_exception(mock_get, web_parser):
expected = None
mock_get.side_effect = ConnectionError()
result = web_parser.get_content()
assert result == expected
@pytest.mark.parametrize(
"status_code, expected",
[(200, True), (403, False), (500, False)]
)
def test_is_good_response(web_parser, response, status_code, expected):
http_response = response(status_code)
result = web_parser.is_good_response(http_response)
assert result == expected
def test_get_random_header(web_parser):
expected = "User-Agent"
random_header = web_parser.get_random_header()
result = list(random_header.keys())
assert expected in result
@pytest.mark.parametrize(
"url, expected",
[
("https://test.com", "WebParser of TEST.COM"),
("https://www.test.com", "WebParser of TEST.COM"),
("www.test.com", "WebParser of TEST.COM")
]
)
def test__str__representation(url, expected):
web_parser = WebParser(url)
result = str(web_parser)
assert result == expected
| 22.985507 | 71 | 0.674123 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | """
Django settings for core project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('DJANGO_SECRET', 'default_secret_key')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(",")
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'django_elasticsearch_dsl',
'django_elasticsearch_dsl_drf',
'users',
'news',
'search'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'djongo',
'ENFORCE_SCHEMA': True,
'LOGGING': {
'version': 1,
'loggers': {
'djongo': {
'level': 'DEBUG',
                    'propagate': False,
}
},
},
'NAME': 'rss_news',
'CLIENT': {
'host': os.environ['MONGO_HOST'],
'port': 27017,
'username': os.environ['MONGO_USR'],
'password': os.environ['MONGO_PASSWD'],
'authSource': 'admin'
}
}
}
ELASTICSEARCH_DSL = {
'default': {
'hosts': f"{os.environ['ELASTIC_HOST']}:9200"
},
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10,
'DEFAULT_PARSER_CLASSES': [
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
'rest_framework.parsers.JSONParser',
]
}
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
},
'is_authenticated': True,
},
}
LOGIN_URL = 'user/register/'
AUTH_USER_MODEL = 'users.UserModel' | 24.912088 | 91 | 0.627784 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.urls import path
from users.views import UserCreateView, ObtainTokenView
app_name = "user"
urlpatterns = [
path("register/", UserCreateView.as_view(), name="register"),
path("login/", ObtainTokenView.as_view(), name="login")
]
| 21.818182 | 65 | 0.712 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | """
WSGI config for core project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
application = get_wsgi_application()
| 21.705882 | 78 | 0.766234 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 27.409091 | 73 | 0.655449 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from rest_framework import routers
from django.contrib import admin
from rest_framework.authtoken.models import Token
from news.models import News
from news.urls import urlpatterns
from users.models import UserModel
class NewsAdminSite(admin.AdminSite):
def get_urls(self):
urls = super(NewsAdminSite, self).get_urls()
custom_urls = [*urlpatterns]
return custom_urls + urls
admin_site = NewsAdminSite()
admin_site.register(UserModel)
admin_site.register(Token)
admin_site.register(News)
| 24 | 52 | 0.753817 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
| 13.333333 | 33 | 0.741176 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.db import models
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin
)
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **kwargs):
if not email:
raise ValueError("Must have an email address")
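        # normalize_email() lowercases only the domain portion of the address before the user is saved.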
email = self.normalize_email(email)
user = self.model(email=email, **kwargs)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class UserModel(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = "email"
| 27.027027 | 58 | 0.666023 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.contrib.auth import authenticate
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from users.models import UserModel
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = UserModel
fields = ("email", "password")
extra_kwargs = {
"password": {
"write_only": True,
"min_length": 5
}
}
def create(self, validated_data):
user = UserModel(
email=validated_data["email"]
)
user.set_password(validated_data["password"])
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
email = serializers.CharField()
password = serializers.CharField(
        style={
            "input_type": "password"
        },
trim_whitespace=False
)
def validate(self, attrs):
email = attrs.get("email")
password = attrs.get("password")
user = authenticate(
request=self.context.get("request"),
email=email,
password=password
)
if not user:
raise serializers.ValidationError(
"Unable to authenticate with provided credentials",
code="authentication"
)
attrs["user"] = user
return attrs
| 23.614035 | 67 | 0.560628 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.test import TestCase
from django.urls.resolvers import URLResolver
from news.admin import NewsAdminSite
class TestAdminSite(TestCase):
def setUp(self):
self.admin_site = NewsAdminSite()
def test_api_urls_in_admin_site(self):
expected = "'api/'"
urls_objects = self.admin_site.get_urls()
urls = list(
filter(lambda x: isinstance(x, URLResolver), urls_objects)
)
result = urls[0].pattern.describe()
self.assertEqual(result, expected)
| 22.347826 | 70 | 0.641791 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
ADMIN_SEARCH_NEWS_URL = reverse("admin:news-list")
ADMIN_SEARCH_NEWS_DETAIL_URL = reverse(
    "admin:news-detail", kwargs={"_id": "missing_news"}
)
class PublicAdminNewsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required_news_list(self):
response = self.client.get(ADMIN_SEARCH_NEWS_URL)
self.assertEqual(
response.status_code, status.HTTP_401_UNAUTHORIZED
)
def test_login_required_news_detail(self):
        response = self.client.get(ADMIN_SEARCH_NEWS_DETAIL_URL)
self.assertEqual(
response.status_code, status.HTTP_401_UNAUTHORIZED
)
| 24.515152 | 62 | 0.693222 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.test import TestCase
from news.models import News
class NewsModelTests(TestCase):
    TEST_NEWS_DATA = {
        "_id": "test_id",
        "title": "test title",
        "link": "www.testnews.com",
        "published": "2020-01-01 00:00:00",
        "description": "test description",
        "author": "test author",
        "language": "en"
    }
def test_news_model_create(self):
news = News.objects.create(**self.TEST_NEWS_DATA)
self.assertEqual(
news.link, self.TEST_NEWS_DATA["link"]
)
self.assertEqual(
news.title, self.TEST_NEWS_DATA["title"]
)
    def test_news_model_retrieve(self):
News.objects.create(**self.TEST_NEWS_DATA)
news = News.objects.get(_id="test_id")
self.assertTrue(news)
| 23.088235 | 57 | 0.564792 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from rest_framework import generics
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from users.serializers import UserSerializer, AuthTokenSerializer
class UserCreateView(generics.CreateAPIView):
serializer_class = UserSerializer
permission_classes = ()
class ObtainTokenView(ObtainAuthToken):
serializer_class = AuthTokenSerializer
permission_classes = ()
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
| 24.15 | 65 | 0.804781 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import os
from elasticsearch_dsl import analyzer
from django_elasticsearch_dsl import Document, fields, Index
from news.models import News
news_index = Index(os.environ["ELASTIC_INDEX"])
news_index.settings(
number_of_shards=1,
number_of_replicas=1
)
html_strip = analyzer(
"html_strip",
tokenizer="standard",
filter=["lowercase", "stop", "snowball"],
char_filter=["html_strip"]
)
@news_index.doc_type
class NewsDocument(Document):
id = fields.TextField(attr="_id")
title = fields.TextField(
analyzer=html_strip,
fields={
"raw": fields.TextField(analyzer="keyword"),
}
)
link = fields.TextField()
published = fields.TextField(
fields={
"raw": fields.TextField(analyzer="keyword"),
},
fielddata=True
)
description = fields.TextField(
analyzer=html_strip,
fields={
"raw": fields.TextField(analyzer="keyword"),
}
)
author = fields.TextField(
fielddata=True
)
language = fields.TextField(
fielddata=True
)
class Django:
model = News
| 20.545455 | 60 | 0.617399 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from elasticsearch.exceptions import NotFoundError
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
SEARCH_NEWS_URL = reverse("search:search-list")
SEARCH_DETAIL_NEWS_URL = reverse(
"search:search-detail", kwargs={"id":"missing_news"}
)
class PublicNewsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required_news_list(self):
response = self.client.get(SEARCH_NEWS_URL)
self.assertEqual(
response.status_code, status.HTTP_401_UNAUTHORIZED
)
def test_login_required_news_detail(self):
response = self.client.get(SEARCH_DETAIL_NEWS_URL)
self.assertEqual(
response.status_code, status.HTTP_401_UNAUTHORIZED
)
class PrivateNewsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            "test@example.com",
            "testpass123"
        )
self.client.force_authenticate(
self.user
)
def test_retrieve_news_list(self):
response = self.client.get(SEARCH_NEWS_URL)
self.assertEqual(
response.status_code, status.HTTP_200_OK
)
def test_retrieve_missing_news_detail(self):
with self.assertRaises(NotFoundError):
self.client.head(
SEARCH_DETAIL_NEWS_URL
)
| 24.666667 | 62 | 0.645874 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse("user:register")
GET_TOKEN_URL = reverse("user:login")
class UserApiTests(TestCase):
def setUp(self):
self.client = APIClient()
@staticmethod
def create_user(**kwargs):
return get_user_model().objects.create_user(
**kwargs
)
def test_create_valid_user(self):
        attrs = {
            "email": "test@example.com",
            "password": "test123"
        }
response = self.client.post(CREATE_USER_URL, attrs)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**response.data)
self.assertTrue(user.check_password(attrs["password"]))
self.assertNotIn("password", response.data)
def test_password_too_short(self):
        attrs = {
            "email": "test@example.com",
            "password": "123"
        }
response = self.client.post(CREATE_USER_URL, attrs)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
)
def test_user_exists(self):
        attrs = {
            "email": "test@example.com",
            "password": "123"
        }
self.create_user(**attrs)
response = self.client.post(CREATE_USER_URL, attrs)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
)
def test_create_token_for_user(self):
        attrs = {
            "email": "test@example.com",
            "password": "123"
        }
self.create_user(**attrs)
response = self.client.post(GET_TOKEN_URL, attrs)
self.assertIn("token", response.data)
self.assertEqual(
response.status_code, status.HTTP_200_OK
)
def test_create_token_invalid_credentials(self):
        attrs = {
            "email": "test@example.com",
            "password": "12345"
        }
        self.create_user(email="test@example.com", password="wrong")
response = self.client.post(GET_TOKEN_URL, attrs)
self.assertNotIn("token", response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
)
def test_create_token_no_user(self):
        attrs = {
            "email": "test@example.com",
            "password": "12345"
        }
response = self.client.post(GET_TOKEN_URL, attrs)
self.assertNotIn("token", response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
)
| 28.858696 | 72 | 0.583394 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.test import TestCase
from django.contrib.auth import get_user_model
class UserModelTests(TestCase):
    email = "test@example.com"
    email_upper = "test@EXAMPLE.COM"
password = "testpassword"
def test_create_user_check_email(self):
user = get_user_model().objects.create_user(
email=self.email,
password=self.password
)
self.assertEqual(user.email, self.email)
def test_create_user_check_password(self):
user = get_user_model().objects.create_user(
email=self.email,
password=self.password
)
self.assertTrue(user.check_password(self.password))
def test_user_email_normalized(self):
user = get_user_model().objects.create_user(
email=self.email_upper,
password=self.password
)
self.assertEqual(user.email, self.email_upper.lower())
def test_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(
email=None,
password=self.password
)
def test_create_superuser(self):
user = get_user_model().objects.create_superuser(
email=self.email,
password=self.password
)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
| 28.659574 | 62 | 0.612347 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Just Enough Python for Databricks SQL
# MAGIC
# MAGIC While Databricks SQL provides an ANSI-compliant flavor of SQL with many additional custom methods (including the entire Delta Lake SQL syntax), users migrating from some systems may run into missing features, especially around control flow and error handling.
# MAGIC
# MAGIC Databricks notebooks allow users to write SQL and Python and execute logic cell-by-cell. PySpark has extensive support for executing SQL queries, and can easily exchange data with tables and temporary views.
# MAGIC
# MAGIC Mastering just a handful of Python concepts will unlock powerful new design practices for engineers and analysts proficient in SQL. Rather than trying to teach the entire language, this lesson focuses on those features that can immediately be leveraged to write more extensible SQL programs on Databricks.
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, students should be able to:
# MAGIC * Print and manipulate multi-line Python strings
# MAGIC * Define variables and functions
# MAGIC * Use f-strings for variable substitution
# COMMAND ----------
# MAGIC %md
# MAGIC ## Strings
# MAGIC Characters enclosed in single (`'`) or double (`"`) quotes are considered strings.
# COMMAND ----------
"This is a string"
# COMMAND ----------
# MAGIC %md
# MAGIC To preview how a string will render, we can call `print()`.
# COMMAND ----------
print("This is a string")
# COMMAND ----------
# MAGIC %md
# MAGIC By wrapping a string in triple quotes (`"""`), it's possible to use multiple lines.
# COMMAND ----------
print("""
This
is
a
multi-line
string
""")
# COMMAND ----------
# MAGIC %md
# MAGIC This makes it easy to turn SQL queries into Python strings.
# COMMAND ----------
print("""
SELECT *
FROM test_table
""")
# COMMAND ----------
# MAGIC %md
# MAGIC When we execute SQL from a Python cell, we will pass a string as an argument to `spark.sql()`.
# COMMAND ----------
spark.sql("SELECT 1 AS test")
# COMMAND ----------
# MAGIC %md
# MAGIC To render a query the way it would appear in a normal SQL notebook, we call `display()` on this function.
# COMMAND ----------
display(spark.sql("SELECT 1 AS test"))
# COMMAND ----------
# MAGIC %md
# MAGIC **NOTE**: Executing a cell with only a Python string in it will just display the string's value (wrapped in quotes). Using `print()` with a string renders it back to the notebook without the quotes.
# MAGIC
# MAGIC To execute a string that contains SQL using Python, it must be passed within a call to `spark.sql()`.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Variables
# MAGIC Python variables are assigned using the `=` operator.
# MAGIC
# MAGIC Python variable names need to start with a letter or an underscore, and can only contain letters, numbers, and underscores.
# MAGIC
# MAGIC Many Python programmers favor snake casing, which uses only lowercase letters and underscores for all variables.
# MAGIC
# MAGIC The cell below creates the variable `my_string`.
# COMMAND ----------
my_string = "This is a string"
# COMMAND ----------
# MAGIC %md
# MAGIC Executing a cell with this variable will return its value.
# COMMAND ----------
my_string
# COMMAND ----------
# MAGIC %md
# MAGIC The output here is the same as if we typed `"This is a string"` into the cell and ran it.
# MAGIC
# MAGIC Note that the quotation marks aren't part of the string, as shown when we print it.
# COMMAND ----------
print(my_string)
# COMMAND ----------
# MAGIC %md
# MAGIC This variable can be used the same way a string would be.
# MAGIC
# MAGIC String concatenation (joining two strings together) can be performed with a `+`.
# COMMAND ----------
print("This is a new string and " + my_string)
# COMMAND ----------
# MAGIC %md
# MAGIC We can join string variables with other string variables.
# COMMAND ----------
new_string = "This is a new string and "
print(new_string + my_string)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Functions
# MAGIC Functions allow you to specify local variables as arguments and then apply custom logic. We define a function using the keyword `def` followed by the function name and, enclosed in parentheses, any variable arguments we wish to pass into the function. Finally, the function header has a `:` at the end.
# MAGIC
# MAGIC Note: In Python, indentation matters. You can see in the cell below that the logic of the function is indented in from the left margin. Any code that is indented to this level is part of the function.
# MAGIC
# MAGIC The function below takes one argument (`arg`) and then prints it.
# COMMAND ----------
def print_string(arg):
print(arg)
# COMMAND ----------
# MAGIC %md
# MAGIC When we pass a string as the argument, it will be printed.
# COMMAND ----------
print_string("foo")
# COMMAND ----------
# MAGIC %md
# MAGIC We can also pass a variable as an argument.
# COMMAND ----------
print_string(my_string)
# COMMAND ----------
# MAGIC %md
# MAGIC Oftentimes we want to return the results of our function for use elsewhere. For this we use the `return` keyword.
# MAGIC
# MAGIC The function below constructs a new string by concatenating our argument. Note that both functions and arguments can have arbitrary names, just like variables (and follow the same rules).
# COMMAND ----------
def return_new_string(string_arg):
return "The string passed to this function was " + string_arg
# COMMAND ----------
# MAGIC %md
# MAGIC Running this function returns the output.
# COMMAND ----------
return_new_string("foobar")
# COMMAND ----------
# MAGIC %md
# MAGIC Assigning it to a variable captures the output for reuse elsewhere.
# COMMAND ----------
function_output = return_new_string("foobar")
# COMMAND ----------
# MAGIC %md
# MAGIC This variable doesn't contain our function, just the results of our function (a string).
# COMMAND ----------
function_output
# COMMAND ----------
# MAGIC %md
# MAGIC ## F-strings
# MAGIC By adding the letter `f` before a Python string, you can inject variables or evaluated Python code by inserting them inside curly braces (`{}`).
# MAGIC
# MAGIC Evaluate the cell below to see string variable substitution.
# COMMAND ----------
f"I can substitute {my_string} here"
# COMMAND ----------
# MAGIC %md
# MAGIC The following cell inserts the string returned by a function.
# COMMAND ----------
f"I can substitute functions like {return_new_string('foobar')} here"
# COMMAND ----------
# MAGIC %md
# MAGIC Combine this with triple quotes and you can format a paragraph or list, like below.
# COMMAND ----------
multi_line_string = f"""
I can have many lines of text with variable substitution:
- A variable: {my_string}
- A function output: {return_new_string('foobar')}
"""
print(multi_line_string)
# COMMAND ----------
# MAGIC %md
# MAGIC Or you could format a SQL query.
# COMMAND ----------
table_name = "users"
filter_clause = "WHERE state = 'CA'"
query = f"""
SELECT *
FROM {table_name}
{filter_clause}
"""
print(query)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 26.276224 | 313 | 0.683205 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Just Enough Python for Databricks SQL
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, students should be able to:
# MAGIC * Leverage `if/else`
# MAGIC * Describe how errors impact notebook execution
# MAGIC * Write simple tests with `assert`
# MAGIC * Use `try/except` to handle errors
# COMMAND ----------
# MAGIC %md
# MAGIC ## `if/else`
# MAGIC
# MAGIC `if/else` clauses are common in many programming languages.
# MAGIC
# MAGIC Note that SQL has the `CASE WHEN ... ELSE` construct, which is similar.
# MAGIC
# MAGIC **If you're seeking to evaluate conditions within your tables or queries, use `CASE WHEN`.** Python control flow should be reserved for evaluating conditions outside of your query.
# MAGIC
# MAGIC More on this later. First, an example with `"beans"`.
# COMMAND ----------
food = "beans"
# COMMAND ----------
# MAGIC %md
# MAGIC Working with `if` and `else` is all about evaluating whether or not certain conditions are true in your execution environment.
# MAGIC
# MAGIC Note that in Python, we have the following comparison operators:
# MAGIC
# MAGIC | Syntax | Operation |
# MAGIC | --- | --- |
# MAGIC | `==` | equals |
# MAGIC | `>` | greater than |
# MAGIC | `<` | less than |
# MAGIC | `>=` | greater than or equal |
# MAGIC | `<=` | less than or equal |
# MAGIC | `!=` | not equal |
# MAGIC
# MAGIC If you read the sentence below out loud, you will be describing the control flow of your program.
# COMMAND ----------
if food == "beans":
print(f"I love {food}")
else:
print(f"I don't eat {food}")
# COMMAND ----------
# MAGIC %md
# MAGIC As expected, because the variable `food` is the string literal `"beans"`, the `if` statement evaluated to `True` and the first print statement evaluated.
# MAGIC
# MAGIC Let's assign a different value to the variable.
# COMMAND ----------
food = "beef"
# COMMAND ----------
# MAGIC %md
# MAGIC Now the first condition will evaluate as `False`. What do you think will happen when you run the following cell?
# COMMAND ----------
if food == "beans":
print(f"I love {food}")
else:
print(f"I don't eat {food}")
# COMMAND ----------
# MAGIC %md
# MAGIC Note that each time we assign a new value to a variable, this completely erases the old variable.
# COMMAND ----------
food = "potatoes"
print(food)
# COMMAND ----------
# MAGIC %md
# MAGIC The Python keyword `elif` (short for `else if`) allows us to evaluate multiple conditions.
# MAGIC
# MAGIC Note that conditions are evaluated from top to bottom. Once a condition evaluates to true, no further conditions will be evaluated.
# MAGIC
# MAGIC `if/else` control flow patterns:
# MAGIC 1. Must contain an `if` clause
# MAGIC 1. Can contain any number of `elif` clauses
# MAGIC 1. Can contain at most one `else` clause
# COMMAND ----------
if food == "beans":
print(f"I love {food}")
elif food == "potatoes":
print(f"My favorite vegetable is {food}")
elif food != "beef":
print(f"Do you have any good recipes for {food}?")
else:
print(f"I don't eat {food}")
# COMMAND ----------
# MAGIC %md
# MAGIC By encapsulating the above logic in a function, we can reuse this logic and formatting with arbitrary arguments rather than referencing globally-defined variables.
# COMMAND ----------
def foods_i_like(food):
if food == "beans":
print(f"I love {food}")
elif food == "potatoes":
print(f"My favorite vegetable is {food}")
elif food != "beef":
print(f"Do you have any good recipes for {food}?")
else:
print(f"I don't eat {food}")
# COMMAND ----------
# MAGIC %md
# MAGIC Here, we pass the string `"bread"` to the function.
# COMMAND ----------
foods_i_like("bread")
# COMMAND ----------
# MAGIC %md
# MAGIC As we evaluate the function, we locally assign the string `"bread"` to the `food` variable, and the logic behaves as expected.
# MAGIC
# MAGIC Note that we don't overwrite the value of the `food` variable as previously defined in the notebook.
# COMMAND ----------
food
# COMMAND ----------
# MAGIC %md
# MAGIC ## try/except
# MAGIC
# MAGIC While `if/else` clauses allow us to define conditional logic based on evaluating conditional statements, `try/except` focuses on providing robust error handling.
# MAGIC
# MAGIC Let's begin by considering a simple function.
# COMMAND ----------
def three_times(number):
return number * 3
# COMMAND ----------
# MAGIC %md
# MAGIC Let's assume that the desired use of this function is to multiply an integer value by 3.
# MAGIC
# MAGIC The below cell demonstrates this behavior.
# COMMAND ----------
three_times(2)
# COMMAND ----------
# MAGIC %md
# MAGIC Note what happens if a string is passed to the function.
# COMMAND ----------
three_times("2")
# COMMAND ----------
# MAGIC %md
# MAGIC In this case, we don't get an error, but we also do not get the desired outcome.
# MAGIC
# MAGIC `assert` statements allow us to run simple tests of Python code. If an `assert` statement evaluates to true, nothing happens. If it evaluates to false, an error is raised.
# MAGIC
# MAGIC Run the two cells below to assert the types of `2` and `"2"`.
# COMMAND ----------
assert type(2) == int
# COMMAND ----------
assert type("2") == int
# COMMAND ----------
# MAGIC %md
# MAGIC As expected, the string `"2"` does not evaluate as an integer.
# MAGIC
# MAGIC Python strings have a method to report whether or not they can be safely cast as numeric values.
# COMMAND ----------
assert "2".isnumeric()
# COMMAND ----------
# MAGIC %md
# MAGIC String numbers are common; you may see them as results from an API query, raw records in a JSON or CSV file, or returned by a SQL query.
# MAGIC
# MAGIC `int()` and `float()` are two common methods for casting values to numeric types. An `int` will always be a whole number, while a `float` will always have a decimal.
# COMMAND ----------
int("2")
# COMMAND ----------
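# MAGIC %md
# MAGIC As a quick sketch of the same idea with `float()`, the cast below keeps the decimal component.
# COMMAND ----------
float("2.5")
# COMMAND ----------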
# MAGIC %md
# MAGIC While Python will gladly cast a string containing numeric characters to a numeric type, it will not allow you to change other strings to numbers.
# COMMAND ----------
int("two")
# COMMAND ----------
# MAGIC %md
# MAGIC Note that errors will stop the execution of a notebook script; all cells after an error will be skipped when a notebook is scheduled as a production job.
# MAGIC
# MAGIC If we enclose code that might throw an error in a `try` statement, we can define alternate logic when an error is encountered.
# MAGIC
# MAGIC Below is a simple function that demonstrates this.
# COMMAND ----------
def try_int(num_string):
try:
return int(num_string)
except:
print(f"{num_string} is not a number!")
# COMMAND ----------
# MAGIC %md
# MAGIC When a numeric string is passed, the function will return the result as an integer.
# COMMAND ----------
try_int("2")
# COMMAND ----------
# MAGIC %md
# MAGIC When a non-numeric string is passed, an informative message is printed out.
# MAGIC
# MAGIC **NOTE**: An error is **not** raised, even though an error occurred, and no value was returned. Implementing logic that suppresses errors can lead to logic silently failing.
# COMMAND ----------
try_int("two")
# COMMAND ----------
# MAGIC %md
# MAGIC Below, our earlier function is updated to include logic for handling errors to return an informative message.
# COMMAND ----------
def three_times(number):
try:
return int(number) * 3
except ValueError as e:
print(f"""
You passed the string variable '{number}'.
The result of using this function would be to return the string '{number * 3}'.
Try passing an integer instead.
""")
# COMMAND ----------
# MAGIC %md
# MAGIC Now our function can process numbers passed as strings.
# COMMAND ----------
three_times("2")
# COMMAND ----------
# MAGIC %md
# MAGIC And prints an informative message when a string is passed.
# COMMAND ----------
three_times("two")
# COMMAND ----------
# MAGIC %md
# MAGIC Note that as implemented, this logic would only be useful for interactive execution of this logic (the message isn't currently being logged anywhere, and the code will not return the data in the desired format; human intervention would be required to act upon the printed message).
# COMMAND ----------
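# MAGIC %md
# MAGIC One possible sketch of a more job-friendly variant (the name `three_times_strict` is purely illustrative): re-raise with a clearer message so a scheduled run fails loudly instead of silently returning `None`.
# COMMAND ----------
def three_times_strict(number):
    try:
        return int(number) * 3
    except ValueError as e:
        # Raising (instead of printing) surfaces the bad input in job logs and stops downstream cells.
        raise ValueError(f"Expected a numeric value, got {number!r}") from e
# COMMAND ----------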
# MAGIC %md
# MAGIC ## Applying Python Control Flow for SQL Queries
# MAGIC
# MAGIC While the above examples demonstrate the basic principles of using these designs in Python, the goal of this lesson is to learn how to apply these concepts to executing SQL logic on Databricks.
# MAGIC
# MAGIC Let's revisit converting a SQL cell to execute in Python.
# MAGIC
# MAGIC **NOTE**: The following setup script ensures an isolated execution environment.
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW demo_tmp_vw(id, name, value) AS VALUES
# MAGIC (1, "Yve", 1.0),
# MAGIC (2, "Omar", 2.5),
# MAGIC (3, "Elia", 3.3);
# COMMAND ----------
# MAGIC %md
# MAGIC Run the SQL cell below to preview the contents of this temp view.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM demo_tmp_vw
# COMMAND ----------
# MAGIC %md
# MAGIC Running SQL in a Python cell simply requires passing the string query to `spark.sql()`.
# COMMAND ----------
query = "SELECT * FROM demo_tmp_vw"
spark.sql(query)
# COMMAND ----------
# MAGIC %md
# MAGIC But recall that executing a query with `spark.sql()` returns the results as a DataFrame rather than displaying them; below, the code is augmented to capture the result and display it.
# COMMAND ----------
query = "SELECT * FROM demo_tmp_vw"
result = spark.sql(query)
display(result)
# COMMAND ----------
# MAGIC %md
# MAGIC Using a simple `if` clause with a function allows us to execute arbitrary SQL queries, optionally displaying the results, and always returning the resultant DataFrame.
# COMMAND ----------
def simple_query_function(query, preview=True):
query_result = spark.sql(query)
if preview:
display(query_result)
return query_result
# COMMAND ----------
result = simple_query_function(query)
# COMMAND ----------
# MAGIC %md
# MAGIC Below, we execute a different query and set preview to `False`, as the purpose of the query is to create a temp view rather than return a preview of data.
# COMMAND ----------
new_query = "CREATE OR REPLACE TEMP VIEW id_name_tmp_vw AS SELECT id, name FROM demo_tmp_vw"
simple_query_function(new_query, preview=False)
# COMMAND ----------
# MAGIC %md
# MAGIC We now have a simple extensible function that could be further parameterized depending on the needs of our organization.
# MAGIC
# MAGIC For example, suppose we want to protect our company from malicious SQL, like the query below.
# COMMAND ----------
injection_query = "SELECT * FROM demo_tmp_vw; DROP DATABASE prod_db CASCADE; SELECT * FROM demo_tmp_vw"
# COMMAND ----------
# MAGIC %md
# MAGIC Below, we define a simple search for a semi-colon in the text, then use an assert statement with `try/except` to raise a custom error message.
# COMMAND ----------
def injection_check(query):
semicolon_index = query.find(";")
try:
assert semicolon_index < 0, f"Query contains semi-colon at index {semicolon_index}\nBlocking execution to avoid SQL injection attack"
except AssertionError as e:
print(query)
raise e
# COMMAND ----------
# MAGIC %md
# MAGIC **NOTE**: The example shown here is not sophisticated, but seeks to demonstrate a general principle. Always be wary of allowing untrusted users to pass text that will be passed to SQL queries. Also note that only one query can be executed using `spark.sql()`, so text with a semi-colon will always throw an error.
# COMMAND ----------
injection_check(injection_query)
# COMMAND ----------
# MAGIC %md
# MAGIC If we add this method to our earlier query function, we now have a more robust function that will assess each query for potential threats before execution.
# COMMAND ----------
def secure_query_function(query, preview=True):
injection_check(query)
query_result = spark.sql(query)
if preview:
display(query_result)
return query_result
# COMMAND ----------
# MAGIC %md
# MAGIC As expected, we see normal performance with a safe query.
# COMMAND ----------
secure_query_function(query)
# COMMAND ----------
# MAGIC %md
# MAGIC But execution is prevented when bad logic is run.
# COMMAND ----------
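# MAGIC %md
# MAGIC For example, passing the `injection_query` defined above to `secure_query_function` should raise the `AssertionError` from `injection_check` before any SQL is executed.
# COMMAND ----------
secure_query_function(injection_query)
# COMMAND ----------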
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 27.69697 | 321 | 0.670061 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Just Enough Python for Databricks SQL Lab
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, students should be able to:
# MAGIC * Review basic Python code and describe expected outcomes of code execution
# MAGIC * Reason through control flow statements in Python functions
# MAGIC * Add parameters to a SQL query by wrapping it in a Python function
# COMMAND ----------
# MAGIC %md
# MAGIC # Reviewing Python Basics
# MAGIC
# MAGIC In the previous notebook, we briefly explored using `spark.sql()` to execute arbitrary SQL commands from Python.
# MAGIC
# MAGIC Look at the following 3 cells. Before executing each cell, identify:
# MAGIC 1. The expected output of cell execution
# MAGIC 1. What logic is being executed
# MAGIC 1. Changes to the resultant state of the environment
# MAGIC
# MAGIC Then execute the cells, compare the results to your expectations, and see the explanations below.
# COMMAND ----------
course = "python_for_sql"
# COMMAND ----------
spark.sql(f"SELECT '{course}'")
# COMMAND ----------
display(spark.sql(f"SELECT '{course}'"))
# COMMAND ----------
# MAGIC %md
# MAGIC 1. `Cmd 3` assigns a string to a variable. When a variable assignment is successful, no output is displayed to the notebook. A new variable is added to the current execution environment.
# MAGIC 1. `Cmd 4` executes a SQL query and returns the results as a DataFrame. In this case, the SQL query is just to select a string, so no changes to our environment occur. When a returned DataFrame is not captured, the schema for the DataFrame is displayed alongside the word `DataFrame`.
# MAGIC 1. `Cmd 5` executes the same SQL query and displays the returned DataFrame. This combination of `display()` and `spark.sql()` most closely mirrors executing logic in a `%sql` cell; the results will always be printed in a formatted table, assuming results are returned by the query; some queries will instead manipulate tables or databases, in which case the word `OK` will print to show successful execution. In this case, no changes to our environment occur from running this code.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Setting Up a Development Environment
# MAGIC
# MAGIC Throughout this course, we use logic similar to the following cell to capture information about the user currently executing the notebook and create an isolated development database.
# MAGIC
# MAGIC The `re` library is the [standard Python library for regex](https://docs.python.org/3/library/re.html).
# MAGIC
# MAGIC Databricks SQL provides the `current_user()` function to capture the username of the user running the query; the `.first()[0]` code is a quick hack to capture the first row of the first column of a query executed with `spark.sql()` (in this case, we do this safely knowing that there will only be 1 row and 1 column).
# MAGIC
# MAGIC All other logic below is just string formatting.
# COMMAND ----------
import re
username = spark.sql("SELECT current_user()").first()[0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""{course}_{re.sub("[^a-zA-Z0-9]", "_", username)}_db"""
print(f"""
username: {username}
userhome: {userhome}
database: {database}
""")
# COMMAND ----------
# MAGIC %md
# MAGIC Below, we add a simple control flow statement to this logic to create and use this user-specific database. Optionally, we will reset this database and drop all of the contents on repeat execution. (Note that the default mode is `"reset"`.)
# COMMAND ----------
def create_database(course, mode="reset"):
import re
username = spark.sql("SELECT current_user()").first()[0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""{course}_{re.sub("[^a-zA-Z0-9]", "_", username)}_db"""
print(f"""
username: {username}
userhome: {userhome}
database: {database}
""")
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
spark.sql(f"""
CREATE DATABASE IF NOT EXISTS {database}
LOCATION '{userhome}'
""")
spark.sql(f"USE {database}")
create_database(course)
# COMMAND ----------
# MAGIC %md
# MAGIC While this logic as defined is geared toward isolating students in shared workspaces for instructional purposes, the same basic design could be leveraged for testing new logic in an isolated environment before pushing to production.
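# COMMAND ----------
# MAGIC %md
# MAGIC As a hypothetical example of that pattern, the same function could create a scratch database for testing new logic and then tear it down afterward. The `my_pipeline_test` name below is purely illustrative and is not used elsewhere in this course.
# COMMAND ----------
# Spin up an isolated scratch database; "reset" drops anything left from a previous run
create_database("my_pipeline_test", mode="reset")
# ... develop and validate new logic here ...
# Tear down the scratch database and its storage when finished
test_db = f"""my_pipeline_test_{re.sub("[^a-zA-Z0-9]", "_", username)}_db"""
spark.sql(f"DROP DATABASE IF EXISTS {test_db} CASCADE")
dbutils.fs.rm(f"dbfs:/user/{username}/my_pipeline_test", True)
# Switch back to the course database defined earlier in this notebook
spark.sql(f"USE {database}")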
# COMMAND ----------
# MAGIC %md
# MAGIC ## Handling Errors Gracefully
# MAGIC
# MAGIC Review the logic in the function below.
# MAGIC
# MAGIC Note that we've just declared a new database that currently contains no tables.
# COMMAND ----------
def query_or_make_demo_table(table):
try:
display(spark.sql(f"SELECT * FROM {table}"))
except:
spark.sql(f"""
CREATE TABLE {table}
(id INT, name STRING, value DOUBLE, state STRING)
""")
spark.sql(f"""
INSERT INTO {table}
VALUES (1, "Yve", 1.0, "CA"),
(2, "Omar", 2.5, "NY"),
(3, "Elia", 3.3, "OH"),
(4, "Rebecca", 4.7, "TX"),
(5, "Ameena", 5.3, "CA"),
(6, "Ling", 6.6, "NY"),
(7, "Pedro", 7.1, "KY")
""")
display(spark.sql(f"SELECT * FROM {table}"))
# COMMAND ----------
# MAGIC %md
# MAGIC Try to identify the following before executing the next cell:
# MAGIC 1. The expected output of cell execution
# MAGIC 1. What logic is being executed
# MAGIC 1. Changes to the resultant state of the environment
# COMMAND ----------
query_or_make_demo_table("demo_table")
# COMMAND ----------
# MAGIC %md
# MAGIC Now answer the same three questions before running the same query below.
# COMMAND ----------
query_or_make_demo_table("demo_table")
# COMMAND ----------
# MAGIC %md
# MAGIC - On the first execution, the table `demo_table` did not yet exist. As such, the attempt to return the contents of the table created an error, which resulted in our `except` block of logic executing. This block:
# MAGIC 1. Created the table
# MAGIC 1. Inserted values
# MAGIC 1. Returned the contents of the table
# MAGIC - On the second execution, the table `demo_table` already exists, and so the first query in the `try` block executes without error. As a result, we just display the results of the query without modifying anything in our environment.
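# COMMAND ----------
# MAGIC %md
# MAGIC A bare `except` like the one above catches every possible error, including typos and permission problems. As a design alternative, the hypothetical variant below (`query_or_make_demo_table_v2`) checks the catalog explicitly instead, so unrelated errors surface rather than being silently swallowed.
# COMMAND ----------
def query_or_make_demo_table_v2(table):
    # Check the current database's catalog rather than relying on a bare except
    exists = table in [t.name for t in spark.catalog.listTables()]
    if not exists:
        spark.sql(f"""
            CREATE TABLE {table}
            (id INT, name STRING, value DOUBLE, state STRING)
        """)
        spark.sql(f"""
            INSERT INTO {table}
            VALUES (1, "Yve", 1.0, "CA"),
                   (2, "Omar", 2.5, "NY"),
                   (3, "Elia", 3.3, "OH")
        """)
    display(spark.sql(f"SELECT * FROM {table}"))
# query_or_make_demo_table_v2("demo_table") would behave like the second run above,
# since the table already exists at this point in the notebook.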
# COMMAND ----------
# MAGIC %md
# MAGIC ## Adapting SQL to Python
# MAGIC Let's consider the following SQL query against our demo table created above.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT id, value
# MAGIC FROM demo_table
# MAGIC WHERE state = "CA"
# COMMAND ----------
# MAGIC %md
# MAGIC Let's use this simple example to practice creating a Python function that adds optional functionality.
# MAGIC
# MAGIC Our target function will:
# MAGIC * Always return only the `id` and `value` columns from a table named `demo_table`
# MAGIC * Allow filtering results by state, but default to all states
# MAGIC * Optionally return the query result object (a PySpark DataFrame)
# MAGIC
# MAGIC Stretch Goal:
# MAGIC * Add logic to check that the value passed for the `state` filter consists of two uppercase letters
# MAGIC
# MAGIC Some starter logic has been provided below.
# COMMAND ----------
# ANSWER
def preview_values(state=None, return_results=False):
query = "SELECT id, value FROM demo_table"
if state is not None:
# assert state == state.upper() and len(state) == 2, "Please use standard 2 letter uppercase state abbreviations"
query += f" WHERE state = '{state}'"
query_results = spark.sql(query)
display(query_results)
    if return_results:
return query_results
# COMMAND ----------
# MAGIC %md
# MAGIC The assert statements below can be used to check whether or not your function works as intended.
# COMMAND ----------
import pyspark.sql.dataframe
assert preview_values(return_results=True).columns == ["id", "value"], "Query should only return `id` and `value` columns"
assert preview_values() == None, "Function should not return anything by default"
assert type(preview_values(return_results=True)) == pyspark.sql.dataframe.DataFrame, "Function should optionally return the DataFrame results"
assert preview_values(state="OH", return_results=True).first()[0] == 3, "Function should allow filtering by state"
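# COMMAND ----------
# MAGIC %md
# MAGIC One possible approach to the stretch goal is sketched below: before appending the `WHERE` clause, validate that the `state` argument looks like a standard two-letter uppercase abbreviation. This is only one option; uncommenting the `assert` in the answer above accomplishes much the same thing. The `preview_values_checked` name is just for illustration.
# COMMAND ----------
def preview_values_checked(state=None, return_results=False):
    query = "SELECT id, value FROM demo_table"
    if state is not None:
        # Stretch goal: reject anything that is not exactly two uppercase letters
        assert len(state) == 2 and state.isalpha() and state.isupper(), \
            "Please use standard 2 letter uppercase state abbreviations"
        query += f" WHERE state = '{state}'"
    query_results = spark.sql(query)
    display(query_results)
    if return_results:
        return query_results
# A value such as "ca" or "Cali" now fails fast with a clear message
preview_values_checked(state="CA")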
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 37.485356 | 490 | 0.684897 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Structured Streaming Concepts
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Describe the programming model used by Spark Structured Streaming
# MAGIC * Configure required options to perform a streaming read on a source
# MAGIC * Describe the requirements for end-to-end fault tolerance
# MAGIC * Configure required options to perform a streaming write to a sink
# MAGIC * Interact with streaming queries and stop active streams
# MAGIC
# MAGIC ## Datasets Used
# MAGIC The source contains smartphone accelerometer samples from devices and users with the following columns:
# MAGIC
# MAGIC | Field | Description |
# MAGIC | ------------- | ----------- |
# MAGIC | Arrival_Time | time data was received |
# MAGIC | Creation_Time | event time |
# MAGIC | Device | type of Model |
# MAGIC | Index | unique identifier of event |
# MAGIC | Model | e.g. Nexus4 |
# MAGIC | User | unique user identifier |
# MAGIC | geolocation | city & country |
# MAGIC | gt | transportation mode |
# MAGIC | id | unused null field |
# MAGIC | x | acceleration in x-dir |
# MAGIC | y | acceleration in y-dir |
# MAGIC | z | acceleration in z-dir |
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to configure our "classroom."
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Micro-Batches as a Table
# MAGIC
# MAGIC For more information, see the analogous section in the [Structured Streaming Programming Guide](http://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#basic-concepts) (from which several images have been borrowed).
# MAGIC
# MAGIC Spark Structured Streaming approaches streaming data by modeling it as a series of continuous appends to an unbounded table. While similar to defining **micro-batch** logic, this model allows incremental queries to be defined against streaming sources as if they were static input (though the fact that the input is an unbounded table does impose some constraints).
# MAGIC
# MAGIC <img src="http://spark.apache.org/docs/latest/img/structured-streaming-stream-as-a-table.png"/>
# MAGIC
# MAGIC ### Basic Concepts
# MAGIC
# MAGIC - The developer defines an **input table** by configuring a streaming read against a **source**. The syntax for doing this is similar to working with static data.
# MAGIC - A **query** is defined against the input table. Both the DataFrames API and Spark SQL can be used to easily define transformations and actions against the input table.
# MAGIC - This logical query on the input table generates the **results table**. The results table contains the incremental state information of the stream.
# MAGIC - The **output** of a streaming pipeline will persist updates to the results table by writing to an external **sink**. Generally, a sink will be a durable system such as files or a pub/sub messaging bus.
# MAGIC - New rows are appended to the input table for each **trigger interval**. These new rows are essentially analogous to micro-batch transactions and will be automatically propagated through the results table to the sink.
# MAGIC
# MAGIC <img src="http://spark.apache.org/docs/latest/img/structured-streaming-model.png"/>
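# COMMAND ----------
# MAGIC %md
# MAGIC The function below is a minimal, hypothetical sketch that ties these concepts together with the DataFrame API; it is defined but never called in this lesson. The `source_path` and `checkpoint_path` parameters, the `device` column, and the `example_device_counts` table name are all placeholder assumptions, not part of the dataset used later in this notebook.
# COMMAND ----------
def example_streaming_pipeline(source_path, checkpoint_path):
    # 1. Input table: a streaming read against a source of JSON files in cloud storage
    input_df = (spark.readStream
        .format("cloudFiles")
        .option("cloudFiles.format", "json")
        .option("cloudFiles.schemaLocation", checkpoint_path)
        .load(source_path))
    # 2. Query: an incremental transformation defining the results table
    results_df = input_df.groupBy("device").count()
    # 3. Output: persist the results table to a durable sink; new rows arriving in the
    #    source are processed each trigger interval and propagated to the sink
    return (results_df.writeStream
        .option("checkpointLocation", checkpoint_path)
        .outputMode("complete")
        .trigger(processingTime="30 seconds")
        .table("example_device_counts"))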
# COMMAND ----------
# MAGIC %md
# MAGIC ## End-to-end Fault Tolerance
# MAGIC
# MAGIC Structured Streaming ensures end-to-end exactly-once fault-tolerance guarantees through _checkpointing_ (discussed below) and <a href="https://en.wikipedia.org/wiki/Write-ahead_logging" target="_blank">Write Ahead Logs</a>.
# MAGIC
# MAGIC Structured Streaming sources, sinks, and the underlying execution engine work together to track the progress of stream processing. If a failure occurs, the streaming engine attempts to restart and/or reprocess the data.
# MAGIC For best practices on recovering from a failed streaming query see <a href="https://docs.databricks.com/spark/latest/structured-streaming/production.html#recover-from-query-failures" target="_blank">docs</a>.
# MAGIC
# MAGIC This approach _only_ works if the streaming source is replayable; replayable sources include cloud-based object storage and pub/sub messaging services.
# MAGIC
# MAGIC At a high level, the underlying streaming mechanism relies on a couple of approaches:
# MAGIC
# MAGIC * First, Structured Streaming uses checkpointing and write-ahead logs to record the offset range of data being processed during each trigger interval.
# MAGIC * Next, the streaming sinks are designed to be _idempotent_—that is, multiple writes of the same data (as identified by the offset) do _not_ result in duplicates being written to the sink.
# MAGIC
# MAGIC Taken together, replayable data sources and idempotent sinks allow Structured Streaming to ensure **end-to-end, exactly-once semantics** under any failure condition.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Reading a Stream
# MAGIC
# MAGIC The `spark.readStream` property returns a `DataStreamReader` used to configure and query the stream.
# MAGIC
# MAGIC Configuring a streaming read on a source requires:
# MAGIC * The schema of the data
# MAGIC * **NOTE**: Some streaming sources allow for schema inference
# MAGIC * The `format` of the source: a <a href="https://docs.databricks.com/spark/latest/structured-streaming/data-sources.html" target="_blank">file format or named connector</a>
# MAGIC * **NOTE**: `delta` is the default format for all reads and writes in Databricks
# MAGIC * Additional source-specific configuration options. For example:
# MAGIC * [`cloudFiles`](https://docs.databricks.com/spark/latest/structured-streaming/auto-loader-s3.html)
# MAGIC * <a href="https://docs.databricks.com/spark/latest/structured-streaming/kafka.html" target="_blank">Kafka</a>
# MAGIC * The name of the source table or the location of the files in object storage
# MAGIC
# MAGIC Below, we define a streaming read against a source (represented by `dataSource`) consisting of files from cloud storage.
# MAGIC
# MAGIC **NOTE**: We can think of this `DataStreamReader` as an incremental temp view defined against an ever-appending source table. Just as with a temp view, we only store the query plan when we set up an incremental read. It's not until we query results that we'll see compute happen.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### The Schema
# MAGIC
# MAGIC Working with `cloudFiles` allows Databricks to automatically infer the schema from most file sources.
# MAGIC
# MAGIC Once data is loaded into a Delta Lake table, the schema for downstream incremental reads will be retrieved automatically from the table metadata.
# MAGIC
# MAGIC Here, we'll provide an explicit schema for our data.
# COMMAND ----------
schema = """Arrival_Time BIGINT,
Creation_Time BIGINT,
Device STRING,
Index BIGINT,
Model STRING,
User STRING,
geolocation STRUCT<
city: STRING,
country: STRING>,
gt STRING,
Id BIGINT,
x DOUBLE,
y DOUBLE,
z DOUBLE"""
# COMMAND ----------
# MAGIC %md
# MAGIC ## Creating a Streaming Temp View
# MAGIC
# MAGIC Below we pull all of the above concepts together to define a streaming read.
# MAGIC
# MAGIC If we were continuing to build out our query with PySpark, we would capture this as a DataFrame. Instead, we use `createOrReplaceTempView` to create an entity that can be queried locally with SQL.
# COMMAND ----------
(spark
.readStream
.schema(schema)
.format("cloudFiles")
.option("cloudFiles.format", "json")
.load(dataSource)
.createOrReplaceTempView("streaming_tmp_vw")
)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Comparing to Static Reads
# MAGIC
# MAGIC The above logic provides us with more or less the same result as the static query below.
# COMMAND ----------
spark.sql(f"CREATE OR REPLACE TEMP VIEW static_tmp_vw AS SELECT * FROM json.`{dataSource}`")
# COMMAND ----------
# MAGIC %md
# MAGIC When we query a static read on data, we display the results of the query at a point in time.
# MAGIC
# MAGIC **NOTE**: The `display(spark.table())` pattern shown in the next cell is the same as executing a `SELECT * FROM` for a table or view. Later, we'll see that this allows us to pass streaming temp views back to the DataFrame API to write out a stream.
# COMMAND ----------
display(spark.table("static_tmp_vw"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC But when we execute a query on a streaming temporary view, we'll continue to update the results of the query as new data arrives in the source.
# MAGIC
# MAGIC Think of a query executed against a streaming temp view as an **always-on incremental query**.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM streaming_tmp_vw
# COMMAND ----------
# MAGIC %md
# MAGIC Before continuing, click `Stop Execution` at the top of the notebook, `Cancel` immediately under the cell, or run the following cell to stop all active streaming queries.
# COMMAND ----------
for s in spark.streams.active:
print("Stopping " + s.id)
s.stop()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Working with Streaming Data
# MAGIC We can execute most transformations against streaming temp views the same way we would with static data. Here, we'll run a simple aggregation to get counts of records for each `device`.
# MAGIC
# MAGIC Because we are querying a streaming temp view, this becomes a streaming query that executes indefinitely, rather than completing after retrieving a single set of results. For streaming queries like this, Databricks Notebooks include interactive dashboards that allow users to monitor streaming performance. Explore this below.
# MAGIC
# MAGIC ![](https://files.training.databricks.com/images/adbcore/streaming-dashboard.png)
# MAGIC
# MAGIC One important note regarding this example: this is merely displaying an aggregation of input as seen by the stream. **None of these records are being persisted anywhere at this point.**
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT device, COUNT(device) AS total_records
# MAGIC FROM streaming_tmp_vw
# MAGIC GROUP BY device
# COMMAND ----------
# MAGIC %md
# MAGIC Before continuing, click `Stop Execution` at the top of the notebook, `Cancel` immediately under the cell, or run the following cell to stop all active streaming queries.
# COMMAND ----------
for s in spark.streams.active:
print("Stopping " + s.id)
s.stop()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Persisting Streaming Results
# MAGIC
# MAGIC In order to persist incremental results, we need to pass our logic back to the PySpark Structured Streaming DataFrames API.
# MAGIC
# MAGIC Above, we created a temp view from a PySpark streaming DataFrame. If we create another temp view from the results of a query against a streaming temp view, we'll again have a streaming temp view.
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW device_counts_tmp_vw AS (
# MAGIC SELECT device, COUNT(device) AS total_records
# MAGIC FROM streaming_tmp_vw
# MAGIC GROUP BY device
# MAGIC )
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Writing a Stream
# MAGIC
# MAGIC To persist the results of a streaming query, we must write them out to durable storage. The `DataFrame.writeStream` method returns a `DataStreamWriter` used to configure the output.
# MAGIC
# MAGIC There are a number of required parameters to configure a streaming write:
# MAGIC * The `format` of the **output sink**; see <a href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#output-sinks" target="_blank">documentation</a>
# MAGIC * The location of the **checkpoint directory**
# MAGIC * The **output mode**
# MAGIC * Configurations specific to the output sink, such as:
# MAGIC * <a href="https://docs.databricks.com/spark/latest/structured-streaming/kafka.html" target="_blank">Kafka</a>
# MAGIC * A <a href="https://spark.apache.org/docs/latest/api/python/pyspark.sql.html?highlight=foreach#pyspark.sql.streaming.DataStreamWriter.foreach"target="_blank">custom sink</a> via `writeStream.foreach(...)`
# MAGIC
# MAGIC Once the configuration is completed, we trigger the job with a call to `.table()`. If we didn't want to create a table and instead wanted to write directly to storage, we would use `.start()` instead.
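# COMMAND ----------
# MAGIC %md
# MAGIC For reference, the sketch below builds (but does not start) a writer configuration targeting a storage path rather than a table. The `output_path` and `output_checkpoint` locations are hypothetical placeholders; calling `.start()` or `.table()` on the writer is what actually begins execution.
# COMMAND ----------
# Hypothetical locations; nothing is triggered until .start() or .table() is called
output_path = "dbfs:/mnt/example/device_counts"
output_checkpoint = "dbfs:/mnt/example/_checkpoints/device_counts"
path_writer = (spark.table("device_counts_tmp_vw")
    .writeStream
    .format("delta")                                   # output sink format
    .option("checkpointLocation", output_checkpoint)   # checkpoint directory
    .outputMode("complete"))                           # output mode (see below)
# path_writer.start(output_path) would write files directly to output_path;
# path_writer.table("device_counts") would instead create and populate a table.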
# COMMAND ----------
# MAGIC %md
# MAGIC ### Checkpointing
# MAGIC
# MAGIC Databricks creates checkpoints by storing the current state of your streaming job to cloud storage.
# MAGIC
# MAGIC Checkpointing combines with write ahead logs to allow a terminated stream to be restarted and continue from where it left off.
# MAGIC
# MAGIC Checkpoints cannot be shared between separate streams.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Output Modes
# MAGIC
# MAGIC Streaming jobs have output modes similar to static/batch workloads. [More details here](https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#output-modes).
# MAGIC
# MAGIC | Mode | Example | Notes |
# MAGIC | ------------- | ----------- | --- |
# MAGIC | **Append** | `.outputMode("append")` | Only the new rows appended to the Result Table since the last trigger are written to the sink. This is the default. |
# MAGIC | **Complete** | `.outputMode("complete")` | The entire updated Result Table is written to the sink. The individual sink implementation decides how to handle writing the entire table. |
# MAGIC | **Update** | `.outputMode("update")` | Only the rows in the Result Table that were updated since the last trigger will be outputted to the sink.|
# MAGIC
# MAGIC **NOTE**: Not all sinks will support `update` mode.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### Defining the Trigger Interval
# MAGIC
# MAGIC When defining a streaming write, the `trigger` method specifies when the system should process the next set of data.
# MAGIC
# MAGIC | Trigger Type | Example | Notes |
# MAGIC |----------------------------------------|-----------|-------------|
# MAGIC | Unspecified | | The query will be executed as soon as the system has completed processing the previous query (this is the default) |
# MAGIC | Fixed interval micro-batches | `.trigger(processingTime="2 minutes")` | The query will be executed in micro-batches and kicked off at the user-specified intervals |
# MAGIC | One-time micro-batch | `.trigger(once=True)` | The query will execute _only one_ micro-batch to process all the available data and then stop on its own |
# MAGIC | Continuous w/fixed checkpoint interval | `.trigger(continuous="1 second")` | The query will be executed in a low-latency, <a href="http://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#continuous-processing" target = "_blank">continuous processing mode</a>. _EXPERIMENTAL_ |
# MAGIC
# MAGIC Note that triggers are specified when defining how data will be written to a sink and control the frequency of micro-batches. By default, Spark will automatically detect and process all data in the source that has been added since the last trigger; some sources allow configuration to limit the size of each micro-batch.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_best_24.png"/> Read <a href="https://databricks.com/blog/2017/05/22/running-streaming-jobs-day-10x-cost-savings.html" target="_blank">this blog post</a> to learn more about using one-time triggers to simplify CDC with a hybrid streaming/batch design.
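# COMMAND ----------
# MAGIC %md
# MAGIC The hedged sketch below contrasts the interval-based and one-time trigger styles on the streaming temp view defined earlier. Both writers are only configured, not started, and the checkpoint locations are hypothetical placeholders.
# COMMAND ----------
# Micro-batches kicked off every 2 minutes for as long as the stream runs
interval_writer = (spark.table("device_counts_tmp_vw")
    .writeStream
    .outputMode("complete")
    .option("checkpointLocation", "dbfs:/mnt/example/_checkpoints/interval_demo")
    .trigger(processingTime="2 minutes"))
# Process whatever data is currently available in a single micro-batch, then stop (batch-style)
once_writer = (spark.table("device_counts_tmp_vw")
    .writeStream
    .outputMode("complete")
    .option("checkpointLocation", "dbfs:/mnt/example/_checkpoints/once_demo")
    .trigger(once=True))
# Calling .table(...) or .start(...) on either writer would begin execution.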
# COMMAND ----------
# MAGIC %md
# MAGIC ## Pulling It All Together
# MAGIC
# MAGIC The code below demonstrates using `spark.table()` to load data from a streaming temp view back to a DataFrame. Note that Spark will always load streaming views as a streaming DataFrame and static views as static DataFrames (meaning that incremental processing must be defined with read logic to support incremental writing).
# COMMAND ----------
streamingQuery = (spark.table("device_counts_tmp_vw")
.writeStream
.option("checkpointLocation", checkpointPath)
.outputMode("complete")
.trigger(processingTime='10 seconds')
.table("device_counts")
)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Querying the Output
# MAGIC Now let's query the output we've written from SQL. Because the result is a table, we only need to deserialize the data to return the results.
# MAGIC
# MAGIC Because we are now querying a table (not a streaming DataFrame), the following will **not** be a streaming query.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT *
# MAGIC FROM device_counts
# COMMAND ----------
# MAGIC %md
# MAGIC ## Debugging with the Memory Sink
# MAGIC
# MAGIC The **memory** sink can be a useful tool for debugging. It provides a quick and easy sink requiring no setup. The output is stored as an in-memory table, with a name defined using `queryName`.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_warn_24.png"/> This should be used only for debugging purposes with low data volumes, since the entire output is collected and stored in the driver’s memory.
# COMMAND ----------
streamingQueryMem = (spark.table("streaming_tmp_vw")
.writeStream
.format("memory") # memory = store in-memory table (for testing only)
.queryName("streaming_query_mem") # name of the in-memory table
.outputMode("append")
.start()
)
# COMMAND ----------
# MAGIC %md
# MAGIC Let's examine the contents of the in-memory table with the same query used previously. Like the previous query we ran against the Delta output, this will **not** be a streaming query. In this case, we are simply querying the in-memory table established by the memory sink in the previous cell.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT device, COUNT(device) AS total_records
# MAGIC FROM streaming_query_mem
# MAGIC GROUP BY device
# COMMAND ----------
# MAGIC %md
# MAGIC ## Interacting with Streaming Queries
# MAGIC
# MAGIC
# MAGIC In the logic defined above, data is read from JSON files and then saved out in the Delta Lake format. Note that because Delta creates a new version for each transaction, when working with streaming data this means the Delta table creates a new version for each trigger interval in which new data is processed. [More info on streaming with Delta](https://docs.databricks.com/delta/delta-streaming.html#table-streaming-reads-and-writes).
# COMMAND ----------
# MAGIC %md
# MAGIC The `recentProgress` attribute allows access to metadata about recently processed micro-batches. Let's dump the contents for the streaming query created earlier.
# COMMAND ----------
streamingQuery.recentProgress
# COMMAND ----------
# MAGIC %md
# MAGIC In addition to referencing `StreamingQuery` objects returned by `writeStream`, as we did above, we can iterate on the `streams.active` attribute in `SparkSession` to identify all active streaming queries.
# COMMAND ----------
for s in spark.streams.active: # Iterate over all streams
print(s.id) # Print the stream's id
# COMMAND ----------
# MAGIC %md
# MAGIC Let's iterate over all active streams and stop them. This is important: if you don't, the streams will keep your cluster busy indefinitely!
# MAGIC
# MAGIC After running the following cell, feel free to examine the cells earlier that initiated streaming queries; notice they have both been canceled.
# COMMAND ----------
for s in spark.streams.active:
print("Stopping " + s.id)
s.stop()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Incremental Ingestion with Auto Loader
# MAGIC
# MAGIC Incremental ETL is important since it allows us to deal solely with new data that has been encountered since the last ingestion. Reliably processing only the new data is key to achieving scalability.
# MAGIC
# MAGIC Ingesting into a Delta Lake table from a data lake is a common use case that has traditionally been challenging to properly set up, typically relying on the integration of always-on services like Kafka to track the files that have been ingested, and to monitor cloud storage for new file arrivals. Databricks Auto Loader abstracts all this and provides an easy-to-use mechanism for incrementally and efficiently processing new data files as they arrive in cloud file storage, in the form of a structured streaming source.
# MAGIC
# MAGIC Given an input directory path on the cloud file storage, the `cloudFiles` source automatically processes new files as they arrive, with the option of also processing existing files in that directory. For full details, refer to the <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader.html" target="_blank">documentation</a>.
# MAGIC
# MAGIC **Due to the benefits and scalability that Auto Loader delivers, Databricks recommends its use as general best practice when ingesting data from cloud storage.**
# COMMAND ----------
# MAGIC %md
# MAGIC Reset the output directory in preparation to stream data using Auto Loader.
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC ### Reading Data with Auto Loader
# MAGIC
# MAGIC An example invocation of Auto Loader is provided below. Comparing against the standard streaming read from earlier, notice the following differences:
# MAGIC
# MAGIC * Specify a `format` of `cloudFiles`
# MAGIC * Specify the underlying format of the data using the `cloudFiles.format` option
# MAGIC * The `dataLandingLocation` source below represents a cloud storage location from where data is being ingested
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### Schema Inference and Evolution
# MAGIC
# MAGIC As mentioned earlier, every streaming DataFrame must have a schema. But Auto Loader can be configured to take a more active role in inferring and maintaining the schema of the data as it evolves.
# MAGIC
# MAGIC By omitting a schema specification, Auto Loader will detect the schema based on the data seen in the input. Specifying the `cloudFiles.schemaLocation` option allows Auto Loader to track the schema, thereby improving performance and ensuring stability of the schema across stream restarts. A common pattern is to use `checkpointLocation` for this purpose.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_warn_24.png"/> There must be data present for schema inference to work; otherwise you must specify a schema.
# MAGIC
# MAGIC **Schema evolution** allows changes to a schema in response to data that changes over time. This can be an important feature in some use cases.
# COMMAND ----------
incrementalStreamingDF = (spark
.readStream
.format("cloudFiles")
.option("cloudFiles.format", "json")
.option("cloudFiles.schemaLocation", checkpointPath)
.load(dataLandingLocation)
)
# COMMAND ----------
# MAGIC %md
# MAGIC Writing the output also takes a similar form as the previous streaming case. Note the following differences:
# MAGIC * Specify the `mergeSchema` option to activate schema evolution. If any changes to the schema occur over time, the schema is adapted rather than the write being rejected. This can be useful in some use cases.
# MAGIC * Omitting the trigger to allow the query to continue running, ingesting new data as it arrives. If you wish to schedule your ETL process to run in batch mode, consider using a one-time trigger instead.
# COMMAND ----------
(incrementalStreamingDF
.writeStream
.format("delta")
.option("checkpointLocation", checkpointPath)
.option("mergeSchema", "true")
.table(outputTable)
)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Querying the Output
# MAGIC By now the following query against the output table will likely seem familiar. Run it a few times, and it will become apparent that nothing changes, as no data is arriving in our simulated cloud storage.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT Device, COUNT(Device) AS Count
# MAGIC FROM ${c.outputTable}
# MAGIC GROUP BY Device
# COMMAND ----------
# MAGIC %md
# MAGIC ## Land New Data
# MAGIC Run the following cell to simulate the arrival of new data in our cloud storage. Each time you execute the cell below, a new file will be written to our source directory. Following this cell, observe the stream monitor above, and notice the impact on the results when re-running the query.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Clean Up
# MAGIC Stop active streams and remove created resources before continuing.
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <h2><img src="https://files.training.databricks.com/images/105/logo_spark_tiny.png"> Summary</h2>
# MAGIC
# MAGIC We used `readStream` to stream input from a variety of sources, including Databricks Auto Loader. Auto Loader augments Structured Streaming functionality by providing an easy-to-use interface of performing incremental ETL from cloud storage.
# MAGIC
# MAGIC We also explored various options for consuming, writing and querying the streamed input data.
# MAGIC
# MAGIC Finally, we explored the array of active streams maintained in the `SparkSession` object.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <h2><img src="https://files.training.databricks.com/images/105/logo_spark_tiny.png"> Additional Topics & Resources</h2>
# MAGIC
# MAGIC * <a href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#" target="_blank">Structured Streaming Programming Guide</a><br>
# MAGIC * <a href="https://www.youtube.com/watch?v=rl8dIzTpxrI" target="_blank">A Deep Dive into Structured Streaming</a> by Tathagata Das. This is an excellent video describing how Structured Streaming works.
# MAGIC * <a href="https://docs.databricks.com/spark/latest/structured-streaming/production.html#id2" target="_blank">Failed Streaming Query Recovery</a> Best Practices for Recovery.
# MAGIC * <a href="https://databricks.com/blog/2018/03/20/low-latency-continuous-processing-mode-in-structured-streaming-in-apache-spark-2-3-0.html" target="_blank">Continuous Processing Mode</a> Lowest possible latency stream processing. Currently Experimental.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 48.116071 | 529 | 0.72135 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Windows and Watermarks
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Explain why some methods will not work on streaming data
# MAGIC * Use windows to aggregate over chunks of data rather than all data
# MAGIC * Compare tumbling windows and sliding windows
# MAGIC * Apply watermarking to manage state
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to configure our "classroom."
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Configure Streaming Read
# MAGIC
# MAGIC This lesson uses the same data as the previous notebook, again loaded with Auto Loader.
# MAGIC
# MAGIC The code below registers a streaming DataFrame (which we'll use again in a moment) and a streaming temp view.
# MAGIC
# MAGIC Note the use of the `selectExpr` method, which allows multiple SQL operations to be configured on a per-column basis in PySpark DataFrames. Here, we simplify the data by selecting only two columns:
# MAGIC * `Creation_Time`, originally encoded in nanoseconds, is converted to unixtime and renamed to `creation_time`
# MAGIC * `gt` is renamed to `action`
# COMMAND ----------
from pyspark.sql.functions import col
schema = """Arrival_Time BIGINT,
Creation_Time BIGINT,
Device STRING,
Index BIGINT,
Model STRING,
User STRING,
geolocation STRUCT<
city: STRING,
country: STRING>,
gt STRING,
Id BIGINT,
x DOUBLE,
y DOUBLE,
z DOUBLE"""
streamingDF = (spark
.readStream
.format("cloudFiles")
.option("cloudFiles.format", "json")
.schema(schema)
.load(dataLandingLocation)
.selectExpr("cast(Creation_Time/1E9 AS timestamp) AS creation_time", "gt AS action")
)
streamingDF.createOrReplaceTempView("streaming_tmp_vw")
# COMMAND ----------
# MAGIC %md
# MAGIC ### Unsupported Operations
# MAGIC
# MAGIC Most operations on a streaming DataFrame are identical to a static DataFrame. There are <a href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#unsupported-operations" target="_blank">some exceptions to this</a>.
# MAGIC
# MAGIC Consider the model of the data as a constantly appending table. Sorting is one of a handful of operations that is either too complex or logically not possible to do when working with streaming data.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM streaming_tmp_vw
# MAGIC ORDER BY creation_time DESC
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Streaming Aggregations
# MAGIC
# MAGIC Continuous applications often require near real-time decisions on real-time, aggregated statistics.
# MAGIC
# MAGIC Some examples include
# MAGIC * Aggregating errors in data from IoT devices by type
# MAGIC * Detecting anomalous behavior in a server's log file by aggregating by country
# MAGIC * Performing behavior analysis on instant messages via hash tags
# MAGIC
# MAGIC While these streaming aggregates may need to reference historic trends, analytics will generally be calculated over discrete units of time. Spark Structured Streaming supports time-based **windows** on streaming DataFrames to make these calculations easy.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### What is Time?
# MAGIC
# MAGIC Multiple times may be associated with each streaming event. Consider the discrete differences between the time at which the event data was:
# MAGIC - Generated
# MAGIC - Written to the streaming source
# MAGIC - Processed into Spark
# MAGIC
# MAGIC Each of these times will be recorded from the system clock of the machine running the process, with discrepancies and latencies being introduced due to many different causes.
# MAGIC
# MAGIC Generally speaking, most analytics will be interested in the time the data was generated. As such, this lesson will focus on timestamps recorded at the time of data generation, which we will refer to as the **event time**.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Windowing
# MAGIC
# MAGIC Defining windows on a time series field imposes a time range constraint on an otherwise unbounded input. This allows users to utilize this field for aggregations in the same way they would use distinct values when calling `GROUP BY`. Spark maintains a state table with aggregates for each user-defined bucket of time.
# MAGIC
# MAGIC Spark supports three types of windows:
# MAGIC
# MAGIC * **Tumbling windows**: fixed-size windows, regularly recurring windows that do not overlap. Each event will be aggregated into only one window.
# MAGIC * **Sliding windows**: fixed-size windows, regularly recurring windows that overlap. Each event may be aggregated into multiple windows.
# MAGIC * **Session windows**: dynamic windows whose start time and duration depends on the inputs. An event will trigger the start of a window that will, in general, continue until a predetermined duration after the last event received.
# MAGIC
# MAGIC <img src="https://spark.apache.org/docs/latest/img/structured-streaming-time-window-types.jpg">
# COMMAND ----------
# MAGIC %md
# MAGIC The following diagram illustrates in greater detail the concept of sliding windows and how events received at various times will be aggregated into the various windows (assuming that the slide duration is less than the window duration, which leads to overlapping windows):
# MAGIC
# MAGIC <img src="https://spark.apache.org/docs/latest/img/structured-streaming-window.png"/>
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### Consuming with a Windowed Aggregation
# MAGIC
# MAGIC Let's consume the stream from SQL in a windowed aggregation using the SQL `window` function, which accepts a timestamp column and a window duration to define the tumbling windows. An optional third argument specifies a slide duration that allows the definition of a sliding window.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT
# MAGIC window.start AS start,
# MAGIC action,
# MAGIC count(action) AS count
# MAGIC FROM streaming_tmp_vw
# MAGIC GROUP BY
# MAGIC window(creation_time, '1 hour'),
# MAGIC action
# MAGIC ORDER BY
# MAGIC start,
# MAGIC action
# COMMAND ----------
# MAGIC %md
# MAGIC Once a batch of data has loaded, render the results as a bar graph with the following settings:
# MAGIC
# MAGIC * **Keys** is set to `start`
# MAGIC * **Series groupings** is set to `action`
# MAGIC * **Values** is set to `count`
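# COMMAND ----------
# MAGIC %md
# MAGIC For comparison, the same aggregation can be expressed with the DataFrame API using `pyspark.sql.functions.window`. The second example sketches a sliding window by supplying the optional slide duration, which the SQL example above does not demonstrate. Neither DataFrame is displayed here, so no additional streaming queries are started.
# COMMAND ----------
from pyspark.sql.functions import window, col, count
# Tumbling 1-hour windows: equivalent to the SQL aggregation above
tumbling_counts = (spark.table("streaming_tmp_vw")
    .groupBy(window(col("creation_time"), "1 hour"), col("action"))
    .agg(count("action").alias("count")))
# Sliding windows: 1-hour windows starting every 30 minutes, so an event can land in two windows
sliding_counts = (spark.table("streaming_tmp_vw")
    .groupBy(window(col("creation_time"), "1 hour", "30 minutes"), col("action"))
    .agg(count("action").alias("count")))
# display(tumbling_counts) or display(sliding_counts) would start a streaming query,
# just like running the SQL cell above.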
# COMMAND ----------
# MAGIC %md
# MAGIC ### Land New Data
# MAGIC Recall that our stream has been set up for incremental ingestion. Invoke the following cell a few times to simulate the arrival of new data. Note the impact on the results reported above.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Performance Considerations
# MAGIC Because aggregation is a <a href="https://databricks.com/glossary/what-are-transformations" target="_blank">wide transformation</a>, it will trigger a shuffle. Configuring the number of partitions can reduce the number of tasks and properly balance the workload for the cluster.
# MAGIC
# MAGIC In most cases, a 1-to-1 mapping of partitions to cores is ideal for streaming applications. The code below sets the number of partitions to 4, which maps perfectly to a cluster with 4 cores.
# COMMAND ----------
spark.conf.set("spark.sql.shuffle.partitions", 4)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Watermarking
# MAGIC
# MAGIC
# MAGIC When aggregating with an unbounded input, Spark's fault-tolerant state management naturally incurs some processing overhead. To keep these overheads bounded within acceptable limits, the size of the state data should not grow indefinitely. However, with sliding windows, the number of windows/groups will grow indefinitely, and so can the size of state (proportional to the number of groups). To bound the state size, we have to be able to drop old aggregates that are not going to be updated anymore. We achieve this using **watermarking**.
# MAGIC
# MAGIC Watermarking allows users to define a cutoff threshold for how much state should be maintained. This cutoff is calculated against the most recently seen event time. Data arriving after this threshold will be discarded.
# MAGIC
# MAGIC The <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.withWatermark.html" target="_blank">`withWatermark`</a> method allows users to easily define this cutoff threshold.
# MAGIC
# MAGIC Note that there is no built-in support for watermarking in Spark SQL, but we can define this in PySpark before creating a temp view, as shown below.
# COMMAND ----------
(streamingDF
.withWatermark("creation_time", "2 hours") # Specify a 2-hour watermark
.createOrReplaceTempView("watermarked_tmp_vw")
)
# COMMAND ----------
# MAGIC %md
# MAGIC By directing our windowed aggregation at this new temp view, we can easily achieve the same outcome while managing state information.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT
# MAGIC window.start AS start,
# MAGIC action,
# MAGIC count(action) AS count
# MAGIC FROM watermarked_tmp_vw
# MAGIC GROUP BY
# MAGIC window(creation_time, '1 hour'),
# MAGIC action
# MAGIC ORDER BY
# MAGIC start,
# MAGIC action
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC
# MAGIC ## Example Details
# MAGIC
# MAGIC The threshold is always calculated against the max event time seen.
# MAGIC
# MAGIC In the example above,
# MAGIC * The in-memory state is limited to two hours of historic data.
# MAGIC * Data arriving more than 2 hours late should be dropped.
# MAGIC * Data received within 2 hours of being generated will never be dropped.
# MAGIC
# MAGIC <img alt="Caution" title="Caution" style="vertical-align: text-bottom; position: relative; height:1.3em; top:0.0em" src="https://files.training.databricks.com/static/images/icon-warning.svg"/> This guarantee is strict in only one direction. Data delayed by more than 2 hours is not guaranteed to be dropped; it may or may not get aggregated. The more delayed the data is, the less likely the engine is going to process it.
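# COMMAND ----------
# MAGIC %md
# MAGIC For reference, the watermark and the windowed aggregation can also be chained entirely in PySpark, without the intermediate temp view. The sketch below mirrors the aggregation above but is not displayed or written out, so it does not start a streaming query.
# COMMAND ----------
from pyspark.sql.functions import window, col, count
watermarked_counts = (streamingDF
    .withWatermark("creation_time", "2 hours")                       # bound state to roughly 2 hours
    .groupBy(window(col("creation_time"), "1 hour"), col("action"))  # 1-hour tumbling windows
    .agg(count("action").alias("count")))
# display(watermarked_counts), or a writeStream against it, would start the streaming query.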
# COMMAND ----------
# MAGIC %md
# MAGIC ## Writing Results
# MAGIC
# MAGIC Previously we used `spark.table()` to pass SQL logic stored in temp views back to a DataFrame to write out streaming results.
# MAGIC
# MAGIC Below, we instead use `spark.sql()` and pass the entire SQL query.
# COMMAND ----------
(spark.sql("""
SELECT
window.start AS start,
action,
count(action) AS count
FROM watermarked_tmp_vw
GROUP BY
window(creation_time, '1 hour'),
action
ORDER BY
start,
action
""").writeStream
.option("checkpointLocation", checkpointPath)
.outputMode("complete")
.table("action_counts")
)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Clean Up
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Summary
# MAGIC
# MAGIC * A handful of operations valid for static DataFrames will not work with streaming data
# MAGIC * Windows allow users to define time-based buckets for aggregating streaming data
# MAGIC * Watermarking allows users to manage the amount of state being calculated with each trigger and define how late-arriving data should be handled
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 38.604502 | 549 | 0.725317 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Incremental Multi-Hop in the Lakehouse
# MAGIC
# MAGIC Now that we have a better understanding of how to work with incremental data processing by combining Structured Streaming APIs and Spark SQL, we can explore the tight integration between Structured Streaming and Delta Lake.
# MAGIC
# MAGIC
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Describe Bronze, Silver, and Gold tables
# MAGIC * Create a Delta Lake multi-hop pipeline
# COMMAND ----------
# MAGIC %md
# MAGIC ## Incremental Updates in the Lakehouse
# MAGIC
# MAGIC Delta Lake allows users to easily combine streaming and batch workloads in a unified multi-hop pipeline. Each stage of the pipeline represents a state of our data valuable to driving core use cases within the business. Because all data and metadata lives in object storage in the cloud, multiple users and applications can access data in near-real time, allowing analysts to access the freshest data as it's being processed.
# MAGIC
# MAGIC ![](https://files.training.databricks.com/images/sslh/multi-hop-simple.png)
# MAGIC
# MAGIC - **Bronze** tables contain raw data ingested from various sources (JSON files, RDBMS data, IoT data, to name a few examples).
# MAGIC
# MAGIC - **Silver** tables provide a more refined view of our data. We can join fields from various bronze tables to enrich streaming records, or update account statuses based on recent activity.
# MAGIC
# MAGIC - **Gold** tables provide business level aggregates often used for reporting and dashboarding. This would include aggregations such as daily active website users, weekly sales per store, or gross revenue per quarter by department.
# MAGIC
# MAGIC The end outputs are actionable insights, dashboards and reports of business metrics.
# MAGIC
# MAGIC By considering our business logic at all steps of the ETL pipeline, we can ensure that storage and compute costs are optimized by reducing unnecessary duplication of data and limiting ad hoc querying against full historic data.
# MAGIC
# MAGIC Each stage can be configured as a batch or streaming job, and ACID transactions ensure that we succeed or fail completely.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Datasets Used
# MAGIC
# MAGIC This demo uses simplified artificially generated medical data. The schema of our two datasets is represented below. Note that we will be manipulating these schema during various steps.
# MAGIC
# MAGIC #### Recordings
# MAGIC The main dataset uses heart rate recordings from medical devices delivered in the JSON format.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | device_id | int |
# MAGIC | mrn | long |
# MAGIC | time | double |
# MAGIC | heartrate | double |
# MAGIC
# MAGIC #### PII
# MAGIC These data will later be joined with a static table of patient information stored in an external system to identify patients by name.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | mrn | long |
# MAGIC | name | string |
# COMMAND ----------
# MAGIC %md
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to configure the lab environment.
# COMMAND ----------
# MAGIC %run "../Includes/multi-hop-setup" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Data Simulator
# MAGIC Databricks Auto Loader can automatically process files as they land in your cloud object stores. To simulate this process, you will be asked to run the following operation several times throughout the course.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Bronze Table: Ingesting Raw JSON Recordings
# MAGIC
# MAGIC Below, we configure a read on a raw JSON source using Auto Loader with schema inference.
# MAGIC
# MAGIC Note that while you need to use the Spark DataFrame API to set up an incremental read, once configured you can immediately register a temp view to leverage Spark SQL for streaming transformations on your data.
# MAGIC
# MAGIC **NOTE**: For a JSON data source, Auto Loader will default to inferring each column as a string. Here, we demonstrate specifying the data type for the `time` column using the `cloudFiles.schemaHints` option. Note that specifying improper types for a field will result in null values.
# COMMAND ----------
(spark.readStream
.format("cloudFiles")
.option("cloudFiles.format", "json")
.option("cloudFiles.schemaHints", "time DOUBLE")
.option("cloudFiles.schemaLocation", bronzeCheckpoint)
.load(dataLandingLocation)
.createOrReplaceTempView("recordings_raw_temp"))
# COMMAND ----------
# MAGIC %md
# MAGIC Here, we'll enrich our raw data with additional metadata describing the source file and the time it was ingested. This additional metadata can be ignored during downstream processing while providing useful information for troubleshooting errors if corrupt data is encountered.
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMPORARY VIEW recordings_bronze_temp AS (
# MAGIC SELECT *, current_timestamp() receipt_time, input_file_name() source_file
# MAGIC FROM recordings_raw_temp
# MAGIC )
# COMMAND ----------
# MAGIC %md
# MAGIC The code below passes our enriched raw data back to PySpark API to process an incremental write to a Delta Lake table.
# COMMAND ----------
(spark.table("recordings_bronze_temp")
.writeStream
.format("delta")
.option("checkpointLocation", bronzeCheckpoint)
.outputMode("append")
.table("bronze"))
# COMMAND ----------
# MAGIC %md
# MAGIC Trigger another file arrival with the following cell and you'll see the changes immediately detected by the streaming query you've written.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Load Static Lookup Table
# MAGIC The ACID guarantees that Delta Lake brings to your data are managed at the table level, ensuring that only fully successful commits are reflected in your tables. If you choose to merge these data with other data sources, be aware of how those sources version data and what sort of consistency guarantees they have.
# MAGIC
# MAGIC In this simplified demo, we are loading a static CSV file to add patient data to our recordings. In production, we could use Databricks' <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader.html" target="_blank">Auto Loader</a> feature to keep an up-to-date view of these data in our Delta Lake.
# COMMAND ----------
(spark
.read
.format("csv")
.schema("mrn STRING, name STRING")
.option("header", True)
.load(f"{dataSource}/patient/patient_info.csv")
.createOrReplaceTempView("pii"))
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM pii
# COMMAND ----------
# MAGIC %md
# MAGIC ## Silver Table: Enriched Recording Data
# MAGIC As a second hop in our silver level, we will perform the following enrichments and checks:
# MAGIC - Our recordings data will be joined with the PII to add patient names
# MAGIC - The time for our recordings will be parsed to the format `'yyyy-MM-dd HH:mm:ss'` to be human-readable
# MAGIC - We will exclude heart rates that are <= 0, as we know that these either represent the absence of the patient or an error in transmission
# COMMAND ----------
(spark.readStream
.table("bronze")
.createOrReplaceTempView("bronze_tmp"))
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMPORARY VIEW recordings_w_pii AS (
# MAGIC SELECT device_id, a.mrn, b.name, cast(from_unixtime(time, 'yyyy-MM-dd HH:mm:ss') AS timestamp) time, heartrate
# MAGIC FROM bronze_tmp a
# MAGIC INNER JOIN pii b
# MAGIC ON a.mrn = b.mrn
# MAGIC WHERE heartrate > 0)
# COMMAND ----------
(spark.table("recordings_w_pii")
.writeStream
.format("delta")
.option("checkpointLocation", recordingsEnrichedCheckpoint)
.outputMode("append")
.table("recordings_enriched"))
# COMMAND ----------
# MAGIC %md
# MAGIC Trigger another new file and wait for it propagate through both previous queries.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT COUNT(*) FROM recordings_enriched
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Gold Table: Daily Averages
# MAGIC
# MAGIC Here we read a stream of data from the `recordings_enriched` table and write another stream to create an aggregate gold table of daily averages for each patient.
# COMMAND ----------
(spark.readStream
.table("recordings_enriched")
.createOrReplaceTempView("recordings_enriched_temp"))
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW patient_avg AS (
# MAGIC SELECT mrn, name, mean(heartrate) avg_heartrate, date_trunc("DD", time) date
# MAGIC FROM recordings_enriched_temp
# MAGIC GROUP BY mrn, name, date_trunc("DD", time))
# COMMAND ----------
# MAGIC %md
# MAGIC Note that we're using `.trigger(once=True)` below. This gives us the ability to continue using the strengths of Structured Streaming while triggering this job as a single batch. To recap, these strengths include:
# MAGIC - exactly-once, end-to-end fault-tolerant processing
# MAGIC - automatic detection of changes in upstream data sources
# MAGIC
# MAGIC If we know the approximate rate at which our data grows, we can appropriately size the cluster we schedule for this job to ensure fast, cost-effective processing. The customer will be able to evaluate how much updating this final aggregate view of their data costs and make informed decisions about how frequently this operation needs to be run.
# MAGIC
# MAGIC Downstream processes subscribing to this table do not need to re-run any expensive aggregations. Rather, files just need to be de-serialized and then queries based on included fields can quickly be pushed down against this already-aggregated source.
# COMMAND ----------
(spark.table("patient_avg")
.writeStream
.format("delta")
.outputMode("complete")
.option("checkpointLocation", dailyAvgCheckpoint)
.trigger(once=True)
.table("daily_patient_avg")
)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Important Considerations for `complete` Output with Delta
# MAGIC
# MAGIC When using `complete` output mode, we rewrite the entire state of our table each time our logic runs. While this is ideal for calculating aggregates, we **cannot** read a stream from this directory, as Structured Streaming assumes data is only being appended in the upstream logic.
# MAGIC
# MAGIC **NOTE**: Certain options can be set to change this behavior, but have other limitations attached. For more details, refer to [Delta Streaming: Ignoring Updates and Deletes](https://docs.databricks.com/delta/delta-streaming.html#ignoring-updates-and-deletes).
# MAGIC
# MAGIC The gold Delta table we have just registered will perform a static read of the current state of the data each time we run the following query.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM daily_patient_avg
# COMMAND ----------
# MAGIC %md
# MAGIC Note the above table includes all days for all users. If the predicates for our ad hoc queries match the data encoded here, we can push down our predicates to files at the source and very quickly generate more limited aggregate views.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT *
# MAGIC FROM daily_patient_avg
# MAGIC WHERE date BETWEEN "2020-01-17" AND "2020-01-31"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Process Remaining Records
# MAGIC The following cell will land additional files for the rest of 2020 in your source directory. You'll be able to watch these records process through the first 3 tables in your Delta Lake, but you will need to re-run your final query to update your `daily_patient_avg` table, since that query uses the trigger-once syntax.
# COMMAND ----------
File.newData(continuous=True)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Wrapping Up
# MAGIC
# MAGIC Finally, make sure all streams are stopped.
# COMMAND ----------
# MAGIC %run "../Includes/multi-hop-setup" $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Summary
# MAGIC
# MAGIC Delta Lake and Structured Streaming combine to provide near real-time analytic access to data in the lakehouse.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Additional Topics & Resources
# MAGIC
# MAGIC * <a href="https://docs.databricks.com/delta/delta-streaming.html" target="_blank">Table Streaming Reads and Writes</a>
# MAGIC * <a href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html" target="_blank">Structured Streaming Programming Guide</a>
# MAGIC * <a href="https://www.youtube.com/watch?v=rl8dIzTpxrI" target="_blank">A Deep Dive into Structured Streaming</a> by Tathagata Das. This is an excellent video describing how Structured Streaming works.
# MAGIC * <a href="https://databricks.com/glossary/lambda-architecture" target="_blank">Lambda Architecture</a>
# MAGIC * <a href="https://bennyaustin.wordpress.com/2010/05/02/kimball-and-inmon-dw-models/#" target="_blank">Data Warehouse Models</a>
# MAGIC * <a href="http://spark.apache.org/docs/latest/structured-streaming-kafka-integration.html" target="_blank">Create a Kafka Source Stream</a>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 40.638806 | 432 | 0.726627 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Using the Delta Live Tables UI
# MAGIC
# MAGIC This demo will explore the DLT UI. By the end of this lesson you will be able to:
# MAGIC
# MAGIC * Deploy a DLT pipeline
# MAGIC * Explore the resultant DAG
# MAGIC * Execute an update of the pipeline
# MAGIC * Look at metrics
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run Setup
# MAGIC
# MAGIC The following cell is configured to reset this demo.
# COMMAND ----------
# MAGIC %run ../Includes/dlt-setup $course="dlt_demo" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC Execute the following cell to print out two values that will be used during the following configuration steps.
# COMMAND ----------
print(f"Target: {database}")
print(f"Storage location: {userhome.split(':')[1]}")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and configure a pipeline
# MAGIC
# MAGIC In this section you will create a pipeline using a notebook provided with the courseware. We'll explore the contents of the notebook in the following lesson.
# MAGIC
# MAGIC 1. Click the **Jobs** button on the sidebar.
# MAGIC 1. Select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the companion notebook called **3.3.2 - SQL for Delta Live Tables**.
# MAGIC * Though this document is a standard Databricks Notebook, the SQL syntax is specialized to DLT table declarations. We will be exploring the syntax in the exercise that follows.
# MAGIC 1. In the **Target** field, specify the database name printed out next to **Target** in the cell above. (This should follow the pattern `dbacademy_<username>_dlt_demo`)
# MAGIC * This field is optional; if not specified, then tables will not be registered to a metastore, but will still be available in the DBFS. Refer to the <a href="https://docs.databricks.com/data-engineering/delta-live-tables/delta-live-tables-user-guide.html#publish-tables" target="_blank">documentation</a> for more information on this option.
# MAGIC 1. In the **Storage location** field, copy the directory as printed above.
# MAGIC * This optional field allows the user to specify a location to store logs, tables, and other information related to pipeline execution. If not specified, DLT will automatically generate a directory.
# MAGIC 1. For **Pipeline Mode**, select **Triggered**
# MAGIC * This field specifies how the pipeline will be run. **Triggered** pipelines run once and then shut down until the next manual or scheduled update. **Continuous** pipelines run continuously, ingesting new data as it arrives. Choose the mode based on latency and cost requirements.
# MAGIC 1. Uncheck the **Enable autoscaling** box, and set the number of workers to 1.
# MAGIC * **Enable autoscaling**, **Min Workers** and **Max Workers** control the worker configuration for the underlying cluster processing the pipeline. Notice the DBU estimate provided, similar to that provided when configuring interactive clusters.
# MAGIC 1. Click **Create**.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run a pipeline
# MAGIC
# MAGIC With a pipeline created, you will now run the pipeline.
# MAGIC
# MAGIC 1. Select **Development** to run the pipeline in development mode. Development mode provides for more expeditious iterative development by reusing the cluster (as opposed to creating a new cluster for each run) and disabling retries so that you can readily identify and fix errors. Refer to the <a href="https://docs.databricks.com/data-engineering/delta-live-tables/delta-live-tables-user-guide.html#optimize-execution" target="_blank">documentation</a> for more information on this feature.
# MAGIC 2. Click **Start**.
# MAGIC
# MAGIC The initial run will take several minutes while a cluster is provisioned. Subsequent runs will be appreciably quicker.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Exploring the DAG
# MAGIC
# MAGIC As the pipeline completes, the execution flow is graphed. Select the tables to review the details.
# MAGIC
# MAGIC Select **sales_orders_cleaned**. Notice the results reported in the **Data Quality** section. Because this flow has data expectations declared, those metrics are tracked here. No records are dropped because the constraint is declared in a way that allows violating records to be included in the output. This will be covered in more detail in the next exercise.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 56.163043 | 500 | 0.732408 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md
# MAGIC # Exploring the Results of a DLT Pipeline
# MAGIC
# MAGIC This Notebook explores the execution results of a DLT pipeline.
# COMMAND ----------
# MAGIC %run ../Includes/dlt-setup $course="dlt_demo"
# COMMAND ----------
storage_location = userhome
# COMMAND ----------
dbutils.fs.ls(storage_location)
# COMMAND ----------
# MAGIC %md
# MAGIC The `system` directory captures events associated with the pipeline.
# COMMAND ----------
dbutils.fs.ls(f"{storage_location}/system/events")
# COMMAND ----------
# MAGIC %md
# MAGIC These event logs are stored as a Delta table. Let's query the table.
# COMMAND ----------
display(spark.sql(f"SELECT * FROM delta.`{storage_location}/system/events`"))
# COMMAND ----------
# MAGIC %md
# MAGIC Let's view the contents of the *tables* directory.
# COMMAND ----------
dbutils.fs.ls(f"{storage_location}/tables")
# COMMAND ----------
# MAGIC %md
# MAGIC Let's query the gold table.
# COMMAND ----------
display(spark.sql(f"SELECT * FROM {database}.sales_order_in_la"))
| 18.581818 | 77 | 0.642193 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Using Auto Loader and Structured Streaming with Spark SQL
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lab, you will be able to:
# MAGIC * Ingest data using Auto Loader
# MAGIC * Aggregate streaming data
# MAGIC * Stream data to a Delta table
# COMMAND ----------
# MAGIC %md
# MAGIC ## Setup
# MAGIC Run the following script to set up the necessary variables and clear out past runs of this notebook. Note that re-executing this cell will allow you to start the lab over.
# COMMAND ----------
# MAGIC %run ../../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Configure Streaming Read
# MAGIC
# MAGIC This lab uses a collection of customer-related CSV data from DBFS found in */databricks-datasets/retail-org/customers/*.
# MAGIC
# MAGIC Read this data with Auto Loader, using schema inference (use `customersCheckpointPath` to store the schema info). Create a streaming temporary view called `customers_raw_temp`.
# COMMAND ----------
# ANSWER
customersCheckpointPath = userhome + "/customersCheckpoint"
(spark
.readStream
.format("cloudFiles")
.option("cloudFiles.format", "csv")
.option("cloudFiles.schemaLocation", customersCheckpointPath)
.load("/databricks-datasets/retail-org/customers/")
.createOrReplaceTempView("customers_raw_temp")
)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Define a streaming aggregation
# MAGIC
# MAGIC Using CTAS syntax, define a new streaming view called `customer_count_by_state_temp` that counts the number of customers per `state`, in a field called `customer_count`.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC CREATE OR REPLACE TEMPORARY VIEW customer_count_by_state_temp AS
# MAGIC SELECT
# MAGIC state,
# MAGIC count(state) AS customer_count
# MAGIC FROM customers_raw_temp
# MAGIC GROUP BY
# MAGIC state
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Write aggregated data to a Delta table
# MAGIC
# MAGIC Stream data from the `customer_count_by_state_temp` view to a Delta table called `customer_count_by_state`.
# COMMAND ----------
# ANSWER
customersCountCheckpointPath = userhome + "/customersCountCheckpoint"
(spark
.table("customer_count_by_state_temp")
.writeStream
.format("delta")
.option("checkpointLocation", customersCountCheckpointPath)
.outputMode("complete")
.table("customer_count_by_state"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Query the results
# MAGIC
# MAGIC Query the `customer_count_by_state` table (this will not be a streaming query). Plot the results as a bar graph and also using the map plot.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC SELECT * FROM customer_count_by_state
# COMMAND ----------
# MAGIC %md
# MAGIC ## Wrapping Up
# MAGIC
# MAGIC Run the following cell to remove the database and all data associated with this lab.
# COMMAND ----------
# MAGIC %run ../../Includes/classic-setup $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC By completing this lab, you should now feel comfortable:
# MAGIC * Using PySpark to configure Auto Loader for incremental data ingestion
# MAGIC * Using Spark SQL to aggregate streaming data
# MAGIC * Streaming data to a Delta table
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 29.223881 | 192 | 0.700914 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Propagating Incremental Updates with Structured Streaming and Delta Lake
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lab, you will be able to:
# MAGIC * Apply your knowledge of structured streaming and Auto Loader to implement a simple multi-hop architecture
# COMMAND ----------
# MAGIC %md
# MAGIC ## Setup
# MAGIC Run the following script to set up the necessary variables and clear out past runs of this notebook. Note that re-executing this cell will allow you to start the lab over.
# COMMAND ----------
# MAGIC %run ../../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Ingest data
# MAGIC
# MAGIC This lab uses a collection of customer-related CSV data from DBFS found in */databricks-datasets/retail-org/customers/*.
# MAGIC
# MAGIC Read this data with Auto Loader, using schema inference (use `customersCheckpointPath` to store the schema info). Stream the raw data to a Delta table called `bronze`.
# COMMAND ----------
# ANSWER
customersCheckpointPath = userhome + "/customersCheckpoint"
(spark
.readStream
.format("cloudFiles")
.option("cloudFiles.format", "csv")
.option("cloudFiles.schemaLocation", customersCheckpointPath)
.load("/databricks-datasets/retail-org/customers/")
.writeStream
.format("delta")
.option("checkpointLocation", customersCheckpointPath)
.outputMode("append")
.table("bronze"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Let's create a streaming temporary view into the bronze table, so that we can perform transforms using SQL.
# COMMAND ----------
(spark
.readStream
.table("bronze")
.createOrReplaceTempView("bronze_temp"))
# COMMAND ----------
# MAGIC %md
# MAGIC ## Clean and enhance data
# MAGIC
# MAGIC Using CTAS syntax, define a new streaming view called `bronze_enhanced_temp` that does the following:
# MAGIC * Skips records with a null `postcode` (these appear with a value of zero)
# MAGIC * Inserts a column called `receipt_time` containing a current timestamp
# MAGIC * Inserts a column called `source_file` containing the input filename
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC CREATE OR REPLACE TEMPORARY VIEW bronze_enhanced_temp AS
# MAGIC SELECT
# MAGIC *, current_timestamp() receipt_time, input_file_name() source_file
# MAGIC FROM bronze_temp
# MAGIC WHERE postcode > 0
# COMMAND ----------
# MAGIC %md
# MAGIC ## Silver table
# MAGIC
# MAGIC Stream the data from `bronze_enhanced_temp` to a table called `silver`.
# COMMAND ----------
# ANSWER
silverCheckpointPath = userhome + "/silverCheckpoint"
(spark.table("bronze_enhanced_temp")
.writeStream
.format("delta")
.option("checkpointLocation", silverCheckpointPath)
.outputMode("append")
.table("silver"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Let's create a streaming temporary view into the silver table, so that we can perform business-level aggregations using SQL.
# COMMAND ----------
(spark
.readStream
.table("silver")
.createOrReplaceTempView("silver_temp"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Gold tables
# MAGIC
# MAGIC Using CTAS syntax, define a new streaming view called `customer_count_by_state_temp` that counts customers per state.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC CREATE OR REPLACE TEMPORARY VIEW customer_count_by_state_temp AS
# MAGIC SELECT state, count(state) AS customer_count
# MAGIC FROM silver_temp
# MAGIC GROUP BY
# MAGIC state
# COMMAND ----------
# MAGIC %md
# MAGIC Finally, stream the data from the `customer_count_by_state_temp` view to a Delta table called `gold_customer_count_by_state`.
# COMMAND ----------
# ANSWER
customersCountCheckpointPath = userhome + "/customersCountCheckpoint"
(spark
.table("customer_count_by_state_temp")
.writeStream
.format("delta")
.option("checkpointLocation", customersCountCheckpointPath)
.outputMode("complete")
.table("gold_customer_count_by_state"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Query the results
# MAGIC
# MAGIC Query the `gold_customer_count_by_state` table (this will not be a streaming query). Plot the results as a bar graph and also using the map plot.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM gold_customer_count_by_state
# COMMAND ----------
# MAGIC %md
# MAGIC ## Wrapping Up
# MAGIC
# MAGIC Run the following cell to remove the database and all data associated with this lab.
# COMMAND ----------
# MAGIC %run ../../Includes/classic-setup $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC By completing this lab, you should now feel comfortable:
# MAGIC * Using PySpark to configure Auto Loader for incremental data ingestion
# MAGIC * Using Spark SQL to aggregate streaming data
# MAGIC * Streaming data to a Delta table
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 27.747423 | 192 | 0.698888 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Lab: Migrating SQL Notebooks to Delta Live Tables
# MAGIC
# MAGIC This notebook dictates an overall structure for the lab exercise, configures the environment for the lab, provides simulated data streaming, and performs cleanup once you are done. A notebook like this is not typically needed in a production pipeline scenario.
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Convert existing data pipelines to Delta Live Tables
# COMMAND ----------
# MAGIC %md
# MAGIC ## Datasets Used
# MAGIC
# MAGIC This demo uses simplified artificially generated medical data. The schema of our two datasets is represented below. Note that we will be manipulating these schemas during various steps.
# MAGIC
# MAGIC #### Recordings
# MAGIC The main dataset uses heart rate recordings from medical devices delivered in the JSON format.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | device_id | int |
# MAGIC | mrn | long |
# MAGIC | time | double |
# MAGIC | heartrate | double |
# MAGIC
# MAGIC #### PII
# MAGIC These data will later be joined with a static table of patient information stored in an external system to identify patients by name.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | mrn | long |
# MAGIC | name | string |
# COMMAND ----------
# MAGIC %md
# MAGIC ## Getting Started
# MAGIC
# MAGIC Begin by running the following cell to configure the lab environment.
# COMMAND ----------
# MAGIC %run "../../Includes/dlt-setup" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Land Initial Data
# MAGIC Seed the landing zone with some data before proceeding. You will re-run this command to land additional data later.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and Configure a Pipeline
# MAGIC
# MAGIC 1. Click the **Jobs** button on the sidebar, then select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the notebook `3.3.4 - LAB - Migrating a SQL Pipeline to DLT`.
# MAGIC 1. Run the cell below to generate values for **source**, **Target** and **Storage Location**. (All of these will include your current username).
# MAGIC * Click `Add configuration`; enter the word `source` in the **Key** field and the output printed next to `source` below in the value field.
# MAGIC * Enter the database name printed next to `Target` below in the **Target** field.
# MAGIC * Enter the location printed next to `Storage Location` below in the **Storage Location** field.
# MAGIC 1. Set **Pipeline Mode** to **Triggered**.
# MAGIC 1. Disable autoscaling.
# MAGIC 1. Set the number of workers to 1.
# MAGIC 1. Click **Create**.
# COMMAND ----------
storage_location = userhome + "/output"
print(f"source : {dataLandingLocation.split(':')[1]}")
print(f"Target: {database}")
print(f"Storage Location: {storage_location.split(':')[1]}")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run your Pipeline
# MAGIC
# MAGIC Select **Development** mode, which accelerates the development lifecycle by reusing the same cluster across runs. It will also turn off automatic retries when jobs fail.
# MAGIC
# MAGIC Click **Start** to begin the first update to your table.
# MAGIC
# MAGIC Delta Live Tables will automatically deploy all the necessary infrastructure and resolve the dependencies between all datasets.
# MAGIC
# MAGIC **NOTE**: The first table update make take several minutes as relationships are resolved and infrastructure deploys.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Open and Complete DLT Pipeline Notebook
# MAGIC
# MAGIC You will perform your work in this <a href="$./3.3.4 - LAB - Migrating a SQL Pipeline to DLT" target="_blank">companion Notebook</a>, which you will ultimately deploy as a pipeline.
# MAGIC
# MAGIC Open the Notebook and, following the guidelines provided therein, fill in the cells where prompted to implement a multi-hop architecture similar to the one we worked with in the previous section.
# MAGIC
# MAGIC **NOTE**: As a first step to preparing your pipeline, use the configuration cell above (the value printed next to **source**) to obtain the cloud file location. Substitute this value for the text that reads `<CLOUD_FILES LOCATION>`. This value is unique to your user identity within the workspace to prevent possible interference between users in the same workspace.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Troubleshooting Code in Development Mode
# MAGIC
# MAGIC Don't despair if your pipeline fails the first time. Delta Live Tables is in active development, and error messages are improving all the time.
# MAGIC
# MAGIC Because relationships between tables are mapped as a DAG, error messages will often indicate that a dataset isn't found.
# MAGIC
# MAGIC Let's consider our DAG below:
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/dlt_dag.png" width="400">
# MAGIC
# MAGIC If the error message `Dataset not found: 'recordings_parsed'` is raised, there may be several culprits:
# MAGIC 1. The logic defining `recordings_parsed` is invalid
# MAGIC 1. There is an error reading from `recordings_bronze`
# MAGIC 1. A typo exists in either `recordings_parsed` or `recordings_bronze`
# MAGIC
# MAGIC The safest way to identify the culprit is to iteratively add table/view definitions back into your DAG starting from your initial ingestion tables. You can simply comment out later table/view definitions and uncomment these between runs.
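# COMMAND ----------
# MAGIC %md
# MAGIC For orientation only, here is a minimal sketch of how one dataset refers to another by name inside a DLT pipeline, using the Python API rather than the SQL used in this lab (the column names come from the recordings schema above; everything else is assumed). A typo in either the function name or the string passed to `dlt.read_stream` is enough to produce a `Dataset not found` error, because the DAG is resolved purely from these names.
# MAGIC
# MAGIC ```python
# MAGIC # Sketch only -- this code executes inside a DLT pipeline, not interactively.
# MAGIC import dlt
# MAGIC import pyspark.sql.functions as F
# MAGIC
# MAGIC @dlt.table
# MAGIC def recordings_parsed():
# MAGIC     # "recordings_bronze" must match the upstream table's name exactly
# MAGIC     return (dlt.read_stream("recordings_bronze")
# MAGIC               .select(F.col("device_id").cast("integer"),
# MAGIC                       F.col("mrn").cast("long"),
# MAGIC                       F.col("time").cast("double"),
# MAGIC                       F.col("heartrate").cast("double")))
# MAGIC ```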
# COMMAND ----------
# MAGIC %md
# MAGIC ## Display Results
# MAGIC
# MAGIC Assuming your pipeline runs successfully, display the contents of the gold table.
# MAGIC
# MAGIC **NOTE**: Because we specified a value for **Target**, tables are published to the specified database. Without a **Target** specification, we would need to query the table based on its underlying location in DBFS (relative to the **Storage Location**).
# COMMAND ----------
spark.sql(f"SELECT * FROM {database}daily_patient_avg")
# COMMAND ----------
# MAGIC %md
# MAGIC Trigger another file arrival with the following cell. Feel free to run it a couple more times if desired. Following this, run the pipeline again and view the results. Feel free to re-run the cell above to gain an updated view of the `daily_patient_avg` table.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Wrapping Up
# MAGIC
# MAGIC Ensure that you delete your pipeline from the DLT UI, and run the following cell to clean up the files and tables that were created as part of the lab setup and execution.
# COMMAND ----------
# MAGIC %run "../../Includes/dlt-setup" $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Summary
# MAGIC
# MAGIC In this lab, you learned to convert an existing data pipeline to a Delta Live Tables SQL pipeline, and deployed that pipeline using the DLT UI.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Additional Topics & Resources
# MAGIC
# MAGIC * <a href="https://docs.databricks.com/data-engineering/delta-live-tables/index.html" target="_blank">Delta Live Tables Documentation</a>
# MAGIC * <a href="https://youtu.be/6Q8qPZ7c1O0" target="_blank">Delta Live Tables Demo</a>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 42.010582 | 333 | 0.71469 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Orchestrating Jobs with Databricks
# MAGIC
# MAGIC New updates to the Databricks Jobs UI have added the ability to schedule multiple tasks as part of a job, allowing Databricks Jobs to fully handle orchestration for most production workloads.
# MAGIC
# MAGIC Here, we'll start by reviewing the steps for scheduling a notebook as a triggered standalone job, and then add a dependent job using a DLT pipeline.
# MAGIC
# MAGIC
# MAGIC By the end of this lesson, you should feel confident:
# MAGIC * Scheduling a notebook as a Databricks Job
# MAGIC * Describing job scheduling options and differences between cluster types
# MAGIC * Reviewing Job Runs to track progress and see results
# MAGIC * Scheduling a DLT pipeline as a Databricks Job
# MAGIC * Configuring linear dependencies between tasks using the Databricks Jobs UI
# MAGIC
# MAGIC ## Schedule a Notebook Job
# MAGIC
# MAGIC When using the Jobs UI to orchestrate a workload with multiple tasks, you'll always begin by scheduling a single task.
# MAGIC
# MAGIC Here, we'll start by scheduling the notebook `1 - Reset`.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar.
# MAGIC 1. Click the blue `Create Job` button
# MAGIC 1. Configure the task:
# MAGIC 1. Enter `reset` for the task name
# MAGIC 1. Select the notebook `1 - Reset` using the notebook picker
# MAGIC 1. Select an Existing All Purpose Cluster from the **Cluster** dropdown
# MAGIC 1. Click **Create**
# MAGIC
# MAGIC **Note**: When selecting your all purpose cluster, you will get a warning about how this will be billed as all purpose compute. Production jobs should always be scheduled against new job clusters appropriately sized for the workload, as this is billed at a much lower rate.
# MAGIC
# MAGIC Click the blue **Run now** button in the top right to start the job.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review Run
# MAGIC
# MAGIC As currently scheduled, our single notebook provides identical performance to the legacy Databricks Jobs UI, which only allowed a single notebook to be scheduled.
# MAGIC
# MAGIC From the **Runs** tab, clicking on the start time field will display a preview of the notebook with results. If the job is still running, this will be under **Active Runs**, and the displayed notebook will occasionally update to show progress throughout execution. If it has already completed, it will be under **Completed Runs** and just display the static results of the executed notebook.
# MAGIC
# MAGIC The scheduled notebook uses the magic command `%run` to call an additional notebook using a relative path. Note that while not covered in this course, [new functionality added to Databricks Repos allows loading Python modules using relative paths](https://docs.databricks.com/repos.html#work-with-non-notebook-files-in-a-databricks-repo).
# MAGIC
# MAGIC The actual outcome of the scheduled notebook is to reset the output of the DLT pipeline configured earlier in the course, as well as to print out the necessary variables used to configure this pipeline for users that may not have coded along previously.
# MAGIC
# MAGIC Before continuing to the next step, make sure you either have access to a previously configured DLT pipeline or follow the instructions printed in the output of the `reset` notebook above to configure one now.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Cron Scheduling of Databricks Jobs
# MAGIC
# MAGIC Note that on the right hand side of the Jobs UI, directly under the **Job Details** section is a section labeled **Schedule**.
# MAGIC
# MAGIC Click on the **Edit schedule** button to explore scheduling options.
# MAGIC
# MAGIC Changing the **Schedule type** field from **Manual** to **Scheduled** will bring up a cron scheduling UI.
# MAGIC
# MAGIC This UI provides extensive options for setting up time-based scheduling of your Jobs. Settings configured with the UI can also be output in cron syntax, which can be edited if custom configuration not available with the UI is needed.
# MAGIC
# MAGIC At this time, we'll leave our job set with **Manual** scheduling.
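# COMMAND ----------
# MAGIC %md
# MAGIC For reference, the next cell sketches what a schedule looks like when expressed in cron syntax. The structure mirrors the schedule block used by the Jobs API; the specific expression and timezone are illustrative values, not something this lesson asks you to configure.
# COMMAND ----------
# Illustrative only: an example schedule block in the shape used by the Databricks Jobs
# API. The Quartz expression below means "every day at 06:00" in the given timezone.
example_schedule = {
    "quartz_cron_expression": "0 0 6 * * ?",   # sec min hour day-of-month month day-of-week
    "timezone_id": "UTC",
    "pause_status": "UNPAUSED",
}
print(example_schedule)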
# COMMAND ----------
# MAGIC %md
# MAGIC ## Schedule a DLT Pipeline as a Task
# MAGIC
# MAGIC In this step, we'll add a DLT pipeline to execute after the success of the task we configured in the previous step.
# MAGIC
# MAGIC **NOTE**: This step assumes that the DLT pipeline described in the lab for module 3 of this course was configured successfully. If this is not the case, instructions are included for configuring this DLT pipeline in the run output of the `reset` notebook executed above.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar
# MAGIC 1. Select the job you defined above by clicking on the name
# MAGIC 1. At the top left of your screen, you'll see the **Runs** tab is currently selected; click the **Tasks** tab.
# MAGIC 1. Click the large blue circle with a **+** at the center bottom of the screen to add a new task
# MAGIC 1. Specify the **Task name** as `dlt`
# MAGIC 1. From **Type**, select `Pipeline`
# MAGIC 1. Click the **Pipeline** field and select the DLT pipeline you configured previously
# MAGIC 1. Note that the **Depends on** field defaults to your previously defined task
# MAGIC 1. Click the blue **Create task** button
# MAGIC
# MAGIC You should now see a screen with 2 boxes and a downward arrow between them. Your `reset` task will be at the top, leading into your `dlt` task. This visualization represents the dependencies between these tasks.
# MAGIC
# MAGIC Click **Run now** to execute your job.
# MAGIC
# MAGIC **NOTE**: You may need to wait a few minutes as infrastructure for your DLT pipeline is deployed.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review Multi-Task Run Results
# MAGIC
# MAGIC Clicking into the job run will replicate the UI showing both tasks. The visualizations for tasks will update in real time to reflect which tasks are actively running, and will change colors if task failures occur. Clicking on a task box will render the scheduled notebook in the UI. (You can think of this as just an additional layer of orchestration on top of the previous Databricks Jobs UI, if that helps; note that if you have workloads scheduling jobs with the CLI or REST API, [the JSON structure used to configure and get results about jobs has seen similar updates to the UI](https://docs.databricks.com/dev-tools/api/latest/jobs.html)).
# MAGIC
# MAGIC **NOTE**: At this time, DLT pipelines scheduled as tasks do not directly render results in the Runs GUI; instead, you will be directed back to the DLT Pipeline GUI for the scheduled Pipeline.
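# COMMAND ----------
# MAGIC %md
# MAGIC To connect this to the JSON structure mentioned above, the next cell sketches a trimmed `tasks` array for the job built in this lesson. The notebook path and pipeline ID are hypothetical placeholders; the point is simply that the `dlt` task lists the `reset` task under `depends_on`, which is what produces the arrow in the UI.
# COMMAND ----------
# Illustrative only: a trimmed sketch of the "tasks" array as it might appear in a
# Jobs API job definition. The path and ID below are placeholders, not real values.
example_tasks = [
    {
        "task_key": "reset",
        "notebook_task": {"notebook_path": "/Repos/<user>/<repo>/1 - Reset"},   # hypothetical path
    },
    {
        "task_key": "dlt",
        "depends_on": [{"task_key": "reset"}],                  # creates the linear dependency shown in the UI
        "pipeline_task": {"pipeline_id": "<your-pipeline-id>"}, # hypothetical ID
    },
]
print(example_tasks)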
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 62.801724 | 652 | 0.746757 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %run ../../Includes/dlt-setup $course="dlt_demo" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC If you have not previously configured this DLT pipeline successfully, the following cell prints out two values that will be used during the configuration steps that follow.
# COMMAND ----------
print(f"Target: {database}")
print(f"Storage location: {userhome.split(':')[1]}")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and configure a pipeline
# MAGIC
# MAGIC The instructions below refer to the same pipeline created during the previous codealong for DLT; if you successfully configured this notebook previously, you should not need to reconfigure this pipeline now.
# MAGIC
# MAGIC
# MAGIC Steps:
# MAGIC 1. Click the **Jobs** button on the sidebar.
# MAGIC 1. Select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the companion notebook called **2 - DLT Job**.
# MAGIC 1. In the **Target** field, specify the database name printed out next to **Target** in the cell above. (This should follow the pattern `dbacademy_<username>_dlt_demo`)
# MAGIC 1. In the **Storage location** field, copy the directory as printed above.
# MAGIC 1. For **Pipeline Mode**, select **Triggered**
# MAGIC 1. Uncheck the **Enable autoscaling** box, and set the number of workers to 1.
# MAGIC 1. Click **Create**.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 45.204082 | 215 | 0.70084 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Orchestrating Jobs with Databricks
# MAGIC
# MAGIC In this lab, you'll be configuring a multi-task job consisting of:
# MAGIC * A notebook that lands a new batch of data in a storage directory
# MAGIC * A Delta Live Table pipeline that processes this data through a series of tables
# MAGIC * A notebook that queries the gold table produced by this pipeline as well as various metrics output by DLT
# MAGIC
# MAGIC By the end of this lab, you should feel confident:
# MAGIC * Scheduling a notebook as a Databricks Job
# MAGIC * Scheduling a DLT pipeline as a Databricks Job
# MAGIC * Configuring linear dependencies between tasks using the Databricks Jobs UI
# MAGIC
# MAGIC ## Schedule a Notebook Job
# MAGIC
# MAGIC When using the Jobs UI to orchestrate a workload with multiple tasks, you'll always begin by scheduling a single task.
# MAGIC
# MAGIC Here, we'll start by scheduling the notebook `1 - Batch Job`.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar.
# MAGIC 1. Click the blue `Create Job` button
# MAGIC 1. Configure the task:
# MAGIC 1. Enter `Batch-Job` for the task name
# MAGIC 1. Select the notebook `1 - Batch Job` using the notebook picker
# MAGIC 1. Select an Existing All Purpose Cluster from the **Cluster** dropdown
# MAGIC 1. Click **Create**
# MAGIC
# MAGIC **Note**: When selecting your all purpose cluster, you will get a warning about how this will be billed as all purpose compute. Production jobs should always be scheduled against new job clusters appropriately sized for the workload, as this is billed at a much lower rate.
# MAGIC
# MAGIC Click the blue **Run now** button in the top right to confirm that you have successfully configured this task. From the **Runs** tab, clicking on the start time field will pull up the notebook with results.
# MAGIC
# MAGIC ## Schedule a DLT Pipeline as a Task
# MAGIC
# MAGIC In this step, we'll add a DLT pipeline to execute after the success of the task we configured in the previous step.
# MAGIC
# MAGIC **NOTE**: This step assumes that the DLT pipeline described in the lab for module 3 of this course was configured successfully. If this is not the case, instructions are included for configuring this DLT pipeline in the run output of the `Batch-Job` notebook executed above.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar
# MAGIC 1. Select the job you defined above by clicking on the name (this should have the name `Batch-Job`)
# MAGIC 1. At the top left of your screen, you'll see the **Runs** tab is currently selected; click the **Tasks** tab.
# MAGIC 1. Click the large blue circle with a **+** at the center bottom of the screen to add a new task
# MAGIC 1. Specify the **Task name** as `DLT-Pipeline`
# MAGIC 1. From **Type**, select `Pipeline`
# MAGIC 1. Click the **Pipeline** field and select the DLT pipeline you configured previously
# MAGIC 1. Note that the **Depends on** field defaults to your previously defined task
# MAGIC 1. Click the blue **Create task** button
# MAGIC
# MAGIC You should now see a screen with 2 boxes and a downward arrow between them. Your `Batch-Job` task will be at the top, leading into your `DLT-Pipeline` task. This visualization represents the dependencies between these tasks.
# MAGIC
# MAGIC Before clicking **Run now**, click the job name in the top left and provide something unique and descriptive, like `<your_initials>-MTJ-lab`
# MAGIC
# MAGIC **NOTE**: You may need to wait a few minutes as infrastructure for your DLT pipeline is deployed. Feel free to skip clicking **Run now** until the next task is configured if you don't want to wait.
# MAGIC
# MAGIC ## Schedule an Additional Notebook Task
# MAGIC
# MAGIC An additional notebook has been provided which queries some of the DLT metrics and the gold table defined in the DLT pipeline. We'll add this as a final task in our job.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the **Tasks** tab of the job you've been configuring
# MAGIC 1. Click the blue **+** button to add another task
# MAGIC 1. Specify the **Task name** as `Query-Results`
# MAGIC 1. Leave the **Type** set to `Notebook`
# MAGIC 1. Select the notebook `3 - Query Results Job` using the notebook picker
# MAGIC 1. Note that the **Depends on** field defaults to your previously defined task
# MAGIC 1. Select an Existing All Purpose Cluster from the **Cluster** dropdown
# MAGIC 1. Click the blue **Create task** button
# MAGIC
# MAGIC Click the blue **Run now** button in the top right of the screen to run this job.
# MAGIC
# MAGIC From the **Runs** tab, you will be able to click on the start time for this run under the **Active runs** section and visually track task progress.
# MAGIC
# MAGIC Once all your tasks have succeeded, review the contents of each task to confirm expected behavior.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 61.5 | 281 | 0.732127 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ../../Includes/dlt-setup
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and Configure a Pipeline
# MAGIC
# MAGIC **NOTE**: This lab is configured to work with the DLT pipeline completed as part of the DLT lab in the previous module. If you have not successfully completed this lab, follow the instructions below to configure a pipeline using the specified notebook.
# MAGIC
# MAGIC Instructions for configuring DLT pipeline:
# MAGIC 1. Click the **Jobs** button on the sidebar, then select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the notebook `4.1.2 - DLT Job`.
# MAGIC 1. Run the cell below to generate values for **source**, **Target** and **Storage Location**. (All of these will include your current username).
# MAGIC * Click `Add configuration`; enter the word `source` in the **Key** field and the output printed next to `source` below in the value field.
# MAGIC * Enter the database name printed next to `Target` below in the **Target** field.
# MAGIC * Enter the location printed next to `Storage Location` below in the **Storage Location** field.
# MAGIC 1. Set **Pipeline Mode** to **Triggered**.
# MAGIC 1. Disable autoscaling.
# MAGIC 1. Set the number of workers to 1.
# MAGIC 1. Click **Create**.
# COMMAND ----------
storage_location = userhome + "/output"
print(f"source : {dataLandingLocation.split(':')[1]}")
print(f"Target: {database}")
print(f"Storage Location: {storage_location.split(':')[1]}")
| 45.638889 | 256 | 0.70143 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ../../Includes/dlt-setup
# COMMAND ----------
# MAGIC %md
# MAGIC # Exploring the Results of a DLT Pipeline
# MAGIC
# MAGIC This Notebook explores the execution results of a DLT pipeline. Before proceeding, you will need one piece of information specific to your pipeline instance: the location in DBFS where results are stored. Because we did not specify a value for **Storage Location** when creating the pipeline, DLT automatically created a folder for us. Obtain this information as follows.
# MAGIC
# MAGIC Click **Settings** on the **Pipeline Details** page. This provides a JSON representation of the pipeline configuration. Copy the value specified for **storage** and substitute for `<storage>` throughout the rest of this Notebook.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_hint_24.png"/> Generally, and particularly in production systems, you will specify **Storage Location** in your pipeline configurations to have full control of where pipeline results are stored.
# COMMAND ----------
storage_location = userhome + "/output"
# COMMAND ----------
dbutils.fs.ls(storage_location)
# COMMAND ----------
# MAGIC %md
# MAGIC The `system` directory captures events associated with the pipeline.
# COMMAND ----------
dbutils.fs.ls(f"{storage_location}/system/events")
# COMMAND ----------
# MAGIC %md
# MAGIC These event logs are stored as a Delta table. Let's query the table.
# COMMAND ----------
display(spark.sql(f"SELECT * FROM delta.`{storage_location}/system/events`"))
# COMMAND ----------
# MAGIC %md
# MAGIC Let's view the contents of the *tables* directory.
# COMMAND ----------
dbutils.fs.ls(f"{storage_location}/tables")
# COMMAND ----------
# MAGIC %md
# MAGIC Let's query the gold table.
# COMMAND ----------
display(spark.sql(f"SELECT * FROM {database}.daily_patient_avg"))
# COMMAND ----------
database
| 29.68254 | 379 | 0.700311 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Navigating Databricks SQL and Attaching to Endpoints
# MAGIC
# MAGIC * Navigate to Databricks SQL
# MAGIC * Make sure that SQL is selected from the workspace option in the sidebar (directly below the Databricks logo)
# MAGIC * Make sure a SQL endpoint is on and accessible
# MAGIC * Navigate to SQL endpoints in the sidebar
# MAGIC * If a SQL endpoint exists and has the State `Running`, you'll use this endpoint
# MAGIC * If a SQL endpoint exists but is `Stopped`, click the `Start` button if you have this option (**NOTE**: Start the smallest endpoint you have available to you)
# MAGIC * If no endpoints exist and you have the option, click `Create SQL Endpoint`; name the endpoint something you'll recognize and set the cluster size to 2X-Small. Leave all other options as default.
# MAGIC * If you have no way to create or attach to a SQL endpoint, you'll need to contact a workspace administrator and request access to compute resources in Databricks SQL to continue.
# MAGIC * Navigate to the home page in Databricks SQL
# MAGIC * Click the Databricks logo at the top of the side nav bar
# MAGIC * Locate the **Sample dashboards** and click `Visit gallery`
# MAGIC * Click `Import` next to the **Retail Revenue & Supply Chain** option
# MAGIC * Assuming you have a SQL endpoint available, this should load a dashboard and immediately display results
# MAGIC * Click **Refresh** in the top right (the underlying data has not changed, but this is the button that would be used to pick up changes)
# MAGIC
# MAGIC # Updating a DBSQL Dashboard
# MAGIC
# MAGIC * Use the sidebar navigator to find the **Dashboards**
# MAGIC * Locate the sample dashboard you just loaded; it should be called **Retail Revenue & Supply Chain** and have your username under the `Created By` field. **NOTE**: the **My Dashboards** option on the right hand side can serve as a shortcut to filter out other dashboards in the workspace
# MAGIC * Click on the dashboard name to view it
# MAGIC * View the query behind the **Shifts in Pricing Priorities** plot
# MAGIC * Hover over the plot; three vertical dots should appear. Click on these
# MAGIC * Select **View Query** from the menu that appears
# MAGIC * Review the SQL code used to populate this plot
# MAGIC * Note that 3 tier namespacing is used to identify the source table; this is a preview of new functionality to be supported by Unity Catalog
# MAGIC * Click `Run` in the top right of the screen to preview the results of the query
# MAGIC * Review the visualization
# MAGIC * Under the query, a tab named **Table** should be selected; click **Price by Priority over Time** to switch to a preview of your plot
# MAGIC * Click **Edit Visualization** at the bottom of the screen to review settings
# MAGIC * Explore how changing settings impacts your visualization
# MAGIC * If you wish to apply your changes, click **Save**; otherwise, click **Cancel**
# MAGIC * Back in the query editor, click the **Add Visualization** button to the right of the visualization name
# MAGIC * Create a bar graph
# MAGIC * Set the **X Column** as `Date`
# MAGIC * Set the **Y Column** as `Total Price`
# MAGIC * **Group by** `Priority`
# MAGIC * Set **Stacking** to `Stack`
# MAGIC * Leave all other settings as defaults
# MAGIC * Click **Save**
# MAGIC * Back in the query editor, click the default name for this visualization to edit it; change the visualization name to `Stacked Price`
# MAGIC * At the bottom of the screen, click the three vertical dots to the left of the `Edit Visualization` button
# MAGIC * Select **Add to Dashboard** from the menu
# MAGIC * Select your `Retail Revenue & Supply Chain` dashboard
# MAGIC * Navigate back to your dashboard to view this change
# MAGIC
# MAGIC # Create a New Query
# MAGIC
# MAGIC * Use the sidebar to navigate to **Queries**
# MAGIC * Click the `Create Query` button
# MAGIC * In the **Schema Browser**, click on the current metastore and select `samples`
# MAGIC * Select the `tpch` database
# MAGIC * Click on the `partsupp` table to get a preview of the schema
# MAGIC * While hovering over the `partsupp` table name, click the `>>` button to insert the table name into your query text
# MAGIC * Write your first query:
# MAGIC * `SELECT * FROM` the `partsupp` table using the full name imported in the last step; click **Run** to preview results
# MAGIC * Modify this query to `GROUP BY ps_partkey` and return the `ps_partkey` and `sum(ps_availqty)`; click **Run** to preview results
# MAGIC * Update your query to alias the 2nd column to be named `total_availqty` and re-execute the query
# MAGIC * Save your query
# MAGIC * Click the **Save** button next to **Run** near the top right of the screen
# MAGIC * Give the query a name you'll remember
# MAGIC * Add the query to your dashboard
# MAGIC * Click the three vertical buttons at the bottom of the screen
# MAGIC * Click **Add to Dashboard**
# MAGIC * Select your `Retail Revenue & Supply Chain` dashboard
# MAGIC * Navigate back to your dashboard to view this change
# MAGIC * If you wish to change the organization of visualizations, click the three vertical buttons in the top right of the screen; click **Edit** in the menu that appears and you'll be able to drag and resize visualizations
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 69.321839 | 297 | 0.726827 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Managing Permissions for Databases, Tables, and Views
# MAGIC
# MAGIC The instructions as detailed below are provided for groups of users to explore how Table ACLs on Databricks work. It leverages Databricks SQL and the Data Explorer to accomplish these tasks, and assumes that at least one user in the group has administrator status (or that an admin has previously configured permissions to allow proper permissions for users to create databases, tables, and views).
# MAGIC
# MAGIC As written, these instructions are for the admin user to complete. The following notebook will have a similar exercise for users to complete in pairs.
# MAGIC
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Describe the default permissions for users and admins in DBSQL
# MAGIC * Identify the default owner for databases, tables, and views created in DBSQL and change ownership
# MAGIC * Use Data Explorer to navigate relational entities
# MAGIC * Configure permissions for tables and views with Data Explorer
# MAGIC * Configure minimal permissions to allow for table discovery and querying
# COMMAND ----------
# MAGIC %md
# MAGIC ## Generate Setup Statements
# MAGIC
# MAGIC The following cell uses Python to extract the username of the present user and format it into several statements used to create databases, tables, and views.
# MAGIC
# MAGIC Only the admin needs to execute the following cell. Successful execution will print out a series of formatted SQL queries, which can be copied into the DBSQL query editor and executed.
# COMMAND ----------
def generate_query(course, mode="reset"):
import re
username = spark.sql("SELECT current_user()").first()[0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_{course}"""
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
print(f"""
CREATE DATABASE IF NOT EXISTS {database}
LOCATION '{userhome}';
USE {database};
CREATE TABLE users
(id INT, name STRING, value DOUBLE, state STRING);
INSERT INTO users
VALUES (1, "Yve", 1.0, "CA"),
(2, "Omar", 2.5, "NY"),
(3, "Elia", 3.3, "OH"),
(4, "Rebecca", 4.7, "TX"),
(5, "Ameena", 5.3, "CA"),
(6, "Ling", 6.6, "NY"),
(7, "Pedro", 7.1, "KY");
CREATE VIEW ny_users_vw
AS SELECT * FROM users WHERE state = 'NY';
""")
generate_query("acls_demo")
# COMMAND ----------
# MAGIC %md
# MAGIC Steps:
# MAGIC * Run the cell above
# MAGIC * Copy the entire output to your clipboard
# MAGIC * Navigate to the Databricks SQL workspace
# MAGIC * Make sure that a DBSQL endpoint is running
# MAGIC * Use the left sidebar to select the **SQL Editor**
# MAGIC * Paste the query above and click the blue **Run** in the top right
# MAGIC
# MAGIC **NOTE**: You will need to be connected to a DBSQL endpoint to execute these queries successfully. If you cannot connect to a DBSQL endpoint, you will need to contact your administrator to give you access.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Using Data Explorer
# MAGIC
# MAGIC * Use the left sidebar navigator to select the **Data** tab; this places you in the **Data Explorer**
# MAGIC
# MAGIC ## What is the Data Explorer?
# MAGIC
# MAGIC The data explorer allows users and admins to:
# MAGIC * Navigate databases, tables, and views
# MAGIC * Explore data schema, metadata, and history
# MAGIC * Set and modify permissions of relational entities
# MAGIC
# MAGIC Note that at the moment these instructions are being written, Unity Catalog is not yet generally available. The 3 tier namespacing functionality it adds can be previewed to an extent by switching between the default `hive_metastore` and the `samples` catalog used for example dashboards and queries. Expect the Data Explorer UI and functionality to evolve as Unity Catalog is added to workspaces.
# MAGIC
# MAGIC ## Configuring Permissions
# MAGIC
# MAGIC By default, admins will have the ability to view all objects registered to the metastore and will be able to control permissions for other users in the workspace. Users will default to having **no** permissions on anything registered to the metastore, other than objects that they create in DBSQL; note that before users can create any databases, tables, or views, they must have create and usage privileges specifically granted to them.
# MAGIC
# MAGIC Generally, permissions will be set using Groups that have been configured by an administrator, often by importing organizational structures from SCIM integration with a different identity provider. This lesson will explore Access Control Lists (ACLs) used to control permissions, but will use individuals rather than groups.
# MAGIC
# MAGIC ## Table ACLs
# MAGIC
# MAGIC Databricks allows you to configure permissions for the following objects:
# MAGIC
# MAGIC | Object | Scope |
# MAGIC | --- | --- |
# MAGIC | CATALOG | controls access to the entire data catalog. |
# MAGIC | DATABASE | controls access to a database. |
# MAGIC | TABLE | controls access to a managed or external table. |
# MAGIC | VIEW | controls access to SQL views. |
# MAGIC | FUNCTION | controls access to a named function. |
# MAGIC | ANY FILE | controls access to the underlying filesystem. Users granted access to ANY FILE can bypass the restrictions put on the catalog, databases, tables, and views by reading from the file system directly. |
# MAGIC
# MAGIC **NOTE**: At present, the `ANY FILE` object cannot be set from Data Explorer.
# MAGIC
# MAGIC ## Granting Privileges
# MAGIC
# MAGIC Databricks admins and object owners can grant privileges according to the following rules:
# MAGIC
# MAGIC | Role | Can grant access privileges for |
# MAGIC | --- | --- |
# MAGIC | Databricks administrator | All objects in the catalog and the underlying filesystem. |
# MAGIC | Catalog owner | All objects in the catalog. |
# MAGIC | Database owner | All objects in the database. |
# MAGIC | Table owner | Only the table (similar options for views and functions). |
# MAGIC
# MAGIC **NOTE**: At present, Data Explorer can only be used to modify ownership of databases, tables, and views. Catalog permissions can be set interactively with the SQL Query Editor.
# MAGIC
# MAGIC ## Privileges
# MAGIC
# MAGIC The following privileges can be configured in Data Explorer:
# MAGIC
# MAGIC | Privilege | Ability |
# MAGIC | --- | --- |
# MAGIC | ALL PRIVILEGES | gives all privileges (is translated into all the below privileges). |
# MAGIC | SELECT | gives read access to an object. |
# MAGIC | MODIFY | gives ability to add, delete, and modify data to or from an object. |
# MAGIC | READ_METADATA | gives ability to view an object and its metadata. |
# MAGIC | USAGE | does not give any abilities, but is an additional requirement to perform any action on a database object. |
# MAGIC | CREATE | gives ability to create an object (for example, a table in a database). |
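# MAGIC
# MAGIC Although this lesson configures permissions through the Data Explorer UI, the same privileges can be expressed as SQL. As a point of reference only, the following cell prints a few illustrative statements against the demo database created above; treat this as a hedged sketch (the `users` group and the exact privilege list are assumptions), not a required step in the demo.
# COMMAND ----------
# Illustrative only: print (but do not execute) example ACL statements for the demo database.
# Assumes the same database naming convention used by generate_query() above and the built-in `users` group.
import re

username = spark.sql("SELECT current_user()").first()[0]
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_acls_demo"""

print(f"""
-- allow a principal to discover and query objects in the database
GRANT USAGE, READ_METADATA, SELECT ON DATABASE `{database}` TO `users`;

-- review the grants currently in place on the database
SHOW GRANT ON DATABASE `{database}`;

-- remove a previously granted privilege
REVOKE SELECT ON DATABASE `{database}` FROM `users`;
""")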
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review the Default Permissions
# MAGIC In the Data Explorer, find the database you created earlier (this should follow the pattern `dbacademy_<username>_acls_demo`).
# MAGIC
# MAGIC Clicking on the database name should display a list of the contained tables and views on the left hand side. On the right, you'll see some details about the database, including the **Owner** and **Location**.
# MAGIC
# MAGIC Click the **Permissions** tab to review who presently has permissions (depending on your workspace configuration, some permissions may have been inherited from settings on the catalog).
# COMMAND ----------
# MAGIC %md
# MAGIC ## Assigning Ownership
# MAGIC
# MAGIC Click the blue pencil next to the **Owner** field. Note that an owner can be set as an individual OR a group. For most implementations, having one or several small groups of trusted power users as owners will limit admin access to important datasets while ensuring that a single user does not create a choke point in productivity.
# MAGIC
# MAGIC Here, we'll set the owner to **Admins**, which is a default group containing all workspace administrators.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Change Database Permissions
# MAGIC
# MAGIC Begin by allowing all users to review metadata about the database.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Make sure you have the **Permissions** tab selected for the database
# MAGIC 1. Click the blue **Grant** button
# MAGIC 1. Select the **USAGE** and **READ_METADATA** options
# MAGIC 1. Select the **All Users** group from the drop down menu at the top
# MAGIC 1. Click **OK**
# MAGIC
# MAGIC Note that users may need to refresh their view to see these permissions updated. Updates should be reflected for users in near real time for both the Data Explorer and the SQL Editor.
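# MAGIC
# MAGIC If you prefer the SQL editor over the UI, a roughly equivalent grant is printed by the cell below (a sketch; it assumes the **All Users** group maps to the built-in `users` principal). The Query History, mentioned later in this lesson, shows the exact statement Data Explorer runs on your behalf.
# COMMAND ----------
# Sketch of the roughly equivalent SQL for the UI grant above; verify against the Query History.
import re

username = spark.sql("SELECT current_user()").first()[0]
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_acls_demo"""

print(f"GRANT USAGE, READ_METADATA ON DATABASE `{database}` TO `users`;")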
# COMMAND ----------
# MAGIC %md
# MAGIC ## Change View Permissions
# MAGIC
# MAGIC While users can now see information about this database, they won't be able to interact with the table or view declared above.
# MAGIC
# MAGIC Let's start by giving users the ability to query our view.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Select the `ny_users_vw`
# MAGIC 1. Select the **Permissions** tab
# MAGIC * Users should have inherited the permissions granted at the database level; you'll be able to see which permissions users currently have on an asset, as well as where that permission is inherited from
# MAGIC 1. Click the blue **Grant** button
# MAGIC 1. Select the **SELECT** and **READ_METADATA** options
# MAGIC * **READ_METADATA** is technically redundant, as users have already inherited this from the database. However, granting it at the view level allows us to ensure users still have this permission even if the database permissions are revoked
# MAGIC 1. Select the **All Users** group from the drop down menu at the top
# MAGIC 1. Click **OK**
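# MAGIC
# MAGIC As with the database grant, this UI action corresponds to a SQL statement similar to the one printed below. Treat it as a sketch: the securable keyword (`VIEW`) and the `users` principal are assumptions you should confirm against the Query History in your workspace.
# COMMAND ----------
# Sketch only: approximate SQL for the view-level grant performed in the UI above.
import re

username = spark.sql("SELECT current_user()").first()[0]
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_acls_demo"""

print(f"GRANT SELECT, READ_METADATA ON VIEW {database}.ny_users_vw TO `users`;")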
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run a Query to Confirm
# MAGIC
# MAGIC In the **SQL Editor**, all users should use the **Schema Browser** on the lefthand side to navigate to the database being controlled by the admin.
# MAGIC
# MAGIC Users should start a query by typing `SELECT * FROM ` and then click the **>>** that appears while hovering over the view name to insert it into their query.
# MAGIC
# MAGIC This query should return 2 results.
# MAGIC
# MAGIC **NOTE**: This view is defined against the `users` table, which has not had any permissions set yet. Note that users have access only to that portion of the data that passes through the filters defined on the view; this pattern demonstrates how a single underlying table can be used to drive controlled access to data for relevant stakeholders.
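# MAGIC
# MAGIC For convenience, the next cell prints a copy-and-paste version of this confirmation query with your database name filled in (users will still typically build the query themselves through the Schema Browser).
# COMMAND ----------
# Print the confirmation query against the ny_users_vw view created earlier.
import re

username = spark.sql("SELECT current_user()").first()[0]
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_acls_demo"""

print(f"SELECT * FROM {database}.ny_users_vw")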
# COMMAND ----------
# MAGIC %md
# MAGIC ## Change Table Permissions
# MAGIC
# MAGIC Perform the same steps as above, but now for the `users` table.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Select the `users` table
# MAGIC 1. Select the **Permissions** tab
# MAGIC 1. Click the blue **Grant** button
# MAGIC 1. Select the **SELECT** and **READ_METADATA** options
# MAGIC 1. Select the **All Users** group from the drop down menu at the top
# MAGIC 1. Click **OK**
# COMMAND ----------
# MAGIC %md
# MAGIC ## Have Users Attempt to `DROP TABLE`
# MAGIC
# MAGIC In the **SQL Editor**, encourage users to explore the data in this table.
# MAGIC
# MAGIC Encourage users to try to modify the data here; assuming permissions were set correctly, these commands should error out.
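# MAGIC
# MAGIC As a sketch of what "modifying the data" might look like, the next cell prints a few statements users could try; with only **SELECT** and **READ_METADATA** granted, each of these should be rejected. (The specific statements are illustrative assumptions, not part of the official lab flow.)
# COMMAND ----------
# Illustrative statements that should FAIL for users without MODIFY on the table.
import re

username = spark.sql("SELECT current_user()").first()[0]
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_acls_demo"""

print(f"""
UPDATE {database}.users SET value = 0 WHERE id = 1;
DELETE FROM {database}.users WHERE id = 1;
DROP TABLE {database}.users;
""")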
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Database for Derivative Datasets
# MAGIC
# MAGIC In most cases users will need a location to save out derivative datasets. At present, users may not have the ability to create new tables in any location (depending on existing ACLs in the workspace and databases created during previous lessons students have completed).
# MAGIC
# MAGIC The cell below prints out the code to generate a new database and grant permissions to all users.
# MAGIC
# MAGIC **NOTE**: Here we set permissions using the SQL Editor rather than the Data Explorer. You can review the Query History to note that all of our previous permission changes from Data Explorer were executed as SQL queries and logged here (additionally, most actions in the Data Explorer are logged with the corresponding SQL query used to populate the UI fields).
# COMMAND ----------
import re
username = spark.sql("SELECT current_user()").first()[0]
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_derivative"""
print(f"""
CREATE DATABASE {database};
GRANT USAGE, READ_METADATA, CREATE, MODIFY, SELECT ON DATABASE `{database}` TO `users`;
SHOW GRANT ON DATABASE `{database}`
""")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Have Users Create New Tables or Views
# MAGIC
# MAGIC Give users a moment to test that they can create tables and views in this new database.
# MAGIC
# MAGIC **NOTE**: Because users were also granted **MODIFY** and **SELECT** permissions, all users will immediately be able to query and modify entities created by their peers.
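# MAGIC
# MAGIC The next cell prints one possible test a user might run against the derivative database created in the previous step; the table name `ny_users_copy` is an illustrative placeholder only.
# COMMAND ----------
# Sketch of a statement a user could run to test CREATE in the derivative database.
# "ny_users_copy" is a hypothetical table name used for illustration only.
import re

username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)

print(f"""
CREATE TABLE dbacademy_{clean_username}_derivative.ny_users_copy
AS SELECT * FROM dbacademy_{clean_username}_acls_demo.ny_users_vw;
""")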
# COMMAND ----------
# MAGIC %md
# MAGIC ## Admin Configuration
# MAGIC
# MAGIC At present, users do not have any Table ACL permissions granted on the default catalog `hive_metastore`. The next lab assumes that users will be able to create databases.
# MAGIC
# MAGIC To enable the ability to create databases and tables in the default catalog using Databricks SQL, have a workspace admin run the following command in the DBSQL query editor:
# MAGIC
# MAGIC ```
# MAGIC GRANT usage, create ON CATALOG `hive_metastore` TO `users`
# MAGIC ```
# MAGIC
# MAGIC To confirm this has run successfully, execute the following query:
# MAGIC
# MAGIC ```
# MAGIC SHOW GRANT ON CATALOG `hive_metastore`
# MAGIC ```
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 48.063973 | 445 | 0.729874 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Configuring Privileges for Production Data and Derived Tables
# MAGIC
# MAGIC The instructions detailed below are provided for pairs of users to explore how Table ACLs on Databricks work. They leverage Databricks SQL and the Data Explorer to accomplish these tasks, and assume that neither user has admin privileges for the workspace. An admin will need to have previously granted `CREATE` and `USAGE` privileges on a catalog for users to be able to create databases in Databricks SQL.
# MAGIC
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Use Data Explorer to navigate relational entities
# MAGIC * Configure permissions for tables and views with Data Explorer
# MAGIC * Configure minimal permissions to allow for table discovery and querying
# MAGIC * Change ownership for databases, tables, and views created in DBSQL
# COMMAND ----------
# MAGIC %md
# MAGIC ## Exchange User Names with your Partner
# MAGIC If you are not in a workspace where your usernames correspond to your email addresses, make sure your partner has your username. They will need this when assigning privileges and searching for your database at later steps.
# MAGIC
# MAGIC The following query will print your username.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT current_user()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Generate Setup Statements
# MAGIC
# MAGIC The following cell uses Python to extract the username of the current user and format it into several statements used to create databases, tables, and views.
# MAGIC
# MAGIC Both students should execute the following cell. Successful execution will print out a series of formatted SQL queries, which can be copied into the DBSQL query editor and executed.
# COMMAND ----------
def generate_query(course, mode="reset"):
import re
import random
username = spark.sql("SELECT current_user()").first()[0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_{course}"""
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
print(f"""
CREATE DATABASE IF NOT EXISTS {database}
LOCATION '{userhome}';
USE {database};
CREATE TABLE beans
(name STRING, color STRING, grams FLOAT, delicious BOOLEAN);
INSERT INTO beans
VALUES ('black', 'black', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('lentils', 'brown', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('jelly', 'rainbow', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('pinto', 'brown', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('green', 'green', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('beanbag chair', 'white', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('lentils', 'green', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('kidney', 'red', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('castor', 'brown', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])});
CREATE VIEW tasty_beans
AS SELECT * FROM beans WHERE delicious = true;
""")
generate_query("acls_lab")
# COMMAND ----------
# MAGIC %md
# MAGIC Steps:
# MAGIC * Run the cell above
# MAGIC * Copy the entire output to your clipboard
# MAGIC * Navigate to the Databricks SQL workspace
# MAGIC * Make sure that a DBSQL endpoint is running
# MAGIC * Use the left sidebar to select the **SQL Editor**
# MAGIC * Paste the query above and click the blue **Run** in the top right
# MAGIC
# MAGIC **NOTE**: You will need to be connected to a DBSQL endpoint to execute these queries successfully. If you cannot connect to a DBSQL endpoint, you will need to contact your administrator to give you access.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Find Your Database
# MAGIC In the Data Explorer, find the database you created earlier (this should follow the pattern `dbacademy_<username>_acls_lab`).
# MAGIC
# MAGIC Clicking on the database name should display a list of the contained tables and views on the left hand side. On the right, you'll see some details about the database, including the **Owner** and **Location**.
# MAGIC
# MAGIC Click the **Permissions** tab to review who presently has permissions (depending on your workspace configuration, some permissions may have been inherited from settings on the catalog).
# COMMAND ----------
# MAGIC %md
# MAGIC ## Change Database Permissions
# MAGIC
# MAGIC Steps:
# MAGIC 1. Make sure you have the **Permissions** tab selected for the database
# MAGIC 1. Click the blue **Grant** button
# MAGIC 1. Select the **USAGE**, **SELECT**, and **READ_METADATA** options
# MAGIC 1. Enter the username of your partner in the field at the top.
# MAGIC 1. Click **OK**
# MAGIC
# MAGIC Confirm with your partner that you can each see each other's databases and tables.
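# MAGIC
# MAGIC If the Data Explorer is unavailable, a roughly equivalent grant can be issued from the SQL editor. The statement printed below is a sketch: `partner@example.com` is a placeholder you must replace with your partner's actual username.
# COMMAND ----------
# Sketch of the equivalent SQL grant; replace partner@example.com with your partner's username.
import re

username = spark.sql("SELECT current_user()").first()[0]
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_acls_lab"""

print(f"GRANT USAGE, SELECT, READ_METADATA ON DATABASE `{database}` TO `partner@example.com`;")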
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run a Query to Confirm
# MAGIC
# MAGIC By granting `USAGE`, `SELECT`, and `READ_METADATA` on your database, your partner should now be able to freely query the tables and views in this database, but will not be able to create new tables OR modify your data.
# MAGIC
# MAGIC In the SQL Editor, each user should run a series of queries to confirm this behavior in the database they were just added to.
# MAGIC
# MAGIC **Make sure you specify your partner's database while running the queries below.**
# MAGIC
# MAGIC Queries to execute:
# MAGIC * `SELECT * FROM <database>.beans`
# MAGIC * `SELECT * FROM <database>.tasty_beans`
# MAGIC * `SELECT * FROM <database>.beans MINUS SELECT * FROM <database>.tasty_beans`
# MAGIC * ```
# MAGIC UPDATE <database>.beans
# MAGIC SET color = 'pink'
# MAGIC WHERE name = 'black'
# MAGIC ```
# MAGIC
# MAGIC **NOTE**: These first 3 queries should succeed, but the last should fail.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Execute a Query to Generate the Union of Your Beans
# MAGIC
# MAGIC Modify the query below to specify the `beans` tables in each of your databases.
# MAGIC
# MAGIC ```
# MAGIC SELECT * FROM <database>.beans
# MAGIC UNION ALL TABLE <database>.beans
# MAGIC ```
# MAGIC
# MAGIC **NOTE**: Because random values were inserted for the `grams` and `delicious` columns, you should see 2 distinct rows for each `name`, `color` pair.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Register a Derivative View to Your Database
# MAGIC
# MAGIC Modify the query below to register the results of the previous query to your database.
# MAGIC
# MAGIC ```
# MAGIC CREATE VIEW <database>.our_beans AS
# MAGIC SELECT * FROM <database>.beans
# MAGIC UNION ALL TABLE <database>.beans
# MAGIC ```
# COMMAND ----------
# MAGIC %md
# MAGIC ## Query Your Partner's View
# MAGIC
# MAGIC Once your partner has successfully completed the previous step, run the following query against each of your tables; you should get the same results:
# MAGIC
# MAGIC ```
# MAGIC SELECT name, color, delicious, sum(grams)
# MAGIC FROM our_beans
# MAGIC GROUP BY name, color, delicious
# MAGIC ```
# COMMAND ----------
# MAGIC %md
# MAGIC ## Add Modify Permissions
# MAGIC
# MAGIC Now try to drop each other's `beans` tables. At the moment, this shouldn't work.
# MAGIC
# MAGIC Using the Data Explorer, add the `MODIFY` permission for your `beans` table for your partner.
# MAGIC
# MAGIC Again, attempt to drop your partner's `beans` table. This time, it should succeed.
# MAGIC
# MAGIC Try to re-execute queries against any of the views or tables you'd previously queried in this lab.
# MAGIC
# MAGIC **NOTE**: If steps were completed successfully, none of your previous queries should work, as the data referenced by your views has been permanently deleted. This demonstrates the risks associated with providing `MODIFY` privileges to users on data that will be used in production applications and dashboards.
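# MAGIC
# MAGIC The cell below prints a sketch of the two statements involved in this exercise: the `MODIFY` grant on your `beans` table (should you prefer SQL over the Data Explorer) and the `DROP TABLE` your partner will attempt. The `partner@example.com` principal is a placeholder, and your partner must substitute *your* database name when dropping the table.
# COMMAND ----------
# Sketch only: MODIFY grant plus the DROP statement your partner will attempt against YOUR database.
# partner@example.com is a placeholder username.
import re

username = spark.sql("SELECT current_user()").first()[0]
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_acls_lab"""

print(f"""
GRANT MODIFY ON TABLE {database}.beans TO `partner@example.com`;

-- run by your partner, substituting this database name for theirs
DROP TABLE {database}.beans;
""")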
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 42.068627 | 418 | 0.708253 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Last Mile ETL with Databricks SQL
# MAGIC
# MAGIC Before we continue, let's do a recap of some of the things we've learned so far:
# MAGIC 1. The Databricks workspace contains a suite of tools to simplify the data engineering development lifecycle
# MAGIC 1. Databricks notebooks allow users to mix SQL with other programming languages to define ETL workloads
# MAGIC 1. Delta Lake provides ACID compliant transactions and makes incremental data processing easy in the Lakehouse
# MAGIC 1. Delta Live Tables extends the SQL syntax to support many design patterns in the Lakehouse, and simplifies infrastructure deployment
# MAGIC 1. Multi-task jobs allows for full task orchestration, adding dependencies while scheduling a mix of notebooks and DLT pipelines
# MAGIC 1. Databricks SQL allows users to edit and execute SQL queries, build visualizations, and define dashboards
# MAGIC 1. Data Explorer simplifies managing Table ACLs, making Lakehouse data available to SQL analysts (soon to be expanded greatly by Unity Catalog)
# MAGIC
# MAGIC In this section, we'll focus on exploring more DBSQL functionality to support production workloads.
# MAGIC
# MAGIC We'll start by focusing on leveraging Databricks SQL to configure queries that support last mile ETL for analytics. Note that while we'll be using the Databricks SQL UI for this demo, SQL Endpoints [integrate with a number of other tools to allow external query execution](https://docs.databricks.com/integrations/partners.html), as well as having [full API support for executing arbitrary queries programmatically](https://docs.databricks.com/sql/api/index.html).
# MAGIC
# MAGIC From these query results, we'll generate a series of visualizations, which we'll combine into a dashboard.
# MAGIC
# MAGIC Finally, we'll walk through scheduling updates for queries and dashboards, and demonstrate setting alerts to help monitor the state of production datasets over time.
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you will feel confident:
# MAGIC * Using Databricks SQL as a tool to support production ETL tasks backing analytic workloads
# MAGIC * Configuring SQL queries and visualizations with the Databricks SQL Editor
# MAGIC * Creating dashboards in Databricks SQL
# MAGIC * Scheduling updates for queries and dashboards
# MAGIC * Setting alerts for SQL queries
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run Setup Script
# MAGIC The following cell runs a notebook that defines a class we'll use to generate SQL queries.
# COMMAND ----------
# MAGIC %run ../Includes/query_generator
# COMMAND ----------
# MAGIC %md
# MAGIC Executing the following cell will reset the database and set the variables used for later query formatting. You can remove the `mode="reset"` argument if you wish to print out the queries without resetting the target database.
# COMMAND ----------
Gen = QueryGenerator(course="4_4", mode="reset")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Demo Database
# MAGIC Execute the following cell and copy the results into the Databricks SQL Editor.
# MAGIC
# MAGIC These queries:
# MAGIC * Create a new database
# MAGIC * Declare two tables (we'll use these for loading data)
# MAGIC * Declare two functions (we'll use these for generating data)
# MAGIC
# MAGIC Once copied, execute the query using the **Run** button.
# COMMAND ----------
Gen.config()
# COMMAND ----------
# MAGIC %md
# MAGIC **NOTE**: The queries above are only designed to be run once after resetting the demo completely to reconfigure the environment. Users will need to have `CREATE` and `USAGE` permissions on the catalog to execute them.
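# MAGIC
# MAGIC If these setup queries fail with permission errors, an admin can issue catalog-level grants along the lines of the statement printed below (a sketch that assumes the default `hive_metastore` catalog and the built-in `users` group).
# COMMAND ----------
# Sketch of the admin-side grant that allows users to create databases for this demo.
print("GRANT USAGE, CREATE ON CATALOG `hive_metastore` TO `users`;")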
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Query to Load Data
# MAGIC Execute the cell below to print out a formatted SQL query for loading data into the `user_ping` table created in the previous step.
# MAGIC
# MAGIC Save this query with the name **Load Ping Data**.
# MAGIC
# MAGIC Run this query to load a batch of data.
# COMMAND ----------
Gen.load()
# COMMAND ----------
# MAGIC %md
# MAGIC Executing the query should load some data and return a preview of the data in the table.
# MAGIC
# MAGIC **NOTE**: Random numbers are being used to define and load data, so each user will have slightly different values present.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Set a Query Refresh Schedule
# MAGIC
# MAGIC Steps:
# MAGIC * Locate the **Refresh Schedule** field at the bottom right of the SQL query editor box; click the blue **Never**
# MAGIC * Use the drop down to change to Refresh every **1 minute**
# MAGIC * For **Ends**, click the **On** radio button
# MAGIC * Select tomorrow's date
# MAGIC * Click **OK**
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Query to Track Total Records
# MAGIC
# MAGIC Execute the cell below to print out a formatted SQL query to track total records in the `user_ping` table.
# MAGIC
# MAGIC Save this query with the name **User Counts**.
# MAGIC
# MAGIC Run the query to calculate the current results.
# COMMAND ----------
Gen.user_counts()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Bar Graph Visualization
# MAGIC
# MAGIC Steps:
# MAGIC * Click the **Add Visualization** button
# MAGIC * Click on the name (should default to something like `Visualization 1`) and change the name to **Total User Records**
# MAGIC * Set `user_id` for the **X Column**
# MAGIC * Set `total_records` for the **Y Columns**
# MAGIC * Click **Save**
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a New Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Click the button with three vertical dots at the bottom of the screen and select **Add to Dashboard**.
# MAGIC * Click the **Create new dashboard** option
# MAGIC * Name your dashboard **User Ping Summary `<your_initials_here>`**
# MAGIC * Click **Save** to create the new dashboard
# MAGIC * Your newly created dashboard should now be selected as the target; click **OK** to add your visualization
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Query to Calculate the Recent Average Ping
# MAGIC
# MAGIC Execute the cell below to print out a formatted SQL query to calculate the average ping observed per user over a 3 minute window.
# MAGIC
# MAGIC Save this query with the name **Avg Ping**.
# MAGIC
# MAGIC Run the query to calculate the current results.
# COMMAND ----------
Gen.avg_ping()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Add a Line Plot Visualization to your Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Click the **Add Visualization** button
# MAGIC * Click on the name (should default to something like `Visualization 1`) and change the name to **Avg User Ping**
# MAGIC * Select `Line` for the **Visualization Type**
# MAGIC * Set `end_time` for the **X Column**
# MAGIC * Set `avg_ping` for the **Y Columns**
# MAGIC * Set `user_id` for the **Group by**
# MAGIC * Click **Save**
# MAGIC * Click the button with three vertical dots at the bottom of the screen and select **Add to Dashboard**.
# MAGIC * Select the dashboard you created earlier
# MAGIC * Click **OK** to add your visualization
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Query to Report Summary Statistics
# MAGIC
# MAGIC Execute the cell below to print out a formatted SQL query that summarizes all records for a user.
# MAGIC
# MAGIC Save this query with the name **Ping Summary**.
# MAGIC
# MAGIC Run the query to calculate the current results.
# COMMAND ----------
Gen.summary()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Add the Summary Table to your Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Click the button with three vertical dots at the bottom of the screen and select **Add to Dashboard**.
# MAGIC * Select the dashboard you created earlier
# MAGIC * Click **OK** to add your visualization
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review and Refresh your Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Use the left side bar to navigate to **Dashboards**
# MAGIC * Find the dashboard you've added your queries to
# MAGIC * Click the blue **Refresh** button to update your dashboard
# MAGIC * Click the **Schedule** button to review dashboard scheduling options
# MAGIC * Note that scheduling a dashboard to update will execute all queries associated with that dashboard
# MAGIC * Do not schedule the dashboard at this time
# COMMAND ----------
# MAGIC %md
# MAGIC ## Share your Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Click the blue **Share** button
# MAGIC * Select **All Users** from the top field
# MAGIC * Choose **Can Run** from the right field
# MAGIC * Click **Add**
# MAGIC * Change the **Credentials** to **Run as viewer**
# MAGIC
# MAGIC **NOTE**: At present, no other users should have any permissions to run your dashboard, as they have not been granted permissions to the underlying databases and tables using Table ACLs. If you wish other users to be able to trigger updates to your dashboard, you will either need to grant them permissions to **Run as owner** or add permissions for the tables referenced in your queries.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Set Up an Alert
# MAGIC
# MAGIC Steps:
# MAGIC * Use the left side bar to navigate to **Alerts**
# MAGIC * Click **Create Alert** in the top right
# MAGIC * Click the field at the top left of the screen to give the alert a name **`<your_initials> Count Check`**
# MAGIC * Select your **User Counts** query
# MAGIC * For the **Trigger when** options, configure:
# MAGIC * **Value column**: `total_records`
# MAGIC * **Condition**: `>`
# MAGIC * **Threshold**: 15
# MAGIC * For **Refresh**, select **Never**
# MAGIC * Click **Create Alert**
# MAGIC * On the next screen, click the blue **Refresh** in the top right to evaluate the alert
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review Alert Destination Options
# MAGIC
# MAGIC
# MAGIC
# MAGIC Steps:
# MAGIC * From the preview of your alert, click the blue **Add** button to the right of **Destinations** on the right side of the screen
# MAGIC * At the bottom of the window that pops up, locate and click the blue text in the message **Create new destinations in Alert Destinations**
# MAGIC * Review the available alerting options
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 39.430147 | 471 | 0.715442 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## End-to-End ETL in the Lakehouse
# MAGIC
# MAGIC In this notebook, you will pull together concepts learned throughout the course to complete an example data pipeline.
# MAGIC
# MAGIC The following is a non-exhaustive list of skills and tasks necessary to successfully complete this exercise:
# MAGIC * Using Databricks notebooks to write queries in SQL and Python
# MAGIC * Creating and modifying databases, tables, and views
# MAGIC * Using Auto Loader and Spark Structured Streaming for incremental data processing in a multi-hop architecture
# MAGIC * Using Delta Live Table SQL syntax
# MAGIC * Configuring a Delta Live Table pipeline for continuous processing
# MAGIC * Using Databricks Jobs to orchestrate tasks from notebooks stored in Repos
# MAGIC * Setting CRON schedules for Databricks Jobs
# MAGIC * Defining queries in Databricks SQL
# MAGIC * Creating visualizations in Databricks SQL
# MAGIC * Defining Databricks SQL dashboards to review metrics and results
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run Setup
# MAGIC Run the following cell to reset all the databases and directories associated with this lab.
# COMMAND ----------
# MAGIC %run "../../Includes/dlt-setup" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Land Initial Data
# MAGIC Seed the landing zone with some data before proceeding.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and Configure a DLT Pipeline
# MAGIC **NOTE**: The main difference between the instructions here and in previous labs with DLT is that in this instance, we will be setting up our pipeline for **Continuous** execution in **Production** mode.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Click the **Jobs** button on the sidebar, then select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the notebook `1 - DLT Task`.
# MAGIC 1. Run the cell below to generate values for **source**, **Target** and **Storage Location**. (All of these will include your current username).
# MAGIC * Click `Add configuration`; enter the word `source` in the **Key** field and the output printed next to `source` below in the value field.
# MAGIC * Enter the database name printed next to `Target` below in the **Target** field.
# MAGIC * Enter the location printed next to `Storage Location` below in the **Storage Location** field.
# MAGIC 1. Set **Pipeline Mode** to **Continuous**.
# MAGIC 1. Disable autoscaling.
# MAGIC 1. Set the number of workers to 1.
# MAGIC 1. Click **Create**.
# MAGIC
# MAGIC In the UI that populates, change from **Development** to **Production** mode. This should begin the deployment of infrastructure.
# COMMAND ----------
storage_location = userhome + "/output"
print(f"source : {dataLandingLocation.split(':')[1]}")
print(f"Target: {database}")
print(f"Storage Location: {storage_location.split(':')[1]}")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Schedule a Notebook Job
# MAGIC
# MAGIC Our DLT pipeline is set up to process data as soon as it arrives. We'll schedule a notebook to land a new batch of data each minute so we can see this functionality in action.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar.
# MAGIC 1. Click the blue `Create Job` button
# MAGIC 1. Configure the task:
# MAGIC 1. Enter `Land-Data` for the task name
# MAGIC 1. Select the notebook `2 - Land New Data` using the notebook picker
# MAGIC 1. Select an Existing All Purpose Cluster from the **Cluster** dropdown
# MAGIC 1. Click **Create**
# MAGIC
# MAGIC **Note**: When selecting your all purpose cluster, you will get a warning about how this will be billed as all purpose compute. Production jobs should always be scheduled against new job clusters appropriately sized for the workload, as this is billed at a much lower rate.
# MAGIC
# MAGIC ## Set a CRON Schedule for your Job
# MAGIC Steps:
# MAGIC * On the right hand side of the Jobs UI, locate the **Schedule** section.
# MAGIC * Click on the **Edit schedule** button to explore scheduling options.
# MAGIC * Changing the **Schedule type** field from **Manual** to **Scheduled** will bring up a cron scheduling UI.
# MAGIC * Set the schedule to update every **2 minutes**
# MAGIC * Click **Save**
# MAGIC
# MAGIC **NOTE**: If you wish, you can click **Run now** to trigger the first run, or wait until the top of the next minute to make sure your scheduling has worked successfully.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Register DLT Event Metrics for Querying with DBSQL
# MAGIC
# MAGIC The following cell prints out SQL statements to register the DLT event logs to your target database for querying in DBSQL.
# MAGIC
# MAGIC Execute the output code with the DBSQL Query Editor to register these tables and views. Explore each and make note of the logged event metrics.
# COMMAND ----------
print(f"""
CREATE TABLE IF NOT EXISTS {database}.dlt_events
LOCATION '{storage_location}/system/events';
CREATE VIEW IF NOT EXISTS {database}.dlt_success AS
SELECT * FROM {database}.dlt_events
WHERE details:flow_progress:metrics IS NOT NULL;
CREATE VIEW IF NOT EXISTS {database}.dlt_metrics AS
SELECT timestamp, origin.flow_name, details
FROM {database}.dlt_success
ORDER BY timestamp DESC;
""")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Define a Query on the Gold Table
# MAGIC
# MAGIC The `daily_patient_avg` table is automatically updated each time a new batch of data is processed through the DLT pipeline. Each time a query is executed against this table, DBSQL will confirm if there is a newer version and then materialize results from the newest available version.
# MAGIC
# MAGIC Run the following cell to print out a query with your database name. Save this as a DBSQL query.
# COMMAND ----------
print(f"SELECT * FROM {database}.daily_patient_avg")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Add a Line Plot Visualization
# MAGIC
# MAGIC To track trends in patient averages over time, create a line plot and add it to a new dashboard.
# MAGIC
# MAGIC Create a line plot with the following settings:
# MAGIC * **X Column**: `date`
# MAGIC * **Y Columns**: `avg_heartrate`
# MAGIC * **Group By**: `name`
# MAGIC
# MAGIC Add this visualization to a dashboard.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Track Data Processing Progress
# MAGIC
# MAGIC The code below extracts the `flow_name`, `timestamp`, and `num_output_rows` from the DLT event logs.
# MAGIC
# MAGIC Save this query in DBSQL, then define a bar plot visualization that shows:
# MAGIC * **X Column**: `timestamp`
# MAGIC * **Y Columns**: `num_output_rows`
# MAGIC * **Group By**: `flow_name`
# MAGIC
# MAGIC Add your visualization to your dashboard.
# COMMAND ----------
print(f"""
SELECT flow_name, timestamp, int(details:flow_progress:metrics:num_output_rows) num_output_rows
FROM {database}.dlt_metrics
ORDER BY timestamp DESC
""")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Refresh your Dashboard and Track Results
# MAGIC
# MAGIC The `Land-Data` notebook scheduled with Jobs above has 12 batches of data, each representing a month of recordings for our small sampling of patients. As configured per our instructions, it should take just over 20 minutes for all of these batches of data to be triggered and processed (we scheduled the Databricks Job to run every 2 minutes, and batches of data will process through our pipeline very quickly after initial ingestion).
# MAGIC
# MAGIC Refresh your dashboard and review your visualizations to see how many batches of data have been processed. (If you followed the instructions as outlined here, there should be 12 distinct flow updates tracked by your DLT metrics.) If all source data has not yet been processed, you can go back to the Databricks Jobs UI and manually trigger additional batches.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Execute a Query to Repair Broken Data
# MAGIC
# MAGIC Review the code that defined the `recordings_enriched` table to identify the filter applied for the quality check.
# MAGIC
# MAGIC In the cell below, write a query that returns all the records from the `recordings_bronze` table that were refused by this quality check.
# COMMAND ----------
# ANSWER
display(spark.sql(f"SELECT * FROM {database}.recordings_bronze WHERE heartrate <= 0"))
# COMMAND ----------
# MAGIC %md
# MAGIC For the purposes of our demo, let's assume that thorough manual review of our data and systems has demonstrated that occasionally otherwise valid heartrate recordings are returned as negative values.
# MAGIC
# MAGIC Run the following query to examine these same rows with the negative sign removed.
# COMMAND ----------
display(spark.sql(f"SELECT abs(heartrate), * FROM {database}.recordings_bronze WHERE heartrate <= 0"))
# COMMAND ----------
# MAGIC %md
# MAGIC To complete our dataset, we wish to insert these fixed records into the silver `recordings_enriched` table.
# MAGIC
# MAGIC Use the cell below to update the query used in the DLT pipeline to execute this repair.
# MAGIC
# MAGIC **NOTE**: Make sure you update the code to only process those records that were previously rejected due to the quality check.
# COMMAND ----------
# ANSWER
spark.sql(f"""
MERGE INTO {database}.recordings_enriched t
USING (SELECT
CAST(a.device_id AS INTEGER) device_id,
CAST(a.mrn AS LONG) mrn,
abs(CAST(a.heartrate AS DOUBLE)) heartrate,
CAST(from_unixtime(a.time, 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP) time,
b.name
FROM {database}.recordings_bronze a
INNER JOIN {database}.pii b
ON a.mrn = b.mrn
WHERE heartrate <= 0) v
ON t.mrn=v.mrn AND t.time=v.time
WHEN NOT MATCHED THEN INSERT *
""")
# COMMAND ----------
# MAGIC %md
# MAGIC Use the cell below to manually or programmatically confirm that this update has been successful.
# MAGIC
# MAGIC (The total number of records in the `recordings_bronze` table should now be equal to the total number of records in `recordings_enriched`.)
# COMMAND ----------
# ANSWER
assert spark.table(f"{database}.recordings_bronze").count() == spark.table(f"{database}.recordings_enriched").count()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Consider Production Data Permissions
# MAGIC
# MAGIC Note that while our manual repair of the data was successful, as the owner of these datasets we have, by default, permission to modify or delete this data from anywhere we execute code.
# MAGIC
# MAGIC To put this another way: our current permissions would allow us to change or drop our production tables permanently if an errant SQL query is accidentally executed with the current user's permissions (or if other users are granted similar permissions).
# MAGIC
# MAGIC While for the purposes of this lab, we desired to have full permissions on our data, as we move code from development to production, it is safer to leverage [service principals](https://docs.databricks.com/administration-guide/users-groups/service-principals.html) when scheduling Jobs and DLT Pipelines to avoid accidental data modifications.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Shut Down Production Infrastructure
# MAGIC
# MAGIC Note that Databricks Jobs, DLT Pipelines, and scheduled DBSQL queries and dashboards are all designed to provide sustained execution of production code. In this end-to-end demo, you were instructed to configure a Job and Pipeline for continuous data processing. To prevent these workloads from continuing to execute, you should **Pause** your Databricks Job and **Stop** your DLT pipeline. Deleting these assets will also ensure that production infrastructure is terminated.
# MAGIC
# MAGIC **NOTE**: All instructions for DBSQL asset scheduling in previous lessons instructed users to set the update schedule to end tomorrow. You may choose to go back and also cancel these updates to prevent DBSQL endpoints from staying on until that time.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 44.786477 | 482 | 0.730198 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ../../Includes/dlt-setup
# COMMAND ----------
File.newData()
| 11 | 37 | 0.616822 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
import pyspark.sql.functions as F
import re
course_name = "eltsql"
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
database = f"""{clean_username}_dbacademy_{course_name}"""
userhome = f"dbfs:/user/{username}/dbacademy/{course_name}"
print(f"username: {username}")
print(f"clean_username: {clean_username}")
print(f"database: {database}")
print(f"userhome: {userhome}")
dbutils.fs.rm(userhome, True)
print(f"Dropping the database {database}")
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
| 27.090909 | 59 | 0.696921 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
for stream in spark.streams.active:
stream.stop()
# COMMAND ----------
import pyspark.sql.functions as F
import re
course_name = "dewd"
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
database = f"dbacademy_{clean_username}_{course_name}"
userhome = f"dbfs:/user/{username}/dbacademy/{course_name}"
print(f"""
username: {username}
userhome: {userhome}
database: {database}""")
dbutils.widgets.text("mode", "setup")
mode = dbutils.widgets.get("mode")
if mode == "reset" or mode == "clean":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
if mode != "clean":
spark.sql(f"CREATE DATABASE IF NOT EXISTS {database}")
spark.sql(f"USE {database}")
# COMMAND ----------
# MAGIC %run ./mount-datasets
# COMMAND ----------
outputPath = userhome + "/streaming-concepts"
checkpointPath = outputPath + "/checkpoint"
# original dataset
dataSource = "/mnt/training/definitive-guide/data/activity-json/streaming"
# data landing location; files will be copied from the original dataset one at a time for the incremental ingestion use case
dataLandingLocation = outputPath + "/landing-zone"
outputTable = "bronze_table"
spark.conf.set('c.outputTable', outputTable)
# COMMAND ----------
class FileArrival:
    def __init__(self, dataSource, landingZone):
        # Cache the list of source files and create the landing zone directory
        self.sourceFiles = dbutils.fs.ls(dataSource)
        dbutils.fs.mkdirs(landingZone)
        self.landingZone = landingZone
        self.fileID = 0
    def newData(self, numFiles=1):
        # Copy the next numFiles source files into the landing zone, tracking position with fileID
        for i in range(numFiles):
            dbutils.fs.cp(self.sourceFiles[self.fileID].path, self.landingZone)
            self.fileID+=1
# COMMAND ----------
File = FileArrival(dataSource, dataLandingLocation)
File.newData()
| 24.375 | 116 | 0.67908 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
for stream in spark.streams.active:
stream.stop()
# COMMAND ----------
import pyspark.sql.functions as F
import re
dbutils.widgets.text("course", "dewd")
course_name = dbutils.widgets.get("course")
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
database = f"dbacademy_{clean_username}_{course_name}"
userhome = f"dbfs:/user/{username}/dbacademy/{course_name}"
print(f"""
username: {username}
userhome: {userhome}
database: {database}""")
dbutils.widgets.text("mode", "setup")
mode = dbutils.widgets.get("mode")
if mode == "reset" or mode == "clean":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
# COMMAND ----------
# MAGIC %run ./mount-datasets
# COMMAND ----------
sqlContext.setConf("spark.sql.shuffle.partitions", spark.sparkContext.defaultParallelism)
# COMMAND ----------
dataSource = "/mnt/training/healthcare"
dataLandingLocation = userhome + "/source"
bronzePath = userhome + "/bronze"
recordingsParsedPath = userhome + "/silver/recordings_parsed"
recordingsEnrichedPath = userhome + "/silver/recordings_enriched"
dailyAvgPath = userhome + "/gold/dailyAvg"
checkpointPath = userhome + "/checkpoints"
bronzeCheckpoint = userhome + "/checkpoints/bronze"
recordingsParsedCheckpoint = userhome + "/checkpoints/recordings_parsed"
recordingsEnrichedCheckpoint = userhome + "/checkpoints/recordings_enriched"
dailyAvgCheckpoint = userhome + "/checkpoints/dailyAvgPath"
# COMMAND ----------
class FileArrival:
    def __init__(self):
        self.source = dataSource + "/tracker/streaming/"
        self.userdir = dataLandingLocation + "/"
        try:
            # Resume from the most recent file already landed (source files are named 01.json ... 12.json)
            self.curr_mo = 1 + int(max([x[1].split(".")[0] for x in dbutils.fs.ls(self.userdir)]))
        except:
            # No files landed yet (or landing directory does not exist): start with the first month
            self.curr_mo = 1
    def newData(self, continuous=False):
        # Copy one monthly file into the landing zone, or all remaining files when continuous=True
        if self.curr_mo > 12:
            print("Data source exhausted\n")
        elif continuous == True:
            while self.curr_mo <= 12:
                curr_file = f"{self.curr_mo:02}.json"
                dbutils.fs.cp(self.source + curr_file, self.userdir + curr_file)
                self.curr_mo += 1
        else:
            curr_file = f"{str(self.curr_mo).zfill(2)}.json"
            dbutils.fs.cp(self.source + curr_file, self.userdir + curr_file)
            self.curr_mo += 1
File = FileArrival()
| 29.814815 | 98 | 0.633267 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %scala
# MAGIC def cloudAndRegion = {
# MAGIC import com.databricks.backend.common.util.Project
# MAGIC import com.databricks.conf.trusted.ProjectConf
# MAGIC import com.databricks.backend.daemon.driver.DriverConf
# MAGIC val conf = new DriverConf(ProjectConf.loadLocalConfig(Project.Driver))
# MAGIC (conf.cloudProvider.getOrElse("Unknown"), conf.region)
# MAGIC }
# MAGIC
# MAGIC // These keys are read-only so they're okay to have here
# MAGIC val awsAccessKey = "AKIAJBRYNXGHORDHZB4A"
# MAGIC val awsSecretKey = "a0BzE1bSegfydr3%2FGE3LSPM6uIV5A4hOUfpH8aFF"
# MAGIC val awsAuth = s"${awsAccessKey}:${awsSecretKey}"
# MAGIC
# MAGIC def getAwsMapping(region:String):(String,Map[String,String]) = {
# MAGIC
# MAGIC val MAPPINGS = Map(
# MAGIC "ap-northeast-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-northeast-1/common", Map[String,String]()),
# MAGIC "ap-northeast-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-northeast-2/common", Map[String,String]()),
# MAGIC "ap-south-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-south-1/common", Map[String,String]()),
# MAGIC "ap-southeast-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-southeast-1/common", Map[String,String]()),
# MAGIC "ap-southeast-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-southeast-2/common", Map[String,String]()),
# MAGIC "ca-central-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ca-central-1/common", Map[String,String]()),
# MAGIC "eu-central-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-eu-central-1/common", Map[String,String]()),
# MAGIC "eu-west-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-eu-west-1/common", Map[String,String]()),
# MAGIC "eu-west-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-eu-west-2/common", Map[String,String]()),
# MAGIC "eu-west-3" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-eu-central-1/common", Map[String,String]()),
# MAGIC
# MAGIC "sa-east-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-sa-east-1/common", Map[String,String]()),
# MAGIC "us-east-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-us-east-1/common", Map[String,String]()),
# MAGIC "us-east-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-us-east-2/common", Map[String,String]()),
# MAGIC "us-west-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training/common", Map[String,String]()),
# MAGIC "_default" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training/common", Map[String,String]())
# MAGIC )
# MAGIC
# MAGIC MAPPINGS.getOrElse(region, MAPPINGS("_default"))
# MAGIC }
# MAGIC
# MAGIC def getAzureMapping(region:String):(String,Map[String,String]) = {
# MAGIC
# MAGIC var MAPPINGS = Map(
# MAGIC "australiacentral" -> ("dbtrainaustraliasoutheas",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=br8%2B5q2ZI9osspeuPtd3haaXngnuWPnZaHKFoLmr370%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "australiacentral2" -> ("dbtrainaustraliasoutheas",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=br8%2B5q2ZI9osspeuPtd3haaXngnuWPnZaHKFoLmr370%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "australiaeast" -> ("dbtrainaustraliaeast",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=FM6dy59nmw3f4cfN%2BvB1cJXVIVz5069zHmrda5gZGtU%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "australiasoutheast" -> ("dbtrainaustraliasoutheas",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=br8%2B5q2ZI9osspeuPtd3haaXngnuWPnZaHKFoLmr370%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "canadacentral" -> ("dbtraincanadacentral",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=dwAT0CusWjvkzcKIukVnmFPTmi4JKlHuGh9GEx3OmXI%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "canadaeast" -> ("dbtraincanadaeast",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=SYmfKBkbjX7uNDnbSNZzxeoj%2B47PPa8rnxIuPjxbmgk%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "centralindia" -> ("dbtraincentralindia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=afrYm3P5%2BB4gMg%2BKeNZf9uvUQ8Apc3T%2Bi91fo/WOZ7E%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "centralus" -> ("dbtraincentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=As9fvIlVMohuIV8BjlBVAKPv3C/xzMRYR1JAOB%2Bbq%2BQ%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "eastasia" -> ("dbtraineastasia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=sK7g5pki8bE88gEEsrh02VGnm9UDlm55zTfjZ5YXVMc%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "eastus" -> ("dbtraineastus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=tlw5PMp1DMeyyBGTgZwTbA0IJjEm83TcCAu08jCnZUo%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "eastus2" -> ("dbtraineastus2",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=Y6nGRjkVj6DnX5xWfevI6%2BUtt9dH/tKPNYxk3CNCb5A%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "japaneast" -> ("dbtrainjapaneast",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=q6r9MS/PC9KLZ3SMFVYO94%2BfM5lDbAyVsIsbBKEnW6Y%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "japanwest" -> ("dbtrainjapanwest",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=M7ic7/jOsg/oiaXfo8301Q3pt9OyTMYLO8wZ4q8bko8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "northcentralus" -> ("dbtrainnorthcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=GTLU0g3pajgz4dpGUhOpJHBk3CcbCMkKT8wxlhLDFf8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "northcentralus" -> ("dbtrainnorthcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=GTLU0g3pajgz4dpGUhOpJHBk3CcbCMkKT8wxlhLDFf8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "northeurope" -> ("dbtrainnortheurope",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=35yfsQBGeddr%2BcruYlQfSasXdGqJT3KrjiirN/a3dM8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "southcentralus" -> ("dbtrainsouthcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=3cnVg/lzWMx5XGz%2BU4wwUqYHU5abJdmfMdWUh874Grc%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "southcentralus" -> ("dbtrainsouthcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=3cnVg/lzWMx5XGz%2BU4wwUqYHU5abJdmfMdWUh874Grc%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "southindia" -> ("dbtrainsouthindia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=0X0Ha9nFBq8qkXEO0%2BXd%2B2IwPpCGZrS97U4NrYctEC4%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "southeastasia" -> ("dbtrainsoutheastasia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=H7Dxi1yqU776htlJHbXd9pdnI35NrFFsPVA50yRC9U0%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "uksouth" -> ("dbtrainuksouth",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=SPAI6IZXmm%2By/WMSiiFVxp1nJWzKjbBxNc5JHUz1d1g%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "ukwest" -> ("dbtrainukwest",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=olF4rjQ7V41NqWRoK36jZUqzDBz3EsyC6Zgw0QWo0A8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westcentralus" -> ("dbtrainwestcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=UP0uTNZKMCG17IJgJURmL9Fttj2ujegj%2BrFN%2B0OszUE%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westeurope" -> ("dbtrainwesteurope",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=csG7jGsNFTwCArDlsaEcU4ZUJFNLgr//VZl%2BhdSgEuU%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westindia" -> ("dbtrainwestindia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=fI6PNZ7YvDGKjArs1Et2rAM2zgg6r/bsKEjnzQxgGfA%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westus" -> ("dbtrainwestus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=%2B1XZDXbZqnL8tOVsmRtWTH/vbDAKzih5ThvFSZMa3Tc%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westus2" -> ("dbtrainwestus2",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=DD%2BO%2BeIZ35MO8fnh/fk4aqwbne3MAJ9xh9aCIU/HiD4%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "_default" -> ("dbtrainwestus2",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=DD%2BO%2BeIZ35MO8fnh/fk4aqwbne3MAJ9xh9aCIU/HiD4%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z")
# MAGIC )
# MAGIC
# MAGIC val (account: String, sasKey: String) = MAPPINGS.getOrElse(region, MAPPINGS("_default"))
# MAGIC
# MAGIC val blob = "training"
# MAGIC val source = s"wasbs://$blob@$account.blob.core.windows.net/"
# MAGIC val configMap = Map(
# MAGIC s"fs.azure.sas.$blob.$account.blob.core.windows.net" -> sasKey
# MAGIC )
# MAGIC
# MAGIC (source, configMap)
# MAGIC }
# MAGIC
# MAGIC def retryMount(source: String, mountPoint: String): Unit = {
# MAGIC try {
# MAGIC // Mount with IAM roles instead of keys for PVC
# MAGIC dbutils.fs.mount(source, mountPoint)
# MAGIC dbutils.fs.ls(mountPoint) // Test read to confirm successful mount.
# MAGIC } catch {
# MAGIC case e: Exception => throw new RuntimeException(s"*** ERROR: Unable to mount $mountPoint: ${e.getMessage}", e)
# MAGIC }
# MAGIC }
# MAGIC
# MAGIC def mount(source: String, extraConfigs:Map[String,String], mountPoint: String): Unit = {
# MAGIC try {
# MAGIC dbutils.fs.mount(source, mountPoint, extraConfigs=extraConfigs)
# MAGIC dbutils.fs.ls(mountPoint) // Test read to confirm successful mount.
# MAGIC } catch {
# MAGIC case ioe: java.lang.IllegalArgumentException => retryMount(source, mountPoint)
# MAGIC case e: Exception => throw new RuntimeException(s"*** ERROR: Unable to mount $mountPoint: ${e.getMessage}", e)
# MAGIC }
# MAGIC }
# MAGIC
# MAGIC def autoMount(fix:Boolean = false, failFast:Boolean = false, mountPoint:String = "/mnt/training"): Unit = {
# MAGIC val (cloud, region) = cloudAndRegion
# MAGIC spark.conf.set("com.databricks.training.cloud.name", cloud)
# MAGIC spark.conf.set("com.databricks.training.region.name", region)
# MAGIC if (cloud=="AWS") {
# MAGIC val (source, extraConfigs) = getAwsMapping(region)
# MAGIC val resultMsg = mountSource(fix, failFast, mountPoint, source, extraConfigs)
# MAGIC displayHTML(s"Mounting course-specific datasets to <b>$mountPoint</b>...<br/>"+resultMsg)
# MAGIC } else if (cloud=="Azure") {
# MAGIC val (source, extraConfigs) = initAzureDataSource(region)
# MAGIC val resultMsg = mountSource(fix, failFast, mountPoint, source, extraConfigs)
# MAGIC displayHTML(s"Mounting course-specific datasets to <b>$mountPoint</b>...<br/>"+resultMsg)
# MAGIC } else {
# MAGIC val (source, extraConfigs) = ("s3a://databricks-corp-training/common", Map[String,String]())
# MAGIC val resultMsg = mountSource(fix, failFast, mountPoint, source, extraConfigs)
# MAGIC       displayHTML(s"Mounting course-specific datasets to <b>$mountPoint</b>...<br/>"+resultMsg)
# MAGIC }
# MAGIC }
# MAGIC
# MAGIC def initAzureDataSource(azureRegion:String):(String,Map[String,String]) = {
# MAGIC val mapping = getAzureMapping(azureRegion)
# MAGIC val (source, config) = mapping
# MAGIC val (sasEntity, sasToken) = config.head
# MAGIC
# MAGIC val datasource = "%s\t%s\t%s".format(source, sasEntity, sasToken)
# MAGIC spark.conf.set("com.databricks.training.azure.datasource", datasource)
# MAGIC
# MAGIC return mapping
# MAGIC }
# MAGIC
# MAGIC def mountSource(fix:Boolean, failFast:Boolean, mountPoint:String, source:String, extraConfigs:Map[String,String]): String = {
# MAGIC val mntSource = source.replace(awsAuth+"@", "")
# MAGIC
# MAGIC if (dbutils.fs.mounts().map(_.mountPoint).contains(mountPoint)) {
# MAGIC val mount = dbutils.fs.mounts().filter(_.mountPoint == mountPoint).head
# MAGIC if (mount.source == mntSource) {
# MAGIC return s"""Datasets are already mounted to <b>$mountPoint</b>."""
# MAGIC
# MAGIC } else if (failFast) {
# MAGIC throw new IllegalStateException(s"Expected $mntSource but found ${mount.source}")
# MAGIC
# MAGIC } else if (fix) {
# MAGIC println(s"Unmounting existing datasets ($mountPoint from ${mount.source}).")
# MAGIC dbutils.fs.unmount(mountPoint)
# MAGIC mountSource(fix, failFast, mountPoint, source, extraConfigs)
# MAGIC } else {
# MAGIC return s"""<b style="color:red">Invalid Mounts!</b></br>
# MAGIC <ul>
# MAGIC <li>The training datasets you are using are from an unexpected source</li>
# MAGIC <li>Expected <b>$mntSource</b> but found <b>${mount.source}</b></li>
# MAGIC <li>Failure to address this issue may result in significant performance degradation. To address this issue:</li>
# MAGIC <ol>
# MAGIC <li>Insert a new cell after this one</li>
# MAGIC <li>In that new cell, run the command <code style="color:blue; font-weight:bold">%scala fixMounts()</code></li>
# MAGIC <li>Verify that the problem has been resolved.</li>
# MAGIC </ol>"""
# MAGIC }
# MAGIC } else {
# MAGIC println(s"""Mounting datasets to $mountPoint.""")
# MAGIC mount(source, extraConfigs, mountPoint)
# MAGIC     return s"""Mounted datasets to <b>$mountPoint</b> from <b>$mntSource</b>."""
# MAGIC }
# MAGIC }
# MAGIC
# MAGIC def fixMounts(): Unit = {
# MAGIC autoMount(true)
# MAGIC }
# MAGIC
# MAGIC autoMount(true)
| 74.298507 | 188 | 0.650192 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
for stream in spark.streams.active:
stream.stop()
# COMMAND ----------
import pyspark.sql.functions as F
import re
course_name = "dewd"
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
database = f"dbacademy_{clean_username}_{course_name}"
userhome = f"dbfs:/user/{username}/dbacademy/{course_name}"
print(f"""
username: {username}
userhome: {userhome}
database: {database}""")
dbutils.widgets.text("mode", "setup")
mode = dbutils.widgets.get("mode")
if mode == "reset" or mode == "clean":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
if mode != "clean":
spark.sql(f"CREATE DATABASE IF NOT EXISTS {database}")
spark.sql(f"USE {database}")
# COMMAND ----------
# MAGIC %run ./mount-datasets
# COMMAND ----------
sqlContext.setConf("spark.sql.shuffle.partitions", spark.sparkContext.defaultParallelism)
# COMMAND ----------
dataSource = "/mnt/training/healthcare"
dataLandingLocation = userhome + "/source"
bronzePath = userhome + "/bronze"
recordingsParsedPath = userhome + "/silver/recordings_parsed"
recordingsEnrichedPath = userhome + "/silver/recordings_enriched"
dailyAvgPath = userhome + "/gold/dailyAvg"
checkpointPath = userhome + "/checkpoints"
bronzeCheckpoint = userhome + "/checkpoints/bronze"
recordingsParsedCheckpoint = userhome + "/checkpoints/recordings_parsed"
recordingsEnrichedCheckpoint = userhome + "/checkpoints/recordings_enriched"
dailyAvgCheckpoint = userhome + "/checkpoints/dailyAvgPath"
# COMMAND ----------
class FileArrival:
def __init__(self):
self.source = dataSource + "/tracker/streaming/"
self.userdir = dataLandingLocation + "/"
self.curr_mo = 1
def newData(self, continuous=False):
if self.curr_mo > 12:
print("Data source exhausted\n")
        elif continuous:
            # Copy all remaining months of data at once
            while self.curr_mo <= 12:
                curr_file = f"{self.curr_mo:02}.json"
                dbutils.fs.cp(self.source + curr_file, self.userdir + curr_file)
                self.curr_mo += 1
        else:
            # Copy a single month of data per call
            curr_file = f"{self.curr_mo:02}.json"
            dbutils.fs.cp(self.source + curr_file, self.userdir + curr_file)
            self.curr_mo += 1
File = FileArrival()
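# Usage sketch (illustrative, not part of the original setup): lesson notebooks call
# File.newData() to simulate incremental file arrival in the landing directory above.
#   File.newData()                 # copy the next month's JSON file into dataLandingLocation
#   File.newData(continuous=True)  # copy all remaining months at once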
| 27.876543 | 89 | 0.660393 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
class QueryGenerator:
def __init__(self, course, mode="normal"):
import re
import random
self.username = spark.sql("SELECT current_user()").first()[0]
self.userhome = f"dbfs:/user/{self.username}/{course}"
self.database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", self.username)}_{course}"""
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {self.database} CASCADE")
dbutils.fs.rm(self.userhome, True)
def config(self):
print(f"""
CREATE DATABASE {self.database}
LOCATION '{self.userhome}';
USE {self.database};
CREATE TABLE user_ping
(user_id STRING, ping INTEGER, time TIMESTAMP);
CREATE TABLE user_ids (user_id STRING);
INSERT INTO user_ids VALUES
("potato_luver"),
("beanbag_lyfe"),
("default_username"),
("the_king"),
("n00b"),
("frodo"),
("data_the_kid"),
("el_matador"),
("the_wiz");
CREATE FUNCTION get_ping()
RETURNS INT
RETURN int(rand() * 250);
CREATE FUNCTION is_active()
RETURNS BOOLEAN
RETURN CASE
WHEN rand() > .25 THEN true
ELSE false
END;
""")
def load(self):
print(f"""
INSERT INTO {self.database}.user_ping
SELECT *,
{self.database}.get_ping() ping,
current_timestamp() time
FROM {self.database}.user_ids
WHERE {self.database}.is_active()=true;
SELECT * FROM {self.database}.user_ping;
""")
def user_counts(self):
print(f"""
SELECT user_id, count(*) total_records
FROM {self.database}.user_ping
GROUP BY user_id
ORDER BY
total_records DESC,
user_id ASC;
""")
def avg_ping(self):
print(f"""
SELECT user_id, window.end end_time, mean(ping) avg_ping
FROM {self.database}.user_ping
GROUP BY user_id, window(time, '3 minutes')
ORDER BY
end_time DESC,
user_id ASC;
""")
def summary(self):
print(f"""
SELECT user_id, min(time) first_seen, max(time) last_seen, count(*) total_records, avg(ping) total_avg_ping
FROM {self.database}.user_ping
GROUP BY user_id
ORDER BY user_id ASC
""")
| 22.752809 | 107 | 0.612399 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup-updates
# COMMAND ----------
def merge_deduped_users():
spark.sql(f"""
CREATE OR REPLACE TEMP VIEW deduped_users AS
SELECT user_id, user_first_touch_timestamp, max(email) email, max(updated) updated
FROM users_update
GROUP BY user_id, user_first_touch_timestamp
""")
spark.sql(f"""
MERGE INTO users a
USING deduped_users b
ON a.user_id = b.user_id
WHEN MATCHED AND a.email IS NULL AND b.email IS NOT NULL THEN
UPDATE SET email = b.email, updated = b.updated
WHEN NOT MATCHED THEN INSERT *
""")
# COMMAND ----------
def merge_events_update():
spark.sql(f"""
MERGE INTO events a
USING events_update b
ON a.user_id = b.user_id AND a.event_timestamp = b.event_timestamp
WHEN NOT MATCHED AND b.traffic_source = 'email' THEN
INSERT *
""")
# COMMAND ----------
def merge_sales_update():
spark.sql(f"""
COPY INTO sales
FROM "{Paths.source}/sales/sales-30m.parquet"
FILEFORMAT = PARQUET
""")
# COMMAND ----------
merge_deduped_users()
merge_events_update()
merge_sales_update()
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TABLE item_lookup AS
# MAGIC SELECT * FROM parquet.`${c.source}/products/products.parquet`
| 21 | 84 | 0.657622 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
import sys, subprocess, os
subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/databricks-academy/user-setup"])
from dbacademy import LessonConfig
LessonConfig.configure(course_name="Databases Tables and Views on Databricks", use_db=False)
LessonConfig.install_datasets(silent=True)
| 37.888889 | 119 | 0.787966 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup
# COMMAND ----------
dbutils.fs.rm(f"{Paths.source}/sales/sales.csv", True)
dbutils.fs.cp(f"{Paths.source_uri}/sales/sales.csv", f"{Paths.source}/sales/sales.csv", True)
(spark
.read
.format("parquet")
.load(f"{Paths.source}/users/users.parquet")
.repartition(1)
.write
.format("org.apache.spark.sql.jdbc")
.option("url", f"jdbc:sqlite:/{username}_ecommerce.db")
.option("dbtable", "users")
.mode("overwrite")
.save())
| 22.545455 | 96 | 0.628627 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup
# COMMAND ----------
def load_historical():
spark.sql(f"""
CREATE OR REPLACE TABLE events AS
SELECT * FROM parquet.`{Paths.source}/events/events.parquet`
""")
spark.sql(f"""
CREATE OR REPLACE TABLE users AS
SELECT *, current_timestamp() updated FROM parquet.`{Paths.source}/users/users.parquet`
""")
spark.sql(f"""
CREATE OR REPLACE TABLE sales AS
SELECT * FROM parquet.`{Paths.source}/sales/sales.parquet`
""")
# COMMAND ----------
load_historical()
| 19.037037 | 89 | 0.65 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./sql-setup $course="meta" $mode="cleanup"
# COMMAND ----------
URI = "wasbs://[email protected]/databases_tables_and_views_on_databricks/v02"
# COMMAND ----------
dbutils.fs.cp(URI, f"{userhome}/datasets", True)
| 22.833333 | 103 | 0.677193 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup-cleaned
# COMMAND ----------
def create_transactions():
spark.sql(f"""
CREATE OR REPLACE TABLE transactions AS
SELECT * FROM (
SELECT
user_id,
order_id,
transaction_timestamp,
total_item_quantity,
purchase_revenue_in_usd,
unique_items,
a.items_exploded.item_id item_id,
a.items_exploded.quantity quantity
FROM
( SELECT *, explode(items) items_exploded FROM sales ) a
INNER JOIN users b
ON a.email = b.email
) PIVOT (
sum(quantity) FOR item_id in (
'P_FOAM_K',
'M_STAN_Q',
'P_FOAM_S',
'M_PREM_Q',
'M_STAN_F',
'M_STAN_T',
'M_PREM_K',
'M_PREM_F',
'M_STAN_K',
'M_PREM_T',
'P_DOWN_S',
'P_DOWN_K'
)
)
""")
create_transactions()
| 19.636364 | 64 | 0.506064 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup-load
# COMMAND ----------
def load_events_raw():
spark.sql(f"""
CREATE TABLE IF NOT EXISTS events_json
(key BINARY, offset INT, partition BIGINT, timestamp BIGINT, topic STRING, value BINARY)
USING JSON OPTIONS (path = "{Paths.source}/events/events-kafka.json");
""")
spark.sql(f"""
CREATE OR REPLACE TABLE events_raw
(key BINARY, offset BIGINT, partition BIGINT, timestamp BIGINT, topic STRING, value BINARY);
""")
spark.sql(f"""
INSERT INTO events_raw
SELECT * FROM events_json
""")
# COMMAND ----------
# lesson: nested data & advanced transformations
# Last Lab & Writing to Delta
def create_events_update():
spark.sql(f"""
CREATE OR REPLACE TEMP VIEW events_raw_json AS
SELECT from_json(cast(value as STRING), ("device STRING, ecommerce STRUCT< purchase_revenue_in_usd: DOUBLE, total_item_quantity: BIGINT, unique_items: BIGINT>, event_name STRING, event_previous_timestamp BIGINT, event_timestamp BIGINT, geo STRUCT< city: STRING, state: STRING>, items ARRAY< STRUCT< coupon: STRING, item_id: STRING, item_name: STRING, item_revenue_in_usd: DOUBLE, price_in_usd: DOUBLE, quantity: BIGINT>>, traffic_source STRING, user_first_touch_timestamp BIGINT, user_id STRING")) json
FROM events_raw
""")
spark.sql(f"""
CREATE OR REPLACE TEMP VIEW events_update AS
WITH deduped_events_raw AS (
SELECT max(json) json FROM events_raw_json
GROUP BY json.user_id, json.event_timestamp
)
SELECT json.* FROM deduped_events_raw
""")
# COMMAND ----------
# lesson: Writing delta
def create_users_update():
spark.sql(f"""
CREATE OR REPLACE TEMP VIEW users_update AS
SELECT *, current_timestamp() updated
FROM parquet.`{Paths.source}/users/users-30m.parquet`
""")
# COMMAND ----------
load_events_raw()
create_events_update()
create_users_update()
| 28.857143 | 504 | 0.693617 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
import pyspark.sql.functions as F
import re
class BuildEnvironmentVariables:
def __init__(self, username):
self.course_name = "eltsql"
self.source_uri = "wasbs://[email protected]/elt-with-spark-sql/v01"
self.username = username
self.working_dir = f"dbfs:/user/{self.username}/dbacademy/{self.course_name}"
        self.userhome = self.working_dir # TEMPORARY BACKWARDS COMPATIBILITY
clean_username = re.sub("[^a-zA-Z0-9]", "_", self.username)
self.database_name = f"{clean_username}_dbacademy_{self.course_name}"
self.database_location = f"{self.working_dir}/db"
self.source = f"{self.working_dir}/source_datasets"
        self.base_path = f"{self.working_dir}/tables"
self.sales_table_path = f"{self.base_path}/sales"
self.users_table_path = f"{self.base_path}/users"
self.events_raw_table_path = f"{self.base_path}/events_raw"
self.events_clean_table_path = f"{self.base_path}/events_clean"
self.transactions_table_path = f"{self.base_path}/transactions"
self.clickpaths_table_path = f"{self.base_path}/clickpaths"
def set_hive_variables(self):
for (k, v) in self.__dict__.items():
spark.sql(f"SET c.{k} = {v}")
def __repr__(self):
return self.__dict__.__repr__().replace(", ", ",\n")
# COMMAND ----------
username = spark.sql("SELECT current_user()").first()[0]
dbacademy_env = BuildEnvironmentVariables(username)
Paths = dbacademy_env # Temporary backwards compatibility
# Hack for backwards compatibility
username = dbacademy_env.username
database = dbacademy_env.database_name
userhome = dbacademy_env.working_dir
print(f"username: {username}")
print(f"database: {database}")
print(f"userhome: {userhome}")
# print(f"dbacademy_env: Databricks Academy configuration object")
# print(f"dbacademy_env.username: {dbacademy_env.username}")
# print(f"dbacademy_env.database_name: {dbacademy_env.database_name}")
# print(f"dbacademy_env.working_dir: {dbacademy_env.working_dir}")
# COMMAND ----------
def path_exists(path):
    try:
        # ls() raises if the path does not exist, so any successful listing means it exists
        dbutils.fs.ls(path)
        return True
    except Exception:
        return False
dbutils.widgets.text("mode", "default")
mode = dbutils.widgets.get("mode")
if mode == "reset" or mode == "cleanup":
# Drop the database and remove all data for both reset and cleanup
print(f"Removing the database {database}")
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
print(f"Removing previously generated datasets from\n{dbacademy_env.working_dir}")
dbutils.fs.rm(dbacademy_env.working_dir, True)
if mode != "cleanup":
# We are not cleaning up so we want to setup the environment
# RESET is in case we want to force a reset
# not-existing for net-new install
if mode == "reset" or not path_exists(dbacademy_env.source):
print(f"\nInstalling datasets to\n{dbacademy_env.source}")
print(f"""\nNOTE: The datasets that we are installing are located in Washington, USA - depending on the
region that your workspace is in, this operation can take as little as 3 minutes and
              upwards of 6 minutes, but this is a one-time operation.""")
dbutils.fs.cp(dbacademy_env.source_uri, dbacademy_env.source, True)
print(f"""\nThe install of the datasets completed successfully.""")
# Create the database and use it.
spark.sql(f"CREATE DATABASE IF NOT EXISTS {dbacademy_env.database_name} LOCATION '{dbacademy_env.database_location}'")
spark.sql(f"USE {dbacademy_env.database_name}")
# Once the database is created, init the hive variables
dbacademy_env.set_hive_variables()
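# Usage note (the ./setup path below is an assumption about how this file is named):
# lesson notebooks can pass the "mode" widget through %run, following the same
# convention used elsewhere in this course:
#   %run ./setup $mode="reset"    (drop the database, remove generated data, reinstall datasets)
#   %run ./setup $mode="cleanup"  (drop the database and remove generated data only)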
| 37.585859 | 122 | 0.665357 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
import pyspark.sql.functions as F
import re
dbutils.widgets.text("course", "dewd")
course = dbutils.widgets.get("course")
username = spark.sql("SELECT current_user()").collect()[0][0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""{course}_{re.sub("[^a-zA-Z0-9]", "_", username)}_db"""
print(f"""
username: {username}
userhome: {userhome}
database: {database}""")
spark.sql(f"SET c.username = {username}")
spark.sql(f"SET c.userhome = {userhome}")
spark.sql(f"SET c.database = {database}")
dbutils.widgets.text("mode", "setup")
mode = dbutils.widgets.get("mode")
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
spark.sql(f"CREATE DATABASE IF NOT EXISTS {database} LOCATION '{userhome}'")
spark.sql(f"USE {database}")
if mode == "cleanup":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
| 28.060606 | 80 | 0.683716 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md # Project Information
# MAGIC
# MAGIC * Name: **Data Engineering with Databricks**
# MAGIC * Version: **beta.2**
# MAGIC * Built On: **Jan 19, 2022 at 14:41:04 UTC**
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 40.695652 | 192 | 0.679541 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md
# MAGIC # Population Heatmap by State
# MAGIC Using *uszips.csv* as a data source, aggregate the populations by state.
# MAGIC Note: data file provided courtesy of SimpleMaps (https://simplemaps.com/data/us-zips)
# COMMAND ----------
# MAGIC %md
# MAGIC Source a Notebook to configure the table name. If `my_name` resides in a different relative path, then adjust the code in **Cmd 3** accordingly.
# COMMAND ----------
# MAGIC %run ./my_name
# COMMAND ----------
# MAGIC %md
# MAGIC Query the table that was named in the `my_name` Notebook. Aggregate the population, grouping by state. For maximum effectiveness, select the **Map** plot to visualize the output.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT `state_id` AS `state`,SUM(`population`) AS `population`
# MAGIC FROM ${conf.name}
# MAGIC WHERE `state_id` NOT IN ('AS','GU','MP','PR','VI')
# MAGIC GROUP BY `state`
| 32.142857 | 186 | 0.686084 |
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | import sys;
import os;
ignore_folders = ['__pycache__', '.ipynb_checkpoints']
if len(sys.argv) != 2:
print("Root directory is required")
exit()
root_directory = sys.argv[1]
print(f"Deploying all Snowpark apps in root directory {root_directory}")
# Walk the entire directory structure recursively
for (directory_path, directory_names, file_names) in os.walk(root_directory):
# Get just the last/final folder name in the directory path
base_name = os.path.basename(directory_path)
# Skip any folders we want to ignore
if base_name in ignore_folders:
# print(f"Skipping ignored folder {directory_path}")
continue
# An app.toml file in the folder is our indication that this folder contains
# a snowcli Snowpark App
if not "app.toml" in file_names:
# print(f"Skipping non-app folder {directory_path}")
continue
# Next determine what type of app it is
app_type = "unknown"
if "local_connection.py" in file_names:
app_type = "procedure"
else:
app_type = "function"
# Finally deploy the app with the snowcli tool
print(f"Found {app_type} app in folder {directory_path}")
print(f"Calling snowcli to deploy the {app_type} app")
os.chdir(f"{directory_path}")
# snow login will update the app.toml file with the correct path to the snowsql config file
os.system(f"snow login -c {root_directory}/config -C dev")
os.system(f"snow {app_type} create")
| 33.232558 | 95 | 0.681849 |
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | #------------------------------------------------------------------------------
# Hands-On Lab: Data Engineering with Snowpark
# Script: 02_load_raw.py
# Author: Jeremiah Hansen, Caleb Baechtold
# Last Updated: 1/9/2023
#------------------------------------------------------------------------------
import time
from snowflake.snowpark import Session
#import snowflake.snowpark.types as T
#import snowflake.snowpark.functions as F
POS_TABLES = ['country', 'franchise', 'location', 'menu', 'truck', 'order_header', 'order_detail']
CUSTOMER_TABLES = ['customer_loyalty']
TABLE_DICT = {
"pos": {"schema": "RAW_POS", "tables": POS_TABLES},
"customer": {"schema": "RAW_CUSTOMER", "tables": CUSTOMER_TABLES}
}
# SNOWFLAKE ADVANTAGE: Schema detection
# SNOWFLAKE ADVANTAGE: Data ingestion with COPY
# SNOWFLAKE ADVANTAGE: Snowflake Tables (not file-based)
def load_raw_table(session, tname=None, s3dir=None, year=None, schema=None):
session.use_schema(schema)
if year is None:
location = "@external.frostbyte_raw_stage/{}/{}".format(s3dir, tname)
else:
print('\tLoading year {}'.format(year))
location = "@external.frostbyte_raw_stage/{}/{}/year={}".format(s3dir, tname, year)
# we can infer schema using the parquet read option
df = session.read.option("compression", "snappy") \
.parquet(location)
df.copy_into_table("{}".format(tname))
# SNOWFLAKE ADVANTAGE: Warehouse elasticity (dynamic scaling)
def load_all_raw_tables(session):
_ = session.sql("ALTER WAREHOUSE HOL_WH SET WAREHOUSE_SIZE = XLARGE WAIT_FOR_COMPLETION = TRUE").collect()
for s3dir, data in TABLE_DICT.items():
tnames = data['tables']
schema = data['schema']
for tname in tnames:
print("Loading {}".format(tname))
# Only load the first 3 years of data for the order tables at this point
# We will load the 2022 data later in the lab
if tname in ['order_header', 'order_detail']:
for year in ['2019', '2020', '2021']:
load_raw_table(session, tname=tname, s3dir=s3dir, year=year, schema=schema)
else:
load_raw_table(session, tname=tname, s3dir=s3dir, schema=schema)
_ = session.sql("ALTER WAREHOUSE HOL_WH SET WAREHOUSE_SIZE = XSMALL").collect()
def validate_raw_tables(session):
# check column names from the inferred schema
for tname in POS_TABLES:
print('{}: \n\t{}\n'.format(tname, session.table('RAW_POS.{}'.format(tname)).columns))
for tname in CUSTOMER_TABLES:
print('{}: \n\t{}\n'.format(tname, session.table('RAW_CUSTOMER.{}'.format(tname)).columns))
# For local debugging
if __name__ == "__main__":
# Add the utils package to our path and import the snowpark_utils function
import os, sys
current_dir = os.getcwd()
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)
from utils import snowpark_utils
session = snowpark_utils.get_snowpark_session()
load_all_raw_tables(session)
# validate_raw_tables(session)
session.close()
| 37.378049 | 110 | 0.617292 |
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | #------------------------------------------------------------------------------
# Hands-On Lab: Data Engineering with Snowpark
# Script: 04_create_order_view.py
# Author: Jeremiah Hansen, Caleb Baechtold
# Last Updated: 1/9/2023
#------------------------------------------------------------------------------
# SNOWFLAKE ADVANTAGE: Snowpark DataFrame API
# SNOWFLAKE ADVANTAGE: Streams for incremental processing (CDC)
# SNOWFLAKE ADVANTAGE: Streams on views
from snowflake.snowpark import Session
#import snowflake.snowpark.types as T
import snowflake.snowpark.functions as F
def create_pos_view(session):
session.use_schema('HARMONIZED')
order_detail = session.table("RAW_POS.ORDER_DETAIL").select(F.col("ORDER_DETAIL_ID"), \
F.col("LINE_NUMBER"), \
F.col("MENU_ITEM_ID"), \
F.col("QUANTITY"), \
F.col("UNIT_PRICE"), \
F.col("PRICE"), \
F.col("ORDER_ID"))
order_header = session.table("RAW_POS.ORDER_HEADER").select(F.col("ORDER_ID"), \
F.col("TRUCK_ID"), \
F.col("ORDER_TS"), \
F.to_date(F.col("ORDER_TS")).alias("ORDER_TS_DATE"), \
F.col("ORDER_AMOUNT"), \
F.col("ORDER_TAX_AMOUNT"), \
F.col("ORDER_DISCOUNT_AMOUNT"), \
F.col("LOCATION_ID"), \
F.col("ORDER_TOTAL"))
truck = session.table("RAW_POS.TRUCK").select(F.col("TRUCK_ID"), \
F.col("PRIMARY_CITY"), \
F.col("REGION"), \
F.col("COUNTRY"), \
F.col("FRANCHISE_FLAG"), \
F.col("FRANCHISE_ID"))
menu = session.table("RAW_POS.MENU").select(F.col("MENU_ITEM_ID"), \
F.col("TRUCK_BRAND_NAME"), \
F.col("MENU_TYPE"), \
F.col("MENU_ITEM_NAME"))
franchise = session.table("RAW_POS.FRANCHISE").select(F.col("FRANCHISE_ID"), \
F.col("FIRST_NAME").alias("FRANCHISEE_FIRST_NAME"), \
F.col("LAST_NAME").alias("FRANCHISEE_LAST_NAME"))
location = session.table("RAW_POS.LOCATION").select(F.col("LOCATION_ID"))
'''
We can do this one of two ways: either select before the join so it is more explicit, or just join on the full tables.
    The end result is the same, it's mostly a readability question.
'''
# order_detail = session.table("RAW_POS.ORDER_DETAIL")
# order_header = session.table("RAW_POS.ORDER_HEADER")
# truck = session.table("RAW_POS.TRUCK")
# menu = session.table("RAW_POS.MENU")
# franchise = session.table("RAW_POS.FRANCHISE")
# location = session.table("RAW_POS.LOCATION")
t_with_f = truck.join(franchise, truck['FRANCHISE_ID'] == franchise['FRANCHISE_ID'], rsuffix='_f')
oh_w_t_and_l = order_header.join(t_with_f, order_header['TRUCK_ID'] == t_with_f['TRUCK_ID'], rsuffix='_t') \
.join(location, order_header['LOCATION_ID'] == location['LOCATION_ID'], rsuffix='_l')
final_df = order_detail.join(oh_w_t_and_l, order_detail['ORDER_ID'] == oh_w_t_and_l['ORDER_ID'], rsuffix='_oh') \
.join(menu, order_detail['MENU_ITEM_ID'] == menu['MENU_ITEM_ID'], rsuffix='_m')
final_df = final_df.select(F.col("ORDER_ID"), \
F.col("TRUCK_ID"), \
F.col("ORDER_TS"), \
F.col('ORDER_TS_DATE'), \
F.col("ORDER_DETAIL_ID"), \
F.col("LINE_NUMBER"), \
F.col("TRUCK_BRAND_NAME"), \
F.col("MENU_TYPE"), \
F.col("PRIMARY_CITY"), \
F.col("REGION"), \
F.col("COUNTRY"), \
F.col("FRANCHISE_FLAG"), \
F.col("FRANCHISE_ID"), \
F.col("FRANCHISEE_FIRST_NAME"), \
F.col("FRANCHISEE_LAST_NAME"), \
F.col("LOCATION_ID"), \
F.col("MENU_ITEM_ID"), \
F.col("MENU_ITEM_NAME"), \
F.col("QUANTITY"), \
F.col("UNIT_PRICE"), \
F.col("PRICE"), \
F.col("ORDER_AMOUNT"), \
F.col("ORDER_TAX_AMOUNT"), \
F.col("ORDER_DISCOUNT_AMOUNT"), \
F.col("ORDER_TOTAL"))
final_df.create_or_replace_view('POS_FLATTENED_V')
def create_pos_view_stream(session):
session.use_schema('HARMONIZED')
_ = session.sql('CREATE OR REPLACE STREAM POS_FLATTENED_V_STREAM \
ON VIEW POS_FLATTENED_V \
SHOW_INITIAL_ROWS = TRUE').collect()
def test_pos_view(session):
session.use_schema('HARMONIZED')
tv = session.table('POS_FLATTENED_V')
tv.limit(5).show()
# For local debugging
if __name__ == "__main__":
# Add the utils package to our path and import the snowpark_utils function
import os, sys
current_dir = os.getcwd()
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)
from utils import snowpark_utils
session = snowpark_utils.get_snowpark_session()
create_pos_view(session)
create_pos_view_stream(session)
# test_pos_view(session)
session.close()
| 51.894309 | 122 | 0.421983 |