Walkthrough of Refactored Code

Project Code

resources/citibike_etl_pipeline_py.yml

resources:
  jobs:
    citibike_etl_pipeline_py:
      name: citibike_etl_pipeline_py
      tasks:
        - task_key: 01_bronze_citibike
          spark_python_task:
            python_file: ../citibike_etl/scripts/01_bronze/01_bronze_citibike.py
            parameters:
              - "{{job.id}}"
              - "{{job.run_id}}"
              - "{{task.run_id}}"
              - "{{job.start_time.iso_datetime}}"
              - "${var.catalog}"
          job_cluster_key: ds3_v2_sn
        - task_key: 02_silver_citibike
          depends_on:
            - task_key: 01_bronze_citibike
          spark_python_task:
            python_file: ../citibike_etl/scripts/02_silver/02_silver_citibike.py
            parameters:
              - "{{job.id}}"
              - "{{job.run_id}}"
              - "{{task.run_id}}"
              - "{{job.start_time.iso_datetime}}"
              - "${var.catalog}"
          job_cluster_key: ds3_v2_sn
        - task_key: 03_gold_citibike_daily_ride_summary
          depends_on:
            - task_key: 02_silver_citibike
          spark_python_task:
            python_file: ../citibike_etl/scripts/03_gold/03_gold_citibike_daily_ride_summary.py
            parameters:
              - "${var.catalog}"
          job_cluster_key: ds3_v2_sn
        - task_key: 03_gold_citibike_daily_station_performance
          depends_on:
            - task_key: 02_silver_citibike
          spark_python_task:
            python_file: ../citibike_etl/scripts/03_gold/03_gold_citibike_daily_station_performance.py
            parameters:
              - "${var.catalog}"
          job_cluster_key: ds3_v2_sn
      job_clusters:
        - job_cluster_key: ds3_v2_sn
          new_cluster: "${var.ds3_v2_sn}"
      queue:
        enabled: true
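
The task parameters above mix two kinds of placeholders: Databricks dynamic value references ({{job.id}}, {{job.run_id}}, {{task.run_id}}, {{job.start_time.iso_datetime}}), which the jobs service resolves at run time, and bundle variables (${var.catalog}, ${var.ds3_v2_sn}), which are resolved when the bundle is deployed. The variables are expected to be declared in the bundle's databricks.yml. A minimal sketch of such a declaration follows; the names match the references above, but the description text, catalog default, and cluster spec are illustrative assumptions, not the project's actual settings:

# databricks.yml (hypothetical sketch; the real bundle config defines these)
variables:
  catalog:
    description: Target Unity Catalog name
    default: citibike_dev
  ds3_v2_sn:
    description: Single-node job cluster spec
    type: complex
    default:
      spark_version: "15.4.x-scala2.12"
      node_type_id: "Standard_DS3_v2"
      num_workers: 0
      spark_conf:
        spark.databricks.cluster.profile: singleNode
        spark.master: "local[*]"
      custom_tags:
        ResourceClass: SingleNode

With the variables in place, the bundle is deployed with `databricks bundle deploy` and the job launched with `databricks bundle run citibike_etl_pipeline_py` for the chosen target.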

citibike_etl/scripts/01_bronze/01_bronze_citibike.py

import sys

from pyspark.sql import SparkSession
from pyspark.sql.functions import create_map, lit
from pyspark.sql.types import StructType, StructField, StringType, DecimalType, TimestampType

# Positional arguments wired in from the job task definition above.
pipeline_id = sys.argv[1]
run_id = sys.argv[2]
task_id = sys.argv[3]
processed_timestamp = sys.argv[4]
catalog = sys.argv[5]

# Python scripts, unlike notebooks, do not get a predefined `spark` variable;
# getOrCreate() attaches to the cluster's active session.
spark = SparkSession.builder.getOrCreate()

schema = StructType([
    StructField("ride_id", StringType(), True),
    StructField("rideable_type", StringType(), True),
    StructField("started_at", TimestampType(), True),
    StructField("ended_at", TimestampType(), True),
    StructField("start_station_name", StringType(), True), 
    StructField("start_station_id", StringType(), True),   
    StructField("end_station_name", StringType(), True), 
    StructField("end_station_id", StringType(), True), 
    StructField("start_lat", DecimalType(), True), 
    StructField("start_lng", DecimalType(), True), 
    StructField("end_lat", DecimalType(), True), 
    StructField("end_lng", DecimalType(), True), 
    StructField("member_casual", StringType(), True), 
])

df = spark.read.csv(f"/Volumes/{catalog}/00_landing/source_citibike_data/JC-202503-citibike-tripdata.csv", schema=schema, header=True)

df = df.withColumn("metadata", 
              create_map(
                  lit("pipeline_id"), lit(pipeline_id),
                  lit("run_id"), lit(run_id),
                  lit("task_id"), lit(task_id),
                  lit("processed_date"), lit(processed_timestamp)
                  ))

(
    df.write
    .mode("overwrite")
    .option("overwriteSchema", "true")
    .saveAsTable(f"{catalog}.01_bronze.jc_citibike")
)

citibike_etl/scripts/02_silver/02_silver_citibike.py

import os
import sys

# Make the bundle's src/ package importable. Resolving the project root from
# __file__ (rather than the current working directory) keeps the import stable
# no matter where the task process is launched from.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.append(project_root)

from pyspark.sql import SparkSession
from pyspark.sql.functions import create_map, lit

from src.citibike.citibike_utils import get_trip_duration_mins
from src.utils.datetime_utils import timestamp_to_date_col

# Positional arguments wired in from the job task definition above.
pipeline_id = sys.argv[1]
run_id = sys.argv[2]
task_id = sys.argv[3]
processed_timestamp = sys.argv[4]
catalog = sys.argv[5]

spark = SparkSession.builder.getOrCreate()

df = spark.read.table(f"{catalog}.01_bronze.jc_citibike")

# Derived columns come from the shared src/ helper modules.
df = get_trip_duration_mins(spark, df, "started_at", "ended_at", "trip_duration_mins")
df = timestamp_to_date_col(spark, df, "started_at", "trip_start_date")

df = df.withColumn("metadata", 
              create_map(
                  lit("pipeline_id"), lit(pipeline_id),
                  lit("run_id"), lit(run_id),
                  lit("task_id"), lit(task_id),
                  lit("processed_date"), lit(processed_timestamp)
                  ))

df = df.select(
    "ride_id",
    "trip_start_date",
    "started_at",
    "ended_at",
    "start_station_name",
    "end_station_name",
    "trip_duration_mins",
    "metadata",
)

(
    df.write
    .mode("overwrite")
    .option("overwriteSchema", "true")
    .saveAsTable(f"{catalog}.02_silver.jc_citibike")
)

citibike_etl/scripts/03_gold/03_gold_citibike_daily_ride_summary.py

import sys

from pyspark.sql import SparkSession
# Import via the functions namespace so Python's built-in max/min/round
# are not shadowed.
from pyspark.sql import functions as F

catalog = sys.argv[1]

spark = SparkSession.builder.getOrCreate()

df = spark.read.table(f"{catalog}.02_silver.jc_citibike")

df = df.groupBy("trip_start_date").agg(
    round(max("trip_duration_mins"),2).alias("max_trip_duration_mins"),
    round(min("trip_duration_mins"),2).alias("min_trip_duration_mins"),
    round(avg("trip_duration_mins"),2).alias("avg_trip_duration_mins"),
    count("ride_id").alias("total_trips")
)

(
    df.write
    .mode("overwrite")
    .option("overwriteSchema", "true")
    .saveAsTable(f"{catalog}.03_gold.daily_ride_summary")
)

citibike_etl/scripts/03_gold/03_gold_citibike_daily_station_performance.py

import sys

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

catalog = sys.argv[1]

spark = SparkSession.builder.getOrCreate()

df = spark.read.table(f"{catalog}.02_silver.jc_citibike")

# Daily station performance: one row per (trip_start_date, start_station_name).
df = df.groupBy("trip_start_date", "start_station_name").agg(
    F.round(F.avg("trip_duration_mins"), 2).alias("avg_trip_duration_mins"),
    F.count("ride_id").alias("total_trips"),
)

(
    df.write
    .mode("overwrite")
    .option("overwriteSchema", "true")
    .saveAsTable(f"{catalog}.03_gold.daily_station_performance")
)