from airflow import DAG
from airflow.models import Variable
from airflow.utils.dates import days_ago

from template.dag_template import build_dag
from template.utils import set_config_variable_in_emr_steps
# DAG-specific parameters, read from Airflow Variables
config = Variable.get("audience_formatter_conf", deserialize_json=True)
ENV = Variable.get("env")
config['ENV'] = ENV.lower()  # normalise the environment name before substitution
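# The "audience_formatter_conf" Variable is expected to supply every
# $PLACEHOLDER referenced in emr_steps below. Illustrative shape only;
# the values here are hypothetical:
#   {
#     "MEDIA_OWNER": "some-media-owner",
#     "AUD_VERSION": "v1",
#     "MARKET_CAT_ID": "123",
#     "PARTITIONS": "200",
#     "INPUT_PATH": "s3://some-bucket/input/",
#     "OUTPUT_PATH": "s3://some-bucket/output/"
#   }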
MEDIA_OWNER = config['MEDIA_OWNER']
AUD_VERSION = config['AUD_VERSION']
cluster_name = f'audience-formatter-{MEDIA_OWNER}-{AUD_VERSION}'
dag_id = 'audience-formatter_v003'
# EMR step definition; $PLACEHOLDER tokens are substituted from `config` below,
# before the DAG is built.
emr_steps = """[
    {
        "step-name": "audience-formatter",
        "config-json": [
            {"spark.yarn.executor.memoryOverhead": "2g"},
            {"spark.app.env": "$ENV"},
            {"spark.sql.sources.partitionColumnTypeInference.enabled": "false"},
            {"spark.driver.extraClassPath": "/usr/share/aws/emr/emrfs/lib/emrfs-hadoop-assembly-2.30.0.jar"}
        ],
        "main-class": "audience_formatter_job.core",
        "parameters": ["--audience-version", "$AUD_VERSION",
                       "--market-cat-id", "$MARKET_CAT_ID",
                       "--market-name", "$MEDIA_OWNER",
                       "--partitions", "$PARTITIONS",
                       "--input", "$INPUT_PATH",
                       "--output", "$OUTPUT_PATH"],
        "artifact": "audience-formatter-job"
    }
]"""
# Cluster-level parameters (optional)
cluster_args = {
"cluster-name": cluster_name,
"audience-config-file": "audience_formatter_conf",
"master-instance-types": "m5.2xlarge,m5.4xlarge",
"core-instance-types": "m5.2xlarge,m5.4xlarge",
"task-instance-types": "m5.2xlarge,m5.4xlarge",
"core-instance-capacity": 5,
"task-instance-capacity": 0,
"emr-version": "emr-5.21.0"
}
# Resolve the $PLACEHOLDER variables in the EMR steps template from `config`
emr_steps = set_config_variable_in_emr_steps(emr_steps, config)
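# A minimal sketch of what set_config_variable_in_emr_steps is assumed to do:
# plain "$KEY" -> str(value) substitution over the JSON template (the actual
# implementation lives in template/utils.py):
#
#     def set_config_variable_in_emr_steps(steps, conf):
#         for key, value in conf.items():
#             steps = steps.replace('$' + key, str(value))
#         return steps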
# Default arguments applied to every task in the DAG
dag_args = {
'owner': 'data.engineers@viooh.com',
'start_date': days_ago(1)
}
dag = DAG(
dag_id,
    schedule_interval=None,  # no schedule: triggered manually (or set a cron expression)
default_args=dag_args)
build_dag(emr_steps=emr_steps, dag=dag, cluster_args=cluster_args)
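# build_dag (from template.dag_template) is assumed to attach the usual EMR
# task chain (create cluster -> add steps -> monitor -> terminate) to `dag`
# using `emr_steps` and `cluster_args`; see template/dag_template.py for the
# actual wiring.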