DAG: audience-publisher_v003

schedule: None


audience-publisher_v003

Toggle wrap
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from airflow import DAG
from template.dag_template import build_dag
from airflow.utils.dates import days_ago
from template.utils import set_config_variable_in_emr_steps
from airflow.models import Variable

# DAG-specific parameters, sourced from Airflow Variables at parse time.
config = Variable.get("audience_publisher_conf", deserialize_json=True)

# Pull the fields the cluster/step templates need and derive lowercase variants.
MEDIA_OWNER = config['MEDIA_OWNER']
AUD_VERSION = config['AUD_VERSION']
config['AUD_TYPE_LOWER'] = config['AUD_TYPE'].lower()

ENV = Variable.get("env")
config['ENV'] = ENV.lower()

cluster_name = f"audience-publisher-{MEDIA_OWNER}-{AUD_VERSION}"
dag_id = 'audience-publisher_v003'


def step_json_str(FRAME_TYPE):
    """Render the JSON definition (as a string) for one EMR publisher step.

    FRAME_TYPE ('static' or 'dynamic' at the call sites) is spliced into the
    step name and the audience input path.  The ``$NAME`` tokens are left in
    place; they are presumably resolved later by
    ``set_config_variable_in_emr_steps`` — the raw string is returned as-is.
    """
    template = """{
        "step-name": "$AUD_TYPE_LOWER-audience-publisher-<FRAME_TYPE>",
        "config-json": [
            {"spark.app.audience.$AUD_TYPE_LOWER.path": "$AUDIENCE_BASE_PATH/<FRAME_TYPE>"},
            {"spark.app.audience.category.path": "$AUDIENCE_BASE_PATH/CategoryIndex"},
            {"spark.app.audience.$AUD_TYPE_LOWER.kafka.topic": "$KAFKA_TOPIC"},
            {"spark.app.audience.header.market": "$MEDIA_OWNER"},
            {"spark.app.audience.category.header.market": "$MEDIA_OWNER"},
            {"spark.app.audience.category.header.name": "$AUD_TYPEAudienceCategory"},
            {"spark.app.audience.header.name": "$AUD_TYPEAudience"},
            {"spark.app.audience.$AUD_TYPE_LOWER.version": "$AUD_VERSION"},
            {"spark.executor.memoryOverhead": "2500"},
            {"spark.app.env": "$ENV"},
            {"spark.sql.sources.partitionColumnTypeInference.enabled": false},
            {"spark.app.audience.header.version": "v1"},
            {"spark.app.audience.category.header.version": "v1"},
            {"spark.driver.extraClassPath": "/usr/share/aws/emr/emrfs/lib/emrfs-hadoop-assembly-2.30.0.jar"}
        ],
        "main-class": "com.viooh.audience.sink.$AUD_TYPE_LOWER.$AUD_TYPEAudienceSinkV2",
        "artifact": "audience-publisher"
    }"""
    # Plain token replacement (not str.format) so the literal JSON braces
    # need no escaping.
    return template.replace("<FRAME_TYPE>", FRAME_TYPE)


# JSON array (as a raw string) holding one publisher step per frame type.
emr_steps = "[" + step_json_str('static') + "," + step_json_str('dynamic') + "]"

# cluster level parameters (optional)
# NOTE(review): key semantics are defined by template.dag_template.build_dag —
# the values below are passed through unchanged; confirm against the template.
cluster_args = {
    "cluster-name": cluster_name,
    "audience-config-file": "audience_publisher_conf",
    "master-instance-types": "m5.2xlarge,m5.4xlarge",
    "core-instance-types": "m5.2xlarge,m5.4xlarge",
    "task-instance-types": "m5.2xlarge,m5.4xlarge",
    "core-instance-capacity": 5,
    "task-instance-capacity": 0,
    "emr-version": "emr-5.21.0"
}

# set config variables in emr-steps — presumably substitutes the $NAME
# placeholders in the step JSON with values from `config`; verify against
# template.utils.set_config_variable_in_emr_steps.
emr_steps = set_config_variable_in_emr_steps(emr_steps, config)

# Default arguments applied to every task in the DAG.
dag_args = {
    'owner': 'data.engineers@viooh.com',
    'start_date': days_ago(1),
}

# schedule_interval=None: the DAG only runs when triggered manually/externally.
dag = DAG(
    dag_id,
    default_args=dag_args,
    schedule_interval=None,
)

# Delegate task construction to the shared DAG template.
build_dag(emr_steps=emr_steps, dag=dag, cluster_args=cluster_args)