# config.yml.sample
aws:
  # Credentials can be hardcoded or set in environment variables
  access_key_id: <%= ENV['AWS_SNOWPLOW_ACCESS_KEY'] %>
  secret_access_key: <%= ENV['AWS_SNOWPLOW_SECRET_KEY'] %>
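  # A minimal sketch of supplying these via the environment before running
  # EmrEtlRunner (the variable names come from above; the key values are
  # hypothetical placeholders):
  #   export AWS_SNOWPLOW_ACCESS_KEY=AKIAXXXXXXXXXXXXXXXX
  #   export AWS_SNOWPLOW_SECRET_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx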
  s3:
    region: ADD HERE
    buckets:       # A filled-in sketch follows at the end of this block
      assets: s3://snowplow-hosted-assets # DO NOT CHANGE unless you are hosting the jarfiles etc yourself in your own bucket
      jsonpath_assets: # If you have defined your own JSON Schemas, add the s3:// path to your own JSON Path files in your own bucket here
      log: ADD HERE
      raw:
        in: # Multiple in buckets are permitted
          - ADD HERE # e.g. s3://my-in-bucket
          - ADD HERE
        processing: ADD HERE
        archive: ADD HERE # e.g. s3://my-archive-bucket/raw
      enriched:
        good: ADD HERE # e.g. s3://my-out-bucket/enriched/good
        bad: ADD HERE # e.g. s3://my-out-bucket/enriched/bad
        errors: ADD HERE # Leave blank unless continue_on_unexpected_error is set to true below
        archive: ADD HERE # Where to archive enriched events to, e.g. s3://my-archive-bucket/enriched
      shredded:
        good: ADD HERE # e.g. s3://my-out-bucket/shredded/good
        bad: ADD HERE # e.g. s3://my-out-bucket/shredded/bad
        errors: ADD HERE # Leave blank unless continue_on_unexpected_error is set to true below
        archive: ADD HERE # Where to archive shredded events to, e.g. s3://my-archive-bucket/shredded
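      # For illustration only -- a hypothetical filled-in layout, with raw
      # collector logs landing in one bucket and pipeline output kept in
      # others (substitute your own bucket names):
      #   log: s3://acme-snowplow-etl/logs
      #   raw:
      #     in:
      #       - s3://acme-snowplow-logs
      #     processing: s3://acme-snowplow-etl/processing
      #     archive: s3://acme-snowplow-archive/raw
      #   enriched:
      #     good: s3://acme-snowplow-data/enriched/good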
  emr:
    ami_version: 4.5.0
    region: ADD HERE # Always set this
    jobflow_role: EMR_EC2_DefaultRole # Created using $ aws emr create-default-roles
    service_role: EMR_DefaultRole # Created using $ aws emr create-default-roles
    placement: ADD HERE # Set this if not running in VPC. Leave blank otherwise
    ec2_subnet_id: ADD HERE # Set this if running in VPC. Leave blank otherwise
    ec2_key_name: ADD HERE
    bootstrap: [] # Set this to specify custom bootstrap actions. Leave empty otherwise
    software:
      hbase: # Optional. To launch on the cluster, provide a version, e.g. "0.92.0" (keep the quotes). Leave empty otherwise
      lingual: # Optional. To launch on the cluster, provide a version, e.g. "1.1" (keep the quotes). Leave empty otherwise
    # Adjust your Hadoop cluster below
    jobflow:
      master_instance_type: m1.medium
      core_instance_count: 2
      core_instance_type: m1.medium
      task_instance_count: 0 # Increase to use spot instances (see the sketch below)
      task_instance_type: m1.medium
      task_instance_bid: 0.015 # In USD. Adjust bid, or leave blank for non-spot-priced (i.e. on-demand) task instances
    bootstrap_failure_tries: 3 # Number of times to attempt the job in the event of bootstrap failures
    additional_info: # Optional JSON string for selecting additional features
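    # For illustration only -- a hypothetical spot-instance setup for the
    # jobflow above (the count is made up and the bid simply reuses the
    # sample value; check current spot pricing for your region):
    #   jobflow:
    #     task_instance_count: 2
    #     task_instance_bid: 0.015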
collectors:
  format: cloudfront # For example: 'clj-tomcat' for the Clojure Collector, 'thrift' for Thrift records, 'tsv/com.amazon.aws.cloudfront/wd_access_log' for CloudFront access logs or 'ndjson/urbanairship.connect/v1' for UrbanAirship Connect events
enrich:
  job_name: Snowplow ETL # Give your job a name
  versions:
    hadoop_enrich: 1.8.0 # Version of the Hadoop Enrichment process
    hadoop_shred: 0.10.0 # Version of the Hadoop Shredding process
    hadoop_elasticsearch: 0.1.0 # Version of the Hadoop to Elasticsearch copying process
  continue_on_unexpected_error: false # Set to 'true' (and fill in the errors: buckets above) if you don't want any exceptions thrown from ETL; see the sketch below
  output_compression: NONE # Compression is only supported with Redshift; set to NONE if you have Postgres targets. Allowed formats: NONE, GZIP
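  # For illustration only -- a hypothetical setup that keeps the job running
  # past unexpected ETL errors; the flag above is flipped and the errors:
  # buckets in aws.s3.buckets are filled in (paths follow the sample's own
  # naming style and are not real):
  #   continue_on_unexpected_error: true
  #   # with errors: s3://my-out-bucket/enriched/errors (and .../shredded/errors)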
storage:
  download:
    folder: # Postgres-only config option. Where to store the downloaded files. Leave blank for Redshift
  targets:
    - name: "My Redshift database"
      type: redshift
      host: ADD HERE # The endpoint as shown in the Redshift console
      database: ADD HERE # Name of database
      port: 5439 # Default Redshift port
      ssl_mode: disable # One of disable (default), require, verify-ca or verify-full
      table: atomic.events
      username: ADD HERE
      password: ADD HERE
      maxerror: 1 # Stop loading on first error, or increase to permit more load errors
      comprows: 200000 # Default for a 1 XL node cluster. Not used unless --include compupdate specified
    - name: "My PostgreSQL database"
      type: postgres
      host: ADD HERE # Hostname of database server
      database: ADD HERE # Name of database
      port: 5432 # Default Postgres port
      ssl_mode: disable # One of disable (default), require, verify-ca or verify-full
      table: atomic.events
      username: ADD HERE
      password: ADD HERE
      maxerror: # Not required for Postgres
      comprows: # Not required for Postgres
    - name: "My Elasticsearch database"
      type: elasticsearch
      host: ADD HERE # The Elasticsearch endpoint
      database: ADD HERE # Name of index
      port: 9200 # Default Elasticsearch port - change to 80 if using Amazon Elasticsearch Service
      sources: # Leave blank to write the bad rows created in this run to Elasticsearch, or explicitly provide an array of bad row buckets like ["s3://my-enriched-bucket/bad/run=2015-10-06-15-25-53"]
      ssl_mode: # Not required for Elasticsearch
      table: ADD HERE # Name of type
      username: # Not required for Elasticsearch
      password: # Not required for Elasticsearch
      es_nodes_wan_only: false # Set to true if using Amazon Elasticsearch Service
      maxerror: # Not required for Elasticsearch
      comprows: # Not required for Elasticsearch
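  # Note (an assumption, not stated in the sample itself): each entry under
  # targets: is an independent load destination, so a typical deployment
  # would keep only the entries it actually loads into and delete the rest.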
monitoring:
  tags: {} # Name-value pairs describing this job
  logging:
    level: DEBUG # You can optionally switch to INFO for production
  snowplow:
    method: get
    app_id: ADD HERE # e.g. snowplow
    collector: ADD HERE # e.g. d3rkrsqld9gmqf.cloudfront.net
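# A minimal sketch of running EmrEtlRunner against this file once filled in,
# assuming its --config and --resolver options; both file paths below are
# hypothetical:
#   ./snowplow-emr-etl-runner --config config/config.yml --resolver resolver.json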