From a8060400d8007389398c229e188504f26f210a11 Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Wed, 5 Apr 2023 16:20:53 -0400 Subject: [PATCH 01/13] feat: set up DBM files --- docker-compose.yml | 3 + services/dbm_exec.sh | 3 + services/dbm_setup.sql | 26 ++ services/postgresql.conf | 780 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 812 insertions(+) create mode 100755 services/dbm_exec.sh create mode 100644 services/dbm_setup.sql create mode 100644 services/postgresql.conf diff --git a/docker-compose.yml b/docker-compose.yml index c86a5bad..4a7c07ad 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -32,11 +32,14 @@ services: volumes: - 'postgres:/var/lib/postgresql/data' - ./services/backend/db/restore:/docker-entrypoint-initdb.d + - ./services/postgresql.conf:/etc/postgresql/13/main/postgresql.conf + - ./services/dbm_setup.sql:/etc/postgresql/13/main/dbm_setup.sql labels: com.datadoghq.ad.check_names: '["postgres"]' com.datadoghq.ad.init_configs: '[{}]' com.datadoghq.ad.instances: '[{"host":"%%host%%", "port":5432,"username":"datadog","password":"datadog"}]' com.datadoghq.ad.logs: '[{"source":"postgresql","service":"postgresql"}]' + command: ["postgres", "-c", "config_file=/etc/postgresql/13/main/postgresql.conf"] networks: - storedog-net redis: diff --git a/services/dbm_exec.sh b/services/dbm_exec.sh new file mode 100755 index 00000000..b6360de4 --- /dev/null +++ b/services/dbm_exec.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +docker-compose exec postgres psql -U postgres -d postgres -a -f /etc/postgresql/13/main/dbm_setup.sql \ No newline at end of file diff --git a/services/dbm_setup.sql b/services/dbm_setup.sql new file mode 100644 index 00000000..592c34df --- /dev/null +++ b/services/dbm_setup.sql @@ -0,0 +1,26 @@ +CREATE SCHEMA datadog; +GRANT USAGE ON SCHEMA datadog TO datadog; +GRANT USAGE ON SCHEMA public TO datadog; +GRANT pg_monitor TO datadog; +CREATE EXTENSION IF NOT EXISTS pg_stat_statements; + +CREATE OR REPLACE FUNCTION datadog.explain_statement( + l_query TEXT, + OUT explain JSON +) +RETURNS SETOF JSON AS +$$ +DECLARE +curs REFCURSOR; +plan JSON; + +BEGIN + OPEN curs FOR EXECUTE pg_catalog.concat('EXPLAIN (FORMAT JSON) ', l_query); + FETCH curs INTO plan; + CLOSE curs; + RETURN QUERY SELECT plan; +END; +$$ +LANGUAGE 'plpgsql' +RETURNS NULL ON NULL INPUT +SECURITY DEFINER; \ No newline at end of file diff --git a/services/postgresql.conf b/services/postgresql.conf new file mode 100644 index 00000000..517f996d --- /dev/null +++ b/services/postgresql.conf @@ -0,0 +1,780 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. 
+# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. +#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +max_connections = 200 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +#password_encryption = md5 # md5 or scram-sha-256 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +#ssl = off +#ssl_ca_file = '' +#ssl_cert_file = 'server.crt' +#ssl_crl_file = '' +#ssl_key_file = 'server.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change 
requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. +#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 10 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#parallel_leader_participation = on +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) +#backend_flush_after = 0 # measured in pages, 0 disables + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = replica # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_compression = off # enable compression of full-page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds 
+#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # range 30s-1d +max_wal_size = 1GB +min_wal_size = 80MB +#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' + # (change requires restart) +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the master and on any standby that will send replication data. + +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables + +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Master Server - + +# These settings are ignored on a standby server. 
+ +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a master server. + +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from master + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_bitmapscan = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_seqscan = on +#enable_sort = on +#enable_incremental_sort = on +#enable_tidscan = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_parallel_hash = on +#enable_partition_pruning = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +#effective_cache_size = 4GB + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 
+#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#force_parallel_mode = off +#jit = on # allow JIT compilation +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +logging_collector = on # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +log_directory = 'pg_log' # directory where log files are written, + # can be absolute or relative to PGDATA +log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +log_file_mode = 0644 # creation mode for log files, + # begin with 0 to use octal notation +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. 
+ +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (win32): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_checkpoints = off +#log_connections = off +#log_disconnections = off +log_duration = on +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +log_line_prefix= '%m [%p] %d %a %u %h %c ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +log_statement = 'all' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'UTC' + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Query and Index Statistics Collector - + +track_activities = on +#track_counts = on +#track_io_timing = off +#track_functions = none # none, pl, all +track_activity_query_size = 4096 # (change requires restart) +#stats_temp_directory = 'pg_stat_tmp' + + +# - Monitoring - + +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off +#log_statement_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. +#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. 
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#row_security = on +#default_tablespace = '' # a tablespace name, '' uses the default +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#default_table_access_method = 'heap' +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_min_age = 50000000 +#vacuum_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples + # before index cleanup, 0 always performs + # index cleanup +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_fuzzy_search_limit = 0 +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
+lc_messages = 'en_US.utf8' # locale for system error message + # strings +lc_monetary = 'en_US.utf8' # locale for monetary formatting +lc_numeric = 'en_US.utf8' # locale for number formatting +lc_time = 'en_US.utf8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +shared_preload_libraries = 'pg_stat_statements' # (change requires restart) +#local_preload_libraries = '' +#session_preload_libraries = '' +#jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#operator_precedence_warning = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here \ No newline at end of file From 414b194a9aa86e8a2b37cf5ffd79378ed603d66b Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Wed, 5 Apr 2023 17:17:37 -0400 Subject: [PATCH 02/13] feat: move file and mount as expected --- docker-compose.yml | 3 ++- services/dbm/dbm_exec.sh | 7 +++++++ services/{ => dbm}/dbm_setup.sql | 0 services/dbm_exec.sh | 3 --- 4 files changed, 9 insertions(+), 4 deletions(-) create mode 100755 services/dbm/dbm_exec.sh rename services/{ => dbm}/dbm_setup.sql (100%) delete mode 100755 services/dbm_exec.sh diff --git a/docker-compose.yml b/docker-compose.yml index 4a7c07ad..b0c7739f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -33,7 +33,8 @@ services: - 'postgres:/var/lib/postgresql/data' - ./services/backend/db/restore:/docker-entrypoint-initdb.d - ./services/postgresql.conf:/etc/postgresql/13/main/postgresql.conf - - ./services/dbm_setup.sql:/etc/postgresql/13/main/dbm_setup.sql + - ./services/dbm/dbm_setup.sql:/etc/postgresql/13/main/dbm_setup.sql + - ./services/dbm/dbm_exec.sh:/dbm_exec.sh labels: com.datadoghq.ad.check_names: '["postgres"]' com.datadoghq.ad.init_configs: '[{}]' diff --git a/services/dbm/dbm_exec.sh b/services/dbm/dbm_exec.sh new file mode 100755 index 00000000..4d6a2dc1 --- /dev/null +++ b/services/dbm/dbm_exec.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +exec postgres -c config_file=/etc/postgresql/13/main/postgresql.conf + +sleep(20) + +exec psql -U postgres -d postgres -a -f /etc/postgresql/13/main/dbm_setup.sql \ No newline at end of file diff --git a/services/dbm_setup.sql b/services/dbm/dbm_setup.sql similarity index 100% rename from services/dbm_setup.sql rename to services/dbm/dbm_setup.sql diff --git a/services/dbm_exec.sh b/services/dbm_exec.sh deleted file mode 100755 index b6360de4..00000000 --- a/services/dbm_exec.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -docker-compose exec postgres psql -U postgres -d postgres -a -f /etc/postgresql/13/main/dbm_setup.sql \ No newline at end of file From 8fa148712bd3f797c001743b0fa9a2d523101ac4 Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Wed, 5 Apr 2023 17:18:44 -0400 Subject: [PATCH 03/13] feat: clean up shell file --- services/dbm/dbm_exec.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/services/dbm/dbm_exec.sh b/services/dbm/dbm_exec.sh index 4d6a2dc1..b6360de4 100755 --- a/services/dbm/dbm_exec.sh +++ b/services/dbm/dbm_exec.sh @@ -1,7 +1,3 @@ #!/bin/sh -exec postgres -c config_file=/etc/postgresql/13/main/postgresql.conf - -sleep(20) - -exec psql -U postgres -d postgres -a -f /etc/postgresql/13/main/dbm_setup.sql \ No newline at end of file +docker-compose exec postgres psql -U postgres -d postgres -a -f /etc/postgresql/13/main/dbm_setup.sql \ No newline at end of file From 7116fdb13d56be8070dc9a9ef140183365df32be Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Wed, 5 Apr 2023 17:21:10 -0400 Subject: [PATCH 04/13] fix: shell script for docker exec --- services/dbm/dbm_exec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/dbm/dbm_exec.sh b/services/dbm/dbm_exec.sh index b6360de4..0a23238a 100755 --- a/services/dbm/dbm_exec.sh +++ b/services/dbm/dbm_exec.sh @@ -1,3 +1,3 @@ #!/bin/sh -docker-compose exec postgres psql -U postgres -d postgres -a -f /etc/postgresql/13/main/dbm_setup.sql \ No newline at 
end of file +psql -U postgres -d postgres -a -f /etc/postgresql/13/main/dbm_setup.sql \ No newline at end of file From 6b19ed1e78297ee5faa58c3ecd36ffc8c93f8179 Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Wed, 5 Apr 2023 17:28:57 -0400 Subject: [PATCH 05/13] feat: create dd user --- services/dbm/dbm_setup.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/services/dbm/dbm_setup.sql b/services/dbm/dbm_setup.sql index 592c34df..e316e3e2 100644 --- a/services/dbm/dbm_setup.sql +++ b/services/dbm/dbm_setup.sql @@ -1,3 +1,5 @@ +create user datadog with password 'datadog'; +GRANT SELECT ON pg_stat_database TO datadog; CREATE SCHEMA datadog; GRANT USAGE ON SCHEMA datadog TO datadog; GRANT USAGE ON SCHEMA public TO datadog; From c6e3961476fa4add173d6d49983f4328432ee023 Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Thu, 6 Apr 2023 12:02:52 -0400 Subject: [PATCH 06/13] feat: update configs and add files for storedog --- docker-compose.yml | 11 ++- services/backend/db/pg_hba.conf | 100 ++++++++++++++++++++++ services/{ => backend/db}/postgresql.conf | 2 +- 3 files changed, 109 insertions(+), 4 deletions(-) create mode 100644 services/backend/db/pg_hba.conf rename services/{ => backend/db}/postgresql.conf (99%) diff --git a/docker-compose.yml b/docker-compose.yml index b0c7739f..39535263 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -32,7 +32,8 @@ services: volumes: - 'postgres:/var/lib/postgresql/data' - ./services/backend/db/restore:/docker-entrypoint-initdb.d - - ./services/postgresql.conf:/etc/postgresql/13/main/postgresql.conf + - ./services/backend/db/postgresql.conf:/etc/postgresql/13/main/postgresql.conf + - ./services/backend/db/pg_hba.conf:/etc/postgresql/13/main/pg_hba.conf - ./services/dbm/dbm_setup.sql:/etc/postgresql/13/main/dbm_setup.sql - ./services/dbm/dbm_exec.sh:/dbm_exec.sh labels: @@ -67,6 +68,7 @@ services: DB_PORT: 5432 DISABLE_SPRING: 1 DD_APPSEC_ENABLED: 1 + DD_AGENT_HOST: 172.43.0.1 networks: - storedog-net worker: @@ -197,8 +199,10 @@ services: - DD_APM_ENABLED=true - DD_APM_NON_LOCAL_TRAFFIC=true - DD_LOGS_ENABLED=true + - DD_HOSTNAME=172.43.0.4 - DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL=true - - DD_CONTAINER_EXCLUDE="name:datadog-agent" + - DD_CONTAINER_EXCLUDE=name datadog-agent + - DD_HOSTNAME_TRUST_UTS_NAMESPACE=true ports: - "8126:8126" volumes: @@ -206,7 +210,8 @@ services: - /proc/:/host/proc/:ro - /sys/fs/cgroup/:/host/sys/fs/cgroup:ro networks: - - storedog-net + storedog-net: + ipv4_address: 172.43.0.4 ads-java: build: context: ./services/ads/java diff --git a/services/backend/db/pg_hba.conf b/services/backend/db/pg_hba.conf new file mode 100644 index 00000000..fda8d47d --- /dev/null +++ b/services/backend/db/pg_hba.conf @@ -0,0 +1,100 @@ +# TYPE DATABASE USER CIDR-ADDRESS METHOD +host all all 0.0.0.0/0 trust + +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. 
Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# non-SSL TCP/IP socket. Similarly, "hostgssenc" uses a +# GSSAPI-encrypted TCP/IP socket, while "hostnogssenc" uses a +# non-GSSAPI socket. +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. +# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. +# +# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", +# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". +# Note that "password" sends passwords in clear text; "md5" or +# "scram-sha-256" are preferred since they send encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. 
+ +# CAUTION: Configuring the system for local "trust" authentication +# allows any local user to connect as any PostgreSQL user, including +# the database superuser. If you do not trust all your local users, +# use another authentication method. + + +# TYPE DATABASE USER ADDRESS METHOD + +# "local" is for Unix domain socket connections only +local all all trust +# IPv4 local connections: +host all all 127.0.0.1/32 trust +# IPv6 local connections: +host all all ::1/128 trust +# Allow replication connections from localhost, by a user with the +# replication privilege. +local replication all trust +host replication all 127.0.0.1/32 trust +host replication all ::1/128 trust diff --git a/services/postgresql.conf b/services/backend/db/postgresql.conf similarity index 99% rename from services/postgresql.conf rename to services/backend/db/postgresql.conf index 517f996d..e2dfe7f7 100644 --- a/services/postgresql.conf +++ b/services/backend/db/postgresql.conf @@ -41,7 +41,7 @@ #data_directory = 'ConfigDir' # use data in another directory # (change requires restart) -#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file +hba_file = '/etc/postgresql/13/main/pg_hba.conf' # host-based authentication file # (change requires restart) #ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file # (change requires restart) From 1a24d980443b433188fd1c2208738ae6e46bacad Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Thu, 6 Apr 2023 12:23:31 -0400 Subject: [PATCH 07/13] feat: clean up files --- docker-compose.yml | 1 - services/backend/db/pg_hba.conf | 100 ---------------------------- services/backend/db/postgresql.conf | 2 +- 3 files changed, 1 insertion(+), 102 deletions(-) delete mode 100644 services/backend/db/pg_hba.conf diff --git a/docker-compose.yml b/docker-compose.yml index 39535263..304c0468 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -33,7 +33,6 @@ services: - 'postgres:/var/lib/postgresql/data' - ./services/backend/db/restore:/docker-entrypoint-initdb.d - ./services/backend/db/postgresql.conf:/etc/postgresql/13/main/postgresql.conf - - ./services/backend/db/pg_hba.conf:/etc/postgresql/13/main/pg_hba.conf - ./services/dbm/dbm_setup.sql:/etc/postgresql/13/main/dbm_setup.sql - ./services/dbm/dbm_exec.sh:/dbm_exec.sh labels: diff --git a/services/backend/db/pg_hba.conf b/services/backend/db/pg_hba.conf deleted file mode 100644 index fda8d47d..00000000 --- a/services/backend/db/pg_hba.conf +++ /dev/null @@ -1,100 +0,0 @@ -# TYPE DATABASE USER CIDR-ADDRESS METHOD -host all all 0.0.0.0/0 trust - -# PostgreSQL Client Authentication Configuration File -# =================================================== -# -# Refer to the "Client Authentication" section in the PostgreSQL -# documentation for a complete description of this file. A short -# synopsis follows. -# -# This file controls: which hosts are allowed to connect, how clients -# are authenticated, which PostgreSQL user names they can use, which -# databases they can access. Records take one of these forms: -# -# local DATABASE USER METHOD [OPTIONS] -# host DATABASE USER ADDRESS METHOD [OPTIONS] -# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] -# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] -# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] -# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] -# -# (The uppercase items must be replaced by actual values.) 
-# -# The first field is the connection type: "local" is a Unix-domain -# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, -# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a -# non-SSL TCP/IP socket. Similarly, "hostgssenc" uses a -# GSSAPI-encrypted TCP/IP socket, while "hostnogssenc" uses a -# non-GSSAPI socket. -# -# DATABASE can be "all", "sameuser", "samerole", "replication", a -# database name, or a comma-separated list thereof. The "all" -# keyword does not match "replication". Access to replication -# must be enabled in a separate record (see example below). -# -# USER can be "all", a user name, a group name prefixed with "+", or a -# comma-separated list thereof. In both the DATABASE and USER fields -# you can also write a file name prefixed with "@" to include names -# from a separate file. -# -# ADDRESS specifies the set of hosts the record matches. It can be a -# host name, or it is made up of an IP address and a CIDR mask that is -# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that -# specifies the number of significant bits in the mask. A host name -# that starts with a dot (.) matches a suffix of the actual host name. -# Alternatively, you can write an IP address and netmask in separate -# columns to specify the set of hosts. Instead of a CIDR-address, you -# can write "samehost" to match any of the server's own IP addresses, -# or "samenet" to match any address in any subnet that the server is -# directly connected to. -# -# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", -# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". -# Note that "password" sends passwords in clear text; "md5" or -# "scram-sha-256" are preferred since they send encrypted passwords. -# -# OPTIONS are a set of options for the authentication in the format -# NAME=VALUE. The available options depend on the different -# authentication methods -- refer to the "Client Authentication" -# section in the documentation for a list of which options are -# available for which authentication methods. -# -# Database and user names containing spaces, commas, quotes and other -# special characters must be quoted. Quoting one of the keywords -# "all", "sameuser", "samerole" or "replication" makes the name lose -# its special character, and just match a database or username with -# that name. -# -# This file is read on server startup and when the server receives a -# SIGHUP signal. If you edit the file on a running system, you have to -# SIGHUP the server for the changes to take effect, run "pg_ctl reload", -# or execute "SELECT pg_reload_conf()". -# -# Put your actual configuration here -# ---------------------------------- -# -# If you want to allow non-local connections, you need to add more -# "host" records. In that case you will also need to make PostgreSQL -# listen on a non-local interface via the listen_addresses -# configuration parameter, or via the -i or -h command line switches. - -# CAUTION: Configuring the system for local "trust" authentication -# allows any local user to connect as any PostgreSQL user, including -# the database superuser. If you do not trust all your local users, -# use another authentication method. 
- - -# TYPE DATABASE USER ADDRESS METHOD - -# "local" is for Unix domain socket connections only -local all all trust -# IPv4 local connections: -host all all 127.0.0.1/32 trust -# IPv6 local connections: -host all all ::1/128 trust -# Allow replication connections from localhost, by a user with the -# replication privilege. -local replication all trust -host replication all 127.0.0.1/32 trust -host replication all ::1/128 trust diff --git a/services/backend/db/postgresql.conf b/services/backend/db/postgresql.conf index e2dfe7f7..517f996d 100644 --- a/services/backend/db/postgresql.conf +++ b/services/backend/db/postgresql.conf @@ -41,7 +41,7 @@ #data_directory = 'ConfigDir' # use data in another directory # (change requires restart) -hba_file = '/etc/postgresql/13/main/pg_hba.conf' # host-based authentication file +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file # (change requires restart) #ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file # (change requires restart) From 220f9e3cccc692d99118664498fe94b7e308e80a Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Thu, 6 Apr 2023 12:58:02 -0400 Subject: [PATCH 08/13] fix: update config file location on mount --- docker-compose.yml | 4 ++-- services/backend/db/postgresql.conf | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 304c0468..76b687bf 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -32,7 +32,7 @@ services: volumes: - 'postgres:/var/lib/postgresql/data' - ./services/backend/db/restore:/docker-entrypoint-initdb.d - - ./services/backend/db/postgresql.conf:/etc/postgresql/13/main/postgresql.conf + - ./services/backend/db/postgresql.conf:/postgresql.conf - ./services/dbm/dbm_setup.sql:/etc/postgresql/13/main/dbm_setup.sql - ./services/dbm/dbm_exec.sh:/dbm_exec.sh labels: @@ -40,7 +40,7 @@ services: com.datadoghq.ad.init_configs: '[{}]' com.datadoghq.ad.instances: '[{"host":"%%host%%", "port":5432,"username":"datadog","password":"datadog"}]' com.datadoghq.ad.logs: '[{"source":"postgresql","service":"postgresql"}]' - command: ["postgres", "-c", "config_file=/etc/postgresql/13/main/postgresql.conf"] + command: ["postgres", "-c", "config_file=/postgresql.conf"] networks: - storedog-net redis: diff --git a/services/backend/db/postgresql.conf b/services/backend/db/postgresql.conf index 517f996d..ff420c6b 100644 --- a/services/backend/db/postgresql.conf +++ b/services/backend/db/postgresql.conf @@ -495,7 +495,7 @@ log_file_mode = 0644 # creation mode for log files, # fatal # panic (effectively off) -#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements +log_min_duration_statement = 0 # -1 is disabled, 0 logs all statements # and their durations, > 0 logs only # statements running at least this number # of milliseconds From 2c0f037f6f8f738b022d353551f14824a187e808 Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Thu, 6 Apr 2023 13:39:55 -0400 Subject: [PATCH 09/13] fix: update with postgres config for dd --- docker-compose.yml | 1 + services/backend/db/dd-agent-conf.yaml | 540 +++++++++++++++++++++++++ 2 files changed, 541 insertions(+) create mode 100644 services/backend/db/dd-agent-conf.yaml diff --git a/docker-compose.yml b/docker-compose.yml index 76b687bf..6ead5935 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -208,6 +208,7 @@ services: - /var/run/docker.sock:/var/run/docker.sock:ro - /proc/:/host/proc/:ro - /sys/fs/cgroup/:/host/sys/fs/cgroup:ro + - 
./services/backend/db/dd-agent-conf.yaml:/conf.d/postgres.d/conf.yaml networks: storedog-net: ipv4_address: 172.43.0.4 diff --git a/services/backend/db/dd-agent-conf.yaml b/services/backend/db/dd-agent-conf.yaml new file mode 100644 index 00000000..c8ce9963 --- /dev/null +++ b/services/backend/db/dd-agent-conf.yaml @@ -0,0 +1,540 @@ +## All options defined here are available to all instances. +# +init_config: + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Additionally, this sets the default `service` for every log source. + # + # service: + +## Every instance is scheduled independently of the others. +# +instances: + + ## @param host - string - required + ## The hostname to connect to. + ## NOTE: Even if the server name is `localhost`, the Agent connects to PostgreSQL using TCP/IP unless you also + ## provide a value for the sock key. + # + - dbm: true + host: postgres + + ## @param port - integer - optional - default: 5432 + ## The port to use when connecting to PostgreSQL. + # + port: 5432 + + ## @param username - string - required + ## The Datadog username created to connect to PostgreSQL. + # + username: datadog + + ## @param password - string - optional + ## The password associated with the Datadog user. + # + password: datadog + + ## @param dbname - string - optional - default: postgres + ## The name of the PostgresSQL database to monitor. + ## Note: If omitted, the default system Postgres database is queried. + # + # dbname: + + ## @param reported_hostname - string - optional + ## Set the reported hostname for this instance. This value overrides the hostname detected by the Agent + ## and can be useful to set a custom hostname when connecting to a remote database through a proxy. + # + # reported_hostname: + + ## @param dbstrict - boolean - optional - default: false + ## Whether to restrict the scope of the check to just the database in question. + ## Set to `true` if you only want to gather metrics from the database provided in the dbname parameter. + # + # dbstrict: false + + ## @param ignore_databases - list of strings - optional + ## A list of database to ignore. No metrics or statement samples will be collected for these databases. + ## Each value can be a plain string or a Postgres pattern. + ## For more information on how patterns work, see https://www.postgresql.org/docs/12/functions-matching.html + # + # ignore_databases: + # - template% + # - rdsadmin + # - azure_maintenance + + ## @param ssl - string - optional - default: false + ## This option determines whether or not and with what priority a secure SSL TCP/IP connection + ## is negotiated with the server. There are six modes: + ## - `disable`: Only tries a non-SSL connection. + ## - `allow`: First tries a non-SSL connection; if if fails, tries an SSL connection. + ## - `prefer`: First tries an SSL connection; if it fails, tries a non-SSL connection. + ## - `require`: Only tries an SSL connection. If a root CA file is present, verifies the certificate in + ## the same way as if verify-ca was specified. + ## - `verify-ca`: Only tries an SSL connection, and verifies that the server certificate is issued by a + ## trusted certificate authority (CA). + ## - `verify-full`: Only tries an SSL connection and verifies that the server certificate is issued by a + ## trusted CA and that the requested server host name matches the one in the certificate. 
+ ## + ## For a detailed description of how these options work see https://www.postgresql.org/docs/current/libpq-ssl.html + ## + ## Note: `true` is an alias for `require`, and `false` is an alias for `disable`. + # + # ssl: 'false' + + ## @param ssl_root_cert - string - optional - default: false + ## The path to the ssl root certificate. + ## + ## For a detailed description of how this option works see https://www.postgresql.org/docs/current/libpq-ssl.html + # + # ssl_root_cert: /home/datadog/server-ca.pem + + ## @param ssl_cert - string - optional - default: false + ## The path to the ssl certificate. + ## + ## For a detailed description of how this option works see https://www.postgresql.org/docs/current/libpq-ssl.html + # + # ssl_cert: /home/datadog/client-cert.pem + + ## @param ssl_key - string - optional - default: false + ## The path to the ssl client key. + ## + ## For a detailed description of how this option works see https://www.postgresql.org/docs/current/libpq-ssl.html + # + # ssl_key: /home/datadog/client-key.pem + + ## @param ssl_password - string - optional - default: false + ## The password for the secret key specified in ssl_key, allowing client certificate private keys to be stored + ## in encrypted form on disk. + ## + ## For a detailed description of how this option works see https://www.postgresql.org/docs/current/libpq-ssl.html + # + # ssl_password: ssl_key_password + + ## @param query_timeout - integer - optional - default: 5000 + ## Adds a statement_timeout https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT + ## to all metric collection queries. Aborts any statement that takes more than the specified number of milliseconds, + ## starting from the time the command arrives at the server from the client. A value of zero turns this off. + ## Cancelled queries won't log any metrics. + # + # query_timeout: 5000 + + ## @param relations - (list of string or mapping) - optional + ## The list of relations/tables must be specified here to track per-relation (table, index , view, etc.) metrics. + ## If enabled, `dbname` should be specified to collect database-specific relations metrics. + ## You can either specify a single relation by its exact name in 'relation_name' or use a regex to track metrics + ## from all matching relations (useful in cases where relation names are dynamically generated, e.g. TimescaleDB). + ## Each relation generates many metrics (10 + 10 per index). + ## + ## By default all schemas are included. To track relations from specific schemas only, + ## you can specify the `schemas` attribute and provide a list of schemas to use for filtering. + ## + ## Size metrics are collected only for ordinary tables. Index metrics are collected only for user indexes. Lock + ## metrics are collected for all relation types (table, index , view, etc.). The rest of the metrics are + ## collected only for user tables. + ## To track lock metrics for relations of a specific kind only, specify the `relkind` attribute + ## as a list of the options: + ## * r = ordinary table + ## * i = index + ## * S = sequence + ## * t = TOAST table + ## * m = materialized view + ## * c = composite type + ## * f = foreign table + ## * p = partitioned table + ## + ## Note: For compatibility reasons you can also use the following syntax to track relations metrics by specifying + ## the list of table names. All schemas are included and regex are not supported. 
+ ## relations: + # + # relations: + # - relation_name: + # schemas: + # - + # - relation_regex: + # relkind: + # - r + # - p + + ## @param max_relations - integer - optional - default: 300 + ## Determines the maximum number of relations to fetch. + # + # max_relations: 300 + + ## @param collect_function_metrics - boolean - optional - default: false + ## If set to true, collects metrics regarding PL/pgSQL functions from pg_stat_user_functions. + # + # collect_function_metrics: false + + ## @param collect_count_metrics - boolean - optional - default: true + ## Collect count of user tables up to max_relations size from pg_stat_user_tables. + # + # collect_count_metrics: true + + ## @param collect_activity_metrics - boolean - optional - default: false + ## Collect metrics regarding transactions from pg_stat_activity. Please make sure the user + ## has sufficient privileges to read from pg_stat_activity before enabling this option. + # + # collect_activity_metrics: false + + ## @param activity_metrics_excluded_aggregations - list of strings - optional + ## A list of columns to remove from the pg_stat_activity aggregation. + ## By default, datname, usename and application_name will be used. + ## If applications with different application_name are creating a lot of short-lived queries, + ## removing application_name from the aggregation can help generate more stable metrics. + # + # activity_metrics_excluded_aggregations: + # - application_name + + ## @param collect_database_size_metrics - boolean - optional - default: true + ## Collect database size metrics. + # + # collect_database_size_metrics: true + + ## @param collect_default_database - boolean - optional - default: true + ## Include statistics from the default database 'postgres' in the check metrics. + # + # collect_default_database: true + + ## @param collect_bloat_metrics - boolean - optional - default: false + ## Collect metrics about table bloat. Only available when `relation` metrics are enabled. + # + collect_bloat_metrics: false + + ## @param collect_wal_metrics - boolean - optional - default: false + ## Collect metrics about WAL file age. + ## NOTE: You must be running the check local to your database if you want to enable this option. + # + # collect_wal_metrics: false + + ## @param data_directory - string - optional - default: /usr/local/pgsql/data + ## The data directory of your postgres installation + ## Required when collecting WAL metrics. + # + # data_directory: /usr/local/pgsql/data + + ## @param tag_replication_role - boolean - optional - default: false + ## Tag metrics and checks with `replication_role:`. + # + # tag_replication_role: false + + ## @param table_count_limit - integer - optional - default: 200 + ## The maximum number of tables to collect metrics from. + # + # table_count_limit: 200 + + ## @param custom_queries - list of mappings - optional + ## Define custom queries to collect custom metrics from your PostgreSQL + ## See Datadog FAQ article for a guide on collecting custom metrics from PostgreSQL: + ## https://docs.datadoghq.com/integrations/faq/postgres-custom-metric-collection-explained/ + # + # custom_queries: + # - metric_prefix: postgresql + # query: + # columns: + # - name: + # type: + # - name: + # type: + # tags: + # - : + + ## @param application_name - string - optional - default: datadog-agent + ## The application_name can be any string of less than NAMEDATALEN characters (64 characters in a standard build). + ## It is typically set by an application upon connection to the server. 
+ ## The name is displayed in the pg_stat_activity view and included in CSV log entries. + # + # application_name: datadog-agent + + ## @param dbm - boolean - optional - default: false + ## Set to `true` to enable Database Monitoring. + # + # dbm: false + + ## @param pg_stat_statements_view - string - optional - default: show_pg_stat_statements() + ## Set this value if you want to define a custom view or function to allow the datadog user to query the + ## `pg_stat_statements` table, which is useful for restricting the permissions given to the datadog agent. + ## Please note this is an ALPHA feature and is subject to change or deprecation without notice. + # + # pg_stat_statements_view: show_pg_stat_statements() + + ## Configure collection of query metrics + # + # query_metrics: + + ## @param enabled - boolean - optional - default: true + ## Enable collection of query metrics. Requires `dbm: true`. + # + # enabled: true + + ## @param collection_interval - number - optional - default: 10 + ## Set the query metric collection interval (in seconds). Each collection involves a single query to + ## `pg_stat_statements`. If a non-default value is chosen then that exact same value must be used for *every* + ## check instance. Running different instances with different collection intervals is not supported. + # + # collection_interval: 10 + + ## Configure collection of query samples + # + # query_samples: + + ## @param enabled - boolean - optional - default: true + ## Enable collection of query samples. Requires `dbm: true`. + # + # enabled: true + + ## @param collection_interval - number - optional - default: 1 + ## Set the query sample collection interval (in seconds). Each collection involves a single query to + ## `pg_stat_activity` followed by at most one `EXPLAIN` query per unique normalized query seen. + # + # collection_interval: 1 + + ## @param explain_function - string - optional - default: datadog.explain_statement + ## Override the default function used to collect execution plans for queries. + # + # explain_function: datadog.explain_statement + + ## @param explained_queries_per_hour_per_query - integer - optional - default: 60 + ## Set the rate limit for how many execution plans will be collected per hour per normalized query. + # + # explained_queries_per_hour_per_query: 60 + + ## @param samples_per_hour_per_query - integer - optional - default: 15 + ## Set the rate limit for how many query sample events will be ingested per hour per normalized execution + ## plan. + # + # samples_per_hour_per_query: 15 + + ## @param explained_queries_cache_maxsize - integer - optional - default: 5000 + ## Set the max size of the cache used for the explained_queries_per_hour_per_query rate limit. This should + ## be increased for databases with a very large number unique normalized queries which exceed the cache's + ## limit. + # + # explained_queries_cache_maxsize: 5000 + + ## @param seen_samples_cache_maxsize - integer - optional - default: 10000 + ## Set the max size of the cache used for the samples_per_hour_per_query rate limit. This should be increased + ## for databases with a very large number of unique normalized execution plans which exceed the cache's limit. + # + # seen_samples_cache_maxsize: 10000 + + ## @param explain_parameterized_queries - boolean - optional - default: false + ## Note, this is a BETA feature. This option will enable the ability to explain parameterized queries. + ## This is useful if your SQL clients are using the extended query protocol or prepared statements. 
+ # + # explain_parameterized_queries: false + + ## Configure collection of query activity + # + # query_activity: + + ## @param enabled - boolean - optional - default: true + ## Enable collection of query activity. Requires `dbm: true`, and enabling query_samples. + # + # enabled: true + + ## @param collection_interval - number - optional - default: 10 + ## Set the query activity collection interval (in seconds). This number cannot be smaller than + ## query_samples configured collection_interval. + # + # collection_interval: 10 + + ## @param payload_row_limit - number - optional - default: 3500 + ## Set the query activity maximum number of pg_stat_activity rows you want to report. If the table is larger + ## than the maximum rows set, then the top N longest running transactions will be reported. + # + # payload_row_limit: 3500 + + ## This block defines the configuration for AWS RDS and Aurora instances. + ## + ## Complete this section if you have installed the Datadog AWS Integration + ## (https://docs.datadoghq.com/integrations/amazon_web_services) to enrich instances + ## with Postgres integration telemetry. + ## + ## These values are only applied when `dbm: true` option is set. + # + # aws: + + ## @param instance_endpoint - string - optional - default: mydb.cfxgae8cilcf.us-east-1.rds.amazonaws.com + ## Equal to the Endpoint.Address of the instance the agent is connecting to. + ## This value is optional if the value of `host` is already configured to the instance endpoint. + ## + ## For more information on instance endpoints, + ## see the AWS docs https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html + # + # instance_endpoint: mydb.cfxgae8cilcf.us-east-1.rds.amazonaws.com + + ## This block defines the configuration for Google Cloud SQL instances. + ## + ## Complete this section if you have installed the Datadog GCP Integration + ## (https://docs.datadoghq.com/integrations/google_cloud_platform) to enrich instances + ## with Postgres integration telemetry. + ## + ## These values are only applied when `dbm: true` option is set. + # + # gcp: + + ## @param project_id - string - optional - default: foo-project + ## Equal to the GCP resource's project ID. + ## + ## For more information on project IDs, + ## See the GCP docs https://cloud.google.com/resource-manager/docs/creating-managing-projects + # + # project_id: foo-project + + ## @param instance_id - string - optional - default: foo-database + ## Equal to the GCP resource's instance ID. + ## + ## For more information on instance IDs, + ## See the GCP docs https://cloud.google.com/sql/docs/postgres/instance-settings#instance-id-2ndgen + # + # instance_id: foo-database + + ## This block defines the configuration for Azure Database for PostgreSQL. + ## + ## Complete this section if you have installed the Datadog Azure Integration + ## (https://docs.datadoghq.com/integrations/azure) to enrich instances + ## with Postgres integration telemetry. + ## These values are only applied when `dbm: true` option is set. + # + # azure: + + ## @param deployment_type - string - optional - default: flexible_server + ## Equal to the deployment type for the managed database. 
+ ## + ## Acceptable values are: + ## - `flexible_server` + ## - `single_server` + ## - `virtual_machine` + ## + ## For more information on deployment types, + ## see the Azure docs https://docs.microsoft.com/en-us/azure/postgresql/overview-postgres-choose-server-options + # + # deployment_type: flexible_server + + ## @param name - string - optional - default: my-postgres-database + ## Equal to the name of the Azure PostgreSQL database. + # + # name: my-postgres-database + + ## Configure how the SQL obfuscator behaves. + ## Note: This option only applies when `dbm` is enabled. + # + # obfuscator_options: + + ## @param replace_digits - boolean - optional - default: false + ## Set to `true` to replace digits in identifiers and table names with question marks in your SQL statements. + ## Note: This option also applies to extracted tables using `collect_tables`. + # + # replace_digits: false + + ## @param collect_metadata - boolean - optional - default: true + ## Set to `false` to disable the collection of metadata in your SQL statements. + ## Metadata includes things such as tables, commands, and comments. + # + # collect_metadata: true + + ## @param collect_tables - boolean - optional - default: true + ## Set to `false` to disable the collection of tables in your SQL statements. + ## Requires `collect_metadata: true`. + # + # collect_tables: true + + ## @param collect_commands - boolean - optional - default: true + ## Set to `false` to disable the collection of commands in your SQL statements. + ## Requires `collect_metadata: true`. + ## + ## Examples: SELECT, UPDATE, DELETE, etc. + # + # collect_commands: true + + ## @param collect_comments - boolean - optional - default: true + ## Set to `false` to disable the collection of comments in your SQL statements. + ## Requires `collect_metadata: true`. + # + # collect_comments: true + + ## @param keep_sql_alias - boolean - optional - default: true + ## Set to `true` to keep sql aliases in obfuscated SQL statements. Examples of aliases are + ## `with select 1 as alias`, `select column as other_name`, or `select * from table t`. + ## When `true` these aliases will not be removed. + # + # keep_sql_alias: true + + ## @param keep_dollar_quoted_func - boolean - optional - default: true + ## Set to `true` to prevent dollar quoted function strings (e.g. `$func$`) from being removed. + ## When not removed, the sql content of dollar quoted func strings will be obfuscated. + ## Only strings with the tag `$func$` are supported. + # + # keep_dollar_quoted_func: true + + ## @param tags - list of strings - optional + ## A list of tags to attach to every metric and service check emitted by this instance. + ## + ## Learn more about tagging at https://docs.datadoghq.com/tagging + # + # tags: + # - : + # - : + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Overrides any `service` defined in the `init_config` section. + # + # service: + + ## @param min_collection_interval - number - optional - default: 15 + ## This changes the collection interval of the check. For more information, see: + ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval + # + # min_collection_interval: 15 + + ## @param empty_default_hostname - boolean - optional - default: false + ## This forces the check to send metrics with no hostname. + ## + ## This is useful for cluster-level checks. 
+    #
+    # empty_default_hostname: false
+
+    ## @param disable_generic_tags - boolean - optional - default: false
+    ## The integration will stop sending the `server` tag, as it is redundant with the `host` tag.
+    #
+    disable_generic_tags: true
+
+    ## @param metric_patterns - mapping - optional
+    ## A mapping of metrics to include or exclude, with each entry being a regular expression.
+    ##
+    ## Metrics defined in `exclude` will take precedence in case of overlap.
+    #
+    # metric_patterns:
+    #   include:
+    #   -
+    #   exclude:
+    #   -
+
+## Log Section
+##
+## type - required - Type of log input source (tcp / udp / file / windows_event).
+## port / path / channel_path - required - Set port if type is tcp or udp.
+##                                         Set path if type is file.
+##                                         Set channel_path if type is windows_event.
+## source - required - Attribute that defines which integration sent the logs.
+## encoding - optional - For file specifies the file encoding. Default is utf-8. Other
+##                       possible values are utf-16-le and utf-16-be.
+## service - optional - The name of the service that generates the log.
+##                      Overrides any `service` defined in the `init_config` section.
+## tags - optional - Add tags to the collected logs.
+##
+## Discover Datadog log collection: https://docs.datadoghq.com/logs/log_collection/
+#
+# logs:
+#   - type: file
+#     path: /pg-log/postgres.log
+#     source: postgresql
+#     service: postgres
+#     log_processing_rules:
+#       - type: multi_line
+#         pattern: \d{4}\-(0?[1-9]|1[012])\-(0?[1-9]|[12][0-9]|3[01])
+#         name: new_log_start_with_date
\ No newline at end of file

From b1d682c30a9739b54f78b48a4ea53fe22a2786b4 Mon Sep 17 00:00:00 2001
From: Devin Ford
Date: Thu, 6 Apr 2023 13:57:10 -0400
Subject: [PATCH 10/13] feat: update README with dbm instructions

---
 README.md | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/README.md b/README.md
index 985036e1..e9279102 100644
--- a/README.md
+++ b/README.md
@@ -112,6 +112,15 @@ For example: Turning `cart` off will disable Cart capabilities.
 - Turn `wishlist` on by setting `wishlist` to `true`.
 - Run the app and the wishlist functionality should be back on.
 
+#### How to run the DBM backend to test the Database Monitoring in the product
+
+- Complete the startup steps under Local Development through step 3.
+- In `services/frontend/site/featureFlags.config.json`, find the object with `name:dbm` and set `active:true`.
+- Run `docker-compose --profile dbm up -d`.
+- Once all the containers are up, run `docker exec storedog-postgres-1 ./dbm_exec.sh`; this adds the objects DBM needs to the database.
+- Run `docker restart storedog-postgres-1` to restart the Postgres container.
+
+You should now see your database metrics in DBM!
 
 ## Troubleshoot
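The steps above can be run back to back. A minimal consolidated sketch, assuming the compose project resolves the database container to `storedog-postgres-1` (adjust the name if your project is called something else):

```sh
# Start the stack with the dbm profile enabled
docker-compose --profile dbm up -d

# Once the containers are up, add the objects DBM needs to the database
docker exec storedog-postgres-1 ./dbm_exec.sh

# Restart the Postgres container
docker restart storedog-postgres-1
```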
From c30b90998f6560e408e7aeb690bff888123c4b77 Mon Sep 17 00:00:00 2001
From: Devin Ford
Date: Thu, 6 Apr 2023 16:00:15 -0400
Subject: [PATCH 11/13] feat: update dbm and readme

---
 README.md                 | 17 ++++++++++++++++-
 services/dbm/bootstrap.py | 14 ++++++++++++--
 services/dbm/dbm.py       | 11 ++++++++++-
 services/dbm/models.py    | 25 +++++++++++++++++++++++++
 4 files changed, 63 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index e9279102..4cc321f4 100644
--- a/README.md
+++ b/README.md
@@ -112,7 +112,7 @@ For example: Turning `cart` off will disable Cart capabilities.
 - Turn `wishlist` on by setting `wishlist` to `true`.
 - Run the app and the wishlist functionality should be back on.
 
-#### How to run the DBM backend to test the Database Monitoring in the product
+#### How to run the DBM backend to test the Database Monitoring in the product and incrementally improve it for the workshop
 
 - Complete the startup steps under Local Development through step 3.
 - In `services/frontend/site/featureFlags.config.json`, find the object with `name:dbm` and set `active:true`.
 - Run `docker-compose --profile dbm up -d`.
@@ -121,6 +121,21 @@ For example: Turning `cart` off will disable Cart capabilities.
 - Run `docker restart storedog-postgres-1` to restart the Postgres container.
 
 You should now see your database metrics in DBM!
+
+Once the metrics are showing in DBM, direct the users to the `dbm.py` file in `services/dbm/dbm.py`.
+
+Have them update the query, changing both occurrences of `{random.randint(1, 7000)}` to `{random.randint(5000, 7000)}`, so that only the most popular items show in the ticker.
+
+Then explain that preorder items are marked as false (`f`) in the table once they become regular items, and that best practice would be to fold the items marked `f` into the `items` table. With that done, update the query to the following so it only reads from the `items` table:
+
+```sql
+SELECT *
+FROM items
+WHERE order_count::int > {random.randint(5000, 7000)}
+```
+
+This greatly reduces the cost of each query.
 
 ## Troubleshoot
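For reference, a sketch of what the first change looks like in `services/dbm/dbm.py` — this assumes the route and the `db.text()` f-string stay exactly as in the patch below, with only the `randint` bounds raised:

```python
# Sketch of the updated query assignment in services/dbm/dbm.py.
# It relies on the module's existing `db` and `random` imports;
# only the randint bounds change, the rest of the route is untouched.
query = db.text(f'''
    SELECT id, description, order_count, last_hour, image_url, NULL AS is_preorder
    FROM items
    WHERE order_count::int > {random.randint(5000, 7000)}
    UNION
    SELECT id, description, order_count, last_hour, image_url, is_preorder
    FROM preorder_items
    WHERE order_count::int > {random.randint(5000, 7000)};
''')
```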
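If you also want to demonstrate the "fold former preorders into `items`" step, a hypothetical one-time backfill could look like the following. Column names are taken from `services/dbm/models.py`; this is only an illustration for the workshop discussion, not part of the patch:

```sql
-- Hypothetical backfill: copy rows that are no longer preorders into items.
-- Column names follow services/dbm/models.py; the items id is generated on insert.
INSERT INTO items (description, order_count, last_hour, image_url)
SELECT description, order_count, last_hour, image_url
FROM preorder_items
WHERE is_preorder = false;
```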
diff --git a/services/dbm/bootstrap.py b/services/dbm/bootstrap.py index 9d8f0f12..ee1e059a 100644 --- a/services/dbm/bootstrap.py +++ b/services/dbm/bootstrap.py @@ -1,5 +1,5 @@ from flask import Flask -from models import Items, db +from models import Items, Preorder_Items, db from faker import Faker import random import os @@ -28,7 +28,7 @@ def initialize_database(app, db): with app.app_context(): db.drop_all() db.create_all() - for i in range(15000): + for i in range(30000): newItem = Items( fake.sentence(), random.randint(1, 7000), @@ -37,4 +37,14 @@ def initialize_database(app, db): ) db.session.add(newItem) i+1 + for i in range(10000): + newItem = Preorder_Items( + fake.sentence(), + random.randint(1, 7000), + fake.image_url(), + random.randint(1, 10), + bool(random.getrandbits(1)) + ) + db.session.add(newItem) + i+1 db.session.commit() diff --git a/services/dbm/dbm.py b/services/dbm/dbm.py index 489b740e..a88ceae3 100644 --- a/services/dbm/dbm.py +++ b/services/dbm/dbm.py @@ -23,7 +23,16 @@ @app.route("/get-item", methods=["GET"]) def product_ticker(): - query = db.text(f'SELECT * FROM items WHERE order_count::int > {random.randint(1, 7000)};') + query = db.text(f''' + SELECT id, description, order_count, last_hour, image_url, NULL AS is_preorder + FROM items + WHERE order_count::int > {random.randint(1, 7000)} + UNION + SELECT id, description, order_count, last_hour, image_url, is_preorder + FROM preorder_items + WHERE order_count::int > {random.randint(1, 7000)}; + ''') + app.logger.info(engine) try: app.logger.info('Connecting to db') diff --git a/services/dbm/models.py b/services/dbm/models.py index 58797d19..e081063b 100644 --- a/services/dbm/models.py +++ b/services/dbm/models.py @@ -25,3 +25,28 @@ def serialize(self): 'last_hour': self.last_hour, 'image_url': self.image_url } +class Preorder_Items(db.Model): + __tablename__ = 'preorder_items' + id = db.Column(db.Integer, primary_key=True) + description = db.Column(db.String(128)) + order_count = db.Column(db.String(64)) + last_hour = db.Column(db.String(64)) + image_url = db.Column(db.String(64)) + is_preorder = db.Column(db.Boolean()) + + def __init__(self, description, order_count, image_url, last_hour, is_preorder): + self.description = description + self.order_count = order_count + self.last_hour = last_hour + self.image_url = image_url + self.is_preorder = is_preorder + + def serialize(self): + return { + 'id': self.id, + 'description': self.description, + 'order_count': self.order_count, + 'last_hour': self.last_hour, + 'image_url': self.image_url, + 'is_preorder': self.is_preorder + } From 2555a562bb079cfc53d873d0962f8594b9513f2b Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Fri, 7 Apr 2023 09:20:43 -0400 Subject: [PATCH 12/13] feat: add dbm to release flow --- .github/workflows/release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 018fdc55..3afeb7a6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -44,6 +44,7 @@ jobs: ${{ secrets.PUBLIC_ECR_REGISTRY }}/storedog/auth ${{ secrets.PUBLIC_ECR_REGISTRY }}/storedog/nginx ${{ secrets.PUBLIC_ECR_REGISTRY }}/storedog/frontend + ${{ secrets.PUBLIC_ECR_REGISTRY }}/storedog/dbm ) for i in "${IMAGES[@]}" From 726364fe18915b3f9f818a45b77c7e7d04b9d1b4 Mon Sep 17 00:00:00 2001 From: Devin Ford Date: Fri, 7 Apr 2023 09:40:55 -0400 Subject: [PATCH 13/13] fix: make agent dependency, update work agent host --- docker-compose.yml | 16 ++++++++++++++++ 1 file changed, 16 
insertions(+) diff --git a/docker-compose.yml b/docker-compose.yml index f23b7d56..60142a90 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,6 +6,7 @@ services: command: yarn dev depends_on: - worker + - dd-agent volumes: - "./services/frontend/site:/storedog-app/site" ports: @@ -20,6 +21,7 @@ services: - "80:80" depends_on: - frontend + - dd-agent labels: com.datadoghq.ad.logs: '[{"source": "nginx", "service": "nginx"}]' networks: @@ -27,6 +29,8 @@ services: postgres: image: postgres:13-alpine restart: always + depends_on: + - dd-agent environment: POSTGRES_HOST_AUTH_METHOD: trust volumes: @@ -45,6 +49,8 @@ services: - storedog-net redis: image: redis:6.2-alpine + depends_on: + - dd-agent volumes: - 'redis:/data' networks: @@ -53,6 +59,7 @@ services: depends_on: - 'postgres' - 'redis' + - 'dd-agent' build: context: ./services/backend ports: @@ -75,6 +82,7 @@ services: - 'postgres' - 'redis' - 'web' + - 'dd-agent' build: context: ./services/backend command: bundle exec sidekiq -C config/sidekiq.yml @@ -88,11 +96,13 @@ services: DB_PORT: 5432 DISABLE_SPRING: 1 DD_APPSEC_ENABLED: 1 + DD_AGENT_HOST: 172.43.0.1 networks: - storedog-net ads: depends_on: - postgres + - dd-agent environment: - FLASK_APP=ads.py - FLASK_DEBUG=1 @@ -121,6 +131,7 @@ services: discounts: depends_on: - postgres + - dd-agent environment: - FLASK_APP=discounts.py - FLASK_DEBUG=1 @@ -150,6 +161,7 @@ services: auth: depends_on: - postgres + - dd-agent profiles: - csrf environment: @@ -176,6 +188,7 @@ services: dbm: depends_on: - postgres + - dd-agent profiles: - dbm environment: @@ -223,6 +236,8 @@ services: ads-java: build: context: ./services/ads/java + depends_on: + - dd-agent environment: - DD_SERVICE=ads-java - DD_AGENT_HOST=dd-agent @@ -252,6 +267,7 @@ services: depends_on: - web - discounts + - dd-agent networks: - storedog-net