This repository has been archived by the owner on Apr 24, 2023. It is now read-only.

Add default container volumes (#1153)
Paul Schorfheide authored and shamsimam committed Jun 18, 2019
1 parent 6c6597f commit dce0059
Showing 8 changed files with 159 additions and 26 deletions.
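
In brief, the commit adds a :container-defaults setting to the scheduler config, and its :volumes are merged into each job's container when task metadata is built, unless the job already declares a volume whose path is a prefix of a default's path. The sketch below (not part of the diff) shows the intended effect, using the values this commit adds to scheduler/config.edn; the python:3.5 image is the one the integration tests use, and the "effective container" is what merge-container-defaults in scheduler/src/cook/mesos/task.clj would produce, not literal API output.

;; Configured default volume (added to scheduler/config.edn below):
:container-defaults {:volumes [{:host-path "/tmp/cook-integration-mount"
                                :container-path "/mnt/cook-integration"
                                :mode "RW"}]}

;; Container submitted with a job:
{:docker {:image "python:3.5"}}

;; Effective container after the defaults are merged in:
{:docker {:image "python:3.5"}
 :volumes [{:host-path "/tmp/cook-integration-mount"
            :container-path "/mnt/cook-integration"
            :mode "RW"}]}

;; A default is skipped when the job already declares a volume whose path is a
;; prefix of the default's container path, so jobs can override the mount.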
2 changes: 2 additions & 0 deletions integration/bin/run-integration.sh
@@ -87,6 +87,8 @@ docker create \
-e "COOK_SCHEDULER_URL=${COOK_URL}" \
-e "USER=root" \
-e "COOK_MESOS_LEADER_URL=http://${MESOS_MASTER_IP}:5050" \
-e "COOK_TEST_DOCKER_IMAGE=python:3.5" \
-v "/tmp/cook-integration-mount:/tmp/cook-integration-mount" \
${COOK_MULTICLUSTER_ENV} \
${DATA_LOCAL_ENV} \
${DOCKER_VOLUME_ARGS} \
43 changes: 42 additions & 1 deletion integration/tests/cook/test_basic.py
@@ -2796,7 +2796,15 @@ def get_debug_status_code():

    @unittest.skipUnless(util.supports_mesos_containerizer_images(), "Requires support for docker images in mesos containerizer")
    def test_mesos_containerizer_image_support(self):
        job_uuid, resp = util.submit_job(self.cook_url, executor='mesos', container={'type': 'mesos', 'mesos': {'image': 'alpine'}})
        container = {'type': 'mesos', 'mesos': {'image': 'alpine'}}
        settings = util.settings(self.cook_url)
        default_volumes = util.get_in(settings, 'container-defaults', 'volumes')
        if default_volumes is not None:
            volumes = [{'container-path': v['container-path'], 'host-path': '/tmp'}
                       for v in default_volumes]
            self.logger.info(f'Setting override volumes {volumes}')
            container['volumes'] = volumes
        job_uuid, resp = util.submit_job(self.cook_url, executor='mesos', container=container)
        try:
            self.assertEqual(201, resp.status_code, resp.text)
            instance = util.wait_for_instance(self.cook_url, job_uuid, status='success')
@@ -2838,3 +2846,36 @@ def test_cook_executor_reset_vars(self):
            util.wait_for_instance(self.cook_url, uuid, status='success')
        finally:
            util.kill_jobs(self.cook_url, job_uuids, assert_response=False)


    @unittest.skipUnless(util.docker_tests_enabled(), "Requires docker support")
    def test_default_container_volumes(self):
        settings = util.settings(self.cook_url)
        default_volumes = util.get_in(settings, 'container-defaults', 'volumes')
        if default_volumes is None or len(default_volumes) == 0:
            self.skipTest('Requires a default volume configured')
        default_volume = default_volumes[0]
        if not os.path.exists(default_volume['host-path']):
            os.mkdir(default_volume['host-path'])
        image = util.docker_image()
        file_name = str(uuid.uuid4())
        container_file = os.path.join(default_volume['container-path'], file_name)
        host_file = os.path.join(default_volume['host-path'], file_name)
        job_uuid, resp = util.submit_job(self.cook_url,
                                         command=f'echo "test_default_container_volumes" >> {container_file}',
                                         executor='mesos',
                                         container={'type': 'DOCKER',
                                                    'docker': {'image': image}})
        self.assertEqual(resp.status_code, 201, resp.content)
        try:
            util.wait_for_job(self.cook_url, job_uuid, 'completed')
            def check_host_path():
                exists = os.path.exists(host_file)
                self.logger.info(f'Path {host_file} exists: {exists}')
                return exists
            util.wait_until(check_host_path, lambda exists: exists)
            self.assertTrue(os.path.exists(host_file), f'Expected container to write {host_file}')
            with open(host_file, 'r') as f:
                self.assertEqual('test_default_container_volumes', f.readline().strip())
        finally:
            util.kill_jobs(self.cook_url, [job_uuid], assert_response=False)
3 changes: 3 additions & 0 deletions integration/travis/scheduler_travis_config.edn
@@ -16,6 +16,9 @@
:admins #{"root" "travis"}
;; users that are allowed to do things on behalf of others
:impersonators #{"poser" "travis"}}
:container-defaults {:volumes [{:host-path "/tmp/cook-integration-mount"
:container-path "/mnt/cook-integration"
:mode "RW"}]}
:cors-origins ["https?://cors.example.com"]
:data-local {:fitness-calculator {:cache-ttl-ms 20000
:cost-endpoint #config/env "DATA_LOCAL_ENDPOINT"
3 changes: 3 additions & 0 deletions scheduler/config.edn
@@ -12,6 +12,9 @@
:authorization-fn cook.rest.authorization/configfile-admins-auth-open-gets
;; users that are allowed to do things on behalf of others
:impersonators #{"poser" "other-impersonator"}}
:container-defaults {:volumes [{:host-path "/tmp/cook-integration-mount"
:container-path "/mnt/cook-integration"
:mode "RW"}]}
:cors-origins ["https?://cors.example.com"]
:data-local {:fitness-calculator {:cache-ttl-ms 60000
:cost-endpoint #config/env "DATA_LOCAL_ENDPOINT"
47 changes: 27 additions & 20 deletions scheduler/src/cook/config.clj
@@ -25,7 +25,8 @@
[cook.util :as util]
[mount.core :as mount]
[plumbing.core :refer (fnk)]
[plumbing.graph :as graph])
[plumbing.graph :as graph]
[schema.core :as s])
(:import (com.google.common.io Files)
(com.netflix.fenzo VMTaskFitnessCalculator)
(java.io File)
@@ -138,13 +139,15 @@
{:max-size 5000
:ttl-ms (* 60 1000)}
agent-query-cache))
:container-defaults (fnk [[:config {container-defaults {}}]]
container-defaults)
:cors-origins (fnk [[:config {cors-origins nil}]]
(map re-pattern (or cors-origins [])))
:exit-code-syncer (fnk [[:config {exit-code-syncer nil}]]
(merge
{:publish-batch-size 100
:publish-interval-ms 2500}
exit-code-syncer))
(merge
{:publish-batch-size 100
:publish-interval-ms 2500}
exit-code-syncer))
:sandbox-syncer (fnk [[:config {sandbox-syncer nil}]]
(merge
{:max-consecutive-sync-failure 15
@@ -287,9 +290,9 @@
:mea-culpa-failure-limit (fnk [[:config {scheduler nil}]]
(:mea-culpa-failure-limit scheduler))
:max-over-quota-jobs (fnk [[:config {scheduler nil}]]
(or (when scheduler
(:max-over-quota-jobs scheduler))
100))
(or (when scheduler
(:max-over-quota-jobs scheduler))
100))
:fenzo-max-jobs-considered (fnk [[:config {scheduler nil}]]
(when scheduler
(or (:fenzo-max-jobs-considered scheduler) 1000)))
@@ -404,19 +407,19 @@
(merge {:agent-start-grace-period-mins 10}
estimated-completion-constraint))
:data-local-fitness-calculator (fnk [[:config {data-local {}}]]
(let [fitness-calculator (get data-local :fitness-calculator {})]
{:auth (get fitness-calculator :auth nil)
:base-calculator (config-string->fitness-calculator
(get fitness-calculator :base-calculator "com.netflix.fenzo.plugins.BinPackingFitnessCalculators/cpuMemBinPacker"))
:batch-size (get fitness-calculator :batch-size 500)
:cache-ttl-ms (get fitness-calculator :cache-ttl-ms 300000)
:cost-endpoint (get fitness-calculator :cost-endpoint nil)
:data-locality-weight (get fitness-calculator :data-locality-weight 0.95)
:launch-wait-seconds (get fitness-calculator :launch-wait-seconds 60)
:update-interval-ms (get fitness-calculator :update-interval-ms nil)}))
(let [fitness-calculator (get data-local :fitness-calculator {})]
{:auth (get fitness-calculator :auth nil)
:base-calculator (config-string->fitness-calculator
(get fitness-calculator :base-calculator "com.netflix.fenzo.plugins.BinPackingFitnessCalculators/cpuMemBinPacker"))
:batch-size (get fitness-calculator :batch-size 500)
:cache-ttl-ms (get fitness-calculator :cache-ttl-ms 300000)
:cost-endpoint (get fitness-calculator :cost-endpoint nil)
:data-locality-weight (get fitness-calculator :data-locality-weight 0.95)
:launch-wait-seconds (get fitness-calculator :launch-wait-seconds 60)
:update-interval-ms (get fitness-calculator :update-interval-ms nil)}))
:plugins (fnk [[:config {plugins {}}]]
(let [{:keys [job-launch-filter job-submission-validator
pool-selection]} plugins]
(let [{:keys [job-launch-filter job-submission-validator
pool-selection]} plugins]
(merge plugins
{:job-launch-filter
(merge
@@ -538,3 +541,7 @@
(defn max-over-quota-jobs
[]
(get-in config [:settings :max-over-quota-jobs]))

(defn container-defaults
[]
(get-in config [:settings :container-defaults]))
30 changes: 26 additions & 4 deletions scheduler/src/cook/mesos/task.clj
@@ -77,10 +77,30 @@
(assoc "EXECUTOR_PROGRESS_OUTPUT_FILE_ENV" "EXECUTOR_PROGRESS_OUTPUT_FILE_NAME"
"EXECUTOR_PROGRESS_OUTPUT_FILE_NAME" progress-output-file))))

(defn merge-container-defaults
"Takes a job container specification and applies any defaults from the config.
Currently only supports volumes."
[container]
(when container
(let [{:keys [volumes]} (config/container-defaults)
get-path (fn [{:keys [container-path host-path]}]
(or container-path
host-path))]
(cond-> container
volumes
(update :volumes (fn [container-volumes]
(let [volumes-to-add (filter (fn [default-volume]
(not-any? (fn [container-volume]
(.startsWith (get-path default-volume)
(get-path container-volume)))
container-volumes))
volumes)]
(into (vec container-volumes) volumes-to-add))))))))

(defn job->executor-key
"Extract the executor key value from the job"
[db job-ent]
(let [container (util/job-ent->container db job-ent)
[job-ent]
(let [container (util/job-ent->container job-ent)
;; If the custom-executor attr isn't set, we default to using a custom
;; executor in order to support jobs submitted before we added this field
custom-executor? (use-custom-executor? job-ent)
@@ -108,9 +128,11 @@
(defn job->task-metadata
"Takes a job entity, returns task metadata"
[db mesos-run-as-user job-ent task-id]
(let [container (util/job-ent->container db job-ent)
(let [container (-> job-ent
util/job-ent->container
merge-container-defaults)
cook-executor? (use-cook-executor? job-ent)
executor-key (job->executor-key db job-ent)
executor-key (job->executor-key job-ent)
executor (executor-key->executor executor-key)
resources (util/job-ent->resources job-ent)
group-uuid (util/job-ent->group-uuid job-ent)
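
The merge rule in merge-container-defaults above is compact, so here is a standalone sketch of the same behavior (assuming nothing beyond clojure.core; the example namespace and the merge-default-volumes name are invented for illustration and are not part of the commit):

(ns example.container-defaults-sketch
  "Standalone illustration of the volume-merging rule added in this commit.")

(defn- volume-path
  "Prefer the container path; fall back to the host path, as get-path does above."
  [{:keys [container-path host-path]}]
  (or container-path host-path))

(defn merge-default-volumes
  "Appends each default volume to the job's volumes unless the job already
  declares a volume whose path is a prefix of the default's path."
  [default-volumes container]
  (when container
    (cond-> container
      (seq default-volumes)
      (update :volumes
              (fn [job-volumes]
                (let [covered? (fn [default]
                                 (some #(.startsWith ^String (volume-path default)
                                                     (volume-path %))
                                       job-volumes))]
                  (into (vec job-volumes)
                        (remove covered? default-volumes))))))))

(comment
  ;; No volumes on the job: the default is added.
  (merge-default-volumes [{:host-path "/tmp/cook-integration-mount"
                           :container-path "/mnt/cook-integration"
                           :mode "RW"}]
                         {:docker {:image "python:3.5"}})
  ;; => {:docker {:image "python:3.5"}
  ;;     :volumes [{:host-path "/tmp/cook-integration-mount"
  ;;                :container-path "/mnt/cook-integration"
  ;;                :mode "RW"}]}

  ;; The job already mounts the same container path: the default is skipped.
  (merge-default-volumes [{:host-path "/tmp/cook-integration-mount"
                           :container-path "/mnt/cook-integration"
                           :mode "RW"}]
                         {:docker {:image "python:3.5"}
                          :volumes [{:host-path "/data"
                                     :container-path "/mnt/cook-integration"}]})
  ;; => {:docker {:image "python:3.5"}
  ;;     :volumes [{:host-path "/data"
  ;;                :container-path "/mnt/cook-integration"}]}
  )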
2 changes: 1 addition & 1 deletion scheduler/src/cook/tools.clj
@@ -180,7 +180,7 @@

(defn job-ent->container
"Take a job entity and return its container"
[db job-ent]
[job-ent]
(some-> job-ent :job/container remove-datomic-namespacing))

(defn job-ent->group-uuid
55 changes: 55 additions & 0 deletions scheduler/test/cook/test/mesos/task.clj
@@ -3,6 +3,7 @@
(:require [clojure.data.json :as json]
[clojure.edn :as edn]
[clojure.string :as str]
[cook.config :as config]
[cook.scheduler.scheduler :as sched]
[cook.mesos.task :as task]
[cook.test.testutil :as tu]
@@ -875,3 +876,57 @@
:job/progress-sample-interval-ms 5000000}]
(with-redefs [cook.config/executor-config (constantly executor-config)]
(task/build-executor-environment job-ent)))))))

(deftest test-merge-container-defaults
(testing "does not add docker info if missing"
(with-redefs [config/container-defaults (constantly {:docker {:parameters {"foo" "bar"}}})]
(is (= {:mesos {:image "baz"}} (task/merge-container-defaults {:mesos {:image "baz"}})))))
(testing "adds volumes even when missing"
(let [volumes [{:container-path "/foo"
:host-path "/foo"}]]
(with-redefs [config/container-defaults (constantly {:volumes volumes})]
(is (= {:mesos {:image "baz"}
:volumes volumes}
(task/merge-container-defaults {:mesos {:image "baz"}}))))))
(testing "merges volumes"
(with-redefs [config/container-defaults (constantly {:volumes [{:host-path "/h/a"
:container-path "/c/a"
:mode "rw"}
{:host-path "/mnt/app/data"
:mode "r"}]})]
(is (= nil (task/merge-container-defaults nil)))
(is (= {:docker {:image "foo"}
:volumes [{:host-path "/h/a"
:container-path "/c/a"
:mode "rw"}
{:host-path "/mnt/app/data"
:mode "r"}]}
(task/merge-container-defaults {:docker {:image "foo"}})))
(is (= {:docker {:image "foo"}
:volumes [{:host-path "/mnt/app"
:mode "rw"}
{:host-path "/h/a"
:container-path "/c/a"
:mode "rw"}]}
(task/merge-container-defaults {:docker {:image "foo"}
:volumes [{:host-path "/mnt/app"
:mode "rw"}]})))
(is (= {:docker {:image "foo"}
:volumes [{:host-path "/diff/a"
:container-path "/c/a"}
{:host-path "/mnt/app/data"
:mode "r"}]}
(task/merge-container-defaults {:docker {:image "foo"}
:volumes [{:host-path "/diff/a"
:container-path "/c/a"}]})))
(is (= {:docker {:image "foo"}
:volumes [{:host-path "/diff/a"
:container-path "/c/a/b"}
{:host-path "/h/a"
:container-path "/c/a"
:mode "rw"}
{:host-path "/mnt/app/data"
:mode "r"}]}
(task/merge-container-defaults {:docker {:image "foo"}
:volumes [{:host-path "/diff/a"
:container-path "/c/a/b"}]}))))))
