Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP

Comparing changes

Choose two branches to see what's changed or to start a new pull request. If you need to, you can also compare across forks.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks.
...
Checking mergeability… Don't worry, you can still create the pull request.
  • 7 commits
  • 6 files changed
  • 0 commit comments
  • 1 contributor
Commits on Dec 16, 2013
@rbvermaa rbvermaa Version to 1.1.2 for a patch-release. 53e888f
@rbvermaa rbvermaa Support using ARNs for applying instance profiles to EC2 instances. …
…This allows cross-account API access.
5c82e82
Commits on Jan 13, 2014
@rbvermaa rbvermaa Fix creating instances with PIOPS EBS volumes that do not support bei…
…ng created as EBS optimized instances. Previously nixops would assume the EBS optimized flag, even though the instance type might not support it. Now it is more conservative, setting the EBS Optimized flag only for instance types that are known to have support for this. Cherry-picked from master.
9b0f181
Commits on Mar 13, 2014
@rbvermaa rbvermaa Allow i2 instances to be used (hvm). 0f8516e
Commits on May 07, 2014
@rbvermaa rbvermaa Amazon EC2 r3 instances are HVM instances as well.
(cherry picked from commit 1518e5c)
ae1b4ce
Commits on Jun 02, 2014
@rbvermaa rbvermaa Only apply tags on EBS disks. 3fa8906
Commits on Jul 04, 2014
@rbvermaa rbvermaa Add option --keep-physical to remove backups just from nixops state, …
…and leave the physical copy as-is.

When using --force with 'nixops backup', do not perform status checks on existing backups.

(cherry picked from commit 11a127c)

Conflicts:
	scripts/nixops
56f7444
View
50 nix/ec2.nix
@@ -9,6 +9,10 @@ let
cfg = config.deployment.ec2;
+ defaultEbsOptimized =
+ let props = config.deployment.ec2.physicalProperties;
+ in if props == null then false else (props.allowsEbsOptimized or false);
+
ec2DiskOptions = { config, ... }: {
options = {
@@ -113,7 +117,11 @@ let
cfg.instanceType == "cc1.4xlarge"
|| cfg.instanceType == "cc2.8xlarge"
|| cfg.instanceType == "hs1.8xlarge"
- || cfg.instanceType == "cr1.8xlarge";
+ || cfg.instanceType == "cr1.8xlarge"
+ || builtins.substring 0 2 cfg.instanceType == "i2"
+ || builtins.substring 0 2 cfg.instanceType == "c3"
+ || builtins.substring 0 2 cfg.instanceType == "r3"
+ || builtins.substring 0 2 cfg.instanceType == "m3";
# Map "/dev/mapper/xvdX" to "/dev/xvdX".
dmToDevice = dev:
@@ -342,6 +350,14 @@ in
'';
};
+ deployment.ec2.ebsOptimized = mkOption {
+ default = defaultEbsOptimized;
+ type = types.bool;
+ description = ''
+ Whether the EC2 instance should be created as an EBS Optimized instance.
+ '';
+ };
+
fileSystems = mkOption {
options = {
ec2 = mkOption {
@@ -408,22 +424,22 @@ in
let
type = config.deployment.ec2.instanceType or "unknown";
mapping = {
- "t1.micro" = { cores = 1; memory = 595; };
- "m1.small" = { cores = 1; memory = 1658; };
- "m1.medium" = { cores = 1; memory = 3755; };
- "m1.large" = { cores = 2; memory = 7455; };
- "m1.xlarge" = { cores = 4; memory = 14985; };
- "m2.xlarge" = { cores = 2; memory = 17084; };
- "m2.2xlarge" = { cores = 4; memory = 34241; };
- "m2.4xlarge" = { cores = 8; memory = 68557; };
- "m3.xlarge" = { cores = 4; memory = 14985; };
- "m3.2xlarge" = { cores = 8; memory = 30044; };
- "c1.medium" = { cores = 2; memory = 1697; };
- "c1.xlarge" = { cores = 8; memory = 6953; };
- "cc1.4xlarge" = { cores = 16; memory = 21542; };
- "cc2.8xlarge" = { cores = 32; memory = 59930; };
- "hi1.4xlarge" = { cores = 16; memory = 60711; };
- "cr1.8xlarge" = { cores = 32; memory = 245756; };
+ "t1.micro" = { cores = 1; memory = 595; allowsEbsOptimized = false; };
+ "m1.small" = { cores = 1; memory = 1658; allowsEbsOptimized = false; };
+ "m1.medium" = { cores = 1; memory = 3755; allowsEbsOptimized = false; };
+ "m1.large" = { cores = 2; memory = 7455; allowsEbsOptimized = true; };
+ "m1.xlarge" = { cores = 4; memory = 14985; allowsEbsOptimized = true; };
+ "m2.xlarge" = { cores = 2; memory = 17084; allowsEbsOptimized = false; };
+ "m2.2xlarge" = { cores = 4; memory = 34241; allowsEbsOptimized = true; };
+ "m2.4xlarge" = { cores = 8; memory = 68557; allowsEbsOptimized = true; };
+ "m3.xlarge" = { cores = 4; memory = 14985; allowsEbsOptimized = true; };
+ "m3.2xlarge" = { cores = 8; memory = 30044; allowsEbsOptimized = true; };
+ "c1.medium" = { cores = 2; memory = 1697; allowsEbsOptimized = false; };
+ "c1.xlarge" = { cores = 8; memory = 6953; allowsEbsOptimized = true; };
+ "cc1.4xlarge" = { cores = 16; memory = 21542; allowsEbsOptimized = false; };
+ "cc2.8xlarge" = { cores = 32; memory = 59930; allowsEbsOptimized = false; };
+ "hi1.4xlarge" = { cores = 16; memory = 60711; allowsEbsOptimized = false; };
+ "cr1.8xlarge" = { cores = 32; memory = 245756; allowsEbsOptimized = false; };
};
in attrByPath [ type ] null mapping;
View
2  nixops/backends/__init__.py
@@ -126,7 +126,7 @@ def restore(self, defn, backup_id, devices=[]):
"""Restore persistent disks to a given backup, if possible."""
self.warn("don't know how to restore disks from backup for machine ‘{0}".format(self.name))
- def remove_backup(self, backup_id):
+ def remove_backup(self, backup_id, keep_physical = False):
"""Remove a given backup of persistent disks, if possible."""
self.warn("don't know how to remove a backup for machine ‘{0}".format(self.name))
View
49 nixops/backends/ec2.py
@@ -47,6 +47,7 @@ def __init__(self, xml):
self.tags = {k.get("name"): k.find("string").get("value") for k in x.findall("attr[@name='tags']/attrs/attr")}
self.root_disk_size = int(x.find("attr[@name='ebsInitialRootDiskSize']/int").get("value"))
self.spot_instance_price = int(x.find("attr[@name='spotInstancePrice']/int").get("value"))
+ self.ebs_optimized = x.find("attr[@name='ebsOptimized']/bool").get("value") == "true"
def f(xml):
return {'disk': xml.find("attrs/attr[@name='disk']/string").get("value"),
@@ -348,22 +349,23 @@ def get_backups(self):
return backups
- def remove_backup(self, backup_id):
+ def remove_backup(self, backup_id, keep_physical = False):
self.log('removing backup {0}'.format(backup_id))
self.connect()
_backups = self.backups
if not backup_id in _backups.keys():
self.warn('backup {0} not found, skipping'.format(backup_id))
else:
- for dev, snapshot_id in _backups[backup_id].items():
- snapshot = None
- try:
- snapshot = self._get_snapshot_by_id(snapshot_id)
- except:
- self.warn('snapshot {0} not found, skipping'.format(snapshot_id))
- if not snapshot is None:
- self.log('removing snapshot {0}'.format(snapshot_id))
- nixops.ec2_utils.retry(lambda: snapshot.delete())
+ if not keep_physical:
+ for dev, snapshot_id in _backups[backup_id].items():
+ snapshot = None
+ try:
+ snapshot = self._get_snapshot_by_id(snapshot_id)
+ except:
+ self.warn('snapshot {0} not found, skipping'.format(snapshot_id))
+ if not snapshot is None:
+ self.log('removing snapshot {0}'.format(snapshot_id))
+ nixops.ec2_utils.retry(lambda: snapshot.delete())
_backups.pop(backup_id)
self.backups = _backups
@@ -559,9 +561,12 @@ def create_instance(self, defn, zone, devmap, user_data, ebs_optimized):
block_device_map=devmap,
user_data=user_data,
image_id=defn.ami,
- instance_profile_name=defn.instance_profile,
ebs_optimized=ebs_optimized
)
+ if defn.instance_profile.startswith("arn:") :
+ common_args['instance_profile_arn'] = defn.instance_profile
+ else:
+ common_args['instance_profile_name'] = defn.instance_profile
if defn.spot_instance_price:
request = nixops.ec2_utils.retry(
@@ -737,10 +742,13 @@ def create(self, defn, check, allow_reboot, allow_recreate):
.format(self.name, zone, v['disk'], volume.zone))
# Do we want an EBS-optimized instance?
- ebs_optimized = False
+ prefer_ebs_optimized = False
for k, v in defn.block_device_mapping.iteritems():
(volume_type, iops) = self.disk_volume_options(v)
- if volume_type != "standard": ebs_optimized = True
+ if volume_type != "standard": prefer_ebs_optimized = True
+
+ # if we have PIOPS volume and instance type supports EBS Optimized flags, then use ebs_optimized
+ ebs_optimized = prefer_ebs_optimized and defn.ebs_optimized
# Generate a public/private host key.
if not self.public_host_key:
@@ -900,13 +908,14 @@ def create(self, defn, check, allow_reboot, allow_recreate):
# Always apply tags to all volumes
for k, v in self.block_device_mapping.items():
- # Tag the volume.
- volume_tags = {}
- volume_tags.update(common_tags)
- volume_tags.update(defn.tags)
- volume_tags['Name'] = "{0} [{1} - {2}]".format(self.depl.description, self.name, _sd_to_xvd(k))
-
- nixops.ec2_utils.retry(lambda: self._conn.create_tags([v['volumeId']], volume_tags))
+ if 'volumeId' in v.keys():
+ # Tag the volume.
+ volume_tags = {}
+ volume_tags.update(common_tags)
+ volume_tags.update(defn.tags)
+ volume_tags['Name'] = "{0} [{1} - {2}]".format(self.depl.description, self.name, _sd_to_xvd(k))
+
+ nixops.ec2_utils.retry(lambda: self._conn.create_tags([v['volumeId']], volume_tags))
# Attach missing volumes.
for k, v in self.block_device_mapping.items():
View
8 nixops/deployment.py
@@ -722,19 +722,19 @@ def get_backups(self, include=[], exclude=[]):
return backups
- def clean_backups(self, keep=10):
+ def clean_backups(self, keep=10, keep_physical = False):
_backups = self.get_backups()
backup_ids = [b for b in _backups.keys()]
backup_ids.sort()
index = len(backup_ids)-keep
for backup_id in backup_ids[:index]:
print 'Removing backup {0}'.format(backup_id)
- self.remove_backup(backup_id)
+ self.remove_backup(backup_id, keep_physical)
- def remove_backup(self, backup_id):
+ def remove_backup(self, backup_id, keep_physical = False):
with self._get_deployment_lock():
def worker(m):
- m.remove_backup(backup_id)
+ m.remove_backup(backup_id, keep_physical)
nixops.parallel.run_tasks(nr_workers=len(self.active), tasks=self.machines.itervalues(), worker_fun=worker)
View
2  release.nix
@@ -6,7 +6,7 @@ let
pkgs = import <nixpkgs> { };
- version = "1.1.1" + (if officialRelease then "" else "pre${toString nixopsSrc.revCount}_${nixopsSrc.shortRev}");
+ version = "1.1.2" + (if officialRelease then "" else "pre${toString nixopsSrc.revCount}_${nixopsSrc.shortRev}");
in
View
25 scripts/nixops
@@ -282,24 +282,31 @@ def print_backups(depl, backups):
def op_clean_backups():
depl = open_deployment()
- depl.clean_backups(args.keep)
+ depl.clean_backups(args.keep, args.keep_physical)
def op_remove_backup():
depl = open_deployment()
- depl.remove_backup(args.backupid)
+ depl.remove_backup(args.backupid, args.keep_physical)
def op_backup():
depl = open_deployment()
- backups = depl.get_backups(include=args.include or [], exclude=args.exclude or [])
- backups_status = [b['status'] for _, b in backups.items()]
- if "running" in backups_status and not args.force:
- raise Exception("There are still backups running, use --force to run a new backup concurrently (not advised!)")
- else:
+
+ def do_backup():
backup_id = depl.backup(include=args.include or [], exclude=args.exclude or [])
print backup_id
+ if args.force:
+ do_backup()
+ else:
+ backups = depl.get_backups(include=args.include or [], exclude=args.exclude or [])
+ backups_status = [b['status'] for _, b in backups.items()]
+ if "running" in backups_status:
+ raise Exception("There are still backups running, use --force to run a new backup concurrently (not advised!)")
+ else:
+ do_backup()
+
def op_backup_status():
depl = open_deployment()
@@ -771,11 +778,13 @@ subparser.add_argument('--latest', dest='latest', action="store_true", default=F
subparser = add_subparser('remove-backup', help='remove a given backup')
subparser.set_defaults(op=op_remove_backup)
-subparser.add_argument('backupid', nargs='?', help='backup id to remove')
+subparser.add_argument('backupid', metavar='BACKUP-ID', help='backup ID to remove')
+subparser.add_argument('--keep-physical', dest="keep_physical", default=False, action="store_true", help='do not remove the physical backups, only remove backups from nixops state')
subparser = add_subparser('clean-backups', help='remove old backups')
subparser.set_defaults(op=op_clean_backups)
subparser.add_argument('--keep', dest="keep", type=int, default=10, help='number of backups to keep around')
+subparser.add_argument('--keep-physical', dest="keep_physical", default=False, action="store_true", help='do not remove the physical backups, only remove backups from nixops state')
subparser = add_subparser('restore', help='restore machines based on snapshots of persistent disks in network (currently EC2-only)')
subparser.set_defaults(op=op_restore)

No commit comments for this range

Something went wrong with that request. Please try again.