New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixing issues in deployment, including #666, #667 and #668 (#670)

Open
wants to merge 4 commits into
base: 3.11
from
Jump to file or symbol
Failed to load files and symbols.
+134 −46
Diff settings

Always

Just for now

Copy path View file
@@ -2,7 +2,7 @@ plugins {
id 'nebula.netflixoss' version '5.0.0'
}
ext.githubProjectName = 'Priam'
ext.githubProjectName = 'Priam-parent'
allprojects {
apply plugin: 'nebula.netflixoss'
@@ -26,7 +26,6 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Interface for Priam's configuration
@@ -739,4 +738,9 @@
* @return SNS Topic ARN to be used to send notification.
*/
public String getBackupNotificationTopicArn();
/**
 * Returns the SimpleDB domain in which Priam stores per-instance identity
 * records (token, zone, region — see SDBInstanceData).
 *
 * @return the SimpleDB domain name for storing instance identities
 */
public String getInstanceIdentityDomain();
}
@@ -47,15 +47,22 @@
public final class SimpleDBConfigSource extends AbstractConfigSource {
private static final Logger logger = LoggerFactory.getLogger(SimpleDBConfigSource.class.getName());
private static final String DOMAIN = "PriamProperties";
private static String ALL_QUERY = "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s'";
private static final String DEFAULT_DOMAIN = "PriamProperties";
private static String ALL_QUERY = "select * from `%s` where " + Attributes.APP_ID + "='%s'";
private final Map<String, String> data = Maps.newConcurrentMap();
private final ICredential provider;
private final String domain;
/**
 * Creates the SimpleDB-backed config source.
 *
 * <p>The SimpleDB properties domain can be overridden via the
 * {@code priam.sdb.properties.domain} system property; otherwise the
 * built-in default domain is used.
 *
 * @param provider credential provider used for SimpleDB access.
 */
@Inject
public SimpleDBConfigSource(final ICredential provider) {
    this.provider = provider;
    // getProperty(key, default) returns the default only when the property
    // is undefined — identical to the explicit null check it replaces.
    domain = System.getProperty("priam.sdb.properties.domain", DEFAULT_DOMAIN);
}
@Override
@@ -68,8 +75,9 @@ public void intialize(final String asgName, final String region) {
String nextToken = null;
String appid = asgName.lastIndexOf('-') > 0 ? asgName.substring(0, asgName.indexOf('-')) : asgName;
logger.info("appid used to fetch properties is: {}", appid);
logger.info("domain used to fetch properties is: {}", domain);
do {
SelectRequest request = new SelectRequest(String.format(ALL_QUERY, appid));
SelectRequest request = new SelectRequest(String.format(ALL_QUERY, domain, appid));
request.setNextToken(nextToken);
SelectResult result = simpleDBClient.select(request);
nextToken = result.getNextToken();
@@ -15,11 +15,32 @@
*/
package com.netflix.priam.aws;
import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.BucketLifecycleConfiguration;
import com.amazonaws.services.s3.model.BucketLifecycleConfiguration.Rule;
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
import com.amazonaws.services.s3.model.lifecycle.LifecycleAndOperator;
import com.amazonaws.services.s3.model.lifecycle.LifecycleFilter;
import com.amazonaws.services.s3.model.lifecycle.LifecyclePredicateVisitor;
import com.amazonaws.services.s3.model.lifecycle.LifecyclePrefixPredicate;
import com.amazonaws.services.s3.model.lifecycle.LifecycleTagPredicate;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.RateLimiter;
import com.google.inject.Provider;
@@ -38,21 +59,6 @@
import com.netflix.priam.notification.EventGenerator;
import com.netflix.priam.notification.EventObserver;
import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
public abstract class S3FileSystemBase implements IBackupFileSystem, EventGenerator<BackupEvent> {
protected static final int MAX_CHUNKS = 10000;
@@ -151,10 +157,19 @@ public void cleanup() {
private boolean updateLifecycleRule(IConfiguration config, List<Rule> rules, String prefix) {
Rule rule = null;
PrefixVisitor visitor = new PrefixVisitor(prefix);
for (BucketLifecycleConfiguration.Rule lcRule : rules) {
if (lcRule.getPrefix().equals(prefix)) {
rule = lcRule;
break;
if (lcRule.getFilter() != null) {
lcRule.getFilter().getPredicate().accept(visitor);
if (visitor.isMatchesPrefix()) {
rule = lcRule;
break;
}
} else if (lcRule.getPrefix() != null) {
if (lcRule.getPrefix().equals(prefix)) {
rule = lcRule;
break;
}
}
}
if (rule == null && config.getBackupRetentionDays() <= 0)
@@ -165,21 +180,48 @@ private boolean updateLifecycleRule(IConfiguration config, List<Rule> rules, Str
}
if (rule == null) {
// Create a new rule
rule = new BucketLifecycleConfiguration.Rule().withExpirationInDays(config.getBackupRetentionDays()).withPrefix(prefix);
rule = new BucketLifecycleConfiguration.Rule().withExpirationInDays(config.getBackupRetentionDays())
.withFilter(new LifecycleFilter(new LifecyclePrefixPredicate(prefix)));
rule.setStatus(BucketLifecycleConfiguration.ENABLED);
rule.setId(prefix);
rules.add(rule);
logger.info("Setting cleanup for {} to {} days", rule.getPrefix(), rule.getExpirationInDays());
logger.info("Setting cleanup for {} to {} days", prefix, rule.getExpirationInDays());
} else if (config.getBackupRetentionDays() > 0) {
logger.info("Setting cleanup for {} to {} days", rule.getPrefix(), config.getBackupRetentionDays());
logger.info("Setting cleanup for {} to {} days", prefix, config.getBackupRetentionDays());
rule.setExpirationInDays(config.getBackupRetentionDays());
} else {
logger.info("Removing cleanup rule for {}", rule.getPrefix());
logger.info("Removing cleanup rule for {}", prefix);
rules.remove(rule);
}
return true;
}
private class PrefixVisitor implements LifecyclePredicateVisitor {
private String prefix;
private boolean matchesPrefix;
public PrefixVisitor(String prefix) {
this.prefix = prefix;
}
@Override
public void visit(LifecycleAndOperator lifecycleAndOperator) {
}
@Override
public void visit(LifecycleTagPredicate lifecycleTagPredicate) {
}
@Override
public void visit(LifecyclePrefixPredicate lifecyclePrefixPredicate) {
if (lifecyclePrefixPredicate.getPrefix().equals(prefix)) {
matchesPrefix = true;
}
}
public boolean isMatchesPrefix() {
return matchesPrefix;
}
}
/*
@param path - representation of the file uploaded
@param start time of upload, in millisecs
@@ -28,11 +28,16 @@
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* DAO for handling Instance identity information such as token, zone, region
*/
@Singleton
public class SDBInstanceData {
private static final Logger logger = LoggerFactory.getLogger(SDBInstanceData.class.getName());
public static class Attributes {
public final static String APP_ID = "appId";
public final static String ID = "id";
@@ -45,17 +50,18 @@
public final static String HOSTNAME = "hostname";
}
public static final String DOMAIN = "InstanceIdentity";
public static final String ALL_QUERY = "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s'";
public static final String INSTANCE_QUERY = "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s' and " + Attributes.LOCATION + "='%s' and " + Attributes.ID + "='%d'";
public static final String ALL_QUERY = "select * from `%s` where " + Attributes.APP_ID + "='%s'";
public static final String INSTANCE_QUERY = "select * from `%s` where " + Attributes.APP_ID + "='%s' and " + Attributes.LOCATION + "='%s' and " + Attributes.ID + "='%d'";
private final ICredential provider;
private final IConfiguration configuration;
private final String domain;
/**
 * Builds the DAO. The SimpleDB domain holding instance identities is taken
 * from configuration so it can differ per deployment.
 *
 * @param provider      AWS credential provider for SimpleDB access.
 * @param configuration Priam configuration supplying the identity domain name.
 */
@Inject
public SDBInstanceData(ICredential provider, IConfiguration configuration) {
    this.provider = provider;
    this.configuration = configuration;
    this.domain = configuration.getInstanceIdentityDomain();
}
/**
@@ -67,7 +73,9 @@ public SDBInstanceData(ICredential provider, IConfiguration configuration) {
*/
public PriamInstance getInstance(String app, String dc, int id) {
AmazonSimpleDB simpleDBClient = getSimpleDBClient();
SelectRequest request = new SelectRequest(String.format(INSTANCE_QUERY, app, dc, id));
String query = String.format(INSTANCE_QUERY, domain, app, dc, id);
logger.info("Fetching instance data using query {}", query);
SelectRequest request = new SelectRequest(query);
SelectResult result = simpleDBClient.select(request);
if (result.getItems().size() == 0)
return null;
@@ -85,7 +93,9 @@ public PriamInstance getInstance(String app, String dc, int id) {
Set<PriamInstance> inslist = new HashSet<PriamInstance>();
String nextToken = null;
do {
SelectRequest request = new SelectRequest(String.format(ALL_QUERY, app));
String query = String.format(ALL_QUERY, domain, app);
logger.info("Fetching IDs using query {}", query);
SelectRequest request = new SelectRequest(query);
request.setNextToken(nextToken);
SelectResult result = simpleDBClient.select(request);
nextToken = result.getNextToken();
@@ -106,7 +116,7 @@ public PriamInstance getInstance(String app, String dc, int id) {
*/
public void createInstance(PriamInstance instance) throws AmazonServiceException {
AmazonSimpleDB simpleDBClient = getSimpleDBClient();
PutAttributesRequest putReq = new PutAttributesRequest(DOMAIN, getKey(instance), createAttributesToRegister(instance));
PutAttributesRequest putReq = new PutAttributesRequest(domain, getKey(instance), createAttributesToRegister(instance));
simpleDBClient.putAttributes(putReq);
}
@@ -118,7 +128,7 @@ public void createInstance(PriamInstance instance) throws AmazonServiceException
*/
public void registerInstance(PriamInstance instance) throws AmazonServiceException {
AmazonSimpleDB simpleDBClient = getSimpleDBClient();
PutAttributesRequest putReq = new PutAttributesRequest(DOMAIN, getKey(instance), createAttributesToRegister(instance));
PutAttributesRequest putReq = new PutAttributesRequest(domain, getKey(instance), createAttributesToRegister(instance));
UpdateCondition expected = new UpdateCondition();
expected.setName(Attributes.INSTANCE_ID);
expected.setExists(false);
@@ -134,7 +144,7 @@ public void registerInstance(PriamInstance instance) throws AmazonServiceExcepti
*/
public void deregisterInstance(PriamInstance instance) throws AmazonServiceException {
AmazonSimpleDB simpleDBClient = getSimpleDBClient();
DeleteAttributesRequest delReq = new DeleteAttributesRequest(DOMAIN, getKey(instance), createAttributesToDeRegister(instance));
DeleteAttributesRequest delReq = new DeleteAttributesRequest(domain, getKey(instance), createAttributesToDeRegister(instance));
simpleDBClient.deleteAttributes(delReq);
}
@@ -34,7 +34,7 @@ public PriamConfigSource(final SimpleDBConfigSource simpleDBConfigSource,
// this order was based off PriamConfigurations loading. W/e loaded last could override, but with Composite, first
// has the highest priority.
super(simpleDBConfigSource,
propertiesConfigSource,
systemPropertiesConfigSource);
systemPropertiesConfigSource,
propertiesConfigSource);
}
}
@@ -184,6 +184,7 @@
private static final String CONFIG_VPC_ROLE_ASSUMPTION_ARN = PRIAM_PRE + ".vpc.roleassumption.arn";
private static final String CONFIG_DUAL_ACCOUNT = PRIAM_PRE + ".roleassumption.dualaccount";
private static final String CONFIG_INSTANCE_IDENTITY_DOMAIN = PRIAM_PRE + ".sdb.instanceidentity.domain";
//Running instance meta data
private String RAC;
@@ -245,6 +246,8 @@
private static final int DEFAULT_TOMBSTONE_WARNING_THRESHOLD = 1000; // C* defaults
private static final int DEFAULT_TOMBSTONE_FAILURE_THRESHOLD = 100000;// C* defaults
private static final String DEFAULT_INSTANCE_IDENTITY_DOMAIN = "InstanceIdentity";
// AWS EC2 Dual Account
private static final boolean DEFAULT_DUAL_ACCOUNT = false;
@@ -1127,4 +1130,8 @@ public String getBackupNotificationTopicArn() {
return config.get(PRIAM_PRE + ".backup.notification.topic.arn", "");
}
@Override
public String getInstanceIdentityDomain() {
    // SimpleDB domain for instance identity records; falls back to the
    // built-in default ("InstanceIdentity") when the property is not set.
    return config.get(CONFIG_INSTANCE_IDENTITY_DOMAIN, DEFAULT_INSTANCE_IDENTITY_DOMAIN);
}
}
@@ -26,6 +26,7 @@
import com.netflix.priam.aws.auth.EC2RoleAssumptionCredential;
import com.netflix.priam.aws.auth.IS3Credential;
import com.netflix.priam.aws.auth.S3RoleAssumptionCredential;
import com.netflix.priam.aws.IAMCredential;
import com.netflix.priam.backup.BackupFileSystemContext;
import com.netflix.priam.backup.IBackupFileSystem;
import com.netflix.priam.backup.IBackupMetrics;
@@ -63,7 +64,7 @@ protected void configure() {
bind(IFileCryptography.class).annotatedWith(Names.named("filecryptoalgorithm")).to(PgpCryptography.class);
bind(ICredentialGeneric.class).annotatedWith(Names.named("gcscredential")).to(GcsCredential.class);
bind(ICredentialGeneric.class).annotatedWith(Names.named("pgpcredential")).to(PgpCredential.class);
bind(ICredential.class).to(ClearCredential.class);
bind(ICredential.class).to(IAMCredential.class);
bind(IDeadTokenRetriever.class).to(DeadTokenRetriever.class);
bind(IPreGeneratedTokenRetriever.class).to(PreGeneratedTokenRetriever.class);
bind(INewTokenRetriever.class).to(NewTokenRetriever.class);
@@ -57,8 +57,10 @@ public void writeAllProperties(String yamlLocation, String hostname, String seed
map.put("rpc_port", config.getThriftPort());
map.put("start_native_transport", config.isNativeTransportEnabled());
map.put("native_transport_port", config.getNativeTransportPort());
map.put("listen_address", hostname);
map.put("rpc_address", hostname);
if (hostname != null) {
map.put("listen_address", hostname);
map.put("rpc_address", hostname);
}
//Dont bootstrap in restore mode
if (!Restore.isRestoreEnabled(config)) {
map.put("auto_bootstrap", config.getAutoBoostrap());
@@ -10,19 +10,27 @@ priam.backup.retention=
priam.backup.threads=2
priam.bootcluster=
priam.cache.location=/var/lib/cassandra/saved_caches
priam.cass.home=/mnt/cassandra
priam.cass.home=/etc/cassandra
priam.cass.manual.start.enable=
priam.cass.process=
priam.cass.startscript=/mnt/cassandra/bin/cassandra
priam.cass.stopscript=/mnt/cassandra/bin/cassandra
priam.cass.startscript=/etc/init.d/cassandra start
priam.cass.stopscript=/etc/init.d/cassandra stop
priam.clustername=cass_cluster
priam.commitlog.location=/var/lib/cassandra/commitlog
priam.compaction.throughput=
priam.data.location=/var/lib/cassandra/data
priam.direct.memory.size.m1.large=1G
priam.endpoint_snitch=org.apache.cassandra.locator.Ec2Snitch
priam.heap.newgen.size.m1.large=2G
priam.heap.size.m1.large=4G
priam.heap.size.m1.large=1G
priam.heap.newgen.size.t2.small=1G
priam.heap.size.t2.small=1G
priam.heap.newgen.size.t2.micro=512M
priam.heap.size.t2.micro=512M
priam.heap.newgen.size.m3.medium=1G
priam.heap.size.m3.medium=1G
priam.heap.newgen.size.m3.large=1G
priam.heap.size.m3.large=1G
priam.hint.delay=
priam.hint.window=
priam.jmx.port=7199
@@ -53,3 +61,4 @@ priam.thrift.port=9160
priam.upload.throttle=
priam.yamlLocation=
priam.zones.available=
priam.nativeTransport.enabled=true
@@ -883,4 +883,9 @@ public SchedulerType getFlushSchedulerType() throws UnsupportedTypeException {
public String getFlushCronExpression() {
return null;
}
@Override
public String getInstanceIdentityDomain() {
    // Hard-coded to the production default domain name.
    // NOTE(review): this appears to be a test/fake configuration — confirm.
    return "InstanceIdentity";
}
}
ProTip! Use n and p to navigate between commits in a pull request.