ec2_vpc_route_table not updating? #1674
Comments
@erydo, @wimnat, ping. This issue is waiting on your response. |
I have a pretty much identical playbook and can confirm this is still happening on Ansible 2.1.1.0. Ansible reports a change on the route table when the instance ID of the NAT instance has changed, but in fact nothing changes. Playbook execution:
{
"changed": true,
"instance_ids": ["i-0891a50ddd61a7006"],
"instances": [{
"ami_launch_index": "0",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/xvda": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-0c1554c87a29e950d"
}
},
"dns_name": "ec2-52-57-28-154.eu-central-1.compute.amazonaws.com",
"ebs_optimized": false,
"groups": {
"sg-2d87e344": "ssh-in"
},
"hypervisor": "xen",
"id": "i-0891a50ddd61a7006",
"image_id": "ami-ccc021a3",
"instance_type": "t2.micro",
"kernel": null,
"key_name": "gini-dev",
"launch_time": "2016-08-31T16:35:12.000Z",
"placement": "eu-central-1a",
"private_dns_name": "ip-172-31-33-124.eu-central-1.compute.internal",
"private_ip": "172.31.33.124",
"public_dns_name": "ec2-52-57-28-154.eu-central-1.compute.amazonaws.com",
"public_ip": "52.57.28.154",
"ramdisk": null,
"region": "eu-central-1",
"root_device_name": "/dev/xvda",
"root_device_type": "ebs",
"state": "running",
"state_code": 16,
"tags": {
"Environment": "Internal",
"Name": "bastion",
"Role": "Bastion"
},
"tenancy": "default",
"virtualization_type": "hvm"
}],
"invocation": {
"module_args": {
"assign_public_ip": true,
"aws_access_key": null,
"aws_secret_key": null,
"count": 1,
"count_tag": "{'Environment': 'Internal', 'Role': 'Bastion'}",
"ebs_optimized": false,
"ec2_url": null,
"exact_count": 1,
"group": ["ssh-in"],
"group_id": null,
"id": null,
"image": "ami-ccc021a3",
"instance_ids": null,
"instance_profile_name": null,
"instance_tags": {
"Environment": "Internal",
"Name": "bastion",
"Role": "Bastion"
},
"instance_type": "t2.micro",
"kernel": null,
"key_name": "gini-dev",
"monitoring": false,
"network_interfaces": null,
"placement_group": null,
"private_ip": null,
"profile": null,
"ramdisk": null,
"region": "eu-central-1",
"security_token": null,
"source_dest_check": false,
"spot_launch_group": null,
"spot_price": null,
"spot_type": "one-time",
"spot_wait_timeout": "600",
"state": "present",
"tenancy": "default",
"termination_protection": false,
"user_data": null,
"validate_certs": true,
"volumes": null,
"vpc_subnet_id": "subnet-943debfc",
"wait": true,
"wait_timeout": "300",
"zone": null
},
"module_name": "ec2"
},
"tagged_instances": [{
"ami_launch_index": "0",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/xvda": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-0c1554c87a29e950d"
}
},
"dns_name": "ec2-52-57-28-154.eu-central-1.compute.amazonaws.com",
"ebs_optimized": false,
"groups": {
"sg-2d87e344": "ssh-in"
},
"hypervisor": "xen",
"id": "i-0891a50ddd61a7006",
"image_id": "ami-ccc021a3",
"instance_type": "t2.micro",
"kernel": null,
"key_name": "gini-dev",
"launch_time": "2016-08-31T16:35:12.000Z",
"placement": "eu-central-1a",
"private_dns_name": "ip-172-31-33-124.eu-central-1.compute.internal",
"private_ip": "172.31.33.124",
"public_dns_name": "ec2-52-57-28-154.eu-central-1.compute.amazonaws.com",
"public_ip": "52.57.28.154",
"ramdisk": null,
"region": "eu-central-1",
"root_device_name": "/dev/xvda",
"root_device_type": "ebs",
"state": "running",
"state_code": 16,
"tags": {
"Environment": "Internal",
"Name": "bastion",
"Role": "Bastion"
},
"tenancy": "default",
"virtualization_type": "hvm"
}]
}
{
"changed": true,
"invocation": {
"module_args": {
"aws_access_key": null,
"aws_secret_key": null,
"ec2_url": null,
"lookup": "tag",
"profile": null,
"propagating_vgw_ids": null,
"region": "eu-central-1",
"route_table_id": null,
"routes": [{
"destination_cidr_block": "0.0.0.0/0",
"instance_id": "i-0891a50ddd61a7006"
}],
"security_token": null,
"state": "present",
"subnets": ["subnet-b103e4cb"],
"tags": {
"Environment": "Internal",
"Name": "private"
},
"validate_certs": true,
"vpc_id": "vpc-xxx"
},
"module_name": "ec2_vpc_route_table"
},
"route_table": {
"id": "rtb-e720d08f",
"routes": [{
"destination_cidr_block": "172.31.0.0/16",
"gateway_id": "local",
"instance_id": null,
"interface_id": null,
"origin": "CreateRouteTable",
"state": "active",
"vpc_peering_connection_id": null
}, {
"destination_cidr_block": "0.0.0.0/0",
"gateway_id": null,
"instance_id": null,
"interface_id": "eni-950925ff",
"origin": "CreateRoute",
"state": "blackhole",
"vpc_peering_connection_id": null
}],
"tags": {
"Environment": "Internal",
"Name": "private"
},
"vpc_id": "vpc-xxx"
}
} Actual state of the route table: {
"RouteTables": [
{
[...]
"Routes": [
{
"GatewayId": "local",
"DestinationCidrBlock": "172.31.0.0/16",
"State": "active",
"Origin": "CreateRouteTable"
},
{
"Origin": "CreateRoute",
"DestinationCidrBlock": "0.0.0.0/0",
"NetworkInterfaceId": "eni-950925ff",
"State": "blackhole"
}
]
}
]
} As the OP mentioned, deleting the black hole route leads to the expected result. |
@erydo, @wimnat, ping. This issue is still waiting on your response. |
Just ran into this, any progress? |
@erydo, @wimnat, ping. This issue is still waiting on your response. |
@erydo, @wimnat, ping. This issue is still waiting on your response. |
1 similar comment
@erydo, @wimnat, ping. This issue is still waiting on your response. |
@erydo, @wimnat, ping. This issue is still waiting on your response. |
This repository has been locked. All new issues and pull requests should be filed in https://github.com/ansible/ansible Please read through the repomerge page in the dev guide. The guide contains links to tools which automatically move your issue or pull request to the ansible/ansible repo. |
Being bitten by this in Ansible 2.3.1.0. |
This issue was moved to ansible/ansible#26664 |
Not sure if I'm doing something wrong, but I can add the routes using the ec2_vpc_route_table module. However, if I terminate the NAT instances that are in the routes (instance_id), a "black hole" appears in the AWS console, and for some reason when I run my playbook again it creates new NATs, gets their instance IDs, and then attempts to apply them to the route table but fails. If I manually delete the "black hole" routes first and then run the playbook, it's fine.
Version:
playbook code:
Error:
Current route table looks like:
The text was updated successfully, but these errors were encountered: