Merged: src/cmd-sign (224 additions, 2 deletions)
@@ -7,11 +7,13 @@

import argparse
import gi
+import json
import os
import shutil
import subprocess
import sys
import tempfile
+import time

import boto3

@@ -54,13 +56,14 @@ def parse_args():

    robosig = subparsers.add_parser('robosignatory', help='sign with '
                                    'RoboSignatory via fedora-messaging')
-    robosig.add_argument("--s3", metavar='<BUCKET>[/PREFIX]', required=True,
+    robosig.add_argument("--s3", metavar='<BUCKET>[/PREFIX]', required=False,
                         help="bucket and prefix to S3 builds/ dir")
    robosig.add_argument("--aws-config-file", metavar='CONFIG', default="",
                         help="Path to AWS config file")
    group = robosig.add_mutually_exclusive_group(required=True)
    group.add_argument("--ostree", help="sign commit", action='store_true')
    group.add_argument("--images", help="sign images", action='store_true')
+    group.add_argument("--oci", help="sign OCI images", action='store_true')
    robosig.add_argument("--extra-fedmsg-keys", action='append',
                         metavar='KEY=VAL', default=[],
                         help="extra keys to inject into messages")
@@ -71,6 +74,9 @@ def parse_args():
    robosig.add_argument("--gpgkeypath", help="path to directory containing "
                         "public keys to use for signature verification",
                         default="/etc/pki/rpm-gpg")
+    robosig.add_argument("--s3-sigstore", help="bucket and prefix to S3 sigstore")
+    robosig.add_argument("--manifest-list-digest", metavar="ALGO:DIGEST",
+                         help="digest to manifest list to also sign")
    robosig.add_argument("--verify-only", action='store_true',
                         help="verify only that the sigs are valid and make public")
    robosig.set_defaults(func=cmd_robosignatory)

Review thread on --manifest-list-digest:
  Member: I'm a bit interested to see the update to coreos/fedora-coreos-pipeline#1211 for this one.
  Member (Author): Yup, will update soon!

@@ -82,7 +88,8 @@ def cmd_robosignatory(args):
    if args.aws_config_file:
        os.environ["AWS_CONFIG_FILE"] = args.aws_config_file
    s3 = boto3.client('s3')
-    args.bucket, args.prefix = get_bucket_and_prefix(args.s3)
+    if args.s3:
+        args.bucket, args.prefix = get_bucket_and_prefix(args.s3)

    args.extra_keys = {}
    for keyval in args.extra_fedmsg_keys:
@@ -102,9 +109,17 @@ def cmd_robosignatory(args):
    if args.ostree:
        if args.verify_only:
            raise Exception("Cannot use --verify-only with --ostree")
+        if args.s3 is None:
+            raise Exception("Missing --s3 for --ostree")
        robosign_ostree(args, s3, build, gpgkey)
+    elif args.oci:
+        if args.verify_only:
+            raise Exception("Cannot use --verify-only with --oci")
+        robosign_oci(args, s3, build, gpgkey)
    else:
        assert args.images
+        if args.s3 is None:
+            raise Exception("Missing --s3 for --images")
        robosign_images(args, s3, build, gpgkey)
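
For context, here's a hypothetical invocation of the new OCI path (bucket/prefix and digest values are made up, and this assumes the usual cosa dispatch that exposes src/cmd-sign as `cosa sign`):

    cosa sign robosignatory --oci \
        --s3-sigstore my-bucket/prod/sigstore \
        --manifest-list-digest sha256:0123abcd... \
        --extra-fedmsg-keys stream=stable
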
@@ -291,5 +306,212 @@ def validate_response(response):
    assert response['status'].lower() == 'success', str(response)


def robosign_oci(args, s3, build, gpgkey):
    builds = Builds()

    # Map of {repo:tag -> [digest1, digest2, ...]}. "Identity" is the term used
    # in containers-signature(5) to refer to how users will actually be pulling
    # the image (which is usually by tag).
    identities = {}
    for arch in builds.get_build_arches(args.build):
        build = builds.get_build_meta(args.build, arch)
        image = build.get('base-oscontainer')
        if not image:
            print(f"skipping signing for missing OCI image on {arch}")
            continue

        # We sign for every tag we've pushed as. Note this code makes it seem
        # like we may push to different tags per arch, but that's not the case.
        for tag in image['tags']:
            identity = f"{image['image']}:{tag}"
            identities.setdefault(identity, []).append(image['digest'])

    # For the manifest list digest, reuse the tags from the x86_64 build. As
    # mentioned above, it's the same tags on all arches.
    if args.manifest_list_digest:
        build = builds.get_build_meta(args.build, 'x86_64')
        image = build.get('base-oscontainer')
        for tag in image['tags']:
            identity = f"{image['image']}:{tag}"
            identities[identity].append(args.manifest_list_digest)
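
    # Illustration only (hypothetical digests): for a two-arch build pushed
    # to a single tag plus --manifest-list-digest, `identities` would now be:
    #   {"quay.io/fedora/fedora-coreos:stable": [
    #       "sha256:aaaa...",   # x86_64 image digest
    #       "sha256:bbbb...",   # aarch64 image digest
    #       "sha256:cccc...",   # manifest list digest
    #   ]}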

    # add the git commit of ourselves in the signatures for bookkeeping
    creator = 'coreos-assembler'
    try:
        with open('/cosa/coreos-assembler-git.json') as f:
            cosa_git = json.load(f)
        creator += ' g' + cosa_git['git']['commit'][:12]
    except FileNotFoundError:
        pass
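    # (Illustration: with a hypothetical cosa commit, `creator` would read
    # something like "coreos-assembler g0123456789ab".)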

    with tempfile.TemporaryDirectory(prefix="cosa-sign-", dir="tmp") as d:
        # first, create the payloads to be signed
        files_to_upload = []
        for identity, digests in identities.items():
            for digest in digests:
                # see https://github.com/containers/container-libs/blob/58b82c921fde7dafbc0da766f1037602cfd5553c/image/docs/containers-signature.5.md?plain=1#L110
                data = {
                    "critical": {
                        "identity": {
                            "docker-reference": identity
                        },
                        "image": {
                            "docker-manifest-digest": digest
                        },
                        "type": "atomic container signature"
                    },
                    "optional": {
                        "creator": creator,
                        "timestamp": int(time.time())
                    }
                }

                # Make the filename unique per payload. This is just a
                # temporary name. The final naming and structure will be
                # different.
                filename = str(abs(hash(str(data))))
                path = os.path.join(d, filename)
                with open(path, 'w') as f:
                    # NB: it's important for this to be just one line so that
                    # we don't have to correct for how gpg canonicalizes the
                    # input payload differently when it's cleartext signed
                    # vs detached
                    json.dump(data, f)
                files_to_upload.append({'path': path, 'filename': filename,
                                        'identity': identity, 'digest': digest})
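
        # Illustration only (hypothetical values): each payload file contains
        # a single JSON line like
        #   {"critical": {"identity": {"docker-reference": "quay.io/fedora/fedora-coreos:stable"},
        #    "image": {"docker-manifest-digest": "sha256:aaaa..."},
        #    "type": "atomic container signature"},
        #    "optional": {"creator": "coreos-assembler", "timestamp": 1700000000}}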

        # Upload them to S3. We upload to `staging/` first, and then will move
        # them to their final location once they're verified.
        sigstore_bucket, sigstore_prefix = get_bucket_and_prefix(args.s3_sigstore)
        sigstore_prefix = os.path.join(sigstore_prefix, 'staging')

        # First, empty out staging/ so we don't accumulate cruft over time
        # https://stackoverflow.com/a/59026702
        # Note this assumes we don't run in parallel on the same sigstore
        # target, which is the case for us since only one release job can run
        # at a time per-stream and the S3 target location is stream-based.
        staging_objects = s3.list_objects_v2(Bucket=sigstore_bucket, Prefix=sigstore_prefix)
        objects_to_delete = [{'Key': obj['Key']} for obj in staging_objects.get('Contents', [])]
        if len(objects_to_delete) > 0:
            print(f'Deleting {len(objects_to_delete)} stale files')
            s3.delete_objects(Bucket=sigstore_bucket, Delete={'Objects': objects_to_delete})
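
        # Illustration only: list_objects_v2 returns at most 1000 keys per
        # call, so this one-shot delete assumes staging/ stays small. A
        # hypothetical paginated variant, if that ever stopped holding:
        #   paginator = s3.get_paginator('list_objects_v2')
        #   for page in paginator.paginate(Bucket=sigstore_bucket, Prefix=sigstore_prefix):
        #       objs = [{'Key': o['Key']} for o in page.get('Contents', [])]
        #       if objs:
        #           s3.delete_objects(Bucket=sigstore_bucket, Delete={'Objects': objs})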

        # now, upload the ones we want
        artifacts = []
        for f in files_to_upload:
            s3_key = os.path.join(sigstore_prefix, f['filename'])
            print(f"Uploading s3://{sigstore_bucket}/{s3_key}")
            s3.upload_file(f['path'], sigstore_bucket, s3_key)
            artifacts.append({
                'file': f"s3://{sigstore_bucket}/{s3_key}",
                'checksum': f"sha256:{sha256sum_file(f['path'])}"
            })

        response = send_request_and_wait_for_response(
            request_type='artifacts-sign',
            config=args.fedmsg_conf,
            request_timeout=ROBOSIGNATORY_REQUEST_TIMEOUT_SEC,
            priority=ROBOSIGNATORY_MESSAGE_PRIORITY,
            environment=fedenv,
            body={
                'build_id': args.build,
                # We pass a 'basearch' here even though we're actually bulk
                # signing for all arches in one shot. We can't omit it because
                # Robosignatory logs it; it's not otherwise used.
                'basearch': args.arch,
                'artifacts': artifacts,
                **args.extra_keys
            }
        )

        validate_response(response)

        # download sigs, verify, finalize, and upload to final location
        def gpg(*args):
            subprocess.check_call(['gpg', '--homedir', d, *args])

        gpg('--quiet', '--import', gpgkey)

        sig_counter = {}
        # peel off the '/staging' bit
        final_sigstore_prefix = os.path.dirname(sigstore_prefix)
        for f in files_to_upload:
            stg_s3_key = os.path.join(sigstore_prefix, f['filename'])
            stg_sig_s3_key = stg_s3_key + '.sig'

            tmp_sig_path = os.path.join(d, f['filename'] + '.sig')
            print(f"Downloading s3://{sigstore_bucket}/{stg_sig_s3_key}")
            s3.download_file(sigstore_bucket, stg_sig_s3_key, tmp_sig_path)
            s3.delete_object(Bucket=sigstore_bucket, Key=stg_s3_key)
            s3.delete_object(Bucket=sigstore_bucket, Key=stg_sig_s3_key)

            print(f"Verifying detached signature for {f['path']}")
            try:
                gpg('--verify', tmp_sig_path, f['path'])
            except subprocess.CalledProcessError as e:
                # allow unknown signatures in stg
                if fedenv != 'stg':
                    raise e

            # This is where the magic happens: we merge the detached signature
            # with the original payload to create a cleartext signed message,
            # so it's a single artifact like c/image expects.
            # See also: https://github.com/containers/container-libs/pull/307
            with open(tmp_sig_path, 'rb') as fp:
                armored_sig = subprocess.check_output(['gpg', '--homedir', d, '--enarmor'], input=fp.read())
            armored_sig = str(armored_sig, encoding='utf-8')

            # not strictly required, but looks more like a usual cleartext signature
            armored_sig = armored_sig.replace('ARMORED FILE', 'SIGNATURE')

            with open(f['path'], 'r') as fp:
                original_content = fp.read()

            signed_message = "-----BEGIN PGP SIGNED MESSAGE-----\n"
            # Right now, we assume Robosignatory (really Sigul) uses SHA256;
            # in theory we could parse the signature and get the digest algo
            # that was used, but it seems unlikely that Sigul will change this
            # before it's sunset, at which point we would've already moved on
            # from this code. If it does, here's one way to do it: call `gpg
            # --list-packets` and look for 'digest algo N' and convert N to the
            # right string based on
            # https://github.com/gpg/gnupg/blob/6771ed4c13226ea8f410d022fa83888930070f70/common/openpgpdefs.h#L185
            signed_message += "Hash: SHA256\n\n"
            signed_message += original_content + "\n"
            signed_message += armored_sig

            # just overwrite the original payload; we don't need it anymore
            with open(f['path'], 'w') as fp:
                fp.write(signed_message)
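
            # Illustration only: the file now looks like a standard cleartext
            # signed message (signature body elided):
            #   -----BEGIN PGP SIGNED MESSAGE-----
            #   Hash: SHA256
            #
            #   {"critical": {...}, "optional": {...}}
            #   -----BEGIN PGP SIGNATURE-----
            #   ...
            #   -----END PGP SIGNATURE-----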

print(f"Verifying cleartext signature {f['path']}")
try:
gpg('--verify', f['path'])
except subprocess.CalledProcessError as e:
# allow unknown signatures in stg
if fedenv != 'stg':
raise e

# tell c/image that it's a valid signature
# https://github.com/containers/container-libs/blob/58b82c921fde7dafbc0da766f1037602cfd5553c/image/internal/signature/signature.go#L66
signed_message = b'\x00simple-signing\n' + bytes(signed_message, encoding='utf-8')
with open(f['path'], 'wb') as fp:
fp.write(signed_message)

            image_repo = f['identity']
            # e.g. "quay.io/fedora/fedora-coreos:stable" -> "fedora/fedora-coreos"
            _, image_repo = image_repo.split('/', 1)
            image_repo, _ = image_repo.split(':')

            # we need to match the format in https://github.com/containers/container-libs/blob/310afd427d1eef3bdcfbcf8a2af7cac2021c8a76/image/docker/registries_d.go#L301
            sig_prefix = f"{image_repo}@{f['digest'].replace(':', '=')}"
            sig_number = sig_counter.get(sig_prefix, 0) + 1
            sig_counter[sig_prefix] = sig_number

            # upload to final location and make public
            final_s3_key = os.path.join(final_sigstore_prefix, sig_prefix, f"signature-{sig_number}")
            print(f"Uploading {f['path']} to s3://{sigstore_bucket}/{final_s3_key}")
            s3.upload_file(f['path'], sigstore_bucket, final_s3_key, ExtraArgs={'ACL': 'public-read'})
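
            # Illustration only (hypothetical digest): for identity
            # "quay.io/fedora/fedora-coreos:stable" and digest "sha256:aaaa...",
            # this uploads to "fedora/fedora-coreos@sha256=aaaa.../signature-1"
            # under the final sigstore prefix.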


if __name__ == '__main__':
    sys.exit(main())