When refreshing after a reboot, force lock allocation
After a reboot, when we refreshed Podman's state, we retrieved the
lock from the fresh SHM instance, but we did not mark it as
allocated, so nothing prevented it from being handed out to other
containers and pods.

Provide a method for marking locks as in-use, and use it when we
refresh Podman state after a reboot.

Fixes containers#2900

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
mheon committed May 6, 2019
1 parent ff260f0 commit 416a165
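
To make the failure mode concrete, here is a minimal sketch against the in-memory lock manager (the SHM-backed manager used on Linux behaves the same way). The four-lock count and the container scenario are hypothetical; the calls are the Manager API from libpod/lock, and the import path assumes the containers/libpod tree as of this commit.

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	// Hypothetical post-reboot state: all locks start out unallocated,
	// but an existing container still records lock ID 0 in its config.
	m, err := lock.NewInMemoryManager(4)
	if err != nil {
		panic(err)
	}

	// Pre-fix refresh(): RetrieveLock returns the lock but does NOT
	// mark it as allocated.
	c1Lock, _ := m.RetrieveLock(0)

	// A newly created container asks for a fresh lock and is handed
	// lock 0 as well -- two containers now share one lock.
	c2Lock, _ := m.AllocateLock()
	fmt.Println(c1Lock.ID() == c2Lock.ID()) // true: the bug

	// Post-fix refresh(): AllocateAndRetrieveLock marks ID 1 as in
	// use, so the next AllocateLock call skips it.
	c3Lock, _ := m.AllocateAndRetrieveLock(1)
	c4Lock, _ := m.AllocateLock()
	fmt.Println(c3Lock.ID(), c4Lock.ID()) // 1 2: no collision
}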
Showing 8 changed files with 115 additions and 2 deletions.
2 changes: 1 addition & 1 deletion libpod/container_internal.go
@@ -516,7 +516,7 @@ func (c *Container) refresh() error {
}

// We need to pick up a new lock
- lock, err := c.runtime.lockManager.RetrieveLock(c.config.LockID)
+ lock, err := c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID)
if err != nil {
return errors.Wrapf(err, "error acquiring lock for container %s", c.ID())
}
16 changes: 16 additions & 0 deletions libpod/lock/in_memory_locks.go
@@ -90,6 +90,22 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}

// AllocateAndRetrieveLock allocates a lock with the given ID (if not already in
// use) and returns it.
func (m *InMemoryManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
if id >= m.numLocks {
return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks)
}

if m.locks[id].allocated {
return nil, errors.Errorf("given lock ID %d is already in use, cannot reallocate", id)
}

m.locks[id].allocated = true

return m.locks[id], nil
}

// FreeAllLocks frees all locks.
// This function is DANGEROUS. Please read the full comment in locks.go before
// trying to use it.
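For reference, a short sketch of the two error paths the new in-memory method guards against, reusing the hypothetical four-lock manager from the sketch above:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	m, _ := lock.NewInMemoryManager(4) // hypothetical four-lock manager

	// Valid indexes are 0 through 3, so ID 4 is rejected outright.
	if _, err := m.AllocateAndRetrieveLock(4); err != nil {
		fmt.Println(err) // "given lock ID 4 is too large ..."
	}

	// The first call for ID 2 succeeds and marks the lock in use...
	if _, err := m.AllocateAndRetrieveLock(2); err != nil {
		panic(err)
	}
	// ...so a second call for the same ID fails instead of handing
	// the lock out twice.
	if _, err := m.AllocateAndRetrieveLock(2); err != nil {
		fmt.Println(err) // "given lock ID 2 is already in use ..."
	}
}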
7 changes: 7 additions & 0 deletions libpod/lock/lock.go
@@ -24,6 +24,13 @@ type Manager interface {
// The underlying lock MUST be the same as any other lock with the
// same UUID.
RetrieveLock(id uint32) (Locker, error)
// AllocateAndRetrieveLock marks the lock with the given UUID as in use
// and retrieves it.
// AllocateAndRetrieveLock will error if the lock in question has
// already been allocated.
// This is mostly used after a system restart to repopulate the list of
// locks in use.
AllocateAndRetrieveLock(id uint32) (Locker, error)
// PLEASE READ FULL DESCRIPTION BEFORE USING.
// FreeAllLocks frees all allocated locks, in preparation for lock
// reallocation.
55 changes: 55 additions & 0 deletions libpod/lock/shm/shm_lock.c
@@ -354,6 +354,61 @@ int64_t allocate_semaphore(shm_struct_t *shm) {
return -1 * ENOSPC;
}

// Allocate the semaphore with the given ID.
// Returns an error if the semaphore with this ID does not exist, or has already
// been allocated.
// Returns 0 on success, or negative errno values on failure.
int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index) {
int bitmap_index, index_in_bitmap, ret_code;
bitmap_t test_map;

if (shm == NULL) {
return -1 * EINVAL;
}

// Check if the lock index is valid
if (sem_index >= shm->num_locks) {
return -1 * EINVAL;
}

bitmap_index = sem_index / BITMAP_SIZE;
index_in_bitmap = sem_index % BITMAP_SIZE;

// This should never happen if the sem_index test above succeeded, but better
// safe than sorry
if (bitmap_index >= shm->num_bitmaps) {
return -1 * EFAULT;
}

test_map = 0x1 << index_in_bitmap;

// Lock the mutex controlling access to our shared memory
ret_code = take_mutex(&(shm->segment_lock));
if (ret_code != 0) {
return -1 * ret_code;
}

// Check if the semaphore is allocated
if ((test_map & shm->locks[bitmap_index].bitmap) != 0) {
ret_code = release_mutex(&(shm->segment_lock));
if (ret_code != 0) {
return -1 * ret_code;
}

return -1 * EEXIST;
}

// The semaphore is not allocated, allocate it
shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap | test_map;

ret_code = release_mutex(&(shm->segment_lock));
if (ret_code != 0) {
return -1 * ret_code;
}

return 0;
}

// Deallocate a given semaphore
// Returns 0 on success, negative ERRNO values on failure
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
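The bitmap arithmetic above is the same scheme allocate_semaphore uses when scanning for a free lock: each lock is one bit in an array of fixed-size bitmaps. A worked example in Go, assuming the 32-bit bitmaps defined in shm_lock.h (the lock ID 37 is arbitrary):

package main

import "fmt"

const bitmapSize = 32 // mirrors BITMAP_SIZE in shm_lock.h (assumed 32-bit bitmaps)

func main() {
	semIndex := uint32(37) // hypothetical lock ID

	bitmapIndex := semIndex / bitmapSize   // 1: the second bitmap
	indexInBitmap := semIndex % bitmapSize // 5: the sixth bit
	testMap := uint32(1) << indexInBitmap  // mask 0x20

	// allocate_given_semaphore fails with EEXIST if this bit is already
	// set, and otherwise ORs the mask into the bitmap to mark lock 37
	// as taken.
	fmt.Printf("bitmap %d, bit %d, mask %#x\n", bitmapIndex, indexInBitmap, testMap)
}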
17 changes: 17 additions & 0 deletions libpod/lock/shm/shm_lock.go
@@ -134,6 +134,23 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
return uint32(retCode), nil
}

// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
// segment for use by a container or pod.
// If the semaphore is already in use or the index is invalid, an error will
// be returned.
func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
if !locks.valid {
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
}

retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
if retCode < 0 {
return syscall.Errno(-1 * retCode)
}

return nil
}

// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
// reallocated to another container or pod.
// The given semaphore must be already allocated, or an error will be returned.
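Because the wrapper converts negative C return codes directly into syscall.Errno values, callers can branch on the specific failure. A minimal sketch, under the assumption that the locks value comes from this package's OpenSHMLock or CreateSHMLock constructors (allocateExisting and lockutil are hypothetical names):

package lockutil

import (
	"syscall"

	"github.com/containers/libpod/libpod/lock/shm"
)

// allocateExisting re-marks a known lock ID as in use, e.g. while
// repopulating lock state after a reboot.
func allocateExisting(locks *shm.SHMLocks, id uint32) error {
	err := locks.AllocateGivenSemaphore(id)
	switch err {
	case syscall.EEXIST:
		// Bit already set: another container or pod owns this lock.
	case syscall.EINVAL:
		// Index out of range for this SHM segment. (The closed-segment
		// error is wrapped, so it does not compare equal here.)
	}
	return err
}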
1 change: 1 addition & 0 deletions libpod/lock/shm/shm_lock.h
@@ -39,6 +39,7 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code);
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
17 changes: 17 additions & 0 deletions libpod/lock/shm_lock_manager_linux.go
@@ -57,6 +57,23 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
return lock, nil
}

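// AllocateAndRetrieveLock allocates the lock with the given ID (if it is
// not already in use) and returns it.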
func (m *SHMLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
lock := new(SHMLock)
lock.lockID = id
lock.manager = m

if id >= m.locks.GetMaxLocks() {
return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - maximum lock ID is %d",
id, m.locks.GetMaxLocks()-1)
}

if err := m.locks.AllocateGivenSemaphore(id); err != nil {
return nil, err
}

return lock, nil
}

// RetrieveLock retrieves a lock from the manager given its ID.
func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
lock := new(SHMLock)
2 changes: 1 addition & 1 deletion libpod/pod_internal.go
@@ -56,7 +56,7 @@ func (p *Pod) refresh() error {
}

// Retrieve the pod's lock
- lock, err := p.runtime.lockManager.RetrieveLock(p.config.LockID)
+ lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID)
if err != nil {
return errors.Wrapf(err, "error retrieving lock for pod %s", p.ID())
}