diff --git a/.github/workflows/codestyle.yml b/.github/workflows/codestyle.yml index 6c0577a093ce..3cb568999b39 100644 --- a/.github/workflows/codestyle.yml +++ b/.github/workflows/codestyle.yml @@ -27,7 +27,7 @@ jobs: # depth 2 so: # ^1. we can show the Subject of the current target branch tip # ^2. we reconnect/graft to the later fetch pull/1234/head, - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: {fetch-depth: 2} - name: install codespell @@ -50,7 +50,12 @@ jobs: yamllint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: run yamllint - run: yamllint --strict .github/workflows/*.yml + # Quoting to please all parsers is hard. This indirection helps. + env: + yamllint_config: '{extends: default, rules: {line-length: {max: 100}}}' + run: yamllint -f parsable + -d "$yamllint_config" + --strict .github/workflows/*.yml diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 178c090f89d9..2b0601d9ef37 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -28,7 +28,7 @@ jobs: ] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 # From time to time this will catch a git tag and change SOF_VERSION with: {fetch-depth: 50, submodules: recursive} diff --git a/.github/workflows/ipc_fuzzer.yml b/.github/workflows/ipc_fuzzer.yml deleted file mode 100644 index 036cbc82f37e..000000000000 --- a/.github/workflows/ipc_fuzzer.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- - -# For the actual fuzzer see tools/oss-fuzz/README. -# also see -# https://google.github.io/oss-fuzz/getting-started/continuous-integration/ -# -# Build and run fuzzer for 5s just to check that it runs properly. If it -# consistently fails in under 5s you probably did something wrong - -# If you came here to quickly copy/paste the invocation of some build -# script in order to reproduce a failure reported by github then you -# will be disappointed by the Github Action below: it's magical. For a -# thorough reproduction you must follow the links above. For a quick, -# dirty and incomplete reproduction hack you can try the following two -# lines. Don't do this at home. 
-# -# OUT=unused_dir cmake -B oss-fuzz-build/ -S tools/oss-fuzz/ -# make -j -C oss-fuzz-build sof_ep fuzz_ipc.o - -name: IPC fuzzer compile test - -# 'workflow_dispatch' allows running this workflow manually from the -# 'Actions' tab - -# yamllint disable-line rule:truthy -on: [pull_request, workflow_dispatch] - -jobs: - ipc-fuzzer-build: - runs-on: ubuntu-latest - steps: - - name: Build Fuzzers - uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master - with: - oss-fuzz-project-name: 'sound-open-firmware' - - - name: Run Fuzzers - uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master - with: - oss-fuzz-project-name: 'sound-open-firmware' - language: c - fuzz-seconds: 5 diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index 01639e699845..3a06f122765a 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -40,7 +40,7 @@ jobs: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: apt get doxygen graphviz run: sudo apt-get -y install ninja-build doxygen graphviz @@ -58,7 +58,7 @@ jobs: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: {fetch-depth: 5} - name: apt get valgrind @@ -83,6 +83,26 @@ jobs: run: ./scripts/host-testbench.sh + # This is a bit redundant with the other jobs below and with the (much + # faster!) installer[.yml] but it may differ in which platforms are + # built. This makes sure platforms without any open-source toolchain + # are added in the right place and do not accidentally break the -a + # option, Docker testing etc. + gcc-build-default-platforms: + runs-on: ubuntu-22.04 + + steps: + - uses: actions/checkout@v3 + with: {fetch-depth: 5, submodules: recursive} + + - name: docker + run: docker pull thesofproject/sof && docker tag thesofproject/sof sof + + - name: xtensa-build-all.sh -a + run: ./scripts/docker-run.sh ./scripts/xtensa-build-all.sh -a || + ./scripts/docker-run.sh ./scripts/xtensa-build-all.sh -a -j 1 + + gcc-build-only: runs-on: ubuntu-20.04 @@ -101,13 +121,13 @@ jobs: steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: {fetch-depth: 0, submodules: recursive} - name: docker run: docker pull thesofproject/sof && docker tag thesofproject/sof sof - - name: xtensa-build-all + - name: xtensa-build-all.sh platforms env: PLATFORM: ${{ matrix.platform }} run: ./scripts/docker-run.sh @@ -136,7 +156,7 @@ jobs: steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: {fetch-depth: 0, submodules: recursive} - name: turn off HAVE_AGENT @@ -146,7 +166,7 @@ jobs: - name: docker SOF run: docker pull thesofproject/sof && docker tag thesofproject/sof sof - - name: xtensa-build-all -o no-agent + - name: xtensa-build-all.sh -o no-agent platforms env: PLATFORM: ${{ matrix.platform }} run: ./scripts/docker-run.sh diff --git a/.github/workflows/repro-build.yml b/.github/workflows/repro-build.yml index c3090154b608..4325467123ce 100644 --- a/.github/workflows/repro-build.yml +++ b/.github/workflows/repro-build.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: {fetch-depth: 5, submodules: recursive} - name: docker pull diff --git a/.github/workflows/tools.yml b/.github/workflows/tools.yml index 5be87125a8ba..670d67ad9627 100644 --- a/.github/workflows/tools.yml +++ b/.github/workflows/tools.yml @@ -12,7 +12,7 @@ jobs: top-level_default_CMake_target_ALL: runs-on: ubuntu-latest steps: - - uses: 
actions/checkout@v2 + - uses: actions/checkout@v3 # The ALSA version in Ubuntu 20.04 is buggy # (https://github.com/thesofproject/sof/issues/2543) and likely diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 9bbe004092ea..bc1619fda3b5 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -14,7 +14,7 @@ jobs: cmocka_utests: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: {fetch-depth: 2} - name: build and run all defconfigs diff --git a/.github/workflows/zephyr.yml b/.github/workflows/zephyr.yml index 79be79fb4a9b..737313368204 100644 --- a/.github/workflows/zephyr.yml +++ b/.github/workflows/zephyr.yml @@ -11,15 +11,23 @@ jobs: zephyr-build: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 # From time to time this will catch a git tag and change SOF_VERSION with: {fetch-depth: 10, submodules: recursive} + # v0.23.4 is the last image with Zephyr SDK 0.14. + # SDK 0.15 fails with the following Werror: + # /workdir/zephyrproject/zephyr/include/zephyr/kernel/thread_stack.h:190:16: + # error: ignoring attribute 'section (".cached.\"WEST_TOPDIR/zephyr/kernel/init.c\".3")' + # because it conflicts with previous 'section + # (".cached.\"WEST_TOPDIR/zephyr/arch/xtensa/include/kernel_arch_func.h\"' + # [-Werror=attributes] + # https://github.com/zephyrproject-rtos/docker-image # Note: env variables can be passed to the container with # -e https_proxy=... - name: build run: docker run -v "$(pwd)":/workdir - ghcr.io/zephyrproject-rtos/zephyr-build:latest + ghcr.io/zephyrproject-rtos/zephyr-build:v0.23.4 ./zephyr/docker-build.sh --cmake-args=-DEXTRA_CFLAGS=-Werror --cmake-args=--warn-uninitialized -a diff --git a/scripts/docker-run.sh b/scripts/docker-run.sh index 96034cf27359..4b3d64fe1c12 100755 --- a/scripts/docker-run.sh +++ b/scripts/docker-run.sh @@ -25,12 +25,16 @@ if tty --quiet; then SOF_DOCKER_RUN="$SOF_DOCKER_RUN --tty" fi -# Not fatal, just a warning to allow other "creative" solutions. -# TODO: fix this with 'adduser' like in zephyr/docker-build.sh -test "$(id -n)" = 1001 || - >&2 printf "Warning: this script should be run as user ID 1001 to match the container\n" +# The --user option below can cause the command to run as a user who +# does not exist in the container. So far so good but in case something +# ever goes wrong try replacing --user with the newer +# scripts/sudo-cwd.sh script. +test "$(id -u)" = 1000 || + >&2 printf "Warning: this script should be run as user ID 1000 to match the container's account\n" set -x +# FIXME: During the transition to sudo-cwd.sh, the tag will be "latest_ubuntu22.04". +# Later it will be back to latest docker run -i -v "${SOF_TOP}":/home/sof/work/sof.git \ -v "${SOF_TOP}":/home/sof/work/sof-bind-mount-DO-NOT-DELETE \ --env CMAKE_BUILD_TYPE \ @@ -40,6 +44,5 @@ docker run -i -v "${SOF_TOP}":/home/sof/work/sof.git \ --env VERBOSE \ --env http_proxy="$http_proxy" \ --env https_proxy="$https_proxy" \ - --user "$(id -u)" \ $SOF_DOCKER_RUN \ - thesofproject/sof "$@" + thesofproject/sof:latest_ubuntu22.04 ./scripts/sudo-cwd.sh "$@" diff --git a/scripts/sudo-cwd.sh b/scripts/sudo-cwd.sh new file mode 100755 index 000000000000..2823cf2f6c73 --- /dev/null +++ b/scripts/sudo-cwd.sh @@ -0,0 +1,77 @@ +#!/bin/sh +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2022 Intel Corporation. All rights reserved. 
+
+# This is a "brute force" solution to filesystem permission issues:
+#
+# If the current user does not own the current directory then this
+# wrapper script switches to the user who does own the current directory
+# before running the given command.
+
+# If no user in the container owns the current directory, one that does
+# gets created first!
+
+# The main use case is to run this first thing inside a container to
+# solve file ownership mismatches.
+
+# `docker run --user=$(id -u) ...` achieves something very similar
+# without any code, except that the resulting user may not exist inside
+# the container. Some commands may not like that.
+#
+# To understand more about the Docker problem solved here, take a look at
+# https://stackoverflow.com/questions/35291520/docker-and-userns-remap-how-to-manage-volume-permissions-to-share-data-betwee
+# and many other similar questions.
+
+# TODO: replace sudo with gosu?
+
+set -e
+set -x
+
+main()
+{
+    cwd_uid="$(stat --printf='%u' .)"
+    local current_uid; current_uid="$(id -u)"
+    if test "$current_uid" = "$cwd_uid"; then
+        exec "$@"
+    else
+        exec_as_cwd_uid "$@"
+    fi
+}
+
+exec_as_cwd_uid()
+{
+    # If missing, add a new user owning the current directory
+    local cwd_user; cwd_user="$(id -un "$cwd_uid")" || {
+        cwd_user='cwd_user'
+
+        local cwd_guid; cwd_guid="$(stat --printf='%g' .)"
+
+        getent group "$cwd_guid" ||
+            sudo groupadd -g "$cwd_guid" 'cwd_group'
+
+        sudo useradd -m -u "$cwd_uid" -g "$cwd_guid" "$cwd_user"
+
+        local current_user; current_user="$(id -un)"
+
+        # Copy sudo permissions just in case the build needs them
+        if test -e /etc/sudoers.d/"$current_user"; then
+            sudo sed -e "s/$current_user/$cwd_user/" /etc/sudoers.d/"$current_user" |
+                sudo tee -a /etc/sudoers.d/"$cwd_user"
+            sudo chmod --reference=/etc/sudoers.d/"$current_user" \
+                /etc/sudoers.d/"$cwd_user"
+        fi
+    }
+
+    # Double sudo to work around some funny restriction in
+    # zephyr-build:/etc/sudoers: 'user' can do anything but... only as
+    # root.
+    # Passing empty http[s]_proxy is OK
+    # shellcheck disable=SC2154
+    sudo sudo -u "$cwd_user" REAL_CC="$REAL_CC" \
+        http_proxy="$http_proxy" https_proxy="$https_proxy" \
+        "$@"
+
+    exit "$?"
+}
+
+main "$@"
diff --git a/tools/topology/topology1/sof-cavs-nocodec.m4 b/tools/topology/topology1/sof-cavs-nocodec.m4
index 1efa0dddf109..40e563eba400 100644
--- a/tools/topology/topology1/sof-cavs-nocodec.m4
+++ b/tools/topology/topology1/sof-cavs-nocodec.m4
@@ -115,17 +115,19 @@ dnl time_domain, sched_comp)
 
 # Volume switch capture pipeline 2 on PCM 0 using max 2 channels of PIPE_BITS.
 # Set 1000us deadline on core SSP0_CORE_ID with priority 0
-PIPELINE_PCM_ADD(sof/pipe-volume-switch-capture.m4,
+ifdef(`DISABLE_SSP0',,
+	`PIPELINE_PCM_ADD(sof/pipe-volume-switch-capture.m4,
 	2, 0, 2, PIPE_BITS,
 	1000, 0, SSP0_CORE_ID,
-	48000, 48000, 48000)
+	48000, 48000, 48000)')
 
 # Volume switch capture pipeline 4 on PCM 1 using max 2 channels of PIPE_BITS.
 # Set 1000us deadline on core SSP1_CORE_ID with priority 0
-PIPELINE_PCM_ADD(sof/pipe-volume-switch-capture.m4,
+ifdef(`DISABLE_SSP1',,
+	`PIPELINE_PCM_ADD(sof/pipe-volume-switch-capture.m4,
 	4, 1, 2, PIPE_BITS,
 	1000, 0, SSP1_CORE_ID,
-	48000, 48000, 48000)
+	48000, 48000, 48000)')
 
 # Volume switch capture pipeline 6 on PCM 2 using max 2 channels of PIPE_BITS.
# Set 1000us deadline with priority 0 on core SSP2_CORE_ID @@ -146,19 +148,21 @@ dnl deadline, priority, core, time_domain) # playback DAI is SSP0 using 2 periods # Buffers use DAI_BITS format, 1000us deadline with priority 0 on core SSP0_CORE_ID # The 'NOT_USED_IGNORED' is due to dependencies and is adjusted later with an explicit dapm line. -DAI_ADD(sof/pipe-mixer-volume-dai-playback.m4, +ifdef(`DISABLE_SSP0',, + `DAI_ADD(sof/pipe-mixer-volume-dai-playback.m4, 1, SSP, SSP0_IDX, NoCodec-0, NOT_USED_IGNORED, 2, DAI_BITS, - 1000, 0, SSP0_CORE_ID, SCHEDULE_TIME_DOMAIN_TIMER, 2, 48000) + 1000, 0, SSP0_CORE_ID, SCHEDULE_TIME_DOMAIN_TIMER, 2, 48000)') # Low Latency playback pipeline 1 on PCM 0 using max 2 channels of PIPE_BITS. # Set 1000us deadline on core SSP0_CORE_ID with priority 0 -PIPELINE_PCM_ADD(sof/pipe-host-volume-playback.m4, +ifdef(`DISABLE_SSP0',, + `PIPELINE_PCM_ADD(sof/pipe-host-volume-playback.m4, 7, 0, 2, PIPE_BITS, 1000, 0, SSP0_CORE_ID, 48000, 48000, 48000, SCHEDULE_TIME_DOMAIN_TIMER, - PIPELINE_PLAYBACK_SCHED_COMP_1) + PIPELINE_PLAYBACK_SCHED_COMP_1)') # Deep buffer playback pipeline 11 on PCM 3 using max 2 channels of PIPE_BITS. # Set 1000us deadline on core SSP0_CORE_ID with priority 0. @@ -173,33 +177,37 @@ ifelse(PLATFORM, `bxt', `', # capture DAI is SSP0 using 2 periods # Buffers use DAI_BITS format, 1000us deadline with priority 0 on core SSP0_IDX -DAI_ADD(sof/pipe-dai-capture.m4, +ifdef(`DISABLE_SSP0',, + `DAI_ADD(sof/pipe-dai-capture.m4, 2, SSP, SSP0_IDX, NoCodec-0, PIPELINE_SINK_2, 2, DAI_BITS, - 1000, 0, SSP0_CORE_ID, SCHEDULE_TIME_DOMAIN_TIMER) + 1000, 0, SSP0_CORE_ID, SCHEDULE_TIME_DOMAIN_TIMER)') # playback DAI is SSP1 using 2 periods # Buffers use DAI_BITS format, 1000us deadline with priority 0 on core SSP1_CORE_ID -DAI_ADD(sof/pipe-mixer-volume-dai-playback.m4, +ifdef(`DISABLE_SSP1',, + `DAI_ADD(sof/pipe-mixer-volume-dai-playback.m4, 3, SSP, SSP1_IDX, NoCodec-1, NOT_USED_IGNORED, 2, DAI_BITS, - 1000, 0, SSP1_CORE_ID, SCHEDULE_TIME_DOMAIN_TIMER, 2, 48000) + 1000, 0, SSP1_CORE_ID, SCHEDULE_TIME_DOMAIN_TIMER, 2, 48000)') # Low Latency playback pipeline 8 on PCM 1 using max 2 channels of PIPE_BITS. # Set 1000us deadline on core SSP1_CORE_ID with priority 0 -PIPELINE_PCM_ADD(sof/pipe-host-volume-playback.m4, +ifdef(`DISABLE_SSP1',, + `PIPELINE_PCM_ADD(sof/pipe-host-volume-playback.m4, 8, 1, 2, PIPE_BITS, 1000, 0, SSP1_CORE_ID, 48000, 48000, 48000, SCHEDULE_TIME_DOMAIN_TIMER, - PIPELINE_PLAYBACK_SCHED_COMP_3) + PIPELINE_PLAYBACK_SCHED_COMP_3)') # capture DAI is SSP1 using 2 periods # Buffers use DAI_BITS format, 1000us deadline with priority 0 on core SSP1_CORE_ID -DAI_ADD(sof/pipe-dai-capture.m4, +ifdef(`DISABLE_SSP1',, + `DAI_ADD(sof/pipe-dai-capture.m4, 4, SSP, SSP1_IDX, NoCodec-1, PIPELINE_SINK_4, 2, DAI_BITS, - 1000, 0, SSP1_CORE_ID, SCHEDULE_TIME_DOMAIN_TIMER) + 1000, 0, SSP1_CORE_ID, SCHEDULE_TIME_DOMAIN_TIMER)') # playback DAI is SSP2 using 2 periods # Buffers use DAI_BITS format, 1000us deadline with priority 0 on core SSP2_CORE_ID @@ -240,8 +248,8 @@ SectionGraph."mixer-host" { lines [ # connect mixer dai pipelines to PCM pipelines - dapm(PIPELINE_MIXER_1, PIPELINE_SOURCE_7) - dapm(PIPELINE_MIXER_3, PIPELINE_SOURCE_8) + ifdef(`DISABLE_SSP0',,`dapm(PIPELINE_MIXER_1, PIPELINE_SOURCE_7)') + ifdef(`DISABLE_SSP1',, `dapm(PIPELINE_MIXER_3, PIPELINE_SOURCE_8)') dapm(PIPELINE_MIXER_5, PIPELINE_SOURCE_9) ifelse(PLATFORM, `bxt', `dapm(PIPELINE_MIXER_5, PIPELINE_SOURCE_11)',
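A note on the new guards: GNU m4's ifdef() takes the form ifdef(`NAME', `when-defined', `when-undefined'), so the empty second argument above makes each guarded PIPELINE_PCM_ADD/DAI_ADD/dapm entry expand to nothing once DISABLE_SSP0 or DISABLE_SSP1 is defined (presumably via an m4 -D option in the topology build) and leaves it unchanged otherwise. A quick stand-alone way to check that behaviour on any host with GNU m4; the sample text is made up, only the ifdef pattern is taken from the topology:

    # DISABLE_SSP0 undefined: the guarded text survives
    echo "ifdef(\`DISABLE_SSP0',, \`SSP0 pipelines would be added')" | m4
    # DISABLE_SSP0 defined: the guarded text expands to nothing (blank line)
    echo "ifdef(\`DISABLE_SSP0',, \`SSP0 pipelines would be added')" | m4 -DDISABLE_SSP0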
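The docker-run.sh warning and the new sudo-cwd.sh wrapper are both built around the same question: does the caller's UID match the owner of the bind-mounted current directory? A stripped-down sketch of that check (GNU coreutils stat, as used by the script itself; the echo messages are only illustration):

    cwd_uid="$(stat --printf='%u' .)"   # numeric owner of the current directory
    cur_uid="$(id -u)"                  # numeric UID of the caller
    if [ "$cur_uid" = "$cwd_uid" ]; then
        echo "UIDs match: the command can run directly"
    else
        echo "UID mismatch ($cur_uid vs $cwd_uid): sudo-cwd.sh re-runs the command as UID $cwd_uid"
    fi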
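For completeness, the reworked yamllint step at the top is equivalent to running the command below locally from the top of the tree, with the same inline config string the workflow keeps in the yamllint_config environment variable (this assumes yamllint is installed on the host):

    yamllint_config='{extends: default, rules: {line-length: {max: 100}}}'
    yamllint -f parsable -d "$yamllint_config" --strict .github/workflows/*.yml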