diff --git a/docker/Dockerfile b/docker/Dockerfile
index b6fc19464..f0ed61cff 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -4,6 +4,7 @@ FROM alpine:3.19.1 AS build
 # args
 ARG NZBGET_RELEASE=develop
 ARG UNRAR_VERSION=7.0.7
+ARG UNRAR_NATIVE=false
 ARG MAKE_JOBS=1
 ARG TARGETPLATFORM
 
@@ -48,9 +49,12 @@ RUN \
     curl -o /tmp/unrar.tar.gz -L "https://www.rarlab.com/rar/unrarsrc-${UNRAR_VERSION}.tar.gz" && \
     tar xf /tmp/unrar.tar.gz -C /tmp/unrar --strip-components=1 && \
     cd /tmp/unrar && \
-    if [ "${TARGETPLATFORM}" == "linux/amd64" ]; then sed -i "s|CXXFLAGS=-march=native|CXXFLAGS=-march=x86-64-v2|" makefile; fi && \
-    if [ "${TARGETPLATFORM}" == "linux/arm64" ]; then sed -i "s|CXXFLAGS=-march=native|CXXFLAGS=-march=armv8-a|" makefile; fi && \
-    if [ "${TARGETPLATFORM}" == "linux/arm/v7" ]; then sed -i "s|CXXFLAGS=-march=native|CXXFLAGS=-march=armv7-a|" makefile; fi && \
+    if [ "${UNRAR_NATIVE}" != "true" ] && [ "${TARGETPLATFORM}" == "linux/amd64" ]; \
+    then sed -i "s|CXXFLAGS=-march=native|CXXFLAGS=-march=x86-64-v2|" makefile; fi && \
+    if [ "${UNRAR_NATIVE}" != "true" ] && [ "${TARGETPLATFORM}" == "linux/arm64" ]; \
+    then sed -i "s|CXXFLAGS=-march=native|CXXFLAGS=-march=armv8-a|" makefile; fi && \
+    if [ "${UNRAR_NATIVE}" != "true" ] && [ "${TARGETPLATFORM}" == "linux/arm/v7" ]; \
+    then sed -i "s|CXXFLAGS=-march=native|CXXFLAGS=-march=armv7-a|" makefile; fi && \
     make -j ${MAKE_JOBS} && \
     install -v -m755 unrar /usr/bin
 
diff --git a/docker/README.md b/docker/README.md
index 1b60a85c0..c6a753e8f 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -100,6 +100,7 @@ Dockerfile supports next build arguments:
 |:----------------|-
 | NZBGET_RELEASE | Branch name or tag to build from
 | UNRAR_VERSION | Unrar version
+| UNRAR_NATIVE | Build native unrar (see below)
 | MAKE_JOBS | Number of make jobs for speed up build
 
 # ghcr.io
@@ -117,3 +118,41 @@ In case a linux image or docker image is slower than expected, here are some tip
 1. Increase number of server connections (NEWS-SERVERS -> Connections) - default is 8, and 16 and 32 are worth trying
 2. For slower machines/hosts - increase article read chunk size from 4 to 64 (CONNECTION -> ArticleReadChunkSize). This is new setting available in v23.
 
+# Native unrar build support
+
+Starting with version 7.0, unrar supports hardware crypto acceleration for unpacking encrypted archives.
+For compatibility with most hardware, unrar in the nzbgetcom image is built with the following -march parameters:
+
+- x86_64: x86-64-v2
+- arm64: armv8-a
+- armhf: armv7-a
+
+To build an image with natively optimized unrar on hardware that supports crypto acceleration, use a docker-compose file like the one below (entrypoint.sh and Dockerfile from the [official repository](https://github.com/nzbgetcom/nzbget/tree/develop/docker) are also required):
+
+
+```
+---
+services:
+  nzbget:
+    build:
+      context: .
+      args:
+        # branch to build nzbget from
+        NZBGET_RELEASE: "develop"
+        # make jobs == host cpu cores
+        MAKE_JOBS: 4
+        # build native unrar
+        UNRAR_NATIVE: "true"
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - TZ=Europe/London
+      - NZBGET_USER=nzbget
+      - NZBGET_PASS=tegbzn6789
+    volumes:
+      - ./config:/config
+      - ./downloads:/downloads
+    ports:
+      - 6789:6789
+    restart: unless-stopped
+```
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 54d01e51a..bb57b6c5d 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -6,7 +6,8 @@ services:
       args:
         NZBGET_RELEASE: develop
         MAKE_JOBS: 4
+        UNRAR_NATIVE: "true"
     environment:
       - PUID=1000
       - PGID=1000
       - TZ=Europe/London
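As a minimal sketch of the same build without docker-compose (not part of the patch above), the new argument can also be passed straight to docker build from the docker/ directory; the nzbget-native image tag is only a placeholder:

```
# build the image with natively optimized unrar (image tag is a placeholder)
docker build \
  --build-arg NZBGET_RELEASE=develop \
  --build-arg MAKE_JOBS=4 \
  --build-arg UNRAR_NATIVE=true \
  -t nzbget-native .
```

Because UNRAR_NATIVE=true leaves unrar's default CXXFLAGS=-march=native untouched, the resulting binary is tuned to the CPU of the build host and should be run on the same or a compatible machine.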