diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml
index 7849cb6bb..6e2e64811 100644
--- a/.github/workflows/docker.yaml
+++ b/.github/workflows/docker.yaml
@@ -6,9 +6,9 @@ on:
     tags: [ 'v*' ]
   pull_request:
     # Comment these out to force a test build on a PR
-    branches:
-      - master
-    types: [closed]
+    #branches:
+    #  - master
+    #types: [closed]
 
 env:
   DOCKER_HUB_SLUG: driveone/onedrive
diff --git a/LICENSE b/LICENSE
index 94a9ed024..f288702d2 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,7 +1,7 @@
                     GNU GENERAL PUBLIC LICENSE
                        Version 3, 29 June 2007
 
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.
 
@@ -645,7 +645,7 @@ the "copyright" line and a pointer to where the full notice is found.
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
 
 Also add information on how to contact you by electronic and paper mail.
 
@@ -664,11 +664,11 @@ might be different; for a GUI interface, you would use an "about box".
   You should also get your employer (if you work as a programmer) or school,
 if any, to sign a "copyright disclaimer" for the program, if necessary.
 For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
+<https://www.gnu.org/licenses/>.
 
   The GNU General Public License does not permit incorporating your program
 into proprietary programs.  If your program is a subroutine library, you
 may consider it more useful to permit linking proprietary applications with
 the library.  If this is what you want to do, use the GNU Lesser General
 Public License instead of this License.  But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/Makefile.in b/Makefile.in
index 5f0ad31cb..f68014786 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -34,13 +34,18 @@ DEBUG = @DEBUG@
 DC = @DC@
 DC_TYPE = @DC_TYPE@
 DCFLAGS = @DCFLAGS@
-DCFLAGS += -w -g -O -J.
+DCFLAGS += -w -J.
 ifeq ($(DEBUG),yes)
 ifeq ($(DC_TYPE),dmd)
-DCFLAGS += -debug -gs
+# Add DMD Debugging Flags
+DCFLAGS += -g -debug -gs
 else
-DCFLAGS += -d-debug -gc
+# Add LDC Debugging Flags
+DCFLAGS += -g -d-debug -gc
 endif
+else
+# Only add optimisation flags if debugging is not enabled
+DCFLAGS += -O
 endif
 
 ifeq ($(NOTIFICATIONS),yes)
@@ -55,7 +60,7 @@ endif
 system_unit_files = contrib/systemd/onedrive@.service
 user_unit_files = contrib/systemd/onedrive.service
 
-DOCFILES = README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md docs/application-security.md
+DOCFILES = readme.md config LICENSE changelog.md docs/advanced-usage.md docs/application-config-options.md docs/application-security.md docs/business-shared-items.md docs/client-architecture.md docs/contributing.md docs/docker.md docs/install.md docs/national-cloud-deployments.md docs/podman.md docs/privacy-policy.md docs/sharepoint-libraries.md docs/terms-of-service.md docs/ubuntu-package-install.md docs/usage.md docs/known-issues.md
 
 ifneq ("$(wildcard /etc/redhat-release)","")
 RHEL = $(shell cat /etc/redhat-release | grep -E "(Red Hat Enterprise Linux|CentOS)" | wc -l)
@@ -66,19 +71,19 @@ RHEL_VERSION = 0
 endif
 
 SOURCES = \
+	src/main.d \
 	src/config.d \
-	src/itemdb.d \
 	src/log.d \
-	src/main.d \
-	src/monitor.d \
-	src/onedrive.d \
+	src/util.d \
 	src/qxor.d \
-	src/selective.d \
-	src/sqlite.d \
+	src/curlEngine.d \
+	src/onedrive.d \
+	src/webhook.d \
 	src/sync.d \
-	src/upload.d \
-	src/util.d \
-	src/progress.d \
+	src/itemdb.d \
+	src/sqlite.d \
+	src/clientSideFiltering.d \
+	src/monitor.d \
 	src/arsd/cgi.d
 
 ifeq ($(NOTIFICATIONS),yes)
@@ -92,10 +97,9 @@ clean:
 	rm -rf autom4te.cache
 	rm -f config.log config.status
 
-# also remove files generated via ./configure
+# Remove files generated via ./configure
 distclean: clean
-	rm -f Makefile contrib/pacman/PKGBUILD contrib/spec/onedrive.spec onedrive.1 \
-		$(system_unit_files) $(user_unit_files)
+	rm -f Makefile contrib/pacman/PKGBUILD contrib/spec/onedrive.spec onedrive.1 $(system_unit_files) $(user_unit_files)
 
 onedrive: $(SOURCES)
 	if [ -f .git/HEAD ] ; then \
diff --git a/CHANGELOG.md b/changelog.md
similarity index 99%
rename from CHANGELOG.md
rename to changelog.md
index a6d2d3f1b..fab88d4c6 100644
--- a/CHANGELOG.md
+++ b/changelog.md
@@ -1,6 +1,13 @@
 # Changelog
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
-and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 2.5.0 - TBA
+
+
+### Changed
+* Renamed various documentation files to align with document content
+
 
 ## 2.4.25 - 2023-06-21
 ### Fixed
diff --git a/config b/config
index 807180ea5..027194977 100644
--- a/config
+++ b/config
@@ -3,7 +3,7 @@
 # with their default values.
 # All values need to be enclosed in quotes
 # When changing a config option below, remove the '#' from the start of the line
-# For explanations of all config options below see docs/USAGE.md or the man page.
+# For explanations of all config options below see docs/usage.md or the man page.
 #
 # sync_dir = "~/OneDrive"
 # skip_file = "~*|.~*|*.tmp"
@@ -40,22 +40,19 @@
 # bypass_data_preservation = "false"
 # azure_ad_endpoint = ""
 # azure_tenant_id = "common"
-# sync_business_shared_folders = "false"
+# sync_business_shared_items = "false"
 # sync_dir_permissions = "700"
 # sync_file_permissions = "600"
 # rate_limit = "131072"
+# operation_timeout = "3600"
 # webhook_enabled = "false"
 # webhook_public_url = ""
 # webhook_listening_host = ""
 # webhook_listening_port = "8888"
-# webhook_expiration_interval = "86400"
-# webhook_renewal_interval = "43200"
+# webhook_expiration_interval = "600"
+# webhook_renewal_interval = "300"
+# webhook_retry_interval = "60"
 # space_reservation = "50"
 # display_running_config = "false"
 # read_only_auth_scope = "false"
 # cleanup_local_files = "false"
-# operation_timeout = "3600"
-# dns_timeout = "60"
-# connect_timeout = "10"
-# data_timeout = "600"
-# ip_protocol_version = "0"
diff --git a/configure b/configure
index f68a775cc..3d7a6750f 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for onedrive v2.4.25.
+# Generated by GNU Autoconf 2.69 for onedrive v2.5.0-rc2.
 #
 # Report bugs to <https://github.com/abraunegg/onedrive>.
 #
@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='onedrive'
 PACKAGE_TARNAME='onedrive'
-PACKAGE_VERSION='v2.4.25'
-PACKAGE_STRING='onedrive v2.4.25'
+PACKAGE_VERSION='v2.5.0-rc2'
+PACKAGE_STRING='onedrive v2.5.0-rc2'
 PACKAGE_BUGREPORT='https://github.com/abraunegg/onedrive'
 PACKAGE_URL=''
@@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures onedrive v2.4.25 to adapt to many kinds of systems.
+\`configure' configures onedrive v2.5.0-rc2 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1280,7 +1280,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of onedrive v2.4.25:";;
+     short | recursive ) echo "Configuration of onedrive v2.5.0-rc2:";;
    esac
   cat <<\_ACEOF
 
@@ -1393,7 +1393,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-onedrive configure v2.4.25
+onedrive configure v2.5.0-rc2
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1410,7 +1410,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by onedrive $as_me v2.4.25, which was
+It was created by onedrive $as_me v2.5.0-rc2, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -2007,7 +2007,7 @@ $as_echo "no" >&6; }
 fi
 fi
 
-for ac_prog in dmd ldmd2 ldc2
+for ac_prog in dmd ldc2 ldmd2
 do
   # Extract the first word of "$ac_prog", so it can be a program name with args.
 set dummy $ac_prog; ac_word=$2
@@ -2162,7 +2162,7 @@
 
 
 
-PACKAGE_DATE="June 2023"
+PACKAGE_DATE="April 2024"
 
 
 
@@ -3159,7 +3159,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by onedrive $as_me v2.4.25, which was
+This file was extended by onedrive $as_me v2.5.0-rc2, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -3212,7 +3212,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-onedrive config.status v2.4.25
+onedrive config.status v2.5.0-rc2
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
diff --git a/configure.ac b/configure.ac
index 9c2c0db26..9ce1ec1b4 100644
--- a/configure.ac
+++ b/configure.ac
@@ -9,7 +9,7 @@ dnl - commit the changed files (configure.ac, configure)
 dnl - tag the release
 
 AC_PREREQ([2.69])
-AC_INIT([onedrive],[v2.4.25], [https://github.com/abraunegg/onedrive], [onedrive])
+AC_INIT([onedrive],[v2.5.0-rc2], [https://github.com/abraunegg/onedrive], [onedrive])
 
 AC_CONFIG_SRCDIR([src/main.d])
 
@@ -101,7 +101,7 @@ case $(basename $DC) in
 		VERSION=`$DC --version`
 		# remove everything up to first (
 		VERSION=${VERSION#* (}
-		# remove everthing after ):
+		# remove everything after ):
 		VERSION=${VERSION%%):*}
 		# now version should be something like L.M.N
 		MINVERSION=1.18.0
@@ -162,7 +162,7 @@ dnl value via pkg-config and put it into $def_systemdsystemunitdir
 AS_IF([test "x$with_systemdsystemunitdir" = "xyes" -o "x$with_systemdsystemunitdir" = "xauto"], [
 	dnl true part, so try to determine with pkg-config
 	def_systemdsystemunitdir=$($PKG_CONFIG --variable=systemdsystemunitdir systemd)
-	dnl if we cannot find it via pkg-config, *and* the user explicitely passed it in with,
+	dnl if we cannot find it via pkg-config, *and* the user explicitly passed it in with,
 	dnl we warn, and in all cases we unset (set to no) the respective variable
 	AS_IF([test "x$def_systemdsystemunitdir" = "x"], [
 		dnl we couldn't find the default value via pkg-config
diff --git a/contrib/completions/complete.bash b/contrib/completions/complete.bash
index 358084640..68895c9d9 100644
--- a/contrib/completions/complete.bash
+++ b/contrib/completions/complete.bash
@@ -11,7 +11,7 @@ _onedrive()
 	prev=${COMP_WORDS[COMP_CWORD-1]}
 
 	options='--check-for-nomount --check-for-nosync --debug-https --disable-notifications --display-config --display-sync-status --download-only --disable-upload-validation --dry-run --enable-logging --force-http-1.1 --force-http-2 --get-file-link --local-first --logout -m --monitor --no-remote-delete --print-token --reauth --resync --skip-dot-files --skip-symlinks --synchronize --upload-only -v --verbose --version -h --help'
-	argopts='--create-directory --get-O365-drive-id --operation-timeout --remove-directory --single-directory --source-directory'
+	argopts='--create-directory --get-O365-drive-id --remove-directory --single-directory --source-directory'
 
 	# Loop on the arguments to manage conflicting options
 	for (( i=0; i < ${#COMP_WORDS[@]}-1; i++ )); do
@@ -34,7 +34,7 @@ _onedrive()
 			fi
 			return 0
 			;;
-		--create-directory|--get-O365-drive-id|--operation-timeout|--remove-directory|--single-directory|--source-directory)
+		--create-directory|--get-O365-drive-id|--remove-directory|--single-directory|--source-directory)
 			return 0
 			;;
 		*)
diff --git a/contrib/completions/complete.fish b/contrib/completions/complete.fish
index 7547574c4..185a85823 100644
--- a/contrib/completions/complete.fish
+++ b/contrib/completions/complete.fish
@@ -23,7 +23,6 @@ complete -c onedrive -l local-first -d 'Synchronize from the local directory sou
 complete -c onedrive -l logout -d 'Logout the current user.'
 complete -c onedrive -n "not __fish_seen_subcommand_from --synchronize" -a "-m --monitor" -d 'Keep monitoring for local and remote changes.'
 complete -c onedrive -l no-remote-delete -d 'Do not delete local file deletes from OneDrive when using --upload-only.'
-complete -c onedrive -l operation-timeout -d 'Specify the maximum amount of time (in seconds) an operation is allowed to take.'
 complete -c onedrive -l print-token -d 'Print the access token, useful for debugging.'
 complete -c onedrive -l remote-directory -d 'Remove a directory on OneDrive - no sync will be performed.'
 complete -c onedrive -l reauth -d 'Reauthenticate the client with OneDrive.'
diff --git a/contrib/completions/complete.zsh b/contrib/completions/complete.zsh
index b03ea6866..ff92e6f8d 100644
--- a/contrib/completions/complete.zsh
+++ b/contrib/completions/complete.zsh
@@ -27,7 +27,6 @@ all_opts=(
 	'--logout[Logout the current user]'
 	'(-m --monitor)'{-m,--monitor}'[Keep monitoring for local and remote changes]'
 	'--no-remote-delete[Do not delete local file deletes from OneDrive when using --upload-only]'
-	'--operation-timeout[Specify the maximum amount of time (in seconds) an operation is allowed to take.]:seconds:'
 	'--print-token[Print the access token, useful for debugging]'
 	'--reauth[Reauthenticate the client with OneDrive]'
 	'--resync[Forget the last saved state, perform a full sync]'
diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile
index fca957314..0e9ea4351 100644
--- a/contrib/docker/Dockerfile
+++ b/contrib/docker/Dockerfile
@@ -1,9 +1,9 @@
 # -*-Dockerfile-*-
 
-ARG FEDORA_VERSION=38
+ARG FEDORA_VERSION=40
 ARG DEBIAN_VERSION=bullseye
-ARG GO_VERSION=1.20
-ARG GOSU_VERSION=1.16
+ARG GO_VERSION=1.22
+ARG GOSU_VERSION=1.17
 
 FROM golang:${GO_VERSION}-${DEBIAN_VERSION} AS builder-gosu
 ARG GOSU_VERSION
diff --git a/contrib/docker/Dockerfile-alpine b/contrib/docker/Dockerfile-alpine
index 4db39a699..c3718a08e 100644
--- a/contrib/docker/Dockerfile-alpine
+++ b/contrib/docker/Dockerfile-alpine
@@ -1,8 +1,8 @@
 # -*-Dockerfile-*-
 
-ARG ALPINE_VERSION=3.18
-ARG GO_VERSION=1.20
-ARG GOSU_VERSION=1.16
+ARG ALPINE_VERSION=3.19
+ARG GO_VERSION=1.22
+ARG GOSU_VERSION=1.17
 
 FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS builder-gosu
 ARG GOSU_VERSION
diff --git a/contrib/docker/Dockerfile-debian b/contrib/docker/Dockerfile-debian
index a4cae5a44..32fedc723 100644
--- a/contrib/docker/Dockerfile-debian
+++ b/contrib/docker/Dockerfile-debian
@@ -12,7 +12,7 @@ RUN apt-get clean \
 
 COPY . /usr/src/onedrive
 WORKDIR /usr/src/onedrive
-RUN ./configure DC=/usr/bin/ldmd2 \
+RUN ./configure \
     && make clean \
     && make \
     && make install
diff --git a/contrib/docker/entrypoint.sh b/contrib/docker/entrypoint.sh
index 8eb529480..03faff8c2 100755
--- a/contrib/docker/entrypoint.sh
+++ b/contrib/docker/entrypoint.sh
@@ -118,6 +118,34 @@ if [ -n "${ONEDRIVE_SINGLE_DIRECTORY:=""}" ]; then
    ARGS=(--single-directory \"${ONEDRIVE_SINGLE_DIRECTORY}\" ${ARGS[@]})
 fi
 
+# Tell client to run in dry-run mode
+if [ "${ONEDRIVE_DRYRUN:=0}" == "1" ]; then
+   echo "# We are running in dry-run mode"
+   echo "# Adding --dry-run"
+   ARGS=(--dry-run ${ARGS[@]})
+fi
+
+# Tell client to disable download validation
+if [ "${ONEDRIVE_DISABLE_DOWNLOAD_VALIDATION:=0}" == "1" ]; then
+   echo "# We are disabling the download integrity checks performed by this client"
+   echo "# Adding --disable-download-validation"
+   ARGS=(--disable-download-validation ${ARGS[@]})
+fi
+
+# Tell client to disable upload validation
+if [ "${ONEDRIVE_DISABLE_UPLOAD_VALIDATION:=0}" == "1" ]; then
+   echo "# We are disabling the upload integrity checks performed by this client"
+   echo "# Adding --disable-upload-validation"
+   ARGS=(--disable-upload-validation ${ARGS[@]})
+fi
+
+# Tell client to download OneDrive Business Shared Files if 'sync_business_shared_items' option has been enabled in the configuration files
+if [ "${ONEDRIVE_SYNC_SHARED_FILES:=0}" == "1" ]; then
+   echo "# We are attempting to sync OneDrive Business Shared Files if 'sync_business_shared_items' has been enabled in the config file"
+   echo "# Adding --sync-shared-files"
+   ARGS=(--sync-shared-files ${ARGS[@]})
+fi
+
 if [ ${#} -gt 0 ]; then
    ARGS=("${@}")
 fi
diff --git a/contrib/gentoo/onedrive-2.4.25.ebuild b/contrib/gentoo/onedrive-2.5.0.ebuild
similarity index 100%
rename from contrib/gentoo/onedrive-2.4.25.ebuild
rename to contrib/gentoo/onedrive-2.5.0.ebuild
diff --git a/contrib/spec/onedrive.spec.in b/contrib/spec/onedrive.spec.in
index 2b4a22e9b..eca723bb9 100644
--- a/contrib/spec/onedrive.spec.in
+++ b/contrib/spec/onedrive.spec.in
@@ -12,7 +12,7 @@
 %endif
 
 Name:       onedrive
-Version:    2.4.25
+Version:    2.5.0
 Release:    1%{?dist}
 Summary:    Microsoft OneDrive Client
 Group:      System Environment/Network
diff --git a/docs/BusinessSharedFolders.md b/docs/BusinessSharedFolders.md
deleted file mode 100644
index 3f0429434..000000000
--- a/docs/BusinessSharedFolders.md
+++ /dev/null
@@ -1,192 +0,0 @@
-# How to configure OneDrive Business Shared Folder Sync
-## Application Version
-Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
-
-## Process Overview
-Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client:
-1.  List available shared folders to determine which folder you wish to sync & to validate that you have access to that folder
-2.  Create a new file called 'business_shared_folders' in your config directory which contains a list of the shared folders you wish to sync
-3.  Test the configuration using '--dry-run'
-4.  Sync the OneDrive Business Shared folders as required
-
-## Listing available OneDrive Business Shared Folders
-List the available OneDrive Business Shared folders with the following command:
-```text
-onedrive --list-shared-folders
-```
-This will return a listing of all OneDrive Business Shared folders which have been shared with you and by whom. This is important for conflict resolution:
-```text
-Initializing the Synchronization Engine ...
-
-Listing available OneDrive Business Shared Folders:
----------------------------------------
-Shared Folder:   SharedFolder0
-Shared By:       Firstname Lastname
----------------------------------------
-Shared Folder:   SharedFolder1
-Shared By:       Firstname Lastname
---------------------------------------- 
-Shared Folder:   SharedFolder2
-Shared By:       Firstname Lastname
----------------------------------------
-Shared Folder:   SharedFolder0
-Shared By:       Firstname Lastname (user@domain)
----------------------------------------
-Shared Folder:   SharedFolder1
-Shared By:       Firstname Lastname (user@domain)
----------------------------------------
-Shared Folder:   SharedFolder2
-Shared By:       Firstname Lastname (user@domain)
-...
-```
-
-## Configuring OneDrive Business Shared Folders
-1.  Create a new file called 'business_shared_folders' in your config directory
-2.  On each new line, list the OneDrive Business Shared Folder you wish to sync
-```text
-[alex@centos7full onedrive]$ cat ~/.config/onedrive/business_shared_folders
-# comment
-Child Shared Folder
-# Another comment
-Top Level to Share
-[alex@centos7full onedrive]$
-```
-3.  Validate your configuration with `onedrive --display-config`:
-```text
-Configuration file successfully loaded
-onedrive version                       = v2.4.3
-Config path                            = /home/alex/.config/onedrive-business/
-Config file found in config path       = true
-Config option 'check_nosync'           = false
-Config option 'sync_dir'               = /home/alex/OneDriveBusiness
-Config option 'skip_dir'               =
-Config option 'skip_file'              = ~*|.~*|*.tmp
-Config option 'skip_dotfiles'          = false
-Config option 'skip_symlinks'          = false
-Config option 'monitor_interval'       = 300
-Config option 'min_notify_changes'     = 5
-Config option 'log_dir'                = /var/log/onedrive/
-Config option 'classify_as_big_delete' = 1000
-Config option 'sync_root_files'        = false
-Selective sync 'sync_list' configured  = false
-Business Shared Folders configured     = true
-business_shared_folders contents:
-# comment
-Child Shared Folder
-# Another comment
-Top Level to Share
-```
-
-## Performing a sync of OneDrive Business Shared Folders
-Perform a standalone sync using the following command: `onedrive --synchronize --sync-shared-folders --verbose`:
-```text
-onedrive --synchronize --sync-shared-folders --verbose
-Using 'user' Config Dir: /home/alex/.config/onedrive-business/
-Using 'system' Config Dir:
-Configuration file successfully loaded
-Initializing the OneDrive API ...
-Configuring Global Azure AD Endpoints
-Opening the item database ...
-All operations will be performed in: /home/alex/OneDriveBusiness
-Application version: v2.4.3
-Account Type: business
-Default Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
-Default Root ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ
-Remaining Free Space: 1098316220277
-Fetching details for OneDrive Root
-OneDrive Root exists in the database
-Initializing the Synchronization Engine ...
-Syncing changes from OneDrive ...
-Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ
-Number of items from OneDrive to process: 0
-Attempting to sync OneDrive Business Shared Folders
-Syncing this OneDrive Business Shared Folder: Child Shared Folder
-OneDrive Business Shared Folder - Shared By:  test user
-Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR
-Adding OneDrive root details for processing
-Adding OneDrive folder details for processing
-Adding 4 OneDrive items for processing from OneDrive folder
-Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper
-Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared
-Processing 11 OneDrive items to ensure consistent local state
-Syncing this OneDrive Business Shared Folder: Top Level to Share
-OneDrive Business Shared Folder - Shared By:  test user (testuser@mynasau3.onmicrosoft.com)
-Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA
-Adding OneDrive root details for processing
-Adding OneDrive folder details for processing
-Adding 4 OneDrive items for processing from OneDrive folder
-Adding 3 OneDrive items for processing from /Top Level to Share/10-Files
-Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper
-Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Images
-Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG
-Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG
-Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/SMPP
-Processing 31 OneDrive items to ensure consistent local state
-Uploading differences of ~/OneDriveBusiness
-Processing root
-The directory has not changed
-Processing SMPP_Local
-The directory has not changed
-Processing SMPP-IF-SPEC_v3_3-24858.pdf
-The file has not changed
-Processing SMPP_v3_4_Issue1_2-24857.pdf
-The file has not changed
-Processing new_local_file.txt
-The file has not changed
-Processing root
-The directory has not changed
-...
-The directory has not changed
-Processing week02-03-Combinational_Logic-v1.pptx
-The file has not changed
-Uploading new items of ~/OneDriveBusiness
-Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ
-Number of items from OneDrive to process: 0
-Attempting to sync OneDrive Business Shared Folders
-Syncing this OneDrive Business Shared Folder: Child Shared Folder
-OneDrive Business Shared Folder - Shared By:  test user
-Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR
-Adding OneDrive root details for processing
-Adding OneDrive folder details for processing
-Adding 4 OneDrive items for processing from OneDrive folder
-Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper
-Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared
-Processing 11 OneDrive items to ensure consistent local state
-Syncing this OneDrive Business Shared Folder: Top Level to Share
-OneDrive Business Shared Folder - Shared By:  test user (testuser@mynasau3.onmicrosoft.com)
-Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA
-Adding OneDrive root details for processing
-Adding OneDrive folder details for processing
-Adding 4 OneDrive items for processing from OneDrive folder
-Adding 3 OneDrive items for processing from /Top Level to Share/10-Files
-Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper
-Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Images
-Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG
-Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG
-Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/SMPP
-Processing 31 OneDrive items to ensure consistent local state
-```
-
-**Note:** Whenever you modify the `business_shared_folders` file you must perform a `--resync` of your database to clean up stale entries due to changes in your configuration.
-
-## Enable / Disable syncing of OneDrive Business Shared Folders
-Performing a sync of the configured OneDrive Business Shared Folders can be enabled / disabled via adding the following to your configuration file.
-
-### Enable syncing of OneDrive Business Shared Folders via config file
-```text
-sync_business_shared_folders = "true"
-```
-
-### Disable syncing of OneDrive Business Shared Folders via config file
-```text
-sync_business_shared_folders = "false"
-```
-
-## Known Issues
-Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders.
-
-Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below:
-
-![shared_with_me](./images/shared_with_me.JPG)
-
-This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966)
diff --git a/docs/USAGE.md b/docs/USAGE.md
deleted file mode 100644
index 235b15d3e..000000000
--- a/docs/USAGE.md
+++ /dev/null
@@ -1,1469 +0,0 @@
-# Configuration and Usage of the OneDrive Free Client
-## Application Version
-Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
-
-## Table of Contents
-- [Using the client](#using-the-client)
-  * [Upgrading from 'skilion' client](#upgrading-from-skilion-client)
-  * [Local File and Folder Naming Conventions](#local-file-and-folder-naming-conventions)
-  * [curl compatibility](#curl-compatibility)
-  * [Authorize the application with your OneDrive Account](#authorize-the-application-with-your-onedrive-account)
-  * [Show your configuration](#show-your-configuration)
-  * [Testing your configuration](#testing-your-configuration)
-  * [Performing a sync](#performing-a-sync)
-  * [Performing a single directory sync](#performing-a-single-directory-sync)
-  * [Performing a 'one-way' download sync](#performing-a-one-way-download-sync)
-  * [Performing a 'one-way' upload sync](#performing-a-one-way-upload-sync)
-  * [Performing a selective sync via 'sync_list' file](#performing-a-selective-sync-via-sync_list-file)
-  * [Performing a --resync](#performing-a---resync)
-  * [Performing a --force-sync without a --resync or changing your configuration](#performing-a---force-sync-without-a---resync-or-changing-your-configuration)
-  * [Increasing logging level](#increasing-logging-level)
-  * [Client Activity Log](#client-activity-log)
-  * [Notifications](#notifications)
-  * [Handling a OneDrive account password change](#handling-a-onedrive-account-password-change)
-- [Configuration](#configuration)
-  * [The default configuration](#the-default-configuration-file-is-listed-below)
-  * ['config' file configuration examples](#config-file-configuration-examples)
-    + [sync_dir](#sync_dir)
-    + [sync_dir directory and file permissions](#sync_dir-directory-and-file-permissions)
-    + [skip_dir](#skip_dir)
-    + [skip_file](#skip_file)
-    + [skip_dotfiles](#skip_dotfiles)
-    + [monitor_interval](#monitor_interval)
-    + [monitor_fullscan_frequency](#monitor_fullscan_frequency)
-    + [monitor_log_frequency](#monitor_log_frequency)
-    + [min_notify_changes](#min_notify_changes)
-    + [operation_timeout](#operation_timeout)
-    + [ip_protocol_version](#ip_protocol_version)
-    + [classify_as_big_delete](#classify_as_big_delete)
-  * [Configuring the client for 'single tenant application' use](#configuring-the-client-for-single-tenant-application-use)
-  * [Configuring the client to use older 'skilion' application identifier](#configuring-the-client-to-use-older-skilion-application-identifier)
-- [Frequently Asked Configuration Questions](#frequently-asked-configuration-questions)
-  * [How to sync only specific or single directory?](#how-to-sync-only-specific-or-single-directory)
-  * [How to 'skip' directories from syncing?](#how-to-skip-directories-from-syncing)
-  * [How to 'skip' files from syncing?](#how-to-skip-files-from-syncing)
-  * [How to 'skip' dot files and folders from syncing?](#how-to-skip-dot-files-and-folders-from-syncing)
-  * [How to 'skip' files larger than a certain size from syncing?](#how-to-skip-files-larger-than-a-certain-size-from-syncing)
-  * [How to 'rate limit' the application to control bandwidth consumed for upload & download operations?](#how-to-rate-limit-the-application-to-control-bandwidth-consumed-for-upload--download-operations)
-  * [How to prevent your local disk from filling up?](#how-to-prevent-your-local-disk-from-filling-up)
-  * [How are symbolic links handled by the client?](#how-are-symbolic-links-handled-by-the-client)
-  * [How to sync shared folders (OneDrive Personal)?](#how-to-sync-shared-folders-onedrive-personal)
-  * [How to sync shared folders (OneDrive Business or Office 365)?](#how-to-sync-shared-folders-onedrive-business-or-office-365)
-  * [How to sync sharePoint / Office 365 Shared Libraries?](#how-to-sync-sharepoint--office-365-shared-libraries)
-  * [How to run a user systemd service at boot without user login?](#how-to-run-a-user-systemd-service-at-boot-without-user-login)
-  * [How to create a shareable link?](#how-to-create-a-shareable-link)
-  * [How to sync both Personal and Business accounts at the same time?](#how-to-sync-both-personal-and-business-accounts-at-the-same-time)
-  * [How to sync multiple SharePoint Libraries at the same time?](#how-to-sync-multiple-sharepoint-libraries-at-the-same-time)
-- [Running 'onedrive' in 'monitor' mode](#running-onedrive-in-monitor-mode)
-  * [Use webhook to subscribe to remote updates in 'monitor' mode](#use-webhook-to-subscribe-to-remote-updates-in-monitor-mode)
-  * [More webhook configuration options](#more-webhook-configuration-options)
-    + [webhook_listening_host and webhook_listening_port](#webhook_listening_host-and-webhook_listening_port)
-    + [webhook_expiration_interval and webhook_renewal_interval](#webhook_expiration_interval-and-webhook_renewal_interval)
-- [Running 'onedrive' as a system service](#running-onedrive-as-a-system-service)
-  * [OneDrive service running as root user via init.d](#onedrive-service-running-as-root-user-via-initd)
-  * [OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-root-user-via-systemd-arch-ubuntu-debian-opensuse-fedora)
-  * [OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)](#onedrive-service-running-as-root-user-via-systemd-red-hat-enterprise-linux-centos-linux)
-  * [OneDrive service running as a non-root user via systemd (All Linux Distributions)](#onedrive-service-running-as-a-non-root-user-via-systemd-all-linux-distributions)
-  * [OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-a-non-root-user-via-systemd-with-notifications-enabled-arch-ubuntu-debian-opensuse-fedora)
-  * [OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)](#onedrive-service-running-as-a-non-root-user-via-runit-antix-devuan-artix-void)
-- [Additional Configuration](#additional-configuration)
-  * [Advanced Configuration of the OneDrive Free Client](#advanced-configuration-of-the-onedrive-free-client)
-  * [Access OneDrive service through a proxy](#access-onedrive-service-through-a-proxy)
-  * [Setup selinux for a sync folder outside of the home folder](#setup-selinux-for-a-sync-folder-outside-of-the-home-folder)
-- [All available commands](#all-available-commands)
-
-## Using the client
-### Upgrading from 'skilion' client
-The 'skilion' version contains a significant number of defects in how the local sync state is managed. When upgrading from the 'skilion' version to this version, it is advisable to stop any service / onedrive process from running and then remove any `items.sqlite3` file from your configuration directory (`~/.config/onedrive/`) as this will force the creation of a new local cache file.
-
-Additionally, if you are using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as per below:
-
-**Invalid configuration:**
-```text
-skip_file = ".*|~*"
-```
-**Minimum valid configuration:**
-```text
-skip_file = "~*"
-```
-**Default valid configuration:**
-```text
-skip_file = "~*|.~*|*.tmp"
-```
-
-Do not use a skip_file entry of `.*` as this will prevent correct searching of local changes to process.
-
-### Local File and Folder Naming Conventions
-The files and directories in the synchronization directory must follow the [Windows naming conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file).
-The application will attempt to handle instances where you have two files with the same names but with different capitalization. Where there is a namespace clash, the file name which clashes will not be synced. This is expected behavior and won't be fixed.
-
-### curl compatibility
-If your system utilises curl < 7.47.0, curl defaults to HTTP/1.1 for HTTPS operations. The client will use HTTP/1.1.
-
-If your system utilises curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS but will stick to HTTP/1.1 by default. The client will use HTTP/1.1 for HTTPS operations.
-
-If your system utilises curl >= 7.62.0, curl defaults to prefer HTTP/2 over HTTP/1.1 by default. The client will utilse HTTP/2 for most HTTPS operations and HTTP/1.1 for others. This difference is governed by the OneDrive platform and not this client.
-
-If you wish to explicitly use HTTP/1.1 you will need to use the `--force-http-11` flag or set the config option `force_http_11 = "true"` to force the application to use HTTP/1.1 otherwise all client operations will use whatever is the curl default for your distribution.
-
-### Authorize the application with your OneDrive Account
-After installing the application you must authorize the application with your OneDrive Account. This is done by running the application without any additional command switches.
-
-Note that some companies require to explicitly add this app in [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an (approved) app to your apps, click on the ellipsis in the top-right corner and choose "Request new apps". On the next page you can add this app. If its not listed, you should request through your IT department.
-
-You will be asked to open a specific URL by using your web browser where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving permission to the application, you will be redirected to a blank page. Copy the URI of the blank page into the application.
-```text
-[user@hostname ~]$ onedrive
-
-Authorize this app visiting:
-
-https://.....
-
-Enter the response uri:
-
-```
-
-**Example:**
-```
-[user@hostname ~]$ onedrive
-Authorize this app visiting:
-
-https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient
-
-Enter the response uri: https://login.microsoftonline.com/common/oauth2/nativeclient?code=
-
-Application has been successfully authorised, however no additional command switches were provided.
-
-Please use 'onedrive --help' for further assistance in regards to running this application.
-
-```
-
-### Show your configuration
-To validate your configuration the application will use, utilize the following:
-```text
-onedrive --display-config
-```
-This will display all the pertinent runtime interpretation of the options and configuration you are using. Example output is as follows:
-```text
-Configuration file successfully loaded
-onedrive version                       = vX.Y.Z-A-bcdefghi
-Config path                            = /home/alex/.config/onedrive
-Config file found in config path       = true
-Config option 'sync_dir'               = /home/alex/OneDrive
-Config option 'enable_logging'         = false
-...
-Selective sync 'sync_list' configured  = false
-Config option 'sync_business_shared_folders' = false
-Business Shared Folders configured     = false
-Config option 'webhook_enabled'        = false
-```
-
-### Testing your configuration
-You are able to test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded or removed, however the application will display what 'would' have occurred. For example:
-```text
-onedrive --synchronize --verbose --dry-run
-DRY-RUN Configured. Output below shows what 'would' have occurred.
-Loading config ...
-Using Config Dir: /home/user/.config/onedrive
-Initializing the OneDrive API ...
-Opening the item database ...
-All operations will be performed in: /home/user/OneDrive
-Initializing the Synchronization Engine ...
-Account Type: personal
-Default Drive ID:
-Default Root ID:
-Remaining Free Space: 5368709120
-Fetching details for OneDrive Root
-OneDrive Root exists in the database
-Syncing changes from OneDrive ...
-Applying changes of Path ID:
-Uploading differences of .
-Processing root
-The directory has not changed
-Uploading new items of .
-OneDrive Client requested to create remote path: ./newdir
-The requested directory to create was not found on OneDrive - creating remote directory: ./newdir
-Successfully created the remote directory ./newdir on OneDrive
-Uploading new file ./newdir/newfile.txt ... done.
-Remaining free space: 5368709076
-Applying changes of Path ID:
-```
-
-**Note:** `--dry-run` can only be used with `--synchronize`. It cannot be used with `--monitor` and will be ignored.
-
-### Performing a sync
-By default all files are downloaded in `~/OneDrive`. After authorizing the application, a sync of your data can be performed by running:
-```text
-onedrive --synchronize
-```
-This will synchronize files from your OneDrive account to your `~/OneDrive` local directory.
-
-If you prefer to use your local files as stored in `~/OneDrive` as the 'source of truth' use the following sync command:
-```text
-onedrive --synchronize --local-first
-```
-
-### Performing a single directory sync
-In some cases it may be desirable to sync a single directory under ~/OneDrive without having to change your client configuration. To do this use the following command:
-```text
-onedrive --synchronize --single-directory ''
-```
-
-Example: If the full path is `~/OneDrive/mydir`, the command would be `onedrive --synchronize --single-directory 'mydir'`
-
-### Performing a 'one-way' download sync
-In some cases it may be desirable to 'download only' from OneDrive. To do this use the following command:
-```text
-onedrive --synchronize --download-only
-```
-
-### Performing a 'one-way' upload sync
-In some cases it may be desirable to 'upload only' to OneDrive. To do this use the following command:
-```text
-onedrive --synchronize --upload-only
-```
-**Note:** If a file or folder is present on OneDrive, that was previously synced and now does not exist locally, that item it will be removed from OneDrive. If the data on OneDrive should be kept, the following should be used:
-```text
-onedrive --synchronize --upload-only --no-remote-delete
-```
-**Note:** The operation of 'upload only' does not request data from OneDrive about what 'other' data exists online. The client only knows about the data that 'this' client uploaded, thus any files or folders created or uploaded outside of this client will remain untouched online.
-
-### Performing a selective sync via 'sync_list' file
-Selective sync allows you to sync only specific files and directories.
-To enable selective sync create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`).
-
-Important points to understand before using 'sync_list'.
-* 'sync_list' excludes _everything_ by default on onedrive.
-* 'sync_list' follows an _"exclude overrides include"_ rule, and requires **explicit inclusion**.
-* Order exclusions before inclusions, so that anything _specifically included_ is included.
-* How and where you place your `/` matters for excludes and includes in sub directories.
-
-Each line of the file represents a relative path from your `sync_dir`. All files and directories not matching any line of the file will be skipped during all operations.
-
-Additionally, the use of `/` is critically important to determine how a rule is interpreted. It is very similar to `**` wildcards, for those that are familiar with globbing patterns.
-Here is an example of `sync_list`:
-```text
-# sync_list supports comments
-#
-# The ordering of entries is highly recommended - exclusions before inclusions
-#
-# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in Onedrive
-!Documents/temp*
-#
-# Exclude secret data folder in root directory only
-!/Secret_data/*
-#
-# Include everything else in root directory
-/*
-#
-# Include my Backup folder(s) or file(s) anywhere on Onedrive
-Backup
-#
-# Include my Backup folder in root
-/Backup/
-#
-# Include Documents folder(s) anywhere in Onedrive
-Documents/
-#
-# Include all PDF files in Documents folder(s), anywhere in Onedrive
-Documents/*.pdf
-#
-# Include this single document in Documents folder(s), anywhere in Onedrive
-Documents/latest_report.docx
-#
-# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in Onedrive
-Work/Project*
-#
-# Include all "notes.txt" files, anywhere in Onedrive
-notes.txt
-#
-# Include /Blender in the ~Onedrive root but not if elsewhere in Onedrive
-/Blender
-#
-# Include these directories(or files) in 'Pictures' folder(s), that have a space in their name
-Pictures/Camera Roll
-Pictures/Saved Pictures
-#
-# Include these names if they match any file or folder
-Cinema Soc
-Codes
-Textbooks
-Year 2
-```
-The following are supported for pattern matching and exclusion rules:
-* Use the `*` to wildcard select any characters to match for the item to be included
-* Use either `!` or `-` characters at the start of the line to exclude an otherwise included item
-
-
-**Note:** When enabling the use of 'sync_list' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement.
-
-**Note:** After changing the sync_list, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync`
-
-**Note:** In some circumstances, it may be required to sync all the individual files within the 'sync_dir', but due to frequent name change / addition / deletion of these files, it is not desirable to constantly change the 'sync_list' file to include / exclude these files and force a resync. To assist with this, enable the following in your configuration file:
-```text
-sync_root_files = "true"
-```
-This will tell the application to sync any file that it finds in your 'sync_dir' root by default.
-
-### Performing a --resync
-If you modify any of the following configuration items, you will be required to perform a `--resync` to ensure your client is syncing your data with the updated configuration:
-* sync_dir
-* skip_dir
-* skip_file
-* drive_id
-* Modifying sync_list
-* Modifying business_shared_folders
-
-Additionally, you may choose to perform a `--resync` if you feel that this action needs to be taken to ensure your data is in sync. If you are using this switch simply because you dont know the sync status, you can query the actual sync status using `--display-sync-status`.
-
-When using `--resync`, the following warning and advice will be presented:
-```text
-The use of --resync will remove your local 'onedrive' client state, thus no record will exist regarding your current 'sync status'
-This has the potential to overwrite local versions of files with potentially older versions downloaded from OneDrive which can lead to data loss
-If in-doubt, backup your local data first before proceeding with --resync
-
-Are you sure you wish to proceed with --resync? [Y/N]
-```
-
-To proceed with using `--resync`, you must type 'y' or 'Y' to allow the application to continue.
-
-**Note:** It is highly recommended to only use `--resync` if the application advises you to use it. Do not just blindly set the application to start with `--resync` as the default option.
-
-**Note:** In some automated environments (and it is 100% assumed you *know* what you are doing because of automation), in order to avoid this 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt.
-
-### Performing a --force-sync without a --resync or changing your configuration
-In some cases and situations, you may have configured the application to skip certain files and folders using 'skip_file' and 'skip_dir' configuration. You then may have a requirement to actually sync one of these items, but do not wish to modify your configuration, nor perform an entire `--resync` twice.
-
-The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync`
-
-In order to use this option, you must run the application manually in the following manner:
-```text
-onedrive --synchronize --single-directory '' --force-sync
-```
-
-When using `--force-sync`, the following warning and advice will be presented:
-```text
-WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synchronize --single-directory --force-sync being used
-
-The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts.
-By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync.
-
-Are you sure you wish to proceed with --force-sync [Y/N]
-```
-
-To proceed with using `--force-sync`, you must type 'y' or 'Y' to allow the application to continue.
-
-### Increasing logging level
-When running a sync it may be desirable to see additional information as to the progress and operation of the client. To do this, use the following command:
-```text
-onedrive --synchronize --verbose
-```
-
-### Client Activity Log
-When running onedrive all actions can be logged to a separate log file. This can be enabled by using the `--enable-logging` flag. By default, log files will be written to `/var/log/onedrive/`
-
-**Note:** You will need to ensure the existence of this directory, and that your user has the applicable permissions to write to this directory or the following warning will be printed:
-```text
-Unable to access /var/log/onedrive/
-Please manually create '/var/log/onedrive/' and set appropriate permissions to allow write access
-The requested client activity log will instead be located in the users home directory
-```
-
-On many systems this can be achieved by
-```text
-sudo mkdir /var/log/onedrive
-sudo chown root:users /var/log/onedrive
-sudo chmod 0775 /var/log/onedrive
-```
-
-All log files will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client.
-
-Additionally, you need to ensure that your user account is part of the 'users' group:
-```
-cat /etc/group | grep users
-```
-
-If your user is not part of this group, then you need to add your user to this group:
-```
-sudo usermod -a -G users
-```
-
-You then need to 'logout' of all sessions / SSH sessions to login again to have the new group access applied.
-
-
-**Note:**
-To use a different log directory rather than the default above, add the following as a configuration option to `~/.config/onedrive/config`:
-```text
-log_dir = "/path/to/location/"
-```
-Trailing slash required
-
-An example of the log file is below:
-```text
-2018-Apr-07 17:09:32.1162837 Loading config ...
-2018-Apr-07 17:09:32.1167908 No config file found, using defaults
-2018-Apr-07 17:09:32.1170626 Initializing the OneDrive API ...
-2018-Apr-07 17:09:32.5359143 Opening the item database ...
-2018-Apr-07 17:09:32.5515295 All operations will be performed in: /root/OneDrive
-2018-Apr-07 17:09:32.5518387 Initializing the Synchronization Engine ...
-2018-Apr-07 17:09:36.6701351 Applying changes of Path ID:
-2018-Apr-07 17:09:37.4434282 Adding OneDrive Root to the local database
-2018-Apr-07 17:09:37.4478342 The item is already present
-2018-Apr-07 17:09:37.4513752 The item is already present
-2018-Apr-07 17:09:37.4550062 The item is already present
-2018-Apr-07 17:09:37.4586444 The item is already present
-2018-Apr-07 17:09:37.7663571 Adding OneDrive Root to the local database
-2018-Apr-07 17:09:37.7739451 Fetching details for OneDrive Root
-2018-Apr-07 17:09:38.0211861 OneDrive Root exists in the database
-2018-Apr-07 17:09:38.0215375 Uploading differences of .
-2018-Apr-07 17:09:38.0220464 Processing
-2018-Apr-07 17:09:38.0224884 The directory has not changed
-2018-Apr-07 17:09:38.0229369 Processing
-2018-Apr-07 17:09:38.02338 The directory has not changed
-2018-Apr-07 17:09:38.0237678 Processing
-2018-Apr-07 17:09:38.0242285 The directory has not changed
-2018-Apr-07 17:09:38.0245977 Processing
-2018-Apr-07 17:09:38.0250788 The directory has not changed
-2018-Apr-07 17:09:38.0254657 Processing
-2018-Apr-07 17:09:38.0259923 The directory has not changed
-2018-Apr-07 17:09:38.0263547 Uploading new items of .
-2018-Apr-07 17:09:38.5708652 Applying changes of Path ID:
-```
-
-### Notifications
-If notification support is compiled in, the following events will trigger a notification within the display manager session:
-* Aborting a sync if .nosync file is found
-* Cannot create remote directory
-* Cannot upload file changes
-* Cannot delete remote file / folder
-* Cannot move remote file / folder
-
-
-### Handling a OneDrive account password change
-If you change your OneDrive account password, the client will no longer be authorised to sync, and will generate the following error:
-```text
-ERROR: OneDrive returned a 'HTTP 401 Unauthorized' - Cannot Initialize Sync Engine
-```
-To re-authorise the client, follow the steps below:
-1.  If running the client as a service (init.d or systemd), stop the service
-2.  Run the command `onedrive --reauth`. This will clean up the previous authorisation, and will prompt you to re-authorise the client as per initial configuration.
-3.  Restart the client if running as a service or perform a manual sync
-
-The application will now sync with OneDrive with the new credentials.
-
-## Configuration
-
-Configuration is determined by three layers: the default values, values set in the configuration file, and values passed in via the command line. The default values provide a reasonable default, and configuration is optional.
-
-Most command line options have a respective configuration file setting.
-
-If you want to change the defaults, you can copy and edit the included config file into your configuration directory. Valid default directories for the config file are:
-* `~/.config/onedrive`
-* `/etc/onedrive`
-
-**Example:**
-```text
-mkdir -p ~/.config/onedrive
-wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/onedrive/config
-nano ~/.config/onedrive/config
-```
-This file does not get created by default, and should only be created if you want to change the 'default' operational parameters.
-
-See the [config](https://raw.githubusercontent.com/abraunegg/onedrive/master/config) file for the full list of options, and [All available commands](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#all-available-commands) for all possible keys and their default values.
-
-**Note:** The location of the application configuration information can also be specified by using the `--confdir` configuration option which can be passed in at client run-time.
-
-### The default configuration file is listed below:
-```text
-# Configuration for OneDrive Linux Client
-# This file contains the list of supported configuration fields
-# with their default values.
-# All values need to be enclosed in quotes
-# When changing a config option below, remove the '#' from the start of the line
-# For explanations of all config options below see docs/USAGE.md or the man page.
-#
-# sync_dir = "~/OneDrive"
-# skip_file = "~*|.~*|*.tmp"
-# monitor_interval = "300"
-# skip_dir = ""
-# log_dir = "/var/log/onedrive/"
-# drive_id = ""
-# upload_only = "false"
-# check_nomount = "false"
-# check_nosync = "false"
-# download_only = "false"
-# disable_notifications = "false"
-# disable_upload_validation = "false"
-# enable_logging = "false"
-# force_http_11 = "false"
-# local_first = "false"
-# no_remote_delete = "false"
-# skip_symlinks = "false"
-# debug_https = "false"
-# skip_dotfiles = "false"
-# skip_size = "1000"
-# dry_run = "false"
-# min_notify_changes = "5"
-# monitor_log_frequency = "6"
-# monitor_fullscan_frequency = "12"
-# sync_root_files = "false"
-# classify_as_big_delete = "1000"
-# user_agent = ""
-# remove_source_files = "false"
-# skip_dir_strict_match = "false"
-# application_id = ""
-# resync = "false"
-# resync_auth = "false"
-# bypass_data_preservation = "false"
-# azure_ad_endpoint = ""
-# azure_tenant_id = "common"
-# sync_business_shared_folders = "false"
-# sync_dir_permissions = "700"
-# sync_file_permissions = "600"
-# rate_limit = "131072"
-# webhook_enabled = "false"
-# webhook_public_url = ""
-# webhook_listening_host = ""
-# webhook_listening_port = "8888"
-# webhook_expiration_interval = "86400"
-# webhook_renewal_interval = "43200"
-# space_reservation = "50"
-# display_running_config = "false"
-# read_only_auth_scope = "false"
-# cleanup_local_files = "false"
-# operation_timeout = "3600"
-# dns_timeout = "60"
-# connect_timeout = "10"
-# data_timeout = "600"
-# ip_protocol_version = "0"
-```
-
-### 'config' file configuration examples:
-The below are 'config' file examples to assist with configuration of the 'config' file:
-
-#### sync_dir
-Configure your local sync directory location.
-
-Example:
-```text
-# When changing a config option below, remove the '#' from the start of the line
-# For explanations of all config options below see docs/USAGE.md or the man page.
-#
-sync_dir="~/MyDirToSync"
-# skip_file = "~*|.~*|*.tmp"
-# monitor_interval = "300"
-# skip_dir = ""
-# log_dir = "/var/log/onedrive/"
-```
-**Please Note:**
-Proceed with caution here when changing the default sync dir from `~/OneDrive` to `~/MyDirToSync`
-
-The issue here is around how the client stores the sync_dir path in the database. If the config file is missing, or you don't use the `--syncdir` parameter - what will happen is the client will default back to `~/OneDrive` and 'think' that either all your data has been deleted - thus delete the content on OneDrive, or will start downloading all data from OneDrive into the default location.
-
-**Note:** After changing `sync_dir`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync`
-
-**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share) these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible. Local filesystem changes will be replicated between the local filesystem and OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix.
-
-#### sync_dir directory and file permissions
-The following are the default permissions for any new directory or file that is created:
-* Directories: 700 - This provides the following permissions: `drwx------`
-* Files: 600 - This provides the following permissions: `-rw-------`
-
-To change the default permissions, update the following two configuration options with the required permissions. Utilise the [Unix Permissions Calculator](https://chmod-calculator.com/) to assist in determining the required permissions.
-
-```text
-# When changing a config option below, remove the '#' from the start of the line
-# For explanations of all config options below see docs/USAGE.md or the man page.
-#
-...
-# sync_business_shared_folders = "false"
-sync_dir_permissions = "700"
-sync_file_permissions = "600"
-```
-
-**Important:** Special permission bits (setuid, setgid, sticky bit) are not supported. Valid permission values are from `000` to `777` only.
-
-#### skip_dir
-This option is used to 'skip' certain directories and supports pattern matching.
-
-Patterns are case insensitive. `*` and `?` [wildcard characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
-
-**Important:** Entries under `skip_dir` are relative to your `sync_dir` path.
-
-Example:
-```text
-# When changing a config option below, remove the '#' from the start of the line
-# For explanations of all config options below see docs/USAGE.md or the man page.
-#
-# sync_dir = "~/OneDrive"
-# skip_file = "~*|.~*|*.tmp"
-# monitor_interval = "300"
-skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell"
-# log_dir = "/var/log/onedrive/"
-```
-
-**Note:** The `skip_dir` option can be specified multiple times, for example:
-```text
-skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir"
-skip_dir = "/Path/To/A/Directory"
-skip_dir = "/Another/Path/To/Different/Directory"
-```
-This will be interpreted the same as:
-```text
-skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir|/Path/To/A/Directory|/Another/Path/To/Different/Directory"
-```
-
-**Note:** After changing `skip_dir`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync`
-
-#### skip_file
-This option is used to 'skip' certain files and supports pattern matching.
-
-Patterns are case insensitive. `*` and `?` [wildcard characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
-
-Files can be skipped in the following fashion:
-* Specify a wildcard, e.g. '*.txt' (skip all txt files)
-* Explicitly specify the filename and its full path relative to your sync_dir, e.g. '/path/to/file/filename.ext'
-* Explicitly specify the filename only and skip every instance of this filename, e.g. 'filename.ext'
-
-By default, the following files will be skipped:
-* Files that start with ~
-* Files that start with .~ (like .~lock.* files generated by LibreOffice)
-* Files that end in .tmp
-
-**Important:** Do not use a skip_file entry of `.*` as this will prevent correct searching of local changes to process.
-
-Example:
-```text
-# When changing a config option below, remove the '#' from the start of the line
-# For explanations of all config options below see docs/USAGE.md or the man page.
-#
-# sync_dir = "~/OneDrive"
-skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx"
-# monitor_interval = "300"
-# skip_dir = ""
-# log_dir = "/var/log/onedrive/"
-```
-
-**Note:** The `skip_file` option can be specified multiple times, for example:
-```text
-skip_file = "~*|.~*|*.tmp|*.swp"
-skip_file = "*.blah"
-skip_file = "never_sync.file"
-skip_file = "/Documents/keepass.kdbx"
-```
-This will be interpreted the same as:
-```text
-skip_file = "~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx"
-```
-
-**Note:** After changing `skip_file`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync`
-
-#### skip_dotfiles
-Setting this to `"true"` will skip all .files and .folders while syncing.
-
-Example:
-```text
-# skip_symlinks = "false"
-# debug_https = "false"
-skip_dotfiles = "true"
-# dry_run = "false"
-# monitor_interval = "300"
-```
-
-#### monitor_interval
-The monitor interval is defined as the wait time 'between' syncs when running in monitor mode. When this interval expires, the client will check OneDrive for changes online, performing data integrity checks and scanning the local 'sync_dir' for new content.
-
-By default without configuration, 'monitor_interval' is set to 300 seconds. Setting this value to 600 will run the sync process every 10 minutes.
-
-Example:
-```text
-# skip_dotfiles = "false"
-# dry_run = "false"
-monitor_interval = "600"
-# min_notify_changes = "5"
-# monitor_log_frequency = "6"
-```
-**Note:** It is strongly advised you do not use a value of less than 300 seconds for 'monitor_interval'. Using a value less than 300 means your application will be constantly and needlessly checking OneDrive online for changes. Future versions of the application may enforce this minimum value.
-
-#### monitor_fullscan_frequency
-This configuration option controls the number of 'monitor_interval' iterations between when a full scan of your data is performed to ensure data integrity and consistency.
-
-By default without configuration, 'monitor_fullscan_frequency' is set to 12. In this default state, this means that a full scan is performed every 'monitor_interval' x 'monitor_fullscan_frequency' = 3600 seconds. This is only applicable when running in --monitor mode.
-
-Setting this value to 24 means that the full scan of OneDrive and checking the integrity of the data stored locally will occur every 2 hours (assuming 'monitor_interval' is set to 300 seconds):
-
-Example:
-```text
-# min_notify_changes = "5"
-# monitor_log_frequency = "6"
-monitor_fullscan_frequency = "24"
-# sync_root_files = "false"
-# classify_as_big_delete = "1000"
-```
-
-**Note:** When running in --monitor mode, at application start-up, a full scan will be performed to ensure data integrity. This option has zero effect when running the application in `--synchronize` mode, where a full scan will always be performed.
-
-#### monitor_log_frequency
-This configuration option controls how often a log entry is written to detail that a sync is occurring with OneDrive when using `--monitor` mode. The frequency of syncing with OneDrive is controlled via 'monitor_interval'.
-
-By default without configuration, 'monitor_log_frequency' is set to 6.
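-
-As a worked example: with the default `monitor_interval = "300"` and `monitor_log_frequency = "6"`, the suppressed sync log entries are written once every 300 x 6 = 1800 seconds - that is, roughly every 30 minutes of otherwise idle monitoring.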
-
-By default, at application start-up when using `--monitor` mode, the following will be logged to indicate that the application has correctly started and performed all the initial processing steps:
-```
-Configuring Global Azure AD Endpoints
-Initializing the Synchronization Engine ...
-Initializing monitor ...
-OneDrive monitor interval (seconds): 300
-Starting a sync with OneDrive
-Syncing changes from OneDrive ...
-Performing a database consistency and integrity check on locally stored data ...
-Sync with OneDrive is complete
-```
-Then, based on 'monitor_log_frequency', the following will be logged when the value is reached:
-```
-Starting a sync with OneDrive
-Syncing changes from OneDrive ...
-Sync with OneDrive is complete
-```
-**Note:** The additional log output `Performing a database consistency and integrity check on locally stored data ...` will only be displayed when this activity is occurring, which is triggered by 'monitor_fullscan_frequency'.
-
-#### min_notify_changes
-This option defines the minimum number of pending incoming changes necessary to trigger a desktop notification. This allows controlling the frequency of notifications.
-
-Example:
-```text
-# dry_run = "false"
-# monitor_interval = "300"
-min_notify_changes = "50"
-# monitor_log_frequency = "6"
-# monitor_fullscan_frequency = "12"
-```
-
-#### operation_timeout
-Operation Timeout is the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc.
-
-Example:
-```text
-# sync_file_permissions = "600"
-# rate_limit = "131072"
-operation_timeout = "3600"
-```
-
-#### ip_protocol_version
-By default, the application will use IPv4 and IPv6 to resolve and communicate with Microsoft OneDrive. In some Linux distributions (most notably Ubuntu and those distributions based on Ubuntu) this will cause problems due to how DNS resolution is being performed.
-
-To configure the application to use a specific IP version, configure the following in your config file:
-```text
-# operation_timeout = "3600"
-# dns_timeout = "60"
-# connect_timeout = "10"
-# data_timeout = "600"
-ip_protocol_version = "1"
-```
-**Note:**
-* A value of 0 will mean the client will use IPv4 and IPv6. This is the default.
-* A value of 1 will mean the client will use IPv4 only.
-* A value of 2 will mean the client will use IPv6 only.
-
-#### classify_as_big_delete
-This configuration option will help prevent the online deletion of files and folders when the directory that has been deleted contains more items than the specified value.
-
-By default, this value is 1000; both files and folders are counted as children of the directory that has been deleted.
-
-To change this value, configure the following in your config file:
-```text
-# monitor_fullscan_frequency = "12"
-# sync_root_files = "false"
-classify_as_big_delete = "3000"
-# user_agent = ""
-# remove_source_files = "false"
-```
-
-**Note:**
-* This option only looks at directories. It has zero effect on deleting files located in your 'sync_dir' root
-* This option (in v2.4.x and below) only gets activated when using `--monitor`. In `--synchronize` mode it is ignored, as it is assumed you intended the delete operation before starting your next manual sync with OneDrive.
-* Be sensible with setting this value - do not use a low value such as '1' as this will prevent you from syncing your data each and every time you delete a single file.
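-
-If a deliberate large deletion is blocked by this safeguard, the sync will abort; per the `--force` option documented under 'All available commands' below, one way to approve the pending 'big delete' is the following (a sketch only - review what will be deleted first, ideally with `--dry-run`):
-```text
-onedrive --synchronize --force
-```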
-
-
-#### Configuring the client for 'single tenant application' use
-In some instances when using OneDrive Business Accounts, depending on the Azure organisational configuration, it will be necessary to configure the client as a 'single tenant application'.
-To configure this, after creating the application on your Azure tenant, update the 'config' file with the tenant name (not the GUID) and the newly created Application ID, which will then be used for the authentication process.
-```text
-# skip_dir_strict_match = "false"
-application_id = "your.application.id.guid"
-# resync = "false"
-# bypass_data_preservation = "false"
-# azure_ad_endpoint = "xxxxxx"
-azure_tenant_id = "your.azure.tenant.name"
-# sync_business_shared_folders = "false"
-```
-
-#### Configuring the client to use older 'skilion' application identifier
-In some instances it may be desirable to utilise the older 'skilion' application identifier to avoid authorising a new application ID within Microsoft Azure environments.
-To configure this, update the 'config' file with the old Application ID, which will then be used for the authentication process.
-```text
-# skip_dir_strict_match = "false"
-application_id = "22c49a0d-d21c-4792-aed1-8f163c982546"
-# resync = "false"
-# bypass_data_preservation = "false"
-```
-**Note:** The application will now use the older 'skilion' client identifier; however, this may increase your chances of getting a OneDrive 429 error.
-
-**Note:** After changing the 'application_id' you will need to restart any 'onedrive' process you have running, and potentially issue a `--reauth` to re-authenticate the client with this updated application ID.
-
-## Frequently Asked Configuration Questions
-
-### How to sync only a specific or single directory?
-There are two methods to achieve this:
-* Utilise the '--single-directory' option to only sync this specific path
-* Utilise 'sync_list' to configure what files and directories to sync, and what should be excluded
-
-### How to 'skip' directories from syncing?
-There are several mechanisms available to 'skip' a directory from the sync process:
-* Utilise 'skip_dir' to configure what directories to skip. Refer to the configuration advice above.
-* Utilise 'sync_list' to configure what files and directories to sync, and what should be excluded
-
-One further method is to add a '.nosync' empty file to any folder. When this file is present, adding `--check-for-nosync` to your command line will make the sync process skip any folder where the '.nosync' file is present.
-
-To make this a permanent change that always skips folders when a '.nosync' empty file is present, add the following to your config file:
-
-Example:
-```text
-# upload_only = "false"
-# check_nomount = "false"
-check_nosync = "true"
-# download_only = "false"
-# disable_notifications = "false"
-```
-**Default:** False
-
-### How to 'skip' files from syncing?
-There are two methods to achieve this:
-* Utilise 'skip_file' to configure what files to skip. Refer to the configuration advice above.
-* Utilise 'sync_list' to configure what files and directories to sync, and what should be excluded
-
-### How to 'skip' dot files and folders from syncing?
-There are three methods to achieve this:
-* Utilise 'skip_file' or 'skip_dir' to configure what files or folders to skip. Refer to the configuration advice above.
-* Utilise 'sync_list' to configure what files and directories to sync, and what should be excluded
-* Utilise 'skip_dotfiles' to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive.
-
-Example:
-```text
-# skip_symlinks = "false"
-# debug_https = "false"
-skip_dotfiles = "true"
-# skip_size = "1000"
-# dry_run = "false"
-```
-**Default:** False
-
-### How to 'skip' files larger than a certain size from syncing?
-There are two methods to achieve this:
-* Use `--skip-size ARG` as part of a CLI command to skip new files larger than this size (in MB)
-* Use `skip_size = "value"` as part of your 'config' file where files larger than this size (in MB) will be skipped
-
-### How to 'rate limit' the application to control bandwidth consumed for upload & download operations?
-To limit the Internet bandwidth used for upload and download operations, you can configure the 'rate_limit' option within the config file.
-
-Example valid values for this are as follows:
-* 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts
-* 262144 = 256 KB/s
-* 524288 = 512 KB/s
-* 1048576 = 1 MB/s
-* 10485760 = 10 MB/s
-* 104857600 = 100 MB/s
-
-Example:
-```text
-# sync_business_shared_folders = "false"
-# sync_dir_permissions = "700"
-# sync_file_permissions = "600"
-rate_limit = "131072"
-```
-
-**Note:** A number greater than '131072' is a valid value, with '104857600' being tested as an upper limit.
-
-### How to prevent your local disk from filling up?
-By default, the application will reserve 50MB of disk space to prevent your filesystem from running out of disk space. This value can be modified by adding the following to your config file:
-
-Example:
-```text
-...
-# webhook_expiration_interval = "86400"
-# webhook_renewal_interval = "43200"
-space_reservation = "10"
-```
-
-The value entered is in MB (megabytes). In this example, a value of 10MB is used, and will be converted to bytes by the application. The value being used can be reviewed when using `--display-config`:
-```
-Config option 'sync_dir_permissions' = 700
-Config option 'sync_file_permissions' = 600
-Config option 'space_reservation' = 10485760
-Config option 'application_id' = 
-Config option 'azure_ad_endpoint' = 
-Config option 'azure_tenant_id' = common
-```
-
-Any value is valid here; however, if you use a value of '0', a value of '1' will be used instead, so that you do not run out of disk space.
-
-### How are symbolic links handled by the client?
-Microsoft OneDrive has zero concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories.
-
-As such, there are only two methods to support symbolic links with this client:
-1. Follow the Linux symbolic link and upload whatever the link is pointing at to OneDrive. This is the default behaviour.
-2. Skip symbolic links by configuring the application to do so. When skipped, no data, link, or reference is uploaded to OneDrive.
-
-To skip symbolic links, edit your configuration as per below:
-
-```text
-# local_first = "false"
-# no_remote_delete = "false"
-skip_symlinks = "true"
-# debug_https = "false"
-# skip_dotfiles = "false"
-```
-Setting this to `"true"` will configure the client to skip all symbolic links while syncing.
-
-The default setting is `"false"`, which will sync the whole folder structure referenced by the symbolic link, duplicating the contents on OneDrive in the place where the symbolic link is.
-
-### How to sync shared folders (OneDrive Personal)?
-Folders shared with you can be synced by adding them to your OneDrive. To do that, open your OneDrive, go to the Shared files list, right-click on the folder you want to sync, and then click on "Add to my OneDrive".
-
-### How to sync shared folders (OneDrive Business or Office 365)?
-Refer to [./BusinessSharedFolders.md](BusinessSharedFolders.md) for configuration assistance.
-
-Do not use the 'Add shortcut to My files' from the OneDrive web-based interface to add a 'shortcut' to your shared folder. This shortcut is not supported by the OneDrive API, thus it cannot be used.
-
-### How to sync SharePoint / Office 365 Shared Libraries?
-Refer to [./SharePoint-Shared-Libraries.md](SharePoint-Shared-Libraries.md) for configuration assistance.
-
-### How to run a user systemd service at boot without user login?
-In some cases it may be desirable for the systemd service to start without having to log in as your 'user'.
-
-To achieve this, you need to reconfigure your 'user' account so that the systemd services you have created will start up without you having to log in to your system:
-```text
-loginctl enable-linger 
-```
-
-### How to create a shareable link?
-In some cases it may be desirable to create a shareable file link and give this link to other users to access a specific file.
-
-To do this, use the following command:
-```text
-onedrive --create-share-link 
-```
-**Note:** By default this will be a read-only link.
-
-To make this a read-write link, use the following command:
-```text
-onedrive --create-share-link --with-editing-perms
-```
-**Note:** The ordering of the option file path and option flag is important.
-
-### How to sync both Personal and Business accounts at the same time?
-You must configure separate instances of the application configuration for each account.
-
-Refer to [./advanced-usage.md](advanced-usage.md) for configuration assistance.
-
-### How to sync multiple SharePoint Libraries at the same time?
-You must configure separate instances of the application configuration for each SharePoint Library.
-
-Refer to [./advanced-usage.md](advanced-usage.md) for configuration assistance.
-
-## Running 'onedrive' in 'monitor' mode
-Monitor mode (`--monitor`) allows the onedrive process to continually monitor your local file system for changes to files.
-
-Two common errors can occur when using monitor mode:
-* Initialisation failure
-* Unable to add a new inotify watch
-
-Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low:
-* `fs.file-max`
-* `fs.inotify.max_user_watches`
-
-To determine what the existing values are on your system, use the following commands:
-```text
-sysctl fs.file-max
-sysctl fs.inotify.max_user_watches
-```
-
-To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir':
-```text
-cd /path/to/your/sync/dir
-ls -laR | wc -l
-```
-
-To make a change to these variables using your file and folder count:
-```
-sudo sysctl fs.file-max=
-sudo sysctl fs.inotify.max_user_watches=
-```
-
-To make these changes permanent, refer to your OS reference documentation.
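-
-On most modern Linux distributions, one way to persist these values across reboots is a drop-in file under `/etc/sysctl.d/` (a sketch only - the file name is arbitrary and the values shown are placeholders that should reflect your own file and folder count):
-```text
-sudo tee /etc/sysctl.d/90-onedrive.conf <<EOF
-fs.file-max = 204800
-fs.inotify.max_user_watches = 204800
-EOF
-# Reload all sysctl configuration files
-sudo sysctl --system
-```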
-
-### Use webhook to subscribe to remote updates in 'monitor' mode
-
-A webhook can be optionally enabled in monitor mode to allow the onedrive process to subscribe to remote updates. Remote changes can be synced to your local file system as soon as possible, without waiting for the next sync cycle.
-
-To enable this feature, you need to configure the following options in the config file:
-
-```text
-webhook_enabled = "true"
-webhook_public_url = ""
-```
-
-Setting `webhook_enabled` to `true` enables the webhook in 'monitor' mode. The onedrive process will listen for incoming updates at a configurable endpoint, which defaults to `0.0.0.0:8888`. The `webhook_public_url` must be set to a public-facing URL for Microsoft to send updates to your webhook. If your host is directly exposed to the Internet, the `webhook_public_url` can be set to `http://:8888/` to match the default endpoint. However, the recommended approach is to configure a reverse proxy like nginx.
-
-**Note:** A valid HTTPS certificate is required for your public-facing URL if using nginx.
-
-For example, below is an nginx config snippet to proxy traffic into the webhook:
-
-```text
-server {
-    listen 80;
-    location /webhooks/onedrive {
-        proxy_http_version 1.1;
-        proxy_pass http://127.0.0.1:8888;
-    }
-}
-```
-
-With nginx running, you can configure `webhook_public_url` to `https:///webhooks/onedrive`.
-
-If you receive this application error:
-```text
-Subscription validation request failed. Response must exactly match validationToken query parameter.
-```
-The most likely cause for this error will be your nginx configuration. To resolve this, investigate the following nginx configuration:
-
-```text
-server {
-    listen 80;
-    location /webhooks/onedrive {
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_set_header X-Original-Request-URI $request_uri;
-        proxy_read_timeout 300s;
-        proxy_connect_timeout 75s;
-        proxy_buffering off;
-        proxy_http_version 1.1;
-        proxy_pass http://127.0.0.1:8888;
-    }
-}
-```
-
-For any further nginx configuration assistance, please refer to: https://docs.nginx.com/
-
-### More webhook configuration options
-
-The options below can be optionally configured. The defaults are usually good enough.
-
-#### webhook_listening_host and webhook_listening_port
-
-Set `webhook_listening_host` and `webhook_listening_port` to change the webhook listening endpoint. If `webhook_listening_host` is left empty, which is the default, the webhook will bind to `0.0.0.0`. The default `webhook_listening_port` is `8888`.
-
-```
-webhook_listening_host = ""
-webhook_listening_port = "8888"
-```
-
-#### webhook_expiration_interval and webhook_renewal_interval
-
-Set `webhook_expiration_interval` and `webhook_renewal_interval` to change the frequency of subscription renewal. By default, the webhook asks Microsoft to keep subscriptions alive for 24 hours, and it renews subscriptions when it is less than 12 hours before their expiration.
-
-```
-# Default expiration interval is 24 hours
-webhook_expiration_interval = "86400"
-
-# Default renewal interval is 12 hours
-webhook_renewal_interval = "43200"
-```
-
-## Running 'onedrive' as a system service
-There are a few ways to run onedrive as a service:
-* via init.d
-* via systemd
-* via runit
-
-**Note:** If using the service files, you may need to increase the `fs.inotify.max_user_watches` value on your system to handle the number of files in the directory you are monitoring, as the initial value may be too low.
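-
-If you are unsure which service manager your distribution uses, one quick (though not universal) check is to look at what is running as PID 1:
-```text
-ps -p 1 -o comm=
-# 'systemd' -> use the systemd instructions below
-# 'init'    -> likely init.d, or runit on some distributions
-```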
-
-### OneDrive service running as root user via init.d
-```text
-chkconfig onedrive on
-service onedrive start
-```
-To see the logs, run:
-```text
-tail -f /var/log/onedrive/.onedrive.log
-```
-To change what 'user' the client runs under (by default root), manually edit the init.d service file and modify `daemon --user root onedrive_service.sh` for the correct user.
-
-### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)
-First, su to root using `su - root`, then enable the systemd service:
-```text
-systemctl --user enable onedrive
-systemctl --user start onedrive
-```
-**Note:** The `systemctl --user` directive is not applicable for Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below.
-
-**Note:** This will run the 'onedrive' process with a UID/GID of '0', thus, any files or folders that are created will be owned by 'root'
-
-To view the status of the service running, use the following:
-```text
-systemctl --user status onedrive.service
-```
-
-To see the systemd application logs, run:
-```text
-journalctl --user-unit=onedrive -f
-```
-
-**Note:** It is a 'systemd' requirement that the XDG environment variables exist for correct enablement and operation of systemd services. If you receive this error when enabling the systemd service:
-```
-Failed to connect to bus: No such file or directory
-```
-The most likely cause is that the XDG environment variables are missing. To fix this, you must add the following to `.bashrc` or any other file which is run on user login:
-```
-export XDG_RUNTIME_DIR="/run/user/$UID"
-export DBUS_SESSION_BUS_ADDRESS="unix:path=${XDG_RUNTIME_DIR}/bus"
-```
-
-To make this change effective, you must log out of all user accounts where this change has been made.
-
-**Note:** On some systems (for example - Raspbian / Ubuntu / Debian on Raspberry Pi) the above XDG fix may not be reliable after system reboots. A potential alternative to start the client via systemd as root is to perform the following:
-1. Create a symbolic link from `/home/root/.config/onedrive` pointing to `/root/.config/onedrive/`
-2. Create a systemd service using the '@' service file: `systemctl enable onedrive@root.service`
-3. Start the service: `systemctl start onedrive@root.service`
-
-This will ensure that the service will correctly restart on system reboot.
-
-To see the systemd application logs, run:
-```text
-journalctl --unit=onedrive@ -f
-```
-
-### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)
-```text
-systemctl enable onedrive
-systemctl start onedrive
-```
-**Note:** This will run the 'onedrive' process with a UID/GID of '0', thus, any files or folders that are created will be owned by 'root'
-
-To see the systemd application logs, run:
-```text
-journalctl --unit=onedrive -f
-```
-
-### OneDrive service running as a non-root user via systemd (All Linux Distributions)
-In some cases it is desirable to run the OneDrive client as a service, but not as the 'root' user. In this case, follow the directions below to configure the service for your normal user login.
-
-1. As the user who will be running the service, run the application in standalone mode, authorize the application for use & validate that the synchronization is working as expected:
-```text
-onedrive --synchronize --verbose
-```
-2. Once the application is validated and working for your user, enable and start the service as the 'root' user, where is your username from step 1 above.
-```text
-systemctl enable onedrive@.service
-systemctl start onedrive@.service
-```
-3. To view the status of the service running for the user, use the following:
-```text
-systemctl status onedrive@.service
-```
-
-To see the systemd application logs, run:
-```text
-journalctl --unit=onedrive@ -f
-```
-
-### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)
-In some cases you may wish to receive GUI notifications when using the client while logged in as a non-root user. In this case, follow the directions below:
-
-1. Log in via the graphical UI as the user you wish to enable the service for
-2. Disable any `onedrive@` service files for your username - e.g.:
-```text
-sudo systemctl stop onedrive@alex.service
-sudo systemctl disable onedrive@alex.service
-```
-3. Enable the service as per the following:
-```text
-systemctl --user enable onedrive
-systemctl --user start onedrive
-```
-
-To view the status of the service running for the user, use the following:
-```text
-systemctl --user status onedrive.service
-```
-
-To see the systemd application logs, run:
-```text
-journalctl --user-unit=onedrive -f
-```
-
-**Note:** The `systemctl --user` directive is not applicable for Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms
-
-### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)
-
-1. Create the following folder if not already present: `/etc/sv/runsvdir-`
-
-   - where `` is the `USER` targeted for the service
-   - _e.g._ `# mkdir /etc/sv/runsvdir-nolan`
-
-2. Create a file called `run` under the previously created folder with
-   executable permissions
-
-   - `# touch /etc/sv/runsvdir-/run`
-   - `# chmod 0755 /etc/sv/runsvdir-/run`
-
-3. Edit the `run` file with the following contents (privileges needed)
-
-   ```sh
-   #!/bin/sh
-   export USER=""
-   export HOME="/home/"
-
-   groups="$(id -Gn "${USER}" | tr ' ' ':')"
-   svdir="${HOME}/service"
-
-   exec chpst -u "${USER}:${groups}" runsvdir "${svdir}"
-   ```
-
-   - do not forget to correct the `` according to the `USER` set in
-     step #1
-
-4. Enable the previously created folder as a service
-
-   - `# ln -fs /etc/sv/runsvdir- /var/service/`
-
-5. Create a subfolder in the `USER`'s `HOME` directory to store the services
-   (or symlinks)
-
-   - `$ mkdir ~/service`
-
-6. Create a subfolder for OneDrive specifically
-
-   - `$ mkdir ~/service/onedrive/`
-
-7. Create a file called `run` under the previously created folder with
-   executable permissions
-
-   - `$ touch ~/service/onedrive/run`
-   - `$ chmod 0755 ~/service/onedrive/run`
-
-8. Append the following contents to the `run` file
-
-   ```sh
-   #!/usr/bin/env sh
-   exec /usr/bin/onedrive --monitor
-   ```
-
-   - in some scenarios the path for the `onedrive` binary might differ; you can
-     obtain it regardless by running `$ command -v onedrive`
-
-9. Reboot to apply changes
-
-10. Check status of user-defined services
-
-    - `$ sv status ~/service/*`
-
-You may refer to Void's documentation regarding
-[Per-User Services](https://docs.voidlinux.org/config/services/user-services.html)
-for extra details.
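-
-For day-to-day control of the user-level service, the same `sv` tool can be used against the service directory (assuming the `~/service/onedrive/` layout created above):
-```text
-sv status ~/service/onedrive/    # query the service state
-sv down ~/service/onedrive/      # stop the service
-sv up ~/service/onedrive/        # start the service again
-sv restart ~/service/onedrive/   # stop and start in one step
-```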
-
-## Additional Configuration
-### Advanced Configuration of the OneDrive Free Client
-* Configuring the client to use multiple OneDrive accounts / configurations, for example:
-  * Setup to use onedrive with both Personal and Business accounts
-  * Setup to use onedrive with multiple SharePoint Libraries
-* Configuring the client for use in dual-boot (Windows / Linux) situations
-* Configuring the client for use when 'sync_dir' is a mounted directory
-* Upload data from the local ~/OneDrive folder to a specific location on OneDrive
-
-Refer to [./advanced-usage.md](advanced-usage.md) for configuration assistance.
-
-### Access OneDrive service through a proxy
-If you have a requirement to run the client through a proxy, there are a couple of ways to achieve this:
-1. Set the proxy configuration in `~/.bashrc` to cover the authorization process and manual use of `--synchronize`
-2. If running as a systemd service, edit the applicable systemd service file to include the proxy configuration information:
-```text
-[Unit]
-Description=OneDrive Free Client
-Documentation=https://github.com/abraunegg/onedrive
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Environment="HTTP_PROXY=http://ip.address:port"
-Environment="HTTPS_PROXY=http://ip.address:port"
-ExecStart=/usr/local/bin/onedrive --monitor
-Restart=on-failure
-RestartSec=3
-
-[Install]
-WantedBy=default.target
-```
-
-**Note:** After modifying the service files, you will need to run `sudo systemctl daemon-reload` to ensure the service file changes are picked up. A restart of the OneDrive service will also be required to pick up the change and send the traffic via the proxy server.
-
-### Set up SELinux for a sync folder outside of the home folder
-If SELinux is enforcing and the sync folder is outside of the home folder, and there is no policy for cloud file service providers, label the filesystem folder as user_home_t.
-```text
-sudo semanage fcontext -a -t user_home_t /path/to/onedriveSyncFolder
-sudo restorecon -R -v /path/to/onedriveSyncFolder
-```
-To remove this change from SELinux and restore the default behaviour:
-```text
-sudo semanage fcontext -d /path/to/onedriveSyncFolder
-sudo restorecon -R -v /path/to/onedriveSyncFolder
-```
-
-## All available commands
-Output of `onedrive --help`
-```text
-OneDrive - a client for OneDrive Cloud Services
-
-Usage:
-  onedrive [options] --synchronize
-      Do a one time synchronization
-  onedrive [options] --monitor
-      Monitor filesystem and sync regularly
-  onedrive [options] --display-config
-      Display the currently used configuration
-  onedrive [options] --display-sync-status
-      Query OneDrive service and report on pending changes
-  onedrive -h | --help
-      Show this help screen
-  onedrive --version
-      Show version
-
-Options:
-
-  --auth-files ARG
-      Perform authorization via two files passed in as ARG in the format `authUrl:responseUrl`
-      The authorization URL is written to the `authUrl`, then onedrive waits for the file `responseUrl`
-      to be present, and reads the response from that file.
-  --auth-response ARG
-      Perform authentication not via interactive dialog but via providing the response url directly.
-  --check-for-nomount
-      Check for the presence of .nosync in the syncdir root. If found, do not perform sync.
-  --check-for-nosync
-      Check for the presence of .nosync in each directory. If found, skip directory from sync.
- --classify-as-big-delete - Number of children in a path that is locally removed which will be classified as a 'big data delete' - --cleanup-local-files - Cleanup additional local files when using --download-only. This will remove local data. - --confdir ARG - Set the directory used to store the configuration files - --create-directory ARG - Create a directory on OneDrive - no sync will be performed. - --create-share-link ARG - Create a shareable link for an existing file on OneDrive - --debug-https - Debug OneDrive HTTPS communication. - --destination-directory ARG - Destination directory for renamed or move on OneDrive - no sync will be performed. - --disable-download-validation - Disable download validation when downloading from OneDrive - --disable-notifications - Do not use desktop notifications in monitor mode. - --disable-upload-validation - Disable upload validation when uploading to OneDrive - --display-config - Display what options the client will use as currently configured - no sync will be performed. - --display-running-config - Display what options the client has been configured to use on application startup. - --display-sync-status - Display the sync status of the client - no sync will be performed. - --download-only - Replicate the OneDrive online state locally, by only downloading changes from OneDrive. Do not upload local changes to OneDrive. - --dry-run - Perform a trial sync with no changes made - --enable-logging - Enable client activity to a separate log file - --force - Force the deletion of data when a 'big delete' is detected - --force-http-11 - Force the use of HTTP 1.1 for all operations - --force-sync - Force a synchronization of a specific folder, only when using --single-directory and ignoring all non-default skip_dir and skip_file rules - --get-O365-drive-id ARG - Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library - --get-file-link ARG - Display the file link of a synced file - --help -h - This help information. - --list-shared-folders - List OneDrive Business Shared Folders - --local-first - Synchronize from the local directory source first, before downloading changes from OneDrive. - --log-dir ARG - Directory where logging output is saved to, needs to end with a slash. - --logout - Logout the current user - --min-notify-changes ARG - Minimum number of pending incoming changes necessary to trigger a desktop notification - --modified-by ARG - Display the last modified by details of a given path - --monitor -m - Keep monitoring for local and remote changes - --monitor-fullscan-frequency ARG - Number of sync runs before performing a full local scan of the synced directory - --monitor-interval ARG - Number of seconds by which each sync operation is undertaken when idle under monitor mode. - --monitor-log-frequency ARG - Frequency of logging in monitor mode - --no-remote-delete - Do not delete local file 'deletes' from OneDrive when using --upload-only - --operation-timeout ARG - Maximum amount of time (in seconds) an operation is allowed to take - --print-token - Print the access token, useful for debugging - --reauth - Reauthenticate the client with OneDrive - --remove-directory ARG - Remove a directory on OneDrive - no sync will be performed. 
- --remove-source-files - Remove source file after successful transfer to OneDrive when using --upload-only - --resync - Forget the last saved state, perform a full sync - --resync-auth - Approve the use of performing a --resync action - --single-directory ARG - Specify a single local directory within the OneDrive root to sync. - --skip-dir ARG - Skip any directories that match this pattern from syncing - --skip-dir-strict-match - When matching skip_dir directories, only match explicit matches - --skip-dot-files - Skip dot files and folders from syncing - --skip-file ARG - Skip any files that match this pattern from syncing - --skip-size ARG - Skip new files larger than this size (in MB) - --skip-symlinks - Skip syncing of symlinks - --source-directory ARG - Source directory to rename or move on OneDrive - no sync will be performed. - --space-reservation ARG - The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation - --sync-root-files - Sync all files in sync_dir root when using sync_list. - --sync-shared-folders - Sync OneDrive Business Shared Folders - --syncdir ARG - Specify the local directory used for synchronization to OneDrive - --synchronize - Perform a synchronization - --upload-only - Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive. - --user-agent ARG - Specify a User Agent string to the http client - --verbose -v+ - Print more details, useful for debugging (repeat for extra debugging) - --version - Print the version and exit - --with-editing-perms - Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link -``` diff --git a/docs/advanced-usage.md b/docs/advanced-usage.md index 2701909d8..17759e44a 100644 --- a/docs/advanced-usage.md +++ b/docs/advanced-usage.md @@ -124,10 +124,12 @@ Example: ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/my-new-config" ``` -**Note:** When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be expanded. +> [!IMPORTANT] +> When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be manually expanded when editing your systemd file. + ### Step 3: Enable the new systemd service -Once the file is correctly editied, you can enable the new systemd service using the following commands. +Once the file is correctly edited, you can enable the new systemd service using the following commands. #### Red Hat Enterprise Linux, CentOS Linux ```text @@ -227,10 +229,10 @@ docker run -it --name onedrive -v onedrive_conf_sharepoint_site3:/onedrive/conf docker run -it --name onedrive -v onedrive_conf_sharepoint_site50:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite50:/onedrive/data" driveone/onedrive:latest ``` -#### TIP -To avoid 're-authenticating' and 'authorising' each individual Docker container, if all the Docker containers are using the 'same' OneDrive credentials, you can re-use the 'refresh_token' from one Docker container to another by copying this file to the configuration Docker volume of each Docker container. - -If the account credentials are different .. you will need to re-authenticate each Docker container individually. 
+> [!TIP]
+> To avoid 're-authenticating' and 'authorising' each individual Docker container, if all the Docker containers are using the 'same' OneDrive credentials, you can reuse the 'refresh_token' from one Docker container to another by copying this file to the configuration Docker volume of each Docker container.
+>
+> If the account credentials are different, you will need to re-authenticate each Docker container individually.
 
 ## Configuring the client for use in dual-boot (Windows / Linux) situations
 When dual booting Windows and Linux, depending on the Windows OneDrive account configuration, the 'Files On-Demand' option may be enabled when running OneDrive within your Windows environment.
 
@@ -241,7 +243,7 @@ To fix the problem of windows turning all files (that should be kept offline) in
 To find this setting, open the onedrive pop-up window from the taskbar, click "Help & Settings" > "Settings". This opens a new window. Go to the tab "Settings" and look for the section "Files On-Demand".
 
-After unchecking the option and clicking "OK", the Windows OneDrive client should restart itself and start actually downloading your files so they will truely be available on your disk when offline. These files will then be fully accessible under Linux and the Linux OneDrive client.
+After unchecking the option and clicking "OK", the Windows OneDrive client should restart itself and start actually downloading your files so they will truly be available on your disk when offline. These files will then be fully accessible under Linux and the Linux OneDrive client.
 
 | OneDrive Personal | Onedrive Business<br>SharePoint |
 |---|---|
@@ -257,12 +259,13 @@ The issue here is - how does the client react if the mount point gets removed -
 
 The client has zero knowledge of any event that causes a mountpoint to become unavailable; thus, the client (if you are running as a service) will assume that you deleted the files and will go ahead and delete all your files on OneDrive. This is most certainly an undesirable action.
 
-There are a few options here which you can configure in your 'config' file to assist you to prevent this sort of item from occuring:
+There are a few options here which you can configure in your 'config' file to help prevent this from occurring:
 1. classify_as_big_delete
 2. check_nomount
 3. check_nosync
 
-**Note:** Before making any change to your configuration, stop any sync process & stop any onedrive systemd service from running.
+> [!NOTE]
+> Before making any change to your configuration, stop any sync process & stop any onedrive systemd service from running.
 
 ### classify_as_big_delete
 By default, this uses a value of 1000 files|folders. In an undesirable unmount event, if you have more than 1000 files, this default level will prevent the client from executing the online delete. Modify this value up or down as desired.
@@ -282,7 +285,7 @@ After making this sort of change - test with `--dry-run` so you can see the impa
 ## Upload data from the local ~/OneDrive folder to a specific location on OneDrive
 In some environments, you may not want your local ~/OneDrive folder to be uploaded directly to the root of your OneDrive account online.
 
-Unfortunatly, the OneDrive API lacks any facility to perform a re-direction of data during upload.
+Unfortunately, the OneDrive API lacks any facility to perform a re-direction of data during upload.
 
 The workaround for this is to structure your local filesystem and reconfigure your client to achieve the desired goal.
 
diff --git a/docs/application-config-options.md b/docs/application-config-options.md
new file mode 100644
index 000000000..fe46a878d
--- /dev/null
+++ b/docs/application-config-options.md
@@ -0,0 +1,1201 @@
+# Application Configuration Options for the OneDrive Client for Linux
+## Application Version
+Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
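+
+For illustration, a typical version check looks like the following (a sketch only - the exact output string varies between releases and distribution packages):
+```text
+onedrive --version
+onedrive v2.5.0
+```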
+
+## Table of Contents
+
+- [Configuration File Options](#configuration-file-options)
+  - [application_id](#application_id)
+  - [azure_ad_endpoint](#azure_ad_endpoint)
+  - [azure_tenant_id](#azure_tenant_id)
+  - [bypass_data_preservation](#bypass_data_preservation)
+  - [check_nomount](#check_nomount)
+  - [check_nosync](#check_nosync)
+  - [classify_as_big_delete](#classify_as_big_delete)
+  - [cleanup_local_files](#cleanup_local_files)
+  - [connect_timeout](#connect_timeout)
+  - [data_timeout](#data_timeout)
+  - [debug_https](#debug_https)
+  - [disable_download_validation](#disable_download_validation)
+  - [disable_notifications](#disable_notifications)
+  - [disable_upload_validation](#disable_upload_validation)
+  - [display_running_config](#display_running_config)
+  - [dns_timeout](#dns_timeout)
+  - [download_only](#download_only)
+  - [drive_id](#drive_id)
+  - [dry_run](#dry_run)
+  - [enable_logging](#enable_logging)
+  - [force_http_11](#force_http_11)
+  - [ip_protocol_version](#ip_protocol_version)
+  - [local_first](#local_first)
+  - [log_dir](#log_dir)
+  - [monitor_fullscan_frequency](#monitor_fullscan_frequency)
+  - [monitor_interval](#monitor_interval)
+  - [monitor_log_frequency](#monitor_log_frequency)
+  - [no_remote_delete](#no_remote_delete)
+  - [operation_timeout](#operation_timeout)
+  - [rate_limit](#rate_limit)
+  - [read_only_auth_scope](#read_only_auth_scope)
+  - [remove_source_files](#remove_source_files)
+  - [resync](#resync)
+  - [resync_auth](#resync_auth)
+  - [skip_dir](#skip_dir)
+  - [skip_dir_strict_match](#skip_dir_strict_match)
+  - [skip_dotfiles](#skip_dotfiles)
+  - [skip_file](#skip_file)
+  - [skip_size](#skip_size)
+  - [skip_symlinks](#skip_symlinks)
+  - [space_reservation](#space_reservation)
+  - [sync_business_shared_items](#sync_business_shared_items)
+  - [sync_dir](#sync_dir)
+  - [sync_dir_permissions](#sync_dir_permissions)
+  - [sync_file_permissions](#sync_file_permissions)
+  - [sync_root_files](#sync_root_files)
+  - [threads](#threads)
+  - [upload_only](#upload_only)
+  - [user_agent](#user_agent)
+  - [webhook_enabled](#webhook_enabled)
+  - [webhook_expiration_interval](#webhook_expiration_interval)
+  - [webhook_listening_host](#webhook_listening_host)
+  - [webhook_listening_port](#webhook_listening_port)
+  - [webhook_public_url](#webhook_public_url)
+  - [webhook_renewal_interval](#webhook_renewal_interval)
+- [Command Line Interface (CLI) Only Options](#command-line-interface-cli-only-options)
+  - [CLI Option: --auth-files](#cli-option---auth-files)
+  - [CLI Option: --auth-response](#cli-option---auth-response)
+  - [CLI Option: --confdir](#cli-option---confdir)
+  - [CLI Option: --create-directory](#cli-option---create-directory)
+  - [CLI Option: --create-share-link](#cli-option---create-share-link)
+  - [CLI Option: --destination-directory](#cli-option---destination-directory)
+  - [CLI Option: --display-config](#cli-option---display-config)
+  - [CLI Option: --display-sync-status](#cli-option---display-sync-status)
+  - [CLI Option: --display-quota](#cli-option---display-quota)
+  - [CLI Option: --force](#cli-option---force)
+  - [CLI Option: --force-sync](#cli-option---force-sync)
+  - [CLI Option: --get-file-link](#cli-option---get-file-link)
+  - [CLI Option: --get-sharepoint-drive-id](#cli-option---get-sharepoint-drive-id)
+  - [CLI Option: --list-shared-items](#cli-option---list-shared-items)
+  - [CLI Option: --logout](#cli-option---logout)
+  - [CLI Option: --modified-by](#cli-option---modified-by)
+  - [CLI Option: --monitor | -m](#cli-option---monitor--m)
+  - [CLI Option: --print-access-token](#cli-option---print-access-token)
+  - [CLI Option: --reauth](#cli-option---reauth)
+  - [CLI Option: --remove-directory](#cli-option---remove-directory)
+  - [CLI Option: --single-directory](#cli-option---single-directory)
+  - [CLI Option: --source-directory](#cli-option---source-directory)
+  - [CLI Option: --sync | -s](#cli-option---sync--s)
+  - [CLI Option: --sync-shared-files](#cli-option---sync-shared-files)
+  - [CLI Option: --verbose | -v+](#cli-option---verbose--v)
+  - [CLI Option: --with-editing-perms](#cli-option---with-editing-perms)
+- [Deprecated Configuration File and CLI Options](#depreciated-configuration-file-and-cli-options)
+  - [force_http_2](#force_http_2)
+  - [min_notify_changes](#min_notify_changes)
+  - [CLI Option: --synchronize](#cli-option---synchronize)
+
+
+## Configuration File Options
+
+### application_id
+_**Description:**_ This is the config option for the application id that the client uses to identify itself to Microsoft OneDrive. In some circumstances, it may be desirable to use your own application id. To do this, you must register a new application with Microsoft Azure via https://portal.azure.com/, then use your new application id with this config option.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ d50ca740-c83f-4d1b-b616-12c519384f0c
+
+_**Config Example:**_ `application_id = "d50ca740-c83f-4d1b-b616-12c519384f0c"`
+
+### azure_ad_endpoint
+_**Description:**_ This is the config option to change the Microsoft Azure Authentication Endpoint that the client uses, to conform with data and security requirements that require data to reside within the geographic borders of a specific country.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *Empty* - not required for normal operation
+
+_**Valid Values:**_ USL4, USL5, DE, CN
+
+_**Config Example:**_ `azure_ad_endpoint = "DE"`
+
+### azure_tenant_id
+_**Description:**_ This config option allows the locking of the client to a specific single tenant and will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common". The tenant id may be the GUID Directory ID or the fully qualified tenant name.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *Empty* - not required for normal operation
+
+_**Config Example:**_ `azure_tenant_id = "example.onmicrosoft.us"` or `azure_tenant_id = "0c4be462-a1ab-499b-99e0-da08ce52a2cc"`
+
+> [!IMPORTANT]
+> Must be configured if 'azure_ad_endpoint' is configured.
+
+### bypass_data_preservation
+_**Description:**_ This config option allows the disabling of preserving local data by renaming the local file in the event of data conflict. If this is enabled, you will experience data loss on your local data as the local file will be overwritten with data from OneDrive online. Use with care and caution.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `bypass_data_preservation = "false"` or `bypass_data_preservation = "true"`
+
+### check_nomount
+_**Description:**_ This config option is useful to prevent application startup & ongoing use in 'Monitor Mode' if the configured 'sync_dir' is a separate disk that is being mounted by your system. This option will check for the presence of a `.nosync` file in your mount point, and if present, abort any sync process to preserve data.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `check_nomount = "false"` or `check_nomount = "true"`
+
+_**CLI Option:**_ `--check-for-nomount`
+
+> [!TIP]
+> Create a `.nosync` file in your mount point *before* you mount your disk, so that this `.nosync` file is visible in your mount point if your disk becomes unmounted at any point. This preserves your data when you enable this option.
+
+### check_nosync
+_**Description:**_ This config option is useful to prevent the sync of a *local* directory to Microsoft OneDrive. It will *not* check for this file online to prevent the download of directories to your local system.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `check_nosync = "false"` or `check_nosync = "true"`
+
+_**CLI Option Use:**_ `--check-for-nosync`
+
+> [!IMPORTANT]
+> Create a `.nosync` file in any *local* directory that you do not wish to sync to Microsoft OneDrive when you enable this option.
+
+### classify_as_big_delete
+_**Description:**_ This config option defines the number of children in a path that is locally removed which will be classified as a 'big data delete' to safeguard large data removals - which are typically accidental local delete events.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 1000
+
+_**Config Example:**_ `classify_as_big_delete = "2000"`
+
+_**CLI Option Use:**_ `--classify-as-big-delete 2000`
+
+> [!NOTE]
+> If this option is triggered, you will need to add `--force` to force a sync to occur.
+
+### cleanup_local_files
+_**Description:**_ This config option provides the capability to clean up local files and folders if they are removed online.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `cleanup_local_files = "false"` or `cleanup_local_files = "true"`
+
+_**CLI Option Use:**_ `--cleanup-local-files`
+
+> [!IMPORTANT]
+> This configuration option can only be used with 'download_only'. It cannot be used with any other application option.
+
+### connect_timeout
+_**Description:**_ This configuration setting manages the TCP connection timeout duration in seconds for HTTPS connections to Microsoft OneDrive when using the curl library (CURLOPT_CONNECTTIMEOUT).
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 10
+
+_**Config Example:**_ `connect_timeout = "15"`
+
+### data_timeout
+_**Description:**_ This setting controls the timeout duration, in seconds, for when data is not received on an active connection to Microsoft OneDrive over HTTPS when using the curl library, before that connection is timed out.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 60
+
+_**Config Example:**_ `data_timeout = "300"`
+
+### debug_https
+_**Description:**_ This setting controls whether the curl library is configured to output additional data to assist with diagnosing HTTPS issues and problems.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `debug_https = "false"` or `debug_https = "true"`
+
+_**CLI Option Use:**_ `--debug-https`
+
+> [!WARNING]
+> Whilst this option can be used at any time, it is advisable that you only use it when asked to, as this will output your `Authorization: bearer` value - which is your authentication token to Microsoft OneDrive.
+
+### disable_download_validation
+_**Description:**_ This option determines whether the client will conduct integrity validation on files downloaded from Microsoft OneDrive. Sometimes, when downloading files, particularly from SharePoint, there is a discrepancy between the file size reported by the OneDrive API and the byte count received from the SharePoint HTTP Server for the same file. Enable this option to disable the integrity checks performed by this client.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `disable_download_validation = "false"` or `disable_download_validation = "true"`
+
+_**CLI Option Use:**_ `--disable-download-validation`
+
+> [!CAUTION]
+> If you're downloading data from SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack. Enabling this option disables all download integrity checks.
+
+### disable_notifications
+_**Description:**_ This setting controls whether GUI notifications are sent from the client to your display manager session.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `disable_notifications = "false"` or `disable_notifications = "true"`
+
+_**CLI Option Use:**_ `--disable-notifications`
+
+### disable_upload_validation
+_**Description:**_ This option determines whether the client will conduct integrity validation on files uploaded to Microsoft OneDrive. Sometimes, when uploading files, particularly to SharePoint, SharePoint will modify your file post-upload by adding new data to your file, which breaks the integrity checking of the upload performed by this client. Enable this option to disable the integrity checks performed by this client.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `disable_upload_validation = "false"` or `disable_upload_validation = "true"`
+
+_**CLI Option Use:**_ `--disable-upload-validation`
+
+> [!CAUTION]
+> If you're uploading data to SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack. Enabling this option disables all upload integrity checks.
+
+### display_running_config
+_**Description:**_ This option will print the running configuration of the application at application startup. This may be desirable when running in containerised environments, so that the application configuration in use at startup is written out to any applicable log file.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `display_running_config = "false"` or `display_running_config = "true"`
+
+_**CLI Option Use:**_ `--display-running-config`
+
+### dns_timeout
+_**Description:**_ This setting controls the libcurl DNS cache value. By default, libcurl caches this info for 60 seconds. This cache timeout is entirely speculative - it assumes that a name will resolve to the same address for a small amount of time into the future, as libcurl does not use DNS TTL properties. We recommend that users do not tamper with this option unless strictly necessary.
+
+### download_only
+_**Description:**_ This setting forces the client to only download data from Microsoft OneDrive and replicate that data locally. No changes made locally will be uploaded to Microsoft OneDrive when using this option.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `download_only = "false"` or `download_only = "true"`
+
+_**CLI Option Use:**_ `--download-only`
+
+### drive_id
+_**Description:**_ This setting controls the specific drive identifier the client will use when syncing with Microsoft OneDrive.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *None*
+
+_**Config Example:**_ `drive_id = "b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB"`
+
+> [!NOTE]
+> This option is typically only used when configuring the client to sync a specific SharePoint Library. If this configuration option is specified in your config file, a value must be specified, otherwise the application will exit citing a fatal error.
+
+### dry_run
+_**Description:**_ This setting controls the application's capability to test your application configuration without performing any actual activity (download, upload, move, delete, folder creation).
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `dry_run = "false"` or `dry_run = "true"`
+
+_**CLI Option Use:**_ `--dry-run`
+
+### enable_logging
+_**Description:**_ This setting controls the application logging all actions to a separate file. By default, all log files will be written to `/var/log/onedrive`, however this can be changed by using the 'log_dir' config option.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `enable_logging = "false"` or `enable_logging = "true"`
+
+_**CLI Option Use:**_ `--enable-logging`
+
+> [!IMPORTANT]
+> Additional configuration is potentially required to configure the default log directory. Refer to the [Enabling the Client Activity Log](./usage.md#enabling-the-client-activity-log) section in usage.md for details.
+
+### force_http_11
+_**Description:**_ This setting controls the application HTTP protocol version. By default, the application will use the libcurl defaults for which HTTP protocol version will be used to interact with Microsoft OneDrive. Use this setting to downgrade libcurl to only use HTTP/1.1.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `force_http_11 = "false"` or `force_http_11 = "true"`
+
+_**CLI Option Use:**_ `--force-http-11`
+
+### ip_protocol_version
+_**Description:**_ This setting controls the IP protocol that the application should use when communicating with Microsoft OneDrive. The default is to use both IPv4 and IPv6 networks for communicating with Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 0
+
+_**Valid Values:**_ 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
+
+_**Config Example:**_ `ip_protocol_version = "0"` or `ip_protocol_version = "1"` or `ip_protocol_version = "2"`
+
+> [!IMPORTANT]
+> In some environments where IPv4 and IPv6 are configured at the same time, this causes resolution and routing issues to Microsoft OneDrive. If this is the case, it is advisable to change 'ip_protocol_version' to match your environment.
+
+### local_first
+_**Description:**_ This setting controls what the application considers the 'source of truth' for your data.
By default, what is stored online will be considered as the 'source of truth' when syncing to your local machine. When using this option, your local data will be considered the 'source of truth'. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `local_first = "false"` or `local_first = "true"` + +_**CLI Option Use:**_ `--local-first` + +### log_dir +_**Description:**_ This setting controls the custom application log path when 'enable_logging' has been enabled. By default, all log files will be written to `/var/log/onedrive`. + +_**Value Type:**_ String + +_**Default Value:**_ *None* + +_**Config Example:**_ `log_dir = "~/logs/"` + +_**CLI Option Use:**_ `--log-dir "~/logs/"` + +### monitor_fullscan_frequency +_**Description:**_ This configuration option controls the number of 'monitor_interval' iterations between when a full scan of your data is performed to ensure data integrity and consistency. + +_**Value Type:**_ Integer + +_**Default Value:**_ 12 + +_**Config Example:**_ `monitor_fullscan_frequency = "24"` + +_**CLI Option Use:**_ `--monitor-fullscan-frequency '24'` + +> [!NOTE] +> By default without configuration, 'monitor_fullscan_frequency' is set to 12. In this default state, this means that a full scan is performed every 'monitor_interval' x 'monitor_fullscan_frequency' = 3600 seconds. This setting is only applicable when running in `--monitor` mode. Setting this configuration option to '0' will *disable* the full scan of your data online. + +### monitor_interval +_**Description:**_ This configuration setting determines how often the synchronisation loops run in --monitor mode, measured in seconds. When this time period elapses, the client will check for online changes in Microsoft OneDrive, conduct integrity checks on local data and scan the local 'sync_dir' to identify any new content that hasn't been uploaded yet. + +_**Value Type:**_ Integer + +_**Default Value:**_ 300 + +_**Config Example:**_ `monitor_interval = "600"` + +_**CLI Option Use:**_ `--monitor-interval '600'` + +> [!NOTE] +> A minimum value of 300 is enforced for this configuration setting. + +### monitor_log_frequency +_**Description:**_ This configuration option controls the suppression of frequently printed log items to the system console when using `--monitor` mode. The aim of this configuration item is to reduce the log output when near zero sync activity is occurring. + +_**Value Type:**_ Integer + +_**Default Value:**_ 12 + +_**Config Example:**_ `monitor_log_frequency = "24"` + +_**CLI Option Use:**_ `--monitor-log-frequency '24'` + +_**Usage Example:**_ + +By default, at application start-up when using `--monitor` mode, the following will be logged to indicate that the application has correctly started and has performed all the initial processing steps: +```text +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +Configuring Global Azure AD Endpoints +Sync Engine Initialised with new Onedrive API instance +All application operations will be performed in: /home/user/OneDrive +OneDrive synchronisation interval (seconds): 300 +Initialising filesystem inotify monitoring ... +Performing initial synchronisation to ensure consistent local state ... +Starting a sync with Microsoft OneDrive +Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB .. +Processing changes and items received from Microsoft OneDrive ... 
+Performing a database consistency and integrity check on locally stored data ...
+Scanning the local file system '~/OneDrive' for new data to upload ...
+Performing a final true-up scan of online data from Microsoft OneDrive
+Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB ..
+Processing changes and items received from Microsoft OneDrive ...
+Sync with Microsoft OneDrive is complete
+```
+Then, based on 'monitor_log_frequency', the following output will be logged until the suppression loop value is reached:
+```text
+Starting a sync with Microsoft OneDrive
+Syncing changes from Microsoft OneDrive ...
+Sync with Microsoft OneDrive is complete
+```
+> [!NOTE]
+> The additional log output `Performing a database consistency and integrity check on locally stored data ...` will only be displayed when this activity is occurring, which is triggered by 'monitor_fullscan_frequency'.
+
+> [!NOTE]
+> If verbose application output is being used (`--verbose`), then this configuration setting has zero effect, as application verbose output takes priority over application output suppression.
+
+### no_remote_delete
+_**Description:**_ This configuration option controls whether local file and folder deletes are actioned on Microsoft OneDrive.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `no_remote_delete = "false"` or `no_remote_delete = "true"`
+
+_**CLI Option Use:**_ `--no-remote-delete`
+
+> [!IMPORTANT]
+> This configuration option can *only* be used in conjunction with `--upload-only`
+
+### operation_timeout
+_**Description:**_ This configuration option controls the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. We recommend users not to tamper with this option unless strictly necessary. This option controls the CURLOPT_TIMEOUT setting of libcurl.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 3600
+
+_**Config Example:**_ `operation_timeout = "3600"`
+
+### rate_limit
+_**Description:**_ This configuration option controls the bandwidth used by the application, per thread, when interacting with Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 0 (unlimited, use available bandwidth per thread)
+
+_**Valid Values:**_ Valid tested values for this configuration option are as follows:
+
+* 131072 = 128 KB/s - absolute minimum for basic application operations to prevent timeouts
+* 262144 = 256 KB/s
+* 524288 = 512 KB/s
+* 1048576 = 1 MB/s
+* 10485760 = 10 MB/s
+* 104857600 = 100 MB/s
+
+_**Config Example:**_ `rate_limit = "131072"`
+
+### read_only_auth_scope
+_**Description:**_ This configuration option controls whether the OneDrive Client for Linux operates entirely in read-only mode.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `read_only_auth_scope = "false"` or `read_only_auth_scope = "true"`
+
+> [!IMPORTANT]
+> When using 'read_only_auth_scope' you will also need to remove your existing application access consent, otherwise the old authentication consent will remain valid and will be used. This means the application will technically retain the consent to upload data until you revoke that consent.
+
+### remove_source_files
+_**Description:**_ This configuration option controls whether the OneDrive Client for Linux removes the local file post successful transfer to Microsoft OneDrive.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `remove_source_files = "false"` or `remove_source_files = "true"`
+
+_**CLI Option Use:**_ `--remove-source-files`
+
+> [!IMPORTANT]
+> This configuration option can *only* be used in conjunction with `--upload-only`
+
+### resync
+_**Description:**_ This configuration option controls whether the known local sync state with Microsoft OneDrive is removed at application startup. When this option is used, a full scan of your data online is performed to ensure that the local sync state is correctly built back up.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `resync = "false"` or `resync = "true"`
+
+_**CLI Option Use:**_ `--resync`
+
+> [!CAUTION]
+> It's highly recommended to use this option only if the application prompts you to do so. Don't blindly use this option as a default option. If you alter any of the following configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration:
+> * drive_id
+> * sync_dir
+> * skip_file
+> * skip_dir
+> * skip_dotfiles
+> * skip_symlinks
+> * sync_business_shared_items
+> * Creating, Modifying or Deleting the 'sync_list' file
+
+### resync_auth
+_**Description:**_ This configuration option controls the approval of performing a 'resync', which can be beneficial in automated environments.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `resync_auth = "false"` or `resync_auth = "true"`
+
+_**CLI Option Use:**_ `--resync-auth`
+
+> [!TIP]
+> In automated environments (assuming you know what you are doing, given you are using automation), this option allows you to automatically acknowledge the 'proceed with acknowledgement' prompt that a resync otherwise requires, as shown in the example below.
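+
+For instance, a hypothetical unattended deployment that has just rolled out a configuration change could approve the required resync non-interactively (illustrative command only):
+```text
+onedrive --sync --resync --resync-auth
+```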
+
+### skip_dir
+_**Description:**_ This configuration option controls whether the application skips certain directories from being synced. Directories can be specified in two ways:
+
+* As a single entry. This will search the respective path for this entry and skip all instances where this directory is present, wherever it may exist.
+* As a full path entry. This will skip the explicit path as set.
+
+> [!IMPORTANT]
+> Entries for 'skip_dir' are *relative* to your 'sync_dir' path.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *Empty* - not required for normal operation
+
+_**Config Example:**_
+
+Patterns are case insensitive. `*` and `?` [wildcard characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
+
+```text
+skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell|.Rproj-user"
+```
+
+The 'skip_dir' option can also be specified multiple times within your config file, for example:
+```text
+skip_dir = "SkipThisDirectoryAnywhere"
+skip_dir = ".SkipThisOtherDirectoryAnywhere"
+skip_dir = "/Explicit/Path/To/A/Directory"
+skip_dir = "/Another/Explicit/Path/To/Different/Directory"
+```
+
+This will be interpreted the same as:
+```text
+skip_dir = "SkipThisDirectoryAnywhere|.SkipThisOtherDirectoryAnywhere|/Explicit/Path/To/A/Directory|/Another/Explicit/Path/To/Different/Directory"
+```
+
+_**CLI Option Use:**_ `--skip-dir 'SkipThisDirectoryAnywhere|.SkipThisOtherDirectoryAnywhere|/Explicit/Path/To/A/Directory|/Another/Explicit/Path/To/Different/Directory'`
+
+> [!NOTE]
+> This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If both the config file entries and the CLI option are used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync.
+
+### skip_dir_strict_match
+_**Description:**_ This configuration option controls whether the application performs strict directory matching when checking 'skip_dir' items. When enabled, the 'skip_dir' item must be a full path match to the path to be skipped.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `skip_dir_strict_match = "false"` or `skip_dir_strict_match = "true"`
+
+_**CLI Option Use:**_ `--skip-dir-strict-match`
+
+### skip_dotfiles
+_**Description:**_ This configuration option controls whether the application will skip all .files and .folders when performing sync operations.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `skip_dotfiles = "false"` or `skip_dotfiles = "true"`
+
+_**CLI Option Use:**_ `--skip-dot-files`
+
+> [!NOTE]
+> This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
+
+### skip_file
+_**Description:**_ This configuration option controls whether the application skips certain files from being synced.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `~*|.~*|*.tmp|*.swp|*.partial`
+
+_**Config Example:**_
+
+Patterns are case insensitive. `*` and `?` [wildcard characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
+
+By default, the following files will be skipped:
+* Files that start with ~
+* Files that start with .~ (like .~lock.* files generated by LibreOffice)
+* Files that end in .tmp, .swp and .partial
+
+Files can be skipped in the following fashion:
+* Specify a wildcard, eg: '*.txt' (skip all txt files)
+* Explicitly specify the filename and its full path relative to your sync_dir, eg: '/path/to/file/filename.ext'
+* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext'
+
+```text
+skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx"
+```
+
+> [!IMPORTANT]
+> Entries for 'skip_file' are *relative* to your 'sync_dir' path.
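+
+> [!TIP]
+> Before relying on new 'skip_file' or 'skip_dir' entries, a dry run can be used to preview what the client would do without changing any data. As an illustrative command only:
+> ```text
+> onedrive --sync --verbose --dry-run
+> ```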
+
+The 'skip_file' option can be specified multiple times within your config file, for example:
+```text
+skip_file = "~*|.~*|*.tmp|*.swp"
+skip_file = "*.blah"
+skip_file = "never_sync.file"
+skip_file = "/Documents/keepass.kdbx"
+```
+This will be interpreted the same as:
+```text
+skip_file = "~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx"
+```
+
+_**CLI Option Use:**_ `--skip-file '~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx'`
+
+> [!NOTE]
+> This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If both the config file entries and the CLI option are used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync.
+
+### skip_size
+_**Description:**_ This configuration option controls whether the application skips syncing certain files larger than the specified size. The value specified is in MB.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 0 (all files, regardless of size, are synced)
+
+_**Config Example:**_ `skip_size = "50"`
+
+_**CLI Option Use:**_ `--skip-size '50'`
+
+### skip_symlinks
+_**Description:**_ This configuration option controls whether the application will skip all symbolic links when performing sync operations. Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `skip_symlinks = "false"` or `skip_symlinks = "true"`
+
+_**CLI Option Use:**_ `--skip-symlinks`
+
+> [!NOTE]
+> This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
+
+### space_reservation
+_**Description:**_ This configuration option controls how much local disk space should be reserved, to prevent the application from filling up your entire disk due to misconfiguration.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 50 MB (expressed as Bytes when using `--display-config`)
+
+_**Config Example:**_ `space_reservation = "100"`
+
+_**CLI Option Use:**_ `--space-reservation '100'`
+
+### sync_business_shared_items
+_**Description:**_ This configuration option controls whether OneDrive Business | Office 365 Shared Folders, when added as a 'shortcut' to your 'My Files', will be synced to your local system.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `sync_business_shared_items = "false"` or `sync_business_shared_items = "true"`
+
+_**CLI Option Use:**_ *none* - this is a config file option only
+
+> [!NOTE]
+> This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
+
+> [!CAUTION]
+> This option is *not* backwards compatible with any v2.4.x application version. If you are enabling this option on *any* system running a v2.5.x application version, all your application versions being used *everywhere* must be of the v2.5.x codebase.
+
+### sync_dir
+_**Description:**_ This configuration option determines the location on your local filesystem where your data from Microsoft OneDrive will be saved.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `~/OneDrive`
+
+_**Config Example:**_ `sync_dir = "~/MyDirToSync"`
+
+_**CLI Option Use:**_ `--syncdir '~/MyDirToSync'`
+
+> [!CAUTION]
+> After changing this option, you will be required to perform a resync. Do not change or modify this option without fully understanding the implications of doing so.
+
+### sync_dir_permissions
+_**Description:**_ This configuration option defines the directory permissions applied when a new directory is created locally during the process of syncing your data from Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ `700` - This provides the following permissions: `drwx------`
+
+_**Config Example:**_ `sync_dir_permissions = "700"`
+
+> [!IMPORTANT]
+> Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing directory permissions if you modify this value.
+
+### sync_file_permissions
+_**Description:**_ This configuration option defines the file permissions applied when a new file is created locally during the process of syncing your data from Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ `600` - This provides the following permissions: `-rw-------`
+
+_**Config Example:**_ `sync_file_permissions = "600"`
+
+> [!IMPORTANT]
+> Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing file permissions if you modify this value.
+
+### sync_root_files
+_**Description:**_ This configuration option manages the synchronisation of files located in the 'sync_dir' root when using a 'sync_list'. It enables you to sync all these files by default, eliminating the need to repeatedly modify your 'sync_list' and initiate resynchronisation.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `sync_root_files = "false"` or `sync_root_files = "true"`
+
+_**CLI Option Use:**_ `--sync-root-files`
+
+> [!IMPORTANT]
+> Although it's not mandatory, it's recommended that after enabling this option, you perform a `--resync`. This ensures that any previously excluded content is now included in your sync process.
+
+### threads
+_**Description:**_ This configuration option controls the number of 'threads' for upload and download operations when files need to be transferred between your local system and Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ `8`
+
+_**Maximum Value:**_ `16`
+
+_**Config Example:**_ `threads = "16"`
+
+> [!WARNING]
+> Increasing the threads beyond the default will lead to increased system utilisation and local TCP port use, which may lead to unpredictable behaviour and/or application stability issues.
+
+### upload_only
+_**Description:**_ This setting forces the client to only upload data to Microsoft OneDrive and replicate the local state online. By default, this will also remove content online that has been removed locally.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `upload_only = "false"` or `upload_only = "true"`
+
+_**CLI Option Use:**_ `--upload-only`
+
+> [!IMPORTANT]
+> To ensure that data deleted locally remains accessible online, you can use the 'no_remote_delete' option. If you want to delete the data from your local storage after a successful upload to Microsoft OneDrive, you can use the 'remove_source_files' option.
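+
+As an illustrative sketch only, a 'backup to OneDrive' style configuration that uploads local changes but never deletes anything online could combine the options above as follows in your 'config' file:
+```text
+# Hypothetical example: upload local data, never delete online
+upload_only = "true"
+no_remote_delete = "true"
+```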
+
+### user_agent
+_**Description:**_ This configuration option controls the 'User-Agent' request header that is presented to the Microsoft Graph API when accessing the Microsoft OneDrive service. This string lets servers and network peers identify the application, operating system, vendor, and/or version of the application making the request. We recommend users not to tamper with this option unless strictly necessary.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `ISV|abraunegg|OneDrive Client for Linux/vX.Y.Z-A-bcdefghi`
+
+_**Config Example:**_ `user_agent = "ISV|CompanyName|AppName/Version"`
+
+> [!IMPORTANT]
+> The default 'user_agent' value conforms to specific Microsoft requirements to identify as an ISV that complies with OneDrive traffic decoration requirements. Changing this value may impact how Microsoft sees your client, and your traffic may be throttled as a result. For further information please read: https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
+
+### webhook_enabled
+_**Description:**_ This configuration option controls the application feature 'webhooks', allowing you to subscribe to remote updates as published by Microsoft OneDrive. This option only operates when the client is using 'Monitor Mode'.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ The following is the minimum working example that needs to be added to your 'config' file to enable 'webhooks' successfully:
+```text
+webhook_enabled = "true"
+webhook_public_url = "http://<your_public_ip_or_fqdn>:8888/"
+```
+
+> [!NOTE]
+> Setting `webhook_enabled = "true"` enables the webhook feature in 'monitor' mode. The onedrive process will listen for incoming updates at a configurable endpoint, which defaults to `0.0.0.0:8888`. The `webhook_public_url` must be set to a public-facing URL for Microsoft to send updates to your webhook.
+>
+> If your host is directly exposed to the Internet, the `webhook_public_url` can be set to `http://<your_public_ip_or_fqdn>:8888/` to match the default endpoint. Alternatively, it is advisable to configure a reverse proxy like `nginx` to proxy the traffic to the client. For example, below is a nginx config snippet to proxy traffic into the webhook:
+> ```text
+> server {
+>     listen 80;
+>     location /webhooks/onedrive {
+>         proxy_http_version 1.1;
+>         proxy_pass http://127.0.0.1:8888;
+>     }
+> }
+> ```
+>
+> With nginx running, you can configure 'webhook_public_url' to `https://<your_server_fqdn>/webhooks/onedrive`
+
+> [!IMPORTANT]
+> A valid HTTPS certificate is required for your public-facing URL if using nginx. Self-signed certificates will be rejected. Consider using https://letsencrypt.org/ to utilise free SSL certificates for your public-facing URL.
+
+> [!TIP]
+> If you receive this application error: `Subscription validation request failed. Response must exactly match validationToken query parameter.`, the most likely cause for this error will be your nginx configuration.
+>
+> To resolve this configuration issue, potentially investigate the following configuration for nginx:
+> ```text
+> server {
+>     listen 80;
+>     location /webhooks/onedrive {
+>         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+>         proxy_set_header X-Original-Request-URI $request_uri;
+>         proxy_read_timeout 300s;
+>         proxy_connect_timeout 75s;
+>         proxy_buffering off;
+>         proxy_http_version 1.1;
+>         proxy_pass http://127.0.0.1:8888;
+>     }
+> }
+> ```
+> For any further nginx configuration assistance, please refer to: https://docs.nginx.com/
+
+### webhook_expiration_interval
+_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription expires. The value is expressed as the number of seconds before expiry.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 600
+
+_**Config Example:**_ `webhook_expiration_interval = "1200"`
+
+### webhook_listening_host
+_**Description:**_ This configuration option controls the host address that this client binds to, when the webhook feature is enabled.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ 0.0.0.0
+
+_**Config Example:**_ `webhook_listening_host = ""` - this will use the default value. `webhook_listening_host = "192.168.3.4"` - this will bind the client to use the IP address 192.168.3.4.
+
+> [!NOTE]
+> Use in conjunction with 'webhook_listening_port' to change the webhook listening endpoint.
+
+### webhook_listening_port
+_**Description:**_ This configuration option controls the TCP port that this client listens on, when the webhook feature is enabled.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 8888
+
+_**Config Example:**_ `webhook_listening_port = "9999"`
+
+> [!NOTE]
+> Use in conjunction with 'webhook_listening_host' to change the webhook listening endpoint.
+
+### webhook_public_url
+_**Description:**_ This configuration option controls the URL that Microsoft will send subscription notifications to. This must be a valid Internet-accessible URL.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *empty*
+
+_**Config Example:**_
+* If your host is directly connected to the Internet: `webhook_public_url = "http://<your_public_ip_or_fqdn>:8888/"`
+* If you are using nginx to reverse proxy traffic from the Internet: `webhook_public_url = "https://<your_server_fqdn>/webhooks/onedrive"`
+
+### webhook_renewal_interval
+_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription is renewed. The value is expressed as the number of seconds before renewal.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 300
+
+_**Config Example:**_ `webhook_renewal_interval = "600"`
+
+### webhook_retry_interval
+_**Description:**_ This configuration option controls the frequency at which the creation or renewal of a Microsoft OneDrive webhook subscription is retried after a failure. The value is expressed as the number of seconds before retry.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 60
+
+_**Config Example:**_ `webhook_retry_interval = "120"`
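+
+Bringing the webhook options above together, a hypothetical reverse-proxied deployment (matching the nginx example shown earlier) might use the following 'config' entries. The host name is a placeholder and the values are illustrative only:
+```text
+# Illustrative webhook configuration - not a recommendation
+webhook_enabled = "true"
+webhook_public_url = "https://<your_server_fqdn>/webhooks/onedrive"
+webhook_listening_host = "127.0.0.1"
+webhook_listening_port = "8888"
+```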
+
+## Command Line Interface (CLI) Only Options
+
+### CLI Option: --auth-files
+_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via specific files that the application uses to read the authentication data from.
+
+_**Usage Example:**_ `onedrive --auth-files authUrl:responseUrl`
+
+> [!IMPORTANT]
+> The authorisation URL is written to the specified 'authUrl' file, then onedrive waits for the file 'responseUrl' to be present, and reads the authentication response from that file. Example:
+>
+> ```text
+> onedrive --auth-files '~/onedrive-auth-url:~/onedrive-response-url'
+> Reading configuration file: /home/alex/.config/onedrive/config
+> Configuration file successfully loaded
+> Configuring Global Azure AD Endpoints
+> Client requires authentication before proceeding. Waiting for --auth-files elements to be available.
+> ```
+> At this point, the client has written the file `~/onedrive-auth-url`, which contains the authentication URL that needs to be visited to perform the authentication process. The client will now wait and watch for the presence of the file `~/onedrive-response-url`.
+>
+> Visit the authentication URL, and then create a new file called `~/onedrive-response-url` with the response URI. Once this has been done, the application will acknowledge the presence of this file, read the contents, and authenticate the application.
+> ```text
+> Sync Engine Initialised with new Onedrive API instance
+>
+> --sync or --monitor switches missing from your command line input. Please add one (not both) of these switches to your command line or use 'onedrive --help' for further assistance.
+>
+> No OneDrive sync will be performed without one of these two arguments being present.
+> ```
+
+### CLI Option: --auth-response
+_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via providing the authentication response URI directly.
+
+_**Usage Example:**_ `onedrive --auth-response https://login.microsoftonline.com/common/oauth2/nativeclient?code=<code>`
+
+> [!TIP]
+> Typically, unless the application client identifier has been modified, the authentication scopes are being modified or a specific Azure tenant is being specified, the authentication URL will most likely be as follows:
+> ```text
+> https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.All%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient
+> ```
+> With this URL being known, it is possible ahead of time to request an authentication token by visiting this URL and performing the authentication access request.
+
+### CLI Option: --confdir
+_**Description:**_ This CLI option allows the user to specify where all the application configuration and relevant components are stored.
+
+_**Usage Example:**_ `onedrive --confdir '~/.config/onedrive-business/'`
+
+> [!IMPORTANT]
+> If using this option, it must be specified each and every time the application is used. If it is omitted, the application default configuration directory will be used.
+
+### CLI Option: --create-directory
+_**Description:**_ This CLI option allows the user to create the specified directory path on Microsoft OneDrive without performing a sync.
+
+_**Usage Example:**_ `onedrive --create-directory 'path/of/new/folder/structure/to/create/'`
+
+> [!IMPORTANT]
+> The specified path to create is relative to your configured 'sync_dir'.
+
+### CLI Option: --create-share-link
+_**Description:**_ This CLI option enables the creation of a shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive.
By default, the permissions for the file will be 'read-only'.
+
+_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt'`
+
+> [!IMPORTANT]
+> If writable access to the file is required, you must add `--with-editing-perms` to your command. See below for details.
+
+### CLI Option: --destination-directory
+_**Description:**_ This CLI option specifies the 'destination' portion of moving a file or folder online, without performing a sync operation.
+
+_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'`
+
+> [!IMPORTANT]
+> All specified paths are relative to your configured 'sync_dir'.
+
+### CLI Option: --display-config
+_**Description:**_ This CLI option will display the effective application configuration.
+
+_**Usage Example:**_ `onedrive --display-config`
+
+### CLI Option: --display-sync-status
+_**Description:**_ This CLI option will display the sync status of the configured 'sync_dir'.
+
+_**Usage Example:**_ `onedrive --display-sync-status`
+
+> [!TIP]
+> This option can also use the `--single-directory` option to determine the sync status of a specific directory within the configured 'sync_dir'.
+
+### CLI Option: --display-quota
+_**Description:**_ This CLI option will display the quota status of the account drive id or the configured 'drive_id' value.
+
+_**Usage Example:**_ `onedrive --display-quota`
+
+### CLI Option: --force
+_**Description:**_ This CLI option forces the deletion of data when a 'big delete' is detected.
+
+_**Usage Example:**_ `onedrive --sync --verbose --force`
+
+> [!IMPORTANT]
+> This option should only be used exclusively in cases where you've initiated a 'big delete' and genuinely intend to remove all the data that is set to be deleted online.
+
+### CLI Option: --force-sync
+_**Description:**_ This CLI option enables the syncing of a specific directory, using the Client Side Filtering application defaults, overriding any user application configuration.
+
+_**Usage Example:**_ `onedrive --sync --verbose --force-sync --single-directory 'Data'`
+
+> [!NOTE]
+> When this option is used, you will be presented with the following warning and risk acceptance:
+> ```text
+> WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used
+>
+> The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts.
+> By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync.
+>
+> Are you sure you wish to proceed with --force-sync [Y/N]
+> ```
+> To proceed with this sync task, you must accept the risk of the actions you are taking. If you have any concerns, first use `--dry-run` and evaluate the outcome before proceeding with the actual action.
+
+### CLI Option: --get-file-link
+_**Description:**_ This CLI option queries the OneDrive API and returns the WebURL for the given local file.
+
+_**Usage Example:**_ `onedrive --get-file-link 'relative/path/to/your/file.txt'`
+
+> [!IMPORTANT]
+> The path that you should use *must* be relative to your 'sync_dir'.
+
+### CLI Option: --get-sharepoint-drive-id
+_**Description:**_ This CLI option queries the OneDrive API and returns the Office 365 Drive ID for a given Office 365 SharePoint Shared Library, which can then be used with 'drive_id' to sync a specific SharePoint Library.
+
+_**Usage Example:**_ `onedrive --get-sharepoint-drive-id '*'` or `onedrive --get-sharepoint-drive-id 'PointPublishing Hub Site'`
+
+### CLI Option: --list-shared-items
+_**Description:**_ This CLI option lists all OneDrive Business Shared Items that have been shared with your account. The resulting list shows shared files and folders that you can configure this client to sync.
+
+_**Usage Example:**_ `onedrive --list-shared-items`
+
+_**Example Output:**_
+```
+...
+Listing available OneDrive Business Shared Items:
+
+-----------------------------------------------------------------------------------
+Shared File: large_document_shared.docx
+Shared By: test user (testuser@domain.tld)
+-----------------------------------------------------------------------------------
+Shared File: no_download_access.docx
+Shared By: test user (testuser@domain.tld)
+-----------------------------------------------------------------------------------
+Shared File: online_access_only.txt
+Shared By: test user (testuser@domain.tld)
+-----------------------------------------------------------------------------------
+Shared File: read_only.txt
+Shared By: test user (testuser@domain.tld)
+-----------------------------------------------------------------------------------
+Shared File: qewrqwerwqer.txt
+Shared By: test user (testuser@domain.tld)
+-----------------------------------------------------------------------------------
+Shared File: dummy_file_to_share.docx
+Shared By: testuser2 testuser2 (testuser2@domain.tld)
+-----------------------------------------------------------------------------------
+Shared Folder: Sub Folder 2
+Shared By: test user (testuser@domain.tld)
+-----------------------------------------------------------------------------------
+Shared File: file to share.docx
+Shared By: test user (testuser@domain.tld)
+-----------------------------------------------------------------------------------
+Shared Folder: Top Folder
+Shared By: test user (testuser@domain.tld)
+-----------------------------------------------------------------------------------
+...
+```
+
+### CLI Option: --logout
+_**Description:**_ This CLI option removes this client's authentication status with Microsoft OneDrive. Any further application use will require the application to be re-authenticated with Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --logout`
+
+### CLI Option: --modified-by
+_**Description:**_ This CLI option queries the OneDrive API and returns the last modified details for the given local file.
+
+_**Usage Example:**_ `onedrive --modified-by 'relative/path/to/your/file.txt'`
+
+> [!IMPORTANT]
+> The path that you should use *must* be relative to your 'sync_dir'.
+
+### CLI Option: --monitor | -m
+_**Description:**_ This CLI option controls the 'Monitor Mode' operational aspect of the client. When this option is used, the client will perform ongoing syncs of data between Microsoft OneDrive and your local system. Local changes will be uploaded in near-realtime, whilst online changes will be downloaded on the next sync process. The frequency of these checks is governed by the 'monitor_interval' value.
+
+_**Usage Example:**_ `onedrive --monitor` or `onedrive -m`
+
+### CLI Option: --print-access-token
+_**Description:**_ Print the current access token being used to access Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --verbose --verbose --debug-https --print-access-token`
+
+> [!CAUTION]
+> Do not use this option if you do not know why you are wanting to use it. Be highly cautious of exposing this object.
Change your password if you feel that you have inadvertently exposed this token.
+
+### CLI Option: --reauth
+_**Description:**_ This CLI option controls the ability to re-authenticate your client with Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --reauth`
+
+### CLI Option: --remove-directory
+_**Description:**_ This CLI option allows the user to remove the specified directory path on Microsoft OneDrive without performing a sync.
+
+_**Usage Example:**_ `onedrive --remove-directory 'path/of/folder/structure/to/remove/'`
+
+> [!IMPORTANT]
+> The specified path to remove is relative to your configured 'sync_dir'.
+
+### CLI Option: --single-directory
+_**Description:**_ This CLI option controls the application's ability to sync a specific single directory.
+
+_**Usage Example:**_ `onedrive --sync --single-directory 'Data'`
+
+> [!IMPORTANT]
+> The path specified is relative to your configured 'sync_dir' path. If the physical local path 'Folder' to sync is `~/OneDrive/Data/Folder`, then the command would be `--single-directory 'Data/Folder'`.
+
+### CLI Option: --source-directory
+_**Description:**_ This CLI option specifies the 'source' portion of moving a file or folder online, without performing a sync operation.
+
+_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'`
+
+> [!IMPORTANT]
+> All specified paths are relative to your configured 'sync_dir'.
+
+### CLI Option: --sync | -s
+_**Description:**_ This CLI option controls the 'Standalone Mode' operational aspect of the client. When this option is used, the client will perform a one-time sync of data between Microsoft OneDrive and your local system.
+
+_**Usage Example:**_ `onedrive --sync` or `onedrive -s`
+
+### CLI Option: --sync-shared-files
+_**Description:**_ Sync OneDrive Business Shared Files to the local filesystem.
+
+_**Usage Example:**_ `onedrive --sync --sync-shared-files`
+
+> [!IMPORTANT]
+> To use this option you must first enable 'sync_business_shared_items' within your application configuration. Please read 'business-shared-items.md' for more information regarding this option.
+
+### CLI Option: --verbose | -v
+_**Description:**_ This CLI option controls the verbosity of the application output. Use the option once to have normal verbose output, and use it twice to have debug-level application output.
+
+_**Usage Example:**_ `onedrive --sync --verbose` or `onedrive --monitor --verbose`
+
+### CLI Option: --with-editing-perms
+_**Description:**_ This CLI option enables the creation of a writable shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive. This option can only be used in conjunction with `--create-share-link`.
+
+_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt' --with-editing-perms`
+
+> [!IMPORTANT]
+> Placement of `--with-editing-perms` is critical. It *must* be placed after the file path, as per the example above.
+
+## Deprecated Configuration File and CLI Options
+The following configuration options are no longer supported:
+
+### force_http_2
+_**Description:**_ Force the use of HTTP/2 for all operations where applicable
+
+_**Deprecated Config Example:**_ `force_http_2 = "true"`
+
+_**Deprecated CLI Option:**_ `--force-http-2`
+
+_**Reason for deprecation:**_ HTTP/2 will be used by default where possible, when the OneDrive API platform does not downgrade the connection to HTTP/1.1; thus this configuration option is no longer required.
+
+### min_notify_changes
+_**Description:**_ Minimum number of pending incoming changes necessary to trigger a GUI desktop notification.
+
+_**Deprecated Config Example:**_ `min_notify_changes = "50"`
+
+_**Deprecated CLI Option:**_ `--min-notify-changes '50'`
+
+_**Reason for deprecation:**_ The application has been totally re-written. When this item was introduced, it was done so to reduce the spamming of all events to the GUI desktop.
+
+### CLI Option: --synchronize
+_**Description:**_ Perform a synchronisation with Microsoft OneDrive
+
+_**Deprecated CLI Option:**_ `--synchronize`
+
+_**Reason for deprecation:**_ `--synchronize` has been deprecated in favour of `--sync` or `-s`
diff --git a/docs/application-security.md b/docs/application-security.md
index 7c22c4f13..3a38174df 100644
--- a/docs/application-security.md
+++ b/docs/application-security.md
@@ -63,6 +63,18 @@ When these delegated API permissions are combined, these provide the effective a
 These 'default' permissions will allow the OneDrive Client for Linux to read, write and delete data associated with your OneDrive Account.
 
+## How are the Authentication Scopes used?
+
+When using the OneDrive Client for Linux, the above authentication scopes will be presented to the Microsoft Authentication Service (login.microsoftonline.com), where the service will validate the request and provide an applicable token to access Microsoft OneDrive with. This can be illustrated as the following:
+
+![Linux Authentication to Microsoft OneDrive](./puml/onedrive_linux_authentication.png)
+
+This is similar to the Microsoft Windows OneDrive Client:
+
+![Windows Authentication to Microsoft OneDrive](./puml/onedrive_windows_authentication.png)
+
+In a business setting, IT staff who need to authorise the use of the OneDrive Client for Linux in their environment can be assured of its safety. The primary concern for IT staff should be securing the device running the OneDrive Client for Linux. Unlike in a corporate environment where Windows devices are secured through Active Directory and Group Policy Objects (GPOs) to protect corporate data on the device, it is beyond the responsibility of this client to manage security on Linux devices.
+
 ## Configuring read-only access to your OneDrive data
 In some situations, it may be desirable to configure the OneDrive Client for Linux totally in read-only operation.
 
@@ -72,7 +84,8 @@ read_only_auth_scope = "true"
 ```
 This will change the user authentication scope request to use read-only access.
 
-**Note:** When changing this value, you *must* re-authenticate the client using the `--reauth` option to utilise the change in authentication scopes.
+> [!IMPORTANT]
+> When changing this value, you *must* re-authenticate the client using the `--reauth` option to utilise the change in authentication scopes.
 
 When using read-only authentication scopes, the uploading of any data or local change to OneDrive will fail with the following error:
 ```
@@ -88,7 +101,8 @@ As such, it is also advisable for you to add the following to your configuration
 download_only = "true"
 ```
 
-**Important:** Additionally when using 'read_only_auth_scope' you also will need to remove your existing application access consent otherwise old authentication consent will be valid and will be used. This will mean the application will technically have the consent to upload data. See below on how to remove your prior application consent.
+> [!IMPORTANT]
+> Additionally when using 'read_only_auth_scope' you will also need to remove your existing application access consent, otherwise the old authentication consent will remain valid and will be used. This will mean the application will technically have the consent to upload data. See below on how to remove your prior application consent.
 
 ## Reviewing your existing application access consent
 
diff --git a/docs/build-rpm-howto.md b/docs/build-rpm-howto.md
index 5439c3668..d6ee3d8e5 100644
--- a/docs/build-rpm-howto.md
+++ b/docs/build-rpm-howto.md
@@ -1,5 +1,5 @@
 # RPM Package Build Process
-The instuctions below have been tested on the following systems:
+The instructions below have been tested on the following systems:
 
 * CentOS 7 x86_64
 * CentOS 8 x86_64
@@ -13,7 +13,7 @@ sudo yum install -y libcurl-devel
 sudo yum install -y sqlite-devel
 sudo yum install -y libnotify-devel
 sudo yum install -y wget
-sudo yum install -y http://downloads.dlang.org/releases/2.x/2.088.0/dmd-2.088.0-0.fedora.x86_64.rpm
+sudo yum install -y https://downloads.dlang.org/releases/2.x/2.088.0/dmd-2.088.0-0.fedora.x86_64.rpm
 mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
 ```
diff --git a/docs/business-shared-items.md b/docs/business-shared-items.md
new file mode 100644
index 000000000..375d4b249
--- /dev/null
+++ b/docs/business-shared-items.md
@@ -0,0 +1,251 @@
+# How to sync OneDrive Business Shared Items
+
+> [!CAUTION]
+> Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
+
+> [!CAUTION]
+> This feature has been 100% re-written from v2.5.0 onwards and is not backwards compatible with v2.4.x client versions. If enabling this feature, you must upgrade to v2.5.0 or above on all systems that are running this client.
+>
+> An additional pre-requisite before using this capability in v2.5.0 and above is for you to revert any v2.4.x Shared Business Folder configuration you may be currently using, including, but not limited to:
+> * Removing `sync_business_shared_folders = "true|false"` from your 'config' file
+> * Removing the 'business_shared_folders' file
+> * Removing any local data | shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues.
+> * Removing any configuration online that might be related to using this feature prior to v2.5.0
+
+## Process Overview
+Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client:
+1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you.
+2. Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder.
+3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. Adding this option will trigger a `--resync` requirement.
+4. Test the configuration using '--dry-run'.
+5.
Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required.
+
+### Enable syncing of OneDrive Business Shared Items via config file
+```text
+sync_business_shared_items = "true"
+```
+
+### Disable syncing of OneDrive Business Shared Items via config file
+```text
+sync_business_shared_items = "false"
+```
+
+## Syncing OneDrive Business Shared Folders
+Use the following steps to add a OneDrive Business Shared Folder to your account:
+1. Login to Microsoft OneDrive online, and navigate to 'Shared' from the left-hand side pane
+
+![objects_shared_with_me](./images/objects_shared_with_me.png)
+
+2. Select the respective folder you wish to sync, and click the 'Add shortcut to My files' at the top of the page
+
+![add_shared_folder](./images/add_shared_folder.png)
+
+3. The final result online will look like this:
+
+![shared_folder_added](./images/shared_folder_added.png)
+
+When using Microsoft Windows, this shared folder will appear as the following:
+
+![windows_view_shared_folders](./images/windows_view_shared_folders.png)
+
+4. Sync your data using `onedrive --sync --verbose`. If you have just enabled the `sync_business_shared_items = "true"` configuration option, you will be required to perform a resync. During the sync, the selected shared folder will be downloaded:
+
+```
+...
+Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 4
+Finished processing /delta JSON response from the OneDrive API
+Processing 3 applicable changes and items received from Microsoft OneDrive
+Processing OneDrive JSON item batch [1/1] to ensure consistent local state
+Creating local directory: ./my_shared_folder
+Quota information is restricted or not available for this drive.
+Syncing this OneDrive Business Shared Folder: my_shared_folder
+Fetching /delta response from the OneDrive API for Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
+Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 6
+Finished processing /delta JSON response from the OneDrive API
+Processing 6 applicable changes and items received from Microsoft OneDrive
+Processing OneDrive JSON item batch [1/1] to ensure consistent local state
+Creating local directory: ./my_shared_folder/asdf
+Creating local directory: ./my_shared_folder/original_data
+Number of items to download from OneDrive: 3
+Downloading file: my_shared_folder/asdf/asdfasdfhashdkfasdf.txt ... done
+Downloading file: my_shared_folder/asdf/asdfasdf.txt ... done
+Downloading file: my_shared_folder/original_data/file1.data ... done
+Performing a database consistency and integrity check on locally stored data
+...
+```
+
+When this is viewed locally, on Linux, this shared folder is seen as the following:
+
+![linux_shared_folder_view](./images/linux_shared_folder_view.png)
+
+Any shared folder you add can utilise any 'client side filtering' rules that you have created.
+
+
+## Syncing OneDrive Business Shared Files
+There are two methods to support the syncing of OneDrive Business Shared Files with this application:
+1. Add a 'shortcut' to your 'My Files' for the file. This creates a URL shortcut to the file, which can be followed when using a Linux window manager (GNOME, KDE etc.) and the link will open up in a browser. This is the only method that Microsoft Windows supports.
+2. Use the `--sync-shared-files` option to sync all files shared with you to your local disk.
If you use this method, you can utilise any 'client side filtering' rules that you have created to filter out files you do not want locally. This option will create a new folder locally, with sub-folders named after the person who shared the data with you.
+
+### Syncing OneDrive Business Shared Files using Option 1
+1. As per the above method for adding folders, select the shared file, then select to 'Add shortcut' to the file
+
+![add_shared_file_shortcut](./images/add_shared_file_shortcut.png)
+
+2. The final result online will look like this:
+
+![add_shared_file_shortcut_added](./images/online_shared_file_link.png)
+
+When using Microsoft Windows, this shared file will appear as the following:
+
+![windows_view_shared_file_link](./images/windows_view_shared_file_link.png)
+
+3. Sync your data using `onedrive --sync --verbose`. If you have just enabled the `sync_business_shared_items = "true"` configuration option, you will be required to perform a resync.
+```
+...
+All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive
+Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
+Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2
+Finished processing /delta JSON response from the OneDrive API
+Processing 1 applicable changes and items received from Microsoft OneDrive
+Processing OneDrive JSON item batch [1/1] to ensure consistent local state
+Number of items to download from OneDrive: 1
+Downloading file: ./file to share.docx.url ... done
+Syncing this OneDrive Business Shared Folder: my_shared_folder
+Fetching /delta response from the OneDrive API for Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
+Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 0
+Finished processing /delta JSON response from the OneDrive API
+No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive
+Quota information is restricted or not available for this drive.
+Performing a database consistency and integrity check on locally stored data
+Processing DB entries for this Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
+Quota information is restricted or not available for this drive.
+...
+```
+
+When this is viewed locally, on Linux, this shared file link is seen as the following:
+
+![linux_view_shared_file_link](./images/linux_view_shared_file_link.png)
+
+Any shared file link you add can utilise any 'client side filtering' rules that you have created.
+
+
+### Syncing OneDrive Business Shared Files using Option 2
+
+> [!IMPORTANT]
+> When using option 2, all files that have been shared with you will be downloaded by default. To reduce this, first use `--list-shared-items` to list all items shared with your account, then use 'client side filtering' rules, such as a 'sync_list' configuration, to selectively sync the files you want to your local system.
+
+1. Review all items that have been shared with you by using `onedrive --list-shared-items`. This should display output similar to the following:
+```
+...
+Listing available OneDrive Business Shared Items:
+
+-----------------------------------------------------------------------------------
+Shared File: large_document_shared.docx
+Shared By: test user (testuser@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared File: no_download_access.docx
+Shared By: test user (testuser@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared File: online_access_only.txt
+Shared By: test user (testuser@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared File: read_only.txt
+Shared By: test user (testuser@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared File: qewrqwerwqer.txt
+Shared By: test user (testuser@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared File: dummy_file_to_share.docx
+Shared By: testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared Folder: Sub Folder 2
+Shared By: test user (testuser@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared File: file to share.docx
+Shared By: test user (testuser@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared Folder: Top Folder
+Shared By: test user (testuser@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared Folder: my_shared_folder
+Shared By: testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+Shared Folder: Jenkins
+Shared By: test user (testuser@mynasau3.onmicrosoft.com)
+-----------------------------------------------------------------------------------
+...
+```
+
+2. If applicable, add entries to a 'sync_list' file to only sync the shared files that are of importance to you.
+
+3. Run the command `onedrive --sync --verbose --sync-shared-files` to sync the shared files to your local file system. This will create a new local folder called 'Files Shared With Me', containing sub-directories named after the account that shared the file with you. The shared files will reside in those sub-directories:
+
+```
+...
+Finished processing /delta JSON response from the OneDrive API
+No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive
+Syncing this OneDrive Business Shared Folder: my_shared_folder
+Fetching /delta response from the OneDrive API for Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
+Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 0
+Finished processing /delta JSON response from the OneDrive API
+No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive
+Quota information is restricted or not available for this drive. 
+Creating the OneDrive Business Shared Files Local Directory: /home/alex/OneDrive/Files Shared With Me +Checking for any applicable OneDrive Business Shared Files which need to be synced locally +Creating the OneDrive Business Shared File Users Local Directory: /home/alex/OneDrive/Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com) +Creating the OneDrive Business Shared File Users Local Directory: /home/alex/OneDrive/Files Shared With Me/testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com) +Number of items to download from OneDrive: 7 +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/file to share.docx ... done +OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error +Unable to download this file as this was shared as read-only without download permission: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/no_download_access.docx +ERROR: File failed to download. Increase logging verbosity to determine why. +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/no_download_access.docx ... failed! +Downloading file: Files Shared With Me/testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com)/dummy_file_to_share.docx ... done +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 0% | ETA --:--:-- +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/online_access_only.txt ... done +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/read_only.txt ... done +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/qewrqwerwqer.txt ... done +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 5% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 10% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 15% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 20% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 25% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 30% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 35% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 40% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 45% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 50% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 55% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 60% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 65% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 
70% | ETA 00:00:00
+Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 75% | ETA 00:00:00
+Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 80% | ETA 00:00:00
+Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 85% | ETA 00:00:00
+Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 90% | ETA 00:00:00
+Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 95% | ETA 00:00:00
+Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 100% | DONE in 00:00:00
+Quota information is restricted or not available for this drive.
+Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... done
+Quota information is restricted or not available for this drive.
+Quota information is restricted or not available for this drive.
+Performing a database consistency and integrity check on locally stored data
+Processing DB entries for this Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
+Quota information is restricted or not available for this drive.
+...
+```
+
+When viewed locally on Linux, the 'Files Shared With Me' folder and its content appear as follows:
+
+![files_shared_with_me_folder](./images/files_shared_with_me_folder.png)
+
+Unfortunately there is no Microsoft Windows equivalent for this capability.
+
+## Known Issues
+Shared folders that are shared with you by people outside of your 'organisation' are unable to be synced. This is because the Microsoft Graph API does not present these folders.
+
+Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as shown below:
+
+![shared_with_me](./images/shared_with_me.JPG)
+
+This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966)
diff --git a/docs/client-architecture.md b/docs/client-architecture.md
new file mode 100644
index 000000000..f9fc4afad
--- /dev/null
+++ b/docs/client-architecture.md
@@ -0,0 +1,333 @@
+# OneDrive Client for Linux Application Architecture
+
+## How does the client work at a high level?
+The client utilises the 'libcurl' library to communicate with Microsoft OneDrive via the Microsoft Graph API. 
The diagram below shows this high level interaction with the Microsoft and GitHub API services online:
+
+![client_use_of_libcurl](./puml/client_use_of_libcurl.png)
+
+Depending on your operational environment, it is possible to 'tweak' the following options, which modify how libcurl interacts with Microsoft OneDrive services:
+
+* Downgrade all HTTPS operations to use HTTP1.1 (Config Option: `force_http_11`)
+* Control how long a specific transfer should take before it is considered too slow and aborted (Config Option: `operation_timeout`)
+* Control libcurl handling of DNS Cache Timeout (Config Option: `dns_timeout`)
+* Control the maximum time allowed for the connection to be established (Config Option: `connect_timeout`)
+* Control the timeout for activity on an established HTTPS connection (Config Option: `data_timeout`)
+* Control what IP protocol version should be used when communicating with OneDrive (Config Option: `ip_protocol_version`)
+* Control what User Agent is presented to Microsoft services (Config Option: `user_agent`)
+
+> [!IMPORTANT]
+> The default 'user_agent' value conforms to specific Microsoft requirements to identify as an ISV that complies with OneDrive traffic decoration requirements. Changing this value may impact how Microsoft sees your client, and as a result your traffic may be throttled. For further information please read: https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
+
+Diving a little deeper into how the client operates, the diagram below outlines at a high level the operational workflow of the OneDrive Client for Linux, demonstrating how it interacts with the OneDrive API to maintain synchronisation, manage local and cloud data integrity, and ensure that user data is accurately mirrored between the local filesystem and OneDrive cloud storage.
+
+![High Level Application Sequence](./puml/high_level_operational_process.png)
+
+The application operational processes have several high level key stages:
+
+1. **Access Token Validation:** Initially, the client validates its access and the existing access token, refreshing it if necessary. This step ensures that the client has the required permissions to interact with the OneDrive API.
+
+2. **Query Microsoft OneDrive API:** The client queries the /delta API endpoint of Microsoft OneDrive, which returns JSON responses. The /delta endpoint is used specifically for syncing changes, helping the client to identify any updates in the OneDrive storage.
+
+3. **Process JSON Responses:** The client processes each JSON response to determine if it represents a 'root' or 'deleted' item. 'Root' and 'deleted' items are processed immediately; all other items are temporarily stored and then evaluated against client-side filtering rules to decide whether to discard them, or to process and save them in the local database cache for actions like creating directories or downloading files.
+
+4. **Local Cache Database Processing for Data Integrity:** The client processes its local cache database to check for data integrity and differences compared to the OneDrive storage. If differences are found, such as a file or folder change including deletions, the client uploads these changes to OneDrive. Responses from the API, including item metadata, are saved to the local cache database.
+
+5. 
**Local Filesystem Scanning:** The client scans the local filesystem for new files or folders. Each new item is checked against client-side filtering rules. If an item passes the filtering, it is uploaded to OneDrive; otherwise, it is discarded.
+
+6. **Final Data True-Up:** Lastly, the client queries the /delta link for a final true-up, processing any further online JSON changes if required. This ensures that the local and OneDrive storages are fully synchronised.
+
+## What are the operational modes of the client?
+
+There are two main operational modes that the client can utilise:
+
+1. Standalone sync mode that performs a single sync action against Microsoft OneDrive. This method is used when you utilise `--sync`.
+2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive and utilises 'inotify' to watch for local system changes. This method is used when you utilise `--monitor`.
+
+By default, both modes consider all data stored online within Microsoft OneDrive as the 'source-of-truth' - that is, what is online is the correct data (file version, file content, file timestamp, folder structure and so on). This consideration also matches how the Microsoft OneDrive Client for Windows operates.
+
+However, in standalone mode (`--sync`), you can *change* what reference the client will use as the 'source-of-truth' for your data by using the `--local-first` option so that the application will look at your local files *first* and consider your local files as your 'source-of-truth' to replicate that directory structure to Microsoft OneDrive.
+
+> [!IMPORTANT]
+> Please be aware that if you designate a network mount point (such as NFS, Windows Network Share, or Samba Network Share) as your `sync_dir`, this setup inherently lacks 'inotify' support. Support for 'inotify' is essential for real-time tracking of file changes, which means that the client's 'Monitor Mode' cannot immediately detect changes in files located on these network shares. Instead, synchronisation between your local filesystem and Microsoft OneDrive will occur at intervals specified by the `monitor_interval` setting. This limitation regarding 'inotify' support on network mount points like NFS or Samba is beyond the control of this client.
+
+## OneDrive Client for Linux High Level Activity Flows
+
+The diagrams below show the high level process flow and decision making when running the application.
+
+### Main functional activity flows
+![Main Activity](./puml/main_activity_flows.png)
+
+### Processing a potentially new local item
+![applyPotentiallyNewLocalItem](./puml/applyPotentiallyNewLocalItem.png)
+
+### Processing a potentially changed local item
+![applyPotentiallyChangedItem](./puml/applyPotentiallyChangedItem.png)
+
+### Download a file from Microsoft OneDrive
+![downloadFile](./puml/downloadFile.png)
+
+### Upload a modified file to Microsoft OneDrive
+![uploadModifiedFile](./puml/uploadModifiedFile.png)
+
+### Upload a new local file to Microsoft OneDrive
+![uploadFile](./puml/uploadFile.png)
+
+### Determining if an 'item' is synchronised between Microsoft OneDrive and the local file system
+![Item Sync Determination](./puml/is_item_in_sync.png)
+
+### Determining if an 'item' is excluded due to 'Client Side Filtering' rules
+
+By default, the OneDrive Client for Linux will sync all files and folders between Microsoft OneDrive and the local filesystem. 
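+
+As a quick orientation for the rules described in the remainder of this section, the sketch below shows how such 'Client Side Filtering' options can be expressed in the application 'config' file. All values shown here are illustrative assumptions only, not recommended settings:
+```text
+# Illustrative example values only - adjust the patterns to suit your own data
+skip_dir = "Temp|Backup*"
+skip_dotfiles = "true"
+skip_file = "~*|*.tmp|*.partial"
+skip_symlinks = "true"
+```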
+
+Client Side Filtering in the context of this client refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. The OneDrive Client for Linux offers several configuration options to facilitate this:
+
+* **skip_dir:** This option allows the user to specify directories that should not be synchronised with OneDrive. It's particularly useful for omitting large or irrelevant directories from the sync process.
+
+* **skip_dotfiles:** Dotfiles, usually configuration files or scripts, can be excluded from the sync. This is useful for users who prefer to keep these files local.
+
+* **skip_file:** Specific files can be excluded from synchronisation using this option. It provides flexibility in selecting which files are essential for cloud storage.
+
+* **skip_symlinks:** Symlinks often point to files outside the OneDrive directory or to locations that are not relevant for cloud storage. This option prevents them from being included in the sync.
+
+This exclusion process can be illustrated by the following activity diagram. A 'true' return value means that the path being evaluated needs to be excluded:
+
+![Client Side Filtering Determination](./puml/client_side_filtering_rules.png)
+
+## File conflict handling - default operational modes
+
+When using the default operational modes (`--sync` or `--monitor`), the client application conforms to how the Microsoft Windows OneDrive client operates in terms of resolving file conflicts.
+
+Additionally, when using `--resync`, this conflict resolution can differ slightly because you are *deleting* the known application state, meaning the application has zero reference as to what was previously in sync with the local file system.
+
+Due to this factor, when using `--resync`, the online source is always considered accurate and the source-of-truth, regardless of the local file state, file timestamp, or file hash.
+
+### Default Operational Modes - Conflict Handling
+
+#### Scenario
+1. Create a local file
+2. Perform a sync with Microsoft OneDrive using `onedrive --sync`
+3. Modify file online
+4. Modify file locally with different data|contents
+5. Perform a sync with Microsoft OneDrive using `onedrive --sync`
+
+![conflict_handling_default](./puml/conflict_handling_default.png)
+
+#### Evidence of Conflict Handling
+```
+...
+Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2
+Finished processing /delta JSON response from the OneDrive API
+Processing 1 applicable changes and items received from Microsoft OneDrive
+Processing OneDrive JSON item batch [1/1] to ensure consistent local state
+Number of items to download from OneDrive: 1
+The local file to replace (./1.txt) has been modified locally since the last download. Renaming it to avoid potential local data loss.
+The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt
+Downloading file ./1.txt ... done
+Performing a database consistency and integrity check on locally stored data
+Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
+Processing ~/OneDrive
+The directory has not changed
+Processing α
+... 
+The file has not changed +Processing เอกสาร +The directory has not changed +Processing 1.txt +The file has not changed +Scanning the local file system '~/OneDrive' for new data to upload +... +New items to upload to OneDrive: 1 +Total New Data to Upload: 52 Bytes +Uploading new file ./1-onedrive-client-dev.txt ... done. +Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process +Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2 +Finished processing /delta JSON response from the OneDrive API +Processing 1 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state + +Sync with Microsoft OneDrive is complete +Waiting for all internal threads to complete before exiting application +``` + +### Default Operational Modes - Conflict Handling with --resync + +#### Scenario +1. Create a local file +2. Perform a sync with Microsoft OneDrive using `onedrive --sync` +3. Modify file online +4. Modify file locally with different data|contents +5. Perform a sync with Microsoft OneDrive using `onedrive --sync --resync` + +![conflict_handling_default_resync](./puml/conflict_handling_default_resync.png) + +#### Evidence of Conflict Handling +``` +... +Deleting the saved application sync status ... +Using IPv4 and IPv6 (if configured) for all network operations +Checking Application Version ... +... +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 14 +Finished processing /delta JSON response from the OneDrive API +Processing 13 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Local file time discrepancy detected: ./1.txt +This local file has a different modified time 2024-Feb-19 19:32:55Z (UTC) when compared to remote modified time 2024-Feb-19 19:32:36Z (UTC) +The local file has a different hash when compared to remote file hash +Local item does not exist in local database - replacing with file from OneDrive - failed download? +The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt +Number of items to download from OneDrive: 1 +Downloading file ./1.txt ... done +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing ~/OneDrive +The directory has not changed +Processing α +... +Processing เอกสาร +The directory has not changed +Processing 1.txt +The file has not changed +Scanning the local file system '~/OneDrive' for new data to upload +... +New items to upload to OneDrive: 1 +Total New Data to Upload: 52 Bytes +Uploading new file ./1-onedrive-client-dev.txt ... done. 
+Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process
+Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
+Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2
+Finished processing /delta JSON response from the OneDrive API
+Processing 1 applicable changes and items received from Microsoft OneDrive
+Processing OneDrive JSON item batch [1/1] to ensure consistent local state
+
+Sync with Microsoft OneDrive is complete
+Waiting for all internal threads to complete before exiting application
+```
+
+## File conflict handling - local-first operational mode
+
+When using `--local-first` as your operational parameter, the client application uses your local filesystem data as the 'source-of-truth' for what should be stored online.
+
+However, Microsoft OneDrive itself has *zero* acknowledgement of this concept, so conflict handling needs to be aligned to how Microsoft OneDrive on other platforms operates; that is, the offending local file is renamed.
+
+Additionally, when using `--resync`, you are *deleting* the known application state, so the application has zero reference as to what was previously in sync with the local file system.
+
+Due to this factor, when using `--resync`, the online source is always considered accurate and the source-of-truth, regardless of the local file state, file timestamp, file hash, or use of `--local-first`.
+
+### Local First Operational Modes - Conflict Handling
+
+#### Scenario
+1. Create a local file
+2. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first`
+3. Modify file locally with different data|contents
+4. Modify file online with different data|contents
+5. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first`
+
+![conflict_handling_local-first_default](./puml/conflict_handling_local-first_default.png)
+
+#### Evidence of Conflict Handling
+```
+Reading configuration file: /home/alex/.config/onedrive/config
+...
+Using IPv4 and IPv6 (if configured) for all network operations
+Checking Application Version ...
+...
+Sync Engine Initialised with new Onedrive API instance
+All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive
+Performing a database consistency and integrity check on locally stored data
+Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
+Processing ~/OneDrive
+The directory has not changed
+Processing α
+The directory has not changed
+...
+The file has not changed
+Processing เอกสาร
+The directory has not changed
+Processing 1.txt
+Local file time discrepancy detected: 1.txt
+The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive
+Changed local items to upload to OneDrive: 1
+The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: 1.txt -> 1-onedrive-client-dev.txt
+Uploading new file 1-onedrive-client-dev.txt ... done.
+Scanning the local file system '~/OneDrive' for new data to upload
+... 
+Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 3 +Finished processing /delta JSON response from the OneDrive API +Processing 2 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Number of items to download from OneDrive: 1 +Downloading file ./1.txt ... done + +Sync with Microsoft OneDrive is complete +Waiting for all internal threads to complete before exiting application +``` + + +### Local First Operational Modes - Conflict Handling with --resync + +#### Scenario +1. Create a local file +2. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first` +3. Modify file locally with different data|contents +4. Modify file online with different data|contents +5. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first --resync` + +![conflict_handling_local-first_resync](./puml/conflict_handling_local-first_resync.png) + +#### Evidence of Conflict Handling +``` +... + +The usage of --resync will delete your local 'onedrive' client state, thus no record of your current 'sync status' will exist. +This has the potential to overwrite local versions of files with perhaps older versions of documents downloaded from OneDrive, resulting in local data loss. +If in doubt, backup your local data before using --resync + +Are you sure you wish to proceed with --resync? [Y/N] y + +Deleting the saved application sync status ... +Using IPv4 and IPv6 (if configured) for all network operations +... +Sync Engine Initialised with new Onedrive API instance +All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing ~/OneDrive +The directory has not changed +Scanning the local file system '~/OneDrive' for new data to upload +Skipping item - excluded by sync_list config: ./random_25k_files +OneDrive Client requested to create this directory online: ./α +The requested directory to create was found on OneDrive - skipping creating the directory: ./α +... +New items to upload to OneDrive: 9 +Total New Data to Upload: 49 KB +... +The file we are attempting to upload as a new file already exists on Microsoft OneDrive: ./1.txt +Skipping uploading this item as a new file, will upload as a modified file (online file already exists): ./1.txt +The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt +Uploading new file ./1-onedrive-client-dev.txt ... done. +Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 15 +Finished processing /delta JSON response from the OneDrive API +Processing 14 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Number of items to download from OneDrive: 1 +Downloading file ./1.txt ... 
done
+
+Sync with Microsoft OneDrive is complete
+Waiting for all internal threads to complete before exiting application
+```
+
+## Client Functional Component Architecture Relationships
+
+The diagram below shows the main functional relationship of application code components, and how these relate to each relevant code module within this application:
+
+![Functional Code Components](./puml/code_functional_component_relationships.png)
+
+## Database Schema
+
+The diagram below shows the database schema that is used within the application.
+
+![Database Schema](./puml/database_schema.png)
diff --git a/docs/contributing.md b/docs/contributing.md
new file mode 100644
index 000000000..1678cf548
--- /dev/null
+++ b/docs/contributing.md
@@ -0,0 +1,175 @@
+# OneDrive Client for Linux: Coding Style Guidelines
+
+## Introduction
+
+This document outlines the coding style guidelines for code contributions to the OneDrive Client for Linux.
+
+These guidelines are intended to ensure the codebase remains clean, well-organised, and accessible to all contributors, new and experienced alike.
+
+## Code Layout
+> [!NOTE]
+> When developing any code contribution, please utilise either Microsoft Visual Studio Code or Notepad++.
+
+### Indentation
+Most of the codebase utilises tabs for indentation, with a tab width of 4 spaces. Please keep to this convention.
+
+### Line Length
+Try to keep lines to a reasonable length. Do not constrain yourself to short line lengths such as 80 characters. This means that when the code is displayed in a code editor, lines render correctly at screen resolutions of 1920x1080 and above.
+
+If you wish to use shorter line lengths (80 characters for example), please do not follow this sort of example:
+```code
+...
+	void functionName(
+		string somevar,
+		bool someOtherVar,
+		const(char) anotherVar=null
+	){
+....
+```
+
+### Coding Style | Braces
+Please use 1TBS (One True Brace Style), which is a variation of the K&R (Kernighan & Ritchie) style. This approach is intended to improve readability and maintain consistency throughout the code.
+
+When using this coding style, even when the code of the `if`, `else`, `for`, or function definition contains only one statement, braces are used to enclose it.
+
+```code
+	// What this if statement is doing
+	if (condition) {
+		// The condition was true
+		.....
+	} else {
+		// The condition was false
+		.....
+	}
+
+	// Loop 10 times to do something
+	for (int i = 0; i < 10; i++) {
+		// Loop body
+	}
+
+	// This function is to do this
+	void functionExample() {
+		// Function body
+	}
+```
+
+## Naming Conventions
+
+### Variables and Functions
+Please use `camelCase` for variable and function names.
+
+### Classes and Interfaces
+Please use `PascalCase` for classes, interfaces, and structs.
+
+### Constants
+Use uppercase with underscores between words.
+
+## Documentation
+
+### Language and Spelling
+To maintain consistency across the project's documentation, comments, and code, all written text must adhere to British English spelling conventions, not American English. This requirement applies to all aspects of the codebase, including variable names, comments, and documentation.
+
+For example, use "specialise" instead of "specialize", "colour" instead of "color", and "organise" instead of "organize". This standard ensures that the project maintains a cohesive and consistent linguistic style.
+
+### Code Comments
+Please comment code at all levels. Use `//` for all line comments. 
Detail why a statement is needed, or what is expected to happen, so future readers or contributors can understand the intent of the code with clarity.
+
+If fixing a 'bug', please add a link to the GitHub issue being addressed as a comment, for example:
+```code
+...
+	// Before discarding change - does this ID still exist on OneDrive - as in IS this
+	// potentially a --single-directory sync and the user 'moved' the file out of the 'sync-dir' to another OneDrive folder
+	// This is a corner edge case - https://github.com/skilion/onedrive/issues/341
+
+	// What is the original local path for this ID in the database? Does it match 'syncFolderChildPath'
+	if (itemdb.idInLocalDatabase(driveId, item["id"].str)){
+		// item is in the database
+		string originalLocalPath = computeItemPath(driveId, item["id"].str);
+...
+```
+
+All code should be clearly commented.
+
+### Application Logging Output
+If making changes to any application logging output, please first discuss this either via direct communication or email.
+
+For reference, below are the available application logging output functions and examples:
+```code
+
+	// most used
+	addLogEntry("Basic 'info' message", ["info"]); .... or just use addLogEntry("Basic 'info' message");
+	addLogEntry("Basic 'verbose' message", ["verbose"]);
+	addLogEntry("Basic 'debug' message", ["debug"]);
+
+	// GUI notify only
+	addLogEntry("Basic 'notify' ONLY message and displayed in GUI if notifications are enabled", ["notify"]);
+
+	// info and notify
+	addLogEntry("Basic 'info and notify' message and displayed in GUI if notifications are enabled", ["info", "notify"]);
+
+	// log file only
+	addLogEntry("Information sent to the log file only, and only if logging to a file is enabled", ["logFileOnly"]);
+
+	// Console only (session based upload|download)
+	addLogEntry("Basic 'Console only with new line' message", ["consoleOnly"]);
+
+	// Console only with no new line
+	addLogEntry("Basic 'Console only with no new line' message", ["consoleOnlyNoNewLine"]);
+
+```
+
+### Documentation Updates
+If the code changes any of the functionality that is documented, it is expected that any PR submission will also include updating the respective section of user documentation and/or man page as part of the code submission.
+
+## Development Testing
+Whilst there are more modern DMD and LDC compilers available, ensuring client build compatibility with older platforms is a key requirement.
+
+The issue stems from Debian and Ubuntu LTS versions, such as Ubuntu 20.04. Its [ldc package](https://packages.ubuntu.com/focal/ldc) is only v1.20.1, thus this is the minimum version that all compilation needs to be tested against.
+
+The reason LDC v1.20.1 must be used is that this is the version used to compile the packages presented at the [OpenSuSE Build Service](https://build.opensuse.org/package/show/home:npreining:debian-ubuntu-onedrive/onedrive), which is where most Debian and Ubuntu users will install the client from.
+
+It is assumed here that you know how to download and install the correct LDC compiler for your platform. 
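+
+For reference, a minimal sketch of one way to obtain and activate this specific LDC version is via the official D language install script (this assumes `curl` is available and uses the default `~/dlang` install location):
+```bash
+# Install LDC 1.20.1 via the official D install script (installs under ~/dlang by default)
+curl -fsS https://dlang.org/install.sh | bash -s ldc-1.20.1
+
+# Activate this specific compiler version for the current shell session
+source ~/dlang/ldc-1.20.1/activate
+
+# Verify the active compiler version
+ldc2 --version
+```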
+
+## Submitting a PR
+When submitting a PR, please provide your testing evidence of what has been fixed in the PR submission, in the following format:
+
+### Without PR
+```
+Application output that is doing whatever | or illustration of issue | illustration of bug
+```
+
+### With PR
+```
+Application output that is doing whatever | or illustration of issue being fixed | illustration of bug being fixed
+```
+Please also include validation of compilation using the minimum LDC package version.
+
+To assist with validating your changes against the minimum LDC compiler version, a script such as the one below could be used:
+
+```bash
+
+#!/bin/bash
+
+PR=
+
+rm -rf ./onedrive-pr${PR}
+git clone https://github.com/abraunegg/onedrive.git onedrive-pr${PR}
+cd onedrive-pr${PR}
+git fetch origin pull/${PR}/head:pr${PR}
+git checkout pr${PR}
+
+# MIN LDC Version to compile
+# MIN Version for ARM / Compiling with LDC
+source ~/dlang/ldc-1.20.1/activate
+
+# Compile code with specific LDC version
+./configure --enable-debug --enable-notifications; make clean; make;
+deactivate
+./onedrive --version
+
+```
+
+## References
+
+* D Language Official Style Guide: https://dlang.org/dstyle.html
+* British English spelling conventions: https://www.collinsdictionary.com/
\ No newline at end of file
diff --git a/docs/Docker.md b/docs/docker.md
similarity index 89%
rename from docs/Docker.md
rename to docs/docker.md
index 1f0050fd6..ca92e359f 100644
--- a/docs/Docker.md
+++ b/docs/docker.md
@@ -23,7 +23,8 @@ The 'edge' Docker Container will align closer to all documentation and features,
Additionally there are specific version release tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in.
-**Note:** The below instructions for docker has been tested and validated when logging into the system as an unprivileged user (non 'root' user).
+> [!NOTE]
+> The below instructions for docker have been tested and validated when logging into the system as an unprivileged user (non 'root' user).

## High Level Configuration Steps
1. Install 'docker' as per your distribution platform's instructions if not already installed.
@@ -37,7 +38,11 @@
## Configuration Steps
### 1. Install 'docker' on your platform
-Install 'docker' as per your distribution platform's instructions if not already installed.
+Install Docker for your system using the official instructions found at https://docs.docker.com/engine/install/.
+
+> [!CAUTION]
+> If you are using Ubuntu or any distribution based on Ubuntu, do not install Docker from your distribution's repositories, as they may contain obsolete versions. Instead, you must install Docker using the packages provided directly by Docker.
+
### 2. Configure 'docker' to allow non-privileged users to run Docker commands
Read https://docs.docker.com/engine/install/linux-postinstall/ to configure the 'docker' user group with your user account to allow your non 'root' user to run 'docker' commands. 
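+
+As a guide, the post-install configuration referenced above typically amounts to the following commands, taken from the official Docker documentation (a logout and login may be required for the group change to take effect):
+```bash
+# Create the 'docker' group if it does not already exist
+sudo groupadd docker
+
+# Add your non 'root' user account to the 'docker' group
+sudo usermod -aG docker $USER
+
+# Activate the new group membership for the current shell session
+newgrp docker
+```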
@@ -131,17 +136,19 @@ This will create a docker volume labeled `onedrive_data` and will map to a path
* The owner of this specified folder must have permissions for its parent directory
* Docker will attempt to change the permissions of the volume to the user the container is configured to run as

-**NOTE:** Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owed by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Docker container will fail to start with the following error message:
-```bash
-ROOT level privileges prohibited!
-```
+> [!IMPORTANT]
+> Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owned by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Docker container will fail to start with the following error message:
+> ```bash
+> ROOT level privileges prohibited!
+> ```

### 6. First run of Docker container under docker and performing authorisation
The 'onedrive' client within the container first needs to be authorised with your Microsoft account. This is achieved by initially running docker in interactive mode.

Run the docker image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`).

-**Important:** The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the docker container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the docker volume mapping to occur.
+> [!IMPORTANT]
+> The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the docker container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the docker volume mapping to occur.

It is also a requirement that the container be run using a non-root uid and gid, you must insert a non-root UID and GID (e.g.` export ONEDRIVE_UID=1000` and export `ONEDRIVE_GID=1000`). The script below will use `id` to evaluate your system environment to use the correct values.
```bash
@@ -228,7 +235,7 @@ docker volume inspect onedrive_conf
Or you can map your own config folder to the config volume. Make sure to copy all files from the docker volume into your mapped folder first.

-The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#configuration)
+The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration)

### Syncing multiple accounts
There are many ways to do this, the easiest is probably to do the following:
@@ -246,7 +253,7 @@ If you are experienced with docker and onedrive, you can use the following scrip
```bash
# Update ONEDRIVE_DATA_DIR with correct OneDrive directory path
ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
-# Create directory if non-existant
+# Create directory if non-existent
mkdir -p ${ONEDRIVE_DATA_DIR}

firstRun='-d'
@@ -270,10 +277,14 @@ docker run $firstRun --restart unless-stopped --name onedrive -v onedrive_conf:/
| ONEDRIVE_NOREMOTEDELETE | Controls "--no-remote-delete" switch on onedrive sync. 
Default is 0 | 1 | | ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | | ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | -| ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | -| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#authorize-the-application-with-your-onedrive-account) | +| ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | Please read [CLI Option: --auth-files](./application-config-options.md#cli-option---auth-files) | +| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | Please read [CLI Option: --auth-response](./application-config-options.md#cli-option---auth-response) | | ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | | ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | +| ONEDRIVE_DRYRUN | Controls "--dry-run" option. Default is 0 | 1 | +| ONEDRIVE_DISABLE_DOWNLOAD_VALIDATION | Controls "--disable-download-validation" option. Default is 0 | 1 | +| ONEDRIVE_DISABLE_UPLOAD_VALIDATION | Controls "--disable-upload-validation" option. Default is 0 | 1 | +| ONEDRIVE_SYNC_SHARED_FILES | Controls "--sync-shared-files" option. Default is 0 | 1 | ### Environment Variables Usage Examples **Verbose Output:** @@ -334,7 +345,8 @@ If you are running a Raspberry Pi, you will need to edit your system configurati * Modify the file `/etc/dphys-swapfile` and edit the `CONF_SWAPSIZE`, for example: `CONF_SWAPSIZE=2048`. -A reboot of your Raspberry Pi is required to make this change effective. +> [!IMPORTANT] +> A reboot of your Raspberry Pi is required to make this change effective. 
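+
+As an illustrative sketch only, the swap size change described above could be applied as follows (the 2048MB value is an example; adjust to suit your system):
+```bash
+# Set CONF_SWAPSIZE to 2048MB in the dphys-swapfile configuration
+sudo sed -i 's/^CONF_SWAPSIZE=.*/CONF_SWAPSIZE=2048/' /etc/dphys-swapfile
+
+# Reboot your Raspberry Pi to make the new swap size effective
+sudo reboot
+```
+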
### Building and running a custom Docker image You can also build your own image instead of pulling the one from [hub.docker.com](https://hub.docker.com/r/driveone/onedrive): diff --git a/docs/images/add_shared_file_shortcut.png b/docs/images/add_shared_file_shortcut.png new file mode 100644 index 000000000..2d16abb4c Binary files /dev/null and b/docs/images/add_shared_file_shortcut.png differ diff --git a/docs/images/add_shared_folder.png b/docs/images/add_shared_folder.png new file mode 100644 index 000000000..a34f089bf Binary files /dev/null and b/docs/images/add_shared_folder.png differ diff --git a/docs/images/files_shared_with_me_folder.png b/docs/images/files_shared_with_me_folder.png new file mode 100644 index 000000000..9f9462c24 Binary files /dev/null and b/docs/images/files_shared_with_me_folder.png differ diff --git a/docs/images/linux_shared_folder_view.png b/docs/images/linux_shared_folder_view.png new file mode 100644 index 000000000..2fa0a1923 Binary files /dev/null and b/docs/images/linux_shared_folder_view.png differ diff --git a/docs/images/linux_view_shared_file_link.png b/docs/images/linux_view_shared_file_link.png new file mode 100644 index 000000000..eb2dfea97 Binary files /dev/null and b/docs/images/linux_view_shared_file_link.png differ diff --git a/docs/images/objects_shared_with_me.png b/docs/images/objects_shared_with_me.png new file mode 100644 index 000000000..1327c535e Binary files /dev/null and b/docs/images/objects_shared_with_me.png differ diff --git a/docs/images/online_shared_file_link.png b/docs/images/online_shared_file_link.png new file mode 100644 index 000000000..c264b105a Binary files /dev/null and b/docs/images/online_shared_file_link.png differ diff --git a/docs/images/shared_folder_added.png b/docs/images/shared_folder_added.png new file mode 100644 index 000000000..3677de1b2 Binary files /dev/null and b/docs/images/shared_folder_added.png differ diff --git a/docs/images/windows_view_shared_file_link.png b/docs/images/windows_view_shared_file_link.png new file mode 100644 index 000000000..d6bfb02f2 Binary files /dev/null and b/docs/images/windows_view_shared_file_link.png differ diff --git a/docs/images/windows_view_shared_folders.png b/docs/images/windows_view_shared_folders.png new file mode 100644 index 000000000..9432a62cd Binary files /dev/null and b/docs/images/windows_view_shared_folders.png differ diff --git a/docs/INSTALL.md b/docs/install.md similarity index 69% rename from docs/INSTALL.md rename to docs/install.md index 3f00ae212..941a30274 100644 --- a/docs/INSTALL.md +++ b/docs/install.md @@ -5,51 +5,42 @@ This project has been packaged for the following Linux distributions as per belo Only the current release version or greater is supported. Earlier versions are not supported and should not be installed or used. -#### Important Note: -Distribution packages may be of an older release when compared to the latest release that is [available](https://github.com/abraunegg/onedrive/releases). If any package version indicator below is 'red' for your distribution, it is recommended that you build from source. Do not install the software from the available distribution package. If a package is out of date, please contact the package maintainer for resolution. +> [!CAUTION] +> Distribution packages may be of an older release when compared to the latest release that is [available](https://github.com/abraunegg/onedrive/releases). If any package version indicator below is 'red' for your distribution, it is recommended that you build from source. 
Do not install the software from the available distribution package. If a package is out of date, please contact the package maintainer for resolution. | Distribution | Package Name & Package Link |   PKG_Version   |  i686  | x86_64 | ARMHF | AARCH64 | Extra Details | |---------------------------------|------------------------------------------------------------------------------|:---------------:|:----:|:------:|:-----:|:-------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Alpine Linux | [onedrive](https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge) |Alpine Linux Edge package|❌|✔|❌|✔ | | -| Arch Linux

Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |AUR package|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)

**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'

**Note:** System must have at least 1GB of memory & 1GB swap space -| Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |Debian 11 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended that for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | -| Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |Debian 12 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | +| Arch Linux

Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |AUR package|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)

**Note:** You must first install 'base-devel' as this is a pre-requisite for using the AUR

**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'

**Note:** System must have at least 1GB of memory & 1GB swap space +| CentOS 8 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 8 package|❌|✔|❌|✔| **Note:** You must install the EPEL Repository first | +| CentOS 9 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 9 package|❌|✔|❌|✔| **Note:** You must install the EPEL Repository first | +| Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |Debian 11 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | +| Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |Debian 12 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | +| Debian Sid | [onedrive](https://packages.debian.org/sid/onedrive) |Debian Sid package|✔|✔|✔|✔| | | Fedora | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |Fedora Rawhide package|✔|✔|✔|✔| | | Gentoo | [onedrive](https://gpo.zugaina.org/net-misc/onedrive) | No API Available |✔|✔|❌|❌| | | Homebrew | [onedrive](https://formulae.brew.sh/formula/onedrive) | Homebrew package |❌|✔|❌|❌| | -| Linux Mint 20.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories

It is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | -| Linux Mint 21.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories

It is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Linux Mint 20.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Linux Mint 21.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | | NixOS | [onedrive](https://search.nixos.org/packages?channel=20.09&from=0&size=50&sort=relevance&query=onedrive)|nixpkgs unstable package|❌|✔|❌|❌| Use package `onedrive` either by adding it to `configuration.nix` or by using the command `nix-env -iA .onedrive`. This does not install a service. To install a service, use unstable channel (will stabilize in 20.09) and add `services.onedrive.enable=true` in `configuration.nix`. You can also add a custom package using the `services.onedrive.package` option (recommended since package lags upstream). Enabling the service installs a default package too (based on the channel). You can also add multiple onedrive accounts trivially, see [documentation](https://github.com/NixOS/nixpkgs/pull/77734#issuecomment-575874225). | | OpenSuSE | [onedrive](https://software.opensuse.org/package/onedrive) |openSUSE Tumbleweed package|✔|✔|❌|❌| | | OpenSuSE Build Service | [onedrive](https://build.opensuse.org/package/show/home:npreining:debian-ubuntu-onedrive/onedrive) | No API Available |✔|✔|✔|✔| Package Build Service for Debian and Ubuntu | -| Raspbian | [onedrive](https://archive.raspbian.org/raspbian/pool/main/o/onedrive/) |Raspbian Stable package |❌|❌|✔|✔| **Note:** Do not install from Raspbian Package Repositories

It is recommended that for Raspbian that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | +| Raspbian | [onedrive](https://archive.raspbian.org/raspbian/pool/main/o/onedrive/) |Raspbian Stable package |❌|❌|✔|✔| **Note:** Do not install from Raspbian Package Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Raspbian that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | | Slackware | [onedrive](https://slackbuilds.org/result/?search=onedrive&sv=) |SlackBuilds package|✔|✔|❌|❌| | | Solus | [onedrive](https://dev.getsol.us/search/query/FB7PIf1jG9Z9/#R) |Solus package|✔|✔|❌|❌| | -| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe

It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | -| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe

It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | -| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |Ubuntu 23.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe

It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported

For a supported application version, it is recommended that for Ubuntu you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported

For a supported application version, it is recommended that for Ubuntu you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |Ubuntu 23.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported

For a supported application version, it is recommended that for Ubuntu you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Ubuntu 24.04 | [onedrive](https://packages.ubuntu.com/noble/onedrive) |Ubuntu 24.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported

For a supported application version, it is recommended that for Ubuntu you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | | Void Linux | [onedrive](https://voidlinux.org/packages/?arch=x86_64&q=onedrive) |Void Linux x86_64 package|✔|✔|❌|❌| | -#### Important information for all Ubuntu and Ubuntu based distribution users: -This information is specifically for the following platforms and distributions: -* Ubuntu -* Lubuntu -* Linux Mint -* POP OS -* Peppermint OS - -Whilst there are [onedrive](https://packages.ubuntu.com/search?keywords=onedrive&searchon=names&suite=all&section=all) Universe packages available for Ubuntu, do not install 'onedrive' from these Universe packages. The default Universe packages are out-of-date and are not supported and should not be used. If you wish to use a package, it is highly recommended that you utilise the [OpenSuSE Build Service](ubuntu-package-install.md) to install packages for these platforms. If the OpenSuSE Build Service does not cater for your version, your only option is to build from source. - -If you wish to change this situation so that you can just use the Universe packages via 'apt install onedrive', consider becoming the Ubuntu package maintainer and contribute back to your community. - ## Building from Source - High Level Requirements -* Build environment must have at least 1GB of memory & 1GB swap space -* Install the required distribution package dependencies -* [libcurl](http://curl.haxx.se/libcurl/) -* [SQLite 3](https://www.sqlite.org/) >= 3.7.15 -* [Digital Mars D Compiler (DMD)](http://dlang.org/download.html) or [LDC – the LLVM-based D Compiler](https://github.com/ldc-developers/ldc) +* For successful compilation of this application, it's crucial that the build environment is equipped with a minimum of 1GB of memory and an additional 1GB of swap space. +* Install the required distribution package dependencies covering the required development tools and the development libraries for curl and sqlite +* Install the [Digital Mars D Compiler (DMD)](https://dlang.org/download.html) or [LDC – the LLVM-based D Compiler](https://github.com/ldc-developers/ldc) -**Note:** DMD version >= 2.088.0 or LDC version >= 1.18.0 is required to compile this application +> [!IMPORTANT] +> To compile this application successfully, it is essential to use either DMD version **2.088.0** or higher, or LDC version **1.18.0** or higher. These minimum versions, or more recent releases, are required for compatibility and optimal performance. ### Example for installing DMD Compiler ```text @@ -71,7 +62,7 @@ Ubuntu Linux 18.x LTS reached the end of its five-year LTS window on May 31th 20 ### Dependencies: Debian 9 Debian 9 reached the end of its five-year support window on June 30th 2022 and is no longer supported.
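+Before working through the distribution-specific dependency sections below, it can help to first confirm that the installed D compiler meets the minimum version noted above. A minimal check, assuming `dmd` or `ldc2` is already on your `PATH`:
+```bash
+# Print the installed compiler version; run whichever compiler you installed
+dmd --version
+ldc2 --version
+```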
-### Dependencies: Ubuntu 20.x -> Ubuntu 23.x / Debian 10 -> Debian 12 - x86_64 +### Dependencies: Ubuntu 20.x -> Ubuntu 24.x / Debian 10 -> Debian 12 - x86_64 These dependencies are also applicable for all Ubuntu based distributions such as: * Lubuntu * Linux Mint @@ -101,7 +92,7 @@ For notifications the following is also necessary: sudo yum install libnotify-devel ``` -### Dependencies: Fedora > Version 18 / CentOS 8.x / RHEL 8.x / RHEL 9.x +### Dependencies: Fedora > Version 18 / CentOS 8.x / CentOS 9.x / RHEL 8.x / RHEL 9.x ```text sudo dnf groupinstall 'Development Tools' sudo dnf install libcurl-devel sqlite-devel @@ -122,14 +113,16 @@ sudo pacman -S libnotify ``` ### Dependencies: Raspbian (ARMHF) and Ubuntu 22.x / Debian 11 / Debian 12 / Raspbian (ARM64) -**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later. +> [!CAUTION] +> The minimum LDC compiler version required to compile this application is 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later. These instructions were validated using: * `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-armhf-lite) using Raspberry Pi 3B (revision 1.2) * `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-arm64-lite) using Raspberry Pi 3B (revision 1.2) * `Linux ubuntu 5.15.0-1005-raspi #5-Ubuntu SMP PREEMPT Mon Apr 4 12:21:48 UTC 2022 aarch64 aarch64 aarch64 GNU/Linux` (ubuntu-22.04-preinstalled-server-arm64+raspi) using Raspberry Pi 3B (revision 1.2) -**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`. +> [!IMPORTANT] +> For successful compilation of this application, it's crucial that the build environment is equipped with a minimum of 1GB of memory and an additional 1GB of swap space. To verify your system's swap space availability, you can use the `swapon` command. Ensuring these requirements are met is vital for the application's compilation process. ```text sudo apt install build-essential @@ -200,7 +193,8 @@ Run `deactivate` later on to restore your environment. ``` Without performing this step, the compilation process will fail. -**Note:** Depending on your DMD version, substitute `2.088.0` above with your DMD version that is installed. +> [!NOTE] +> Depending on your DMD version, substitute `2.088.0` above with the DMD version that is installed. ```text git clone https://github.com/abraunegg/onedrive.git @@ -211,8 +205,10 @@ sudo make install ``` ### Build options -Notifications can be enabled using the `configure` switch `--enable-notifications`. +#### GUI Notification Support +GUI notification support can be enabled using the `configure` switch `--enable-notifications`. +#### systemd service directory customisation support Systemd service files are installed in the appropriate directories on the system, as provided by `pkg-config systemd` settings.
If overriding the deduced paths is necessary, the two options `--with-systemdsystemunitdir` (for @@ -220,9 +216,11 @@ the Systemd system unit location), and `--with-systemduserunitdir` (for the Systemd user unit location) can be specified. Passing in `no` to one of these options disables service file installation. +#### Additional Compiler Debug By passing `--enable-debug` to the `configure` call, `onedrive` gets built with additional debug information, useful (for example) to get `perf`-issued figures. +#### Shell Completion Support By passing `--enable-completions` to the `configure` call, shell completion functions are installed for `bash`, `zsh` and `fish`. The installation directories are determined as far as possible automatically, but can be overridden by passing @@ -231,9 +229,12 @@ as far as possible automatically, but can be overridden by passing ### Building using a different compiler (for example [LDC](https://wiki.dlang.org/LDC)) #### ARMHF Architecture (Raspbian) and ARM64 Architecture (Ubuntu 22.x / Debian 11 / Raspbian) -**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later. +> [!CAUTION] +> The minimum LDC compiler version required to compile this application is 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later. + +> [!IMPORTANT] +> For successful compilation of this application, it's crucial that the build environment is equipped with a minimum of 1GB of memory and an additional 1GB of swap space. To verify your system's swap space availability, you can use the `swapon` command. Ensuring these requirements are met is vital for the application's compilation process. -**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`. ```text git clone https://github.com/abraunegg/onedrive.git cd onedrive @@ -247,11 +248,13 @@ If you have installed the client from a distribution package, the client will be If you have built the client from source, to upgrade your client, it is recommended that you first uninstall your existing 'onedrive' binary (see below), then re-install the client by re-cloning, re-compiling and re-installing the client again to install the new version. -**Note:** Following the uninstall process will remove all client components including *all* systemd files, including any custom files created for specific access such as SharePoint Libraries. +> [!NOTE] +> Following the uninstall process will remove all client components, including *all* systemd files and any custom files created for specific access such as SharePoint Libraries. You can optionally choose not to perform this uninstallation step and simply re-install the client by re-cloning, re-compiling and re-installing it. However, the risk is that you end up with two onedrive client binaries on your system, and your system search path will determine which binary is used.
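+To check for duplicate binaries and confirm which copy your shell will actually run, standard shell tooling is sufficient (illustrative only):
+```bash
+# List every 'onedrive' binary found on the current search path
+type -a onedrive
+# Show the version reported by the binary that is resolved first
+onedrive --version
+```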
+> [!CAUTION] +> Before performing any upgrade, it is highly recommended that you stop any running systemd service, if applicable, to ensure that these services are restarted using the updated client version. Post re-install, to confirm that you have the new version of the client installed, use `onedrive --version` to determine the client version that is now installed. @@ -269,7 +272,8 @@ If you are not upgrading your client, to remove your application state and confi ``` rm -rf ~/.config/onedrive ``` -**Note:** If you are using the `--confdir option`, substitute `~/.config/onedrive` for the correct directory storing your client configuration. +> [!IMPORTANT] +> If you are using the `--confdir` option, substitute `~/.config/onedrive` for the correct directory storing your client configuration. If you want to just delete the application key, but keep the items database: ```text diff --git a/docs/known-issues.md b/docs/known-issues.md index 6d970ff91..d6ac302a2 100644 --- a/docs/known-issues.md +++ b/docs/known-issues.md @@ -1,54 +1,60 @@ -# Known Issues -The below are known issues with this client: +# List of Identified Known Issues +The following points detail known issues associated with this client: -## Moving files into different folders should not cause data to delete and be re-uploaded -**Issue Tracker:** [#876](https://github.com/abraunegg/onedrive/issues/876) +## Renaming or Moving Files in Standalone Mode causes online deletion and re-upload to occur +**Issue Tracker:** [#876](https://github.com/abraunegg/onedrive/issues/876), [#2579](https://github.com/abraunegg/onedrive/issues/2579) -**Description:** +**Summary:** -When running the client in standalone mode (`--synchronize`) moving folders that are successfully synced around between subsequent standalone syncs causes a deletion & re-upload of data to occur. +Renaming or moving files and/or folders while using the standalone sync option `--sync` results in unnecessary online data deletion and subsequent re-upload. -**Explanation:** +**Detailed Description:** -Technically, the client is 'working' correctly, as, when moving files, you are 'deleting' them from the current location, but copying them to the 'new location'. As the client is running in standalone sync mode, there is no way to track what OS operations have been done when the client is not running - thus, this is why the 'delete and upload' is occurring. +In standalone mode (`--sync`), renaming or moving files or folders locally that have already been synchronized leads to the data being deleted online and then re-uploaded during the next synchronization. -**Workaround:** +**Technical Explanation:** -If the tracking of moving data to new local directories is requried, it is better to run the client in service mode (`--monitor`) rather than in standalone mode, as the 'move' of files can then be handled at the point when it occurs, so that the data is moved to the new location on OneDrive without the need to be deleted and re-uploaded. +This behavior is expected from the client under these specific conditions. Renaming or moving files is interpreted as deleting them from their original location and creating them in a new location. In standalone sync mode, the client lacks the capability to track file system changes (including renames and moves) that occur when it is not running. This limitation is the root cause of the observed 'deletion and re-upload' cycle.
+ +**Recommended Workaround:** + +For effective tracking of file and folder renames or moves to new local directories, it is recommended to run the client in service mode (`--monitor`) rather than in standalone mode. This approach allows the client to immediately process these changes, enabling the data to be updated (renamed or moved) in the new location on OneDrive without undergoing deletion and re-upload. ## Application 'stops' running without any visible reason **Issue Tracker:** [#494](https://github.com/abraunegg/onedrive/issues/494), [#753](https://github.com/abraunegg/onedrive/issues/753), [#792](https://github.com/abraunegg/onedrive/issues/792), [#884](https://github.com/abraunegg/onedrive/issues/884), [#1162](https://github.com/abraunegg/onedrive/issues/1162), [#1408](https://github.com/abraunegg/onedrive/issues/1408), [#1520](https://github.com/abraunegg/onedrive/issues/1520), [#1526](https://github.com/abraunegg/onedrive/issues/1526) -**Description:** +**Summary:** + +Users experience sudden shutdowns in a client application during file transfers with Microsoft's Europe Data Centers, likely due to unstable internet or HTTPS inspection issues. This problem, often signaled by an error code of 141, is related to the application's reliance on Curl and OpenSSL. Resolution steps include system updates, seeking support from OS vendors, ISPs, OpenSSL/Curl teams, and providing detailed debug logs to Microsoft for analysis. -When running the client and performing an upload or download operation, the application just stops working without any reason or explanation. If `echo $?` is used after the application has exited without visible reason, an error level of 141 may be provided. +**Detailed Description:** -Additionally, this issue has mainly been seen when the client is operating against Microsoft's Europe Data Centre's. +The application unexpectedly stops functioning during upload or download operations when using the client. This issue occurs without any apparent reason. Running `echo $?` after the unexpected exit may return an error code of 141. -**Explanation:** +This problem predominantly arises when the client interacts with Microsoft's Europe Data Centers. -The client is heavily dependant on Curl and OpenSSL to perform the activities with the Microsoft OneDrive service. Generally, when this issue occurs, the following is found in the HTTPS Debug Log: +**Technical Explanation:** + +The client heavily relies on Curl and OpenSSL for operations with the Microsoft OneDrive service. A common observation during this error is an entry in the HTTPS Debug Log stating: ``` OpenSSL SSL_read: SSL_ERROR_SYSCALL, errno 104 ``` -The only way to determine that this is the cause of the application ceasing to work is to generate a HTTPS debug log using the following additional flags: +To confirm this as the root cause, a detailed HTTPS debug log can be generated with these commands: ``` --verbose --verbose --debug-https ``` -This is indicative of the following: -* Some sort of flaky Internet connection somewhere between you and the OneDrive service -* Some sort of 'broken' HTTPS transparent inspection service inspecting your traffic somewhere between you and the OneDrive service - -**How to resolve:** +This error typically suggests one of the following issues: +* An unstable internet connection between the user and the OneDrive service. +* An issue with HTTPS transparent inspection services that monitor the traffic en route to the OneDrive service. 
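+As an illustrative sketch of the log-generation step described above, a single standalone sync can be run with the documented flags, capturing the HTTPS debug output to a file for later analysis (the log file path is only an example):
+```bash
+# Run one standalone sync with maximum verbosity and HTTPS debugging enabled,
+# capturing both stdout and stderr for later review
+onedrive --sync --verbose --verbose --debug-https > ~/onedrive-https-debug.log 2>&1
+```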
-The best avenue of action here are: -* Ensure your OS is as up-to-date as possible -* Get support from your OS vendor -* Speak to your ISP or Help Desk for assistance -* Open a ticket with OpenSSL and/or Curl teams to better handle this sort of connection failure -* Generate a HTTPS Debug Log for this application and open a new support request with Microsoft and provide the debug log file for their analysis. +**Recommended Resolution:** -If you wish to diagnose this issue further, refer to the following: +Recommended steps to address this issue include: +* Updating your operating system to the latest version. +* Seeking assistance from your OS vendor. +* Contacting your Internet Service Provider (ISP) or your IT Help Desk. +* Reporting the issue to the OpenSSL and/or Curl teams for improved handling of such connection failures. +* Creating a HTTPS Debug Log during the issue and submitting a support request to Microsoft with the log for their analysis. -https://maulwuff.de/research/ssl-debugging.html +For more in-depth SSL troubleshooting, please read: https://maulwuff.de/research/ssl-debugging.html \ No newline at end of file diff --git a/docs/national-cloud-deployments.md b/docs/national-cloud-deployments.md index 6b348388d..7666f8dd8 100644 --- a/docs/national-cloud-deployments.md +++ b/docs/national-cloud-deployments.md @@ -1,13 +1,13 @@ # How to configure access to specific Microsoft Azure deployments -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. +> [!CAUTION] +> Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. ## Process Overview -In some cases it is a requirement to utilise specific Microsoft Azure cloud deployments to conform with data and security reuqirements that requires data to reside within the geographic borders of that country. +In some cases it is a requirement to utilise specific Microsoft Azure cloud deployments to conform with data and security requirements that require data to reside within the geographic borders of that country. Current national clouds that are supported are: * Microsoft Cloud for US Government * Microsoft Cloud Germany -* Azure and Office365 operated by 21Vianet in China +* Azure and Office365 operated by VNET in China In order to successfully use these specific Microsoft Azure deployments, the following steps are required: 1. Register an application with the Microsoft identity platform using the Azure portal @@ -22,9 +22,9 @@ In order to successfully use these specific Microsoft Azure deployments, the fol | National Cloud Environment | Microsoft Azure Portal | |---|---| -| Microsoft Cloud for US Government | https://portal.azure.com/ | -| Microsoft Cloud Germany | https://portal.azure.com/ | -| Azure and Office365 operated by 21Vianet | https://portal.azure.cn/ | +| Microsoft Cloud for US Government | https://portal.azure.com/ | +| Microsoft Cloud Germany | https://portal.azure.com/ | +| Azure and Office365 operated by VNET | https://portal.azure.cn/ | 2.
Select 'Azure Active Directory' as the service you wish to configure 3. Under 'Manage', select 'App registrations' to register a new application @@ -37,7 +37,8 @@ In order to successfully use these specific Microsoft Azure deployments, the fol ![application_registration_done](./images/application_registration_done.jpg) -**Note:** The Application (client) ID UUID as displayed after client registration, is what is required as the 'application_id' for Step 4 below. +> [!NOTE] +> The Application (client) ID UUID, as displayed after client registration, is what is required as the 'application_id' for Step 4 below. ## Step 2: Configure application authentication scopes Configure the API permissions as per the following: @@ -59,12 +60,12 @@ Add the appropriate redirect URI for your Azure deployment: A valid entry for the response URI should be one of: * https://login.microsoftonline.us/common/oauth2/nativeclient (Microsoft Cloud for US Government) * https://login.microsoftonline.de/common/oauth2/nativeclient (Microsoft Cloud Germany) -* https://login.chinacloudapi.cn/common/oauth2/nativeclient (Azure and Office365 operated by 21Vianet in China) +* https://login.chinacloudapi.cn/common/oauth2/nativeclient (Azure and Office365 operated by VNET in China) For a single-tenant application, it may be necessary to use your specific tenant id instead of "common": * https://login.microsoftonline.us/example.onmicrosoft.us/oauth2/nativeclient (Microsoft Cloud for US Government) * https://login.microsoftonline.de/example.onmicrosoft.de/oauth2/nativeclient (Microsoft Cloud Germany) -* https://login.chinacloudapi.cn/example.onmicrosoft.cn/oauth2/nativeclient (Azure and Office365 operated by 21Vianet in China) +* https://login.chinacloudapi.cn/example.onmicrosoft.cn/oauth2/nativeclient (Azure and Office365 operated by VNET in China) ## Step 4: Configure the onedrive client to use new application registration Update your 'onedrive' configuration file (`~/.config/onedrive/config`) with the following: @@ -89,7 +90,7 @@ Valid entries are: * USL4 (Microsoft Cloud for US Government) * USL5 (Microsoft Cloud for US Government - DOD) * DE (Microsoft Cloud Germany) -* CN (Azure and Office365 operated by 21Vianet in China) +* CN (Azure and Office365 operated by VNET in China) This will configure your client to use the correct Azure AD and Graph endpoints as per [https://docs.microsoft.com/en-us/graph/deployments](https://docs.microsoft.com/en-us/graph/deployments) @@ -105,7 +106,7 @@ azure_tenant_id = "insert valid entry here" This will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common". The tenant id may be the GUID Directory ID (formatted "00000000-0000-0000-0000-000000000000"), or the fully qualified tenant name (e.g. "example.onmicrosoft.us"). -The GUID Directory ID may be located in the Azure administation page as per [https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id](https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id). Note that you may need to go to your national-deployment-specific administration page, rather than following the links within that document. +The GUID Directory ID may be located in the Azure administration page as per [https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id](https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id).
Note that you may need to go to your national-deployment-specific administration page, rather than following the links within that document. The tenant name may be obtained by following the PowerShell instructions on [https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id](https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id); it is shown as the "TenantDomain" upon completion of the "Connect-AzureAD" command. **Example:** diff --git a/docs/Podman.md b/docs/podman.md similarity index 88% rename from docs/Podman.md rename to docs/podman.md index 35e69d26e..8e5888524 100644 --- a/docs/Podman.md +++ b/docs/podman.md @@ -23,7 +23,8 @@ The 'edge' Docker Container will align closer to all documentation and features, Additionally there are specific version release tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in. -**Note:** The below instructions for podman has been tested and validated when logging into the system as an unprivileged user (non 'root' user). +> [!NOTE] +> The below instructions for podman have been tested and validated when logging into the system as an unprivileged user (non 'root' user). ## High Level Configuration Steps 1. Install 'podman' as per your distribution platform's instructions if not already installed. @@ -103,17 +104,19 @@ This will create a podman volume labeled `onedrive_data` and will map to a path * The owner of this specified folder must not be root * Podman will attempt to change the permissions of the volume to the user the container is configured to run as -**NOTE:** Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owed by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Podman container will fail to start with the following error message: -```bash -ROOT level privileges prohibited! -``` +> [!IMPORTANT] +> Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owned by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Podman container will fail to start with the following error message: +> ```bash +> ROOT level privileges prohibited! +> ``` ### 5. First run of Docker container under podman and performing authorisation The 'onedrive' client within the container first needs to be authorised with your Microsoft account. This is achieved by initially running podman in interactive mode. Run the podman image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`). -**Important:** The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the podman container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the podman volume mapping to occur. +> [!IMPORTANT] +> The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the podman container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the podman volume mapping to occur.
It is also a requirement that the container be run using a non-root UID and GID; you must insert a non-root UID and GID (e.g. `export ONEDRIVE_UID=1000` and `export ONEDRIVE_GID=1000`). The script below will use `id` to evaluate your system environment to use the correct values. ```bash export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" @@ -127,7 +130,8 @@ podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ driveone/onedrive:edge ``` -**Important:** In some scenarios, 'podman' sets the configuration and data directories to a different UID & GID as specified. To resolve this situation, you must run 'podman' with the `--userns=keep-id` flag to ensure 'podman' uses the UID and GID as specified. The updated script example when using `--userns=keep-id` is below: +> [!IMPORTANT] +> In some scenarios, 'podman' sets the configuration and data directories to a different UID & GID than specified. To resolve this situation, you must run 'podman' with the `--userns=keep-id` flag to ensure 'podman' uses the UID and GID as specified. The updated script example when using `--userns=keep-id` is below: ```bash export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" @@ -142,7 +146,8 @@ podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ ``` -**Important:** If you plan to use the 'podman' built in auto-updating of container images described in 'Systemd Service & Auto Updating' below, you must pass an additional argument to set a label during the first run. The updated script example to support auto-updating of container images is below: +> [!IMPORTANT] +> If you plan to use the 'podman' built-in auto-updating of container images described in 'Systemd Service & Auto Updating' below, you must pass an additional argument to set a label during the first run. The updated script example to support auto-updating of container images is below: ```bash export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" @@ -195,7 +200,6 @@ podman start onedrive podman rm -f onedrive ``` - ## Advanced Usage ### Systemd Service & Auto Updating @@ -255,7 +259,7 @@ podman volume inspect onedrive_conf ``` Or you can map your own config folder to the config volume. Make sure to copy all files from the volume into your mapped folder first. -The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#configuration) +The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration) ### Syncing multiple accounts There are many ways to do this; the easiest is probably the following: @@ -290,10 +294,14 @@ podman run -it --name onedrive_work --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ | ONEDRIVE_NOREMOTEDELETE | Controls "--no-remote-delete" switch on onedrive sync. Default is 0 | 1 | | ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | | ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | -| ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | -| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#authorize-the-application-with-your-onedrive-account) | +| ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | Please read [CLI Option: --auth-files](./application-config-options.md#cli-option---auth-files) | +| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option.
Default is "" | Please read [CLI Option: --auth-response](./application-config-options.md#cli-option---auth-response) | | ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | | ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | +| ONEDRIVE_DRYRUN | Controls "--dry-run" option. Default is 0 | 1 | +| ONEDRIVE_DISABLE_DOWNLOAD_VALIDATION | Controls "--disable-download-validation" option. Default is 0 | 1 | +| ONEDRIVE_DISABLE_UPLOAD_VALIDATION | Controls "--disable-upload-validation" option. Default is 0 | 1 | +| ONEDRIVE_SYNC_SHARED_FILES | Controls "--sync-shared-files" option. Default is 0 | 1 | ### Environment Variables Usage Examples **Verbose Output:** diff --git a/docs/puml/applyPotentiallyChangedItem.png b/docs/puml/applyPotentiallyChangedItem.png new file mode 100644 index 000000000..cbb7f9566 Binary files /dev/null and b/docs/puml/applyPotentiallyChangedItem.png differ diff --git a/docs/puml/applyPotentiallyChangedItem.puml b/docs/puml/applyPotentiallyChangedItem.puml new file mode 100644 index 000000000..58ed2d53b --- /dev/null +++ b/docs/puml/applyPotentiallyChangedItem.puml @@ -0,0 +1,48 @@ +@startuml +start +partition "applyPotentiallyChangedItem" { + :Check if existing item path differs from changed item path; + if (itemWasMoved) then (yes) + :Log moving item; + if (destination exists) then (yes) + if (item in database) then (yes) + :Check if item is synced; + if (item is synced) then (yes) + :Log destination is in sync; + else (no) + :Log destination occupied with a different item; + :Backup conflicting file; + note right: Local data loss prevention + endif + else (no) + :Log destination occupied by an un-synced file; + :Backup conflicting file; + note right: Local data loss prevention + endif + endif + :Try to rename path; + if (dry run) then (yes) + :Track as faked id item; + :Track path not renamed; + else (no) + :Rename item; + :Flag item as moved; + if (item is a file) then (yes) + :Set local timestamp to match online; + endif + endif + else (no) + endif + :Check if eTag changed; + if (eTag changed) then (yes) + if (item is a file and not moved) then (yes) + :Decide if to download based on hash; + else (no) + :Update database; + endif + else (no) + :Update database if timestamp differs or in specific operational mode; + endif +} +stop +@enduml diff --git a/docs/puml/applyPotentiallyNewLocalItem.png b/docs/puml/applyPotentiallyNewLocalItem.png new file mode 100644 index 000000000..59705f75d Binary files /dev/null and b/docs/puml/applyPotentiallyNewLocalItem.png differ diff --git a/docs/puml/applyPotentiallyNewLocalItem.puml b/docs/puml/applyPotentiallyNewLocalItem.puml new file mode 100644 index 000000000..b900f3ef5 --- /dev/null +++ b/docs/puml/applyPotentiallyNewLocalItem.puml @@ -0,0 +1,90 @@ +@startuml +start +partition "applyPotentiallyNewLocalItem" { + :Check if path exists; + + if (Path exists?) then (yes) + :Log "Path on local disk already exists"; + + if (Is symbolic link?) then (yes) + :Log "Path is a symbolic link"; + + if (Can read symbolic link?) then (no) + :Log "Reading symbolic link failed"; + :Log "Skipping item - invalid symbolic link"; + stop + endif + endif + + :Determine if item is in-sync; + note right: Execute 'isItemSynced()' function + if (Is item in-sync?) then (yes) + :Log "Item in-sync"; + :Update/Insert item in DB; + stop + else (no) + :Log "Item not in-sync"; + :Compare local & remote modification times; + + if (Local time > Remote time?) 
then (yes) + if (ID in database?) then (yes) + :Log "Local file is newer & ID in DB"; + :Fetch latest DB record; + if (Times equal?) then (yes) + :Log "Times match, keeping local file"; + else (no) + :Log "Local time newer, keeping file"; + note right: Online item has an 'older' modified timestamp wise than the local file\nIt is assumed that the local file is the file to keep + endif + stop + else (no) + :Log "Local item not in DB"; + if (Bypass data protection?) then (yes) + :Log "WARNING: Data protection disabled"; + else (no) + :Safe backup local file; + note right: Local data loss prevention + endif + stop + endif + else (no) + if (Remote time > Local time?) then (yes) + :Log "Remote item is newer"; + if (Bypass data protection?) then (yes) + :Log "WARNING: Data protection disabled"; + else (no) + :Safe backup local file; + note right: Local data loss prevention + endif + endif + + if (Times equal?) then (yes) + note left: Specific handling if timestamp was\nadjusted by isItemSynced() + :Log "Times equal, no action required"; + :Update/Insert item in DB; + stop + endif + endif + endif + + else (no) + :Handle as potentially new item; + switch (Item type) + case (File) + :Add to download queue; + case (Directory) + :Log "Creating local directory"; + if (Dry run?) then (no) + :Create directory & set attributes; + :Save item to DB; + else + :Log "Dry run, faking directory creation"; + :Save item to dry-run DB; + endif + case (Unknown) + :Log "Unknown type, no action"; + endswitch + endif +} +stop +@enduml diff --git a/docs/puml/client_side_filtering_rules.png b/docs/puml/client_side_filtering_rules.png new file mode 100644 index 000000000..2a71d76ae Binary files /dev/null and b/docs/puml/client_side_filtering_rules.png differ diff --git a/docs/puml/client_side_filtering_rules.puml b/docs/puml/client_side_filtering_rules.puml new file mode 100644 index 000000000..2e3ed1a66 --- /dev/null +++ b/docs/puml/client_side_filtering_rules.puml @@ -0,0 +1,71 @@ +@startuml +start +:Start; +partition "checkPathAgainstClientSideFiltering" { + :Get localFilePath; + + if (Does path exist?) then (no) + :Return false; + stop + endif + + if (Check .nosync?) then (yes) + :Check for .nosync file; + if (.nosync found) then (yes) + :Log and return true; + stop + endif + endif + + if (Skip dotfiles?) then (yes) + :Check if dotfile; + if (Is dotfile) then (yes) + :Log and return true; + stop + endif + endif + + if (Skip symlinks?) then (yes) + :Check if symlink; + if (Is symlink) then (yes) + if (Config says skip?) then (yes) + :Log and return true; + stop + elseif (Unexisting symlink?) then (yes) + :Check if relative link works; + if (Relative link ok) then (no) + :Log and return true; + stop + endif + endif + endif + endif + + if (Skip dir or file?) then (yes) + :Check dir or file exclusion; + if (Excluded by config?) then (yes) + :Log and return true; + stop + endif + endif + + if (Use sync_list?) then (yes) + :Check sync_list exclusions; + if (Excluded by sync_list?) then (yes) + :Log and return true; + stop + endif + endif + + if (Check file size?) then (yes) + :Check for file size limit; + if (File size exceeds limit?) 
then (yes) + :Log and return true; + stop + endif + endif + + :Return false; +} +stop +@enduml diff --git a/docs/puml/client_use_of_libcurl.png b/docs/puml/client_use_of_libcurl.png new file mode 100644 index 000000000..d5b988383 Binary files /dev/null and b/docs/puml/client_use_of_libcurl.png differ diff --git a/docs/puml/client_use_of_libcurl.puml b/docs/puml/client_use_of_libcurl.puml new file mode 100644 index 000000000..c9b4e9b26 --- /dev/null +++ b/docs/puml/client_use_of_libcurl.puml @@ -0,0 +1,41 @@ +@startuml +participant "OneDrive Client\nfor Linux" as od +participant "libcurl" as lc +participant "Client Web Browser" as browser +participant "Microsoft Authentication Service\n(OAuth 2.0 Endpoint)" as oauth +participant "GitHub API" as github +participant "Microsoft Graph API" as graph + +activate od +activate lc + +od->od: Generate Authentication\nService URL +activate browser +od->browser: Navigate to Authentication\nService URL via Client Web Browser +browser->oauth: Request access token +activate oauth +oauth-->browser: Access token +browser-->od: Access token +deactivate oauth +deactivate browser + +od->lc: Check application version\nvia api.github.com +activate github +lc->github: Query release status +activate github +github-->lc: Release information +deactivate github +lc-->od: Process release information +deactivate lc + +loop API Communication + od->lc: Construct HTTPS request (with token) + activate lc + lc->graph: API Request + activate graph + graph-->lc: API Response + deactivate graph + lc-->od: Process response + deactivate lc +end +@enduml diff --git a/docs/puml/code_functional_component_relationships.png b/docs/puml/code_functional_component_relationships.png new file mode 100644 index 000000000..1a2a28542 Binary files /dev/null and b/docs/puml/code_functional_component_relationships.png differ diff --git a/docs/puml/code_functional_component_relationships.puml b/docs/puml/code_functional_component_relationships.puml new file mode 100644 index 000000000..ede5bff01 --- /dev/null +++ b/docs/puml/code_functional_component_relationships.puml @@ -0,0 +1,78 @@ +@startuml +!define DATABASE_ENTITY(x) entity x +component main { +} +component config { +} +component log { +} +component curlEngine { +} +component util { +} +component onedrive { +} +component syncEngine { +} +component itemdb { +} +component clientSideFiltering { +} +component monitor { +} +component sqlite { +} +component qxor { +} + +DATABASE_ENTITY("Database") + +main --> config +main --> log +main --> curlEngine +main --> util +main --> onedrive +main --> syncEngine +main --> itemdb +main --> clientSideFiltering +main --> monitor + +config --> log +config --> util + +clientSideFiltering --> config +clientSideFiltering --> util +clientSideFiltering --> log + +syncEngine --> config +syncEngine --> log +syncEngine --> util +syncEngine --> onedrive +syncEngine --> itemdb +syncEngine --> clientSideFiltering + +util --> log +util --> config +util --> qxor +util --> curlEngine + +sqlite --> log +sqlite -> "Database" : uses + +onedrive --> config +onedrive --> log +onedrive --> util +onedrive --> curlEngine + +monitor --> config +monitor --> util +monitor --> log +monitor --> clientSideFiltering +monitor .> syncEngine : inotify event + +itemdb --> sqlite +itemdb --> util +itemdb --> log + +curlEngine --> log +@enduml diff --git a/docs/puml/conflict_handling_default.png b/docs/puml/conflict_handling_default.png new file mode 100644 index 000000000..90d61ac09 Binary files /dev/null and 
b/docs/puml/conflict_handling_default.png differ diff --git a/docs/puml/conflict_handling_default.puml b/docs/puml/conflict_handling_default.puml new file mode 100644 index 000000000..31f6b96fd --- /dev/null +++ b/docs/puml/conflict_handling_default.puml @@ -0,0 +1,31 @@ +@startuml +start +note left: Operational Mode 'onedrive --sync' +:Query OneDrive /delta API for online changes; +note left: This data is considered the 'source-of-truth'\nLocal data should be a 'replica' of this data +:Process received JSON data; +if (JSON item is a file) then (yes) + if (Does the file exist locally) then (yes) + :Compute relevant file hashes; + :Check DB for file record; + if (DB record found) then (yes) + :Compare file hash with DB hash; + if (Is the hash different) then (yes) + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + else (no) + endif + else (no) + endif + else (no) + endif +:Download file (as per online JSON item) as required; +else (no) + :Other handling for directories | root objects | deleted items; +endif +:Performing a database consistency and\nintegrity check on locally stored data; +:Scan file system for any new data to upload; +note left: The file that was renamed will be uploaded here +stop +@enduml \ No newline at end of file diff --git a/docs/puml/conflict_handling_default_resync.png b/docs/puml/conflict_handling_default_resync.png new file mode 100644 index 000000000..f0b8f29c3 Binary files /dev/null and b/docs/puml/conflict_handling_default_resync.png differ diff --git a/docs/puml/conflict_handling_default_resync.puml b/docs/puml/conflict_handling_default_resync.puml new file mode 100644 index 000000000..fb08253b8 --- /dev/null +++ b/docs/puml/conflict_handling_default_resync.puml @@ -0,0 +1,35 @@ +@startuml +start +note left: Operational Mode 'onedrive -sync --resync' +:Query OneDrive /delta API for online changes; +note left: This data is considered the 'source-of-truth'\nLocal data should be a 'replica' of this data +:Process received JSON data; +if (JSON item is a file) then (yes) + if (Does the file exist locally) then (yes) + note left: In a --resync scenario there are no DB\nrecords that can be used or referenced\nuntil the JSON item is processed and\nadded to the local database cache + if (Can the file be read) then (yes) + :Compute UTC timestamp data from local file and JSON data; + if (timestamps are equal) then (yes) + else (no) + :Log that a local file time discrepancy was detected; + if (Do file hashes match) then (yes) + :Correct the offending timestamp as hashes match; + else (no) + :Local file is technically different; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + endif + endif + else (no) + endif + else (no) + endif +:Download file (as per online JSON item) as required; +else (no) + :Other handling for directories | root objects | deleted items; +endif +:Performing a database consistency and\nintegrity check on locally stored data; +:Scan file system for any new data to upload; +note left: The file that was renamed will be uploaded here +stop +@enduml \ No newline at end of file diff --git a/docs/puml/conflict_handling_local-first_default.png b/docs/puml/conflict_handling_local-first_default.png new file mode 100644 index 000000000..cd9ce760e Binary files /dev/null and 
b/docs/puml/conflict_handling_local-first_default.png differ diff --git a/docs/puml/conflict_handling_local-first_default.puml b/docs/puml/conflict_handling_local-first_default.puml new file mode 100644 index 000000000..1faca7cd1 --- /dev/null +++ b/docs/puml/conflict_handling_local-first_default.puml @@ -0,0 +1,62 @@ +@startuml +start +note left: Operational Mode 'onedrive -sync -local-first' +:Performing a database consistency and\nintegrity check on locally stored data; +note left: This data is considered the 'source-of-truth'\nOnline data should be a 'replica' of this data +repeat + :Process each DB record; + if (Is the DB record is in sync with local file) then (yes) + + else (no) + + :Log reason for discrepancy; + :Flag item to be processed as a modified local file; + + endif +repeat while + +:Process modified items to upload; + +if (Does local file DB record match current latest online JSON data) then (yes) + +else (no) + + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + :Upload renamed local file as new file; + +endif + +:Upload modified file; + +:Scan file system for any new data to upload; + +:Query OneDrive /delta API for online changes; + +:Process received JSON data; +if (JSON item is a file) then (yes) + if (Does the file exist locally) then (yes) + :Compute relevant file hashes; + :Check DB for file record; + if (DB record found) then (yes) + :Compare file hash with DB hash; + if (Is the hash different) then (yes) + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + else (no) + endif + else (no) + + endif + else (no) + endif + + :Download file (as per online JSON item) as required; + +else (no) + :Other handling for directories | root objects | deleted items; +endif +stop +@enduml \ No newline at end of file diff --git a/docs/puml/conflict_handling_local-first_resync.png b/docs/puml/conflict_handling_local-first_resync.png new file mode 100644 index 000000000..541db42ee Binary files /dev/null and b/docs/puml/conflict_handling_local-first_resync.png differ diff --git a/docs/puml/conflict_handling_local-first_resync.puml b/docs/puml/conflict_handling_local-first_resync.puml new file mode 100644 index 000000000..ca4bf990e --- /dev/null +++ b/docs/puml/conflict_handling_local-first_resync.puml @@ -0,0 +1,70 @@ +@startuml +start +note left: Operational Mode 'onedrive -sync -local-first -resync' +:Query OneDrive API and create new database with default root account objects; +:Performing a database consistency and\nintegrity check on locally stored data; +note left: This data is considered the 'source-of-truth'\nOnline data should be a 'replica' of this data\nHowever the database has only 1 record currently +:Scan file system for any new data to upload; +note left: This is where in this specific mode all local\n content is assessed for applicability for\nupload to Microsoft OneDrive + +repeat + :For each new local item; + if (Is the item a directory) then (yes) + if (Is Directory found online) then (yes) + :Save directory details from online in local database; + else (no) + :Create directory online; + :Save details in local database; + endif + else (no) + :Flag file as a potentially new item to upload; + endif +repeat while + +:Process potential new items to upload; + 
+repeat + :For each potential file to upload; + if (Is File found online) then (yes) + if (Does the online JSON data match local file) then (yes) + :Save details in local database; + else (no) + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + :Upload renamed local file as new file; + endif + else (no) + :Upload new file; + endif +repeat while + +:Query OneDrive /delta API for online changes; +:Process received JSON data; +if (JSON item is a file) then (yes) + if (Does the file exist locally) then (yes) + :Compute relevant file hashes; + :Check DB for file record; + if (DB record found) then (yes) + :Compare file hash with DB hash; + if (Is the hash different) then (yes) + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + else (no) + endif + else (no) + + endif + else (no) + endif + +:Download file (as per online JSON item) as required; + +else (no) + :Other handling for directories | root objects | deleted items; +endif + + +stop +@enduml \ No newline at end of file diff --git a/docs/puml/database_schema.png b/docs/puml/database_schema.png new file mode 100644 index 000000000..b45eb5265 Binary files /dev/null and b/docs/puml/database_schema.png differ diff --git a/docs/puml/database_schema.puml b/docs/puml/database_schema.puml new file mode 100644 index 000000000..225a03e32 --- /dev/null +++ b/docs/puml/database_schema.puml @@ -0,0 +1,39 @@ +@startuml + +class item { + driveId: TEXT + id: TEXT + name: TEXT + remoteName: TEXT + type: TEXT + eTag: TEXT + cTag: TEXT + mtime: TEXT + parentId: TEXT + quickXorHash: TEXT + sha256Hash: TEXT + remoteDriveId: TEXT + remoteParentId: TEXT + remoteId: TEXT + remoteType: TEXT + deltaLink: TEXT + syncStatus: TEXT + size: TEXT +} + +note right of item::driveId + PRIMARY KEY (driveId, id) + FOREIGN KEY (driveId, parentId) REFERENCES item +end note + +item --|> item : parentId + +note "Indexes" as N1 +note left of N1 + name_idx ON item (name) + remote_idx ON item (remoteDriveId, remoteId) + item_children_idx ON item (driveId, parentId) + selectByPath_idx ON item (name, driveId, parentId) +end note + +@enduml \ No newline at end of file diff --git a/docs/puml/downloadFile.png b/docs/puml/downloadFile.png new file mode 100644 index 000000000..4ab2322ee Binary files /dev/null and b/docs/puml/downloadFile.png differ diff --git a/docs/puml/downloadFile.puml b/docs/puml/downloadFile.puml new file mode 100644 index 000000000..e61aab6c0 --- /dev/null +++ b/docs/puml/downloadFile.puml @@ -0,0 +1,63 @@ +@startuml +start + +partition "Download File" { + + :Get item specifics from JSON; + :Calculate item's path; + + if (Is item malware?) then (yes) + :Log malware detected; + stop + else (no) + :Check for file size in JSON; + if (File size missing) then (yes) + :Log error; + stop + endif + + :Configure hashes for comparison; + if (Hashes missing) then (yes) + :Log error; + stop + endif + + if (Does file exist locally?) then (yes) + :Check DB for item; + if (DB hash match?) then (no) + :Log modification; Perform safe backup; + note left: Local data loss prevention + endif + endif + + :Check local disk space; + if (Insufficient space?) then (yes) + :Log insufficient space; + stop + else (no) + if (Dry run?) 
then (yes) + :Fake download process; + else (no) + :Attempt to download file; + if (Download exception occurs?) then (yes) + :Handle exceptions; Retry download or log error; + endif + + if (File downloaded successfully?) then (yes) + :Validate download; + if (Validation passes?) then (yes) + :Log success; Update DB; + else (no) + :Log validation failure; Remove file; + endif + else (no) + :Log download failed; + endif + endif + endif + endif + +} + +stop +@enduml diff --git a/docs/puml/high_level_operational_process.png b/docs/puml/high_level_operational_process.png new file mode 100644 index 000000000..19c906021 Binary files /dev/null and b/docs/puml/high_level_operational_process.png differ diff --git a/docs/puml/high_level_operational_process.puml b/docs/puml/high_level_operational_process.puml new file mode 100644 index 000000000..37d76b997 --- /dev/null +++ b/docs/puml/high_level_operational_process.puml @@ -0,0 +1,55 @@ +@startuml + +participant "OneDrive Client\nfor Linux" as Client +participant "Microsoft OneDrive\nAPI" as API + +== Access Token Validation == +Client -> Client: Validate access and\nexisting access token\nRefresh if needed + +== Query Microsoft OneDrive /delta API == +Client -> API: Query /delta API +API -> Client: JSON responses + +== Process JSON Responses == +loop for each JSON response + Client -> Client: Determine if JSON is 'root'\nor 'deleted' item\nElse, push into temporary array for further processing + alt if 'root' or 'deleted' + Client -> Client: Process 'root' or 'deleted' items + else + Client -> Client: Evaluate against 'Client Side Filtering' rules + alt if unwanted + Client -> Client: Discard JSON + else + Client -> Client: Process JSON (create dir/download file) + Client -> Client: Save in local database cache + end + end +end + +== Local Cache Database Processing for Data Integrity == +Client -> Client: Process local cache database\nto check local data integrity and for differences +alt if difference found + Client -> API: Upload file/folder change including deletion + API -> Client: Response with item metadata + Client -> Client: Save response to local cache database +end + +== Local Filesystem Scanning == +Client -> Client: Scan local filesystem\nfor new files/folders + +loop for each new item + Client -> Client: Check item against 'Client Side Filtering' rules + alt if item passes filtering + Client -> API: Upload new file/folder change including deletion + API -> Client: Response with item metadata + Client -> Client: Save response in local\ncache database + else + Client -> Client: Discard item\n(Does not meet filtering criteria) + end +end + +== Final Data True-Up == +Client -> API: Query /delta link for true-up +API -> Client: Process further online JSON changes if required + +@enduml diff --git a/docs/puml/is_item_in_sync.png b/docs/puml/is_item_in_sync.png new file mode 100644 index 000000000..4f6a55bae Binary files /dev/null and b/docs/puml/is_item_in_sync.png differ diff --git a/docs/puml/is_item_in_sync.puml b/docs/puml/is_item_in_sync.puml new file mode 100644 index 000000000..d3fe40a26 --- /dev/null +++ b/docs/puml/is_item_in_sync.puml @@ -0,0 +1,79 @@ +@startuml +start +partition "Is item in sync" { + :Check if path exists; + if (path does not exist) then (no) + :Return false; + stop + else (yes) + endif + + :Identify item type; + switch (item type) + case (file) + + :Check if path is a file; + if (path is not a file) then (no) + :Log "item is a directory but should be a file"; + :Return false; + stop + else (yes) + endif + + 
:Attempt to read local file; + if (file is unreadable) then (no) + :Log "file cannot be read"; + :Return false; + stop + else (yes) + endif + + :Get local and input item modified time; + note right: The 'input item' could be a database reference object, or the online JSON object\nas provided by the Microsoft OneDrive API + :Reduce time resolution to seconds; + + if (localModifiedTime == itemModifiedTime) then (yes) + :Return true; + stop + else (no) + :Log time discrepancy; + endif + + :Check if file hash is the same; + if (hash is the same) then (yes) + :Log "hash match, correcting timestamp"; + if (local time > item time) then (yes) + if (download only mode) then (no) + :Correct timestamp online if not dryRun; + else (yes) + :Correct local timestamp if not dryRun; + endif + else (no) + :Correct local timestamp if not dryRun; + endif + :Return false; + note right: Specifically return false here as we performed a time correction\nApplication logic will then perform additional handling based on this very specific response. + stop + else (no) + :Log "different hash"; + :Return false; + stop + endif + + case (dir or remote) + :Check if path is a directory; + if (path is a directory) then (yes) + :Return true; + stop + else (no) + :Log "item is a file but should be a directory"; + :Return false; + stop + endif + + case (unknown) + :Return true but do not sync; + stop + endswitch +} +@enduml diff --git a/docs/puml/main_activity_flows.png b/docs/puml/main_activity_flows.png new file mode 100644 index 000000000..2c5e290bf Binary files /dev/null and b/docs/puml/main_activity_flows.png differ diff --git a/docs/puml/main_activity_flows.puml b/docs/puml/main_activity_flows.puml new file mode 100644 index 000000000..dc1f07ce4 --- /dev/null +++ b/docs/puml/main_activity_flows.puml @@ -0,0 +1,81 @@ +@startuml + +start + +:Validate access and existing access token\nRefresh if needed; + +:Query /delta API; +note right: Query Microsoft OneDrive /delta API +:Receive JSON responses; + +:Process JSON Responses; +partition "Process /delta JSON Responses" { + while (for each JSON response) is (yes) + :Determine if JSON is 'root'\nor 'deleted' item; + if ('root' or 'deleted') then (yes) + :Process 'root' or 'deleted' items; + if ('root' object) then (yes) + :Process 'root' JSON; + else (no) + if (Is 'deleted' object in sync) then (yes) + :Process deletion of local item; + else (no) + :Rename local file as it is not in sync; + note right: Deletion event conflict handling\nLocal data loss prevention + endif + endif + else (no) + :Evaluate against 'Client Side Filtering' rules; + if (unwanted) then (yes) + :Discard JSON; + else (no) + :Process JSON (create dir/download file); + if (Is the 'JSON' item in the local cache) then (yes) + :Process JSON as a potentially changed local item; + note left: Run 'applyPotentiallyChangedItem' function + else (no) + :Process JSON as potentially new local item; + note right: Run 'applyPotentiallyNewLocalItem' function + endif + :Process objects in download queue; + :Download File; + note left: Download file from Microsoft OneDrive (Multi Threaded Download) + :Save in local database cache; + endif + endif + endwhile +} + +partition "Perform data integrity check based on local cache database" { + :Process local cache database\nto check local data integrity and for differences; + if (difference found) then (yes) + :Upload file/folder change including deletion; + note right: Upload local change to Microsoft OneDrive + :Receive response with item metadata; + :Save response to local 
cache database; + else (no) + endif +} + +partition "Local Filesystem Scanning" { + :Scan local filesystem\nfor new files/folders; + while (for each new item) is (yes) + :Check item against 'Client Side Filtering' rules; + if (item passes filtering) then (yes) + :Upload new file/folder change including deletion; + note right: Upload to Microsoft OneDrive + :Receive response with item metadata; + :Save response in local\ncache database; + else (no) + :Discard item\n(Does not meet filtering criteria); + endif + endwhile +} + +partition "Final True-Up" { + :Query /delta link for true-up; + note right: Final Data True-Up + :Process further online JSON changes if required; +} +stop +@enduml \ No newline at end of file diff --git a/docs/puml/onedrive_linux_authentication.png b/docs/puml/onedrive_linux_authentication.png new file mode 100644 index 000000000..4e27a2144 Binary files /dev/null and b/docs/puml/onedrive_linux_authentication.png differ diff --git a/docs/puml/onedrive_linux_authentication.puml b/docs/puml/onedrive_linux_authentication.puml new file mode 100644 index 000000000..4d89f0dbb --- /dev/null +++ b/docs/puml/onedrive_linux_authentication.puml @@ -0,0 +1,47 @@ +@startuml +participant "OneDrive Client for Linux" +participant "Microsoft OneDrive\nAuthentication Service\n(login.microsoftonline.com)" as AuthServer +participant "User's Device (for MFA)" as UserDevice +participant "Microsoft Graph API\n(graph.microsoft.com)" as GraphAPI +participant "Microsoft OneDrive" + +"OneDrive Client for Linux" -> AuthServer: Request Authorization\n(Client Credentials, Scopes) +AuthServer -> "OneDrive Client for Linux": Provide Authorization Code + +"OneDrive Client for Linux" -> AuthServer: Request Access Token\n(Authorization Code, Client Credentials) + +alt MFA Enabled + AuthServer -> UserDevice: Trigger MFA Challenge + UserDevice -> AuthServer: Provide MFA Verification + AuthServer -> "OneDrive Client for Linux": Return Access Token\n(and Refresh Token) + "OneDrive Client for Linux" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "OneDrive Client for Linux" -> AuthServer: Is Access Token Expired? + alt Token Expired + "OneDrive Client for Linux" -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> "OneDrive Client for Linux": Return New Access Token + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "OneDrive Client for Linux": Provide Data + end + end +else MFA Not Required + AuthServer -> "OneDrive Client for Linux": Return Access Token\n(and Refresh Token) + "OneDrive Client for Linux" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "OneDrive Client for Linux" -> AuthServer: Is Access Token Expired? 
+ alt Token Expired + "OneDrive Client for Linux" -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> "OneDrive Client for Linux": Return New Access Token + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "OneDrive Client for Linux": Provide Data + end + end +else MFA Failed or Other Auth Error + AuthServer -> "OneDrive Client for Linux": Error Message (e.g., Invalid Credentials, MFA Failure) +end + +@enduml \ No newline at end of file diff --git a/docs/puml/onedrive_windows_ad_authentication.puml b/docs/puml/onedrive_windows_ad_authentication.puml new file mode 100644 index 000000000..43b312fab --- /dev/null +++ b/docs/puml/onedrive_windows_ad_authentication.puml @@ -0,0 +1,59 @@ +@startuml +participant "Microsoft Windows OneDrive Client" +participant "Azure Active Directory\n(Active Directory)\n(login.microsoftonline.com)" as AzureAD +participant "Microsoft OneDrive\nAuthentication Service\n(login.microsoftonline.com)" as AuthServer +participant "User's Device (for MFA)" as UserDevice +participant "Microsoft Graph API\n(graph.microsoft.com)" as GraphAPI +participant "Microsoft OneDrive" + +"Microsoft Windows OneDrive Client" -> AzureAD: Request Authorization\n(Client Credentials, Scopes) +AzureAD -> AuthServer: Validate Credentials\n(Forward Request) +AuthServer -> AzureAD: Provide Authorization Code +AzureAD -> "Microsoft Windows OneDrive Client": Provide Authorization Code (via AzureAD) + +"Microsoft Windows OneDrive Client" -> AzureAD: Request Access Token\n(Authorization Code, Client Credentials) +AzureAD -> AuthServer: Request Access Token\n(Authorization Code, Forwarded Credentials) +AuthServer -> AzureAD: Return Access Token\n(and Refresh Token) +AzureAD -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) (via AzureAD) + +alt MFA Enabled + AzureAD -> UserDevice: Trigger MFA Challenge + UserDevice -> AzureAD: Provide MFA Verification + AzureAD -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) (Post MFA) + "Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "Microsoft Windows OneDrive Client" -> AzureAD: Is Access Token Expired? + AzureAD -> AuthServer: Validate Token Expiry + alt Token Expired + "Microsoft Windows OneDrive Client" -> AzureAD: Request New Access Token\n(Refresh Token) + AzureAD -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> AzureAD: Return New Access Token + AzureAD -> "Microsoft Windows OneDrive Client": Return New Access Token (via AzureAD) + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data + end + end +else MFA Not Required + AzureAD -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) (Direct) + "Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "Microsoft Windows OneDrive Client" -> AzureAD: Is Access Token Expired? 
+ AzureAD -> AuthServer: Validate Token Expiry + alt Token Expired + "Microsoft Windows OneDrive Client" -> AzureAD: Request New Access Token\n(Refresh Token) + AzureAD -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> AzureAD: Return New Access Token + AzureAD -> "Microsoft Windows OneDrive Client": Return New Access Token (via AzureAD) + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data + end + end +else MFA Failed or Other Auth Error + AzureAD -> "Microsoft Windows OneDrive Client": Error Message (e.g., Invalid Credentials, MFA Failure) +end + +@enduml diff --git a/docs/puml/onedrive_windows_authentication.png b/docs/puml/onedrive_windows_authentication.png new file mode 100644 index 000000000..d87ba7490 Binary files /dev/null and b/docs/puml/onedrive_windows_authentication.png differ diff --git a/docs/puml/onedrive_windows_authentication.puml b/docs/puml/onedrive_windows_authentication.puml new file mode 100644 index 000000000..43a458a04 --- /dev/null +++ b/docs/puml/onedrive_windows_authentication.puml @@ -0,0 +1,47 @@ +@startuml +participant "Microsoft Windows OneDrive Client" +participant "Microsoft OneDrive\nAuthentication Service\n(login.microsoftonline.com)" as AuthServer +participant "User's Device (for MFA)" as UserDevice +participant "Microsoft Graph API\n(graph.microsoft.com)" as GraphAPI +participant "Microsoft OneDrive" + +"Microsoft Windows OneDrive Client" -> AuthServer: Request Authorization\n(Client Credentials, Scopes) +AuthServer -> "Microsoft Windows OneDrive Client": Provide Authorization Code + +"Microsoft Windows OneDrive Client" -> AuthServer: Request Access Token\n(Authorization Code, Client Credentials) + +alt MFA Enabled + AuthServer -> UserDevice: Trigger MFA Challenge + UserDevice -> AuthServer: Provide MFA Verification + AuthServer -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) + "Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "Microsoft Windows OneDrive Client" -> AuthServer: Is Access Token Expired? + alt Token Expired + "Microsoft Windows OneDrive Client" -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> "Microsoft Windows OneDrive Client": Return New Access Token + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data + end + end +else MFA Not Required + AuthServer -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) + "Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "Microsoft Windows OneDrive Client" -> AuthServer: Is Access Token Expired? 
+        alt Token Expired
+            "Microsoft Windows OneDrive Client" -> AuthServer: Request New Access Token\n(Refresh Token)
+            AuthServer -> "Microsoft Windows OneDrive Client": Return New Access Token
+        else Token Valid
+            GraphAPI -> "Microsoft OneDrive": Retrieve Data
+            "Microsoft OneDrive" -> GraphAPI: Return Data
+            GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data
+        end
+    end
+else MFA Failed or Other Auth Error
+    AuthServer -> "Microsoft Windows OneDrive Client": Error Message (e.g., Invalid Credentials, MFA Failure)
+end
+
+@enduml
\ No newline at end of file
diff --git a/docs/puml/uploadFile.png b/docs/puml/uploadFile.png
new file mode 100644
index 000000000..84f60d9c8
Binary files /dev/null and b/docs/puml/uploadFile.png differ
diff --git a/docs/puml/uploadFile.puml b/docs/puml/uploadFile.puml
new file mode 100644
index 000000000..b7c1218f3
--- /dev/null
+++ b/docs/puml/uploadFile.puml
@@ -0,0 +1,62 @@
+@startuml
+start
+partition "Upload File" {
+    :Log "fileToUpload";
+    :Check database for parent path;
+    if (parent path found?) then (yes)
+        if (drive ID not empty?) then (yes)
+            :Proceed;
+        else (no)
+            :Use defaultDriveId;
+        endif
+    else (no)
+        stop
+    endif
+    :Check if file exists locally;
+    if (file exists?) then (yes)
+        :Read local file;
+        if (can read file?) then (yes)
+            if (parent path in DB?) then (yes)
+                :Get file size;
+                if (file size <= max?) then (yes)
+                    :Check available space on OneDrive;
+                    if (space available?) then (yes)
+                        :Check if file exists on OneDrive;
+                        if (file exists online?) then (yes)
+                            :Save online metadata only;
+                            if (local file newer?) then (yes)
+                                :Local file is newer;
+                                :Upload file as changed local file;
+                            else (no)
+                                :Remote file is newer;
+                                :Perform safe backup;
+                                note right: Local data loss prevention
+                                :Upload renamed file as new file;
+                            endif
+                        else (no)
+                            :Attempt upload;
+                        endif
+                    else (no)
+                        :Log "Insufficient space";
+                    endif
+                else (no)
+                    :Log "File too large";
+                endif
+            else (no)
+                :Log "Parent path issue";
+            endif
+        else (no)
+            :Log "Cannot read file";
+        endif
+    else (no)
+        :Log "File disappeared locally";
+    endif
+    :Upload success or failure;
+    if (upload failed?) then (yes)
+        :Log failure;
+    else (no)
+        :Update cache;
+    endif
+}
+stop
+@enduml
diff --git a/docs/puml/uploadModifiedFile.png b/docs/puml/uploadModifiedFile.png
new file mode 100644
index 000000000..6b72220d8
Binary files /dev/null and b/docs/puml/uploadModifiedFile.png differ
diff --git a/docs/puml/uploadModifiedFile.puml b/docs/puml/uploadModifiedFile.puml
new file mode 100644
index 000000000..4e30b3683
--- /dev/null
+++ b/docs/puml/uploadModifiedFile.puml
@@ -0,0 +1,56 @@
+@startuml
+start
+partition "Upload Modified File" {
+    :Initialize API Instance;
+    :Check for Dry Run;
+    if (Is Dry Run?) then (yes)
+        :Create Fake Response;
+    else (no)
+        :Get Current Online Data;
+        if (Error Fetching Data) then (yes)
+            :Handle Errors;
+            if (Retryable Error?) then (yes)
+                :Retry Fetching Data;
+                detach
+            else (no)
+                :Log and Display Error;
+            endif
+        endif
+        if (filesize > 0 and valid latest online data) then (yes)
+            if (is online file newer) then (yes)
+                :Log that online is newer;
+                :Perform safe backup;
+                note left: Local data loss prevention
+                :Upload renamed local file as new file;
+            endif
+        endif
+        :Determine Upload Method;
+        if (Use Simple Upload?) then (yes)
+            :Perform Simple Upload;
+            if (Upload Error) then (yes)
+                :Handle Upload Errors and Retries;
+                if (Retryable Upload Error?) then (yes)
+                    :Retry Upload;
+                    detach
+                else (no)
+                    :Log and Display Upload Error;
+                endif
+            endif
+        else (no)
+            :Create Upload Session;
+            :Perform Upload via Session;
+            if (Session Upload Error) then (yes)
+                :Handle Session Upload Errors and Retries;
+                if (Retryable Session Error?) then (yes)
+                    :Retry Session Upload;
+                    detach
+                else (no)
+                    :Log and Display Session Error;
+                endif
+            endif
+        endif
+    endif
+    :Finalize;
+}
+stop
+@enduml
diff --git a/docs/SharePoint-Shared-Libraries.md b/docs/sharepoint-libraries.md
similarity index 71%
rename from docs/SharePoint-Shared-Libraries.md
rename to docs/sharepoint-libraries.md
index d1714d4ed..7cf199b7e 100644
--- a/docs/SharePoint-Shared-Libraries.md
+++ b/docs/sharepoint-libraries.md
@@ -1,21 +1,23 @@
 # How to configure OneDrive SharePoint Shared Library sync
-**WARNING:** Several users have reported files being overwritten causing data loss as a result of using this client with SharePoint Libraries when running as a systemd service.
-When this has been investigated, the following has been noted as potential root causes:
-* File indexing application such as Baloo File Indexer or Tracker3 constantly indexing your OneDrive data
-* The use of WPS Office and how it 'saves' files by deleting the existing item and replaces it with the saved data
-
-Additionally there could be a yet unknown bug with the client, however all debugging and data provided previously shows that an 'external' process to the 'onedrive' application modifies the files triggering the undesirable upload to occur.
-
-**Possible Preventative Actions:**
-* Disable all File Indexing for your SharePoint Library data. It is out of scope to detail on how you should do this.
-* Disable using a systemd service for syncing your SharePoint Library data.
-* Do not use WPS Office to edit your documents. Use OpenOffice or LibreOffice as these do not exhibit the same 'delete to save' action that WPS Office has.
-
-Additionally, please use caution when using this client with SharePoint.
-
-## Application Version
-Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
+> [!CAUTION]
+> Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
+
+> [!CAUTION]
+> Several users have reported files being overwritten causing data loss as a result of using this client with SharePoint Libraries when running as a systemd service.
+>
+> When this has been investigated, the following has been noted as potential root causes:
+> * File indexing applications such as Baloo File Indexer or Tracker3 constantly indexing your OneDrive data
+> * The use of WPS Office and how it 'saves' files by deleting the existing item and replacing it with the saved data. Do not use WPS Office.
+>
+> Additionally, there could be an as-yet-unknown bug with the client; however, all debugging and data provided previously shows that a process 'external' to the 'onedrive' application modifies the files, triggering the undesirable upload to occur.
+>
+> **Possible Preventative Actions:**
+> * Disable all File Indexing for your SharePoint Library data. It is out of scope to detail how you should do this.
+> * Disable using a systemd service for syncing your SharePoint Library data.
+> * Do not use WPS Office to edit your documents. Use OpenOffice or LibreOffice as these do not exhibit the same 'delete to save' action that WPS Office has.
+>
+> Additionally, the client has been 100% re-written from v2.5.0 onwards; the mechanism for saving data to SharePoint has been critically overhauled to negate the impacts where SharePoint will *modify* your file post upload, breaking file integrity, as the file you have locally is no longer the file that is stored online. Please read https://github.com/OneDrive/onedrive-api-docs/issues/935 for relevant details.
 
 ## Process Overview
 Syncing a OneDrive SharePoint library requires additional configuration for your 'onedrive' client:
@@ -26,7 +28,8 @@ Syncing a OneDrive SharePoint library requires additional configuration for your 'onedrive' client:
 5. Test the configuration using '--dry-run'
 6. Sync the SharePoint Library as required
 
-**Note:** The `--get-O365-drive-id` process below requires a fully configured 'onedrive' configuration so that the applicable Drive ID for the given Office 365 SharePoint Shared Library can be determined. It is highly recommended that you do not use the application 'default' configuration directory for any SharePoint Site, and configure separate items for each site you wish to use.
+> [!IMPORTANT]
+> The `--get-sharepoint-drive-id` process below requires a fully configured 'onedrive' configuration so that the applicable Drive ID for the given SharePoint Shared Library can be determined. It is highly recommended that you do not use the application 'default' configuration directory for any SharePoint Site, and configure separate items for each site you wish to use.
 
 ## 1. Listing available OneDrive SharePoint Libraries
 Login to the OneDrive web interface and determine which shared library you wish to configure the client for:
@@ -35,7 +38,7 @@ Login to the OneDrive web interface and determine which shared library you wish
 ## 2. Query OneDrive API to obtain required configuration details
 Run the following command using the 'onedrive' client to query the OneDrive API to obtain the required 'drive_id' of the SharePoint Library that you wish to sync:
 ```text
-onedrive --get-O365-drive-id '<your library name>'
+onedrive --get-sharepoint-drive-id '<your library name>'
 ```
 This will return something similar to the following:
 ```text
@@ -78,7 +81,8 @@ Create a new local folder to store the SharePoint Library data in:
 mkdir ~/SharePoint_My_Library_Name
 ```
 
-**Note:** Do not use spaces in the directory name, use '_' as a replacement
+> [!TIP]
+> Do not use spaces in the directory name, use '_' as a replacement
 
 ## 4. Configure SharePoint Library config file with the required 'drive_id' & 'sync_dir' options
 Download a copy of the default configuration file by downloading this file from GitHub and saving this file in the directory created above:
@@ -97,7 +101,8 @@
 drive_id = "insert the drive_id value from above here"
 ```
 The OneDrive client will now be configured to sync this SharePoint shared library to your local system and the location you have configured.
 
-**Note:** After changing `drive_id`, you must perform a full re-synchronization by adding `--resync` to your existing command line.
+> [!IMPORTANT]
+> After changing `drive_id`, you must perform a full re-synchronization by adding `--resync` to your existing command line.
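+
+For example, a full re-synchronisation for this configuration would be run as follows (illustrative only; adjust the `--confdir` value to match the configuration directory you created):
+```text
+onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --resync
+```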
## 5. Validate and Test the configuration Validate your new configuration using the `--display-config` option to validate you have configured the application correctly: @@ -110,7 +115,8 @@ Test your new configuration using the `--dry-run` option to validate the applica onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbose --dry-run ``` -**Note:** As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration. +> [!IMPORTANT] +> As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration. ## 6. Sync the SharePoint Library as required Sync the SharePoint Library to your system with either `--synchronize` or `--monitor` operations: @@ -122,7 +128,8 @@ onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbos onedrive --confdir="~/.config/SharePoint_My_Library_Name" --monitor --verbose ``` -**Note:** As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration. +> [!IMPORTANT] +> As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration. ## 7. Enable custom systemd service for SharePoint Library Systemd can be used to automatically run this configuration in the background, however, a unique systemd service will need to be setup for this SharePoint Library instance @@ -163,10 +170,11 @@ Example: ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/SharePoint_My_Library_Name" ``` -**Note:** When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be expanded. +> [!IMPORTANT] +> When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be manually expanded when editing your systemd file. ### Step 3: Enable the new systemd service -Once the file is correctly editied, you can enable the new systemd service using the following commands. +Once the file is correctly edited, you can enable the new systemd service using the following commands. #### Red Hat Enterprise Linux, CentOS Linux ```text diff --git a/docs/terms-of-service.md b/docs/terms-of-service.md index cdf7c4328..ad2cccc34 100644 --- a/docs/terms-of-service.md +++ b/docs/terms-of-service.md @@ -38,7 +38,7 @@ OneDrive Client for Linux is not responsible for the Microsoft OneDrive Service To the fullest extent permitted by law, we shall not be liable for any direct, indirect, incidental, special, consequential, or punitive damages, or any loss of profits or revenues, whether incurred directly or indirectly, or any loss of data, use, goodwill, or other intangible losses, resulting from (a) your use or inability to use the Service, or (b) any other matter relating to the Service. -This limitiation of liability explicitly relates to the use of the OneDrive Client for Linux software and does not affect your rights under the GPLv3. +This limitation of liability explicitly relates to the use of the OneDrive Client for Linux software and does not affect your rights under the GPLv3. ## 7. 
Changes to Terms
diff --git a/docs/ubuntu-package-install.md b/docs/ubuntu-package-install.md
index 292a822cb..d530305fe 100644
--- a/docs/ubuntu-package-install.md
+++ b/docs/ubuntu-package-install.md
@@ -1,24 +1,30 @@
 # Installation of 'onedrive' package on Debian and Ubuntu
-This document covers the appropriate steps to install the 'onedrive' client using the provided packages for Debian and Ubuntu.
-
-#### Important information for all Ubuntu and Ubuntu based distribution users:
-This information is specifically for the following platforms and distributions:
-
-* Lubuntu
-* Linux Mint
-* POP OS
-* Peppermint OS
-* Raspbian
-* Ubuntu
-
-Whilst there are [onedrive](https://packages.ubuntu.com/search?keywords=onedrive&searchon=names&suite=all&section=all) Universe packages available for Ubuntu, do not install 'onedrive' from these Universe packages. The default Ubuntu Universe packages are out-of-date and are not supported and should not be used.
+This document outlines the steps for installing the 'onedrive' client on Debian, Ubuntu, and their derivatives using the OpenSuSE Build Service Packages.
+
+> [!CAUTION]
+> This information is specifically for the following platforms and distributions:
+> * Debian
+> * Deepin
+> * Elementary OS
+> * Kali Linux
+> * Lubuntu
+> * Linux Mint
+> * Pop!_OS
+> * Peppermint OS
+> * Raspbian | Raspberry Pi OS
+> * Ubuntu | Kubuntu | Xubuntu | Ubuntu Mate
+> * Zorin OS
+>
+> Although packages for the 'onedrive' client are available through distribution repositories, it is strongly advised against installing them. These distribution-provided packages are outdated, unsupported, and contain bugs and issues that have already been resolved in newer versions. They should not be used.
 
 ## Determine which instructions to use
-Ubuntu and its clones are based on various different releases, thus, you must use the correct instructions below, otherwise you may run into package dependancy issues and will be unable to install the client.
+Ubuntu and its clones are based on various releases; thus, you must use the correct instructions below, otherwise you may run into package dependency issues and be unable to install the client.
 
 ### Step 1: Remove any configured PPA and associated 'onedrive' package and systemd service files
-Many Internet 'help' pages provide inconsistent details on how to install the OneDrive Client for Linux. A number of these websites continue to point users to install the client via the yann1ck PPA repository however this PPA no longer exists and should not be used.
+
+#### Step 1a: Remove PPA if configured
+Many Internet 'help' pages provide inconsistent details on how to install the OneDrive Client for Linux. A number of these websites continue to point users to install the client via the yann1ck PPA repository; however, this PPA no longer exists and should not be used. If you have previously configured, or attempted to add, this PPA, it needs to be removed.
 
 To remove the PPA repository and the older client, perform the following actions:
 ```text
@@ -26,10 +32,22 @@
 sudo apt remove onedrive
 sudo add-apt-repository --remove ppa:yann1ck/onedrive
 ```
 
-Additionally, Ubuntu and its clones have a bad habit of creating a 'default' systemd service file when installing the 'onedrive' package so that the client will automatically run the client post being authenticated. This systemd entry is erroneous and needs to be removed.
+#### Step 1b: Remove errant systemd service file installed by PPA or distribution package
+
+Additionally, the distribution packages have a bad habit of creating a 'default' systemd service file when installing the 'onedrive' package, so that the client will automatically run once the user has been authenticated:
 ```
 Created symlink /etc/systemd/user/default.target.wants/onedrive.service → /usr/lib/systemd/user/onedrive.service.
 ```
+This systemd entry is erroneous and needs to be removed. If this erroneous systemd link is not removed, you increase the risk of encountering the following error message:
+```
+Opening the item database ...
+
+ERROR: onedrive application is already running - check system process list for active application instances
+  - Use 'sudo ps aufxw | grep onedrive' to potentially determine active running process
+
+Waiting for all internal threads to complete before exiting application
+```
+
 To remove this symbolic link, run the following command:
 ```
 sudo rm /etc/systemd/user/default.target.wants/onedrive.service
@@ -141,6 +159,7 @@ If required, review the table below based on your 'lsb_release' information to p
 | Debian 10 | You must build from source or upgrade your Operating System to Debian 12 |
 | Debian 11 | Use [Debian 11](#distribution-debian-11) instructions below |
 | Debian 12 | Use [Debian 12](#distribution-debian-12) instructions below |
+| Debian Sid | Refer to https://packages.debian.org/sid/onedrive for assistance |
 | Raspbian GNU/Linux 10 | You must build from source or upgrade your Operating System to Raspbian GNU/Linux 12 |
 | Raspbian GNU/Linux 11 | Use [Debian 11](#distribution-debian-11) instructions below |
 | Raspbian GNU/Linux 12 | Use [Debian 12](#distribution-debian-12) instructions below |
@@ -152,6 +171,13 @@
 | Ubuntu 22.10 / Kinetic | Use [Ubuntu 22.10](#distribution-ubuntu-2210) instructions below |
 | Ubuntu 23.04 / Lunar | Use [Ubuntu 23.04](#distribution-ubuntu-2304) instructions below |
 | Ubuntu 23.10 / Mantic | Use [Ubuntu 23.10](#distribution-ubuntu-2310) instructions below |
+| Ubuntu 24.04 / Noble | Use [Ubuntu 24.04](#distribution-ubuntu-2404) instructions below |
+
+> [!IMPORTANT]
+> If your Linux distribution and release is not in the table above, you have 2 options:
+>
+> 1. Compile the application from source. Refer to install.md (Compilation & Installation) for assistance.
+> 2. Raise a support case with your Linux Distribution to provide you with an applicable package you can use.
 
 ## Distribution Package Install Instructions
 
@@ -398,6 +424,32 @@ Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
 
 #### Step 5: Read 'Known Issues' with these packages
 Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
+### Distribution: Ubuntu 24.04 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|❌|✔|❌|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_24.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_24.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. ## Known Issues with Installing from the above packages diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 000000000..3fd95e93e --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,1027 @@ +# Using the OneDrive Client for Linux +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. 
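+
+For example (the version string shown is illustrative only; yours will differ):
+```text
+onedrive --version
+onedrive vX.Y.Z-A-bcdefghi
+```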
+
+## Table of Contents
+
+- [Important Notes](#important-notes)
+  - [Upgrading from the 'skilion' Client](#upgrading-from-the-skilion-client)
+  - [Guidelines for Local File and Folder Naming in the Synchronisation Directory](#guidelines-for-local-file-and-folder-naming-in-the-synchronisation-directory)
+  - [Compatibility with curl](#compatibility-with-curl)
+- [First Steps](#first-steps)
+  - [Authorise the Application with Your Microsoft OneDrive Account](#authorise-the-application-with-your-microsoft-onedrive-account)
+  - [Display Your Applicable Runtime Configuration](#display-your-applicable-runtime-configuration)
+  - [Understanding OneDrive Client for Linux Operational Modes](#understanding-onedrive-client-for-linux-operational-modes)
+    - [Standalone Synchronisation Operational Mode (Standalone Mode)](#standalone-synchronisation-operational-mode-standalone-mode)
+    - [Ongoing Synchronisation Operational Mode (Monitor Mode)](#ongoing-synchronisation-operational-mode-monitor-mode)
+  - [Increasing application logging level](#increasing-application-logging-level)
+  - [Using 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive](#using-client-side-filtering-rules-to-determine-what-should-be-synced-with-microsoft-onedrive)
+  - [Testing your configuration](#testing-your-configuration)
+  - [Performing a sync with Microsoft OneDrive](#performing-a-sync-with-microsoft-onedrive)
+  - [Performing a single directory synchronisation with Microsoft OneDrive](#performing-a-single-directory-synchronisation-with-microsoft-onedrive)
+  - [Performing a 'one-way' download synchronisation with Microsoft OneDrive](#performing-a-one-way-download-synchronisation-with-microsoft-onedrive)
+  - [Performing a 'one-way' upload synchronisation with Microsoft OneDrive](#performing-a-one-way-upload-synchronisation-with-microsoft-onedrive)
+  - [Performing a selective synchronisation via 'sync_list' file](#performing-a-selective-synchronisation-via-sync_list-file)
+  - [Performing a --resync](#performing-a---resync)
+  - [Performing a --force-sync without a --resync or changing your configuration](#performing-a---force-sync-without-a---resync-or-changing-your-configuration)
+  - [Enabling the Client Activity Log](#enabling-the-client-activity-log)
+    - [Client Activity Log Example:](#client-activity-log-example)
+    - [Client Activity Log Differences](#client-activity-log-differences)
+  - [GUI Notifications](#gui-notifications)
+  - [Handling a Microsoft OneDrive Account Password Change](#handling-a-microsoft-onedrive-account-password-change)
+  - [Determining the synchronisation result](#determining-the-synchronisation-result)
+- [Frequently Asked Configuration Questions](#frequently-asked-configuration-questions)
+  - [How to change the default configuration of the client?](#how-to-change-the-default-configuration-of-the-client)
+  - [How to change where my data from Microsoft OneDrive is stored?](#how-to-change-where-my-data-from-microsoft-onedrive-is-stored)
+  - [How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive?](#how-to-change-what-file-and-directory-permissions-are-assigned-to-data-that-is-downloaded-from-microsoft-onedrive)
+  - [How are uploads and downloads managed?](#how-are-uploads-and-downloads-managed)
+  - [How to only sync a specific directory?](#how-to-only-sync-a-specific-directory)
+  - [How to 'skip' files from syncing?](#how-to-skip-files-from-syncing)
+  - [How to 'skip' directories from syncing?](#how-to-skip-directories-from-syncing)
+  - [How to 'skip' .files and .folders from syncing?](#how-to-skip-files-and-folders-from-syncing)
+  - [How to 'skip' files larger than a certain size from syncing?](#how-to-skip-files-larger-than-a-certain-size-from-syncing)
+  - [How to 'rate limit' the application to control bandwidth consumed for upload & download operations?](#how-to-rate-limit-the-application-to-control-bandwidth-consumed-for-upload--download-operations)
+  - [How can I prevent my local disk from filling up?](#how-can-i-prevent-my-local-disk-from-filling-up)
+  - [How does the client handle symbolic links?](#how-does-the-client-handle-symbolic-links)
+  - [How to synchronise OneDrive Personal Shared Folders?](#how-to-synchronise-onedrive-personal-shared-folders)
+  - [How to synchronise OneDrive Business Shared Items (Files and Folders)?](#how-to-synchronise-onedrive-business-shared-items-files-and-folders)
+  - [How to synchronise SharePoint / Office 365 Shared Libraries?](#how-to-synchronise-sharepoint--office-365-shared-libraries)
+  - [How to Create a Shareable Link?](#how-to-create-a-shareable-link)
+  - [How to Synchronise Both Personal and Business Accounts at once?](#how-to-synchronise-both-personal-and-business-accounts-at-once)
+  - [How to Synchronise Multiple SharePoint Libraries simultaneously?](#how-to-synchronise-multiple-sharepoint-libraries-simultaneously)
+  - [How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?](#how-to-receive-real-time-changes-from-microsoft-onedrive-service-instead-of-waiting-for-the-next-sync-period)
+  - [How to initiate the client as a background service?](#how-to-initiate-the-client-as-a-background-service)
+    - [OneDrive service running as root user via init.d](#onedrive-service-running-as-root-user-via-initd)
+    - [OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-root-user-via-systemd-arch-ubuntu-debian-opensuse-fedora)
+    - [OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)](#onedrive-service-running-as-root-user-via-systemd-red-hat-enterprise-linux-centos-linux)
+    - [OneDrive service running as a non-root user via systemd (All Linux Distributions)](#onedrive-service-running-as-a-non-root-user-via-systemd-all-linux-distributions)
+    - [OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-a-non-root-user-via-systemd-with-notifications-enabled-arch-ubuntu-debian-opensuse-fedora)
+    - [OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)](#onedrive-service-running-as-a-non-root-user-via-runit-antix-devuan-artix-void)
+  - [How to start a user systemd service at boot without user login?](#how-to-start-a-user-systemd-service-at-boot-without-user-login)
+
+## Important Notes
+### Upgrading from the 'skilion' Client
+The 'skilion' version has a significant number of issues in how it manages the local sync state. When upgrading from the 'skilion' client to this client, it's recommended to stop any service or OneDrive process that may be running. Once all OneDrive services are stopped, make sure to remove any old client binaries from your system.
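+
+For example, the steps below sketch this process (illustrative only: the systemd unit names assume the services shipped with this client, and the old binary location depends on how the 'skilion' client was installed on your system):
+```text
+# Stop any user-level service
+systemctl --user stop onedrive.service
+# Stop any system-level service (replace <username> with the applicable user)
+sudo systemctl stop onedrive@<username>.service
+# Locate and remove the old client binary (path shown is an example)
+which onedrive
+sudo rm /usr/local/bin/onedrive
+```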
+ +Furthermore, if you're using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as shown below: + +**Invalid 'skilion' configuration:** +```text +skip_file = ".*|~*" +``` +**Minimum valid configuration:** +```text +skip_file = "~*" +``` +**Default valid configuration:** +```text +skip_file = "~*|.~*|*.tmp|*.swp|*.partial" +``` + +Avoid using a 'skip_file' entry of `.*` as it may prevent the correct detection of local changes to process. The configuration values for 'skip_file' will be checked for validity, and if there is an issue, the following error message will be displayed: +```text +ERROR: Invalid skip_file entry '.*' detected +``` + +### Guidelines for Local File and Folder Naming in the Synchronisation Directory + +To ensure seamless synchronisation with Microsoft OneDrive, it's critical to adhere strictly to the prescribed naming conventions for your files and folders within the sync directory. The guidelines detailed below are designed to preempt potential sync failures by aligning with Microsoft Windows Naming Conventions, coupled with specific OneDrive restrictions. + +> [!WARNING] +> Failure to comply will result in synchronisation being bypassed for the offending files or folders, necessitating a rename of the local item to establish sync compatibility. + +#### Key Restrictions and Limitations +* Invalid Characters: + * Avoid using the following characters in names of files and folders: `" * : < > ? / \ |` + * Names should not start or end with spaces + * Names should not end with a fullstop / period character `.` +* Prohibited Names: + * Certain names are reserved and cannot be used for files or folders: `.lock`, `CON`, `PRN`, `AUX`, `NUL`, `COM0 - COM9`, `LPT0 - LPT9`, `desktop.ini`, any filename starting with `~$` + * The text sequence `_vti_` cannot appear anywhere in a file or directory name + * A file and folder called `forms` is unsupported at the root level of a synchronisation directory +* Path Length + * All files and folders stored in your 'sync_dir' (typically `~/OneDrive`) must not have a path length greater than: + * 400 characters for OneDrive Business & SharePoint + * 430 characters for OneDrive Personal + +Should a file or folder infringe upon these naming conventions or restrictions, synchronisation will skip the item, indicating an invalid name according to Microsoft Naming Convention. The only remedy is to rename the offending item. This constraint is by design and remains firm. + +> [!TIP] +> UTF-16 provides a capability to use alternative characters to work around the restrictions and limitations imposed by Microsoft OneDrive. An example of some replacement characters are below: +> | Standard Invalid Character | Potential UTF-16 Replacement Character | +> |--------------------|------------------------------| +> | . | ․ (One Dot Leader, `\u2024`) | +> | : | ː (Modifier Letter Triangular Colon, `\u02D0`) | +> | \| | │ (Box Drawings Light Vertical, `\u2502`) | + +> [!CAUTION] +> The last critically important point is that Microsoft OneDrive does not adhere to POSIX standards, which fundamentally impacts naming conventions. In Unix environments (which are POSIX compliant), files and folders can exist simultaneously with identical names if their capitalisation differs. **This is not possible on Microsoft OneDrive.** If such a scenario occurs, the OneDrive Client for Linux will encounter a conflict, preventing the synchronisation of the conflicting file or folder. 
This constraint is a conscious design choice and is immutable. To avoid synchronisation issues, preemptive renaming of any conflicting local files or folders is advised.
+
+#### Further reading:
+The above guidelines are essential for maintaining synchronisation integrity with Microsoft OneDrive. Adhering to them ensures your files and folders sync without issue. For additional details, consult the following resources:
+* [Microsoft Windows Naming Conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file)
+* [Restrictions and limitations in OneDrive and SharePoint](https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa)
+
+**Adherence to these guidelines is not optional but mandatory to avoid sync disruptions.**
+
+### Compatibility with curl
+If your system uses curl < 7.47.0, curl will default to HTTP/1.1 for HTTPS operations, and the client will follow suit, using HTTP/1.1.
+
+For systems running curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS, but it will still use HTTP/1.1 as the default for these operations. The client will employ HTTP/1.1 for HTTPS operations as well.
+
+However, if your system employs curl >= 7.62.0, curl will, by default, prioritise HTTP/2 over HTTP/1.1. In this case, the client will utilise HTTP/2 for most HTTPS operations and stick with HTTP/1.1 for others. Please note that this distinction is governed by the OneDrive platform, not our client.
+
+If you explicitly want to use HTTP/1.1, you can do so by using the `--force-http-11` flag or setting the configuration option `force_http_11 = "true"`. This will compel the application to exclusively use HTTP/1.1. Otherwise, all client operations will align with the curl default settings for your distribution.
+
+## First Steps
+### Authorise the Application with Your Microsoft OneDrive Account
+Once you've installed the application, you'll need to authorise it using your Microsoft OneDrive Account. This can be done by simply running the application without any additional command switches.
+
+Please be aware that some companies may require you to explicitly add this app to the [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an approved app to your apps, click on the ellipsis in the top-right corner and select "Request new apps." On the next page, you can add this app. If it's not listed, you should make a request through your IT department.
+
+When you run the application for the first time, you'll be prompted to open a specific URL using your web browser, where you'll need to log in to your Microsoft Account and grant the application permission to access your files. After granting permission to the application, you'll be redirected to a blank page. Simply copy the URI from the blank page and paste it into the application.
+
+**Example:**
+```text
+[user@hostname ~]$ onedrive
+Authorise this app by visiting:
+
+https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient
+
+Enter the response URI from your browser: https://login.microsoftonline.com/common/oauth2/nativeclient?code=
+
+The application has been successfully authorised, but no additional command switches were provided.
+
+Please use 'onedrive --help' for further assistance on how to run this application.
+``` + +> [!IMPORTANT] +> Without additional input or configuration, the OneDrive Client for Linux will automatically adhere to default application settings during synchronisation processes with Microsoft OneDrive. + +### Display Your Applicable Runtime Configuration +To verify the configuration that the application will use, use the following command: +```text +onedrive --display-config +``` +This command will display all the relevant runtime interpretations of the options and configurations you are using. An example output is as follows: +```text +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +onedrive version = vX.Y.Z-A-bcdefghi +Config path = /home/user/.config/onedrive +Config file found in config path = true +Config option 'drive_id' = +Config option 'sync_dir' = ~/OneDrive +... +Config option 'webhook_enabled' = false +``` + +> [!IMPORTANT] +> When using multiple OneDrive accounts, it's essential to always use the `--confdir` command followed by the appropriate configuration directory. This ensures that the specific configuration you intend to view is correctly displayed. + +### Understanding OneDrive Client for Linux Operational Modes +There are two modes of operation when using the client: +1. Standalone sync mode that performs a single sync action against Microsoft OneDrive. +2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive. + +> [!IMPORTANT] +> The default setting for the OneDrive Client on Linux will sync all data from your Microsoft OneDrive account to your local device. To avoid this and select specific items for synchronisation, you should explore setting up 'Client Side Filtering' rules. This will help you manage and specify what exactly gets synced with your Microsoft OneDrive account. + +#### Standalone Synchronisation Operational Mode (Standalone Mode) +This method of use can be employed by issuing the following option to the client: +```text +onedrive --sync +``` +For simplicity, this can be shortened to the following: +```text +onedrive -s +``` + +#### Ongoing Synchronisation Operational Mode (Monitor Mode) +This method of use can be utilised by issuing the following option to the client: +```text +onedrive --monitor +``` +For simplicity, this can be shortened to the following: +```text +onedrive -m +``` +> [!NOTE] +> This method of use is used when enabling a systemd service to run the application in the background. + +Two common errors can occur when using monitor mode: +* Initialisation failure +* Unable to add a new inotify watch + +Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low: +* `fs.file-max` +* `fs.inotify.max_user_watches` + +To determine what the existing values are on your system, use the following commands: +```text +sysctl fs.file-max +sysctl fs.inotify.max_user_watches +``` +Alternatively, when running the client with increased verbosity (see below), the client will display what the current configured system maximum values are: +```text +... +All application operations will be performed in: /home/user/OneDrive +OneDrive synchronisation interval (seconds): 300 +Maximum allowed open files: 393370 <-- This is the current operating system fs.file-max value +Maximum allowed inotify watches: 29374 <-- This is the current operating system fs.inotify.max_user_watches value +Initialising filesystem inotify monitoring ... +... 
+```
+To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir':
+```text
+cd /path/to/your/sync/dir
+ls -laR | wc -l
+```
+
+To make a change to these variables using your file and folder count, use the following process:
+```text
+sudo sysctl fs.file-max=<new_value>
+sudo sysctl fs.inotify.max_user_watches=<new_value>
+```
+Once these values are changed, you will need to restart your client so that the new values are detected and used.
+
+To make these changes permanent on your system, refer to your OS reference documentation.
+
+### Increasing application logging level
+When running a sync (`--sync`) or using monitor mode (`--monitor`), it may be desirable to see additional information regarding the progress and operation of the client. For example, for a `--sync` command, this would be:
+```text
+onedrive --sync --verbose
+```
+For simplicity, this can be shortened to the following:
+```
+onedrive -s -v
+```
+
+> [!IMPORTANT]
+> Adding `--verbose` twice will enable debug logging output. This is generally required when raising a bug report or needing to understand a problem.
+
+### Using 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive
+Client Side Filtering in the context of the OneDrive Client for Linux refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. The OneDrive Client for Linux offers several configuration options to facilitate this:
+
+* **skip_dir:** This option allows the user to specify directories that should not be synchronised with OneDrive. It's particularly useful for omitting large or irrelevant directories from the sync process.
+
+* **skip_dotfiles:** Dotfiles, usually configuration files or scripts, can be excluded from the sync. This is useful for users who prefer to keep these files local.
+
+* **skip_file:** Specific files can be excluded from synchronisation using this option. It provides flexibility in selecting which files are essential for cloud storage.
+
+* **skip_symlinks:** Symlinks often point to files outside the OneDrive directory or to locations that are not relevant for cloud storage. This option prevents them from being included in the sync.
+
+Additionally, the OneDrive Client for Linux allows the implementation of Client Side Filtering rules through a 'sync_list' file. This file explicitly states which directories or files should be included in the synchronisation. By default, any item not listed in the 'sync_list' file is excluded. This method offers a more granular approach to synchronisation, ensuring that only the necessary data is transferred to and from Microsoft OneDrive.
+
+These configurable options and the 'sync_list' file provide users with the flexibility to tailor the synchronisation process to their specific needs, conserving bandwidth and storage space while ensuring that important files are always backed up and accessible.
+
+> [!IMPORTANT]
+> After changing any Client Side Filtering rule, you must perform a full re-synchronisation.
+
+### Testing your configuration
+You can test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded, or removed; however, the application will display what 'would' have occurred.
For example:
+```text
+onedrive --sync --verbose --dry-run
+Reading configuration file: /home/user/.config/onedrive/config
+Configuration file successfully loaded
+Using 'user' Config Dir: /home/user/.config/onedrive
+DRY-RUN Configured. Output below shows what 'would' have occurred.
+DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations
+DRY RUN: Not creating backup config file as --dry-run has been used
+DRY RUN: Not updating hash files as --dry-run has been used
+Checking Application Version ...
+Attempting to initialise the OneDrive API ...
+Configuring Global Azure AD Endpoints
+The OneDrive API was initialised successfully
+Opening the item database ...
+Sync Engine Initialised with new Onedrive API instance
+Application version: vX.Y.Z-A-bcdefghi
+Account Type: 
+Default Drive ID: 
+Default Root ID: 
+Remaining Free Space: 1058488129 KB
+All application operations will be performed in: /home/user/OneDrive
+Fetching items from the OneDrive API for Drive ID:  ..
+...
+Performing a database consistency and integrity check on locally stored data ...
+Processing DB entries for this Drive ID: 
+Processing ~/OneDrive
+The directory has not changed
+...
+Scanning local filesystem '~/OneDrive' for new data to upload ...
+...
+Performing a final true-up scan of online data from Microsoft OneDrive
+Fetching items from the OneDrive API for Drive ID:  ..
+
+Sync with Microsoft OneDrive is complete
+```
+
+### Performing a sync with Microsoft OneDrive
+By default, all files are downloaded in `~/OneDrive`. This download location is controlled by the 'sync_dir' config option.
+
+After authorising the application, a sync of your data can be performed by running:
+```text
+onedrive --sync
+```
+This will synchronise files from your Microsoft OneDrive account to your `~/OneDrive` local directory or to your specified 'sync_dir' location.
+
+> [!TIP]
+> If you prefer to use your local files as stored in `~/OneDrive` as your 'source of truth,' use the following sync command:
+> ```text
+> onedrive --sync --local-first
+> ```
+
+### Performing a single directory synchronisation with Microsoft OneDrive
+In some cases, it may be desirable to synchronise a single directory under ~/OneDrive without having to change your client configuration. To do this, use the following command:
+```text
+onedrive --sync --single-directory '<dir_name>'
+```
+
+> [!TIP]
+> If the full path is `~/OneDrive/mydir`, the command would be `onedrive --sync --single-directory 'mydir'`
+
+### Performing a 'one-way' download synchronisation with Microsoft OneDrive
+In some cases, it may be desirable to 'download only' from Microsoft OneDrive. To do this, use the following command:
+```text
+onedrive --sync --download-only
+```
+This will download all the content from Microsoft OneDrive to your `~/OneDrive` location. Any files that are deleted online remain locally and will not be removed.
+
+However, in some circumstances, it may be desirable to clean up local files that have been removed online. To do this, use the following command:
+
+```text
+onedrive --sync --download-only --cleanup-local-files
+```
+
+### Performing a 'one-way' upload synchronisation with Microsoft OneDrive
+In certain scenarios, you might need to perform an 'upload only' operation to Microsoft OneDrive. This means that you'll be uploading data to OneDrive, but not synchronising any changes or additions made elsewhere.
Use this command to initiate an upload-only synchronisation: + +```text +onedrive --sync --upload-only +``` + +> [!IMPORTANT] +> - The 'upload only' mode operates independently of OneDrive's online content. It doesn't check or sync with what's already stored on OneDrive. It only uploads data from the local client. +> - If a local file or folder that was previously synchronised with Microsoft OneDrive is now missing locally, it will be deleted from OneDrive during this operation. + +> [!TIP] +> If you have the requirement to ensure that all data on Microsoft OneDrive remains intact (e.g., preventing deletion of items on OneDrive if they're deleted locally), use this command instead: +> ```text +> onedrive --sync --upload-only --no-remote-delete +> ``` + +> [!IMPORTANT] +> - `--upload-only`: This command will only upload local changes to OneDrive. These changes can include additions, modifications, moves, and deletions of files and folders. +> - `--no-remote-delete`: Adding this command prevents the deletion of any items on OneDrive, even if they're deleted locally. This creates a one-way archive on OneDrive where files are only added and never removed. + +### Performing a selective synchronisation via 'sync_list' file +Selective synchronisation allows you to sync only specific files and directories. +To enable selective synchronisation, create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`). + +> [!IMPORTANT] +> Important points to understand before using 'sync_list'. +> * 'sync_list' excludes _everything_ by default on OneDrive. +> * 'sync_list' follows an _"exclude overrides include"_ rule, and requires **explicit inclusion**. +> * Order exclusions before inclusions, so that anything _specifically included_ is included. +> * How and where you place your `/` matters for excludes and includes in subdirectories. + +Each line of the 'sync_list' file represents a relative path from your `sync_dir`. All files and directories not matching any line of the file will be skipped during all operations. + +Additionally, the use of `/` is critically important to determine how a rule is interpreted. It is very similar to `**` wildcards, for those that are familiar with globbing patterns. +Here is an example of `sync_list`: +```text +# sync_list supports comments +# +# The ordering of entries is highly recommended - exclusions before inclusions +# +# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in OneDrive +!Documents/temp* +# +# Exclude secret data folder in root directory only +!/Secret_data/* +# +# Include everything else in root directory +/* +# +# Include my Backup folder(s) or file(s) anywhere on OneDrive +Backup +# +# Include my Backup folder in root +/Backup/ +# +# Include Documents folder(s) anywhere in OneDrive +Documents/ +# +# Include all PDF files in Documents folder(s), anywhere in OneDrive +Documents/*.pdf +# +# Include this single document in Documents folder(s), anywhere in OneDrive +Documents/latest_report.docx +# +# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in OneDrive +Work/Project* +# +# Include the 'Blog' directory, but exclude 'Parent' and any other children of the parent +# . 
+# ├── Parent
+# │   ├── Blog
+# │   │   ├── random_files
+# │   │   │   ├── CZ9aZRM7U1j7pM21fH0MfP2gywlX7bqW
+# │   │   │   └── k4GptfTBE2z2meRFqjf54tnvSXcXe30Y
+# │   │   └── random_images
+# │   │       ├── cAuQMfX7qsMIOmzyQYdELikZwsXeCYsL
+# │   │       └── GqjZuo7UBB0qjYM2WUcZXOvToAhCQ29M
+# │   └── other_stuffs
+/Parent/Blog/*
+#
+# Include all "notes.txt" files, anywhere in OneDrive
+notes.txt
+#
+# Include /Blender in the ~/OneDrive root but not if elsewhere in OneDrive
+/Blender
+#
+# Include these directories (or files) in 'Pictures' folder(s), that have a space in their name
+Pictures/Camera Roll
+Pictures/Saved Pictures
+#
+# Include these names if they match any file or folder
+Cinema Soc
+Codes
+Textbooks
+Year 2
+```
+The following are supported for pattern matching and exclusion rules:
+* Use the `*` to wildcard select any characters to match for the item to be included
+* Use either `!` or `-` characters at the start of the line to exclude an otherwise included item
+
+> [!IMPORTANT]
+> After changing the sync_list, you must perform a full re-synchronisation by adding `--resync` to your existing command line - for example: `onedrive --sync --resync`
+
+> [!TIP]
+> When enabling the use of 'sync_list,' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement.
+
+> [!TIP]
+> In some circumstances, it may be required to sync all the individual files within the 'sync_dir', but due to frequent name change / addition / deletion of these files, it is not desirable to constantly change the 'sync_list' file to include / exclude these files and force a resync. To assist with this, enable the following in your configuration file:
+> ```text
+> sync_root_files = "true"
+> ```
+> This will tell the application to sync any file that it finds in your 'sync_dir' root by default, negating the need to constantly update your 'sync_list' file.
+
+### Performing a --resync
+If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration:
+* drive_id
+* sync_dir
+* skip_file
+* skip_dir
+* skip_dotfiles
+* skip_symlinks
+* sync_business_shared_items
+* Creating, Modifying or Deleting the 'sync_list' file
+
+Additionally, you might opt for a `--resync` if you think it's necessary to ensure your data remains in sync. If you're using this switch simply because you're unsure of the sync status, you can check the actual sync status using `--display-sync-status`.
+
+When you use `--resync`, you'll encounter the following warning and advice:
+```text
+Using --resync will delete your local 'onedrive' client state, so there won't be a record of your current 'sync status.'
+This may potentially overwrite local versions of files with older versions downloaded from OneDrive, leading to local data loss.
+If in doubt, back up your local data before using --resync.
+
+Are you sure you want to proceed with --resync? [Y/N] 
+```
+
+To proceed with `--resync`, you must type 'y' or 'Y' to allow the application to continue.
+
+> [!CAUTION]
+> It's highly recommended to use `--resync` only if the application prompts you to do so. Don't blindly set the application to start with `--resync` as your default option. 
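+
+> [!TIP]
+> If you are unsure whether a `--resync` is actually required, first check the current sync state with a read-only status query:
+> ```text
+> onedrive --display-sync-status
+> ```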
+
+> [!IMPORTANT]
+> In certain automated environments (assuming you know what you're doing, given the automation involved), to avoid the 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt.
+
+### Performing a --force-sync without a --resync or changing your configuration
+In some situations, you may have configured the application to skip certain files and folders using 'skip_file' and 'skip_dir' configuration. You then may have a requirement to actually sync one of these items, but do not wish to modify your configuration, nor perform an entire `--resync` twice (once to pick up the configuration change, and once more after reverting it).
+
+The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync`.
+
+To use this option, you must run the application manually in the following manner:
+```text
+onedrive --sync --single-directory '<dir_name>' --force-sync
+```
+
+When using `--force-sync`, you'll encounter the following warning and advice:
+```text
+WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used
+
+Using --force-sync will reconfigure the application to use defaults. This may have unknown future impacts.
+By proceeding with this option, you accept any impacts, including potential data loss resulting from using --force-sync.
+
+Are you sure you want to proceed with --force-sync [Y/N] 
+```
+
+To proceed with `--force-sync`, you must type 'y' or 'Y' to allow the application to continue.
+
+### Enabling the Client Activity Log
+When running onedrive, all actions can be logged to a separate log file. This can be enabled by using the `--enable-logging` flag or by adding `enable_logging = "true"` to your 'config' file.
+
+By default, log files will be written to `/var/log/onedrive/` and will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client, allowing easy identification of which user generated which client activity log.
+
+> [!NOTE]
+> You will need to ensure the existence of this directory and that your user has the applicable permissions to write to this directory; otherwise, the following error message will be printed:
+> ```text
+> ERROR: Unable to access /var/log/onedrive
+> ERROR: Please manually create '/var/log/onedrive' and set appropriate permissions to allow write access
+> ERROR: The requested client activity log will instead be located in your user's home directory
+> ```
+
+On many systems, ensuring that the log directory exists can be achieved by performing the following:
+```text
+sudo mkdir /var/log/onedrive
+sudo chown root:users /var/log/onedrive
+sudo chmod 0775 /var/log/onedrive
+```
+
+Additionally, you need to ensure that your user account is part of the 'users' group:
+```text
+cat /etc/group | grep users
+```
+
+If your user is not part of this group, then you need to add your user to this group (replace `<username>` with your user account):
+```text
+sudo usermod -a -G users <username>
+```
+
+If you need to make a group modification, you will need to log out of all sessions / SSH sessions and log in again to have the new group access applied. 
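+
+To verify the group membership after logging back in, you can use the standard `id` command (replace `<username>` with your user account):
+```text
+id <username>
+```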
+ +If the client is unable to write the client activity log, the following error message will be printed: +```text +ERROR: Unable to write the activity log to /var/log/onedrive/%username%.onedrive.log +ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account +ERROR: The requested client activity log will instead be located in your user's home directory +``` + +If you receive this error message, you will need to diagnose why your system cannot write to the specified file location. + +#### Client Activity Log Example: +An example of a client activity log for the command `onedrive --sync --enable-logging` is below: +```text +2023-Sep-27 08:16:00.1128806 Configuring Global Azure AD Endpoints +2023-Sep-27 08:16:00.1160620 Sync Engine Initialised with new Onedrive API instance +2023-Sep-27 08:16:00.5227122 All application operations will be performed in: /home/user/OneDrive +2023-Sep-27 08:16:00.5227977 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:16:00.7780979 Processing changes and items received from Microsoft OneDrive ... +2023-Sep-27 08:16:00.7781548 Performing a database consistency and integrity check on locally stored data ... +2023-Sep-27 08:16:00.7785889 Scanning the local file system '~/OneDrive' for new data to upload ... +2023-Sep-27 08:16:00.7813710 Performing a final true-up scan of online data from Microsoft OneDrive +2023-Sep-27 08:16:00.7814668 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:16:01.0141776 Processing changes and items received from Microsoft OneDrive ... +2023-Sep-27 08:16:01.0142454 Sync with Microsoft OneDrive is complete +``` +An example of a client activity log for the command `onedrive --sync --verbose --enable-logging` is below: +```text +2023-Sep-27 08:20:05.4600464 Checking Application Version ... +2023-Sep-27 08:20:05.5235017 Attempting to initialise the OneDrive API ... +2023-Sep-27 08:20:05.5237207 Configuring Global Azure AD Endpoints +2023-Sep-27 08:20:05.5238087 The OneDrive API was initialised successfully +2023-Sep-27 08:20:05.5238536 Opening the item database ... +2023-Sep-27 08:20:05.5270612 Sync Engine Initialised with new Onedrive API instance +2023-Sep-27 08:20:05.9226535 Application version: vX.Y.Z-A-bcdefghi +2023-Sep-27 08:20:05.9227079 Account Type: +2023-Sep-27 08:20:05.9227360 Default Drive ID: +2023-Sep-27 08:20:05.9227550 Default Root ID: +2023-Sep-27 08:20:05.9227862 Remaining Free Space: +2023-Sep-27 08:20:05.9228296 All application operations will be performed in: /home/user/OneDrive +2023-Sep-27 08:20:05.9228989 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:20:06.2076569 Performing a database consistency and integrity check on locally stored data ... +2023-Sep-27 08:20:06.2077121 Processing DB entries for this Drive ID: +2023-Sep-27 08:20:06.2078408 Processing ~/OneDrive +2023-Sep-27 08:20:06.2078739 The directory has not changed +2023-Sep-27 08:20:06.2079783 Processing Attachments +2023-Sep-27 08:20:06.2080071 The directory has not changed +2023-Sep-27 08:20:06.2081585 Processing Attachments/file.docx +2023-Sep-27 08:20:06.2082079 The file has not changed +2023-Sep-27 08:20:06.2082760 Processing Documents +2023-Sep-27 08:20:06.2083225 The directory has not changed +2023-Sep-27 08:20:06.2084284 Processing Documents/file.log +2023-Sep-27 08:20:06.2084886 The file has not changed +2023-Sep-27 08:20:06.2085150 Scanning the local file system '~/OneDrive' for new data to upload ... 
+2023-Sep-27 08:20:06.2087133 Skipping item - excluded by sync_list config: ./random_25k_files +2023-Sep-27 08:20:06.2116235 Performing a final true-up scan of online data from Microsoft OneDrive +2023-Sep-27 08:20:06.2117190 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:20:06.5049743 Sync with Microsoft OneDrive is complete +``` + +#### Client Activity Log Differences +Despite application logging being enabled as early as possible, the following log entries will be missing from the client activity log when compared to console output: + +**No user configuration file:** +```text +No user or system config file found, using application defaults +Using 'user' configuration path for application state data: /home/user/.config/onedrive +Using the following path to store the runtime application log: /var/log/onedrive +``` +**User configuration file:** +```text +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +Using 'user' configuration path for application state data: /home/user/.config/onedrive +Using the following path to store the runtime application log: /var/log/onedrive +``` + +### GUI Notifications +If notification support has been compiled in (refer to [GUI Notification Support](install.md#gui-notification-support)), the following events will trigger a GUI notification within the display manager session: +* Aborting a sync if .nosync file is found +* Skipping a particular item due to an invalid name +* Skipping a particular item due to an invalid symbolic link +* Skipping a particular item due to an invalid UTF sequence +* Skipping a particular item due to an invalid character encoding sequence +* Cannot create remote directory +* Cannot upload file changes (free space issue, breaches maximum allowed size, breaches maximum OneDrive Account path length) +* Cannot delete remote file / folder +* Cannot move remote file / folder +* When a re-authentication is required +* When a new client version is available +* Files that fail to upload +* Files that fail to download + +### Handling a Microsoft OneDrive Account Password Change +If you change your Microsoft OneDrive Account Password, the client will no longer be authorised to sync, and will generate the following error upon next application run: +```text +AADSTS50173: The provided grant has expired due to it being revoked, a fresh auth token is needed. The user might have changed or reset their password. The grant was issued on '' and the TokensValidFrom date (before which tokens are not valid) for this user is ''. + +ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token. +``` + +To re-authorise the client, follow the steps below: +1. If running the client as a system service (init.d or systemd), stop the applicable system service +2. Run the command `onedrive --reauth`. This will clean up the previous authorisation, and will prompt you to re-authorise the client as per initial configuration. Please note, if you are using `--confdir` as part of your application runtime configuration, you must include this when telling the client to re-authenticate. +3. Restart the client if running as a system service or perform the standalone sync operation again + +The application will now sync with OneDrive with the new credentials. 
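+
+For example, if your client normally runs with a custom configuration directory via `--confdir`, include that same option when re-authenticating (the path below is illustrative only):
+```text
+onedrive --confdir="~/.config/my-onedrive-config" --reauth
+```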
+
+### Determining the synchronisation result
+When the client has finished syncing without errors, the following will be displayed:
+```text
+Sync with Microsoft OneDrive is complete
+```
+
+If any items failed to sync, the following will be displayed:
+```text
+Sync with Microsoft OneDrive has completed, however there are items that failed to sync.
+```
+A file list of failed upload or download items will also be listed to allow you to determine your next steps.
+
+In order to fix the upload or download failures, you may need to:
+* Review the application output to determine what happened
+* Re-try your command utilising `--resync` to ensure your system is correctly synced with your Microsoft OneDrive Account
+
+## Frequently Asked Configuration Questions
+
+### How to change the default configuration of the client?
+Configuration is determined by three layers, applied in the following order:
+* Application default values
+* Values that are set in the configuration file
+* Values that are passed in via the command line at application runtime. These values will override any value set in the configuration file.
+
+The default application values provide a reasonable operational default, and additional configuration is entirely optional.
+
+If you want to change the application defaults, you can download a copy of the config file into your application configuration directory. Valid default directories for the config file are:
+* `~/.config/onedrive`
+* `/etc/onedrive`
+
+> [!TIP]
+> To download a copy of the config file, use the following:
+> ```text
+> mkdir -p ~/.config/onedrive
+> wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/onedrive/config
+> ```
+
+For full configuration options and CLI switches, please refer to [application-config-options.md](application-config-options.md)
+
+### How to change where my data from Microsoft OneDrive is stored?
+By default, your Microsoft OneDrive data is stored within your Home Directory under a directory called 'OneDrive'. This replicates as closely as possible where the Microsoft Windows OneDrive client stores data.
+
+To change this location, the application configuration option 'sync_dir' is used to specify a new local directory where your Microsoft OneDrive data should be stored.
+
+> [!IMPORTANT]
+> Please be aware that if you designate a network mount point (such as NFS, Windows Network Share, or Samba Network Share) as your `sync_dir`, this setup inherently lacks 'inotify' support. Support for 'inotify' is essential for real-time tracking of file changes, which means that the client's 'Monitor Mode' cannot immediately detect changes in files located on these network shares. Instead, synchronisation between your local filesystem and Microsoft OneDrive will occur at intervals specified by the `monitor_interval` setting. This limitation regarding 'inotify' support on network mount points like NFS or Samba is beyond the control of this client.
+
+### How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive? 
+The following are the application default permissions for any new directory or file that is created locally when downloaded from Microsoft OneDrive:
+* Directories: 700 - This provides the following permissions: `drwx------`
+* Files: 600 - This provides the following permissions: `-rw-------`
+
+These default permissions align to the security principle of 'least privilege', so that only you have access to the data that you download from Microsoft OneDrive.
+
+To alter these default permissions, you can adjust the values of two configuration options as follows. You can also use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions.
+```text
+sync_dir_permissions = "700"
+sync_file_permissions = "600"
+```
+
+> [!IMPORTANT]
+> Please note that special permission bits such as setuid, setgid, and the sticky bit are not supported. Valid permission values range from `000` to `777` only.
+
+### How are uploads and downloads managed?
+The system manages downloads and uploads using a multi-threaded approach. Specifically, the application utilises 16 threads for these processes. This thread count is preset and cannot be modified by users. This design ensures efficient handling of data transfers but does not allow for customisation of thread allocation.
+
+### How to only sync a specific directory?
+There are two methods to achieve this:
+* Employ the '--single-directory' option to only sync this specific path
+* Employ 'sync_list' as part of your 'config' file to configure what files and directories to sync, and what should be excluded
+
+### How to 'skip' files from syncing?
+There are two methods to achieve this:
+* Employ 'skip_file' as part of your 'config' file to configure what files to skip
+* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded
+
+### How to 'skip' directories from syncing?
+There are three methods available to 'skip' a directory from the sync process:
+* Employ 'skip_dir' as part of your 'config' file to configure what directories to skip
+* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded
+* Employ 'check_nosync' as part of your 'config' file and a '.nosync' empty file within the directory to exclude to skip that directory
+
+### How to 'skip' .files and .folders from syncing?
+There are three methods to achieve this:
+* Employ 'skip_file' or 'skip_dir' to configure what files or folders to skip
+* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded
+* Employ 'skip_dotfiles' as part of your 'config' file to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive
+
+### How to 'skip' files larger than a certain size from syncing?
+Use `skip_size = "value"` as part of your 'config' file, where 'value' is the maximum file size in MB; files larger than this will be skipped.
+
+### How to 'rate limit' the application to control bandwidth consumed for upload & download operations?
+To optimise Internet bandwidth usage during upload and download processes, include the 'rate_limit' setting in your configuration file. This setting controls the bandwidth allocated to each thread.
+
+By default, 'rate_limit' is set to '0', indicating that the application will utilise the maximum available bandwidth across all threads.
+
+To check the current 'rate_limit' value, use the `--display-config` command. 
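+
+For example, to apply the per-thread 1MB 'rate_limit' value referenced in the note below, add the following to your 'config' file:
+```text
+rate_limit = "1048576"
+```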
+
+> [!NOTE]
+> Since downloads and uploads are processed through multiple threads, the 'rate_limit' value applies to each thread separately. For instance, setting 'rate_limit' to 1048576 (1MB) means that during data transfers, the total bandwidth consumption might reach around 16MB, not just the configured 1MB, due to the number of threads being used.
+
+### How can I prevent my local disk from filling up?
+By default, the application will reserve 50MB of disk space to prevent your filesystem from running out of space.
+
+This default value can be modified by adding the 'space_reservation' configuration option and the applicable value as part of your 'config' file.
+
+You can review the value in use with `--display-config`.
+
+### How does the client handle symbolic links?
+Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories.
+
+As such, there are only two methods to support symbolic links with this client:
+1. Follow the Linux symbolic link and upload its target to Microsoft OneDrive. This is the default behaviour.
+2. Skip symbolic links by configuring the application to do so. When skipping, neither the data, the link, nor any reference is uploaded to OneDrive.
+
+Use 'skip_symlinks' as part of your 'config' file to configure the skipping of all symbolic links while syncing.
+
+### How to synchronise OneDrive Personal Shared Folders?
+Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive".
+
+### How to synchronise OneDrive Business Shared Items (Files and Folders)?
+Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive".
+
+Files shared with you can be synchronised using two methods:
+1. Add a link to the file
+2. Sync the actual file locally
+
+Refer to [business-shared-items.md](business-shared-items.md) for further details.
+
+### How to synchronise SharePoint / Office 365 Shared Libraries?
+There are two methods to achieve this:
+* SharePoint library can be directly added to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the SharePoint Library you want to synchronise, and then click on "Add to my OneDrive".
+* Configure a separate application instance to only synchronise that specific SharePoint Library. Refer to [sharepoint-libraries.md](sharepoint-libraries.md) for configuration assistance.
+
+### How to Create a Shareable Link?
+In certain situations, you might want to generate a shareable file link and provide this link to other users for accessing a specific file.
+
+To accomplish this, employ the following command:
+```text
+onedrive --create-share-link <path/to/file>
+```
+> [!IMPORTANT]
+> By default, the access permissions for the file link will be read-only.
+
+To make it a read-write link, execute the following command:
+```text
+onedrive --create-share-link <path/to/file> --with-editing-perms
+```
+> [!IMPORTANT]
+> The order of the file path and option flag is crucial. 
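+
+For instance, to create a read-write link for a hypothetical file `Documents/report.docx` stored within your 'sync_dir' (the path is illustrative only):
+```text
+onedrive --create-share-link 'Documents/report.docx' --with-editing-perms
+```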
+
+### How to Synchronise Both Personal and Business Accounts at once?
+You need to set up separate instances of the application configuration for each account.
+
+Refer to [advanced-usage.md](advanced-usage.md) for guidance on configuration.
+
+### How to Synchronise Multiple SharePoint Libraries simultaneously?
+For each SharePoint Library, configure a separate instance of the application configuration.
+
+Refer to [advanced-usage.md](advanced-usage.md) for configuration instructions.
+
+### How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?
+When operating in 'Monitor Mode,' it may be advantageous to receive real-time updates to online data. A 'webhook' is the method to achieve this, so that when in 'Monitor Mode,' the client subscribes to remote updates.
+
+Remote changes can then be promptly synchronised to your local file system, without waiting for the next synchronisation cycle.
+
+This is accomplished by:
+* Using 'webhook_enabled' as part of your 'config' file to enable this feature
+* Using 'webhook_public_url' as part of your 'config' file to configure the URL the webhook will use for subscription updates
+
+### How to initiate the client as a background service?
+There are a few ways to employ onedrive as a service:
+* via init.d
+* via systemd
+* via runit
+
+#### OneDrive service running as root user via init.d
+```text
+chkconfig onedrive on
+service onedrive start
+```
+To view the logs, execute:
+```text
+tail -f /var/log/onedrive/<username>.onedrive.log
+```
+To alter the 'user' under which the client operates (typically root by default), manually modify the init.d service file and adjust `daemon --user root onedrive_service.sh` to match the correct user.
+
+#### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)
+Initially, switch to the root user with `su - root`, then activate the systemd service:
+```text
+systemctl --user enable onedrive
+systemctl --user start onedrive
+```
+
+> [!IMPORTANT]
+> This will execute the 'onedrive' process with a UID/GID of '0', which means any files or folders created will be owned by 'root'.
+
+> [!IMPORTANT]
+> The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below.
+
+To monitor the service's status, use the following:
+```text
+systemctl --user status onedrive.service
+```
+
+To observe the systemd application logs, use:
+```text
+journalctl --user-unit=onedrive -f
+```
+
+> [!TIP]
+> For systemd to function correctly, it requires the presence of XDG environment variables. If you encounter the following error while enabling the systemd service:
+> ```text
+> Failed to connect to bus: No such file or directory
+> ```
+> The most likely cause is missing XDG environment variables. To resolve this, add the following lines to `.bashrc` or another file executed upon user login:
+> ```text
+> export XDG_RUNTIME_DIR="/run/user/$UID"
+> export DBUS_SESSION_BUS_ADDRESS="unix:path=${XDG_RUNTIME_DIR}/bus"
+> ```
+>
+> To apply this change, you must log out of all user accounts where it has been made.
+
+> [!IMPORTANT]
+> On certain systems (e.g., Raspbian / Ubuntu / Debian on Raspberry Pi), the XDG fix above may not persist after system reboots. An alternative to starting the client via systemd as root is as follows:
+> 1. Create a symbolic link from `/home/root/.config/onedrive` to `/root/.config/onedrive/`.
+> 2. 
Establish a systemd service using the '@' service file: `systemctl enable onedrive@root.service`.
+> 3. Start the service: `systemctl start onedrive@root.service`.
+>
+> This ensures that the service correctly restarts upon system reboot.
+
+To examine the systemd application logs, run:
+```text
+journalctl --unit=onedrive@root -f
+```
+
+#### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)
+```text
+systemctl enable onedrive
+systemctl start onedrive
+```
+> [!IMPORTANT]
+> This will execute the 'onedrive' process with a UID/GID of '0', meaning any files or folders created will be owned by 'root'.
+
+To view the systemd application logs, execute:
+```text
+journalctl --unit=onedrive -f
+```
+
+#### OneDrive service running as a non-root user via systemd (All Linux Distributions)
+In some instances, it is preferable to run the OneDrive client as a service without the 'root' user. Follow the instructions below to configure the service for your regular user login.
+
+1. As the user who will run the service, launch the application in standalone mode, authorise it for use, and verify that synchronisation is functioning as expected:
+```text
+onedrive --sync --verbose
+```
+2. After validating the application for your user, switch to the 'root' user and enable the service, where `<username>` is your username from step 1 above.
+```text
+systemctl enable onedrive@<username>.service
+systemctl start onedrive@<username>.service
+```
+3. To check the service's status for the user, use the following:
+```text
+systemctl status onedrive@<username>.service
+```
+
+To observe the systemd application logs, use:
+```text
+journalctl --unit=onedrive@<username> -f
+```
+
+#### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)
+In some scenarios, you may want to receive GUI notifications when using the client as a non-root user. In this case, follow these steps:
+
+1. Log in via the graphical UI as the user you want to enable the service for.
+2. Disable any `onedrive@` service files for your username, e.g.:
+```text
+sudo systemctl stop onedrive@alex.service
+sudo systemctl disable onedrive@alex.service
+```
+3. Enable the service as follows:
+```text
+systemctl --user enable onedrive
+systemctl --user start onedrive
+```
+
+To check the service's status for the user, use the following:
+```text
+systemctl --user status onedrive.service
+```
+
+To view the systemd application logs, execute:
+```text
+journalctl --user-unit=onedrive -f
+```
+
+> [!IMPORTANT]
+> The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms.
+
+#### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)
+
+1. Create the following folder if it doesn't already exist: `/etc/sv/runsvdir-<username>`
+
+   - where `<username>` is the `USER` targeted for the service
+   - e.g., `# mkdir /etc/sv/runsvdir-nolan`
+
+2. Create a file called `run` under the previously created folder with executable permissions
+
+   - `# touch /etc/sv/runsvdir-<username>/run`
+   - `# chmod 0755 /etc/sv/runsvdir-<username>/run`
+
+3. Edit the `run` file with the following contents (permissions needed):
+
+   ```sh
+   #!/bin/sh
+   export USER="<username>"
+   export HOME="/home/<username>"
+
+   groups="$(id -Gn "${USER}" | tr ' ' ':')"
+   svdir="${HOME}/service"
+
+   exec chpst -u "${USER}:${groups}" runsvdir "${svdir}"
+   ```
+
+   - Ensure you replace `<username>` with the `USER` set in step #1.
+
+4. Enable the previously created folder as a service
+
+   - `# ln -fs /etc/sv/runsvdir-<username> /var/service/`
+
+5. 
Create a subfolder in the `USER`'s `HOME` directory to store the services (or symlinks)
+
+   - `$ mkdir ~/service`
+
+6. Create a subfolder specifically for OneDrive
+
+   - `$ mkdir ~/service/onedrive/`
+
+7. Create a file called `run` under the previously created folder with executable permissions
+
+   - `$ touch ~/service/onedrive/run`
+   - `$ chmod 0755 ~/service/onedrive/run`
+
+8. Append the following contents to the `run` file
+
+   ```sh
+   #!/usr/bin/env sh
+   exec /usr/bin/onedrive --monitor
+   ```
+
+   - In some scenarios, the path to the `onedrive` binary may vary. You can obtain it by running `$ command -v onedrive`.
+
+9. Reboot to apply the changes
+
+10. Check the status of user-defined services
+
+    - `$ sv status ~/service/*`
+
+> [!NOTE]
+> For additional details, you can refer to Void's documentation on [Per-User Services](https://docs.voidlinux.org/config/services/user-services.html)
+
+### How to start a user systemd service at boot without user login?
+In some situations, it may be necessary for the systemd service to start without requiring your 'user' to log in.
+
+To address this issue, you need to reconfigure your 'user' account so that the systemd services you've created launch without the need for you to log in to your system (replace `<username>` with your user account):
+```text
+loginctl enable-linger <username>
+```
\ No newline at end of file
diff --git a/onedrive.1.in b/onedrive.1.in
index 5caacb0d6..fb950dd6c 100644
--- a/onedrive.1.in
+++ b/onedrive.1.in
@@ -1,391 +1,364 @@
.TH ONEDRIVE "1" "@PACKAGE_DATE@" "@PACKAGE_VERSION@" "User Commands"
.SH NAME
-onedrive \- folder synchronization with OneDrive
+onedrive \- A client for the Microsoft OneDrive Cloud Service
.SH SYNOPSIS
.B onedrive
-[\fI\,OPTION\/\fR] \-\-synchronize
+[\fI\,OPTION\/\fR] --sync
.br
.B onedrive
-[\fI\,OPTION\/\fR] \-\-monitor
+[\fI\,OPTION\/\fR] --monitor
.br
.B onedrive
-[\fI\,OPTION\/\fR] \-\-display-config
+[\fI\,OPTION\/\fR] --display-config
.br
.B onedrive
-[\fI\,OPTION\/\fR] \-\-display-sync-status
+[\fI\,OPTION\/\fR] --display-sync-status
+.br
+.B onedrive
+[\fI\,OPTION\/\fR] -h | --help
+.br
+.B onedrive
+--version
.SH DESCRIPTION
-A complete tool to interact with OneDrive on Linux.
-.SH OPTIONS
-Without any option given, no sync is done and the program exits.
-.TP
-\fB\-\-auth\-files\fP ARG
-Perform authorization via two files passed in as \fBARG\fP in the format \fBauthUrl:responseUrl\fP.
-The authorization URL is written to the \fBauthUrl\fP, then \fBonedrive\fP waits for
-the file \fBresponseUrl\fP to be present, and reads the response from that file.
-.TP
-\fB\-\-auth\-response\fP ARG
-Perform authentication not via interactive dialog but via providing the response url directly.
-.TP
-\fB\-\-check\-for\-nomount\fP
-Check for the presence of .nosync in the syncdir root. If found, do not perform sync.
+This is a free Microsoft OneDrive Client designed to work with OneDrive Personal, OneDrive for Business, Office365 OneDrive, and SharePoint Libraries. It's fully compatible with most major Linux distributions and FreeBSD, and can be containerised using Docker or Podman. The client offers secure one-way and two-way synchronisation capabilities, making it easy to connect to Microsoft OneDrive services across various platforms.
+
+.SH FEATURES
.br
-Configuration file key: \fBcheck_nomount\fP (default: \fBfalse\fP)
-.TP
-\fB\-\-check\-for\-nosync\fP
-Check for the presence of .nosync in each directory. If found, skip directory from sync. 
+* Compatible with OneDrive Personal, OneDrive for Business including accessing Microsoft SharePoint Libraries .br -Configuration file key: \fBcheck_nosync\fP (default: \fBfalse\fP) -.TP -\fB\-\-classify\-as\-big\-delete\fP -Number of children in a path that is locally removed which will be classified as a 'big data delete' +* Provides rules for client-side filtering to select data for syncing with Microsoft OneDrive accounts .br -Configuration file key: \fBclassify_as_big_delete\fP (default: \fB1000\fP) -.TP -\fB\-\-cleanup\-local\-files\fP -Cleanup additional local files when using \-\-download-only. This will remove local data. +* Caches sync state for efficiency .br -Configuration file key: \fBcleanup_local_files\fP (default: \fBfalse\fP) -.TP -\fB\-\-confdir\fP ARG -Set the directory used to store the configuration files -.TP -\fB\-\-create\-directory\fP ARG -Create a directory on OneDrive \- no sync will be performed. -.TP -\fB\-\-create\-share\-link\fP ARG -Create a shareable link for an existing file on OneDrive -.TP -\fB\-\-debug\-https\fP -Debug OneDrive HTTPS communication. +* Supports a dry-run option for safe configuration testing .br -Configuration file key: \fBdebug_https\fP (default: \fBfalse\fP) -.TP -\fB\-\-destination\-directory\fP ARG -Destination directory for renamed or move on OneDrive \- no sync will be performed. -.TP -\fB\-\-disable\-download\-validation\fP -Disable download validation when downloading from OneDrive +* Validates file transfers to ensure data integrity .br -Configuration file key: \fBdisable_download_validation\fP (default: \fBfalse\fP) -.TP -\fB\-\-disable\-notifications\fP -Do not use desktop notifications in monitor mode +* Monitors local files in real-time using inotify .br -Configuration file key: \fBdisable_notifications\fP (default: \fBfalse\fP) -.TP -\fB\-\-disable\-upload\-validation\fP -Disable upload validation when uploading to OneDrive +* Supports interrupted uploads for completion at a later time .br -Configuration file key: \fBdisable_upload_validation\fP (default: \fBfalse\fP) -.TP -\fB\-\-display\-config\fP -Display what options the client will use as currently configured \- no sync will be performed. -.TP -\fB\-\-display\-running\-config\fP -Display what options the client has been configured to use on application startup. -.TP -\fB\-\-display\-sync\-status\fP -Display the sync status of the client \- no sync will be performed. -.TP -\fB\-\-download\-only\fP -Replicate the OneDrive online state locally, by only downloading changes from OneDrive. Do not upload local changes to OneDrive. +* Capability to sync remote updates immediately via webhooks .br -Configuration file key: \fBdownload_only\fP (default: \fBfalse\fP) -.TP -\fB\-\-dry\-run\fP -Perform a trial sync with no changes made. Can ONLY be used with --synchronize. 
Will be ignored for --monitor +* Enhanced synchronisation speed with multi-threaded file transfers .br -Configuration file key: \fBdry_run\fP (default: \fBfalse\fP) -.TP -\fB\-\-enable\-logging\fP -Enable client activity to a separate log file +* Manages traffic bandwidth use with rate limiting .br -Configuration file key: \fBenable_logging\fP (default: \fBfalse\fP) -.TP -\fB\-\-force\fP -Force the deletion of data when a 'big delete' is detected -.TP -\fB\-\-force\-http\-11\fP -Force the use of HTTP 1.1 for all operations +* Supports seamless access to shared folders and files across both OneDrive Personal and OneDrive for Business accounts .br -Configuration file key: \fBforce_http_11\fP (default: \fBfalse\fP) -.TP -\fB\-\-force\-sync\fP -Force a synchronization of a specific folder, only when using --synchronize --single-directory and ignore +* Supports national cloud deployments including Microsoft Cloud for US Government, Microsoft Cloud Germany, and Azure and Office 365 operated by VNET in China +.br +* Supports sending desktop alerts using libnotify .br -all non-default skip_dir and skip_file rules +* Protects against significant data loss on OneDrive after configuration changes +.br +* Works with both single and multi-tenant applications + +.SH CONFIGURATION +By default, the client will use a sensible set of default values to interact with the Microsoft OneDrive service. .TP -\fB\-\-get\-O365\-drive\-id\fP ARG -Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library +Should you wish to change these defaults, you should copy the default config file into your home directory before making any applicable changes: + +.nf +\fB +mkdir\ \-p\ ~/.config/onedrive +cp\ @DOCDIR@/config\ ~/.config/onedrive/config +\fP +.fi + .TP -\fB\-\-get\-file\-link\fP ARG -Display the file link of a synced file +Please refer to the online documentation file application-config-options.md for details on all configuration file options. + +.SH CLIENT SIDE FILTERING +Client Side Filtering in the context of the OneDrive Client for Linux refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. The OneDrive Client for Linux offers several configuration options to facilitate this: .TP -\fB\-\-list\-shared\-folders\fP -List OneDrive Business Shared Folders +.B skip_dir +Specifies directories that should not be synchronised with OneDrive. Useful for omitting large or irrelevant directories from the sync process. .TP -\fB\-\-local\-first\fP -Synchronize from the local directory source first, before downloading changes from OneDrive. -.br -Configuration file key: \fBlocal_first\fP (default: \fBfalse\fP) +.B skip_dotfiles +Excludes dotfiles, usually configuration files or scripts, from the sync. Ideal for users who prefer to keep these files local. .TP -\fB\-\-logout\fP -Logout the current user +.B skip_file +Allows specifying specific files to exclude from synchronisation. Offers flexibility in selecting essential files for cloud storage. .TP -\fB\-\-log\-dir\fP ARG -defines the directory where logging output is saved to, needs to end with a slash -.br -Configuration file key: \fBlog_dir\fP (default: \fB/var/log/onedrive/\fP) +.B skip_symlinks +Prevents symlinks, which often point to files outside the OneDrive directory or to irrelevant locations, from being included in the sync. 
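+.PP
+As an illustrative sketch only, the following hypothetical 'config' file entries would skip a directory named 'build' and any matching temporary files (the patterns are examples, not application defaults):
+.nf
+\fB
+skip_dir\ =\ "build"
+skip_file\ =\ "~*|*.tmp"
+\fP
+.fi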
+.PP
+Additionally, the OneDrive Client for Linux allows the implementation of Client Side Filtering rules through a 'sync_list' file. This file explicitly states which directories or files should be included in the synchronisation. By default, any item not listed in the 'sync_list' file is excluded. This approach offers granular control over synchronisation, ensuring that only necessary data is transferred to and from Microsoft OneDrive.
+.PP
+These configurable options and the 'sync_list' file provide users with the flexibility to tailor the synchronisation process to their specific needs, conserving bandwidth and storage space while ensuring that important files are always backed up and accessible.
+.TP
+.B NOTE:
+After changing any Client Side Filtering rule, a full re-synchronisation must be performed using --resync
+
+.SH FIRST RUN
+Once you've installed the application, you'll need to authorise it using your Microsoft OneDrive Account. This can be done by simply running the application without any additional command switches.
+.TP
+Please be aware that some companies may require you to explicitly add this app to the Microsoft MyApps portal. To add an approved app to your apps, click on the ellipsis in the top-right corner and select "Request new apps." On the next page, you can add this app. If it's not listed, you should make a request through your IT department.
+.TP
+When you run the application for the first time, you'll be prompted to open a specific URL using your web browser, where you'll need to log in to your Microsoft Account and grant the application permission to access your files. After granting permission to the application, you'll be redirected to a blank page. Simply copy the URI from the blank page and paste it into the application.
+.TP
+This process authenticates your application with your account information, and it is now ready to use to sync your data between your local system and Microsoft OneDrive.
+
+.SH GUI NOTIFICATIONS
+If the client has been compiled with support for notifications, the client will send notifications about client activity via libnotify over DBus to the GUI when running in --monitor mode.
+
+.SH APPLICATION LOGGING
+When running onedrive, all actions can be logged to a separate log file. This can be enabled by using the \fB--enable-logging\fP flag. By default, log files will be written to \fB/var/log/onedrive\fP. All logfiles will be in the format of \fB%username%.onedrive.log\fP, where \fB%username%\fP represents the user who ran the client.
+
+.SH ALL CLI OPTIONS
+The options below allow you to control the behaviour of the onedrive client from the CLI. Without any specific option, if the client is already authenticated, the client will exit without any further action. 
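+.PP
+For example, a one-time synchronisation with verbose output can be performed with:
+.nf
+\fB
+onedrive\ \-\-sync\ \-\-verbose
+\fP
+.fi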
+ .TP -\fB\-\-monitor\-fullscan-frequency\fP ARG -Number of sync runs before performing a full local scan of the synced directory -.br -Configuration file key: \fBmonitor_fullscan_frequency\fP (default: \fB10\fP) +\fB\-\-sync\fR +Do a one-time synchronisation with OneDrive. + .TP -\fB\-\-monitor\-log\-frequency\fP ARG -Frequency of logging in monitor mode -.br -Configuration file key: \fBmonitor_log_frequency\fP (default: \fB5\fP) +\fB\-\-monitor\fR +Monitor filesystem for changes and sync regularly. + .TP -\fB\-\-no\-remote\-delete\fP -Do not delete local file 'deletes' from OneDrive when using \fB\-\-upload\-only\fR -.br -Configuration file key: \fBno_remote_delete\fP (default: \fBfalse\fP) +\fB\-\-display-config\fR +Display the currently used configuration for the onedrive client. + .TP -\fB\-\-operation\-timeout\fP ARG -Set the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. +\fB\-\-display-sync-status\fR +Query OneDrive service and report on pending changes. + +.TP +\fB\-\-auth-files\fR \fIARG\fR +Perform authentication not via interactive dialog but via files that are read/written when using this option. The two files are passed in as \fBARG\fP in the format \fBauthUrl:responseUrl\fP. +The authorisation URL is written to the \fBauthUrl\fP file, then \fBonedrive\fP waits for the file \fBresponseUrl\fP to be present, and reads the response from that file. .br -Configuration file key: \fBoperation_timeout\fP (default: \fB3600\fP) +Always specify the full path when using this option, otherwise the application will default to using the default configuration path for these files (~/.config/onedrive/) + .TP -\fB\-\-print\-token\fP -Print the access token, useful for debugging +\fB\-\-auth-response\fR \fIARG\fR +Perform authentication not via interactive dialog but via providing the response URL directly. + .TP -\fB\-\-reauth\fP -Reauthenticate the client with OneDrive +\fB\-\-check-for-nomount\fR +Check for the presence of .nosync in the syncdir root. If found, do not perform sync. + .TP -\fB\-\-remove\-directory\fP ARG -Remove a directory on OneDrive \- no sync will be performed. +\fB\-\-check-for-nosync\fR +Check for the presence of .nosync in each directory. If found, skip directory from sync. + .TP -\fB\-\-remove\-source\-files\fP -Remove source file after successful transfer to OneDrive when using \-\-upload-only -.br -Configuration file key: \fBremove_source_files\fP (default: \fBfalse\fP) +\fB\-\-classify-as-big-delete\fR \fIARG\fR +Number of children in a path that is locally removed which will be classified as a 'big data delete'. + .TP -\fB\-\-resync\fP -Forget the last saved state, perform a full sync +\fB\-\-cleanup-local-files\fR +Cleanup additional local files when using --download-only. This will remove local data. + .TP -\fB\-\-resync\-auth\fP -Approve the use of performing a --resync action without needing CLI authorization +\fB\-\-confdir\fR \fIARG\fR +Set the directory used to store the configuration files. + .TP -\fB\-\-single\-directory\fP ARG -Specify a single local directory within the OneDrive root to sync. +\fB\-\-create-directory\fR \fIARG\fR +Create a directory on OneDrive - no sync will be performed. + .TP -\fB\-\-skip\-dir\fP ARG -Skip any directories that match this pattern from syncing +\fB\-\-create-share-link\fR \fIARG\fR +Create a shareable link for an existing file on OneDrive. 
+ .TP -\fB\-\-skip\-dir\-strict\-match\fP -When matching skip_dir directories, only match explicit matches -.br -Configuration file key: \fBskip_dir_strict_match\fP (default: \fBfalse\fP) +\fB\-\-debug-https\fR +Debug OneDrive HTTPS communication. + .TP -\fB\-\-skip\-dot\-files\fP -Skip dot files and folders from syncing -.br -Configuration file key: \fBskip_dotfiles\fP (default: \fBfalse\fP) +\fB\-\-destination-directory\fR \fIARG\fR +Destination directory for renamed or moved items on OneDrive - no sync will be performed. + .TP -\fB\-\-skip\-file\fP -Skip any files that match this pattern from syncing -.br -Configuration file key: \fBskip_file\fP (default: \fB~*|.~*|*.tmp\fP) +\fB\-\-disable-download-validation\fR +Disable download validation when downloading from OneDrive. + .TP -\fB\-\-skip\-size\fP ARG -Skip new files larger than this size (in MB) +\fB\-\-disable-notifications\fR +Do not use desktop notifications in monitor mode. + .TP -\fB\-\-skip\-symlinks\fP -Skip syncing of symlinks -.br -Configuration file key: \fBskip_symlinks\fP (default: \fBfalse\fP) +\fB\-\-disable-upload-validation\fR +Disable upload validation when uploading to OneDrive. + .TP -\fB\-\-source\-directory\fP ARG -Source directory to rename or move on OneDrive \- no sync will be performed. +\fB\-\-display-quota\fR +Display the quota status of the client - no sync will be performed. + .TP -\fB\-\-space\-reservation\fP ARG -The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation +\fB\-\-display-running-config\fR +Display what options the client has been configured to use on application startup. + .TP -\fB\-\-sync\-root\-files\fP -Sync all files in sync_dir root when using sync_list. +\fB\-\-download-only\fR +Replicate the OneDrive online state locally, by only downloading changes from OneDrive. Do not upload local changes to OneDrive. + .TP -\fB\-\-sync\-shared\-folders\fP -Sync OneDrive Business Shared Folders -.br -Configuration file key: \fBsync_business_shared_folders\fP (default: \fBfalse\fP) +\fB\-\-dry-run\fR +Perform a trial sync with no changes made. + .TP -\fB\-\-syncdir\fP ARG -Set the directory used to sync the files that are synced -.br -Configuration file key: \fBsync_dir\fP (default: \fB~/OneDrive\fP) +\fB\-\-enable-logging\fR +Enable client activity to a separate log file. + .TP -\fB\-\-synchronize\fP -Perform a synchronization +\fB\-\-force\fR +Force the deletion of data when a 'big delete' is detected. + .TP -\fB\-\-upload\-only\fP -Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive. -.br -Configuration file key: \fBupload_only\fP (default: \fBfalse\fP) +\fB\-\-force-http-11\fR +Force the use of HTTP 1.1 for all operations. + .TP -\fB\-\-user\-agent\fP ARG -Set the used User Agent identifier -.br -Configuration file key: \fBuser_agent\fP (default: don't change) +\fB\-\-force-sync\fR +Force a synchronisation of a specific folder, only when using --sync --single-directory and ignore all non-default skip_dir and skip_file rules. + .TP -\fB\-v \-\-verbose\fP -Print more details, useful for debugging. Given two times (or more) -enables even more verbose debug statements. +\fB\-\-get-O365-drive-id\fR \fIARG\fR +Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library (DEPRECATED). + +.TP +\fB\-\-get-file-link\fR \fIARG\fR +Display the file link of a synced file. 
+ .TP -\fB\-\-version\fP -Print the version and exit +\fB\-\-get-sharepoint-drive-id\fR +Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library. + .TP -\fB\-\-with\-editing\-perms\fP -Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link +\fB\-\-help\fR, \fB\-h\fR +Display application help. + .TP -\fB\-h \-\-help\fP -This help information. -.PP +\fB\-\-list-shared-items\fR +List OneDrive Business Shared Items. -.SH FEATURES +.TP +\fB\-\-local-first\fR +Synchronise from the local directory source first, before downloading changes from OneDrive. -State caching +.TP +\fB\-\-log-dir\fR \fIARG\fR +Directory where logging output is saved to, needs to end with a slash. -Real-Time file monitoring with Inotify +.TP +\fB\-\-logout\fR +Logout the current user. -File upload / download validation to ensure data integrity +.TP +\fB\-\-modified-by\fR \fIARG\fR +Display the last modified by details of a given path. -Resumable uploads +.TP +\fB\-\-monitor-interval\fR \fIARG\fR +Number of seconds by which each sync operation is undertaken when idle under monitor mode. -Support OneDrive for Business (part of Office 365) +.TP +\fB\-\-monitor-log-frequency\fR \fIARG\fR +Frequency of logging in monitor mode. -Shared Folder support for OneDrive Personal and OneDrive Business accounts +.TP +\fB\-\-no-remote-delete\fR +Do not delete local file 'deletes' from OneDrive when using --upload-only. -SharePoint / Office365 Shared Libraries +.TP +\fB\-\-print-access-token\fR +Print the access token, useful for debugging. -Desktop notifications via libnotify +.TP +\fB\-\-reauth\fR +Reauthenticate the client with OneDrive. -Dry-run capability to test configuration changes +.TP +\fB\-\-remove-directory\fR \fIARG\fR +Remove a directory on OneDrive - no sync will be performed. -Prevent major OneDrive accidental data deletion after configuration change +.TP +\fB\-\-remove-source-files\fR +Remove source file after successful transfer to OneDrive when using --upload-only. -Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China) +.TP +\fB\-\-resync\fR +Forget the last saved state, perform a full sync. +.TP +\fB\-\-resync-auth\fR +Approve the use of performing a --resync action. -.SH CONFIGURATION +.TP +\fB\-\-single-directory\fR \fIARG\fR +Specify a single local directory within the OneDrive root to sync. -You should copy the default config file into your home directory before making changes: -.nf -\fB -mkdir\ \-p\ ~/.config/onedrive -cp\ @DOCDIR@/config\ ~/.config/onedrive/config -\fP -.fi +.TP +\fB\-\-skip-dir\fR \fIARG\fR +Skip any directories that match this pattern from syncing. -For the supported options see the above list of command line options -for the availability of a configuration key. -.PP -Pattern are case insensitive. -\fB*\fP and \fB?\fP wildcards characters are supported. -Use \fB|\fP to separate multiple patterns. +.TP +\fB\-\-skip-dir-strict-match\fR +When matching skip_dir directories, only match explicit matches. -After changing the filters (\fBskip_file\fP or \fBskip_dir\fP in your configs) you must -execute \fBonedrive --synchronize --resync\fP. +.TP +\fB\-\-skip-dot-files\fR +Skip dot files and folders from syncing. -.SH FIRST RUN +.TP +\fB\-\-skip-file\fR \fIARG\fR +Skip any files that match this pattern from syncing. -After installing the application you must run it at least once from the terminal -to authorize it. 
+.TP +\fB\-\-skip-size\fR \fIARG\fR +Skip new files larger than this size (in MB). -You will be asked to open a specific link using your web browser where you -will have to login into your Microsoft Account and give the application the -permission to access your files. After giving the permission, you will be -redirected to a blank page. Copy the URI of the blank page into the application. +.TP +\fB\-\-skip-symlinks\fR +Skip syncing of symlinks. +.TP +\fB\-\-source-directory\fR \fIARG\fR +Source directory to rename or move on OneDrive - no sync will be performed. -.SH SYSTEMD INTEGRATION +.TP +\fB\-\-space-reservation\fR \fIARG\fR +The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation. -Service files are installed into user and system directories. .TP -OneDrive service running as root user -To enable this mode, run as root user -.nf -\fB -systemctl enable onedrive -systemctl start onedrive -\fP -.fi +\fB\-\-sync-root-files\fR +Sync all files in sync_dir root when using sync_list. .TP -OneDrive service running as root user for a non-root user -This mode allows starting the OneDrive service automatically with -system start for multiple users. For each \fB\fP run: -.nf -\fB -systemctl enable onedrive@ -systemctl start onedrive@ -\fP -.fi +\fB\-\-sync-shared-files\fR +Sync OneDrive Business Shared Files to the local filesystem. .TP -OneDrive service running as non-root user -In this mode the service will be started when the user logs in. -Run as user -.nf -\fB -systemctl --user enable onedrive -systemctl --user start onedrive -\fP -.fi +\fB\-\-syncdir\fR \fIARG\fR +Specify the local directory used for synchronisation to OneDrive. -.SH LOGGING OUTPUT +.TP +\fB\-\-synchronize\fR +Perform a synchronisation with Microsoft OneDrive (DEPRECATED). -When running onedrive all actions can be logged to a separate log file. -This can be enabled by using the \fB--enable-logging\fP flag. -By default, log files will be written to \fB/var/log/onedrive\fP. +.TP +\fB\-\-upload-only\fR +Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive. -All logfiles will be in the format of \fB%username%.onedrive.log\fP, -where \fB%username%\fP represents the user who ran the client. +.TP +\fB\-\-verbose\fR, \fB\-v+\fR +Print more details, useful for debugging (repeat for extra debugging). +.TP +\fB\-\-version\fR +Print the version and exit. -.SH NOTIFICATIONS +.TP +\fB\-\-with-editing-perms\fR +Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link . -If OneDrive has been compiled with support for notifications, a running -\fBonedrive\fP in monitor mode will send notifications about -initialization and errors via libnotify to the dbus. +.SH DOCUMENTATION +All documentation is available on GitHub: https://github.com/abraunegg/onedrive/tree/master/docs/ -Note that this does not work if \fBonedrive\fP is started as root -for a user via the \fBonedrive@\fP service. 
.SH SEE ALSO
-
-Further examples and documentation is available in
-\f[C]README.md\f[]
-\f[C]docs/USAGE.md\f[]
-\f[C]docs/advanced-usage.md\f[]
-\f[C]docs/BusinessSharedFolders.md\f[]
-\f[C]docs/SharePoint-Shared-Libraries.md\f[]
-\f[C]docs/national-cloud-deployments.md\f[]
+.BR curl (1)
diff --git a/README.md b/readme.md
similarity index 50%
rename from README.md
rename to readme.md
index 28b663595..27a3ebe66 100644
--- a/README.md
+++ b/readme.md
@@ -5,27 +5,30 @@
 [![Build Docker Images](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml)
 [![Docker Pulls](https://img.shields.io/docker/pulls/driveone/onedrive)](https://hub.docker.com/r/driveone/onedrive)
 
-A free Microsoft OneDrive Client which supports OneDrive Personal, OneDrive for Business, OneDrive for Office365 and SharePoint.
+Introducing a free Microsoft OneDrive Client that seamlessly supports OneDrive Personal, OneDrive for Business, OneDrive for Office365, and SharePoint Libraries.
 
-This powerful and highly configurable client can run on all major Linux distributions, FreeBSD, or as a Docker container. It supports one-way and two-way sync capabilities and securely connects to Microsoft OneDrive services.
+This robust and highly customisable client is compatible with all major Linux distributions and FreeBSD, and can also be deployed as a container using Docker or Podman. It offers both one-way and two-way synchronisation capabilities while ensuring a secure connection to Microsoft OneDrive services.
 
-This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) client, which the developer has confirmed he has no desire to maintain or support the client ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)). This fork has been in active development since mid 2018.
+This client was originally derived as a 'fork' of the [skilion](https://github.com/skilion/onedrive) client; notably, the developer of the original client has explicitly stated they have no intention of maintaining or supporting their work ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)).
+
+This client represents a 100% re-imagining of the original work, addressing numerous notable bugs and issues while incorporating a significant array of new features. This client has been under active development since mid-2018.
## Features
-* State caching
-* Real-Time local file monitoring with inotify
-* Real-Time syncing of remote updates via webhooks
-* File upload / download validation to ensure data integrity
-* Resumable uploads
-* Support OneDrive for Business (part of Office 365)
-* Shared Folder support for OneDrive Personal and OneDrive Business accounts
-* SharePoint / Office365 Shared Libraries
-* Desktop notifications via libnotify
-* Dry-run capability to test configuration changes
-* Prevent major OneDrive accidental data deletion after configuration change
-* Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China)
-* Supports single & multi-tenanted applications
-* Supports rate limiting of traffic
+* Compatible with OneDrive Personal and OneDrive for Business, including access to Microsoft SharePoint Libraries
+* Provides rules for client-side filtering to select data for syncing with Microsoft OneDrive accounts (see the sync_list sketch below)
+* Caches sync state for efficiency
+* Supports a dry-run option for safe configuration testing
+* Validates file transfers to ensure data integrity
+* Monitors local files in real-time using inotify
+* Supports resuming interrupted uploads at a later time
+* Capability to sync remote updates immediately via webhooks
+* Enhanced synchronisation speed with multi-threaded file transfers
+* Manages traffic bandwidth use with rate limiting
+* Supports seamless access to shared folders and files across both OneDrive Personal and OneDrive for Business accounts
+* Supports national cloud deployments including Microsoft Cloud for US Government, Microsoft Cloud Germany, and Azure and Office 365 operated by VNET in China
+* Supports sending desktop alerts using libnotify
+* Protects against significant data loss on OneDrive after configuration changes
+* Works with both single and multi-tenant applications

## What's missing
* Ability to encrypt/decrypt files on-the-fly when uploading/downloading files from OneDrive
@@ -36,28 +39,17 @@ This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) cl
* Colorful log output terminal modification: [OneDrive Client for Linux Colorful log Output](https://github.com/zzzdeb/dotfiles/blob/master/scripts/tools/onedrive_log)
* System Tray Icon: [OneDrive Client for Linux System Tray Icon](https://github.com/DanielBorgesOliveira/onedrive_tray)

-## Supported Application Version
-Only the current application release version or greater is supported.
-
-The current application release version is: [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases)
-
-Check the version of the application you are using `onedrive --version` and ensure that you are running either the current release or compile the application yourself from master to get the latest version.
-
-If you are not using the above application version or greater, you must upgrade your application to obtain support.
-
-## Have a Question
-If you have a question or need something clarified, please raise a new disscussion post [here](https://github.com/abraunegg/onedrive/discussions)
-
-Be sure to review the Frequently Asked Questions as well before raising a new discussion post.
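As a hedged illustration of the client-side filtering rules mentioned in the feature list above (the evaluation logic itself appears in src/clientSideFiltering.d later in this diff): a `sync_list` file includes a path with a leading `/`, excludes it with a leading `-` or `!`, supports `*` as a wildcard, and treats lines beginning with `#` or `;` as comments. The folder names below are purely illustrative:

```text
# Include 'Documents' and, via parental path matching, everything beneath it
/Documents
# ...but exclude this one sub-folder of it
-/Documents/Scratch
# Wildcard include of everything under 'Photos/2023'
/Photos/2023/*
```

Note that changing these filters after an initial sync requires a `--resync`, as the hash-file handling in src/config.d below suggests.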
-
## Frequently Asked Questions
Refer to [Frequently Asked Questions](https://github.com/abraunegg/onedrive/wiki/Frequently-Asked-Questions)

+## Have a Question
+If you have a question or need something clarified, please raise a new discussion post [here](https://github.com/abraunegg/onedrive/discussions)
+
## Reporting an Issue or Bug
-If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to:
+If you encounter any bugs, you can report them here on GitHub. Before filing an issue be sure to:

-1. Check the version of the application you are using `onedrive --version` and ensure that you are running a supported application version. If you are not using a supported application version, you must first upgrade your application to a supported version and then re-test for your issue.
-2. If you are using a supported applcation version, fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md)
+1. Check the version of the application you are using `onedrive --version` and ensure that you are running either the latest [release](https://github.com/abraunegg/onedrive/releases) or built from master.
+2. Fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md)
3. Generate a debug log for support using the following [process](https://github.com/abraunegg/onedrive/wiki/Generate-debug-log-for-support)
	* If you are in *any* way concerned regarding the sensitivity of the data contained with in the verbose debug log file, create a new OneDrive account, configure the client to use that, use *dummy* data to simulate your environment and then replicate your original issue
	* If you are still concerned, provide an NDA or confidentiality document to sign
@@ -70,23 +62,23 @@ Refer to [docs/known-issues.md](https://github.com/abraunegg/onedrive/blob/maste

## Documentation and Configuration Assistance
### Installing from Distribution Packages or Building the OneDrive Client for Linux from source
-Refer to [docs/INSTALL.md](https://github.com/abraunegg/onedrive/blob/master/docs/INSTALL.md)
+Refer to [docs/install.md](https://github.com/abraunegg/onedrive/blob/master/docs/install.md)

### Configuration and Usage
-Refer to [docs/USAGE.md](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md)
+Refer to [docs/usage.md](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md)

-### Configure OneDrive Business Shared Folders
-Refer to [docs/BusinessSharedFolders.md](https://github.com/abraunegg/onedrive/blob/master/docs/BusinessSharedFolders.md)
+### Configure OneDrive Business Shared Items
+Refer to [docs/business-shared-items.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-items.md)

### Configure SharePoint / Office 365 Shared Libraries (Business or Education)
-Refer to [docs/SharePoint-Shared-Libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/SharePoint-Shared-Libraries.md)
+Refer to [docs/sharepoint-libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/sharepoint-libraries.md)

### Configure National Cloud support
Refer to [docs/national-cloud-deployments.md](https://github.com/abraunegg/onedrive/blob/master/docs/national-cloud-deployments.md)

### Docker support
-Refer to [docs/Docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/Docker.md)
+Refer to [docs/docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/docker.md)

### Podman support
-Refer to 
[docs/Podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/Podman.md) +Refer to [docs/podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/podman.md) diff --git a/src/arsd/cgi.d b/src/arsd/cgi.d index 79f5feaad..741b57128 100644 --- a/src/arsd/cgi.d +++ b/src/arsd/cgi.d @@ -683,6 +683,7 @@ enum long defaultMaxContentLength = 5_000_000; public import std.string; public import std.stdio; public import std.conv; +import std.concurrency; import std.uri; import std.uni; import std.algorithm.comparison; @@ -763,7 +764,7 @@ class ConnectionClosedException : Exception { version(Windows) { // FIXME: ugly hack to solve stdin exception problems on Windows: // reading stdin results in StdioException (Bad file descriptor) -// this is probably due to http://d.puremagic.com/issues/show_bug.cgi?id=3425 +// this is probably due to https://issues.dlang.org/show_bug.cgi?id=3425 private struct stdin { struct ByChunk { // Replicates std.stdio.ByChunk private: @@ -1100,7 +1101,7 @@ class Cgi { const(ubyte)[] delegate() readdata = null, // finally, use this to do custom output if needed void delegate(const(ubyte)[]) _rawDataOutput = null, - // to flush teh custom output + // to flush the custom output void delegate() _flush = null ) { @@ -2226,7 +2227,7 @@ class Cgi { uri ~= "s"; uri ~= "://"; uri ~= host; - /+ // the host has the port so p sure this never needed, cgi on apache and embedded http all do the right hting now + /+ // the host has the port so p sure this never needed, cgi on apache and embedded http all do the right thing now version(none) if(!(!port || port == defaultPort)) { uri ~= ":"; @@ -2316,7 +2317,7 @@ class Cgi { /// This is like setResponseExpires, but it can be called multiple times. The setting most in the past is the one kept. /// If you have multiple functions, they all might call updateResponseExpires about their own return value. The program - /// output as a whole is as cacheable as the least cachable part in the chain. + /// output as a whole is as cacheable as the least cacheable part in the chain. /// setCache(false) always overrides this - it is, by definition, the strictest anti-cache statement available. If your site outputs sensitive user data, you should probably call setCache(false) when you do, to ensure no other functions will cache the content, as it may be a privacy risk. /// Conversely, setting here overrides setCache(true), since any expiration date is in the past of infinity. @@ -2328,7 +2329,7 @@ class Cgi { } /* - /// Set to true if you want the result to be cached publically - that is, is the content shared? + /// Set to true if you want the result to be cached publicly - that is, is the content shared? /// Should generally be false if the user is logged in. It assumes private cache only. /// setCache(true) also turns on public caching, and setCache(false) sets to private. void setPublicCaching(bool allowPublicCaches) { @@ -3910,14 +3911,16 @@ struct RequestServer { If you want the forking worker process server, you do need to compile with the embedded_httpd_processes config though. 
+/ - void serveEmbeddedHttp(alias fun, CustomCgi = Cgi, long maxContentLength = defaultMaxContentLength)(ThisFor!fun _this) { + shared void serveEmbeddedHttp(alias fun, T, CustomCgi = Cgi, long maxContentLength = defaultMaxContentLength)(shared T _this) { globalStopFlag = false; static if(__traits(isStaticFunction, fun)) - alias funToUse = fun; + void funToUse(CustomCgi cgi) { + fun(_this, cgi); + } else void funToUse(CustomCgi cgi) { static if(__VERSION__ > 2097) - __traits(child, _this, fun)(cgi); + __traits(child, _inst_this, fun)(_inst_this, cgi); else static assert(0, "Not implemented in your compiler version!"); } auto manager = new ListeningConnectionManager(listeningHost, listeningPort, &doThreadHttpConnection!(CustomCgi, funToUse), null, useFork, numberOfThreads); @@ -6275,7 +6278,7 @@ ByChunkRange byChunk(BufferedInputRange ir, size_t atMost) { } version(cgi_with_websocket) { - // http://tools.ietf.org/html/rfc6455 + // https://tools.ietf.org/html/rfc6455 /** WEBSOCKET SUPPORT: @@ -7289,7 +7292,7 @@ private void serialize(T)(scope void delegate(scope ubyte[]) sink, T t) { } else static assert(0, T.stringof); } -// all may be stack buffers, so use cautio +// all may be stack buffers, so use caution private void deserialize(T)(scope ubyte[] delegate(int sz) get, scope void delegate(T) dg) { static if(is(T == struct)) { T t; @@ -10178,7 +10181,7 @@ struct Redirection { /++ Serves a class' methods, as a kind of low-state RPC over the web. To be used with [dispatcher]. - Usage of this function will add a dependency on [arsd.dom] and [arsd.jsvar] unless you have overriden + Usage of this function will add a dependency on [arsd.dom] and [arsd.jsvar] unless you have overridden the presenter in the dispatcher. FIXME: explain this better @@ -10618,7 +10621,7 @@ template urlNamesForMethod(alias method, string default_) { enum AccessCheck { allowed, denied, - nonExistant, + nonExistent, } enum Operation { @@ -11807,4 +11810,4 @@ Authors: Adam D. Ruppe Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -*/ \ No newline at end of file +*/ diff --git a/src/clientSideFiltering.d b/src/clientSideFiltering.d new file mode 100644 index 000000000..4040c2669 --- /dev/null +++ b/src/clientSideFiltering.d @@ -0,0 +1,385 @@ +// What is this module called? +module clientSideFiltering; + +// What does this module require to function? +import std.algorithm; +import std.array; +import std.file; +import std.path; +import std.regex; +import std.stdio; +import std.string; +import std.conv; + +// What other modules that we have created do we need to import? 
+import config; +import util; +import log; + +class ClientSideFiltering { + // Class variables + ApplicationConfig appConfig; + string[] paths; + Regex!char fileMask; + Regex!char directoryMask; + bool skipDirStrictMatch = false; + bool skipDotfiles = false; + + this(ApplicationConfig appConfig) { + // Configure the class variable to consume the application configuration + this.appConfig = appConfig; + } + + ~this() { + object.destroy(appConfig); + object.destroy(paths); + object.destroy(fileMask); + object.destroy(directoryMask); + } + + // Initialise the required items + bool initialise() { + // Log what is being done + addLogEntry("Configuring Client Side Filtering (Selective Sync)", ["debug"]); + + // Load the sync_list file if it exists + if (exists(appConfig.syncListFilePath)){ + loadSyncList(appConfig.syncListFilePath); + } + + // Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries + // Handle skip_dir configuration in config file + addLogEntry("Configuring skip_dir ...", ["debug"]); + addLogEntry("skip_dir: " ~ to!string(appConfig.getValueString("skip_dir")), ["debug"]); + setDirMask(appConfig.getValueString("skip_dir")); + + // Was --skip-dir-strict-match configured? + addLogEntry("Configuring skip_dir_strict_match ...", ["debug"]); + addLogEntry("skip_dir_strict_match: " ~ to!string(appConfig.getValueBool("skip_dir_strict_match")), ["debug"]); + if (appConfig.getValueBool("skip_dir_strict_match")) { + setSkipDirStrictMatch(); + } + + // Was --skip-dot-files configured? + addLogEntry("Configuring skip_dotfiles ...", ["debug"]); + addLogEntry("skip_dotfiles: " ~ to!string(appConfig.getValueBool("skip_dotfiles")), ["debug"]); + if (appConfig.getValueBool("skip_dotfiles")) { + setSkipDotfiles(); + } + + // Handle skip_file configuration in config file + addLogEntry("Configuring skip_file ...", ["debug"]); + + // Validate skip_file to ensure that this does not contain an invalid configuration + // Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process. 
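+		// Note: 'skip_file' is a '|' separated list of patterns; each entry is
+		// validated individually below so that a single invalid entry cannot
+		// hide amongst otherwise valid patterns.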
+ foreach(entry; appConfig.getValueString("skip_file").split("|")){ + if (entry == ".*") { + // invalid entry element detected + addLogEntry("ERROR: Invalid skip_file entry '.*' detected"); + return false; + } + } + + // All skip_file entries are valid + addLogEntry("skip_file: " ~ appConfig.getValueString("skip_file"), ["debug"]); + setFileMask(appConfig.getValueString("skip_file")); + + // All configured OK + return true; + } + + // Shutdown components + void shutdown() { + object.destroy(appConfig); + object.destroy(paths); + object.destroy(fileMask); + object.destroy(directoryMask); + } + + // Load sync_list file if it exists + void loadSyncList(string filepath) { + // open file as read only + auto file = File(filepath, "r"); + auto range = file.byLine(); + foreach (line; range) { + // Skip comments in file + if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; + paths ~= buildNormalizedPath(line); + } + file.close(); + } + + // Configure the regex that will be used for 'skip_file' + void setFileMask(const(char)[] mask) { + fileMask = wild2regex(mask); + addLogEntry("Selective Sync File Mask: " ~ to!string(fileMask), ["debug"]); + } + + // Configure the regex that will be used for 'skip_dir' + void setDirMask(const(char)[] dirmask) { + directoryMask = wild2regex(dirmask); + addLogEntry("Selective Sync Directory Mask: " ~ to!string(directoryMask), ["debug"]); + } + + // Configure skipDirStrictMatch if function is called + // By default, skipDirStrictMatch = false; + void setSkipDirStrictMatch() { + skipDirStrictMatch = true; + } + + // Configure skipDotfiles if function is called + // By default, skipDotfiles = false; + void setSkipDotfiles() { + skipDotfiles = true; + } + + // return value of skipDotfiles + bool getSkipDotfiles() { + return skipDotfiles; + } + + // Match against sync_list only + bool isPathExcludedViaSyncList(string path) { + // Debug output that we are performing a 'sync_list' inclusion / exclusion test + return isPathExcluded(path, paths); + } + + // config file skip_dir parameter + bool isDirNameExcluded(string name) { + // Does the directory name match skip_dir config entry? + // Returns true if the name matches a skip_dir config entry + // Returns false if no match + addLogEntry("skip_dir evaluation for: " ~ name, ["debug"]); + + // Try full path match first + if (!name.matchFirst(directoryMask).empty) { + addLogEntry("'!name.matchFirst(directoryMask).empty' returned true = matched", ["debug"]); + return true; + } else { + // Do we check the base name as well? + if (!skipDirStrictMatch) { + addLogEntry("No Strict Matching Enforced", ["debug"]); + + // Test the entire path working backwards from child + string path = buildNormalizedPath(name); + string checkPath; + foreach_reverse(directory; pathSplitter(path)) { + if (directory != "/") { + // This will add a leading '/' but that needs to be stripped to check + checkPath = "/" ~ directory ~ checkPath; + if(!checkPath.strip('/').matchFirst(directoryMask).empty) { + addLogEntry("'!checkPath.matchFirst(directoryMask).empty' returned true = matched", ["debug"]); + return true; + } + } + } + } else { + // No match + addLogEntry("Strict Matching Enforced - No Match", ["debug"]); + } + } + // no match + return false; + } + + // config file skip_file parameter + bool isFileNameExcluded(string name) { + // Does the file name match skip_file config entry? 
+		// Returns true if the name matches a skip_file config entry
+		// Returns false if no match
+		addLogEntry("skip_file evaluation for: " ~ name, ["debug"]);
+		
+		// Try full path match first
+		if (!name.matchFirst(fileMask).empty) {
+			return true;
+		} else {
+			// check just the file name
+			string filename = baseName(name);
+			if(!filename.matchFirst(fileMask).empty) {
+				return true;
+			}
+		}
+		// no match
+		return false;
+	}
+	
+	// test if the given path is not included in the allowed paths
+	// if there are no allowed paths always return false
+	private bool isPathExcluded(string path, string[] allowedPaths) {
+		// function variables
+		bool exclude = false;
+		bool excludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry
+		bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry
+		bool finalResult = true; // will get updated to false, if pattern match to sync_list entry
+		int offset;
+		string wildcard = "*";
+		
+		// always allow the root
+		if (path == ".") return false;
+		// if there are no allowed paths always return false
+		if (allowedPaths.empty) return false;
+		path = buildNormalizedPath(path);
+		addLogEntry("Evaluation against 'sync_list' for this path: " ~ path, ["debug"]);
+		addLogEntry("[S]exclude = " ~ to!string(exclude), ["debug"]);
+		addLogEntry("[S]excludeDirectMatch = " ~ to!string(excludeDirectMatch), ["debug"]);
+		addLogEntry("[S]excludeMatched = " ~ to!string(excludeMatched), ["debug"]);
+		
+		// unless path is an exact match, entire sync_list entries need to be processed to ensure
+		// negative matches are also correctly detected
+		foreach (allowedPath; allowedPaths) {
+			// is this an inclusion path or finer grained exclusion?
+			switch (allowedPath[0]) {
+				case '-':
+					// sync_list path starts with '-', this user wants to exclude this path
+					exclude = true;
+					// If the sync_list entry starts with '-/' offset needs to be 2, else 1
+					if (startsWith(allowedPath, "-/")){
+						// Offset needs to be 2
+						offset = 2;
+					} else {
+						// Offset needs to be 1
+						offset = 1;
+					}
+					break;
+				case '!':
+					// sync_list path starts with '!', this user wants to exclude this path
+					exclude = true;
+					// If the sync_list entry starts with '!/' offset needs to be 2, else 1
+					if (startsWith(allowedPath, "!/")){
+						// Offset needs to be 2
+						offset = 2;
+					} else {
+						// Offset needs to be 1
+						offset = 1;
+					}
+					break;
+				case '/':
+					// sync_list path starts with '/', this user wants to include this path
+					// but a '/' at the start causes matching issues, so use the offset for comparison
+					exclude = false;
+					offset = 1;
+					break;
+				
+				default:
+					// no negative pattern, default is to not exclude
+					exclude = false;
+					offset = 0;
+			}
+			
+			// What are we comparing against?
+			addLogEntry("Evaluation against 'sync_list' entry: " ~ allowedPath, ["debug"]);
+			
+			// Generate the common prefix from the path vs the allowed path
+			auto comm = commonPrefix(path, allowedPath[offset..$]);
+			
+			// Is the path an exact match of the allowed path?
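+			// (comm.length == path.length means the whole of 'path' forms the
+			// leading part of this 'sync_list' entry - a potential exact match)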
+			if (comm.length == path.length) {
+				// we have a potential exact match
+				// strip any potential '/*' from the allowed path, to avoid a potential lesser common match
+				string strippedAllowedPath = strip(allowedPath[offset..$], "/*");
+				
+				if (path == strippedAllowedPath) {
+					// we have an exact path match
+					addLogEntry("Exact path match with 'sync_list' entry", ["debug"]);
+					
+					if (!exclude) {
+						addLogEntry("Evaluation against 'sync_list' result: direct match", ["debug"]);
+						finalResult = false;
+						// direct match, break and go sync
+						break;
+					} else {
+						addLogEntry("Evaluation against 'sync_list' result: direct match - path to be excluded", ["debug"]);
+						
+						// do not set excludeMatched = true here, otherwise parental path also gets excluded
+						// flag excludeDirectMatch so that a 'wildcard match' will not override this exclude
+						excludeDirectMatch = true;
+						// final result
+						finalResult = true;
+					}
+				} else {
+					// no exact path match, but something common does match
+					addLogEntry("Something 'common' matches the 'sync_list' input path", ["debug"]);
+					
+					auto splitAllowedPaths = pathSplitter(strippedAllowedPath);
+					string pathToEvaluate = "";
+					foreach(base; splitAllowedPaths) {
+						pathToEvaluate ~= base;
+						if (path == pathToEvaluate) {
+							// The input path matches what we want to evaluate against as a direct match
+							if (!exclude) {
+								addLogEntry("Evaluation against 'sync_list' result: direct match for parental path item", ["debug"]);
+								finalResult = false;
+								// direct match, break and go sync
+								break;
+							} else {
+								addLogEntry("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded", ["debug"]);
+								finalResult = true;
+								// do not set excludeMatched = true here, otherwise parental path also gets excluded
+							}
+						}
+						pathToEvaluate ~= dirSeparator;
+					}
+				}
+			}
+			
+			// Is the path a subitem/sub-folder of the allowed path?
+			if (comm.length == allowedPath[offset..$].length) {
+				// The given path is potentially a subitem of an allowed path
+				// We want to capture sub-folders / files of allowed paths here, but not explicitly match other items
+				// if there is no wildcard
+				auto subItemPathCheck = allowedPath[offset..$] ~ "/";
+				if (canFind(path, subItemPathCheck)) {
+					// The 'path' includes the allowed path, and is 'most likely' a sub-path item
+					if (!exclude) {
+						addLogEntry("Evaluation against 'sync_list' result: parental path match", ["debug"]);
+						finalResult = false;
+						// parental path matches, break and go sync
+						break;
+					} else {
+						addLogEntry("Evaluation against 'sync_list' result: parental path match but must be excluded", ["debug"]);
+						finalResult = true;
+						excludeMatched = true;
+					}
+				}
+			}
+			
+			// Does the allowed path contain a wildcard? (*)
+			if (canFind(allowedPath[offset..$], wildcard)) {
+				// allowed path contains a wildcard
+				// manually replace '*' with '.*' to be compatible with regex
+				string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*");
+				auto allowedMask = regex(regexCompatiblePath);
+				if (matchAll(path, allowedMask)) {
+					// regex wildcard evaluation matches
+					// if we have a prior pattern match for an exclude, excludeMatched = true
+					if (!exclude && !excludeMatched && !excludeDirectMatch) {
+						// nothing triggered an exclusion before evaluation against wildcard match attempt
+						addLogEntry("Evaluation against 'sync_list' result: wildcard pattern match", ["debug"]);
+						finalResult = false;
+					} else {
+						addLogEntry("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded", ["debug"]);
+						finalResult = true;
+						excludeMatched = true;
+					}
+				}
+			}
+		}
+		// Interim results
+		addLogEntry("[F]exclude = " ~ to!string(exclude), ["debug"]);
+		addLogEntry("[F]excludeDirectMatch = " ~ to!string(excludeDirectMatch), ["debug"]);
+		addLogEntry("[F]excludeMatched = " ~ to!string(excludeMatched), ["debug"]);
+		
+		// If exclude or excludeMatched is true, then finalResult has to be true
+		if ((exclude) || (excludeMatched) || (excludeDirectMatch)) {
+			finalResult = true;
+		}
+		
+		// results
+		if (finalResult) {
+			addLogEntry("Evaluation against 'sync_list' final result: EXCLUDED", ["debug"]);
+		} else {
+			addLogEntry("Evaluation against 'sync_list' final result: included for sync", ["debug"]);
+		}
+		return finalResult;
+	}
+}
\ No newline at end of file
diff --git a/src/config.d b/src/config.d
index 8c9ba2ff9..8172d3bb6 100644
--- a/src/config.d
+++ b/src/config.d
@@ -1,130 +1,217 @@
+// What is this module called?
+module config;
+
+// What does this module require to function?
 import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit;
-import std.file, std.string, std.regex, std.stdio, std.process, std.algorithm.searching, std.getopt, std.conv, std.path;
+import std.stdio;
+import std.process;
+import std.regex;
+import std.string;
+import std.algorithm.searching;
 import std.algorithm.sorting: sort;
-import selective;
-static import log;
-
-final class Config
-{
-	// application defaults
-	public string defaultSyncDir = "~/OneDrive";
-	public string defaultSkipFile = "~*|.~*|*.tmp";
-	public string defaultSkipDir = "";
-	public string defaultLogFileDir = "/var/log/onedrive/";
-	// application set items
-	public string refreshTokenFilePath = "";
-	public string deltaLinkFilePath = "";
-	public string databaseFilePath = "";
-	public string databaseFilePathDryRun = "";
-	public string uploadStateFilePath = "";
-	public string syncListFilePath = "";
-	public string homePath = "";
-	public string configDirName = "";
-	public string systemConfigDirName = "";
-	public string configFileSyncDir = "";
-	public string configFileSkipFile = "";
-	public string configFileSkipDir = "";
-	public string businessSharedFolderFilePath = "";
-	private string userConfigFilePath = "";
-	private string systemConfigFilePath = "";
-	// was the application just authorised - paste of response uri
-	public bool applicationAuthorizeResponseUri = false;
-	// hashmap for the values found in the user config file
-	// ARGGGG D is stupid and cannot make hashmap initializations!!!
-	// private string[string] foobar = [ "aa": "bb" ] does NOT work!!!
- private string[string] stringValues; - private bool[string] boolValues; - private long[string] longValues; - // Compile time regex - this does not change - public auto configRegex = ctRegex!(`^(\w+)\s*=\s*"(.*)"\s*$`); - // Default directory permission mode - public long defaultDirectoryPermissionMode = 700; - public int configuredDirectoryPermissionMode; - // Default file permission mode - public long defaultFilePermissionMode = 600; - public int configuredFilePermissionMode; - - // Bring in v2.5.0 config items +import std.file; +import std.conv; +import std.path; +import std.getopt; +import std.format; +import std.ascii; +import std.datetime; + +// What other modules that we have created do we need to import? +import log; +import util; + +class ApplicationConfig { + // Application default values - these do not change + // - Compile time regex + immutable auto configRegex = ctRegex!(`^(\w+)\s*=\s*"(.*)"\s*$`); + // - Default directory to store data + immutable string defaultSyncDir = "~/OneDrive"; + // - Default Directory Permissions + immutable long defaultDirectoryPermissionMode = 700; + // - Default File Permissions + immutable long defaultFilePermissionMode = 600; + // - Default types of files to skip + // v2.0.x - 2.4.x: ~*|.~*|*.tmp + // v2.5.x : ~*|.~*|*.tmp|*.swp|*.partial + immutable string defaultSkipFile = "~*|.~*|*.tmp|*.swp|*.partial"; + // - Default directories to skip (default is skip none) + immutable string defaultSkipDir = ""; + // - Default application logging directory + immutable string defaultLogFileDir = "/var/log/onedrive"; + // - Default configuration directory + immutable string defaultConfigDirName = "~/.config/onedrive"; + // - Default 'OneDrive Business Shared Files' Folder Name + immutable string defaultBusinessSharedFilesDirectoryName = "Files Shared With Me"; + + // Microsoft Requirements + // - Default Application ID (abraunegg) + immutable string defaultApplicationId = "d50ca740-c83f-4d1b-b616-12c519384f0c"; + // - Microsoft User Agent ISV Tag + immutable string isvTag = "ISV"; + // - Microsoft User Agent Company name + immutable string companyName = "abraunegg"; + // - Microsoft Application name as per Microsoft Azure application registration + immutable string appTitle = "OneDrive Client for Linux"; + // Comply with OneDrive traffic decoration requirements + // https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online + // - Identify as ISV and include Company Name, App Name separated by a pipe character and then adding Version number separated with a slash character + immutable string defaultUserAgent = isvTag ~ "|" ~ companyName ~ "|" ~ appTitle ~ "/" ~ strip(import("version")); // HTTP Struct items, used for configuring HTTP() // Curl Timeout Handling // libcurl dns_cache_timeout timeout - immutable int defaultDnsTimeout = 60; + immutable int defaultDnsTimeout = 60; // in seconds // Connect timeout for HTTP|HTTPS connections - immutable int defaultConnectTimeout = 10; - // With the following settings we force - // - if there is no data flow for 10min, abort - // - if the download time for one item exceeds 1h, abort - // - // Timeout for activity on connection - // this translates into Curl's CURLOPT_LOW_SPEED_TIME - // which says: - // It contains the time in number seconds that the - // transfer speed should be below the CURLOPT_LOW_SPEED_LIMIT - // for the library to consider it too slow and abort. 
- immutable int defaultDataTimeout = 600; + // Controls CURLOPT_CONNECTTIMEOUT + immutable int defaultConnectTimeout = 10; // in seconds + // Default data timeout for HTTP operations + // curl.d has a default of: _defaultDataTimeout = dur!"minutes"(2); + immutable int defaultDataTimeout = 60; // in seconds // Maximum time any operation is allowed to take // This includes dns resolution, connecting, data transfer, etc. - immutable int defaultOperationTimeout = 3600; - // Specify how many redirects should be allowed - immutable int defaultMaxRedirects = 5; + // Controls CURLOPT_TIMEOUT + immutable int defaultOperationTimeout = 3600; // in seconds // Specify what IP protocol version should be used when communicating with OneDrive immutable int defaultIpProtocol = 0; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only + // Specify how many redirects should be allowed + immutable int defaultMaxRedirects = 5; + + // Azure Active Directory & Graph Explorer Endpoints + // - Global & Default + immutable string globalAuthEndpoint = "https://login.microsoftonline.com"; + immutable string globalGraphEndpoint = "https://graph.microsoft.com"; + // - US Government L4 + immutable string usl4AuthEndpoint = "https://login.microsoftonline.us"; + immutable string usl4GraphEndpoint = "https://graph.microsoft.us"; + // - US Government L5 + immutable string usl5AuthEndpoint = "https://login.microsoftonline.us"; + immutable string usl5GraphEndpoint = "https://dod-graph.microsoft.us"; + // - Germany + immutable string deAuthEndpoint = "https://login.microsoftonline.de"; + immutable string deGraphEndpoint = "https://graph.microsoft.de"; + // - China + immutable string cnAuthEndpoint = "https://login.chinacloudapi.cn"; + immutable string cnGraphEndpoint = "https://microsoftgraph.chinacloudapi.cn"; + + // Application Version + immutable string applicationVersion = "onedrive " ~ strip(import("version")); + + // Application items that depend on application run-time environment, thus cannot be immutable + // Public variables + + // Logging output + bool verboseLogging = false; + bool debugLogging = false; + long verbosityCount = 0; + + // Was the application just authorised - paste of response uri + bool applicationAuthorizeResponseUri = false; + + // Store the refreshToken for use within the application + const(char)[] refreshToken; + // Store the current accessToken for use within the application + const(char)[] accessToken; + // Store the 'refresh_token' file path + string refreshTokenFilePath = ""; + // Store the accessTokenExpiration for use within the application + SysTime accessTokenExpiration; + // Store the 'session_upload.CRC32-HASH' file path + string uploadSessionFilePath = ""; + // API initialisation flags + bool apiWasInitialised = false; + bool syncEngineWasInitialised = false; + // Important Account Details + string accountType; + string defaultDriveId; + string defaultRootId; + + // Sync Operations + bool fullScanTrueUpRequired = false; + bool suppressLoggingOutput = false; + + // Number of concurrent threads when downloading and uploading data + ulong defaultConcurrentThreads = 8; + + // All application run-time paths are formulated from this as a set of defaults + // - What is the home path of the actual 'user' that is running the application + string defaultHomePath = ""; + // - What is the config path for the application. 
By default, this is ~/.config/onedrive but can be overridden by using --confdir
+	string configDirName = defaultConfigDirName;
+	// - In case we have to use a system config directory such as '/etc/onedrive' or similar, store that path in this variable
+	private string systemConfigDirName = "";
+	// - Store the configured converted octal value for directory permissions
+	private int configuredDirectoryPermissionMode;
+	// - Store the configured converted octal value for file permissions
+	private int configuredFilePermissionMode;
+	// - Store the 'delta_link' file path
+	private string deltaLinkFilePath = "";
+	// - Store the 'items.sqlite3' file path
+	string databaseFilePath = "";
+	// - Store the 'items-dryrun.sqlite3' file path
+	string databaseFilePathDryRun = "";
+	// - Store the user 'config' file path
+	private string userConfigFilePath = "";
+	// - Store the system 'config' file path
+	private string systemConfigFilePath = "";
+	// - What is the 'config' file path that will be used?
+	private string applicableConfigFilePath = "";
+	// - Store the 'sync_list' file path
+	string syncListFilePath = "";
+	
+	// OneDrive Business Shared File handling - what directory will be used?
+	string configuredBusinessSharedFilesDirectoryName = "";
-	
-	this(string confdirOption)
-	{
-		// default configuration - entries in config file ~/.config/onedrive/config
-		// an entry here means it can be set via the config file if there is a coresponding entry, read from config and set via update_from_args()
-		stringValues["sync_dir"] = defaultSyncDir;
-		stringValues["skip_file"] = defaultSkipFile;
-		stringValues["skip_dir"] = defaultSkipDir;
+	// Hash files so that we can detect when the configuration has changed, in items that will require a --resync
+	private string configHashFile = "";
+	private string configBackupFile = "";
+	private string syncListHashFile = "";
+	
+	// Store the actual 'runtime' hash
+	private string currentConfigHash = "";
+	private string currentSyncListHash = "";
+	
+	// Store the previous config files hash values (file contents)
+	private string previousConfigHash = "";
+	private string previousSyncListHash = "";
+	
+	// Store items that come in from the 'config' file, otherwise these need to be set to the defaults
+	private string configFileSyncDir = defaultSyncDir;
+	private string configFileSkipFile = defaultSkipFile;
+	private string configFileSkipDir = ""; // Default here is no directories are skipped
+	private string configFileDriveId = ""; // Default here is that no drive id is specified
+	private bool configFileSkipDotfiles = false;
+	private bool configFileSkipSymbolicLinks = false;
+	private bool configFileSyncBusinessSharedItems = false;
+	
+	// File permission values (set via initialise function)
+	private int convertedPermissionValue;
+	
+	// Array of values that are the actual application runtime configuration
+	// The values stored in these arrays are the actual application configuration which can then be accessed by getValue & setValue
+	string[string] stringValues;
+	long[string] longValues;
+	bool[string] boolValues;
+	bool shellEnvironmentSet = false;
+	
+	// Initialise the application configuration
+	bool initialise(string confdirOption, bool helpRequested) {
+		
+		// Default runtime configuration - entries in config file ~/.config/onedrive/config or derived from variables above
+		// An entry here means it can be set via the config file if there is a corresponding entry, read from config and set via update_from_args()
+		// The below becomes the 'default' application
configuration before config file and/or cli options are overlaid on top + + // - Set the required default values + stringValues["application_id"] = defaultApplicationId; stringValues["log_dir"] = defaultLogFileDir; + stringValues["skip_dir"] = defaultSkipDir; + stringValues["skip_file"] = defaultSkipFile; + stringValues["sync_dir"] = defaultSyncDir; + stringValues["user_agent"] = defaultUserAgent; + // - The 'drive_id' is used when we specify a specific OneDrive ID when attempting to sync Shared Folders and SharePoint items stringValues["drive_id"] = ""; - stringValues["user_agent"] = ""; - boolValues["upload_only"] = false; - boolValues["check_nomount"] = false; - boolValues["check_nosync"] = false; - boolValues["download_only"] = false; - boolValues["disable_notifications"] = false; - boolValues["disable_download_validation"] = false; - boolValues["disable_upload_validation"] = false; - boolValues["enable_logging"] = false; - boolValues["force_http_11"] = false; - boolValues["local_first"] = false; - boolValues["no_remote_delete"] = false; - boolValues["skip_symlinks"] = false; - boolValues["debug_https"] = false; - boolValues["skip_dotfiles"] = false; - boolValues["dry_run"] = false; - boolValues["sync_root_files"] = false; - longValues["verbose"] = log.verbose; // might be initialized by the first getopt call! - // The amount of time (seconds) between monitor sync loops - longValues["monitor_interval"] = 300; - longValues["skip_size"] = 0; - longValues["min_notify_changes"] = 5; - longValues["monitor_log_frequency"] = 6; - // Number of N sync runs before performing a full local scan of sync_dir - // By default 12 which means every ~60 minutes a full disk scan of sync_dir will occur - // 'monitor_interval' * 'monitor_fullscan_frequency' = 3600 = 1 hour - longValues["monitor_fullscan_frequency"] = 12; - // Number of children in a path that is locally removed which will be classified as a 'big data delete' - longValues["classify_as_big_delete"] = 1000; - // Delete source after successful transfer - boolValues["remove_source_files"] = false; - // Strict matching for skip_dir - boolValues["skip_dir_strict_match"] = false; - // Allow for a custom Client ID / Application ID to be used to replace the inbuilt default - // This is a config file option ONLY - stringValues["application_id"] = ""; - // allow for resync to be set via config file - boolValues["resync"] = false; - // resync now needs to be acknowledged based on the 'risk' of using it - boolValues["resync_auth"] = false; - // Ignore data safety checks and overwrite local data rather than preserve & rename - // This is a config file option ONLY - boolValues["bypass_data_preservation"] = false; // Support National Azure AD endpoints as per https://docs.microsoft.com/en-us/graph/deployments // By default, if empty, use standard Azure AD URL's // Will support the following options: @@ -141,130 +228,204 @@ final class Config // AD Endpoint: https://login.chinacloudapi.cn // Graph Endpoint: https://microsoftgraph.chinacloudapi.cn stringValues["azure_ad_endpoint"] = ""; + // Support single-tenant applications that are not able to use the "common" multiplexer - stringValues["azure_tenant_id"] = "common"; - // Allow enable / disable of the syncing of OneDrive Business Shared Folders via configuration file - boolValues["sync_business_shared_folders"] = false; - // Configure the default folder permission attributes for newly created folders + stringValues["azure_tenant_id"] = ""; + // - Store how many times was --verbose added + 
longValues["verbose"] = verbosityCount; + // - The amount of time (seconds) between monitor sync loops + longValues["monitor_interval"] = 300; + // - What size of file should be skipped? + longValues["skip_size"] = 0; + // - How many 'loops' when using --monitor, before we print out high frequency recurring items? + longValues["monitor_log_frequency"] = 12; + // - Number of N sync runs before performing a full local scan of sync_dir + // By default 12 which means every ~60 minutes a full disk scan of sync_dir will occur + // 'monitor_interval' * 'monitor_fullscan_frequency' = 3600 = 1 hour + longValues["monitor_fullscan_frequency"] = 12; + // - Number of children in a path that is locally removed which will be classified as a 'big data delete' + longValues["classify_as_big_delete"] = 1000; + // - Configure the default folder permission attributes for newly created folders longValues["sync_dir_permissions"] = defaultDirectoryPermissionMode; - // Configure the default file permission attributes for newly created file + // - Configure the default file permission attributes for newly created file longValues["sync_file_permissions"] = defaultFilePermissionMode; - // Configure download / upload rate limits + // - Configure download / upload rate limits longValues["rate_limit"] = 0; - // To ensure we do not fill up the load disk, how much disk space should be reserved by default + // - To ensure we do not fill up the load disk, how much disk space should be reserved by default longValues["space_reservation"] = 50 * 2^^20; // 50 MB as Bytes - // Webhook options - boolValues["webhook_enabled"] = false; - stringValues["webhook_public_url"] = ""; - stringValues["webhook_listening_host"] = ""; - longValues["webhook_listening_port"] = 8888; - longValues["webhook_expiration_interval"] = 3600 * 24; - longValues["webhook_renewal_interval"] = 3600 * 12; - // Log to application output running configuration values - boolValues["display_running_config"] = false; - // Configure read-only authentication scope - boolValues["read_only_auth_scope"] = false; - // Flag to cleanup local files when using --download-only - boolValues["cleanup_local_files"] = false; - - // DEVELOPER OPTIONS - // display_memory = true | false - // - It may be desirable to display the memory usage of the application to assist with diagnosing memory issues with the application - // - This is especially beneficial when debugging or performing memory tests with Valgrind - boolValues["display_memory"] = false; - // monitor_max_loop = long value - // - It may be desirable to, when running in monitor mode, force monitor mode to 'quit' after X number of loops - // - This is especially beneficial when debugging or performing memory tests with Valgrind - longValues["monitor_max_loop"] = 0; - // display_sync_options = true | false - // - It may be desirable to see what options are being passed in to performSync() without enabling the full verbose debug logging - boolValues["display_sync_options"] = false; - // force_children_scan = true | false - // - Force client to use /children rather than /delta to query changes on OneDrive - // - This option flags nationalCloudDeployment as true, forcing the client to act like it is using a National Cloud Deployment - boolValues["force_children_scan"] = false; - // display_processing_time = true | false - // - Enabling this option will add function processing times to the console output - // - This then enables tracking of where the application is spending most amount of time when processing data when users 
have questions re performance - boolValues["display_processing_time"] = false; // HTTPS & CURL Operation Settings // - Maximum time an operation is allowed to take - // This includes dns resolution, connecting, data transfer, etc. + // This includes dns resolution, connecting, data transfer, etc - controls CURLOPT_TIMEOUT + // CURLOPT_TIMEOUT: This option sets the maximum time in seconds that you allow the libcurl transfer operation to take. + // This is useful for controlling how long a specific transfer should take before it is considered too slow and aborted. However, it does not directly control the keep-alive time of a socket. longValues["operation_timeout"] = defaultOperationTimeout; // libcurl dns_cache_timeout timeout longValues["dns_timeout"] = defaultDnsTimeout; - // Timeout for HTTPS connections + // Timeout for HTTPS connections - controls CURLOPT_CONNECTTIMEOUT + // CURLOPT_CONNECTTIMEOUT: This option sets the timeout, in seconds, for the connection phase. It is the maximum time allowed for the connection to be established. longValues["connect_timeout"] = defaultConnectTimeout; // Timeout for activity on a HTTPS connection longValues["data_timeout"] = defaultDataTimeout; // What IP protocol version should be used when communicating with OneDrive longValues["ip_protocol_version"] = defaultIpProtocol; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only - + + // Number of concurrent threads + longValues["threads"] = defaultConcurrentThreads; // Default is 8, user can increase to max of 16 or decrease + + // - Do we wish to upload only? + boolValues["upload_only"] = false; + // - Do we need to check for the .nomount file on the mount point? + boolValues["check_nomount"] = false; + // - Do we need to check for the .nosync file anywhere? + boolValues["check_nosync"] = false; + // - Do we wish to download only? + boolValues["download_only"] = false; + // - Do we disable notifications? + boolValues["disable_notifications"] = false; + // - Do we bypass all the download validation? + // This is critically important not to disable, but because of SharePoint 'feature' can be highly desirable to enable + boolValues["disable_download_validation"] = false; + // - Do we bypass all the upload validation? + // This is critically important not to disable, but because of SharePoint 'feature' can be highly desirable to enable + boolValues["disable_upload_validation"] = false; + // - Do we enable logging? + boolValues["enable_logging"] = false; + // - Do we force HTTP 1.1 for connections to the OneDrive API + // By default we use the curl library default, which should be HTTP2 for most operations governed by the OneDrive API + boolValues["force_http_11"] = false; + // - Do we treat the local file system as the source of truth for our data? + boolValues["local_first"] = false; + // - Do we ignore local file deletes, so that all files are retained online? + boolValues["no_remote_delete"] = false; + // - Do we skip symbolic links? + boolValues["skip_symlinks"] = false; + // - Do we enable debugging for all HTTPS flows. Critically important for debugging API issues. + boolValues["debug_https"] = false; + // - Do we skip .files and .folders? + boolValues["skip_dotfiles"] = false; + // - Do we perform a 'dry-run' with no local or remote changes actually being performed? + boolValues["dry_run"] = false; + // - Do we sync all the files in the 'sync_dir' root? + boolValues["sync_root_files"] = false; + // - Do we delete source after successful transfer? 
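+		// (set via the --remove-source-files CLI option or the config file)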
+ boolValues["remove_source_files"] = false; + // - Do we perform strict matching for skip_dir? + boolValues["skip_dir_strict_match"] = false; + // - Do we perform a --resync? + boolValues["resync"] = false; + // - resync now needs to be acknowledged based on the 'risk' of using it + boolValues["resync_auth"] = false; + // - Ignore data safety checks and overwrite local data rather than preserve & rename + // This is a config file option ONLY + boolValues["bypass_data_preservation"] = false; + // - Allow enable / disable of the syncing of OneDrive Business Shared items (files & folders) via configuration file + boolValues["sync_business_shared_items"] = false; + // - Log to application output running configuration values + boolValues["display_running_config"] = false; + // - Configure read-only authentication scope + boolValues["read_only_auth_scope"] = false; + // - Flag to cleanup local files when using --download-only + boolValues["cleanup_local_files"] = false; + + // Webhook Feature Options + boolValues["webhook_enabled"] = false; + stringValues["webhook_public_url"] = ""; + stringValues["webhook_listening_host"] = ""; + longValues["webhook_listening_port"] = 8888; + longValues["webhook_expiration_interval"] = 600; + longValues["webhook_renewal_interval"] = 300; + longValues["webhook_retry_interval"] = 60; + // EXPAND USERS HOME DIRECTORY // Determine the users home directory. // Need to avoid using ~ here as expandTilde() below does not interpret correctly when running under init.d or systemd scripts // Check for HOME environment variable if (environment.get("HOME") != ""){ // Use HOME environment variable - log.vdebug("homePath: HOME environment variable set"); - homePath = environment.get("HOME"); + addLogEntry("runtime_environment: HOME environment variable detected, expansion of '~' should be possible", ["debug"]); + defaultHomePath = environment.get("HOME"); + shellEnvironmentSet = true; } else { if ((environment.get("SHELL") == "") && (environment.get("USER") == "")){ // No shell is set or username - observed case when running as systemd service under CentOS 7.x - log.vdebug("homePath: WARNING - no HOME environment variable set"); - log.vdebug("homePath: WARNING - no SHELL environment variable set"); - log.vdebug("homePath: WARNING - no USER environment variable set"); - homePath = "/root"; + addLogEntry("runtime_environment: No HOME, SHELL or USER environment variable configuration detected. 
Expansion of '~' not possible", ["debug"]);
+				defaultHomePath = "/root";
+				shellEnvironmentSet = false;
 			} else {
 				// A shell & valid user is set, but no HOME is set, use ~ which can be expanded
-				log.vdebug("homePath: WARNING - no HOME environment variable set");
-				homePath = "~";
+				addLogEntry("runtime_environment: SHELL and USER environment variable detected, expansion of '~' should be possible", ["debug"]);
+				defaultHomePath = "~";
+				shellEnvironmentSet = true;
 			}
 		}
-		
-		// Output homePath calculation
-		log.vdebug("homePath: ", homePath);
-		
-		// Determine the correct configuration directory to use
+		// outcome of setting defaultHomePath
+		addLogEntry("runtime_environment: Calculated defaultHomePath: " ~ defaultHomePath, ["debug"]);
+		
+		// DEVELOPER OPTIONS
+		// display_memory = true | false
+		// - It may be desirable to display the memory usage of the application to assist with diagnosing memory issues with the application
+		// - This is especially beneficial when debugging or performing memory tests with Valgrind
+		boolValues["display_memory"] = false;
+		// monitor_max_loop = long value
+		// - It may be desirable to, when running in monitor mode, force monitor mode to 'quit' after X number of loops
+		// - This is especially beneficial when debugging or performing memory tests with Valgrind
+		longValues["monitor_max_loop"] = 0;
+		// display_sync_options = true | false
+		// - It may be desirable to see what options are being passed in to performSync() without enabling the full verbose debug logging
+		boolValues["display_sync_options"] = false;
+		// force_children_scan = true | false
+		// - Force client to use /children rather than /delta to query changes on OneDrive
+		// - This option flags nationalCloudDeployment as true, forcing the client to act like it is using a National Cloud Deployment model
+		boolValues["force_children_scan"] = false;
+		// display_processing_time = true | false
+		// - Enabling this option will add function processing times to the console output
+		// - This then enables tracking of where the application is spending the most time when processing data when users have questions re performance
+		boolValues["display_processing_time"] = false;
+		
+		// Function variables
 		string configDirBase;
 		string systemConfigDirBase;
-		if (confdirOption != "") {
+		bool configurationInitialised = false;
+		
+		// Initialise the application configuration, using the --confdir option if one was passed in
+		if (!confdirOption.empty) {
 			// A CLI 'confdir' was passed in
-			// Clean up any stray " .. these should not be there ...
+			// Clean up any stray " .. these should not be there for correct process handling of the configuration option
 			confdirOption = strip(confdirOption,"\"");
-			log.vdebug("configDirName: CLI override to set configDirName to: ", confdirOption);
+			addLogEntry("configDirName: CLI override to set configDirName to: " ~ confdirOption, ["debug"]);
+			
 			if (canFind(confdirOption,"~")) {
 				// A ~ was found
-				log.vdebug("configDirName: A '~' was found in configDirName, using the calculated 'homePath' to replace '~'");
-				configDirName = homePath ~ strip(confdirOption,"~","~");
+				addLogEntry("configDirName: A '~' was found in configDirName, using the calculated 'defaultHomePath' to replace '~'", ["debug"]);
+				configDirName = defaultHomePath ~ strip(confdirOption,"~","~");
 			} else {
 				configDirName = confdirOption;
 			}
 		} else {
-			// Determine the base directory relative to which user specific configuration files should be stored.
+ // Determine the base directory relative to which user specific configuration files should be stored if (environment.get("XDG_CONFIG_HOME") != ""){ - log.vdebug("configDirBase: XDG_CONFIG_HOME environment variable set"); + addLogEntry("configDirBase: XDG_CONFIG_HOME environment variable set", ["debug"]); configDirBase = environment.get("XDG_CONFIG_HOME"); } else { // XDG_CONFIG_HOME does not exist on systems where X11 is not present - ie - headless systems / servers - log.vdebug("configDirBase: WARNING - no XDG_CONFIG_HOME environment variable set"); - configDirBase = homePath ~ "/.config"; + addLogEntry("configDirBase: WARNING - no XDG_CONFIG_HOME environment variable set", ["debug"]); + configDirBase = buildNormalizedPath(buildPath(defaultHomePath, ".config")); // Also set up a path to pre-shipped shared configs (which can be overridden by supplying a config file in userspace) systemConfigDirBase = "/etc"; } - + // Output configDirBase calculation - log.vdebug("configDirBase: ", configDirBase); - // Set the default application configuration directory - log.vdebug("configDirName: Configuring application to use default config path"); + addLogEntry("configDirBase: " ~ configDirBase, ["debug"]); + // Set the calculated application configuration directory + addLogEntry("configDirName: Configuring application to use calculated config path", ["debug"]); // configDirBase contains the correct path so we do not need to check for presence of '~' - configDirName = configDirBase ~ "/onedrive"; + configDirName = buildNormalizedPath(buildPath(configDirBase, "onedrive")); // systemConfigDirBase contains the correct path so we do not need to check for presence of '~' - systemConfigDirName = systemConfigDirBase ~ "/onedrive"; + systemConfigDirName = buildNormalizedPath(buildPath(systemConfigDirBase, "onedrive")); } - - // Config directory options all determined + + // Configuration directory should now have been correctly identified if (!exists(configDirName)) { // create the directory mkdirRecurse(configDirName); @@ -276,166 +437,539 @@ final class Config if (!isDir(configDirName)) { if (!confdirOption.empty) { // the configuration path was passed in by the user .. 
user error - writeln("ERROR: --confdir entered value is an existing file instead of an existing directory"); + addLogEntry("ERROR: --confdir entered value is an existing file instead of an existing directory"); } else { // other error - writeln("ERROR: ~/.config/onedrive is a file rather than a directory"); + addLogEntry("ERROR: " ~ configDirName ~ " is a file rather than a directory"); } // Must exit exit(EXIT_FAILURE); } } - - // configDirName has a trailing / - if (!configDirName.empty) log.vlog("Using 'user' Config Dir: ", configDirName); - if (!systemConfigDirName.empty) log.vlog("Using 'system' Config Dir: ", systemConfigDirName); - + // Update application set variables based on configDirName - refreshTokenFilePath = buildNormalizedPath(configDirName ~ "/refresh_token"); - deltaLinkFilePath = buildNormalizedPath(configDirName ~ "/delta_link"); - databaseFilePath = buildNormalizedPath(configDirName ~ "/items.sqlite3"); - databaseFilePathDryRun = buildNormalizedPath(configDirName ~ "/items-dryrun.sqlite3"); - uploadStateFilePath = buildNormalizedPath(configDirName ~ "/resume_upload"); - userConfigFilePath = buildNormalizedPath(configDirName ~ "/config"); - syncListFilePath = buildNormalizedPath(configDirName ~ "/sync_list"); - systemConfigFilePath = buildNormalizedPath(systemConfigDirName ~ "/config"); - businessSharedFolderFilePath = buildNormalizedPath(configDirName ~ "/business_shared_folders"); - + // - What is the full path for the 'refresh_token' + refreshTokenFilePath = buildNormalizedPath(buildPath(configDirName, "refresh_token")); + // - What is the full path for the 'delta_link' + deltaLinkFilePath = buildNormalizedPath(buildPath(configDirName, "delta_link")); + // - What is the full path for the 'items.sqlite3' - the database cache file + databaseFilePath = buildNormalizedPath(buildPath(configDirName, "items.sqlite3")); + // - What is the full path for the 'items-dryrun.sqlite3' - the dry-run database cache file + databaseFilePathDryRun = buildNormalizedPath(buildPath(configDirName, "items-dryrun.sqlite3")); + // - What is the full path for the 'session_upload' - the resumable upload session file + uploadSessionFilePath = buildNormalizedPath(buildPath(configDirName, "session_upload")); + // - What is the full path for the 'sync_list' file + syncListFilePath = buildNormalizedPath(buildPath(configDirName, "sync_list")); + // - What is the full path for the 'config' - the user file to configure the application + userConfigFilePath = buildNormalizedPath(buildPath(configDirName, "config")); + // - What is the full path for the system 'config' file if it is required + systemConfigFilePath = buildNormalizedPath(buildPath(systemConfigDirName, "config")); + + // To determine if any configuration items have changed, where a --resync would be required, we need to have a hash file for the following items + // - 'config.backup' file + // - applicable 'config' file + // - 'sync_list' file + // - 'business_shared_items' file + configBackupFile = buildNormalizedPath(buildPath(configDirName, ".config.backup")); + configHashFile = buildNormalizedPath(buildPath(configDirName, ".config.hash")); + syncListHashFile = buildNormalizedPath(buildPath(configDirName, ".sync_list.hash")); + // Debug Output for application set variables based on configDirName - log.vdebug("refreshTokenFilePath = ", refreshTokenFilePath); - log.vdebug("deltaLinkFilePath = ", deltaLinkFilePath); - log.vdebug("databaseFilePath = ", databaseFilePath); - log.vdebug("databaseFilePathDryRun = ", databaseFilePathDryRun); - log.vdebug("uploadStateFilePath = ",
uploadStateFilePath); - log.vdebug("userConfigFilePath = ", userConfigFilePath); - log.vdebug("syncListFilePath = ", syncListFilePath); - log.vdebug("systemConfigFilePath = ", systemConfigFilePath); - log.vdebug("businessSharedFolderFilePath = ", businessSharedFolderFilePath); - } - - bool initialize() - { - // Initialise the application - if (!exists(userConfigFilePath)) { - // 'user' configuration file does not exist - // Is there a system configuration file? - if (!exists(systemConfigFilePath)) { - // 'system' configuration file does not exist - log.vlog("No user or system config file found, using application defaults"); - return true; + addLogEntry("refreshTokenFilePath = " ~ refreshTokenFilePath, ["debug"]); + addLogEntry("deltaLinkFilePath = " ~ deltaLinkFilePath, ["debug"]); + addLogEntry("databaseFilePath = " ~ databaseFilePath, ["debug"]); + addLogEntry("databaseFilePathDryRun = " ~ databaseFilePathDryRun, ["debug"]); + addLogEntry("uploadSessionFilePath = " ~ uploadSessionFilePath, ["debug"]); + addLogEntry("userConfigFilePath = " ~ userConfigFilePath, ["debug"]); + addLogEntry("syncListFilePath = " ~ syncListFilePath, ["debug"]); + addLogEntry("systemConfigFilePath = " ~ systemConfigFilePath, ["debug"]); + addLogEntry("configBackupFile = " ~ configBackupFile, ["debug"]); + addLogEntry("configHashFile = " ~ configHashFile, ["debug"]); + addLogEntry("syncListHashFile = " ~ syncListHashFile, ["debug"]); + + // Configure the Hash and Backup File Permission Value + string valueToConvert = to!string(defaultFilePermissionMode); + auto convertedValue = parse!long(valueToConvert, 8); + convertedPermissionValue = to!int(convertedValue); + + // Do not try and load any user configuration file if --help was used + if (helpRequested) { + return true; + } else { + // Initialise the application using the configuration file if it exists + if (!exists(userConfigFilePath)) { + // 'user' configuration file does not exist + // Is there a system configuration file? + if (!exists(systemConfigFilePath)) { + // 'system' configuration file does not exist + addLogEntry("No user or system config file found, using application defaults", ["verbose"]); + applicableConfigFilePath = userConfigFilePath; + configurationInitialised = true; + } else { + // 'system' configuration file exists + // can we load the configuration file without error? + if (loadConfigFile(systemConfigFilePath)) { + // configuration file loaded without error + addLogEntry("System configuration file successfully loaded"); + + // Set 'applicableConfigFilePath' to equal the 'config' we loaded + applicableConfigFilePath = systemConfigFilePath; + // Update the configHashFile path value to ensure we are using the system 'config' file for the hash + configHashFile = buildNormalizedPath(buildPath(systemConfigDirName, ".config.hash")); + configurationInitialised = true; + } else { + // there was a problem loading the configuration file + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("System configuration file has errors - please check your configuration"); + } + } } else { - // 'system' configuration file exists + // 'user' configuration file exists // can we load the configuration file without error? 
- if (load(systemConfigFilePath)) { + if (loadConfigFile(userConfigFilePath)) { // configuration file loaded without error - log.log("System configuration file successfully loaded"); - return true; + addLogEntry("Configuration file successfully loaded"); + + // Set 'applicableConfigFilePath' to equal the 'config' we loaded + applicableConfigFilePath = userConfigFilePath; + configurationInitialised = true; } else { // there was a problem loading the configuration file - log.log("System configuration file has errors - please check your configuration"); - return false; + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("Configuration file has errors - please check your configuration"); } } - } else { - // 'user' configuration file exists - // can we load the configuration file without error? - if (load(userConfigFilePath)) { - // configuration file loaded without error - log.log("Configuration file successfully loaded"); - return true; + + // Advise the user which configuration path will be used for the application state data + if (canFind(applicableConfigFilePath, configDirName)) { + addLogEntry("Using 'user' configuration path for application state data: " ~ configDirName, ["verbose"]); } else { - // there was a problem loading the configuration file - log.log("Configuration file has errors - please check your configuration"); - return false; + if (canFind(applicableConfigFilePath, systemConfigDirName)) { + addLogEntry("Using 'system' configuration path for application state data: " ~ systemConfigDirName, ["verbose"]); + } + } + } + + // return if the configuration was initialised + return configurationInitialised; + } + + // Create a backup of the applicable 'config' file, if that file exists + void createBackupConfigFile() { + if (!getValueBool("dry_run")) { + // Take a backup of the 'config' file in use, if it exists + if (exists(applicableConfigFilePath)) { + addLogEntry("Creating a backup of the applicable config file", ["debug"]); + // create backup copy of current config file + std.file.copy(applicableConfigFilePath, configBackupFile); + // File Copy should only be readable by the user who created it - 0600 permissions needed + configBackupFile.setAttributes(convertedPermissionValue); } + } else { + // --dry-run scenario ... technically we should not be making any local file changes ...
+ addLogEntry("DRY RUN: Not creating backup config file as --dry-run has been used"); + } + } + + // Return a given string value based on the provided key + string getValueString(string key) { + auto p = key in stringValues; + if (p) { + return *p; + } else { + throw new Exception("Missing config value: " ~ key); } } - void update_from_args(string[] args) - { - // Add additional options that are NOT configurable via config file - stringValues["create_directory"] = ""; - stringValues["create_share_link"] = ""; - stringValues["destination_directory"] = ""; - stringValues["get_file_link"] = ""; - stringValues["modified_by"] = ""; - stringValues["get_o365_drive_id"] = ""; - stringValues["remove_directory"] = ""; - stringValues["single_directory"] = ""; - stringValues["source_directory"] = ""; - stringValues["auth_files"] = ""; - stringValues["auth_response"] = ""; - boolValues["display_config"] = false; - boolValues["display_sync_status"] = false; - boolValues["print_token"] = false; - boolValues["logout"] = false; - boolValues["reauth"] = false; - boolValues["monitor"] = false; - boolValues["synchronize"] = false; - boolValues["force"] = false; - boolValues["list_business_shared_folders"] = false; - boolValues["force_sync"] = false; - boolValues["with_editing_perms"] = false; + // Return a given long value based on the provided key + long getValueLong(string key) { + auto p = key in longValues; + if (p) { + return *p; + } else { + throw new Exception("Missing config value: " ~ key); + } + } - // Application Startup option validation - try { - string tmpStr; - bool tmpBol; - long tmpVerb; - // duplicated from main.d to get full help output! - auto opt = getopt( + // Return a given bool value based on the provided key + bool getValueBool(string key) { + auto p = key in boolValues; + if (p) { + return *p; + } else { + throw new Exception("Missing config value: " ~ key); + } + } + + // Set a given string value based on the provided key + void setValueString(string key, string value) { + stringValues[key] = value; + } - args, - std.getopt.config.bundling, - std.getopt.config.caseSensitive, - "auth-files", - "Perform authentication not via interactive dialog but via files read/writes to these files.", - &stringValues["auth_files"], - "auth-response", - "Perform authentication not via interactive dialog but via providing the response url directly.", - &stringValues["auth_response"], - "check-for-nomount", - "Check for the presence of .nosync in the syncdir root. If found, do not perform sync.", - &boolValues["check_nomount"], - "check-for-nosync", - "Check for the presence of .nosync in each directory. If found, skip directory from sync.", - &boolValues["check_nosync"], - "classify-as-big-delete", - "Number of children in a path that is locally removed which will be classified as a 'big data delete'", - &longValues["classify_as_big_delete"], - "cleanup-local-files", - "Cleanup additional local files when using --download-only. 
This will remove local data.", - &boolValues["cleanup_local_files"], - "create-directory", - "Create a directory on OneDrive - no sync will be performed.", - &stringValues["create_directory"], - "create-share-link", - "Create a shareable link for an existing file on OneDrive", - &stringValues["create_share_link"], - "debug-https", - "Debug OneDrive HTTPS communication.", - &boolValues["debug_https"], - "destination-directory", - "Destination directory for renamed or move on OneDrive - no sync will be performed.", - &stringValues["destination_directory"], - "disable-notifications", - "Do not use desktop notifications in monitor mode.", - &boolValues["disable_notifications"], - "disable-download-validation", - "Disable download validation when downloading from OneDrive", - &boolValues["disable_download_validation"], - "disable-upload-validation", - "Disable upload validation when uploading to OneDrive", - &boolValues["disable_upload_validation"], - "display-config", - "Display what options the client will use as currently configured - no sync will be performed.", - &boolValues["display_config"], - "display-running-config", + // Set a given long value based on the provided key + void setValueLong(string key, long value) { + longValues[key] = value; + } + + // Set a given bool value based on the provided key + void setValueBool(string key, bool value) { + boolValues[key] = value; + } + + // Configure the directory octal permission value + void configureRequiredDirectoryPermisions() { + // return the directory permission mode required + // - return octal!defaultDirectoryPermissionMode; ... can't be used .. which is odd + // Error: variable defaultDirectoryPermissionMode cannot be read at compile time + if (getValueLong("sync_dir_permissions") != defaultDirectoryPermissionMode) { + // return user configured permissions as octal integer + string valueToConvert = to!string(getValueLong("sync_dir_permissions")); + auto convertedValue = parse!long(valueToConvert, 8); + configuredDirectoryPermissionMode = to!int(convertedValue); + } else { + // return default as octal integer + string valueToConvert = to!string(defaultDirectoryPermissionMode); + auto convertedValue = parse!long(valueToConvert, 8); + configuredDirectoryPermissionMode = to!int(convertedValue); + } + }
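// Illustrative sketch only (not part of this patch): the permission helpers above
// and below round-trip the mode through a string and re-parse it in base 8,
// because the value is stored as a decimal-looking long (e.g. 700) and octal!x
// requires a compile-time constant:
//
//     import std.conv : parse, to;
//     string raw = to!string(700);        // "700"
//     auto mode = parse!long(raw, 8);     // 448, i.e. octal 0700 -> rwx------
//     assert(mode == 448);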
+ + // Configure the file octal permission value + void configureRequiredFilePermisions() { + // return the file permission mode required + // - return octal!defaultFilePermissionMode; ... can't be used .. which is odd + // Error: variable defaultFilePermissionMode cannot be read at compile time + if (getValueLong("sync_file_permissions") != defaultFilePermissionMode) { + // return user configured permissions as octal integer + string valueToConvert = to!string(getValueLong("sync_file_permissions")); + auto convertedValue = parse!long(valueToConvert, 8); + configuredFilePermissionMode = to!int(convertedValue); + } else { + // return default as octal integer + string valueToConvert = to!string(defaultFilePermissionMode); + auto convertedValue = parse!long(valueToConvert, 8); + configuredFilePermissionMode = to!int(convertedValue); + } + } + + // Read the configuredDirectoryPermissionMode and return + int returnRequiredDirectoryPermisions() { + if (configuredDirectoryPermissionMode == 0) { + // the configured value is zero, this means that directories would get + // values of d--------- + configureRequiredDirectoryPermisions(); + } + return configuredDirectoryPermissionMode; + } + + // Read the configuredFilePermissionMode and return + int returnRequiredFilePermisions() { + if (configuredFilePermissionMode == 0) { + // the configured value is zero + configureRequiredFilePermisions(); + } + return configuredFilePermissionMode; + } + + // Load a configuration file from the provided filename + private bool loadConfigFile(string filename) { + try { + addLogEntry("Reading configuration file: " ~ filename); + readText(filename); + } catch (std.file.FileException e) { + addLogEntry("ERROR: Unable to access " ~ e.msg); + return false; + } + + auto file = File(filename, "r"); + string lineBuffer; + + scope(exit) { + file.close(); + object.destroy(file); + object.destroy(lineBuffer); + } + + scope(failure) { + file.close(); + object.destroy(file); + object.destroy(lineBuffer); + } + + foreach (line; file.byLine()) { + lineBuffer = stripLeft(line).to!string; + if (lineBuffer.empty || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; + auto c = lineBuffer.matchFirst(configRegex); + if (c.empty) { + addLogEntry("Malformed config line: " ~ lineBuffer); + addLogEntry(); + addLogEntry("Please review the documentation on how to correctly configure this application."); + forceExit(); + } + + c.popFront(); // skip the whole match + string key = c.front.dup; + c.popFront(); + + // Handle deprecated keys + switch (key) { + case "min_notify_changes": + case "force_http_2": + addLogEntry("The option '" ~ key ~ "' has been deprecated and will be ignored. Please read the updated documentation and update your client configuration to remove this option."); + continue; + case "sync_business_shared_folders": + addLogEntry(); + addLogEntry("The option 'sync_business_shared_folders' has been deprecated and the process for synchronising Microsoft OneDrive Business Shared Folders has changed."); + addLogEntry("Please review the revised documentation on how to correctly configure this application feature."); + addLogEntry("You must update your client configuration and make changes to your local filesystem and online data to use this capability."); + return false; + default: + break; + }
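// Illustrative sketch only: the real configRegex is defined elsewhere in this
// module, so the pattern below is an assumed stand-in for its shape. A config
// line looks like  key = "value", and the captures are walked with popFront(),
// exactly as done with 'c' in this loop:
//
//     import std.regex : matchFirst, regex;
//     auto sketchRegex = regex(`^(\w+)\s*=\s*"(.*)"\s*$`);
//     auto m = matchFirst(`sync_dir = "~/OneDrive"`, sketchRegex);
//     m.popFront();                       // skip the whole match
//     assert(m.front == "sync_dir");      // captured key
//     m.popFront();
//     assert(m.front == `~/OneDrive`);    // captured value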
+ + // Process other keys + if (key in boolValues) { + // Only accept "true" as true value. + setValueBool(key, c.front.dup == "true" ? true : false); + if (key == "skip_dotfiles") configFileSkipDotfiles = true; + if (key == "skip_symlinks") configFileSkipSymbolicLinks = true; + if (key == "sync_business_shared_items") configFileSyncBusinessSharedItems = true; + } else if (key in stringValues) { + string value = c.front.dup; + setValueString(key, value); + if (key == "sync_dir") { + if (!strip(value).empty) { + configFileSyncDir = value; + } else { + addLogEntry(); + addLogEntry("Invalid value for key in config file: " ~ key); + addLogEntry("ERROR: sync_dir in config file cannot be empty - this is a fatal error and must be corrected"); + addLogEntry(); + forceExit(); + } + } else if (key == "skip_file") { + // Handle multiple 'config' file entries of skip_file + if (configFileSkipFile.empty) { + // currently no entry exists + configFileSkipFile = c.front.dup; + } else { + // add to existing entry + configFileSkipFile = configFileSkipFile ~ "|" ~ to!string(c.front.dup); + setValueString("skip_file", configFileSkipFile); + } + } else if (key == "skip_dir") { + // Handle multiple entries of skip_dir + if (configFileSkipDir.empty) { + // currently no entry exists + configFileSkipDir = c.front.dup; + } else { + // add to existing entry + configFileSkipDir = configFileSkipDir ~ "|" ~ to!string(c.front.dup); + setValueString("skip_dir", configFileSkipDir); + } + } else if (key == "single_directory") { + string configFileSingleDirectory = strip(value, "\""); + setValueString("single_directory", configFileSingleDirectory); + } else if (key == "azure_ad_endpoint") { + switch (value) { + case "": + addLogEntry("Using default config option for Global Azure AD Endpoints"); + break; + case "USL4": + addLogEntry("Using config option for Azure AD for US Government Endpoints"); + break; + case "USL5": + addLogEntry("Using config option for Azure AD for US Government Endpoints (DOD)"); + break; + case "DE": + addLogEntry("Using config option for Azure AD Germany"); + break; + case "CN": + addLogEntry("Using config option for Azure AD China operated by 21Vianet"); + break; + default: + addLogEntry("Unknown Azure AD Endpoint - using Global Azure AD Endpoints"); + } + } else if (key == "application_id") { + string tempApplicationId = strip(value); + if (tempApplicationId.empty) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + addLogEntry("application_id in config file cannot be empty - using default application_id", ["debug"]); + setValueString("application_id", defaultApplicationId); + } + } else if (key == "drive_id") { + string tempDriveId = strip(value); + if (tempDriveId.empty) { + addLogEntry(); + addLogEntry("Invalid value for key in config file: " ~ key); + addLogEntry("drive_id in config file cannot be empty - this is a fatal error and must be corrected by removing this entry from your config file.", ["debug"]); + addLogEntry(); + forceExit(); + } else { + configFileDriveId = tempDriveId; + } + } else if (key == "log_dir") { + string tempLogDir = strip(value); + if (tempLogDir.empty) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + addLogEntry("log_dir in config file cannot be empty - using default log_dir", ["debug"]); + setValueString("log_dir", defaultLogFileDir); + } + } + } else if (key in longValues) { + ulong thisConfigValue; + try { + thisConfigValue = to!ulong(c.front.dup); + } catch (std.conv.ConvException) { + addLogEntry("Invalid value for key in config file: " ~ key); + return false; + } + setValueLong(key, thisConfigValue); + if (key ==
"monitor_interval") { // if key is 'monitor_interval' the value must be 300 or greater + ulong tempValue = thisConfigValue; + // the temp value needs to be 300 or greater + if (tempValue < 300) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = 300; + } + setValueLong("monitor_interval", tempValue); + } else if (key == "monitor_fullscan_frequency") { // if key is 'monitor_fullscan_frequency' the value must be 12 or greater + ulong tempValue = thisConfigValue; + // the temp value needs to be 12 or greater + if (tempValue < 12) { + // If this is not set to zero (0) then we are not disabling 'monitor_fullscan_frequency' + if (tempValue != 0) { + // invalid value + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = 12; + } + } + setValueLong("monitor_fullscan_frequency", tempValue); + } else if (key == "space_reservation") { // if key is 'space_reservation' we have to calculate MB -> bytes + ulong tempValue = thisConfigValue; + // a value of 0 needs to be made at least 1MB .. + if (tempValue == 0) { + addLogEntry("Invalid value for key in config file - using 1MB: " ~ key); + tempValue = 1; + } + setValueLong("space_reservation", tempValue * 2^^20); + } else if (key == "ip_protocol_version") { + ulong tempValue = thisConfigValue; + if (tempValue > 2) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = defaultIpProtocol; + } + setValueLong("ip_protocol_version", tempValue); + } else if (key == "threads") { + ulong tempValue = thisConfigValue; + if (tempValue > 16) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = defaultConcurrentThreads; + } + setValueLong("threads", tempValue); + } + } else { + addLogEntry("Unknown key in config file: " ~ key); + return false; + } + } + // Return that we were able to read in the config file and parse the options without issue + return true; + } + + // Update the application configuration based on CLI passed in parameters + void updateFromArgs(string[] cliArgs) { + // Add additional CLI options that are NOT configurable via config file + stringValues["create_directory"] = ""; + stringValues["create_share_link"] = ""; + stringValues["destination_directory"] = ""; + stringValues["get_file_link"] = ""; + stringValues["modified_by"] = ""; + stringValues["sharepoint_library_name"] = ""; + stringValues["remove_directory"] = ""; + stringValues["single_directory"] = ""; + stringValues["source_directory"] = ""; + stringValues["auth_files"] = ""; + stringValues["auth_response"] = ""; + boolValues["display_config"] = false; + boolValues["display_sync_status"] = false; + boolValues["display_quota"] = false; + boolValues["print_token"] = false; + boolValues["logout"] = false; + boolValues["reauth"] = false; + boolValues["monitor"] = false; + boolValues["synchronize"] = false; + boolValues["force"] = false; + boolValues["list_business_shared_items"] = false; + boolValues["sync_business_shared_files"] = false; + boolValues["force_sync"] = false; + boolValues["with_editing_perms"] = false; + + // Specific options for CLI input handling + stringValues["sync_dir_cli"] = ""; + + // Application Startup option validation + try { + string tmpStr; + bool tmpBol; + long tmpVerb; + // duplicated from main.d to get full help output! 
+ auto opt = getopt( + + cliArgs, + std.getopt.config.bundling, + std.getopt.config.caseSensitive, + "auth-files", + "Perform authentication not via interactive dialog but via files read/writes to these files.", + &stringValues["auth_files"], + "auth-response", + "Perform authentication not via interactive dialog but via providing the response url directly.", + &stringValues["auth_response"], + "check-for-nomount", + "Check for the presence of .nosync in the syncdir root. If found, do not perform sync.", + &boolValues["check_nomount"], + "check-for-nosync", + "Check for the presence of .nosync in each directory. If found, skip directory from sync.", + &boolValues["check_nosync"], + "classify-as-big-delete", + "Number of children in a path that is locally removed which will be classified as a 'big data delete'", + &longValues["classify_as_big_delete"], + "cleanup-local-files", + "Cleanup additional local files when using --download-only. This will remove local data.", + &boolValues["cleanup_local_files"], + "create-directory", + "Create a directory on OneDrive - no sync will be performed.", + &stringValues["create_directory"], + "create-share-link", + "Create a shareable link for an existing file on OneDrive", + &stringValues["create_share_link"], + "debug-https", + "Debug OneDrive HTTPS communication.", + &boolValues["debug_https"], + "destination-directory", + "Destination directory for renamed or move on OneDrive - no sync will be performed.", + &stringValues["destination_directory"], + "disable-notifications", + "Do not use desktop notifications in monitor mode.", + &boolValues["disable_notifications"], + "disable-download-validation", + "Disable download validation when downloading from OneDrive", + &boolValues["disable_download_validation"], + "disable-upload-validation", + "Disable upload validation when uploading to OneDrive", + &boolValues["disable_upload_validation"], + "display-config", + "Display what options the client will use as currently configured - no sync will be performed.", + &boolValues["display_config"], + "display-running-config", "Display what options the client has been configured to use on application startup.", &boolValues["display_running_config"], "display-sync-status", "Display the sync status of the client - no sync will be performed.", &boolValues["display_sync_status"], + "display-quota", + "Display the quota status of the client - no sync will be performed.", + &boolValues["display_quota"], "download-only", "Replicate the OneDrive online state locally, by only downloading changes from OneDrive. 
Do not upload local changes to OneDrive.", &boolValues["download_only"], @@ -452,14 +986,23 @@ final class Config "Force the deletion of data when a 'big delete' is detected", &boolValues["force"], "force-sync", - "Force a synchronization of a specific folder, only when using --synchronize --single-directory and ignore all non-default skip_dir and skip_file rules", + "Force a synchronisation of a specific folder, only when using --sync --single-directory, ignoring all non-default skip_dir and skip_file rules", &boolValues["force_sync"], "get-file-link", "Display the file link of a synced file", &stringValues["get_file_link"], - "get-O365-drive-id", + "get-sharepoint-drive-id", "Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library", - &stringValues["get_o365_drive_id"], + &stringValues["sharepoint_library_name"], + "get-O365-drive-id", + "Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library (DEPRECATED)", + &stringValues["sharepoint_library_name"], + "list-shared-items", + "List OneDrive Business Shared Items", + &boolValues["list_business_shared_items"], + "sync-shared-files", + "Sync OneDrive Business Shared Files to the local filesystem", + &boolValues["sync_business_shared_files"], "local-first", "Synchronize from the local directory source first, before downloading changes from OneDrive.", &boolValues["local_first"], @@ -469,9 +1012,6 @@ final class Config "logout", "Logout the current user", &boolValues["logout"], - "min-notify-changes", - "Minimum number of pending incoming changes necessary to trigger a desktop notification", - &longValues["min_notify_changes"], "modified-by", "Display the last modified by details of a given path", &stringValues["modified_by"], @@ -490,7 +1030,7 @@ final class Config "no-remote-delete", "Do not delete local file 'deletes' from OneDrive when using --upload-only", &boolValues["no_remote_delete"], - "print-token", + "print-access-token", "Print the access token, useful for debugging", &boolValues["print_token"], "reauth", @@ -536,10 +1076,13 @@ final class Config "The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation", &longValues["space_reservation"], "syncdir", - "Specify the local directory used for synchronization to OneDrive", - &stringValues["sync_dir"], + "Specify the local directory used for synchronisation to OneDrive", + &stringValues["sync_dir_cli"], + "sync|s", + "Perform a synchronisation with Microsoft OneDrive", + &boolValues["synchronize"], "synchronize", - "Perform a synchronization", + "Perform a synchronisation with Microsoft OneDrive (DEPRECATED)", &boolValues["synchronize"], "sync-root-files", "Sync all files in sync_dir root when using sync_list.", @@ -547,9 +1090,6 @@ final class Config "upload-only", "Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive.
Do not download changes from OneDrive.", &boolValues["upload_only"], - "user-agent", - "Specify a User Agent string to the http client", - &stringValues["user_agent"], "confdir", "Set the directory used to store the configuration files", &tmpStr, @@ -559,343 +1099,1159 @@ final class Config "version", "Print the version and exit", &tmpBol, - "list-shared-folders", - "List OneDrive Business Shared Folders", - &boolValues["list_business_shared_folders"], - "sync-shared-folders", - "Sync OneDrive Business Shared Folders", - &boolValues["sync_business_shared_folders"], "with-editing-perms", "Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link ", &boolValues["with_editing_perms"] ); + + // Was --syncdir used? + if (!getValueString("sync_dir_cli").empty) { + // Build the line we need to update and/or write out + string newConfigOptionSyncDirLine = "sync_dir = \"" ~ getValueString("sync_dir_cli") ~ "\""; + + // Does a 'config' file exist? + if (!exists(applicableConfigFilePath)) { + // No existing 'config' file exists, create it, and write the 'sync_dir' configuration to it + if (!getValueBool("dry_run")) { + std.file.write(applicableConfigFilePath, newConfigOptionSyncDirLine); + // Config file should only be readable by the user who created it - 0600 permissions needed + applicableConfigFilePath.setAttributes(convertedPermissionValue); + } + } else { + // an existing config file exists .. so this now becomes tricky + // string replace 'sync_dir' if it exists, in the existing 'config' file, but only if 'sync_dir' (already read in) is different from 'sync_dir_cli' + if ( (getValueString("sync_dir")) != (getValueString("sync_dir_cli")) ) { + // values are different + File applicableConfigFilePathFileHandle = File(applicableConfigFilePath, "r"); + string lineBuffer; + string[] newConfigFileEntries; + + // read applicableConfigFilePath line by line + auto range = applicableConfigFilePathFileHandle.byLine(); + + // for each 'config' file line + foreach (line; range) { + lineBuffer = stripLeft(line).to!string; + if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') { + newConfigFileEntries ~= [lineBuffer]; + } else { + auto c = lineBuffer.matchFirst(configRegex); + if (!c.empty) { + c.popFront(); // skip the whole match + string key = c.front.dup; + if (key == "sync_dir") { + // lineBuffer is the line we want to keep + newConfigFileEntries ~= [newConfigOptionSyncDirLine]; + } else { + newConfigFileEntries ~= [lineBuffer]; + } + } + } + } + + // close original 'config' file if still open + if (applicableConfigFilePathFileHandle.isOpen()) { + // close open file + applicableConfigFilePathFileHandle.close(); + } + + // free memory from file open + object.destroy(applicableConfigFilePathFileHandle); + + // Update the existing item in the file line array + if (!getValueBool("dry_run")) { + // Open the file with write access using 'w' mode to overwrite existing content + File applicableConfigFilePathFileHandleWrite = File(applicableConfigFilePath, "w"); + + // Write each line from the 'newConfigFileEntries' array to the file + foreach (line; newConfigFileEntries) { + applicableConfigFilePathFileHandleWrite.writeln(line); + } + + // Flush and close the file handle to ensure all data is written + if (applicableConfigFilePathFileHandleWrite.isOpen()) { + applicableConfigFilePathFileHandleWrite.flush(); + applicableConfigFilePathFileHandleWrite.close(); + } + + // free memory from file open + 
object.destroy(applicableConfigFilePathFileHandleWrite); + } + } + } + + // Final - configure sync_dir with the value of sync_dir_cli so that it can be used as part of the application configuration and detect change + setValueString("sync_dir", getValueString("sync_dir_cli")); + } + + // Was --auth-files used? + if (!getValueString("auth_files").empty) { + // --auth-files used, need to validate that '~' was not used as a path identifier, and if yes, perform the correct expansion + string[] tempAuthFiles = getValueString("auth_files").split(":"); + string tempAuthUrl = tempAuthFiles[0]; + string tempResponseUrl = tempAuthFiles[1]; + string newAuthFilesString; + + // shell expansion if required + if (!shellEnvironmentSet){ + // No shell environment is set, no automatic expansion of '~' if present is possible + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempAuthUrl, "~")) { + // A ~ was found in auth_files(authURL) + addLogEntry("auth_files: A '~' was found in 'auth_files(authURL)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + tempAuthUrl = buildNormalizedPath(buildPath(defaultHomePath, strip(tempAuthUrl, "~"))); + } + + // Does the 'currently configured' tempResponseUrl include a ~ + if (canFind(tempResponseUrl, "~")) { + // A ~ was found in auth_files(responseURL) + addLogEntry("auth_files: A '~' was found in 'auth_files(tempResponseUrl)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + tempResponseUrl = buildNormalizedPath(buildPath(defaultHomePath, strip(tempResponseUrl, "~"))); + } + } else { + // Shell environment is set, automatic expansion of '~' if present is possible + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempAuthUrl, "~")) { + // A ~ was found in auth_files(authURL) + addLogEntry("auth_files: A '~' was found in the configured 'auth_files(authURL)', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + tempAuthUrl = expandTilde(tempAuthUrl); + } + + // Does the 'currently configured' tempResponseUrl include a ~ + if (canFind(tempResponseUrl, "~")) { + // A ~ was found in auth_files(responseURL) + addLogEntry("auth_files: A '~' was found in the configured 'auth_files(tempResponseUrl)', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + tempResponseUrl = expandTilde(tempResponseUrl); + } + } + + // Build new string + newAuthFilesString = tempAuthUrl ~ ":" ~ tempResponseUrl; + addLogEntry("auth_files - updated value: " ~ newAuthFilesString, ["debug"]); + setValueString("auth_files", newAuthFilesString); + } + if (opt.helpWanted) { + outputLongHelp(opt.options); + exit(EXIT_SUCCESS); + } } catch (GetOptException e) { - log.error(e.msg); - log.error("Try 'onedrive -h' for more information"); + // getOpt error - must use writeln() here + writeln(e.msg); + writeln("Try 'onedrive -h' for more information"); exit(EXIT_FAILURE); } catch (Exception e) { - // error - log.error(e.msg); - log.error("Try 'onedrive -h' for more information"); + // general error - must use writeln() here + writeln(e.msg); + writeln("Try 'onedrive -h' for more information"); exit(EXIT_FAILURE); } } - - string getValueString(string key) - { - auto p = key in stringValues; - if (p) { - return *p; - } else { - throw new Exception("Missing config value: " ~ key);
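// Illustrative sketch only of the two '~' expansion paths handled in
// updateFromArgs() above ("/root" stands in for the calculated defaultHomePath):
//
//     import std.path : expandTilde, buildNormalizedPath, buildPath;
//     // with a usable SHELL/USER environment, the runtime expands '~' itself:
//     string viaShell = expandTilde("~/auth.url");    // "$HOME/auth.url" when HOME is set
//     // without one, the calculated home path is substituted manually:
//     string manual = buildNormalizedPath(buildPath("/root", "auth.url"));
//     assert(manual == "/root/auth.url");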
+ + // Check the arguments passed in for any that will be deprecated + void checkDepreciatedOptions(string[] cliArgs) { + + bool deprecatedCommandsFound = false; + + foreach (cliArg; cliArgs) { + // Check each CLI arg for items that have been deprecated + + // --synchronize deprecated in v2.5.0, will be removed in a future version + if (cliArg == "--synchronize") { + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("DEPRECATION WARNING: --synchronize has been deprecated in favour of --sync or -s"); + deprecatedCommandsFound = true; + } + + // --get-O365-drive-id deprecated in v2.5.0, will be removed in a future version + if (cliArg == "--get-O365-drive-id") { + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("DEPRECATION WARNING: --get-O365-drive-id has been deprecated in favour of --get-sharepoint-drive-id"); + deprecatedCommandsFound = true; + } + } + + if (deprecatedCommandsFound) { + addLogEntry("DEPRECATION WARNING: Deprecated commands will be removed in a future release."); + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering } } + + // Display the applicable application configuration + void displayApplicationConfiguration() { + if (getValueBool("display_running_config")) { + addLogEntry("--------------- Application Runtime Configuration ---------------"); + } + + // Display application version + addLogEntry("onedrive version = " ~ applicationVersion); + + // Display all of the pertinent configuration options + addLogEntry("Config path = " ~ configDirName); + // Does a config file exist or are we using application defaults + addLogEntry("Config file found in config path = " ~ to!string(exists(applicableConfigFilePath))); + + // Is config option drive_id configured?
+ addLogEntry("Config option 'drive_id' = " ~ getValueString("drive_id")); + + // Config Options as per 'config' file + addLogEntry("Config option 'sync_dir' = " ~ getValueString("sync_dir")); + + // logging and notifications + addLogEntry("Config option 'enable_logging' = " ~ to!string(getValueBool("enable_logging"))); + addLogEntry("Config option 'log_dir' = " ~ getValueString("log_dir")); + addLogEntry("Config option 'disable_notifications' = " ~ to!string(getValueBool("disable_notifications"))); + + // skip files and directory and 'matching' policy + addLogEntry("Config option 'skip_dir' = " ~ getValueString("skip_dir")); + addLogEntry("Config option 'skip_dir_strict_match' = " ~ to!string(getValueBool("skip_dir_strict_match"))); + addLogEntry("Config option 'skip_file' = " ~ getValueString("skip_file")); + addLogEntry("Config option 'skip_dotfiles' = " ~ to!string(getValueBool("skip_dotfiles"))); + addLogEntry("Config option 'skip_symlinks' = " ~ to!string(getValueBool("skip_symlinks"))); + + // --monitor sync process options + addLogEntry("Config option 'monitor_interval' = " ~ to!string(getValueLong("monitor_interval"))); + addLogEntry("Config option 'monitor_log_frequency' = " ~ to!string(getValueLong("monitor_log_frequency"))); + addLogEntry("Config option 'monitor_fullscan_frequency' = " ~ to!string(getValueLong("monitor_fullscan_frequency"))); + + // sync process and method + addLogEntry("Config option 'read_only_auth_scope' = " ~ to!string(getValueBool("read_only_auth_scope"))); + addLogEntry("Config option 'dry_run' = " ~ to!string(getValueBool("dry_run"))); + addLogEntry("Config option 'upload_only' = " ~ to!string(getValueBool("upload_only"))); + addLogEntry("Config option 'download_only' = " ~ to!string(getValueBool("download_only"))); + addLogEntry("Config option 'local_first' = " ~ to!string(getValueBool("local_first"))); + addLogEntry("Config option 'check_nosync' = " ~ to!string(getValueBool("check_nosync"))); + addLogEntry("Config option 'check_nomount' = " ~ to!string(getValueBool("check_nomount"))); + addLogEntry("Config option 'resync' = " ~ to!string(getValueBool("resync"))); + addLogEntry("Config option 'resync_auth' = " ~ to!string(getValueBool("resync_auth"))); + addLogEntry("Config option 'cleanup_local_files' = " ~ to!string(getValueBool("cleanup_local_files"))); - long getValueLong(string key) - { - auto p = key in longValues; - if (p) { - return *p; + // data integrity + addLogEntry("Config option 'classify_as_big_delete' = " ~ to!string(getValueLong("classify_as_big_delete"))); + addLogEntry("Config option 'disable_upload_validation' = " ~ to!string(getValueBool("disable_upload_validation"))); + addLogEntry("Config option 'disable_download_validation' = " ~ to!string(getValueBool("disable_download_validation"))); + addLogEntry("Config option 'bypass_data_preservation' = " ~ to!string(getValueBool("bypass_data_preservation"))); + addLogEntry("Config option 'no_remote_delete' = " ~ to!string(getValueBool("no_remote_delete"))); + addLogEntry("Config option 'remove_source_files' = " ~ to!string(getValueBool("remove_source_files"))); + addLogEntry("Config option 'sync_dir_permissions' = " ~ to!string(getValueLong("sync_dir_permissions"))); + addLogEntry("Config option 'sync_file_permissions' = " ~ to!string(getValueLong("sync_file_permissions"))); + addLogEntry("Config option 'space_reservation' = " ~ to!string(getValueLong("space_reservation"))); + + // curl operations + addLogEntry("Config option 'application_id' = " ~ getValueString("application_id")); + 
addLogEntry("Config option 'azure_ad_endpoint' = " ~ getValueString("azure_ad_endpoint")); + addLogEntry("Config option 'azure_tenant_id' = " ~ getValueString("azure_tenant_id")); + addLogEntry("Config option 'user_agent' = " ~ getValueString("user_agent")); + addLogEntry("Config option 'force_http_11' = " ~ to!string(getValueBool("force_http_11"))); + addLogEntry("Config option 'debug_https' = " ~ to!string(getValueBool("debug_https"))); + addLogEntry("Config option 'rate_limit' = " ~ to!string(getValueLong("rate_limit"))); + addLogEntry("Config option 'operation_timeout' = " ~ to!string(getValueLong("operation_timeout"))); + addLogEntry("Config option 'dns_timeout' = " ~ to!string(getValueLong("dns_timeout"))); + addLogEntry("Config option 'connect_timeout' = " ~ to!string(getValueLong("connect_timeout"))); + addLogEntry("Config option 'data_timeout' = " ~ to!string(getValueLong("data_timeout"))); + addLogEntry("Config option 'ip_protocol_version' = " ~ to!string(getValueLong("ip_protocol_version"))); + addLogEntry("Config option 'threads' = " ~ to!string(getValueLong("threads"))); + + // Is sync_list configured ? + if (exists(syncListFilePath)){ + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("Selective sync 'sync_list' configured = true"); + addLogEntry("sync_list config option 'sync_root_files' = " ~ to!string(getValueBool("sync_root_files"))); + addLogEntry("sync_list contents:"); + // Output the sync_list contents + auto syncListFile = File(syncListFilePath, "r"); + auto range = syncListFile.byLine(); + foreach (line; range) + { + addLogEntry(to!string(line)); + } } else { - throw new Exception("Missing config value: " ~ key); + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("Selective sync 'sync_list' configured = false"); + } + + // Is sync_business_shared_items enabled and configured ? + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("Config option 'sync_business_shared_items' = " ~ to!string(getValueBool("sync_business_shared_items"))); + if (getValueBool("sync_business_shared_items")) { + // display what the shared files directory will be + addLogEntry("Config option 'Shared Files Directory' = " ~ configuredBusinessSharedFilesDirectoryName); + } + + // Are webhooks enabled? 
+ addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("Config option 'webhook_enabled' = " ~ to!string(getValueBool("webhook_enabled"))); + if (getValueBool("webhook_enabled")) { + addLogEntry("Config option 'webhook_public_url' = " ~ getValueString("webhook_public_url")); + addLogEntry("Config option 'webhook_listening_host' = " ~ getValueString("webhook_listening_host")); + addLogEntry("Config option 'webhook_listening_port' = " ~ to!string(getValueLong("webhook_listening_port"))); + addLogEntry("Config option 'webhook_expiration_interval' = " ~ to!string(getValueLong("webhook_expiration_interval"))); + addLogEntry("Config option 'webhook_renewal_interval' = " ~ to!string(getValueLong("webhook_renewal_interval"))); + addLogEntry("Config option 'webhook_retry_interval' = " ~ to!string(getValueLong("webhook_retry_interval"))); + } + + if (getValueBool("display_running_config")) { + addLogEntry(); + addLogEntry("--------------------DEVELOPER_OPTIONS----------------------------"); + addLogEntry("Config option 'force_children_scan' = " ~ to!string(getValueBool("force_children_scan"))); + addLogEntry(); + addLogEntry("-----------------------------------------------------------------"); } } - - bool getValueBool(string key) - { - auto p = key in boolValues; - if (p) { - return *p;
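// Illustrative sketch only of the interactive Y/N confirmation pattern used by
// the two risk-acceptance prompts below, reduced to its core shape:
//
//     import std.ascii : toUpper;
//     import std.stdio : readln, write;
//     import std.string : strip;
//     bool accepted = false;
//     string input = readln().strip;
//     if (input.length > 0 && toUpper(input[0]) == 'Y') {
//         accepted = true;
//         write("\n");    // the prompt line did not end with a newline
//     }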
+ + // Prompt the user to accept the risk of using --resync + bool displayResyncRiskForAcceptance() { + // what is the user risk acceptance? + bool userRiskAcceptance = false; + + // Did the user use --resync-auth or 'resync_auth' in the config file to negate presenting this message? + if (!getValueBool("resync_auth")) { + // need to prompt user + char response; + + // --resync warning message + addLogEntry("", ["consoleOnly"]); // new line, console only + addLogEntry("The usage of --resync will delete your local 'onedrive' client state, thus no record of your current 'sync status' will exist.", ["consoleOnly"]); + addLogEntry("This has the potential to overwrite local versions of files with perhaps older versions of documents downloaded from OneDrive, resulting in local data loss.", ["consoleOnly"]); + addLogEntry("If in doubt, backup your local data before using --resync", ["consoleOnly"]); + addLogEntry("", ["consoleOnly"]); // new line, console only + addLogEntry("Are you sure you wish to proceed with --resync? [Y/N] ", ["consoleOnlyNoNewLine"]); + + try { + // Attempt to read user response + string input = readln().strip; + if (input.length > 0) { + response = std.ascii.toUpper(input[0]); + } + } catch (std.format.FormatException e) { + userRiskAcceptance = false; + // Caught an error reading the user response - do not accept the risk + return false; + } + + // What did the user enter? + addLogEntry("--resync warning User Response Entered: " ~ to!string(response), ["debug"]); + + // Evaluate user response + if ((to!string(response) == "y") || (to!string(response) == "Y")) { + // User has accepted --resync risk to proceed + userRiskAcceptance = true; + // The 'Are you sure you wish ..' prompt above does not use writeln(), so add the newline manually + write("\n"); + } } else { - throw new Exception("Missing config value: " ~ key); + // resync_auth is true + userRiskAcceptance = true; } + + // Return the --resync acceptance or not + return userRiskAcceptance; } - - void setValueBool(string key, bool value) - { - boolValues[key] = value; - } - - void setValueString(string key, string value) - { - stringValues[key] = value; - } - - void setValueLong(string key, long value) - { - longValues[key] = value; - } - - // load a configuration file - private bool load(string filename) - { - // configure function variables - try { - readText(filename); - } catch (std.file.FileException e) { - // Unable to access required file - log.error("ERROR: Unable to access ", e.msg); - // Use exit scopes to shutdown API - return false; - } + // Prompt the user to accept the risk of using --force-sync + bool displayForceSyncRiskForAcceptance() { + // what is the user risk acceptance? + bool userRiskAcceptance = false; - // We were able to readText the config file - so, we should be able to open and read it - auto file = File(filename, "r"); - string lineBuffer; + // need to prompt user + char response; - // configure scopes - // - failure - scope(failure) { - // close file if open - if (file.isOpen()){ - // close open file - file.close(); + // --force-sync warning message + addLogEntry("", ["consoleOnly"]); // new line, console only + addLogEntry("The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts.", ["consoleOnly"]); + addLogEntry("By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync.", ["consoleOnly"]); + addLogEntry("", ["consoleOnly"]); // new line, console only + addLogEntry("Are you sure you wish to proceed with --force-sync [Y/N] ", ["consoleOnlyNoNewLine"]); + + try { + // Attempt to read user response + string input = readln().strip; + if (input.length > 0) { + response = std.ascii.toUpper(input[0]); } + } catch (std.format.FormatException e) { + userRiskAcceptance = false; + // Caught an error reading the user response - do not accept the risk + return false; } - // - exit - scope(exit) { - // close file if open - if (file.isOpen()){ - // close open file - file.close(); - } + + // What did the user enter? + addLogEntry("--force-sync warning User Response Entered: " ~ to!string(response), ["debug"]); + + // Evaluate user response + if ((to!string(response) == "y") || (to!string(response) == "Y")) { + // User has accepted --force-sync risk to proceed + userRiskAcceptance = true; + // The 'Are you sure you wish ..' prompt above does not use writeln(), so add the newline manually + write("\n"); } + + // Return the --force-sync acceptance or not + return userRiskAcceptance; + } + + // Check the application configuration for any changes that need to trigger a --resync + // This function is only called if --resync is not present + bool applicationChangeWhereResyncRequired() { + // Default is that no resync is required + bool resyncRequired = false; - // read file line by line - auto range = file.byLine(); - foreach (line; range) { - lineBuffer = stripLeft(line).to!string; - if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; - auto c = lineBuffer.matchFirst(configRegex); - if (!c.empty) { - c.popFront(); // skip the whole match - string key = c.front.dup; - auto p = key in boolValues; - if (p) { - c.popFront(); - // only accept "true" as true value. TODO Should we support other formats? - setValueBool(key, c.front.dup == "true" ?
true : false);
-				} else {
-					auto pp = key in stringValues;
-					if (pp) {
-						c.popFront();
-						setValueString(key, c.front.dup);
-						// detect need for --resync for these:
-						// --syncdir ARG
-						// --skip-file ARG
-						// --skip-dir ARG
-						if (key == "sync_dir") configFileSyncDir = c.front.dup;
-						if (key == "skip_file") {
-							// Handle multiple entries of skip_file
-							if (configFileSkipFile.empty) {
-								// currently no entry exists
-								configFileSkipFile = c.front.dup;
-							} else {
-								// add to existing entry
-								configFileSkipFile = configFileSkipFile ~ "|" ~ to!string(c.front.dup);
-								setValueString("skip_file", configFileSkipFile);
+		// Consolidate the flags for different configuration changes
+		bool[9] configOptionsDifferent;
+
+		// Handle multiple entries of skip_file
+		string backupConfigFileSkipFile;
+
+		// Handle multiple entries of skip_dir
+		string backupConfigFileSkipDir;
+
+		// Create and read the required initial hash files
+		createRequiredInitialConfigurationHashFiles();
+		// Read in the existing hash file values
+		readExistingConfigurationHashFiles();
+
+		// Helper lambda for logging and setting the difference flag
+		auto logAndSetDifference = (string message, size_t index) {
+			addLogEntry(message, ["debug"]);
+			configOptionsDifferent[index] = true;
+		};
+
+		// Check for changes in the sync_list and business_shared_items files
+		if (currentSyncListHash != previousSyncListHash)
+			logAndSetDifference("sync_list file has been updated, --resync needed", 0);
+
+		// Check for updates in the config file
+		if (currentConfigHash != previousConfigHash) {
+			addLogEntry("Application configuration file has been updated, checking if --resync needed");
+			addLogEntry("Using this configBackupFile: " ~ configBackupFile, ["debug"]);
+
+			if (exists(configBackupFile)) {
+				string[string] backupConfigStringValues;
+				backupConfigStringValues["drive_id"] = "";
+				backupConfigStringValues["sync_dir"] = "";
+				backupConfigStringValues["skip_file"] = "";
+				backupConfigStringValues["skip_dir"] = "";
+				backupConfigStringValues["skip_dotfiles"] = "";
+				backupConfigStringValues["skip_symlinks"] = "";
+				backupConfigStringValues["sync_business_shared_items"] = "";
+
+				bool drive_id_present = false;
+				bool sync_dir_present = false;
+				bool skip_file_present = false;
+				bool skip_dir_present = false;
+				bool skip_dotfiles_present = false;
+				bool skip_symlinks_present = false;
+				bool sync_business_shared_items_present = false;
+
+				string configOptionModifiedMessage = " was modified since the last time the application was successfully run, --resync required";
+
+				auto configBackupFileHandle = File(configBackupFile, "r");
+				scope(exit) {
+					if (configBackupFileHandle.isOpen()) {
+						configBackupFileHandle.close();
+					}
+				}
+
+				string lineBuffer;
+				auto range = configBackupFileHandle.byLine();
+				foreach (line; range) {
+					lineBuffer = stripLeft(line).to!string;
+					if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue;
+					auto c = lineBuffer.matchFirst(configRegex);
+					if (!c.empty) {
+						c.popFront(); // skip the whole match
+						string key = c.front.dup;
+						addLogEntry("Backup Config Key: " ~ key, ["debug"]);
+
+						auto p = key in backupConfigStringValues;
+						if (p) {
+							c.popFront();
+							string value = c.front.dup;
+							// Compare each key value with current config
+							if (key == "drive_id") {
+								drive_id_present = true;
+								if (value != getValueString("drive_id")) {
+									logAndSetDifference(key ~ configOptionModifiedMessage, 2);
+								}
 							}
-						}
-						if (key == "skip_dir") {
-							// Handle multiple entries of skip_dir
-							if (configFileSkipDir.empty) {
-								// currently no entry exists
-								configFileSkipDir = c.front.dup;
-							} else {
-								// add to existing entry
-								configFileSkipDir = configFileSkipDir ~ "|" ~ to!string(c.front.dup);
-								setValueString("skip_dir", configFileSkipDir);
+							if (key == "sync_dir") {
+								sync_dir_present = true;
+								if (value != getValueString("sync_dir")) {
+									logAndSetDifference(key ~ configOptionModifiedMessage, 3);
+								}
 							}
-						}
-						// --single-directory Strip quotation marks from path
-						// This is an issue when using ONEDRIVE_SINGLE_DIRECTORY with Docker
-						if (key == "single_directory") {
-							// Strip quotation marks from provided path
-							string configSingleDirectory = strip(to!string(c.front.dup), "\"");
-							setValueString("single_directory", configSingleDirectory);
-						}
-						// Azure AD Configuration
-						if (key == "azure_ad_endpoint") {
-							string azureConfigValue = c.front.dup;
-							switch(azureConfigValue) {
-								case "":
-									log.log("Using config option for Global Azure AD Endpoints");
-									break;
-								case "USL4":
-									log.log("Using config option for Azure AD for US Government Endpoints");
-									break;
-								case "USL5":
-									log.log("Using config option for Azure AD for US Government Endpoints (DOD)");
-									break;
-								case "DE":
-									log.log("Using config option for Azure AD Germany");
-									break;
-								case "CN":
-									log.log("Using config option for Azure AD China operated by 21Vianet");
-									break;
-								// Default - all other entries
-								default:
-									log.log("Unknown Azure AD Endpoint - using Global Azure AD Endpoints");
+
+							// skip_file handling
+							if (key == "skip_file") {
+								skip_file_present = true;
+								// Handle multiple entries of skip_file
+								if (backupConfigFileSkipFile.empty) {
+									// currently no entry exists, include 'defaultSkipFile' entries
+									backupConfigFileSkipFile = defaultSkipFile ~ "|" ~ to!string(c.front.dup);
+								} else {
+									// add to existing backupConfigFileSkipFile entry
+									backupConfigFileSkipFile = backupConfigFileSkipFile ~ "|" ~ to!string(c.front.dup);
+								}
 							}
-						}
-					} else {
-						auto ppp = key in longValues;
-						if (ppp) {
-							c.popFront();
-							setValueLong(key, to!long(c.front.dup));
-							// if key is space_reservation we have to calculate MB -> bytes
-							if (key == "space_reservation") {
-								// temp value
-								ulong tempValue = to!long(c.front.dup);
-								// a value of 0 needs to be made at least 1MB ..
-								if (tempValue == 0) {
-									tempValue = 1;
+
+							// skip_dir handling
+							if (key == "skip_dir") {
+								skip_dir_present = true;
+								// Handle multiple entries of skip_dir
+								if (backupConfigFileSkipDir.empty) {
+									// currently no entry exists
+									backupConfigFileSkipDir = c.front.dup;
+								} else {
+									// add to existing backupConfigFileSkipDir entry
+									backupConfigFileSkipDir = backupConfigFileSkipDir ~ "|" ~ to!string(c.front.dup);
+								}
+							}
+
+							if (key == "skip_dotfiles") {
+								skip_dotfiles_present = true;
+								if (value != to!string(getValueBool("skip_dotfiles"))) {
+									logAndSetDifference(key ~ configOptionModifiedMessage, 6);
+								}
+							}
+							if (key == "skip_symlinks") {
+								skip_symlinks_present = true;
+								if (value != to!string(getValueBool("skip_symlinks"))) {
+									logAndSetDifference(key ~ configOptionModifiedMessage, 7);
+								}
+							}
+							if (key == "sync_business_shared_items") {
+								sync_business_shared_items_present = true;
+								if (value != to!string(getValueBool("sync_business_shared_items"))) {
+									logAndSetDifference(key ~ configOptionModifiedMessage, 8);
 								}
-								setValueLong("space_reservation", to!long(tempValue * 2^^20));
 							}
-						} else {
-							log.log("Unknown key in config file: ", key);
-							return false;
 						}
 					}
 				}
+
+				// skip_file can be specified multiple times
+				if (skip_file_present && backupConfigFileSkipFile != configFileSkipFile) logAndSetDifference("skip_file" ~ configOptionModifiedMessage, 4);
+
+				// skip_dir can be specified multiple times
+				if (skip_dir_present && backupConfigFileSkipDir != configFileSkipDir) logAndSetDifference("skip_dir" ~ configOptionModifiedMessage, 5);
+
+				// Check for newly added configuration options
+				if (!drive_id_present && configFileDriveId != "") logAndSetDifference("drive_id newly added ... --resync needed", 2);
+				if (!sync_dir_present && configFileSyncDir != defaultSyncDir) logAndSetDifference("sync_dir newly added ... --resync needed", 3);
+				if (!skip_file_present && configFileSkipFile != defaultSkipFile) logAndSetDifference("skip_file newly added ... --resync needed", 4);
+				if (!skip_dir_present && configFileSkipDir != "") logAndSetDifference("skip_dir newly added ... --resync needed", 5);
+				if (!skip_dotfiles_present && configFileSkipDotfiles) logAndSetDifference("skip_dotfiles newly added ... --resync needed", 6);
+				if (!skip_symlinks_present && configFileSkipSymbolicLinks) logAndSetDifference("skip_symlinks newly added ... --resync needed", 7);
+				if (!sync_business_shared_items_present && configFileSyncBusinessSharedItems) logAndSetDifference("sync_business_shared_items newly added ... --resync needed", 8);
 			} else {
-				log.log("Malformed config line: ", lineBuffer);
-				return false;
+				addLogEntry("WARNING: no backup config file was found, unable to validate if any changes made");
 			}
 		}
-		return true;
-	}
 
-	void configureRequiredDirectoryPermisions() {
-		// return the directory permission mode required
-		// - return octal!defaultDirectoryPermissionMode; ... cant be used .. which is odd
-		// Error: variable defaultDirectoryPermissionMode cannot be read at compile time
-		if (getValueLong("sync_dir_permissions") != defaultDirectoryPermissionMode) {
-			// return user configured permissions as octal integer
-			string valueToConvert = to!string(getValueLong("sync_dir_permissions"));
-			auto convertedValue = parse!long(valueToConvert, 8);
-			configuredDirectoryPermissionMode = to!int(convertedValue);
+		// Check CLI options
+		if (exists(applicableConfigFilePath)) {
+			if (configFileSyncDir != "" && configFileSyncDir != getValueString("sync_dir")) logAndSetDifference("sync_dir: CLI override of config file option, --resync needed", 3);
+			if (configFileSkipFile != "" && configFileSkipFile != getValueString("skip_file")) logAndSetDifference("skip_file: CLI override of config file option, --resync needed", 4);
+			if (configFileSkipDir != "" && configFileSkipDir != getValueString("skip_dir")) logAndSetDifference("skip_dir: CLI override of config file option, --resync needed", 5);
+			if (!configFileSkipDotfiles && getValueBool("skip_dotfiles")) logAndSetDifference("skip_dotfiles: CLI override of config file option, --resync needed", 6);
+			if (!configFileSkipSymbolicLinks && getValueBool("skip_symlinks")) logAndSetDifference("skip_symlinks: CLI override of config file option, --resync needed", 7);
+		}
+
+		// Aggregate the result to determine if a resync is required
+		foreach (optionDifferent; configOptionsDifferent) {
+			if (optionDifferent) {
+				resyncRequired = true;
+				break;
+			}
+		}
+
+		// Final override
+		// In certain situations, regardless of config 'resync' needed status, ignore this so that the application can display 'non-syncable' information
+		// Options that should now be looked at are:
+		// --list-shared-items
+		if (getValueBool("list_business_shared_items")) resyncRequired = false;
+
+		// Return the calculated boolean
+		return resyncRequired;
+	}
+
+	// Cleanup hash files that need to be cleaned up when a --resync is issued
+	void cleanupHashFilesDueToResync() {
+		if (!getValueBool("dry_run")) {
+			// cleanup hash files
+			addLogEntry("Cleaning up configuration hash files", ["debug"]);
+			safeRemove(configHashFile);
+			safeRemove(syncListHashFile);
 		} else {
-			// return default as octal integer
-			string valueToConvert = to!string(defaultDirectoryPermissionMode);
-			auto convertedValue = parse!long(valueToConvert, 8);
-			configuredDirectoryPermissionMode = to!int(convertedValue);
+			// --dry-run scenario ... technically we should not be making any local file changes .......
+			addLogEntry("DRY RUN: Not removing hash files as --dry-run has been used");
 		}
 	}
-
-	void configureRequiredFilePermisions() {
-		// return the file permission mode required
-		// - return octal!defaultFilePermissionMode; ... cant be used .. which is odd
-		// Error: variable defaultFilePermissionMode cannot be read at compile time
-		if (getValueLong("sync_file_permissions") != defaultFilePermissionMode) {
-			// return user configured permissions as octal integer
-			string valueToConvert = to!string(getValueLong("sync_file_permissions"));
-			auto convertedValue = parse!long(valueToConvert, 8);
-			configuredFilePermissionMode = to!int(convertedValue);
+
+	// For each of the config files, update the hash data in the hash files
+	void updateHashContentsForConfigFiles() {
+		// Are we in a --dry-run scenario?
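+		// (Illustrative note: under --dry-run the hash files - e.g. an assumed '.config.hash'
+		//  beside the 'config' file - are left untouched, so a later real run still compares
+		//  against the hashes recorded by the last real run.)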
+		if (!getValueBool("dry_run")) {
+			// Not a dry-run scenario, update the applicable files
+			// Update applicable 'config' files
+			if (exists(applicableConfigFilePath)) {
+				// Update the hash of the applicable config file
+				addLogEntry("Updating applicable config file hash", ["debug"]);
+				std.file.write(configHashFile, computeQuickXorHash(applicableConfigFilePath));
+				// Hash file should only be readable by the user who created it - 0600 permissions needed
+				configHashFile.setAttributes(convertedPermissionValue);
+			}
+			// Update 'sync_list' files
+			if (exists(syncListFilePath)) {
+				// update sync_list hash
+				addLogEntry("Updating sync_list hash", ["debug"]);
+				std.file.write(syncListHashFile, computeQuickXorHash(syncListFilePath));
+				// Hash file should only be readable by the user who created it - 0600 permissions needed
+				syncListHashFile.setAttributes(convertedPermissionValue);
+			}
 		} else {
-			// return default as octal integer
-			string valueToConvert = to!string(defaultFilePermissionMode);
-			auto convertedValue = parse!long(valueToConvert, 8);
-			configuredFilePermissionMode = to!int(convertedValue);
+			// --dry-run scenario ... technically we should not be making any local file changes .......
+			addLogEntry("DRY RUN: Not updating hash files as --dry-run has been used");
 		}
 	}
-
-	int returnRequiredDirectoryPermisions() {
-		// read the configuredDirectoryPermissionMode and return
-		if (configuredDirectoryPermissionMode == 0) {
-			// the configured value is zero, this means that directories would get
-			// values of d---------
-			configureRequiredDirectoryPermisions();
+
+	// Create any required hash files for files that help us determine if the configuration has changed since last run
+	void createRequiredInitialConfigurationHashFiles() {
+		// Does a 'config' file exist with a valid hash file
+		if (exists(applicableConfigFilePath)) {
+			if (!exists(configHashFile)) {
+				// no existing hash file exists
+				std.file.write(configHashFile, "initial-hash");
+				// Hash file should only be readable by the user who created it - 0600 permissions needed
+				configHashFile.setAttributes(convertedPermissionValue);
+			}
+			// Generate the runtime hash for the 'config' file
+			currentConfigHash = computeQuickXorHash(applicableConfigFilePath);
+		}
+
+		// Does a 'sync_list' file exist with a valid hash file
+		if (exists(syncListFilePath)) {
+			if (!exists(syncListHashFile)) {
+				// no existing hash file exists
+				std.file.write(syncListHashFile, "initial-hash");
+				// Hash file should only be readable by the user who created it - 0600 permissions needed
+				syncListHashFile.setAttributes(convertedPermissionValue);
+			}
+			// Generate the runtime hash for the 'sync_list' file
+			currentSyncListHash = computeQuickXorHash(syncListFilePath);
 		}
-		return configuredDirectoryPermissionMode;
 	}
-
-	int returnRequiredFilePermisions() {
-		// read the configuredFilePermissionMode and return
-		if (configuredFilePermissionMode == 0) {
-			// the configured value is zero
+
+	// Read in the text values of the previous configurations
+	int readExistingConfigurationHashFiles() {
+		if (exists(configHashFile)) {
+			try {
+				previousConfigHash = readText(configHashFile);
+			} catch (std.file.FileException e) {
+				// Unable to access required hash file
+				addLogEntry("ERROR: Unable to access " ~ e.msg);
+				// Use exit scopes to shutdown API
+				return EXIT_FAILURE;
+			}
+		}
+
+		if (exists(syncListHashFile)) {
+			try {
+				previousSyncListHash = readText(syncListHashFile);
+			} catch (std.file.FileException e) {
+				// Unable to access required hash file
+				addLogEntry("ERROR: Unable to access " ~ e.msg);
+				// Use exit scopes to shutdown API
+				return EXIT_FAILURE;
+			}
+		}
+
+		return 0;
+	}
+
+	// Check for basic option conflicts - flags that should not be used together and/or flag combinations that conflict with each other
+	bool checkForBasicOptionConflicts() {
+
+		bool operationalConflictDetected = false;
+
+		// What are the permissions that have been set for the application?
+		// These are relevant for:
+		// - The ~/OneDrive parent folder or 'sync_dir' configured item
+		// - Any new folder created under ~/OneDrive or 'sync_dir'
+		// - Any new file created under ~/OneDrive or 'sync_dir'
+		// valid permissions are 000 -> 777 - anything else is invalid
+		if ((getValueLong("sync_dir_permissions") < 0) || (getValueLong("sync_file_permissions") < 0) || (getValueLong("sync_dir_permissions") > 777) || (getValueLong("sync_file_permissions") > 777)) {
+			addLogEntry("ERROR: Invalid 'User|Group|Other' permissions set within config file. Please check your configuration");
+			operationalConflictDetected = true;
+		} else {
+			// Debug log output what permissions are being set to
+			addLogEntry("Configuring default new folder permissions as: " ~ to!string(getValueLong("sync_dir_permissions")), ["debug"]);
+			configureRequiredDirectoryPermisions();
+			addLogEntry("Configuring default new file permissions as: " ~ to!string(getValueLong("sync_file_permissions")), ["debug"]);
 			configureRequiredFilePermisions();
 		}
-		return configuredFilePermissionMode;
+
+		// --upload-only and --download-only cannot be used together
+		if ((getValueBool("upload_only")) && (getValueBool("download_only"))) {
+			addLogEntry("ERROR: --upload-only and --download-only cannot be used together. Use one, not both at the same time");
+			operationalConflictDetected = true;
+		}
+
+		// --sync and --monitor cannot be used together
+		if ((getValueBool("synchronize")) && (getValueBool("monitor"))) {
+			addLogEntry("ERROR: --sync and --monitor cannot be used together. Only use one of these options, not both at the same time");
+			operationalConflictDetected = true;
+		}
+
+		// --no-remote-delete can ONLY be enabled when --upload-only is used
+		if ((getValueBool("no_remote_delete")) && (!getValueBool("upload_only"))) {
+			addLogEntry("ERROR: --no-remote-delete can only be used with --upload-only");
+			operationalConflictDetected = true;
+		}
+
+		// --remove-source-files can ONLY be enabled when --upload-only is used
+		if ((getValueBool("remove_source_files")) && (!getValueBool("upload_only"))) {
+			addLogEntry("ERROR: --remove-source-files can only be used with --upload-only");
+			operationalConflictDetected = true;
+		}
+
+		// --cleanup-local-files can ONLY be enabled when --download-only is used
+		if ((getValueBool("cleanup_local_files")) && (!getValueBool("download_only"))) {
+			addLogEntry("ERROR: --cleanup-local-files can only be used with --download-only");
+			operationalConflictDetected = true;
+		}
+
+		// --list-shared-items cannot be used with --resync and/or --resync-auth
+		if ((getValueBool("list_business_shared_items")) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) {
+			addLogEntry("ERROR: --list-shared-items cannot be used with --resync or --resync-auth");
+			operationalConflictDetected = true;
+		}
+
+		// --list-shared-items cannot be used with --sync or --monitor
+		if ((getValueBool("list_business_shared_items")) && ((getValueBool("synchronize")) || (getValueBool("monitor")))) {
+			addLogEntry("ERROR: --list-shared-items cannot be used with --sync or --monitor");
+			operationalConflictDetected = true;
+		}
+
+		// --sync-shared-files can ONLY be used with sync_business_shared_items
+		if ((getValueBool("sync_business_shared_files")) && (!getValueBool("sync_business_shared_items"))) {
+			addLogEntry("ERROR: The --sync-shared-files option can only be utilised if the 'sync_business_shared_items' configuration setting is enabled.");
+			operationalConflictDetected = true;
+		}
+
+		// --display-sync-status cannot be used with --resync and/or --resync-auth
+		if ((getValueBool("display_sync_status")) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) {
+			addLogEntry("ERROR: --display-sync-status cannot be used with --resync or --resync-auth");
+			operationalConflictDetected = true;
+		}
+
+		// --modified-by cannot be used with --resync and/or --resync-auth
+		if ((!getValueString("modified_by").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) {
+			addLogEntry("ERROR: --modified-by cannot be used with --resync or --resync-auth");
+			operationalConflictDetected = true;
+		}
+
+		// --get-file-link cannot be used with --resync and/or --resync-auth
+		if ((!getValueString("get_file_link").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) {
+			addLogEntry("ERROR: --get-file-link cannot be used with --resync or --resync-auth");
+			operationalConflictDetected = true;
+		}
+
+		// --create-share-link cannot be used with --resync and/or --resync-auth
+		if ((!getValueString("create_share_link").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) {
+			addLogEntry("ERROR: --create-share-link cannot be used with --resync or --resync-auth");
+			operationalConflictDetected = true;
+		}
+
+		// --get-sharepoint-drive-id cannot be used with --resync and/or --resync-auth
+		if ((!getValueString("sharepoint_library_name").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) {
+			addLogEntry("ERROR: --get-sharepoint-drive-id cannot be used with --resync or --resync-auth");
+			operationalConflictDetected = true;
+		}
+
+		// --monitor and --display-sync-status cannot be used together
+		if ((getValueBool("monitor")) && (getValueBool("display_sync_status"))) {
+			addLogEntry("ERROR: --monitor and --display-sync-status cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --sync and --display-sync-status cannot be used together
+		if ((getValueBool("synchronize")) && (getValueBool("display_sync_status"))) {
+			addLogEntry("ERROR: --sync and --display-sync-status cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --monitor and --display-quota cannot be used together
+		if ((getValueBool("monitor")) && (getValueBool("display_quota"))) {
+			addLogEntry("ERROR: --monitor and --display-quota cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --sync and --display-quota cannot be used together
+		if ((getValueBool("synchronize")) && (getValueBool("display_quota"))) {
+			addLogEntry("ERROR: --sync and --display-quota cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --force-sync can only be used when using --sync --single-directory
+		if (getValueBool("force_sync")) {
+
+			bool conflict = false;
+			// Should not be used with --monitor
+			if (getValueBool("monitor")) conflict = true;
+			// single_directory must not be empty
+			if (getValueString("single_directory").empty) conflict = true;
+			if (conflict) {
+				addLogEntry("ERROR: --force-sync can only be used with --sync --single-directory");
+				operationalConflictDetected = true;
+			}
+		}
+
+		// When using 'azure_ad_endpoint', 'azure_tenant_id' cannot be empty
+		if ((!getValueString("azure_ad_endpoint").empty) && (getValueString("azure_tenant_id").empty)) {
+			addLogEntry("ERROR: config option 'azure_tenant_id' cannot be empty when 'azure_ad_endpoint' is configured");
+			operationalConflictDetected = true;
+		}
+
+		// When using --enable-logging the 'log_dir' cannot be empty
+		if ((getValueBool("enable_logging")) && (getValueString("log_dir").empty)) {
+			addLogEntry("ERROR: config option 'log_dir' cannot be empty when 'enable_logging' is configured");
+			operationalConflictDetected = true;
+		}
+
+		// When using --syncdir, the value cannot be empty.
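+		// (Illustrative: a hypothetical invocation such as 'onedrive --syncdir ""' must be
+		//  rejected here rather than failing later when the empty path is actually used.)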
+		if (strip(getValueString("sync_dir")).empty) {
+			addLogEntry("ERROR: --syncdir value cannot be empty");
+			operationalConflictDetected = true;
+		}
+
+		// --monitor and --create-directory cannot be used together
+		if ((getValueBool("monitor")) && (!getValueString("create_directory").empty)) {
+			addLogEntry("ERROR: --monitor and --create-directory cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --sync and --create-directory cannot be used together
+		if ((getValueBool("synchronize")) && (!getValueString("create_directory").empty)) {
+			addLogEntry("ERROR: --sync and --create-directory cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --monitor and --remove-directory cannot be used together
+		if ((getValueBool("monitor")) && (!getValueString("remove_directory").empty)) {
+			addLogEntry("ERROR: --monitor and --remove-directory cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --sync and --remove-directory cannot be used together
+		if ((getValueBool("synchronize")) && (!getValueString("remove_directory").empty)) {
+			addLogEntry("ERROR: --sync and --remove-directory cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --monitor and --source-directory cannot be used together
+		if ((getValueBool("monitor")) && (!getValueString("source_directory").empty)) {
+			addLogEntry("ERROR: --monitor and --source-directory cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --sync and --source-directory cannot be used together
+		if ((getValueBool("synchronize")) && (!getValueString("source_directory").empty)) {
+			addLogEntry("ERROR: --sync and --source-directory cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --monitor and --destination-directory cannot be used together
+		if ((getValueBool("monitor")) && (!getValueString("destination_directory").empty)) {
+			addLogEntry("ERROR: --monitor and --destination-directory cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --sync and --destination-directory cannot be used together
+		if ((getValueBool("synchronize")) && (!getValueString("destination_directory").empty)) {
+			addLogEntry("ERROR: --sync and --destination-directory cannot be used together");
+			operationalConflictDetected = true;
+		}
+
+		// --download-only and --local-first cannot be used together
+		if ((getValueBool("download_only")) && (getValueBool("local_first"))) {
+			addLogEntry("ERROR: --download-only cannot be used with --local-first");
+			operationalConflictDetected = true;
+		}
+
+		// Return bool value indicating if we have an operational conflict
+		return operationalConflictDetected;
 	}
 
+	// Reset skip_file and skip_dir to application defaults when --force-sync is used
 	void resetSkipToDefaults() {
-		// reset skip_file and skip_dir to application defaults
 		// skip_file
-		log.vdebug("original skip_file: ", getValueString("skip_file"));
-		log.vdebug("resetting skip_file");
+		addLogEntry("original skip_file: " ~ getValueString("skip_file"), ["debug"]);
+		addLogEntry("resetting skip_file to application defaults", ["debug"]);
 		setValueString("skip_file", defaultSkipFile);
-		log.vdebug("reset skip_file: ", getValueString("skip_file"));
+		addLogEntry("reset skip_file: " ~ getValueString("skip_file"), ["debug"]);
+
 		// skip_dir
-		log.vdebug("original skip_dir: ", getValueString("skip_dir"));
-		log.vdebug("resetting skip_dir");
+		addLogEntry("original skip_dir: " ~ getValueString("skip_dir"), ["debug"]);
+		addLogEntry("resetting skip_dir to application defaults", ["debug"]);
 		setValueString("skip_dir", defaultSkipDir);
-		log.vdebug("reset skip_dir: ", getValueString("skip_dir"));
+		addLogEntry("reset skip_dir: " ~ getValueString("skip_dir"), ["debug"]);
+	}
+
+	// Initialise the correct 'sync_dir' expanding any '~' if present
+	string initialiseRuntimeSyncDirectory() {
+
+		string runtimeSyncDirectory;
+
+		addLogEntry("sync_dir: Setting runtimeSyncDirectory from config value 'sync_dir'", ["debug"]);
+
+		if (!shellEnvironmentSet) {
+			addLogEntry("sync_dir: No SHELL or USER environment variable configuration detected", ["debug"]);
+
+			// No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker
+			// Does the 'currently configured' sync_dir include a ~
+			if (canFind(getValueString("sync_dir"), "~")) {
+				// A ~ was found in sync_dir
+				addLogEntry("sync_dir: A '~' was found in 'sync_dir', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]);
+				runtimeSyncDirectory = buildNormalizedPath(buildPath(defaultHomePath, strip(getValueString("sync_dir"), "~")));
+			} else {
+				// No ~ found in sync_dir, use as is
+				addLogEntry("sync_dir: Using configured 'sync_dir' path as-is as no SHELL or USER environment variable configuration detected", ["debug"]);
+				runtimeSyncDirectory = getValueString("sync_dir");
+			}
+		} else {
+			// A shell and user environment variable is set, expand any ~ as this will be expanded correctly if present
+			if (canFind(getValueString("sync_dir"), "~")) {
+				addLogEntry("sync_dir: A '~' was found in the configured 'sync_dir', automatically expanding as SHELL and USER environment variable is set", ["debug"]);
+				runtimeSyncDirectory = expandTilde(getValueString("sync_dir"));
+			} else {
+				// No ~ found in sync_dir, does the path begin with a '/' ?
+				addLogEntry("sync_dir: Using configured 'sync_dir' path as-is; however, a SHELL or USER environment variable configuration was detected - the path should reside in the USER home directory", ["debug"]);
+				if (!startsWith(getValueString("sync_dir"), "/")) {
+					addLogEntry("Configured 'sync_dir' does not start with a '/' or '~/' - adjusting configured 'sync_dir' to use User Home Directory as base for 'sync_dir' path", ["debug"]);
+					string updatedPathWithHome = "~/" ~ getValueString("sync_dir");
+					runtimeSyncDirectory = expandTilde(updatedPathWithHome);
+				} else {
+					addLogEntry("use 'sync_dir' as is - no touch", ["debug"]);
+					runtimeSyncDirectory = getValueString("sync_dir");
+				}
+			}
+		}
+
+		// What will runtimeSyncDirectory be actually set to?
+ addLogEntry("sync_dir: runtimeSyncDirectory set to: " ~ runtimeSyncDirectory, ["debug"]); + + // Configure configuredBusinessSharedFilesDirectoryName + configuredBusinessSharedFilesDirectoryName = buildNormalizedPath(buildPath(runtimeSyncDirectory, defaultBusinessSharedFilesDirectoryName)); + + return runtimeSyncDirectory; + } + + // Initialise the correct 'log_dir' when application logging to a separate file is enabled with 'enable_logging' and expanding any '~' if present + string calculateLogDirectory() { + + string configuredLogDirPath; + + addLogEntry("log_dir: Setting runtime application log from config value 'log_dir'", ["debug"]); + + if (getValueString("log_dir") != defaultLogFileDir) { + // User modified 'log_dir' to be used with 'enable_logging' + // if 'log_dir' contains a '~' this needs to be expanded correctly + if (canFind(getValueString("log_dir"), "~")) { + // ~ needs to be expanded correctly + if (!shellEnvironmentSet) { + // No shell or user environment variable set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker + addLogEntry("log_dir: A '~' was found in log_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + configuredLogDirPath = buildNormalizedPath(buildPath(defaultHomePath, strip(getValueString("log_dir"), "~"))); + } else { + // A shell and user environment variable is set, expand any ~ as this will be expanded correctly if present + addLogEntry("log_dir: A '~' was found in the configured 'log_dir', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + configuredLogDirPath = expandTilde(getValueString("log_dir")); + } + } else { + // '~' not found in log_dir entry, use as is + configuredLogDirPath = getValueString("log_dir"); + } + } else { + // Default 'log_dir' to be used with 'enable_logging' + configuredLogDirPath = defaultLogFileDir; + } + + // Attempt to create 'configuredLogDirPath' otherwise we need to fall back to the users home directory + if (!exists(configuredLogDirPath)) { + // 'configuredLogDirPath' path does not exist - try and create it + try { + mkdirRecurse(configuredLogDirPath); + } catch (std.file.FileException e) { + // We got an error when attempting to create the directory .. 
+				addLogEntry();
+				addLogEntry("ERROR: Unable to create " ~ configuredLogDirPath);
+				addLogEntry("ERROR: Please manually create '" ~ configuredLogDirPath ~ "' and set appropriate permissions to allow write access for your user to this location.");
+				addLogEntry("ERROR: The requested client activity log will instead be located in your user's home directory");
+				addLogEntry();
+
+				// Reconfigure 'configuredLogDirPath' to use environment.get("HOME") value, which we have already calculated
+				configuredLogDirPath = defaultHomePath;
+			}
+		}
+
+		// Return the initialised application log path
+		return configuredLogDirPath;
+	}
+
+	void setConfigLoggingLevels(bool verboseLoggingInput, bool debugLoggingInput, long verbosityCountInput) {
+		// set the appConfig logging values
+		verboseLogging = verboseLoggingInput;
+		debugLogging = debugLoggingInput;
+		verbosityCount = verbosityCountInput;
+	}
+
+	// What IP protocol is going to be used to access Microsoft OneDrive
+	void displayIPProtocol() {
+		if (getValueLong("ip_protocol_version") == 0) addLogEntry("Using IPv4 and IPv6 (if configured) for all network operations");
+		if (getValueLong("ip_protocol_version") == 1) addLogEntry("Forcing client to use IPv4 connections only");
+		if (getValueLong("ip_protocol_version") == 2) addLogEntry("Forcing client to use IPv6 connections only");
+	}
+
+	// Has a 'no-sync' task been requested?
+	bool hasNoSyncOperationBeenRequested() {
+
+		bool noSyncOperation = false;
+
+		// Are we performing some sort of 'no-sync' task?
+		// - Are we obtaining the Office 365 Drive ID for a given Office 365 SharePoint Shared Library?
+		// - Are we displaying the sync status?
+		// - Are we getting the URL for a file online?
+		// - Are we listing who modified a file last online?
+		// - Are we listing OneDrive Business Shared Items?
+		// - Are we creating a shareable link for an existing file on OneDrive?
+		// - Are we just creating a directory online, without any sync being performed?
+		// - Are we just deleting a directory online, without any sync being performed?
+		// - Are we renaming or moving a directory?
+		// - Are we displaying the quota information?
+
+		// Return a true|false if any of these have been set, so that we use the 'dry-run' DB copy, to execute these tasks, in case the client is currently operational
+
+		// --get-sharepoint-drive-id - Get the SharePoint Library drive_id
+		if (getValueString("sharepoint_library_name") != "") {
+			// flag that a no sync operation has been requested
+			noSyncOperation = true;
+		}
+
+		// --display-sync-status - Query the sync status
+		if (getValueBool("display_sync_status")) {
+			// flag that a no sync operation has been requested
+			noSyncOperation = true;
+		}
+
+		// --get-file-link - Get the URL path for a synced file?
+		if (getValueString("get_file_link") != "") {
+			// flag that a no sync operation has been requested
+			noSyncOperation = true;
+		}
+
+		// --modified-by - Are we listing the modified-by details of a provided path?
+		if (getValueString("modified_by") != "") {
+			// flag that a no sync operation has been requested
+			noSyncOperation = true;
+		}
+
+		// --list-shared-items - Are we listing OneDrive Business Shared Items
+		if (getValueBool("list_business_shared_items")) {
+			// flag that a no sync operation has been requested
+			noSyncOperation = true;
+		}
+
+		// --create-share-link - Are we creating a shareable link for an existing file on OneDrive?
+ if (getValueString("create_share_link") != "") { + // flag that a no sync operation has been requested + noSyncOperation = true; + } + + // --create-directory - Are we just creating a directory online, without any sync being performed? + if ((getValueString("create_directory") != "")) { + // flag that a no sync operation has been requested + noSyncOperation = true; + } + + // --remove-directory - Are we just deleting a directory online, without any sync being performed? + if ((getValueString("remove_directory") != "")) { + // flag that a no sync operation has been requested + noSyncOperation = true; + } + + // Are we renaming or moving a directory online? + // onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination' + if ((getValueString("source_directory") != "") && (getValueString("destination_directory") != "")) { + // flag that a no sync operation has been requested + noSyncOperation = true; + } + + // Are we displaying the quota information? + if (getValueBool("display_quota")) { + // flag that a no sync operation has been requested + noSyncOperation = true; + } + + // Return result + return noSyncOperation; } } -void outputLongHelp(Option[] opt) -{ - auto argsNeedingOptions = [ - "--auth-files", - "--auth-response", - "--confdir", - "--create-directory", - "--create-share-link", - "--destination-directory", - "--get-file-link", - "--get-O365-drive-id", - "--log-dir", - "--min-notify-changes", - "--modified-by", - "--monitor-interval", - "--monitor-log-frequency", - "--monitor-fullscan-frequency", - "--operation-timeout", - "--remove-directory", - "--single-directory", - "--skip-dir", - "--skip-file", - "--skip-size", - "--source-directory", - "--space-reservation", - "--syncdir", - "--user-agent" ]; - writeln(`OneDrive - a client for OneDrive Cloud Services - -Usage: - onedrive [options] --synchronize +// Output the full application help when --help is passed in +void outputLongHelp(Option[] opt) { + auto argsNeedingOptions = [ + "--auth-files", + "--auth-response", + "--confdir", + "--create-directory", + "--classify-as-big-delete", + "--create-share-link", + "--destination-directory", + "--get-file-link", + "--get-O365-drive-id", + "--log-dir", + "--min-notify-changes", + "--modified-by", + "--monitor-interval", + "--monitor-log-frequency", + "--monitor-fullscan-frequency", + "--remove-directory", + "--single-directory", + "--skip-dir", + "--skip-file", + "--skip-size", + "--source-directory", + "--space-reservation", + "--syncdir", + "--user-agent" ]; + writeln(`onedrive - A client for the Microsoft OneDrive Cloud Service + + Usage: + onedrive [options] --sync Do a one time synchronization - onedrive [options] --monitor + onedrive [options] --monitor Monitor filesystem and sync regularly - onedrive [options] --display-config + onedrive [options] --display-config Display the currently used configuration - onedrive [options] --display-sync-status + onedrive [options] --display-sync-status Query OneDrive service and report on pending changes - onedrive -h | --help + onedrive -h | --help Show this help screen - onedrive --version + onedrive --version Show version -Options: -`); - foreach (it; opt.sort!("a.optLong < b.optLong")) { - writefln(" %s%s%s%s\n %s", - it.optLong, - it.optShort == "" ? "" : " " ~ it.optShort, - argsNeedingOptions.canFind(it.optLong) ? " ARG" : "", - it.required ? 
" (required)" : "", it.help); - } -} - -unittest -{ - auto cfg = new Config(""); - cfg.load("config"); - assert(cfg.getValueString("sync_dir") == "~/OneDrive"); -} + Options: + `); + foreach (it; opt.sort!("a.optLong < b.optLong")) { + writefln(" %s%s%s%s\n %s", + it.optLong, + it.optShort == "" ? "" : " " ~ it.optShort, + argsNeedingOptions.canFind(it.optLong) ? " ARG" : "", + it.required ? " (required)" : "", it.help); + } +} \ No newline at end of file diff --git a/src/curlEngine.d b/src/curlEngine.d new file mode 100644 index 000000000..6670dd7d9 --- /dev/null +++ b/src/curlEngine.d @@ -0,0 +1,545 @@ +// What is this module called? +module curlEngine; + +// What does this module require to function? +import std.net.curl; +import etc.c.curl; +import std.datetime; +import std.conv; +import std.file; +import std.json; +import std.stdio; +import std.range; +import core.memory; + +// What other modules that we have created do we need to import? +import log; +import util; + +class CurlResponse { + HTTP.Method method; + const(char)[] url; + const(char)[][const(char)[]] requestHeaders; + const(char)[] postBody; + + bool hasResponse; + string[string] responseHeaders; + HTTP.StatusLine statusLine; + char[] content; + + this() { + reset(); + } + + void reset() { + method = HTTP.Method.undefined; + url = ""; + requestHeaders = null; + postBody = []; + hasResponse = false; + responseHeaders = null; + statusLine.reset(); + content = []; + } + + void addRequestHeader(const(char)[] name, const(char)[] value) { + requestHeaders[to!string(name)] = to!string(value); + } + + void connect(HTTP.Method method, const(char)[] url) { + this.method = method; + this.url = url; + } + + const JSONValue json() { + JSONValue json; + try { + json = content.parseJSON(); + } catch (JSONException e) { + // Log that a JSON Exception was caught, dont output the HTML response from OneDrive + addLogEntry("JSON Exception caught when performing HTTP operations - use --debug-https to diagnose further", ["debug"]); + } + return json; + }; + + void update(HTTP *http) { + hasResponse = true; + this.responseHeaders = http.responseHeaders(); + this.statusLine = http.statusLine; + addLogEntry("HTTP Response Headers: " ~ to!string(this.responseHeaders), ["debug"]); + addLogEntry("HTTP Status Line: " ~ to!string(this.statusLine), ["debug"]); + } + + @safe pure HTTP.StatusLine getStatus() { + return this.statusLine; + } + + // Return the current value of retryAfterValue + int getRetryAfterValue() { + int delayBeforeRetry; + // Is 'retry-after' in the response headers + if ("retry-after" in responseHeaders) { + // Set the retry-after value + addLogEntry("curlEngine.http.perform() => Received a 'Retry-After' Header Response with the following value: " ~ to!string(responseHeaders["retry-after"]), ["debug"]); + addLogEntry("curlEngine.http.perform() => Setting retryAfterValue to: " ~ responseHeaders["retry-after"], ["debug"]); + delayBeforeRetry = to!int(responseHeaders["retry-after"]); + } else { + // Use a 120 second delay as a default given header value was zero + // This value is based on log files and data when determining correct process for 429 response handling + delayBeforeRetry = 120; + // Update that we are over-riding the provided value with a default + addLogEntry("HTTP Response Header retry-after value was missing - Using a preconfigured default of: " ~ to!string(delayBeforeRetry), ["debug"]); + } + return delayBeforeRetry; + } + + const string parseRequestHeaders(const(const(char)[][const(char)[]]) headers) { + string 
requestHeadersStr = ""; + // Ensure response headers is not null and iterate over keys safely. + if (headers !is null) { + foreach (string header; headers.byKey()) { + if (header == "Authorization") { + continue; + } + // Use the 'in' operator to safely check if the key exists in the associative array. + if (auto val = header in headers) { + requestHeadersStr ~= "< " ~ header ~ ": " ~ *val ~ "\n"; + } + } + } + return requestHeadersStr; + } + + const string parseResponseHeaders(const(string[string]) headers) { + string responseHeadersStr = ""; + // Ensure response headers is not null and iterate over keys safely. + if (headers !is null) { + foreach (string header; headers.byKey()) { + // Check if the key actually exists before accessing it to avoid RangeError. + if (auto val = header in headers) { // 'in' checks for the key and returns a pointer to the value if found. + responseHeadersStr ~= "> " ~ header ~ ": " ~ *val ~ "\n"; // Dereference pointer to get the value. + } + } + } + return responseHeadersStr; + } + + const string dumpDebug() { + import std.range; + import std.format : format; + + string str = ""; + str ~= format("< %s %s\n", method, url); + if (!requestHeaders.empty) { + str ~= parseRequestHeaders(requestHeaders); + } + if (!postBody.empty) { + str ~= format("\n----\n%s\n----\n", postBody); + } + str ~= format("< %s\n", statusLine); + if (!responseHeaders.empty) { + str ~= parseResponseHeaders(responseHeaders); + } + return str; + } + + const string dumpResponse() { + import std.range; + import std.format : format; + + string str = ""; + if (!content.empty) { + str ~= format("\n----\n%s\n----\n", content); + } + return str; + } + + override string toString() const { + string str = "Curl debugging: \n"; + str ~= dumpDebug(); + if (hasResponse) { + str ~= "Curl response: \n"; + str ~= dumpResponse(); + } + return str; + } +} + +class CurlEngine { + + // Shared pool of CurlEngine instances accessible across all threads + __gshared CurlEngine[] curlEnginePool; // __gshared is used to declare a variable that is shared across all threads + + HTTP http; + File uploadFile; + CurlResponse response; + bool keepAlive; + ulong dnsTimeout; + string internalThreadId; + + this() { + http = HTTP(); // Directly initializes HTTP using its default constructor + response = null; // Initialize as null + internalThreadId = generateAlphanumericString(); + } + + // The destructor should only clean up resources owned directly by this instance + ~this() { + // Is the file still open? + if (uploadFile.isOpen()) { + uploadFile.close(); + } + + // Is 'response' cleared? + if (response !is null) { + object.destroy(response); // Destroy, then set to null + response = null; + } + + // Is the actual http instance is stopped? + if (!http.isStopped) { + // HTTP instance was not stopped .. 
we need to stop it + http.shutdown(); + object.destroy(http); // Destroy, however we cant set to null + } + } + + // Get a curl instance for the OneDrive API to use + static CurlEngine getCurlInstance() { + addLogEntry("CurlEngine getCurlInstance() called", ["debug"]); + + synchronized (CurlEngine.classinfo) { + // What is the current pool size + addLogEntry("CurlEngine curlEnginePool current size: " ~ to!string(curlEnginePool.length), ["debug"]); + + if (curlEnginePool.empty) { + addLogEntry("CurlEngine curlEnginePool is empty - constructing a new CurlEngine instance", ["debug"]); + return new CurlEngine; // Constructs a new CurlEngine with a fresh HTTP instance + } else { + CurlEngine curlEngine = curlEnginePool[$ - 1]; + curlEnginePool.popBack(); // assumes a LIFO (last-in, first-out) usage pattern + + // Is this engine stopped? + if (curlEngine.http.isStopped) { + // return a new curl engine as a stopped one cannot be used + addLogEntry("CurlEngine was in a stoppped state (not usable) - constructing a new CurlEngine instance", ["debug"]); + return new CurlEngine; // Constructs a new CurlEngine with a fresh HTTP instance + } else { + // return an existing curl engine + addLogEntry("CurlEngine was in a valid state - returning existing CurlEngine instance", ["debug"]); + addLogEntry("CurlEngine instance ID: " ~ curlEngine.internalThreadId, ["debug"]); + return curlEngine; + } + } + } + } + + // Release all curl instances + static void releaseAllCurlInstances() { + addLogEntry("CurlEngine releaseAllCurlInstances() called", ["debug"]); + synchronized (CurlEngine.classinfo) { + // What is the current pool size + addLogEntry("CurlEngine curlEnginePool size to release: " ~ to!string(curlEnginePool.length), ["debug"]); + if (curlEnginePool.length > 0) { + // Safely iterate and clean up each CurlEngine instance + foreach (curlEngineInstance; curlEnginePool) { + try { + curlEngineInstance.cleanup(true); // Cleanup instance by resetting values and flushing cookie cache + curlEngineInstance.shutdownCurlHTTPInstance(); // Assume proper cleanup of any resources used by HTTP + } catch (Exception e) { + // Log the error or handle it appropriately + // e.g., writeln("Error during cleanup/shutdown: ", e.toString()); + } + + // It's safe to destroy the object here assuming no other references exist + object.destroy(curlEngineInstance); // Destroy, then set to null + curlEngineInstance = null; + // Perform Garbage Collection on this destroyed curl engine + GC.collect(); + } + + // Clear the array after all instances have been handled + curlEnginePool.length = 0; // More explicit than curlEnginePool = []; + } + } + // Perform Garbage Collection on this destroyed curl engine + GC.collect(); + } + + // Return how many curl engines there are + static ulong curlEnginePoolLength() { + return curlEnginePool.length; + } + + // Destroy all curl instances + static void destroyAllCurlInstances() { + addLogEntry("CurlEngine destroyAllCurlInstances() called", ["debug"]); + // Release all 'curl' instances + releaseAllCurlInstances(); + } + + // We are releasing a curl instance back to the pool + void releaseEngine() { + // Log that we are releasing this engine back to the pool + addLogEntry("CurlEngine releaseEngine() called on instance id: " ~ to!string(internalThreadId), ["debug"]); + addLogEntry("CurlEngine curlEnginePool size before release: " ~ to!string(curlEnginePool.length), ["debug"]); + + // cleanup this curl instance before putting it back in the pool + cleanup(true); // Cleanup instance by resetting values 
and flushing cookie cache + synchronized (CurlEngine.classinfo) { + curlEnginePool ~= this; + addLogEntry("CurlEngine curlEnginePool size after release: " ~ to!string(curlEnginePool.length), ["debug"]); + } + // Perform Garbage Collection + GC.collect(); + } + + // Initialise this curl instance + void initialise(ulong dnsTimeout, ulong connectTimeout, ulong dataTimeout, ulong operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, ulong userRateLimit, ulong protocolVersion, bool keepAlive=true) { + // Setting this to false ensures that when we close the curl instance, any open sockets are closed - which we need to do when running + // multiple threads and API instances at the same time otherwise we run out of local files | sockets pretty quickly + this.keepAlive = keepAlive; + this.dnsTimeout = dnsTimeout; + + // Curl Timeout Handling + + // libcurl dns_cache_timeout timeout + // https://curl.se/libcurl/c/CURLOPT_DNS_CACHE_TIMEOUT.html + // https://dlang.org/library/std/net/curl/http.dns_timeout.html + http.dnsTimeout = (dur!"seconds"(dnsTimeout)); + + // Timeout for HTTPS connections + // https://curl.se/libcurl/c/CURLOPT_CONNECTTIMEOUT.html + // https://dlang.org/library/std/net/curl/http.connect_timeout.html + http.connectTimeout = (dur!"seconds"(connectTimeout)); + + // Timeout for activity on connection + // This is a DMD | DLANG specific item, not a libcurl item + // https://dlang.org/library/std/net/curl/http.data_timeout.html + // https://raw.githubusercontent.com/dlang/phobos/master/std/net/curl.d - private enum _defaultDataTimeout = dur!"minutes"(2); + http.dataTimeout = (dur!"seconds"(dataTimeout)); + + // Maximum time any operation is allowed to take + // This includes dns resolution, connecting, data transfer, etc. + // https://curl.se/libcurl/c/CURLOPT_TIMEOUT_MS.html + // https://dlang.org/library/std/net/curl/http.operation_timeout.html + http.operationTimeout = (dur!"seconds"(operationTimeout)); + + // Specify how many redirects should be allowed + http.maxRedirects(maxRedirects); + // Debug HTTPS + http.verbose = httpsDebug; + // Use the configured 'user_agent' value + http.setUserAgent = userAgent; + // What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6 + http.handle.set(CurlOption.ipresolve,protocolVersion); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only + + // What version of HTTP protocol do we use? + // Curl >= 7.62.0 defaults to http2 for a significant number of operations + if (httpProtocol) { + // Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1 + http.handle.set(CurlOption.http_version,2); + } + + // Configure upload / download rate limits if configured + // 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts + // A 0 value means rate is unlimited, and is the curl default + if (userRateLimit > 0) { + // set rate limit + http.handle.set(CurlOption.max_send_speed_large,userRateLimit); + http.handle.set(CurlOption.max_recv_speed_large,userRateLimit); + } + + // Explicitly set libcurl options to avoid using signal handlers in a multi-threaded environment + // See: https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html + // The CURLOPT_NOSIGNAL option is intended for use in multi-threaded programs to ensure that libcurl does not use any signal handling. + // Set CURLOPT_NOSIGNAL to 1 to prevent libcurl from using signal handlers, thus avoiding interference with the application's signal handling which could lead to issues such as unstable behavior or application crashes. 
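+		// (For reference, this is the D wrapper equivalent of the libcurl C call:
+		//  curl_easy_setopt(handle, CURLOPT_NOSIGNAL, 1L);)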
+		http.handle.set(CurlOption.nosignal,1);
+
+		// https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html
+		// Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled
+		http.handle.set(CurlOption.tcp_nodelay,0);
+
+		// https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html
+		// CURLOPT_FORBID_REUSE - make connection get closed at once after use
+		// Setting this to 0 ensures that we ARE reusing connections (we did this in v2.4.xx) to ensure connections remained open and usable
+		// Setting this to 1 ensures that when we close the curl instance, any open sockets are forced closed when the API curl instance is destroyed
+		// The libcurl default is 0 as per the documentation (to REUSE connections) - ensure we are configuring to reuse sockets
+		http.handle.set(CurlOption.forbid_reuse,0);
+
+		if (httpsDebug) {
+			// Output what options we are using so that in the debug log this can be tracked
+			addLogEntry("http.dnsTimeout = " ~ to!string(dnsTimeout), ["debug"]);
+			addLogEntry("http.connectTimeout = " ~ to!string(connectTimeout), ["debug"]);
+			addLogEntry("http.dataTimeout = " ~ to!string(dataTimeout), ["debug"]);
+			addLogEntry("http.operationTimeout = " ~ to!string(operationTimeout), ["debug"]);
+			addLogEntry("http.maxRedirects = " ~ to!string(maxRedirects), ["debug"]);
+			addLogEntry("http.CurlOption.ipresolve = " ~ to!string(protocolVersion), ["debug"]);
+			addLogEntry("http.header.Connection.keepAlive = " ~ to!string(keepAlive), ["debug"]);
+		}
+	}
+
+	void setResponseHolder(CurlResponse response) {
+		if (response is null) {
+			// Create a response instance if it doesn't already exist
+			if (this.response is null)
+				this.response = new CurlResponse();
+		} else {
+			this.response = response;
+		}
+	}
+
+	void addRequestHeader(const(char)[] name, const(char)[] value) {
+		setResponseHolder(null);
+		http.addRequestHeader(name, value);
+		response.addRequestHeader(name, value);
+	}
+
+	void connect(HTTP.Method method, const(char)[] url) {
+		setResponseHolder(null);
+		if (!keepAlive)
+			addRequestHeader("Connection", "close");
+		http.method = method;
+		http.url = url;
+		response.connect(method, url);
+	}
+
+	void setContent(const(char)[] contentType, const(char)[] sendData) {
+		setResponseHolder(null);
+		addRequestHeader("Content-Type", contentType);
+		if (sendData) {
+			http.contentLength = sendData.length;
+			http.onSend = (void[] buf) {
+				import std.algorithm: min;
+				size_t minLen = min(buf.length, sendData.length);
+				if (minLen == 0) return 0;
+				buf[0 .. minLen] = cast(void[]) sendData[0 .. minLen];
+				sendData = sendData[minLen .. $];
+				return minLen;
+			};
+			response.postBody = sendData;
+		}
+	}
+
+	void setFile(string filepath, string contentRange, ulong offset, ulong offsetSize) {
+		setResponseHolder(null);
+		// open file as read-only in binary mode
+		uploadFile = File(filepath, "rb");
+
+		if (contentRange.empty) {
+			offsetSize = uploadFile.size();
+		} else {
+			addRequestHeader("Content-Range", contentRange);
+			uploadFile.seek(offset);
+		}
+
+		// Setup progress bar to display
+		http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) {
+			return 0;
+		};
+
+		addRequestHeader("Content-Type", "application/octet-stream");
+		http.onSend = data => uploadFile.rawRead(data).length;
+		http.contentLength = offsetSize;
+	}
+
+	CurlResponse execute() {
+		scope(exit) {
+			cleanup();
+		}
+		setResponseHolder(null);
+		http.onReceive = (ubyte[] data) {
+			response.content ~= data;
+			// HTTP Server Response Code Debugging if --https-debug is being used
+
+			return data.length;
+		};
+		http.perform();
+		response.update(&http);
+		return response;
+	}
+
+	CurlResponse download(string originalFilename, string downloadFilename) {
+		setResponseHolder(null);
+		// open downloadFilename as write in binary mode
+		auto file = File(downloadFilename, "wb");
+
+		// function scopes
+		scope(exit) {
+			cleanup();
+			if (file.isOpen()){
+				// close open file
+				file.close();
+			}
+		}
+
+		http.onReceive = (ubyte[] data) {
+			file.rawWrite(data);
+			return data.length;
+		};
+
+		http.perform();
+
+		// Rename downloaded file
+		rename(downloadFilename, originalFilename);
+
+		response.update(&http);
+		return response;
+	}
+
+	// Cleanup this instance internal variables that may have been set
+	void cleanup(bool flushCookies = false) {
+		// Reset any values to defaults, freeing any set objects
+		addLogEntry("CurlEngine cleanup() called on instance id: " ~ to!string(internalThreadId), ["debug"]);
+
+		// Is the instance stopped?
+		if (!http.isStopped) {
+			// A stopped instance is not usable, these cannot be reset
+			http.clearRequestHeaders();
+			http.onSend = null;
+			http.onReceive = null;
+			http.onReceiveHeader = null;
+			http.onReceiveStatusLine = null;
+			http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) {
+				return 0;
+			};
+			http.contentLength = 0;
+
+			// We only do this if we are pushing the curl engine back to the curl pool
+			if (flushCookies) {
+				// Flush the cookie cache as well
+				http.flushCookieJar();
+				http.clearSessionCookies();
+				http.clearAllCookies();
+			}
+		}
+
+		// set the response to null
+		response = null;
+
+		// close file if open
+		if (uploadFile.isOpen()){
+			// close open file
+			uploadFile.close();
+		}
+	}
+
+	// Shut down the curl instance & close any open sockets
+	void shutdownCurlHTTPInstance() {
+		addLogEntry("CurlEngine shutdownCurlHTTPInstance() called on instance id: " ~ to!string(internalThreadId), ["debug"]);
+
+		// Is the instance stopped?
+		if (!http.isStopped) {
+			addLogEntry("HTTP instance still active: " ~ to!string(internalThreadId), ["debug"]);
+			http.shutdown();
+			object.destroy(http); // Destroy, however we can't set to null
+			addLogEntry("HTTP instance shutdown and destroyed: " ~ to!string(internalThreadId), ["debug"]);
+		} else {
+			// Already stopped .. destroy it
+			object.destroy(http); // Destroy, however we can't set to null
+			addLogEntry("Stopped HTTP instance shutdown and destroyed: " ~ to!string(internalThreadId), ["debug"]);
+		}
+		// Perform Garbage Collection
+		GC.collect();
+	}
+}
\ No newline at end of file
diff --git a/src/itemdb.d b/src/itemdb.d
index 28fc47121..3a12ab532 100644
--- a/src/itemdb.d
+++ b/src/itemdb.d
@@ -1,3 +1,7 @@
+// What is this module called?
+module itemdb;
+
+// What does this module require to function?
 import std.datetime;
 import std.exception;
 import std.path;
@@ -5,19 +9,27 @@
 import std.string;
 import std.stdio;
 import std.algorithm.searching;
 import core.stdc.stdlib;
+import std.json;
+import std.conv;
+
+// What other modules that we have created do we need to import?
 import sqlite;
-static import log;
+import util;
+import log;
 
 enum ItemType {
+	none,
 	file,
 	dir,
-	remote
+	remote,
+	unknown
}
 
 struct Item {
 	string driveId;
 	string id;
 	string name;
+	string remoteName;
 	ItemType type;
 	string eTag;
 	string cTag;
@@ -26,25 +38,167 @@
 	string quickXorHash;
 	string sha256Hash;
 	string remoteDriveId;
+	string remoteParentId;
 	string remoteId;
+	ItemType remoteType;
 	string syncStatus;
+	string size;
+}
+
+// Construct an Item struct from a JSON driveItem
+Item makeDatabaseItem(JSONValue driveItem) {
+
+	Item item = {
+		id: driveItem["id"].str,
+		name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Business
+		eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Business
+		cTag: "cTag" in driveItem ? driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Business)
+		remoteName: "actualOnlineName" in driveItem ? driveItem["actualOnlineName"].str : null, // actualOnlineName is only used with OneDrive Business Shared Folders
+	};
+
+	// OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834
+	// OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive
+	if(isItemDeleted(driveItem)) {
+		// Set mtime to SysTime(0)
+		item.mtime = SysTime(0);
+	} else {
+		// Item is not in a deleted state
+		// Resolve 'Key not found: fileSystemInfo' when the item is a remote item
+		// https://github.com/abraunegg/onedrive/issues/11
+		if (isItemRemote(driveItem)) {
+			// remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default
+			// Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI
+			// to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to an application crash
+			// See: https://github.com/abraunegg/onedrive/issues/1533
+			if ("fileSystemInfo" in driveItem["remoteItem"]) {
+				// 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases
+				item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
+			} else {
+				// is a remote item, but 'fileSystemInfo' is missing from 'remoteItem'
+				if ("fileSystemInfo" in driveItem) {
+					item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
+				}
+			}
+		} else {
+			// Does fileSystemInfo exist at all ?
+ if ("fileSystemInfo" in driveItem) { + item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); + } + } + } + + // Set this item object type + bool typeSet = false; + if (isItemFile(driveItem)) { + // 'file' object exists in the JSON + addLogEntry("Flagging object as a file", ["debug"]); + typeSet = true; + item.type = ItemType.file; + } + + if (isItemFolder(driveItem)) { + // 'folder' object exists in the JSON + addLogEntry("Flagging object as a directory", ["debug"]); + typeSet = true; + item.type = ItemType.dir; + } + + if (isItemRemote(driveItem)) { + // 'remote' object exists in the JSON + addLogEntry("Flagging object as a remote", ["debug"]); + typeSet = true; + item.type = ItemType.remote; + } + + // root and remote items do not have parentReference + if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) { + item.driveId = driveItem["parentReference"]["driveId"].str; + if (hasParentReferenceId(driveItem)) { + item.parentId = driveItem["parentReference"]["id"].str; + } + } + + // extract the file hash and file size + if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) { + // Get file size + if (hasFileSize(driveItem)) { + item.size = to!string(driveItem["size"].integer); + // Get quickXorHash as default + if ("quickXorHash" in driveItem["file"]["hashes"]) { + item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str; + } else { + addLogEntry("quickXorHash is missing from " ~ driveItem["id"].str, ["debug"]); + } + + // If quickXorHash is empty .. + if (item.quickXorHash.empty) { + // Is there a sha256Hash? + if ("sha256Hash" in driveItem["file"]["hashes"]) { + item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str; + } else { + addLogEntry("sha256Hash is missing from " ~ driveItem["id"].str, ["debug"]); + } + } + } else { + // So that we have at least a zero value here as the API provided no 'size' data for this file item + item.size = "0"; + } + } + + // Is the object a remote drive item - living on another driveId ? 
+ if (isItemRemote(driveItem)) { + // Check and assign remoteDriveId + if ("parentReference" in driveItem["remoteItem"] && "driveId" in driveItem["remoteItem"]["parentReference"]) { + item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str; + } + + // Check and assign remoteParentId + if ("parentReference" in driveItem["remoteItem"] && "id" in driveItem["remoteItem"]["parentReference"]) { + item.remoteParentId = driveItem["remoteItem"]["parentReference"]["id"].str; + } + + // Check and assign remoteId + if ("id" in driveItem["remoteItem"]) { + item.remoteId = driveItem["remoteItem"]["id"].str; + } + + // Check and assign remoteType + if ("file" in driveItem["remoteItem"].object) { + item.remoteType = ItemType.file; + } else { + item.remoteType = ItemType.dir; + } + } + + // We have 3 different operational modes where 'item.syncStatus' is used to flag if an item is synced or not: + // - National Cloud Deployments do not support /delta as a query + // - When using --single-directory + // - When using --download-only --cleanup-local-files + // + // Thus we need to track in the database that this item is in sync + // As we are making an item, set the syncStatus to Y + // ONLY when either of the three modes above are being used, all the existing DB entries will get set to N + // so when processing /children, it can be identified what the 'deleted' difference is + item.syncStatus = "Y"; + + // Return the created item + return item; } -final class ItemDatabase -{ +final class ItemDatabase { // increment this for every change in the db schema - immutable int itemDatabaseVersion = 11; + immutable int itemDatabaseVersion = 13; Database db; string insertItemStmt; string updateItemStmt; string selectItemByIdStmt; + string selectItemByRemoteIdStmt; string selectItemByParentIdStmt; string deleteItemByIdStmt; bool databaseInitialised = false; - this(const(char)[] filename) - { + this(const(char)[] filename) { db = Database(filename); int dbVersion; try { @@ -52,14 +206,40 @@ final class ItemDatabase } catch (SqliteException e) { // An error was generated - what was the error? if (e.msg == "database is locked") { - writeln(); - log.error("ERROR: onedrive application is already running - check system process list for active application instances"); - log.vlog(" - Use 'sudo ps aufxw | grep onedrive' to potentially determine acive running process"); - writeln(); + addLogEntry(); + addLogEntry("ERROR: The 'onedrive' application is already running - please check system process list for active application instances"); + addLogEntry(" - Use 'sudo ps aufxw | grep onedrive' to potentially determine active running process"); + addLogEntry(); } else { - writeln(); - log.error("ERROR: An internal database error occurred: " ~ e.msg); - writeln(); + // A different error .. detail the message, detail the actual SQLite Error Code to assist with troubleshooting + addLogEntry(); + addLogEntry("ERROR: An internal database error occurred: " ~ e.msg ~ " (SQLite Error Code: " ~ to!string(e.errorCode) ~ ")"); + addLogEntry(); + + // Give the user some additional information and pointers on this error + // The below list is based on user issue / discussion reports since 2018 + switch (e.errorCode) { + case 7: // SQLITE_NOMEM + addLogEntry("The operation could not be completed due to insufficient memory. Please close unnecessary applications to free up memory and try again."); + break; + case 10: // SQLITE_IOERR + addLogEntry("A disk I/O error occurred. 
This could be due to issues with the storage medium (e.g., disk full, hardware failure, filesystem corruption). Please check your disk's health using a disk utility tool, ensure there is enough free space, and check the filesystem for errors."); + break; + case 11: // SQLITE_CORRUPT + addLogEntry("The database file appears to be corrupt. This could be due to incomplete or failed writes, hardware issues, or unexpected interruptions during database operations. Please perform a --resync operation."); + break; + case 14: // SQLITE_CANTOPEN + addLogEntry("The database file could not be opened. Please check that the database file exists, has the correct permissions, and is not being blocked by another process or security software."); + break; + case 26: // SQLITE_NOTADB + addLogEntry("The file attempted to be opened does not appear to be a valid SQLite database, or it may have been corrupted to a point where it's no longer recognizable. Please check your application configuration directory and/or perform a --resync operation."); + break; + default: + addLogEntry("An unexpected error occurred. Please consult the application documentation or support to resolve this issue."); + break; + } + // Blank line before exit + addLogEntry(); } return; } @@ -67,10 +247,15 @@ final class ItemDatabase if (dbVersion == 0) { createTable(); } else if (db.getVersion() != itemDatabaseVersion) { - log.log("The item database is incompatible, re-creating database table structures"); + addLogEntry("The item database is incompatible, re-creating database table structures"); db.exec("DROP TABLE item"); createTable(); } + + // What is the threadsafe value + auto threadsafeValue = db.getThreadsafeValue(); + addLogEntry("Threadsafe database value: " ~ to!string(threadsafeValue), ["debug"]); + // Set the enforcement of foreign key constraints. // https://www.sqlite.org/pragma.html#pragma_foreign_keys // PRAGMA foreign_keys = boolean; @@ -99,12 +284,12 @@ final class ItemDatabase db.exec("PRAGMA locking_mode = EXCLUSIVE"); insertItemStmt = " - INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13) + INSERT OR REPLACE INTO item (driveId, id, name, remoteName, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteParentId, remoteId, remoteType, syncStatus, size) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17) "; updateItemStmt = " UPDATE item - SET name = ?3, type = ?4, eTag = ?5, cTag = ?6, mtime = ?7, parentId = ?8, quickXorHash = ?9, sha256Hash = ?10, remoteDriveId = ?11, remoteId = ?12, syncStatus = ?13 + SET name = ?3, remoteName = ?4, type = ?5, eTag = ?6, cTag = ?7, mtime = ?8, parentId = ?9, quickXorHash = ?10, sha256Hash = ?11, remoteDriveId = ?12, remoteParentId = ?13, remoteId = ?14, remoteType = ?15, syncStatus = ?16, size = ?17 WHERE driveId = ?1 AND id = ?2 "; selectItemByIdStmt = " @@ -112,6 +297,11 @@ final class ItemDatabase FROM item WHERE driveId = ?1 AND id = ?2 "; + selectItemByRemoteIdStmt = " + SELECT * + FROM item + WHERE remoteDriveId = ?1 AND remoteId = ?2 + "; selectItemByParentIdStmt = "SELECT * FROM item WHERE driveId = ? AND parentId = ?"; deleteItemByIdStmt = "DELETE FROM item WHERE driveId = ? 
AND id = ?"; @@ -119,17 +309,16 @@ final class ItemDatabase databaseInitialised = true; } - bool isDatabaseInitialised() - { + bool isDatabaseInitialised() { return databaseInitialised; } - void createTable() - { + void createTable() { db.exec("CREATE TABLE item ( driveId TEXT NOT NULL, id TEXT NOT NULL, name TEXT NOT NULL, + remoteName TEXT, type TEXT NOT NULL, eTag TEXT, cTag TEXT, @@ -138,9 +327,12 @@ final class ItemDatabase quickXorHash TEXT, sha256Hash TEXT, remoteDriveId TEXT, + remoteParentId TEXT, remoteId TEXT, + remoteType TEXT, deltaLink TEXT, syncStatus TEXT, + size TEXT, PRIMARY KEY (driveId, id), FOREIGN KEY (driveId, parentId) REFERENCES item (driveId, id) @@ -154,32 +346,27 @@ final class ItemDatabase db.setVersion(itemDatabaseVersion); } - void insert(const ref Item item) - { + void insert(const ref Item item) { auto p = db.prepare(insertItemStmt); bindItem(item, p); p.exec(); } - void update(const ref Item item) - { + void update(const ref Item item) { auto p = db.prepare(updateItemStmt); bindItem(item, p); p.exec(); } - void dump_open_statements() - { + void dump_open_statements() { db.dump_open_statements(); } - int db_checkpoint() - { + int db_checkpoint() { return db.db_checkpoint(); } - void upsert(const ref Item item) - { + void upsert(const ref Item item) { auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? AND id = ?"); s.bind(1, item.driveId); s.bind(2, item.id); @@ -191,8 +378,7 @@ final class ItemDatabase stmt.exec(); } - Item[] selectChildren(const(char)[] driveId, const(char)[] id) - { + Item[] selectChildren(const(char)[] driveId, const(char)[] id) { auto p = db.prepare(selectItemByParentIdStmt); p.bind(1, driveId); p.bind(2, id); @@ -205,8 +391,7 @@ final class ItemDatabase return items; } - bool selectById(const(char)[] driveId, const(char)[] id, out Item item) - { + bool selectById(const(char)[] driveId, const(char)[] id, out Item item) { auto p = db.prepare(selectItemByIdStmt); p.bind(1, driveId); p.bind(2, id); @@ -218,9 +403,20 @@ final class ItemDatabase return false; } + bool selectByRemoteId(const(char)[] remoteDriveId, const(char)[] remoteId, out Item item) { + auto p = db.prepare(selectItemByRemoteIdStmt); + p.bind(1, remoteDriveId); + p.bind(2, remoteId); + auto r = p.exec(); + if (!r.empty) { + item = buildItem(r); + return true; + } + return false; + } + // returns true if an item id is in the database - bool idInLocalDatabase(const(string) driveId, const(string)id) - { + bool idInLocalDatabase(const(string) driveId, const(string)id) { auto p = db.prepare(selectItemByIdStmt); p.bind(1, driveId); p.bind(2, id); @@ -233,18 +429,11 @@ final class ItemDatabase // returns the item with the given path // the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3" - bool selectByPath(const(char)[] path, string rootDriveId, out Item item) - { + bool selectByPath(const(char)[] path, string rootDriveId, out Item item) { Item currItem = { driveId: rootDriveId }; // Issue https://github.com/abraunegg/onedrive/issues/578 - if (startsWith(path, "./") || path == ".") { - // Need to remove the . from the path prefix - path = "root/" ~ path.chompPrefix("."); - } else { - // Leave path as it is - path = "root/" ~ path; - } + path = "root/" ~ (startsWith(path, "./") || path == "." ? 
path.chompPrefix(".") : path); auto s = db.prepare("SELECT * FROM item WHERE name = ?1 AND driveId IS ?2 AND parentId IS ?3"); foreach (name; pathSplitter(path)) { @@ -254,12 +443,15 @@ final class ItemDatabase auto r = s.exec(); if (r.empty) return false; currItem = buildItem(r); - // if the item is of type remote substitute it with the child + + // If the item is of type remote substitute it with the child if (currItem.type == ItemType.remote) { + addLogEntry("Record is a Remote Object: " ~ to!string(currItem), ["debug"]); Item child; if (selectById(currItem.remoteDriveId, currItem.remoteId, child)) { assert(child.type != ItemType.remote, "The type of the child cannot be remote"); currItem = child; + addLogEntry("Selecting Record that is NOT Remote Object: " ~ to!string(currItem), ["debug"]); } } } @@ -267,19 +459,12 @@ final class ItemDatabase return true; } - // same as selectByPath() but it does not traverse remote folders - bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item) - { + // same as selectByPath() but it does not traverse remote folders, returns the remote element if that is what is required + bool selectByPathIncludingRemoteItems(const(char)[] path, string rootDriveId, out Item item) { Item currItem = { driveId: rootDriveId }; // Issue https://github.com/abraunegg/onedrive/issues/578 - if (startsWith(path, "./") || path == ".") { - // Need to remove the . from the path prefix - path = "root/" ~ path.chompPrefix("."); - } else { - // Leave path as it is - path = "root/" ~ path; - } + path = "root/" ~ (startsWith(path, "./") || path == "." ? path.chompPrefix(".") : path); auto s = db.prepare("SELECT * FROM item WHERE name IS ?1 AND driveId IS ?2 AND parentId IS ?3"); foreach (name; pathSplitter(path)) { @@ -290,75 +475,129 @@ final class ItemDatabase if (r.empty) return false; currItem = buildItem(r); } + + if (currItem.type == ItemType.remote) { + addLogEntry("Record selected is a Remote Object: " ~ to!string(currItem), ["debug"]); + } + item = currItem; return true; } - void deleteById(const(char)[] driveId, const(char)[] id) - { + void deleteById(const(char)[] driveId, const(char)[] id) { auto p = db.prepare(deleteItemByIdStmt); p.bind(1, driveId); p.bind(2, id); p.exec(); } - private void bindItem(const ref Item item, ref Statement stmt) - { + private void bindItem(const ref Item item, ref Statement stmt) { with (stmt) with (item) { bind(1, driveId); bind(2, id); bind(3, name); + bind(4, remoteName); + // type handling string typeStr = null; final switch (type) with (ItemType) { case file: typeStr = "file"; break; case dir: typeStr = "dir"; break; case remote: typeStr = "remote"; break; + case unknown: typeStr = "unknown"; break; + case none: typeStr = null; break; } - bind(4, typeStr); - bind(5, eTag); - bind(6, cTag); - bind(7, mtime.toISOExtString()); - bind(8, parentId); - bind(9, quickXorHash); - bind(10, sha256Hash); - bind(11, remoteDriveId); - bind(12, remoteId); - bind(13, syncStatus); + bind(5, typeStr); + bind(6, eTag); + bind(7, cTag); + bind(8, mtime.toISOExtString()); + bind(9, parentId); + bind(10, quickXorHash); + bind(11, sha256Hash); + bind(12, remoteDriveId); + bind(13, remoteParentId); + bind(14, remoteId); + // remoteType handling + string remoteTypeStr = null; + final switch (remoteType) with (ItemType) { + case file: remoteTypeStr = "file"; break; + case dir: remoteTypeStr = "dir"; break; + case remote: remoteTypeStr = "remote"; break; + case unknown: remoteTypeStr = "unknown"; break; + case none: remoteTypeStr = 
null; break; + } + bind(15, remoteTypeStr); + bind(16, syncStatus); + bind(17, size); } } - private Item buildItem(Statement.Result result) - { + private Item buildItem(Statement.Result result) { assert(!result.empty, "The result must not be empty"); - assert(result.front.length == 14, "The result must have 14 columns"); + assert(result.front.length == 18, "The result must have 18 columns"); Item item = { + + // column 0: driveId + // column 1: id + // column 2: name + // column 3: remoteName - only used when there is a difference in the local name & remote shared folder name + // column 4: type + // column 5: eTag + // column 6: cTag + // column 7: mtime + // column 8: parentId + // column 9: quickXorHash + // column 10: sha256Hash + // column 11: remoteDriveId + // column 12: remoteParentId + // column 13: remoteId + // column 14: remoteType + // column 15: deltaLink + // column 16: syncStatus + // column 17: size + driveId: result.front[0].dup, id: result.front[1].dup, name: result.front[2].dup, - eTag: result.front[4].dup, - cTag: result.front[5].dup, - mtime: SysTime.fromISOExtString(result.front[6]), - parentId: result.front[7].dup, - quickXorHash: result.front[8].dup, - sha256Hash: result.front[9].dup, - remoteDriveId: result.front[10].dup, - remoteId: result.front[11].dup, - syncStatus: result.front[12].dup + remoteName: result.front[3].dup, + // Column 4 is type - not set here + eTag: result.front[5].dup, + cTag: result.front[6].dup, + mtime: SysTime.fromISOExtString(result.front[7]), + parentId: result.front[8].dup, + quickXorHash: result.front[9].dup, + sha256Hash: result.front[10].dup, + remoteDriveId: result.front[11].dup, + remoteParentId: result.front[12].dup, + remoteId: result.front[13].dup, + // Column 14 is remoteType - not set here + // Column 15 is deltaLink - not set here + syncStatus: result.front[16].dup, + size: result.front[17].dup }; - switch (result.front[3]) { + // Configure item.type + switch (result.front[4]) { case "file": item.type = ItemType.file; break; case "dir": item.type = ItemType.dir; break; case "remote": item.type = ItemType.remote; break; default: assert(0, "Invalid item type"); } + + // Configure item.remoteType + switch (result.front[14]) { + // We only care about 'dir' and 'file' for 'remote' items + case "file": item.remoteType = ItemType.file; break; + case "dir": item.remoteType = ItemType.dir; break; + default: item.remoteType = ItemType.none; break; // Default to ItemType.none + } + + // Return item return item; } // computes the path of the given item id // the path is relative to the sync directory ex: "Music/Turbo Killer.mp3" // the trailing slash is not added even if the item is a directory - string computePath(const(char)[] driveId, const(char)[] id) - { + string computePath(const(char)[] driveId, const(char)[] id) { assert(driveId && id); string path; Item item; @@ -406,9 +645,9 @@ final class ItemDatabase } } else { // broken tree - log.vdebug("The following generated a broken tree query:"); - log.vdebug("Drive ID: ", driveId); - log.vdebug("Item ID: ", id); + addLogEntry("The following generated a broken tree query:", ["debug"]); + addLogEntry("Drive ID: " ~ to!string(driveId), ["debug"]); + addLogEntry("Item ID: " ~ to!string(id), ["debug"]); assert(0); } } @@ -416,8 +655,7 @@ final class ItemDatabase return path; } - Item[] selectRemoteItems() - { + Item[] selectRemoteItems() { Item[] items; auto stmt = db.prepare("SELECT * FROM item WHERE remoteDriveId IS NOT NULL"); auto res = stmt.exec(); @@ -428,8 +666,11 @@ final class 
ItemDatabase return items; } - string getDeltaLink(const(char)[] driveId, const(char)[] id) - { + string getDeltaLink(const(char)[] driveId, const(char)[] id) { + // Log what we received + addLogEntry("DeltaLink Query (driveId): " ~ to!string(driveId), ["debug"]); + addLogEntry("DeltaLink Query (id): " ~ to!string(id), ["debug"]); + assert(driveId && id); auto stmt = db.prepare("SELECT deltaLink FROM item WHERE driveId = ?1 AND id = ?2"); stmt.bind(1, driveId); @@ -439,8 +680,7 @@ final class ItemDatabase return res.front[0].dup; } - void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) - { + void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) { assert(driveId && id); assert(deltaLink); auto stmt = db.prepare("UPDATE item SET deltaLink = ?3 WHERE driveId = ?1 AND id = ?2"); @@ -453,10 +693,9 @@ final class ItemDatabase // National Cloud Deployments (US and DE) do not support /delta as a query // We need to track in the database that this item is in sync // As we query /children to get all children from OneDrive, update anything in the database - // to be flagged as not-in-sync, thus, we can use that flag to determing what was previously + // to be flagged as not-in-sync, thus, we can use that flag to determine what was previously // in-sync, but now deleted on OneDrive - void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) - { + void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) { assert(driveId); auto stmt = db.prepare("UPDATE item SET syncStatus = 'N' WHERE driveId = ?1 AND id = ?2"); stmt.bind(1, driveId); @@ -466,8 +705,7 @@ final class ItemDatabase // National Cloud Deployments (US and DE) do not support /delta as a query // Select items that have a out-of-sync flag set - Item[] selectOutOfSyncItems(const(char)[] driveId) - { + Item[] selectOutOfSyncItems(const(char)[] driveId) { assert(driveId); Item[] items; auto stmt = db.prepare("SELECT * FROM item WHERE syncStatus = 'N' AND driveId = ?1"); @@ -482,8 +720,7 @@ final class ItemDatabase // OneDrive Business Folders are stored in the database potentially without a root | parentRoot link // Select items associated with the provided driveId - Item[] selectByDriveId(const(char)[] driveId) - { + Item[] selectByDriveId(const(char)[] driveId) { assert(driveId); Item[] items; auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND parentId IS NULL"); @@ -497,21 +734,21 @@ final class ItemDatabase } // Perform a vacuum on the database, commit WAL / SHM to file - void performVacuum() - { + void performVacuum() { + addLogEntry("Attempting to perform a database vacuum to merge any temporary data", ["debug"]); try { auto stmt = db.prepare("VACUUM;"); stmt.exec(); + addLogEntry("Database vacuum is complete", ["debug"]); } catch (SqliteException e) { - writeln(); - log.error("ERROR: Unable to perform a database vacuum: " ~ e.msg); - writeln(); + addLogEntry(); + addLogEntry("ERROR: Unable to perform a database vacuum: " ~ e.msg); + addLogEntry(); } } // Select distinct driveId items from database - string[] selectDistinctDriveIds() - { + string[] selectDistinctDriveIds() { string[] driveIdArray; auto stmt = db.prepare("SELECT DISTINCT driveId FROM item;"); auto res = stmt.exec(); @@ -522,4 +759,4 @@ final class ItemDatabase } return driveIdArray; } -} +} \ No newline at end of file diff --git a/src/log.d b/src/log.d index b7aa0da68..f55246dcc 100644 --- a/src/log.d +++ b/src/log.d @@ -1,239 +1,216 @@ +// What is this module called? 
+module log;
+
+// What does this module require to function?
import std.stdio;
import std.file;
import std.datetime;
-import std.process;
-import std.conv;
-import core.memory;
-import core.sys.posix.pwd, core.sys.posix.unistd, core.stdc.string : strlen;
-import std.algorithm : splitter;
+import std.concurrency;
+import std.typecons;
+import core.sync.condition;
+import core.sync.mutex;
+import core.thread;
+import std.format;
+import std.string;
+
version(Notifications) {
import dnotify;
}
-// enable verbose logging
-long verbose;
-bool writeLogFile = false;
-bool logFileWriteFailFlag = false;
-
-private bool doNotifications;
-
-// shared string variable for username
-string username;
-string logFilePath;
-
-void init(string logDir)
-{
- writeLogFile = true;
- username = getUserName();
- logFilePath = logDir;
-
- if (!exists(logFilePath)){
- // logfile path does not exist
- try {
- mkdirRecurse(logFilePath);
- }
- catch (std.file.FileException e) {
- // we got an error ..
- writeln("\nUnable to access ", logFilePath);
- writeln("Please manually create '",logFilePath, "' and set appropriate permissions to allow write access");
- writeln("The requested client activity log will instead be located in your users home directory");
- }
- }
-}
-
-void setNotifications(bool value)
-{
- version(Notifications) {
- // if we try to enable notifications, check for server availability
- // and disable in case dbus server is not reachable
- if (value) {
- auto serverAvailable = dnotify.check_availability();
- if (!serverAvailable) {
- log("Notification (dbus) server not available, disabling");
- value = false;
+// Shared module object
+shared LogBuffer logBuffer;
+
+// Timer for logging
+shared MonoTime lastInsertedTime;
+
+class LogBuffer {
+ private:
+ string[3][] buffer;
+ Mutex bufferLock;
+ Condition condReady;
+ string logFilePath;
+ bool writeToFile;
+ bool verboseLogging;
+ bool debugLogging;
+ Thread flushThread;
+ bool isRunning;
+ bool sendGUINotification;
+
+ public:
+ this(bool verboseLogging, bool debugLogging) {
+ // Initialise the mutex
+ bufferLock = new Mutex();
+ condReady = new Condition(bufferLock);
+ // Initialise other items; file logging stays off until enableLogFileOutput() configures it
+ this.logFilePath = "";
+ this.writeToFile = false;
+ this.verboseLogging = verboseLogging;
+ this.debugLogging = debugLogging;
+ this.isRunning = true;
+ this.sendGUINotification = true;
+ this.flushThread = new Thread(&flushBuffer);
+ flushThread.isDaemon(true);
+ flushThread.start();
+ }
+
+ // Shutdown logging
+ void shutdown() {
+ synchronized(bufferLock) {
+ if (!isRunning) return; // Prevent multiple shutdowns
+ isRunning = false;
+ condReady.notifyAll(); // Wake up all waiting threads
}
+
+ // Wait for the flush thread to finish outside of the synchronized block to avoid deadlocks
+ if (flushThread.isRunning()) {
+ // Join all threads
+ flushThread.join();
+ // Flush any remaining log
+ flushBuffer();
+ }
+
+ // Flush anything remaining
+ flush(); // Perform a final flush to ensure all data is processed
+ flushBuffer(); // Finally flush the buffers one last time
}
- }
- doNotifications = value;
-}
-
-void log(T...)(T args)
-{
- writeln(args);
- if(writeLogFile){
- // Write to log file
- logfileWriteLine(args);
- }
-}
-
-void logAndNotify(T...)(T args)
-{
- notify(args);
- log(args);
-}
-
-void fileOnly(T...)(T args)
-{
- if(writeLogFile){
- // Write to log file
- logfileWriteLine(args);
- }
-}
-
-void vlog(T...)(T args)
-{
- if (verbose >= 1) {
- writeln(args);
- if(writeLogFile){
- // Write to log file
- logfileWriteLine(args);
- }
- }
-}
-void vdebug(T...)(T args)
-{
- if (verbose >= 2) {
- writeln("[DEBUG] ", args);
- if(writeLogFile){
- // Write to log file
- logfileWriteLine("[DEBUG] ", args);
- }
- }
+ shared void logThisMessage(string message, string[] levels = ["info"]) {
+ // Generate the timestamp for this log entry
+ auto timeStamp = leftJustify(Clock.currTime().toString(), 28, '0');
+
+ synchronized(bufferLock) {
+ foreach (level; levels) {
+ // Normal application output
+ if (!debugLogging) {
+ if ((level == "info") || ((verboseLogging) && (level == "verbose")) || (level == "logFileOnly") || (level == "consoleOnly") || (level == "consoleOnlyNoNewLine")) {
+ // Add this message to the buffer, with this format
+ buffer ~= [timeStamp, level, format("%s", message)];
+ }
+ } else {
+ // Debug Logging (--verbose --verbose | -v -v | -vv) output
+ // Add this message, regardless of 'level', to the buffer, with this format
+ buffer ~= [timeStamp, level, format("DEBUG: %s", message)];
+ // If there are multiple 'levels' configured, ignore this and break as we are doing debug logging
+ break;
+ }
+
+ // Submit the message to the dbus / notification daemon for display within the GUI being used
+ // Will not send GUI notifications when running in debug mode
+ if ((!debugLogging) && (level == "notify")) {
+ version(Notifications) {
+ if (sendGUINotification) {
+ notify(message);
+ }
+ }
+ }
+ }
+ (cast()condReady).notify();
+ }
+ }
+
+ shared void notify(string message) {
+ // Use dnotify's functionality for GUI notifications, if GUI notifications is enabled
+ version(Notifications) {
+ try {
+ auto n = new Notification("OneDrive Client", message, "IGNORED");
+ // Show notification for 10 seconds
+ n.timeout = 10;
+ n.show();
+ } catch (NotificationError e) {
+ sendGUINotification = false;
+ addLogEntry("Unable to send notification; GUI notifications are now disabled: " ~ e.message);
+ }
+ }
+ }
+
+ private void flushBuffer() {
+ while (isRunning) {
+ flush();
+ }
+ stdout.flush();
+ }
+
+ private void flush() {
+ string[3][] messages;
+ synchronized(bufferLock) {
+ while (buffer.empty && isRunning) {
+ condReady.wait();
+ }
+ messages = buffer;
+ buffer.length = 0;
+ }
+
+ foreach (msg; messages) {
+ // timestamp, logLevel, message
+ // Always write the log line to the console, if level != logFileOnly
+ if (msg[1] != "logFileOnly") {
+ // Console output .. what sort of output
+ if (msg[1] == "consoleOnlyNoNewLine") {
+ // This is used to write out a message to the console only, without a new line
+ // This is used in non-verbose mode to indicate something is happening when downloading JSON data from OneDrive or when we need user input from --resync
+ write(msg[2]);
+ } else {
+ // write this to the console with a new line
+ writeln(msg[2]);
+ }
+ }
+
+ // Was this just console only output?
+ if ((msg[1] != "consoleOnlyNoNewLine") && (msg[1] != "consoleOnly")) { + // Write to the logfile only if configured to do so - console only items should not be written out + if (writeToFile) { + string logFileLine = format("[%s] %s", msg[0], msg[2]); + std.file.append(logFilePath, logFileLine ~ "\n"); + } + } + } + // Clear Messages + messages = []; + } } -void vdebugNewLine(T...)(T args) -{ - if (verbose >= 2) { - writeln("\n[DEBUG] ", args); - if(writeLogFile){ - // Write to log file - logfileWriteLine("\n[DEBUG] ", args); - } - } +// Function to initialize the logging system +void initialiseLogging(bool verboseLogging = false, bool debugLogging = false) { + logBuffer = cast(shared) new LogBuffer(verboseLogging, debugLogging); + lastInsertedTime = MonoTime.currTime(); } -void error(T...)(T args) -{ - stderr.writeln(args); - if(writeLogFile){ - // Write to log file - logfileWriteLine(args); - } +// Function to add a log entry with multiple levels +void addLogEntry(string message = "", string[] levels = ["info"]) { + logBuffer.logThisMessage(message, levels); } -void errorAndNotify(T...)(T args) -{ - notify(args); - error(args); +// Is logging still active +bool loggingActive() { + return logBuffer.isRunning; } -void notify(T...)(T args) -{ - version(Notifications) { - if (doNotifications) { - string result; - foreach (index, arg; args) { - result ~= to!string(arg); - if (index != args.length - 1) - result ~= " "; - } - auto n = new Notification("OneDrive", result, "IGNORED"); - try { - n.show(); - // Sent message to notification daemon - if (verbose >= 2) { - writeln("[DEBUG] Sent notification to notification service. If notification is not displayed, check dbus or notification-daemon for errors"); - } - - } catch (Throwable e) { - vlog("Got exception from showing notification: ", e); - } - } +void addProcessingLogHeaderEntry(string message, long verbosityCount) { + if (verbosityCount == 0) { + addLogEntry(message, ["logFileOnly"]); + // Use the dots to show the application is 'doing something' if verbosityCount == 0 + addLogEntry(message ~ " .", ["consoleOnlyNoNewLine"]); + } else { + // Fallback to normal logging if in verbose or above level + addLogEntry(message); } } -private void logfileWriteLine(T...)(T args) -{ - static import std.exception; - // Write to log file - string logFileName = .logFilePath ~ .username ~ ".onedrive.log"; - auto currentTime = Clock.currTime(); - auto timeString = currentTime.toString(); - File logFile; - - // Resolve: std.exception.ErrnoException@std/stdio.d(423): Cannot open file `/var/log/onedrive/xxxxx.onedrive.log' in mode `a' (Permission denied) - try { - logFile = File(logFileName, "a"); - } - catch (std.exception.ErrnoException e) { - // We cannot open the log file in logFilePath location for writing - // The user is not part of the standard 'users' group (GID 100) - // Change logfile to ~/onedrive.log putting the log file in the users home directory - - if (!logFileWriteFailFlag) { - // write out error message that we cant log to the requested file - writeln("\nUnable to write activity log to ", logFileName); - writeln("Please set appropriate permissions to allow write access to the logging directory for your user account"); - writeln("The requested client activity log will instead be located in your users home directory\n"); - - // set the flag so we dont keep printing this error message - logFileWriteFailFlag = true; - } - - string homePath = environment.get("HOME"); - string logFileNameAlternate = homePath ~ "/onedrive.log"; - logFile = 
File(logFileNameAlternate, "a"); - } - // Write to the log file - logFile.writeln(timeString, "\t", args); - logFile.close(); -} - -private string getUserName() -{ - auto pw = getpwuid(getuid); - - // get required details - auto runtime_pw_name = pw.pw_name[0 .. strlen(pw.pw_name)].splitter(','); - auto runtime_pw_uid = pw.pw_uid; - auto runtime_pw_gid = pw.pw_gid; - - // user identifiers from process - vdebug("Process ID: ", pw); - vdebug("User UID: ", runtime_pw_uid); - vdebug("User GID: ", runtime_pw_gid); - - // What should be returned as username? - if (!runtime_pw_name.empty && runtime_pw_name.front.length){ - // user resolved - vdebug("User Name: ", runtime_pw_name.front.idup); - return runtime_pw_name.front.idup; - } else { - // Unknown user? - vdebug("User Name: unknown"); - return "unknown"; +void addProcessingDotEntry() { + if (MonoTime.currTime() - lastInsertedTime < dur!"seconds"(1)) { + // Don't flood the log buffer + return; } + lastInsertedTime = MonoTime.currTime(); + addLogEntry(".", ["consoleOnlyNoNewLine"]); } -void displayMemoryUsagePreGC() -{ -// Display memory usage -writeln("\nMemory Usage pre GC (bytes)"); -writeln("--------------------"); -writeln("memory usedSize = ", GC.stats.usedSize); -writeln("memory freeSize = ", GC.stats.freeSize); -// uncomment this if required, if not using LDC 1.16 as this does not exist in that version -//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n"); +// Function to set logFilePath and enable logging to a file +void enableLogFileOutput(string configuredLogFilePath) { + logBuffer.logFilePath = configuredLogFilePath; + logBuffer.writeToFile = true; } -void displayMemoryUsagePostGC() -{ -// Display memory usage -writeln("\nMemory Usage post GC (bytes)"); -writeln("--------------------"); -writeln("memory usedSize = ", GC.stats.usedSize); -writeln("memory freeSize = ", GC.stats.freeSize); -// uncomment this if required, if not using LDC 1.16 as this does not exist in that version -//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n"); -} +void disableGUINotifications(bool userConfigDisableNotifications) { + logBuffer.sendGUINotification = userConfigDisableNotifications; +} \ No newline at end of file diff --git a/src/main.d b/src/main.d index 688cd1d57..4580078c2 100644 --- a/src/main.d +++ b/src/main.d @@ -1,2094 +1,1499 @@ +// What is this module called? +module main; + +// What does this module require to function? import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; -import core.memory, core.time, core.thread; -import std.getopt, std.file, std.path, std.process, std.stdio, std.conv, std.algorithm.searching, std.string, std.regex; -import config, itemdb, monitor, onedrive, selective, sync, util; +import core.sys.posix.signal; +import core.memory; +import core.time; +import core.thread; +import std.stdio; +import std.getopt; +import std.string; +import std.file; +import std.process; +import std.algorithm; +import std.path; +import std.concurrency; +import std.parallelism; +import std.conv; +import std.traits; import std.net.curl: CurlException; -import core.stdc.signal; -import std.traits, std.format; -import std.concurrency: receiveTimeout; import std.datetime; -static import log; - -OneDriveApi oneDrive; -ItemDatabase itemDb; -bool onedriveInitialised = false; -const int EXIT_UNAUTHORIZED = 3; +// What other modules that we have created do we need to import? 
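src/main.d, which the diff moves to next, is the primary consumer of the rewritten log module. As a minimal sketch of the intended call pattern, using only the functions defined in src/log.d above (the log file path shown is hypothetical):

    // One-time setup: verbose logging on, debug logging off
    initialiseLogging(true, false);

    // Opt in to file output once the log path is known (hypothetical path)
    enableLogFileOutput("/var/log/onedrive/user.onedrive.log");

    // Entries are routed by the level tags handled in logThisMessage()
    addLogEntry("Sync started");                              // default level: ["info"]
    addLogEntry("Extra detail", ["verbose"]);                 // printed only when verbose logging is enabled
    addLogEntry("Recorded in the log file only", ["logFileOnly"]);
    addLogEntry(".", ["consoleOnlyNoNewLine"]);               // console progress dot, never written to the file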
+import config; +import log; +import curlEngine; +import util; +import onedrive; +import syncEngine; +import itemdb; +import clientSideFiltering; +import monitor; +import webhook; + +// What other constant variables do we require? const int EXIT_RESYNC_REQUIRED = 126; -enum MONITOR_LOG_SILENT = 2; -enum MONITOR_LOG_QUIET = 1; -enum LOG_NORMAL = 0; - -int main(string[] args) -{ - // Disable buffering on stdout +// Class objects +ApplicationConfig appConfig; +OneDriveWebhook oneDriveWebhook; +SyncEngine syncEngineInstance; +ItemDatabase itemDB; +ClientSideFiltering selectiveSync; +Monitor filesystemMonitor; + +// Class variables +// Flag for performing a synchronised shutdown +bool shutdownInProgress = false; +// Flag if a --dry-run is being performed, as, on shutdown, once config is destroyed, we have no reference here +bool dryRun = false; +// Configure the runtime database file path so that it is available to us on shutdown so objects can be destroyed and removed if required +// - Typically this will be the default, but in a --dry-run scenario, we use a separate database file +string runtimeDatabaseFile = ""; + +int main(string[] cliArgs) { + // Application Start Time - used during monitor loop to detail how long it has been running for + auto applicationStartTime = Clock.currTime(); + // Disable buffering on stdout - this is needed so that when we are using plain write() it will go to the terminal without flushing stdout.setvbuf(0, _IONBF); - - // main function variables - string confdirOption; - string configFilePath; - string syncListFilePath; - string databaseFilePath; - string businessSharedFolderFilePath; - string currentConfigHash; - string currentSyncListHash; - string previousConfigHash; - string previousSyncListHash; - string configHashFile; - string syncListHashFile; - string configBackupFile; - string syncDir; - string logOutputMessage; - string currentBusinessSharedFoldersHash; - string previousBusinessSharedFoldersHash; - string businessSharedFoldersHashFile; - string databaseFilePathDryRunGlobal; - bool configOptionsDifferent = false; - bool businessSharedFoldersDifferent = false; - bool syncListConfigured = false; - bool syncListDifferent = false; - bool syncDirDifferent = false; - bool skipFileDifferent = false; - bool skipDirDifferent = false; + + // Required main function variables + string genericHelpMessage = "Please use 'onedrive --help' for further assistance in regards to running this application."; + // If the user passes in --confdir we need to store this as a variable + string confdirOption = ""; + // running as what user? + string runtimeUserName = ""; + // Are we online? bool online = false; - bool performSyncOK = false; + // Does the operating environment have shell environment variables set + bool shellEnvSet = false; + // What is the runtime synchronisation directory that will be used + // Typically this will be '~/OneDrive' .. 
however tilde expansion is unreliable + string runtimeSyncDirectory = ""; + + // Verbosity Logging Count - this defines if verbose or debug logging is being used + long verbosityCount = 0; + // Application Logging Level + bool verboseLogging = false; + bool debugLogging = false; + // Monitor loop failures + bool monitorFailures = false; + // Help requested + bool helpRequested = false; + + // DEVELOPER OPTIONS OUTPUT VARIABLES bool displayMemoryUsage = false; bool displaySyncOptions = false; - bool cleanupLocalFilesGlobal = false; - bool synchronizeConfigured = false; - bool invalidSyncExit = false; - // start and finish messages - string startMessage = "Starting a sync with OneDrive"; - string finishMessage = "Sync with OneDrive is complete"; - string helpMessage = "Please use 'onedrive --help' for further assistance in regards to running this application."; + // Application Version + immutable string applicationVersion = "onedrive " ~ strip(import("version")); - // hash file permission values - string hashPermissionValue = "600"; - auto convertedPermissionValue = parse!long(hashPermissionValue, 8); - - // Define scopes + // Define 'exit' and 'failure' scopes scope(exit) { - // detail what scope was called - log.vdebug("Exit scope called"); - if (synchronizeConfigured) { - log.log(finishMessage); - } - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePreGC(); - } - // if initialised, shut down the HTTP instance - if (onedriveInitialised) { - oneDrive.shutdown(); - } - // was itemDb initialised? - if (itemDb !is null) { - // Make sure the .wal file is incorporated into the main db before we exit - if(!invalidSyncExit) { - itemDb.performVacuum(); - } - destroy(itemDb); - } - // cleanup any dry-run data - cleanupDryRunDatabase(databaseFilePathDryRunGlobal); - // free API instance - if (oneDrive !is null) { - destroy(oneDrive); - } - // Perform Garbage Cleanup - GC.collect(); - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePostGC(); - } + // Detail what scope was called + addLogEntry("Exit scope was called", ["debug"]); + // Perform synchronised exit + performSynchronisedExitProcess("exitScope"); + exit(EXIT_SUCCESS); } - + scope(failure) { - // detail what scope was called - log.vdebug("Failure scope called"); - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePreGC(); - } - // if initialised, shut down the HTTP instance - if (onedriveInitialised) { - oneDrive.shutdown(); - } - // was itemDb initialised? 
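The new exit and failure scopes here both delegate to performSynchronisedExitProcess(), whose body sits outside this hunk. A sketch of the 'run the teardown exactly once' idea it implies, using the shutdownInProgress flag declared earlier (the body shown is an assumption, not the actual implementation):

    // Hypothetical sketch only; the real function lives later in main.d
    void performSynchronisedExitProcess(string callingScope) {
        synchronized {
            // Both scope(exit) and scope(failure) can fire; only the first caller proceeds
            if (shutdownInProgress) return;
            shutdownInProgress = true;
        }
        addLogEntry("Shutdown initiated by: " ~ callingScope, ["debug"]);
        // ... destroy the API, database and monitor objects, vacuum the database, stop logging ...
    }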
- if (itemDb !is null) { - // Make sure the .wal file is incorporated into the main db before we exit - if(!invalidSyncExit) { - itemDb.performVacuum(); - } - destroy(itemDb); - } - // cleanup any dry-run data - cleanupDryRunDatabase(databaseFilePathDryRunGlobal); - // free API instance - if (oneDrive !is null) { - destroy(oneDrive); - } - // Perform Garbage Cleanup - GC.collect(); - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePostGC(); - } + // Detail what scope was called + addLogEntry("Failure scope was called", ["debug"]); + // Perform synchronised exit + performSynchronisedExitProcess("failureScope"); + exit(EXIT_FAILURE); } - - // read in application options as passed in + + // Read in application options as passed in try { bool printVersion = false; - auto opt = getopt( - args, + auto cliOptions = getopt( + cliArgs, std.getopt.config.passThrough, std.getopt.config.bundling, std.getopt.config.caseSensitive, "confdir", "Set the directory used to store the configuration files", &confdirOption, - "verbose|v+", "Print more details, useful for debugging (repeat for extra debugging)", &log.verbose, + "verbose|v+", "Print more details, useful for debugging (repeat for extra debugging)", &verbosityCount, "version", "Print the version and exit", &printVersion ); - // print help and exit - if (opt.helpWanted) { - args ~= "--help"; + // Print help and exit + if (cliOptions.helpWanted) { + cliArgs ~= "--help"; + helpRequested = true; } - // print the version and exit + // Print the version and exit if (printVersion) { - writeln("onedrive ", strip(import("version"))); - return EXIT_SUCCESS; + writeln(applicationVersion); + exit(EXIT_SUCCESS); } } catch (GetOptException e) { - // option errors - log.error(e.msg); - log.error("Try 'onedrive --help' for more information"); + // Option errors + writeln(e.msg); + writeln(genericHelpMessage); return EXIT_FAILURE; } catch (Exception e) { - // generic error - log.error(e.msg); - log.error("Try 'onedrive --help' for more information"); + // Generic error + writeln(e.msg); + writeln(genericHelpMessage); return EXIT_FAILURE; } - - // confdirOption must be a directory, not a file - // - By default ~/.config/onedrive will be used - // - If the user is using --confdir , the confdirOption needs to be evaluated when trying to load any file - // load configuration file if available - auto cfg = new config.Config(confdirOption); - if (!cfg.initialize()) { - // There was an error loading the configuration + + // Determine the application logging verbosity + if (verbosityCount == 1) { verboseLogging = true;} + if (verbosityCount >= 2) { debugLogging = true;} + + // Initialize the application logging class, as we know the application verbosity level + // If we need to enable logging to a file, we can only do this once we know the application configuration which is done slightly later on + initialiseLogging(verboseLogging, debugLogging); + + // Log application start time, log line has start time + addLogEntry("Application started", ["debug"]); + + // Who are we running as? 
This will print the ProcessID, UID, GID and username the application is running as
+ runtimeUserName = getUserName();
+
+ // Print the application version and how this was compiled as soon as possible
+ addLogEntry("Application Version: " ~ applicationVersion, ["debug"]);
+ addLogEntry("Application Compiled With: " ~ compilerDetails(), ["debug"]);
+
+ // How was this application started - what options were passed in
+ addLogEntry("Passed in 'cliArgs': " ~ to!string(cliArgs), ["debug"]);
+ addLogEntry("Note: --confdir and --verbose are not listed in 'cliArgs' array", ["debug"]);
+ addLogEntry("Passed in --confdir if present: " ~ confdirOption, ["debug"]);
+ addLogEntry("Passed in --verbose count if present: " ~ to!string(verbosityCount), ["debug"]);
+
+ // Create a new AppConfig object with default values
+ appConfig = new ApplicationConfig();
+ // Update the default application configuration with the logging level so these can be used as a config option throughout the application
+ appConfig.setConfigLoggingLevels(verboseLogging, debugLogging, verbosityCount);
+
+ // Initialise the application configuration, utilising --confdir if it was passed in
+ // Otherwise application defaults will be used to configure the application
+ if (!appConfig.initialise(confdirOption, helpRequested)) {
+ // There was an error loading the user-specified application configuration
// Error message already printed
return EXIT_FAILURE;
}
- // How was this application started - what options were passed in
- log.vdebug("passed in options: ", args);
- log.vdebug("note --confdir and --verbose not listed in args");
-
- // set memory display
- displayMemoryUsage = cfg.getValueBool("display_memory");
-
- // set display sync options
- displaySyncOptions = cfg.getValueBool("display_sync_options");
-
- // update configuration from command line args
- cfg.update_from_args(args);
+ // Update the current runtime application configuration (default or 'config' file read-in options) from any passed in command line arguments
+ appConfig.updateFromArgs(cliArgs);
- // --resync should be a 'last resort item' .. the user needs to 'accept' to proceed
- if ((cfg.getValueBool("resync")) && (!cfg.getValueBool("display_config"))) {
- // what is the risk acceptance?
- bool resyncRiskAcceptance = false;
+ // Configure dryRun so that this can be used here & during shutdown
+ dryRun = appConfig.getValueBool("dry_run");
- if (!cfg.getValueBool("resync_auth")) {
- // need to prompt user
- char response;
- // warning message
- writeln("\nThe use of --resync will remove your local 'onedrive' client state, thus no record will exist regarding your current 'sync status'");
- writeln("This has the potential to overwrite local versions of files with potentially older versions downloaded from OneDrive which can lead to data loss");
- writeln("If in-doubt, backup your local data first before proceeding with --resync");
- write("\nAre you sure you wish to proceed with --resync? [Y/N] ");
-
- try {
- // Attempt to read user response
- readf(" %c\n", &response);
- } catch (std.format.FormatException e) {
- // Caught an error
- return EXIT_FAILURE;
- }
-
- // Evaluate user repsonse
- if ((to!string(response) == "y") || (to!string(response) == "Y")) {
- // User has accepted --resync risk to proceed
- resyncRiskAcceptance = true;
- // Are you sure you wish .. does not use writeln();
- write("\n");
- }
+ // As early as possible, now re-configure the logging class, given that we have read in any applicable 'config' file and updated the application running config from CLI input:
+ // - Enable logging to a file if this is required
+ // - Disable GUI notifications if this has been configured
+
+ // Configure application logging to a log file only if this has been enabled
+ // This is the earliest point that this can be done, as the client configuration has been read in, and any CLI arguments have been processed.
+ // Either of those ('config' file, CLI arguments) could be enabling logging, thus this is the earliest point at which this can be validated and enabled.
+ // The buffered logging also ensures that all 'output' to this point is also captured and written out to the log file
+ if (appConfig.getValueBool("enable_logging")) {
+ // Calculate the application logging directory
+ string calculatedLogDirPath = appConfig.calculateLogDirectory();
+ string calculatedLogFilePath;
+ // Initialise using the configured logging directory
+ addLogEntry("Using the following path to store the runtime application log: " ~ calculatedLogDirPath, ["verbose"]);
+ // Calculate the logfile name
+ if (calculatedLogDirPath != appConfig.defaultHomePath) {
+ // Log file is not going to the home directory
+ string logfileName = runtimeUserName ~ ".onedrive.log";
+ calculatedLogFilePath = buildNormalizedPath(buildPath(calculatedLogDirPath, logfileName));
} else {
- // resync_auth is true
- resyncRiskAcceptance = true;
- }
-
- // Action based on response
- if (!resyncRiskAcceptance){
- // --resync risk not accepted
- return EXIT_FAILURE;
- }
- }
-
- // Initialise normalised file paths
- configFilePath = buildNormalizedPath(cfg.configDirName ~ "/config");
- syncListFilePath = buildNormalizedPath(cfg.configDirName ~ "/sync_list");
- databaseFilePath = buildNormalizedPath(cfg.configDirName ~ "/items.db");
- businessSharedFolderFilePath = buildNormalizedPath(cfg.configDirName ~ "/business_shared_folders");
-
- // Has any of our configuration that would require a --resync been changed?
- // 1. sync_list file modification
- // 2. config file modification - but only if sync_dir, skip_dir, skip_file or drive_id was modified
- // 3.
CLI input overriding configured config file option - configHashFile = buildNormalizedPath(cfg.configDirName ~ "/.config.hash"); - syncListHashFile = buildNormalizedPath(cfg.configDirName ~ "/.sync_list.hash"); - configBackupFile = buildNormalizedPath(cfg.configDirName ~ "/.config.backup"); - businessSharedFoldersHashFile = buildNormalizedPath(cfg.configDirName ~ "/.business_shared_folders.hash"); - - // Does a 'config' file exist with a valid hash file - if (exists(configFilePath)) { - if (!exists(configHashFile)) { - // hash of config file needs to be created, but only if we are not in a --resync scenario - if (!cfg.getValueBool("resync")) { - std.file.write(configHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - configHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } else { - // no 'config' file exists, application defaults being used, no hash file required - if (exists(configHashFile)) { - // remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different - if (cfg.getValueBool("resync")) { - // resync issued, remove hash files - safeRemove(configHashFile); - safeRemove(configBackupFile); - } - } - } - - // Does a 'sync_list' file exist with a valid hash file - if (exists(syncListFilePath)) { - if (!exists(syncListHashFile)) { - // hash of config file needs to be created, but only if we are not in a --resync scenario - if (!cfg.getValueBool("resync")) { - std.file.write(syncListHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - syncListHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } else { - // no 'sync_list' file exists, no hash file required - if (exists(syncListHashFile)) { - // remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different - if (cfg.getValueBool("resync")) { - // resync issued, remove hash files - safeRemove(syncListHashFile); - } + // Log file is going to the users home directory + calculatedLogFilePath = buildNormalizedPath(buildPath(calculatedLogDirPath, "onedrive.log")); } + // Update the logging class to use 'calculatedLogFilePath' for the application log file now that this has been determined + enableLogFileOutput(calculatedLogFilePath); } - - // Does a 'business_shared_folders' file exist with a valid hash file - if (exists(businessSharedFolderFilePath)) { - if (!exists(businessSharedFoldersHashFile)) { - // hash of config file needs to be created, but only if we are not in a --resync scenario - if (!cfg.getValueBool("resync")) { - std.file.write(businessSharedFoldersHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - businessSharedFoldersHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } else { - // no 'business_shared_folders' file exists, no hash file required - if (exists(businessSharedFoldersHashFile)) { - // remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different - if (cfg.getValueBool("resync")) { - // resync issued, remove hash files - safeRemove(businessSharedFoldersHashFile); - } - } - } - - // Generate current hashes for the relevant configuration files if they exist - if (exists(configFilePath)) currentConfigHash = computeQuickXorHash(configFilePath); - if (exists(syncListFilePath)) currentSyncListHash = 
computeQuickXorHash(syncListFilePath); - if (exists(businessSharedFolderFilePath)) currentBusinessSharedFoldersHash = computeQuickXorHash(businessSharedFolderFilePath); - // read the existing hashes for each of the relevant configuration files if they exist - if (exists(configHashFile)) { - try { - previousConfigHash = readText(configHashFile); - } catch (std.file.FileException e) { - // Unable to access required file - log.error("ERROR: Unable to access ", e.msg); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } + // Disable GUI Notifications if configured to do so + // - This option is reverse action. If 'disable_notifications' is 'true', we need to send 'false' + if (appConfig.getValueBool("disable_notifications")) { + // disable_notifications is true, ensure GUI notifications is initialised with false so that NO GUI notification is sent + disableGUINotifications(false); + addLogEntry("Disabling GUI notifications as per user configuration"); } - if (exists(syncListHashFile)) { - try { - previousSyncListHash = readText(syncListHashFile); - } catch (std.file.FileException e) { - // Unable to access required file - log.error("ERROR: Unable to access ", e.msg); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - if (exists(businessSharedFoldersHashFile)) { - try { - previousBusinessSharedFoldersHash = readText(businessSharedFoldersHashFile); - } catch (std.file.FileException e) { - // Unable to access required file - log.error("ERROR: Unable to access ", e.msg); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - - // Was sync_list file updated? - if (currentSyncListHash != previousSyncListHash) { - // Debugging output to assist what changed - log.vdebug("sync_list file has been updated, --resync needed"); - syncListDifferent = true; - } - - // Was business_shared_folders updated? - if (currentBusinessSharedFoldersHash != previousBusinessSharedFoldersHash) { - // Debugging output to assist what changed - log.vdebug("business_shared_folders file has been updated, --resync needed"); - businessSharedFoldersDifferent = true; + + // Perform a depreciated options check now that the config file (if present) and CLI options have all been parsed to advise the user that their option usage might change + appConfig.checkDepreciatedOptions(cliArgs); + + // Configure Client Side Filtering (selective sync) by parsing and getting a usable regex for skip_file, skip_dir and sync_list config components + selectiveSync = new ClientSideFiltering(appConfig); + if (!selectiveSync.initialise()) { + // exit here as something triggered a selective sync configuration failure + return EXIT_FAILURE; } - - // Was config file updated between last execution ang this execution? 
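The blocks being removed through here implemented --resync change detection by hashing each configuration file and comparing the result against a hash stored on the previous run. Condensed to its core, the technique looks roughly like the following sketch (the paths are hypothetical; computeQuickXorHash comes from util):

    import std.file;

    string configFilePath = "/home/user/.config/onedrive/config";       // hypothetical
    string configHashFile = "/home/user/.config/onedrive/.config.hash"; // hypothetical

    // Hash the live file and compare it with the hash recorded on the last run
    string currentConfigHash  = exists(configFilePath) ? computeQuickXorHash(configFilePath) : "";
    string previousConfigHash = exists(configHashFile) ? readText(configHashFile) : "";
    if (currentConfigHash != previousConfigHash) {
        // Configuration changed since the last run; a --resync may be required
    }

    // Once a --resync has been accepted, persist the new hash for the next comparison
    std.file.write(configHashFile, currentConfigHash);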
- if (currentConfigHash != previousConfigHash) { - // config file was updated, however we only want to trigger a --resync requirement if sync_dir, skip_dir, skip_file or drive_id was modified - if (!cfg.getValueBool("display_config")){ - // only print this message if we are not using --display-config - log.log("config file has been updated, checking if --resync needed"); - } - if (exists(configBackupFile)) { - // check backup config what has changed for these configuration options if anything - // # sync_dir = "~/OneDrive" - // # skip_file = "~*|.~*|*.tmp" - // # skip_dir = "" - // # drive_id = "" - string[string] stringValues; - stringValues["sync_dir"] = ""; - stringValues["skip_file"] = ""; - stringValues["skip_dir"] = ""; - stringValues["drive_id"] = ""; - auto configBackupFileHandle = File(configBackupFile, "r"); - string lineBuffer; - auto range = configBackupFileHandle.byLine(); - // read configBackupFile line by line - foreach (line; range) { - lineBuffer = stripLeft(line).to!string; - if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; - auto c = lineBuffer.matchFirst(cfg.configRegex); - if (!c.empty) { - c.popFront(); // skip the whole match - string key = c.front.dup; - auto p = key in stringValues; - if (p) { - c.popFront(); - // compare this key - if ((key == "sync_dir") && (c.front.dup != cfg.getValueString("sync_dir"))) { - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - - if ((key == "skip_file") && (c.front.dup != cfg.getValueString("skip_file"))){ - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - if ((key == "skip_dir") && (c.front.dup != cfg.getValueString("skip_dir"))){ - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - if ((key == "drive_id") && (c.front.dup != cfg.getValueString("drive_id"))){ - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - } - } - } - // close file if open - if (configBackupFileHandle.isOpen()){ - // close open file - configBackupFileHandle.close(); - } - } else { - // no backup to check - log.vdebug("WARNING: no backup config file was found, unable to validate if any changes made"); - } - - // If there was a backup, any modified values we need to worry about would been detected - if (!cfg.getValueBool("display_config")) { - // we are not testing the configuration - if (!configOptionsDifferent) { - // no options are different - if (!cfg.getValueBool("dry_run")) { - // we are not in a dry-run scenario - // update config hash - log.vdebug("updating config hash as it is out of date"); - std.file.write(configHashFile, computeQuickXorHash(configFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - configHashFile.setAttributes(to!int(convertedPermissionValue)); - // create backup copy of current config file - log.vdebug("making backup of config file as it is out of date"); - std.file.copy(configFilePath, configBackupFile); - // File Copy should only be readable by the user who created it - 0600 permissions needed - configBackupFile.setAttributes(to!int(convertedPermissionValue)); - } - } + + // Set runtimeDatabaseFile, this will get updated if we are using --dry-run + 
runtimeDatabaseFile = appConfig.databaseFilePath; + + // Read in 'sync_dir' from appConfig with '~' if present expanded + runtimeSyncDirectory = appConfig.initialiseRuntimeSyncDirectory(); + + // DEVELOPER OPTIONS OUTPUT + // Set to display memory details as early as possible + displayMemoryUsage = appConfig.getValueBool("display_memory"); + // set to display sync options + displaySyncOptions = appConfig.getValueBool("display_sync_options"); + + // Display the current application configuration (based on all defaults, 'config' file parsing and/or options passed in via the CLI) and exit if --display-config has been used + if ((appConfig.getValueBool("display_config")) || (appConfig.getValueBool("display_running_config"))) { + // Display the application configuration + appConfig.displayApplicationConfiguration(); + // Do we exit? We exit only if '--display-config' has been used + if (appConfig.getValueBool("display_config")) { + return EXIT_SUCCESS; } } - - // Is there a backup of the config file if the config file exists? - if ((exists(configFilePath)) && (!exists(configBackupFile))) { - // create backup copy of current config file - std.file.copy(configFilePath, configBackupFile); - // File Copy should only be readable by the user who created it - 0600 permissions needed - configBackupFile.setAttributes(to!int(convertedPermissionValue)); - } - - // config file set options can be changed via CLI input, specifically these will impact sync and --resync will be needed: - // --syncdir ARG - // --skip-file ARG - // --skip-dir ARG - if (exists(configFilePath)) { - // config file exists - // was the sync_dir updated by CLI? - if (cfg.configFileSyncDir != "") { - // sync_dir was set in config file - if (cfg.configFileSyncDir != cfg.getValueString("sync_dir")) { - // config file was set and CLI input changed this - log.vdebug("sync_dir: CLI override of config file option, --resync needed"); - syncDirDifferent = true; - } - } - - // was the skip_file updated by CLI? - if (cfg.configFileSkipFile != "") { - // skip_file was set in config file - if (cfg.configFileSkipFile != cfg.getValueString("skip_file")) { - // config file was set and CLI input changed this - log.vdebug("skip_file: CLI override of config file option, --resync needed"); - skipFileDifferent = true; - } - } - - // was the skip_dir updated by CLI? - if (cfg.configFileSkipDir != "") { - // skip_dir was set in config file - if (cfg.configFileSkipDir != cfg.getValueString("skip_dir")) { - // config file was set and CLI input changed this - log.vdebug("skip_dir: CLI override of config file option, --resync needed"); - skipDirDifferent = true; - } - } + + // Check for basic application option conflicts - flags that should not be used together and/or flag combinations that conflict with each other, values that should be present and are not + if (appConfig.checkForBasicOptionConflicts) { + // Any error will have been printed by the function itself, but we need a small delay here to allow the buffered logging to output any error + return EXIT_FAILURE; } - - // Has anything triggered a --resync requirement? - if (configOptionsDifferent || syncListDifferent || syncDirDifferent || skipFileDifferent || skipDirDifferent || businessSharedFoldersDifferent) { - // --resync needed, is the user performing any operation where a --resync is not required? 
- // flag to ignore --resync requirement - bool ignoreResyncRequirement = false; - // These flags do not need --resync as no sync operation is needed: --display-config, --list-shared-folders, --get-O365-drive-id, --get-file-link - if (cfg.getValueBool("display_config")) ignoreResyncRequirement = true; - if (cfg.getValueBool("list_business_shared_folders")) ignoreResyncRequirement = true; - if ((!cfg.getValueString("get_o365_drive_id").empty)) ignoreResyncRequirement = true; - if ((!cfg.getValueString("get_file_link").empty)) ignoreResyncRequirement = true; + + // Check for --dry-run operation or a 'no-sync' operation where the 'dry-run' DB copy should be used + // If this has been requested, we need to ensure that all actions are performed against the dry-run database copy, and, + // no actual action takes place - such as deleting files if deleted online, moving files if moved online or local, downloading new & changed files, uploading new & changed files + if (dryRun || (appConfig.hasNoSyncOperationBeenRequested())) { - // Do we need to ignore a --resync requirement? - if (!ignoreResyncRequirement) { - // We are not ignoring --requirement - if (!cfg.getValueBool("resync")) { - // --resync not issued, fail fast - log.error("An application configuration change has been detected where a --resync is required"); - return EXIT_RESYNC_REQUIRED; - } else { - // --resync issued, update hashes of config files if they exist - if (!cfg.getValueBool("dry_run")) { - // not doing a dry run, update hash files if config & sync_list exist - if (exists(configFilePath)) { - // update hash - log.vdebug("updating config hash as --resync issued"); - std.file.write(configHashFile, computeQuickXorHash(configFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - configHashFile.setAttributes(to!int(convertedPermissionValue)); - // create backup copy of current config file - log.vdebug("making backup of config file as --resync issued"); - std.file.copy(configFilePath, configBackupFile); - // File copy should only be readable by the user who created it - 0600 permissions needed - configBackupFile.setAttributes(to!int(convertedPermissionValue)); - } - if (exists(syncListFilePath)) { - // update sync_list hash - log.vdebug("updating sync_list hash as --resync issued"); - std.file.write(syncListHashFile, computeQuickXorHash(syncListFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - syncListHashFile.setAttributes(to!int(convertedPermissionValue)); - } - if (exists(businessSharedFolderFilePath)) { - // update business_shared_folders hash - log.vdebug("updating business_shared_folders hash as --resync issued"); - std.file.write(businessSharedFoldersHashFile, computeQuickXorHash(businessSharedFolderFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - businessSharedFoldersHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } - } - } - - // --dry-run operation notification and database setup - // Are we performing any of the following operations? 
- // --dry-run, --list-shared-folders, --get-O365-drive-id, --get-file-link - if ((cfg.getValueBool("dry_run")) || (cfg.getValueBool("list_business_shared_folders")) || (!cfg.getValueString("get_o365_drive_id").empty) || (!cfg.getValueString("get_file_link").empty)) { - // is this a --list-shared-folders, --get-O365-drive-id, --get-file-link operation - if (cfg.getValueBool("dry_run")) { - // this is a --dry-run operation - log.log("DRY-RUN Configured. Output below shows what 'would' have occurred."); - } else { - // is this a --list-shared-folders, --get-O365-drive-id, --get-file-link operation - log.log("Using dry-run database copy for OneDrive API query"); - } - // configure databaseFilePathDryRunGlobal - databaseFilePathDryRunGlobal = cfg.databaseFilePathDryRun; + if (dryRun) { + // This is a --dry-run operation + addLogEntry("DRY-RUN Configured. Output below shows what 'would' have occurred."); + } + + // Cleanup any existing dry-run elements ... these should never be left hanging around + cleanupDryRunDatabaseFiles(appConfig.databaseFilePathDryRun); - string dryRunShmFile = databaseFilePathDryRunGlobal ~ "-shm"; - string dryRunWalFile = databaseFilePathDryRunGlobal ~ "-wal"; - // If the dry run database exists, clean this up - if (exists(databaseFilePathDryRunGlobal)) { - // remove the existing file - log.vdebug("Removing items-dryrun.sqlite3 as it still exists for some reason"); - safeRemove(databaseFilePathDryRunGlobal); - } - // silent cleanup of shm and wal files if they exist - if (exists(dryRunShmFile)) { - // remove items-dryrun.sqlite3-shm - safeRemove(dryRunShmFile); - } - if (exists(dryRunWalFile)) { - // remove items-dryrun.sqlite3-wal - safeRemove(dryRunWalFile); - } - // Make a copy of the original items.sqlite3 for use as the dry run copy if it exists - if (exists(cfg.databaseFilePath)) { - // in a --dry-run --resync scenario, we should not copy the existing database file - if (!cfg.getValueBool("resync")) { - // copy the existing DB file to the dry-run copy - log.vdebug("Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations"); - copy(cfg.databaseFilePath,databaseFilePathDryRunGlobal); + if (exists(appConfig.databaseFilePath)) { + // In a --dry-run --resync scenario, we should not copy the existing database file + if (!appConfig.getValueBool("resync")) { + // Copy the existing DB file to the dry-run copy + if (dryRun) { + addLogEntry("DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations"); + } + copy(appConfig.databaseFilePath,appConfig.databaseFilePathDryRun); } else { - // no database copy due to --resync - log.vdebug("No database copy created for --dry-run due to --resync also being used"); + // No database copy due to --resync + if (dryRun) { + addLogEntry("DRY-RUN: No database copy created for --dry-run due to --resync also being used"); + } } } - } - - // sync_dir environment handling to handle ~ expansion properly - bool shellEnvSet = false; - if ((environment.get("SHELL") == "") && (environment.get("USER") == "")){ - log.vdebug("sync_dir: No SHELL or USER environment variable configuration detected"); - // No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker - // Does the 'currently configured' sync_dir include a ~ - if (canFind(cfg.getValueString("sync_dir"), "~")) { - // A ~ was found in sync_dir - log.vdebug("sync_dir: A '~' was found in sync_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment 
variable set"); - syncDir = cfg.homePath ~ strip(cfg.getValueString("sync_dir"), "~"); - } else { - // No ~ found in sync_dir, use as is - log.vdebug("sync_dir: Getting syncDir from config value sync_dir"); - syncDir = cfg.getValueString("sync_dir"); - } + // update runtimeDatabaseFile now that we are using the dry run path + runtimeDatabaseFile = appConfig.databaseFilePathDryRun; } else { - // A shell and user is set, expand any ~ as this will be expanded correctly if present - shellEnvSet = true; - log.vdebug("sync_dir: Getting syncDir from config value sync_dir"); - if (canFind(cfg.getValueString("sync_dir"), "~")) { - log.vdebug("sync_dir: A '~' was found in configured sync_dir, automatically expanding as SHELL and USER environment variable is set"); - syncDir = expandTilde(cfg.getValueString("sync_dir")); - } else { - syncDir = cfg.getValueString("sync_dir"); - } + // Cleanup any existing dry-run elements ... these should never be left hanging around + cleanupDryRunDatabaseFiles(appConfig.databaseFilePathDryRun); } - - // vdebug syncDir as set and calculated - log.vdebug("syncDir: ", syncDir); - - // Configure the logging directory if different from application default - // log_dir environment handling to handle ~ expansion properly - string logDir = cfg.getValueString("log_dir"); - if (logDir != cfg.defaultLogFileDir) { - // user modified log_dir entry - // if 'log_dir' contains a '~' this needs to be expanded correctly - if (canFind(cfg.getValueString("log_dir"), "~")) { - // ~ needs to be expanded correctly - if (!shellEnvSet) { - // No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker - log.vdebug("log_dir: A '~' was found in log_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set"); - logDir = cfg.homePath ~ strip(cfg.getValueString("log_dir"), "~"); - } else { - // A shell and user is set, expand any ~ as this will be expanded correctly if present - log.vdebug("log_dir: A '~' was found in log_dir, using SHELL or USER environment variable to expand '~'"); - logDir = expandTilde(cfg.getValueString("log_dir")); - } - } else { - // '~' not found in log_dir entry, use as is - logDir = cfg.getValueString("log_dir"); - } - // update log_dir with normalised path, with '~' expanded correctly - cfg.setValueString("log_dir", logDir); - } - - // Configure logging only if enabled - if (cfg.getValueBool("enable_logging")){ - // Initialise using the configured logging directory - log.vlog("Using logfile dir: ", logDir); - log.init(logDir); - } - - // Configure whether notifications are used - log.setNotifications(cfg.getValueBool("monitor") && !cfg.getValueBool("disable_notifications")); - - // Application upgrades - skilion version etc - if (exists(databaseFilePath)) { - if (!cfg.getValueBool("dry_run")) { - safeRemove(databaseFilePath); - } - log.logAndNotify("Database schema changed, resync needed"); - cfg.setValueBool("resync", true); - } - + // Handle --logout as separate item, do not 'resync' on a --logout - if (cfg.getValueBool("logout")) { - log.vdebug("--logout requested"); - log.log("Deleting the saved authentication status ..."); - if (!cfg.getValueBool("dry_run")) { - safeRemove(cfg.refreshTokenFilePath); + if (appConfig.getValueBool("logout")) { + addLogEntry("--logout requested", ["debug"]); + addLogEntry("Deleting the saved authentication status ..."); + if (!dryRun) { + safeRemove(appConfig.refreshTokenFilePath); + } else { + // --dry-run scenario ... 
technically we should not be making any local file changes .......
+ addLogEntry("DRY RUN: Not removing the saved authentication status");
}
// Exit
return EXIT_SUCCESS;
}
// Handle --reauth to re-authenticate the client
- if (cfg.getValueBool("reauth")) {
- log.vdebug("--reauth requested");
- log.log("Deleting the saved authentication status ... re-authentication requested");
- if (!cfg.getValueBool("dry_run")) {
- safeRemove(cfg.refreshTokenFilePath);
+ if (appConfig.getValueBool("reauth")) {
+ addLogEntry("--reauth requested", ["debug"]);
+ addLogEntry("Deleting the saved authentication status ... re-authentication requested");
+ if (!dryRun) {
+ safeRemove(appConfig.refreshTokenFilePath);
+ } else {
+ // --dry-run scenario ... technically we should not be making any local file changes .......
+ addLogEntry("DRY RUN: Not removing the saved authentication status");
}
}
- // Display current application configuration
- if ((cfg.getValueBool("display_config")) || (cfg.getValueBool("display_running_config"))) {
- if (cfg.getValueBool("display_running_config")) {
- writeln("--------------- Application Runtime Configuration ---------------");
- }
-
- // Display application version
- writeln("onedrive version = ", strip(import("version")));
- // Display all of the pertinent configuration options
- writeln("Config path = ", cfg.configDirName);
- // Does a config file exist or are we using application defaults
- writeln("Config file found in config path = ", exists(configFilePath));
-
- // Is config option drive_id configured?
- if (cfg.getValueString("drive_id") != ""){
- writeln("Config option 'drive_id' = ", cfg.getValueString("drive_id"));
- }
-
- // Config Options as per 'config' file
- writeln("Config option 'sync_dir' = ", syncDir);
-
- // logging and notifications
- writeln("Config option 'enable_logging' = ", cfg.getValueBool("enable_logging"));
- writeln("Config option 'log_dir' = ", cfg.getValueString("log_dir"));
- writeln("Config option 'disable_notifications' = ", cfg.getValueBool("disable_notifications"));
- writeln("Config option 'min_notify_changes' = ", cfg.getValueLong("min_notify_changes"));
-
- // skip files and directory and 'matching' policy
- writeln("Config option 'skip_dir' = ", cfg.getValueString("skip_dir"));
- writeln("Config option 'skip_dir_strict_match' = ", cfg.getValueBool("skip_dir_strict_match"));
- writeln("Config option 'skip_file' = ", cfg.getValueString("skip_file"));
- writeln("Config option 'skip_dotfiles' = ", cfg.getValueBool("skip_dotfiles"));
- writeln("Config option 'skip_symlinks' = ", cfg.getValueBool("skip_symlinks"));
-
- // --monitor sync process options
- writeln("Config option 'monitor_interval' = ", cfg.getValueLong("monitor_interval"));
- writeln("Config option 'monitor_log_frequency' = ", cfg.getValueLong("monitor_log_frequency"));
- writeln("Config option 'monitor_fullscan_frequency' = ", cfg.getValueLong("monitor_fullscan_frequency"));
+ // --resync should be considered a 'last resort' option, to be used when the application configuration has changed and a resync is needed .. the user needs to 'accept' this warning to proceed
+ // If --resync has not been used (bool value is false), check the application configuration for 'changes' that require a --resync to ensure that the local data reflects the user's requested configuration
+ if (appConfig.getValueBool("resync")) {
+ // what is the risk acceptance for --resync?
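+ // displayResyncRiskForAcceptance() interactively warns the user that a --resync will remove local application state, and returns 'true' only when that risk is explicitly accepted
+ // (note: the 'resync_auth' configuration option can be used to accept this risk prompt automatically)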
+ bool resyncRiskAcceptance = appConfig.displayResyncRiskForAcceptance(); + addLogEntry("Returned --resync risk acceptance: " ~ to!string(resyncRiskAcceptance), ["debug"]); - // sync process and method - writeln("Config option 'read_only_auth_scope' = ", cfg.getValueBool("read_only_auth_scope")); - writeln("Config option 'dry_run' = ", cfg.getValueBool("dry_run")); - writeln("Config option 'upload_only' = ", cfg.getValueBool("upload_only")); - writeln("Config option 'download_only' = ", cfg.getValueBool("download_only")); - writeln("Config option 'local_first' = ", cfg.getValueBool("local_first")); - writeln("Config option 'check_nosync' = ", cfg.getValueBool("check_nosync")); - writeln("Config option 'check_nomount' = ", cfg.getValueBool("check_nomount")); - writeln("Config option 'resync' = ", cfg.getValueBool("resync")); - writeln("Config option 'resync_auth' = ", cfg.getValueBool("resync_auth")); - writeln("Config option 'cleanup_local_files' = ", cfg.getValueBool("cleanup_local_files")); - - // data integrity - writeln("Config option 'classify_as_big_delete' = ", cfg.getValueLong("classify_as_big_delete")); - writeln("Config option 'disable_upload_validation' = ", cfg.getValueBool("disable_upload_validation")); - writeln("Config option 'bypass_data_preservation' = ", cfg.getValueBool("bypass_data_preservation")); - writeln("Config option 'no_remote_delete' = ", cfg.getValueBool("no_remote_delete")); - writeln("Config option 'remove_source_files' = ", cfg.getValueBool("remove_source_files")); - writeln("Config option 'sync_dir_permissions' = ", cfg.getValueLong("sync_dir_permissions")); - writeln("Config option 'sync_file_permissions' = ", cfg.getValueLong("sync_file_permissions")); - writeln("Config option 'space_reservation' = ", cfg.getValueLong("space_reservation")); - - // curl operations - writeln("Config option 'application_id' = ", cfg.getValueString("application_id")); - writeln("Config option 'azure_ad_endpoint' = ", cfg.getValueString("azure_ad_endpoint")); - writeln("Config option 'azure_tenant_id' = ", cfg.getValueString("azure_tenant_id")); - writeln("Config option 'user_agent' = ", cfg.getValueString("user_agent")); - writeln("Config option 'force_http_11' = ", cfg.getValueBool("force_http_11")); - writeln("Config option 'debug_https' = ", cfg.getValueBool("debug_https")); - writeln("Config option 'rate_limit' = ", cfg.getValueLong("rate_limit")); - writeln("Config option 'operation_timeout' = ", cfg.getValueLong("operation_timeout")); - writeln("Config option 'dns_timeout' = ", cfg.getValueLong("dns_timeout")); - writeln("Config option 'connect_timeout' = ", cfg.getValueLong("connect_timeout")); - writeln("Config option 'data_timeout' = ", cfg.getValueLong("data_timeout")); - writeln("Config option 'ip_protocol_version' = ", cfg.getValueLong("ip_protocol_version")); - - // Is sync_list configured ? 
- writeln("Config option 'sync_root_files' = ", cfg.getValueBool("sync_root_files")); - if (exists(syncListFilePath)){ - - writeln("Selective sync 'sync_list' configured = true"); - writeln("sync_list contents:"); - // Output the sync_list contents - auto syncListFile = File(syncListFilePath, "r"); - auto range = syncListFile.byLine(); - foreach (line; range) - { - writeln(line); - } + // Action based on user response + if (!resyncRiskAcceptance){ + // --resync risk not accepted + return EXIT_FAILURE; } else { - writeln("Selective sync 'sync_list' configured = false"); - + addLogEntry("--resync issued and risk accepted", ["debug"]); + // --resync risk accepted, perform a cleanup of items that require a cleanup + appConfig.cleanupHashFilesDueToResync(); + // Make a backup of the applicable configuration file + appConfig.createBackupConfigFile(); + // Update hash files and generate a new config backup + appConfig.updateHashContentsForConfigFiles(); + // Remove the items database + processResyncDatabaseRemoval(runtimeDatabaseFile); } - - // Is business_shared_folders enabled and configured ? - writeln("Config option 'sync_business_shared_folders' = ", cfg.getValueBool("sync_business_shared_folders")); - if (exists(businessSharedFolderFilePath)){ - writeln("Business Shared Folders configured = true"); - writeln("business_shared_folders contents:"); - // Output the business_shared_folders contents - auto businessSharedFolderFileList = File(businessSharedFolderFilePath, "r"); - auto range = businessSharedFolderFileList.byLine(); - foreach (line; range) - { - writeln(line); + } else { + // Is the application currently authenticated? If not, it is pointless checking if a --resync is required until the application is authenticated + if (exists(appConfig.refreshTokenFilePath)) { + // Has any of our application configuration that would require a --resync been changed? + if (appConfig.applicationChangeWhereResyncRequired()) { + // Application configuration has changed however --resync not issued, fail fast + addLogEntry(); + addLogEntry("An application configuration change has been detected where a --resync is required"); + addLogEntry(); + return EXIT_RESYNC_REQUIRED; + } else { + // No configuration change that requires a --resync to be issued + // Special cases need to be checked - if these options were enabled, it creates a false 'Resync Required' flag, so do not create a backup + if ((!appConfig.getValueBool("list_business_shared_items"))) { + // Make a backup of the applicable configuration file + appConfig.createBackupConfigFile(); + // Update hash files and generate a new config backup + appConfig.updateHashContentsForConfigFiles(); + } } - } else { - writeln("Business Shared Folders configured = false"); - } - - // Are webhooks enabled? 
- writeln("Config option 'webhook_enabled' = ", cfg.getValueBool("webhook_enabled")); - if (cfg.getValueBool("webhook_enabled")) { - writeln("Config option 'webhook_public_url' = ", cfg.getValueString("webhook_public_url")); - writeln("Config option 'webhook_listening_host' = ", cfg.getValueString("webhook_listening_host")); - writeln("Config option 'webhook_listening_port' = ", cfg.getValueLong("webhook_listening_port")); - writeln("Config option 'webhook_expiration_interval' = ", cfg.getValueLong("webhook_expiration_interval")); - writeln("Config option 'webhook_renewal_interval' = ", cfg.getValueLong("webhook_renewal_interval")); } - - if (cfg.getValueBool("display_running_config")) { - writeln("-----------------------------------------------------------------"); - } - - // Do we exit? We only exit if --display-config has been used - if (cfg.getValueBool("display_config")) { - return EXIT_SUCCESS; - } - } - - // --upload-only and --download-only are mutually exclusive and cannot be used together - if ((cfg.getValueBool("upload_only")) && (cfg.getValueBool("download_only"))) { - // both cannot be true at the same time - writeln("ERROR: --upload-only and --download-only are mutually exclusive and cannot be used together.\n"); - return EXIT_FAILURE; } - - // Handle the actual --resync to remove local files - if (cfg.getValueBool("resync")) { - log.vdebug("--resync requested"); - log.vdebug("Testing if we have exclusive access to local database file"); - // Are we the only running instance? Test that we can open the database file path - itemDb = new ItemDatabase(cfg.databaseFilePath); + + // Implement https://github.com/abraunegg/onedrive/issues/1129 + // Force a synchronisation of a specific folder, only when using --synchronize --single-directory and ignoring all non-default skip_dir and skip_file rules + if (appConfig.getValueBool("force_sync")) { + // appConfig.checkForBasicOptionConflicts() has already checked for the basic requirements for --force-sync + addLogEntry(); + addLogEntry("WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used"); + addLogEntry(); + bool forceSyncRiskAcceptance = appConfig.displayForceSyncRiskForAcceptance(); + addLogEntry("Returned --force-sync risk acceptance: " ~ forceSyncRiskAcceptance, ["debug"]); - // did we successfully initialise the database class? - if (!itemDb.isDatabaseInitialised()) { - // no .. 
destroy class - itemDb = null; - // exit application + // Action based on user response + if (!forceSyncRiskAcceptance){ + // --force-sync risk not accepted return EXIT_FAILURE; - } - - // If we have exclusive access we will not have exited - // destroy access test - destroy(itemDb); - // delete application sync state - log.log("Deleting the saved application sync status ..."); - if (!cfg.getValueBool("dry_run")) { - safeRemove(cfg.databaseFilePath); - safeRemove(cfg.deltaLinkFilePath); - safeRemove(cfg.uploadStateFilePath); + } else { + // --force-sync risk accepted + // reset set config using function to use application defaults + appConfig.resetSkipToDefaults(); + // update sync engine regex with reset defaults + selectiveSync.setDirMask(appConfig.getValueString("skip_dir")); + selectiveSync.setFileMask(appConfig.getValueString("skip_file")); } } + // What IP Protocol are we going to use to access the network with + appConfig.displayIPProtocol(); + // Test if OneDrive service can be reached, exit if it cant be reached - log.vdebug("Testing network to ensure network connectivity to Microsoft OneDrive Service"); - online = testNetwork(cfg); + addLogEntry("Testing network to ensure network connectivity to Microsoft OneDrive Service", ["debug"]); + online = testInternetReachability(appConfig); + + // If we are not 'online' - how do we handle this situation? if (!online) { - // Cant initialise the API as we are not online - if (!cfg.getValueBool("monitor")) { + // We are unable to initialise the OneDrive API as we are not online + if (!appConfig.getValueBool("monitor")) { // Running as --synchronize - log.error("Unable to reach Microsoft OneDrive API service, unable to initialize application\n"); + addLogEntry(); + addLogEntry("ERROR: Unable to reach Microsoft OneDrive API service, unable to initialise application"); + addLogEntry(); return EXIT_FAILURE; } else { // Running as --monitor - log.error("Unable to reach Microsoft OneDrive API service at this point in time, re-trying network tests\n"); - // re-try network connection to OneDrive - // https://github.com/abraunegg/onedrive/issues/1184 - // Back off & retry with incremental delay - int retryCount = 10000; - int retryAttempts = 1; - int backoffInterval = 1; - int maxBackoffInterval = 3600; - - bool retrySuccess = false; - while (!retrySuccess){ - // retry to access OneDrive API - backoffInterval++; - int thisBackOffInterval = retryAttempts*backoffInterval; - log.vdebug(" Retry Attempt: ", retryAttempts); - if (thisBackOffInterval <= maxBackoffInterval) { - log.vdebug(" Retry In (seconds): ", thisBackOffInterval); - Thread.sleep(dur!"seconds"(thisBackOffInterval)); - } else { - log.vdebug(" Retry In (seconds): ", maxBackoffInterval); - Thread.sleep(dur!"seconds"(maxBackoffInterval)); - } - // perform the re-rty - online = testNetwork(cfg); - if (online) { - // We are now online - log.log("Internet connectivity to Microsoft OneDrive service has been restored"); - retrySuccess = true; - } else { - // We are still offline - if (retryAttempts == retryCount) { - // we have attempted to re-connect X number of times - // false set this to true to break out of while loop - retrySuccess = true; - } - } - // Increment & loop around - retryAttempts++; - } - if (!online) { - // Not online after 1.2 years of trying - log.error("ERROR: Was unable to reconnect to the Microsoft OneDrive service after 10000 attempts lasting over 1.2 years!"); + addLogEntry(); + addLogEntry("Unable to reach the Microsoft OneDrive API service at this point in time, 
re-trying network tests based on applicable intervals"); + addLogEntry(); + if (!retryInternetConnectivtyTest(appConfig)) { return EXIT_FAILURE; } } } - // Check application version and Initialize OneDrive API, check for authorization + // This needs to be a separate 'if' statement, as, if this was an 'if-else' from above, if we were originally offline and using --monitor, we would never get to this point if (online) { // Check Application Version - log.vlog("Checking Application Version ..."); + addLogEntry("Checking Application Version ...", ["verbose"]); checkApplicationVersion(); - - // we can only initialise if we are online - log.vlog("Initializing the OneDrive API ..."); - oneDrive = new OneDriveApi(cfg); - onedriveInitialised = oneDrive.init(); - oneDrive.printAccessToken = cfg.getValueBool("print_token"); - } - - if (!onedriveInitialised) { - log.error("Could not initialize the OneDrive API"); - // Use exit scopes to shutdown API - return EXIT_UNAUTHORIZED; - } - - // if --synchronize or --monitor not passed in, configure the flag to display help & exit - if (cfg.getValueBool("synchronize") || cfg.getValueBool("monitor")) { - performSyncOK = true; - } - - // --source-directory must only be used with --destination-directory - // neither can (or should) be added individually as they have a no operational impact if they are - if (((cfg.getValueString("source_directory") == "") && (cfg.getValueString("destination_directory") != "")) || ((cfg.getValueString("source_directory") != "") && (cfg.getValueString("destination_directory") == ""))) { - // so either --source-directory or --destination-directory was passed in, without the other required item being passed in - // --source-directory or --destination-directory cannot be used with --synchronize or --monitor - writeln(); - if (performSyncOK) { - // log an error - log.error("ERROR: --source-directory or --destination-directory cannot be used with --synchronize or --monitor"); - } else { - // display issue with using these options - string emptyParameter; - string dataParameter; - if (cfg.getValueString("source_directory").empty) { - emptyParameter = "--source-directory"; - dataParameter = "--destination-directory"; - } else { - emptyParameter = "--destination-directory"; - dataParameter = "--source-directory"; - } - log.error("ERROR: " ~ dataParameter ~ " was passed in without also using " ~ emptyParameter); - } - // Use exit scopes to shutdown API - writeln(); - log.error(helpMessage); - writeln(); - return EXIT_FAILURE; - } - - // --create-directory, --remove-directory, --source-directory, --destination-directory - // these are activities that dont perform a sync, so to not generate an error message for these items either - if (((cfg.getValueString("create_directory") != "") || (cfg.getValueString("remove_directory") != "")) || ((cfg.getValueString("source_directory") != "") && (cfg.getValueString("destination_directory") != "")) || (cfg.getValueString("get_file_link") != "") || (cfg.getValueString("modified_by") != "") || (cfg.getValueString("create_share_link") != "") || (cfg.getValueString("get_o365_drive_id") != "") || cfg.getValueBool("display_sync_status") || cfg.getValueBool("list_business_shared_folders")) { - performSyncOK = true; - } - - // Were acceptable sync operations provided? Was --synchronize or --monitor passed in - if (!performSyncOK) { - // was the application just authorised? 
- if (cfg.applicationAuthorizeResponseUri) { - // Application was just authorised - if (exists(cfg.refreshTokenFilePath)) { - // OneDrive refresh token exists - log.log("\nApplication has been successfully authorised, however no additional command switches were provided.\n"); - log.log(helpMessage); - writeln(); - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } else { - // we just authorised, but refresh_token does not exist .. probably an auth error - log.log("\nApplication has not been successfully authorised. Please check your URI response entry and try again.\n"); + + // Initialise the OneDrive API + addLogEntry("Attempting to initialise the OneDrive API ...", ["verbose"]); + OneDriveApi oneDriveApiInstance = new OneDriveApi(appConfig); + appConfig.apiWasInitialised = oneDriveApiInstance.initialise(); + + // Did the API initialise successfully? + if (appConfig.apiWasInitialised) { + addLogEntry("The OneDrive API was initialised successfully", ["verbose"]); + + // Flag that we were able to initialise the API in the application config + oneDriveApiInstance.debugOutputConfiguredAPIItems(); + oneDriveApiInstance.releaseCurlEngine(); + object.destroy(oneDriveApiInstance); + oneDriveApiInstance = null; + + // Need to configure the itemDB and syncEngineInstance for 'sync' and 'non-sync' operations + addLogEntry("Opening the item database ...", ["verbose"]); + + // Configure the Item Database + itemDB = new ItemDatabase(runtimeDatabaseFile); + // Was the database successfully initialised? + if (!itemDB.isDatabaseInitialised()) { + // no .. destroy class + itemDB = null; + // exit application return EXIT_FAILURE; } + + // Initialise the syncEngine + syncEngineInstance = new SyncEngine(appConfig, itemDB, selectiveSync); + appConfig.syncEngineWasInitialised = syncEngineInstance.initialise(); + + // Are we not doing a --sync or a --monitor operation? Both of these will be false if they are not set + if ((!appConfig.getValueBool("synchronize")) && (!appConfig.getValueBool("monitor"))) { + + // Are we performing some sort of 'no-sync' task? + // - Are we obtaining the Office 365 Drive ID for a given Office 365 SharePoint Shared Library? + // - Are we displaying the sync status? + // - Are we getting the URL for a file online? + // - Are we listing who modified a file last online? + // - Are we listing OneDrive Business Shared Items? + // - Are we creating a shareable link for an existing file on OneDrive? + // - Are we just creating a directory online, without any sync being performed? + // - Are we just deleting a directory online, without any sync being performed? + // - Are we renaming or moving a directory? + // - Are we displaying the quota information? + // - Did we just authorise the client? + + // --get-sharepoint-drive-id - Get the SharePoint Library drive_id + if (appConfig.getValueString("sharepoint_library_name") != "") { + // Get the SharePoint Library drive_id + syncEngineInstance.querySiteCollectionForDriveID(appConfig.getValueString("sharepoint_library_name")); + // Exit application + // Use exit scopes to shutdown API and cleanup data + return EXIT_SUCCESS; + } + + // --display-sync-status - Query the sync status + if (appConfig.getValueBool("display_sync_status")) { + // path to query variable + string pathToQueryStatusOn; + // What path do we query? 
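+ // Default to querying the whole account from the root ('/') unless --single-directory was also provided, e.g.:
+ // onedrive --display-sync-status --single-directory 'path/to/check'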
+ if (!appConfig.getValueString("single_directory").empty) { + pathToQueryStatusOn = "/" ~ appConfig.getValueString("single_directory"); + } else { + pathToQueryStatusOn = "/"; + } + // Query the sync status + syncEngineInstance.queryOneDriveForSyncStatus(pathToQueryStatusOn); + // Exit application + // Use exit scopes to shutdown API and cleanup data + return EXIT_SUCCESS; + } + + // --get-file-link - Get the URL path for a synced file? + if (appConfig.getValueString("get_file_link") != "") { + // Query the OneDrive API for the file link + syncEngineInstance.queryOneDriveForFileDetails(appConfig.getValueString("get_file_link"), runtimeSyncDirectory, "URL"); + // Exit application + // Use exit scopes to shutdown API and cleanup data + return EXIT_SUCCESS; + } + + // --modified-by - Are we listing the modified-by details of a provided path? + if (appConfig.getValueString("modified_by") != "") { + // Query the OneDrive API for the last modified by details + syncEngineInstance.queryOneDriveForFileDetails(appConfig.getValueString("modified_by"), runtimeSyncDirectory, "ModifiedBy"); + // Exit application + // Use exit scopes to shutdown API and cleanup data + return EXIT_SUCCESS; + } + + // --list-shared-items - Are we listing OneDrive Business Shared Items + if (appConfig.getValueBool("list_business_shared_items")) { + // Is this a business account type? + if (appConfig.accountType == "business") { + // List OneDrive Business Shared Items + syncEngineInstance.listBusinessSharedObjects(); + } else { + addLogEntry("ERROR: Unsupported account type for listing OneDrive Business Shared Items"); + } + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // --create-share-link - Are we creating a shareable link for an existing file on OneDrive? + if (appConfig.getValueString("create_share_link") != "") { + // Query OneDrive for the file, and if valid, create a shareable link for the file + + // By default, the shareable link will be read-only. + // If the user adds: + // --with-editing-perms + // this will create a writeable link + syncEngineInstance.queryOneDriveForFileDetails(appConfig.getValueString("create_share_link"), runtimeSyncDirectory, "ShareableLink"); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // --create-directory - Are we just creating a directory online, without any sync being performed? + if ((appConfig.getValueString("create_directory") != "")) { + // Handle the remote path creation and updating of the local database without performing a sync + syncEngineInstance.createDirectoryOnline(appConfig.getValueString("create_directory")); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // --remove-directory - Are we just deleting a directory online, without any sync being performed? + if ((appConfig.getValueString("remove_directory") != "")) { + // Handle the remote path deletion without performing a sync + syncEngineInstance.deleteByPath(appConfig.getValueString("remove_directory")); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // Are we renaming or moving a directory online? 
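+ // Both --source-directory and --destination-directory must be provided together for this operation, e.g.: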
+ // onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination' + if ((appConfig.getValueString("source_directory") != "") && (appConfig.getValueString("destination_directory") != "")) { + // We are renaming or moving a directory + syncEngineInstance.uploadMoveItem(appConfig.getValueString("source_directory"), appConfig.getValueString("destination_directory")); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // Are we displaying the quota information? + if (appConfig.getValueBool("display_quota")) { + // Query and respond with the quota details + syncEngineInstance.queryOneDriveForQuotaDetails(); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // If we get to this point, we have not performed a 'no-sync' task .. + // Did we just authorise the client? + if (appConfig.applicationAuthorizeResponseUri) { + // Authorisation activity + if (exists(appConfig.refreshTokenFilePath)) { + // OneDrive refresh token exists + addLogEntry(); + addLogEntry("The application has been successfully authorised, but no extra command options have been specified."); + addLogEntry(); + addLogEntry(genericHelpMessage); + addLogEntry(); + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } else { + // We just authorised, but refresh_token does not exist .. probably an auth error? + addLogEntry(); + addLogEntry("Your application's authorisation was unsuccessful. Please review your URI response entry, then attempt authorisation again with a new URI response."); + addLogEntry(); + // Use exit scopes to shutdown API + return EXIT_FAILURE; + } + } else { + // No authorisation activity + addLogEntry(); + addLogEntry("Your command line input is missing either the '--sync' or '--monitor' switches. Please include one (but not both) of these switches in your command line, or refer to 'onedrive --help' for additional guidance."); + addLogEntry(); + addLogEntry("It is important to note that you must include one of these two arguments in your command line for the application to perform a synchronisation with Microsoft OneDrive"); + addLogEntry(); + // Use exit scopes to shutdown API + // invalidSyncExit = true; + return EXIT_FAILURE; + } + } } else { - // Application was not just authorised - log.log("\n--synchronize or --monitor switches missing from your command line input. Please add one (not both) of these switches to your command line or use 'onedrive --help' for further assistance.\n"); - log.log("No OneDrive sync will be performed without one of these two arguments being present.\n"); - // Use exit scopes to shutdown API - invalidSyncExit = true; + // API could not be initialised + addLogEntry("The OneDrive API could not be initialised"); return EXIT_FAILURE; } } - - // if --synchronize && --monitor passed in, exit & display help as these conflict with each other - if (cfg.getValueBool("synchronize") && cfg.getValueBool("monitor")) { - writeln(); - log.error("ERROR: --synchronize and --monitor cannot be used together"); - writeln(); - log.error(helpMessage); - writeln(); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - - // Initialize the item database - log.vlog("Opening the item database ..."); - // Are we performing any of the following operations? 
- // --dry-run, --list-shared-folders, --get-O365-drive-id, --get-file-link - if ((cfg.getValueBool("dry_run")) || (cfg.getValueBool("list_business_shared_folders")) || (!cfg.getValueString("get_o365_drive_id").empty) || (!cfg.getValueString("get_file_link").empty)) { - // Load the items-dryrun.sqlite3 file as the database - log.vdebug("Using database file: ", asNormalizedPath(databaseFilePathDryRunGlobal)); - itemDb = new ItemDatabase(databaseFilePathDryRunGlobal); - } else { - // Not a dry-run scenario or trying to query O365 Library - should be the default scenario - // Load the items.sqlite3 file as the database - log.vdebug("Using database file: ", asNormalizedPath(cfg.databaseFilePath)); - itemDb = new ItemDatabase(cfg.databaseFilePath); - } - // did we successfully initialise the database class? - if (!itemDb.isDatabaseInitialised()) { - // no .. destroy class - itemDb = null; - // exit application - return EXIT_FAILURE; - } - - // What are the permission that have been set for the application? - // These are relevant for: - // - The ~/OneDrive parent folder or 'sync_dir' configured item - // - Any new folder created under ~/OneDrive or 'sync_dir' - // - Any new file created under ~/OneDrive or 'sync_dir' - // valid permissions are 000 -> 777 - anything else is invalid - if ((cfg.getValueLong("sync_dir_permissions") < 0) || (cfg.getValueLong("sync_file_permissions") < 0) || (cfg.getValueLong("sync_dir_permissions") > 777) || (cfg.getValueLong("sync_file_permissions") > 777)) { - log.error("ERROR: Invalid 'User|Group|Other' permissions set within config file. Please check."); - return EXIT_FAILURE; - } else { - // debug log output what permissions are being set to - log.vdebug("Configuring default new folder permissions as: ", cfg.getValueLong("sync_dir_permissions")); - cfg.configureRequiredDirectoryPermisions(); - log.vdebug("Configuring default new file permissions as: ", cfg.getValueLong("sync_file_permissions")); - cfg.configureRequiredFilePermisions(); - } - - // configure the sync direcory based on syncDir config option - log.vlog("All operations will be performed in: ", syncDir); + // Configure the sync directory based on the runtimeSyncDirectory configured directory + addLogEntry("All application operations will be performed in the configured local 'sync_dir' directory: " ~ runtimeSyncDirectory, ["verbose"]); + // Try and set the 'sync_dir', attempt to create if it does not exist try { - if (!exists(syncDir)) { - log.vdebug("syncDir: Configured syncDir is missing. Creating: ", syncDir); + if (!exists(runtimeSyncDirectory)) { + addLogEntry("runtimeSyncDirectory: Configured 'sync_dir' is missing locally. 
Creating: " ~ runtimeSyncDirectory, ["debug"]); + try { // Attempt to create the sync dir we have been configured with - mkdirRecurse(syncDir); + mkdirRecurse(runtimeSyncDirectory); // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", syncDir); - syncDir.setAttributes(cfg.returnRequiredDirectoryPermisions()); + addLogEntry("Setting directory permissions for: " ~ runtimeSyncDirectory, ["debug"]); + runtimeSyncDirectory.setAttributes(appConfig.returnRequiredDirectoryPermisions()); } catch (std.file.FileException e) { // Creating the sync directory failed - log.error("ERROR: Unable to create local OneDrive syncDir - ", e.msg); + addLogEntry("ERROR: Unable to create the configured local 'sync_dir' directory: " ~ e.msg); // Use exit scopes to shutdown API return EXIT_FAILURE; } } } catch (std.file.FileException e) { // Creating the sync directory failed - log.error("ERROR: Unable to test the configured OneDrive syncDir - ", e.msg); + addLogEntry("ERROR: Unable to test for the existence of the configured local 'sync_dir' directory: " ~ e.msg); // Use exit scopes to shutdown API return EXIT_FAILURE; } - - // Change the working directory to the 'sync_dir' configured item - chdir(syncDir); - - // Configure selective sync by parsing and getting a regex for skip_file config component - auto selectiveSync = new SelectiveSync(); - - // load sync_list if it exists - if (exists(syncListFilePath)){ - log.vdebug("Loading user configured sync_list file ..."); - syncListConfigured = true; - // list what will be synced - auto syncListFile = File(syncListFilePath, "r"); - auto range = syncListFile.byLine(); - foreach (line; range) - { - log.vdebug("sync_list: ", line); - } - // close syncListFile if open - if (syncListFile.isOpen()){ - // close open file - syncListFile.close(); - } - } - selectiveSync.load(syncListFilePath); - - // load business_shared_folders if it exists - if (exists(businessSharedFolderFilePath)){ - log.vdebug("Loading user configured business_shared_folders file ..."); - // list what will be synced - auto businessSharedFolderFileList = File(businessSharedFolderFilePath, "r"); - auto range = businessSharedFolderFileList.byLine(); - foreach (line; range) - { - log.vdebug("business_shared_folders: ", line); - } - } - selectiveSync.loadSharedFolders(businessSharedFolderFilePath); - - // Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries - // Handle skip_dir configuration in config file - log.vdebug("Configuring skip_dir ..."); - log.vdebug("skip_dir: ", cfg.getValueString("skip_dir")); - selectiveSync.setDirMask(cfg.getValueString("skip_dir")); - - // Was --skip-dir-strict-match configured? - log.vdebug("Configuring skip_dir_strict_match ..."); - log.vdebug("skip_dir_strict_match: ", cfg.getValueBool("skip_dir_strict_match")); - if (cfg.getValueBool("skip_dir_strict_match")) { - selectiveSync.setSkipDirStrictMatch(); - } - - // Was --skip-dot-files configured? - log.vdebug("Configuring skip_dotfiles ..."); - log.vdebug("skip_dotfiles: ", cfg.getValueBool("skip_dotfiles")); - if (cfg.getValueBool("skip_dotfiles")) { - selectiveSync.setSkipDotfiles(); - } - - // Handle skip_file configuration in config file - log.vdebug("Configuring skip_file ..."); - // Validate skip_file to ensure that this does not contain an invalid configuration - // Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process. 
- foreach(entry; cfg.getValueString("skip_file").split("|")){ - if (entry == ".*") { - // invalid entry element detected - log.logAndNotify("ERROR: Invalid skip_file entry '.*' detected"); - return EXIT_FAILURE; - } - } - // All skip_file entries are valid - log.vdebug("skip_file: ", cfg.getValueString("skip_file")); - selectiveSync.setFileMask(cfg.getValueString("skip_file")); - - // Implement https://github.com/abraunegg/onedrive/issues/1129 - // Force a synchronization of a specific folder, only when using --synchronize --single-directory and ignoring all non-default skip_dir and skip_file rules - if ((cfg.getValueBool("synchronize")) && (cfg.getValueString("single_directory") != "") && (cfg.getValueBool("force_sync"))) { - log.log("\nWARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synchronize --single-directory --force-sync being used"); - // performing this action could have undesirable effects .. the user must accept this risk - // what is the risk acceptance? - bool resyncRiskAcceptance = false; - // need to prompt user - char response; - // warning message - writeln("\nThe use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts."); - writeln("By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync."); - write("\nAre you sure you wish to proceed with --force-sync [Y/N] "); - - try { - // Attempt to read user response - readf(" %c\n", &response); - } catch (std.format.FormatException e) { - // Caught an error - return EXIT_FAILURE; - } - - // Evaluate user repsonse - if ((to!string(response) == "y") || (to!string(response) == "Y")) { - // User has accepted --force-sync risk to proceed - resyncRiskAcceptance = true; - // Are you sure you wish .. does not use writeln(); - write("\n"); - } - - // Action based on response - if (!resyncRiskAcceptance){ - // --force-sync not accepted - return EXIT_FAILURE; - } else { - // --force-sync risk accepted - // reset set config using function to use application defaults - cfg.resetSkipToDefaults(); - // update sync engine regex with reset defaults - selectiveSync.setDirMask(cfg.getValueString("skip_dir")); - selectiveSync.setFileMask(cfg.getValueString("skip_file")); - } - } - - // Initialize the sync engine - auto sync = new SyncEngine(cfg, oneDrive, itemDb, selectiveSync); - try { - if (!initSyncEngine(sync)) { - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } else { - if ((cfg.getValueString("get_file_link") == "") && (cfg.getValueString("create_share_link") == "")) { - // Print out that we are initializing the engine only if we are not grabbing the file link or creating a shareable link - log.logAndNotify("Initializing the Synchronization Engine ..."); - } - } - } catch (CurlException e) { - if (!cfg.getValueBool("monitor")) { - log.log("\nNo Internet connection."); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - - // if sync list is configured, set to true now that the sync engine is initialised - if (syncListConfigured) { - sync.setSyncListConfigured(); - } - - // Do we need to configure specific --upload-only options? 
- if (cfg.getValueBool("upload_only")) { - // --upload-only was passed in or configured - log.vdebug("Configuring uploadOnly flag to TRUE as --upload-only passed in or configured"); - sync.setUploadOnly(); - // was --no-remote-delete passed in or configured - if (cfg.getValueBool("no_remote_delete")) { - // Configure the noRemoteDelete flag - log.vdebug("Configuring noRemoteDelete flag to TRUE as --no-remote-delete passed in or configured"); - sync.setNoRemoteDelete(); - } - // was --remove-source-files passed in or configured - if (cfg.getValueBool("remove_source_files")) { - // Configure the localDeleteAfterUpload flag - log.vdebug("Configuring localDeleteAfterUpload flag to TRUE as --remove-source-files passed in or configured"); - sync.setLocalDeleteAfterUpload(); - } - } - - // Do we configure to disable the upload validation routine - if (cfg.getValueBool("disable_upload_validation")) sync.setDisableUploadValidation(); - - // Do we configure to disable the download validation routine - if (cfg.getValueBool("disable_download_validation")) sync.setDisableDownloadValidation(); - - // Has the user enabled to bypass data preservation of renaming local files when there is a conflict? - if (cfg.getValueBool("bypass_data_preservation")) { - log.log("WARNING: Application has been configured to bypass local data preservation in the event of file conflict."); - log.log("WARNING: Local data loss MAY occur in this scenario."); - sync.setBypassDataPreservation(); - } + // Change the working directory to the 'sync_dir' as configured + chdir(runtimeSyncDirectory); - // Do we configure to clean up local files if using --download-only ? - if ((cfg.getValueBool("download_only")) && (cfg.getValueBool("cleanup_local_files"))) { - // --download-only and --cleanup-local-files were passed in - log.log("WARNING: Application has been configured to cleanup local files that are not present online."); - log.log("WARNING: Local data loss MAY occur in this scenario if you are expecting data to remain archived locally."); - sync.setCleanupLocalFiles(); - // Set the global flag as we will use this as thhe item to be passed into the sync function below - cleanupLocalFilesGlobal = true; - } - - // Are we configured to use a National Cloud Deployment - if (cfg.getValueString("azure_ad_endpoint") != "") { - // value is configured, is it a valid value? - if ((cfg.getValueString("azure_ad_endpoint") == "USL4") || (cfg.getValueString("azure_ad_endpoint") == "USL5") || (cfg.getValueString("azure_ad_endpoint") == "DE") || (cfg.getValueString("azure_ad_endpoint") == "CN")) { - // valid entries to flag we are using a National Cloud Deployment - // National Cloud Deployments do not support /delta as a query - // https://docs.microsoft.com/en-us/graph/deployments#supported-features - // Flag that we have a valid National Cloud Deployment that cannot use /delta queries - sync.setNationalCloudDeployment(); - } - } + // Do we need to validate the runtimeSyncDirectory to check for the presence of a '.nosync' file + checkForNoMountScenario(); - // Are we forcing to use /children scan instead of /delta to simulate National Cloud Deployment use of /children? 
- if (cfg.getValueBool("force_children_scan")) { - log.log("Forcing client to use /children scan rather than /delta to simulate National Cloud Deployment use of /children"); - sync.setNationalCloudDeployment(); - } + // Set the default thread pool value + defaultPoolThreads(1); - // Do we need to display the function processing timing - if (cfg.getValueBool("display_processing_time")) { - log.log("Forcing client to display function processing times"); - sync.setPerformanceProcessingOutput(); - } - - // Do we need to validate the syncDir to check for the presence of a '.nosync' file - if (cfg.getValueBool("check_nomount")) { - // we were asked to check the mounts - if (exists(syncDir ~ "/.nosync")) { - log.logAndNotify("ERROR: .nosync file found. Aborting synchronization process to safeguard data."); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - - // Do we need to create or remove a directory? - if ((cfg.getValueString("create_directory") != "") || (cfg.getValueString("remove_directory") != "")) { - // create directory - if (cfg.getValueString("create_directory") != "") { - // create a directory on OneDrive - sync.createDirectoryNoSync(cfg.getValueString("create_directory")); - } - //remove directory - if (cfg.getValueString("remove_directory") != "") { - // remove a directory on OneDrive - sync.deleteDirectoryNoSync(cfg.getValueString("remove_directory")); - } - } - - // Are we renaming or moving a directory? - if ((cfg.getValueString("source_directory") != "") && (cfg.getValueString("destination_directory") != "")) { - // We are renaming or moving a directory - sync.renameDirectoryNoSync(cfg.getValueString("source_directory"), cfg.getValueString("destination_directory")); - } - - // Are we obtaining the Office 365 Drive ID for a given Office 365 SharePoint Shared Library? - if (cfg.getValueString("get_o365_drive_id") != "") { - sync.querySiteCollectionForDriveID(cfg.getValueString("get_o365_drive_id")); - // Exit application - // Use exit scopes to shutdown API and cleanup data - return EXIT_SUCCESS; - } - - // --create-share-link - Are we createing a shareable link for an existing file on OneDrive? - if (cfg.getValueString("create_share_link") != "") { - // Query OneDrive for the file, and if valid, create a shareable link for the file - - // By default, the shareable link will be read-only. - // If the user adds: - // --with-editing-perms - // this will create a writeable link - bool writeablePermissions = cfg.getValueBool("with_editing_perms"); - sync.createShareableLinkForFile(cfg.getValueString("create_share_link"), writeablePermissions); + // Is the sync engine initialised correctly? + if (appConfig.syncEngineWasInitialised) { + // Configure some initial variables + string singleDirectoryPath; + string localPath = "."; + string remotePath = "/"; - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // --get-file-link - Are we obtaining the URL path for a synced file? - if (cfg.getValueString("get_file_link") != "") { - // Query OneDrive for the file link - sync.queryOneDriveForFileDetails(cfg.getValueString("get_file_link"), syncDir, "URL"); - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // --modified-by - Are we listing the modified-by details of a provided path? 
- if (cfg.getValueString("modified_by") != "") { - // Query OneDrive for the file link - sync.queryOneDriveForFileDetails(cfg.getValueString("modified_by"), syncDir, "ModifiedBy"); - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // Are we listing OneDrive Business Shared Folders - if (cfg.getValueBool("list_business_shared_folders")) { - // Is this a business account type? - if (sync.getAccountType() == "business"){ - // List OneDrive Business Shared Folders - sync.listOneDriveBusinessSharedFolders(); - } else { - log.error("ERROR: Unsupported account type for listing OneDrive Business Shared Folders"); - } - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // Are we going to sync OneDrive Business Shared Folders - if (cfg.getValueBool("sync_business_shared_folders")) { - // Is this a business account type? - if (sync.getAccountType() == "business"){ - // Configure flag to sync business folders - sync.setSyncBusinessFolders(); + if (!appConfig.getValueBool("resync")) { + // Check if there are interrupted upload session(s) + if (syncEngineInstance.checkForInterruptedSessionUploads) { + // Need to re-process the session upload files to resume the failed session uploads + addLogEntry("There are interrupted session uploads that need to be resumed ..."); + // Process the session upload files + syncEngineInstance.processForInterruptedSessionUploads(); + } } else { - log.error("ERROR: Unsupported account type for syncing OneDrive Business Shared Folders"); + // Clean up any upload session files due to --resync being used + syncEngineInstance.clearInterruptedSessionUploads(); } - } - - // Ensure that the value stored for cfg.getValueString("single_directory") does not contain any extra quotation marks - if (cfg.getValueString("single_directory") != ""){ - string originalSingleDirectoryValue = cfg.getValueString("single_directory"); - // Strip quotation marks from provided path to ensure no issues within a Docker environment when using passed in values - string updatedSingleDirectoryValue = strip(originalSingleDirectoryValue, "\""); - cfg.setValueString("single_directory", updatedSingleDirectoryValue); - } - - // Are we displaying the sync status of the client? - if (cfg.getValueBool("display_sync_status")) { - string remotePath = "/"; - // Are we doing a single directory check? - if (cfg.getValueString("single_directory") != ""){ - // Need two different path strings here - remotePath = cfg.getValueString("single_directory"); + + // Are we doing a single directory operation (--single-directory) ? + if (!appConfig.getValueString("single_directory").empty) { + // Set singleDirectoryPath + singleDirectoryPath = appConfig.getValueString("single_directory"); + + // Ensure that this is a normalised relative path to runtimeSyncDirectory + string normalisedRelativePath = replace(buildNormalizedPath(absolutePath(singleDirectoryPath)), buildNormalizedPath(absolutePath(runtimeSyncDirectory)), "." ); + + // The user provided a directory to sync within the configured 'sync_dir' path + // This also validates if the path being used exists online and/or does not have a 'case-insensitive match' + syncEngineInstance.setSingleDirectoryScope(normalisedRelativePath); + + // Does the directory we want to sync actually exist locally? 
+ if (!exists(singleDirectoryPath)) { + // The requested path to use with --single-directory does not exist locally within the configured 'sync_dir' + addLogEntry("WARNING: The requested path for --single-directory does not exist locally. Creating requested path within " ~ runtimeSyncDirectory, ["info", "notify"]); + // Make the required --single-directory path locally + mkdirRecurse(singleDirectoryPath); + // Configure the applicable permissions for the folder + addLogEntry("Setting directory permissions for: " ~ singleDirectoryPath, ["debug"]); + singleDirectoryPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + } + + // Update the paths that we use to perform the sync actions + localPath = singleDirectoryPath; + remotePath = singleDirectoryPath; + + // Display that we are syncing from a specific path due to --single-directory + addLogEntry("Syncing changes from this selected path: " ~ singleDirectoryPath, ["verbose"]); } - sync.queryDriveForChanges(remotePath); - } - - // Are we performing a sync, or monitor operation? - if ((cfg.getValueBool("synchronize")) || (cfg.getValueBool("monitor"))) { - // Initialise the monitor class, so that we can do more granular inotify handling when performing the actual sync - // needed for --synchronize and --monitor handling - Monitor m = new Monitor(selectiveSync); - - if (cfg.getValueBool("synchronize")) { - if (online) { - // set flag for exit scope - synchronizeConfigured = true; - - // Check user entry for local path - the above chdir means we are already in ~/OneDrive/ thus singleDirectory is local to this path - if (cfg.getValueString("single_directory") != "") { - // Does the directory we want to sync actually exist? - if (!exists(cfg.getValueString("single_directory"))) { - // The requested path to use with --single-directory does not exist locally within the configured 'sync_dir' - log.logAndNotify("WARNING: The requested path for --single-directory does not exist locally. Creating requested path within ", syncDir); - // Make the required --single-directory path locally - string singleDirectoryPath = cfg.getValueString("single_directory"); - mkdirRecurse(singleDirectoryPath); - // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", singleDirectoryPath); - singleDirectoryPath.setAttributes(cfg.returnRequiredDirectoryPermisions()); - } + + // Handle SIGINT and SIGTERM + setupSignalHandler(); + + // Are we doing a --sync operation? This includes doing any --single-directory operations + if (appConfig.getValueBool("synchronize")) { + // Did the user specify --upload-only? + if (appConfig.getValueBool("upload_only")) { + // Perform the --upload-only sync process + performUploadOnlySyncProcess(localPath); + } + + // Did the user specify --download-only? + if (appConfig.getValueBool("download_only")) { + // Only download data from OneDrive + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + // Perform the DB consistency check + // This will also delete any out-of-sync flagged items if configured to do so + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + // Do we cleanup local files? 
+ // - Deletes of data from online will already have been performed, but what we are now doing is searching the local filesystem + // for any new data locally, that usually would be uploaded to OneDrive, but instead, because of the options being + // used, will need to be deleted from the local filesystem + if (appConfig.getValueBool("cleanup_local_files")) { + // Perform the filesystem walk + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); } - // perform a --synchronize sync - // fullScanRequired = false, for final true-up - // but if we have sync_list configured, use syncListConfigured which = true - performSync(sync, cfg.getValueString("single_directory"), cfg.getValueBool("download_only"), cfg.getValueBool("local_first"), cfg.getValueBool("upload_only"), LOG_NORMAL, false, syncListConfigured, displaySyncOptions, cfg.getValueBool("monitor"), m, cleanupLocalFilesGlobal); - - // Write WAL and SHM data to file for this sync - log.vdebug("Merge contents of WAL and SHM files into main database file"); - itemDb.performVacuum(); } + + // If no use of --upload-only or --download-only + if ((!appConfig.getValueBool("upload_only")) && (!appConfig.getValueBool("download_only"))) { + // Perform the standard sync process + performStandardSyncProcess(localPath); + } + + // Detail the outcome of the sync process + displaySyncOutcome(); } - - if (cfg.getValueBool("monitor")) { - log.logAndNotify("Initializing monitor ..."); - log.log("OneDrive monitor interval (seconds): ", cfg.getValueLong("monitor_interval")); - - m.onDirCreated = delegate(string path) { + + // Are we doing a --monitor operation? + if (appConfig.getValueBool("monitor")) { + // What are the current values for the platform we are running on + // Max number of open files /proc/sys/fs/file-max + string maxOpenFiles = strip(readText("/proc/sys/fs/file-max")); + // What is the currently configured maximum inotify watches that can be used + // /proc/sys/fs/inotify/max_user_watches + string maxInotifyWatches = strip(readText("/proc/sys/fs/inotify/max_user_watches")); + + // Start the monitor process + addLogEntry("OneDrive synchronisation interval (seconds): " ~ to!string(appConfig.getValueLong("monitor_interval"))); + + // If we are in a --download-only method of operation, the output of these is not required + if (!appConfig.getValueBool("download_only")) { + addLogEntry("Maximum allowed open files: " ~ maxOpenFiles, ["verbose"]); + addLogEntry("Maximum allowed inotify user watches: " ~ maxInotifyWatches, ["verbose"]); + } + + // Configure the monitor class + filesystemMonitor = new Monitor(appConfig, selectiveSync); + + // Delegated function for when inotify detects a new local directory has been created + filesystemMonitor.onDirCreated = delegate(string path) { // Handle .folder creation if skip_dotfiles is enabled - if ((cfg.getValueBool("skip_dotfiles")) && (selectiveSync.isDotFile(path))) { - log.vlog("[M] Skipping watching local path - .folder found & --skip-dot-files enabled: ", path); + if ((appConfig.getValueBool("skip_dotfiles")) && (isDotFile(path))) { + addLogEntry("[M] Skipping watching local path - .folder found & --skip-dot-files enabled: " ~ path, ["verbose"]); } else { - log.vlog("[M] Local directory created: ", path); + addLogEntry("[M] Local directory created: " ~ path, ["verbose"]); try { - sync.scanForDifferences(path); + syncEngineInstance.scanLocalFilesystemPathForNewData(path); } catch (CurlException e) { - log.vlog("Offline, cannot create remote dir!"); - } catch(Exception e) { - 
log.logAndNotify("Cannot create remote directory: ", e.msg); + addLogEntry("Offline, cannot create remote dir: " ~ path, ["verbose"]); + } catch (Exception e) { + addLogEntry("Cannot create remote directory: " ~ e.msg, ["info", "notify"]); } } }; - m.onFileChanged = delegate(string path) { - log.vlog("[M] Local file changed: ", path); - try { - sync.scanForDifferences(path); - } catch (CurlException e) { - log.vlog("Offline, cannot upload changed item!"); - } catch(Exception e) { - log.logAndNotify("Cannot upload file changes/creation: ", e.msg); - } + + // Delegated function for when inotify detects a local file has been changed + filesystemMonitor.onFileChanged = delegate(string[] changedLocalFilesToUploadToOneDrive) { + // Handle a potentially locally changed file + // Logging for this event moved to handleLocalFileTrigger() due to threading and false triggers from scanLocalFilesystemPathForNewData() above + addLogEntry("[M] Total number of local file changed: " ~ to!string(changedLocalFilesToUploadToOneDrive.length)); + syncEngineInstance.handleLocalFileTrigger(changedLocalFilesToUploadToOneDrive); }; - m.onDelete = delegate(string path) { - log.log("Received inotify delete event from operating system .. attempting item deletion as requested"); - log.vlog("[M] Local item deleted: ", path); + + // Delegated function for when inotify detects a delete event + filesystemMonitor.onDelete = delegate(string path) { + addLogEntry("[M] Local item deleted: " ~ path, ["verbose"]); try { - sync.deleteByPath(path); + addLogEntry("The operating system sent a deletion notification. Trying to delete the item as requested"); + syncEngineInstance.deleteByPath(path); } catch (CurlException e) { - log.vlog("Offline, cannot delete item!"); - } catch(SyncException e) { + addLogEntry("Offline, cannot delete item: " ~ path, ["verbose"]); + } catch (SyncException e) { if (e.msg == "The item to delete is not in the local database") { - log.vlog("Item cannot be deleted from OneDrive because it was not found in the local database"); + addLogEntry("Item cannot be deleted from Microsoft OneDrive because it was not found in the local database", ["verbose"]); } else { - log.logAndNotify("Cannot delete remote item: ", e.msg); + addLogEntry("Cannot delete remote item: " ~ e.msg, ["info", "notify"]); } - } catch(Exception e) { - log.logAndNotify("Cannot delete remote item: ", e.msg); + } catch (Exception e) { + addLogEntry("Cannot delete remote item: " ~ e.msg, ["info", "notify"]); } }; - m.onMove = delegate(string from, string to) { - log.vlog("[M] Local item moved: ", from, " -> ", to); + + // Delegated function for when inotify detects a move event + filesystemMonitor.onMove = delegate(string from, string to) { + addLogEntry("[M] Local item moved: " ~ from ~ " -> " ~ to, ["verbose"]); try { // Handle .folder -> folder if skip_dotfiles is enabled - if ((cfg.getValueBool("skip_dotfiles")) && (selectiveSync.isDotFile(from))) { + if ((appConfig.getValueBool("skip_dotfiles")) && (isDotFile(from))) { // .folder -> folder handling - has to be handled as a new folder - sync.scanForDifferences(to); + syncEngineInstance.scanLocalFilesystemPathForNewData(to); } else { - sync.uploadMoveItem(from, to); + syncEngineInstance.uploadMoveItem(from, to); } } catch (CurlException e) { - log.vlog("Offline, cannot move item!"); - } catch(Exception e) { - log.logAndNotify("Cannot move item: ", e.msg); + addLogEntry("Offline, cannot move item !", ["verbose"]); + } catch (Exception e) { + addLogEntry("Cannot move item: " ~ e.msg, ["info", 
"notify"]); } }; - signal(SIGINT, &exitHandler); - signal(SIGTERM, &exitHandler); - - // attempt to initialise monitor class - if (!cfg.getValueBool("download_only")) { + + // Initialise the local filesystem monitor class using inotify to monitor for local filesystem changes + // If we are in a --download-only method of operation, we do not enable local filesystem monitoring + if (!appConfig.getValueBool("download_only")) { + // Not using --download-only try { - m.init(cfg, cfg.getValueLong("verbose") > 0, cfg.getValueBool("skip_symlinks"), cfg.getValueBool("check_nosync")); - } catch (MonitorException e) { - // monitor initialisation failed - log.error("ERROR: ", e.msg); - oneDrive.shutdown(); + addLogEntry("Initialising filesystem inotify monitoring ..."); + filesystemMonitor.initialise(); + addLogEntry("Performing initial synchronisation to ensure consistent local state ..."); + } catch (MonitorException e) { + // monitor class initialisation failed + addLogEntry("ERROR: " ~ e.msg); return EXIT_FAILURE; } } - - // monitor loop + + // Filesystem monitor loop variables + // Immutables + immutable auto checkOnlineInterval = dur!"seconds"(appConfig.getValueLong("monitor_interval")); + immutable auto githubCheckInterval = dur!"seconds"(86400); + immutable ulong fullScanFrequency = appConfig.getValueLong("monitor_fullscan_frequency"); + immutable ulong logOutputSupressionInterval = appConfig.getValueLong("monitor_log_frequency"); + immutable bool webhookEnabled = appConfig.getValueBool("webhook_enabled"); + immutable string loopStartOutputMessage = "################################################## NEW LOOP ##################################################"; + immutable string loopStopOutputMessage = "################################################ LOOP COMPLETE ###############################################"; + + // Changables bool performMonitor = true; ulong monitorLoopFullCount = 0; - immutable auto checkInterval = dur!"seconds"(cfg.getValueLong("monitor_interval")); - immutable auto githubCheckInterval = dur!"seconds"(86400); - immutable long logInterval = cfg.getValueLong("monitor_log_frequency"); - immutable long fullScanFrequency = cfg.getValueLong("monitor_fullscan_frequency"); + ulong fullScanFrequencyLoopCount = 0; + ulong monitorLogOutputLoopCount = 0; MonoTime lastCheckTime = MonoTime.currTime(); MonoTime lastGitHubCheckTime = MonoTime.currTime(); - long logMonitorCounter = 0; - long fullScanCounter = 0; - // set fullScanRequired to true so that at application startup we perform a full walk - bool fullScanRequired = true; - bool syncListConfiguredFullScanOverride = false; - // if sync list is configured, set to true - if (syncListConfigured) { - // sync list is configured - syncListConfiguredFullScanOverride = true; - } - immutable bool webhookEnabled = cfg.getValueBool("webhook_enabled"); - + // Webhook Notification Handling + bool notificationReceived = false; + while (performMonitor) { - if (!cfg.getValueBool("download_only")) { + // Do we need to validate the runtimeSyncDirectory to check for the presence of a '.nosync' file - the disk may have been ejected .. 
+ checkForNoMountScenario(); + + // If we are in a --download-only method of operation, there is no filesystem monitoring, so no inotify events to check + if (!appConfig.getValueBool("download_only")) { try { - m.update(online); + // Process any inotify events + filesystemMonitor.update(true); } catch (MonitorException e) { // Catch any exceptions thrown by inotify / monitor engine - log.error("ERROR: The following inotify error was generated: ", e.msg); + addLogEntry("ERROR: The following inotify error was generated: " ~ e.msg); } } - + // Check for notifications pushed from Microsoft to the webhook - bool notificationReceived = false; if (webhookEnabled) { // Create a subscription on the first run, or renew the subscription // on subsequent runs when it is about to expire. - oneDrive.createOrRenewSubscription(); - - // Process incoming notifications if any. - - // Empirical evidence shows that Microsoft often sends multiple - // notifications for one single change, so we need a loop to exhaust - // all signals that were queued up by the webhook. The notifications - // do not contain any actual changes, and we will always rely do the - // delta endpoint to sync to latest. Therefore, only one sync run is - // good enough to catch up for multiple notifications. - for (int signalCount = 0;; signalCount++) { - const auto signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {}); - if (signalExists) { - notificationReceived = true; - } else { - if (notificationReceived) { - log.log("Received ", signalCount," refresh signals from the webhook"); - } - break; - } - } - } - - auto currTime = MonoTime.currTime(); - // has monitor_interval elapsed or are we at application startup / monitor startup? - // in a --resync scenario, if we have not 're-populated' the database, valid changes will get skipped: - // Monitor directory: ./target - // Monitor directory: target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby - // [M] Item moved: random_files/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby -> target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby - // Moving random_files/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby to target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby - // Skipping uploading this new file as parent path is not in the database: target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby - // 'target' should be in the DB, it should also exist online, but because of --resync, it does not exist in the database thus parent check fails - if (notificationReceived || (currTime - lastCheckTime > checkInterval) || (monitorLoopFullCount == 0)) { - // Check Application Version against GitHub once per day - if (currTime - lastGitHubCheckTime > githubCheckInterval) { - // --monitor GitHub Application Version Check time expired - checkApplicationVersion(); - // update when we have performed this check - lastGitHubCheckTime = MonoTime.currTime(); - } - - // monitor sync loop - logOutputMessage = "################################################## NEW LOOP ##################################################"; - if (displaySyncOptions) { - log.log(logOutputMessage); + if (oneDriveWebhook is null) { + oneDriveWebhook = new OneDriveWebhook(thisTid, appConfig); + oneDriveWebhook.serve(); } else { - log.vdebug(logOutputMessage); + oneDriveWebhook.createOrRenewSubscription(); } - // Increment monitorLoopFullCount + } + + // Get the current time this loop is starting + auto currentTime = MonoTime.currTime(); + + // Do we perform a sync with OneDrive? 
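// A minimal sketch of the interval test in the hunk below, assuming a hypothetical
// 300-second 'monitor_interval'; MonoTime provides a monotonic clock that is immune
// to wall-clock jumps, which is why it drives the loop rather than Clock.currTime().
import core.time : MonoTime, dur;
import std.stdio : writeln;

void main() {
    immutable checkOnlineInterval = dur!"seconds"(300); // assumed 'monitor_interval'
    MonoTime lastCheckTime = MonoTime.currTime();
    // ... monitor work happens here ...
    auto currentTime = MonoTime.currTime();
    if (currentTime - lastCheckTime >= checkOnlineInterval) {
        writeln("monitor_interval elapsed - sync with OneDrive");
    } else {
        writeln("next sync in ", checkOnlineInterval - (currentTime - lastCheckTime));
    }
}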
+ if ((currentTime - lastCheckTime >= checkOnlineInterval) || (monitorLoopFullCount == 0)) {
+ // Increment relevant counters
 monitorLoopFullCount++;
- // Display memory details at start of loop
- if (displayMemoryUsage) {
- log.displayMemoryUsagePreGC();
- }
-
- // log monitor output suppression
- logMonitorCounter += 1;
- if (logMonitorCounter > logInterval) {
- logMonitorCounter = 1;
- }
-
- // do we perform a full scan of sync_dir and database integrity check?
- fullScanCounter += 1;
- // fullScanFrequency = 'monitor_fullscan_frequency' from config
- if (fullScanCounter > fullScanFrequency){
- // 'monitor_fullscan_frequency' counter has exceeded
- fullScanCounter = 1;
- // set fullScanRequired = true due to 'monitor_fullscan_frequency' counter has been exceeded
- fullScanRequired = true;
- // are we using sync_list?
- if (syncListConfigured) {
- // sync list is configured
- syncListConfiguredFullScanOverride = true;
+ fullScanFrequencyLoopCount++;
+ monitorLogOutputLoopCount++;
+
+ // Is a full scan at a specific frequency enabled?
+ if (fullScanFrequency > 0) {
+ // Full Scan set for some 'frequency' - do we flag to perform a full scan of the online data?
+ if (fullScanFrequencyLoopCount > fullScanFrequency) {
+ // set full scan trigger for true up
+ addLogEntry("Enabling Full Scan True Up (fullScanFrequencyLoopCount > fullScanFrequency), resetting fullScanFrequencyLoopCount = 1", ["debug"]);
+ fullScanFrequencyLoopCount = 1;
+ appConfig.fullScanTrueUpRequired = true;
+ } else {
+ // unset full scan trigger for true up
+ addLogEntry("Disabling Full Scan True Up", ["debug"]);
+ appConfig.fullScanTrueUpRequired = false;
 }
- }
-
- if (displaySyncOptions) {
- // sync option handling per sync loop
- log.log("fullScanCounter = ", fullScanCounter);
- log.log("syncListConfigured = ", syncListConfigured);
- log.log("fullScanRequired = ", fullScanRequired);
- log.log("syncListConfiguredFullScanOverride = ", syncListConfiguredFullScanOverride);
 } else {
- // sync option handling per sync loop via debug
- log.vdebug("fullScanCounter = ", fullScanCounter);
- log.vdebug("syncListConfigured = ", syncListConfigured);
- log.vdebug("fullScanRequired = ", fullScanRequired);
- log.vdebug("syncListConfiguredFullScanOverride = ", syncListConfiguredFullScanOverride);
+ // No, it is disabled - ensure this is false
+ appConfig.fullScanTrueUpRequired = false;
 }
-
- try {
- if (!initSyncEngine(sync)) {
- // Use exit scopes to shutdown API
- return EXIT_FAILURE;
- }
- try {
- // performance timing
- SysTime startSyncProcessingTime = Clock.currTime();
-
- // perform a --monitor sync
- if ((cfg.getValueLong("verbose") > 0) || (logMonitorCounter == logInterval) || (fullScanRequired) ) {
- // log to console and log file if enabled
- if (cfg.getValueBool("display_processing_time")) {
- log.log(startMessage, " ", startSyncProcessingTime);
- } else {
- log.log(startMessage);
- }
- } else {
- // log file only if enabled so we know when a sync started when not using --verbose
- log.fileOnly(startMessage);
- }
- performSync(sync, cfg.getValueString("single_directory"), cfg.getValueBool("download_only"), cfg.getValueBool("local_first"), cfg.getValueBool("upload_only"), (logMonitorCounter == logInterval ?
MONITOR_LOG_QUIET : MONITOR_LOG_SILENT), fullScanRequired, syncListConfiguredFullScanOverride, displaySyncOptions, cfg.getValueBool("monitor"), m, cleanupLocalFilesGlobal);
- if (!cfg.getValueBool("download_only")) {
- // discard all events that may have been generated by the sync that have not already been handled
- try {
- m.update(false);
- } catch (MonitorException e) {
- // Catch any exceptions thrown by inotify / monitor engine
- log.error("ERROR: The following inotify error was generated: ", e.msg);
- }
- }
- SysTime endSyncProcessingTime = Clock.currTime();
- if ((cfg.getValueLong("verbose") > 0) || (logMonitorCounter == logInterval) || (fullScanRequired) ) {
- // log to console and log file if enabled
- if (cfg.getValueBool("display_processing_time")) {
- log.log(finishMessage, " ", endSyncProcessingTime);
- log.log("Elapsed Sync Time with OneDrive Service: ", (endSyncProcessingTime - startSyncProcessingTime));
- } else {
- log.log(finishMessage);
- }
+
+ // Loop Start
+ addLogEntry(loopStartOutputMessage, ["debug"]);
+ addLogEntry("Total Run-Time Loop Number: " ~ to!string(monitorLoopFullCount), ["debug"]);
+ addLogEntry("Full Scan Frequency Loop Number: " ~ to!string(fullScanFrequencyLoopCount), ["debug"]);
+ SysTime startFunctionProcessingTime = Clock.currTime();
+ addLogEntry("Start Monitor Loop Time: " ~ to!string(startFunctionProcessingTime), ["debug"]);
+
+ // Do we perform any monitor console logging output suppression?
+ // 'monitor_log_frequency' controls how often, in a non-verbose application output mode, the full
+ // output of what is occurring is shown. This is done to lessen the 'verbosity' of non-verbose
+ // logging, but only when running in --monitor
+ if (monitorLogOutputLoopCount > logOutputSupressionInterval) {
+ // unsuppress the logging output
+ monitorLogOutputLoopCount = 1;
+ addLogEntry("Unsuppressing initial sync log output", ["debug"]);
+ appConfig.suppressLoggingOutput = false;
+ } else {
+ // do we suppress the logging output to the absolute minimum
+ if (monitorLoopFullCount == 1) {
+ // application startup with --monitor
+ addLogEntry("Unsuppressing initial sync log output", ["debug"]);
+ appConfig.suppressLoggingOutput = false;
+ } else {
+ // only suppress if we are not doing --verbose or higher
+ if (appConfig.verbosityCount == 0) {
+ addLogEntry("Suppressing --monitor log output", ["debug"]);
+ appConfig.suppressLoggingOutput = true;
 } else {
- // log file only if enabled so we know when a sync completed when not using --verbose
- log.fileOnly(finishMessage);
+ addLogEntry("Unsuppressing log output", ["debug"]);
+ appConfig.suppressLoggingOutput = false;
 }
- } catch (CurlException e) {
- // we already tried three times in the performSync routine
- // if we still have problems, then the sync handle might have
- // gone stale and we need to re-initialize the sync engine
- log.log("Persistent connection errors, reinitializing connection");
- sync.reset();
 }
- } catch (CurlException e) {
- log.log("Cannot initialize connection to OneDrive");
 }
- // performSync complete, set lastCheckTime to current time
- lastCheckTime = MonoTime.currTime();
- // Display memory details before cleanup
- if (displayMemoryUsage) log.displayMemoryUsagePreGC();
- // Perform Garbage Cleanup
- GC.collect();
- // Display memory details after cleanup
- if (displayMemoryUsage) log.displayMemoryUsagePostGC();
+ // How long has the application been running for?
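// A minimal sketch of the run-time measurement used just below: subtracting two SysTime
// values from Clock.currTime() yields a core.time.Duration that prints readably.
import std.datetime.systime : Clock, SysTime;
import core.thread : Thread;
import core.time : dur;
import std.stdio : writeln;

void main() {
    SysTime applicationStartTime = Clock.currTime();
    Thread.sleep(dur!"msecs"(50)); // stand-in for real work
    auto elapsedTime = Clock.currTime() - applicationStartTime;
    writeln("Application run-time thus far: ", elapsedTime);
}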
+ auto elapsedTime = Clock.currTime() - applicationStartTime; + addLogEntry("Application run-time thus far: " ~ to!string(elapsedTime), ["debug"]); - // If we did a full scan, make sure we merge the conents of the WAL and SHM to disk - if (fullScanRequired) { - // Write WAL and SHM data to file for this loop - log.vdebug("Merge contents of WAL and SHM files into main database file"); - itemDb.performVacuum(); + // Need to re-validate that the client is still online for this loop + if (testInternetReachability(appConfig)) { + // Starting a sync + addLogEntry("Starting a sync with Microsoft OneDrive"); + + // Attempt to reset syncFailures + syncEngineInstance.resetSyncFailures(); + + // Update cached quota details from online as this may have changed online in the background outside of this application + syncEngineInstance.freshenCachedDriveQuotaDetails(); + + // Did the user specify --upload-only? + if (appConfig.getValueBool("upload_only")) { + // Perform the --upload-only sync process + performUploadOnlySyncProcess(localPath, filesystemMonitor); + } else { + // Perform the standard sync process + performStandardSyncProcess(localPath, filesystemMonitor); + } + + // Handle any new inotify events + filesystemMonitor.update(true); + + // Detail the outcome of the sync process + displaySyncOutcome(); + + // Cleanup sync process arrays + syncEngineInstance.cleanupArrays(); + + // Write WAL and SHM data to file for this loop and release memory used by in-memory processing + addLogEntry("Merge contents of WAL and SHM files into main database file", ["debug"]); + itemDB.performVacuum(); + } else { + // Not online + addLogEntry("Microsoft OneDrive service is not reachable at this time. Will re-try on next sync attempt."); } - // reset fullScanRequired and syncListConfiguredFullScanOverride - fullScanRequired = false; - if (syncListConfigured) syncListConfiguredFullScanOverride = false; + // Output end of loop processing times + SysTime endFunctionProcessingTime = Clock.currTime(); + addLogEntry("End Monitor Loop Time: " ~ to!string(endFunctionProcessingTime), ["debug"]); + addLogEntry("Elapsed Monitor Loop Processing Time: " ~ to!string((endFunctionProcessingTime - startFunctionProcessingTime)), ["debug"]); + + // Release all the curl instances used during this loop + // New curl instances will be established on next loop + addLogEntry("CurlEngine Pool Size PRE Cleanup: " ~ to!string(CurlEngine.curlEnginePoolLength()), ["debug"]); + CurlEngine.releaseAllCurlInstances(); + addLogEntry("CurlEngine Pool Size POST Cleanup: " ~ to!string(CurlEngine.curlEnginePoolLength()) , ["debug"]); + + // Display memory details before garbage collection + if (displayMemoryUsage) displayMemoryUsagePreGC(); + // Perform Garbage Collection + GC.collect(); + // Return free memory to the OS + GC.minimize(); + + // Display memory details after garbage collection + if (displayMemoryUsage) displayMemoryUsagePostGC(); + + // Log that this loop is complete + addLogEntry(loopStopOutputMessage, ["debug"]); + + // performSync complete, set lastCheckTime to current time + lastCheckTime = MonoTime.currTime(); - // monitor loop complete - logOutputMessage = "################################################ LOOP COMPLETE ###############################################"; - - // Handle display options - if (displaySyncOptions) { - log.log(logOutputMessage); - } else { - log.vdebug(logOutputMessage); - } // Developer break via config option - if (cfg.getValueLong("monitor_max_loop") > 0) { + if 
(appConfig.getValueLong("monitor_max_loop") > 0) { // developer set option to limit --monitor loops - if (monitorLoopFullCount == (cfg.getValueLong("monitor_max_loop"))) { + if (monitorLoopFullCount == (appConfig.getValueLong("monitor_max_loop"))) { + performMonitor = false; + addLogEntry("Exiting after " ~ to!string(monitorLoopFullCount) ~ " loops due to developer set option"); + } + } + } + + if (performMonitor) { + auto nextCheckTime = lastCheckTime + checkOnlineInterval; + currentTime = MonoTime.currTime(); + auto sleepTime = nextCheckTime - currentTime; + addLogEntry("Sleep for " ~ to!string(sleepTime), ["debug"]); + + if(filesystemMonitor.initialised || webhookEnabled) { + if(filesystemMonitor.initialised) { + // If local monitor is on and is waiting (previous event was not from webhook) + // start the worker and wait for event + if (!notificationReceived) + filesystemMonitor.send(true); + } + + if(webhookEnabled) { + // if onedrive webhook is enabled + // update sleep time based on renew interval + Duration nextWebhookCheckDuration = oneDriveWebhook.getNextExpirationCheckDuration(); + if (nextWebhookCheckDuration < sleepTime) { + sleepTime = nextWebhookCheckDuration; + addLogEntry("Update sleeping time to " ~ to!string(sleepTime), ["debug"]); + } + // Webhook Notification reset to false for this loop + notificationReceived = false; + } + + int res = 1; + // Process incoming notifications if any. + auto signalExists = receiveTimeout(sleepTime, (int msg) {res = msg;},(ulong _) {notificationReceived = true;}); + + // Debug values + addLogEntry("signalExists = " ~ to!string(signalExists), ["debug"]); + addLogEntry("worker status = " ~ to!string(res), ["debug"]); + addLogEntry("notificationReceived = " ~ to!string(notificationReceived), ["debug"]); + + // Empirical evidence shows that Microsoft often sends multiple + // notifications for one single change, so we need a loop to exhaust + // all signals that were queued up by the webhook. The notifications + // do not contain any actual changes, and we will always rely do the + // delta endpoint to sync to latest. Therefore, only one sync run is + // good enough to catch up for multiple notifications. 
+ if (notificationReceived) { + int signalCount = 1; + while (true) { + signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {}); + if (signalExists) { + signalCount++; + } else { + addLogEntry("Received " ~ to!string(signalCount) ~ " refresh signals from the webhook"); + oneDriveWebhookCallback(); + break; + } + } + } + + if(res == -1) { + addLogEntry("ERROR: Monitor worker failed."); + monitorFailures = true; performMonitor = false; - log.log("Exiting after ", monitorLoopFullCount, " loops due to developer set option"); } + } else { + // no hooks available, nothing to check + Thread.sleep(sleepTime); } } - // Sleep the monitor thread for 1 second, loop around and pick up any inotify changes - Thread.sleep(dur!"seconds"(1)); } } + } else { + // Exit application as the sync engine could not be initialised + addLogEntry("Application Sync Engine could not be initialised correctly"); + // Use exit scope + return EXIT_FAILURE; + } + + // Exit application using exit scope + if (!syncEngineInstance.syncFailures && !monitorFailures) { + return EXIT_SUCCESS; + } else { + return EXIT_FAILURE; + } +} + +void oneDriveWebhookCallback() { + // If we are in a --download-only method of operation, there is no filesystem monitoring, so no inotify events to check + if (!appConfig.getValueBool("download_only")) { + try { + // Process any inotify events + filesystemMonitor.update(true); + } catch (MonitorException e) { + // Catch any exceptions thrown by inotify / monitor engine + addLogEntry("ERROR: The following inotify error was generated: " ~ e.msg); + } + } + + // Download data from OneDrive last + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events + filesystemMonitor.update(true); + } +} + +void performUploadOnlySyncProcess(string localPath, Monitor filesystemMonitor = null) { + // Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + if (appConfig.getValueBool("monitor")) { + // Handle any inotify events whilst the DB was being scanned + filesystemMonitor.update(true); + } + + // Scan the configured 'sync_dir' for new data to upload + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events whilst the local filesystem was being scanned + filesystemMonitor.update(true); + } +} + +void performStandardSyncProcess(string localPath, Monitor filesystemMonitor = null) { + + // If we are performing log suppression, output this message so the user knows what is happening + if (appConfig.suppressLoggingOutput) { + addLogEntry("Syncing changes from Microsoft OneDrive ..."); + } + + // Zero out these arrays + syncEngineInstance.fileDownloadFailures = []; + syncEngineInstance.fileUploadFailures = []; + + // Which way do we sync first? 
+ // OneDrive first then local changes (normal operational process that uses OneDrive as the source of truth) + // Local First then OneDrive changes (alternate operation process to use local files as source of truth) + if (appConfig.getValueBool("local_first")) { + // Local data first + // Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + if (appConfig.getValueBool("monitor")) { + // Handle any inotify events whilst the DB was being scanned + filesystemMonitor.update(true); + } + + // Scan the configured 'sync_dir' for new data to upload to OneDrive + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events whilst the local filesystem was being scanned + filesystemMonitor.update(true); + } + + // Download data from OneDrive last + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Cancel out any inotify events from downloading data + filesystemMonitor.update(false); + } + } else { + // Normal sync + // Download data from OneDrive first + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Cancel out any inotify events from downloading data + filesystemMonitor.update(false); + } + + // Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + if (appConfig.getValueBool("monitor")) { + // Handle any inotify events whilst the DB was being scanned + filesystemMonitor.update(true); + } + + // Is --download-only NOT configured? 
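// A condensed sketch of the ordering decision above, using hypothetical stand-ins for
// the sync engine calls: the 'local_first' option simply flips which side is treated
// as the source of truth before the other side is reconciled.
import std.stdio : writeln;

void downloadOneDriveChanges() { writeln("apply online changes locally"); }
void uploadLocalChanges()      { writeln("upload local changes online"); }

void standardSync(bool localFirst) {
    if (localFirst) {
        uploadLocalChanges();       // local filesystem is the source of truth
        downloadOneDriveChanges();
    } else {
        downloadOneDriveChanges();  // online data is the source of truth (default)
        uploadLocalChanges();
    }
}

void main() {
    standardSync(false); // the normal operational process
}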
+ if (!appConfig.getValueBool("download_only")) { + + // Scan the configured 'sync_dir' for new data to upload to OneDrive + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events whilst the local filesystem was being scanned + filesystemMonitor.update(true); + } + + // Make sure we sync any DB data to this point, but only if not in --monitor mode + // In --monitor mode, this is handled within the 'loop', based on when the full scan true up is being performed + if (!appConfig.getValueBool("monitor")) { + itemDB.performVacuum(); + } + + // If we are not doing a 'force_children_scan' perform a true-up + // 'force_children_scan' is used when using /children rather than /delta and it is not efficient to re-run this exact same process twice + if (!appConfig.getValueBool("force_children_scan")) { + // Perform the final true up scan to ensure we have correctly replicated the current online state locally + if (!appConfig.suppressLoggingOutput) { + addLogEntry("Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process"); + } + // We pass in the 'appConfig.fullScanTrueUpRequired' value which then flags do we use the configured 'deltaLink' + // If 'appConfig.fullScanTrueUpRequired' is true, we do not use the 'deltaLink' if we are in --monitor mode, thus forcing a full scan true up + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Cancel out any inotify events from downloading data + filesystemMonitor.update(false); + } + } + } + } +} + +void displaySyncOutcome() { + + // Detail any download or upload transfer failures + syncEngineInstance.displaySyncFailures(); + + // Sync is either complete or partially complete + if (!syncEngineInstance.syncFailures) { + // No download or upload issues + if (!appConfig.getValueBool("monitor")) addLogEntry(); // Add an additional line break so that this is clear when using --sync + addLogEntry("Sync with Microsoft OneDrive is complete"); + } else { + addLogEntry(); + addLogEntry("Sync with Microsoft OneDrive has completed, however there are items that failed to sync."); + // Due to how the OneDrive API works 'changes' such as add new files online, rename files online, delete files online are only sent once when using the /delta API call. + // That we failed to download it, we need to track that, and then issue a --resync to download any of these failed files .. unfortunate, but there is no easy way here + if (!syncEngineInstance.fileDownloadFailures.empty) { + addLogEntry("To fix any download failures you may need to perform a --resync to ensure this system is correctly synced with your Microsoft OneDrive Account"); + } + if (!syncEngineInstance.fileUploadFailures.empty) { + addLogEntry("To fix any upload failures you may need to perform a --resync to ensure this system is correctly synced with your Microsoft OneDrive Account"); + } + // So that from a logging perspective these messages are clear, add a line break in + addLogEntry(); } +} - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; +void processResyncDatabaseRemoval(string databaseFilePathToRemove) { + addLogEntry("Testing if we have exclusive access to local database file", ["debug"]); + + // Are we the only running instance? 
Test that we can open the database file path + itemDB = new ItemDatabase(databaseFilePathToRemove); + + // did we successfully initialise the database class? + if (!itemDB.isDatabaseInitialised()) { + // no .. destroy class + itemDB = null; + // exit application - void function, force exit this way + exit(EXIT_FAILURE); + } + + // If we have exclusive access we will not have exited + // destroy access test + destroy(itemDB); + // delete application sync state + addLogEntry("Deleting the saved application sync status ..."); + if (!dryRun) { + safeRemove(databaseFilePathToRemove); + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... + addLogEntry("DRY RUN: Not removing the saved application sync status"); + } } -void cleanupDryRunDatabase(string databaseFilePathDryRun) -{ - // cleanup dry-run data - log.vdebug("Running cleanupDryRunDatabase"); - string dryRunShmFile = databaseFilePathDryRun ~ "-shm"; - string dryRunWalFile = databaseFilePathDryRun ~ "-wal"; - if (exists(databaseFilePathDryRun)) { - // remove the file - log.vdebug("Removing items-dryrun.sqlite3 as dry run operations complete"); - // remove items-dryrun.sqlite3 - safeRemove(databaseFilePathDryRun); +void cleanupDryRunDatabaseFiles(string dryRunDatabaseFile) { + // Temp variables + string dryRunShmFile = dryRunDatabaseFile ~ "-shm"; + string dryRunWalFile = dryRunDatabaseFile ~ "-wal"; + + // If the dry run database exists, clean this up + if (exists(dryRunDatabaseFile)) { + // remove the existing file + addLogEntry("DRY-RUN: Removing items-dryrun.sqlite3 as it still exists for some reason", ["debug"]); + safeRemove(dryRunDatabaseFile); } - // silent cleanup of shm and wal files if they exist + + // silent cleanup of shm files if it exists if (exists(dryRunShmFile)) { // remove items-dryrun.sqlite3-shm + addLogEntry("DRY-RUN: Removing items-dryrun.sqlite3-shm as it still exists for some reason", ["debug"]); safeRemove(dryRunShmFile); } + + // silent cleanup of wal files if it exists if (exists(dryRunWalFile)) { // remove items-dryrun.sqlite3-wal + addLogEntry("DRY-RUN: Removing items-dryrun.sqlite3-wal as it still exists for some reason", ["debug"]); safeRemove(dryRunWalFile); } } -bool initSyncEngine(SyncEngine sync) -{ - try { - sync.init(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 400 || e.httpStatusCode == 401) { - // Authorization is invalid - log.log("\nAuthorization token invalid, use --reauth to authorize the client again\n"); - return false; - } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error, message already printed - return false; +void checkForNoMountScenario() { + // If this is a 'mounted' folder, the 'mount point' should have this file to help the application stop any action to preserve data because the drive to mount is not currently mounted + if (appConfig.getValueBool("check_nomount")) { + // we were asked to check the mount point for the presence of a '.nosync' file + if (exists(".nosync")) { + addLogEntry("ERROR: .nosync file found in directory mount point. 
Aborting application startup process to safeguard data.", ["info", "notify"]); + // Perform the shutdown process + performSynchronisedExitProcess("check_nomount"); + // Exit + exit(EXIT_FAILURE); } } - return true; } -// try to synchronize the folder three times -void performSync(SyncEngine sync, string singleDirectory, bool downloadOnly, bool localFirst, bool uploadOnly, long logLevel, bool fullScanRequired, bool syncListConfiguredFullScanOverride, bool displaySyncOptions, bool monitorEnabled, Monitor m, bool cleanupLocalFiles) -{ - int count; - string remotePath = "/"; - string localPath = "."; - string logOutputMessage; - - // performSync API scan triggers - log.vdebug("performSync API scan triggers"); - log.vdebug("-----------------------------"); - log.vdebug("fullScanRequired = ", fullScanRequired); - log.vdebug("syncListConfiguredFullScanOverride = ", syncListConfiguredFullScanOverride); - log.vdebug("-----------------------------"); +// Getting around the @nogc problem +// https://p0nce.github.io/d-idioms/#Bypassing-@nogc +auto assumeNoGC(T) (T t) if (isFunctionPointer!T || isDelegate!T) { + enum attrs = functionAttributes!T | FunctionAttribute.nogc; + return cast(SetFunctionAttributes!(T, functionLinkage!T, attrs)) t; +} - // Are we doing a single directory sync? - if (singleDirectory != ""){ - // Need two different path strings here - remotePath = singleDirectory; - localPath = singleDirectory; - // Set flag for singleDirectoryScope for change handling - sync.setSingleDirectoryScope(); - } +void setupSignalHandler() { + sigaction_t action; + action.sa_handler = &exitHandler; // Direct function pointer assignment + sigemptyset(&action.sa_mask); // Initialize the signal set to empty + action.sa_flags = 0; + sigaction(SIGINT, &action, null); // Interrupt from keyboard + sigaction(SIGTERM, &action, null); // Termination signal +} - // Due to Microsoft Sharepoint 'enrichment' of files, we try to download the Microsoft modified file automatically - // Set flag if we are in upload only state to handle this differently - // See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details - if (uploadOnly) sync.setUploadOnly(); +// Catch SIGINT (CTRL-C) and SIGTERM (kill), handle rapid repeat presses +extern(C) nothrow @nogc @system void exitHandler(int value) { + + if (shutdownInProgress) { + return; // Ignore subsequent presses + } else { + // Disable logging suppression + appConfig.suppressLoggingOutput = false; + // Flag we are shutting down + shutdownInProgress = true; - do { try { - // starting a sync - logOutputMessage = "################################################## NEW SYNC ##################################################"; - if (displaySyncOptions) { - log.log(logOutputMessage); - } else { - log.vdebug(logOutputMessage); - } - if (singleDirectory != ""){ - // we were requested to sync a single directory - log.vlog("Syncing changes from this selected path: ", singleDirectory); - if (uploadOnly){ - // Upload Only of selected single directory - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected local path only - NOT syncing data changes from OneDrive ..."); - sync.scanForDifferences(localPath); - } else { - // No upload only - if (localFirst) { - // Local First - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected local path first before downloading changes from OneDrive ..."); - sync.scanForDifferences(localPath); - sync.applyDifferencesSingleDirectory(remotePath); - } else { - // OneDrive First - if (logLevel < 
MONITOR_LOG_QUIET) log.log("Syncing changes from selected OneDrive path ..."); - sync.applyDifferencesSingleDirectory(remotePath); - - // Is this a --download-only --cleanup-local-files request? - // If yes, scan for local changes - but --cleanup-local-files is being used, a further flag will trigger local file deletes rather than attempt to upload files to OneDrive - if (cleanupLocalFiles) { - // --download-only and --cleanup-local-files were passed in - log.log("Searching local filesystem for extra files and folders which need to be removed"); - sync.scanForDifferencesFilesystemScan(localPath); - } else { - // is this a --download-only request? - if (!downloadOnly) { - // process local changes - sync.scanForDifferences(localPath); - // ensure that the current remote state is updated locally - sync.applyDifferencesSingleDirectory(remotePath); - } - } - } - } - } else { - // no single directory sync - if (uploadOnly){ - // Upload Only of entire sync_dir - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from local path only - NOT syncing data changes from OneDrive ..."); - sync.scanForDifferences(localPath); - } else { - // No upload only - string syncCallLogOutput; - if (localFirst) { - // sync local files first before downloading from OneDrive - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from local path first before downloading changes from OneDrive ..."); - sync.scanForDifferences(localPath); - // if syncListConfiguredFullScanOverride = true - if (syncListConfiguredFullScanOverride) { - // perform a full walk of OneDrive objects - sync.applyDifferences(syncListConfiguredFullScanOverride); - } else { - // perform a walk based on if a full scan is required - sync.applyDifferences(fullScanRequired); - } - } else { - // sync from OneDrive first before uploading files to OneDrive - if ((logLevel < MONITOR_LOG_SILENT) || (fullScanRequired)) log.log("Syncing changes and items from OneDrive ..."); - - // For the initial sync, always use the delta link so that we capture all the right delta changes including adds, moves & deletes - logOutputMessage = "Initial Scan: Call OneDrive Delta API for delta changes as compared to last successful sync."; - syncCallLogOutput = "Calling sync.applyDifferences(false);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(false); - - // Is this a --download-only --cleanup-local-files request? - // If yes, scan for local changes - but --cleanup-local-files is being used, a further flag will trigger local file deletes rather than attempt to upload files to OneDrive - if (cleanupLocalFiles) { - // --download-only and --cleanup-local-files were passed in - log.log("Searching local filesystem for extra files and folders which need to be removed"); - sync.scanForDifferencesFilesystemScan(localPath); - } else { - // is this a --download-only request? 
- if (!downloadOnly) { - // process local changes walking the entire path checking for changes - // in monitor mode all local changes are captured via inotify - // thus scanning every 'monitor_interval' (default 300 seconds) for local changes is excessive and not required - logOutputMessage = "Process local filesystem (sync_dir) for file changes as compared to database entries"; - syncCallLogOutput = "Calling sync.scanForDifferences(localPath);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - - SysTime startIntegrityCheckProcessingTime = Clock.currTime(); - if (sync.getPerformanceProcessingOutput()) { - // performance timing for DB and file system integrity check - start - writeln("============================================================"); - writeln("Start Integrity Check Processing Time: ", startIntegrityCheckProcessingTime); - } - - // What sort of local scan do we want to do? - // In --monitor mode, when performing the DB scan, a race condition occurs where by if a file or folder is moved during this process - // the inotify event is discarded once performSync() is finished (see m.update(false) above), so these events need to be handled - // This can be remediated by breaking the DB and file system scan into separate processes, and handing any applicable inotify events in between - if (!monitorEnabled) { - // --synchronize in use - log.log("Performing a database consistency and integrity check on locally stored data ... "); - // standard process flow - sync.scanForDifferences(localPath); - } else { - // --monitor in use - // Use individual calls with inotify checks between to avoid a race condition between these 2 functions - // Database scan integrity check to compare DB data vs actual content on disk to ensure what we think is local, is local - // and that the data 'hash' as recorded in the DB equals the hash of the actual content - // This process can be extremely expensive time and CPU processing wise - // - // fullScanRequired is set to TRUE when the application starts up, or the config option 'monitor_fullscan_frequency' count is reached - // By default, 'monitor_fullscan_frequency' = 12, and 'monitor_interval' = 300, meaning that by default, a full database consistency check - // is done once an hour. - // - // To change this behaviour adjust 'monitor_interval' and 'monitor_fullscan_frequency' to desired values in the application config file - if (fullScanRequired) { - log.log("Performing a database consistency and integrity check on locally stored data due to fullscan requirement ... "); - sync.scanForDifferencesDatabaseScan(localPath); - // handle any inotify events that occured 'whilst' we were scanning the database - m.update(true); - } else { - log.vdebug("NOT performing Database Integrity Check .. 
fullScanRequired = FALSE"); - m.update(true); - } - - // Filesystem walk to find new files not uploaded - log.vdebug("Searching local filesystem for new data"); - sync.scanForDifferencesFilesystemScan(localPath); - // handle any inotify events that occured 'whilst' we were scanning the local filesystem - m.update(true); - } - - SysTime endIntegrityCheckProcessingTime = Clock.currTime(); - if (sync.getPerformanceProcessingOutput()) { - // performance timing for DB and file system integrity check - finish - writeln("End Integrity Check Processing Time: ", endIntegrityCheckProcessingTime); - writeln("Elapsed Function Processing Time: ", (endIntegrityCheckProcessingTime - startIntegrityCheckProcessingTime)); - writeln("============================================================"); - } - - // At this point, all OneDrive changes / local changes should be uploaded and in sync - // This MAY not be the case when using sync_list, thus a full walk of OneDrive ojects is required - - // --synchronize & no sync_list : fullScanRequired = false, syncListConfiguredFullScanOverride = false - // --synchronize & sync_list in use : fullScanRequired = false, syncListConfiguredFullScanOverride = true + assumeNoGC ( () { + addLogEntry("\nReceived termination signal, initiating application cleanup"); + // Wait for all parallel jobs that depend on the database to complete + addLogEntry("Waiting for any existing upload|download process to complete"); + shutdownSyncEngine(); + + // Perform the shutdown process + performSynchronisedExitProcess("SIGINT-SIGTERM-HANDLER"); + })(); + } catch (Exception e) { + // Any output here will cause a GC allocation + // - Error: `@nogc` function `main.exitHandler` cannot call non-@nogc function `std.stdio.writeln!string.writeln` + // - Error: cannot use operator `~` in `@nogc` function `main.exitHandler` + // writeln("Exception during shutdown: " ~ e.msg); + } + // Exit the process with the provided exit code + exit(value); + } +} - // --monitor loops around 12 iterations. 
On the 1st loop, sets fullScanRequired = true, syncListConfiguredFullScanOverride = true if requried
+// Handle application exit
+void performSynchronisedExitProcess(string scopeCaller = null) {
+ synchronized {
+ // Perform cleanup and shutdown of various services and resources
+ try {
+ // Log who called this function
+ addLogEntry("performSynchronisedExitProcess called by: " ~ scopeCaller, ["debug"]);
+ // Shutdown the OneDrive Webhook instance
+ shutdownOneDriveWebhook();
+ // Shutdown the client side filtering objects
+ shutdownSelectiveSync();
+ // Destroy all 'curl' instances
+ destroyCurlInstances();
+ // Shutdown the sync engine
+ shutdownSyncEngine();
+ // Shutdown any local filesystem monitoring
+ shutdownFilesystemMonitor();
+ // Shutdown the database
+ shutdownDatabase();
+ // Shutdown the application configuration objects
+ shutdownAppConfig();
+ } catch (Exception e) {
+ addLogEntry("Error during performSynchronisedExitProcess: " ~ e.toString(), ["error"]);
+ }
+
+ // Finalise all logging and destroy log buffer
+ if (loggingActive()) {
+ // Shutdown application logging
+ shutdownApplicationLogging();
+ }
+ }
+}

- // --monitor & no sync_list (loop #1) : fullScanRequired = true, syncListConfiguredFullScanOverride = false
- // --monitor & no sync_list (loop #2 - #12) : fullScanRequired = false, syncListConfiguredFullScanOverride = false
- // --monitor & sync_list in use (loop #1) : fullScanRequired = true, syncListConfiguredFullScanOverride = true
- // --monitor & sync_list in use (loop #2 - #12) : fullScanRequired = false, syncListConfiguredFullScanOverride = false

+void shutdownOneDriveWebhook() {
+ if (oneDriveWebhook !is null) {
+ addLogEntry("Shutting down OneDrive Webhook instance", ["debug"]);
+ oneDriveWebhook.stop();
+ object.destroy(oneDriveWebhook);
+ oneDriveWebhook = null;
+ addLogEntry("Shutdown of OneDrive Webhook instance is complete", ["debug"]);
+ }
+}

- // Do not perform a full walk of the OneDrive objects
- if ((!fullScanRequired) && (!syncListConfiguredFullScanOverride)){
- logOutputMessage = "Final True-Up: Do not perform a full walk of the OneDrive objects - not required";
- syncCallLogOutput = "Calling sync.applyDifferences(false);";
- if (displaySyncOptions) {
- log.log(logOutputMessage);
- log.log(syncCallLogOutput);
- } else {
- log.vdebug(logOutputMessage);
- log.vdebug(syncCallLogOutput);
- }
- sync.applyDifferences(false);
- }

+void shutdownFilesystemMonitor() {
+ if (filesystemMonitor !is null) {
+ addLogEntry("Shutting down Filesystem Monitoring instance", ["debug"]);
+ filesystemMonitor.shutdown();
+ object.destroy(filesystemMonitor);
+ filesystemMonitor = null;
+ addLogEntry("Shut down of Filesystem Monitoring instance is complete", ["debug"]);
+ }
+}

- // Perform a full walk of OneDrive objects because sync_list is in use / or trigger was set in --monitor loop
- if ((!fullScanRequired) && (syncListConfiguredFullScanOverride)){
- logOutputMessage = "Final True-Up: Perform a full walk of OneDrive objects because sync_list is in use / or trigger was set in --monitor loop";
- syncCallLogOutput = "Calling sync.applyDifferences(true);";
- if (displaySyncOptions) {
- log.log(logOutputMessage);
- log.log(syncCallLogOutput);
- } else {
- log.vdebug(logOutputMessage);
- log.vdebug(syncCallLogOutput);
- }
- sync.applyDifferences(true);
- }

+void shutdownSelectiveSync() {
+ if (selectiveSync !is null) {
+ addLogEntry("Shutting down Client Side Filtering instance", ["debug"]);
+ selectiveSync.shutdown();
+ object.destroy(selectiveSync);
+ selectiveSync = null;
+ addLogEntry("Shut down of Client Side Filtering instance is complete", ["debug"]); + } +} - // Perform a full walk of OneDrive objects because a full scan was required - if ((fullScanRequired) && (!syncListConfiguredFullScanOverride)){ - logOutputMessage = "Final True-Up: Perform a full walk of OneDrive objects because a full scan was required"; - syncCallLogOutput = "Calling sync.applyDifferences(true);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(true); - } +void shutdownSyncEngine() { + if (syncEngineInstance !is null) { + addLogEntry("Shutting down Sync Engine instance", ["debug"]); + syncEngineInstance.shutdown(); // Make sure any running thread completes first + object.destroy(syncEngineInstance); + syncEngineInstance = null; + addLogEntry("Shut down Sync Engine instance is complete", ["debug"]); + } +} - // Perform a full walk of OneDrive objects because a full scan was required and sync_list is in use and trigger was set in --monitor loop - if ((fullScanRequired) && (syncListConfiguredFullScanOverride)){ - logOutputMessage = "Final True-Up: Perform a full walk of OneDrive objects because a full scan was required and sync_list is in use and trigger was set in --monitor loop"; - syncCallLogOutput = "Calling sync.applyDifferences(true);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(true); - } - } - } - } - } - } +void shutdownDatabase() { + if (itemDB !is null && itemDB.isDatabaseInitialised()) { + addLogEntry("Shutting down Database instance", ["debug"]); + addLogEntry("Performing a database vacuum" , ["debug"]); + itemDB.performVacuum(); + addLogEntry("Database vacuum is complete" , ["debug"]); + object.destroy(itemDB); + itemDB = null; + addLogEntry("Shut down Database instance is complete", ["debug"]); + } +} - // sync is complete - logOutputMessage = "################################################ SYNC COMPLETE ###############################################"; - if (displaySyncOptions) { - log.log(logOutputMessage); - } else { - log.vdebug(logOutputMessage); - } +void shutdownAppConfig() { + if (appConfig !is null) { + addLogEntry("Shutting down Application Configuration instance", ["debug"]); + if (dryRun) { + // We were running with --dry-run , clean up the applicable database + cleanupDryRunDatabaseFiles(runtimeDatabaseFile); + } + object.destroy(appConfig); + appConfig = null; + addLogEntry("Shut down of Application Configuration instance is complete", ["debug"]); + } +} - count = -1; - } catch (Exception e) { - if (++count == 3) { - log.log("Giving up on sync after three attempts: ", e.msg); - throw e; - } else - log.log("Retry sync count: ", count, ": ", e.msg); - } - } while (count != -1); +void destroyCurlInstances() { + CurlEngine.destroyAllCurlInstances(); } -// getting around the @nogc problem -// https://p0nce.github.io/d-idioms/#Bypassing-@nogc -auto assumeNoGC(T) (T t) if (isFunctionPointer!T || isDelegate!T) -{ - enum attrs = functionAttributes!T | FunctionAttribute.nogc; - return cast(SetFunctionAttributes!(T, functionLinkage!T, attrs)) t; +void shutdownApplicationLogging() { + // Log that we are exitintg + addLogEntry("Application is exiting.", ["debug"]); + 
addLogEntry("#######################################################################################################################################", ["logFileOnly"]); + // Destroy the shared logging buffer + (cast() logBuffer).shutdown(); + object.destroy(logBuffer); } -extern(C) nothrow @nogc @system void exitHandler(int value) { - try { - assumeNoGC ( () { - log.log("Got termination signal, performing clean up"); - // if initialised, shut down the HTTP instance - if (onedriveInitialised) { - log.log("Shutting down the HTTP instance"); - oneDrive.shutdown(); - } - // was itemDb initialised? - if (itemDb.isDatabaseInitialised()) { - // Make sure the .wal file is incorporated into the main db before we exit - log.log("Shutting down db connection and merging temporary data"); - itemDb.performVacuum(); - destroy(itemDb); - } - })(); - } catch(Exception e) {} - exit(0); +string compilerDetails() { + version(DigitalMars) enum compiler = "DMD"; + else version(LDC) enum compiler = "LDC"; + else version(GNU) enum compiler = "GDC"; + else enum compiler = "Unknown compiler"; + string compilerString = compiler ~ " " ~ to!string(__VERSION__); + return compilerString; } diff --git a/src/monitor.d b/src/monitor.d index 06aac0d7a..882b2ff8f 100644 --- a/src/monitor.d +++ b/src/monitor.d @@ -1,87 +1,345 @@ -import core.sys.linux.sys.inotify; +// What is this module called? +module monitor; + +// What does this module require to function? import core.stdc.errno; -import core.sys.posix.poll, core.sys.posix.unistd; -import std.exception, std.file, std.path, std.regex, std.stdio, std.string, std.algorithm; import core.stdc.stdlib; +import core.sys.linux.sys.inotify; +import core.sys.posix.poll; +import core.sys.posix.unistd; +import core.sys.posix.sys.select; +import core.thread; +import core.time; +import std.algorithm; +import std.concurrency; +import std.exception; +import std.file; +import std.path; +import std.process; +import std.regex; +import std.stdio; +import std.string; +import std.conv; + +// What other modules that we have created do we need to import? 
import config; -import selective; import util; -static import log; +import log; +import clientSideFiltering; -// relevant inotify events +// Relevant inotify events private immutable uint32_t mask = IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_MOVE | IN_IGNORED | IN_Q_OVERFLOW; -class MonitorException: ErrnoException -{ - @safe this(string msg, string file = __FILE__, size_t line = __LINE__) - { +class MonitorException: ErrnoException { + @safe this(string msg, string file = __FILE__, size_t line = __LINE__) { super(msg, file, line); } } -final class Monitor -{ - bool verbose; +class MonitorBackgroundWorker { // inotify file descriptor - private int fd; + int fd; + Pipe p; + bool isAlive; + + this() { + isAlive = true; + p = pipe(); + } + + shared void initialise() { + fd = inotify_init(); + if (fd < 0) throw new MonitorException("inotify_init failed"); + } + + // Add this path to be monitored + shared int addInotifyWatch(string pathname) { + int wd = inotify_add_watch(fd, toStringz(pathname), mask); + if (wd < 0) { + if (errno() == ENOSPC) { + // Get the current value + ulong maxInotifyWatches = to!ulong(strip(readText("/proc/sys/fs/inotify/max_user_watches"))); + addLogEntry("The user limit on the total number of inotify watches has been reached."); + addLogEntry("Your current limit of inotify watches is: " ~ to!string(maxInotifyWatches)); + addLogEntry("It is recommended that you change the max number of inotify watches to at least double your existing value."); + addLogEntry("To change the current max number of watches to " ~ to!string((maxInotifyWatches * 2)) ~ " run:"); + addLogEntry("EXAMPLE: sudo sysctl fs.inotify.max_user_watches=" ~ to!string((maxInotifyWatches * 2))); + } + if (errno() == 13) { + addLogEntry("WARNING: inotify_add_watch failed - permission denied: " ~ pathname, ["verbose"]); + } + // Flag any other errors + addLogEntry("ERROR: inotify_add_watch failed: " ~ pathname); + return wd; + } + + // Add path to inotify watch - required regardless of whether this is a '.folder' or 'folder' + addLogEntry("inotify_add_watch successfully added for: " ~ pathname, ["debug"]); + + // Do we log that we are monitoring this directory? + if (isDir(pathname)) { + // Log that this directory is being monitored + addLogEntry("Monitoring directory: " ~ pathname, ["verbose"]); + } + return wd; + } + + shared int removeInotifyWatch(int wd) { + return inotify_rm_watch(fd, wd); + } + + shared void watch(Tid callerTid) { + // On failure, send -1 to caller + int res; + + // wait for the caller to be ready + receiveOnly!int(); + + while (isAlive) { + fd_set fds; + FD_ZERO (&fds); + FD_SET(fd, &fds); + // Listen for messages from the caller + FD_SET((cast()p).readEnd.fileno, &fds); + + res = select(FD_SETSIZE, &fds, null, null, null); + + if(res == -1) { + if(errno() == EINTR) { + // Received an interrupt signal but no events are available + // simply watch again + } else { + // Error occurred, tell caller to terminate.
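+					// (a -1 message tells the owning thread that select() failed fatally;
+					// the value 1, sent below, means inotify events are ready to be read)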
+ callerTid.send(-1); + break; + } + } else { + // Wake up caller + callerTid.send(1); + + // wait for the caller to be ready + if (isAlive) + isAlive = receiveOnly!bool(); + } + } + } + + shared void interrupt() { + isAlive = false; + (cast()p).writeEnd.writeln("done"); + (cast()p).writeEnd.flush(); + } + + shared void shutdown() { + isAlive = false; + if (fd > 0) { + close(fd); + fd = 0; + (cast()p).close(); + } + } +} + +void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid) { + try { + worker.watch(callerTid); + } catch (OwnerTerminated error) { + // the caller has terminated + worker.shutdown(); + } +} + +enum ActionType { + moved, + deleted, + changed, + createDir +} + +struct Action { + ActionType type; + bool skipped; + string src; + string dst; +} + +struct ActionHolder { + Action[] actions; + size_t[string] srcMap; + + void append(ActionType type, string src, string dst=null) { + size_t[] pendingTargets; + switch (type) { + case ActionType.changed: + if (src in srcMap && actions[srcMap[src]].type == ActionType.changed) { + // skip duplicate operations + return; + } + break; + case ActionType.createDir: + break; + case ActionType.deleted: + if (src in srcMap) { + size_t pendingTarget = srcMap[src]; + // Skip pending operations that would need to read a local file that no longer exists + switch (actions[pendingTarget].type) { + case ActionType.changed: + case ActionType.createDir: + actions[srcMap[src]].skipped = true; + srcMap.remove(src); + break; + default: + break; + } + } + break; + case ActionType.moved: + for(int i = 0; i < actions.length; i++) { + // Only match the latest pending operation for a given source path + if (actions[i].src in srcMap) { + switch (actions[i].type) { + case ActionType.changed: + case ActionType.createDir: + // check if the move source is a path prefix of the pending target + string prefix = src ~ "/"; + string target = actions[i].src; + if (prefix[0] != '.') + prefix = "./" ~ prefix; + if (target[0] != '.') + target = "./" ~ target; + string comm = commonPrefix(prefix, target); + if (src == actions[i].src || comm.length == prefix.length) { + // Defer pending operations that read the moved local file until after the move has been applied online + pendingTargets ~= i; + actions[i].skipped = true; + srcMap.remove(actions[i].src); + if (comm.length == target.length) + actions[i].src = dst; + else + actions[i].src = dst ~ target[comm.length - 1 .. 
target.length]; + } + break; + default: + break; + } + } + } + break; + default: + break; + } + actions ~= Action(type, false, src, dst); + srcMap[src] = actions.length - 1; + + foreach (pendingTarget; pendingTargets) { + actions ~= actions[pendingTarget]; + actions[$-1].skipped = false; + srcMap[actions[$-1].src] = actions.length - 1; + } + } +} + +final class Monitor { + // Class variables + ApplicationConfig appConfig; + ClientSideFiltering selectiveSync; + + // Are we verbose in logging output + bool verbose = false; + // skip symbolic links + bool skip_symlinks = false; + // check for .nosync if enabled + bool check_nosync = false; + // check if initialised + bool initialised = false; + // Worker Tid + Tid workerTid; + + // Configure Private Class Variables + shared(MonitorBackgroundWorker) worker; // map every inotify watch descriptor to its directory private string[int] wdToDirName; // map the inotify cookies of move_from events to their path private string[int] cookieToPath; // buffer to receive the inotify events private void[] buffer; - // skip symbolic links - bool skip_symlinks; - // check for .nosync if enabled - bool check_nosync; - - private SelectiveSync selectiveSync; + // Configure function delegates void delegate(string path) onDirCreated; - void delegate(string path) onFileChanged; + void delegate(string[] path) onFileChanged; void delegate(string path) onDelete; void delegate(string from, string to) onMove; + + // List of paths that were moved, not deleted + bool[string] movedNotDeleted; - this(SelectiveSync selectiveSync) - { - assert(selectiveSync); + ActionHolder actionHolder; + + // Configure the class variable to consume the application configuration including selective sync + this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) { + this.appConfig = appConfig; this.selectiveSync = selectiveSync; } - - void init(Config cfg, bool verbose, bool skip_symlinks, bool check_nosync) - { - this.verbose = verbose; - this.skip_symlinks = skip_symlinks; - this.check_nosync = check_nosync; + + // The destructor should only clean up resources owned directly by this instance + ~this() { + object.destroy(worker); + } + + // Initialise the monitor class + void initialise() { + // Configure the variables + skip_symlinks = appConfig.getValueBool("skip_symlinks"); + check_nosync = appConfig.getValueBool("check_nosync"); + if (appConfig.getValueLong("verbose") > 0) { + verbose = true; + } assert(onDirCreated && onFileChanged && onDelete && onMove); - fd = inotify_init(); - if (fd < 0) throw new MonitorException("inotify_init failed"); if (!buffer) buffer = new void[4096]; - + worker = cast(shared) new MonitorBackgroundWorker; + worker.initialise(); + // from which point do we start watching for changes? 
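+		// e.g. with a (hypothetical) config entry 'single_directory = "Documents"' only './Documents' is watched;
+		// otherwise the entire sync directory ('.') is watched recursively via addRecursive() below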
string monitorPath; - if (cfg.getValueString("single_directory") != ""){ - // single directory in use, monitor only this - monitorPath = "./" ~ cfg.getValueString("single_directory"); + if (appConfig.getValueString("single_directory") != ""){ + // single directory in use, monitor only this path + monitorPath = "./" ~ appConfig.getValueString("single_directory"); } else { // default monitorPath = "."; } addRecursive(monitorPath); + + // Start monitoring + workerTid = spawn(&startMonitorJob, worker, thisTid); + + initialised = true; + } + + // Communication with worker + void send(bool isAlive) { + workerTid.send(isAlive); } - void shutdown() - { - if (fd > 0) close(fd); + // Shutdown the monitor class + void shutdown() { + if(!initialised) + return; + initialised = false; + // Release all resources + removeAll(); + // Notify the worker that the monitor has been shutdown + worker.interrupt(); + send(false); wdToDirName = null; } - private void addRecursive(string dirname) - { + // Recursively add this path to be monitored + private void addRecursive(string dirname) { // skip non existing/disappeared items if (!exists(dirname)) { - log.vlog("Not adding non-existing/disappeared directory: ", dirname); + addLogEntry("Not adding non-existing/disappeared directory: " ~ dirname, ["verbose"]); return; } @@ -93,7 +351,7 @@ final class Monitor if (isDir(dirname)) { if (selectiveSync.isDirNameExcluded(dirname.strip('.'))) { // dont add a watch for this item - log.vdebug("Skipping monitoring due to skip_dir match: ", dirname); + addLogEntry("Skipping monitoring due to skip_dir match: " ~ dirname, ["debug"]); return; } } @@ -103,14 +361,14 @@ final class Monitor // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched if (selectiveSync.isFileNameExcluded(dirname.strip('.'))) { // dont add a watch for this item - log.vdebug("Skipping monitoring due to skip_file match: ", dirname); + addLogEntry("Skipping monitoring due to skip_file match: " ~ dirname, ["debug"]); return; } } - // is the path exluded by sync_list? + // is the path excluded by sync_list? if (selectiveSync.isPathExcludedViaSyncList(buildNormalizedPath(dirname))) { // dont add a watch for this item - log.vdebug("Skipping monitoring due to sync_list match: ", dirname); + addLogEntry("Skipping monitoring due to sync_list match: " ~ dirname, ["debug"]); return; } } @@ -127,24 +385,36 @@ final class Monitor // Do we need to check for .nosync? Only if check_nosync is true if (check_nosync) { if (exists(buildNormalizedPath(dirname) ~ "/.nosync")) { - log.vlog("Skipping watching path - .nosync found & --check-for-nosync enabled: ", buildNormalizedPath(dirname)); + addLogEntry("Skipping watching path - .nosync found & --check-for-nosync enabled: " ~ buildNormalizedPath(dirname), ["verbose"]); + return; + } + } + + if (isDir(dirname)) { + // This is a directory + // is the path excluded if skip_dotfiles configured and path is a .folder? 
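+				// e.g. with 'skip_dotfiles = "true"' configured, a directory such as './.config' never receives a
+				// watch, so no inotify events are generated beneath it ('.config' here is purely illustrative)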
+ if ((selectiveSync.getSkipDotfiles()) && (isDotFile(dirname))) { + // don't add a watch for this directory + return; + } + } // passed all potential exclusions // add inotify watch for this path / directory / file - log.vdebug("Calling add() for this dirname: ", dirname); - add(dirname); + addLogEntry("Calling worker.addInotifyWatch() for this dirname: " ~ dirname, ["debug"]); + int wd = worker.addInotifyWatch(dirname); + if (wd > 0) { + wdToDirName[wd] = buildNormalizedPath(dirname) ~ "/"; + } - // if this is a directory, recursivly add this path + // if this is a directory, recursively add this path if (isDir(dirname)) { // try and get all the directory entities for this path try { auto pathList = dirEntries(dirname, SpanMode.shallow, false); foreach(DirEntry entry; pathList) { if (entry.isDir) { - log.vdebug("Calling addRecursive() for this directory: ", entry.name); + addLogEntry("Calling addRecursive() for this directory: " ~ entry.name, ["debug"]); addRecursive(entry.name); } } @@ -158,12 +428,12 @@ final class Monitor // Need to check for: Failed to stat file in error message if (canFind(e.msg, "Failed to stat file")) { // File system access issue - log.error("ERROR: The local file system returned an error with the following message:"); - log.error(" Error Message: ", e.msg); - log.error("ACCESS ERROR: Please check your UID and GID access to this file, as the permissions on this file is preventing this application to read it"); - log.error("\nFATAL: Exiting application to avoid deleting data due to local file system access issues\n"); - // Must exit here - exit(-1); + addLogEntry("ERROR: The local file system returned an error with the following message:"); + addLogEntry(" Error Message: " ~ e.msg); + addLogEntry("ACCESS ERROR: Please check your UID and GID access to this file, as the permissions on this file are preventing this application from reading it"); + addLogEntry("\nFATAL: Forcing application exit to avoid deleting data due to local file system access issues\n"); + // Must force exit here, allowing logging to complete + forceExit(); } else { // some other error displayFileSystemErrorMessage(e.msg, getFunctionName!({})); @@ -173,219 +443,238 @@ final class Monitor } } - private void add(string pathname) - { - int wd = inotify_add_watch(fd, toStringz(pathname), mask); - if (wd < 0) { - if (errno() == ENOSPC) { - log.log("The user limit on the total number of inotify watches has been reached."); - log.log("To see the current max number of watches run:"); - log.log("sysctl fs.inotify.max_user_watches"); - log.log("To change the current max number of watches to 524288 run:"); - log.log("sudo sysctl fs.inotify.max_user_watches=524288"); - } - if (errno() == 13) { - if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) { - // no misleading output that we could not add a watch due to permission denied - return; - } else { - log.vlog("WARNING: inotify_add_watch failed - permission denied: ", pathname); - return; - } - } - // Flag any other errors - log.error("ERROR: inotify_add_watch failed: ", pathname); - return; - } - - // Add path to inotify watch - required regardless if a '.folder' or 'folder' - wdToDirName[wd] = buildNormalizedPath(pathname) ~ "/"; - log.vdebug("inotify_add_watch successfully added for: ", pathname); - - // Do we log that we are monitoring this directory? - if (isDir(pathname)) { - // This is a directory - // is the path exluded if skip_dotfiles configured and path is a .folder?
- if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) { - // no misleading output that we are monitoring this directory - return; - } - // Log that this is directory is being monitored - log.vlog("Monitor directory: ", pathname); + // Remove a watch descriptor + private void removeAll() { + string[int] copy = wdToDirName.dup; + foreach (wd, path; copy) { + remove(wd); } } - // remove a watch descriptor - private void remove(int wd) - { + private void remove(int wd) { assert(wd in wdToDirName); - int ret = inotify_rm_watch(fd, wd); + int ret = worker.removeInotifyWatch(wd); if (ret < 0) throw new MonitorException("inotify_rm_watch failed"); - log.vlog("Monitored directory removed: ", wdToDirName[wd]); + addLogEntry("Monitored directory removed: " ~ to!string(wdToDirName[wd]), ["verbose"]); wdToDirName.remove(wd); } - // remove the watch descriptors associated to the given path - private void remove(const(char)[] path) - { + // Remove the watch descriptors associated to the given path + private void remove(const(char)[] path) { path ~= "/"; foreach (wd, dirname; wdToDirName) { if (dirname.startsWith(path)) { - int ret = inotify_rm_watch(fd, wd); + int ret = worker.removeInotifyWatch(wd); if (ret < 0) throw new MonitorException("inotify_rm_watch failed"); wdToDirName.remove(wd); - log.vlog("Monitored directory removed: ", dirname); + addLogEntry("Monitored directory removed: " ~ dirname, ["verbose"]); } } } - // return the file path from an inotify event - private string getPath(const(inotify_event)* event) - { + // Return the file path from an inotify event + private string getPath(const(inotify_event)* event) { string path = wdToDirName[event.wd]; if (event.len > 0) path ~= fromStringz(event.name.ptr); - log.vdebug("inotify path event for: ", path); + addLogEntry("inotify path event for: " ~ path, ["debug"]); return path; } - void update(bool useCallbacks = true) - { + // Update + void update(bool useCallbacks = true) { + if(!initialised) + return; + pollfd fds = { - fd: fd, + fd: worker.fd, events: POLLIN }; while (true) { - int ret = poll(&fds, 1, 0); - if (ret == -1) throw new MonitorException("poll failed"); - else if (ret == 0) break; // no events available - - size_t length = read(fd, buffer.ptr, buffer.length); - if (length == -1) throw new MonitorException("read failed"); - - int i = 0; - while (i < length) { - inotify_event *event = cast(inotify_event*) &buffer[i]; - string path; - string evalPath; - // inotify event debug - log.vdebug("inotify event wd: ", event.wd); - log.vdebug("inotify event mask: ", event.mask); - log.vdebug("inotify event cookie: ", event.cookie); - log.vdebug("inotify event len: ", event.len); - log.vdebug("inotify event name: ", event.name); - if (event.mask & IN_ACCESS) log.vdebug("inotify event flag: IN_ACCESS"); - if (event.mask & IN_MODIFY) log.vdebug("inotify event flag: IN_MODIFY"); - if (event.mask & IN_ATTRIB) log.vdebug("inotify event flag: IN_ATTRIB"); - if (event.mask & IN_CLOSE_WRITE) log.vdebug("inotify event flag: IN_CLOSE_WRITE"); - if (event.mask & IN_CLOSE_NOWRITE) log.vdebug("inotify event flag: IN_CLOSE_NOWRITE"); - if (event.mask & IN_MOVED_FROM) log.vdebug("inotify event flag: IN_MOVED_FROM"); - if (event.mask & IN_MOVED_TO) log.vdebug("inotify event flag: IN_MOVED_TO"); - if (event.mask & IN_CREATE) log.vdebug("inotify event flag: IN_CREATE"); - if (event.mask & IN_DELETE) log.vdebug("inotify event flag: IN_DELETE"); - if (event.mask & IN_DELETE_SELF) log.vdebug("inotify event flag: IN_DELETE_SELF"); - if 
(event.mask & IN_MOVE_SELF) log.vdebug("inotify event flag: IN_MOVE_SELF"); - if (event.mask & IN_UNMOUNT) log.vdebug("inotify event flag: IN_UNMOUNT"); - if (event.mask & IN_Q_OVERFLOW) log.vdebug("inotify event flag: IN_Q_OVERFLOW"); - if (event.mask & IN_IGNORED) log.vdebug("inotify event flag: IN_IGNORED"); - if (event.mask & IN_CLOSE) log.vdebug("inotify event flag: IN_CLOSE"); - if (event.mask & IN_MOVE) log.vdebug("inotify event flag: IN_MOVE"); - if (event.mask & IN_ONLYDIR) log.vdebug("inotify event flag: IN_ONLYDIR"); - if (event.mask & IN_DONT_FOLLOW) log.vdebug("inotify event flag: IN_DONT_FOLLOW"); - if (event.mask & IN_EXCL_UNLINK) log.vdebug("inotify event flag: IN_EXCL_UNLINK"); - if (event.mask & IN_MASK_ADD) log.vdebug("inotify event flag: IN_MASK_ADD"); - if (event.mask & IN_ISDIR) log.vdebug("inotify event flag: IN_ISDIR"); - if (event.mask & IN_ONESHOT) log.vdebug("inotify event flag: IN_ONESHOT"); - if (event.mask & IN_ALL_EVENTS) log.vdebug("inotify event flag: IN_ALL_EVENTS"); - - // skip events that need to be ignored - if (event.mask & IN_IGNORED) { - // forget the directory associated to the watch descriptor - wdToDirName.remove(event.wd); - goto skip; - } else if (event.mask & IN_Q_OVERFLOW) { - throw new MonitorException("Inotify overflow, events missing"); - } + bool hasNotification = false; + int sleep_counter = 0; + // Batch events up to 5 seconds + while (sleep_counter < 5) { + int ret = poll(&fds, 1, 0); + if (ret == -1) throw new MonitorException("poll failed"); + else if (ret == 0) break; // no events available + hasNotification = true; + size_t length = read(worker.fd, buffer.ptr, buffer.length); + if (length == -1) throw new MonitorException("read failed"); - // if the event is not to be ignored, obtain path - path = getPath(event); - // configure the skip_dir & skip skip_file comparison item - evalPath = path.strip('.'); - - // Skip events that should be excluded based on application configuration - // We cant use isDir or isFile as this information is missing from the inotify event itself - // Thus this causes a segfault when attempting to query this - https://github.com/abraunegg/onedrive/issues/995 - - // Based on the 'type' of event & object type (directory or file) check that path against the 'right' user exclusions - // Directory events should only be compared against skip_dir and file events should only be compared against skip_file - if (event.mask & IN_ISDIR) { - // The event in question contains IN_ISDIR event mask, thus highly likely this is an event on a directory - // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched - if (selectiveSync.isDirNameExcluded(evalPath)) { - // The path to evaluate matches a path that the user has configured to skip + int i = 0; + while (i < length) { + inotify_event *event = cast(inotify_event*) &buffer[i]; + string path; + string evalPath; + + // inotify event debug + addLogEntry("inotify event wd: " ~ to!string(event.wd), ["debug"]); + addLogEntry("inotify event mask: " ~ to!string(event.mask), ["debug"]); + addLogEntry("inotify event cookie: " ~ to!string(event.cookie), ["debug"]); + addLogEntry("inotify event len: " ~ to!string(event.len), ["debug"]); + addLogEntry("inotify event name: " ~ to!string(event.name), ["debug"]); + + // inotify event handling + if (event.mask & IN_ACCESS) addLogEntry("inotify event flag: IN_ACCESS", ["debug"]); + if (event.mask & IN_MODIFY) addLogEntry("inotify event flag: IN_MODIFY", ["debug"]); + if (event.mask & 
IN_ATTRIB) addLogEntry("inotify event flag: IN_ATTRIB", ["debug"]); + if (event.mask & IN_CLOSE_WRITE) addLogEntry("inotify event flag: IN_CLOSE_WRITE", ["debug"]); + if (event.mask & IN_CLOSE_NOWRITE) addLogEntry("inotify event flag: IN_CLOSE_NOWRITE", ["debug"]); + if (event.mask & IN_MOVED_FROM) addLogEntry("inotify event flag: IN_MOVED_FROM", ["debug"]); + if (event.mask & IN_MOVED_TO) addLogEntry("inotify event flag: IN_MOVED_TO", ["debug"]); + if (event.mask & IN_CREATE) addLogEntry("inotify event flag: IN_CREATE", ["debug"]); + if (event.mask & IN_DELETE) addLogEntry("inotify event flag: IN_DELETE", ["debug"]); + if (event.mask & IN_DELETE_SELF) addLogEntry("inotify event flag: IN_DELETE_SELF", ["debug"]); + if (event.mask & IN_MOVE_SELF) addLogEntry("inotify event flag: IN_MOVE_SELF", ["debug"]); + if (event.mask & IN_UNMOUNT) addLogEntry("inotify event flag: IN_UNMOUNT", ["debug"]); + if (event.mask & IN_Q_OVERFLOW) addLogEntry("inotify event flag: IN_Q_OVERFLOW", ["debug"]); + if (event.mask & IN_IGNORED) addLogEntry("inotify event flag: IN_IGNORED", ["debug"]); + if (event.mask & IN_CLOSE) addLogEntry("inotify event flag: IN_CLOSE", ["debug"]); + if (event.mask & IN_MOVE) addLogEntry("inotify event flag: IN_MOVE", ["debug"]); + if (event.mask & IN_ONLYDIR) addLogEntry("inotify event flag: IN_ONLYDIR", ["debug"]); + if (event.mask & IN_DONT_FOLLOW) addLogEntry("inotify event flag: IN_DONT_FOLLOW", ["debug"]); + if (event.mask & IN_EXCL_UNLINK) addLogEntry("inotify event flag: IN_EXCL_UNLINK", ["debug"]); + if (event.mask & IN_MASK_ADD) addLogEntry("inotify event flag: IN_MASK_ADD", ["debug"]); + if (event.mask & IN_ISDIR) addLogEntry("inotify event flag: IN_ISDIR", ["debug"]); + if (event.mask & IN_ONESHOT) addLogEntry("inotify event flag: IN_ONESHOT", ["debug"]); + if (event.mask & IN_ALL_EVENTS) addLogEntry("inotify event flag: IN_ALL_EVENTS", ["debug"]); + + // skip events that need to be ignored + if (event.mask & IN_IGNORED) { + // forget the directory associated to the watch descriptor + wdToDirName.remove(event.wd); + goto skip; + } else if (event.mask & IN_Q_OVERFLOW) { + throw new MonitorException("inotify overflow, inotify events will be missing"); } - } else { - // The event in question missing the IN_ISDIR event mask, thus highly likely this is an event on a file - // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched - if (selectiveSync.isFileNameExcluded(evalPath)) { - // The path to evaluate matches a file that the user has configured to skip + + // if the event is not to be ignored, obtain path + path = getPath(event); + // configure the skip_dir & skip_file comparison item + evalPath = path.strip('.'); + + // Skip events that should be excluded based on application configuration + // We can't use isDir or isFile as this information is missing from the inotify event itself + // Thus this causes a segfault when attempting to query this - https://github.com/abraunegg/onedrive/issues/995 + + // Based on the 'type' of event & object type (directory or file) check that path against the 'right' user exclusions + // Directory events should only be compared against skip_dir and file events should only be compared against skip_file + if (event.mask & IN_ISDIR) { + // The event in question contains the IN_ISDIR event mask, thus highly likely this is an event on a directory + // This is because the user may have specified in skip_dir an exclusive path: '/path' - that is what must be matched + if 
(selectiveSync.isDirNameExcluded(evalPath)) { + // The path to evaluate matches a path that the user has configured to skip + goto skip; + } + } else { + // The event in question is missing the IN_ISDIR event mask, thus highly likely this is an event on a file + // This is because the user may have specified in skip_file an exclusive path: '/path/file' - that is what must be matched + if (selectiveSync.isFileNameExcluded(evalPath)) { + // The path to evaluate matches a file that the user has configured to skip + goto skip; + } + } + + // is the path, excluded via sync_list + if (selectiveSync.isPathExcludedViaSyncList(path)) { + // The path to evaluate matches a directory or file that the user has configured not to include in the sync goto skip; } - } - - // is the path, excluded via sync_list - if (selectiveSync.isPathExcludedViaSyncList(path)) { - // The path to evaluate matches a directory or file that the user has configured not to include in the sync - goto skip; - } - - // handle the inotify events - if (event.mask & IN_MOVED_FROM) { - log.vdebug("event IN_MOVED_FROM: ", path); - cookieToPath[event.cookie] = path; - } else if (event.mask & IN_MOVED_TO) { - log.vdebug("event IN_MOVED_TO: ", path); - if (event.mask & IN_ISDIR) addRecursive(path); - auto from = event.cookie in cookieToPath; - if (from) { - cookieToPath.remove(event.cookie); - if (useCallbacks) onMove(*from, path); - } else { - // item moved from the outside + + // handle the inotify events + if (event.mask & IN_MOVED_FROM) { + addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]); + cookieToPath[event.cookie] = path; + movedNotDeleted[path] = true; // Mark as moved, not deleted + } else if (event.mask & IN_MOVED_TO) { + addLogEntry("event IN_MOVED_TO: " ~ path, ["debug"]); + if (event.mask & IN_ISDIR) addRecursive(path); + auto from = event.cookie in cookieToPath; + if (from) { + cookieToPath.remove(event.cookie); + if (useCallbacks) actionHolder.append(ActionType.moved, *from, path); + movedNotDeleted.remove(*from); // Clear moved status + } else { + // Handle file moved in from outside + if (event.mask & IN_ISDIR) { + if (useCallbacks) actionHolder.append(ActionType.createDir, path); + } else { + if (useCallbacks) actionHolder.append(ActionType.changed, path); + } + } + } else if (event.mask & IN_CREATE) { + addLogEntry("event IN_CREATE: " ~ path, ["debug"]); if (event.mask & IN_ISDIR) { - if (useCallbacks) onDirCreated(path); + addRecursive(path); + if (useCallbacks) actionHolder.append(ActionType.createDir, path); + } + } else if (event.mask & IN_DELETE) { + if (path in movedNotDeleted) { + movedNotDeleted.remove(path); // Ignore delete for moved files } else { - if (useCallbacks) onFileChanged(path); + addLogEntry("event IN_DELETE: " ~ path, ["debug"]); + if (useCallbacks) actionHolder.append(ActionType.deleted, path); } + } else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) { + addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]); + if (useCallbacks) actionHolder.append(ActionType.changed, path); + } else { + addLogEntry("event unhandled: " ~ path, ["debug"]); + assert(0); } - } else if (event.mask & IN_CREATE) { - log.vdebug("event IN_CREATE: ", path); - if (event.mask & IN_ISDIR) { - addRecursive(path); - if (useCallbacks) onDirCreated(path); - } - } else if (event.mask & IN_DELETE) { - log.vdebug("event IN_DELETE: ", path); - if (useCallbacks) onDelete(path); - } else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) { - log.vdebug("event IN_CLOSE_WRITE and ...: ", 
path); - if (useCallbacks) onFileChanged(path); - } else { - log.vdebug("event unhandled: ", path); - assert(0); + + skip: + i += inotify_event.sizeof + event.len; } - skip: - i += inotify_event.sizeof + event.len; + // Sleep for one second to prevent missing fast-changing events. + if (poll(&fds, 1, 0) == 0) { + sleep_counter += 1; + Thread.sleep(dur!"seconds"(1)); + } } - // assume that the items moved outside the watched directory have been deleted + if (!hasNotification) break; + processChanges(); + + // Assume that the items moved outside the watched directory have been deleted foreach (cookie, path; cookieToPath) { - log.vdebug("deleting (post loop): ", path); + addLogEntry("Deleting cookie|watch (post loop): " ~ path, ["debug"]); if (useCallbacks) onDelete(path); remove(path); cookieToPath.remove(cookie); } + // Debug Log that all inotify events are flushed + addLogEntry("inotify events flushed", ["debug"]); } } + + private void processChanges() { + string[] changes; + + foreach(action; actionHolder.actions) { + if (action.skipped) + continue; + switch (action.type) { + case ActionType.changed: + changes ~= action.src; + break; + case ActionType.deleted: + onDelete(action.src); + break; + case ActionType.createDir: + onDirCreated(action.src); + break; + case ActionType.moved: + onMove(action.src, action.dst); + break; + default: + break; + } + } + if (!changes.empty) + onFileChanged(changes); + + object.destroy(actionHolder); + } } diff --git a/src/notifications/dnotify.d b/src/notifications/dnotify.d index 1cc093560..bcd962393 100644 --- a/src/notifications/dnotify.d +++ b/src/notifications/dnotify.d @@ -79,11 +79,11 @@ void init(in char[] name) { alias notify_is_initted is_initted; alias notify_uninit uninit; -static this() { +shared static this() { init(__FILE__); } -static ~this() { +shared static ~this() { uninit(); } diff --git a/src/onedrive.d b/src/onedrive.d index 29d33a46e..d80c40bba 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -1,586 +1,406 @@ +// What is this module called? +module onedrive; + +// What does this module require to function? 
+import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; +import core.memory; +import core.thread; +import std.stdio; +import std.string; +import std.utf; +import std.file; +import std.exception; +import std.regex; +import std.json; +import std.algorithm; import std.net.curl; -import etc.c.curl: CurlOption; -import std.datetime, std.datetime.systime, std.exception, std.file, std.json, std.path; -import std.stdio, std.string, std.uni, std.uri, std.file, std.uuid; -import std.array: split; -import core.atomic : atomicOp; -import core.stdc.stdlib; -import core.thread, std.conv, std.math; -import std.algorithm.searching; -import std.concurrency; -import progress; -import config; -import util; -import arsd.cgi; import std.datetime; -static import log; -shared bool debugResponse = false; -private bool dryRun = false; -private bool simulateNoRefreshTokenFile = false; -private ulong retryAfterValue = 0; - -private immutable { - // Client ID / Application ID (abraunegg) - string clientIdDefault = "d50ca740-c83f-4d1b-b616-12c519384f0c"; - - // Azure Active Directory & Graph Explorer Endpoints - // Global & Defaults - string globalAuthEndpoint = "https://login.microsoftonline.com"; - string globalGraphEndpoint = "https://graph.microsoft.com"; - - // US Government L4 - string usl4AuthEndpoint = "https://login.microsoftonline.us"; - string usl4GraphEndpoint = "https://graph.microsoft.us"; - - // US Government L5 - string usl5AuthEndpoint = "https://login.microsoftonline.us"; - string usl5GraphEndpoint = "https://dod-graph.microsoft.us"; - - // Germany - string deAuthEndpoint = "https://login.microsoftonline.de"; - string deGraphEndpoint = "https://graph.microsoft.de"; - - // China - string cnAuthEndpoint = "https://login.chinacloudapi.cn"; - string cnGraphEndpoint = "https://microsoftgraph.chinacloudapi.cn"; -} - -private { - // Client ID / Application ID - string clientId = clientIdDefault; - - // Default User Agent configuration - string isvTag = "ISV"; - string companyName = "abraunegg"; - // Application name as per Microsoft Azure application registration - string appTitle = "OneDrive Client for Linux"; - - // Default Drive ID - string driveId = ""; +import std.path; +import std.conv; +import std.math; +import std.uri; +import std.array; - // API Query URL's, based on using defaults, but can be updated by config option 'azure_ad_endpoint' - // Authentication - string authUrl = globalAuthEndpoint ~ "/common/oauth2/v2.0/authorize"; - string redirectUrl = globalAuthEndpoint ~ "/common/oauth2/nativeclient"; - string tokenUrl = globalAuthEndpoint ~ "/common/oauth2/v2.0/token"; +// Required for webhooks +import std.uuid; - // Drive Queries - string driveUrl = globalGraphEndpoint ~ "/v1.0/me/drive"; - string driveByIdUrl = globalGraphEndpoint ~ "/v1.0/drives/"; - - // What is 'shared with me' Query - string sharedWithMeUrl = globalGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; - - // Item Queries - string itemByIdUrl = globalGraphEndpoint ~ "/v1.0/me/drive/items/"; - string itemByPathUrl = globalGraphEndpoint ~ "/v1.0/me/drive/root:/"; - - // Office 365 / SharePoint Queries - string siteSearchUrl = globalGraphEndpoint ~ "/v1.0/sites?search"; - string siteDriveUrl = globalGraphEndpoint ~ "/v1.0/sites/"; +// What other modules that we have created do we need to import? 
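+// NOTE: the curlEngine module now encapsulates the libcurl handle setup (timeouts,
+// redirects, debug output) that this module previously configured inline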
+import config; +import log; +import util; +import curlEngine; - // Subscriptions - string subscriptionUrl = globalGraphEndpoint ~ "/v1.0/subscriptions"; -} +// Shared variables between classes +shared bool debugHTTPResponseOutput = false; -class OneDriveException: Exception -{ +// Define the 'OneDriveException' class +class OneDriveException: Exception { // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/errors int httpStatusCode; + const CurlResponse response; JSONValue error; - @safe pure this(int httpStatusCode, string reason, string file = __FILE__, size_t line = __LINE__) - { + this(int httpStatusCode, string reason, const CurlResponse response, string file = __FILE__, size_t line = __LINE__) { this.httpStatusCode = httpStatusCode; - this.error = error; - string msg = format("HTTP request returned status code %d (%s)", httpStatusCode, reason); + this.response = response; + this.error = response.json(); + string msg = format("HTTP request returned status code %d (%s)\n%s", httpStatusCode, reason, toJSON(error, true)); super(msg, file, line); } - this(int httpStatusCode, string reason, ref const JSONValue error, string file = __FILE__, size_t line = __LINE__) - { + this(int httpStatusCode, string reason, string file = __FILE__, size_t line = __LINE__) { this.httpStatusCode = httpStatusCode; - this.error = error; - string msg = format("HTTP request returned status code %d (%s)\n%s", httpStatusCode, reason, toJSON(error, true)); - super(msg, file, line); + this.response = null; + string msg = format("HTTP request returned status code %d (%s)", httpStatusCode, reason); + super(msg, file, line, null); } } -class OneDriveWebhook { - // We need OneDriveWebhook.serve to be a static function, otherwise we would hit the member function - // "requires a dual-context, which is deprecated" warning. The root cause is described here: - // - https://issues.dlang.org/show_bug.cgi?id=5710 - // - https://forum.dlang.org/post/fkyppfxzegenniyzztos@forum.dlang.org - // The problem is deemed a bug and should be fixed in the compilers eventually. The singleton stuff - // could be undone when it is fixed. 
- // - // Following the singleton pattern described here: https://wiki.dlang.org/Low-Lock_Singleton_Pattern - // Cache instantiation flag in thread-local bool - // Thread local - private static bool instantiated_; - - // Thread global - private __gshared OneDriveWebhook instance_; - - private string host; - private ushort port; - private Tid parentTid; - private shared uint count; - - static OneDriveWebhook getOrCreate(string host, ushort port, Tid parentTid) { - if (!instantiated_) { - synchronized(OneDriveWebhook.classinfo) { - if (!instance_) { - instance_ = new OneDriveWebhook(host, port, parentTid); - } - - instantiated_ = true; - } - } - - return instance_; - } - - private this(string host, ushort port, Tid parentTid) { - this.host = host; - this.port = port; - this.parentTid = parentTid; - this.count = 0; - } - - // The static serve() is necessary because spawn() does not like instance methods - static serve() { - // we won't create the singleton instance if it hasn't been created already - // such case is a bug which should crash the program and gets fixed - instance_.serveImpl(); +// Define the 'OneDriveError' class +class OneDriveError: Error { + this(string msg) { + super(msg); } +} - // The static handle() is necessary to work around the dual-context warning mentioned above - private static void handle(Cgi cgi) { - // we won't create the singleton instance if it hasn't been created already - // such case is a bug which should crash the program and gets fixed - instance_.handleImpl(cgi); - } +// Define the 'OneDriveApi' class +class OneDriveApi { + // Class variables that use other classes + ApplicationConfig appConfig; + CurlEngine curlEngine; + CurlResponse response; + + // Class variables + string clientId = ""; + string companyName = ""; + string authUrl = ""; + string redirectUrl = ""; + string tokenUrl = ""; + string driveUrl = ""; + string driveByIdUrl = ""; + string sharedWithMeUrl = ""; + string itemByIdUrl = ""; + string itemByPathUrl = ""; + string siteSearchUrl = ""; + string siteDriveUrl = ""; + string subscriptionUrl = ""; + string tenantId = ""; + string authScope = ""; + const(char)[] refreshToken = ""; + bool dryRun = false; + bool debugResponse = false; + bool keepAlive = false; + + this(ApplicationConfig appConfig) { + // Configure the class variable to consume the application configuration + this.appConfig = appConfig; + this.curlEngine = null; + // Configure the major API Query URL's, based on using application configuration + // These however can be updated by config option 'azure_ad_endpoint', thus handled differently + + // Drive Queries + driveUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.globalGraphEndpoint ~ "/v1.0/drives/"; - private void serveImpl() { - auto server = new RequestServer(host, port); - server.serveEmbeddedHttp!handle(); - } + // What is 'shared with me' Query + sharedWithMeUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; - private void handleImpl(Cgi cgi) { - if (.debugResponse) { - log.log("Webhook request: ", cgi.requestMethod, " ", cgi.requestUri); - if (!cgi.postBody.empty) { - log.log("Webhook post body: ", cgi.postBody); - } - } + // Item Queries + itemByIdUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive/root:/"; - cgi.setResponseContentType("text/plain"); + // Office 365 / SharePoint Queries + siteSearchUrl = appConfig.globalGraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = 
appConfig.globalGraphEndpoint ~ "/v1.0/sites/"; - if ("validationToken" in cgi.get) { - // For validation requests, respond with the validation token passed in the query string - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/webhook-receiver-validation-request - cgi.write(cgi.get["validationToken"]); - log.log("Webhook: handled validation request"); - } else { - // Notifications don't include any information about the changes that triggered them. - // Put a refresh signal in the queue and let the main monitor loop process it. - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/using-webhooks - count.atomicOp!"+="(1); - send(parentTid, to!ulong(count)); - cgi.write("OK"); - log.log("Webhook: sent refresh signal #", count); - } + // Subscriptions + subscriptionUrl = appConfig.globalGraphEndpoint ~ "/v1.0/subscriptions"; } -} - -final class OneDriveApi -{ - private Config cfg; - private string refreshToken, accessToken, subscriptionId; - private SysTime accessTokenExpiration; - private HTTP http; - private OneDriveWebhook webhook; - private SysTime subscriptionExpiration; - private Duration subscriptionExpirationInterval, subscriptionRenewalInterval; - private string notificationUrl; - - // if true, every new access token is printed - bool printAccessToken; - - this(Config cfg) - { - this.cfg = cfg; - http = HTTP(); - // Curl Timeout Handling - // libcurl dns_cache_timeout timeout - http.dnsTimeout = (dur!"seconds"(cfg.getValueLong("dns_timeout"))); - // Timeout for HTTPS connections - http.connectTimeout = (dur!"seconds"(cfg.getValueLong("connect_timeout"))); - // with the following settings we force - // - if there is no data flow for 10min, abort - // - if the download time for one item exceeds 1h, abort - // - // timeout for activity on connection - // this translates into Curl's CURLOPT_LOW_SPEED_TIME - // which says - // It contains the time in number seconds that the - // transfer speed should be below the CURLOPT_LOW_SPEED_LIMIT - // for the library to consider it too slow and abort. - http.dataTimeout = (dur!"seconds"(cfg.getValueLong("data_timeout"))); - // maximum time an operation is allowed to take - // This includes dns resolution, connecting, data transfer, etc. - http.operationTimeout = (dur!"seconds"(cfg.getValueLong("operation_timeout"))); - // What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6 - http.handle.set(CurlOption.ipresolve,cfg.getValueLong("ip_protocol_version")); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only - // Specify how many redirects should be allowed - http.maxRedirects(cfg.defaultMaxRedirects); - - // Do we enable curl debugging? 
- if (cfg.getValueBool("debug_https")) { - http.verbose = true; - .debugResponse = true; - - // Output what options we are using so that in the debug log this can be tracked - log.vdebug("http.dnsTimeout = ", cfg.getValueLong("dns_timeout")); - log.vdebug("http.connectTimeout = ", cfg.getValueLong("connect_timeout")); - log.vdebug("http.dataTimeout = ", cfg.getValueLong("data_timeout")); - log.vdebug("http.operationTimeout = ", cfg.getValueLong("operation_timeout")); - log.vdebug("http.CurlOption.ipresolve = ", cfg.getValueLong("ip_protocol_version")); - log.vdebug("http.maxRedirects = ", cfg.defaultMaxRedirects); + + // The destructor should only clean up resources owned directly by this instance + ~this() { + object.destroy(response); + object.destroy(curlEngine); + response = null; + curlEngine = null; + appConfig = null; + } + + // Initialise the OneDrive API class + bool initialise(bool keepAlive=true) { + // Initialise the curl engine + this.keepAlive = keepAlive; + if (curlEngine is null) { + curlEngine = CurlEngine.getCurlInstance(); + curlEngine.initialise(appConfig.getValueLong("dns_timeout"), appConfig.getValueLong("connect_timeout"), appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version"), keepAlive); } - // Update clientId if application_id is set in config file - if (cfg.getValueString("application_id") != "") { - // an application_id is set in config file - log.vdebug("Setting custom application_id to: " , cfg.getValueString("application_id")); - clientId = cfg.getValueString("application_id"); + // Authorised value to return + bool authorised = false; + + // Did the user specify --dry-run + dryRun = appConfig.getValueBool("dry_run"); + + // Did the user specify --debug-https + debugResponse = appConfig.getValueBool("debug_https"); + // Flag this so if webhooks are being used, it can also be consumed + debugHTTPResponseOutput = appConfig.getValueBool("debug_https"); + + // Set clientId to use the configured 'application_id' + clientId = appConfig.getValueString("application_id"); + if (clientId != appConfig.defaultApplicationId) { + // a custom 'application_id' was set companyName = "custom_application"; } - - // Configure tenant id value, if 'azure_tenant_id' is configured, - // otherwise use the "common" multiplexer - string tenantId = "common"; - if (cfg.getValueString("azure_tenant_id") != "") { + + // Do we have a custom Azure Tenant ID? + if (!appConfig.getValueString("azure_tenant_id").empty) { // Use the value entered by the user - tenantId = cfg.getValueString("azure_tenant_id"); + tenantId = appConfig.getValueString("azure_tenant_id"); + } else { + // set to common + tenantId = "common"; } - + + // Did the user specify a 'drive_id' ? 
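+		// If so, rebase the default /me/drive URLs onto /drives/<drive_id> so every query targets that specific drive,
+		// e.g. a (hypothetical) 'drive_id = "b!AbC"' config value produces driveUrl = <graph endpoint>/v1.0/drives/b!AbC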
+ if (!appConfig.getValueString("drive_id").empty) { + // Update base URLs + driveUrl = driveByIdUrl ~ appConfig.getValueString("drive_id"); + itemByIdUrl = driveUrl ~ "/items"; + itemByPathUrl = driveUrl ~ "/root:/"; + } + + // Configure the authentication scope + if (appConfig.getValueBool("read_only_auth_scope")) { + // a read-only authentication scope has been requested + authScope = "&scope=Files.Read%20Files.Read.All%20Sites.Read.All%20offline_access&response_type=code&prompt=login&redirect_uri="; + } else { + // read-write authentication scopes will be used (default) + authScope = "&scope=Files.ReadWrite%20Files.ReadWrite.All%20Sites.ReadWrite.All%20offline_access&response_type=code&prompt=login&redirect_uri="; + } + // Configure Azure AD endpoints if 'azure_ad_endpoint' is configured - string azureConfigValue = cfg.getValueString("azure_ad_endpoint"); + string azureConfigValue = appConfig.getValueString("azure_ad_endpoint"); switch(azureConfigValue) { case "": if (tenantId == "common") { - log.log("Configuring Global Azure AD Endpoints"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring Global Azure AD Endpoints"); } else { - log.log("Configuring Global Azure AD Endpoints - Single Tenant Application"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring Global Azure AD Endpoints - Single Tenant Application"); } // Authentication - authUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; - tokenUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + authUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + tokenUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; break; case "USL4": - log.log("Configuring Azure AD for US Government Endpoints"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring Azure AD for US Government Endpoints"); // Authentication - authUrl = usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default - log.vdebug("USL4 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + addLogEntry("USL4 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = usl4GraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = usl4GraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = usl4GraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = usl4GraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.usl4GraphEndpoint ~ 
"/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = usl4GraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = usl4GraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = usl4GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = usl4GraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/subscriptions"; break; case "USL5": - log.log("Configuring Azure AD for US Government Endpoints (DOD)"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring Azure AD for US Government Endpoints (DOD)"); // Authentication - authUrl = usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default - log.vdebug("USL5 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + addLogEntry("USL5 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = usl5GraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = usl5GraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = usl5GraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = usl5GraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = usl5GraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = usl5GraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = usl5GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = usl5GraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/subscriptions"; break; case "DE": - log.log("Configuring Azure AD Germany"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring Azure AD Germany"); // Authentication - authUrl = deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.deAuthEndpoint ~ "/" ~ 
tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default - log.vdebug("DE AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + addLogEntry("DE AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = deGraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = deGraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.deGraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = deGraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = deGraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = deGraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = deGraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.deGraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.deGraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = deGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = deGraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.deGraphEndpoint ~ "/v1.0/subscriptions"; break; case "CN": - log.log("Configuring AD China operated by 21Vianet"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring AD China operated by VNET"); // Authentication - authUrl = cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default - log.vdebug("CN AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + addLogEntry("CN AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = cnGraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = cnGraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.cnGraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = cnGraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = cnGraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / 
SharePoint Queries
-			siteSearchUrl = cnGraphEndpoint ~ "/v1.0/sites?search";
-			siteDriveUrl = cnGraphEndpoint ~ "/v1.0/sites/";
+			siteSearchUrl = appConfig.cnGraphEndpoint ~ "/v1.0/sites?search";
+			siteDriveUrl = appConfig.cnGraphEndpoint ~ "/v1.0/sites/";
 			// Shared With Me
-			sharedWithMeUrl = cnGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe";
+			sharedWithMeUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe";
 			// Subscriptions
-			subscriptionUrl = cnGraphEndpoint ~ "/v1.0/subscriptions";
+			subscriptionUrl = appConfig.cnGraphEndpoint ~ "/v1.0/subscriptions";
 			break;
 		// Default - all other entries
 		default:
-			log.log("Unknown Azure AD Endpoint request - using Global Azure AD Endpoints");
+			if (!appConfig.apiWasInitialised) addLogEntry("Unknown Azure AD Endpoint request - using Global Azure AD Endpoints");
+	}
+
+	// Has the application been authenticated?
+	if (!exists(appConfig.refreshTokenFilePath)) {
+		addLogEntry("Application has no 'refresh_token' thus needs to be authenticated", ["debug"]);
+		authorised = authorise();
+	} else {
+		// Try and read the value from the appConfig if it is set, rather than trying to read the value from disk
+		if (!appConfig.refreshToken.empty) {
+			addLogEntry("Read token from appConfig", ["debug"]);
+			refreshToken = strip(appConfig.refreshToken);
+			authorised = true;
+		} else {
+			// Try and read the file from disk
+			try {
+				refreshToken = strip(readText(appConfig.refreshTokenFilePath));
+				// is the refresh_token empty?
+				if (refreshToken.empty) {
+					addLogEntry("RefreshToken exists but is empty: " ~ appConfig.refreshTokenFilePath);
+					authorised = authorise();
+				} else {
+					// existing token not empty
+					authorised = true;
+					// update appConfig.refreshToken
+					appConfig.refreshToken = refreshToken;
+				}
+			} catch (FileException exception) {
+				authorised = authorise();
+			} catch (std.utf.UTFException exception) {
+				// path contains characters which generate a UTF exception
+				addLogEntry("Cannot read refreshToken from: " ~ appConfig.refreshTokenFilePath);
+				addLogEntry(" Error Reason: " ~ exception.msg);
+				authorised = false;
+			}
+		}
+
+		if (refreshToken.empty) {
+			// Problem state: an empty refresh token at this point will cause HTTP 4xx errors on subsequent API calls
+			addLogEntry("refreshToken is empty - subsequent API calls will fail with 4xx errors until the client is re-authenticated");
+		}
+	}
+
+	// Return if we are authorised
+	addLogEntry("Authorised State: " ~ to!string(authorised), ["debug"]);
+	return authorised;
+}
+
+// If the API has been configured correctly, print the items that have been configured
+void debugOutputConfiguredAPIItems() {
 	// Debug output of configured URL's
+	// Application Identification
+	addLogEntry("Configured clientId " ~ clientId, ["debug"]);
+	addLogEntry("Configured userAgent " ~ appConfig.getValueString("user_agent"), ["debug"]);
 	// Authentication
-	log.vdebug("Configured authUrl: ", authUrl);
-	log.vdebug("Configured redirectUrl: ", redirectUrl);
-	log.vdebug("Configured tokenUrl: ", tokenUrl);
-
+	addLogEntry("Configured authScope: " ~ authScope, ["debug"]);
+	addLogEntry("Configured authUrl: " ~ authUrl, ["debug"]);
+	addLogEntry("Configured redirectUrl: " ~ redirectUrl, ["debug"]);
+	addLogEntry("Configured tokenUrl: " ~ tokenUrl, ["debug"]);
 	// Drive Queries
-	log.vdebug("Configured driveUrl: ", driveUrl);
-	log.vdebug("Configured driveByIdUrl: ", driveByIdUrl);
-
+	addLogEntry("Configured driveUrl: " ~ driveUrl, ["debug"]);
+	addLogEntry("Configured driveByIdUrl: " ~ driveByIdUrl, ["debug"]);
 	// Shared With Me
-	log.vdebug("Configured sharedWithMeUrl: ", sharedWithMeUrl);
-
+	addLogEntry("Configured sharedWithMeUrl: " ~ sharedWithMeUrl, ["debug"]);
 	// Item Queries
-	log.vdebug("Configured itemByIdUrl: ", itemByIdUrl);
-	log.vdebug("Configured itemByPathUrl: ", itemByPathUrl);
-
+	addLogEntry("Configured itemByIdUrl: " ~ itemByIdUrl, ["debug"]);
+	addLogEntry("Configured itemByPathUrl: " ~ itemByPathUrl, ["debug"]);
 	// SharePoint Queries
-	log.vdebug("Configured siteSearchUrl: ", siteSearchUrl);
-	log.vdebug("Configured siteDriveUrl: ", siteDriveUrl);
-
-	// Configure the User Agent string
-	if (cfg.getValueString("user_agent") == "") {
-		// Application User Agent string defaults
-		// Comply with OneDrive traffic decoration requirements
-		// https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
-		// - Identify as ISV and include Company Name, App Name separated by a pipe character and then adding Version number separated with a slash character
-		// Note: If you've created an application, the recommendation is to register and use AppID and AppTitle
-		// The issue here is that currently the application is still using the 'skilion' application ID, thus no idea what the AppTitle used was.
-		http.setUserAgent = isvTag ~ "|" ~ companyName ~ "|" ~ appTitle ~ "/" ~ strip(import("version"));
-	} else {
-		// Use the value entered by the user
-		http.setUserAgent = cfg.getValueString("user_agent");
-	}
-
-	// What version of HTTP protocol do we use?
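The question above is answered by the removed block that follows: libcurl 7.62.0 and later prefer HTTP/2 for many operations, and force_http_11 downgrades everything to HTTP/1.1. A minimal sketch of that downgrade using the same libcurl binding the code relies on (the helper name is illustrative, not part of this patch):

    import std.net.curl : HTTP;
    import etc.c.curl : CurlOption;

    // Force HTTP/1.1 for all operations when 'force_http_11' is configured.
    // For libcurl, the http_version value 2 corresponds to CURL_HTTP_VERSION_1_1.
    void applyHttpVersion(ref HTTP http, bool forceHttp11) {
        if (forceHttp11) {
            http.handle.set(CurlOption.http_version, 2); // 2 == HTTP/1.1
        }
        // otherwise leave curl's defaults in place
    }
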
- // Curl >= 7.62.0 defaults to http2 for a significant number of operations - if (cfg.getValueBool("force_http_11")) { - // Downgrade to curl to use HTTP 1.1 for all operations - log.vlog("Downgrading all HTTP operations to HTTP/1.1 due to user configuration"); - // Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1 - http.handle.set(CurlOption.http_version,2); - } else { - // Use curl defaults - log.vlog("Using Curl defaults for all HTTP operations"); - } - - // Configure upload / download rate limits if configured - long userRateLimit = cfg.getValueLong("rate_limit"); - // 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts - // A 0 value means rate is unlimited, and is the curl default - - if (userRateLimit > 0) { - // User configured rate limit - writeln("User Configured Rate Limit: ", userRateLimit); - - // If user provided rate limit is < 131072, flag that this is too low, setting to the minimum of 131072 - if (userRateLimit < 131072) { - // user provided limit too low - log.log("WARNING: User configured rate limit too low for normal application processing and preventing application timeouts. Overriding to default minimum of 131072 (128KB/s)"); - userRateLimit = 131072; - } - - // set rate limit - http.handle.set(CurlOption.max_send_speed_large,userRateLimit); - http.handle.set(CurlOption.max_recv_speed_large,userRateLimit); - } - - // Explicitly set libcurl options - // https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html - // Ensure that nosignal is set to 0 - Setting CURLOPT_NOSIGNAL to 0 makes libcurl ask the system to ignore SIGPIPE signals - http.handle.set(CurlOption.nosignal,0); - // https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html - // Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled - http.handle.set(CurlOption.tcp_nodelay,0); - // https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html - // Ensure that we ARE reusing connections - setting to 0 ensures that we are reusing connections - http.handle.set(CurlOption.forbid_reuse,0); - - // Do we set the dryRun handlers? 
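Before moving on to the dry-run handling below: as a worked example of the rate-limit floor enforced above (131072 bytes/s, i.e. 128 KB/s, with 0 meaning unlimited), the clamp can be expressed as a small helper. A minimal sketch; the function name is illustrative, not part of this patch:

    // Clamp a user-configured rate limit to the minimum required for stable operation
    long effectiveRateLimit(long userRateLimit) {
        enum long minimumRateLimit = 131072; // 128 KB/s - minimum to prevent application timeouts
        if (userRateLimit <= 0) return 0;    // 0 means unlimited, which is the libcurl default
        return (userRateLimit < minimumRateLimit) ? minimumRateLimit : userRateLimit;
    }
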
- if (cfg.getValueBool("dry_run")) { - .dryRun = true; - if (cfg.getValueBool("logout")) { - .simulateNoRefreshTokenFile = true; - } - } - - subscriptionExpiration = Clock.currTime(UTC()); - subscriptionExpirationInterval = dur!"seconds"(cfg.getValueLong("webhook_expiration_interval")); - subscriptionRenewalInterval = dur!"seconds"(cfg.getValueLong("webhook_renewal_interval")); - notificationUrl = cfg.getValueString("webhook_public_url"); + addLogEntry("Configured siteSearchUrl: " ~ siteSearchUrl, ["debug"]); + addLogEntry("Configured siteDriveUrl: " ~ siteDriveUrl, ["debug"]); } - - // Shutdown OneDrive HTTP construct - void shutdown() - { - // delete subscription if there exists any - deleteSubscription(); - - // reset any values to defaults, freeing any set objects - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; - // shut down the curl instance - http.shutdown(); - } - - bool init() - { - static import std.utf; - // detail what we are using for applicaion identification - log.vdebug("clientId = ", clientId); - log.vdebug("companyName = ", companyName); - log.vdebug("appTitle = ", appTitle); - - try { - driveId = cfg.getValueString("drive_id"); - if (driveId.length) { - driveUrl = driveByIdUrl ~ driveId; - itemByIdUrl = driveUrl ~ "/items"; - itemByPathUrl = driveUrl ~ "/root:/"; - } - } catch (Exception e) {} - - if (!.dryRun) { - // original code - try { - refreshToken = readText(cfg.refreshTokenFilePath); - } catch (FileException e) { - try { - return authorize(); - } catch (CurlException e) { - log.error("Cannot authorize with Microsoft OneDrive Service"); - return false; - } - } catch (std.utf.UTFException e) { - // path contains characters which generate a UTF exception - log.error("Cannot read refreshToken from: ", cfg.refreshTokenFilePath); - log.error(" Error Reason:", e.msg); - return false; - } - return true; - } else { - // --dry-run - if (!.simulateNoRefreshTokenFile) { - try { - refreshToken = readText(cfg.refreshTokenFilePath); - } catch (FileException e) { - return authorize(); - } catch (std.utf.UTFException e) { - // path contains characters which generate a UTF exception - log.error("Cannot read refreshToken from: ", cfg.refreshTokenFilePath); - log.error(" Error Reason:", e.msg); - return false; - } - return true; - } else { - // --dry-run & --reauth - return authorize(); - } + + // Release CurlEngine bask to the Curl Engine Pool + void releaseCurlEngine() { + // Log that this was called + addLogEntry("OneDrive API releaseCurlEngine() CALLED", ["debug"]); + // Release curl instance back to the pool + if (curlEngine !is null) { + curlEngine.releaseEngine(); + curlEngine = null; } } - bool authorize() - { - import std.stdio, std.regex; + // Authenticate this client against Microsoft OneDrive API + bool authorise() { + char[] response; - string authScope; - // What authentication scope to use? 
- if (cfg.getValueBool("read_only_auth_scope")) { - // read-only authentication scopes has been requested - authScope = "&scope=Files.Read%20Files.Read.All%20Sites.Read.All%20offline_access&response_type=code&prompt=login&redirect_uri="; - } else { - // read-write authentication scopes will be used (default) - authScope = "&scope=Files.ReadWrite%20Files.ReadWrite.All%20Sites.ReadWrite.All%20offline_access&response_type=code&prompt=login&redirect_uri="; - } - + // What URL should be presented to the user to access string url = authUrl ~ "?client_id=" ~ clientId ~ authScope ~ redirectUrl; - string authFilesString = cfg.getValueString("auth_files"); - string authResponseString = cfg.getValueString("auth_response"); - if (authResponseString != "") { + // Configure automated authentication if --auth-files authUrl:responseUrl is being used + string authFilesString = appConfig.getValueString("auth_files"); + string authResponseString = appConfig.getValueString("auth_response"); + + if (!authResponseString.empty) { + // read the response from authResponseString response = cast(char[]) authResponseString; } else if (authFilesString != "") { string[] authFiles = authFilesString.split(":"); @@ -588,27 +408,35 @@ final class OneDriveApi string responseUrl = authFiles[1]; try { - // Try and write out the auth URL to the nominated file auto authUrlFile = File(authUrl, "w"); authUrlFile.write(url); authUrlFile.close(); - } catch (std.exception.ErrnoException e) { + } catch (FileException exception) { // There was a file system error // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return false; + displayFileSystemErrorMessage(exception.msg, getFunctionName!({})); + // Must force exit here, allow logging to be done + forceExit(); + } catch (ErrnoException exception) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(exception.msg, getFunctionName!({})); + // Must force exit here, allow logging to be done + forceExit(); } + + addLogEntry("Client requires authentication before proceeding. Waiting for --auth-files elements to be available."); while (!exists(responseUrl)) { Thread.sleep(dur!("msecs")(100)); } - // read response from OneDrive + // read response from provided from OneDrive try { response = cast(char[]) read(responseUrl); - } catch (OneDriveException e) { + } catch (OneDriveException exception) { // exception generated - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); return false; } @@ -616,20 +444,33 @@ final class OneDriveApi try { std.file.remove(authUrl); std.file.remove(responseUrl); - } catch (FileException e) { - log.error("Cannot remove files ", authUrl, " ", responseUrl); + } catch (FileException exception) { + addLogEntry("Cannot remove files " ~ authUrl ~ " " ~ responseUrl); return false; } } else { - log.log("Authorize this app visiting:\n"); - write(url, "\n\n", "Enter the response uri: "); - readln(response); - cfg.applicationAuthorizeResponseUri = true; + // Are we in a --dry-run scenario? 
+ if (!appConfig.getValueBool("dry_run")) { + // No --dry-run is being used + addLogEntry("Authorise this application by visiting:\n", ["consoleOnly"]); + addLogEntry(url ~ "\n", ["consoleOnly"]); + addLogEntry("Enter the response uri from your browser: ", ["consoleOnlyNoNewLine"]); + readln(response); + appConfig.applicationAuthorizeResponseUri = true; + } else { + // The application cannot be authorised when using --dry-run as we have to write out the authentication data, which negates the whole 'dry-run' process + addLogEntry(); + addLogEntry("The application requires authorisation, which involves saving authentication data on your system. Note that authorisation cannot be completed with the '--dry-run' option."); + addLogEntry(); + addLogEntry("To exclusively authorise the application without performing any additional actions, use this command: onedrive"); + addLogEntry(); + forceExit(); + } } // match the authorization code auto c = matchFirst(response, r"(?:[\?&]code=)([\w\d-.]+)"); if (c.empty) { - log.log("Invalid response uri entered"); + addLogEntry("An empty or invalid response uri was entered"); return false; } c.popFront(); // skip the whole match @@ -637,100 +478,116 @@ final class OneDriveApi return true; } - string getSiteSearchUrl() - { - // Return the actual siteSearchUrl being used and/or requested when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call - return .siteSearchUrl; - } - - ulong getRetryAfterValue() - { - // Return the current value of retryAfterValue if it has been set to something other than 0 - return .retryAfterValue; - } - - void resetRetryAfterValue() - { - // Reset the current value of retryAfterValue to 0 after it has been used - .retryAfterValue = 0; - } - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/drive_get - JSONValue getDefaultDrive() - { - checkAccessTokenExpired(); - const(char)[] url; + JSONValue getDefaultDriveDetails() { + string url; url = driveUrl; - return get(driveUrl); + return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getDefaultRoot() - { - checkAccessTokenExpired(); - const(char)[] url; + JSONValue getDefaultRootDetails() { + string url; url = driveUrl ~ "/root"; return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getDriveIdRoot(const(char)[] driveId) - { - checkAccessTokenExpired(); - const(char)[] url; + JSONValue getDriveIdRoot(string driveId) { + string url; url = driveByIdUrl ~ driveId ~ "/root"; return get(url); } - - // https://docs.microsoft.com/en-us/graph/api/drive-sharedwithme - JSONValue getSharedWithMe() - { - checkAccessTokenExpired(); - return get(sharedWithMeUrl); - } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/drive_get - JSONValue getDriveQuota(const(char)[] driveId) - { - checkAccessTokenExpired(); - const(char)[] url; + JSONValue getDriveQuota(string driveId) { + string url; url = driveByIdUrl ~ driveId ~ "/"; url ~= "?select=quota"; return get(url); } - + + // Return the details of the specified path, by giving the path we wish to query + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get + JSONValue getPathDetails(string path) { + string url; + if ((path == ".")||(path == "/")) { + url = driveUrl ~ "/root/"; + } else { + url = itemByPathUrl ~ encodeComponent(path) ~ ":/"; + } + // Add select clause + url ~= 
"?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + return get(url); + } + + // Return the details of the specified item based on its driveID and itemID + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get + JSONValue getPathDetailsById(string driveId, string id) { + string url; + url = driveByIdUrl ~ driveId ~ "/items/" ~ id; + url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + return get(url); + } + + // Return all the items that are shared with the user + // https://docs.microsoft.com/en-us/graph/api/drive-sharedwithme + JSONValue getSharedWithMe() { + return get(sharedWithMeUrl); + } + + // Create a shareable link for an existing file on OneDrive based on the accessScope JSON permissions + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createlink + JSONValue createShareableLink(string driveId, string id, JSONValue accessScope) { + string url; + url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/createLink"; + return post(url, accessScope.toString()); + } + + // Return the requested details of the specified path on the specified drive id and path + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get + JSONValue getPathDetailsByDriveId(string driveId, string path) { + string url; + // https://learn.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online + // Required format: /drives/{drive-id}/root:/{item-path}: + url = driveByIdUrl ~ driveId ~ "/root:/" ~ encodeComponent(path) ~ ":"; + url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + return get(url); + } + + // Track changes for a given driveId // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delta - JSONValue viewChangesByItemId(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) - { - checkAccessTokenExpired(); - const(char)[] url; + // Your app begins by calling delta without any parameters. The service starts enumerating the drive's hierarchy, returning pages of items and either an @odata.nextLink or an @odata.deltaLink, as described below. + // Your app should continue calling with the @odata.nextLink until you no longer see an @odata.nextLink returned, or you see a response with an empty set of changes. + // After you have finished receiving all the changes, you may apply them to your local state. To check for changes in the future, call delta again with the @odata.deltaLink from the previous successful response. 
+ JSONValue getChangesByItemId(string driveId, string id, string deltaLink) { + string[string] requestHeaders; + // If Business Account add Prefer: Include-Feature=AddToOneDrive + if ((appConfig.accountType != "personal") && ( appConfig.getValueBool("sync_business_shared_items"))) { + addIncludeFeatureRequestHeader(&requestHeaders); + } + + string url; // configure deltaLink to query if (deltaLink.empty) { url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/delta"; + // Reduce what we ask for in the response - which reduces the data transferred back to us, and reduces what is held in memory during initial JSON processing url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; } else { url = deltaLink; } - return get(url); - } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delta - JSONValue viewChangesByDriveId(const(char)[] driveId, const(char)[] deltaLink) - { - checkAccessTokenExpired(); - const(char)[] url = deltaLink; - if (url == null) { - url = driveByIdUrl ~ driveId ~ "/root/delta"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - } - return get(url); + return get(url, false, requestHeaders); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_list_children - JSONValue listChildren(const(char)[] driveId, const(char)[] id, const(char)[] nextLink) - { - checkAccessTokenExpired(); - const(char)[] url; + JSONValue listChildren(string driveId, string id, string nextLink) { + string[string] requestHeaders; + // If Business Account add addIncludeFeatureRequestHeader() which should add Prefer: Include-Feature=AddToOneDrive + if ((appConfig.accountType != "personal") && ( appConfig.getValueBool("sync_business_shared_items"))) { + addIncludeFeatureRequestHeader(&requestHeaders); + } + + string url; // configure URL to query if (nextLink.empty) { url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/children"; @@ -738,241 +595,87 @@ final class OneDriveApi } else { url = nextLink; } - return get(url); - } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get_content - void downloadById(const(char)[] driveId, const(char)[] id, string saveToPath, long fileSize) - { - checkAccessTokenExpired(); - scope(failure) { - if (exists(saveToPath)) { - // try and remove the file, catch error - try { - remove(saveToPath); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } - - // Create the required local directory - string newPath = dirName(saveToPath); - - // Does the path exist locally? - if (!exists(newPath)) { - try { - log.vdebug("Requested path does not exist, creating directory structure: ", newPath); - mkdirRecurse(newPath); - // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", newPath); - newPath.setAttributes(cfg.returnRequiredDirectoryPermisions()); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - - const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content?AVOverride=1"; - // Download file - download(url, saveToPath, fileSize); - // Does path exist? 
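Both the removed downloadById() above and its replacement prepare the local directory before writing the file; distilled into a sketch (the helper name and permissions parameter are illustrative, not part of this patch):

    import std.file : exists, mkdirRecurse, setAttributes;
    import std.path : dirName;

    // Ensure the parent directory of a download target exists, with the configured permissions
    void ensureLocalParent(string saveToPath, uint requiredDirectoryPermissions) {
        string newPath = dirName(saveToPath);
        if (!exists(newPath)) {
            mkdirRecurse(newPath);                                // create the directory structure
            newPath.setAttributes(requiredDirectoryPermissions);  // apply the configured permissions
        }
    }
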
- if (exists(saveToPath)) { - // File was downloaded successfully - configure the applicable permissions for the file - log.vdebug("Setting file permissions for: ", saveToPath); - saveToPath.setAttributes(cfg.returnRequiredFilePermisions()); - } + return get(url, false, requestHeaders); } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content - JSONValue simpleUpload(string localPath, string parentDriveId, string parentId, string filename, const(char)[] eTag = null) - { - checkAccessTokenExpired(); - string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/content"; - // TODO: investigate why this fails for remote folders - //if (eTag) http.addRequestHeader("If-Match", eTag); - /*else http.addRequestHeader("If-None-Match", "*");*/ - return upload(localPath, url); - } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content - JSONValue simpleUploadReplace(string localPath, string driveId, string id, const(char)[] eTag = null) - { - checkAccessTokenExpired(); - string url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content"; - if (eTag) http.addRequestHeader("If-Match", eTag); - return upload(localPath, url); + + // https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_search + JSONValue searchDriveForPath(string driveId, string path) { + string url; + url = "https://graph.microsoft.com/v1.0/drives/" ~ driveId ~ "/root/search(q='" ~ encodeComponent(path) ~ "')"; + return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_update - JSONValue updateById(const(char)[] driveId, const(char)[] id, JSONValue data, const(char)[] eTag = null) - { - checkAccessTokenExpired(); + JSONValue updateById(const(char)[] driveId, const(char)[] id, JSONValue data, const(char)[] eTag = null) { + string[string] requestHeaders; const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id; - if (eTag) http.addRequestHeader("If-Match", eTag); - http.addRequestHeader("Content-Type", "application/json"); - return patch(url, data.toString()); + if (eTag) requestHeaders["If-Match"] = to!string(eTag); + return patch(url, data.toString(), requestHeaders); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delete - void deleteById(const(char)[] driveId, const(char)[] id, const(char)[] eTag = null) - { - checkAccessTokenExpired(); + void deleteById(const(char)[] driveId, const(char)[] id, const(char)[] eTag = null) { + // string[string] requestHeaders; const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id; //TODO: investigate why this always fail with 412 (Precondition Failed) - //if (eTag) http.addRequestHeader("If-Match", eTag); - del(url); + // if (eTag) requestHeaders["If-Match"] = eTag; + performDelete(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_post_children - JSONValue createById(const(char)[] parentDriveId, const(char)[] parentId, JSONValue item) - { - checkAccessTokenExpired(); - const(char)[] url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ "/children"; - http.addRequestHeader("Content-Type", "application/json"); + JSONValue createById(string parentDriveId, string parentId, JSONValue item) { + string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ "/children"; return post(url, item.toString()); } - - // Return the details of the specified path - JSONValue getPathDetails(const(string) path) - { - checkAccessTokenExpired(); 
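The If-Match handling in updateById() above is optimistic concurrency: the PATCH succeeds only while the supplied eTag still matches the item online, otherwise the service answers HTTP 412 (Precondition Failed), which is also what the TODO on deleteById() notes. A sketch of how a caller might react (illustrative, not part of this patch):

    import std.json;

    // Attempt a conditional update; an HTTP 412 means the item changed online since it was read
    void updateWithEtag(OneDriveApi api, string driveId, string id, JSONValue data, string eTag) {
        try {
            api.updateById(driveId, id, data, eTag);
        } catch (OneDriveException exception) {
            if (exception.httpStatusCode == 412) {
                // stale eTag: re-read the item to obtain a fresh eTag, then retry the update
            } else {
                throw exception;
            }
        }
    }
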
- const(char)[] url; - if ((path == ".")||(path == "/")) url = driveUrl ~ "/root/"; - else url = itemByPathUrl ~ encodeComponent(path) ~ ":/"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the details of the specified id - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getPathDetailsById(const(char)[] driveId, const(char)[] id) - { - checkAccessTokenExpired(); - const(char)[] url; - url = driveByIdUrl ~ driveId ~ "/items/" ~ id; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the requested details of the specified path on the specified drive id and path - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get?view=odsp-graph-online - JSONValue getPathDetailsByDriveId(const(char)[] driveId, const(string) path) - { - checkAccessTokenExpired(); - const(char)[] url; - // string driveByIdUrl = "https://graph.microsoft.com/v1.0/drives/"; - // Required format: /drives/{drive-id}/root:/{item-path} - url = driveByIdUrl ~ driveId ~ "/root:/" ~ encodeComponent(path); - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the requested details of the specified path on the specified drive id and item id - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get?view=odsp-graph-online - JSONValue getPathDetailsByDriveIdAndItemId(const(char)[] driveId, const(char)[] itemId) - { - checkAccessTokenExpired(); - const(char)[] url; - // string driveByIdUrl = "https://graph.microsoft.com/v1.0/drives/"; - // Required format: /drives/{drive-id}/items/{item-id} - url = driveByIdUrl ~ driveId ~ "/items/" ~ itemId; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the requested details of the specified id - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getFileDetails(const(char)[] driveId, const(char)[] id) - { - checkAccessTokenExpired(); - const(char)[] url; - url = driveByIdUrl ~ driveId ~ "/items/" ~ id; - url ~= "?select=size,malware,file,webUrl,lastModifiedBy,lastModifiedDateTime"; - return get(url); - } - - // Create an anonymous read-only shareable link for an existing file on OneDrive - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createlink - JSONValue createShareableLink(const(char)[] driveId, const(char)[] id, JSONValue accessScope) - { - checkAccessTokenExpired(); - const(char)[] url; - url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/createLink"; - http.addRequestHeader("Content-Type", "application/json"); - return post(url, accessScope.toString()); + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content + JSONValue simpleUpload(string localPath, string parentDriveId, string parentId, string filename) { + string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/content"; + return put(url, localPath); } - - // https://dev.onedrive.com/items/move.htm - JSONValue moveByPath(const(char)[] sourcePath, JSONValue moveData) - { - // Need to use itemByPathUrl - checkAccessTokenExpired(); - string url = itemByPathUrl ~ encodeComponent(sourcePath); - 
http.addRequestHeader("Content-Type", "application/json"); - return move(url, moveData.toString()); + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content + JSONValue simpleUploadReplace(string localPath, string driveId, string id) { + string url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content"; + return put(url, localPath); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createuploadsession - JSONValue createUploadSession(const(char)[] parentDriveId, const(char)[] parentId, const(char)[] filename, const(char)[] eTag = null, JSONValue item = null) - { - checkAccessTokenExpired(); - const(char)[] url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/createUploadSession"; - if (eTag) http.addRequestHeader("If-Match", eTag); - http.addRequestHeader("Content-Type", "application/json"); + //JSONValue createUploadSession(string parentDriveId, string parentId, string filename, string eTag = null, JSONValue item = null) { + JSONValue createUploadSession(string parentDriveId, string parentId, string filename, const(char)[] eTag = null, JSONValue item = null) { + // string[string] requestHeaders; + string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/createUploadSession"; + // eTag If-Match header addition commented out for the moment + // At some point, post the creation of this upload session the eTag is being 'updated' by OneDrive, thus when uploadFragment() is used + // this generates a 412 Precondition Failed and then a 416 Requested Range Not Satisfiable + // This needs to be investigated further as to why this occurs + // if (eTag) requestHeaders["If-Match"] = eTag; return post(url, item.toString()); } - + // https://dev.onedrive.com/items/upload_large_files.htm - JSONValue uploadFragment(const(char)[] uploadUrl, string filepath, long offset, long offsetSize, long fileSize) - { - checkAccessTokenExpired(); + JSONValue uploadFragment(string uploadUrl, string filepath, long offset, long offsetSize, long fileSize) { // open file as read-only in binary mode - auto file = File(filepath, "rb"); - file.seek(offset); + + // If we upload a modified file, with the current known online eTag, this gets changed when the session is started - thus, the tail end of uploading + // a fragment fails with a 412 Precondition Failed and then a 416 Requested Range Not Satisfiable + // For the moment, comment out adding the If-Match header in createUploadSession, which then avoids this issue + string contentRange = "bytes " ~ to!string(offset) ~ "-" ~ to!string(offset + offsetSize - 1) ~ "/" ~ to!string(fileSize); - log.vdebugNewLine("contentRange: ", contentRange); - - // function scopes - scope(exit) { - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; - // close file if open - if (file.isOpen()){ - // close open file - file.close(); - } - } + addLogEntry("", ["debug"]); // Add an empty newline before log output + addLogEntry("contentRange: " ~ contentRange, ["debug"]); - http.method = HTTP.Method.put; - http.url = uploadUrl; - http.addRequestHeader("Content-Range", contentRange); - http.onSend = data => file.rawRead(data).length; - // convert offsetSize to ulong - http.contentLength = to!ulong(offsetSize); - auto response = perform(); - // TODO: retry on 5xx errors - checkHttpCode(response); - return response; + return 
put(uploadUrl, filepath, true, contentRange, offset, offsetSize); } - + // https://dev.onedrive.com/items/upload_large_files.htm - JSONValue requestUploadStatus(const(char)[] uploadUrl) - { - checkAccessTokenExpired(); - // when using microsoft graph the auth code is different + JSONValue requestUploadStatus(string uploadUrl) { return get(uploadUrl, true); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/site_search?view=odsp-graph-online - JSONValue o365SiteSearch(const(char)[] nextLink){ - checkAccessTokenExpired(); - const(char)[] url; + JSONValue o365SiteSearch(string nextLink) { + string url; // configure URL to query if (nextLink.empty) { url = siteSearchUrl ~ "=*"; @@ -981,57 +684,18 @@ final class OneDriveApi } return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/drive_list?view=odsp-graph-online JSONValue o365SiteDrives(string site_id){ - checkAccessTokenExpired(); - const(char)[] url; + string url; url = siteDriveUrl ~ site_id ~ "/drives"; return get(url); } - // Create a new subscription or renew the existing subscription - void createOrRenewSubscription() { - checkAccessTokenExpired(); - - // Kick off the webhook server first - if (webhook is null) { - webhook = OneDriveWebhook.getOrCreate( - cfg.getValueString("webhook_listening_host"), - to!ushort(cfg.getValueLong("webhook_listening_port")), - thisTid - ); - spawn(&OneDriveWebhook.serve); - } - - if (!hasValidSubscription()) { - createSubscription(); - } else if (isSubscriptionUpForRenewal()) { - try { - renewSubscription(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - log.log("The subscription is not found on the server. Recreating subscription ..."); - createSubscription(); - } - } - } - } - - private bool hasValidSubscription() { - return !subscriptionId.empty && subscriptionExpiration > Clock.currTime(UTC()); - } - - private bool isSubscriptionUpForRenewal() { - return subscriptionExpiration < Clock.currTime(UTC()) + subscriptionRenewalInterval; - } - - private void createSubscription() { - log.log("Initializing subscription for updates ..."); - - auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - const(char)[] url; - url = subscriptionUrl; + JSONValue createSubscription(string notificationUrl, SysTime expirationDateTime) { + string driveId = appConfig.getValueString("drive_id"); + string url = subscriptionUrl; + // Create a resource item based on if we have a driveId string resourceItem; if (driveId.length) { @@ -1039,7 +703,7 @@ final class OneDriveApi } else { resourceItem = "/me/drive/root"; } - + // create JSON request to create webhook subscription const JSONValue request = [ "changeType": "updated", @@ -1048,840 +712,706 @@ final class OneDriveApi "expirationDateTime": expirationDateTime.toISOExtString(), "clientState": randomUUID().toString() ]; - http.addRequestHeader("Content-Type", "application/json"); - JSONValue response; - - try { - response = post(url, request.toString()); - } catch (OneDriveException e) { - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - - // We need to exit here, user needs to fix issue - log.error("ERROR: Unable to initialize subscriptions for updates. 
Please fix this issue."); - shutdown(); - exit(-1); - } - - // Save important subscription metadata including id and expiration - subscriptionId = response["id"].str; - subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); + curlEngine.http.addRequestHeader("Content-Type", "application/json"); + return post(url, request.toString()); } - - private void renewSubscription() { - log.log("Renewing subscription for updates ..."); - - auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - const(char)[] url; + + JSONValue renewSubscription(string subscriptionId, SysTime expirationDateTime) { + string url; url = subscriptionUrl ~ "/" ~ subscriptionId; const JSONValue request = [ "expirationDateTime": expirationDateTime.toISOExtString() ]; - http.addRequestHeader("Content-Type", "application/json"); - JSONValue response = patch(url, request.toString()); - - // Update subscription expiration from the response - subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); + curlEngine.http.addRequestHeader("Content-Type", "application/json"); + return post(url, request.toString()); } + + void deleteSubscription(string subscriptionId) { + string url; + url = subscriptionUrl ~ "/" ~ subscriptionId; + performDelete(url); + } + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get_content + void downloadById(const(char)[] driveId, const(char)[] id, string saveToPath, long fileSize) { + scope(failure) { + if (exists(saveToPath)) { + // try and remove the file, catch error + try { + remove(saveToPath); + } catch (FileException exception) { + // display the error message + displayFileSystemErrorMessage(exception.msg, getFunctionName!({})); + } + } + } - private void deleteSubscription() { - if (!hasValidSubscription()) { - return; + // Create the required local directory + string newPath = dirName(saveToPath); + + // Does the path exist locally? + if (!exists(newPath)) { + try { + addLogEntry("Requested local path does not exist, creating directory structure: " ~ newPath, ["debug"]); + mkdirRecurse(newPath); + // Configure the applicable permissions for the folder + addLogEntry("Setting directory permissions for: " ~ newPath, ["debug"]); + newPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + } catch (FileException exception) { + // display the error message + displayFileSystemErrorMessage(exception.msg, getFunctionName!({})); + } } - const(char)[] url; - url = subscriptionUrl ~ "/" ~ subscriptionId; - del(url); - log.log("Deleted subscription"); + const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content?AVOverride=1"; + // Download file + downloadFile(url, saveToPath, fileSize); + // Does path exist? 
+ if (exists(saveToPath)) { + // File was downloaded successfully - configure the applicable permissions for the file + addLogEntry("Setting file permissions for: " ~ saveToPath, ["debug"]); + saveToPath.setAttributes(appConfig.returnRequiredFilePermisions()); + } + } + + // Return the actual siteSearchUrl being used and/or requested when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call + string getSiteSearchUrl() { + return siteSearchUrl; + } + + // Private OneDrive API Functions + private void addIncludeFeatureRequestHeader(string[string]* headers) { + addLogEntry("Adding 'Include-Feature=AddToOneDrive' API request header as 'sync_business_shared_items' config option is enabled", ["debug"]); + (*headers)["Prefer"] = "Include-Feature=AddToOneDrive"; } - private void redeemToken(const(char)[] authCode) - { - const(char)[] postData = + private void redeemToken(char[] authCode){ + char[] postData = "client_id=" ~ clientId ~ "&redirect_uri=" ~ redirectUrl ~ "&code=" ~ authCode ~ "&grant_type=authorization_code"; acquireToken(postData); } - - private void newToken() - { - string postData = - "client_id=" ~ clientId ~ - "&redirect_uri=" ~ redirectUrl ~ - "&refresh_token=" ~ refreshToken ~ - "&grant_type=refresh_token"; - acquireToken(postData); - } - - private void acquireToken(const(char)[] postData) - { + + private void acquireToken(char[] postData) { JSONValue response; try { - response = post(tokenUrl, postData); - } catch (OneDriveException e) { + response = post(tokenUrl, postData, true, "application/x-www-form-urlencoded"); + } catch (OneDriveException exception) { // an error was generated - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) { + // Handle an unauthorised client + handleClientUnauthorised(exception.httpStatusCode, exception.msg); + } else { + if (exception.httpStatusCode >= 500) { + // There was a HTTP 5xx Server Side Error - retry + acquireToken(postData); + } else { + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + } } if (response.type() == JSONType.object) { // Has the client been configured to use read_only_auth_scope - if (cfg.getValueBool("read_only_auth_scope")) { + if (appConfig.getValueBool("read_only_auth_scope")) { // read_only_auth_scope has been configured if ("scope" in response){ string effectiveScopes = response["scope"].str(); // Display the effective authentication scopes - writeln(); - writeln("Effective API Authentication Scopes: ", effectiveScopes); + addLogEntry(); + addLogEntry("Effective API Authentication Scopes: " ~ effectiveScopes, ["verbose"]); + // if we have any write scopes, we need to tell the user to update an remove online prior authentication and exit application if (canFind(effectiveScopes, "Write")) { // effective scopes contain write scopes .. so not a read-only configuration - writeln(); - writeln("ERROR: You have authentication scopes that allow write operations. You need to remove your existing application access consent"); - writeln(); - writeln("Please login to https://account.live.com/consent/Manage and remove your existing application access consent"); - writeln(); + addLogEntry(); + addLogEntry("ERROR: You have authentication scopes that allow write operations. 
You need to remove your existing application access consent");
+						addLogEntry();
+						addLogEntry("Please login to https://account.live.com/consent/Manage and remove your existing application access consent");
+						addLogEntry();
 						// force exit
-						shutdown();
-						exit(-1);
+						releaseCurlEngine();
+						// Must force exit here, allow logging to be done
+						forceExit();
 					}
 				}
 			}
 			if ("access_token" in response){
-				accessToken = "bearer " ~ response["access_token"].str();
-				refreshToken = response["refresh_token"].str();
-				accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer());
-				if (!.dryRun) {
+				appConfig.accessToken = "bearer " ~ strip(response["access_token"].str);
+
+				// Do we print the current access token?
+				if (appConfig.verbosityCount > 1) {
+					if (appConfig.getValueBool("debug_https")) {
+						if (appConfig.getValueBool("print_token")) {
+							// This output needs to be highly restricted
+							addLogEntry("CAUTION - KEEP THIS SAFE: Current access token: " ~ to!string(appConfig.accessToken), ["debug"]);
+						}
+					}
+				}
+
+				refreshToken = strip(response["refresh_token"].str);
+				appConfig.accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer());
+				if (!dryRun) {
+					// Update the refreshToken in appConfig so that we can reuse it
+					if (appConfig.refreshToken.empty) {
+						// The stored refresh token is empty
+						addLogEntry("Updating appConfig.refreshToken with new refreshToken as appConfig.refreshToken is empty", ["debug"]);
+						appConfig.refreshToken = refreshToken;
+					} else {
+						// Is the refresh token different?
+						if (appConfig.refreshToken != refreshToken) {
+							// Update the in-memory copy
+							addLogEntry("Updating appConfig.refreshToken with updated refreshToken", ["debug"]);
+							appConfig.refreshToken = refreshToken;
+						}
+					}
+
+					// try and update the refresh_token file on disk
 					try {
-						// try and update the refresh_token file
-						std.file.write(cfg.refreshTokenFilePath, refreshToken);
-						log.vdebug("Setting file permissions for: ", cfg.refreshTokenFilePath);
-						cfg.refreshTokenFilePath.setAttributes(cfg.returnRequiredFilePermisions());
-					} catch (FileException e) {
+						addLogEntry("Updating refreshToken on disk", ["debug"]);
+						std.file.write(appConfig.refreshTokenFilePath, refreshToken);
+						addLogEntry("Setting file permissions for: " ~ appConfig.refreshTokenFilePath, ["debug"]);
+						appConfig.refreshTokenFilePath.setAttributes(appConfig.returnRequiredFilePermisions());
+					} catch (FileException exception) {
 						// display the error message
-						displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
+						displayFileSystemErrorMessage(exception.msg, getFunctionName!({}));
 					}
 				}
-				if (printAccessToken) writeln("New access token: ", accessToken);
 			} else {
-				log.error("\nInvalid authentication response from OneDrive. Please check the response uri\n");
+				addLogEntry("\nInvalid authentication response from OneDrive. Please check the response uri\n");
 				// re-authorize
-				authorize();
+				authorise();
 			}
 		} else {
-			log.vdebug("Invalid JSON response from OneDrive unable to initialize application");
+			addLogEntry("Invalid response from the Microsoft Graph API.
Unable to initialise OneDrive API instance."); + // Must force exit here, allow logging to be done + forceExit(); } } - - private void checkAccessTokenExpired() - { - try { - if (Clock.currTime() >= accessTokenExpiration) { - newToken(); - } - } catch (OneDriveException e) { - if (e.httpStatusCode == 400 || e.httpStatusCode == 401) { - // flag error and notify - writeln(); - log.errorAndNotify("ERROR: Refresh token invalid, use --reauth to authorize the client again."); - writeln(); - // set error message - e.msg ~= "\nRefresh token invalid, use --reauth to authorize the client again"; - } + + private void newToken() { + addLogEntry("Need to generate a new access token for Microsoft OneDrive", ["debug"]); + auto postData = appender!(string)(); + postData ~= "client_id=" ~ clientId; + postData ~= "&redirect_uri=" ~ redirectUrl; + postData ~= "&refresh_token=" ~ to!string(refreshToken); + postData ~= "&grant_type=refresh_token"; + acquireToken(postData.data.dup); + } + + private void checkAccessTokenExpired() { + if (Clock.currTime() >= appConfig.accessTokenExpiration) { + addLogEntry("Microsoft OneDrive Access Token has EXPIRED. Must generate a new Microsoft OneDrive Access Token", ["debug"]); + newToken(); + } else { + addLogEntry("Existing Microsoft OneDrive Access Token Expires: " ~ to!string(appConfig.accessTokenExpiration), ["debug"]); } } - - private void addAccessTokenHeader() - { - http.addRequestHeader("Authorization", accessToken); + + private string getAccessToken() { + checkAccessTokenExpired(); + return to!string(appConfig.accessToken); } - private JSONValue get(const(char)[] url, bool skipToken = false) - { - scope(exit) http.clearRequestHeaders(); - log.vdebug("Request URL = ", url); - http.method = HTTP.Method.get; - http.url = url; - if (!skipToken) addAccessTokenHeader(); // HACK: requestUploadStatus - JSONValue response; - response = perform(); - checkHttpCode(response); - // OneDrive API Response Debugging if --https-debug is being used - if (.debugResponse){ - log.vdebug("OneDrive API Response: ", response); - } - return response; + private void addAccessTokenHeader(string[string]* requestHeaders) { + (*requestHeaders)["Authorization"] = getAccessToken(); } - - private void del(const(char)[] url) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.del; - http.url = url; - addAccessTokenHeader(); - auto response = perform(); - checkHttpCode(response); + + private void connect(HTTP.Method method, const(char)[] url, bool skipToken, CurlResponse response, string[string] requestHeaders=null) { + addLogEntry("Request URL = " ~ to!string(url), ["debug"]); + // Check access token first in case the request is overridden + if (!skipToken) addAccessTokenHeader(&requestHeaders); + curlEngine.setResponseHolder(response); + foreach(k, v; requestHeaders) { + curlEngine.addRequestHeader(k, v); + } + curlEngine.connect(method, url); } - private void download(const(char)[] url, string filename, long fileSize) - { + private void performDelete(const(char)[] url, string[string] requestHeaders=null, string callingFunction=__FUNCTION__, int lineno=__LINE__) { + bool validateJSONResponse = false; + oneDriveErrorHandlerWrapper((CurlResponse response) { + connect(HTTP.Method.del, url, false, response, requestHeaders); + return curlEngine.execute(); + }, validateJSONResponse, callingFunction, lineno); + } + + private void downloadFile(const(char)[] url, string filename, long fileSize, string callingFunction=__FUNCTION__, int lineno=__LINE__) { // Threshold for displaying 
download bar long thresholdFileSize = 4 * 2^^20; // 4 MiB // To support marking of partially-downloaded files, string originalFilename = filename; string downloadFilename = filename ~ ".partial"; - - // open downloadFilename as write in binary mode - auto file = File(downloadFilename, "wb"); - - // function scopes - scope(exit) { - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; - // Reset onProgress to not display anything for next download - http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) - { - return 0; - }; - // close file if open - if (file.isOpen()){ - // close open file - file.close(); - } - } - - http.method = HTTP.Method.get; - http.url = url; - addAccessTokenHeader(); - - http.onReceive = (ubyte[] data) { - file.rawWrite(data); - return data.length; - }; - - if (fileSize >= thresholdFileSize){ - // Download Progress Bar - size_t iteration = 20; - Progress p = new Progress(iteration); - p.title = "Downloading"; - writeln(); - bool barInit = false; - real previousProgressPercent = -1.0; - real percentCheck = 5.0; - long segmentCount = 1; - // Setup progress bar to display - http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) - { - // For each onProgress, what is the % of dlnow to dltotal - // floor - rounds down to nearest whole number - real currentDLPercent = floor(double(dlnow)/dltotal*100); - // Have we started downloading? - if (currentDLPercent > 0){ - // We have started downloading - log.vdebugNewLine("Data Received = ", dlnow); - log.vdebug("Expected Total = ", dltotal); - log.vdebug("Percent Complete = ", currentDLPercent); - // Every 5% download we need to increment the download bar - - // Has the user set a data rate limit? 
- // when using rate_limit, we will get odd download rates, for example: - // Percent Complete = 24 - // Data Received = 13080163 - // Expected Total = 52428800 - // Percent Complete = 24 - // Data Received = 13685777 - // Expected Total = 52428800 - // Percent Complete = 26 <---- jumps to 26% missing 25%, thus fmod misses incrementing progress bar - // Data Received = 13685777 - // Expected Total = 52428800 - // Percent Complete = 26 - - if (cfg.getValueLong("rate_limit") > 0) { - // User configured rate limit - // How much data should be in each segment to qualify for 5% - long dataPerSegment = to!long(floor(double(dltotal)/iteration)); - // How much data received do we need to validate against - long thisSegmentData = dataPerSegment * segmentCount; - long nextSegmentData = dataPerSegment * (segmentCount + 1); - // Has the data that has been received in a 5% window that we need to increment the progress bar at - if ((dlnow > thisSegmentData) && (dlnow < nextSegmentData) && (previousProgressPercent != currentDLPercent) || (dlnow == dltotal)) { - // Downloaded data equals approx 5% - log.vdebug("Incrementing Progress Bar using calculated 5% of data received"); - // Downloading 50% |oooooooooooooooooooo | ETA 00:01:40 - // increment progress bar - p.next(); - // update values - log.vdebug("Setting previousProgressPercent to ", currentDLPercent); - previousProgressPercent = currentDLPercent; - log.vdebug("Incrementing segmentCount"); - segmentCount++; + bool validateJSONResponse = false; + oneDriveErrorHandlerWrapper((CurlResponse response) { + connect(HTTP.Method.get, url, false, response); + + if (fileSize >= thresholdFileSize){ + // Download Progress variables + size_t expected_total_segments = 20; + ulong start_unix_time = Clock.currTime.toUnixTime(); + int h, m, s; + string etaString; + bool barInit = false; + real previousProgressPercent = -1.0; + real percentCheck = 5.0; + size_t segmentCount = -1; + + // Setup progress bar to display + curlEngine.http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) { + // For each onProgress, what is the % of dlnow to dltotal + // floor - rounds down to nearest whole number + real currentDLPercent = floor(double(dlnow)/dltotal*100); + string downloadLogEntry = "Downloading: " ~ filename ~ " ... "; + + // Have we started downloading? + if (currentDLPercent > 0){ + // We have started downloading + addLogEntry("", ["debug"]); // Debug new line only + addLogEntry("Data Received = " ~ to!string(dlnow), ["debug"]); + addLogEntry("Expected Total = " ~ to!string(dltotal), ["debug"]); + addLogEntry("Percent Complete = " ~ to!string(currentDLPercent), ["debug"]); + + // Every 5% download we need to increment the download bar + + // Has the user set a data rate limit? 
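The ETA strings assembled above come from calc_eta(); one way such an estimate can be computed from elapsed time and completed segments, purely to illustrate the arithmetic (this is not the patch's calc_eta() implementation):

    import std.datetime.systime : Clock;

    // Estimate the seconds remaining as: elapsed * (segmentsTotal / segmentsDone - 1)
    long estimateEtaSeconds(size_t segmentsDone, size_t segmentsTotal, ulong startUnixTime) {
        if (segmentsDone == 0) return 0; // nothing measured yet, nothing to extrapolate
        long elapsed = Clock.currTime.toUnixTime() - cast(long) startUnixTime;
        double remainingRatio = (cast(double) segmentsTotal / segmentsDone) - 1.0;
        return cast(long)(elapsed * remainingRatio);
    }
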
+ // when using rate_limit, we will get odd download rates, for example: + // Percent Complete = 24 + // Data Received = 13080163 + // Expected Total = 52428800 + // Percent Complete = 24 + // Data Received = 13685777 + // Expected Total = 52428800 + // Percent Complete = 26 <---- jumps to 26% missing 25%, thus fmod misses incrementing progress bar + // Data Received = 13685777 + // Expected Total = 52428800 + // Percent Complete = 26 + + if (appConfig.getValueLong("rate_limit") > 0) { + // User configured rate limit + // How much data should be in each segment to qualify for 5% + ulong dataPerSegment = to!ulong(floor(double(dltotal)/expected_total_segments)); + // How much data received do we need to validate against + ulong thisSegmentData = dataPerSegment * segmentCount; + ulong nextSegmentData = dataPerSegment * (segmentCount + 1); + + // Has the data received reached the next 5% window at which we need to increment the progress bar? + if ((dlnow > thisSegmentData) && (dlnow < nextSegmentData) && (previousProgressPercent != currentDLPercent) || (dlnow == dltotal)) { + // Downloaded data equals approx 5% + addLogEntry("Incrementing Progress Bar using calculated 5% of data received", ["debug"]); + + // 100% check + if (currentDLPercent != 100) { + // Not 100% yet + // Calculate the output + segmentCount++; + auto eta = calc_eta(segmentCount, expected_total_segments, start_unix_time); + dur!"seconds"(eta).split!("hours", "minutes", "seconds")(h, m, s); + etaString = format!"| ETA %02d:%02d:%02d"( h, m, s); + string percentage = leftJustify(to!string(currentDLPercent) ~ "%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + } else { + // 100% done + ulong end_unix_time = Clock.currTime.toUnixTime(); + auto download_duration = cast(int)(end_unix_time - start_unix_time); + dur!"seconds"(download_duration).split!("hours", "minutes", "seconds")(h, m, s); + etaString = format!"| DONE in %02d:%02d:%02d"( h, m, s); + string percentage = leftJustify(to!string(currentDLPercent) ~ "%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + } + + // update values + addLogEntry("Setting previousProgressPercent to " ~ to!string(currentDLPercent), ["debug"]); + previousProgressPercent = currentDLPercent; + addLogEntry("Incrementing segmentCount", ["debug"]); + segmentCount++; + } + } else { + // Is currentDLPercent divisible by 5 leaving remainder 0 and does previousProgressPercent not equal currentDLPercent + if ((isIdentical(fmod(currentDLPercent, percentCheck), 0.0)) && (previousProgressPercent != currentDLPercent)) { + // currentDLPercent matches a new increment + addLogEntry("Incrementing Progress Bar using fmod match", ["debug"]); + + // 100% check + if (currentDLPercent != 100) { + // Not 100% yet + // Calculate the output + segmentCount++; + auto eta = calc_eta(segmentCount, expected_total_segments, start_unix_time); + dur!"seconds"(eta).split!("hours", "minutes", "seconds")(h, m, s); + etaString = format!"| ETA %02d:%02d:%02d"( h, m, s); + string percentage = leftJustify(to!string(currentDLPercent) ~ "%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + } else { + // 100% done + ulong end_unix_time = Clock.currTime.toUnixTime(); + auto download_duration = cast(int)(end_unix_time - start_unix_time); + dur!"seconds"(download_duration).split!("hours", "minutes", "seconds")(h, m, s); + etaString = format!"| DONE in %02d:%02d:%02d"( h, m, s); + string percentage = leftJustify(to!string(currentDLPercent) ~
"%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + } + + // update values + previousProgressPercent = currentDLPercent; + } } } else { - // Is currentDLPercent divisible by 5 leaving remainder 0 and does previousProgressPercent not equal currentDLPercent - if ((isIdentical(fmod(currentDLPercent, percentCheck), 0.0)) && (previousProgressPercent != currentDLPercent)) { - // currentDLPercent matches a new increment - log.vdebug("Incrementing Progress Bar using fmod match"); - // Downloading 50% |oooooooooooooooooooo | ETA 00:01:40 - // increment progress bar - p.next(); - // update values - previousProgressPercent = currentDLPercent; + if ((currentDLPercent == 0) && (!barInit)) { + // Calculate the output + segmentCount++; + etaString = "| ETA --:--:--"; + string percentage = leftJustify(to!string(currentDLPercent) ~ "%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + barInit = true; } } - } else { - if ((currentDLPercent == 0) && (!barInit)) { - // Initialise the download bar at 0% - // Downloading 0% | | ETA --:--:--: - p.next(); - barInit = true; - } - } - return 0; - }; - - // Perform download & display progress bar - try { - // try and catch any curl error - http.perform(); - // Check the HTTP Response headers - needed for correct 429 handling - // check will be performed in checkHttpCode() - writeln(); - // Reset onProgress to not display anything for next download done using exit scope - } catch (CurlException e) { - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - // free progress bar memory - p = null; - } else { - // No progress bar - try { - // try and catch any curl error - http.perform(); - // Check the HTTP Response headers - needed for correct 429 handling - // check will be performed in checkHttpCode() - } catch (CurlException e) { - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + return 0; + }; + } else { + // No progress bar } - } - - // Rename downloaded file - rename(downloadFilename, originalFilename); - - // Check the HTTP response code, which, if a 429, will also check response headers - checkHttpCode(); - } - - private auto patch(T)(const(char)[] url, const(T)[] patchData) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.patch; - http.url = url; - addAccessTokenHeader(); - auto response = perform(patchData); - checkHttpCode(response); - return response; - } - - private auto post(T)(const(char)[] url, const(T)[] postData) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.post; - http.url = url; - addAccessTokenHeader(); - auto response = perform(postData); - checkHttpCode(response); - return response; + + return curlEngine.download(originalFilename, downloadFilename); + }, validateJSONResponse, callingFunction, lineno); } - private auto move(T)(const(char)[] url, const(T)[] postData) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.patch; - http.url = url; - addAccessTokenHeader(); - auto response = perform(postData); - // Check the HTTP response code, which, if a 429, will also check response headers - checkHttpCode(); - return response; + private JSONValue get(string url, bool skipToken = false, string[string] requestHeaders=null, string callingFunction=__FUNCTION__, int lineno=__LINE__) { + bool validateJSONResponse = true; + return oneDriveErrorHandlerWrapper((CurlResponse response) { + connect(HTTP.Method.get, url, skipToken, response, requestHeaders); + return curlEngine.execute(); + }, 
validateJSONResponse, callingFunction, lineno); } - private JSONValue upload(string filepath, string url) - { - checkAccessTokenExpired(); - // open file as read-only in binary mode - auto file = File(filepath, "rb"); - - // function scopes - scope(exit) { - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; - // close file if open - if (file.isOpen()){ - // close open file - file.close(); - } - } - - http.method = HTTP.Method.put; - http.url = url; - addAccessTokenHeader(); - http.addRequestHeader("Content-Type", "application/octet-stream"); - http.onSend = data => file.rawRead(data).length; - http.contentLength = file.size; - auto response = perform(); - checkHttpCode(response); - return response; + private JSONValue patch(const(char)[] url, const(char)[] patchData, string[string] requestHeaders=null, const(char)[] contentType = "application/json", string callingFunction=__FUNCTION__, int lineno=__LINE__) { + bool validateJSONResponse = true; + return oneDriveErrorHandlerWrapper((CurlResponse response) { + connect(HTTP.Method.patch, url, false, response, requestHeaders); + curlEngine.setContent(contentType, patchData); + return curlEngine.execute(); + }, validateJSONResponse, callingFunction, lineno); } - private JSONValue perform(const(void)[] sendData) - { - scope(exit) { - http.onSend = null; - http.contentLength = 0; - } - if (sendData) { - http.contentLength = sendData.length; - http.onSend = (void[] buf) { - import std.algorithm: min; - size_t minLen = min(buf.length, sendData.length); - if (minLen == 0) return 0; - buf[0 .. minLen] = sendData[0 .. minLen]; - sendData = sendData[minLen .. $]; - return minLen; - }; - } else { - http.onSend = buf => 0; - } - auto response = perform(); - return response; + private JSONValue post(const(char)[] url, const(char)[] postData, bool skipToken = false, const(char)[] contentType = "application/json", string callingFunction=__FUNCTION__, int lineno=__LINE__) { + bool validateJSONResponse = true; + return oneDriveErrorHandlerWrapper((CurlResponse response) { + connect(HTTP.Method.post, url, skipToken, response); + curlEngine.setContent(contentType, postData); + return curlEngine.execute(); + }, validateJSONResponse, callingFunction, lineno); } - - private JSONValue perform() - { - scope(exit) http.onReceive = null; - char[] content; - JSONValue json; - - http.onReceive = (ubyte[] data) { - content ~= data; - // HTTP Server Response Code Debugging if --https-debug is being used - if (.debugResponse){ - log.vdebug("onedrive.perform() => OneDrive HTTP Server Response: ", http.statusLine.code); - } - return data.length; - }; - - try { - http.perform(); - // Check the HTTP Response headers - needed for correct 429 handling - checkHTTPResponseHeaders(); - } catch (CurlException e) { - // Parse and display error message received from OneDrive - log.vdebug("onedrive.perform() Generated a OneDrive CurlException"); - auto errorArray = splitLines(e.msg); - string errorMessage = errorArray[0]; + + private JSONValue put(const(char)[] url, string filepath, bool skipToken=false, string contentRange=null, ulong offset=0, ulong offsetSize=0, string callingFunction=__FUNCTION__, int lineno=__LINE__) { + bool validateJSONResponse = true; + return oneDriveErrorHandlerWrapper((CurlResponse response) { + connect(HTTP.Method.put, url, skipToken, response); + curlEngine.setFile(filepath, contentRange, offset, offsetSize); + return curlEngine.execute(); + 
}, validateJSONResponse, callingFunction, lineno); + } + + // Wrapper function for all requests to OneDrive API + // - This should throw a OneDriveException so that this exception can be handled appropriately elsewhere in the application + private JSONValue oneDriveErrorHandlerWrapper(CurlResponse delegate(CurlResponse response) executer, bool validateJSONResponse, string callingFunction, int lineno) { + // Create a new 'curl' response + response = new CurlResponse(); + + // Other wrapper variables + int retryAttempts = 0; + int baseBackoffInterval = 1; // Base backoff interval in seconds + int maxRetryCount = 175200; // Approx 365 days based on maxBackoffInterval + appConfig.defaultDataTimeout + //int maxRetryCount = 5; // Temp + int maxBackoffInterval = 120; // Maximum backoff interval in seconds + int thisBackOffInterval = 0; + int timestampAlign = 0; + JSONValue result; + SysTime currentTime; + SysTime retryTime; + bool retrySuccess = false; + bool transientError = false; + + while (!retrySuccess) { + // Reset thisBackOffInterval + thisBackOffInterval = 0; + transientError = false; - // what is contained in the curl error message? - if (canFind(errorMessage, "Couldn't connect to server on handle") || canFind(errorMessage, "Couldn't resolve host name on handle") || canFind(errorMessage, "Timeout was reached on handle")) { - // This is a curl timeout - // or is this a 408 request timeout - // https://github.com/abraunegg/onedrive/issues/694 - // Back off & retry with incremental delay - int retryCount = 10000; - int retryAttempts = 0; - int backoffInterval = 0; - int maxBackoffInterval = 3600; - int timestampAlign = 0; - bool retrySuccess = false; - SysTime currentTime; + if (retryAttempts >= 1) { + // re-try log entry & clock time + retryTime = Clock.currTime(); + retryTime.fracSecs = Duration.zero; + addLogEntry("Retrying the respective Microsoft Graph API call ... (" ~ to!string(retryTime) ~ ")"); + } + + try { + response.reset(); + response = executer(response); + // Check for a valid response + if (response.hasResponse) { + // Process the response + result = response.json(); + // Print response if 'debugResponse' is flagged + if (debugResponse){ + addLogEntry("Microsoft Graph API Response: " ~ response.dumpResponse(), ["debug"]); + } + // Check http response code, raise a OneDriveException if the operation was not successfully performed + // '100 != 2' This condition checks if the response code does not start with 2. In the context of HTTP response codes, the 2xx series represents successful responses. + // '!= 302' This condition explicitly checks if the response code is not 302. The 302 status code represents a temporary redirect, indicating that the requested resource has been temporarily moved to a different URI. + if (response.statusLine.code / 100 != 2 && response.statusLine.code != 302) { + // Not a 2xx or 302 response code + // Every other HTTP status code, including those from the 1xx (Informational), 3xx (other Redirection codes excluding 302), 4xx (Client Error), and 5xx (Server Error) series, will trigger the following line of code. + throw new OneDriveException(response.statusLine.code, response.statusLine.reason, response); + } + // Do we need to validate the JSON response? 
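
As an aside before the JSON validation step that follows: the `code / 100 != 2` test above relies on integer division collapsing any HTTP status code to its class digit, with 302 whitelisted separately. A small standalone illustration (values are arbitrary sample codes):

    import std.stdio : writeln;

    void main() {
        // code / 100 yields the HTTP status class: 2 for every 2xx success code
        foreach (code; [200, 201, 204, 302, 304, 404, 503]) {
            bool raise = (code / 100 != 2) && (code != 302);
            writeln(code, raise ? " -> raise OneDriveException" : " -> accept");
        }
        // 200, 201, 204 and 302 are accepted; 304, 404 and 503 would be thrown
    }
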
+ if (validateJSONResponse) { + if (result.type() != JSONType.object) { + throw new OneDriveException(0, "Caller requested a non-null JSON response, got null instead", response); + } + } + + // If retryAttempts is greater than 1, it means we were re-trying the request + if (retryAttempts > 1) { + // No error from http.perform() on re-try + if (!transientError) { + // Log that Internet access has been restored + addLogEntry("Internet connectivity to Microsoft OneDrive service has been restored"); + } + // unset the fresh connect option as this then creates performance issues if left enabled + addLogEntry("Unsetting libcurl to use a fresh connection as this causes a performance impact if left enabled", ["debug"]); + curlEngine.http.handle.set(CurlOption.fresh_connect,0); + } + + // On successful processing, break out of the loop + break; + } else { + // Throw a custom 506 error + // Whilst this error code is a bit more esoteric and typically involves content negotiation issues that lead to a configuration error on the server, it could be loosely + interpreted to signal that the response received didn't meet the expected criteria or format. + throw new OneDriveException(506, "Received an unexpected response from Microsoft OneDrive", response); + } + // A 'curl' exception was thrown + } catch (CurlException exception) { + // Handle 'curl' exception errors - - // what caused the initial curl exception? - if (canFind(errorMessage, "Couldn't connect to server on handle")) log.vdebug("Unable to connect to server - HTTPS access blocked?"); - if (canFind(errorMessage, "Couldn't resolve host name on handle")) log.vdebug("Unable to resolve server - DNS access blocked?"); - if (canFind(errorMessage, "Timeout was reached on handle")) log.vdebug("A timeout was triggered - data too slow, no response ...
use --debug-https to diagnose further"); + // Detail the curl exception, debug output only + addLogEntry("Handling a specific Curl exception:", ["debug"]); + addLogEntry(to!string(response), ["debug"]); - while (!retrySuccess){ - try { - // configure libcurl to perform a fresh connection - log.vdebug("Configuring libcurl to use a fresh connection for re-try"); - http.handle.set(CurlOption.fresh_connect,1); - // try the access - http.perform(); - // Check the HTTP Response headers - needed for correct 429 handling - checkHTTPResponseHeaders(); - // no error from http.perform() on re-try - log.log("Internet connectivity to Microsoft OneDrive service has been restored"); - // unset the fresh connect option as this then creates performance issues if left enabled - log.vdebug("Unsetting libcurl to use a fresh connection as this causes a performance impact if left enabled"); - http.handle.set(CurlOption.fresh_connect,0); - // connectivity restored - retrySuccess = true; - } catch (CurlException e) { - // when was the exception generated - currentTime = Clock.currTime(); - // Increment retry attempts - retryAttempts++; - if (canFind(e.msg, "Couldn't connect to server on handle") || canFind(e.msg, "Couldn't resolve host name on handle") || canFind(errorMessage, "Timeout was reached on handle")) { - // no access to Internet - writeln(); - log.error("ERROR: There was a timeout in accessing the Microsoft OneDrive service - Internet connectivity issue?"); - // what is the error reason to assis the user as what to check - if (canFind(e.msg, "Couldn't connect to server on handle")) { - log.log(" - Check HTTPS access or Firewall Rules"); - timestampAlign = 9; - } - if (canFind(e.msg, "Couldn't resolve host name on handle")) { - log.log(" - Check DNS resolution or Firewall Rules"); - timestampAlign = 0; - } - - // increment backoff interval - backoffInterval++; - int thisBackOffInterval = retryAttempts*backoffInterval; - - // display retry information - currentTime.fracSecs = Duration.zero; - auto timeString = currentTime.toString(); - log.vlog(" Retry attempt: ", retryAttempts); - log.vlog(" This attempt timestamp: ", timeString); - if (thisBackOffInterval > maxBackoffInterval) { - thisBackOffInterval = maxBackoffInterval; + // Parse and display error message received from OneDrive + addLogEntry(callingFunction ~ "() - Generated a OneDrive CurlException", ["debug"]); + auto errorArray = splitLines(exception.msg); + string errorMessage = errorArray[0]; + + // Configure libcurl to perform a fresh connection + setFreshConnectOption(); + + // What is contained in the curl error message? + if (canFind(errorMessage, "Couldn't connect to server on handle") || canFind(errorMessage, "Couldn't resolve host name on handle") || canFind(errorMessage, "Timeout was reached on handle")) { + // Connectivity to Microsoft OneDrive was lost + addLogEntry("Internet connectivity to Microsoft OneDrive service has been interrupted .. re-trying in the background"); + + // What caused the initial curl exception? 
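
The substring tests that follow classify libcurl's message text, since libcurl surfaces connectivity failures only through `CurlException.msg`. The decision reduces to something like this sketch (`isRetryableCurlError` is an illustrative helper, not a function in the client; the handle id in the test string is invented):

    import std.algorithm.searching : canFind;

    // The three libcurl messages the client treats as transient connectivity loss
    bool isRetryableCurlError(string errorMessage) {
        return canFind(errorMessage, "Couldn't connect to server on handle")
            || canFind(errorMessage, "Couldn't resolve host name on handle")
            || canFind(errorMessage, "Timeout was reached on handle");
    }

    unittest {
        assert(isRetryableCurlError("Timeout was reached on handle 55e595c73000"));
        assert(!isRetryableCurlError("Problem with the SSL CA cert (path? access rights?) on handle"));
    }
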
+ if (canFind(errorMessage, "Couldn't resolve host name on handle")) addLogEntry("Unable to resolve server - DNS access blocked?", ["debug"]); + if (canFind(errorMessage, "Couldn't connect to server on handle")) addLogEntry("Unable to connect to server - HTTPS access blocked?", ["debug"]); + if (canFind(errorMessage, "Timeout was reached on handle")) { + // Common cause is libcurl trying IPv6 DNS resolution when there are only IPv4 DNS servers available + addLogEntry("A libcurl timeout has been triggered - data transfer too slow, no DNS resolution response, no server response", ["verbose"]); + // There are 3 common causes for this issue: + // 1. Usually poor DNS resolution where libcurl flip/flops to use IPv6 and is unable to resolve + // 2. A device between the user and Microsoft OneDrive is unable to correctly handle HTTP/2 communication + // 3. No Internet access from this system at this point in time + addLogEntry(" - IPv6 DNS resolution issues may be causing timeouts. Consider setting 'ip_protocol_version' to IPv4 to potentially avoid this", ["verbose"]); + addLogEntry(" - HTTP/2 compatibility issues might also be interfering with your system. Use 'force_http_11' to switch to HTTP/1.1 to potentially avoid this", ["verbose"]); + addLogEntry(" - If these options do not resolve this timeout issue, please use --debug-https to diagnose this issue further.", ["verbose"]); + } + } else { + // Some other 'libcurl' error was returned + if (canFind(errorMessage, "Problem with the SSL CA cert (path? access rights?) on handle")) { + // error setting certificate verify locations: + // CAfile: /etc/pki/tls/certs/ca-bundle.crt + // CApath: none + // + // Tell the Curl Engine to bypass SSL check - essentially SSL is passing back a bad value due to 'stdio' compile time option + // Further reading: + // https://github.com/curl/curl/issues/6090 + // https://github.com/openssl/openssl/issues/7536 + // https://stackoverflow.com/questions/45829588/brew-install-fails-curl77-error-setting-certificate-verify + // https://forum.dlang.org/post/vwvkbubufexgeuaxhqfl@forum.dlang.org + + addLogEntry("Problem with reading the local SSL CA cert via libcurl - please repair your system SSL CA Certificates"); + throw new OneDriveError("OneDrive operation encountered an issue with libcurl reading the local SSL CA Certificates"); + } else { + // Was this a curl initialization error? + if (canFind(errorMessage, "Failed initialization on handle")) { + // initialization error ... 
prevent a run-away process if we have zero disk space + ulong localActualFreeSpace = getAvailableDiskSpace("."); + if (localActualFreeSpace == 0) { + throw new OneDriveError("Zero disk space detected"); } - - // detail when the next attempt will be tried - // factor in the delay for curl to generate the exception - otherwise the next timestamp appears to be 'out' even though technically correct - auto nextRetry = currentTime + dur!"seconds"(thisBackOffInterval) + dur!"seconds"(timestampAlign); - log.vlog(" Next retry in approx: ", (thisBackOffInterval + timestampAlign), " seconds"); - log.vlog(" Next retry approx: ", nextRetry); - - // thread sleep - Thread.sleep(dur!"seconds"(thisBackOffInterval)); - } - if (retryAttempts == retryCount) { - // we have attempted to re-connect X number of times - // false set this to true to break out of while loop - retrySuccess = true; + } else { + // Unknown error + displayGeneralErrorMessage(exception, callingFunction, lineno); } } } - if (retryAttempts >= retryCount) { - log.error(" ERROR: Unable to reconnect to the Microsoft OneDrive service after ", retryCount, " attempts lasting over 1.2 years!"); - throw new OneDriveException(408, "Request Timeout - HTTP 408 or Internet down?"); - } - } else { - // Log that an error was returned - log.error("ERROR: OneDrive returned an error with the following message:"); - // Some other error was returned - log.error(" Error Message: ", errorMessage); - log.error(" Calling Function: ", getFunctionName!({})); + // A OneDrive API exception was thrown + } catch (OneDriveException exception) { + // https://developer.overdrive.com/docs/reference-guide + // https://learn.microsoft.com/en-us/onedrive/developer/rest-api/concepts/errors?view=odsp-graph-online + // https://learn.microsoft.com/en-us/graph/errors - // Was this a curl initialization error? - if (canFind(errorMessage, "Failed initialization on handle")) { - // initialization error ... prevent a run-away process if we have zero disk space - ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace(".")); - if (localActualFreeSpace == 0) { - // force exit - shutdown(); - exit(-1); - } + /** + HTTP/1.1 Response handling + + Errors in the OneDrive API are returned using standard HTTP status codes, as well as a JSON error response object. The following HTTP status codes should be expected. + + Status code Status message Description + 100 Continue Continue + 200 OK Request was handled OK + 201 Created This means you've made a successful POST to checkout, lock in a format, or place a hold + 204 No Content This means you've made a successful DELETE to remove a hold or return a title + + 400 Bad Request Cannot process the request because it is malformed or incorrect. + 401 Unauthorized Required authentication information is either missing or not valid for the resource. + 403 Forbidden Access is denied to the requested resource. The user might not have enough permission. + 404 Not Found The requested resource doesn’t exist. + 405 Method Not Allowed The HTTP method in the request is not allowed on the resource. + 406 Not Acceptable This service doesn’t support the format requested in the Accept header. + 408 Request Time out CUSTOM ERROR - Not expected from OneDrive, but can be used to handle Internet connection failures the same (fallback and try again) + 409 Conflict The current state conflicts with what the request expects. For example, the specified parent folder might not exist. + 410 Gone The requested resource is no longer available at the server. 
+ 411 Length Required A Content-Length header is required on the request. + 412 Precondition Failed A precondition provided in the request (such as an if-match header) does not match the resource's current state. + 413 Request Entity Too Large The request size exceeds the maximum limit. + 415 Unsupported Media Type The content type of the request is a format that is not supported by the service. + 416 Requested Range Not Satisfiable The specified byte range is invalid or unavailable. + 422 Unprocessable Entity Cannot process the request because it is semantically incorrect. + 423 Locked The file is currently checked out or locked for editing by another user + 429 Too Many Requests Client application has been throttled and should not attempt to repeat the request until an amount of time has elapsed. + + 500 Internal Server Error There was an internal server error while processing the request. + 501 Not Implemented The requested feature isn’t implemented. + 502 Bad Gateway The service was unreachable + 503 Service Unavailable The service is temporarily unavailable. You may repeat the request after a delay. There may be a Retry-After header. + 504 Gateway Timeout The server, which is acting as a gateway or proxy, did not receive a timely response from an upstream server it needed to access in order to complete the request + 506 Variant Also Negotiates CUSTOM ERROR - Received an unexpected response from Microsoft OneDrive + 507 Insufficient Storage The maximum storage quota has been reached. + 509 Bandwidth Limit Exceeded Your app has been throttled for exceeding the maximum bandwidth cap. Your app can retry the request again after more time has elapsed. + + HTTP/2 Response handling + + 0 OK + + **/ + + // Detail the OneDriveAPI exception, debug output only + addLogEntry("Handling a OneDrive API exception:", ["debug"]); + addLogEntry(to!string(response), ["debug"]); + + // Parse and display error message received from OneDrive + addLogEntry(callingFunction ~ "() - Generated a OneDriveException", ["debug"]); + + // Configure libcurl to perform a fresh connection on API retry + setFreshConnectOption(); + + // Perform action based on the HTTP Status Code + switch(exception.httpStatusCode) { + + // 0 - OK ... 
HTTP/2 version of 200 OK + case 0: + break; + // 100 - Continue + case 100: + break; + // 408 - Request Time Out + // 429 - Too Many Requests, backoff + case 408,429: + // If OneDrive sends a status code 429 then this function will be used to process the Retry-After response header which contains the value by which we need to wait + if (exception.httpStatusCode == 408) { + addLogEntry("Handling a Microsoft Graph API HTTP 408 Response Code (Request Time Out) - Internal Thread ID: " ~ to!string(curlEngine.internalThreadId)); + } else { + addLogEntry("Handling a Microsoft Graph API HTTP 429 Response Code (Too Many Requests) - Internal Thread ID: " ~ to!string(curlEngine.internalThreadId)); + } + // Read in the Retry-After HTTP header as set and delay as per this value before retrying the request + thisBackOffInterval = response.getRetryAfterValue(); + addLogEntry("Using Retry-After Value = " ~ to!string(thisBackOffInterval), ["debug"]); + transientError = true; + break; + // Transient errors + // 503 - Service Unavailable + // 504 - Gateway Timeout + case 503,504: + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query the Microsoft Graph API Service - retrying applicable request in 30 seconds - Internal Thread ID: " ~ to!string(curlEngine.internalThreadId)); + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + // Transient error - try again in 30 seconds + thisBackOffInterval = 30; + transientError = true; + break; + // Default + default: + // This exception should be then passed back to the original calling function for handling a OneDriveException + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); } + + // A FileSystem exception was thrown + } catch (ErrnoException exception) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(exception.msg, callingFunction); + throw new OneDriveException(0, "There was a file system error during OneDrive request: " ~ exception.msg, response); } - // return an empty JSON for handling - return json; - } - - try { - json = content.parseJSON(); - } catch (JSONException e) { - // Log that a JSON Exception was caught, dont output the HTML response from OneDrive - log.vdebug("JSON Exception caught when performing HTTP operations - use --debug-https to diagnose further"); - } - return json; - } - - private void checkHTTPResponseHeaders() - { - // Get the HTTP Response headers - needed for correct 429 handling - auto responseHeaders = http.responseHeaders(); - if (.debugResponse){ - log.vdebug("http.perform() => HTTP Response Headers: ", responseHeaders); - } - - // is retry-after in the response headers - if ("retry-after" in http.responseHeaders) { - // Set the retry-after value - log.vdebug("http.perform() => Received a 'Retry-After' Header Response with the following value: ", http.responseHeaders["retry-after"]); - log.vdebug("http.perform() => Setting retryAfterValue to: ", http.responseHeaders["retry-after"]); - .retryAfterValue = to!ulong(http.responseHeaders["retry-after"]); - } - } - - private void checkHttpCode() - { - // https://dev.onedrive.com/misc/errors.htm - // 
https://developer.overdrive.com/docs/reference-guide - - /* - HTTP/1.1 Response handling - - Errors in the OneDrive API are returned using standard HTTP status codes, as well as a JSON error response object. The following HTTP status codes should be expected. - - Status code Status message Description - 100 Continue Continue - 200 OK Request was handled OK - 201 Created This means you've made a successful POST to checkout, lock in a format, or place a hold - 204 No Content This means you've made a successful DELETE to remove a hold or return a title - - 400 Bad Request Cannot process the request because it is malformed or incorrect. - 401 Unauthorized Required authentication information is either missing or not valid for the resource. - 403 Forbidden Access is denied to the requested resource. The user might not have enough permission. - 404 Not Found The requested resource doesn’t exist. - 405 Method Not Allowed The HTTP method in the request is not allowed on the resource. - 406 Not Acceptable This service doesn’t support the format requested in the Accept header. - 408 Request Time out Not expected from OneDrive, but can be used to handle Internet connection failures the same (fallback and try again) - 409 Conflict The current state conflicts with what the request expects. For example, the specified parent folder might not exist. - 410 Gone The requested resource is no longer available at the server. - 411 Length Required A Content-Length header is required on the request. - 412 Precondition Failed A precondition provided in the request (such as an if-match header) does not match the resource's current state. - 413 Request Entity Too Large The request size exceeds the maximum limit. - 415 Unsupported Media Type The content type of the request is a format that is not supported by the service. - 416 Requested Range Not Satisfiable The specified byte range is invalid or unavailable. - 422 Unprocessable Entity Cannot process the request because it is semantically incorrect. - 429 Too Many Requests Client application has been throttled and should not attempt to repeat the request until an amount of time has elapsed. - - 500 Internal Server Error There was an internal server error while processing the request. - 501 Not Implemented The requested feature isn’t implemented. - 502 Bad Gateway The service was unreachable - 503 Service Unavailable The service is temporarily unavailable. You may repeat the request after a delay. There may be a Retry-After header. - 507 Insufficient Storage The maximum storage quota has been reached. - 509 Bandwidth Limit Exceeded Your app has been throttled for exceeding the maximum bandwidth cap. Your app can retry the request again after more time has elapsed. - - HTTP/2 Response handling - - 0 OK - - */ - - switch(http.statusLine.code) - { - // 0 - OK ... HTTP2 version of 200 OK - case 0: - break; - // 100 - Continue - case 100: - break; - // 200 - OK - case 200: - // No Log .. - break; - // 201 - Created OK - // 202 - Accepted - // 204 - Deleted OK - case 201,202,204: - // No actions, but log if verbose logging - //log.vlog("OneDrive Response: '", http.statusLine.code, " - ", http.statusLine.reason, "'"); - break; - - // 302 - resource found and available at another location, redirect - case 302: - break; - - // 400 - Bad Request - case 400: - // Bad Request .. how should we act? 
- log.vlog("OneDrive returned a 'HTTP 400 - Bad Request' - gracefully handling error"); - break; - - // 403 - Forbidden - case 403: - // OneDrive responded that the user is forbidden - log.vlog("OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error"); - break; - - // 404 - Item not found - case 404: - // Item was not found - do not throw an exception - log.vlog("OneDrive returned a 'HTTP 404 - Item not found' - gracefully handling error"); - break; - - // 408 - Request Timeout - case 408: - // Request to connect to OneDrive service timed out - log.vlog("Request Timeout - gracefully handling error"); - throw new OneDriveException(408, "Request Timeout - HTTP 408 or Internet down?"); - - // 409 - Conflict - case 409: - // Conflict handling .. how should we act? This only really gets triggered if we are using --local-first & we remove items.db as the DB thinks the file is not uploaded but it is - log.vlog("OneDrive returned a 'HTTP 409 - Conflict' - gracefully handling error"); - break; - - // 412 - Precondition Failed - case 412: - // A precondition provided in the request (such as an if-match header) does not match the resource's current state. - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error"); - break; - - // 415 - Unsupported Media Type - case 415: - // Unsupported Media Type ... sometimes triggered on image files, especially PNG - log.vlog("OneDrive returned a 'HTTP 415 - Unsupported Media Type' - gracefully handling error"); - break; - // 429 - Too Many Requests - case 429: - // Too many requests in a certain time window - // Check the HTTP Response headers - needed for correct 429 handling - checkHTTPResponseHeaders(); - // https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online - log.vlog("OneDrive returned a 'HTTP 429 - Too Many Requests' - gracefully handling error"); - throw new OneDriveException(http.statusLine.code, http.statusLine.reason); - - // Server side (OneDrive) Errors - // 500 - Internal Server Error - // 502 - Bad Gateway - // 503 - Service Unavailable - // 504 - Gateway Timeout (Issue #320) - case 500: - // No actions - log.vlog("OneDrive returned a 'HTTP 500 Internal Server Error' - gracefully handling error"); - break; - - case 502: - // No actions - log.vlog("OneDrive returned a 'HTTP 502 Bad Gateway Error' - gracefully handling error"); - break; - - case 503: - // No actions - log.vlog("OneDrive returned a 'HTTP 503 Service Unavailable Error' - gracefully handling error"); - break; - - case 504: - // No actions - log.vlog("OneDrive returned a 'HTTP 504 Gateway Timeout Error' - gracefully handling error"); - break; - - // "else" - default: - throw new OneDriveException(http.statusLine.code, http.statusLine.reason); - } - } - - private void checkHttpCode(ref const JSONValue response) - { - switch(http.statusLine.code) - { - // 0 - OK ... HTTP2 version of 200 OK - case 0: - break; - // 100 - Continue - case 100: - break; - // 200 - OK - case 200: - // No Log .. - break; - // 201 - Created OK - // 202 - Accepted - // 204 - Deleted OK - case 201,202,204: - // No actions, but log if verbose logging - //log.vlog("OneDrive Response: '", http.statusLine.code, " - ", http.statusLine.reason, "'"); - break; - - // 302 - resource found and available at another location, redirect - case 302: - break; - - // 400 - Bad Request - case 400: - // Bad Request .. how should we act? 
- // make sure this is thrown so that it is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // 403 - Forbidden - case 403: - // OneDrive responded that the user is forbidden - log.vlog("OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error"); - // Throw this as a specific exception so this is caught when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // 412 - Precondition Failed - case 412: - // Throw this as a specific exception so this is caught when performing sync.uploadLastModifiedTime - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // Server side (OneDrive) Errors - // 500 - Internal Server Error - // 502 - Bad Gateway - // 503 - Service Unavailable - // 504 - Gateway Timeout (Issue #320) - case 500: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - case 502: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - case 503: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - case 504: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // Default - all other errors that are not a 2xx or a 302 - default: - if (http.statusLine.code / 100 != 2 && http.statusLine.code != 302) { - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); + // Increment re-try counter + retryAttempts++; + + // Has maxRetryCount been reached? + if (retryAttempts > maxRetryCount) { + addLogEntry("ERROR: Unable to reconnect to the Microsoft OneDrive service after " ~ to!string(retryAttempts) ~ " attempts lasting approximately 365 days"); + throw new OneDriveException(408, "Request Timeout - HTTP 408 or Internet down?", response); + } else { + // Was 'thisBackOffInterval' set by a 429 event? + if (thisBackOffInterval == 0) { + // Calculate and apply exponential backoff up to a maximum of 120 seconds before the API call is re-tried + thisBackOffInterval = calculateBackoff(retryAttempts, baseBackoffInterval, maxBackoffInterval); + } + + // When are we re-trying the API call?
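
With `baseBackoffInterval = 1` and `maxBackoffInterval = 120` as declared earlier in the wrapper, `calculateBackoff()` (defined further below in this hunk) produces the following schedule; a worked example of the formula from the diff, not new behaviour:

    import std.algorithm.comparison : min;
    import std.math : pow;
    import std.stdio : writefln;

    void main() {
        int baseInterval = 1, maxInterval = 120;
        foreach (attempt; 1 .. 10) {
            // Same formula as calculateBackoff() in the diff
            int backoff = min(pow(2, attempt) * baseInterval, maxInterval);
            writefln("attempt %d -> wait %ds", attempt, backoff);
        }
        // 2, 4, 8, 16, 32, 64, 120, 120, 120 ... capped at two minutes
    }
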
+ currentTime = Clock.currTime(); + currentTime.fracSecs = Duration.zero; + auto timeString = currentTime.toString(); + addLogEntry("Retry attempt: " ~ to!string(retryAttempts) ~ " - Internal Thread ID: " ~ to!string(curlEngine.internalThreadId), ["verbose"]); + addLogEntry(" This attempt timestamp: " ~ timeString, ["verbose"]); + // Detail when the next attempt will be tried + // Factor in the delay for curl to generate the exception - otherwise the next timestamp appears to be 'out' even though technically correct + auto nextRetry = currentTime + dur!"seconds"(thisBackOffInterval) + dur!"seconds"(timestampAlign); + addLogEntry(" Next retry in approx: " ~ to!string((thisBackOffInterval + timestampAlign)) ~ " seconds"); + addLogEntry(" Next retry approx: " ~ to!string(nextRetry), ["verbose"]); + + // Thread sleep + Thread.sleep(dur!"seconds"(thisBackOffInterval)); } } - } -} -unittest -{ - string configDirName = expandTilde("~/.config/onedrive"); - auto cfg = new config.Config(configDirName); - cfg.init(); - OneDriveApi onedrive = new OneDriveApi(cfg); - onedrive.init(); - std.file.write("/tmp/test", "test"); - - // simpleUpload - auto item = onedrive.simpleUpload("/tmp/test", "/test"); - try { - item = onedrive.simpleUpload("/tmp/test", "/test"); - } catch (OneDriveException e) { - assert(e.httpStatusCode == 409); + // Return the result + return result; } - try { - item = onedrive.simpleUpload("/tmp/test", "/test", "123"); - } catch (OneDriveException e) { - assert(e.httpStatusCode == 412); + + // Calculates the delay for exponential backoff + private int calculateBackoff(int retryAttempts, int baseInterval, int maxInterval) { + int backoffTime = min(pow(2, retryAttempts) * baseInterval, maxInterval); + return backoffTime; } - item = onedrive.simpleUpload("/tmp/test", "/test", item["eTag"].str); - - // deleteById - try { - onedrive.deleteById(item["id"].str, "123"); - } catch (OneDriveException e) { - assert(e.httpStatusCode == 412); + + // Configure libcurl to perform a fresh connection + private void setFreshConnectOption() { + addLogEntry("Configuring libcurl to use a fresh connection for re-try", ["debug"]); + curlEngine.http.handle.set(CurlOption.fresh_connect,1); } - onedrive.deleteById(item["id"].str, item["eTag"].str); - onedrive.http.shutdown(); -} +} \ No newline at end of file diff --git a/src/progress.d b/src/progress.d deleted file mode 100644 index 9277ae121..000000000 --- a/src/progress.d +++ /dev/null @@ -1,156 +0,0 @@ -module progress; - -import std.stdio; -import std.range; -import std.format; -import std.datetime; -import core.sys.posix.unistd; -import core.sys.posix.sys.ioctl; - -class Progress -{ - private: - - immutable static size_t default_width = 80; - size_t max_width = 40; - size_t width = default_width; - - ulong start_time; - string caption = "Progress"; - size_t iterations; - size_t counter; - - - size_t getTerminalWidth() { - size_t column = default_width; - version (CRuntime_Musl) { - } else version(Android) { - } else { - winsize ws; - if(ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) != -1 && ws.ws_col > 0) { - column = ws.ws_col; - } - } - - return column; - } - - - void clear() { - write("\r"); - for(auto i = 0; i < width; i++) write(" "); - write("\r"); - } - - - int calc_eta() { - immutable auto ratio = cast(double)counter / iterations; - auto current_time = Clock.currTime.toUnixTime(); - auto duration = cast(int)(current_time - start_time); - int hours, minutes, seconds; - double elapsed = (current_time - start_time); - int eta_sec = cast(int)((elapsed / 
ratio) - elapsed); - - // Return an ETA or Duration? - if (eta_sec != 0){ - return eta_sec; - } else { - return duration; - } - } - - - string progressbarText(string header_text, string footer_text) { - immutable auto ratio = cast(double)counter / iterations; - string result = ""; - - double bar_length = width - header_text.length - footer_text.length; - if(bar_length > max_width && max_width > 0) { - bar_length = max_width; - } - size_t i = 0; - for(; i < ratio * bar_length; i++) result ~= "o"; - for(; i < bar_length; i++) result ~= " "; - - return header_text ~ result ~ footer_text; - } - - - void print() { - immutable auto ratio = cast(double)counter / iterations; - auto header = appender!string(); - auto footer = appender!string(); - - header.formattedWrite("%s %3d%% |", caption, cast(int)(ratio * 100)); - - if(counter <= 0 || ratio == 0.0) { - footer.formattedWrite("| ETA --:--:--:"); - } else { - int h, m, s; - dur!"seconds"(calc_eta()) - .split!("hours", "minutes", "seconds")(h, m, s); - if (counter != iterations){ - footer.formattedWrite("| ETA %02d:%02d:%02d ", h, m, s); - } else { - footer.formattedWrite("| DONE IN %02d:%02d:%02d ", h, m, s); - } - } - - write(progressbarText(header.data, footer.data)); - } - - - void update() { - width = getTerminalWidth(); - - clear(); - - print(); - stdout.flush(); - } - - - public: - - this(size_t iterations) { - if(iterations <= 0) iterations = 1; - - counter = -1; - this.iterations = iterations; - start_time = Clock.currTime.toUnixTime; - } - - @property { - string title() { return caption; } - string title(string text) { return caption = text; } - } - - @property { - size_t count() { return counter; } - size_t count(size_t val) { - if(val > iterations) val = iterations; - return counter = val; - } - } - - @property { - size_t maxWidth() { return max_width; } - size_t maxWidth(size_t w) { - return max_width = w; - } - } - - void reset() { - counter = -1; - start_time = Clock.currTime.toUnixTime; - } - - void next() { - counter++; - if(counter > iterations) counter = iterations; - - update(); - } - - -} diff --git a/src/qxor.d b/src/qxor.d index 63e8f0f5e..64de204f7 100644 --- a/src/qxor.d +++ b/src/qxor.d @@ -1,7 +1,11 @@ +// What is this module called? +module qxor; + +// What does this module require to function? 
import std.algorithm; import std.digest; -// implementation of the QuickXorHash algorithm in D +// Implementation of the QuickXorHash algorithm in D // https://github.com/OneDrive/onedrive-api-docs/blob/live/docs/code-snippets/quickxorhash.md struct QuickXor { @@ -71,18 +75,4 @@ struct QuickXor } return tmp; } -} - -unittest -{ - assert(isDigest!QuickXor); -} - -unittest -{ - QuickXor qxor; - qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog"); - assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE"); -} - -alias QuickXorDigest = WrapperDigest!(QuickXor); +} \ No newline at end of file diff --git a/src/selective.d b/src/selective.d deleted file mode 100644 index 55be94eb7..000000000 --- a/src/selective.d +++ /dev/null @@ -1,422 +0,0 @@ -import std.algorithm; -import std.array; -import std.file; -import std.path; -import std.regex; -import std.stdio; -import std.string; -import util; -import log; - -final class SelectiveSync -{ - private string[] paths; - private string[] businessSharedFoldersList; - private Regex!char mask; - private Regex!char dirmask; - private bool skipDirStrictMatch = false; - private bool skipDotfiles = false; - - // load sync_list file - void load(string filepath) - { - if (exists(filepath)) { - // open file as read only - auto file = File(filepath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; - paths ~= buildNormalizedPath(line); - } - file.close(); - } - } - - // Configure skipDirStrictMatch if function is called - // By default, skipDirStrictMatch = false; - void setSkipDirStrictMatch() - { - skipDirStrictMatch = true; - } - - // load business_shared_folders file - void loadSharedFolders(string filepath) - { - if (exists(filepath)) { - // open file as read only - auto file = File(filepath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; - businessSharedFoldersList ~= buildNormalizedPath(line); - } - file.close(); - } - } - - void setFileMask(const(char)[] mask) - { - this.mask = wild2regex(mask); - } - - void setDirMask(const(char)[] dirmask) - { - this.dirmask = wild2regex(dirmask); - } - - // Configure skipDotfiles if function is called - // By default, skipDotfiles = false; - void setSkipDotfiles() - { - skipDotfiles = true; - } - - // return value of skipDotfiles - bool getSkipDotfiles() - { - return skipDotfiles; - } - - // config file skip_dir parameter - bool isDirNameExcluded(string name) - { - // Does the directory name match skip_dir config entry? - // Returns true if the name matches a skip_dir config entry - // Returns false if no match - log.vdebug("skip_dir evaluation for: ", name); - - // Try full path match first - if (!name.matchFirst(dirmask).empty) { - log.vdebug("'!name.matchFirst(dirmask).empty' returned true = matched"); - return true; - } else { - // Do we check the base name as well? 
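
Stepping back to the qxor.d hunk above: the unit tests deleted there remain the clearest usage reference for the QuickXorHash implementation, so they are reproduced here verbatim from the removed lines (this snippet assumes the `QuickXor` struct from src/qxor.d is in scope):

    import std.digest;

    unittest {
        assert(isDigest!QuickXor);
    }

    unittest {
        QuickXor qxor;
        qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog");
        assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE");
    }
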
- if (!skipDirStrictMatch) { - log.vdebug("No Strict Matching Enforced"); - - // Test the entire path working backwards from child - string path = buildNormalizedPath(name); - string checkPath; - auto paths = pathSplitter(path); - - foreach_reverse(directory; paths) { - if (directory != "/") { - // This will add a leading '/' but that needs to be stripped to check - checkPath = "/" ~ directory ~ checkPath; - if(!checkPath.strip('/').matchFirst(dirmask).empty) { - log.vdebug("'!checkPath.matchFirst(dirmask).empty' returned true = matched"); - return true; - } - } - } - } else { - log.vdebug("Strict Matching Enforced - No Match"); - } - } - // no match - return false; - } - - // config file skip_file parameter - bool isFileNameExcluded(string name) - { - // Does the file name match skip_file config entry? - // Returns true if the name matches a skip_file config entry - // Returns false if no match - log.vdebug("skip_file evaluation for: ", name); - - // Try full path match first - if (!name.matchFirst(mask).empty) { - return true; - } else { - // check just the file name - string filename = baseName(name); - if(!filename.matchFirst(mask).empty) { - return true; - } - } - // no match - return false; - } - - // Match against sync_list only - bool isPathExcludedViaSyncList(string path) - { - // Debug output that we are performing a 'sync_list' inclusion / exclusion test - return .isPathExcluded(path, paths); - } - - // Match against skip_dir, skip_file & sync_list entries - bool isPathExcludedMatchAll(string path) - { - return .isPathExcluded(path, paths) || .isPathMatched(path, mask) || .isPathMatched(path, dirmask); - } - - // is the path a dotfile? - bool isDotFile(string path) - { - // always allow the root - if (path == ".") return false; - - path = buildNormalizedPath(path); - auto paths = pathSplitter(path); - foreach(base; paths) { - if (startsWith(base, ".")){ - return true; - } - } - return false; - } - - // is business shared folder matched - bool isSharedFolderMatched(string name) - { - // if there are no shared folder always return false - if (businessSharedFoldersList.empty) return false; - - if (!name.matchFirst(businessSharedFoldersList).empty) { - return true; - } else { - // try a direct comparison just in case - foreach (userFolder; businessSharedFoldersList) { - if (userFolder == name) { - // direct match - log.vdebug("'matchFirst' failed to match, however direct comparison was matched: ", name); - return true; - } - } - return false; - } - } - - // is business shared folder included - bool isPathIncluded(string path, string[] allowedPaths) - { - // always allow the root - if (path == ".") return true; - // if there are no allowed paths always return true - if (allowedPaths.empty) return true; - - path = buildNormalizedPath(path); - foreach (allowed; allowedPaths) { - auto comm = commonPrefix(path, allowed); - if (comm.length == path.length) { - // the given path is contained in an allowed path - return true; - } - if (comm.length == allowed.length && path[comm.length] == '/') { - // the given path is a subitem of an allowed path - return true; - } - } - return false; - } -} - -// test if the given path is not included in the allowed paths -// if there are no allowed paths always return false -private bool isPathExcluded(string path, string[] allowedPaths) -{ - // function variables - bool exclude = false; - bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry - bool excludeMatched = false; // will get updated to true, 
if there is a pattern match to sync_list entry - bool finalResult = true; // will get updated to false, if pattern match to sync_list entry - int offset; - string wildcard = "*"; - - // always allow the root - if (path == ".") return false; - // if there are no allowed paths always return false - if (allowedPaths.empty) return false; - path = buildNormalizedPath(path); - log.vdebug("Evaluation against 'sync_list' for this path: ", path); - log.vdebug("[S]exclude = ", exclude); - log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch); - log.vdebug("[S]excludeMatched = ", excludeMatched); - - // unless path is an exact match, entire sync_list entries need to be processed to ensure - // negative matches are also correctly detected - foreach (allowedPath; allowedPaths) { - // is this an inclusion path or finer grained exclusion? - switch (allowedPath[0]) { - case '-': - // sync_list path starts with '-', this user wants to exclude this path - exclude = true; - // If the sync_list entry starts with '-/' offset needs to be 2, else 1 - if (startsWith(allowedPath, "-/")){ - // Offset needs to be 2 - offset = 2; - } else { - // Offset needs to be 1 - offset = 1; - } - break; - case '!': - // sync_list path starts with '!', this user wants to exclude this path - exclude = true; - // If the sync_list entry starts with '!/' offset needs to be 2, else 1 - if (startsWith(allowedPath, "!/")){ - // Offset needs to be 2 - offset = 2; - } else { - // Offset needs to be 1 - offset = 1; - } - break; - case '/': - // sync_list path starts with '/', this user wants to include this path - // but a '/' at the start causes matching issues, so use the offset for comparison - exclude = false; - offset = 1; - break; - - default: - // no negative pattern, default is to not exclude - exclude = false; - offset = 0; - } - - // What are we comparing against? - log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath); - - // Generate the common prefix from the path vs the allowed path - auto comm = commonPrefix(path, allowedPath[offset..$]); - - // Is path is an exact match of the allowed path? 
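
The exact-match and sub-item checks below all pivot on `commonPrefix`; in miniature (a standalone sketch with invented paths):

    import std.algorithm : commonPrefix;
    import std.stdio : writeln;

    void main() {
        string path = "Documents/Reports";
        string allowed = "Documents";
        auto comm = commonPrefix(path, allowed);
        // comm covers all of 'allowed' and the next character of 'path' is '/',
        // so 'path' is a sub-item of the allowed path
        writeln(comm.length == allowed.length && path[comm.length] == '/'); // true
    }
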
- if (comm.length == path.length) { - // we have a potential exact match - // strip any potential '/*' from the allowed path, to avoid a potential lesser common match - string strippedAllowedPath = strip(allowedPath[offset..$], "/*"); - - if (path == strippedAllowedPath) { - // we have an exact path match - log.vdebug("exact path match"); - if (!exclude) { - log.vdebug("Evaluation against 'sync_list' result: direct match"); - finalResult = false; - // direct match, break and go sync - break; - } else { - log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded"); - // do not set excludeMatched = true here, otherwise parental path also gets excluded - // flag exludeDirectMatch so that a 'wildcard match' will not override this exclude - exludeDirectMatch = true; - // final result - finalResult = true; - } - } else { - // no exact path match, but something common does match - log.vdebug("something 'common' matches the input path"); - auto splitAllowedPaths = pathSplitter(strippedAllowedPath); - string pathToEvaluate = ""; - foreach(base; splitAllowedPaths) { - pathToEvaluate ~= base; - if (path == pathToEvaluate) { - // The input path matches what we want to evaluate against as a direct match - if (!exclude) { - log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item"); - finalResult = false; - // direct match, break and go sync - break; - } else { - log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded"); - finalResult = true; - // do not set excludeMatched = true here, otherwise parental path also gets excluded - } - } - pathToEvaluate ~= dirSeparator; - } - } - } - - // Is path is a subitem/sub-folder of the allowed path? - if (comm.length == allowedPath[offset..$].length) { - // The given path is potentially a subitem of an allowed path - // We want to capture sub-folders / files of allowed paths here, but not explicitly match other items - // if there is no wildcard - auto subItemPathCheck = allowedPath[offset..$] ~ "/"; - if (canFind(path, subItemPathCheck)) { - // The 'path' includes the allowed path, and is 'most likely' a sub-path item - if (!exclude) { - log.vdebug("Evaluation against 'sync_list' result: parental path match"); - finalResult = false; - // parental path matches, break and go sync - break; - } else { - log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded"); - finalResult = true; - excludeMatched = true; - } - } - } - - // Does the allowed path contain a wildcard? 
(*) - if (canFind(allowedPath[offset..$], wildcard)) { - // allowed path contains a wildcard - // manually replace '*' for '.*' to be compatible with regex - string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*"); - auto allowedMask = regex(regexCompatiblePath); - if (matchAll(path, allowedMask)) { - // regex wildcard evaluation matches - // if we have a prior pattern match for an exclude, excludeMatched = true - if (!exclude && !excludeMatched && !exludeDirectMatch) { - // nothing triggered an exclusion before evaluation against wildcard match attempt - log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match"); - finalResult = false; - } else { - log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded"); - finalResult = true; - excludeMatched = true; - } - } - } - } - // Interim results - log.vdebug("[F]exclude = ", exclude); - log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch); - log.vdebug("[F]excludeMatched = ", excludeMatched); - - // If exclude or excludeMatched is true, then finalResult has to be true - if ((exclude) || (excludeMatched) || (exludeDirectMatch)) { - finalResult = true; - } - - // results - if (finalResult) { - log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED"); - } else { - log.vdebug("Evaluation against 'sync_list' final result: included for sync"); - } - return finalResult; -} - -// test if the given path is matched by the regex expression. -// recursively test up the tree. -private bool isPathMatched(string path, Regex!char mask) { - path = buildNormalizedPath(path); - auto paths = pathSplitter(path); - - string prefix = ""; - foreach(base; paths) { - prefix ~= base; - if (!path.matchFirst(mask).empty) { - // the given path matches something which we should skip - return true; - } - prefix ~= dirSeparator; - } - return false; -} - -// unit tests -unittest -{ - assert(isPathExcluded("Documents2", ["Documents"])); - assert(!isPathExcluded("Documents", ["Documents"])); - assert(!isPathExcluded("Documents/a.txt", ["Documents"])); - assert(isPathExcluded("Hello/World", ["Hello/John"])); - assert(!isPathExcluded(".", ["Documents"])); -} diff --git a/src/sqlite.d b/src/sqlite.d index 5e1839ece..b78bb5fb0 100644 --- a/src/sqlite.d +++ b/src/sqlite.d @@ -1,100 +1,108 @@ +// What is this module called? module sqlite; + +// What does this module require to function? import std.stdio; import etc.c.sqlite3; import std.string: fromStringz, toStringz; import core.stdc.stdlib; import std.conv; -static import log; + +// What other modules that we have created do we need to import? 
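The reworked SqliteException below gains an errorCode member, so the raw SQLite result code travels with the message rather than being discarded. A minimal sketch of a caller making use of it; the call site is hypothetical and db is assumed to be an open Database:

// Hypothetical caller: inspect the SQLite result code carried by the exception
try {
	auto stmt = db.prepare("SELECT id FROM item WHERE driveId = ?");
} catch (SqliteException e) {
	// e.errorCode holds the raw SQLite result code (for example, SQLITE_BUSY is 5)
	addLogEntry("SQLite error " ~ to!string(e.errorCode) ~ ": " ~ e.msg);
}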
+import log; +import util; extern (C) immutable(char)* sqlite3_errstr(int); // missing from the std library -static this() -{ +static this() { if (sqlite3_libversion_number() < 3006019) { - throw new SqliteException("sqlite 3.6.19 or newer is required"); + throw new SqliteException(-1,"sqlite 3.6.19 or newer is required"); } } -private string ifromStringz(const(char)* cstr) -{ +private string ifromStringz(const(char)* cstr) { return fromStringz(cstr).dup; } -class SqliteException: Exception -{ - @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null) +class SqliteException: Exception { + int errorCode; // Add an errorCode member to store the SQLite error code + @safe pure nothrow this(int errorCode, string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null) { super(msg, file, line, next); + this.errorCode = errorCode; // Set the errorCode } - @safe pure nothrow this(string msg, Throwable next, string file = __FILE__, size_t line = __LINE__) + @safe pure nothrow this(int errorCode, string msg, Throwable next, string file = __FILE__, size_t line = __LINE__) { super(msg, file, line, next); + this.errorCode = errorCode; // Set the errorCode } } -struct Database -{ +struct Database { private sqlite3* pDb; - this(const(char)[] filename) - { + this(const(char)[] filename) { open(filename); } - ~this() - { + ~this() { close(); } - int db_checkpoint() - { + int db_checkpoint() { return sqlite3_wal_checkpoint(pDb, null); } - void dump_open_statements() - { - log.log("Dumpint open statements: \n"); + void dump_open_statements() { + addLogEntry("Dumping open statements:", ["debug"]); auto p = sqlite3_next_stmt(pDb, null); while (p != null) { - log.log (" - " ~ ifromStringz(sqlite3_sql(p)) ~ "\n"); + addLogEntry(" - " ~ to!string(ifromStringz(sqlite3_sql(p)))); p = sqlite3_next_stmt(pDb, p); } } - void open(const(char)[] filename) - { + void open(const(char)[] filename) { // https://www.sqlite.org/c3ref/open.html int rc = sqlite3_open(toStringz(filename), &pDb); if (rc == SQLITE_CANTOPEN) { // Database cannot be opened - log.error("\nThe database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3\n"); + addLogEntry(); + addLogEntry("The database cannot be opened. 
Please check the permissions of " ~ to!string(filename)); + addLogEntry(); close(); - exit(-1); + // Must force exit here, allow logging to be done + forceExit(); } if (rc != SQLITE_OK) { - log.error("\nA database access error occurred: " ~ getErrorMessage() ~ "\n"); + addLogEntry(); + addLogEntry("A database access error occurred: " ~ getErrorMessage()); + addLogEntry(); close(); - exit(-1); + // Must force exit here, allow logging to be done + forceExit(); } sqlite3_extended_result_codes(pDb, 1); // always use extended result codes } - void exec(const(char)[] sql) - { + void exec(const(char)[] sql) { // https://www.sqlite.org/c3ref/exec.html int rc = sqlite3_exec(pDb, toStringz(sql), null, null, null); if (rc != SQLITE_OK) { - log.error("\nA database execution error occurred: "~ getErrorMessage() ~ "\n"); - log.error("Please retry your command with --resync to fix any local database corruption issues.\n"); + addLogEntry(); + addLogEntry("A database execution error occurred: "~ getErrorMessage()); + addLogEntry(); + addLogEntry("Please retry your command with --resync to fix any local database corruption issues."); + addLogEntry(); close(); - exit(-1); + // Must force exit here, allow logging to be done + forceExit(); } } - int getVersion() - { + int getVersion() { int userVersion; extern (C) int callback(void* user_version, int count, char** column_text, char** column_name) { import core.stdc.stdlib: atoi; @@ -103,73 +111,69 @@ struct Database } int rc = sqlite3_exec(pDb, "PRAGMA user_version", &callback, &userVersion, null); if (rc != SQLITE_OK) { - throw new SqliteException(ifromStringz(sqlite3_errmsg(pDb))); + throw new SqliteException(rc, ifromStringz(sqlite3_errmsg(pDb))); } return userVersion; } + + int getThreadsafeValue() { + // Get the threadsafe value + auto threadsafeValue = sqlite3_threadsafe(); + return threadsafeValue; + } - string getErrorMessage() - { + string getErrorMessage() { return ifromStringz(sqlite3_errmsg(pDb)); } - void setVersion(int userVersion) - { + void setVersion(int userVersion) { import std.conv: to; exec("PRAGMA user_version=" ~ to!string(userVersion)); } - Statement prepare(const(char)[] zSql) - { + Statement prepare(const(char)[] zSql) { Statement s; // https://www.sqlite.org/c3ref/prepare.html int rc = sqlite3_prepare_v2(pDb, zSql.ptr, cast(int) zSql.length, &s.pStmt, null); if (rc != SQLITE_OK) { - throw new SqliteException(ifromStringz(sqlite3_errmsg(pDb))); + throw new SqliteException(rc, ifromStringz(sqlite3_errmsg(pDb))); } return s; } - void close() - { + void close() { // https://www.sqlite.org/c3ref/close.html sqlite3_close_v2(pDb); pDb = null; } } -struct Statement -{ - struct Result - { +struct Statement { + struct Result { private sqlite3_stmt* pStmt; private const(char)[][] row; - private this(sqlite3_stmt* pStmt) - { + private this(sqlite3_stmt* pStmt) { this.pStmt = pStmt; step(); // initialize the range } - @property bool empty() - { + @property bool empty() { return row.length == 0; } - @property auto front() - { + @property auto front() { return row; } alias step popFront; - void step() - { + void step() { // https://www.sqlite.org/c3ref/step.html int rc = sqlite3_step(pStmt); if (rc == SQLITE_BUSY) { // Database is locked by another onedrive process - log.error("The database is currently locked by another process - cannot sync"); + addLogEntry("The database is currently locked by another process - cannot sync"); return; } if (rc == SQLITE_DONE) { @@ -185,72 +189,43 @@ struct Statement } } else { string errorMessage = 
ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))); - log.error("\nA database statement execution error occurred: "~ errorMessage ~ "\n"); - log.error("Please retry your command with --resync to fix any local database corruption issues.\n"); - exit(-1); + addLogEntry(); + addLogEntry("A database statement execution error occurred: "~ errorMessage); + addLogEntry(); + addLogEntry("Please retry your command with --resync to fix any local database corruption issues."); + addLogEntry(); + // Must force exit here, allow logging to be done + forceExit(); } } } private sqlite3_stmt* pStmt; - ~this() - { + ~this() { // https://www.sqlite.org/c3ref/finalize.html sqlite3_finalize(pStmt); } - void bind(int index, const(char)[] value) - { + void bind(int index, const(char)[] value) { reset(); // https://www.sqlite.org/c3ref/bind_blob.html int rc = sqlite3_bind_text(pStmt, index, value.ptr, cast(int) value.length, SQLITE_STATIC); if (rc != SQLITE_OK) { - throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt)))); + throw new SqliteException(rc, ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt)))); } } - Result exec() - { + Result exec() { reset(); return Result(pStmt); } - private void reset() - { + private void reset() { // https://www.sqlite.org/c3ref/reset.html int rc = sqlite3_reset(pStmt); if (rc != SQLITE_OK) { - throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt)))); + throw new SqliteException(rc, ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt)))); } } -} - -unittest -{ - auto db = Database(":memory:"); - db.exec("CREATE TABLE test( - id TEXT PRIMARY KEY, - value TEXT - )"); - - assert(db.getVersion() == 0); - db.setVersion(1); - assert(db.getVersion() == 1); - - auto s = db.prepare("INSERT INTO test VALUES (?, ?)"); - s.bind(1, "key1"); - s.bind(2, "value"); - s.exec(); - s.bind(1, "key2"); - s.bind(2, null); - s.exec(); - - s = db.prepare("SELECT * FROM test ORDER BY id ASC"); - auto r = s.exec(); - assert(r.front[0] == "key1"); - r.popFront(); - assert(r.front[1] == null); - r.popFront(); - assert(r.empty); -} +} \ No newline at end of file diff --git a/src/sync.d b/src/sync.d index 346d8c00c..eaac8777c 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1,3035 +1,2364 @@ +// What is this module called? +module syncEngine; + +// What does this module require to function? 
+import core.memory; +import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; +import core.thread; +import core.time; import std.algorithm; -import std.array: array; -import std.datetime; -import std.exception: enforce; -import std.file, std.json, std.path; -import std.regex; -import std.stdio, std.string, std.uni, std.uri; +import std.array; +import std.concurrency; +import std.container.rbtree; import std.conv; +import std.datetime; import std.encoding; -import core.time, core.thread; -import core.stdc.stdlib; -import config, itemdb, onedrive, selective, upload, util; -static import log; - -// threshold after which files will be uploaded using an upload session -private long thresholdFileSize = 4 * 2^^20; // 4 MiB - -// flag to set whether local files should be deleted from OneDrive -private bool noRemoteDelete = false; - -// flag to set whether the local file should be deleted once it is successfully uploaded to OneDrive -private bool localDeleteAfterUpload = false; - -// flag to set if we are running as uploadOnly -private bool uploadOnly = false; - -// Do we configure to disable the upload validation routine -private bool disableUploadValidation = false; - -// Do we configure to disable the download validation routine -private bool disableDownloadValidation = false; - -// Do we perform a local cleanup of files that are 'extra' on the local file system, when using --download-only -private bool cleanupLocalFiles = false; - -private bool isItemFolder(const ref JSONValue item) -{ - return ("folder" in item) != null; -} - -private bool isItemFile(const ref JSONValue item) -{ - return ("file" in item) != null; -} - -private bool isItemDeleted(const ref JSONValue item) -{ - return ("deleted" in item) != null; -} - -private bool isItemRoot(const ref JSONValue item) -{ - return ("root" in item) != null; -} - -private bool isItemRemote(const ref JSONValue item) -{ - return ("remoteItem" in item) != null; -} - -private bool hasParentReference(const ref JSONValue item) -{ - return ("parentReference" in item) != null; -} +import std.exception; +import std.file; +import std.json; +import std.parallelism; +import std.path; +import std.range; +import std.regex; +import std.stdio; +import std.string; +import std.uni; +import std.uri; +import std.utf; +import std.math; -private bool hasParentReferenceId(const ref JSONValue item) -{ - return ("id" in item["parentReference"]) != null; -} +// What other modules that we have created do we need to import? 
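Among the exception types defined below, PosixException captures a OneDrive-specific constraint: the online namespace is case-insensitive, while POSIX filesystems are case-sensitive, so two local names that differ only by case cannot both exist online. A minimal sketch of the kind of check that would raise it, using a hypothetical helper:

// Hypothetical check: two distinct local names that fold to the same
// case-insensitive name violate the OneDrive API namespace convention
import std.uni : toLower;

bool posixNameClash(string localName, string remoteName) {
	return (localName != remoteName) && (toLower(localName) == toLower(remoteName));
}

// e.g. posixNameClash("Documents", "documents") is true, which would
// lead to: throw new PosixException("Documents", "documents");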
+import config;
+import log;
+import util;
+import onedrive;
+import itemdb;
+import clientSideFiltering;
 
-private bool hasParentReferencePath(const ref JSONValue item)
-{
-	return ("path" in item["parentReference"]) != null;
+class JsonResponseException: Exception {
+	@safe pure this(string inputMessage) {
+		string msg = format(inputMessage);
+		super(msg);
+	}
 }
 
-private bool isMalware(const ref JSONValue item)
-{
-	return ("malware" in item) != null;
+class PosixException: Exception {
+	@safe pure this(string localTargetName, string remoteTargetName) {
+		string msg = format("POSIX 'case-insensitive match' between '%s' (local) and '%s' (online) which violates the Microsoft OneDrive API namespace convention", localTargetName, remoteTargetName);
+		super(msg);
+	}
 }
 
-private bool hasFileSize(const ref JSONValue item)
-{
-	return ("size" in item) != null;
+class AccountDetailsException: Exception {
+	@safe pure this() {
+		string msg = format("Unable to query OneDrive API to obtain required account details");
+		super(msg);
+	}
 }
 
-private bool hasId(const ref JSONValue item)
-{
-	return ("id" in item) != null;
+class SyncException: Exception {
+	@nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__) {
+		super(msg, file, line);
+	}
 }
 
-private bool hasHashes(const ref JSONValue item)
-{
-	return ("hashes" in item["file"]) != null;
+struct DriveDetailsCache {
+	// - driveId is the drive where items are to be stored for the operations being performed
+	// - quotaRestricted is a bool recording whether that drive restricts our ability to understand if there is space available. Some 'Business' and 'SharePoint' accounts restrict this, and for most (if not all) shared folders it can't be determined if there is free space
+	// - quotaAvailable is a bool recording whether quota details are available for that drive
+	// - quotaRemaining is a ulong value storing the current free space available online
+	string driveId;
+	bool quotaRestricted;
+	bool quotaAvailable;
+	ulong quotaRemaining;
 }
 
-private bool hasQuickXorHash(const ref JSONValue item)
-{
-	return ("quickXorHash" in item["file"]["hashes"]) != null;
+struct DeltaLinkDetails {
+	string driveId;
+	string itemId;
+	string latestDeltaLink;
 }
 
-private bool hasSHA256Hash(const ref JSONValue item)
-{
-	return ("sha256Hash" in item["file"]["hashes"]) != null;
+struct DatabaseItemsToDeleteOnline {
+	Item dbItem;
+	string localFilePath;
 }
 
-private bool isDotFile(const(string) path)
-{
-	// always allow the root
-	if (path == ".") return false;
-	auto paths = pathSplitter(buildNormalizedPath(path));
-	foreach(base; paths) {
-		if (startsWith(base, ".")){
-			return true;
+class SyncEngine {
+	// Class Variables
+	ApplicationConfig appConfig;
+	ItemDatabase itemDB;
+	ClientSideFiltering selectiveSync;
+	
+	// Array of directory databaseItem.id to skip while applying the changes.
+	// These are the 'parent path' IDs that are being excluded, so if the parent id is in here, the child needs to be skipped as well
+	RedBlackTree!string skippedItems = redBlackTree!string();
+	// Array of databaseItem.id to delete after the changes have been downloaded
+	string[2][] idsToDelete;
+	// Array of JSON items which are files or directories that are not 'root', skipped or to be deleted, that need to be processed
+	JSONValue[] jsonItemsToProcess;
+	// Array of JSON items which are files that are not 'root', skipped or to be deleted, that need to be downloaded
+	JSONValue[] fileJSONItemsToDownload;
+	// Array of paths that failed to download
+	string[] fileDownloadFailures;
+	// Associative array mapping of all OneDrive driveId's that have been seen, mapped with DriveDetailsCache data for reference
+	DriveDetailsCache[string] onlineDriveDetails;
+	// List of items we fake created when using --dry-run
+	string[2][] idsFaked;
+	// List of paths we fake deleted when using --dry-run
+	string[] pathFakeDeletedArray;
+	// Array of database Parent Item ID, Item ID & Local Path where the content has changed and needs to be uploaded
+	string[3][] databaseItemsWhereContentHasChanged;
+	// Array of local file paths that need to be uploaded as new items to OneDrive
+	string[] newLocalFilesToUploadToOneDrive;
+	// Array of local file paths that failed to be uploaded to OneDrive
+	string[] fileUploadFailures;
+	// List of path names changed online, but not changed locally when using --dry-run
+	string[] pathsRenamed;
+	// List of paths that were a POSIX case-insensitive match, thus could not be created online
+	string[] posixViolationPaths;
+	// List of local paths that, when the OneDrive Business Shared Folders feature is enabled and then disabled, still exist locally and online
+	// This list of local paths needs to be skipped
+	string[] businessSharedFoldersOnlineToSkip;
+	// List of interrupted upload session files that need to be resumed
+	string[] interruptedUploadsSessionFiles;
+	// List of validated interrupted upload session JSON items to resume
+	JSONValue[] jsonItemsToResumeUpload;
+	// List of local paths that need to be created online
+	string[] pathsToCreateOnline;
+	// Array of items from the database that have been deleted locally, that need to be deleted online
+	DatabaseItemsToDeleteOnline[] databaseItemsToDeleteOnline;
+	
+	// Flag that there were upload or download failures listed
+	bool syncFailures = false;
+	// Is sync_list configured
+	bool syncListConfigured = false;
+	// Was --dry-run used?
+	bool dryRun = false;
+	// Was --upload-only used?
+	bool uploadOnly = false;
+	// Was --remove-source-files used?
+ // Flag to set whether the local file should be deleted once it is successfully uploaded to OneDrive + bool localDeleteAfterUpload = false; + + // Do we configure to disable the download validation routine due to --disable-download-validation + // We will always validate our downloads + // However, when downloading files from SharePoint, the OneDrive API will not advise the correct file size + // which means that the application thinks the file download has failed as the size is different / hash is different + // See: https://github.com/abraunegg/onedrive/discussions/1667 + bool disableDownloadValidation = false; + + // Do we configure to disable the upload validation routine due to --disable-upload-validation + // We will always validate our uploads + // However, when uploading a file that can contain metadata SharePoint will associate some + // metadata from the library the file is uploaded to directly in the file which breaks this validation. + // See: https://github.com/abraunegg/onedrive/issues/205 + // See: https://github.com/OneDrive/onedrive-api-docs/issues/935 + bool disableUploadValidation = false; + + // Do we perform a local cleanup of files that are 'extra' on the local file system, when using --download-only + bool cleanupLocalFiles = false; + // Are we performing a --single-directory sync ? + bool singleDirectoryScope = false; + string singleDirectoryScopeDriveId; + string singleDirectoryScopeItemId; + // Is National Cloud Deployments configured ? + bool nationalCloudDeployment = false; + // Do we configure not to perform a remote file delete if --upload-only & --no-remote-delete configured + bool noRemoteDelete = false; + // Is bypass_data_preservation set via config file + // Local data loss MAY occur in this scenario + bool bypassDataPreservation = false; + // Maximum file size upload + // https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us + // July 2020, maximum file size for all accounts is 100GB + // January 2021, maximum file size for all accounts is 250GB + ulong maxUploadFileSize = 268435456000; // 250GB + // Threshold after which files will be uploaded using an upload session + ulong sessionThresholdFileSize = 4 * 2^^20; // 4 MiB + // File size limit for file operations that the user has configured + ulong fileSizeLimit; + // Total data to upload + ulong totalDataToUpload; + // How many items have been processed for the active operation + ulong processedCount; + // Are we creating a simulated /delta response? 
This is critically important in terms of how we 'update' the database + bool generateSimulatedDeltaResponse = false; + // Store the latest DeltaLink + string latestDeltaLink; + // Struct of containing the deltaLink details + DeltaLinkDetails deltaLinkCache; + // Array of driveId and deltaLink for use when performing the last examination of the most recent online data + alias DeltaLinkInfo = string[string]; + DeltaLinkInfo deltaLinkInfo; + + // Create the specific task pool to process items in parallel + TaskPool processPool; + + // Configure this class instance + this(ApplicationConfig appConfig, ItemDatabase itemDB, ClientSideFiltering selectiveSync) { + + // Create the specific task pool to process items in parallel + processPool = new TaskPool(to!int(appConfig.getValueLong("threads"))); + addLogEntry("Initialised TaskPool worker with threads: " ~ to!string(processPool.size), ["debug"]); + + // Configure the class variable to consume the application configuration + this.appConfig = appConfig; + // Configure the class variable to consume the database configuration + this.itemDB = itemDB; + // Configure the class variable to consume the selective sync (skip_dir, skip_file and sync_list) configuration + this.selectiveSync = selectiveSync; + + // Configure the dryRun flag to capture if --dry-run was used + // Application startup already flagged we are also in a --dry-run state, so no need to output anything else here + this.dryRun = appConfig.getValueBool("dry_run"); + + // Configure file size limit + if (appConfig.getValueLong("skip_size") != 0) { + fileSizeLimit = appConfig.getValueLong("skip_size") * 2^^20; + fileSizeLimit = (fileSizeLimit == 0) ? ulong.max : fileSizeLimit; } - } - return false; -} - -// construct an Item struct from a JSON driveItem -private Item makeDatabaseItem(const ref JSONValue driveItem) -{ - Item item = { - id: driveItem["id"].str, - name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Biz - eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Biz - cTag: "cTag" in driveItem ? driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Biz) - }; - - // OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834 - // OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive - if(isItemDeleted(driveItem)){ - // Set mtime to SysTime(0) - item.mtime = SysTime(0); - } else { - // Item is not in a deleted state - // Resolve 'Key not found: fileSystemInfo' when then item is a remote item - // https://github.com/abraunegg/onedrive/issues/11 - if (isItemRemote(driveItem)) { - // remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default - // Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' 
option in OneDrive WebUI - // to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to a application crash - // See: https://github.com/abraunegg/onedrive/issues/1533 - if ("fileSystemInfo" in driveItem["remoteItem"]) { - // 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases - item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str); - } else { - // is a remote item, but 'fileSystemInfo' is missing from 'remoteItem' - item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); + + // Is there a sync_list file present? + if (exists(appConfig.syncListFilePath)) this.syncListConfigured = true; + + // Configure the uploadOnly flag to capture if --upload-only was used + if (appConfig.getValueBool("upload_only")) { + addLogEntry("Configuring uploadOnly flag to TRUE as --upload-only passed in or configured", ["debug"]); + this.uploadOnly = true; + } + + // Configure the localDeleteAfterUpload flag + if (appConfig.getValueBool("remove_source_files")) { + addLogEntry("Configuring localDeleteAfterUpload flag to TRUE as --remove-source-files passed in or configured", ["debug"]); + this.localDeleteAfterUpload = true; + } + + // Configure the disableDownloadValidation flag + if (appConfig.getValueBool("disable_download_validation")) { + addLogEntry("Configuring disableDownloadValidation flag to TRUE as --disable-download-validation passed in or configured", ["debug"]); + this.disableDownloadValidation = true; + } + + // Configure the disableUploadValidation flag + if (appConfig.getValueBool("disable_upload_validation")) { + addLogEntry("Configuring disableUploadValidation flag to TRUE as --disable-upload-validation passed in or configured", ["debug"]); + this.disableUploadValidation = true; + } + + // Do we configure to clean up local files if using --download-only ? + if ((appConfig.getValueBool("download_only")) && (appConfig.getValueBool("cleanup_local_files"))) { + // --download-only and --cleanup-local-files were passed in + addLogEntry(); + addLogEntry("WARNING: Application has been configured to cleanup local files that are not present online."); + addLogEntry("WARNING: Local data loss MAY occur in this scenario if you are expecting data to remain archived locally."); + addLogEntry(); + // Set the flag + this.cleanupLocalFiles = true; + } + + // Do we configure to NOT perform a remote delete if --upload-only & --no-remote-delete configured ? + if ((appConfig.getValueBool("upload_only")) && (appConfig.getValueBool("no_remote_delete"))) { + // --upload-only and --no-remote-delete were passed in + addLogEntry("WARNING: Application has been configured NOT to cleanup remote files that are deleted locally."); + // Set the flag + this.noRemoteDelete = true; + } + + // Are we configured to use a National Cloud Deployment? + if (appConfig.getValueString("azure_ad_endpoint") != "") { + // value is configured, is it a valid value? 
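The validation below recognises the National Cloud endpoint identifiers; those deployments do not support /delta queries, so the client has to fall back to /children scans. A compact sketch of the same membership test, with the identifier values taken from this patch:

// National Cloud Azure AD endpoint identifiers that cannot use /delta queries
immutable string[] nationalCloudEndpoints = ["USL4", "USL5", "DE", "CN"];

bool isNationalCloudEndpoint(string azureAdEndpoint) {
	import std.algorithm : canFind;
	return nationalCloudEndpoints.canFind(azureAdEndpoint);
}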
+ if ((appConfig.getValueString("azure_ad_endpoint") == "USL4") || (appConfig.getValueString("azure_ad_endpoint") == "USL5") || (appConfig.getValueString("azure_ad_endpoint") == "DE") || (appConfig.getValueString("azure_ad_endpoint") == "CN")) { + // valid entries to flag we are using a National Cloud Deployment + // National Cloud Deployments do not support /delta as a query + // https://docs.microsoft.com/en-us/graph/deployments#supported-features + // Flag that we have a valid National Cloud Deployment that cannot use /delta queries + this.nationalCloudDeployment = true; + // Reverse set 'force_children_scan' for completeness + appConfig.setValueBool("force_children_scan", true); } - } else { - // item exists on account default drive id - item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); } - } - if (isItemFile(driveItem)) { - item.type = ItemType.file; - } else if (isItemFolder(driveItem)) { - item.type = ItemType.dir; - } else if (isItemRemote(driveItem)) { - item.type = ItemType.remote; - } else { - // do not throw exception, item will be removed in applyDifferences() - } - - // root and remote items do not have parentReference - if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) { - item.driveId = driveItem["parentReference"]["driveId"].str; - if (hasParentReferenceId(driveItem)) { - item.parentId = driveItem["parentReference"]["id"].str; + // Are we forcing to use /children scan instead of /delta to simulate National Cloud Deployment use of /children? + if (appConfig.getValueBool("force_children_scan")) { + addLogEntry("Forcing client to use /children API call rather than /delta API to retrieve objects from the OneDrive API"); + this.nationalCloudDeployment = true; } - } - - // extract the file hash - if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) { - // Get quickXorHash - if ("quickXorHash" in driveItem["file"]["hashes"]) { - item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str; - } else { - log.vdebug("quickXorHash is missing from ", driveItem["id"].str); + + // Are we forcing the client to bypass any data preservation techniques to NOT rename any local files if there is a conflict? + // The enabling of this function could lead to data loss + if (appConfig.getValueBool("bypass_data_preservation")) { + addLogEntry("WARNING: Application has been configured to bypass local data preservation in the event of file conflict."); + addLogEntry("WARNING: Local data loss MAY occur in this scenario."); + this.bypassDataPreservation = true; + } + + // Did the user configure a specific rate limit for the application? + if (appConfig.getValueLong("rate_limit") > 0) { + // User configured rate limit + addLogEntry("User Configured Rate Limit: " ~ to!string(appConfig.getValueLong("rate_limit"))); + + // If user provided rate limit is < 131072, flag that this is too low, setting to the recommended minimum of 131072 + if (appConfig.getValueLong("rate_limit") < 131072) { + // user provided limit too low + addLogEntry("WARNING: User configured rate limit too low for normal application processing and preventing application timeouts. 
Overriding to recommended minimum of 131072 (128KB/s)"); + appConfig.setValueLong("rate_limit", 131072); + } } - // sha256Hash - if ("sha256Hash" in driveItem["file"]["hashes"]) { - item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str; + + // Did the user downgrade all HTTP operations to force HTTP 1.1 + if (appConfig.getValueBool("force_http_11")) { + // User is forcing downgrade to curl to use HTTP 1.1 for all operations + addLogEntry("Downgrading all HTTP operations to HTTP/1.1 due to user configuration", ["verbose"]); } else { - log.vdebug("sha256Hash is missing from ", driveItem["id"].str); + // Use curl defaults + addLogEntry("Using Curl defaults for HTTP operational protocol version (potentially HTTP/2)", ["debug"]); } - } - - if (isItemRemote(driveItem)) { - item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str; - item.remoteId = driveItem["remoteItem"]["id"].str; - } - - // National Cloud Deployments do not support /delta as a query - // Thus we need to track in the database that this item is in sync - // As we are making an item, set the syncStatus to Y - // ONLY when using a National Cloud Deployment, all the existing DB entries will get set to N - // so when processing /children, it can be identified what the 'deleted' difference is - item.syncStatus = "Y"; - - return item; -} - -private bool testFileHash(const(string) path, const ref Item item) -{ - // Generate QuickXORHash first before others - if (item.quickXorHash) { - if (item.quickXorHash == computeQuickXorHash(path)) return true; - } else if (item.sha256Hash) { - if (item.sha256Hash == computeSHA256Hash(path)) return true; } - return false; -} - -class SyncException: Exception -{ - @nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__) - { - super(msg, file, line); - } -} - -final class SyncEngine -{ - private Config cfg; - private OneDriveApi onedrive; - private ItemDatabase itemdb; - private UploadSession session; - private SelectiveSync selectiveSync; - // list of items to skip while applying the changes - private string[] skippedItems; - // list of items to delete after the changes has been downloaded - private string[2][] idsToDelete; - // list of items we fake created when using --dry-run - private string[2][] idsFaked; - // list of directory names changed online, but not changed locally when using --dry-run - private string[] pathsRenamed; - // default drive id - private string defaultDriveId; - // default root id - private string defaultRootId; - // type of OneDrive account - private string accountType; - // free space remaining at init() - private long remainingFreeSpace; - // file size limit for a new file - private long newSizeLimit; - // is file malware flag - private bool malwareDetected = false; - // download filesystem issue flag - private bool downloadFailed = false; - // upload failure - OneDrive or filesystem issue (reading data) - private bool uploadFailed = false; - // initialization has been done - private bool initDone = false; - // sync engine dryRun flag - private bool dryRun = false; - // quota details available - private bool quotaAvailable = true; - // quota details restricted - private bool quotaRestricted = false; - // sync business shared folders flag - private bool syncBusinessFolders = false; - // single directory scope flag - private bool singleDirectoryScope = false; - // is sync_list configured - private bool syncListConfigured = false; - // sync_list new folder added, trigger delta scan override - private bool 
oneDriveFullScanTrigger = false; - // is bypass_data_preservation set via config file - // Local data loss MAY occur in this scenario - private bool bypassDataPreservation = false; - // is National Cloud Deployments configured - private bool nationalCloudDeployment = false; - // has performance processing timings been requested - private bool displayProcessingTime = false; - // array of all OneDrive driveId's for use with OneDrive Business Folders - private string[] driveIDsArray; - this(Config cfg, OneDriveApi onedrive, ItemDatabase itemdb, SelectiveSync selectiveSync) - { - assert(onedrive && itemdb && selectiveSync); - this.cfg = cfg; - this.onedrive = onedrive; - this.itemdb = itemdb; - this.selectiveSync = selectiveSync; - // session = UploadSession(onedrive, cfg.uploadStateFilePath); - this.dryRun = cfg.getValueBool("dry_run"); - this.newSizeLimit = cfg.getValueLong("skip_size") * 2^^20; - this.newSizeLimit = (this.newSizeLimit == 0) ? long.max : this.newSizeLimit; - } - - void reset() - { - initDone=false; - } - - void init() - { - // Set accountType, defaultDriveId, defaultRootId & remainingFreeSpace once and reuse where possible - JSONValue oneDriveDetails; - JSONValue oneDriveRootDetails; - - if (initDone) { - return; + // Initialise the Sync Engine class + bool initialise() { + // Control whether the worker threads are daemon threads. A daemon thread is automatically terminated when all non-daemon threads have terminated. + if (__VERSION__ < 2098) { + // LDC version less than 1.28.0 is being used + // DMD version less than 2.098.0 is being used + processPool.isDaemon(false); // Not a daemon thread + } else { + // Normal TaskPool threading + processPool.isDaemon(true); // daemon thread } - - session = UploadSession(onedrive, cfg.uploadStateFilePath); - - // Need to catch 400 or 5xx server side errors at initialization - // Get Default Drive - try { - oneDriveDetails = onedrive.getDefaultDrive(); - } catch (OneDriveException e) { - log.vdebug("oneDriveDetails = onedrive.getDefaultDrive() generated a OneDriveException"); - if (e.httpStatusCode == 400) { - // OneDrive responded with 400 error: Bad Request - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - - // Check this - if (cfg.getValueString("drive_id").length) { - writeln(); - log.error("ERROR: Check your 'drive_id' entry in your configuration file as it may be incorrect"); - writeln(); - } - // Must exit here - onedrive.shutdown(); - exit(-1); + + // Create a new instance of the OneDrive API + OneDriveApi oneDriveApiInstance; + oneDriveApiInstance = new OneDriveApi(appConfig); + + // Can the API be initialised successfully? + if (oneDriveApiInstance.initialise()) { + // Get the relevant default drive details + try { + getDefaultDriveDetails(); + } catch (AccountDetailsException exception) { + // details could not be queried + addLogEntry(exception.msg); + // Shutdown this API instance, as we will create API instances as required, when required + oneDriveApiInstance.releaseCurlEngine(); + // Free object and memory + oneDriveApiInstance = null; + // Must force exit here, allow logging to be done + forceExit(); } - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling init();"); - init(); - // return back to original call - return; + + // Get the relevant default root details + try { + getDefaultRootDetails(); + } catch (AccountDetailsException exception) { + // details could not be queried + addLogEntry(exception.msg); + // Shutdown this API instance, as we will create API instances as required, when required + oneDriveApiInstance.releaseCurlEngine(); + // Free object and memory + oneDriveApiInstance = null; + // Must force exit here, allow logging to be done + forceExit(); } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + + // Display details + try { + displaySyncEngineDetails(); + } catch (AccountDetailsException exception) { + // Details could not be queried + addLogEntry(exception.msg); + // Shutdown this API instance, as we will create API instances as required, when required + oneDriveApiInstance.releaseCurlEngine(); + // Free object and memory + oneDriveApiInstance = null; + // Must force exit here, allow logging to be done + forceExit(); } + } else { + // API could not be initialised + addLogEntry("OneDrive API could not be initialised with previously used details"); + // Shutdown this API instance, as we will create API instances as required, when required + oneDriveApiInstance.releaseCurlEngine(); + // Free object and memory + oneDriveApiInstance = null; + // Must force exit here, allow logging to be done + forceExit(); } - // Get Default Root - try { - oneDriveRootDetails = onedrive.getDefaultRoot(); - } catch (OneDriveException e) { - log.vdebug("oneDriveRootDetails = onedrive.getDefaultRoot() generated a OneDriveException"); - if (e.httpStatusCode == 400) { - // OneDrive responded with 400 error: Bad Request - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Check this - if (cfg.getValueString("drive_id").length) { - writeln(); - log.error("ERROR: Check your 'drive_id' entry in your configuration file as it may be incorrect"); - writeln(); - } - // Must exit here - onedrive.shutdown(); - exit(-1); - } - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
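Both legacy code paths above deferred HTTP 429 handling to handleOneDriveThrottleRequest(); in the reworked engine, 408, 429, 503 and 504 responses are instead retried inside the API instance itself. A minimal sketch of what honouring Retry-After involves, assuming the header value has already been parsed into seconds:

// Hypothetical sketch: delay until the service throttle is expected to be lifted
import core.thread : Thread;
import core.time : dur;

void waitForRetryAfter(long retryAfterSeconds) {
	// fall back to a conservative default if the header was missing or zero
	if (retryAfterSeconds <= 0) retryAfterSeconds = 120;
	Thread.sleep(dur!"seconds"(retryAfterSeconds));
}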
-				handleOneDriveThrottleRequest();
-				// Retry original request by calling function again to avoid replicating any further error handling
-				log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling init();");
-				init();
-				// return back to original call
-				return;
-			}
-			if (e.httpStatusCode >= 500) {
-				// There was a HTTP 5xx Server Side Error
-				displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
-				// Must exit here
-				onedrive.shutdown();
-				exit(-1);
+		// API was initialised
+		addLogEntry("Sync Engine Initialised with new OneDrive API instance", ["verbose"]);
+		// Shutdown this API instance, as we will create API instances as required, when required
+		oneDriveApiInstance.releaseCurlEngine();
+		oneDriveApiInstance = null;
+		// Perform Garbage Collection
+		GC.collect();
+		return true;
+	}
+	
+	// Shutdown the sync engine, wait for anything in processPool to complete
+	void shutdown() {
+		addLogEntry("SyncEngine: Waiting for all internal threads to complete", ["debug"]);
+		shutdownProcessPool();
+	}
+	
+	void shutdownProcessPool() {
+		// TaskPool needs specific shutdown based on compiler version otherwise this causes a segfault
+		if (processPool.size > 0) {
+			// TaskPool is still configured for 'thread' size
+			addLogEntry("Application compiled with D compiler version: " ~ to!string(__VERSION__), ["debug"]);
+			// We must be using 2098 or greater to use thread blocking when shutting down
+			if (__VERSION__ < 2098) {
+				// Compromised TaskPool shutdown process
+				addLogEntry("Shutting down processPool in a legacy manner", ["debug"]);
+				// LDC version less than 1.28.0 is being used
+				// DMD version less than 2.098.0 is being used
+				// https://dlang.org/library/std/parallelism/task_pool.finish.html
+				// https://dlang.org/library/std/parallelism/task_pool.stop.html
+				processPool.finish(); // If we flag 'true' here, the application segfaults on exit when using DMD 2.098.0 or LDC 1.28.0 or earlier
+				processPool.stop(); // Signals to all worker threads to terminate as soon as they are finished with their current Task, or immediately if they are not executing a Task.
+			} else {
+				// Normal TaskPool shutdown process
+				addLogEntry("Shutting down processPool in a thread blocking manner", ["debug"]);
+				// All worker threads are daemon threads which are automatically terminated when all non-daemon threads have terminated.
+				processPool.finish(true); // If blocking argument is true, wait for all worker threads to terminate before returning.
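The version gate above exists because a blocking TaskPool shutdown segfaults on DMD releases older than 2.098.0 and LDC releases older than 1.28.0. A standalone sketch of the same gate expressed as a compile-time branch; only std.parallelism is assumed:

import std.parallelism : TaskPool;

void shutdownPool(TaskPool pool) {
	static if (__VERSION__ < 2098) {
		// Older DMD/LDC: non-blocking finish, then signal the workers to stop
		pool.finish();
		pool.stop();
	} else {
		// Newer compilers: block until all worker threads have terminated
		pool.finish(true);
	}
}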
} } - - if ((oneDriveDetails.type() == JSONType.object) && (oneDriveRootDetails.type() == JSONType.object) && (hasId(oneDriveDetails)) && (hasId(oneDriveRootDetails))) { - // JSON elements are valid - // Debug OneDrive Account details response - log.vdebug("OneDrive Account Details: ", oneDriveDetails); - log.vdebug("OneDrive Account Root Details: ", oneDriveRootDetails); - - // Successfully got details from OneDrive without a server side error such as 'HTTP/1.1 500 Internal Server Error' or 'HTTP/1.1 504 Gateway Timeout' - accountType = oneDriveDetails["driveType"].str; - defaultDriveId = oneDriveDetails["id"].str; - defaultRootId = oneDriveRootDetails["id"].str; - - // get the remaining size from OneDrive API - if ("remaining" in oneDriveDetails["quota"]){ - // use the value provided - remainingFreeSpace = oneDriveDetails["quota"]["remaining"].integer; + } + + // Get Default Drive Details for this Account + void getDefaultDriveDetails() { + + // Function variables + JSONValue defaultOneDriveDriveDetails; + + // Create a new instance of the OneDrive API + OneDriveApi getDefaultDriveApiInstance; + getDefaultDriveApiInstance = new OneDriveApi(appConfig); + getDefaultDriveApiInstance.initialise(); + + // Get Default Drive Details for this Account + try { + addLogEntry("Getting Account Default Drive Details", ["debug"]); + defaultOneDriveDriveDetails = getDefaultDriveApiInstance.getDefaultDriveDetails(); + } catch (OneDriveException exception) { + addLogEntry("defaultOneDriveDriveDetails = getDefaultDriveApiInstance.getDefaultDriveDetails() generated a OneDriveException", ["debug"]); + string thisFunctionName = getFunctionName!({}); + + if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) { + // Handle the 400 | 401 error + handleClientUnauthorised(exception.httpStatusCode, exception.msg); } else { - // set at zero - remainingFreeSpace = 0; + // Default operation if not 400,401 errors + // - 408,429,503,504 errors are handled as a retry within getDefaultDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } + } + + // If the JSON response is a correct JSON object, and has an 'id' we can set these details + if ((defaultOneDriveDriveDetails.type() == JSONType.object) && (hasId(defaultOneDriveDriveDetails))) { + addLogEntry("OneDrive Account Default Drive Details: " ~ to!string(defaultOneDriveDriveDetails), ["debug"]); + appConfig.accountType = defaultOneDriveDriveDetails["driveType"].str; + appConfig.defaultDriveId = defaultOneDriveDriveDetails["id"].str; - // Make sure that defaultDriveId is in our driveIDs array to use when checking if item is in database - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, defaultDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= defaultDriveId; - } + // Make sure that appConfig.defaultDriveId is in our driveIDs array to use when checking if item is in database + // Keep the DriveDetailsCache array with unique entries only + DriveDetailsCache cachedOnlineDriveData; + if (!canFindDriveId(appConfig.defaultDriveId, cachedOnlineDriveData)) { + // Add this driveId to the drive cache, which then also sets for the defaultDriveId: + // - quotaRestricted; + // - quotaAvailable; + // - quotaRemaining; + addOrUpdateOneDriveOnlineDetails(appConfig.defaultDriveId); + } + + // Fetch the details from cachedOnlineDriveData + cachedOnlineDriveData = getDriveDetails(appConfig.defaultDriveId); + // - 
cachedOnlineDriveData.quotaRestricted; + // - cachedOnlineDriveData.quotaAvailable; + // - cachedOnlineDriveData.quotaRemaining; // In some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero - if (remainingFreeSpace <= 0) { + if (cachedOnlineDriveData.quotaRemaining <= 0) { // free space is <= 0 .. why ? - if ("remaining" in oneDriveDetails["quota"]){ - // json response contained a 'remaining' value - if (accountType == "personal"){ + if ("remaining" in defaultOneDriveDriveDetails["quota"]) { + if (appConfig.accountType == "personal") { // zero space available - log.error("ERROR: OneDrive account currently has zero space available. Please free up some space online."); - quotaAvailable = false; + addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online."); } else { // zero space available is being reported, maybe being restricted? - log.error("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); - quotaRestricted = true; + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); } } else { // json response was missing a 'remaining' value - if (accountType == "personal"){ - log.error("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online."); - quotaAvailable = false; + if (appConfig.accountType == "personal") { + addLogEntry("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online."); } else { // quota details not available - log.error("ERROR: OneDrive quota information is being restricted. Please fix by speaking to your OneDrive / Office 365 Administrator."); - quotaRestricted = true; - } + addLogEntry("ERROR: OneDrive quota information is being restricted. 
Please fix by speaking to your OneDrive / Office 365 Administrator.");
+					}
 				}
 			}
-			// Display accountType, defaultDriveId, defaultRootId & remainingFreeSpace for verbose logging purposes
-			log.vlog("Application version: ", strip(import("version")));
-			log.vlog("Account Type: ", accountType);
-			log.vlog("Default Drive ID: ", defaultDriveId);
-			log.vlog("Default Root ID: ", defaultRootId);
+			// What did we set based on the data from the JSON
+			addLogEntry("appConfig.accountType = " ~ appConfig.accountType, ["debug"]);
+			addLogEntry("appConfig.defaultDriveId = " ~ appConfig.defaultDriveId, ["debug"]);
+			addLogEntry("cachedOnlineDriveData.quotaRemaining = " ~ to!string(cachedOnlineDriveData.quotaRemaining), ["debug"]);
+			addLogEntry("cachedOnlineDriveData.quotaAvailable = " ~ to!string(cachedOnlineDriveData.quotaAvailable), ["debug"]);
+			addLogEntry("cachedOnlineDriveData.quotaRestricted = " ~ to!string(cachedOnlineDriveData.quotaRestricted), ["debug"]);
 
-			// What do we display here
-			if (remainingFreeSpace > 0) {
-				// Display the actual value
-				log.vlog("Remaining Free Space: ", remainingFreeSpace);
+		} else {
+			// Handle the invalid JSON response
+			throw new AccountDetailsException();
+		}
+		
+		// OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+		getDefaultDriveApiInstance.releaseCurlEngine();
+		getDefaultDriveApiInstance = null;
+		// Perform Garbage Collection
+		GC.collect();
+	}
+	
+	// Get Default Root Details for this Account
+	void getDefaultRootDetails() {
+		
+		// Function variables
+		JSONValue defaultOneDriveRootDetails;
+		
+		// Create a new instance of the OneDrive API
+		OneDriveApi getDefaultRootApiInstance;
+		getDefaultRootApiInstance = new OneDriveApi(appConfig);
+		getDefaultRootApiInstance.initialise();
+		
+		// Get Default Root Details for this Account
+		try {
+			addLogEntry("Getting Account Default Root Details", ["debug"]);
+			defaultOneDriveRootDetails = getDefaultRootApiInstance.getDefaultRootDetails();
+		} catch (OneDriveException exception) {
+			addLogEntry("defaultOneDriveRootDetails = getDefaultRootApiInstance.getDefaultRootDetails() generated a OneDriveException", ["debug"]);
+			string thisFunctionName = getFunctionName!({});
+			
+			if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) {
+				// Handle the 400 | 401 error
+				handleClientUnauthorised(exception.httpStatusCode, exception.msg);
 			} else {
-				// zero or non-zero value or restricted
-				if (!quotaRestricted){
-					log.vlog("Remaining Free Space: 0");
-				} else {
-					log.vlog("Remaining Free Space: Not Available");
-				}
+				// Default operation if not 400,401 errors
+				// - 408,429,503,504 errors are handled as a retry within getDefaultRootApiInstance
+				// Display what the error is
+				displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
 			}
+		}
+		
+		// If the JSON response is a correct JSON object, and has an 'id' we can set these details
+		if ((defaultOneDriveRootDetails.type() == JSONType.object) && (hasId(defaultOneDriveRootDetails))) {
+			addLogEntry("OneDrive Account Default Root Details: " ~ to!string(defaultOneDriveRootDetails), ["debug"]);
+			appConfig.defaultRootId = defaultOneDriveRootDetails["id"].str;
+			addLogEntry("appConfig.defaultRootId = " ~ appConfig.defaultRootId, ["debug"]);
+			
+			// Save the item to the database, so the account root drive is always going to be present in the DB
+			saveItem(defaultOneDriveRootDetails);
+		} else {
+			// Handle the invalid JSON response
+			throw new AccountDetailsException();
+		}
 
-		// If account type is documentLibrary - then most likely this
is a SharePoint repository - // and files 'may' be modified after upload. See: https://github.com/abraunegg/onedrive/issues/205 - if(accountType == "documentLibrary") { - // set this flag for SharePoint regardless of --disable-upload-validation being used - setDisableUploadValidation(); + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + getDefaultRootApiInstance.releaseCurlEngine(); + getDefaultRootApiInstance = null; + // Perform Garbage Collection + GC.collect(); + } + + // Reset syncFailures to false based on file activity + void resetSyncFailures() { + // Log initial status and any non-empty arrays + string logMessage = "Evaluating reset of syncFailures: "; + if (fileDownloadFailures.length > 0) { + logMessage ~= "fileDownloadFailures is not empty; "; + } + if (fileUploadFailures.length > 0) { + logMessage ~= "fileUploadFailures is not empty; "; + } + + // Check if both arrays are empty to reset syncFailures + if (fileDownloadFailures.length == 0 && fileUploadFailures.length == 0) { + if (syncFailures) { + syncFailures = false; + logMessage ~= "Resetting syncFailures to false."; + } else { + logMessage ~= "syncFailures already false."; } + } else { + // Indicate no reset of syncFailures due to non-empty conditions + logMessage ~= "Not resetting syncFailures due to non-empty arrays."; + } + + // Log the final decision and conditions + addLogEntry(logMessage, ["debug"]); + } + + // Perform a sync of the OneDrive Account + // - Query /delta + // - If singleDirectoryScope or nationalCloudDeployment is used we need to generate a /delta like response + // - Process changes (add, changes, moves, deletes) + // - Process any items to add (download data to local) + // - Detail any files that we failed to download + // - Process any deletes (remove local data) + void syncOneDriveAccountToLocalDisk() { + + // performFullScanTrueUp value + addLogEntry("Perform a Full Scan True-Up: " ~ to!string(appConfig.fullScanTrueUpRequired), ["debug"]); + + // Fetch the API response of /delta to track changes that were performed online + fetchOneDriveDeltaAPIResponse(); - // Check the local database to ensure the OneDrive Root details are in the database - checkDatabaseForOneDriveRoot(); + // Process any download activities or cleanup actions + processDownloadActivities(); - // Check if there is an interrupted upload session - if (session.restore()) { - log.log("Continuing the upload session ..."); - string uploadSessionLocalFilePath = session.getUploadSessionLocalFilePath(); - auto item = session.upload(); + // If singleDirectoryScope is false, we are not targeting a single directory + // but if true, the target 'could' be a shared folder - so dont try and scan it again + if (!singleDirectoryScope) { + // OneDrive Shared Folder Handling + if (appConfig.accountType == "personal") { + // Personal Account Type + // https://github.com/OneDrive/onedrive-api-docs/issues/764 - // is 'item' a valid JSON response and not null - if (item.type() == JSONType.object) { - // Upload did not fail, JSON response contains data - // Are we in an --upload-only & --remove-source-files scenario? - // Use actual config values as we are doing an upload session recovery - if ((cfg.getValueBool("upload_only")) && (cfg.getValueBool("remove_source_files"))) { - // Log that we are deleting a local item - log.log("Removing local file as --upload-only & --remove-source-files configured"); - // are we in a --dry-run scenario? - if (!dryRun) { - // No --dry-run ... 
process local file delete - if (!uploadSessionLocalFilePath.empty) { - // only perform the delete if we have a valid file path - if (exists(uploadSessionLocalFilePath)) { - // file exists - log.vdebug("Removing local file: ", uploadSessionLocalFilePath); - safeRemove(uploadSessionLocalFilePath); + // Get the Remote Items from the Database + Item[] remoteItems = itemDB.selectRemoteItems(); + foreach (remoteItem; remoteItems) { + // Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty + if (appConfig.getValueString("skip_dir") != "") { + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched + if (selectiveSync.isDirNameExcluded(remoteItem.name)) { + // This directory name is excluded + addLogEntry("Skipping item - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]); + continue; + } + } + + // Directory name is not excluded or skip_dir is not populated + if (!appConfig.suppressLoggingOutput) { + addLogEntry("Syncing this OneDrive Personal Shared Folder: " ~ remoteItem.name); + } + // Check this OneDrive Personal Shared Folder for changes + fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name); + + // Process any download activities or cleanup actions for this OneDrive Personal Shared Folder + processDownloadActivities(); + } + // Clear the array + remoteItems = []; + } else { + // Is this a Business Account with Sync Business Shared Items enabled? + if ((appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items"))) { + + // Business Account Shared Items Handling + // - OneDrive Business Shared Folder + // - OneDrive Business Shared Files + // - SharePoint Links + + // Get the Remote Items from the Database + Item[] remoteItems = itemDB.selectRemoteItems(); + + foreach (remoteItem; remoteItems) { + // As all remote items are returned, including files, we only want to process directories here + if (remoteItem.remoteType == ItemType.dir) { + // Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty + if (appConfig.getValueString("skip_dir") != "") { + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched + if (selectiveSync.isDirNameExcluded(remoteItem.name)) { + // This directory name is excluded + addLogEntry("Skipping item - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]); + continue; } } + + // Directory name is not excluded or skip_dir is not populated + if (!appConfig.suppressLoggingOutput) { + addLogEntry("Syncing this OneDrive Business Shared Folder: " ~ remoteItem.name); + } + + // Debug log output + addLogEntry("Fetching /delta API response for:", ["debug"]); + addLogEntry(" remoteItem.remoteDriveId: " ~ remoteItem.remoteDriveId, ["debug"]); + addLogEntry(" remoteItem.remoteId: " ~ remoteItem.remoteId, ["debug"]); + + // Check this OneDrive Business Shared Folder for changes + fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name); + + // Process any download activities or cleanup actions for this OneDrive Business Shared Folder + processDownloadActivities(); } - // as file is removed, we have nothing to add to the local database - log.vdebug("Skipping adding to database as --upload-only & --remove-source-files configured"); - } 
else { - // save the item - saveItem(item); } - } else { - // JSON response was not valid, upload failed - log.error("ERROR: File failed to upload. Increase logging verbosity to determine why."); + // Clear the array + remoteItems = []; + + // OneDrive Business Shared File Handling - but only if this option is enabled + if (appConfig.getValueBool("sync_business_shared_files")) { + // We need to create a 'new' local folder in the 'sync_dir' where these shared files & associated folder structure will reside + // Whilst these files are synced locally, the entire folder structure will need to be excluded from syncing back to OneDrive + // But file changes , *if any* , will need to be synced back to the original shared file location + // . + // ├── Files Shared With Me -> Directory should not be created online | Not Synced + // │   └── Display Name (email address) (of Account who shared file) -> Directory should not be created online | Not Synced + // │   │ └── shared file.ext -> File synced with original shared file location on remote drive + // │   │ └── shared file.ext -> File synced with original shared file location on remote drive + // │   │ └── ...... -> File synced with original shared file location on remote drive + // │   └── Display Name (email address) ... + // │ └── shared file.ext .... -> File synced with original shared file location on remote drive + + // Does the Local Folder to store the OneDrive Business Shared Files exist? + if (!exists(appConfig.configuredBusinessSharedFilesDirectoryName)) { + // Folder does not exist locally and needs to be created + addLogEntry("Creating the OneDrive Business Shared Files Local Directory: " ~ appConfig.configuredBusinessSharedFilesDirectoryName); + + // Local folder does not exist, thus needs to be created + mkdirRecurse(appConfig.configuredBusinessSharedFilesDirectoryName); + // As this will not be created online, generate a response so it can be saved to the database + Item sharedFilesPath = makeItem(createFakeResponse(baseName(appConfig.configuredBusinessSharedFilesDirectoryName))); + + // Add DB record to the local database + addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]); + itemDB.upsert(sharedFilesPath); + } else { + // Folder exists locally, is the folder in the database? 
+ // Query DB for this path
+ Item dbRecord;
+ if (!itemDB.selectByPath(baseName(appConfig.configuredBusinessSharedFilesDirectoryName), appConfig.defaultDriveId, dbRecord)) {
+ // As this will not be created online, generate a response so it can be saved to the database
+ Item sharedFilesPath = makeItem(createFakeResponse(baseName(appConfig.configuredBusinessSharedFilesDirectoryName)));
+ 
+ // Add DB record to the local database
+ addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]);
+ itemDB.upsert(sharedFilesPath);
+ }
+ }
+ 
+ // Query for OneDrive Business Shared Files
+ addLogEntry("Checking for any applicable OneDrive Business Shared Files which need to be synced locally", ["verbose"]);
+ queryBusinessSharedObjects();
+ 
+ // Download any OneDrive Business Shared Files
+ processDownloadActivities();
+ }
}
}
- initDone = true;
- } else {
- // init failure
- initDone = false;
- // log why
- log.error("ERROR: Unable to query OneDrive to initialize application");
- // Debug OneDrive Account details response
- log.vdebug("OneDrive Account Details: ", oneDriveDetails);
- log.vdebug("OneDrive Account Root Details: ", oneDriveRootDetails);
- // Must exit here
- onedrive.shutdown();
- exit(-1);
- }
}
}
- 
- // Configure uploadOnly if function is called
- // By default, uploadOnly = false;
- void setUploadOnly()
- {
- uploadOnly = true;
- }
- 
- // Configure noRemoteDelete if function is called
- // By default, noRemoteDelete = false;
- // Meaning we will process local deletes to delete item on OneDrive
- void setNoRemoteDelete()
- {
- noRemoteDelete = true;
- }
- 
- // Configure localDeleteAfterUpload if function is called
- // By default, localDeleteAfterUpload = false;
- // Meaning we will not delete any local file after upload is successful
- void setLocalDeleteAfterUpload()
- {
- localDeleteAfterUpload = true;
- }
- // set the flag that we are going to sync business shared folders
- void setSyncBusinessFolders()
- {
- syncBusinessFolders = true;
+ // Cleanup arrays when used in --monitor loops
+ void cleanupArrays() {
+ addLogEntry("Cleaning up all internal arrays used when processing data", ["debug"]);
+ 
+ // Multi Dimensional Arrays
+ idsToDelete.length = 0;
+ idsFaked.length = 0;
+ databaseItemsWhereContentHasChanged.length = 0;
+ 
+ // JSON Items Arrays
+ jsonItemsToProcess = [];
+ fileJSONItemsToDownload = [];
+ jsonItemsToResumeUpload = [];
+ 
+ // String Arrays
+ fileDownloadFailures = [];
+ pathFakeDeletedArray = [];
+ pathsRenamed = [];
+ newLocalFilesToUploadToOneDrive = [];
+ fileUploadFailures = [];
+ posixViolationPaths = [];
+ businessSharedFoldersOnlineToSkip = [];
+ interruptedUploadsSessionFiles = [];
+ pathsToCreateOnline = [];
+ databaseItemsToDeleteOnline = [];
+ 
+ // Perform Garbage Collection now that the arrays have been cleared
+ GC.collect();
+ addLogEntry("Cleaning of internal arrays complete", ["debug"]);
}
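
The pattern in cleanupArrays() above - truncating or re-initialising the internal arrays and then invoking the garbage collector between --monitor passes - can be shown in a self-contained form. A minimal sketch (the module-level variables are illustrative stand-ins, not the actual class members):

import core.memory : GC;

string[] fileDownloadFailuresSketch; // illustrative stand-in for a failure-tracking array
string[][] idsToDeleteSketch;        // illustrative stand-in for a multi-dimensional array

void cleanupArraysSketch() {
	// Truncate the multi-dimensional array in place
	idsToDeleteSketch.length = 0;
	// Drop the reference so the GC can reclaim the backing store
	fileDownloadFailuresSketch = [];
	// Explicitly run a collection so long-running --monitor loops do not accumulate memory
	GC.collect();
}
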
+ addLogEntry("The OneDrive Client was asked to search for this directory online and create it if it's not located: " ~ normalisedSingleDirectoryPath); + + // Query the OneDrive API for the specified path online + // In a --single-directory scenario, we need to travervse the entire path that we are wanting to sync + // and then check the path element does it exist online, if it does, is it a POSIX match, or if it does not, create the path + // Once we have searched online, we have the right drive id and item id so that we can downgrade the sync status, then build up + // any object items from that location + // This is because, in a --single-directory scenario, any folder in the entire path tree could be a 'case-insensitive match' + + try { + onlinePathData = queryOneDriveForSpecificPathAndCreateIfMissing(normalisedSingleDirectoryPath, true); + } catch (PosixException e) { + displayPosixErrorMessage(e.msg); + addLogEntry("ERROR: Requested directory to search for and potentially create has a 'case-insensitive match' to an existing directory on OneDrive online."); + } + + // Was a valid JSON response provided? + if (onlinePathData.type() == JSONType.object) { + // Valid JSON item was returned + searchItem = makeItem(onlinePathData); + addLogEntry("searchItem: " ~ to!string(searchItem), ["debug"]); + + // Is this item a potential Shared Folder? + // Is this JSON a remote object + if (isItemRemote(onlinePathData)) { + // The path we are seeking is remote to our account drive id + searchItem.driveId = onlinePathData["remoteItem"]["parentReference"]["driveId"].str; + searchItem.id = onlinePathData["remoteItem"]["id"].str; + } + + // Set these items so that these can be used as required + singleDirectoryScopeDriveId = searchItem.driveId; + singleDirectoryScopeItemId = searchItem.id; + } else { + addLogEntry(); + addLogEntry("The requested --single-directory path to sync has generated an error. Please correct this error and try again."); + addLogEntry(); + forceExit(); + } } - // Configure disableUploadValidation if function is called - // By default, disableUploadValidation = false; - // Meaning we will always validate our uploads - // However, when uploading a file that can contain metadata SharePoint will associate some - // metadata from the library the file is uploaded to directly in the file - // which breaks this validation. 
See https://github.com/abraunegg/onedrive/issues/205 - void setDisableUploadValidation() - { - disableUploadValidation = true; - log.vdebug("documentLibrary account type - flagging to disable upload validation checks due to Microsoft SharePoint file modification enrichments"); - } - - // Configure disableDownloadValidation if function is called - // By default, disableDownloadValidation = false; - // Meaning we will always validate our downloads - // However, when downloading files from SharePoint, the OneDrive API will not advise the correct file size - // which means that the application thinks the file download has failed as the size is different / hash is different - // See: https://github.com/abraunegg/onedrive/discussions/1667 - void setDisableDownloadValidation() - { - disableDownloadValidation = true; - log.vdebug("Flagging to disable download validation checks due to user request"); - } - - // Issue #658 Handling - // If an existing folder is moved into a sync_list valid path (where it previously was out of scope due to sync_list), - // then set this flag to true, so that on the second 'true-up' sync, we force a rescan of the OneDrive path to capture any 'files' - void setOneDriveFullScanTrigger() - { - oneDriveFullScanTrigger = true; - log.vdebug("Setting oneDriveFullScanTrigger = true due to new folder creation request in a location that is now in-scope which may have previously out of scope"); - } - - // unset method - void unsetOneDriveFullScanTrigger() - { - oneDriveFullScanTrigger = false; - log.vdebug("Setting oneDriveFullScanTrigger = false"); - } - - // set syncListConfigured to true - void setSyncListConfigured() - { - syncListConfigured = true; - log.vdebug("Setting syncListConfigured = true"); - } - - // set bypassDataPreservation to true - void setBypassDataPreservation() - { - bypassDataPreservation = true; - log.vdebug("Setting bypassDataPreservation = true"); - } - - // set nationalCloudDeployment to true - void setNationalCloudDeployment() - { - nationalCloudDeployment = true; - log.vdebug("Setting nationalCloudDeployment = true"); - } - - // set performance timing flag - void setPerformanceProcessingOutput() - { - displayProcessingTime = true; - log.vdebug("Setting displayProcessingTime = true"); - } - - // get performance timing flag - bool getPerformanceProcessingOutput() - { - return displayProcessingTime; - } + // Query OneDrive API for /delta changes and iterate through items online + void fetchOneDriveDeltaAPIResponse(string driveIdToQuery = null, string itemIdToQuery = null, string sharedFolderName = null) { + + string deltaLink = null; + string currentDeltaLink = null; + string databaseDeltaLink; + JSONValue deltaChanges; + ulong responseBundleCount; + ulong jsonItemsReceived = 0; - // set cleanupLocalFiles to true - void setCleanupLocalFiles() - { - cleanupLocalFiles = true; - log.vdebug("Setting cleanupLocalFiles = true"); - } - - // return the OneDrive Account Type - auto getAccountType() - { - // return account type in use - return accountType; - } - - // download all new changes from OneDrive - void applyDifferences(bool performFullItemScan) - { - // Set defaults for the root folder - // Use the global's as initialised via init() rather than performing unnecessary additional HTTPS calls - string driveId = defaultDriveId; - string rootId = defaultRootId; - applyDifferences(driveId, rootId, performFullItemScan); - - // Check OneDrive Personal Shared Folders - if (accountType == "personal"){ - // https://github.com/OneDrive/onedrive-api-docs/issues/764 
- Item[] items = itemdb.selectRemoteItems(); - foreach (item; items) { - // Only check path if config is != "" - if (cfg.getValueString("skip_dir") != "") { - // The path that needs to be checked needs to include the '/' - // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched - if (selectiveSync.isDirNameExcluded(item.name)) { - // This directory name is excluded - log.vlog("Skipping item - excluded by skip_dir config: ", item.name); - continue; + // Reset jsonItemsToProcess & processedCount + jsonItemsToProcess = []; + processedCount = 0; + + // Was a driveId provided as an input + if (strip(driveIdToQuery).empty) { + // No provided driveId to query, use the account default + addLogEntry("driveIdToQuery was empty, setting to appConfig.defaultDriveId", ["debug"]); + driveIdToQuery = appConfig.defaultDriveId; + addLogEntry("driveIdToQuery: " ~ driveIdToQuery, ["debug"]); + } + + // Was an itemId provided as an input + if (strip(itemIdToQuery).empty) { + // No provided itemId to query, use the account default + addLogEntry("itemIdToQuery was empty, setting to appConfig.defaultRootId", ["debug"]); + itemIdToQuery = appConfig.defaultRootId; + addLogEntry("itemIdToQuery: " ~ itemIdToQuery, ["debug"]); + } + + // What OneDrive API query do we use? + // - Are we running against a National Cloud Deployments that does not support /delta ? + // National Cloud Deployments do not support /delta as a query + // https://docs.microsoft.com/en-us/graph/deployments#supported-features + // + // - Are we performing a --single-directory sync, which will exclude many items online, focusing in on a specific online directory + // + // - Are we performing a --download-only --cleanup-local-files action? + // - If we are, and we use a normal /delta query, we get all the local 'deleted' objects as well. + // - If the user deletes a folder online, then replaces it online, we download the deletion events and process the new 'upload' via the web interface .. + // the net effect of this, is that the valid local files we want to keep, are actually deleted ...... not desirable + if ((singleDirectoryScope) || (nationalCloudDeployment) || (cleanupLocalFiles)) { + // Generate a simulated /delta response so that we correctly capture the current online state, less any 'online' delete and replace activity + generateSimulatedDeltaResponse = true; + } + + // Reset latestDeltaLink & deltaLinkCache + latestDeltaLink = null; + deltaLinkCache.driveId = null; + deltaLinkCache.itemId = null; + deltaLinkCache.latestDeltaLink = null; + // Perform Garbage Collection + GC.collect(); + + // What /delta query do we use? + if (!generateSimulatedDeltaResponse) { + // This should be the majority default pathway application use + + // Do we need to perform a Full Scan True Up? Is 'appConfig.fullScanTrueUpRequired' set to 'true'? + if (appConfig.fullScanTrueUpRequired) { + addLogEntry("Performing a full scan of online data to ensure consistent local state"); + addLogEntry("Setting currentDeltaLink = null", ["debug"]); + currentDeltaLink = null; + } else { + // Try and get the current Delta Link from the internal cache, this saves a DB I/O call + currentDeltaLink = getDeltaLinkFromCache(deltaLinkInfo, driveIdToQuery); + + // Is currentDeltaLink empty (no cached entry found) ? 
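
The lookup order the next few lines implement - in-memory cache first, then the database, then a full online scan - can be sketched in isolation. The delegate parameters below are hypothetical stand-ins for getDeltaLinkFromCache() and itemDB.getDeltaLink(); this is an illustration, not the actual implementation:

// Illustrative three-tier deltaLink resolution
string resolveDeltaLink(string driveId, string itemId,
	string delegate(string) fromCache,
	string delegate(string, string) fromDatabase) {
	// Cheapest first: the in-memory cache avoids a DB I/O call
	string deltaLink = fromCache(driveId);
	if (deltaLink.length == 0) {
		// Fall back to the deltaLink stored in the database for this drive/item pair
		deltaLink = fromDatabase(driveId, itemId);
	}
	// An empty result means no checkpoint exists: a full online scan will be performed
	return (deltaLink.length > 0) ? deltaLink : null;
}
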
+ if (currentDeltaLink.empty) { + // Try and get the current delta link from the database for this DriveID and RootID + databaseDeltaLink = itemDB.getDeltaLink(driveIdToQuery, itemIdToQuery); + if (!databaseDeltaLink.empty) { + addLogEntry("Using database stored deltaLink", ["debug"]); + currentDeltaLink = databaseDeltaLink; + } else { + addLogEntry("Zero deltaLink available for use, we will be performing a full online scan", ["debug"]); + currentDeltaLink = null; } - } - // Directory name is not excluded or skip_dir is not populated - log.vdebug("------------------------------------------------------------------"); - if (!cfg.getValueBool("monitor")) { - log.log("Syncing this OneDrive Personal Shared Folder: ", item.name); } else { - log.vlog("Syncing this OneDrive Personal Shared Folder: ", item.name); - } - // Check this OneDrive Personal Shared Folders - applyDifferences(item.remoteDriveId, item.remoteId, performFullItemScan); - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, item.remoteDriveId)) { - // Add this OneDrive Personal Shared Folder driveId array - driveIDsArray ~= item.remoteDriveId; + // Log that we are using the deltaLink for cache + addLogEntry("Using cached deltaLink", ["debug"]); } } - } - - // Check OneDrive Business Shared Folders, if configured to do so - if (syncBusinessFolders){ - // query OneDrive Business Shared Folders shared with me - log.vlog("Attempting to sync OneDrive Business Shared Folders"); - JSONValue graphQuery; - try { - graphQuery = onedrive.getSharedWithMe(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - graphQuery = onedrive.getSharedWithMe();"); - graphQuery = onedrive.getSharedWithMe(); - } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + + // Dynamic output for non-verbose and verbose run so that the user knows something is being retrieved from the OneDrive API + if (appConfig.verbosityCount == 0) { + if (!appConfig.suppressLoggingOutput) { + addProcessingLogHeaderEntry("Fetching items from the OneDrive API for Drive ID: " ~ driveIdToQuery, appConfig.verbosityCount); } + } else { + addLogEntry("Fetching /delta response from the OneDrive API for Drive ID: " ~ driveIdToQuery, ["verbose"]); } - - if (graphQuery.type() == JSONType.object) { - string sharedFolderName; - foreach (searchResult; graphQuery["value"].array) { - // Configure additional logging items for this array element - string sharedByName; - string sharedByEmail; - // Extra details for verbose logging - if ("sharedBy" in searchResult["remoteItem"]["shared"]) { - if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; - } - if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; - } + + for (;;) { + responseBundleCount++; + // Get the /delta changes via the OneDrive API + + // Ensure deltaChanges is empty before we query /delta + deltaChanges = null; + // Perform Garbage Collection + GC.collect(); + + // getDeltaChangesByItemId has the re-try logic for transient errors + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, currentDeltaLink); + + // If the initial deltaChanges response is an invalid JSON object, keep trying until we get a valid response .. + if (deltaChanges.type() != JSONType.object) { + while (deltaChanges.type() != JSONType.object) { + // Handle the invalid JSON response and retry + addLogEntry("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response", ["debug"]); + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, currentDeltaLink); } + } - // is the shared item with us a 'folder' ? - if (isItemFolder(searchResult)) { - // item returned is a shared folder, not a shared file - sharedFolderName = searchResult["name"].str; - // Output Shared Folder Name early - log.vdebug("Shared Folder Name: ", sharedFolderName); - // Compare this to values in business_shared_folders - if(selectiveSync.isSharedFolderMatched(sharedFolderName)){ - // Folder name matches what we are looking for - // Flags for matching - bool itemInDatabase = false; - bool itemLocalDirExists = false; - bool itemPathIsLocal = false; - - // "what if" there are 2 or more folders shared with me have the "same" name? 
- // The folder name will be the same, but driveId will be different - // This will then cause these 'shared folders' to cross populate data, which may not be desirable - log.vdebug("Shared Folder Name: MATCHED to any entry in 'business_shared_folders'"); - log.vdebug("Parent Drive Id: ", searchResult["remoteItem"]["parentReference"]["driveId"].str); - log.vdebug("Shared Item Id: ", searchResult["remoteItem"]["id"].str); - Item databaseItem; - - // for each driveid in the existing driveIDsArray - foreach (searchDriveId; driveIDsArray) { - log.vdebug("searching database for: ", searchDriveId, " ", sharedFolderName); - if (itemdb.idInLocalDatabase(searchDriveId, searchResult["remoteItem"]["id"].str)){ - // Shared folder is present - log.vdebug("Found shared folder name in database"); - itemInDatabase = true; - // Query the DB for the details of this item - itemdb.selectByPath(sharedFolderName, searchDriveId, databaseItem); - log.vdebug("databaseItem: ", databaseItem); - // Does the databaseItem.driveId == defaultDriveId? - if (databaseItem.driveId == defaultDriveId) { - itemPathIsLocal = true; - } - } else { - log.vdebug("Shared folder name not found in database"); - // "what if" there is 'already' a local folder with this name - // Check if in the database - // If NOT in the database, but resides on disk, this could be a new local folder created after last sync but before this one - // However we sync 'shared folders' before checking for local changes - string localpath = expandTilde(cfg.getValueString("sync_dir")) ~ "/" ~ sharedFolderName; - if (exists(localpath)) { - // local path exists - log.vdebug("Found shared folder name in local OneDrive sync_dir"); - itemLocalDirExists = true; - } - } - } - - // Shared Folder Evaluation Debugging - log.vdebug("item in database: ", itemInDatabase); - log.vdebug("path exists on disk: ", itemLocalDirExists); - log.vdebug("database drive id matches defaultDriveId: ", itemPathIsLocal); - log.vdebug("database data matches search data: ", ((databaseItem.driveId == searchResult["remoteItem"]["parentReference"]["driveId"].str) && (databaseItem.id == searchResult["remoteItem"]["id"].str))); - - if ( ((!itemInDatabase) || (!itemLocalDirExists)) || (((databaseItem.driveId == searchResult["remoteItem"]["parentReference"]["driveId"].str) && (databaseItem.id == searchResult["remoteItem"]["id"].str)) && (!itemPathIsLocal)) ) { - // This shared folder does not exist in the database - if (!cfg.getValueBool("monitor")) { - log.log("Syncing this OneDrive Business Shared Folder: ", sharedFolderName); - } else { - log.vlog("Syncing this OneDrive Business Shared Folder: ", sharedFolderName); - } - Item businessSharedFolder = makeItem(searchResult); - - // Log who shared this to assist with sync data correlation - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName); - } - } - - // Do the actual sync - applyDifferences(businessSharedFolder.remoteDriveId, businessSharedFolder.remoteId, performFullItemScan); - // add this parent drive id to the array to search for, ready for next use - string newDriveID = searchResult["remoteItem"]["parentReference"]["driveId"].str; - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, newDriveID)) { - // Add this drive id to the array to search with - driveIDsArray ~= newDriveID; - } - } else { - // 
Shared Folder Name Conflict ... - log.log("WARNING: Skipping shared folder due to existing name conflict: ", sharedFolderName); - log.log("WARNING: Skipping changes of Path ID: ", searchResult["remoteItem"]["id"].str); - log.log("WARNING: To sync this shared folder, this shared folder needs to be renamed"); - - // Log who shared this to assist with conflict resolution - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("WARNING: Conflict Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("WARNING: Conflict Shared By: ", sharedByName); - } - } - } - } else { - log.vdebug("Shared Folder Name: NO MATCH to any entry in 'business_shared_folders'"); - } - } else { - // not a folder, is this a file? - if (isItemFile(searchResult)) { - // shared item is a file - string sharedFileName = searchResult["name"].str; - // log that this is not supported - log.vlog("WARNING: Not syncing this OneDrive Business Shared File: ", sharedFileName); - - // Log who shared this to assist with sync data correlation - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("OneDrive Business Shared File - Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("OneDrive Business Shared File - Shared By: ", sharedByName); - } - } - } else { - // something else entirely - log.log("WARNING: Not syncing this OneDrive Business Shared item: ", searchResult["name"].str); - } + ulong nrChanges = count(deltaChanges["value"].array); + int changeCount = 0; + + if (appConfig.verbosityCount == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.suppressLoggingOutput) { + addProcessingDotEntry(); } + } else { + addLogEntry("Processing API Response Bundle: " ~ to!string(responseBundleCount) ~ " - Quantity of 'changes|items' in this bundle to process: " ~ to!string(nrChanges), ["verbose"]); } - } else { - // Log that an invalid JSON object was returned - log.error("ERROR: onedrive.getSharedWithMe call returned an invalid JSON Object"); - } - } - } - - // download all new changes from a specified folder on OneDrive - void applyDifferencesSingleDirectory(const(string) path) - { - // Ensure we check the 'right' location for this directory on OneDrive - // It could come from the following places: - // 1. My OneDrive Root - // 2. My OneDrive Root as an Office 365 Shared Library - // 3. 
A OneDrive Business Shared Folder
- // If 1 & 2, the configured default items are what we need
- // If 3, we need to query OneDrive
- 
- string driveId = defaultDriveId;
- string rootId = defaultRootId;
- string folderId;
- string itemId;
- JSONValue onedrivePathDetails;
- 
- // Check OneDrive Business Shared Folders, if configured to do so
- if (syncBusinessFolders){
- log.vlog("Attempting to sync OneDrive Business Shared Folders");
- // query OneDrive Business Shared Folders shared with me
- JSONValue graphQuery;
- try {
- graphQuery = onedrive.getSharedWithMe();
- } catch (OneDriveException e) {
- if (e.httpStatusCode == 401) {
- // HTTP request returned status code 401 (Unauthorized)
- displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
- handleClientUnauthorised();
+ 
+ // Update the count of items received
+ jsonItemsReceived = jsonItemsReceived + nrChanges;
+ 
+ // The 'deltaChanges' response may contain either @odata.nextLink or @odata.deltaLink
+ // Check for @odata.nextLink
+ if ("@odata.nextLink" in deltaChanges) {
+ // @odata.nextLink is the pointer within the API to the next '200+' JSON bundle - this is the checkpoint link for this bundle
+ // This URL changes between JSON bundle sets
+ // Log the action of setting currentDeltaLink to @odata.nextLink
+ addLogEntry("Setting currentDeltaLink to @odata.nextLink: " ~ deltaChanges["@odata.nextLink"].str, ["debug"]);
+ 
+ // Update currentDeltaLink to @odata.nextLink for the next '200+' JSON bundle - this is the checkpoint link for this bundle
+ currentDeltaLink = deltaChanges["@odata.nextLink"].str;
}
- if (e.httpStatusCode == 429) {
- // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
- handleOneDriveThrottleRequest();
- // Retry original request by calling function again to avoid replicating any further error handling
- log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - graphQuery = onedrive.getSharedWithMe();");
- graphQuery = onedrive.getSharedWithMe();
+ 
+ // Check for @odata.deltaLink - usually only in the LAST JSON changeset bundle
+ if ("@odata.deltaLink" in deltaChanges) {
+ // @odata.deltaLink is the pointer that finalises all the online 'changes' for this particular checkpoint
+ // When the API is queried again, this is fetched from the DB as this is the starting point
+ // The API issue here is that the LAST JSON bundle will ONLY ever contain this item, meaning that if this deltaLink is then committed to the database
+ // and there have been any file download failures within this LAST JSON bundle, the only way to EVER re-try the failed items is for the user to perform a --resync
+ // This is an API capability gap:
+ //
+ // ..
+ // @odata.nextLink: https://graph.microsoft.com/v1.0/drives//items//delta?token=F9JRD0zODEyNzg7JTIzOyUyMzA7JTIz
+ // Processing API Response Bundle: 115 - Quantity of 'changes|items' in this bundle to process: 204
+ // ..
+ // @odata.nextLink: https://graph.microsoft.com/v1.0/drives//items//delta?token=F9JRD0zODM2Nzg7JTIzOyUyMzA7JTIz + // Processing API Response Bundle: 127 - Quantity of 'changes|items' in this bundle to process: 204 + // @odata.nextLink: https://graph.microsoft.com/v1.0/drives//items//delta?token=F9JRD0zODM4Nzg7JTIzOyUyMzA7JTIz + // Processing API Response Bundle: 128 - Quantity of 'changes|items' in this bundle to process: 176 + // @odata.deltaLink: https://graph.microsoft.com/v1.0/drives//items//delta?token= + // Finished processing /delta JSON response from the OneDrive API + + // Log the action of setting currentDeltaLink to @odata.deltaLink + addLogEntry("Setting currentDeltaLink to (@odata.deltaLink): " ~ deltaChanges["@odata.deltaLink"].str, ["debug"]); + + // Update currentDeltaLink to @odata.deltaLink as the final checkpoint URL for this entire JSON response set + currentDeltaLink = deltaChanges["@odata.deltaLink"].str; + + // Store this currentDeltaLink as latestDeltaLink + latestDeltaLink = deltaChanges["@odata.deltaLink"].str; + + // Update deltaLinkCache + deltaLinkCache.driveId = driveIdToQuery; + deltaLinkCache.itemId = itemIdToQuery; + deltaLinkCache.latestDeltaLink = currentDeltaLink; } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + + // We have a valid deltaChanges JSON array. This means we have at least 200+ JSON items to process. + // The API response however cannot be run in parallel as the OneDrive API sends the JSON items in the order in which they must be processed + auto jsonArrayToProcess = deltaChanges["value"].array; + foreach (onedriveJSONItem; jsonArrayToProcess) { + // increment change count for this item + changeCount++; + // Process the received OneDrive object item JSON for this JSON bundle + // This will determine its initial applicability and perform some initial processing on the JSON if required + processDeltaJSONItem(onedriveJSONItem, nrChanges, changeCount, responseBundleCount, singleDirectoryScope); } - } - - if (graphQuery.type() == JSONType.object) { - // valid response from OneDrive - string sharedFolderName; - foreach (searchResult; graphQuery["value"].array) { - // set sharedFolderName - sharedFolderName = searchResult["name"].str; - // Configure additional logging items for this array element - string sharedByName; - string sharedByEmail; - - // Extra details for verbose logging - if ("sharedBy" in searchResult["remoteItem"]["shared"]) { - if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; - } - if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; - } - } - // Compare this to values in business_shared_folders - if(selectiveSync.isSharedFolderMatched(sharedFolderName)){ - // Matched sharedFolderName to item in business_shared_folders - log.vdebug("Matched sharedFolderName in business_shared_folders: ", sharedFolderName); - // But is this shared folder what we are looking for as part of --single-directory? 
- // User could be using 'directory' or 'directory/directory1/directory2/directory3/' - // Can we find 'sharedFolderName' in the given 'path' - if (canFind(path, sharedFolderName)) { - // Found 'sharedFolderName' in the given 'path' - log.vdebug("Matched 'sharedFolderName' in the given 'path'"); - // What was the matched folder JSON - log.vdebug("Matched sharedFolderName in business_shared_folders JSON: ", searchResult); - // Path we want to sync is on a OneDrive Business Shared Folder - // Set the correct driveId - driveId = searchResult["remoteItem"]["parentReference"]["driveId"].str; - // Set this items id - itemId = searchResult["remoteItem"]["id"].str; - log.vdebug("Updated the driveId to a new value: ", driveId); - log.vdebug("Updated the itemId to a new value: ", itemId); - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, driveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= driveId; - } - - // Log who shared this to assist with sync data correlation - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName); - } - } - } + // Clear up this data + jsonArrayToProcess = null; + // Perform Garbage Collection + GC.collect(); + + // Is latestDeltaLink matching deltaChanges["@odata.deltaLink"].str ? + if ("@odata.deltaLink" in deltaChanges) { + if (latestDeltaLink == deltaChanges["@odata.deltaLink"].str) { + // break out of the 'for (;;) {' loop + break; } } - } else { - // Log that an invalid JSON object was returned - log.error("ERROR: onedrive.getSharedWithMe call returned an invalid JSON Object"); + + // Cleanup deltaChanges as this is no longer needed + deltaChanges = null; + // Perform Garbage Collection + GC.collect(); } - } - - // Test if the path we are going to sync from actually exists on OneDrive - log.vlog("Getting path details from OneDrive ..."); - try { - // Need to use different calls here - one call for majority, another if this is a OneDrive Business Shared Folder - if (!syncBusinessFolders){ - // Not a OneDrive Business Shared Folder - log.vdebug("Calling onedrive.getPathDetailsByDriveId(driveId, path) with: ", driveId, ", ", path); - onedrivePathDetails = onedrive.getPathDetailsByDriveId(driveId, path); + + // To finish off the JSON processing items, this is needed to reflect this in the log + addLogEntry("------------------------------------------------------------------", ["debug"]); + + // Log that we have finished querying the /delta API + if (appConfig.verbosityCount == 0) { + if (!appConfig.suppressLoggingOutput) { + // Close out the '....' 
being printed to the console
+ addLogEntry("\n", ["consoleOnlyNoNewLine"]);
+ }
} else {
- // OneDrive Business Shared Folder - Use another API call using the folders correct driveId and itemId
- log.vdebug("Calling onedrive.getPathDetailsByDriveIdAndItemId(driveId, itemId) with: ", driveId, ", ", itemId);
- onedrivePathDetails = onedrive.getPathDetailsByDriveIdAndItemId(driveId, itemId);
+ addLogEntry("Finished processing /delta JSON response from the OneDrive API", ["verbose"]);
}
- } catch (OneDriveException e) {
- log.vdebug("onedrivePathDetails = onedrive.getPathDetails(path) generated a OneDriveException");
- if (e.httpStatusCode == 404) {
- // The directory was not found
- if (syncBusinessFolders){
- // 404 was returned when trying to use a specific driveId and itemId .. which 'should' work .... but didnt
- // Try the query with the path as a backup failsafe
- log.vdebug("Calling onedrive.getPathDetailsByDriveId(driveId, path) as backup with: ", driveId, ", ", path);
- try {
- // try calling using the path
- onedrivePathDetails = onedrive.getPathDetailsByDriveId(driveId, path);
- } catch (OneDriveException e) {
- 
- if (e.httpStatusCode == 404) {
- log.error("ERROR: The requested single directory to sync was not found on OneDrive - Check folder permissions and sharing status with folder owner");
- return;
- }
- 
- if (e.httpStatusCode == 429) {
- // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
- handleOneDriveThrottleRequest();
- // Retry original request by calling function again to avoid replicating any further error handling
- log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferencesSingleDirectory(path);");
- applyDifferencesSingleDirectory(path);
- // return back to original call
- return;
- }
- 
- if (e.httpStatusCode >= 500) {
- // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged
- return;
- }
- }
- } else {
- // Not a OneDrive Business Shared folder operation
- log.error("ERROR: The requested single directory to sync was not found on OneDrive");
- return;
- }
+ 
+ // If this was set, unset it now as the full scan has been completed, so that a true-up does not trigger a second full scan
+ if (appConfig.fullScanTrueUpRequired) {
+ addLogEntry("Unsetting fullScanTrueUpRequired as this has been performed", ["debug"]);
+ appConfig.fullScanTrueUpRequired = false;
}
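
Summarising the /delta paging contract used by the fetch loop above: follow @odata.nextLink from bundle to bundle, then persist @odata.deltaLink from the final bundle as the next sync's checkpoint. A minimal self-contained sketch (the fetch delegate is a hypothetical stand-in for getDeltaChangesByItemId(); not the actual implementation):

import std.json;

// Illustrative /delta paging loop
string drainDeltaFeed(JSONValue delegate(string) fetchDelta, string startingLink) {
	string link = startingLink;
	while (true) {
		JSONValue bundle = fetchDelta(link);
		// ... each bundle's ["value"] array would be processed here, strictly in order ...
		if ("@odata.nextLink" in bundle) {
			// Checkpoint for the next '200+' item bundle
			link = bundle["@odata.nextLink"].str;
			continue;
		}
		if ("@odata.deltaLink" in bundle) {
			// Final checkpoint for this sync; persist for the next /delta query
			return bundle["@odata.deltaLink"].str;
		}
		// Defensive: a malformed bundle ends the loop
		return null;
	}
}
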
- if (e.httpStatusCode == 429) {
- // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
- handleOneDriveThrottleRequest();
- // Retry original request by calling function again to avoid replicating any further error handling
- log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferencesSingleDirectory(path);");
- applyDifferencesSingleDirectory(path);
- // return back to original call
- return;
+ // Cleanup deltaChanges as this is no longer needed
+ deltaChanges = null;
+ // Perform Garbage Collection
+ GC.collect();
+ 
+ } else {
+ // Why are we generating a /delta response?
+ addLogEntry("Why are we generating a /delta response:", ["debug"]);
+ addLogEntry(" singleDirectoryScope: " ~ to!string(singleDirectoryScope), ["debug"]);
+ addLogEntry(" nationalCloudDeployment: " ~ to!string(nationalCloudDeployment), ["debug"]);
+ addLogEntry(" cleanupLocalFiles: " ~ to!string(cleanupLocalFiles), ["debug"]);
+ 
+ // What 'path' are we going to start generating the response for?
+ string pathToQuery;
+ 
+ // If --single-directory has been called, use the value that has been set
+ if (singleDirectoryScope) {
+ pathToQuery = appConfig.getValueString("single_directory");
}
- 
- if (e.httpStatusCode >= 500) {
- // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged
- return;
+ 
+ // We could also be syncing a Shared Folder of some description
+ if (!sharedFolderName.empty) {
+ pathToQuery = sharedFolderName;
+ }
+ 
+ // Generate the simulated /delta response
+ //
+ // The generated /delta response however contains zero deleted JSON items, so deletions can only be tracked via the database: if an object was in sync,
+ // we have the object in the database. Thus, for every DB object in the tree of items we flag 'syncStatus' as 'N', and then, when we process
+ // the returned JSON items from the API, we flag each item as back in sync, after which we can clean up any remaining out-of-sync items
+ //
+ // The flagging of the local database items to 'N' is handled within the generateDeltaResponse() function
+ //
+ // When these JSON items are then processed, if the item exists online, and is in the DB, and the values match, the DB item is flipped back to 'Y'
+ // This then allows the application to look for any remaining 'N' values, and delete these as no longer needed locally
+ deltaChanges = generateDeltaResponse(pathToQuery);
+ 
+ ulong nrChanges = count(deltaChanges["value"].array);
+ int changeCount = 0;
+ addLogEntry("API Response Bundle: " ~ to!string(responseBundleCount) ~ " - Quantity of 'changes|items' in this bundle to process: " ~ to!string(nrChanges), ["debug"]);
+ // Update the count of items received
+ jsonItemsReceived = jsonItemsReceived + nrChanges;
+ 
+ // The API response however cannot be processed in parallel, as the OneDrive API sends the JSON items in the order in which they must be processed
+ auto jsonArrayToProcess = deltaChanges["value"].array;
+ foreach (onedriveJSONItem; deltaChanges["value"].array) {
+ // increment change count for this item
+ changeCount++;
+ // Process the received OneDrive object item JSON for this JSON bundle
+ // When we generate a /delta response .. there is no currentDeltaLink value
+ processDeltaJSONItem(onedriveJSONItem, nrChanges, changeCount, responseBundleCount, singleDirectoryScope);
+ }
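
The 'N'/'Y' syncStatus trick described in the comment block above can be illustrated with a self-contained reconciliation sketch. Here an associative array stands in for the items database; this is not the actual generateDeltaResponse() code:

enum SyncStatus { N, Y }

// Illustrative reconciliation: mark everything 'N', flip matched items back to 'Y',
// and whatever remains 'N' no longer exists online and can be cleaned up locally
void reconcile(ref SyncStatus[string] dbTree, const string[] itemsSeenOnline) {
	// Step 1: assume every known DB item is out-of-sync until proven otherwise
	foreach (path, ref status; dbTree) status = SyncStatus.N;
	// Step 2: items present in the generated /delta response are back in sync
	foreach (path; itemsSeenOnline) {
		if (auto entry = path in dbTree) *entry = SyncStatus.Y;
	}
	// Step 3: any entry still SyncStatus.N is a candidate for local deletion
}
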
+ 
+ // Clear up this data
+ jsonArrayToProcess = null;
+ // Perform Garbage Collection
+ GC.collect();
+ 
+ // Close out this set of JSON processing in the debug log
+ addLogEntry("------------------------------------------------------------------", ["debug"]);
+ 
+ // Log that we have finished generating our self-generated /delta response
+ if (!appConfig.suppressLoggingOutput) {
+ addLogEntry("Finished processing self generated /delta JSON response from the OneDrive API");
}
+ 
+ // Cleanup deltaChanges as this is no longer needed
+ deltaChanges = null;
+ 
+ }
- // OK - the path on OneDrive should exist, get the driveId and rootId for this folder
- // Was the response a valid JSON Object?
- if (onedrivePathDetails.type() == JSONType.object) {
- // OneDrive Personal Shared Folder handling
- // Is this item a remote item?
- if(isItemRemote(onedrivePathDetails)){
- // 2 step approach:
- // 1. Ensure changes for the root remote path are captured
- // 2. Download changes specific to the remote path
- 
- // root remote
- applyDifferences(defaultDriveId, onedrivePathDetails["id"].str, false);
+ // Cleanup deltaChanges as this is no longer needed
+ deltaChanges = null;
+ // Perform Garbage Collection
+ GC.collect();
+ 
+ // We have JSON items received from the OneDrive API
+ addLogEntry("Number of JSON Objects received from OneDrive API: " ~ to!string(jsonItemsReceived), ["debug"]);
+ addLogEntry("Number of JSON Objects already processed (root and deleted items): " ~ to!string((jsonItemsReceived - jsonItemsToProcess.length)), ["debug"]);
+ 
+ // We should now have processed all the JSON items returned by the /delta call
+ // Additionally, we should have a new array that contains all the JSON items we still need to process, that are not 'root' or deleted items
+ addLogEntry("Number of JSON items to process is: " ~ to!string(jsonItemsToProcess.length), ["debug"]);
+ 
+ // Are there items to process?
+ if (jsonItemsToProcess.length > 0) {
+ // Let's deal with the JSON items in a batch process
+ size_t batchSize = 500;
+ ulong batchCount = (jsonItemsToProcess.length + batchSize - 1) / batchSize;
+ ulong batchesProcessed = 0;
+ 
+ // Dynamic output for a non-verbose run so that the user knows something is happening
+ if (!appConfig.suppressLoggingOutput) {
+ addProcessingLogHeaderEntry("Processing " ~ to!string(jsonItemsToProcess.length) ~ " applicable changes and items received from Microsoft OneDrive", appConfig.verbosityCount);
+ }
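
The batch arithmetic just above is a standard ceiling division, and std.range.chunks then yields the batches lazily. A small self-contained illustration (the numbers are examples only):

import std.range : chunks;

void batchingSketch() {
	auto items = new int[1201];
	size_t batchSize = 500;
	// Ceiling division: (1201 + 499) / 500 == 3 batches (500, 500 and 201 items)
	ulong batchCount = (items.length + batchSize - 1) / batchSize;
	assert(batchCount == 3);
	foreach (batch; items.chunks(batchSize)) {
		// each 'batch' is a slice of at most batchSize items, delivered in order
	}
}
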
- // remote changes
- driveId = onedrivePathDetails["remoteItem"]["parentReference"]["driveId"].str; // Should give something like 66d53be8a5056eca
- folderId = onedrivePathDetails["remoteItem"]["id"].str; // Should give something like BC7D88EC1F539DCF!107
+ 
+ // For each batch, process the JSON items that now need to be processed.
+ // 'root' and deleted objects have already been handled
+ foreach (batchOfJSONItems; jsonItemsToProcess.chunks(batchSize)) {
+ // Chunk the total items to process into lots of 500 items
+ batchesProcessed++;
+ if (appConfig.verbosityCount == 0) {
+ // Dynamic output for a non-verbose run so that the user knows something is happening
+ if (!appConfig.suppressLoggingOutput) {
+ addProcessingDotEntry();
+ }
+ } else {
+ addLogEntry("Processing OneDrive JSON item batch [" ~ to!string(batchesProcessed) ~ "/" ~ to!string(batchCount) ~ "] to ensure consistent local state", ["verbose"]);
+ }
+ 
+ // Process the batch
+ processJSONItemsInBatch(batchOfJSONItems, batchesProcessed, batchCount);
- 
- // Apply any differences found on OneDrive for this path (download data)
- applyDifferences(driveId, folderId, false);
- } else {
- // use the item id as folderId
- folderId = onedrivePathDetails["id"].str; // Should give something like 12345ABCDE1234A1!101
- // Apply any differences found on OneDrive for this path (download data)
- // Use driveId rather than defaultDriveId as this will be updated if path was matched to another parent driveId
- applyDifferences(driveId, folderId, false);
+ 
+ // Close out this batch of JSON processing in the debug log
+ addLogEntry("------------------------------------------------------------------", ["debug"]);
}
- } else {
- // Log that an invalid JSON object was returned
- log.vdebug("onedrive.getPathDetails call returned an invalid JSON Object");
- }
- }
- 
- // make sure the OneDrive root is in our database
- auto checkDatabaseForOneDriveRoot()
- {
- log.vlog("Fetching details for OneDrive Root");
- JSONValue rootPathDetails = onedrive.getDefaultRoot(); // Returns a JSON Value
- 
- // validate object is a JSON value
- if (rootPathDetails.type() == JSONType.object) {
- // valid JSON object
- Item rootPathItem = makeItem(rootPathDetails);
- // configure driveId and rootId for the OneDrive Root
- // Set defaults for the root folder
- string driveId = rootPathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1
- string rootId = rootPathDetails["id"].str; // Should give something like 12345ABCDE1234A1!101
- 
- // Query the database
- if (!itemdb.selectById(driveId, rootId, rootPathItem)) {
- log.vlog("OneDrive Root does not exist in the database. We need to add it.");
- applyDifference(rootPathDetails, driveId, true);
- log.vlog("Added OneDrive Root to the local database");
- } else {
- log.vlog("OneDrive Root exists in the database");
+ 
+ if (appConfig.verbosityCount == 0) {
+ // close off '.' output
+ if (!appConfig.suppressLoggingOutput) {
+ addLogEntry("\n", ["consoleOnlyNoNewLine"]);
+ }
+ }
+ 
+ // Debug output - what was processed
+ addLogEntry("Number of JSON items to process is: " ~ to!string(jsonItemsToProcess.length), ["debug"]);
+ addLogEntry("Number of JSON items processed was: " ~ to!string(processedCount), ["debug"]);
+ 
+ // Free up memory and items processed as it is pointless now having this data around
+ jsonItemsToProcess = [];
+ 
+ // Perform Garbage Collection now that this data has been released
+ GC.collect();
} else {
+ if (!appConfig.suppressLoggingOutput) {
+ addLogEntry("No changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive");
+ }
+ }
+ 
+ // Keep the DriveDetailsCache array with unique entries only
+ DriveDetailsCache cachedOnlineDriveData;
+ if (!canFindDriveId(driveIdToQuery, cachedOnlineDriveData)) {
+ // Add this driveId to the drive cache
+ addOrUpdateOneDriveOnlineDetails(driveIdToQuery);
+ }
}
- // create a directory on OneDrive without syncing
- auto createDirectoryNoSync(const(string) path)
- {
- // Attempt to create the requested path within OneDrive without performing a sync
- log.vlog("Attempting to create the requested path within OneDrive");
- 
- // Handle the remote folder creation and updating of the local database without performing a sync
- uploadCreateDir(path);
- }
- 
- // delete a directory on OneDrive without syncing
- auto deleteDirectoryNoSync(const(string) path)
- {
- // Use the global's as initialised via init() rather than performing unnecessary additional HTTPS calls
- const(char)[] rootId = defaultRootId;
+ // Process the /delta API JSON response items
+ void processDeltaJSONItem(JSONValue onedriveJSONItem, ulong nrChanges, int changeCount, ulong responseBundleCount, bool singleDirectoryScope) {
- // Attempt to delete the requested path within OneDrive without performing a sync
- log.vlog("Attempting to delete the requested path within OneDrive");
+ // Variables for this JSON item
+ string thisItemId;
+ bool itemIsRoot = false;
+ bool handleItemAsRootObject = false;
+ bool itemIsDeletedOnline = false;
+ bool itemHasParentReferenceId = false;
+ bool itemHasParentReferencePath = false;
+ bool itemIdMatchesDefaultRootId = false;
+ bool itemNameExplicitMatchRoot = false;
+ string objectParentDriveId;
+ auto jsonProcessingStartTime = Clock.currTime();
- // test if the path we are going to exists on OneDrive
- try {
- onedrive.getPathDetails(path);
- } catch (OneDriveException e) {
- log.vdebug("onedrive.getPathDetails(path) generated a OneDriveException");
- if (e.httpStatusCode == 404) {
- // The directory was not found on OneDrive - no need to delete it
- log.vlog("The requested directory to delete was not found on OneDrive - skipping removing the remote directory as it doesn't exist");
- return;
- }
+ addLogEntry("------------------------------------------------------------------", ["debug"]);
+ addLogEntry("Processing OneDrive Item " ~ to!string(changeCount) ~ " of " ~ to!string(nrChanges) ~ " from API Response Bundle " ~ to!string(responseBundleCount), ["debug"]);
+ addLogEntry("Raw JSON OneDrive Item: " ~ to!string(onedriveJSONItem), ["debug"]);
+ 
onedriveJSONItem["id"].str; + + // Is this a deleted item - only calculate this once + itemIsDeletedOnline = isItemDeleted(onedriveJSONItem); + if (!itemIsDeletedOnline) { + // This is not a deleted item + addLogEntry("This item is not a OneDrive deletion change", ["debug"]); - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling deleteDirectoryNoSync(path);"); - deleteDirectoryNoSync(path); - // return back to original call - return; - } + // Only calculate this once + itemIsRoot = isItemRoot(onedriveJSONItem); + itemHasParentReferenceId = hasParentReferenceId(onedriveJSONItem); + itemIdMatchesDefaultRootId = (thisItemId == appConfig.defaultRootId); + itemNameExplicitMatchRoot = (onedriveJSONItem["name"].str == "root"); + objectParentDriveId = onedriveJSONItem["parentReference"]["driveId"].str; - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + // Test is this is the OneDrive Users Root? + // Debug output of change evaluation items + addLogEntry("defaultRootId = " ~ appConfig.defaultRootId, ["debug"]); + addLogEntry("'search id' = " ~ thisItemId, ["debug"]); + addLogEntry("id == defaultRootId = " ~ to!string(itemIdMatchesDefaultRootId), ["debug"]); + addLogEntry("isItemRoot(onedriveJSONItem) = " ~ to!string(itemIsRoot), ["debug"]); + addLogEntry("onedriveJSONItem['name'].str == 'root' = " ~ to!string(itemNameExplicitMatchRoot), ["debug"]); + addLogEntry("itemHasParentReferenceId = " ~ to!string(itemHasParentReferenceId), ["debug"]); + + if ( (itemIdMatchesDefaultRootId || singleDirectoryScope) && itemIsRoot && itemNameExplicitMatchRoot) { + // This IS a OneDrive Root item or should be classified as such in the case of 'singleDirectoryScope' + addLogEntry("JSON item will flagged as a 'root' item", ["debug"]); + handleItemAsRootObject = true; } } - Item item; - // Need to check all driveid's we know about, not just the defaultDriveId - bool itemInDB = false; - foreach (searchDriveId; driveIDsArray) { - if (itemdb.selectByPath(path, searchDriveId, item)) { - // item was found in the DB - itemInDB = true; - break; - } - } - // Was the item found in the DB - if (!itemInDB) { - // this is odd .. this directory is not in the local database - just go delete it - log.vlog("The requested directory to delete was not found in the local database - pushing delete request direct to OneDrive"); - uploadDeleteItem(item, path); + // How do we handle this JSON item from the OneDrive API? 
+ // Is this a confirmed 'root' item, has no Parent ID, or is a Deleted Item + if (handleItemAsRootObject || !itemHasParentReferenceId || itemIsDeletedOnline){ + // Is a root item, has no id in parentReference or is a OneDrive deleted item + addLogEntry("objectParentDriveId = " ~ objectParentDriveId, ["debug"]); + addLogEntry("handleItemAsRootObject = " ~ to!string(handleItemAsRootObject), ["debug"]); + addLogEntry("itemHasParentReferenceId = " ~ to!string(itemHasParentReferenceId), ["debug"]); + addLogEntry("itemIsDeletedOnline = " ~ to!string(itemIsDeletedOnline), ["debug"]); + addLogEntry("Handling change immediately as 'root item', or has no parent reference id or is a deleted item", ["debug"]); + + // OK ... do something with this JSON post here .... + processRootAndDeletedJSONItems(onedriveJSONItem, objectParentDriveId, handleItemAsRootObject, itemIsDeletedOnline, itemHasParentReferenceId); } else { - // the folder was in the local database - // Handle the deletion and saving any update to the local database - log.vlog("The requested directory to delete was found in the local database. Processing the deletion normally"); - deleteByPath(path); - } - } - - // rename a directory on OneDrive without syncing - auto renameDirectoryNoSync(string source, string destination) - { - try { - // test if the local path exists on OneDrive - onedrive.getPathDetails(source); - } catch (OneDriveException e) { - log.vdebug("onedrive.getPathDetails(source); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found - log.vlog("The requested directory to rename was not found on OneDrive"); - return; + // Do we need to update this RAW JSON from OneDrive? + if ( (objectParentDriveId != appConfig.defaultDriveId) && (appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items")) ) { + // Potentially need to update this JSON data + addLogEntry("Potentially need to update this source JSON .... need to check the database", ["debug"]); + + // Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id + Item remoteDBItem; + itemDB.selectByRemoteId(objectParentDriveId, thisItemId, remoteDBItem); + + // Is the data that was returned from the database what we are looking for? + if ((remoteDBItem.remoteDriveId == objectParentDriveId) && (remoteDBItem.remoteId == thisItemId)) { + // Yes, this is the record we are looking for + addLogEntry("DB Item response for remoteDBItem: " ~ to!string(remoteDBItem), ["debug"]); + + // Must compare remoteDBItem.name with remoteItem.name + if (remoteDBItem.name != onedriveJSONItem["name"].str) { + // Update JSON Item + string actualOnlineName = onedriveJSONItem["name"].str; + addLogEntry("Updating source JSON 'name' to that which is the actual local directory", ["debug"]); + addLogEntry("onedriveJSONItem['name'] was: " ~ onedriveJSONItem["name"].str, ["debug"]); + addLogEntry("Updating onedriveJSONItem['name'] to: " ~ remoteDBItem.name, ["debug"]); + onedriveJSONItem["name"] = remoteDBItem.name; + addLogEntry("onedriveJSONItem['name'] now: " ~ onedriveJSONItem["name"].str, ["debug"]); + // Add the original name to the JSON + onedriveJSONItem["actualOnlineName"] = actualOnlineName; + } + } } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- 					handleOneDriveThrottleRequest();
-					// Retry original request by calling function again to avoid replicating any further error handling
-					log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling renameDirectoryNoSync(source, destination);");
-					renameDirectoryNoSync(source, destination);
-					// return back to original call
-					return;
+				
+				// If we are not self-generating a /delta response, check this initial /delta JSON bundle item against the basic
+				// 'skip_file', 'skip_dir' and 'sync_list' applicability checks
+				// We only do this if we did not generate a /delta response, as generateDeltaResponse() performs the checkJSONAgainstClientSideFiltering()
+				// against elements as it is building the /delta compatible response
+				// If we blindly 'checked again' every JSON response, items would potentially be processed twice whenever generateDeltaResponse() was used
+				bool discardDeltaJSONItem = false;
+				if (!generateSimulatedDeltaResponse) {
+					// Check applicability against 'skip_file', 'skip_dir' and 'sync_list'
+					discardDeltaJSONItem = checkJSONAgainstClientSideFiltering(onedriveJSONItem);
				}
-				if (e.httpStatusCode >= 500) {
-					// OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged
-					return;
+				}
+				
+				// Add this JSON item for further processing if this is not being discarded
+				if (!discardDeltaJSONItem) {
+					// Add onedriveJSONItem to jsonItemsToProcess
+					addLogEntry("Adding this Raw JSON OneDrive Item to jsonItemsToProcess array for further processing", ["debug"]);
+					jsonItemsToProcess ~= onedriveJSONItem;
				}
			}
-			// The OneDrive API returned a 200 OK status, so the folder exists
-			// Rename the requested directory on OneDrive without performing a sync
-			moveByPath(source, destination);
+			
+			// How long did the initial processing of this JSON item take?
+			auto jsonProcessingElapsedTime = Clock.currTime() - jsonProcessingStartTime;
+			addLogEntry("Initial JSON item processing time: " ~ to!string(jsonProcessingElapsedTime), ["debug"]);
		}
	
-	// download the new changes of a specific item
-	// id is the root of the drive or a shared folder
-	private void applyDifferences(string driveId, const(char)[] id, bool performFullItemScan)
-	{
-		log.vlog("Applying changes of Path ID: " ~ id);
-		// function variables
-		char[] idToQuery;
-		JSONValue changes;
-		JSONValue changesAvailable;
-		JSONValue idDetails;
-		JSONValue currentDriveQuota;
-		string syncFolderName;
-		string syncFolderPath;
-		string syncFolderChildPath;
-		string deltaLink;
-		string deltaLinkAvailable;
-		bool nationalCloudChildrenScan = false;
-		
-		// Tracking processing performance
-		SysTime startFunctionProcessingTime;
-		SysTime endFunctionProcessingTime;
-		SysTime startBundleProcessingTime;
-		SysTime endBundleProcessingTime;
-		ulong cumulativeOneDriveItemCount = 0;
-		
-		if (displayProcessingTime) {
-			writeln("============================================================");
-			writeln("Querying OneDrive API for relevant 'changes|items' stored online for this account");
-			startFunctionProcessingTime = Clock.currTime();
-			writeln("Start Function Processing Time: ", startFunctionProcessingTime);
-		}
-		
-		// Update the quota details for this driveId, as this could have changed since we started the application - the user could have added / deleted data online, or purchased additional storage
-		// Quota details are ONLY available for the main default driveId, as the OneDrive API does not provide quota details for shared folders
-		try {
-			currentDriveQuota = onedrive.getDriveQuota(driveId);
-		} catch (OneDriveException e) {
-			log.vdebug("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException");
-			if (e.httpStatusCode == 429) {
-				// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
-				handleOneDriveThrottleRequest();
-				// Retry original request by calling function again to avoid replicating any further error handling
-				log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferences(driveId, id, performFullItemScan);");
-				applyDifferences(driveId, id, performFullItemScan);
-				// return back to original call
-				return;
+	// Process 'root' and 'deleted' OneDrive JSON items
+	void processRootAndDeletedJSONItems(JSONValue onedriveJSONItem, string driveId, bool handleItemAsRootObject, bool itemIsDeletedOnline, bool itemHasParentReferenceId) {
+	
+		// Use the JSON elements rather than computing a DB struct via makeItem()
+		string thisItemId = onedriveJSONItem["id"].str;
+		string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str;
+		
+		// Check if the item has been seen before
+		Item existingDatabaseItem;
+		bool existingDBEntry = itemDB.selectById(thisItemDriveId, thisItemId, existingDatabaseItem);
+		
+		// Is the item deleted online?
+		if(!itemIsDeletedOnline) {
+			
+			// Is the item a confirmed root object?
+			
+			// The JSON item should be considered a 'root' item if it:
+			// 1. Contains a ["root"] element
+			// 2. Has no ["parentReference"]["id"] ... #323 & #324 highlighted that this is false as some 'root' shared objects now can have an 'id' element .. OneDrive API change
+			// 3. Has no ["parentReference"]["path"]
+			// 4. Was flagged by the caller to be handled as a root item regardless of its actual status
+			
+			if ((handleItemAsRootObject) || (!itemHasParentReferenceId)) {
+				addLogEntry("Handling JSON object as OneDrive 'root' object", ["debug"]);
+				if (!existingDBEntry) {
+					// we have not seen this item before
+					saveItem(onedriveJSONItem);
+				}
			}
-			if (e.httpStatusCode >= 500) {
-				// OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged
-				return;
+		} else {
+			// Change is to delete an item
+			addLogEntry("Handling a OneDrive Deleted Item", ["debug"]);
+			if (existingDBEntry) {
+				// Is the item to delete locally actually in sync with OneDrive currently?
+				// What is the source of this item data?
+				string itemSource = "online";
+				
+				// Compute this deleted item's path based on the database entries
+				string localPathToDelete = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name;
+				if (isItemSynced(existingDatabaseItem, localPathToDelete, itemSource)) {
+					// Flag to delete
+					addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]);
+					idsToDelete ~= [thisItemDriveId, thisItemId];
+				} else {
+					// Local data protection is configured; safeBackup the local file, passing in whether we are performing a --dry-run or not
+					// In case the renamed path is needed
+					string renamedPath;
+					safeBackup(localPathToDelete, dryRun, renamedPath);
+				}
+			} else {
+				// Flag to ignore
+				addLogEntry("Flagging item to skip: " ~ to!string(onedriveJSONItem), ["debug"]);
+				skippedItems.insert(thisItemId);
			}
		}
-		
-		// validate that currentDriveQuota is a JSON value
-		if (currentDriveQuota.type() == JSONType.object) {
-			// Response from API contains valid data
-			// If 'personal' accounts, if driveId == defaultDriveId, then we will have data
-			// If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data
-			// If 'business' accounts, if driveId == defaultDriveId, then we will have data
-			// If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be 0 values
-			if ("quota" in currentDriveQuota){
-				if (driveId == defaultDriveId) {
-					// We potentially have updated quota remaining details available
-					// However in some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero
-					if ("remaining" in currentDriveQuota["quota"]){
-						// We have valid quota details returned for the drive id
-						remainingFreeSpace = currentDriveQuota["quota"]["remaining"].integer;
-						if (remainingFreeSpace <= 0) {
-							if (accountType == "personal"){
-								// zero space available
-								log.error("ERROR: OneDrive account currently has zero space available. Please free up some space online.");
-								quotaAvailable = false;
-							} else {
-								// zero space available is being reported, maybe being restricted?
-								log.error("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.");
-								quotaRestricted = true;
-							}
-						} else {
-							// Display the updated value
-							log.vlog("Updated Remaining Free Space: ", remainingFreeSpace);
-						}
+	}
+	
+	// Process each of the elements contained in jsonItemsToProcess[]
+	void processJSONItemsInBatch(JSONValue[] array, ulong batchGroup, ulong batchCount) {
+	
+		ulong batchElementCount = array.length;
+	
+		foreach (i, onedriveJSONItem; array.enumerate) {
+			// Use the JSON elements rather than computing a DB struct via makeItem()
+			ulong elementCount = i + 1;
+			auto jsonProcessingStartTime = Clock.currTime();
+			
+			// To show this is the processing for this particular item, start off with this breaker line
+			addLogEntry("------------------------------------------------------------------", ["debug"]);
+			addLogEntry("Processing OneDrive JSON item " ~ to!string(elementCount) ~ " of " ~ to!string(batchElementCount) ~ " as part of JSON Item Batch " ~ to!string(batchGroup) ~ " of " ~ to!string(batchCount), ["debug"]);
+			addLogEntry("Raw JSON OneDrive Item (Batched Item): " ~ to!string(onedriveJSONItem), ["debug"]);
+			
+			string thisItemId = onedriveJSONItem["id"].str;
+			string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str;
+			string thisItemParentId = onedriveJSONItem["parentReference"]["id"].str;
+			string thisItemName = onedriveJSONItem["name"].str;
+			
+			// Create an empty item struct for an existing DB item
+			Item existingDatabaseItem;
+			
+			// Do we NOT want this item?
+			bool unwanted = false; // meaning by default we will WANT this item
+			// Is this parent in the database?
+			bool parentInDatabase = false;
+			// What is the path of the new item
+			string newItemPath;
+			
+			// Configure the remoteItem - so if it is used, it can be utilised later
+			Item remoteItem;
+			
+			// Check the database for an existing entry for this JSON item
+			bool existingDBEntry = itemDB.selectById(thisItemDriveId, thisItemId, existingDatabaseItem);
+			
+			// Calculate if the Parent Item is in the database so that it can be re-used
+			parentInDatabase = itemDB.idInLocalDatabase(thisItemDriveId, thisItemParentId);
+			
+			// Calculate the path of this JSON item, but we can only do this if the parent is in the database
+			if (parentInDatabase) {
+				// Calculate this item's path
+				newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName;
+				addLogEntry("JSON Item calculated full path is: " ~ newItemPath, ["debug"]);
+			} else {
+				// Parent not in the database
+				// Is the parent a 'folder' from another user? i.e. is this a 'shared folder' that has been shared with us?
+				
+				// Let's determine why
+				if (thisItemDriveId == appConfig.defaultDriveId) {
+					// Parent path does not exist - flagging as unwanted
+					addLogEntry("Flagging as unwanted: thisItemDriveId (" ~ thisItemDriveId ~ "), thisItemParentId (" ~ thisItemParentId ~ ") not in local database", ["debug"]);
+					// Was this a skipped item?
+					if (thisItemParentId in skippedItems) {
+						// Parent is a skipped item
+						addLogEntry("Reason: thisItemParentId listed within skippedItems", ["debug"]);
+					} else {
+						// Parent is not in the database, as we are not creating it
+						addLogEntry("Reason: Parent ID is not in the DB", ["debug"]);
					}
+					
+					// Flag as unwanted
+					unwanted = true;
				} else {
-					// quota details returned, but for a drive id that is not ours
-					if ("remaining" in currentDriveQuota["quota"]){
-						// remaining is in the quota JSON response
-						if (currentDriveQuota["quota"]["remaining"].integer <= 0) {
-							// value returned is 0 or less than 0
-							log.vlog("OneDrive quota information is set at zero, as this is not our drive id, ignoring");
+					// Edge case as the parent (from another user's OneDrive account) will never be in the database - potentially a shared object?
+					addLogEntry("The reported parentId is not in the database. This potentially is a shared folder as 'remoteItem.driveId' != 'appConfig.defaultDriveId'. Relevant Details: remoteItem.driveId (" ~ remoteItem.driveId ~ "), remoteItem.parentId (" ~ remoteItem.parentId ~ ")", ["debug"]);
+					addLogEntry("Potential Shared Object JSON: " ~ to!string(onedriveJSONItem), ["debug"]);
+					
+					// Format the OneDrive change into a consumable object for the database
+					remoteItem = makeItem(onedriveJSONItem);
+					
+					if (appConfig.accountType == "personal") {
+						// Personal Account Handling
+						addLogEntry("Handling a Personal Shared Item JSON object", ["debug"]);
+						
+						if (hasSharedElement(onedriveJSONItem)) {
+							// Has the Shared JSON structure
+							addLogEntry("Personal Shared Item JSON object has the 'shared' JSON structure", ["debug"]);
+							// Create a 'root' DB Tie Record for this JSON object
+							createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem);
+						}
+						
+						// Ensure that this item has no parent
+						addLogEntry("Setting remoteItem.parentId to be null", ["debug"]);
+						remoteItem.parentId = null;
+						// Add this record to the local database
+						addLogEntry("Update/Insert local database with remoteItem details with remoteItem.parentId as null: " ~ to!string(remoteItem), ["debug"]);
+						itemDB.upsert(remoteItem);
+					} else {
+						// Business or SharePoint Account Handling
+						addLogEntry("Handling a Business or SharePoint Shared Item JSON object", ["debug"]);
+						
+						if (appConfig.accountType == "business") {
+							// Create a 'root' DB Tie Record for this JSON object
+							createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem);
+							
+							// Ensure that this item has no parent
+							addLogEntry("Setting remoteItem.parentId to be null", ["debug"]);
+							remoteItem.parentId = null;
+							
+							// Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id
+							Item remoteDBItem;
+							itemDB.selectByRemoteId(remoteItem.driveId, remoteItem.id, remoteDBItem);
+							
+							// Must compare remoteDBItem.name with remoteItem.name
+							if ((!remoteDBItem.name.empty) && (remoteDBItem.name != remoteItem.name)) {
+								// Update DB Item
+								addLogEntry("The shared item stored in OneDrive has a different name to the actual name on the remote drive", ["debug"]);
+								addLogEntry("Updating remoteItem.name with the actual name being used on the account drive and local folder", ["debug"]);
+								addLogEntry("remoteItem.name was: " ~ remoteItem.name, ["debug"]);
+								addLogEntry("Updating remoteItem.name to: " ~ remoteDBItem.name, ["debug"]);
+								remoteItem.name = remoteDBItem.name;
+								addLogEntry("Setting remoteItem.remoteName to: " ~ onedriveJSONItem["name"].str, ["debug"]);
+								
+								// Update JSON Item
+								remoteItem.remoteName = onedriveJSONItem["name"].str;
+								addLogEntry("Updating source JSON 'name' to the name actually used for the local directory", ["debug"]);
+								addLogEntry("onedriveJSONItem['name'] was: " ~ onedriveJSONItem["name"].str, ["debug"]);
+								addLogEntry("Updating 
onedriveJSONItem['name'] to: " ~ remoteDBItem.name, ["debug"]); + onedriveJSONItem["name"] = remoteDBItem.name; + addLogEntry("onedriveJSONItem['name'] now: " ~ onedriveJSONItem["name"].str, ["debug"]); + + // Update newItemPath value + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ remoteDBItem.name; + addLogEntry("New Item updated calculated full path is: " ~ newItemPath, ["debug"]); + } + + // Add this record to the local database + addLogEntry("Update/Insert local database with remoteItem details: " ~ to!string(remoteItem), ["debug"]); + itemDB.upsert(remoteItem); + } else { + // Sharepoint account type + addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED YET ........ ", ["info"]); } } } - } else { - // No quota details returned - if (driveId == defaultDriveId) { - // no quota details returned for current drive id - log.error("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online."); - } else { - // quota details not available - log.vdebug("OneDrive quota information is being restricted as this is not our drive id."); - } - } - } - - // Query OneDrive API for the name of this folder id - try { - idDetails = onedrive.getPathDetailsById(driveId, id); - } catch (OneDriveException e) { - log.vdebug("idDetails = onedrive.getPathDetailsById(driveId, id) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // id was not found - possibly a remote (shared) folder - log.vlog("No details returned for given Path ID"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferences(driveId, id, performFullItemScan);"); - applyDifferences(driveId, id, performFullItemScan); - // return back to original call - return; } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } - - // validate that idDetails is a JSON value - if (idDetails.type() == JSONType.object) { - // Get the name of this 'Path ID' - if (("id" in idDetails) != null) { - // valid response from onedrive.getPathDetailsById(driveId, id) - a JSON item object present - if ((idDetails["id"].str == id) && (!isItemFile(idDetails))){ - // Is a Folder or Remote Folder - syncFolderName = idDetails["name"].str; - } - - // Debug output of path details as queried from OneDrive - log.vdebug("OneDrive Path Details: ", idDetails); + // Check the skippedItems array for the parent id of this JSONItem if this is something we need to skip + if (!unwanted) { + if (thisItemParentId in skippedItems) { + // Flag this JSON item as unwanted + addLogEntry("Flagging as unwanted: find(thisItemParentId).length != 0", ["debug"]); + unwanted = true; + + // Is this item id in the database? 
+ if (existingDBEntry) { + // item exists in database, most likely moved out of scope for current client configuration + addLogEntry("This item was previously synced / seen by the client", ["debug"]); + + if (("name" in onedriveJSONItem["parentReference"]) != null) { - // OneDrive Personal Folder Item Reference (24/4/2019) - // "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('66d53be8a5056eca')/items/$entity", - // "cTag": "adDo2NkQ1M0JFOEE1MDU2RUNBITEwMS42MzY5MTY5NjQ1ODcwNzAwMDA", - // "eTag": "aNjZENTNCRThBNTA1NkVDQSExMDEuMQ", - // "fileSystemInfo": { - // "createdDateTime": "2018-06-06T20:45:24.436Z", - // "lastModifiedDateTime": "2019-04-24T07:09:31.29Z" - // }, - // "folder": { - // "childCount": 3, - // "view": { - // "sortBy": "takenOrCreatedDateTime", - // "sortOrder": "ascending", - // "viewType": "thumbnails" - // } - // }, - // "id": "66D53BE8A5056ECA!101", - // "name": "root", - // "parentReference": { - // "driveId": "66d53be8a5056eca", - // "driveType": "personal" - // }, - // "root": {}, - // "size": 0 - - // OneDrive Personal Remote / Shared Folder Item Reference (4/9/2019) - // "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('driveId')/items/$entity", - // "cTag": "cTag", - // "eTag": "eTag", - // "id": "itemId", - // "name": "shared", - // "parentReference": { - // "driveId": "driveId", - // "driveType": "personal", - // "id": "parentItemId", - // "path": "/drive/root:" - // }, - // "remoteItem": { - // "fileSystemInfo": { - // "createdDateTime": "2019-01-14T18:54:43.2666667Z", - // "lastModifiedDateTime": "2019-04-24T03:47:22.53Z" - // }, - // "folder": { - // "childCount": 0, - // "view": { - // "sortBy": "takenOrCreatedDateTime", - // "sortOrder": "ascending", - // "viewType": "thumbnails" - // } - // }, - // "id": "remoteItemId", - // "parentReference": { - // "driveId": "remoteDriveId", - // "driveType": "personal" - // "id": "id", - // "name": "name", - // "path": "/drives//items/:/" - // }, - // "size": 0, - // "webUrl": "webUrl" - // } - - // OneDrive Business Folder & Shared Folder Item Reference (24/4/2019) - // "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('driveId')/items/$entity", - // "@odata.etag": "\"{eTag},1\"", - // "cTag": "\"c:{cTag},0\"", - // "eTag": "\"{eTag},1\"", - // "fileSystemInfo": { - // "createdDateTime": "2019-04-17T04:00:43Z", - // "lastModifiedDateTime": "2019-04-17T04:00:43Z" - // }, - // "folder": { - // "childCount": 2 - // }, - // "id": "itemId", - // "name": "shared_folder", - // "parentReference": { - // "driveId": "parentDriveId", - // "driveType": "business", - // "id": "parentId", - // "path": "/drives/driveId/root:" - // }, - // "size": 0 - - // To evaluate a change received from OneDrive, this must be set correctly - if (hasParentReferencePath(idDetails)) { - // Path from OneDrive has a parentReference we can use - log.vdebug("Item details returned contains parent reference path - potentially shared folder object"); - syncFolderPath = idDetails["parentReference"]["path"].str; - syncFolderChildPath = syncFolderPath ~ "/" ~ idDetails["name"].str ~ "/"; - } else { - // No parentReference, set these to blank - log.vdebug("Item details returned no parent reference path"); - syncFolderPath = ""; - syncFolderChildPath = ""; + // How is this out of scope? 
+							// is sync_list configured?
+							if (syncListConfigured) {
+								// sync_list configured and in use
+								if (selectiveSync.isPathExcludedViaSyncList(onedriveJSONItem["parentReference"]["name"].str)) {
+									// Previously synced item is now out of scope as it has been moved out of what is included in sync_list
+									addLogEntry("This previously synced item is now excluded from being synced due to sync_list exclusion", ["debug"]);
+								}
+							}
+							// Flag to delete the local file as it is no longer in sync with OneDrive
+							addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]);
+							idsToDelete ~= [thisItemDriveId, thisItemId];
+						}
+					}
				}
-			
-			// Debug Output
-			log.vdebug("Sync Folder Name: ", syncFolderName);
-			log.vdebug("Sync Folder Parent Path: ", syncFolderPath);
-			log.vdebug("Sync Folder Child Path: ", syncFolderChildPath);
-			}
-		} else {
-			// Log that an invalid JSON object was returned
-			log.vdebug("onedrive.getPathDetailsById call returned an invalid JSON Object");
-		}
-		
-		// Issue #658
-		// If we are using a sync_list file, using deltaLink will actually 'miss' changes (moves & deletes) on OneDrive as using sync_list discards changes
-		// Use the performFullItemScan boolean to control whether we perform a full object scan of use the delta link for the root folder
-		// When using --synchronize the normal process order is:
-		// 1. Scan OneDrive for changes
-		// 2. Scan local folder for changes
-		// 3. Scan OneDrive for changes
-		// When using sync_list and performing a full scan, what this means is a full scan is performed twice, which leads to massive processing & time overheads
-		// Control this via performFullItemScan
-		
-		// Get the current delta link
-		deltaLinkAvailable = itemdb.getDeltaLink(driveId, id);
-		// if sync_list is not configured, syncListConfigured should be false
-		log.vdebug("syncListConfigured = ", syncListConfigured);
-		// oneDriveFullScanTrigger should be false unless set by actions on OneDrive and only if sync_list or skip_dir is used
-		log.vdebug("oneDriveFullScanTrigger = ", oneDriveFullScanTrigger);
-		// should only be set if 10th scan in monitor mode or as final true up sync in stand alone mode
-		log.vdebug("performFullItemScan = ", performFullItemScan);
-		
-		// do we override performFullItemScan if it is currently false and oneDriveFullScanTrigger is true? 
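The deletion flagging above never removes anything mid-scan; it accumulates (driveId, id) pairs that are acted on only after the whole response has been evaluated. A minimal D sketch of that accumulate-then-apply pattern (standalone, with placeholder drive and item ids):

import std.stdio;

void main() {
	// Accumulate [driveId, itemId] pairs while walking the /delta response ...
	string[2][] idsToDelete;
	idsToDelete ~= ["driveA", "item1"];
	idsToDelete ~= ["driveA", "item2"];

	// ... and only act on them once the full response has been processed, so a
	// partially processed response never triggers premature local deletions.
	foreach (pair; idsToDelete) {
		writeln("would delete: driveId=", pair[0], ", id=", pair[1]);
	}
}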
- if ((!performFullItemScan) && (oneDriveFullScanTrigger)) { - // forcing a full scan earlier than potentially normal - // oneDriveFullScanTrigger = true due to new folder creation request in a location that is now in-scope which was previously out of scope - performFullItemScan = true; - log.vdebug("overriding performFullItemScan as oneDriveFullScanTrigger was set"); - } - - // depending on the scan type (--monitor or --synchronize) performFullItemScan is set depending on the number of sync passes performed (--monitor) or ALWAYS if just --synchronize is used - if (!performFullItemScan){ - // performFullItemScan == false - // use delta link - log.vdebug("performFullItemScan is false, using the deltaLink as per database entry"); - if (deltaLinkAvailable == ""){ - deltaLink = ""; - log.vdebug("deltaLink was requested to be used, but contains no data - resulting API query will be treated as a full scan of OneDrive"); - } else { - deltaLink = deltaLinkAvailable; - log.vdebug("deltaLink contains valid data - resulting API query will be treated as a delta scan of OneDrive"); - } - } else { - // performFullItemScan == true - // do not use delta-link - deltaLink = ""; - log.vdebug("performFullItemScan is true, not using the database deltaLink so that we query all objects on OneDrive to compare against all local objects"); - } - - for (;;) { - - if (displayProcessingTime) { - writeln("------------------------------------------------------------"); - startBundleProcessingTime = Clock.currTime(); - writeln("Start 'change|item' API Response Bundle Processing Time: ", startBundleProcessingTime); - } - - // Due to differences in OneDrive API's between personal and business we need to get changes only from defaultRootId - // If we used the 'id' passed in & when using --single-directory with a business account we get: - // 'HTTP request returned status code 501 (Not Implemented): view.delta can only be called on the root.' - // To view changes correctly, we need to use the correct path id for the request - if (driveId == defaultDriveId) { - // The drive id matches our users default drive id - log.vdebug("Configuring 'idToQuery' as defaultRootId duplicate"); - idToQuery = defaultRootId.dup; - } else { - // The drive id does not match our users default drive id - // Potentially the 'path id' we are requesting the details of is a Shared Folder (remote item) - // Use the 'id' that was passed in (folderId) - log.vdebug("Configuring 'idToQuery' as 'id' duplicate"); - idToQuery = id.dup; } - // what path id are we going to query? - log.vdebug("Path object to query configured as 'idToQuery' = ", idToQuery); - long deltaChanges = 0; - // What query do we use? 
- // National Cloud Deployments do not support /delta as a query - // https://docs.microsoft.com/en-us/graph/deployments#supported-features - // Are we running against a National Cloud Deployments that does not support /delta - if (nationalCloudDeployment) { - // National Cloud Deployment that does not support /delta query - // Have to query /children and build our own /delta response - nationalCloudChildrenScan = true; - log.vdebug("Using /children call to query drive for items to populate 'changes' and 'changesAvailable'"); - // In a OneDrive Business Shared Folder scenario + nationalCloudDeployment, if ALL items are downgraded, then this leads to local file deletion - // Downgrade ONLY files associated with this driveId and idToQuery - log.vdebug("Downgrading all children for this driveId (" ~ driveId ~ ") and idToQuery (" ~ idToQuery ~ ") to an out-of-sync state"); - - // Before we get any data, flag any object in the database as out-of-sync for this driveID & ID - auto drivePathChildren = itemdb.selectChildren(driveId, idToQuery); - if (count(drivePathChildren) > 0) { - // Children to process and flag as out-of-sync - foreach (drivePathChild; drivePathChildren) { - // Flag any object in the database as out-of-sync for this driveID & ID - log.vdebug("Downgrading item as out-of-sync: ", drivePathChild.id); - itemdb.downgradeSyncStatusFlag(drivePathChild.driveId, drivePathChild.id); - } - } - - // Build own 'changes' response to simulate a /delta response - try { - // we have to 'build' our own JSON response that looks like /delta - changes = generateDeltaResponse(driveId, idToQuery); - if (changes.type() == JSONType.object) { - log.vdebug("Query 'changes = generateDeltaResponse(driveId, idToQuery)' performed successfully"); - } - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: changes = generateDeltaResponse(driveId, idToQuery)"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - return; - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
-						handleOneDriveThrottleRequest();
-						log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive items");
-					}
-					
-					// HTTP request returned status code 500 (Internal Server Error)
-					if (e.httpStatusCode == 500) {
-						// display what the error is
-						displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
-						return;
+			// Check the item type - if it is not an item type that we support, we cannot process the JSON item
+			if (!unwanted) {
+				if (isItemFile(onedriveJSONItem)) {
+					addLogEntry("The item we are syncing is a file", ["debug"]);
+				} else if (isItemFolder(onedriveJSONItem)) {
+					addLogEntry("The item we are syncing is a folder", ["debug"]);
+				} else if (isItemRemote(onedriveJSONItem)) {
+					addLogEntry("The item we are syncing is a remote item", ["debug"]);
+				} else {
+					// Why was this unwanted?
+					if (newItemPath.empty) {
+						// Compute this item path & need the full path for this file
+						newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName;
+						addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]);
					}
-					
-					// HTTP request returned status code 504 (Gateway Timeout) or 429 retry
-					if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) {
-						// If an error is returned when querying 'changes' and we recall the original function, we go into a never ending loop where the sync never ends
-						// re-try the specific changes queries
-						if (e.httpStatusCode == 504) {
-							log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query OneDrive drive items - retrying applicable request");
-							log.vdebug("changes = generateDeltaResponse(driveId, idToQuery) previously threw an error - retrying");
-							// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
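The type check above keys off which facet the Microsoft Graph API includes in the item JSON: a "file", "folder" or "remoteItem" element. A standalone illustrative sketch of those facet tests (the client's real helpers live elsewhere in the codebase; these names are for demonstration):

import std.json;
import std.stdio;

// The Graph API marks an item's type by which JSON element it carries.
bool isFile(JSONValue item)   { return ("file" in item) != null; }
bool isFolder(JSONValue item) { return ("folder" in item) != null; }
bool isRemote(JSONValue item) { return ("remoteItem" in item) != null; }

void main() {
	auto item = parseJSON(`{"id": "X!1", "folder": {"childCount": 2}}`);
	writeln(isFolder(item)); // true
	writeln(isFile(item));   // false
}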
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - log.vdebug("Retrying Query - using original deltaLink after delay"); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: changes = generateDeltaResponse(driveId, idToQuery)"); - changes = generateDeltaResponse(driveId, idToQuery); - log.vdebug("Query 'changes = generateDeltaResponse(driveId, idToQuery)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: changes = generateDeltaResponse(driveId, idToQuery) on re-try after delay"); - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + // Microsoft OneNote container objects present as neither folder or file but has file size + if ((!isItemFile(onedriveJSONItem)) && (!isItemFolder(onedriveJSONItem)) && (hasFileSize(onedriveJSONItem))) { + // Log that this was skipped as this was a Microsoft OneNote item and unsupported + addLogEntry("The Microsoft OneNote Notebook '" ~ newItemPath ~ "' is not supported by this client", ["verbose"]); } else { - // Default operation if not 404, 410, 429, 500 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + // Log that this item was skipped as unsupported + addLogEntry("The OneDrive item '" ~ newItemPath ~ "' is not supported by this client", ["verbose"]); } + unwanted = true; + addLogEntry("Flagging as unwanted: item type is not supported", ["debug"]); } - } else { - log.vdebug("Using /delta call to query drive for items to populate 'changes' and 'changesAvailable'"); - // query for changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - try { - // Fetch the changes relative to the path id we want to query - log.vdebug("Attempting query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)'"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("Previous deltaLink: ", deltaLink); - // changes with or without deltaLink - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - if (changes.type() == JSONType.object) { - log.vdebug("Query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)' performed successfully"); - log.vdebug("OneDrive API /delta response: ", changes); - } - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)"); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - return; - } - - // HTTP request returned status code 410 (The requested resource is no longer available at the server) - if (e.httpStatusCode == 410) { - log.vdebug("Delta link expired for 'onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)', setting 'deltaLink = null'"); - deltaLink = null; - continue; - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP 
request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query changes from OneDrive using deltaLink"); - } - - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // If an error is returned when querying 'changes' and we recall the original function, we go into a never ending loop where the sync never ends - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. - log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - log.vdebug("Retrying Query - using original deltaLink after delay"); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)"); - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - log.vdebug("Query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink) on re-try after delay"); - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink) previously threw an error - retrying with empty deltaLink"); - try { - // try query with empty deltaLink value - deltaLink = null; - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - log.vdebug("Query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // Tried 3 times, give up - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + } + + // Check if this is excluded by config option: skip_dir + if (!unwanted) { + // Only check path if config is != "" + if (!appConfig.getValueString("skip_dir").empty) { + // Is the item a folder? 
+							if (isItemFolder(onedriveJSONItem)) {
+								// work out the 'snippet' path where this folder would be created
+								string simplePathToCheck = "";
+								string complexPathToCheck = "";
+								string matchDisplay = "";
+								
+								if (hasParentReference(onedriveJSONItem)) {
+									// we need to work out the FULL path for this item
+									// simple path
+									if (("name" in onedriveJSONItem["parentReference"]) != null) {
+										simplePathToCheck = onedriveJSONItem["parentReference"]["name"].str ~ "/" ~ onedriveJSONItem["name"].str;
									} else {
-										simplePathToCheck = onedriveJSONItem["name"].str;
+										simplePathToCheck = onedriveJSONItem["name"].str;
+									}
+									addLogEntry("skip_dir path to check (simple): " ~ simplePathToCheck, ["debug"]);
+									
+									// complex path
+									if (parentInDatabase) {
+										// build up complexPathToCheck
+										complexPathToCheck = buildNormalizedPath(newItemPath);
+									} else {
+										addLogEntry("Parent details not in database - unable to compute complex path to check", ["debug"]);
									}
+									if (!complexPathToCheck.empty) {
+										addLogEntry("skip_dir path to check (complex): " ~ complexPathToCheck, ["debug"]);
+									}
+								} else {
+									simplePathToCheck = onedriveJSONItem["name"].str;
+								}
+								
+								// If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder
+								// then isDirNameExcluded matching will not work
+								if (simplePathToCheck.canFind(":")) {
+									addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]);
+									simplePathToCheck = processPathToRemoveRootReference(simplePathToCheck);
								}
+								if (complexPathToCheck.canFind(":")) {
+									addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]);
+									complexPathToCheck = processPathToRemoveRootReference(complexPathToCheck);
+								}
+								
+								// OK .. what checks are we doing? 
+								if ((!simplePathToCheck.empty) && (complexPathToCheck.empty)) {
+									// just a simple check
+									addLogEntry("Performing a simple check only", ["debug"]);
+									unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck);
+								} else {
+									// simple and complex
+									addLogEntry("Performing a simple then complex path match if required", ["debug"]);
+									
+									// simple first
+									addLogEntry("Performing a simple check first", ["debug"]);
+									unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck);
+									matchDisplay = simplePathToCheck;
+									if (!unwanted) {
+										// simple didn't match, perform a complex check
+										addLogEntry("Simple match was false, attempting complex match", ["debug"]);
+										unwanted = selectiveSync.isDirNameExcluded(complexPathToCheck);
+										matchDisplay = complexPathToCheck;
+									}
+								}
+								// result
+								addLogEntry("skip_dir exclude result (directory based): " ~ to!string(unwanted), ["debug"]);
+								if (unwanted) {
+									// This path should be skipped
+									addLogEntry("Skipping item - excluded by skip_dir config: " ~ matchDisplay, ["verbose"]);
								}
							}
-						} catch (OneDriveException e) {
-							// OneDrive threw an error
-							log.vdebug("------------------------------------------------------------------");
-							log.vdebug("Query Error: changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)");
-							
-							// HTTP request returned status code 404 (Not Found)
-							if (e.httpStatusCode == 404) {
-								// Stop application
-								log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'");
-								log.log("The item id to query was not found on OneDrive");
-								log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n");
-								return;
-							}
-							
-							// HTTP request returned status code 410 (The requested resource is no longer available at the server)
-							if (e.httpStatusCode == 410) {
-								log.vdebug("Delta link expired for 'onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)', setting 'deltaLinkAvailable = null'");
-								deltaLinkAvailable = null;
-								continue;
-							}
-							
-							// HTTP request returned status code 429 (Too Many Requests)
-							if (e.httpStatusCode == 429) {
-								// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
-								handleOneDriveThrottleRequest();
-								log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query changes from OneDrive using deltaLinkAvailable");
-							}
-							
-							// HTTP request returned status code 500 (Internal Server Error)
-							if (e.httpStatusCode == 500) {
-								// display what the error is
-								displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
-								return;
-							}
+							// Is the item a file? 
+							// We need to check to see if this file's path is excluded as well
+							if (isItemFile(onedriveJSONItem)) {
-							// HTTP request returned status code 504 (Gateway Timeout) or 429 retry
-							if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) {
-								// If an error is returned when querying 'changes' and we recall the original function, we go into a never ending loop where the sync never ends
-								// re-try the specific changes queries
-								if (e.httpStatusCode == 504) {
-									log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request");
-									log.vdebug("changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable) previously threw an error - retrying");
-									// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
-									log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
-									Thread.sleep(dur!"seconds"(30));
-									log.vdebug("Retrying Query - using original deltaLinkAvailable after delay");
+								string pathToCheck;
+								// does the newItemPath start with '/'?
+								if (!startsWith(newItemPath, "/")){
+									// path does not start with '/', but we need to check skip_dir entries with and without '/'
+									// so always make sure we are checking a path with '/'
+									pathToCheck = '/' ~ dirName(newItemPath);
+								} else {
+									pathToCheck = dirName(newItemPath);
								}
-								// re-try original request - retried for 429 and 504
-								try {
-									log.vdebug("Retrying Query: changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)");
-									changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable);
-									log.vdebug("Query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)' performed successfully on re-try");
-									if (changesAvailable.type() == JSONType.object) {
-										// are there any delta changes? 
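To make the skip_dir behaviour above concrete: for a file, it is the anchored parent-directory path that gets tested against the (potentially wildcard) skip_dir entries. A reduced standalone D sketch under those assumptions; the client's real isDirNameExcluded handles more cases than this:

import std.algorithm : any, splitter, startsWith;
import std.path : dirName, globMatch;
import std.stdio;

// Derive the anchored parent path of an item, then test it against
// '|'-separated, wildcard-capable exclusion entries.
bool parentDirExcluded(string itemPath, string skipDirConfig) {
	string parent = dirName(itemPath);
	if (!parent.startsWith("/")) parent = "/" ~ parent;
	return skipDirConfig.splitter('|').any!(entry => globMatch(parent, entry));
}

void main() {
	writeln(parentDirExcluded("Backup_2023/notes.txt", "/Backup_*|/.cache"));  // true
	writeln(parentDirExcluded("/Documents/report.docx", "/Backup_*|/.cache")); // false
}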
- if (("value" in changesAvailable) != null) { - deltaChanges = count(changesAvailable["value"].array); - log.vdebug("changesAvailable query reports that there are " , deltaChanges , " changes that need processing on OneDrive"); - } - } - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable) on re-try after delay"); - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable) previously threw an error - retrying with empty deltaLinkAvailable"); - // Increase delay and wait again before retry - log.vdebug("Thread sleeping for 90 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(90)); - log.vdebug("Retrying Query - using a null deltaLinkAvailable after delay"); - try { - // try query with empty deltaLinkAvailable value - deltaLinkAvailable = null; - changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable); - log.vdebug("Query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)' performed successfully on re-try"); - if (changesAvailable.type() == JSONType.object) { - // are there any delta changes? - if (("value" in changesAvailable) != null) { - deltaChanges = count(changesAvailable["value"].array); - log.vdebug("changesAvailable query reports that there are " , deltaChanges , " changes that need processing on OneDrive when using a null deltaLink value"); - } - } - } catch (OneDriveException e) { - // Tried 3 times, give up - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - - // OK .. 
if this was a 504, and running with --download-only & --cleanup-local-files
-											// need to exit to preserve local data, otherwise potential files will be deleted that should not be deleted
-											// leading to undesirable potential data loss scenarios
-											if ((e.httpStatusCode == 504) && (cleanupLocalFiles)) {
-												// log why we are exiting
-												log.log("Exiting application due to OneDrive API Gateway Timeout & --download-only & --cleanup-local-files configured to preserve local data");
-												// Must exit here
-												onedrive.shutdown();
-												exit(-1);
-											}
-											return;
-										}
-									} else {
-										// error was not a 504 this time
-										displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
-										return;
-									}
+								
+								// perform the check
+								unwanted = selectiveSync.isDirNameExcluded(pathToCheck);
+								// result
+								addLogEntry("skip_dir exclude result (file based): " ~ to!string(unwanted), ["debug"]);
+								if (unwanted) {
+									// this file's path should be skipped
+									addLogEntry("Skipping item - file path is excluded by skip_dir config: " ~ newItemPath, ["verbose"]);
								}
							}
						}
-						// In some OneDrive Business scenarios, the shared folder /delta response lacks the 'root' drive details
-						// When this occurs, this creates the following error: A database statement execution error occurred: foreign key constraint failed
-						// Ensure we query independently the root details for this shared folder and ensure that it is added before we process the /delta response
-						
-						// However, if we are using a National Cloud Deployment, these deployments do not support /delta, so we generate a /delta response via generateDeltaResponse()
-						// This specifically adds the root drive details to the self generated /delta response
-						if ((!nationalCloudDeployment) && (driveId!= defaultDriveId) && (syncBusinessFolders)) {
-							// fetch this driveId root details to ensure we add this to the database for this remote drive
-							JSONValue rootData;
-							
-							try {
-								rootData = onedrive.getDriveIdRoot(driveId);
-							} catch (OneDriveException e) {
-								log.vdebug("rootData = onedrive.getDriveIdRoot(driveId) generated a OneDriveException");
-								// HTTP request returned status code 504 (Gateway Timeout) or 429 retry
-								if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) {
-									// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
-									if (e.httpStatusCode == 429) {
-										log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - retrying applicable request");
-										handleOneDriveThrottleRequest();
+						// Check if this is excluded by config option: skip_file
+						if (!unwanted) {
+							// Is the JSON item a file?
+							if (isItemFile(onedriveJSONItem)) {
+								// skip_file can contain 4 types of entries:
+								// - wildcard - *.txt
+								// - text + wildcard - name*.txt
+								// - full path + combination of any above two - /path/name*.txt
+								// - full path to file - /path/to/file.txt
+								
+								// is the parent id in the database? 
+								if (parentInDatabase) {
+									// Compute this item path & need the full path for this file
+									if (newItemPath.empty) {
+										newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName;
+										addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]);
									}
-									if (e.httpStatusCode == 504) {
-										log.vdebug("Retrying original request that generated the HTTP 504 (Gateway Timeout) - retrying applicable request");
-										Thread.sleep(dur!"seconds"(30));
+									
+									// The path that needs to be checked needs to include the '/'
+									// This is because if the user has specified an exclusive path in skip_file, such as '/path/file', that is what must be matched
+									// However, as 'path' is used throughout, use a temporary variable with this modification for the exclusion checks
+									string exclusionTestPath = "";
+									if (!startsWith(newItemPath, "/")){
+										// Add '/' to the path
+										exclusionTestPath = '/' ~ newItemPath;
+									} else {
+										// Path is already anchored with '/'
+										exclusionTestPath = newItemPath;
									}
-									// Retry original request by calling function again to avoid replicating any further error handling
-									rootData = onedrive.getDriveIdRoot(driveId);
+									addLogEntry("skip_file item to check: " ~ exclusionTestPath, ["debug"]);
+									unwanted = selectiveSync.isFileNameExcluded(exclusionTestPath);
+									addLogEntry("Result: " ~ to!string(unwanted), ["debug"]);
+									if (unwanted) addLogEntry("Skipping item - excluded by skip_file config: " ~ thisItemName, ["verbose"]);
								} else {
-									// There was a HTTP 5xx Server Side Error
-									displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
-									// Must exit here
-									onedrive.shutdown();
-									exit(-1);
+									// parent id is not in the database
+									unwanted = true;
+									addLogEntry("Skipping file - parent path not present in local database", ["verbose"]);
								}
							}
-							
-							// apply this root drive data
-							applyDifference(rootData, driveId, true);
						}
						
-						// Process /delta response from OneDrive
-						// is changes a valid JSON response
-						if (changes.type() == JSONType.object) {
-							// Are there any changes to process?
-							if ((("value" in changes) != null) && ((deltaChanges > 0) || (oneDriveFullScanTrigger) || (nationalCloudChildrenScan) || (syncBusinessFolders) )) {
-								auto nrChanges = count(changes["value"].array);
-								auto changeCount = 0;
-								
-								// Display the number of changes or OneDrive objects we are processing
-								// OneDrive ships 'changes' in ~200 bundles. We display that we are processing X number of objects
-								// Do not display anything unless we are doing a verbose debug as due to #658 we are essentially doing a --resync each time when using sync_list
-								
-								// performance logging output
-								if (displayProcessingTime) {
-									writeln("Number of 'change|item' in this API Response Bundle from OneDrive to process: ", nrChanges);
+						// Check if this is included or excluded by use of sync_list
+						if (!unwanted) {
+							// Only check against sync_list if sync_list has actually been configured
+							if (syncListConfigured) {
+								// Compute the item path if empty - we need an actual path to check against sync_list
+								if (newItemPath.empty) {
+									// Calculate this item's path
+									newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName;
+									addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]);
								}
-								// is nrChanges >= min_notify_changes (default of min_notify_changes = 5)
-								if (nrChanges >= cfg.getValueLong("min_notify_changes")) {
-									// nrChanges is >= than min_notify_changes
-									// verbose log, no 'notify' .. 
it is over the top - if (!syncListConfigured) { - // sync_list is not being used - lets use the right messaging here - if (oneDriveFullScanTrigger) { - // full scan was triggered out of cycle - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being triggered by actions on OneDrive"); - // unset now the full scan trigger if set - unsetOneDriveFullScanTrigger(); - } else { - // no sync_list in use, oneDriveFullScanTrigger not set via sync_list or skip_dir - if (performFullItemScan){ - // performFullItemScan was set - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being requested"); - } else { - // default processing message - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state"); - } - } - } else { - // sync_list is being used - why are we going through the entire OneDrive contents? - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to sync_list being used"); - } - } else { - // There are valid changes but less than the min_notify_changes configured threshold - // We will only output the number of changes being processed to debug log if this is set to assist with debugging - // As this is debug logging, messaging can be the same, regardless of sync_list being used or not - - // is performFullItemScan set due to a full scan required? - // is oneDriveFullScanTrigger set due to a potentially out-of-scope item now being in-scope - if ((performFullItemScan) || (oneDriveFullScanTrigger)) { - // oneDriveFullScanTrigger should be false unless set by actions on OneDrive and only if sync_list or skip_dir is used - log.vdebug("performFullItemScan or oneDriveFullScanTrigger = true"); - // full scan was requested or triggered - // use the right message - if (oneDriveFullScanTrigger) { - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being triggered by actions on OneDrive"); - // unset now the full scan trigger if set - unsetOneDriveFullScanTrigger(); - } else { - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being requested"); - } - } else { - // standard message - log.vlog("Number of items from OneDrive to process: ", nrChanges); - } - } + // What path are we checking? + addLogEntry("sync_list item to check: " ~ newItemPath, ["debug"]); - // Add nrChanges to cumulativeOneDriveItemCount so we can detail how may items in total were processed - cumulativeOneDriveItemCount = cumulativeOneDriveItemCount + nrChanges; - - foreach (item; changes["value"].array) { - bool isRoot = false; - string thisItemParentPath; - string thisItemFullPath; - changeCount++; - - // Change as reported by OneDrive - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Processing change ", changeCount, " of ", nrChanges); - log.vdebug("OneDrive Change: ", item); - - // Deleted items returned from onedrive.viewChangesByItemId or onedrive.viewChangesByDriveId (/delta) do not have a 'name' attribute - // Thus we cannot name check for 'root' below on deleted items - if(!isItemDeleted(item)){ - // This is not a deleted item - log.vdebug("Not a OneDrive deleted item change"); - // Test is this is the OneDrive Users Root? 
- // Debug output of change evaluation items - log.vdebug("defaultRootId = ", defaultRootId); - log.vdebug("'search id' = ", id); - log.vdebug("id == defaultRootId = ", (id == defaultRootId)); - log.vdebug("isItemRoot(item) = ", (isItemRoot(item))); - log.vdebug("item['name'].str == 'root' = ", (item["name"].str == "root")); - log.vdebug("singleDirectoryScope = ", (singleDirectoryScope)); - - // Use the global's as initialised via init() rather than performing unnecessary additional HTTPS calls - // In a --single-directory scenario however, '(id == defaultRootId) = false' for root items - if ( ((id == defaultRootId) || (singleDirectoryScope)) && (isItemRoot(item)) && (item["name"].str == "root")) { - // This IS a OneDrive Root item - log.vdebug("Change will flagged as a 'root' item change"); - isRoot = true; - } - } - - // How do we handle this change? - if (isRoot || !hasParentReferenceId(item) || isItemDeleted(item)){ - // Is a root item, has no id in parentReference or is a OneDrive deleted item - log.vdebug("isRoot = ", isRoot); - log.vdebug("!hasParentReferenceId(item) = ", (!hasParentReferenceId(item))); - log.vdebug("isItemDeleted(item) = ", (isItemDeleted(item))); - log.vdebug("Handling change as 'root item', has no parent reference or is a deleted item"); - applyDifference(item, driveId, isRoot); + // Unfortunately there is no avoiding this call to check if the path is excluded|included via sync_list + if (selectiveSync.isPathExcludedViaSyncList(newItemPath)) { + // selective sync advised to skip, however is this a file and are we configured to upload / download files in the root? + if ((isItemFile(onedriveJSONItem)) && (appConfig.getValueBool("sync_root_files")) && (rootName(newItemPath) == "") ) { + // This is a file + // We are configured to sync all files in the root + // This is a file in the logical root + unwanted = false; } else { - // What is this item's parent path? 
- if (hasParentReferencePath(item)) { - thisItemParentPath = item["parentReference"]["path"].str; - thisItemFullPath = thisItemParentPath ~ "/" ~ item["name"].str; - } else { - thisItemParentPath = ""; - } - - // Special case handling flags - bool singleDirectorySpecialCase = false; - bool sharedFoldersSpecialCase = false; - - // Debug output of change evaluation items - log.vdebug("'parentReference id' = ", item["parentReference"]["id"].str); - log.vdebug("search criteria: syncFolderName = ", syncFolderName); - log.vdebug("search criteria: syncFolderPath = ", syncFolderPath); - log.vdebug("search criteria: syncFolderChildPath = ", syncFolderChildPath); - log.vdebug("thisItemId = ", item["id"].str); - log.vdebug("thisItemParentPath = ", thisItemParentPath); - log.vdebug("thisItemFullPath = ", thisItemFullPath); - log.vdebug("'item id' matches search 'id' = ", (item["id"].str == id)); - log.vdebug("'parentReference id' matches search 'id' = ", (item["parentReference"]["id"].str == id)); - log.vdebug("'thisItemParentPath' contains 'syncFolderChildPath' = ", (canFind(thisItemParentPath, syncFolderChildPath))); - log.vdebug("'thisItemParentPath' contains search 'id' = ", (canFind(thisItemParentPath, id))); - - // Special case handling - --single-directory - // If we are in a --single-directory sync scenario, and, the DB does not contain any parent details, or --single-directory is used with --resync - // all changes will be discarded as 'Remote change discarded - not in --single-directory sync scope (not in DB)' even though, some of the changes - // are actually valid and required as they are part of the parental path - if (singleDirectoryScope){ - // What is the full path for this item from OneDrive - log.vdebug("'syncFolderChildPath' contains 'thisItemFullPath' = ", (canFind(syncFolderChildPath, thisItemFullPath))); - if (canFind(syncFolderChildPath, thisItemFullPath)) { - singleDirectorySpecialCase = true; - } - } - - // Special case handling - Shared Business Folders - // - IF we are syncing shared folders, and the shared folder is not the 'top level' folder being shared out - // canFind(thisItemParentPath, syncFolderChildPath) will never match: - // Syncing this OneDrive Business Shared Folder: MyFolderName - // OneDrive Business Shared By: Firstname Lastname (email@address) - // Applying changes of Path ID: pathId - // [DEBUG] Sync Folder Name: MyFolderName - // [DEBUG] Sync Folder Path: /drives/driveId/root:/TopLevel/ABCD - // [DEBUG] Sync Folder Child Path: /drives/driveId/root:/TopLevel/ABCD/MyFolderName/ - // ... - // [DEBUG] 'item id' matches search 'id' = false - // [DEBUG] 'parentReference id' matches search 'id' = false - // [DEBUG] 'thisItemParentPath' contains 'syncFolderChildPath' = false - // [DEBUG] 'thisItemParentPath' contains search 'id' = false - // [DEBUG] Change does not match any criteria to apply - // Remote change discarded - not in business shared folders sync scope - - if ((!canFind(thisItemParentPath, syncFolderChildPath)) && (syncBusinessFolders)) { - // Syncing Shared Business folders & we dont have a path match - // is this a reverse path match? 
- log.vdebug("'thisItemParentPath' contains 'syncFolderName' = ", (canFind(thisItemParentPath, syncFolderName))); - if (canFind(thisItemParentPath, syncFolderName)) { - sharedFoldersSpecialCase = true; - } + // path is unwanted + unwanted = true; + addLogEntry("Skipping item - excluded by sync_list config: " ~ newItemPath, ["verbose"]); + // flagging to skip this item now, but does this exist in the DB thus needs to be removed / deleted? + if (existingDBEntry) { + // flag to delete + addLogEntry("Flagging item for local delete as item exists in database: " ~ newItemPath, ["verbose"]); + idsToDelete ~= [thisItemDriveId, thisItemId]; } - - // Check this item's path to see if this is a change on the path we want: - // 1. 'item id' matches 'id' - // 2. 'parentReference id' matches 'id' - // 3. 'item path' contains 'syncFolderChildPath' - // 4. 'item path' contains 'id' - // 5. Special Case was triggered - if ( (item["id"].str == id) || (item["parentReference"]["id"].str == id) || (canFind(thisItemParentPath, syncFolderChildPath)) || (canFind(thisItemParentPath, id)) || (singleDirectorySpecialCase) || (sharedFoldersSpecialCase) ){ - // This is a change we want to apply - if ((!singleDirectorySpecialCase) && (!sharedFoldersSpecialCase)) { - log.vdebug("Change matches search criteria to apply"); - } else { - if (singleDirectorySpecialCase) log.vdebug("Change matches search criteria to apply - special case criteria - reverse path matching used (--single-directory)"); - if (sharedFoldersSpecialCase) log.vdebug("Change matches search criteria to apply - special case criteria - reverse path matching used (Shared Business Folders)"); - } - // Apply OneDrive change - applyDifference(item, driveId, isRoot); - } else { - // No item ID match or folder sync match - log.vdebug("Change does not match any criteria to apply"); - - // Before discarding change - does this ID still exist on OneDrive - as in IS this - // potentially a --single-directory sync and the user 'moved' the file out of the 'sync-dir' to another OneDrive folder - // This is a corner edge case - https://github.com/skilion/onedrive/issues/341 - - // What is the original local path for this ID in the database? Does it match 'syncFolderChildPath' - if (itemdb.idInLocalDatabase(driveId, item["id"].str)){ - // item is in the database - string originalLocalPath = computeItemPath(driveId, item["id"].str); - - if (canFind(originalLocalPath, syncFolderChildPath)){ - JSONValue oneDriveMovedNotDeleted; - try { - oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item["id"].str); - } catch (OneDriveException e) { - log.vdebug("oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item['id'].str); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // No .. that ID is GONE - log.vlog("Remote change discarded - item cannot be found"); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry request after delay - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item['id'].str);"); - try { - oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item["id"].str); - } catch (OneDriveException e) { - // A further error was generated - // Rather than retry original function, retry the actual call and replicate error handling - if (e.httpStatusCode == 404) { - // No .. that ID is GONE - log.vlog("Remote change discarded - item cannot be found"); - } else { - // not a 404 - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - } - } else { - // not a 404 or a 429 - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - } - - // Yes .. ID is still on OneDrive but elsewhere .... #341 edge case handling - // This 'change' relates to an item that WAS in 'syncFolderChildPath' but is now - // stored elsewhere on OneDrive - outside the path we are syncing from - // Remove this item locally as it's local path is now obsolete - idsToDelete ~= [driveId, item["id"].str]; - } else { - // out of scope for some other reason - if (singleDirectoryScope){ - log.vlog("Remote change discarded - not in --single-directory sync scope (in DB)"); - } else { - log.vlog("Remote change discarded - not in sync scope"); - } - log.vdebug("Remote change discarded: ", item); - } - } else { - // item is not in the database - if (singleDirectoryScope){ - // We are syncing a single directory, so this is the reason why it is out of scope - log.vlog("Remote change discarded - not in --single-directory sync scope (not in DB)"); - log.vdebug("Remote change discarded: ", item); - } else { - // Not a single directory sync - if (syncBusinessFolders) { - // if we are syncing shared business folders, a 'change' may be out of scope as we are not syncing that 'folder' - // but we are sent all changes from the 'parent root' as we cannot query the 'delta' for this folder - // as that is a 501 error - not implemented - log.vlog("Remote change discarded - not in business shared folders sync scope"); - log.vdebug("Remote change discarded: ", item); - } else { - // out of scope for some other reason - log.vlog("Remote change discarded - not in sync scope"); - log.vdebug("Remote change discarded: ", item); - } - } - } - } } } - } else { - // No changes reported on OneDrive - log.vdebug("OneDrive Reported no delta changes - Local path and OneDrive in-sync"); - } - - // the response may contain either @odata.deltaLink or @odata.nextLink - if ("@odata.deltaLink" in changes) { - deltaLink = changes["@odata.deltaLink"].str; - log.vdebug("Setting next deltaLink to (@odata.deltaLink): ", deltaLink); } - if (deltaLink != "") { - // we initialise deltaLink to a blank string - if it is blank, dont update the DB to be empty - log.vdebug("Updating completed deltaLink in DB to: ", deltaLink); - itemdb.setDeltaLink(driveId, id, deltaLink); + } + + // Check if the user has configured to skip downloading .files or .folders: skip_dotfiles + if (!unwanted) { + if (appConfig.getValueBool("skip_dotfiles")) { + if (isDotFile(newItemPath)) { + addLogEntry("Skipping item - .file or .folder: " ~ newItemPath, ["verbose"]); + unwanted = true; + } } - - // Processing Timing for this bundle - if (displayProcessingTime) { - endBundleProcessingTime = Clock.currTime(); - writeln("End 'change|item' API Response Bundle Processing Time: ", endBundleProcessingTime); - 
writeln("Elapsed Processing Time: ", (endBundleProcessingTime - startBundleProcessingTime)); + } + + // Check if this should be skipped due to a --check-for-nosync directive (.nosync)? + if (!unwanted) { + if (appConfig.getValueBool("check_nosync")) { + // need the parent path for this object + string parentPath = dirName(newItemPath); + // Check for the presence of a .nosync in the parent path + if (exists(parentPath ~ "/.nosync")) { + addLogEntry("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: " ~ newItemPath, ["verbose"]); + unwanted = true; + } } - - if ("@odata.nextLink" in changes) { - // Update deltaLink to next changeSet bundle - deltaLink = changes["@odata.nextLink"].str; - // Update deltaLinkAvailable to next changeSet bundle to quantify how many changes we have to process - deltaLinkAvailable = changes["@odata.nextLink"].str; - log.vdebug("Setting next deltaLink & deltaLinkAvailable to (@odata.nextLink): ", deltaLink); - } - else break; - } else { - // Log that an invalid JSON object was returned - if ((driveId == defaultDriveId) || (!syncBusinessFolders)) { - log.vdebug("onedrive.viewChangesByItemId call returned an invalid JSON Object"); - } else { - log.vdebug("onedrive.viewChangesByDriveId call returned an invalid JSON Object"); - } - } - } - - // delete items in idsToDelete - if (idsToDelete.length > 0) deleteItems(); - // empty the skipped items - skippedItems.length = 0; - assumeSafeAppend(skippedItems); - - // Processing timing and metrics for everything that was processed - if (displayProcessingTime) { - endFunctionProcessingTime = Clock.currTime(); - // complete the bundle output - writeln("------------------------------------------------------------"); - writeln("Start Function Processing Time: ", startFunctionProcessingTime); - writeln("End Function Processing Time: ", endFunctionProcessingTime); - writeln("Elapsed Function Processing Time: ", (endFunctionProcessingTime - startFunctionProcessingTime)); - writeln("Total number of OneDrive items processed: ", cumulativeOneDriveItemCount); - writeln("============================================================"); - } - } - - // process the change of a single DriveItem - private void applyDifference(JSONValue driveItem, string driveId, bool isRoot) - { - // Format the OneDrive change into a consumable object for the database - Item item = makeItem(driveItem); - - // Reset the malwareDetected flag for this item - malwareDetected = false; - - // Reset the downloadFailed flag for this item - downloadFailed = false; - - // Path we will be using - string path = ""; - - if(isItemDeleted(driveItem)){ - // Change is to delete an item - log.vdebug("Remote deleted item"); - } else { - // Is the change from OneDrive a 'root' item - // The change should be considered a 'root' item if: - // 1. Contains a ["root"] element - // 2. Has no ["parentReference"]["id"] ... #323 & #324 highlighted that this is false as some 'root' shared objects now can have an 'id' element .. OneDrive API change - // 2. Has no ["parentReference"]["path"] - // 3. 
Was detected by an input flag as to be handled as a root item regardless of actual status - if (isItemRoot(driveItem) || !hasParentReferencePath(driveItem) || isRoot) { - log.vdebug("Handing a OneDrive 'root' change"); - item.parentId = null; // ensures that it has no parent - item.driveId = driveId; // HACK: makeItem() cannot set the driveId property of the root - log.vdebug("Update/Insert local database with item details"); - itemdb.upsert(item); - log.vdebug("item details: ", item); - return; } - } - - bool unwanted; - // Check if the parent id is something we need to skip - if (skippedItems.find(item.parentId).length != 0) { - // Potentially need to flag as unwanted - log.vdebug("Flagging as unwanted: find(item.parentId).length != 0"); - unwanted = true; - - // Is this item id in the database? - if (itemdb.idInLocalDatabase(item.driveId, item.id)){ - // item exists in database, most likely moved out of scope for current client configuration - log.vdebug("This item was previously synced / seen by the client"); - if (("name" in driveItem["parentReference"]) != null) { - // How is this out of scope? - // is sync_list configured - if (syncListConfigured) { - // sync_list configured and in use - if (selectiveSync.isPathExcludedViaSyncList(driveItem["parentReference"]["name"].str)) { - // Previously synced item is now out of scope as it has been moved out of what is included in sync_list - log.vdebug("This previously synced item is now excluded from being synced due to sync_list exclusion"); - } - } - // flag to delete local file as it now is no longer in sync with OneDrive - log.vdebug("Flagging to delete item locally"); - idsToDelete ~= [item.driveId, item.id]; - } - } - } - - // Check if this is excluded by config option: skip_dir - if (!unwanted) { - // Only check path if config is != "" - if (cfg.getValueString("skip_dir") != "") { - // Is the item a folder and not a deleted item? 
- if ((isItemFolder(driveItem)) && (!isItemDeleted(driveItem))) { - // work out the 'snippet' path where this folder would be created - string simplePathToCheck = ""; - string complexPathToCheck = ""; - string matchDisplay = ""; - - if (hasParentReference(driveItem)) { - // we need to workout the FULL path for this item - string parentDriveId = driveItem["parentReference"]["driveId"].str; - string parentItem = driveItem["parentReference"]["id"].str; - // simple path - if (("name" in driveItem["parentReference"]) != null) { - simplePathToCheck = driveItem["parentReference"]["name"].str ~ "/" ~ driveItem["name"].str; - } else { - simplePathToCheck = driveItem["name"].str; - } - log.vdebug("skip_dir path to check (simple): ", simplePathToCheck); - // complex path - if (itemdb.idInLocalDatabase(parentDriveId, parentItem)){ - // build up complexPathToCheck - complexPathToCheck = computeItemPath(parentDriveId, parentItem) ~ "/" ~ driveItem["name"].str; - complexPathToCheck = buildNormalizedPath(complexPathToCheck); - } else { - log.vdebug("Parent details not in database - unable to compute complex path to check"); - } - log.vdebug("skip_dir path to check (complex): ", complexPathToCheck); - } else { - simplePathToCheck = driveItem["name"].str; - } - - // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder - // then isDirNameExcluded matching will not work - // Clean up 'root:' if present - if (startsWith(simplePathToCheck, "root:")){ - log.vdebug("Updating simplePathToCheck to remove 'root:'"); - simplePathToCheck = strip(simplePathToCheck, "root:"); - } - if (startsWith(complexPathToCheck, "root:")){ - log.vdebug("Updating complexPathToCheck to remove 'root:'"); - complexPathToCheck = strip(complexPathToCheck, "root:"); - } - - // OK .. what checks are we doing? - if ((simplePathToCheck != "") && (complexPathToCheck == "")) { - // just a simple check - log.vdebug("Performing a simple check only"); - unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); - } else { - // simple and complex - log.vdebug("Performing a simple & complex path match if required"); - // simple first - unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); - matchDisplay = simplePathToCheck; - if (!unwanted) { - log.vdebug("Simple match was false, attempting complex match"); - // simple didnt match, perform a complex check - unwanted = selectiveSync.isDirNameExcluded(complexPathToCheck); - matchDisplay = complexPathToCheck; + + // Check if this is excluded by a user set maximum filesize to download + if (!unwanted) { + if (isItemFile(onedriveJSONItem)) { + if (fileSizeLimit != 0) { + if (onedriveJSONItem["size"].integer >= fileSizeLimit) { + addLogEntry("Skipping item - excluded by skip_size config: " ~ thisItemName ~ " (" ~ to!string(onedriveJSONItem["size"].integer/2^^20) ~ " MB)", ["verbose"]); + unwanted = true; } } - - log.vdebug("Result: ", unwanted); - if (unwanted) log.vlog("Skipping item - excluded by skip_dir config: ", matchDisplay); } } - } - - // Check if this is excluded by config option: skip_file - if (!unwanted) { - // Is the item a file and not a deleted item? 
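The skip_dotfiles, check_nosync and skip_size hunks above are all simple path or size predicates. A few standalone sketches of the same tests - isDotFileSketch, parentHasNoSync and exceedsSizeLimit are hypothetical names, and the client's real helpers in util.d may differ; the size limit is assumed to already be in bytes, matching the 2^^20 MB conversion used in the log message:

import std.algorithm : any, filter, startsWith;
import std.array : split;
import std.file : exists;
import std.path : buildPath, dirName;

// skip_dotfiles: any path segment starting with '.' makes the whole path a dot-path
bool isDotFileSketch(string path) {
	return path.split("/")
		.filter!(s => s != "" && s != ".")   // ignore "" and "." from a leading "./"
		.any!(s => s.startsWith("."));
}

// check_nosync: a .nosync marker in the parent directory vetoes the download
bool parentHasNoSync(string path) {
	return exists(buildPath(dirName(path), ".nosync"));
}

// skip_size: 0 means no limit; sizes compare in bytes (a 10 MB limit == 10 * 2^^20)
bool exceedsSizeLimit(ulong sizeBytes, ulong fileSizeLimitBytes) {
	return (fileSizeLimitBytes != 0) && (sizeBytes >= fileSizeLimitBytes);
}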
- if ((isItemFile(driveItem)) && (!isItemDeleted(driveItem))) { - // skip_file can contain 4 types of entries: - // - wildcard - *.txt - // - text + wildcard - name*.txt - // - full path + combination of any above two - /path/name*.txt - // - full path to file - /path/to/file.txt + + // At this point all the applicable checks on this JSON object from OneDrive are complete: + // - skip_file + // - skip_dir + // - sync_list + // - skip_dotfiles + // - check_nosync + // - skip_size + // - We know if this item exists in the DB or not in the DB + + // We know if this JSON item is unwanted or not + if (unwanted) { + // This JSON item is NOT wanted - it is excluded + addLogEntry("Skipping OneDrive change as this is determined to be unwanted", ["debug"]); - // is the parent id in the database? - if (itemdb.idInLocalDatabase(item.driveId, item.parentId)){ - // Compute this item path & need the full path for this file - path = computeItemPath(item.driveId, item.parentId) ~ "/" ~ item.name; - - // The path that needs to be checked needs to include the '/' - // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched - // However, as 'path' used throughout, use a temp variable with this modification so that we use the temp variable for exclusion checks - string exclusionTestPath = ""; - if (!startsWith(path, "/")){ - // Add '/' to the path - exclusionTestPath = '/' ~ path; - } - - log.vdebug("skip_file item to check: ", exclusionTestPath); - unwanted = selectiveSync.isFileNameExcluded(exclusionTestPath); - log.vdebug("Result: ", unwanted); - if (unwanted) log.vlog("Skipping item - excluded by skip_file config: ", item.name); - } else { - // parent id is not in the database - unwanted = true; - log.vlog("Skipping file - parent path not present in local database"); + // Add to the skippedItems array, but only if it is a directory ... pointless adding 'files' here, as it is the 'id' we check as the parent path which can only be a directory + if (!isItemFile(onedriveJSONItem)) { + skippedItems.insert(thisItemId); } - } - } - - // check the item type - if (!unwanted) { - if (isItemFile(driveItem)) { - log.vdebug("The item we are syncing is a file"); - } else if (isItemFolder(driveItem)) { - log.vdebug("The item we are syncing is a folder"); - } else if (isItemRemote(driveItem)) { - log.vdebug("The item we are syncing is a remote item"); - assert(isItemFolder(driveItem["remoteItem"]), "The remote item is not a folder"); } else { - // Why was this unwanted? - if (path.empty) { - // Compute this item path & need the full path for this file - path = computeItemPath(item.driveId, item.parentId) ~ "/" ~ item.name; - } - // Microsoft OneNote container objects present as neither folder or file but has file size - if ((!isItemFile(driveItem)) && (!isItemFolder(driveItem)) && (hasFileSize(driveItem))) { - // Log that this was skipped as this was a Microsoft OneNote item and unsupported - log.vlog("The Microsoft OneNote Notebook '", path, "' is not supported by this client"); + // This JSON item is wanted - we need to process this JSON item further + // Take the JSON item and create a consumable object for eventual database insertion + Item newDatabaseItem = makeItem(onedriveJSONItem); + + if (existingDBEntry) { + // The details of this JSON item are already in the DB + // Is the item in the DB the same as the JSON data provided - or is the JSON data advising this is an updated file? 
+ addLogEntry("OneDrive change is an update to an existing local item", ["debug"]); + + // Compute the existing item path + // NOTE: + // string existingItemPath = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.id); + // + // This will calculate the path as follows: + // + // existingItemPath: Document.txt + // + // Whereas above we use the following + // + // newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name; + // + // Which generates the following path: + // + // changedItemPath: ./Document.txt + // + // Need to be consistent here with how 'newItemPath' was calculated + string existingItemPath = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name; + // Attempt to apply this changed item + applyPotentiallyChangedItem(existingDatabaseItem, existingItemPath, newDatabaseItem, newItemPath, onedriveJSONItem); } else { - // Log that this item was skipped as unsupported - log.vlog("The OneDrive item '", path, "' is not supported by this client"); + // Action this JSON item as a new item as we have no DB record of it + // The actual item may actually exist locally already, meaning that just the database is out-of-date or missing the data due to --resync + // But we also cannot compute the newItemPath as the parental objects may not exist as well + addLogEntry("OneDrive change is potentially a new local item", ["debug"]); + + // Attempt to apply this potentially new item + applyPotentiallyNewLocalItem(newDatabaseItem, onedriveJSONItem, newItemPath); } - unwanted = true; - log.vdebug("Flagging as unwanted: item type is not supported"); } + + + // How long to process this JSON item in batch + auto jsonProcessingElapsedTime = Clock.currTime() - jsonProcessingStartTime; + addLogEntry("Batched JSON item processing time: " ~ to!string(jsonProcessingElapsedTime), ["debug"]); + + // Tracking as to if this item was processed + processedCount++; } - - // Check if this is included by use of sync_list - if (!unwanted) { - // Is the item parent in the local database? - if (itemdb.idInLocalDatabase(item.driveId, item.parentId)){ - // parent item is in the local database - // compute the item path if empty - if (path.empty) { - path = computeItemPath(item.driveId, item.parentId) ~ "/" ~ item.name; - } - // what path are we checking - log.vdebug("sync_list item to check: ", path); - - // Unfortunatly there is no avoiding this call to check if the path is excluded|included via sync_list - if (selectiveSync.isPathExcludedViaSyncList(path)) { - // selective sync advised to skip, however is this a file and are we configured to upload / download files in the root? - if ((isItemFile(driveItem)) && (cfg.getValueBool("sync_root_files")) && (rootName(path) == "") ) { - // This is a file - // We are configured to sync all files in the root - // This is a file in the logical root - unwanted = false; - } else { - // path is unwanted - unwanted = true; - log.vlog("Skipping item - excluded by sync_list config: ", path); - // flagging to skip this file now, but does this exist in the DB thus needs to be removed / deleted? - if (itemdb.idInLocalDatabase(item.driveId, item.id)){ - log.vlog("Flagging item for local delete as item exists in database: ", path); - // flag to delete - idsToDelete ~= [item.driveId, item.id]; - } - } - } - } else { - // Parent not in the database - // Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us? 
- if (defaultDriveId == item.driveId){
- // Flagging as unwanted
- log.vdebug("Flagging as unwanted: item.driveId (", item.driveId,"), item.parentId (", item.parentId,") not in local database");
- unwanted = true;
+ }
+
+ // Perform the download of any required objects in parallel
+ void processDownloadActivities() {
+
+ // Are there any items to delete locally? Clean up space locally first
+ if (!idsToDelete.empty) {
+ // There are elements that potentially need to be deleted locally
+ addLogEntry("Items to potentially delete locally: " ~ to!string(idsToDelete.length), ["verbose"]);
+
+ if (appConfig.getValueBool("download_only")) {
+ // Download only has been configured
+ if (cleanupLocalFiles) {
+ // Process online deleted items
+ addLogEntry("Processing local deletion activity as --download-only & --cleanup-local-files configured", ["verbose"]);
+ processDeleteItems();
} else {
- // Edge case as the parent (from another users OneDrive account) will never be in the database
- log.vdebug("The reported parentId is not in the database. This potentially is a shared folder as 'item.driveId' != 'defaultDriveId'. Relevant Details: item.driveId (", item.driveId,"), item.parentId (", item.parentId,")");
- // If we are syncing OneDrive Business Shared Folders, a 'folder' shared with us, has a 'parent' that is not shared with us hence the above message
- // What we need to do is query the DB for this 'item.driveId' and use the response from the DB to set the 'item.parentId' for this new item we are trying to add to the database
- if (syncBusinessFolders) {
- foreach(dbItem; itemdb.selectByDriveId(item.driveId)) {
- if (dbItem.name == "root") {
- // Ensure that this item uses the root id as parent
- log.vdebug("Falsifying item.parentId to be ", dbItem.id);
- item.parentId = dbItem.id;
- }
- }
- } else {
- // Ensure that this item has no parent
- log.vdebug("Setting item.parentId to be null");
- item.parentId = null;
- }
- log.vdebug("Update/Insert local database with item details");
- itemdb.upsert(item);
- log.vdebug("item details: ", item);
- return;
+ // Not cleaning up local files
+ addLogEntry("Skipping local deletion activity as --download-only has been used", ["verbose"]);
}
+ } else {
+ // Not using --download-only, so process deletions normally
+ processDeleteItems();
}
+ // Cleanup array memory
+ idsToDelete = [];
}
-
- // skip downloading dot files if configured
- if (cfg.getValueBool("skip_dotfiles")) {
- if (isDotFile(path)) {
- log.vlog("Skipping item - .file or .folder: ", path);
- unwanted = true;
- }
- }
-
- // skip unwanted items early
- if (unwanted) {
- log.vdebug("Skipping OneDrive change as this is determined to be unwanted");
- skippedItems ~= item.id;
- return;
+
+ // Are there any items to download after fetching and processing the /delta data? 
+ if (!fileJSONItemsToDownload.empty) { + // There are elements to download + addLogEntry("Number of items to download from Microsoft OneDrive: " ~ to!string(fileJSONItemsToDownload.length)); + downloadOneDriveItems(); + // Cleanup array memory + fileJSONItemsToDownload = []; } - - // check if the item has been seen before - Item oldItem; - bool cached = itemdb.selectById(item.driveId, item.id, oldItem); - - // check if the item is going to be deleted - if (isItemDeleted(driveItem)) { - // item.name is not available, so we get a bunch of meaningless log output - // Item name we will attempt to delete will be printed out later - if (cached) { - // flag to delete - log.vdebug("Flagging item for deletion: ", item); - idsToDelete ~= [item.driveId, item.id]; - } else { - // flag to ignore - log.vdebug("Flagging item to skip: ", item); - skippedItems ~= item.id; - } - return; + + // Are there any skipped items still? + if (!skippedItems.empty) { + // Cleanup array memory + skippedItems.clear(); } - - // rename the local item if it is unsynced and there is a new version of it on OneDrive - string oldPath; - if (cached && item.eTag != oldItem.eTag) { - // Is the item in the local database - if (itemdb.idInLocalDatabase(item.driveId, item.id)){ - log.vdebug("OneDrive item ID is present in local database"); - // Compute this item path - oldPath = computeItemPath(item.driveId, item.id); - // Query DB for existing local item in specified path - string itemSource = "database"; - if (!isItemSynced(oldItem, oldPath, itemSource)) { - if (exists(oldPath)) { - // Is the local file technically 'newer' based on UTC timestamp? - SysTime localModifiedTime = timeLastModified(oldPath).toUTC(); - localModifiedTime.fracSecs = Duration.zero; - item.mtime.fracSecs = Duration.zero; - - // debug the output of time comparison - log.vdebug("localModifiedTime (local file): ", localModifiedTime); - log.vdebug("item.mtime (OneDrive item): ", item.mtime); - - // Compare file on disk modified time with modified time provided by OneDrive API - if (localModifiedTime >= item.mtime) { - // local file is newer or has the same time than the item on OneDrive - log.vdebug("Skipping OneDrive change as this is determined to be unwanted due to local item modified time being newer or equal to item modified time from OneDrive"); - // no local rename - // no download needed - if (localModifiedTime == item.mtime) { - log.vlog("Local item modified time is equal to OneDrive item modified time based on UTC time conversion - keeping local item"); - } else { - log.vlog("Local item modified time is newer than OneDrive item modified time based on UTC time conversion - keeping local item"); - } - skippedItems ~= item.id; - return; - } else { - // remote file is newer than local item - log.vlog("Remote item modified time is newer based on UTC time conversion"); // correct message, remote item is newer - auto ext = extension(oldPath); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; - - // has the user configured to IGNORE local data protection rules? - if (bypassDataPreservation) { - // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename - log.vlog("WARNING: Local Data Protection has been disabled. 
You may experience data loss on this file: ", oldPath);
- } else {
- // local data protection is configured, renaming local file
- log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", oldPath, " -> ", newPath);
-
- // perform the rename action
- if (!dryRun) {
- safeRename(oldPath);
- } else {
- // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist
- log.vdebug("DRY-RUN: Skipping local file rename");
- }
- }
- }
- }
- cached = false;
- }
- }
+
+ // If deltaLinkCache.latestDeltaLink is not empty, update the deltaLink in the database for this driveId so that we can reuse this now that jsonItemsToProcess has been fully processed
+ if (!deltaLinkCache.latestDeltaLink.empty) {
+ addLogEntry("Updating completed deltaLink for driveID " ~ deltaLinkCache.driveId ~ " in DB to: " ~ deltaLinkCache.latestDeltaLink, ["debug"]);
+ itemDB.setDeltaLink(deltaLinkCache.driveId, deltaLinkCache.itemId, deltaLinkCache.latestDeltaLink);
+
+ // Now that the DB is updated, when we perform the last examination of the most recent online data, cache this so it can be obtained from memory
+ cacheLatestDeltaLink(deltaLinkInfo, deltaLinkCache.driveId, deltaLinkCache.latestDeltaLink);
}
-
- // update the item
- if (cached) {
- // the item is in the items.sqlite3 database
- log.vdebug("OneDrive change is an update to an existing local item");
- applyChangedItem(oldItem, oldPath, item, path);
+ }
+
+ // Function to add or update a key pair in the deltaLinkInfo array
+ void cacheLatestDeltaLink(ref DeltaLinkInfo deltaLinkInfo, string driveId, string latestDeltaLink) {
+ if (driveId !in deltaLinkInfo) {
+ addLogEntry("Added new latestDeltaLink entry: " ~ driveId ~ " -> " ~ latestDeltaLink, ["debug"]);
} else {
- log.vdebug("OneDrive change is potentially a new local item");
- // Check if file should be skipped based on size limit
- if (isItemFile(driveItem)) {
- if (cfg.getValueLong("skip_size") != 0) {
- if (driveItem["size"].integer >= this.newSizeLimit) {
- log.vlog("Skipping item - excluded by skip_size config: ", item.name, " (", driveItem["size"].integer/2^^20, " MB)");
- return;
- }
- }
- }
- // apply this new item
- applyNewItem(item, path);
+ addLogEntry("Updated latestDeltaLink entry for " ~ driveId ~ " from " ~ deltaLinkInfo[driveId] ~ " to " ~ latestDeltaLink, ["debug"]);
}
-
- if ((malwareDetected == false) && (downloadFailed == false)){
- // save the item in the db
- // if the file was detected as malware and NOT downloaded, we dont want to falsify the DB as downloading it as otherwise the next pass will think it was deleted, thus delete the remote item
- // Likewise if the download failed, we dont want to falsify the DB as downloading it as otherwise the next pass will think it was deleted, thus delete the remote item
- if (cached) {
- // the item is in the items.sqlite3 database
- // Do we need to update the database with the details that were provided by the OneDrive API?
- // Is the last modified timestamp in the DB the same as the API data? 
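DeltaLinkInfo behaves as a plain D associative array keyed by driveId, which is what makes cacheLatestDeltaLink() and the `in` / `!in` tests above cheap lookups. A minimal sketch of the same pattern - the alias shape here is an assumption; only the AA behaviour matters:

alias DeltaLinkInfo = string[string];   // assumed shape: one cached deltaLink per driveId

unittest {
	DeltaLinkInfo deltaLinkInfo;
	deltaLinkInfo["driveId-A"] = "dummy-delta-token-1";   // add a new entry
	deltaLinkInfo["driveId-A"] = "dummy-delta-token-2";   // update it in place
	assert("driveId-A" in deltaLinkInfo);                 // 'in' yields a pointer, null when absent
	assert(("driveId-B" in deltaLinkInfo) is null);
}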
- SysTime localModifiedTime = oldItem.mtime; - localModifiedTime.fracSecs = Duration.zero; - SysTime remoteModifiedTime = item.mtime; - remoteModifiedTime.fracSecs = Duration.zero; - - // If the timestamp is different, or we are running on a National Cloud Deployment that does not support /delta queries - we have to update the DB with the details from OneDrive - // Unfortunatly because of the consequence of Nataional Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes - // This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using Nataional Cloud Deployments - // as all records are touched / updated when performing the OneDrive sync operations. The only way to change this, is for Microsoft to support /delta queries for Nataional Cloud Deployments - if ((localModifiedTime != remoteModifiedTime) || (nationalCloudDeployment)) { - // Database update needed for this item because our local record is out-of-date - log.vdebug("Updating local database with item details from OneDrive as local record needs to be updated"); - itemdb.update(item); - } - } else { - // item is not in the items.sqlite3 database - log.vdebug("Inserting new item details to local database"); - itemdb.insert(item); - } - // What was the item that was saved - log.vdebug("item details: ", item); - } else { - // flag was tripped, which was it - if (downloadFailed) { - log.vdebug("Download or creation of local directory failed"); - } - if (malwareDetected) { - log.vdebug("OneDrive reported that file contained malware"); - } + deltaLinkInfo[driveId] = latestDeltaLink; + } + + // Function to get the latestDeltaLink based on driveId + string getDeltaLinkFromCache(ref DeltaLinkInfo deltaLinkInfo, string driveId) { + string cachedDeltaLink; + if (driveId in deltaLinkInfo) { + cachedDeltaLink = deltaLinkInfo[driveId]; } + return cachedDeltaLink; } - - // download an item that was not synced before - private void applyNewItem(const ref Item item, const(string) path) - { - // Test for the local path existence - if (exists(path)) { + + // If the JSON item is not in the database, it is potentially a new item that we need to action + void applyPotentiallyNewLocalItem(Item newDatabaseItem, JSONValue onedriveJSONItem, string newItemPath) { + + // The JSON and Database items being passed in here have passed the following checks: + // - skip_file + // - skip_dir + // - sync_list + // - skip_dotfiles + // - check_nosync + // - skip_size + // - Is not currently cached in the local database + // As such, we should not be doing any other checks here to determine if the JSON item is wanted .. it is + + if (exists(newItemPath)) { + addLogEntry("Path on local disk already exists", ["debug"]); // Issue #2209 fix - test if path is a bad symbolic link - if (isSymlink(path)) { - log.vdebug("Path on local disk is a symbolic link ........"); - if (!exists(readLink(path))) { + if (isSymlink(newItemPath)) { + addLogEntry("Path on local disk is a symbolic link ........", ["debug"]); + if (!exists(readLink(newItemPath))) { // reading the symbolic link failed - log.vdebug("Reading the symbolic link target failed ........ "); - log.logAndNotify("Skipping item - invalid symbolic link: ", path); + addLogEntry("Reading the symbolic link target failed ........ 
", ["debug"]); + addLogEntry("Skipping item - invalid symbolic link: " ~ newItemPath, ["info", "notify"]); return; } } - - // path exists locally, is not a bad symbolic link - // Query DB for new remote item in specified path + + // Path exists locally, is not a bad symbolic link + // Test if this item is actually in-sync + // What is the source of this item data? string itemSource = "remote"; - if (isItemSynced(item, path, itemSource)) { - // file details from OneDrive and local file details in database are in-sync - log.vdebug("The item to sync is already present on the local file system and is in-sync with the local database"); + if (isItemSynced(newDatabaseItem, newItemPath, itemSource)) { + // Item details from OneDrive and local item details in database are in-sync + addLogEntry("The item to sync is already present on the local filesystem and is in-sync with what is reported online", ["debug"]); + addLogEntry("Update/Insert local database with item details: " ~ to!string(newDatabaseItem), ["debug"]); + itemDB.upsert(newDatabaseItem); return; } else { - // file is not in sync with the database - // is the local file technically 'newer' based on UTC timestamp? - SysTime localModifiedTime = timeLastModified(path).toUTC(); - SysTime itemModifiedTime = item.mtime; - // HACK: reduce time resolution to seconds before comparing + // Item details from OneDrive and local item details in database are NOT in-sync + addLogEntry("The item to sync exists locally but is potentially not in the local database - otherwise this would be handled as changed item", ["debug"]); + + // Which object is newer? The local file or the remote file? + SysTime localModifiedTime = timeLastModified(newItemPath).toUTC(); + SysTime itemModifiedTime = newDatabaseItem.mtime; + // Reduce time resolution to seconds before comparing localModifiedTime.fracSecs = Duration.zero; itemModifiedTime.fracSecs = Duration.zero; - // is the local modified time greater than that from OneDrive? + // Is the local modified time greater than that from OneDrive? if (localModifiedTime > itemModifiedTime) { - // local file is newer than item on OneDrive based on file modified time + // Local file is newer than item on OneDrive based on file modified time // Is this item id in the database? 
- if (itemdb.idInLocalDatabase(item.driveId, item.id)){
+ if (itemDB.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.id)) {
// item id is in the database
// no local rename
// no download needed
- log.vlog("Local item modified time is newer based on UTC time conversion - keeping local item as this exists in the local database");
- log.vdebug("Skipping OneDrive change as this is determined to be unwanted due to local item modified time being newer than OneDrive item and present in the sqlite database");
+
+ // Fetch the latest DB record - as this could have been updated by the isItemSynced if the date online was being corrected, then the DB updated as a result
+ Item latestDatabaseItem;
+ itemDB.selectById(newDatabaseItem.driveId, newDatabaseItem.id, latestDatabaseItem);
+ addLogEntry("latestDatabaseItem: " ~ to!string(latestDatabaseItem), ["debug"]);
+
+ SysTime latestItemModifiedTime = latestDatabaseItem.mtime;
+ // Reduce time resolution to seconds before comparing
+ latestItemModifiedTime.fracSecs = Duration.zero;
+
+ if (localModifiedTime == latestItemModifiedTime) {
+ // Log action
+ addLogEntry("Local file modified time matches existing database record - keeping local file", ["verbose"]);
+ addLogEntry("Skipping OneDrive change as this is determined to be unwanted due to local file modified time matching database data", ["debug"]);
+ } else {
+ // Log action
+ addLogEntry("Local file modified time is newer based on UTC time conversion - keeping local file as this exists in the local database", ["verbose"]);
+ addLogEntry("Skipping OneDrive change as this is determined to be unwanted due to local file modified time being newer than OneDrive file and present in the sqlite database", ["debug"]);
+ }
+ // Return as no further action needed
return;
} else {
// item id is not in the database .. maybe a --resync ?
- // Should this 'download' be skipped?
- // Do we need to check for .nosync? Only if --check-for-nosync was passed in
- if (cfg.getValueBool("check_nosync")) {
- // need the parent path for this object
- string parentPath = dirName(path);
- if (exists(parentPath ~ "/.nosync")) {
- log.vlog("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: ", path);
- // flag that this download failed, otherwise the 'item' is added to the database - then, as not present on the local disk, would get deleted from OneDrive
- downloadFailed = true;
- // clean up this partial file, otherwise every sync we will get theis warning
- log.vlog("Removing previous partial file download due to .nosync found in parent folder & --check-for-nosync is enabled");
- safeRemove(path);
- return;
- }
- }
// file exists locally but is not in the sqlite database - maybe a failed download?
- log.vlog("Local item does not exist in local database - replacing with file from OneDrive - failed download?");
-
+ addLogEntry("Local item does not exist in local database - replacing with file from OneDrive - failed download?", ["verbose"]);

- // in a --resync scenario or if items.sqlite3 was deleted before startup we have zero way of knowing IF the local file is meant to be the right file
- // we have passed the following checks:
- // 1. file exists locally
- // 2. local modified time > remote modified time
- // 3. id is not in the database
+ // In a --resync scenario or if items.sqlite3 was deleted before startup we have zero way of knowing IF the local file is meant to be the right file
+ // To this point we have passed the following checks:
+ // 1. 
Any client side filtering checks - this determined this is a file that is wanted + // 2. A file with the exact name exists locally + // 3. The local modified time > remote modified time + // 4. The id of the item from OneDrive is not in the database - auto ext = extension(path); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; - // has the user configured to IGNORE local data protection rules? + // Has the user configured to IGNORE local data protection rules? if (bypassDataPreservation) { - // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename - log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", path); + // The user has configured to ignore data safety checks and overwrite local data rather than preserve & safeBackup + addLogEntry("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { - // local data protection is configured, renaming local file - log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ", path, " -> ", newPath); - // perform the rename action of the local file - if (!dryRun) { - safeRename(path); - } else { - // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist - log.vdebug("DRY-RUN: Skipping local file rename"); - } + // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not + // In case the renamed path is needed + string renamedPath; + safeBackup(newItemPath, dryRun, renamedPath); } - } } else { - // remote file is newer than local item - log.vlog("Remote item modified time is newer based on UTC time conversion"); // correct message, remote item is newer - log.vdebug("localModifiedTime (local file): ", localModifiedTime); - log.vdebug("itemModifiedTime (OneDrive item): ", itemModifiedTime); - - auto ext = extension(path); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; - - // has the user configured to IGNORE local data protection rules? - if (bypassDataPreservation) { - // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename - log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", path); - } else { - // local data protection is configured, renaming local file - log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", path, " -> ", newPath); - // perform the rename action of the local file - if (!dryRun) { - safeRename(path); + // Is the remote newer? + if (localModifiedTime < itemModifiedTime) { + // Remote file is newer than the existing local item + addLogEntry("Remote item modified time is newer based on UTC time conversion", ["verbose"]); // correct message, remote item is newer + addLogEntry("localModifiedTime (local file): " ~ to!string(localModifiedTime), ["debug"]); + addLogEntry("itemModifiedTime (OneDrive item): " ~ to!string(itemModifiedTime), ["debug"]); + + // Has the user configured to IGNORE local data protection rules? + if (bypassDataPreservation) { + // The user has configured to ignore data safety checks and overwrite local data rather than preserve & safeBackup + addLogEntry("WARNING: Local Data Protection has been disabled. 
You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { - // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist - log.vdebug("DRY-RUN: Skipping local file rename"); - } - } - } - } - } else { - // Path does not exist locally - this will be a new file download or folder creation - - // Should this 'download' be skipped due to 'skip_dir' directive - if (cfg.getValueString("skip_dir") != "") { - string pathToCheck; - // does the path start with '/'? - if (!startsWith(path, "/")){ - // path does not start with '/', but we need to check skip_dir entries with and without '/' - // so always make sure we are checking a path with '/' - // If this is a file, we need to check the parent path - if (item.type == ItemType.file) { - // use parent path and add '/' - pathToCheck = '/' ~ dirName(path); - } else { - // use path and add '/' - pathToCheck = '/' ~ path; + // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not + // In case the renamed path is needed + string renamedPath; + safeBackup(newItemPath, dryRun, renamedPath); + } } - } - - // perform the check - if (selectiveSync.isDirNameExcluded(pathToCheck)) { - // this path should be skipped - if (item.type == ItemType.file) { - log.vlog("Skipping item - file path is excluded by skip_dir config: ", path); - } else { - log.vlog("Skipping item - excluded by skip_dir config: ", path); + + // Are the timestamps equal? + if (localModifiedTime == itemModifiedTime) { + // yes they are equal + addLogEntry("File timestamps are equal, no further action required", ["debug"]); // correct message as timestamps are equal + addLogEntry("Update/Insert local database with item details: " ~ to!string(newDatabaseItem), ["debug"]); + itemDB.upsert(newDatabaseItem); + return; } - // flag that this download failed, otherwise the 'item' is added to the database - then, as not present on the local disk, would get deleted from OneDrive - downloadFailed = true; - return; } } + } - // Should this 'download' be skipped due to nosync directive? - // Do we need to check for .nosync? Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - // need the parent path for this object - string parentPath = dirName(path); - if (exists(parentPath ~ "/.nosync")) { - log.vlog("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: ", path); - // flag that this download failed, otherwise the 'item' is added to the database - then, as not present on the local disk, would get deleted from OneDrive - downloadFailed = true; - return; + // Path does not exist locally (should not exist locally if renamed file) - this will be a new file download or new folder creation + // How to handle this Potentially New Local Item JSON ? 
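safeBackup() above replaces the older inline rename logic; the removed code earlier in this diff shows the naming scheme that preserved files historically received ("file.txt" -> "file-deviceName.txt"). A sketch of that documented scheme, for reference only, since safeBackup's current internals are not shown in this hunk:

import std.path : extension;
import std.string : chomp;

// "path/to/file.txt" + "myhost" -> "path/to/file-myhost.txt"
string backupName(string path, string deviceName) {
	auto ext = extension(path);              // ".txt" (dot included), or "" when there is none
	return path.chomp(ext) ~ "-" ~ deviceName ~ ext;
}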
+ final switch (newDatabaseItem.type) {
+ case ItemType.file:
+ // Add the file to the download array for processing later
+ fileJSONItemsToDownload ~= onedriveJSONItem;
+ break;
+ case ItemType.dir:
+ // Create the directory immediately as we depend on its entry existing
+ handleLocalDirectoryCreation(newDatabaseItem, newItemPath, onedriveJSONItem);
+ break;
+ case ItemType.remote:
+ // Add the directory and relevant details for processing later
+ if (newDatabaseItem.remoteType == ItemType.dir) {
+ handleLocalDirectoryCreation(newDatabaseItem, newItemPath, onedriveJSONItem);
+ } else {
+ // Add the file to the download array for processing later
+ fileJSONItemsToDownload ~= onedriveJSONItem;
}
+ break;
+ case ItemType.unknown:
+ case ItemType.none:
+ // Unknown type - we don't action or sync these items
+ break;
}
-
- // how to handle this item?
- final switch (item.type) {
- case ItemType.file:
- downloadFileItem(item, path);
- if (dryRun) {
- // we dont download the file, but we need to track that we 'faked it'
- idsFaked ~= [item.driveId, item.id];
- }
- break;
- case ItemType.dir:
- case ItemType.remote:
- log.log("Creating local directory: ", path);
-
- // Issue #658 handling - is sync_list in use?
- if (syncListConfigured) {
- // sync_list configured and in use
- // path to create was previously checked if this should be included / excluded. No need to check again.
- log.vdebug("Issue #658 handling");
- setOneDriveFullScanTrigger();
- }
-
- // Issue #865 handling - is skip_dir in use?
- if (cfg.getValueString("skip_dir") != "") {
- // we have some entries in skip_dir
- // path to create was previously checked if this should be included / excluded. No need to check again.
- log.vdebug("Issue #865 handling");
- setOneDriveFullScanTrigger();
- }
-
+ }
+
+ // Handle the creation of a new local directory
+ void handleLocalDirectoryCreation(Item newDatabaseItem, string newItemPath, JSONValue onedriveJSONItem) {
+ // To create a path, 'newItemPath' must not be empty
+ if (!newItemPath.empty) {
+ // Update the logging output to be consistent
+ addLogEntry("Creating local directory: " ~ "./" ~ buildNormalizedPath(newItemPath), ["verbose"]);
if (!dryRun) {
try {
- // Does the path exist locally? 
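The dispatch above uses D's final switch, which fails to compile unless every member of the ItemType enum has a case; that is why unknown and none carry explicit empty branches instead of a default. A toy reproduction of that guarantee (the enum members mirror the ones used above):

enum ItemType { file, dir, remote, unknown, none }

void dispatch(ItemType t) {
	final switch (t) {   // compile error if any ItemType member lacks a case
		case ItemType.file:    /* queue for download */        break;
		case ItemType.dir:     /* create directory now */      break;
		case ItemType.remote:  /* check remoteType first */    break;
		case ItemType.unknown:
		case ItemType.none:    /* deliberately not actioned */ break;
	}
}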
- if (!exists(path)) { - // Create the new directory - log.vdebug("Requested path does not exist, creating directory structure: ", path); - mkdirRecurse(path); - // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", path); - path.setAttributes(cfg.returnRequiredDirectoryPermisions()); - // Update the time of the folder to match the last modified time as is provided by OneDrive - // If there are any files then downloaded into this folder, the last modified time will get - // updated by the local Operating System with the latest timestamp - as this is normal operation - // as the directory has been modified - log.vdebug("Setting directory lastModifiedDateTime for: ", path , " to ", item.mtime); - setTimes(path, item.mtime, item.mtime); - } + // Create the new directory + addLogEntry("Requested path does not exist, creating directory structure: " ~ newItemPath, ["debug"]); + mkdirRecurse(newItemPath); + // Configure the applicable permissions for the folder + addLogEntry("Setting directory permissions for: " ~ newItemPath, ["debug"]); + newItemPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + // Update the time of the folder to match the last modified time as is provided by OneDrive + // If there are any files then downloaded into this folder, the last modified time will get + // updated by the local Operating System with the latest timestamp - as this is normal operation + // as the directory has been modified + addLogEntry("Setting directory lastModifiedDateTime for: " ~ newItemPath ~ " to " ~ to!string(newDatabaseItem.mtime), ["debug"]); + addLogEntry("Calling setTimes() for this directory: " ~ newItemPath, ["debug"]); + setTimes(newItemPath, newDatabaseItem.mtime, newDatabaseItem.mtime); + // Save the item to the database + saveItem(onedriveJSONItem); } catch (FileException e) { // display the error message displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - // flag that this failed - downloadFailed = true; - return; } } else { // we dont create the directory, but we need to track that we 'faked it' - idsFaked ~= [item.driveId, item.id]; + idsFaked ~= [newDatabaseItem.driveId, newDatabaseItem.id]; + // Save the item to the dry-run database + saveItem(onedriveJSONItem); } - break; } } - - // update a local item - // the local item is assumed to be in sync with the local db - private void applyChangedItem(Item oldItem, string oldPath, Item newItem, string newPath) - { - assert(oldItem.driveId == newItem.driveId); - assert(oldItem.id == newItem.id); - assert(oldItem.type == newItem.type); - assert(oldItem.remoteDriveId == newItem.remoteDriveId); - assert(oldItem.remoteId == newItem.remoteId); - - if (oldItem.eTag != newItem.eTag) { - // handle changed name/path - if (oldPath != newPath) { - log.log("Moving ", oldPath, " to ", newPath); - if (exists(newPath)) { - Item localNewItem; - if (itemdb.selectByPath(newPath, defaultDriveId, localNewItem)) { - // Query DB for new local item in specified path + + // If the JSON item IS in the database, this will be an update to an existing in-sync item + void applyPotentiallyChangedItem(Item existingDatabaseItem, string existingItemPath, Item changedOneDriveItem, string changedItemPath, JSONValue onedriveJSONItem) { + + // If we are moving the item, we do not need to download it again + bool itemWasMoved = false; + + // Do we need to actually update the database with the details that were provided by the OneDrive API? 
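The ordering inside handleLocalDirectoryCreation() matters: setTimes() must run after mkdirRecurse(), and any later file download into the directory will legitimately bump the directory's mtime again, exactly as the comment above notes. A compact sketch of that sequence, with the permission handling via returnRequiredDirectoryPermisions() omitted:

import std.datetime.systime : SysTime;
import std.file : mkdirRecurse, setTimes;

// Create a directory tree and stamp it with the mtime reported online
void createDirWithTimestamp(string path, SysTime onlineMtime) {
	mkdirRecurse(path);                          // creates intermediate dirs as needed
	setTimes(path, onlineMtime, onlineMtime);    // (accessTime, modificationTime)
	// Writing any file into 'path' afterwards would update the directory mtime again
}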
+ // Calculate these time items from the provided items
+ SysTime existingItemModifiedTime = existingDatabaseItem.mtime;
+ existingItemModifiedTime.fracSecs = Duration.zero;
+ SysTime changedOneDriveItemModifiedTime = changedOneDriveItem.mtime;
+ changedOneDriveItemModifiedTime.fracSecs = Duration.zero;
+
+ // Did the eTag change?
+ if (existingDatabaseItem.eTag != changedOneDriveItem.eTag) {
+ // The eTag has changed from what we previously cached
+ if (existingItemPath != changedItemPath) {
+ // Log that we are changing / moving an item to a new name
+ addLogEntry("Moving " ~ existingItemPath ~ " to " ~ changedItemPath);
+ // Is the destination path empty .. or does something exist at that location?
+ if (exists(changedItemPath)) {
+ // Destination we are moving to exists ...
+ Item changedLocalItem;
+ // Query DB for this changed item in specified path that exists and see if it is in-sync
+ if (itemDB.selectByPath(changedItemPath, changedOneDriveItem.driveId, changedLocalItem)) {
+ // The 'changedItemPath' is in the database
string itemSource = "database";
- if (isItemSynced(localNewItem, newPath, itemSource)) {
- log.vlog("Destination is in sync and will be overwritten");
+ if (isItemSynced(changedLocalItem, changedItemPath, itemSource)) {
+ // The destination item is in-sync
+ addLogEntry("Destination is in sync and will be overwritten", ["verbose"]);
} else {
- // TODO: force remote sync by deleting local item
- log.vlog("The destination is occupied, renaming the conflicting file...");
- if (!dryRun) {
- safeRename(newPath);
- }
+ // The destination item is different
+ addLogEntry("The destination is occupied with a different item, renaming the conflicting file...", ["verbose"]);
+ // Backup this item, passing in if we are performing a --dry-run or not
+ // In case the renamed path is needed
+ string renamedPath;
+ safeBackup(changedItemPath, dryRun, renamedPath);
}
} else {
- // to be overwritten item is not already in the itemdb, so it should
- // be synced. Do a safe rename here, too.
- // TODO: force remote sync by deleting local item
- log.vlog("The destination is occupied by new file, renaming the conflicting file...");
- if (!dryRun) {
- safeRename(newPath);
- }
+ // The to be overwritten item is not already in the itemdb, so it should be saved to avoid data loss
+ addLogEntry("The destination is occupied by an existing un-synced file, renaming the conflicting file...", ["verbose"]);
+ // Backup this item, passing in if we are performing a --dry-run or not
+ // In case the renamed path is needed
+ string renamedPath;
+ safeBackup(changedItemPath, dryRun, renamedPath);
}
}
-
- // try and rename path, catch exception
+
+ // Try and rename path, catch any exception generated
try {
- log.vdebug("Calling rename(oldPath, newPath)");
- if (!dryRun) {
- // rename physical path on disk
- rename(oldPath, newPath);
+ // If we are in a --dry-run situation, the actual rename does not occur - but we need to track it as if it did
+ if(!dryRun) {
+ // Rename this item, passing in if we are performing a --dry-run or not
+ safeRename(existingItemPath, changedItemPath, dryRun);
+
+ // Flag that the item was moved | renamed
+ itemWasMoved = true;
+
+ // If the item is a file, make sure that the local timestamp now is the same as the timestamp online
+ // Otherwise when we do the DB check after the move on the file system, the file technically has a newer timestamp
+ // which is 'correct' .. 
but we need to report locally the online timestamp here as the move was made online
+ if (changedOneDriveItem.type == ItemType.file) {
+ // Set the timestamp
+ addLogEntry("Calling setTimes() for this file: " ~ changedItemPath, ["debug"]);
+ setTimes(changedItemPath, changedOneDriveItem.mtime, changedOneDriveItem.mtime);
+ }
} else {
- // track this as a faked id item
- idsFaked ~= [newItem.driveId, newItem.id];
- // we also need to track that we did not rename this path
- pathsRenamed ~= [oldPath];
+ // --dry-run situation - the actual rename did not occur - but we need to track it as if it did
+ // Track this as a faked id item
+ idsFaked ~= [changedOneDriveItem.driveId, changedOneDriveItem.id];
+ // We also need to track that we did not rename this path
+ pathsRenamed ~= [existingItemPath];
}
} catch (FileException e) {
// display the error message
displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
}
}
- // handle changed content and mtime
- // HACK: use mtime+hash instead of cTag because of https://github.com/OneDrive/onedrive-api-docs/issues/765
- if (newItem.type == ItemType.file && oldItem.mtime != newItem.mtime && !testFileHash(newPath, newItem)) {
- downloadFileItem(newItem, newPath);
- }
- // handle changed time
- if (newItem.type == ItemType.file && oldItem.mtime != newItem.mtime) {
- try {
- log.vdebug("Calling setTimes() for this file: ", newPath);
- setTimes(newPath, newItem.mtime, newItem.mtime);
- } catch (FileException e) {
- // display the error message
- displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
+ // What sort of changed item is this?
+ // Is it a file or remote file, and we did not move it ..
+ if (((changedOneDriveItem.type == ItemType.file) && (!itemWasMoved)) || (((changedOneDriveItem.type == ItemType.remote) && (changedOneDriveItem.remoteType == ItemType.file)) && (!itemWasMoved))) {
+ // The eTag is notorious for being 'changed' online by some backend Microsoft process
+ if (existingDatabaseItem.quickXorHash != changedOneDriveItem.quickXorHash) {
+ // Add to the items to download array for processing - the file hash we previously recorded is not the same as online
+ fileJSONItemsToDownload ~= onedriveJSONItem;
+ } else {
+ // If the timestamp is different, or we are running a client operational mode that does not support /delta queries - we have to update the DB with the details from OneDrive
+ // Unfortunately because of the consequence of National Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes
+ // This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using these operational modes
+ // as all records are touched / updated when performing the OneDrive sync operations. The impacted operational modes are:
+ // - National Cloud Deployments do not support /delta as a query
+ // - When using --single-directory
+ // - When using --download-only --cleanup-local-files
+
+ // Is the last modified timestamp in the DB the same as the API data or are we running an operational mode where we simulated the /delta response? 
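The eTag branch above encodes a small decision table; since the backend is known to rewrite eTags without any content change, the quickXorHash is what actually gates a re-download. Distilled into a sketch (simulated-/delta modes aside; the helper name is illustrative):

// eTag differs from our cached record:
//   quickXorHash differs -> content changed, queue the file for download
//   quickXorHash same    -> eTag churn only; update the DB record when the
//                           mtime differs or the /delta response was simulated
bool needsContentDownload(string cachedQuickXorHash, string onlineQuickXorHash) {
	return cachedQuickXorHash != onlineQuickXorHash;   // hashes, not eTags, decide downloads
}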
+				// Is the last modified timestamp in the DB the same as the API data or are we running an operational mode where we simulated the /delta response?
+				if ((existingItemModifiedTime != changedOneDriveItemModifiedTime) || (generateSimulatedDeltaResponse)) {
+					// Save this item in the database
+					// Add to the local database
+					addLogEntry("Adding changed OneDrive Item to database: " ~ to!string(changedOneDriveItem), ["debug"]);
+					itemDB.upsert(changedOneDriveItem);
+				}
+			}
+		} else {
+			// Save this item in the database
+			saveItem(onedriveJSONItem);
+			
+			// If the 'Add shortcut to My files' link was the item that was actually renamed .. we have to update our DB records
+			if (changedOneDriveItem.type == ItemType.remote) {
+				// Select remote item data from the database
+				Item existingRemoteDbItem;
+				itemDB.selectById(changedOneDriveItem.remoteDriveId, changedOneDriveItem.remoteId, existingRemoteDbItem);
+				// Update the 'name' in existingRemoteDbItem and save it back to the database
+				// This is the local name stored on disk that was just 'moved'
+				existingRemoteDbItem.name = changedOneDriveItem.name;
+				itemDB.upsert(existingRemoteDbItem);
 			}
 		}
-	}
-	
-	// downloads a File resource
-	private void downloadFileItem(const ref Item item, const(string) path)
-	{
-		static import std.exception;
-		assert(item.type == ItemType.file);
-		write("Downloading file ", path, " ... ");
-		JSONValue fileDetails;
+	} else {
+		// The existingDatabaseItem.eTag == changedOneDriveItem.eTag .. nothing has changed eTag wise
+		
+		// If the timestamp is different, or we are running a client operational mode that does not support /delta queries - we have to update the DB with the details from OneDrive
+		// Unfortunately because of the consequence of National Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes
+		// This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using these operational modes
+		// as all records are touched / updated when performing the OneDrive sync operations. The impacted operational modes are:
+		// - National Cloud Deployments do not support /delta as a query
+		// - When using --single-directory
+		// - When using --download-only --cleanup-local-files
-		
-		try {
-			fileDetails = onedrive.getFileDetails(item.driveId, item.id);
-		} catch (OneDriveException e) {
-			log.error("ERROR: Query of OneDrive for file details failed");
-			if (e.httpStatusCode >= 500) {
-				// OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged
-				downloadFailed = true;
-				return;
+		
+		// Is the last modified timestamp in the DB the same as the API data or are we running an operational mode where we simulated the /delta response? 
+		if ((existingItemModifiedTime != changedOneDriveItemModifiedTime) || (generateSimulatedDeltaResponse)) {
+			// Database update needed for this item because our local record is out-of-date
+			// Add to the local database
+			addLogEntry("Adding changed OneDrive Item to database: " ~ to!string(changedOneDriveItem), ["debug"]);
+			itemDB.upsert(changedOneDriveItem);
 		}
 	}
+	}
+	
+	// Download new file items as identified
+	void downloadOneDriveItems() {
+		// Let's deal with all the JSON items that need to be downloaded in a batch process
+		size_t batchSize = to!int(appConfig.getValueLong("threads"));
+		ulong batchCount = (fileJSONItemsToDownload.length + batchSize - 1) / batchSize;
+		ulong batchesProcessed = 0;
-		// fileDetails has to be a valid JSON object
-		if (fileDetails.type() == JSONType.object){
-			if (isMalware(fileDetails)){
-				// OneDrive reports that this file is malware
-				log.error("ERROR: MALWARE DETECTED IN FILE - DOWNLOAD SKIPPED");
-				// set global flag
-				malwareDetected = true;
-				return;
-			}
-		} else {
-			// Issue #550 handling
-			log.error("ERROR: Query of OneDrive for file details failed");
-			log.vdebug("onedrive.getFileDetails call returned an invalid JSON Object");
-			// We want to return, cant download
-			downloadFailed = true;
-			return;
+		foreach (chunk; fileJSONItemsToDownload.chunks(batchSize)) {
+			// send an array containing 'appConfig.getValueLong("threads")' JSON items to download
+			downloadOneDriveItemsInParallel(chunk);
+		}
+	}
+	
+	// Download items in parallel
+	void downloadOneDriveItemsInParallel(JSONValue[] array) {
+		// This function receives an array of JSON items to download, the number of elements based on appConfig.getValueLong("threads")
+		foreach (i, onedriveJSONItem; processPool.parallel(array)) {
+			// Take each JSON item and download it
+			downloadFileItem(onedriveJSONItem);
 		}
+	}
+	
+	// Perform the actual download of an object from OneDrive
+	void downloadFileItem(JSONValue onedriveJSONItem) {
+		
+		bool downloadFailed = false;
+		string OneDriveFileXORHash;
+		string OneDriveFileSHA256Hash;
+		ulong jsonFileSize = 0;
+		Item databaseItem;
+		bool fileFoundInDB = false;
-		if (!dryRun) {
-			ulong onlineFileSize = 0;
-			string OneDriveFileHash;
-			
-			// fileDetails should be a valid JSON due to prior check
-			if (hasFileSize(fileDetails)) {
-				// Use the configured onlineFileSize as reported by OneDrive
-				onlineFileSize = fileDetails["size"].integer;
+		// Download item specifics
+		string downloadItemId = onedriveJSONItem["id"].str;
+		string downloadItemName = onedriveJSONItem["name"].str;
+		string downloadDriveId = onedriveJSONItem["parentReference"]["driveId"].str;
+		string downloadParentId = onedriveJSONItem["parentReference"]["id"].str;
+		
+		// Calculate this item's path
+		string newItemPath = computeItemPath(downloadDriveId, downloadParentId) ~ "/" ~ downloadItemName;
+		addLogEntry("JSON Item calculated full path for download is: " ~ newItemPath, ["debug"]);
+		
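+		// A minimal standalone sketch of the batching performed by downloadOneDriveItems()
+		// above, assuming a configured 'threads' value of 3 (the array contents are
+		// illustrative only):
+		//
+		//   import std.range : chunks;
+		//   auto queue = [1, 2, 3, 4, 5, 6, 7];
+		//   foreach (batch; queue.chunks(3)) {
+		//       // batch is [1, 2, 3], then [4, 5, 6], then [7]
+		//   }
+		//
+		// Each slice is then handed to downloadOneDriveItemsInParallel(), which fans the
+		// items out across the process pool.
+		
+		// Is the item reported as Malware ?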
+ if (isMalware(onedriveJSONItem)){ + // OneDrive reports that this file is malware + addLogEntry("ERROR: MALWARE DETECTED IN FILE - DOWNLOAD SKIPPED: " ~ newItemPath, ["info", "notify"]); + downloadFailed = true; + } else { + // Grab this file's filesize + if (hasFileSize(onedriveJSONItem)) { + // Use the configured filesize as reported by OneDrive + jsonFileSize = onedriveJSONItem["size"].integer; } else { // filesize missing - log.vdebug("WARNING: fileDetails['size'] is missing"); + addLogEntry("ERROR: onedriveJSONItem['size'] is missing", ["debug"]); } - - if (hasHashes(fileDetails)) { + + // Configure the hashes for comparison post download + if (hasHashes(onedriveJSONItem)) { // File details returned hash details // QuickXorHash - if (hasQuickXorHash(fileDetails)) { - // Use the configured quickXorHash as reported by OneDrive - if (fileDetails["file"]["hashes"]["quickXorHash"].str != "") { - OneDriveFileHash = fileDetails["file"]["hashes"]["quickXorHash"].str; + if (hasQuickXorHash(onedriveJSONItem)) { + // Use the provided quickXorHash as reported by OneDrive + if (onedriveJSONItem["file"]["hashes"]["quickXorHash"].str != "") { + OneDriveFileXORHash = onedriveJSONItem["file"]["hashes"]["quickXorHash"].str; } } else { - // Check for sha256Hash as quickXorHash did not exist - if (hasSHA256Hash(fileDetails)) { - // Use the configured sha256Hash as reported by OneDrive - if (fileDetails["file"]["hashes"]["sha256Hash"].str != "") { - OneDriveFileHash = fileDetails["file"]["hashes"]["sha256Hash"].str; + // Fallback: Check for SHA256Hash + if (hasSHA256Hash(onedriveJSONItem)) { + // Use the provided sha256Hash as reported by OneDrive + if (onedriveJSONItem["file"]["hashes"]["sha256Hash"].str != "") { + OneDriveFileSHA256Hash = onedriveJSONItem["file"]["hashes"]["sha256Hash"].str; } } } } else { // file hash data missing - log.vdebug("WARNING: fileDetails['file']['hashes'] is missing - unable to compare file hash after download"); + addLogEntry("ERROR: onedriveJSONItem['file']['hashes'] is missing - unable to compare file hash after download", ["debug"]); + } + + // Does the file already exist in the path locally? + if (exists(newItemPath)) { + // file exists locally already + foreach (driveId; onlineDriveDetails.keys) { + if (itemDB.selectByPath(newItemPath, driveId, databaseItem)) { + fileFoundInDB = true; + break; + } + } + + // Log the DB details + addLogEntry("File to download exists locally and this is the DB record: " ~ to!string(databaseItem), ["debug"]); + + // Does the DB (what we think is in sync) hash match the existing local file hash? + if (!testFileHash(newItemPath, databaseItem)) { + // local file is different to what we know to be true + addLogEntry("The local file to replace (" ~ newItemPath ~ ") has been modified locally since the last download. 
Renaming it to avoid potential local data loss."); + + // Perform the local safeBackup of the existing local file, passing in if we are performing a --dry-run or not + // In case the renamed path is needed + string renamedPath; + safeBackup(newItemPath, dryRun, renamedPath); + } } // Is there enough free space locally to download the file @@ -3037,266 +2366,477 @@ final class SyncEngine ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace(".")); // So that we are not responsible in making the disk 100% full if we can download the file, compare the current available space against the reservation set and file size // The reservation value is user configurable in the config file, 50MB by default - ulong freeSpaceReservation = cfg.getValueLong("space_reservation"); + ulong freeSpaceReservation = appConfig.getValueLong("space_reservation"); // debug output - log.vdebug("Local Disk Space Actual: ", localActualFreeSpace); - log.vdebug("Free Space Reservation: ", freeSpaceReservation); - log.vdebug("File Size to Download: ", onlineFileSize); + addLogEntry("Local Disk Space Actual: " ~ to!string(localActualFreeSpace), ["debug"]); + addLogEntry("Free Space Reservation: " ~ to!string(freeSpaceReservation), ["debug"]); + addLogEntry("File Size to Download: " ~ to!string(jsonFileSize), ["debug"]); - // calculate if we can download file - if ((localActualFreeSpace < freeSpaceReservation) || (onlineFileSize > localActualFreeSpace)) { + // Calculate if we can actually download file - is there enough free space? + if ((localActualFreeSpace < freeSpaceReservation) || (jsonFileSize > localActualFreeSpace)) { // localActualFreeSpace is less than freeSpaceReservation .. insufficient free space - // onlineFileSize is greater than localActualFreeSpace .. insufficient free space - writeln("failed!"); - log.log("Insufficient local disk space to download file"); + // jsonFileSize is greater than localActualFreeSpace .. insufficient free space + addLogEntry("Downloading file: " ~ newItemPath ~ " ... failed!"); + addLogEntry("Insufficient local disk space to download file"); downloadFailed = true; - return; - } - - // Attempt to download the file - try { - onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); - } catch (OneDriveException e) { - log.vdebug("onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); generated a OneDriveException"); - // 408 = Request Time Out - // 429 = Too Many Requests - need to delay - if (e.httpStatusCode == 408) { - // 408 error handling - request time out - // https://github.com/abraunegg/onedrive/issues/694 - // Back off & retry with incremental delay - int retryCount = 10; - int retryAttempts = 1; - int backoffInterval = 2; - while (retryAttempts < retryCount){ - // retry in 2,4,8,16,32,64,128,256,512,1024 seconds - Thread.sleep(dur!"seconds"(retryAttempts*backoffInterval)); - try { - onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); - // successful download - retryAttempts = retryCount; - } catch (OneDriveException e) { - log.vdebug("onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); generated a OneDriveException"); - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 408)) { - // If another 408 .. - if (e.httpStatusCode == 408) { - // Increment & loop around - log.vdebug("HTTP 408 generated - incrementing retryAttempts"); - retryAttempts++; - } - // If a 429 .. 
- if (e.httpStatusCode == 429) { - // Increment & loop around - handleOneDriveThrottleRequest(); - log.vdebug("HTTP 429 generated - incrementing retryAttempts"); - retryAttempts++; - } - } else { - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } + } else { + // If we are in a --dry-run situation - if not, actually perform the download + if (!dryRun) { + // Attempt to download the file as there is enough free space locally + OneDriveApi downloadFileOneDriveApiInstance; + + try { + // Initialise API instance + downloadFileOneDriveApiInstance = new OneDriveApi(appConfig); + downloadFileOneDriveApiInstance.initialise(); + + // OneDrive Business Shared Files - update the driveId where to get the file from + if (isItemRemote(onedriveJSONItem)) { + downloadDriveId = onedriveJSONItem["remoteItem"]["parentReference"]["driveId"].str; } + + // Perform the download + downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + downloadFileOneDriveApiInstance.releaseCurlEngine(); + downloadFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + } catch (OneDriveException exception) { + addLogEntry("downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); generated a OneDriveException", ["debug"]); + string thisFunctionName = getFunctionName!({}); + + // HTTP request returned status code 403 + if ((exception.httpStatusCode == 403) && (appConfig.getValueBool("sync_business_shared_files"))) { + // We attempted to download a file, that was shared with us, but this was shared with us as read-only and no download permission + addLogEntry("Unable to download this file as this was shared as read-only without download permission: " ~ newItemPath); + downloadFailed = true; + } else { + // Default operation if not a 403 error + // - 408,429,503,504 errors are handled as a retry within downloadFileOneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + } catch (FileException e) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + downloadFailed = true; + } catch (ErrnoException e) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + downloadFailed = true; } - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests) - // https://github.com/abraunegg/onedrive/issues/133 - int retryCount = 10; - int retryAttempts = 1; - while (retryAttempts < retryCount){ - // retry after waiting the timeout value from the 429 HTTP response header Retry-After - handleOneDriveThrottleRequest(); - try { - onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); - // successful download - retryAttempts = retryCount; - } catch (OneDriveException e) { - log.vdebug("onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); generated a OneDriveException"); - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 408)) { - // If another 408 .. - if (e.httpStatusCode == 408) { - // Increment & loop around - log.vdebug("HTTP 408 generated - incrementing retryAttempts"); - retryAttempts++; - } - // If a 429 .. 
-					if (e.httpStatusCode == 429) {
-						// Increment & loop around
-						handleOneDriveThrottleRequest();
-						log.vdebug("HTTP 429 generated - incrementing retryAttempts");
-						retryAttempts++;
+			
+			// If we get to this point, something was downloaded .. does it match what we expected?
+			if (exists(newItemPath)) {
+				// When downloading some files from SharePoint, the OneDrive API reports one file size,
+				// but the SharePoint HTTP Server sends a totally different byte count for the same file
+				// we have implemented --disable-download-validation to disable these checks
+				
+				if (!disableDownloadValidation) {
+					// A 'file' was downloaded - does what we downloaded match the reported jsonFileSize, or is there some sort of funky local disk compression going on?
+					// Does the file hash OneDrive reports match what we have locally?
+					string onlineFileHash;
+					string downloadedFileHash;
+					ulong downloadFileSize = getSize(newItemPath);
+					
+					if (!OneDriveFileXORHash.empty) {
+						onlineFileHash = OneDriveFileXORHash;
+						// Calculate the QuickXorHash for this file
+						downloadedFileHash = computeQuickXorHash(newItemPath);
+					} else {
+						onlineFileHash = OneDriveFileSHA256Hash;
+						// Fallback: Calculate the SHA256 Hash for this file
+						downloadedFileHash = computeSHA256Hash(newItemPath);
+					}
+					
+					if ((downloadFileSize == jsonFileSize) && (downloadedFileHash == onlineFileHash)) {
+						// Downloaded file matches size and hash
+						addLogEntry("Downloaded file matches reported size and reported file hash", ["debug"]);
+						
+						try {
+							// get the mtime from the JSON data
+							SysTime itemModifiedTime;
+							if (isItemRemote(onedriveJSONItem)) {
+								// remote file item
+								itemModifiedTime = SysTime.fromISOExtString(onedriveJSONItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
+							} else {
+								// not a remote item
+								itemModifiedTime = SysTime.fromISOExtString(onedriveJSONItem["fileSystemInfo"]["lastModifiedDateTime"].str);
+							}
+							
+							// set the correct time on the downloaded file
+							if (!dryRun) {
+								addLogEntry("Calling setTimes() for this file: " ~ newItemPath, ["debug"]);
+								setTimes(newItemPath, itemModifiedTime, itemModifiedTime);
+							}
+						} catch (FileException e) {
+							// display the error message
+							displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
 						}
 					} else {
-						displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
+						// Downloaded file does not match size or hash .. which is it?
+						bool downloadValueMismatch = false;
+						
+						// Size error?
+						if (downloadFileSize != jsonFileSize) {
+							// downloaded file size does not match
+							downloadValueMismatch = true;
+							addLogEntry("Actual file size on disk: " ~ to!string(downloadFileSize), ["debug"]);
+							addLogEntry("OneDrive API reported size: " ~ to!string(jsonFileSize), ["debug"]);
+							addLogEntry("ERROR: File download size mismatch. Increase logging verbosity to determine why.");
+						}
+						
+						// Hash Error
+						if (downloadedFileHash != onlineFileHash) {
+							// downloaded file hash does not match
+							downloadValueMismatch = true;
+							addLogEntry("Actual local file hash: " ~ downloadedFileHash, ["debug"]);
+							addLogEntry("OneDrive API reported hash: " ~ onlineFileHash, ["debug"]);
+							addLogEntry("ERROR: File download hash mismatch. 
Increase logging verbosity to determine why."); + } + + // .heic data loss check + // - https://github.com/abraunegg/onedrive/issues/2471 + // - https://github.com/OneDrive/onedrive-api-docs/issues/1532 + // - https://github.com/OneDrive/onedrive-api-docs/issues/1723 + if (downloadValueMismatch && (toLower(extension(newItemPath)) == ".heic")) { + // Need to display a message to the user that they have experienced data loss + addLogEntry("DATA-LOSS: File downloaded has experienced data loss due to a Microsoft OneDrive API bug. DO NOT DELETE THIS FILE ONLINE: " ~ newItemPath, ["info", "notify"]); + addLogEntry(" Please read https://github.com/OneDrive/onedrive-api-docs/issues/1723 for more details.", ["verbose"]); + } + + // Add some workaround messaging for SharePoint + if (appConfig.accountType == "documentLibrary"){ + // It has been seen where SharePoint / OneDrive API reports one size via the JSON + // but the content length and file size written to disk is totally different - example: + // From JSON: "size": 17133 + // From HTTPS Server: < Content-Length: 19340 + // with no logical reason for the difference, except for a 302 redirect before file download + addLogEntry("INFO: It is most likely that a SharePoint OneDrive API issue is the root cause. Add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); + } else { + // other account types + addLogEntry("INFO: Potentially add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); + } + + // If the computed hash does not equal provided online hash, consider this a failed download + if (downloadedFileHash != onlineFileHash) { + // We do not want this local file to remain on the local file system as it failed the integrity checks + addLogEntry("Removing local file " ~ newItemPath ~ " due to failed integrity checks"); + if (!dryRun) { + safeRemove(newItemPath); + } + + // Was this item previously in-sync with the local system? + // We previously searched for the file in the DB, we need to use that record + if (fileFoundInDB) { + // Purge DB record so that the deleted local file does not cause an online delete + // In a --dry-run scenario, this is being done against a DB copy + addLogEntry("Removing DB record due to failed integrity checks"); + itemDB.deleteById(databaseItem.driveId, databaseItem.id); + } + + // Flag that the download failed + downloadFailed = true; + } } - } - } - } - } catch (FileException e) { - // There was a file system error - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - downloadFailed = true; - return; - } catch (std.exception.ErrnoException e) { - // There was a file system error - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - downloadFailed = true; - return; - } - // file has to have downloaded in order to set the times / data for the file - if (exists(path)) { - // When downloading some files from SharePoint, the OneDrive API reports one file size, but the SharePoint HTTP Server sends a totally different byte count - // for the same file - // we have implemented --disable-download-validation to disable these checks - - if (!disableDownloadValidation) { - // A 'file' was downloaded - does what we downloaded = reported onlineFileSize or if there is some sort of funky local disk compression going on - // does the file hash OneDrive reports match what we have locally? 
- string quickXorHash = computeQuickXorHash(path); - // Compute the local file size - ulong localFileSize = getSize(path); - - if ((localFileSize == onlineFileSize) || (OneDriveFileHash == quickXorHash)) { - // downloaded matches either size or hash - log.vdebug("Downloaded file matches reported size and or reported file hash"); - try { - log.vdebug("Calling setTimes() for this file: ", path); - setTimes(path, item.mtime, item.mtime); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } else { - // size error? - if (localFileSize != onlineFileSize) { - // downloaded file size does not match - log.vdebug("Actual file size on disk: ", localFileSize); - log.vdebug("OneDrive API reported size: ", onlineFileSize); - log.error("ERROR: File download size mis-match. Increase logging verbosity to determine why."); - } - // hash error? - if (OneDriveFileHash != quickXorHash) { - // downloaded file hash does not match - log.vdebug("Actual local file hash: ", quickXorHash); - log.vdebug("OneDrive API reported hash: ", OneDriveFileHash); - log.error("ERROR: File download hash mis-match. Increase logging verbosity to determine why."); - } - // add some workaround messaging - if (accountType == "documentLibrary"){ - // It has been seen where SharePoint / OneDrive API reports one size via the JSON - // but the content length and file size written to disk is totally different - example: - // From JSON: "size": 17133 - // From HTTPS Server: < Content-Length: 19340 - // with no logical reason for the difference, except for a 302 redirect before file download - log.error("INFO: It is most likely that a SharePoint OneDrive API issue is the root cause. Add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); } else { - // other account types - log.error("INFO: Potentially add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); - } - - // we do not want this local file to remain on the local file system - safeRemove(path); + // Download validation checks were disabled + addLogEntry("Downloaded file validation disabled due to --disable-download-validation", ["debug"]); + addLogEntry("WARNING: Skipping download integrity check for: " ~ newItemPath, ["verbose"]); + } // end of (!disableDownloadValidation) + } else { + addLogEntry("ERROR: File failed to download. Increase logging verbosity to determine why."); downloadFailed = true; - return; } - } else { - // download checks have been disabled - log.vdebug("Downloaded file validation disabled due to --disable-download-validation "); + } + } + + // File should have been downloaded + if (!downloadFailed) { + // Download did not fail + addLogEntry("Downloading file: " ~ newItemPath ~ " ... done"); + // Save this item into the database + saveItem(onedriveJSONItem); + + // If we are in a --dry-run situation - if we are, we need to track that we faked the download + if (dryRun) { + // track that we 'faked it' + idsFaked ~= [downloadDriveId, downloadItemId]; } } else { - log.error("ERROR: File failed to download. Increase logging verbosity to determine why."); - downloadFailed = true; - return; + // Output download failed + addLogEntry("Downloading file: " ~ newItemPath ~ " ... 
failed!"); + // Add the path to a list of items that failed to download + if (!canFind(fileDownloadFailures, newItemPath)) { + fileDownloadFailures ~= newItemPath; // Add newItemPath if it's not already present + } } } - - if (!downloadFailed) { - writeln("done."); - log.fileOnly("Downloading file ", path, " ... done."); - } else { - writeln("failed!"); - log.fileOnly("Downloading file ", path, " ... failed!"); - } } - - // returns true if the given item corresponds to the local one - private bool isItemSynced(const ref Item item, const(string) path, string itemSource) - { + + // Test if the given item is in-sync. Returns true if the given item corresponds to the local one + bool isItemSynced(Item item, string path, string itemSource) { if (!exists(path)) return false; - final switch (item.type) { - case ItemType.file: - if (isFile(path)) { - // can we actually read the local file? - if (readLocalFile(path)){ - // local file is readable - SysTime localModifiedTime = timeLastModified(path).toUTC(); - SysTime itemModifiedTime = item.mtime; - // HACK: reduce time resolution to seconds before comparing - localModifiedTime.fracSecs = Duration.zero; - itemModifiedTime.fracSecs = Duration.zero; - if (localModifiedTime == itemModifiedTime) { - return true; - } else { - log.vlog("The local item has a different modified time ", localModifiedTime, " when compared to ", itemSource, " modified time ", itemModifiedTime); - // The file has been modified ... is the hash the same? - // Test the file hash as the date / time stamp is different - // Generating a hash is computationally expensive - only generate the hash if timestamp was modified - if (testFileHash(path, item)) { - return true; - } else { - log.vlog("The local item has a different hash when compared to ", itemSource, " item hash"); + + // Combine common logic for readability and file check into a single block + if (item.type == ItemType.file || ((item.type == ItemType.remote) && (item.remoteType == ItemType.file))) { + // Can we actually read the local file? + if (!readLocalFile(path)) { + // Unable to read local file + addLogEntry("Unable to determine the sync state of this file as it cannot be read (file permissions or file corruption): " ~ path); + return false; + } + + // Get time values + SysTime localModifiedTime = timeLastModified(path).toUTC(); + SysTime itemModifiedTime = item.mtime; + // Reduce time resolution to seconds before comparing + localModifiedTime.fracSecs = Duration.zero; + itemModifiedTime.fracSecs = Duration.zero; + + if (localModifiedTime == itemModifiedTime) { + return true; + } else { + // The file has a different timestamp ... is the hash the same meaning no file modification? + addLogEntry("Local file time discrepancy detected: " ~ path, ["verbose"]); + addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["verbose"]); + + // The file has a different timestamp ... is the hash the same meaning no file modification? + // Test the file hash as the date / time stamp is different + // Generating a hash is computationally expensive - we only generate the hash if timestamp was different + if (testFileHash(path, item)) { + // The hash is the same .. 
so we need to fix-up the timestamp depending on where it is wrong + addLogEntry("Local item has the same hash value as the item online - correcting the applicable file timestamp", ["verbose"]); + // Correction logic based on the configuration and the comparison of timestamps + if (localModifiedTime > itemModifiedTime) { + // Local file is newer .. are we in a --download-only situation? + if (!appConfig.getValueBool("download_only") && !dryRun) { + // The source of the out-of-date timestamp was OneDrive and this needs to be corrected to avoid always generating a hash test if timestamp is different + addLogEntry("The source of the incorrect timestamp was OneDrive online - correcting timestamp online", ["verbose"]); + // Attempt to update the online date time stamp + // We need to use the correct driveId and itemId, especially if we are updating a OneDrive Business Shared File timestamp + if (item.type == ItemType.file) { + // Not a remote file + uploadLastModifiedTime(item, item.driveId, item.id, localModifiedTime, item.eTag); + } else { + // Remote file, remote values need to be used + uploadLastModifiedTime(item, item.remoteDriveId, item.remoteId, localModifiedTime, item.eTag); + } + } else if (!dryRun) { + // --download-only is being used ... local file needs to be corrected ... but why is it newer - indexing application potentially changing the timestamp ? + addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally due to --download-only", ["verbose"]); + // Fix the local file timestamp + addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); + setTimes(path, item.mtime, item.mtime); } + } else if (!dryRun) { + // The source of the out-of-date timestamp was the local file and this needs to be corrected to avoid always generating a hash test if timestamp is different + addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally", ["verbose"]); + // Fix the local file timestamp + addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); + setTimes(path, item.mtime, item.mtime); } + return false; } else { - // Unable to read local file - log.log("Unable to determine the sync state of this file as it cannot be read (file permissions or file corruption): ", path); + // The hash is different so the content of the file has to be different as to what is stored online + addLogEntry("The local file has a different hash when compared to " ~ itemSource ~ " file hash", ["verbose"]); return false; } + } + } else if (item.type == ItemType.dir || ((item.type == ItemType.remote) && (item.remoteType == ItemType.dir))) { + // item is a directory + return true; + } else { + // ItemType.unknown or ItemType.none + // Logically, we might not want to sync these items, but a more nuanced approach may be needed based on application context + return true; + } + } + + // Get the /delta data using the provided details + //JSONValue getDeltaChangesByItemId(string selectedDriveId, string selectedItemId, string providedDeltaLink, OneDriveApi getDeltaQueryOneDriveApiInstance) { + JSONValue getDeltaChangesByItemId(string selectedDriveId, string selectedItemId, string providedDeltaLink) { + + // Function variables + JSONValue deltaChangesBundle; + + // Create a new API Instance for querying the actual /delta and initialise it + OneDriveApi getDeltaDataOneDriveApiInstance; + getDeltaDataOneDriveApiInstance = new OneDriveApi(appConfig); + getDeltaDataOneDriveApiInstance.initialise(); + + + // Get the /delta data for 
this account | driveId | deltaLink combination + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("selectedDriveId: " ~ selectedDriveId, ["debug"]); + addLogEntry("selectedItemId: " ~ selectedItemId, ["debug"]); + addLogEntry("providedDeltaLink: " ~ providedDeltaLink, ["debug"]); + addLogEntry("------------------------------------------------------------------", ["debug"]); + + try { + deltaChangesBundle = getDeltaDataOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink); + } catch (OneDriveException exception) { + // caught an exception + addLogEntry("getDeltaDataOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink) generated a OneDriveException", ["debug"]); + + auto errorArray = splitLines(exception.msg); + string thisFunctionName = getFunctionName!({}); + + // Error handling operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within getDeltaDataOneDriveApiInstance + if (exception.httpStatusCode == 410) { + addLogEntry(); + addLogEntry("WARNING: The OneDrive API responded with an error that indicates the locally stored deltaLink value is invalid"); + // Essentially the 'providedDeltaLink' that we have stored is no longer available ... re-try without the stored deltaLink + addLogEntry("WARNING: Retrying OneDrive API call without using the locally stored deltaLink value"); + // Configure an empty deltaLink + addLogEntry("Delta link expired for 'getDeltaDataOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink)', setting 'deltaLink = null'", ["debug"]); + string emptyDeltaLink = ""; + // retry with empty deltaLink + deltaChangesBundle = getDeltaDataOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, emptyDeltaLink); } else { - log.vlog("The local item is a directory but should be a file"); + // Display what the error is + addLogEntry("CODING TO DO: Hitting this failure error output after getting a httpStatusCode != 410 when the API responded the deltaLink was invalid"); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + deltaChangesBundle = null; + // Perform Garbage Collection + GC.collect(); } - break; - case ItemType.dir: - case ItemType.remote: - if (isDir(path)) { - return true; + } + + // Destroy this object? 
+		getDeltaDataOneDriveApiInstance.releaseCurlEngine();
+		getDeltaDataOneDriveApiInstance = null;
+		// Perform Garbage Collection on this destroyed curl engine
+		GC.collect();
+		
+		// Return data
+		return deltaChangesBundle;
+	}
+	
+	// If the JSON response is not correct JSON object, exit
+	void invalidJSONResponseFromOneDriveAPI() {
+		addLogEntry("ERROR: Query of the OneDrive API returned an invalid JSON response");
+		// Must force exit here, allow logging to be done
+		forceExit();
+	}
+	
+	// Handle an unhandled API error
+	void defaultUnhandledHTTPErrorCode(OneDriveException exception) {
+		// display error
+		displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
+		// Must force exit here, allow logging to be done
+		forceExit();
+	}
+	
+	// Display the pertinent details of the sync engine
+	void displaySyncEngineDetails() {
+		// Display accountType, defaultDriveId, defaultRootId & remainingFreeSpace for verbose logging purposes
+		addLogEntry("Application Version: " ~ appConfig.applicationVersion, ["verbose"]);
+		addLogEntry("Account Type: " ~ appConfig.accountType, ["verbose"]);
+		addLogEntry("Default Drive ID: " ~ appConfig.defaultDriveId, ["verbose"]);
+		addLogEntry("Default Root ID: " ~ appConfig.defaultRootId, ["verbose"]);
+		
+		// Fetch the details from cachedOnlineDriveData
+		DriveDetailsCache cachedOnlineDriveData;
+		cachedOnlineDriveData = getDriveDetails(appConfig.defaultDriveId);
+		
+		// What do we display here for space remaining
+		if (cachedOnlineDriveData.quotaRemaining > 0) {
+			// Display the actual value
+			addLogEntry("Remaining Free Space: " ~ to!string(byteToGibiByte(cachedOnlineDriveData.quotaRemaining)) ~ " GB (" ~ to!string(cachedOnlineDriveData.quotaRemaining) ~ " bytes)", ["verbose"]);
+		} else {
+			// zero or non-zero value or restricted
+			if (!cachedOnlineDriveData.quotaRestricted){
+				addLogEntry("Remaining Free Space: 0 KB", ["verbose"]);
+			} else {
+				addLogEntry("Remaining Free Space: Not Available", ["verbose"]);
 			}
-			break;
-		case ItemType.dir:
-		case ItemType.remote:
-			if (isDir(path)) {
-				return true;
+		}
+	}
+	
+	// Query itemdb.computePath() and catch potential assert when DB consistency issue occurs
+	string computeItemPath(string thisDriveId, string thisItemId) {
+		
+		// static declare this for this function
+		static import core.exception;
+		string calculatedPath;
+		addLogEntry("Attempting to calculate local filesystem path for " ~ thisDriveId ~ " and " ~ thisItemId, ["debug"]);
+		
+		try {
+			calculatedPath = itemDB.computePath(thisDriveId, thisItemId);
+		} catch (core.exception.AssertError) {
+			// broken tree in the database, we cant compute the path for this item id, exit
+			addLogEntry("ERROR: A database consistency issue has been caught. A --resync is needed to rebuild the database.");
+			// Must force exit here, allow logging to be done
+			forceExit();
+		}
+		
+		// return calculated path as string
+		return calculatedPath;
+	}
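+	
+	// A minimal sketch (assumed usage, not part of this change set) of how
+	// computeItemPath() above and testFileHash() below combine when validating a
+	// database record against the local disk:
+	//
+	//   Item dbItem;   // a record previously read from the database
+	//   string localPath = computeItemPath(dbItem.driveId, dbItem.id);
+	//   if (exists(localPath) && testFileHash(localPath, dbItem)) {
+	//       // the file on disk still matches the hash we recorded - nothing to do
+	//   }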
+	
+	// Try and compute the file hash for the given item
+	bool testFileHash(string path, Item item) {
+		
+		// Generate QuickXORHash first before attempting to generate any other type of hash
+		if (item.quickXorHash) {
+			if (item.quickXorHash == computeQuickXorHash(path)) return true;
+		} else if (item.sha256Hash) {
+			if (item.sha256Hash == computeSHA256Hash(path)) return true;
 		}
 		return false;
 	}
-	
-	private void deleteItems()
-	{
+	
+	// Process items that need to be removed
+	void processDeleteItems() {
+		
 		foreach_reverse (i; idsToDelete) {
 			Item item;
 			string path;
-			if (!itemdb.selectById(i[0], i[1], item)) continue; // check if the item is in the db
+			if (!itemDB.selectById(i[0], i[1], item)) continue; // check if the item is in the db
 			// Compute this item path
 			path = computeItemPath(i[0], i[1]);
-			// Try to delete item object
-			log.log("Trying to delete item ", path);
-			if (!dryRun) {
-				// Actually process the database entry removal
-				itemdb.deleteById(item.driveId, item.id);
-				if (item.remoteDriveId != null) {
-					// delete the linked remote folder
-					itemdb.deleteById(item.remoteDriveId, item.remoteId);
+			
+			// Log the action if the path exists .. it may have already been removed and this is a legacy array item
+			if (exists(path)) {
+				if (item.type == ItemType.file) {
+					addLogEntry("Trying to delete file " ~ path);
+				} else {
+					addLogEntry("Trying to delete directory " ~ path);
 				}
 			}
+			
+			// Process the database entry removal. In a --dry-run scenario, this is being done against a DB copy
+			itemDB.deleteById(item.driveId, item.id);
+			if (item.remoteDriveId != null) {
+				// delete the linked remote folder
+				itemDB.deleteById(item.remoteDriveId, item.remoteId);
+			}
+			
+			// Add to pathFakeDeletedArray
+			// We dont want to try and upload this item again, so we need to track this object's removal
+			if (dryRun) {
+				// We need to add './' here so that it can be correctly searched to ensure it is not uploaded
+				string pathToAdd = "./" ~ path;
+				pathFakeDeletedArray ~= pathToAdd;
+			}
+			
 			bool needsRemoval = false;
 			if (exists(path)) {
 				// path exists on the local system
 				// make sure that the path refers to the correct item
 				Item pathItem;
-				if (itemdb.selectByPath(path, item.driveId, pathItem)) {
+				if (itemDB.selectByPath(path, item.driveId, pathItem)) {
 					if (pathItem.id == item.id) {
 						needsRemoval = true;
 					} else {
-						log.log("Skipped due to id difference!");
+						addLogEntry("Skipped due to id difference!");
 					}
 				} else {
 					// item has disappeared completely
@@ -3304,7 +2844,14 @@ final class SyncEngine
 				}
 			}
 			if (needsRemoval) {
-				log.log("Deleting item ", path);
+				// Log the action
+				if (item.type == ItemType.file) {
+					addLogEntry("Deleting file " ~ path);
+				} else {
+					addLogEntry("Deleting directory " ~ path);
+				}
+				
+				// Perform the action
 				if (!dryRun) {
 					if (isFile(path)) {
 						remove(path);
@@ -3327,1039 +2874,590 @@ final class SyncEngine
 		}
 		
 		if (!dryRun) {
-			// clean up idsToDelete
-			idsToDelete.length = 0;
-			assumeSafeAppend(idsToDelete);
+			// Cleanup array memory
+			idsToDelete = [];
 		}
 	}
	
-	// scan the given directory for differences and new items - for use with --synchronize
-	void scanForDifferences(const(string) path)
-	{
-		// To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? 
- string logPath; - if (path == ".") { - // get the configured sync_dir - logPath = buildNormalizedPath(cfg.getValueString("sync_dir")); - } else { - // use what was passed in - logPath = path; - } - - // If we are using --upload-only & --sync-shared-folders there is a possability that a 'new' local folder might - // be misinterpreted that it needs to be uploaded to the users default OneDrive DriveID rather than the requested / configured - // Shared Business Folder. In --resync scenarios, the DB information that tells that this Business Shared Folder does not exist, - // and in a --upload-only scenario will never exist, so the correct lookups are unable to be performed. - if ((exists(cfg.businessSharedFolderFilePath)) && (syncBusinessFolders) && (cfg.getValueBool("upload_only"))){ - // business_shared_folders file exists, --sync-shared-folders is enabled, --upload-only is enabled - log.vdebug("OneDrive Business --upload-only & --sync-shared-folders edge case triggered"); - handleUploadOnlyBusinessSharedFoldersEdgeCase(); - } + // Update the timestamp of an object online + void uploadLastModifiedTime(Item originItem, string driveId, string id, SysTime mtime, string eTag) { - // Are we configured to use a National Cloud Deployment - if (nationalCloudDeployment) { - // Select items that have a out-of-sync flag set - flagNationalCloudDeploymentOutOfSyncItems(); - } + string itemModifiedTime; + itemModifiedTime = mtime.toISOExtString(); + JSONValue data = [ + "fileSystemInfo": JSONValue([ + "lastModifiedDateTime": itemModifiedTime + ]) + ]; - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. - // if a file, potentially leads to confusion as to what the client is actually doing - log.log("Uploading differences of ", logPath); + // What eTag value do we use? + string eTagValue; + if (appConfig.accountType == "personal") { + // Nullify the eTag to avoid 412 errors as much as possible + eTagValue = null; + } else { + eTagValue = eTag; } - Item item; - // For each unique OneDrive driveID we know about - foreach (driveId; driveIDsArray) { - log.vdebug("Processing DB entries for this driveId: ", driveId); - // Database scan of every item in DB for the given driveId based on the root parent for that drive - if ((syncBusinessFolders) && (driveId != defaultDriveId)) { - // There could be multiple shared folders all from this same driveId - are we doing a single directory sync? 
- if (cfg.getValueString("single_directory") != ""){ - // Limit the local filesystem check to just the requested directory - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); - } - } else { - // check everything associated with each driveId we know about - foreach(dbItem; itemdb.selectByDriveId(driveId)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(dbItem); - } - } + JSONValue response; + OneDriveApi uploadLastModifiedTimeApiInstance; + + // Try and update the online last modified time + try { + // Create a new OneDrive API instance + uploadLastModifiedTimeApiInstance = new OneDriveApi(appConfig); + uploadLastModifiedTimeApiInstance.initialise(); + // Use this instance + response = uploadLastModifiedTimeApiInstance.updateById(driveId, id, data, eTagValue); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + uploadLastModifiedTimeApiInstance.releaseCurlEngine(); + uploadLastModifiedTimeApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // Do we actually save the response? + // Special case here .. if the DB record item (originItem) is a remote object, thus, if we save the 'response' we will have a DB FOREIGN KEY constraint failed problem + // Update 'originItem.mtime' with the correct timestamp + // Update 'originItem.size' with the correct size from the response + // Update 'originItem.eTag' with the correct eTag from the response + // Update 'originItem.cTag' with the correct cTag from the response + // Update 'originItem.quickXorHash' with the correct quickXorHash from the response + // Everything else should remain the same .. and then save this DB record to the DB .. + // However, we did this, for the local modified file right before calling this function to update the online timestamp ... so .. do we need to do this again, effectively performing a double DB write for the same data? + if ((originItem.type != ItemType.remote) && (originItem.remoteType != ItemType.file)) { + // Save the response JSON + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(response); + } + } catch (OneDriveException exception) { + string thisFunctionName = getFunctionName!({}); + // Handle a 412 - A precondition provided in the request (such as an if-match header) does not match the resource's current state. 
+ if (exception.httpStatusCode == 412) { + // OneDrive threw a 412 error, most likely: ETag does not match current item's value + addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting file time stamp update - gracefully handling error", ["verbose"]); + addLogEntry("File Metadata Update Failed - OneDrive eTag / cTag match issue", ["debug"]); + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + // Retry without eTag + uploadLastModifiedTime(originItem, driveId, id, mtime, null); } else { - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); - } + // Any other error that should be handled + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } - } - - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. - // if a file, potentially leads to confusion as to what the client is actually doing - log.log("Uploading new items of ", logPath); - } - - // Filesystem walk to find new files not uploaded - uploadNewItems(path); - // clean up idsToDelete only if --dry-run is set - if (dryRun) { - idsToDelete.length = 0; - assumeSafeAppend(idsToDelete); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + uploadLastModifiedTimeApiInstance.releaseCurlEngine(); + uploadLastModifiedTimeApiInstance = null; + // Perform Garbage Collection + GC.collect(); } } - // scan the given directory for differences only - for use with --monitor - void scanForDifferencesDatabaseScan(const(string) path) - { - // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? - string logPath; - if (path == ".") { - // get the configured sync_dir - logPath = buildNormalizedPath(cfg.getValueString("sync_dir")); - } else { - // use what was passed in - logPath = path; - } + // Perform a database integrity check - checking all the items that are in-sync at the moment, validating what we know should be on disk, to what is actually on disk + void performDatabaseConsistencyAndIntegrityCheck() { - // If we are using --upload-only & --sync-shared-folders there is a possability that a 'new' local folder might - // be misinterpreted that it needs to be uploaded to the users default OneDrive DriveID rather than the requested / configured - // Shared Business Folder. In --resync scenarios, the DB information that tells that this Business Shared Folder does not exist, - // and in a --upload-only scenario will never exist, so the correct lookups are unable to be performed. 
- if ((exists(cfg.businessSharedFolderFilePath)) && (syncBusinessFolders) && (cfg.getValueBool("upload_only"))){ - // business_shared_folders file exists, --sync-shared-folders is enabled, --upload-only is enabled - log.vdebug("OneDrive Business --upload-only & --sync-shared-folders edge case triggered"); - handleUploadOnlyBusinessSharedFoldersEdgeCase(); + // Log what we are doing + if (!appConfig.suppressLoggingOutput) { + addProcessingLogHeaderEntry("Performing a database consistency and integrity check on locally stored data", appConfig.verbosityCount); } - // Are we configured to use a National Cloud Deployment - if (nationalCloudDeployment) { - // Select items that have a out-of-sync flag set - flagNationalCloudDeploymentOutOfSyncItems(); + // What driveIDsArray do we use? If we are doing a --single-directory we need to use just the drive id associated with that operation + string[] consistencyCheckDriveIdsArray; + if (singleDirectoryScope) { + consistencyCheckDriveIdsArray ~= singleDirectoryScopeDriveId; + } else { + // Query the DB for all unique DriveID's + consistencyCheckDriveIdsArray = itemDB.selectDistinctDriveIds(); } - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. - // if a file, potentially leads to confusion as to what the client is actually doing - log.vlog("Uploading differences of ", logPath); - } + // Create a new DB blank item Item item; - // For each unique OneDrive driveID we know about - foreach (driveId; driveIDsArray) { - log.vdebug("Processing DB entries for this driveId: ", driveId); - // Database scan of every item in DB for the given driveId based on the root parent for that drive - if ((syncBusinessFolders) && (driveId != defaultDriveId)) { - // There could be multiple shared folders all from this same driveId - are we doing a single directory sync? - if (cfg.getValueString("single_directory") != ""){ - // Limit the local filesystem check to just the requested directory - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); + // Use the array we populate, rather than selecting all distinct driveId's from the database + foreach (driveId; consistencyCheckDriveIdsArray) { + // Make the logging more accurate - we cant update driveId as this then breaks the below queries + addLogEntry("Processing DB entries for this Drive ID: " ~ driveId, ["verbose"]); + + // Initialise the array + Item[] driveItems = []; + + // Freshen the cached quota details for this driveID + addOrUpdateOneDriveOnlineDetails(driveId); + + // What OneDrive API query do we use? + // - Are we running against a National Cloud Deployments that does not support /delta ? + // National Cloud Deployments do not support /delta as a query + // https://docs.microsoft.com/en-us/graph/deployments#supported-features + // + // - Are we performing a --single-directory sync, which will exclude many items online, focusing in on a specific online directory + // + // - Are we performing a --download-only --cleanup-local-files action? 
+			//
+			// If we did, we self generated a /delta response, thus need to now process elements that are still flagged as out-of-sync
+			if ((singleDirectoryScope) || (nationalCloudDeployment) || (cleanupLocalFiles)) {
+				// Any entry in the DB that is flagged as out-of-sync needs to be cleaned up locally first before we scan the entire DB
+				// Normally, this is done at the end of processing all /delta queries, however when using --single-directory or a National Cloud Deployment is configured
+				// We cant use /delta to query the OneDrive API as National Cloud Deployments dont support /delta
+				// https://docs.microsoft.com/en-us/graph/deployments#supported-features
+				// We dont use /delta for --single-directory as, in order to sync a single path with /delta, we need to query the entire OneDrive API JSON data to then filter out
+				// objects that we dont want, thus, it is easier to use the same method as National Cloud Deployments, but query just the objects we are after
+				
+				// For each unique OneDrive driveID we know about
+				Item[] outOfSyncItems = itemDB.selectOutOfSyncItems(driveId);
+				foreach (outOfSyncItem; outOfSyncItems) {
+					if (!dryRun) {
+						// clean up idsToDelete
+						idsToDelete.length = 0;
+						assumeSafeAppend(idsToDelete);
+						// flag to delete local file as it now is no longer in sync with OneDrive
+						addLogEntry("Flagging to delete local item as it now is no longer in sync with OneDrive", ["debug"]);
+						addLogEntry("outOfSyncItem: " ~ to!string(outOfSyncItem), ["debug"]);
+						idsToDelete ~= [outOfSyncItem.driveId, outOfSyncItem.id];
+						// delete items in idsToDelete
+						if (idsToDelete.length > 0) processDeleteItems();
 					}
+				}
+				
+				// Clear array
+				outOfSyncItems = [];
+				
+				// Fetch database items associated with this path
+				if (singleDirectoryScope) {
+					// Use the --single-directory items we previously configured
+					// - query database for children objects using those items
+					driveItems = getChildren(singleDirectoryScopeDriveId, singleDirectoryScopeItemId);
 				} else {
-					// check everything associated with each driveId we know about
-					foreach(dbItem; itemdb.selectByDriveId(driveId)) {
-						// Does it still exist on disk in the location the DB thinks it is
-						log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB");
-						uploadDifferences(dbItem);
-					}
+					// Check everything associated with each driveId we know about
+					addLogEntry("Selecting DB items via itemDB.selectByDriveId(driveId)", ["debug"]);
+					// Query database
+					driveItems = itemDB.selectByDriveId(driveId);
+				}
+				
+				// Log DB items to process
+				addLogEntry("Database items to process for this driveId: " ~ to!string(driveItems.count), ["debug"]);
+				
+				// Process each database item associated with the driveId
+				foreach(dbItem; driveItems) {
+					// Does it still exist on disk in the location the DB thinks it is
+					checkDatabaseItemForConsistency(dbItem);
+				}
 			} else {
-				if (itemdb.selectByPath(path, driveId, item)) {
+				// Check everything associated with each driveId we know about
+				addLogEntry("Selecting DB items via itemDB.selectByDriveId(driveId)", ["debug"]);
+				
+				// Query database
+				driveItems = itemDB.selectByDriveId(driveId);
+				addLogEntry("Database items to process for this driveId: " ~ to!string(driveItems.count), ["debug"]);
+				
+				// Process each database item associated with the driveId
+				foreach(dbItem; driveItems) {
 					// Does it still exist on disk in the location the DB thinks it is
-					log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB");
-					uploadDifferences(item);
+					
checkDatabaseItemForConsistency(dbItem); } } + + // Clear the array + driveItems = []; } - } - - void flagNationalCloudDeploymentOutOfSyncItems() { - // Any entry in the DB than is flagged as out-of-sync needs to be cleaned up locally first before we scan the entire DB - // Normally, this is done at the end of processing all /delta queries, however National Cloud Deployments do not support /delta as a query - // https://docs.microsoft.com/en-us/graph/deployments#supported-features - // Select items that have a out-of-sync flag set - foreach (driveId; driveIDsArray) { - // For each unique OneDrive driveID we know about - Item[] outOfSyncItems = itemdb.selectOutOfSyncItems(driveId); - foreach (item; outOfSyncItems) { - if (!dryRun) { - // clean up idsToDelete - idsToDelete.length = 0; - assumeSafeAppend(idsToDelete); - // flag to delete local file as it now is no longer in sync with OneDrive - log.vdebug("Flagging to delete local item as it now is no longer in sync with OneDrive"); - log.vdebug("item: ", item); - idsToDelete ~= [item.driveId, item.id]; - // delete items in idsToDelete - if (idsToDelete.length > 0) deleteItems(); - } + + // Close out the '....' being printed to the console + if (!appConfig.suppressLoggingOutput) { + if (appConfig.verbosityCount == 0) { + addLogEntry("\n", ["consoleOnlyNoNewLine"]); } } - } - - void handleUploadOnlyBusinessSharedFoldersEdgeCase() { - // read in the business_shared_folders file contents - string[] businessSharedFoldersList; - // open file as read only - auto file = File(cfg.businessSharedFolderFilePath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; - businessSharedFoldersList ~= buildNormalizedPath(line); - } - file.close(); - - // Query the GET /me/drive/sharedWithMe API - JSONValue graphQuery = onedrive.getSharedWithMe(); - if (graphQuery.type() == JSONType.object) { - if (count(graphQuery["value"].array) != 0) { - // Shared items returned - log.vdebug("onedrive.getSharedWithMe API Response: ", graphQuery); - foreach (searchResult; graphQuery["value"].array) { - // loop variables - string sharedFolderName; - string remoteParentDriveId; - string remoteParentItemId; - Item remoteItemRoot; - Item remoteItem; - - // is the shared item with us a 'folder' ? - // we only handle folders, not files or other items - if (isItemFolder(searchResult)) { - // Debug response output - log.vdebug("shared folder entry: ", searchResult); - sharedFolderName = searchResult["name"].str; - remoteParentDriveId = searchResult["remoteItem"]["parentReference"]["driveId"].str; - remoteParentItemId = searchResult["remoteItem"]["parentReference"]["id"].str; - - if (canFind(businessSharedFoldersList, sharedFolderName)) { - // Shared Folder matches what is in the shared folder list - log.vdebug("shared folder name matches business_shared_folders list item: ", sharedFolderName); - // Actions: - // 1. Add this remote item to the DB so that it can be queried - // 2. 
Add remoteParentDriveId to driveIDsArray so we have a record of it - - // Make JSON item DB compatible - remoteItem = makeItem(searchResult); - // Fix up entries, as we are manipulating the data - remoteItem.driveId = remoteParentDriveId; - remoteItem.eTag = ""; - remoteItem.cTag = ""; - remoteItem.parentId = defaultRootId; - remoteItem.remoteDriveId = ""; - remoteItem.remoteId = ""; - - // Build the remote root DB item - remoteItemRoot.driveId = remoteParentDriveId; - remoteItemRoot.id = defaultRootId; - remoteItemRoot.name = "root"; - remoteItemRoot.type = ItemType.dir; - remoteItemRoot.mtime = remoteItem.mtime; - remoteItemRoot.syncStatus = "Y"; - - // Add root remote item to the local database - log.vdebug("Adding remote folder root to database: ", remoteItemRoot); - itemdb.upsert(remoteItemRoot); - - // Add shared folder item to the local database - log.vdebug("Adding remote folder to database: ", remoteItem); - itemdb.upsert(remoteItem); - - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, remoteParentDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= remoteParentDriveId; - } - } - } + + // Are we doing a --download-only sync? + if (!appConfig.getValueBool("download_only")) { + + // Do we have any known items, where they have been deleted locally, that now need to be deleted online? + if (databaseItemsToDeleteOnline.length > 0) { + // There are items to delete online + addLogEntry("Deleted local items to delete on Microsoft OneDrive: " ~ to!string(databaseItemsToDeleteOnline.length)); + foreach(localItemToDeleteOnline; databaseItemsToDeleteOnline) { + // Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set + uploadDeletedItem(localItemToDeleteOnline.dbItem, localItemToDeleteOnline.localFilePath); } + // Cleanup array memory + databaseItemsToDeleteOnline = []; } - } - } - - // scan the given directory for new items - for use with --monitor or --cleanup-local-files - void scanForDifferencesFilesystemScan(const(string) path) - { - // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? - string logPath; - if (path == ".") { - // get the configured sync_dir - logPath = buildNormalizedPath(cfg.getValueString("sync_dir")); - } else { - // use what was passed in - logPath = path; - } - - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. - // if a file, potentially leads to confusion as to what the client is actually doing - if (!cleanupLocalFiles) { - // if --cleanup-local-files was set, we will not be uploading data - log.vlog("Uploading new items of ", logPath); + + // Do we have any known items, where the content has changed locally, that needs to be uploaded? + if (databaseItemsWhereContentHasChanged.length > 0) { + // There are changed local files that were in the DB to upload + addLogEntry("Changed local items to upload to Microsoft OneDrive: " ~ to!string(databaseItemsWhereContentHasChanged.length)); + processChangedLocalItemsToUpload(); + // Cleanup array memory + databaseItemsWhereContentHasChanged = []; } } - - // Filesystem walk to find extra files that reside locally. 
- // If --cleanup-local-files is not used, these will be uploaded (normal operation) - // If --download-only --cleanup-local-files is being used, extra files found locally will be deleted from the local filesystem - uploadNewItems(path); } - private void uploadDifferences(const ref Item item) - { - // see if this item.id we were supposed to have deleted - // match early and return - if (dryRun) { - foreach (i; idsToDelete) { - if (i[1] == item.id) { - return; - } - } - } - + // Check this Database Item for its consistency on disk + void checkDatabaseItemForConsistency(Item dbItem) { + + // What is the local path item + string localFilePath; + // Do we want to onward process this item? bool unwanted = false; - string path; - - // Compute this item path early as we we use this path often - path = computeItemPath(item.driveId, item.id); - // item.id was in the database associated with the item.driveId specified - log.vlog("Processing ", buildNormalizedPath(path)); - - // What type of DB item are we processing - // Is this item excluded by user configuration of skip_dir or skip_file? - // Is this item a directory or 'remote' type? A 'remote' type is a folder DB tie so should be compared as directory for exclusion - if ((item.type == ItemType.dir)||(item.type == ItemType.remote)) { - // Do we need to check for .nosync? Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - if (exists(path ~ "/.nosync")) { - log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", path); - return; - } - } - // Is the path excluded? - unwanted = selectiveSync.isDirNameExcluded(item.name); - } - - // Is this item a file? - if (item.type == ItemType.file) { - // Is the filename excluded? - unwanted = selectiveSync.isFileNameExcluded(item.name); - } - - // If path or filename does not exclude, is this excluded due to use of selective sync? - if (!unwanted) { - // is sync_list configured - if (syncListConfigured) { - // sync_list configured and in use - // Is the path excluded via sync_list? - unwanted = selectiveSync.isPathExcludedViaSyncList(path); - } - } - - // skip unwanted items - if (unwanted) { - //log.vlog("Filtered out"); + // Remote directory items we can 'skip' + if ((dbItem.type == ItemType.remote) && (dbItem.remoteType == ItemType.dir)) { + // return .. nothing to check here, no logging needed return; } - // Check against Microsoft OneDrive restriction and limitations about Windows naming files - if (!isValidName(path)) { - log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", path); - return; - } + // Compute this dbItem path early as we we use this path often + localFilePath = buildNormalizedPath(computeItemPath(dbItem.driveId, dbItem.id)); - // Check for bad whitespace items - if (!containsBadWhiteSpace(path)) { - log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", path); - return; + // To improve logging output for this function, what is the 'logical path'? 
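The 'logical path' substitution that the lines just below implement keeps log output readable: the database root computes to ".", which means nothing to a user, so the configured sync_dir is shown instead. A minimal standalone sketch of that mapping (helper name and inputs are illustrative, not the client's actual code):

```d
import std.path : buildNormalizedPath;
import std.stdio : writeln;

// Map the computed DB path to something readable in logs: the DB root
// computes to "." so the configured sync_dir is substituted for display.
string logicalPath(string computedPath, string syncDir) {
    return (computedPath == ".") ? buildNormalizedPath(syncDir) : computedPath;
}

void main() {
    writeln(logicalPath(".", "~/OneDrive"));               // ~/OneDrive
    writeln(logicalPath("docs/report.odt", "~/OneDrive")); // docs/report.odt
}
```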
+ string logOutputPath; + if (localFilePath == ".") { + // get the configured sync_dir + logOutputPath = buildNormalizedPath(appConfig.getValueString("sync_dir")); + } else { + // Use the path that was computed + logOutputPath = localFilePath; } - // Check for HTML ASCII Codes as part of file name - if (!containsASCIIHTMLCodes(path)) { - log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", path); - return; + // Log what we are doing + addLogEntry("Processing: " ~ logOutputPath, ["verbose"]); + // Add a processing '.' + if (!appConfig.suppressLoggingOutput) { + if (appConfig.verbosityCount == 0) { + addProcessingDotEntry(); + } } - final switch (item.type) { - case ItemType.dir: - uploadDirDifferences(item, path); - break; + // Determine which action to take + final switch (dbItem.type) { case ItemType.file: - uploadFileDifferences(item, path); + // Logging output result is handled by checkFileDatabaseItemForConsistency + checkFileDatabaseItemForConsistency(dbItem, localFilePath); + break; + case ItemType.dir: + // Logging output result is handled by checkDirectoryDatabaseItemForConsistency + checkDirectoryDatabaseItemForConsistency(dbItem, localFilePath); break; case ItemType.remote: - uploadRemoteDirDifferences(item, path); + // DB items that match: dbItem.remoteType == ItemType.dir - these should have been skipped above + // This means that anything that hits here should be: dbItem.remoteType == ItemType.file + checkFileDatabaseItemForConsistency(dbItem, localFilePath); + break; + case ItemType.unknown: + case ItemType.none: + // Unknown type - we dont action these items break; } } - - private void uploadDirDifferences(const ref Item item, const(string) path) - { - assert(item.type == ItemType.dir); - if (exists(path)) { - // Fix https://github.com/abraunegg/onedrive/issues/1915 - try { - if (!isDir(path)) { - log.vlog("The item was a directory but now it is a file"); - uploadDeleteItem(item, path); - uploadNewFile(path); - } else { - log.vlog("The directory has not changed"); - // loop through the children - foreach (Item child; itemdb.selectChildren(item.driveId, item.id)) { - uploadDifferences(child); - } - } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return; - } - } else { - // Directory does not exist locally - // If we are in a --dry-run situation - this directory may never have existed as we never downloaded it - if (!dryRun) { - // Not --dry-run situation - if (!cfg.getValueBool("monitor")) { - // Not in --monitor mode - log.vlog("The directory has been deleted locally"); - } else { - // Appropriate message as we are in --monitor mode - log.vlog("The directory appears to have been deleted locally .. but we are running in --monitor mode. This may have been 'moved' on the local filesystem rather than being 'deleted'"); - log.vdebug("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped"); - } - // A moved file will be uploaded as 'new', delete the old file and reference - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // we are in a --dry-run situation, directory appears to have deleted locally - this directory may never have existed as we never downloaded it .. 
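The dispatch in checkDatabaseItemForConsistency above relies on D's `final switch`, which refuses to compile unless every member of the enum is handled, which is why the no-op `ItemType.unknown` and `ItemType.none` cases are listed explicitly. A compilable sketch with a stand-in enum:

```d
import std.stdio : writeln;

enum ItemType { file, dir, remote, unknown, none }

void dispatch(ItemType type) {
    final switch (type) {
        case ItemType.file:
        case ItemType.remote: // remote items reaching this point are treated as files
            writeln("check file consistency");
            break;
        case ItemType.dir:
            writeln("check directory consistency");
            break;
        case ItemType.unknown:
        case ItemType.none:
            break; // not actioned, but must still be listed for final switch
    }
}

void main() {
    dispatch(ItemType.dir); // prints: check directory consistency
}
```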
- // Check if path does not exist in database - Item databaseItem; - if (!itemdb.selectByPath(path, defaultDriveId, databaseItem)) { - // Path not found in database - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // Path was found in the database - // Did we 'fake create it' as part of --dry-run ? - foreach (i; idsFaked) { - if (i[1] == item.id) { - log.vdebug("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use"); - log.vlog("The directory has not changed"); - return; - } - } - // item.id did not match a 'faked' download new directory creation - log.vlog("The directory has been deleted locally"); - uploadDeleteItem(item, path); - } - } - } - } - - private void uploadRemoteDirDifferences(const ref Item item, const(string) path) - { - assert(item.type == ItemType.remote); - if (exists(path)) { - if (!isDir(path)) { - log.vlog("The item was a directory but now it is a file"); - uploadDeleteItem(item, path); - uploadNewFile(path); - } else { - log.vlog("The directory has not changed"); - // continue through the linked folder - assert(item.remoteDriveId && item.remoteId); - Item remoteItem; - bool found = itemdb.selectById(item.remoteDriveId, item.remoteId, remoteItem); - if(found){ - // item was found in the database - uploadDifferences(remoteItem); - } - } - } else { - // are we in a dry-run scenario - if (!dryRun) { - // no dry-run - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // we are in a --dry-run situation, directory appears to have deleted locally - this directory may never have existed as we never downloaded it .. - // Check if path does not exist in database - Item databaseItem; - if (!itemdb.selectByPathWithoutRemote(path, defaultDriveId, databaseItem)) { - // Path not found in database - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // Path was found in the database - // Did we 'fake create it' as part of --dry-run ? - foreach (i; idsFaked) { - if (i[1] == item.id) { - log.vdebug("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use"); - log.vlog("The directory has not changed"); - return; - } - } - // item.id did not match a 'faked' download new directory creation - log.vlog("The directory has been deleted locally"); - uploadDeleteItem(item, path); - } - } - } - } - - // upload local file system differences to OneDrive - private void uploadFileDifferences(const ref Item item, const(string) path) - { - // Reset upload failure - OneDrive or filesystem issue (reading data) - uploadFailed = false; + + // Perform the database consistency check on this file item + void checkFileDatabaseItemForConsistency(Item dbItem, string localFilePath) { - // uploadFileDifferences is called when processing DB entries to compare against actual files on disk + // What is the source of this item data? 
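The file check that follows compares modified times only after truncating both sides to whole seconds, since OneDrive reports second resolution while local filesystems often store finer precision. A self-contained sketch of that comparison (the helper is a stand-in, not the client's code):

```d
import std.datetime : DateTime;
import std.datetime.systime : SysTime;
import core.time : Duration, msecs;

// Compare two timestamps at second resolution, as the consistency check does.
bool sameToTheSecond(SysTime a, SysTime b) {
    a.fracSecs = Duration.zero; // drop sub-second precision
    b.fracSecs = Duration.zero;
    return a == b;
}

void main() {
    auto t = SysTime(DateTime(2024, 1, 1, 12, 0, 0));
    assert(sameToTheSecond(t, t + 500.msecs));   // differ only below one second
    assert(!sameToTheSecond(t, t + 1500.msecs)); // differ by a whole second
}
```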
string itemSource = "database"; - - assert(item.type == ItemType.file); - if (exists(path)) { - if (isFile(path)) { - // can we actually read the local file? - if (readLocalFile(path)){ - // file is readable - SysTime localModifiedTime = timeLastModified(path).toUTC(); - SysTime itemModifiedTime = item.mtime; - // HACK: reduce time resolution to seconds before comparing + + // Does this item|file still exist on disk? + if (exists(localFilePath)) { + // Path exists locally, is this path a file? + if (isFile(localFilePath)) { + // Can we actually read the local file? + if (readLocalFile(localFilePath)){ + // File is readable + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + SysTime itemModifiedTime = dbItem.mtime; + // Reduce time resolution to seconds before comparing itemModifiedTime.fracSecs = Duration.zero; localModifiedTime.fracSecs = Duration.zero; if (localModifiedTime != itemModifiedTime) { - log.vlog("The file last modified time has changed"); - log.vdebug("The local item has a different modified time ", localModifiedTime, " when compared to ", itemSource, " modified time ", itemModifiedTime); - string eTag = item.eTag; + // The modified dates are different + addLogEntry("Local file time discrepancy detected: " ~ localFilePath, ["verbose"]); + addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["debug"]); - // perform file hash tests - has the content of the file changed? - if (!testFileHash(path, item)) { - log.vlog("The file content has changed"); - log.vdebug("The local item has a different hash when compared to ", itemSource, " item hash"); - write("Uploading modified file ", path, " ... "); - JSONValue response; - - if (!dryRun) { - // Get the file size - long thisFileSize = getSize(path); - // Are we using OneDrive Personal or OneDrive Business? - // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) - // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here - if (accountType == "personal"){ - // Original file upload logic - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUploadReplace(path, item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 404) { - // HTTP request returned status code 404 - the eTag provided does not exist - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 404 - eTag Issue' - gracefully handling error"); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - write("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Simple Upload Replace Failed - OneDrive eTag / cTag match issue (Personal Account)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request as a session"); - // Try upload as a session - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // upload done without error - writeln("done."); - } else { - writeln(""); - try { - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Session Upload Replace Failed - OneDrive eTag / cTag match issue (Personal Account)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // upload done without error - writeln("done."); - } + // Test the file hash + if (!testFileHash(localFilePath, dbItem)) { + // Is the local file 'newer' or 'older' (ie was an old file 'restored locally' by a different backup / replacement process?) 
+ if (localModifiedTime >= itemModifiedTime) { + // Local file is newer + if (!appConfig.getValueBool("download_only")) { + addLogEntry("The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive", ["verbose"]); + // Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check + databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath]; } else { - // OneDrive Business Account - // We need to always use a session to upload, but handle the changed file correctly - if (accountType == "business"){ - try { - // is this a zero-byte file? - if (thisFileSize == 0) { - // the file we are trying to upload as a session is a zero byte file - we cant use a session to upload or replace the file - // as OneDrive technically does not support zero byte files - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("Skip Reason: Microsoft OneDrive does not support 'zero-byte' files as a modified upload. Will upload as new file."); - // delete file on OneDrive - onedrive.deleteById(item.driveId, item.id, item.eTag); - // delete file from local database - itemdb.deleteById(item.driveId, item.id); - return; - } else { - if ((!syncBusinessFolders) || (item.driveId == defaultDriveId)) { - // For logging consistency - writeln(""); - // If we are not syncing Shared Business Folders, or this change is going to the 'users' default drive, handle normally - // Perform a normal session upload - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } else { - // If we are uploading to a shared business folder, there are a couple of corner cases here: - // 1. Shared Folder is a 'users' folder - // 2. Shared Folder is a 'SharePoint Library' folder, meaning we get hit by this stupidity: https://github.com/OneDrive/onedrive-api-docs/issues/935 - response = handleSharePointMetadataAdditionBug(item, path); - } - } - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - writeln("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Session Upload Replace Failed - OneDrive eTag / cTag match issue (Business Account)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. 
Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // Did the upload fail? - if (!uploadFailed){ - // upload done without error or failure - writeln("done."); - // As the session.upload includes the last modified time, save the response - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - - // OneDrive documentLibrary - if (accountType == "documentLibrary"){ - // is this a zero-byte file? - if (thisFileSize == 0) { - // the file we are trying to upload as a session is a zero byte file - we cant use a session to upload or replace the file - // as OneDrive technically does not support zero byte files - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("Skip Reason: Microsoft OneDrive does not support 'zero-byte' files as a modified upload. Will upload as new file."); - // delete file on OneDrive - onedrive.deleteById(item.driveId, item.id, item.eTag); - // delete file from local database - itemdb.deleteById(item.driveId, item.id); - return; - } else { - // Due to https://github.com/OneDrive/onedrive-api-docs/issues/935 Microsoft modifies all PDF, MS Office & HTML files with added XML content. It is a 'feature' of SharePoint. - // This means, as a session upload, on 'completion' the file is 'moved' and generates a 404 ...... - response = handleSharePointMetadataAdditionBug(item, path); - - // Did the upload fail? - if (!uploadFailed){ - // upload done without error or failure - writeln("done."); - // As the session.upload includes the last modified time, save the response - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - } + addLogEntry("The file content has changed locally and has a newer timestamp. The file will remain different to online file due to --download-only being used", ["verbose"]); } - - // Update etag with ctag from response - if ("cTag" in response) { - // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded via simple upload - eTag = response["cTag"].str; + } else { + // Local file is older - data recovery process? something else? + if (!appConfig.getValueBool("download_only")) { + addLogEntry("The file content has changed locally and file now has a older timestamp. Uploading this file to OneDrive may potentially cause data-loss online", ["verbose"]); + // Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check + databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath]; } else { - // Is there an eTag in the response? - if ("eTag" in response) { - // use the eTag from the response as there was no cTag - eTag = response["eTag"].str; + addLogEntry("The file content has changed locally and file now has a older timestamp. 
The file will remain different to online file due to --download-only being used", ["verbose"]); + } + } + } else { + // The file contents have not changed, but the modified timestamp has + addLogEntry("The last modified timestamp has changed however the file content has not changed", ["verbose"]); + + // Local file is newer .. are we in a --download-only situation? + if (!appConfig.getValueBool("download_only")) { + // Not a --download-only scenario + if (!dryRun) { + // Attempt to update the online date time stamp + // We need to use the correct driveId and itemId, especially if we are updating a OneDrive Business Shared File timestamp + if (dbItem.type == ItemType.file) { + // Not a remote file + // Log what is being done + addLogEntry("The local item has the same hash value as the item online - correcting timestamp online", ["verbose"]); + // Correct timestamp + uploadLastModifiedTime(dbItem, dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), dbItem.eTag); } else { - // no tag available - set to nothing - eTag = ""; + // Remote file, remote values need to be used, we may not even have permission to change timestamp, update local file + addLogEntry("The local item has the same hash value as the item online, however file is a OneDrive Business Shared File - correcting local timestamp", ["verbose"]); + addLogEntry("Calling setTimes() for this file: " ~ localFilePath, ["debug"]); + setTimes(localFilePath, dbItem.mtime, dbItem.mtime); } } - - // log that the modified file was uploaded successfully - log.fileOnly("Uploading modified file ", path, " ... done."); - - // update free space tracking if this is our drive id - if (item.driveId == defaultDriveId) { - // how much space is left on OneDrive after upload? - remainingFreeSpace = (remainingFreeSpace - thisFileSize); - log.vlog("Remaining free space on OneDrive: ", remainingFreeSpace); - } } else { - // we are --dry-run - simulate the file upload - writeln("done."); - response = createFakeResponse(path); - // Log action to log file - log.fileOnly("Uploading modified file ", path, " ... 
done."); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - return; - } - } - if (accountType == "personal"){ - // If Personal, call to update the modified time as stored on OneDrive - if (!dryRun) { - uploadLastModifiedTime(item.driveId, item.id, eTag, localModifiedTime.toUTC()); + // --download-only being used + addLogEntry("The local item has the same hash value as the item online - correcting local timestamp due to --download-only being used to ensure local file matches timestamp online", ["verbose"]); + if (!dryRun) { + addLogEntry("Calling setTimes() for this file: " ~ localFilePath, ["debug"]); + setTimes(localFilePath, dbItem.mtime, dbItem.mtime); + } } } } else { - log.vlog("The file has not changed"); + // The file has not changed + addLogEntry("The file has not changed", ["verbose"]); } } else { //The file is not readable - skipped - log.log("Skipping processing this file as it cannot be read (file permissions or file corruption): ", path); - uploadFailed = true; + addLogEntry("Skipping processing this file as it cannot be read (file permissions or file corruption): " ~ localFilePath); } } else { - log.vlog("The item was a file but now is a directory"); - uploadDeleteItem(item, path); - uploadCreateDir(path); + // The item was a file but now is a directory + addLogEntry("The item was a file but now is a directory", ["verbose"]); } } else { - // File does not exist locally + // File does not exist locally, but is in our database as a dbItem containing all the data was passed into this function // If we are in a --dry-run situation - this file may never have existed as we never downloaded it if (!dryRun) { // Not --dry-run situation - if (!cfg.getValueBool("monitor")) { - log.vlog("The file has been deleted locally"); - } else { - // Appropriate message as we are in --monitor mode - log.vlog("The file appears to have been deleted locally .. but we are running in --monitor mode. This may have been 'moved' on the local filesystem rather than being 'deleted'"); - log.vdebug("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped"); - } - // A moved file will be uploaded as 'new', delete the old file and reference - if (noRemoteDelete) { - // do not process remote file delete - log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } + addLogEntry("The file has been deleted locally", ["verbose"]); + // Add this to the array to handle post checking all database items + databaseItemsToDeleteOnline ~= [DatabaseItemsToDeleteOnline(dbItem, localFilePath)]; } else { - // We are in a --dry-run situation, file appears to have deleted locally - this file may never have existed as we never downloaded it .. - // Check if path does not exist in database - Item databaseItem; - if (!itemdb.selectByPath(path, defaultDriveId, databaseItem)) { - // file not found in database - log.vlog("The file has been deleted locally"); - if (noRemoteDelete) { - // do not process remote file delete - log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // file was found in the database - // Did we 'fake create it' as part of --dry-run ? 
- foreach (i; idsFaked) { - if (i[1] == item.id) { - log.vdebug("Matched faked file which is 'supposed' to exist but not created due to --dry-run use"); - log.vlog("The file has not changed"); - return; - } - } - // item.id did not match a 'faked' download new file creation - log.vlog("The file has been deleted locally"); - if (noRemoteDelete) { - // do not process remote file delete - log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); + // We are in a --dry-run situation, file appears to have been deleted locally - this file may never have existed locally as we never downloaded it due to --dry-run + // Did we 'fake create it' as part of --dry-run ? + bool idsFakedMatch = false; + foreach (i; idsFaked) { + if (i[1] == dbItem.id) { + addLogEntry("Matched faked file which is 'supposed' to exist but not created due to --dry-run use", ["debug"]); + addLogEntry("The file has not changed", ["verbose"]); + idsFakedMatch = true; } } + if (!idsFakedMatch) { + // dbItem.id did not match a 'faked' download new file creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation + addLogEntry("The file has been deleted locally", ["verbose"]); + // Add this to the array to handle post checking all database items + databaseItemsToDeleteOnline ~= [DatabaseItemsToDeleteOnline(dbItem, localFilePath)]; + } } } } - private JSONValue handleSharePointMetadataAdditionBug(const ref Item item, const(string) path) - { - // Explicit function for handling https://github.com/OneDrive/onedrive-api-docs/issues/935 - JSONValue response; - // Handle certain file types differently - if ((extension(path) == ".txt") || (extension(path) == ".csv")) { - // .txt and .csv are unaffected by https://github.com/OneDrive/onedrive-api-docs/issues/935 - // For logging consistency - writeln(""); + // Perform the database consistency check on this directory item + void checkDirectoryDatabaseItemForConsistency(Item dbItem, string localFilePath) { + + // What is the source of this item data? + string itemSource = "database"; + + // Does this item|directory still exist on disk? + if (exists(localFilePath)) { + // Fix https://github.com/abraunegg/onedrive/issues/1915 try { - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return response; - } - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - writeln("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - uploadFailed = true; - return response; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Session Upload Replace Failed - OneDrive eTag / cTag match issue (Sharepoint Library)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return response; + if (!isDir(localFilePath)) { + addLogEntry("The item was a directory but now it is a file", ["verbose"]); + uploadDeletedItem(dbItem, localFilePath); + uploadNewFile(localFilePath); } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + // Directory still exists locally + addLogEntry("The directory has not changed", ["verbose"]); + // When we are using --single-directory, we use a the getChildren() call to get all children of a path, meaning all children are already traversed + // Thus, if we traverse the path of this directory .. we end up with double processing & log output .. which is not ideal + if (!singleDirectoryScope) { + // loop through the children + Item[] childrenFromDatabase = itemDB.selectChildren(dbItem.driveId, dbItem.id); + foreach (Item child; childrenFromDatabase) { + checkDatabaseItemForConsistency(child); + } + // Clear DB response array + childrenFromDatabase = []; + } } } catch (FileException e) { // display the error message - writeln("skipped."); displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; } - // upload done without error - writeln("done."); } else { - // Due to https://github.com/OneDrive/onedrive-api-docs/issues/935 Microsoft modifies all PDF, MS Office & HTML files with added XML content. It is a 'feature' of SharePoint. - // This means, as a session upload, on 'completion' the file is 'moved' and generates a 404 ...... - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("Skip Reason: Microsoft Sharepoint 'enrichment' after upload issue"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); - // Delete record from the local database - file will be uploaded as a new file - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return response; - } - - // return a JSON response so that it can be used and saved - return response; + // Directory does not exist locally, but it is in our database as a dbItem containing all the data was passed into this function + // If we are in a --dry-run situation - this directory may never have existed as we never created it + if (!dryRun) { + // Not --dry-run situation + if (!appConfig.getValueBool("monitor")) { + // Not in --monitor mode + addLogEntry("The directory has been deleted locally", ["verbose"]); + } else { + // Appropriate message as we are in --monitor mode + addLogEntry("The directory appears to have been deleted locally .. but we are running in --monitor mode. 
This may have been 'moved' on the local filesystem rather than being 'deleted'", ["verbose"]);
+ addLogEntry("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped", ["debug"]);
+ }
+ // A moved directory will be uploaded as 'new', delete the old directory and database reference
+ // Add this to the array to handle post checking all database items
+ databaseItemsToDeleteOnline ~= [DatabaseItemsToDeleteOnline(dbItem, localFilePath)];
+ } else {
+ // We are in a --dry-run situation, directory appears to have been deleted locally - this directory may never have existed locally as we never created it due to --dry-run
+ // Did we 'fake create it' as part of --dry-run ?
+ bool idsFakedMatch = false;
+ foreach (i; idsFaked) {
+ if (i[1] == dbItem.id) {
+ addLogEntry("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use", ["debug"]);
+ addLogEntry("The directory has not changed", ["verbose"]);
+ idsFakedMatch = true;
+ }
+ }
+ if (!idsFakedMatch) {
+ // dbItem.id did not match a 'faked' download new directory creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation
+ addLogEntry("The directory has been deleted locally", ["verbose"]);
+ // Add this to the array to handle post checking all database items
+ databaseItemsToDeleteOnline ~= [DatabaseItemsToDeleteOnline(dbItem, localFilePath)];
+ } else {
+ // When we are using --single-directory, we use the getChildren() call to get all children of a path, meaning all children are already traversed
+ // Thus, if we traverse the path of this directory .. we end up with double processing & log output .. which is not ideal
+ if (!singleDirectoryScope) {
+ // loop through the children
+ Item[] childrenFromDatabase = itemDB.selectChildren(dbItem.driveId, dbItem.id);
+ foreach (Item child; childrenFromDatabase) {
+ checkDatabaseItemForConsistency(child);
+ }
+ // Clear DB response array
+ childrenFromDatabase = [];
+ }
+ }
+ }
+ }
 }
- 
- // upload new items to OneDrive
- private void uploadNewItems(const(string) path)
- {
- static import std.utf;
- import std.range : walkLength;
- import std.uni : byGrapheme;
- // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders
- // If the path is greater than allowed characters, then one drive will return a '400 - Bad Request'
- // Need to ensure that the URI is encoded before the check is made:
- // - 400 Character Limit for OneDrive Business / Office 365
- // - 430 Character Limit for OneDrive Personal
- long maxPathLength = 0;
- long pathWalkLength = 0;
+
+ // Does this local path (directory or file) conform with the Microsoft Naming Restrictions? It needs to conform otherwise we cannot create the directory or upload the file. 
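Worth noting before the naming-restriction checks that follow: the consistency pass never deletes anything online inline. It only appends (dbItem, localFilePath) pairs to databaseItemsToDeleteOnline, and that queue is drained in one place after the full scan (see the post-processing earlier in this diff). Reduced to a sketch, reusing the diff's struct name but with stand-in types:

```d
import std.stdio : writeln;

struct Item { string driveId; string id; }
struct DatabaseItemsToDeleteOnline { Item dbItem; string localFilePath; }

void main() {
    DatabaseItemsToDeleteOnline[] databaseItemsToDeleteOnline;

    // During the scan: only record what is missing locally
    databaseItemsToDeleteOnline ~= DatabaseItemsToDeleteOnline(Item("d1", "i1"), "docs/gone.txt");

    // After the scan: act on the queue in one place (stand-in for uploadDeletedItem)
    foreach (entry; databaseItemsToDeleteOnline)
        writeln("delete online: ", entry.dbItem.id, " (", entry.localFilePath, ")");
    databaseItemsToDeleteOnline = []; // release the array, as the client does
}
```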
+ bool checkPathAgainstMicrosoftNamingRestrictions(string localFilePath) { + + // Check if the given path violates certain Microsoft restrictions and limitations + // Return a true|false response + bool invalidPath = false; - // Configure maxPathLength based on account type - if (accountType == "personal"){ - // Personal Account - maxPathLength = 430; - } else { - // Business Account / Office365 - maxPathLength = 400; + // Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders + if (!invalidPath) { + if (!isValidName(localFilePath)) { // This will return false if this is not a valid name according to the OneDrive API specifications + addLogEntry("Skipping item - invalid name (Microsoft Naming Convention): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; + } } - // A short lived file that has disappeared will cause an error - is the path valid? - if (!exists(path)) { - log.log("Skipping item - path has disappeared: ", path); - return; + // Check path for bad whitespace items + if (!invalidPath) { + if (containsBadWhiteSpace(localFilePath)) { // This will return true if this contains a bad whitespace item + addLogEntry("Skipping item - invalid name (Contains an invalid whitespace item): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; + } } - // Calculate the path length by walking the path, catch any UTF-8 character errors - // https://github.com/abraunegg/onedrive/issues/487 - // https://github.com/abraunegg/onedrive/issues/1192 - try { - pathWalkLength = path.byGrapheme.walkLength; - } catch (std.utf.UTFException e) { - // path contains characters which generate a UTF exception - log.vlog("Skipping item - invalid UTF sequence: ", path); - log.vdebug(" Error Reason:", e.msg); - return; + // Check path for HTML ASCII Codes + if (!invalidPath) { + if (containsASCIIHTMLCodes(localFilePath)) { // This will return true if this contains HTML ASCII Codes + addLogEntry("Skipping item - invalid name (Contains HTML ASCII Code): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; + } } - // check the std.encoding of the path - // https://github.com/skilion/onedrive/issues/57 - // https://github.com/abraunegg/onedrive/issues/487 - if(!isValid(path)) { - // Path is not valid according to https://dlang.org/phobos/std_encoding.html - log.vlog("Skipping item - invalid character encoding sequence: ", path); - return; + // Validate that the path is a valid UTF-16 encoded path + if (!invalidPath) { + if (!isValidUTF16(localFilePath)) { // This will return true if this is a valid UTF-16 encoded path, so we are checking for 'false' as response + addLogEntry("Skipping item - invalid name (Invalid UTF-16 encoded item): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; + } } - // Is the path length is less than maxPathLength - if(pathWalkLength < maxPathLength){ - // skip dot files if configured - if (cfg.getValueBool("skip_dotfiles")) { - if (isDotFile(path)) { - log.vlog("Skipping item - .file or .folder: ", path); - return; - } + // Check path for ASCII Control Codes + if (!invalidPath) { + if (containsASCIIControlCodes(localFilePath)) { // This will return true if this contains ASCII Control Codes + addLogEntry("Skipping item - invalid name (Contains ASCII Control Codes): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; } - + } + + // Return if this is a valid path + return invalidPath; + } + + // Does this local path (directory or file) get excluded from any operation based on any client side 
filtering rules? + bool checkPathAgainstClientSideFiltering(string localFilePath) { + + // Check the path against client side filtering rules + // - check_nosync + // - skip_dotfiles + // - skip_symlinks + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + // Return a true|false response + + bool clientSideRuleExcludesPath = false; + + // does the path exist? + if (!exists(localFilePath)) { + // path does not exist - we cant review any client side rules on something that does not exist locally + return clientSideRuleExcludesPath; + } + + // - check_nosync + if (!clientSideRuleExcludesPath) { // Do we need to check for .nosync? Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - if (exists(path ~ "/.nosync")) { - log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", path); - return; + if (appConfig.getValueBool("check_nosync")) { + if (exists(localFilePath ~ "/.nosync")) { + addLogEntry("Skipping item - .nosync found & --check-for-nosync enabled: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } } - + } + + // - skip_dotfiles + if (!clientSideRuleExcludesPath) { + // Do we need to check skip dot files if configured + if (appConfig.getValueBool("skip_dotfiles")) { + if (isDotFile(localFilePath)) { + addLogEntry("Skipping item - .file or .folder: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; + } + } + } + + // - skip_symlinks + if (!clientSideRuleExcludesPath) { // Is the path a symbolic link - if (isSymlink(path)) { + if (isSymlink(localFilePath)) { // if config says so we skip all symlinked items - if (cfg.getValueBool("skip_symlinks")) { - log.vlog("Skipping item - skip symbolic links configured: ", path); - return; - + if (appConfig.getValueBool("skip_symlinks")) { + addLogEntry("Skipping item - skip symbolic links configured: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } // skip unexisting symbolic links - else if (!exists(readLink(path))) { + else if (!exists(readLink(localFilePath))) { // reading the symbolic link failed - is the link a relative symbolic link // drwxrwxr-x. 2 alex alex 46 May 30 09:16 . // drwxrwxr-x. 3 alex alex 35 May 30 09:14 .. 
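Each rule in checkPathAgainstClientSideFiltering runs only while no earlier rule has matched; the repeated `if (!clientSideRuleExcludesPath)` guards form a first-match-wins chain. The same shape, reduced to a compilable sketch with stand-in rules:

```d
import std.algorithm.searching : startsWith, endsWith;
import std.stdio : writeln;

alias Rule = bool function(string);

// First matching rule excludes the path; later rules are never consulted.
bool excludedByRules(string path, Rule[] rules) {
    bool clientSideRuleExcludesPath = false;
    foreach (rule; rules) {
        if (!clientSideRuleExcludesPath) clientSideRuleExcludesPath = rule(path);
    }
    return clientSideRuleExcludesPath;
}

void main() {
    Rule[] rules = [
        (string p) => p.startsWith("."),  // stand-in for skip_dotfiles
        (string p) => p.endsWith(".tmp"), // stand-in for skip_file
    ];
    writeln(excludedByRules(".cache", rules));    // true
    writeln(excludedByRules("notes.txt", rules)); // false
}
```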
@@ -4368,7 +3466,7 @@ final class SyncEngine // // absolute links will be able to be read, but 'relative' links will fail, because they cannot be read based on the current working directory 'sync_dir' string currentSyncDir = getcwd(); - string fullLinkPath = buildNormalizedPath(absolutePath(path)); + string fullLinkPath = buildNormalizedPath(absolutePath(localFilePath)); string fileName = baseName(fullLinkPath); string parentLinkPath = dirName(fullLinkPath); // test if this is a 'relative' symbolic link @@ -4379,1834 +3477,3746 @@ final class SyncEngine chdir(currentSyncDir); // results if (relativeLinkTest) { - log.vdebug("Not skipping item - symbolic link is a 'relative link' to target ('", relativeLink, "') which can be supported: ", path); + addLogEntry("Not skipping item - symbolic link is a 'relative link' to target ('" ~ relativeLink ~ "') which can be supported: " ~ localFilePath, ["debug"]); } else { - log.logAndNotify("Skipping item - invalid symbolic link: ", path); - return; + addLogEntry("Skipping item - invalid symbolic link: "~ localFilePath, ["info", "notify"]); + clientSideRuleExcludesPath = true; } } } - - // Check for bad whitespace items - if (!containsBadWhiteSpace(path)) { - log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", path); - return; - } - - // Check for HTML ASCII Codes as part of file name - if (!containsASCIIHTMLCodes(path)) { - log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", path); - return; - } - - // Is this item excluded by user configuration of skip_dir or skip_file? - if (path != ".") { - if (isDir(path)) { - log.vdebug("Checking local path: ", path); + } + + // Is this item excluded by user configuration of skip_dir or skip_file? + if (!clientSideRuleExcludesPath) { + if (localFilePath != ".") { + // skip_dir handling + if (isDir(localFilePath)) { + addLogEntry("Checking local path: " ~ localFilePath, ["debug"]); + // Only check path if config is != "" - if (cfg.getValueString("skip_dir") != "") { + if (appConfig.getValueString("skip_dir") != "") { // The path that needs to be checked needs to include the '/' // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched - if (selectiveSync.isDirNameExcluded(path.strip('.'))) { - log.vlog("Skipping item - excluded by skip_dir config: ", path); - return; - } - } - - // In the event that this 'new item' is actually a OneDrive Business Shared Folder - // however the user may have omitted --sync-shared-folders, thus 'technically' this is a new item - // for this account OneDrive root, however this then would cause issues if --sync-shared-folders - // is added again after this sync - if ((exists(cfg.businessSharedFolderFilePath)) && (!syncBusinessFolders)){ - // business_shared_folders file exists, but we are not using / syncing them - // The file contents can only contain 'folder' names, so we need to strip './' from any path we are checking - if(selectiveSync.isSharedFolderMatched(strip(path,"./"))){ - // path detected as a 'new item' is matched as a path in business_shared_folders - log.vlog("Skipping item - excluded as included in business_shared_folders config: ", path); - log.vlog("To sync this directory to your OneDrive Account update your business_shared_folders config"); - return; + if (selectiveSync.isDirNameExcluded(localFilePath.strip('.'))) { + addLogEntry("Skipping item - excluded by skip_dir config: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } } } - 
if (isFile(path)) { - log.vdebug("Checking file: ", path); + // skip_file handling + if (isFile(localFilePath)) { + addLogEntry("Checking file: " ~ localFilePath, ["debug"]); + // The path that needs to be checked needs to include the '/' // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched - if (selectiveSync.isFileNameExcluded(path.strip('.'))) { - log.vlog("Skipping item - excluded by skip_file config: ", path); - return; + if (selectiveSync.isFileNameExcluded(localFilePath.strip('.'))) { + addLogEntry("Skipping item - excluded by skip_file config: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } } - - // is sync_list configured + } + } + + // Is this item excluded by user configuration of sync_list? + if (!clientSideRuleExcludesPath) { + if (localFilePath != ".") { if (syncListConfigured) { // sync_list configured and in use - if (selectiveSync.isPathExcludedViaSyncList(path)) { - if ((isFile(path)) && (cfg.getValueBool("sync_root_files")) && (rootName(path.strip('.').strip('/')) == "")) { - log.vdebug("Not skipping path due to sync_root_files inclusion: ", path); + if (selectiveSync.isPathExcludedViaSyncList(localFilePath)) { + if ((isFile(localFilePath)) && (appConfig.getValueBool("sync_root_files")) && (rootName(localFilePath.strip('.').strip('/')) == "")) { + addLogEntry("Not skipping path due to sync_root_files inclusion: " ~ localFilePath, ["debug"]); } else { - string userSyncList = cfg.configDirName ~ "/sync_list"; - if (exists(userSyncList)){ + if (exists(appConfig.syncListFilePath)){ // skipped most likely due to inclusion in sync_list - log.vlog("Skipping item - excluded by sync_list config: ", path); - return; + addLogEntry("Skipping item - excluded by sync_list config: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } else { // skipped for some other reason - log.vlog("Skipping item - path excluded by user config: ", path); - return; + addLogEntry("Skipping item - path excluded by user config: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } } } } } - - // Check against Microsoft OneDrive restriction and limitations about Windows naming files - if (!isValidName(path)) { - log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", path); - return; - } - - // If we are in a --dry-run scenario, we may have renamed a folder - but it is technically not renamed locally - // Thus, that entire path may be attemtped to be uploaded as new data to OneDrive - if (dryRun) { - // check the pathsRenamed array for this path - // if any match - we need to exclude this path - foreach (thisRenamedPath; pathsRenamed) { - log.vdebug("Renamed Path to evaluate: ", thisRenamedPath); - // Can we find 'thisRenamedPath' in the given 'path' - if (canFind(path, thisRenamedPath)) { - log.vdebug("Renamed Path MATCH - DONT UPLOAD AS NEW"); - return; + } + + // Check if this is excluded by a user set maximum filesize to upload + if (!clientSideRuleExcludesPath) { + if (isFile(localFilePath)) { + if (fileSizeLimit != 0) { + // Get the file size + ulong thisFileSize = getSize(localFilePath); + if (thisFileSize >= fileSizeLimit) { + addLogEntry("Skipping item - excluded by skip_size config: " ~ localFilePath ~ " (" ~ to!string(thisFileSize/2^^20) ~ " MB)", ["verbose"]); } } } + } + + return clientSideRuleExcludesPath; + } + + // Does this JSON item (as received from OneDrive API) get excluded from any operation based on any client side filtering rules? 
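One observation on the skip_size branch just above: as shown in this diff it logs the exclusion but does not appear to set clientSideRuleExcludesPath. The sketch below (a stand-in helper, not the client's code) sets the flag, which is presumably the intent; the limit itself is compared in bytes and only converted to MB for the log line via `thisFileSize/2^^20`:

```d
import std.conv : to;
import std.stdio : writeln;

// skip_size gate: exclude files at or above the configured limit (in bytes).
bool excludedBySize(ulong thisFileSize, ulong fileSizeLimit) {
    if (fileSizeLimit != 0 && thisFileSize >= fileSizeLimit) {
        writeln("Skipping item - excluded by skip_size config (",
                to!string(thisFileSize / 2^^20), " MB)");
        return true; // flag the exclusion, not just log it
    }
    return false;
}

void main() {
    writeln(excludedBySize(300 * 2^^20, 250 * 2^^20)); // true  (300 MB >= 250 MB)
    writeln(excludedBySize(10 * 2^^20, 0));            // false (limit disabled)
}
```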
+ // This function is used when we are fetching objects from the OneDrive API using a /children query to help speed up what object we query or when checking OneDrive Business Shared Files + bool checkJSONAgainstClientSideFiltering(JSONValue onedriveJSONItem) { - // We want to upload this new local data - if (isDir(path)) { - Item item; - bool pathFoundInDB = false; - foreach (driveId; driveIDsArray) { - if (itemdb.selectByPath(path, driveId, item)) { - pathFoundInDB = true; - } - } - - // Was the path found in the database? - if (!pathFoundInDB) { - // Path not found in database when searching all drive id's - if (!cleanupLocalFiles) { - // --download-only --cleanup-local-files not used - uploadCreateDir(path); - } else { - // we need to clean up this directory - log.log("Removing local directory as --download-only & --cleanup-local-files configured"); - // Remove any children of this path if they still exist - // Resolve 'Directory not empty' error when deleting local files - try { - foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { - // what sort of child is this? - if (isDir(child.name)) { - log.log("Removing local directory: ", child.name); - } else { - log.log("Removing local file: ", child.name); - } - // are we in a --dry-run scenario? - if (!dryRun) { - // No --dry-run ... process local delete - try { - attrIsDir(child.linkAttributes) ? rmdir(child.name) : remove(child.name); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } - // Remove the path now that it is empty of children - log.log("Removing local directory: ", path); - // are we in a --dry-run scenario? - if (!dryRun) { - // No --dry-run ... process local delete - try { - rmdirRecurse(path); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return; + bool clientSideRuleExcludesPath = false; + + // Check the path against client side filtering rules + // - check_nosync (MISSING) + // - skip_dotfiles (MISSING) + // - skip_symlinks (MISSING) + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + // Return a true|false response + + // Use the JSON elements rather can computing a DB struct via makeItem() + string thisItemId = onedriveJSONItem["id"].str; + string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + string thisItemParentId = onedriveJSONItem["parentReference"]["id"].str; + string thisItemName = onedriveJSONItem["name"].str; + + // Is this parent is in the database + bool parentInDatabase = false; + + // Calculate if the Parent Item is in the database so that it can be re-used + parentInDatabase = itemDB.idInLocalDatabase(thisItemDriveId, thisItemParentId); + + // Check if this is excluded by config option: skip_dir + if (!clientSideRuleExcludesPath) { + // Is the item a folder? 
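As the lookups above show, checkJSONAgainstClientSideFiltering reads the identifiers straight out of the API JSON rather than first building a database item via makeItem(). Those four lookups in a self-contained std.json sketch (the payload is invented):

```d
import std.json : parseJSON;
import std.stdio : writeln;

void main() {
    auto onedriveJSONItem = parseJSON(`{
        "id": "01ABCDEF",
        "name": "Documents",
        "parentReference": { "driveId": "b!xyz123", "id": "01ROOT" }
    }`);

    // The same four lookups the filtering function performs
    string thisItemId       = onedriveJSONItem["id"].str;
    string thisItemDriveId  = onedriveJSONItem["parentReference"]["driveId"].str;
    string thisItemParentId = onedriveJSONItem["parentReference"]["id"].str;
    string thisItemName     = onedriveJSONItem["name"].str;

    writeln(thisItemName, " (", thisItemId, ") under ", thisItemParentId,
            " in drive ", thisItemDriveId);
}
```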
+ if (isItemFolder(onedriveJSONItem)) { + // Only check path if config is != "" + if (!appConfig.getValueString("skip_dir").empty) { + // work out the 'snippet' path where this folder would be created + string simplePathToCheck = ""; + string complexPathToCheck = ""; + string matchDisplay = ""; + + if (hasParentReference(onedriveJSONItem)) { + // we need to workout the FULL path for this item + // simple path + if (("name" in onedriveJSONItem["parentReference"]) != null) { + simplePathToCheck = onedriveJSONItem["parentReference"]["name"].str ~ "/" ~ onedriveJSONItem["name"].str; + } else { + simplePathToCheck = onedriveJSONItem["name"].str; + } + addLogEntry("skip_dir path to check (simple): " ~ simplePathToCheck, ["debug"]); + + // complex path + if (parentInDatabase) { + // build up complexPathToCheck + //complexPathToCheck = buildNormalizedPath(newItemPath); + complexPathToCheck = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + } else { + addLogEntry("Parent details not in database - unable to compute complex path to check", ["debug"]); } + if (!complexPathToCheck.empty) { + addLogEntry("skip_dir path to check (complex): " ~ complexPathToCheck, ["debug"]); + } + } else { + simplePathToCheck = onedriveJSONItem["name"].str; } - } - - // recursively traverse children - // the above operation takes time and the directory might have - // disappeared in the meantime - if (!exists(path)) { - if (!cleanupLocalFiles) { - // --download-only --cleanup-local-files not used - log.vlog("Directory disappeared during upload: ", path); + + // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder + // then isDirNameExcluded matching will not work + if (simplePathToCheck.canFind(":")) { + addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]); + simplePathToCheck = processPathToRemoveRootReference(simplePathToCheck); } - return; - } - - // Try and access the directory and any path below - try { - auto entries = dirEntries(path, SpanMode.shallow, false); - foreach (DirEntry entry; entries) { - string thisPath = entry.name; - uploadNewItems(thisPath); + if (complexPathToCheck.canFind(":")) { + addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]); + complexPathToCheck = processPathToRemoveRootReference(complexPathToCheck); } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return; - } - } else { - // path is not a directory, is it a valid file? - // pipes - whilst technically valid files, are not valid for this client - // prw-rw-r--. 1 user user 0 Jul 7 05:55 my_pipe - if (isFile(path)) { - // Path is a valid file - bool fileFoundInDB = false; - Item item; - // Search the database for this file - foreach (driveId; driveIDsArray) { - if (itemdb.selectByPath(path, driveId, item)) { - fileFoundInDB = true; + // OK .. what checks are we doing? 
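+					// Illustrative example: for a folder 'Reports' with parent 'Finance',
+					// simplePathToCheck = "Finance/Reports" (parent reference name + item name),
+					// while complexPathToCheck could resolve to "Clients/ACME/Finance/Reports"
+					// when the full parent chain is available in the local database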
+ if ((!simplePathToCheck.empty) && (complexPathToCheck.empty)) { + // just a simple check + addLogEntry("Performing a simple check only", ["debug"]); + clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck); + } else { + // simple and complex + addLogEntry("Performing a simple then complex path match if required", ["debug"]); + + // simple first + addLogEntry("Performing a simple check first", ["debug"]); + clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck); + matchDisplay = simplePathToCheck; + if (!clientSideRuleExcludesPath) { + addLogEntry("Simple match was false, attempting complex match", ["debug"]); + // simple didnt match, perform a complex check + clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(complexPathToCheck); + matchDisplay = complexPathToCheck; } } - - // Was the file found in the database? - if (!fileFoundInDB) { - // File not found in database when searching all drive id's - // Do we upload the file or clean up the file? - if (!cleanupLocalFiles) { - // --download-only --cleanup-local-files not used - uploadNewFile(path); - // Did the upload fail? - if (!uploadFailed) { - // Upload did not fail - // Issue #763 - Delete local files after sync handling - // are we in an --upload-only & --remove-source-files scenario? - if ((uploadOnly) && (localDeleteAfterUpload)) { - // Log that we are deleting a local item - log.log("Removing local file as --upload-only & --remove-source-files configured"); - // are we in a --dry-run scenario? - log.vdebug("Removing local file: ", path); - if (!dryRun) { - // No --dry-run ... process local file delete - safeRemove(path); - } - } - } - } else { - // we need to clean up this file - log.log("Removing local file as --download-only & --cleanup-local-files configured"); - // are we in a --dry-run scenario? - log.log("Removing local file: ", path); - if (!dryRun) { - // No --dry-run ... process local file delete - safeRemove(path); - } - } + // End Result + addLogEntry("skip_dir exclude result (directory based): " ~ to!string(clientSideRuleExcludesPath), ["debug"]); + if (clientSideRuleExcludesPath) { + // This path should be skipped + addLogEntry("Skipping item - excluded by skip_dir config: " ~ matchDisplay, ["verbose"]); } - } else { - // path is not a valid file - log.log("Skipping item - item is not a valid file: ", path); } } - } else { - // This path was skipped - why? - log.log("Skipping item '", path, "' due to the full path exceeding ", maxPathLength, " characters (Microsoft OneDrive limitation)"); } - } - - // create new directory on OneDrive - private void uploadCreateDir(const(string) path) - { - log.vlog("OneDrive Client requested to create remote path: ", path); - - JSONValue onedrivePathDetails; - Item parent; - - // Was the path entered the root path? - if (path != "."){ - // What parent path to use? - string parentPath = dirName(path); // will be either . 
or something else - if (parentPath == "."){ - // Assume this is a new 'local' folder in the users configured sync_dir - // Use client defaults - parent.id = defaultRootId; // Should give something like 12345ABCDE1234A1!101 - parent.driveId = defaultDriveId; // Should give something like 12345abcde1234a1 - } else { - // Query the database using each of the driveId's we are using - foreach (driveId; driveIDsArray) { - // Query the database for this parent path using each driveId - Item dbResponse; - if(itemdb.selectByPathWithoutRemote(parentPath, driveId, dbResponse)){ - // parent path was found in the database - parent = dbResponse; - } - } - } - - // If this is still null or empty - we cant query the database properly later on - // Query OneDrive API for parent details - if ((parent.driveId == "") && (parent.id == "")){ - try { - log.vdebug("Attempting to query OneDrive for this parent path: ", parentPath); - onedrivePathDetails = onedrive.getPathDetails(parentPath); - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = onedrive.getPathDetails(parentPath); generated a OneDriveException"); - // exception - set onedriveParentRootDetails to a blank valid JSON - onedrivePathDetails = parseJSON("{}"); - if (e.httpStatusCode == 404) { - // Parent does not exist ... need to create parent - log.vdebug("Parent path does not exist: ", parentPath); - uploadCreateDir(parentPath); - } + + // Check if this is excluded by config option: skip_file + if (!clientSideRuleExcludesPath) { + // is the item a file ? + if (isFileItem(onedriveJSONItem)) { + // JSON item is a file + + // skip_file can contain 4 types of entries: + // - wildcard - *.txt + // - text + wildcard - name*.txt + // - full path + combination of any above two - /path/name*.txt + // - full path to file - /path/to/file.txt + + string exclusionTestPath = ""; + + // is the parent id in the database? + if (parentInDatabase) { + // parent id is in the database, so we can try and calculate the full file path + string jsonItemPath = ""; - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
-					handleOneDriveThrottleRequest();
-					// Retry original request by calling function again to avoid replicating any further error handling
-					log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadCreateDir(path);");
-					uploadCreateDir(path);
-					// return back to original call
-					return;
+				// Compute this item path & need the full path for this file
+				jsonItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName;
+				// Log the calculation
+				addLogEntry("New Item calculated full path is: " ~ jsonItemPath, ["debug"]);
+				
+				// The path that needs to be checked needs to include the '/'
+				// This is because the user may have specified an exclusive path in skip_file: '/path/file' - that is what must be matched
+				// However, as 'path' is used throughout, use a temporary variable for this modified value when performing the exclusion checks
+				if (!startsWith(jsonItemPath, "/")){
+					// Add '/' to the path
+					exclusionTestPath = '/' ~ jsonItemPath;
				}
-				if (e.httpStatusCode >= 500) {
-					// OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged
-					return;
+				// what are we checking
+				addLogEntry("skip_file item to check (full calculated path): " ~ exclusionTestPath, ["debug"]);
+			} else {
+				// parent not in database, we can only check using this JSON item's name
+				if (!startsWith(thisItemName, "/")){
+					// Add '/' to the path
+					exclusionTestPath = '/' ~ thisItemName;
				}
+				
+				// what are we checking
+				addLogEntry("skip_file item to check (file name only - parent path not in database): " ~ exclusionTestPath, ["debug"]);
			}
-			// configure the parent item data
-			if (hasId(onedrivePathDetails) && hasParentReference(onedrivePathDetails)){
-				log.vdebug("Parent path found, configuring parent item");
-				parent.id = onedrivePathDetails["id"].str; // This item's ID. Should give something like 12345ABCDE1234A1!101
-				parent.driveId = onedrivePathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1
-			} else {
-				// OneDrive API query failed
-				// Assume client defaults
-				log.vdebug("Parent path could not be queried, using OneDrive account defaults");
-				parent.id = defaultRootId;  // Should give something like 12345ABCDE1234A1!101
-				parent.driveId = defaultDriveId;  // Should give something like 12345abcde1234a1
+			// Perform the 'skip_file' evaluation (covers both the full path and file name only cases above)
+			clientSideRuleExcludesPath = selectiveSync.isFileNameExcluded(exclusionTestPath);
+			addLogEntry("Result: " ~ to!string(clientSideRuleExcludesPath), ["debug"]);
+			
+			if (clientSideRuleExcludesPath) {
+				// This path should be skipped
+				addLogEntry("Skipping item - excluded by skip_file config: " ~ exclusionTestPath, ["verbose"]);
			}
		}
-		
-		JSONValue response;
-		// test if the path we are going to create already exists on OneDrive
-		try {
-			log.vdebug("Attempting to query OneDrive for this path: ", path);
-			response = onedrive.getPathDetailsByDriveId(parent.driveId, path);
-		} catch (OneDriveException e) {
-			log.vdebug("response = onedrive.getPathDetails(path); generated a OneDriveException");
-			if (e.httpStatusCode == 404) {
-				// The directory was not found on the drive id we queried
-				log.vlog("The requested directory to create was not found on OneDrive - creating remote directory: ", path);
-				
-				if (!dryRun) {
-					// Perform the database lookup - is the parent in the database?
-					if (!itemdb.selectByPath(dirName(path), parent.driveId, parent)) {
-						// parent is not in the database
-						log.vdebug("Parent path is not in the database - need to add it: ", dirName(path));
-						uploadCreateDir(dirName(path));
-					}
+	}
+	
+	// Check if this is included or excluded by use of sync_list
+	if (!clientSideRuleExcludesPath) {
+		// No need to process anything against sync_list if it has not been configured
+		if (syncListConfigured) {
+			// Compute the item path if empty - to check sync_list we need an actual path to check
+			
+			// What is the path of the new item
+			string newItemPath;
+			
+			// Is the parent in the database? If not, we cannot compute the full path based on the database entries
+			// In a --resync scenario - the database is empty
+			if (parentInDatabase) {
+				// Calculate this item's path based on database entries
+				newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName;
+			} else {
+				// parent not in the database
+				if (("path" in onedriveJSONItem["parentReference"]) != null) {
+					// If there is a parent reference path, try and use it
+					string selfBuiltPath = onedriveJSONItem["parentReference"]["path"].str ~ "/" ~ onedriveJSONItem["name"].str;
-					// Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us?
-					if (defaultDriveId == parent.driveId){
-						// enforce check of parent path. if the above was triggered, the below will generate a sync retry and will now be sucessful
-						enforce(itemdb.selectByPath(dirName(path), parent.driveId, parent), "The parent item id is not in the database");
-					} else {
-						log.vdebug("Parent drive ID is not our drive ID - parent most likely a shared folder");
+					// Check for ':' and split if present
+					auto splitIndex = selfBuiltPath.indexOf(":");
+					if (splitIndex != -1) {
+						// Keep only the part after ':'
+						selfBuiltPath = selfBuiltPath[splitIndex + 1 .. $];
					}
-					JSONValue driveItem = [
-							"name": JSONValue(baseName(path)),
-							"folder": parseJSON("{}")
-					];
-					
-					// Submit the creation request
-					// Fix for https://github.com/skilion/onedrive/issues/356
-					try {
-						// Attempt to create a new folder on the configured parent driveId & parent id
-						response = onedrive.createById(parent.driveId, parent.id, driveItem);
-					} catch (OneDriveException e) {
-						if (e.httpStatusCode == 409) {
-							// OneDrive API returned a 404 (above) to say the directory did not exist
-							// but when we attempted to create it, OneDrive responded that it now already exists
-							log.vlog("OneDrive reported that ", path, " already exists .. OneDrive API race condition");
-							return;
-						} else {
-							// some other error from OneDrive was returned - display what it is
-							log.error("OneDrive generated an error when creating this path: ", path);
-							displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
-							return;
-						}
-					}
-					// Is the response a valid JSON object - validation checking done in saveItem
-					saveItem(response);
+					// Set newItemPath to the self built path
+					newItemPath = selfBuiltPath;
				} else {
-					// Simulate a successful 'directory create' & save it to the dryRun database copy
-					// The simulated response has to pass 'makeItem' as part of saveItem
-					auto fakeResponse = createFakeResponse(path);
-					saveItem(fakeResponse);
+					// no parent reference path available in provided JSON
+					newItemPath = thisItemName;
				}
-				
-				log.vlog("Successfully created the remote directory ", path, " on OneDrive");
-				return;
			}
-			if (e.httpStatusCode == 429) {
-				// HTTP request returned status code 429 (Too Many Requests).
We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadCreateDir(path);"); - uploadCreateDir(path); - // return back to original call - return; + // Check for HTML entities (e.g., '%20' for space) in newItemPath + if (containsURLEncodedItems(newItemPath)) { + addLogEntry("CAUTION: The JSON element transmitted by the Microsoft OneDrive API includes HTML URL encoded items, which may complicate pattern matching and potentially lead to synchronisation problems for this item."); + addLogEntry("WORKAROUND: An alternative solution could be to change the name of this item through the online platform: " ~ newItemPath, ["verbose"]); + addLogEntry("See: https://github.com/OneDrive/onedrive-api-docs/issues/1765 for further details", ["verbose"]); } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + // Update newItemPath + if(newItemPath[0] == '/') { + newItemPath = newItemPath[1..$]; } - } - - // response from OneDrive has to be a valid JSON object - if (response.type() == JSONType.object){ - // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file - // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, - // even though some file systems (such as a POSIX-compliant file system) may consider them as different. - // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior. - - if (response["name"].str == baseName(path)){ - // OneDrive 'name' matches local path name - log.vlog("The requested directory to create was found on OneDrive - skipping creating the directory: ", path ); - // Check that this path is in the database - if (!itemdb.selectById(parent.driveId, parent.id, parent)){ - // parent for 'path' is NOT in the database - log.vlog("The parent for this path is not in the local database - need to add parent to local database"); - parentPath = dirName(path); - // add the parent into the database - uploadCreateDir(parentPath); - // save this child item into the database - log.vlog("The parent for this path has been added to the local database - adding requested path (", path ,") to database"); - if (!dryRun) { - // save the live data - saveItem(response); - } else { - // need to fake this data - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); - } + + // What path are we checking? + addLogEntry("sync_list item to check: " ~ newItemPath, ["debug"]); + + // Unfortunately there is no avoiding this call to check if the path is excluded|included via sync_list + if (selectiveSync.isPathExcludedViaSyncList(newItemPath)) { + // selective sync advised to skip, however is this a file and are we configured to upload / download files in the root? 
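+						// Illustrative example: with a sync_list containing only '/Work',
+						// a root-level file 'notes.txt' is excluded by the sync_list check above,
+						// but is still included here when sync_root_files is enabled, as it resides in the logical root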
+ if ((isItemFile(onedriveJSONItem)) && (appConfig.getValueBool("sync_root_files")) && (rootName(newItemPath) == "") ) { + // This is a file + // We are configured to sync all files in the root + // This is a file in the logical root + clientSideRuleExcludesPath = false; } else { - // parent is in database - log.vlog("The parent for this path is in the local database - adding requested path (", path ,") to database"); - // are we in a --dry-run scenario? - if (!dryRun) { - // get the live data - JSONValue pathDetails; - try { - pathDetails = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } catch (OneDriveException e) { - log.vdebug("pathDetails = onedrive.getPathDetailsByDriveId(parent.driveId, path) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found - log.error("ERROR: The requested single directory to sync was not found on OneDrive"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling onedrive.getPathDetailsByDriveId(parent.driveId, path);"); - pathDetails = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } - - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } - - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(pathDetails); - - // OneDrive Personal Shared Folder edgecase handling - // In a: - // --resync --upload-only --single-directory 'dir' scenario, and where the root 'dir' for --single-directory is a 'shared folder' - // OR - // --resync --upload-only scenario, and where the root 'dir' to upload is a 'shared folder' - // - // We will not have the 'tie' DB entry created because of --upload-only because we do not download the folder structure from OneDrive - // to know what the remoteDriveId actually is - if (accountType == "personal"){ - // are we in a --resync --upload-only scenario ? 
- if ((cfg.getValueBool("resync")) && (cfg.getValueBool("upload_only"))) { - // Create a temp item - // Takes a JSON input and formats to an item which can be used by the database - Item tempItem = makeItem(pathDetails); - // New DB Tie item due to edge case - Item tieDBItem; - // Set the name - tieDBItem.name = tempItem.name; - // Set the correct item type - tieDBItem.type = ItemType.dir; - //parent.type = ItemType.remote; - if ((tempItem.type == ItemType.remote) && (!tempItem.remoteDriveId.empty)) { - // set the right elements - tieDBItem.driveId = tempItem.remoteDriveId; - tieDBItem.id = tempItem.remoteId; - // Set the correct mtime - tieDBItem.mtime = tempItem.mtime; - // Add tie DB record to the local database - log.vdebug("Adding tie DB record to database: ", tieDBItem); - itemdb.upsert(tieDBItem); - } - } - } - } else { - // need to fake this data - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); - } + // path is unwanted + clientSideRuleExcludesPath = true; + addLogEntry("Skipping item - excluded by sync_list config: " ~ newItemPath, ["verbose"]); + } + } + } + } + + // Check if this is excluded by a user set maximum filesize to download + if (!clientSideRuleExcludesPath) { + if (isItemFile(onedriveJSONItem)) { + if (fileSizeLimit != 0) { + if (onedriveJSONItem["size"].integer >= fileSizeLimit) { + addLogEntry("Skipping item - excluded by skip_size config: " ~ thisItemName ~ " (" ~ to!string(onedriveJSONItem["size"].integer/2^^20) ~ " MB)", ["verbose"]); + clientSideRuleExcludesPath = true; } - } else { - // They are the "same" name wise but different in case sensitivity - log.error("ERROR: Current directory has a 'case-insensitive match' to an existing directory on OneDrive"); - log.error("ERROR: To resolve, rename this local directory: ", buildNormalizedPath(absolutePath(path))); - log.error("ERROR: Remote OneDrive directory: ", response["name"].str); - log.log("Skipping: ", buildNormalizedPath(absolutePath(path))); - return; } - } else { - // response is not valid JSON, an error was returned from OneDrive - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - log.log("Skipping: ", buildNormalizedPath(absolutePath(path))); - return; } } + + + // return if path is excluded + return clientSideRuleExcludesPath; } - // upload a new file to OneDrive - private void uploadNewFile(const(string) path) - { - // Reset upload failure - OneDrive or filesystem issue (reading data) - uploadFailed = false; - Item parent; - bool parentPathFoundInDB = false; - // Check the database for the parent path - // What parent path to use? - string parentPath = dirName(path); // will be either . 
or something else
-		if (parentPath == "."){
-			// Assume this is a new file in the users configured sync_dir root
-			// Use client defaults
-			parent.id = defaultRootId;  // Should give something like 12345ABCDE1234A1!101
-			parent.driveId = defaultDriveId;  // Should give something like 12345abcde1234a1
-			parentPathFoundInDB = true;
+	// Process the list of local changes to upload to OneDrive
+	void processChangedLocalItemsToUpload() {
+		
+		// Each element in the 'databaseItemsWhereContentHasChanged' array identifies a database item that has been modified locally
+		size_t batchSize = to!int(appConfig.getValueLong("threads"));
+		ulong batchCount = (databaseItemsWhereContentHasChanged.length + batchSize - 1) / batchSize;
+		ulong batchesProcessed = 0;
+		
+		// For each batch of files to upload, upload the changed data to OneDrive
+		foreach (chunk; databaseItemsWhereContentHasChanged.chunks(batchSize)) {
+			processChangedLocalItemsToUploadInParallel(chunk);
+			chunk = null;
+		}
+	}
+	
+	// Process all the changed local items in parallel
+	void processChangedLocalItemsToUploadInParallel(string[3][] array) {
+		// This function receives an array of string items to upload; the number of elements is based on appConfig.getValueLong("threads")
+		foreach (i, localItemDetails; processPool.parallel(array)) {
+			addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]);
+			uploadChangedLocalFileToOneDrive(localItemDetails);
+			addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]);
+		}
+	}
+	
+	// Upload a changed local file to OneDrive
+	void uploadChangedLocalFileToOneDrive(string[3] localItemDetails) {
+		
+		// These are the details of the item we need to upload
+		string changedItemParentId = localItemDetails[0];
+		string changedItemId = localItemDetails[1];
+		string localFilePath = localItemDetails[2];
+		
+		// Log the path that was modified
+		addLogEntry("uploadChangedLocalFileToOneDrive: " ~ localFilePath, ["debug"]);
+		
+		// How much space is remaining on OneDrive
+		ulong remainingFreeSpace;
+		// Did the upload fail?
+		bool uploadFailed = false;
+		// Did we skip due to exceeding the maximum allowed size?
+		bool skippedMaxSize = false;
+		// Did we skip due to an exception error?
+		bool skippedExceptionError = false;
+		// Flag for if space is available online
+		bool spaceAvailableOnline = false;
+		
+		// When we are uploading OneDrive Business Shared Files, we need to be targeting the right driveId and itemId
+		string targetDriveId;
+		string targetItemId;
+		
+		// Unfortunately, we can't store an array of Items ... so we have to re-query the DB again - unavoidable extra processing here
+		// This is because Item[] has no other functions to allow us to parallel process those elements, so we have to use a string array as input to this function
+		Item dbItem;
+		itemDB.selectById(changedItemParentId, changedItemId, dbItem);
+		
+		// Is this a remote target?
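+		// For context: a 'remote' database item represents content that actually resides on another drive,
+		// for example a OneDrive Business Shared File; any upload for such an item has to target
+		// dbItem.remoteDriveId / dbItem.remoteId rather than dbItem.driveId / dbItem.id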
+		if ((dbItem.type == ItemType.remote) && (dbItem.remoteType == ItemType.file)) {
+			// This is a remote file
+			targetDriveId = dbItem.remoteDriveId;
+			targetItemId = dbItem.remoteId;
+			// We assume that, as this is a OneDrive Business Shared File, there is space available
+			spaceAvailableOnline = true;
+		} else {
-			// Query the database using each of the driveId's we are using
-			foreach (driveId; driveIDsArray) {
-				// Query the database for this parent path using each driveId
-				Item dbResponse;
-				if(itemdb.selectByPath(parentPath, driveId, dbResponse)){
-					// parent path was found in the database
-					parent = dbResponse;
-					parentPathFoundInDB = true;
-				}
+			// This is not a remote file
+			targetDriveId = dbItem.driveId;
+			targetItemId = dbItem.id;
+		}
+		
+		// Fetch the details from cachedOnlineDriveData
+		// - cachedOnlineDriveData.quotaRestricted;
+		// - cachedOnlineDriveData.quotaAvailable;
+		// - cachedOnlineDriveData.quotaRemaining;
+		DriveDetailsCache cachedOnlineDriveData;
+		cachedOnlineDriveData = getDriveDetails(targetDriveId);
+		remainingFreeSpace = cachedOnlineDriveData.quotaRemaining;
+		
+		// Get the file size from the actual file
+		ulong thisFileSizeLocal = getSize(localFilePath);
+		// Get the file size from the DB data
+		ulong thisFileSizeFromDB;
+		if (!dbItem.size.empty) {
+			thisFileSizeFromDB = to!ulong(dbItem.size);
+		} else {
+			thisFileSizeFromDB = 0;
+		}
+		
+		// 'remainingFreeSpace' online includes the current file online
+		// We need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value
+		ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal;
+		
+		// Based on what we know, for this thread - can we safely upload this modified local file?
+		addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]);
+		addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]);
+		JSONValue uploadResponse;
+		
+		// Is there quota available for the given drive where we are uploading to?
+		// If 'personal' accounts, if driveId == defaultDriveId, then we will have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused
+		// If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - cachedOnlineDriveData.quotaRestricted will be set as true
+		// If 'business' accounts, if driveId == defaultDriveId, then we will potentially have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused
+		// If 'business' accounts, if driveId != defaultDriveId, then we will potentially have quota data, but it most likely will be a 0 value - cachedOnlineDriveData.quotaRestricted will be set as true
+		if (cachedOnlineDriveData.quotaAvailable) {
+			// Our query told us we have free space online .. if we upload this file, will we exceed space online - thus the upload will fail during upload?
+			if (calculatedSpaceOnlinePostUpload > 0) {
+				// Based on this thread action, we believe that there is space available online to upload - proceed
+				spaceAvailableOnline = true;
+			}
+		}
-		// Get the file size
-		long thisFileSize = getSize(path);
-		// Can we upload this file - is there enough free space? - https://github.com/skilion/onedrive/issues/73
-		// We can only use 'remainingFreeSpace' if we are uploading to our driveId ...
if this is a shared folder, we have no visibility of space available, as quota details are not provided by the OneDrive API - if (parent.driveId == defaultDriveId) { - // the file will be uploaded to my driveId - log.vdebug("File upload destination is users default driveId .."); - // are quota details being restricted? - if (!quotaRestricted) { - // quota is not being restricted - we can track drive space allocation to determine if it is possible to upload the file - if ((remainingFreeSpace - thisFileSize) < 0) { - // no space to upload file, based on tracking of quota values - quotaAvailable = false; - } else { - // there is free space to upload file, based on tracking of quota values - quotaAvailable = true; + // Is quota being restricted? + if (cachedOnlineDriveData.quotaRestricted) { + // Space available online is being restricted - so we have no way to really know if there is space available online + spaceAvailableOnline = true; + } + + // Do we have space available or is space available being restricted (so we make the blind assumption that there is space available) + if (spaceAvailableOnline) { + // Does this file exceed the maximum file size to upload to OneDrive? + if (thisFileSizeLocal <= maxUploadFileSize) { + // Attempt to upload the modified file + // Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result + uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + + // Evaluate the returned JSON uploadResponse + // If there was an error uploading the file, uploadResponse should be empty and invalid + if (uploadResponse.type() != JSONType.object) { + uploadFailed = true; + skippedExceptionError = true; } + } else { - // set quotaAvailable as true, even though we have zero way to validate that this is correct or not - quotaAvailable = true; + // Skip file - too large + uploadFailed = true; + skippedMaxSize = true; } } else { - // the file will be uploaded to a shared folder - // we can't track if there is enough free space to upload the file - log.vdebug("File upload destination is a shared folder - the upload may fail if not enough space on OneDrive .."); - // set quotaAvailable as true, even though we have zero way to validate that this is correct or not - quotaAvailable = true; - } - - // If performing a dry-run or parentPath is found in the database & there is quota available to upload file - if ((dryRun) || (parentPathFoundInDB && quotaAvailable)) { - // Maximum file size upload - // https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us - // July 2020, maximum file size for all accounts is 100GB - // January 2021, maximum file size for all accounts is 250GB - auto maxUploadFileSize = 268435456000; // 250GB - - // Can we read the file - as a permissions issue or file corruption will cause a failure - // https://github.com/abraunegg/onedrive/issues/113 - if (readLocalFile(path)){ - // we are able to read the file - // To avoid a 409 Conflict error - does the file actually exist on OneDrive already? - JSONValue fileDetailsFromOneDrive; - if (thisFileSize <= maxUploadFileSize){ - // Resolves: https://github.com/skilion/onedrive/issues/121, https://github.com/skilion/onedrive/issues/294, https://github.com/skilion/onedrive/issues/329 - // Does this 'file' already exist on OneDrive? 
- try { - // test if the local path exists on OneDrive - // if parent.driveId is invalid, then API call will generate a 'HTTP 400 - Bad Request' - make sure we at least have a valid parent.driveId - if (!parent.driveId.empty) { - // use configured value for parent.driveId - fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } else { - // switch to using defaultDriveId - log.vdebug("parent.driveId is empty - using defaultDriveId for API call"); - fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(defaultDriveId, path); - } - } catch (OneDriveException e) { - // log that we generated an exception - log.vdebug("fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(parent.driveId, path); generated a OneDriveException"); - // OneDrive returned a 'HTTP/1.1 400 Bad Request' - // If the 'path', when encoded, cannot be interpreted by the OneDrive API, the API will generate a 400 error - if (e.httpStatusCode == 400) { - log.log("Skipping uploading this new file: ", buildNormalizedPath(absolutePath(path))); - log.vlog("Skipping item - OneDrive returned a 'HTTP 400 - Bad Request' when attempting to query if file exists"); - log.error("ERROR: To resolve, rename this local file: ", buildNormalizedPath(absolutePath(path))); - uploadFailed = true; - return; - } - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - if (e.httpStatusCode == 401) { - log.vlog("Skipping item - OneDrive returned a 'HTTP 401 - Unauthorized' when attempting to query if file exists"); - uploadFailed = true; - return; - } - // A 404 is the expected response if the file was not present - if (e.httpStatusCode == 404) { - // The file was not found on OneDrive, need to upload it - // Check if file should be skipped based on skip_size config - if (thisFileSize >= this.newSizeLimit) { - log.vlog("Skipping item - excluded by skip_size config: ", path, " (", thisFileSize/2^^20," MB)"); - return; - } - - // start of upload file - write("Uploading new file ", path, " ... "); - JSONValue response; - - // Calculate upload speed - auto uploadStartTime = Clock.currTime(); - - if (!dryRun) { - // Resolve https://github.com/abraunegg/onedrive/issues/37 - if (thisFileSize == 0){ - // We can only upload zero size files via simpleFileUpload regardless of account type - // https://github.com/OneDrive/onedrive-api-docs/issues/53 - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - // error uploading file - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } else { - // File is not a zero byte file - // Are we using OneDrive Personal or OneDrive Business? - // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) - // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here - if (accountType == "personal"){ - // Original file upload logic - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request as a session"); - // Try upload as a session - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - // error uploading file - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... 
skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } else { - // File larger than threshold - use a session to upload - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } else { - // OneDrive Business Account - always use a session to upload - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } - - // response from OneDrive has to be a valid JSON object - if (response.type() == JSONType.object){ - // upload done without error - writeln("done."); - - // upload finished - auto uploadFinishTime = Clock.currTime(); - auto uploadDuration = uploadFinishTime - uploadStartTime; - log.vdebug("File Size: ", thisFileSize, " Bytes"); - log.vdebug("Upload Duration: ", (uploadDuration.total!"msecs"/1e3), " Seconds"); - auto uploadSpeed = (thisFileSize / (uploadDuration.total!"msecs"/1e3)/ 1024 / 1024); - log.vdebug("Upload Speed: ", uploadSpeed, " Mbps (approx)"); - - // Log upload action to log file - log.fileOnly("Uploading new file ", path, " ... done."); - // The file was uploaded, or a 4xx / 5xx error was generated - if ("size" in response){ - // The response JSON contains size, high likelihood valid response returned - ulong uploadFileSize = response["size"].integer; - - // In some cases the file that was uploaded was not complete, but 'completed' without errors on OneDrive - // This has been seen with PNG / JPG files mainly, which then contributes to generating a 412 error when we attempt to update the metadata - // Validate here that the file uploaded, at least in size, matches in the response to what the size is on disk - if (thisFileSize != uploadFileSize){ - // Upload size did not match local size - // There are 2 scenarios where this happens: - // 1. Failed Transfer - // 2. Upload file is going to a SharePoint Site, where Microsoft enriches the file with additional metadata with no way to disable - // For this client: - // - If a SharePoint Library, disableUploadValidation gets flagged as True - // - If we are syncing a business shared folder, this folder could reside on a Users Path (there should be no upload issue) or SharePoint (upload issue) - if ((disableUploadValidation)|| (syncBusinessFolders && (parent.driveId != defaultDriveId))){ - // Print a warning message - should only be triggered if: - // - disableUploadValidation gets flagged (documentLibrary account type) - // - syncBusinessFolders is being used & parent.driveId != defaultDriveId - log.log("WARNING: Uploaded file size does not match local file - skipping upload validation"); - log.vlog("WARNING: Due to Microsoft Sharepoint 'enrichment' of files, this file is now technically different to your local copy"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); - } else { - // OK .. 
the uploaded file does not match and we did not disable this validation - log.log("Uploaded file size does not match local file - upload failure - retrying"); - // Delete uploaded bad file - onedrive.deleteById(response["parentReference"]["driveId"].str, response["id"].str, response["eTag"].str); - // Re-upload - uploadNewFile(path); - return; - } - } - - // File validation is OK - if ((accountType == "personal") || (thisFileSize == 0)){ - // Update the item's metadata on OneDrive - string id = response["id"].str; - string cTag; - - // Is there a valid cTag in the response? - if ("cTag" in response) { - // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded - cTag = response["cTag"].str; - } else { - // Is there an eTag in the response? - if ("eTag" in response) { - // use the eTag from the response as there was no cTag - cTag = response["eTag"].str; - } else { - // no tag available - set to nothing - cTag = ""; - } - } - // check if the path exists locally before we try to set the file times - if (exists(path)) { - SysTime mtime = timeLastModified(path).toUTC(); - // update the file modified time on OneDrive and save item details to database - uploadLastModifiedTime(parent.driveId, id, cTag, mtime); - } else { - // will be removed in different event! - log.log("File disappeared after upload: ", path); - } - } else { - // OneDrive Business Account - always use a session to upload - // The session includes a Request Body element containing lastModifiedDateTime - // which negates the need for a modify event against OneDrive - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } - } - - // update free space tracking if this is our drive id - if (parent.driveId == defaultDriveId) { - // how much space is left on OneDrive after upload? - remainingFreeSpace = (remainingFreeSpace - thisFileSize); - log.vlog("Remaining free space on OneDrive: ", remainingFreeSpace); - } - // File uploaded successfully, space details updated if required - return; - } else { - // response is not valid JSON, an error was returned from OneDrive - log.fileOnly("Uploading new file ", path, " ... error"); - uploadFailed = true; - return; - } - } else { - // we are --dry-run - simulate the file upload - writeln("done."); - response = createFakeResponse(path); - // Log action to log file - log.fileOnly("Uploading new file ", path, " ... done."); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - return; - } - } - // OneDrive returned a '429 - Too Many Requests' - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - if (e.httpStatusCode >= 500) { - uploadFailed = true; - return; - } - } - - // Check that the filename that is returned is actually the file we wish to upload - // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file - // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, - // even though some file systems (such as a POSIX-compliant file system) may consider them as different. - // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior. - - // fileDetailsFromOneDrive has to be a valid object - if (fileDetailsFromOneDrive.type() == JSONType.object){ - // fileDetailsFromOneDrive = onedrive.getPathDetails(path) returned a valid JSON, meaning the file exists on OneDrive - // Check that 'name' is in the JSON response (validates data) and that 'name' == the path we are looking for - if (("name" in fileDetailsFromOneDrive) && (fileDetailsFromOneDrive["name"].str == baseName(path))) { - // OneDrive 'name' matches local path name - log.vlog("Requested file to upload exists on OneDrive - local database is out of sync for this file: ", path); - - // Is the local file newer than the uploaded file? - SysTime localFileModifiedTime = timeLastModified(path).toUTC(); - SysTime remoteFileModifiedTime = SysTime.fromISOExtString(fileDetailsFromOneDrive["fileSystemInfo"]["lastModifiedDateTime"].str); - localFileModifiedTime.fracSecs = Duration.zero; - - if (localFileModifiedTime > remoteFileModifiedTime){ - // local file is newer - log.vlog("Requested file to upload is newer than existing file on OneDrive"); - write("Uploading modified file ", path, " ... "); - JSONValue response; - - if (!dryRun) { - if (accountType == "personal"){ - // OneDrive Personal account upload handling - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - writeln("done."); - } catch (OneDriveException e) { - log.vdebug("response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); generated a OneDriveException"); - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request as a session"); - // Try upload as a session - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - writeln("done."); - } catch (OneDriveException e) { - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // error uploading file - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } else { - // File larger than threshold - use a session to upload - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - writeln("done."); - } catch (OneDriveException e) { - log.vdebug("response = session.upload(path, parent.driveId, parent.id, baseName(path)); generated a OneDriveException"); - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // error uploading file - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - - // response from OneDrive has to be a valid JSON object - if (response.type() == JSONType.object){ - // response is a valid JSON object - string id = response["id"].str; - string cTag; - - // Is there a valid cTag in the response? - if ("cTag" in response) { - // use the cTag instead of the eTag because Onedrive may update the metadata of files AFTER they have been uploaded - cTag = response["cTag"].str; - } else { - // Is there an eTag in the response? - if ("eTag" in response) { - // use the eTag from the response as there was no cTag - cTag = response["eTag"].str; - } else { - // no tag available - set to nothing - cTag = ""; - } - } - // validate if path exists so mtime can be calculated - if (exists(path)) { - SysTime mtime = timeLastModified(path).toUTC(); - uploadLastModifiedTime(parent.driveId, id, cTag, mtime); - } else { - // will be removed in different event! - log.log("File disappeared after upload: ", path); - } - } else { - // Log that an invalid JSON object was returned - log.vdebug("onedrive.simpleUpload or session.upload call returned an invalid JSON Object"); - return; - } - } else { - // OneDrive Business account modified file upload handling - if (accountType == "business"){ - // OneDrive Business Account - if ((!syncBusinessFolders) || (parent.driveId == defaultDriveId)) { - // If we are not syncing Shared Business Folders, or this change is going to the 'users' default drive, handle normally - // For logging consistency - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path), fileDetailsFromOneDrive["eTag"].str); - } catch (OneDriveException e) { - log.vdebug("response = session.upload(path, parent.driveId, parent.id, baseName(path), fileDetailsFromOneDrive['eTag'].str); generated a OneDriveException"); - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). 
We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // error uploading file - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // upload complete - writeln("done."); - saveItem(response); - } else { - // If we are uploading to a shared business folder, there are a couple of corner cases here: - // 1. Shared Folder is a 'users' folder - // 2. Shared Folder is a 'SharePoint Library' folder, meaning we get hit by this stupidity: https://github.com/OneDrive/onedrive-api-docs/issues/935 - - // Need try{} & catch (OneDriveException e) { & catch (FileException e) { handler for this query - response = handleSharePointMetadataAdditionBugReplaceFile(fileDetailsFromOneDrive, parent, path); - if (!uploadFailed){ - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - } - - // OneDrive SharePoint account modified file upload handling - if (accountType == "documentLibrary"){ - // Depending on the file size, this will depend on how best to handle the modified local file - // as if too large, the following error will be generated by OneDrive: - // HTTP request returned status code 413 (Request Entity Too Large) - // We also cant use a session to upload the file, we have to use simpleUploadReplace - - // Need try{} & catch (OneDriveException e) { & catch (FileException e) { handler for this query - response = handleSharePointMetadataAdditionBugReplaceFile(fileDetailsFromOneDrive, parent, path); - if (!uploadFailed){ - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - } - - // Log action to log file - log.fileOnly("Uploading modified file ", path, " ... done."); - - // update free space tracking if this is our drive id - if (parent.driveId == defaultDriveId) { - // how much space is left on OneDrive after upload? - remainingFreeSpace = (remainingFreeSpace - thisFileSize); - log.vlog("Remaining free space on OneDrive: ", remainingFreeSpace); - } - } else { - // we are --dry-run - simulate the file upload - writeln("done."); - response = createFakeResponse(path); - // Log action to log file - log.fileOnly("Uploading modified file ", path, " ... 
done."); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - return; - } - } else { - // Save the details of the file that we got from OneDrive - // --dry-run safe - log.vlog("Updating the local database with details for this file: ", path); - if (!dryRun) { - // use the live data - saveItem(fileDetailsFromOneDrive); - } else { - // need to fake this data - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); - } - } - } else { - // The files are the "same" name wise but different in case sensitivity - log.error("ERROR: A local file has the same name as another local file."); - log.error("ERROR: To resolve, rename this local file: ", buildNormalizedPath(absolutePath(path))); - log.log("Skipping uploading this new file: ", buildNormalizedPath(absolutePath(path))); - } - } else { - // fileDetailsFromOneDrive is not valid JSON, an error was returned from OneDrive - log.error("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - uploadFailed = true; - return; - } - } else { - // Skip file - too large - log.log("Skipping uploading this new file as it exceeds the maximum size allowed by OneDrive: ", path); - uploadFailed = true; - return; + // Cant upload this file - no space available + uploadFailed = true; + } + + // Did the upload fail? + if (uploadFailed) { + // Upload failed .. why? + // No space available online + if (!spaceAvailableOnline) { + addLogEntry("Skipping uploading modified file: " ~ localFilePath ~ " due to insufficient free space available on Microsoft OneDrive", ["info", "notify"]); + } + // File exceeds max allowed size + if (skippedMaxSize) { + addLogEntry("Skipping uploading this modified file as it exceeds the maximum size allowed by Microsoft OneDrive: " ~ localFilePath, ["info", "notify"]); + } + // Generic message + if (skippedExceptionError) { + // normal failure message if API or exception error generated + // If Issue #2626 | Case 2-1 is triggered, the file we tried to upload was renamed, then uploaded as a new name + if (exists(localFilePath)) { + // Issue #2626 | Case 2-1 was not triggered, file still exists on local filesystem + addLogEntry("Uploading modified file: " ~ localFilePath ~ " ... failed!", ["info", "notify"]); } - } else { - // unable to read local file - log.log("Skipping uploading this file as it cannot be read (file permissions or file corruption): ", path); } } else { - // Upload of the new file did not occur .. why? - if (!parentPathFoundInDB) { - // Parent path was not found - log.log("Skipping uploading this new file as parent path is not in the database: ", path); - uploadFailed = true; - return; + // Upload was successful + addLogEntry("Uploading modified file: " ~ localFilePath ~ " ... done.", ["info", "notify"]); + + // What do we save to the DB? Is this a OneDrive Business Shared File? 
+ if ((dbItem.type == ItemType.remote) && (dbItem.remoteType == ItemType.file)) { + // We need to 'massage' the old DB record, with data from online, as the DB record was specifically crafted for OneDrive Business Shared Files + Item tempItem = makeItem(uploadResponse); + dbItem.eTag = tempItem.eTag; + dbItem.cTag = tempItem.cTag; + dbItem.mtime = tempItem.mtime; + dbItem.quickXorHash = tempItem.quickXorHash; + dbItem.sha256Hash = tempItem.sha256Hash; + dbItem.size = tempItem.size; + itemDB.upsert(dbItem); + } else { + // Save the response JSON item in database as is + saveItem(uploadResponse); } - if (!quotaAvailable) { - // Not enough free space - log.log("Skipping item '", path, "' due to insufficient free space available on OneDrive"); - uploadFailed = true; - return; + + // Update the 'cachedOnlineDriveData' record for this 'targetDriveId' so that this is tracked as accurately as possible for other threads + updateDriveDetailsCache(targetDriveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSizeLocal); + + // Check the integrity of the uploaded modified file if not in a --dry-run scenario + if (!dryRun) { + // Perform the integrity of the uploaded modified file + performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal); + + // Update the date / time of the file online to match the local item + // Get the local file last modified time + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + localModifiedTime.fracSecs = Duration.zero; + // Get the latest eTag, and use that + string etagFromUploadResponse = uploadResponse["eTag"].str; + // Attempt to update the online date time stamp based on our local data + uploadLastModifiedTime(dbItem, targetDriveId, targetItemId, localModifiedTime, etagFromUploadResponse); } } } - - private JSONValue handleSharePointMetadataAdditionBugReplaceFile(JSONValue fileDetailsFromOneDrive, const ref Item parent, const(string) path) - { - // Explicit function for handling https://github.com/OneDrive/onedrive-api-docs/issues/935 - // Replace existing file - JSONValue response; + + // Perform the upload of a locally modified file to OneDrive + JSONValue performModifiedFileUpload(Item dbItem, string localFilePath, ulong thisFileSizeLocal) { + + // Function variables + JSONValue uploadResponse; + OneDriveApi uploadFileOneDriveApiInstance; + uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); + uploadFileOneDriveApiInstance.initialise(); - // Depending on the file size, this will depend on how best to handle the modified local file - // as if too large, the following error will be generated by OneDrive: - // HTTP request returned status code 413 (Request Entity Too Large) - // We also cant use a session to upload the file, we have to use simpleUploadReplace + // Configure JSONValue variables we use for a session upload + JSONValue currentOnlineData; + JSONValue uploadSessionData; + string currentETag; - // Calculate existing hash for this file - string existingFileHash = computeQuickXorHash(path); + // When we are uploading OneDrive Business Shared Files, we need to be targeting the right driveId and itemId + string targetDriveId; + string targetParentId; + string targetItemId; - if (getSize(path) <= thresholdFileSize) { - // Upload file via simpleUploadReplace as below threshold size - try { - response = onedrive.simpleUploadReplace(path, fileDetailsFromOneDrive["parentReference"]["driveId"].str, fileDetailsFromOneDrive["id"].str, fileDetailsFromOneDrive["eTag"].str); 
- } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return response; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; - } + // Is this a remote target? + if ((dbItem.type == ItemType.remote) && (dbItem.remoteType == ItemType.file)) { + // This is a remote file + targetDriveId = dbItem.remoteDriveId; + targetParentId = dbItem.remoteParentId; + targetItemId = dbItem.remoteId; } else { - // Have to upload via a session, however we have to delete the file first otherwise this will generate a 404 error post session upload - // Remove the existing file - onedrive.deleteById(fileDetailsFromOneDrive["parentReference"]["driveId"].str, fileDetailsFromOneDrive["id"].str, fileDetailsFromOneDrive["eTag"].str); - // Upload as a session, as a new file - writeln(""); + // This is not a remote file + targetDriveId = dbItem.driveId; + targetParentId = dbItem.parentId; + targetItemId = dbItem.id; + } + + // Is this a dry-run scenario? + if (!dryRun) { + // Do we use simpleUpload or create an upload session? + bool useSimpleUpload = false; + + // Try and get the absolute latest object details from online, so we get the latest eTag to try and avoid a 412 eTag error try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return response; + currentOnlineData = uploadFileOneDriveApiInstance.getPathDetailsById(targetDriveId, targetItemId); + } catch (OneDriveException exception) { + // Display what the error is + // - 408,429,503,504 errors are handled as a retry within uploadFileOneDriveApiInstance + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + + // Was a valid JSON response provided? + if (currentOnlineData.type() == JSONType.object) { + // Does the response contain an eTag? + if (hasETag(currentOnlineData)) { + // Use the value returned from online as this will attempt to avoid a 412 response if we are creating a session upload + currentETag = currentOnlineData["eTag"].str; } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... 
skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + // Use the database value - greater potential for a 412 error to occur if we are creating a session upload + addLogEntry("Online data for file returned zero eTag - using database eTag value", ["debug"]); + currentETag = dbItem.eTag; } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + } else { + // no valid JSON response - greater potential for a 412 error to occur if we are creating a session upload + addLogEntry("Online data returned was invalid - using database eTag value", ["debug"]); + currentETag = dbItem.eTag; } - } - writeln("done."); - // Due to https://github.com/OneDrive/onedrive-api-docs/issues/935 Microsoft modifies all PDF, MS Office & HTML files with added XML content. It is a 'feature' of SharePoint. - // So - now the 'local' and 'remote' file is technically DIFFERENT ... thanks Microsoft .. NO way to disable this stupidity - string uploadNewFileHash; - if (hasQuickXorHash(response)) { - // use the response json hash detail to compare - uploadNewFileHash = response["file"]["hashes"]["quickXorHash"].str; - } + + // What upload method should be used? + if (thisFileSizeLocal <= sessionThresholdFileSize) { + useSimpleUpload = true; + } + + // If the filesize is greater than zero , and we have valid 'latest' online data is the online file matching what we think is in the database? + if ((thisFileSizeLocal > 0) && (currentOnlineData.type() == JSONType.object)) { + // Issue #2626 | Case 2-1 + // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - potentially constituting online data loss + Item onlineFile = makeItem(currentOnlineData); + + // Which file is technically newer? The local file or the remote file? + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + SysTime onlineModifiedTime = onlineFile.mtime; + + // Reduce time resolution to seconds before comparing + localModifiedTime.fracSecs = Duration.zero; + onlineModifiedTime.fracSecs = Duration.zero; + + // Which file is newer? If local is newer, it will be uploaded as a modified file in the correct manner + if (localModifiedTime < onlineModifiedTime) { + // Online File is actually newer than the locally modified file + addLogEntry("currentOnlineData: " ~ to!string(currentOnlineData), ["debug"]); + addLogEntry("onlineFile: " ~ to!string(onlineFile), ["debug"]); + addLogEntry("database item: " ~ to!string(dbItem), ["debug"]); + addLogEntry("Skipping uploading this item as a locally modified file, will upload as a new file (online file already exists and is newer): " ~ localFilePath); + + // Online is newer, rename local, then upload the renamed file + // We need to know the renamed path so we can upload it + string renamedPath; + // Rename the local path + safeBackup(localFilePath, dryRun, renamedPath); + // Upload renamed local file as a new file + uploadNewFile(renamedPath); + + // Process the database entry removal for the original file. In a --dry-run scenario, this is being done against a DB copy. 
+ // This is done so we can download the newer online file + itemDB.deleteById(targetDriveId, targetItemId); + + // This file is now uploaded, return from here, but this will trigger a response that the upload failed (technically for the original filename it did, but we renamed it, then uploaded it + return uploadResponse; + } + } + + // We can only upload zero size files via simpleFileUpload regardless of account type + // Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53 + // Additionally, all files where file size is < 4MB should be uploaded by simpleUploadReplace - everything else should use a session to upload the modified file + if ((thisFileSizeLocal == 0) || (useSimpleUpload)) { + // Must use Simple Upload to replace the file online + try { + uploadResponse = uploadFileOneDriveApiInstance.simpleUploadReplace(localFilePath, targetDriveId, targetItemId); + } catch (OneDriveException exception) { + // Function name + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 403 + if ((exception.httpStatusCode == 403) && (appConfig.getValueBool("sync_business_shared_files"))) { + // We attempted to upload a file, that was shared with us, but this was shared with us as read-only + addLogEntry("Unable to upload this modified file as this was shared as read-only: " ~ localFilePath); + } + // HTTP request returned status code 423 + // Resolve https://github.com/abraunegg/onedrive/issues/36 + if (exception.httpStatusCode == 423) { + // The file is currently checked out or locked for editing by another user + // We cant upload this file at this time + addLogEntry("Unable to upload this modified file as this is currently checked out or locked for editing by another user: " ~ localFilePath); + } else { + // Handle all other HTTP status codes + // - 408,429,503,504 errors are handled as a retry within uploadFileOneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } catch (FileException e) { + // filesystem error + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } else { + // As this is a unique thread, the sessionFilePath for where we save the data needs to be unique + // The best way to do this is generate a 10 digit alphanumeric string, and use this as the file extension + string threadUploadSessionFilePath = appConfig.uploadSessionFilePath ~ "." 
~ generateAlphanumericString(); + + // Create the upload session + try { + uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, localFilePath, targetDriveId, targetParentId, baseName(localFilePath), currentETag, threadUploadSessionFilePath); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + + // HTTP request returned status code 403 + if ((exception.httpStatusCode == 403) && (appConfig.getValueBool("sync_business_shared_files"))) { + // We attempted to upload a file, that was shared with us, but this was shared with us as read-only + addLogEntry("Unable to upload this modified file as this was shared as read-only: " ~ localFilePath); + return uploadResponse; + } + // HTTP request returned status code 423 + // Resolve https://github.com/abraunegg/onedrive/issues/36 + if (exception.httpStatusCode == 423) { + // The file is currently checked out or locked for editing by another user + // We cant upload this file at this time + addLogEntry("Unable to upload this modified file as this is currently checked out or locked for editing by another user: " ~ localFilePath); + return uploadResponse; + } else { + // Handle all other HTTP status codes + // - 408,429,503,504 errors are handled as a retry within uploadFileOneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + + } catch (FileException e) { + writeln("DEBUG TO REMOVE: Modified file upload FileException Handling (Create the Upload Session)"); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + + // Perform the upload using the session that has been created + try { + uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSizeLocal, uploadSessionData, threadUploadSessionFilePath); + } catch (OneDriveException exception) { + // Function name + string thisFunctionName = getFunctionName!({}); + + // Handle all other HTTP status codes + // - 408,429,503,504 errors are handled as a retry within uploadFileOneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + + } catch (FileException e) { + writeln("DEBUG TO REMOVE: Modified file upload FileException Handling (Perform the Upload using the session)"); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + } else { + // We are in a --dry-run scenario + uploadResponse = createFakeResponse(localFilePath); + } + + // Debug Log the modified upload response + addLogEntry("Modified File Upload Response: " ~ to!string(uploadResponse), ["debug"]); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + uploadFileOneDriveApiInstance.releaseCurlEngine(); + uploadFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // Return JSON + return uploadResponse; + } + + // Query the OneDrive API using the provided driveId to get the latest quota details + string[3][] getRemainingFreeSpaceOnline(string driveId) { + // Get the quota details for this driveId + // Quota details are ONLY available for the main default driveId, as the OneDrive API does not provide quota details for shared folders + JSONValue currentDriveQuota; + bool quotaRestricted = false; // Assume quota is not restricted unless "remaining" is missing + bool quotaAvailable = false; + ulong quotaRemainingOnline = 0; + string[3][] result; + OneDriveApi getCurrentDriveQuotaApiInstance; + + // Ensure that we have a valid driveId to query + if 
(driveId.empty) { + // No 'driveId' was provided, use the application default + driveId = appConfig.defaultDriveId; + } + + // Try and query the quota for the provided driveId + try { + // Create a new OneDrive API instance + getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig); + getCurrentDriveQuotaApiInstance.initialise(); + addLogEntry("Seeking available quota for this drive id: " ~ driveId, ["debug"]); + currentDriveQuota = getCurrentDriveQuotaApiInstance.getDriveQuota(driveId); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + getCurrentDriveQuotaApiInstance.releaseCurlEngine(); + getCurrentDriveQuotaApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + } catch (OneDriveException e) { + addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]); + // If an exception occurs, it's unclear if quota is restricted, but quota details are not available + quotaRestricted = true; // Considering restricted due to failure to access + // Return result + result ~= [to!string(quotaRestricted), to!string(quotaAvailable), to!string(quotaRemainingOnline)]; + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + getCurrentDriveQuotaApiInstance.releaseCurlEngine(); + getCurrentDriveQuotaApiInstance = null; + // Perform Garbage Collection + GC.collect(); + return result; + } + + // Validate that currentDriveQuota is a JSON value + if (currentDriveQuota.type() == JSONType.object && "quota" in currentDriveQuota) { + // Response from API contains valid data + // If 'personal' accounts, if driveId == defaultDriveId, then we will have data + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data + // If 'business' accounts, if driveId == defaultDriveId, then we will have data + // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value + addLogEntry("Quota Details: " ~ to!string(currentDriveQuota), ["debug"]); + + auto quota = currentDriveQuota["quota"]; + if ("remaining" in quota) { + quotaRemainingOnline = quota["remaining"].integer; + quotaAvailable = quotaRemainingOnline > 0; + // If "remaining" is present but its value is <= 0, it's not restricted but exhausted + if (quotaRemainingOnline <= 0) { + if (appConfig.accountType == "personal") { + addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional capacity."); + } else { // Assuming 'business' or 'sharedLibrary' + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); + } + } + } else { + // "remaining" not present, indicating restricted quota information + quotaRestricted = true; + addLogEntry("Quota information is restricted or not available for this drive.", ["verbose"]); + } + } else { + // When valid quota details are not fetched + addLogEntry("Failed to fetch or interpret quota details from the API response.", ["verbose"]); + quotaRestricted = true; // Considering restricted due to failure to interpret + } + + // What was the determined available quota? 
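// Aside: an illustrative sketch of how a caller can unpack the single-row
// string[3][] result this function builds; the helper and variable names here
// are hypothetical, not part of this patch.
import std.conv : to;
void demonstrateQuotaResultUnpack(string[3][] quotaInfo) {
    // Row 0 carries [quotaRestricted, quotaAvailable, quotaRemainingOnline], each serialised via to!string()
    bool onlineQuotaRestricted = to!bool(quotaInfo[0][0]);
    bool onlineQuotaAvailable = to!bool(quotaInfo[0][1]);
    ulong onlineQuotaRemaining = to!ulong(quotaInfo[0][2]);
}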
+ addLogEntry("Reported Available Online Quota for driveID '" ~ driveId ~ "': " ~ to!string(quotaRemainingOnline), ["debug"]); + + // Return result + result ~= [to!string(quotaRestricted), to!string(quotaAvailable), to!string(quotaRemainingOnline)]; + return result; + } + + // Perform a filesystem walk to uncover new data to upload to OneDrive + void scanLocalFilesystemPathForNewData(string path) { + // Cleanup array memory before we start adding files + pathsToCreateOnline = []; + newLocalFilesToUploadToOneDrive = []; + + // Perform a filesystem walk to uncover new data + scanLocalFilesystemPathForNewDataToUpload(path); + + // Create new directories online that has been identified + processNewDirectoriesToCreateOnline(); + + // Upload new data that has been identified + processNewLocalItemsToUpload(); + } + + // Scan the local filesystem for new data to upload + void scanLocalFilesystemPathForNewDataToUpload(string path) { + // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? + string logPath; + if (path == ".") { + // get the configured sync_dir + logPath = buildNormalizedPath(appConfig.getValueString("sync_dir")); + } else { + // use what was passed in + if (!appConfig.getValueBool("monitor")) { + logPath = buildNormalizedPath(appConfig.getValueString("sync_dir")) ~ "/" ~ path; + } else { + logPath = path; + } + } + + // Log the action that we are performing, however only if this is a directory + if (isDir(path)) { + if (!appConfig.suppressLoggingOutput) { + if (!cleanupLocalFiles) { + addProcessingLogHeaderEntry("Scanning the local file system '" ~ logPath ~ "' for new data to upload", appConfig.verbosityCount); + } else { + addProcessingLogHeaderEntry("Scanning the local file system '" ~ logPath ~ "' for data to cleanup", appConfig.verbosityCount); + } + } + } + + auto startTime = Clock.currTime(); + addLogEntry("Starting Filesystem Walk: " ~ to!string(startTime), ["debug"]); + + // Add a processing '.' + if (!appConfig.suppressLoggingOutput) { + if (appConfig.verbosityCount == 0) { + addProcessingDotEntry(); + } + } + + // Perform the filesystem walk of this path, building an array of new items to upload + scanPathForNewData(path); + + if (appConfig.verbosityCount == 0) { + if (!appConfig.suppressLoggingOutput) { + // Close out the '....' being printed to the console + addLogEntry("\n", ["consoleOnlyNoNewLine"]); + } + } + + // To finish off the processing items, this is needed to reflect this in the log + addLogEntry("------------------------------------------------------------------", ["debug"]); + + auto finishTime = Clock.currTime(); + addLogEntry("Finished Filesystem Walk: " ~ to!string(finishTime), ["debug"]); + + auto elapsedTime = finishTime - startTime; + addLogEntry("Elapsed Time Filesystem Walk: " ~ to!string(elapsedTime), ["debug"]); + } + + void processNewDirectoriesToCreateOnline() { + // Are there any new local directories to create online? + if (!pathsToCreateOnline.empty) { + // There are new directories to create online + addLogEntry("New directories to create on Microsoft OneDrive: " ~ to!string(pathsToCreateOnline.length) ); + foreach(pathToCreateOnline; pathsToCreateOnline) { + // Create this directory on OneDrive so that we can upload files to it + createDirectoryOnline(pathToCreateOnline); + } + } + } + + // Upload new data that has been identified to Microsoft OneDrive + void processNewLocalItemsToUpload() { + // Are there any new local items to upload? 
+ if (!newLocalFilesToUploadToOneDrive.empty) { + // There are elements to upload + addLogEntry("New items to upload to Microsoft OneDrive: " ~ to!string(newLocalFilesToUploadToOneDrive.length) ); + + // Reset totalDataToUpload + totalDataToUpload = 0; + + // How much data do we need to upload? This is important, as, we need to know how much data to determine if all the files can be uploaded + foreach (uploadFilePath; newLocalFilesToUploadToOneDrive) { + // validate that the path actually exists so that it can be counted + if (exists(uploadFilePath)) { + totalDataToUpload = totalDataToUpload + getSize(uploadFilePath); + } + } + + // How much data is there to upload + if (totalDataToUpload < 1024) { + // Display as Bytes to upload + addLogEntry("Total New Data to Upload: " ~ to!string(totalDataToUpload) ~ " Bytes", ["verbose"]); + } else { + if ((totalDataToUpload > 1024) && (totalDataToUpload < 1048576)) { + // Display as KB to upload + addLogEntry("Total New Data to Upload: " ~ to!string((totalDataToUpload / 1024)) ~ " KB", ["verbose"]); + } else { + // Display as MB to upload + addLogEntry("Total New Data to Upload: " ~ to!string((totalDataToUpload / 1024 / 1024)) ~ " MB", ["verbose"]); + } + } + + // How much space is available + // The file, could be uploaded to a shared folder, which, we are not tracking how much free space is available there ... + // Iterate through all the drives we have cached thus far, that we know about + foreach (driveId, driveDetails; onlineDriveDetails) { + // Log how much space is available for each driveId + addLogEntry("Current Available Space Online (" ~ driveId ~ "): " ~ to!string((driveDetails.quotaRemaining / 1024 / 1024)) ~ " MB", ["debug"]); + } + + // Perform the upload + uploadNewLocalFileItems(); + + // Cleanup array memory after uploading all files + newLocalFilesToUploadToOneDrive = []; + } + } + + // Scan this path for new data + void scanPathForNewData(string path) { + + // Add a processing '.' + if (isDir(path)) { + if (!appConfig.suppressLoggingOutput) { + if (appConfig.verbosityCount == 0) { + addProcessingDotEntry(); + } + } + } + + ulong maxPathLength; + ulong pathWalkLength; + + // Add this logging break to assist with what was checked for each path + if (path != ".") { + addLogEntry("------------------------------------------------------------------", ["debug"]); + } + + // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders + // If the path is greater than allowed characters, then one drive will return a '400 - Bad Request' + // Need to ensure that the URI is encoded before the check is made: + // - 400 Character Limit for OneDrive Business / Office 365 + // - 430 Character Limit for OneDrive Personal + + // Configure maxPathLength based on account type + if (appConfig.accountType == "personal") { + // Personal Account + maxPathLength = 430; + } else { + // Business Account / Office365 / SharePoint + maxPathLength = 400; + } + + // OneDrive Business Shared Files Handling - if we make a 'backup' locally of a file shared with us (because we modified it, and then maybe did a --resync), it will be treated as a new file to upload ... 
+ // The issue here is - the 'source' was a shared file - we may not even have permission to upload a 'renamed' file to the shared file's parent folder + // In this case, we need to skip adding this new local file - we do not upload it (we cant , and we should not) + if (appConfig.accountType == "business") { + // Check appConfig.configuredBusinessSharedFilesDirectoryName against 'path' + if (canFind(path, baseName(appConfig.configuredBusinessSharedFilesDirectoryName))) { + // Log why this path is being skipped + addLogEntry("Skipping scanning path for new files as this is reserved for OneDrive Business Shared Files: " ~ path, ["info"]); + return; + } + } + + // A short lived item that has already disappeared will cause an error - is the path still valid? + if (!exists(path)) { + addLogEntry("Skipping item - path has disappeared: " ~ path); + return; + } + + // Calculate the path length by walking the path and catch any UTF-8 sequence errors at the same time + // https://github.com/skilion/onedrive/issues/57 + // https://github.com/abraunegg/onedrive/issues/487 + // https://github.com/abraunegg/onedrive/issues/1192 + try { + pathWalkLength = path.byGrapheme.walkLength; + } catch (std.utf.UTFException e) { + // Path contains characters which generate a UTF exception + addLogEntry("Skipping item - invalid UTF sequence: " ~ path, ["info", "notify"]); + addLogEntry(" Error Reason:" ~ e.msg, ["debug"]); + return; + } + + // Is the path length is less than maxPathLength + if (pathWalkLength < maxPathLength) { + // Is this path unwanted + bool unwanted = false; + + // First check of this item - if we are in a --dry-run scenario, we may have 'fake deleted' this path + // thus, the entries are not in the dry-run DB copy, thus, at this point the client thinks that this is an item to upload + // Check this 'path' for an entry in pathFakeDeletedArray - if it is there, this is unwanted + if (dryRun) { + // Is this path in the array of fake deleted items? 
If yes, return early, nothing else to do, save processing + if (canFind(pathFakeDeletedArray, path)) return; + } + + // Check if item if found in database + bool itemFoundInDB = pathFoundInDatabase(path); + + // If the item is already found in the database, it is redundant to perform these checks + if (!itemFoundInDB) { + // This not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly + // Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252 + if (!unwanted) { + if(!isValid(path)) { + // Path is not valid according to https://dlang.org/phobos/std_encoding.html + addLogEntry("Skipping item - invalid character encoding sequence: " ~ path, ["info", "notify"]); + unwanted = true; + } + } + + // Check this path against the Client Side Filtering Rules + // - check_nosync + // - skip_dotfiles + // - skip_symlinks + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + if (!unwanted) { + unwanted = checkPathAgainstClientSideFiltering(path); + } + + // Check this path against the Microsoft Naming Conventions & Restristions + // - Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders + // - Check path for bad whitespace items + // - Check path for HTML ASCII Codes + // - Check path for ASCII Control Codes + if (!unwanted) { + unwanted = checkPathAgainstMicrosoftNamingRestrictions(path); + } + } + + if (!unwanted) { + // At this point, this path, we want to scan for new data as it is not excluded + if (isDir(path)) { + // Was the path found in the database? + if (!itemFoundInDB) { + // Path not found in database when searching all drive id's + if (!cleanupLocalFiles) { + // --download-only --cleanup-local-files not used + // Create this directory on OneDrive so that we can upload files to it + // Add this path to an array so that the directory online can be created before we upload files + pathsToCreateOnline ~= [path]; + } else { + // we need to clean up this directory + addLogEntry("Removing local directory as --download-only & --cleanup-local-files configured"); + // Remove any children of this path if they still exist + // Resolve 'Directory not empty' error when deleting local files + try { + auto directoryEntries = dirEntries(path, SpanMode.depth, false); + foreach (DirEntry child; directoryEntries) { + // what sort of child is this? + if (isDir(child.name)) { + addLogEntry("Removing local directory: " ~ child.name); + } else { + addLogEntry("Removing local file: " ~ child.name); + } + + // are we in a --dry-run scenario? + if (!dryRun) { + // No --dry-run ... process local delete + if (exists(child)) { + try { + attrIsDir(child.linkAttributes) ? rmdir(child.name) : remove(child.name); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + } + } + // Clear directoryEntries + object.destroy(directoryEntries); + + // Remove the path now that it is empty of children + addLogEntry("Removing local directory: " ~ path); + // are we in a --dry-run scenario? + if (!dryRun) { + // No --dry-run ... 
process local delete + try { + rmdirRecurse(path); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + return; + } + } + } + + // flag for if we are going traverse this path + bool skipFolderTraverse = false; + + // Before we traverse this 'path', we need to make a last check to see if this was just excluded + if (appConfig.accountType == "business") { + // search businessSharedFoldersOnlineToSkip for this path + if (canFind(businessSharedFoldersOnlineToSkip, path)) { + // This path was skipped - why? + addLogEntry("Skipping item '" ~ path ~ "' due to this path matching an existing online Business Shared Folder name", ["info", "notify"]); + addLogEntry("To sync this Business Shared Folder, consider enabling 'sync_business_shared_folders' within your application configuration.", ["info"]); + skipFolderTraverse = true; + } + } + + // Do we traverse this path? + if (!skipFolderTraverse) { + // Try and access this directory and any path below + try { + auto directoryEntries = dirEntries(path, SpanMode.shallow, false); + foreach (DirEntry entry; directoryEntries) { + string thisPath = entry.name; + scanPathForNewData(thisPath); + } + // Clear directoryEntries + object.destroy(directoryEntries); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + return; + } + } + } else { + // https://github.com/abraunegg/onedrive/issues/984 + // path is not a directory, is it a valid file? + // pipes - whilst technically valid files, are not valid for this client + // prw-rw-r--. 1 user user 0 Jul 7 05:55 my_pipe + if (isFile(path)) { + // Was the file found in the database? + if (!itemFoundInDB) { + // File not found in database when searching all drive id's + // Do we upload the file or clean up the file? + if (!cleanupLocalFiles) { + // --download-only --cleanup-local-files not used + // Add this path as a file we need to upload + addLogEntry("OneDrive Client flagging to upload this file to Microsoft OneDrive: " ~ path, ["debug"]); + newLocalFilesToUploadToOneDrive ~= path; + } else { + // we need to clean up this file + addLogEntry("Removing local file as --download-only & --cleanup-local-files configured"); + // are we in a --dry-run scenario? + addLogEntry("Removing local file: " ~ path); + if (!dryRun) { + // No --dry-run ... process local file delete + safeRemove(path); + } + } + } + } else { + // path is not a valid file + addLogEntry("Skipping item - item is not a valid file: " ~ path, ["info", "notify"]); + } + } + } + } else { + // This path was skipped - why? + addLogEntry("Skipping item '" ~ path ~ "' due to the full path exceeding " ~ to!string(maxPathLength) ~ " characters (Microsoft OneDrive limitation)", ["info", "notify"]); + } + } + + // Handle a single file inotify trigger when using --monitor + void handleLocalFileTrigger(string[] changedLocalFilesToUploadToOneDrive) { + // Is this path a new file or an existing one? 
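// Aside: a minimal sketch of the "is it truly modified?" decision used below,
// assuming the quickXorHash field and computeQuickXorHash() referenced elsewhere
// in this file; the real comparison is performed by testFileHash(), and this
// helper name is illustrative only.
bool contentActuallyChanged(string localFilePath, Item databaseItem) {
    // A content hash comparison is used rather than mtime alone, as timestamps
    // can change without the file data actually changing
    return computeQuickXorHash(localFilePath) != databaseItem.quickXorHash;
}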
+ // Normally we would use pathFoundInDatabase() to calculate, but we need 'databaseItem' as well if the item is in the database + foreach (localFilePath; changedLocalFilesToUploadToOneDrive) { + try { + Item databaseItem; + bool fileFoundInDB = false; + + foreach (driveId; onlineDriveDetails.keys) { + if (itemDB.selectByPath(localFilePath, driveId, databaseItem)) { + fileFoundInDB = true; + break; + } + } + + // Was the file found in the database? + if (!fileFoundInDB) { + // This is a new file as it is not in the database + // Log that the file has been added locally + addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]); + scanLocalFilesystemPathForNewDataToUpload(localFilePath); + } else { + // This is a potentially modified file, needs to be handled as such. Is the item truly modified? + if (!testFileHash(localFilePath, databaseItem)) { + // The local file failed the hash comparison test - there is a data difference + // Log that the file has changed locally + addLogEntry("[M] Local file changed: " ~ localFilePath, ["verbose"]); + // Add the modified item to the array to upload + uploadChangedLocalFileToOneDrive([databaseItem.driveId, databaseItem.id, localFilePath]); + } + } + } catch(Exception e) { + addLogEntry("Cannot upload file changes/creation: " ~ e.msg, ["info", "notify"]); + } + } + processNewLocalItemsToUpload(); + } + + // Query the database to determine if this path is within the existing database + bool pathFoundInDatabase(string searchPath) { + + // Check if this path in the database + Item databaseItem; + addLogEntry("Search DB for this path: " ~ searchPath, ["debug"]); + + foreach (driveId; onlineDriveDetails.keys) { + if (itemDB.selectByPath(searchPath, driveId, databaseItem)) { + addLogEntry("DB Record for search path: " ~ to!string(databaseItem), ["debug"]); + return true; // Early exit on finding the path in the DB + } + } + return false; // Return false if path is not found in any drive + } + + // Create a new directory online on OneDrive + // - Test if we can get the parent path details from the database, otherwise we need to search online + // for the path flow and create the folder that way + void createDirectoryOnline(string thisNewPathToCreate) { + // Log what we are doing + addLogEntry("OneDrive Client requested to create this directory online: " ~ thisNewPathToCreate, ["verbose"]); + + // Function variables + Item parentItem; + JSONValue onlinePathData; + + // Special Folder Handling: Do NOT create the folder online if it is being used for OneDrive Business Shared Files + // These are local copy files, in a self created directory structure which is not to be replicated online + // Check appConfig.configuredBusinessSharedFilesDirectoryName against 'thisNewPathToCreate' + if (canFind(thisNewPathToCreate, baseName(appConfig.configuredBusinessSharedFilesDirectoryName))) { + // Log why this is being skipped + addLogEntry("Skipping creating '" ~ thisNewPathToCreate ~ "' as this path is used for handling OneDrive Business Shared Files", ["info", "notify"]); + return; + } + + // Create a new API Instance for this thread and initialise it + OneDriveApi createDirectoryOnlineOneDriveApiInstance; + createDirectoryOnlineOneDriveApiInstance = new OneDriveApi(appConfig); + createDirectoryOnlineOneDriveApiInstance.initialise(); + + // What parent path to use? + string parentPath = dirName(thisNewPathToCreate); // will be either . 
or something else + + // Configure the parentItem by if this is the account 'root' use the root details, or search the database for the parent details + if (parentPath == ".") { + // Parent path is '.' which is the account root + // Use client defaults + parentItem.driveId = appConfig.defaultDriveId; // Should give something like 12345abcde1234a1 + parentItem.id = appConfig.defaultRootId; // Should give something like 12345ABCDE1234A1!101 + } else { + // Query the parent path online + addLogEntry("Attempting to query Local Database for this parent path: " ~ parentPath, ["debug"]); + + // Attempt a 2 step process to work out where to create the directory + // Step 1: Query the DB first for the parent path, to try and avoid an API call + // Step 2: Query online as last resort + + // Step 1: Check if this parent path in the database + Item databaseItem; + bool parentPathFoundInDB = false; + + foreach (driveId; onlineDriveDetails.keys) { + addLogEntry("Query DB with this driveID for the Parent Path: " ~ driveId, ["debug"]); + // Query the database for this parent path using each driveId that we know about + if (itemDB.selectByPath(parentPath, driveId, databaseItem)) { + parentPathFoundInDB = true; + addLogEntry("Parent databaseItem: " ~ to!string(databaseItem), ["debug"]); + addLogEntry("parentPathFoundInDB: " ~ to!string(parentPathFoundInDB), ["debug"]); + parentItem = databaseItem; + } + } + + // After querying all DB entries for each driveID for the parent path, what are the details in parentItem? + addLogEntry("Parent parentItem after DB Query exhausted: " ~ to!string(parentItem), ["debug"]); + + // Step 2: Query for the path online if not found in the local database + if (!parentPathFoundInDB) { + // parent path not found in database + try { + addLogEntry("Attempting to query OneDrive Online for this parent path as path not found in local database: " ~ parentPath, ["debug"]); + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); + addLogEntry("Online Parent Path Query Response: " ~ to!string(onlinePathData), ["debug"]); + + // Save item to the database + saveItem(onlinePathData); + parentItem = makeItem(onlinePathData); + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 404) { + // Parent does not exist ... 
need to create parent + addLogEntry("Parent path does not exist online: " ~ parentPath, ["debug"]); + createDirectoryOnline(parentPath); + // no return here as we need to continue, but need to re-query the OneDrive API to get the right parental details now that they exist + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); + parentItem = makeItem(onlinePathData); + } else { + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + } + } + + // Make sure the full path does not exist online, this should generate a 404 response, to which then the folder will be created online + try { + // Try and query the OneDrive API for the path we need to create + addLogEntry("Attempting to query OneDrive API for this path: " ~ thisNewPathToCreate, ["debug"]); + addLogEntry("parentItem details: " ~ to!string(parentItem), ["debug"]); + + // Depending on the data within parentItem, will depend on what method we are using to search + // A Shared Folder will be 'remote' so we need to check the remote parent id, rather than parentItem details + Item queryItem; + + if (parentItem.type == ItemType.remote) { + // This folder is a potential shared object + addLogEntry("ParentItem is a remote item object", ["debug"]); + // Need to create the DB Tie for this shared object to ensure this exists in the database + createDatabaseTieRecordForOnlineSharedFolder(parentItem); + // Update the queryItem values + queryItem.driveId = parentItem.remoteDriveId; + queryItem.id = parentItem.remoteId; + } else { + // Use parent item for the query item + addLogEntry("Standard Query, use parentItem", ["debug"]); + queryItem = parentItem; + } + + if (queryItem.driveId == appConfig.defaultDriveId) { + // Use getPathDetailsByDriveId + addLogEntry("Selecting getPathDetailsByDriveId to query OneDrive API for path data", ["debug"]); + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(queryItem.driveId, thisNewPathToCreate); + } else { + // Use searchDriveForPath to query OneDrive + addLogEntry("Selecting searchDriveForPath to query OneDrive API for path data", ["debug"]); + // If the queryItem.driveId is not our driveId - the path we are looking for will not be at the logical location that getPathDetailsByDriveId + // can use - as it will always return a 404 .. even if the path actually exists (which is the whole point of this test) + // Search the queryItem.driveId for any folder name match that we are going to create, then compare response JSON items with queryItem.id + // If no match, the folder we want to create does not exist at the location we are seeking to create it at, thus generate a 404 + onlinePathData = createDirectoryOnlineOneDriveApiInstance.searchDriveForPath(queryItem.driveId, baseName(thisNewPathToCreate)); + addLogEntry("onlinePathData: " ~to!string(onlinePathData), ["debug"]); + + // Process the response from searching the drive + ulong responseCount = count(onlinePathData["value"].array); + if (responseCount > 0) { + // Search 'name' matches were found .. need to match these against queryItem.id + bool foundDirectoryOnline = false; + JSONValue foundDirectoryJSONItem; + // Items were returned .. but is one of these what we are looking for? + foreach (childJSON; onlinePathData["value"].array) { + // Is this item not a file? 
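// Aside: the child matching below is two-tier - an exact parentId + name match
// first, then a POSIX case-insensitive name comparison. A hedged sketch of the
// case-insensitive test (the real enforcement is performPosixTest() further down;
// this helper name is illustrative):
bool posixCaseInsensitiveMatch(string localName, string onlineName) {
    import std.uni : toLower;
    // Names differ byte-for-byte but collide once case is folded
    return (localName != onlineName) && (toLower(localName) == toLower(onlineName));
}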
+ if (!isFileItem(childJSON)) { + Item thisChildItem = makeItem(childJSON); + // Direct Match Check + if ((queryItem.id == thisChildItem.parentId) && (baseName(thisNewPathToCreate) == thisChildItem.name)) { + // High confidence that this child folder is a direct match we are trying to create and it already exists online + addLogEntry("Path we are searching for exists online (Direct Match): " ~ baseName(thisNewPathToCreate), ["debug"]); + addLogEntry("childJSON: " ~ to!string(childJSON), ["debug"]); + foundDirectoryOnline = true; + foundDirectoryJSONItem = childJSON; + break; + } + + // Full Lower Case POSIX Match Check + string childAsLower = toLower(childJSON["name"].str); + string thisFolderNameAsLower = toLower(baseName(thisNewPathToCreate)); + + // Child name check + if (childAsLower == thisFolderNameAsLower) { + // This is a POSIX 'case in-sensitive match' ..... + // Local item name has a 'case-insensitive match' to an existing item on OneDrive + addLogEntry("Path we are searching for exists online (POSIX 'case in-sensitive match'): " ~ baseName(thisNewPathToCreate), ["debug"]); + addLogEntry("childJSON: " ~ to!string(childJSON), ["debug"]); + foundDirectoryOnline = true; + foundDirectoryJSONItem = childJSON; + break; + } + } + } + + if (foundDirectoryOnline) { + // Directory we are seeking was found online ... + addLogEntry("The directory we are seeking was found online by using searchDriveForPath ...", ["debug"]); + onlinePathData = foundDirectoryJSONItem; + } else { + // No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder + throw new OneDriveException(404, "Name not found via search"); + } + } else { + // No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder + throw new OneDriveException(404, "Name not found via search"); + } + } + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 404) { + // This is a good error - it means that the directory to create 100% does not exist online + // The directory was not found on the drive id we queried + addLogEntry("The requested directory to create was not found on OneDrive - creating remote directory: " ~ thisNewPathToCreate, ["verbose"]); + + // Build up the create directory request + JSONValue createDirectoryOnlineAPIResponse; + JSONValue newDriveItem = [ + "name": JSONValue(baseName(thisNewPathToCreate)), + "folder": parseJSON("{}") + ]; + + // Submit the creation request + // Fix for https://github.com/skilion/onedrive/issues/356 + if (!dryRun) { + try { + // Attempt to create a new folder on the required driveId and parent item id + string requiredDriveId; + string requiredParentItemId; + + // Is the item a Remote Object (Shared Folder) ? + if (parentItem.type == ItemType.remote) { + // Yes .. Shared Folder + addLogEntry("parentItem data: " ~ to!string(parentItem), ["debug"]); + requiredDriveId = parentItem.remoteDriveId; + requiredParentItemId = parentItem.remoteId; + } else { + // Not a Shared Folder + requiredDriveId = parentItem.driveId; + requiredParentItemId = parentItem.id; + } + + // Where are we creating this new folder? 
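// Aside: for reference, the newDriveItem JSON built above serialises to the
// Microsoft Graph "create folder" driveItem shape, e.g. { "name": "New Folder", "folder": {} },
// POSTed to the children collection of the required drive and parent item.
// An equivalent illustrative construction:
import std.json : JSONValue, parseJSON;
JSONValue exampleFolderBody = [
    "name": JSONValue("New Folder"),
    "folder": parseJSON("{}")
];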
+ addLogEntry("requiredDriveId: " ~ requiredDriveId, ["debug"]); + addLogEntry("requiredParentItemId: " ~ requiredParentItemId, ["debug"]); + addLogEntry("newDriveItem JSON: " ~ to!string(newDriveItem), ["debug"]); + + // Create the new folder + createDirectoryOnlineAPIResponse = createDirectoryOnlineOneDriveApiInstance.createById(requiredDriveId, requiredParentItemId, newDriveItem); + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(createDirectoryOnlineAPIResponse); + // Log that the directory was created + addLogEntry("Successfully created the remote directory " ~ thisNewPathToCreate ~ " on Microsoft OneDrive"); + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 409) { + // OneDrive API returned a 404 (above) to say the directory did not exist + // but when we attempted to create it, OneDrive responded that it now already exists + addLogEntry("OneDrive reported that " ~ thisNewPathToCreate ~ " already exists .. OneDrive API race condition", ["verbose"]); + // Shutdown this API instance, as we will create API instances as required, when required + createDirectoryOnlineOneDriveApiInstance.releaseCurlEngine(); + // Free object and memory + createDirectoryOnlineOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + return; + } else { + // some other error from OneDrive was returned - display what it is + addLogEntry("OneDrive generated an error when creating this path: " ~ thisNewPathToCreate); + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + // Shutdown this API instance, as we will create API instances as required, when required + createDirectoryOnlineOneDriveApiInstance.releaseCurlEngine(); + // Free object and memory + createDirectoryOnlineOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + return; + } + } + } else { + // Simulate a successful 'directory create' & save it to the dryRun database copy + addLogEntry("Successfully created the remote directory " ~ thisNewPathToCreate ~ " on Microsoft OneDrive"); + // The simulated response has to pass 'makeItem' as part of saveItem + auto fakeResponse = createFakeResponse(thisNewPathToCreate); + // Save item to the database + saveItem(fakeResponse); + } + + // Shutdown this API instance, as we will create API instances as required, when required + createDirectoryOnlineOneDriveApiInstance.releaseCurlEngine(); + // Free object and memory + createDirectoryOnlineOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + return; + + } else { + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within createDirectoryOnlineOneDriveApiInstance + + // If we get a 400 error, there is an issue creating this folder on Microsoft OneDrive for some reason + // If the error is not 400, re-try, else fail + if (exception.httpStatusCode != 400) { + // Attempt a re-try + createDirectoryOnline(thisNewPathToCreate); + } else { + // We cant create this directory online + addLogEntry("This folder cannot be created online: " ~ buildNormalizedPath(absolutePath(thisNewPathToCreate)), ["debug"]); + } + } + } + + // If we get to this point - onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, thisNewPathToCreate) generated a 'valid' response .... + // This means that the folder potentially exists online .. which is odd .. 
as it should not have existed + if (onlinePathData.type() == JSONType.object) { + // A valid object was responded with + if (onlinePathData["name"].str == baseName(thisNewPathToCreate)) { + // OneDrive 'name' matches local path name + if (appConfig.accountType == "business") { + // We are a business account, this existing online folder, could be a Shared Online Folder could be a 'Add shortcut to My files' item + addLogEntry("onlinePathData: " ~ to!string(onlinePathData), ["debug"]); + + // Is this a remote folder + if (isItemRemote(onlinePathData)) { + // The folder is a remote item ... we do not want to create this ... + addLogEntry("Existing Remote Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]); + + // Is Shared Business Folder Syncing enabled ? + if (!appConfig.getValueBool("sync_business_shared_items")) { + // Shared Business Folder Syncing is NOT enabled + addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]); + // Add this path to businessSharedFoldersOnlineToSkip + businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate]; + // no save to database, no online create + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + createDirectoryOnlineOneDriveApiInstance.releaseCurlEngine(); + createDirectoryOnlineOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + return; + } else { + // As the 'onlinePathData' is potentially missing the actual correct parent folder id in the 'remoteItem' JSON response, we have to perform a further query to get the correct answer + // Failure to do this, means the 'root' DB Tie Record has a different parent reference id to that what this folder's parent reference id actually is + JSONValue sharedFolderParentPathData; + string remoteDriveId = onlinePathData["remoteItem"]["parentReference"]["driveId"].str; + string remoteItemId = onlinePathData["remoteItem"]["id"].str; + sharedFolderParentPathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsById(remoteDriveId, remoteItemId); + + // A 'root' DB Tie Record needed for this folder using the correct parent data + createDatabaseRootTieRecordForOnlineSharedFolder(sharedFolderParentPathData); + } + } + } + + // Path found online + addLogEntry("The requested directory to create was found on OneDrive - skipping creating the directory: " ~ thisNewPathToCreate, ["verbose"]); + + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(onlinePathData); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + createDirectoryOnlineOneDriveApiInstance.releaseCurlEngine(); + createDirectoryOnlineOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + return; + } else { + // Normally this would throw an error, however we cant use throw new PosixException() + string msg = format("POSIX 'case-insensitive match' between '%s' (local) and '%s' (online) which violates the Microsoft OneDrive API namespace convention", baseName(thisNewPathToCreate), onlinePathData["name"].str); + displayPosixErrorMessage(msg); + addLogEntry("ERROR: Requested directory to create has a 'case-insensitive match' to an existing directory on OneDrive online."); + addLogEntry("ERROR: To resolve, rename this local directory: " ~ buildNormalizedPath(absolutePath(thisNewPathToCreate))); + addLogEntry("Skipping creating this directory online due to 'case-insensitive match': " ~ thisNewPathToCreate); + // Add this path to 
+ posixViolationPaths ~= [thisNewPathToCreate];
+
+ // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+ createDirectoryOnlineOneDriveApiInstance.releaseCurlEngine();
+ createDirectoryOnlineOneDriveApiInstance = null;
+ // Perform Garbage Collection
+ GC.collect();
+ return;
+ }
+ } else {
+ // response is not valid JSON, an error was returned from OneDrive
+ addLogEntry("ERROR: There was an error performing this operation on Microsoft OneDrive");
+ addLogEntry("ERROR: Increase logging verbosity to assist determining why.");
+ addLogEntry("Skipping: " ~ buildNormalizedPath(absolutePath(thisNewPathToCreate)));
+ // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+ createDirectoryOnlineOneDriveApiInstance.releaseCurlEngine();
+ createDirectoryOnlineOneDriveApiInstance = null;
+ // Perform Garbage Collection
+ GC.collect();
+ return;
+ }
+ }
+
+ // Test that the online name actually matches the requested local name
+ void performPosixTest(string localNameToCheck, string onlineName) {
+
+ // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file
+ // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same,
+ // even though some file systems (such as a POSIX-compliant file system) may consider them as different.
+ // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior.
+ if (localNameToCheck != onlineName) {
+ // POSIX Error
+ // Local item name has a 'case-insensitive match' to an existing item on OneDrive
+ throw new PosixException(localNameToCheck, onlineName);
+ }
+ }
+
+ // Upload new file items as identified
+ void uploadNewLocalFileItems() {
+ // Let's deal with the new local items in a batch process
+ size_t batchSize = to!int(appConfig.getValueLong("threads"));
+
+ foreach (chunk; newLocalFilesToUploadToOneDrive.chunks(batchSize)) {
+ uploadNewLocalFileItemsInParallel(chunk);
+ }
+ }
+
+ // Upload the file batches in parallel
+ void uploadNewLocalFileItemsInParallel(string[] array) {
+ // This function receives an array of strings to upload; the number of elements is based on appConfig.getValueLong("threads")
+ foreach (i, fileToUpload; processPool.parallel(array)) {
+ addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]);
+ uploadNewFile(fileToUpload);
+ addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]);
+ }
+ }
+
+ // Upload a new file to OneDrive
+ void uploadNewFile(string fileToUpload) {
+ // Debug for the moment
+ addLogEntry("fileToUpload: " ~ fileToUpload, ["debug"]);
+
+ // These are the details of the item we need to upload
+ // How much space is remaining on OneDrive
+ ulong remainingFreeSpaceOnline;
+ // Did the upload fail?
+ bool uploadFailed = false;
+ // Did we skip due to exceeding the maximum allowed size?
+ bool skippedMaxSize = false;
+ // Did we skip due to an exception error?
+ bool skippedExceptionError = false;
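+ // The remainder of uploadNewFile() proceeds, in order: confirm the file still exists locally
+ // and is readable, confirm its parent path is in the database, confirm the file size is within
+ // the OneDrive limit, estimate whether online quota allows the upload, check online for an
+ // existing item of the same name (including a POSIX case check), and only then hand off to
+ // performNewFileUpload()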
+ // Is the parent path in the item database?
+ bool parentPathFoundInDB = false;
+ // Get this file size
+ ulong thisFileSize;
+ // Is there space available online
+ bool spaceAvailableOnline = false;
+
+ DriveDetailsCache cachedOnlineDriveData;
+ ulong calculatedSpaceOnlinePostUpload;
+
+ OneDriveApi checkFileOneDriveApiInstance;
+
+ // Check the database for the parent path of fileToUpload
+ Item parentItem;
+ // What parent path to use?
+ string parentPath = dirName(fileToUpload); // will be either . or something else
+ if (parentPath == ".") {
+ // Assume this is a new file in the user's configured sync_dir root
+ // Use client defaults
+ parentItem.id = appConfig.defaultRootId; // Should give something like 12345ABCDE1234A1!101
+ parentItem.driveId = appConfig.defaultDriveId; // Should give something like 12345abcde1234a1
+ parentPathFoundInDB = true;
+ } else {
+ // Query the database using each of the driveIds we know about
+ foreach (driveId; onlineDriveDetails.keys) {
+ // Query the database for this parent path using each driveId
+ Item dbResponse;
+ if (itemDB.selectByPath(parentPath, driveId, dbResponse)) {
+ // parent path was found in the database
+ parentItem = dbResponse;
+ parentPathFoundInDB = true;
+ }
+ }
+ }
+
+ // If the parent path was found in the DB, 'parentItem.driveId' must not be empty, to ensure we upload to the right location
+ if ((parentPathFoundInDB) && (parentItem.driveId.empty)) {
+ // switch to using defaultDriveId
+ addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls", ["debug"]);
+ parentItem.driveId = appConfig.defaultDriveId;
+ }
+
+ // Check if the path still exists locally before we try to upload
+ if (exists(fileToUpload)) {
+ // Can we read the file - a permissions issue or actual file corruption will cause a failure
+ // Resolves: https://github.com/abraunegg/onedrive/issues/113
+ if (readLocalFile(fileToUpload)) {
+ // The local file can be read, so we can attempt to upload it in this thread
+ // Is the parent path in the DB?
+ if (parentPathFoundInDB) {
+ // Parent path is in the database
+ // Get the new file size
+ // Even if the permissions on the file are: -rw-------. 1 root root 8 Jan 11 09:42
+ // we can still obtain the file size; readLocalFile() also tests if the file can be read (permission check)
+ thisFileSize = getSize(fileToUpload);
+
+ // Does this file exceed the maximum filesize for OneDrive
+ // Resolves: https://github.com/skilion/onedrive/issues/121 , https://github.com/skilion/onedrive/issues/294 , https://github.com/skilion/onedrive/issues/329
+ if (thisFileSize <= maxUploadFileSize) {
+ // Is there enough free space on OneDrive, as compared to when we started this thread, to safely upload the file?
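+ // For reference, the free-space values cached below ultimately come from the Microsoft Graph
+ // 'quota' facet on the drive resource, which (illustrative values only) looks roughly like:
+ //   "quota": { "total": 5368709120, "used": 1342177280, "remaining": 4026531840, "state": "normal" }
+ // Business and SharePoint drives may restrict or zero these values, which is why
+ // 'quotaRestricted' is tracked separately from 'quotaRemaining' in DriveDetailsCache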
+ + // Make sure that parentItem.driveId is in our driveIDs array to use when checking if item is in database + // Keep the DriveDetailsCache array with unique entries only + if (!canFindDriveId(parentItem.driveId, cachedOnlineDriveData)) { + // Add this driveId to the drive cache, which then also sets for the defaultDriveId: + // - quotaRestricted; + // - quotaAvailable; + // - quotaRemaining; + addOrUpdateOneDriveOnlineDetails(parentItem.driveId); + // Fetch the details from cachedOnlineDriveData + cachedOnlineDriveData = getDriveDetails(parentItem.driveId); + } + + // Fetch the details from cachedOnlineDriveData + // - cachedOnlineDriveData.quotaRestricted; + // - cachedOnlineDriveData.quotaAvailable; + // - cachedOnlineDriveData.quotaRemaining; + remainingFreeSpaceOnline = cachedOnlineDriveData.quotaRemaining; + + // When we compare the space online to the total we are trying to upload - is there space online? + calculatedSpaceOnlinePostUpload = remainingFreeSpaceOnline - thisFileSize; + + // Based on what we know, for this thread - can we safely upload this modified local file? + addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpaceOnline), ["debug"]); + addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); + + // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true + // If 'business' accounts, if driveId == defaultDriveId, then we will have data + // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true + + if (remainingFreeSpaceOnline > totalDataToUpload) { + // Space available + spaceAvailableOnline = true; + } else { + // we need to look more granular + // What was the latest getRemainingFreeSpace() value? + if (cachedOnlineDriveData.quotaAvailable) { + // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? + if (calculatedSpaceOnlinePostUpload > 0) { + // Based on this thread action, we believe that there is space available online to upload - proceed + spaceAvailableOnline = true; + } + } + } + + // Is quota being restricted? + if (cachedOnlineDriveData.quotaRestricted) { + // If the upload target drive is not our drive id, then it is a shared folder .. we need to print a space warning message + if (parentItem.driveId != appConfig.defaultDriveId) { + // Different message depending on account type + if (appConfig.accountType == "personal") { + addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]); + } else { + addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + } + } else { + if (appConfig.accountType == "personal") { + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]); + } else { + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. 
Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]);
+ }
+ }
+ // Space available online is being restricted - so we have no reliable way to know if there is space available online
+ spaceAvailableOnline = true;
+ }
+
+ // Do we have space available, or is the available space being restricted (in which case we make the blind assumption that there is space available)?
+ if (spaceAvailableOnline) {
+ // We need to check that this new local file does not exist on OneDrive
+ JSONValue fileDetailsFromOneDrive;
+
+ // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file
+ // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same,
+ // even though some file systems (such as the POSIX-compliant file systems that Linux uses) may consider them as different.
+ // Note that NTFS supports POSIX semantics for case sensitivity, but this is not the default behavior, and OneDrive does not use it.
+
+ // In order to upload this file as new, this query HAS to respond with a '404 - Not Found', which triggers the upload
+
+ // Does this 'file' already exist on OneDrive?
+ try {
+
+ // Create a new API Instance for this thread and initialise it
+ checkFileOneDriveApiInstance = new OneDriveApi(appConfig);
+ checkFileOneDriveApiInstance.initialise();
+
+ if (parentItem.driveId == appConfig.defaultDriveId) {
+ // getPathDetailsByDriveId is only reliable when the driveId is our driveId
+ fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload);
+ } else {
+ // We need to curate a response by listing the children of this parentItem.driveId and parentItem.id, without traversing directories
+ // so that if the file is in a Shared Folder it can be found and, if it exists, checked correctly
+ fileDetailsFromOneDrive = searchDriveItemForFile(parentItem.driveId, parentItem.id, fileToUpload);
+ // Was the file found?
+ if (fileDetailsFromOneDrive.type() != JSONType.object) {
+ // No ....
+ throw new OneDriveException(404, "Name not found via searchDriveItemForFile");
+ }
+ }
+
+ // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+ checkFileOneDriveApiInstance.releaseCurlEngine();
+ checkFileOneDriveApiInstance = null;
+ // Perform Garbage Collection
+ GC.collect();
+
+ // Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API
+ if (hasName(fileDetailsFromOneDrive)) {
+ performPosixTest(baseName(fileToUpload), fileDetailsFromOneDrive["name"].str);
+ } else {
+ throw new JsonResponseException("Unable to perform POSIX test as the OneDrive API request generated an invalid JSON response");
+ }
+
+ // If we get to this point, the OneDrive API returned a 200 OK with valid JSON data that indicates a 'file' exists at this location already
+ // and that it matches the POSIX filename of the local item we are trying to upload as a new file
+ addLogEntry("The file we are attempting to upload as a new file already exists on Microsoft OneDrive: " ~ fileToUpload, ["verbose"]);
+
+ // No 404 or otherwise was triggered, meaning that the file already exists online and passes the POSIX test ...
+ addLogEntry("fileDetailsFromOneDrive after exist online check: " ~ to!string(fileDetailsFromOneDrive), ["debug"]);
+
+ // Does the data from online match our local file that we are attempting to upload as a new file?
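+ // If the content differs, the logic below resolves the collision using modification time:
+ // - if the local file is newer (or the same age), it is uploaded over the existing online item as a 'changed' file
+ // - if the online file is newer, the local file is renamed via safeBackup(), the renamed copy is
+ //   uploaded as a new file, and the database entry is removed so the newer online file gets downloaded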
+ bool raiseWarning = true; + if (!disableUploadValidation && performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize, raiseWarning)) { + // Save online item details to the database + saveItem(fileDetailsFromOneDrive); + } else { + // The local file we are attempting to upload as a new file is different to the existing file online + addLogEntry("Triggering newfile upload target already exists edge case, where the online item does not match what we are trying to upload", ["debug"]); + + // Issue #2626 | Case 2-2 (resync) + + // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - potentially constituting online data loss + // The file 'version history' online will have to be used to 'recover' the prior online file + string changedItemParentDriveId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; + string changedItemId = fileDetailsFromOneDrive["id"].str; + addLogEntry("Skipping uploading this item as a new file, will upload as a modified file (online file already exists): " ~ fileToUpload); + + // In order for the processing of the local item as a 'changed' item, unfortunately we need to save the online data of the existing online file to the local DB + saveItem(fileDetailsFromOneDrive); + + // Which file is technically newer? The local file or the remote file? + Item onlineFile = makeItem(fileDetailsFromOneDrive); + SysTime localModifiedTime = timeLastModified(fileToUpload).toUTC(); + SysTime onlineModifiedTime = onlineFile.mtime; + + // Reduce time resolution to seconds before comparing + localModifiedTime.fracSecs = Duration.zero; + onlineModifiedTime.fracSecs = Duration.zero; + + // Which file is newer? + if (localModifiedTime >= onlineModifiedTime) { + // Upload the locally modified file as-is, as it is newer + uploadChangedLocalFileToOneDrive([changedItemParentDriveId, changedItemId, fileToUpload]); + } else { + // Online is newer, rename local, then upload the renamed file + // We need to know the renamed path so we can upload it + string renamedPath; + // Rename the local path + safeBackup(fileToUpload, dryRun, renamedPath); + // Upload renamed local file as a new file + uploadNewFile(renamedPath); + // Process the database entry removal for the original file. In a --dry-run scenario, this is being done against a DB copy. + // This is done so we can download the newer online file + itemDB.deleteById(changedItemParentDriveId, changedItemId); + } + } + } catch (OneDriveException exception) { + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + checkFileOneDriveApiInstance.releaseCurlEngine(); + checkFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // If we get a 404 .. the file is not online .. this is what we want .. 
file does not exist online + if (exception.httpStatusCode == 404) { + // The file has been checked, client side filtering checked, does not exist online - we need to upload it + addLogEntry("fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); generated a 404 - file does not exist online - must upload it", ["debug"]); + uploadFailed = performNewFileUpload(parentItem, fileToUpload, thisFileSize); + } else { + // some other error + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } catch (PosixException e) { + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + checkFileOneDriveApiInstance.releaseCurlEngine(); + checkFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // Display POSIX error message + displayPosixErrorMessage(e.msg); + uploadFailed = true; + } catch (JsonResponseException e) { + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + checkFileOneDriveApiInstance.releaseCurlEngine(); + checkFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // Display JSON error message + addLogEntry(e.msg, ["debug"]); + uploadFailed = true; + } + } else { + // skip file upload - insufficient space to upload + addLogEntry("Skipping uploading this new file as it exceeds the available free space on Microsoft OneDrive: " ~ fileToUpload); + uploadFailed = true; + } + } else { + // Skip file upload - too large + addLogEntry("Skipping uploading this new file as it exceeds the maximum size allowed by Microsoft OneDrive: " ~ fileToUpload); + uploadFailed = true; + } + } else { + // why was the parent path not in the database? + if (canFind(posixViolationPaths, parentPath)) { + addLogEntry("ERROR: POSIX 'case-insensitive match' for the parent path which violates the Microsoft OneDrive API namespace convention."); + } else { + addLogEntry("ERROR: Parent path is not in the database or online."); + } + addLogEntry("ERROR: Unable to upload this file: " ~ fileToUpload); + uploadFailed = true; + } + } else { + // Unable to read local file + addLogEntry("Skipping uploading this file as it cannot be read (file permissions or file corruption): " ~ fileToUpload); + uploadFailed = true; + } + } else { + // File disappeared before upload + addLogEntry("File disappeared locally before upload: " ~ fileToUpload); + // dont set uploadFailed = true; as the file disappeared before upload, thus nothing here failed + } + + // Upload success or failure? 
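+ // On success, the shared drive-details cache for parentItem.driveId is updated using this
+ // file's size, so that concurrent upload threads work from a conservative free-space estimate;
+ // on failure, the path is recorded in fileUploadFailures so displaySyncFailures() can report it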
+ if (!uploadFailed) { + // Update the 'cachedOnlineDriveData' record for this 'dbItem.driveId' so that this is tracked as accurately as possible for other threads + updateDriveDetailsCache(parentItem.driveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSize); + + } else { + // Need to add this to fileUploadFailures to capture at the end + fileUploadFailures ~= fileToUpload; + } + } + + // Perform the actual upload to OneDrive + bool performNewFileUpload(Item parentItem, string fileToUpload, ulong thisFileSize) { + + // Assume that by default the upload fails + bool uploadFailed = true; + + // OneDrive API Upload Response + JSONValue uploadResponse; + + // Create the OneDriveAPI Upload Instance + OneDriveApi uploadFileOneDriveApiInstance; + + // Calculate upload speed + auto uploadStartTime = Clock.currTime(); + + // Is this a dry-run scenario? + if (!dryRun) { + // Not a dry-run situation + // Do we use simpleUpload or create an upload session? + bool useSimpleUpload = false; + if (thisFileSize <= sessionThresholdFileSize) { + useSimpleUpload = true; + } + + // We can only upload zero size files via simpleFileUpload regardless of account type + // Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53 + // Additionally, only where file size is < 4MB should be uploaded by simpleUpload - everything else should use a session to upload + + if ((thisFileSize == 0) || (useSimpleUpload)) { + try { + // Initialise API for simple upload + uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); + uploadFileOneDriveApiInstance.initialise(); + + // Attempt to upload the zero byte file using simpleUpload for all account types + uploadResponse = uploadFileOneDriveApiInstance.simpleUpload(fileToUpload, parentItem.driveId, parentItem.id, baseName(fileToUpload)); + uploadFailed = false; + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... done."); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + uploadFileOneDriveApiInstance.releaseCurlEngine(); + uploadFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + } catch (OneDriveException exception) { + // An error was responded with - what was it + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... failed."); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + uploadFileOneDriveApiInstance.releaseCurlEngine(); + uploadFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + } catch (FileException e) { + // display the error message + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... 
failed."); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + uploadFileOneDriveApiInstance.releaseCurlEngine(); + uploadFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + } + } else { + // Initialise API for session upload + uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); + uploadFileOneDriveApiInstance.initialise(); + + // Session Upload for this criteria: + // - Personal Account and file size > 4MB + // - All Business | Office365 | SharePoint files > 0 bytes + JSONValue uploadSessionData; + // As this is a unique thread, the sessionFilePath for where we save the data needs to be unique + // The best way to do this is generate a 10 digit alphanumeric string, and use this as the file extension + string threadUploadSessionFilePath = appConfig.uploadSessionFilePath ~ "." ~ generateAlphanumericString(); + + // Attempt to upload the > 4MB file using an upload session for all account types + try { + // Create the Upload Session + uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, fileToUpload, parentItem.driveId, parentItem.id, baseName(fileToUpload), null, threadUploadSessionFilePath); + } catch (OneDriveException exception) { + // An error was responded with - what was it + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... failed."); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + + } catch (FileException e) { + // display the error message + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... failed."); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + + // Do we have a valid session URL that we can use ? + if (uploadSessionData.type() == JSONType.object) { + // This is a valid JSON object + bool sessionDataValid = true; + + // Validate that we have the following items which we need + if (!hasUploadURL(uploadSessionData)) { + sessionDataValid = false; + addLogEntry("Session data missing 'uploadUrl'", ["debug"]); + } + + if (!hasNextExpectedRanges(uploadSessionData)) { + sessionDataValid = false; + addLogEntry("Session data missing 'nextExpectedRanges'", ["debug"]); + } + + if (!hasLocalPath(uploadSessionData)) { + sessionDataValid = false; + addLogEntry("Session data missing 'localPath'", ["debug"]); + } + + if (sessionDataValid) { + // We have a valid Upload Session Data we can use + try { + // Try and perform the upload session + uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSize, uploadSessionData, threadUploadSessionFilePath); + + if (uploadResponse.type() == JSONType.object) { + uploadFailed = false; + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... done."); + } else { + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... failed."); + uploadFailed = true; + } + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... failed."); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + + } + } else { + // No Upload URL or nextExpectedRanges or localPath .. 
not a valid JSON we can use + addLogEntry("Session data is missing required elements to perform a session upload.", ["verbose"]); + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... failed."); + } + } else { + // Create session Upload URL failed + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... failed."); + } + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + uploadFileOneDriveApiInstance.releaseCurlEngine(); + uploadFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + } + } else { + // We are in a --dry-run scenario + uploadResponse = createFakeResponse(fileToUpload); + uploadFailed = false; + addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... done.", ["info", "notify"]); + } + + // Upload has finished + auto uploadFinishTime = Clock.currTime(); + // If no upload failure, calculate metrics, perform integrity validation + if (!uploadFailed) { + // Upload did not fail ... + auto uploadDuration = uploadFinishTime - uploadStartTime; + addLogEntry("File Size: " ~ to!string(thisFileSize) ~ " Bytes", ["debug"]); + addLogEntry("Upload Duration: " ~ to!string((uploadDuration.total!"msecs"/1e3)) ~ " Seconds", ["debug"]); + auto uploadSpeed = (thisFileSize / (uploadDuration.total!"msecs"/1e3)/ 1024 / 1024); + addLogEntry("Upload Speed: " ~ to!string(uploadSpeed) ~ " Mbps (approx)", ["debug"]); + + // OK as the upload did not fail, we need to save the response from OneDrive, but it has to be a valid JSON response + if (uploadResponse.type() == JSONType.object) { + // check if the path still exists locally before we try to set the file times online - as short lived files, whilst we uploaded it - it may not exist locally already + if (exists(fileToUpload)) { + if (!dryRun) { + // Check the integrity of the uploaded file, if the local file still exists + performUploadIntegrityValidationChecks(uploadResponse, fileToUpload, thisFileSize); + + // Update the file modified time on OneDrive and save item details to database + // Update the item's metadata on OneDrive + SysTime mtime = timeLastModified(fileToUpload).toUTC(); + mtime.fracSecs = Duration.zero; + string newFileId = uploadResponse["id"].str; + string newFileETag = uploadResponse["eTag"].str; + // Attempt to update the online date time stamp based on our local data + uploadLastModifiedTime(parentItem, parentItem.driveId, newFileId, mtime, newFileETag); + } + } else { + // will be removed in different event! 
+ addLogEntry("File disappeared locally after upload: " ~ fileToUpload); + } + } else { + // Log that an invalid JSON object was returned + addLogEntry("uploadFileOneDriveApiInstance.simpleUpload or session.upload call returned an invalid JSON Object from the OneDrive API", ["debug"]); + } + } + + // Return upload status + return uploadFailed; + } + + // Create the OneDrive Upload Session + JSONValue createSessionFileUpload(OneDriveApi activeOneDriveApiInstance, string fileToUpload, string parentDriveId, string parentId, string filename, string eTag, string threadUploadSessionFilePath) { + + // Upload file via a OneDrive API session + JSONValue uploadSession; + + // Calculate modification time + SysTime localFileLastModifiedTime = timeLastModified(fileToUpload).toUTC(); + localFileLastModifiedTime.fracSecs = Duration.zero; + + // Construct the fileSystemInfo JSON component needed to create the Upload Session + JSONValue fileSystemInfo = [ + "item": JSONValue([ + "@microsoft.graph.conflictBehavior": JSONValue("replace"), + "fileSystemInfo": JSONValue([ + "lastModifiedDateTime": localFileLastModifiedTime.toISOExtString() + ]) + ]) + ]; + + // Try to create the upload session for this file + uploadSession = activeOneDriveApiInstance.createUploadSession(parentDriveId, parentId, filename, eTag, fileSystemInfo); + + if (uploadSession.type() == JSONType.object) { + // a valid session object was created + if ("uploadUrl" in uploadSession) { + // Add the file path we are uploading to this JSON Session Data + uploadSession["localPath"] = fileToUpload; + // Save this session + saveSessionFile(threadUploadSessionFilePath, uploadSession); + } + } else { + // no valid session was created + addLogEntry("Creation of OneDrive API Upload Session failed.", ["verbose"]); + // return upload() will return a JSONValue response, create an empty JSONValue response to return + uploadSession = null; + } + // Return the JSON + return uploadSession; + } + + // Save the session upload data + void saveSessionFile(string threadUploadSessionFilePath, JSONValue uploadSessionData) { + + try { + std.file.write(threadUploadSessionFilePath, uploadSessionData.toString()); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + + // Perform the upload of file via the Upload Session that was created + JSONValue performSessionFileUpload(OneDriveApi activeOneDriveApiInstance, ulong thisFileSize, JSONValue uploadSessionData, string threadUploadSessionFilePath) { + + // Response for upload + JSONValue uploadResponse; + + // Session JSON needs to contain valid elements + // Get the offset details + ulong fragmentSize = 10 * 2^^20; // 10 MiB + size_t fragmentCount = 0; + ulong fragSize = 0; + ulong offset = uploadSessionData["nextExpectedRanges"][0].str.splitter('-').front.to!ulong; + size_t expected_total_fragments = cast(size_t) ceil(double(thisFileSize) / double(fragmentSize)); + ulong start_unix_time = Clock.currTime.toUnixTime(); + int h, m, s; + string etaString; + string uploadLogEntry = "Uploading: " ~ uploadSessionData["localPath"].str ~ " ... "; + + // Start the session upload using the active API instance for this thread + while (true) { + fragmentCount++; + addLogEntry("Fragment: " ~ to!string(fragmentCount) ~ " of " ~ to!string(expected_total_fragments), ["debug"]); + + // What ETA string do we use? + auto eta = calc_eta((fragmentCount -1), expected_total_fragments, start_unix_time); + if (eta == 0) { + // Initial calculation ... 
+ etaString = format!"| ETA --:--:--";
+ } else {
+ // we have at least an ETA provided
+ dur!"seconds"(eta).split!("hours", "minutes", "seconds")(h, m, s);
+ etaString = format!"| ETA %02d:%02d:%02d"( h, m, s);
+ }
+
+ // Calculate this progress output
+ auto ratio = cast(double)(fragmentCount -1) / expected_total_fragments;
+ // Convert the ratio to an integer percentage, left justified for console output
+ string percentage = leftJustify(format("%d%%", cast(int)(ratio * 100)), 5, ' ');
+ addLogEntry(uploadLogEntry ~ percentage ~ etaString, ["consoleOnly"]);
+
+ // What fragment size will be used?
+ addLogEntry("fragmentSize: " ~ to!string(fragmentSize) ~ " offset: " ~ to!string(offset) ~ " thisFileSize: " ~ to!string(thisFileSize), ["debug"]);
+
+ // The offset must not overrun the file size. As 'fragSize' is unsigned it can never be negative,
+ // so test the offset itself here - an overrun would make 'thisFileSize - offset' underflow
+ if (offset > thisFileSize) {
+ // Session upload will fail
+ // not a JSON object - fragment upload failed
+ addLogEntry("File upload session failed - invalid calculation of fragment size", ["verbose"]);
+ if (exists(threadUploadSessionFilePath)) {
+ remove(threadUploadSessionFilePath);
+ }
+ // set uploadResponse to null as error
+ uploadResponse = null;
+ return uploadResponse;
+ }
+
+ fragSize = fragmentSize < thisFileSize - offset ? fragmentSize : thisFileSize - offset;
+ addLogEntry("Using fragSize: " ~ to!string(fragSize), ["debug"]);
+
+ // If the resume upload fails, we need to check for a return code here
+ try {
+ uploadResponse = activeOneDriveApiInstance.uploadFragment(
+ uploadSessionData["uploadUrl"].str,
+ uploadSessionData["localPath"].str,
+ offset,
+ fragSize,
+ thisFileSize
+ );
+ } catch (OneDriveException exception) {
+ // if an HTTP 100 (Continue) response is generated, continue
+ if (exception.httpStatusCode == 100) {
+ continue;
+ }
+ // There was an error response from OneDrive when uploading the file fragment
+
+ // Handle transient errors:
+ // 408 - Request Time Out
+ // 429 - Too Many Requests
+ // 503 - Service Unavailable
+ // 504 - Gateway Timeout
+
+ // Insert a new line as well, so that the below error is inserted on the console in the right location
+ addLogEntry("Fragment upload failed - received an exception response from OneDrive API", ["verbose"]);
+ // display what the error is
+ displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
+ // retry fragment upload in case the error is transient
+ addLogEntry("Retrying fragment upload", ["verbose"]);
+
+ try {
+ uploadResponse = activeOneDriveApiInstance.uploadFragment(
+ uploadSessionData["uploadUrl"].str,
+ uploadSessionData["localPath"].str,
+ offset,
+ fragSize,
+ thisFileSize
+ );
+ } catch (OneDriveException e) {
+ // OneDrive threw another error on retry
+ addLogEntry("Retry to upload fragment failed", ["verbose"]);
+ // display what the error is
+ displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
+ // set uploadResponse to null as the fragment upload errored twice
+ uploadResponse = null;
+ } catch (std.exception.ErrnoException e) {
+ // There was a file system error - display the error message
+ displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
+ return uploadResponse;
+ }
+ } catch (ErrnoException e) {
+ // There was a file system error
+ // display the error message
+ displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
+ uploadResponse = null;
+ return uploadResponse;
+ }
+
+ // was the fragment uploaded without issue?
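+ // For context (per the published Microsoft Graph upload session behaviour): intermediate
+ // fragments are expected to return '202 Accepted' with an updated 'nextExpectedRanges', while
+ // the final fragment returns '200 OK' or '201 Created' with the driveItem metadata - which is
+ // why a JSON object response is treated as success below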
+ if (uploadResponse.type() == JSONType.object) {
+ offset += fragmentSize;
+ if (offset >= thisFileSize) {
+ break;
+ }
+ // update the uploadSessionData details
+ uploadSessionData["expirationDateTime"] = uploadResponse["expirationDateTime"];
+ uploadSessionData["nextExpectedRanges"] = uploadResponse["nextExpectedRanges"];
+ saveSessionFile(threadUploadSessionFilePath, uploadSessionData);
+ } else {
+ // not a JSON object - fragment upload failed
+ addLogEntry("File upload session failed - invalid response from OneDrive API", ["verbose"]);
+
+ // cleanup session data
+ if (exists(threadUploadSessionFilePath)) {
+ remove(threadUploadSessionFilePath);
+ }
+ // set uploadResponse to null as error
+ uploadResponse = null;
+ return uploadResponse;
+ }
+ }
+
+ // upload complete
+ ulong end_unix_time = Clock.currTime.toUnixTime();
+ auto upload_duration = cast(int)(end_unix_time - start_unix_time);
+ dur!"seconds"(upload_duration).split!("hours", "minutes", "seconds")(h, m, s);
+ etaString = format!"| DONE in %02d:%02d:%02d"( h, m, s);
+ addLogEntry(uploadLogEntry ~ "100% " ~ etaString, ["consoleOnly"]);
+
+ // Remove session file if it exists
+ if (exists(threadUploadSessionFilePath)) {
+ remove(threadUploadSessionFilePath);
+ }
+
+ // Return the session upload response
+ return uploadResponse;
+ }
+
+ // Delete an item on OneDrive
+ void uploadDeletedItem(Item itemToDelete, string path) {
+
+ OneDriveApi uploadDeletedItemOneDriveApiInstance;
+
+ // Are we in a situation where we HAVE to keep the data online - do not delete the remote object
+ if (noRemoteDelete) {
+ if ((itemToDelete.type == ItemType.dir)) {
+ // Do not process remote directory delete
+ addLogEntry("Skipping remote directory delete as --upload-only & --no-remote-delete configured", ["verbose"]);
+ } else {
+ // Do not process remote file delete
+ addLogEntry("Skipping remote file delete as --upload-only & --no-remote-delete configured", ["verbose"]);
+ }
+ } else {
+
+ // Is this a --download-only operation?
+ if (!appConfig.getValueBool("download_only")) {
+ // Process the delete - delete the object online
+ addLogEntry("Deleting item from Microsoft OneDrive: " ~ path);
+ bool flagAsBigDelete = false;
+
+ Item[] children;
+ ulong itemsToDelete;
+
+ if ((itemToDelete.type == ItemType.dir)) {
+ // Query the database - how many objects will this remove?
+ children = getChildren(itemToDelete.driveId, itemToDelete.id);
+ // Count the returned items + the original item (1)
+ itemsToDelete = count(children) + 1;
+ addLogEntry("Number of items online to delete: " ~ to!string(itemsToDelete), ["debug"]);
+ } else {
+ itemsToDelete = 1;
+ }
+ // Clear array
+ children = [];
+
+ // A local delete of a file|folder when using --monitor will issue an inotify event, which will trigger the local & remote data to be deleted immediately
+ // The user may also be running a --sync process, so we check whether something was deleted between application runs
+ if (itemsToDelete >= appConfig.getValueLong("classify_as_big_delete")) {
+ // A big delete has been detected
+ flagAsBigDelete = true;
+ if (!appConfig.getValueBool("force")) {
+ addLogEntry("ERROR: An attempt to remove a large volume of data from OneDrive has been detected. Exiting client to preserve data on Microsoft OneDrive");
+ addLogEntry("ERROR: The total number of items being deleted is: " ~ to!string(itemsToDelete));
+ addLogEntry("ERROR: To delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value");
+ // Must exit here to preserve data online, and allow logging to complete
+ forceExit();
+ }
+ }
+
+ // Are we in a --dry-run scenario?
+ if (!dryRun) {
+ // We are not in a dry run scenario
+ addLogEntry("itemToDelete: " ~ to!string(itemToDelete), ["debug"]);
+
+ // what item are we trying to delete?
+ addLogEntry("Attempting to delete this single item id: " ~ itemToDelete.id ~ " from drive: " ~ itemToDelete.driveId, ["debug"]);
+
+ // Configure these item variables to handle OneDrive Business Shared Folder Deletion
+ Item actualItemToDelete;
+ Item remoteShortcutLinkItem;
+
+ // OneDrive Business Shared Folder Deletion Handling
+ // Is this a Business Account with Sync Business Shared Items enabled?
+ if ((appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items"))) {
+ // Syncing Business Shared Items is enabled
+ if (itemToDelete.driveId != appConfig.defaultDriveId) {
+ // The item to delete is on a remote drive ... technically we do not own this and should not be deleting this online
+ // We should however be deleting the 'link' in our account online, and removing the DB link entry
+ if (itemToDelete.type == ItemType.dir) {
+ // Query the database for this potential link
+ itemDB.selectByPathIncludingRemoteItems(path, appConfig.defaultDriveId, remoteShortcutLinkItem);
+ }
+ }
+ }
+
+ // Configure actualItemToDelete
+ if (remoteShortcutLinkItem.id != "") {
+ // A DB entry was returned
+ addLogEntry("remoteShortcutLinkItem: " ~ to!string(remoteShortcutLinkItem), ["debug"]);
+ // Set actualItemToDelete to this data
+ actualItemToDelete = remoteShortcutLinkItem;
+ // Delete the shortcut reference in the local database
+ itemDB.deleteById(remoteShortcutLinkItem.driveId, remoteShortcutLinkItem.id);
+ addLogEntry("Deleted OneDrive Business Shared Folder 'Shortcut Link'", ["debug"]);
+ } else {
+ // No data was returned, use the original data
+ actualItemToDelete = itemToDelete;
+ }
+
+ // Try the online deletion
+ try {
+ // Create new OneDrive API Instance
+ uploadDeletedItemOneDriveApiInstance = new OneDriveApi(appConfig);
+ uploadDeletedItemOneDriveApiInstance.initialise();
+
+ // Perform the delete via this OneDrive API instance
+ uploadDeletedItemOneDriveApiInstance.deleteById(actualItemToDelete.driveId, actualItemToDelete.id);
+
+ // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+ uploadDeletedItemOneDriveApiInstance.releaseCurlEngine();
+ uploadDeletedItemOneDriveApiInstance = null;
+ // Perform Garbage Collection
+ GC.collect();
+
+ } catch (OneDriveException e) {
+ if (e.httpStatusCode == 404) {
+ // item.id, item.eTag could not be found on the specified driveId
+ addLogEntry("OneDrive reported: The resource could not be found to be deleted.", ["verbose"]);
+ }
+
+ // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+ uploadDeletedItemOneDriveApiInstance.releaseCurlEngine();
+ uploadDeletedItemOneDriveApiInstance = null;
+ // Perform Garbage Collection
+ GC.collect();
+ }
+
+ // Delete the reference in the local database - use the original input
+ itemDB.deleteById(itemToDelete.driveId, itemToDelete.id);
+ if (itemToDelete.remoteId != null) {
+ // If the item is a remote item, delete the reference in the local database
+ itemDB.deleteById(itemToDelete.remoteDriveId, itemToDelete.remoteId);
+ }
+ } else {
+ // log that this is a dry-run activity
+ addLogEntry("dry run - no delete activity");
+ }
+ } else {
+ // --download-only operation, we are not uploading any delete event to OneDrive
+ addLogEntry("Not pushing local delete to Microsoft OneDrive due to --download-only being used", ["debug"]);
+ }
+ }
+ }
+
+ // Get the children of an item id from the database
+ Item[] getChildren(string driveId, string id) {
+
+ Item[] children;
+ children ~= itemDB.selectChildren(driveId, id);
+ foreach (Item child; children) {
+ if (child.type != ItemType.file) {
+ // recursively get the children of this child
+ children ~= getChildren(child.driveId, child.id);
+ }
+ }
+ return children;
+ }
+
+ // Perform a 'reverse' delete of all child objects on OneDrive
+ void performReverseDeletionOfOneDriveItems(Item[] children, Item itemToDelete) {
+
+ // Log what is happening
+ addLogEntry("Attempting a reverse delete of all child objects from OneDrive", ["debug"]);
+
+ // Create a new API Instance for this thread and initialise it
+ OneDriveApi performReverseDeletionOneDriveApiInstance;
+ performReverseDeletionOneDriveApiInstance = new OneDriveApi(appConfig);
+ performReverseDeletionOneDriveApiInstance.initialise();
+
+ foreach_reverse (Item child; children) {
+ // Log the action
+ addLogEntry("Attempting to delete this child item id: " ~ child.id ~ " from drive: " ~ child.driveId, ["debug"]);
+
+ // perform the delete via this thread's OneDrive API instance
+ performReverseDeletionOneDriveApiInstance.deleteById(child.driveId, child.id, child.eTag);
+ // delete the child reference in the local database
+ itemDB.deleteById(child.driveId, child.id);
+ }
+ // Log the action
+ addLogEntry("Attempting to delete this parent item id: " ~ itemToDelete.id ~ " from drive: " ~ itemToDelete.driveId, ["debug"]);
+
+ // Perform the delete via this thread's OneDrive API instance
+ performReverseDeletionOneDriveApiInstance.deleteById(itemToDelete.driveId, itemToDelete.id, itemToDelete.eTag);
+
+ // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+ performReverseDeletionOneDriveApiInstance.releaseCurlEngine();
+ performReverseDeletionOneDriveApiInstance = null;
+ // Perform Garbage Collection
+ GC.collect();
+ }
+
+ // Create a fake OneDrive response suitable for use with saveItem
+ JSONValue createFakeResponse(string path) {
+ import std.digest.sha;
+
+ // Generate a simulated JSON response which can be used
+ // At a minimum we need:
+ // 1. eTag
+ // 2. cTag
+ // 3. fileSystemInfo
+ // 4. file or folder. if file, hash of file
+ // 5. id
+ // 6. name
+ // 7. parent reference
+
+ string fakeDriveId = appConfig.defaultDriveId;
+ string fakeRootId = appConfig.defaultRootId;
+ SysTime mtime = exists(path) ? timeLastModified(path).toUTC() : Clock.currTime(UTC());
+ auto sha1 = new SHA1Digest();
+ ubyte[] fakedOneDriveItemValues = sha1.digest(path);
+ JSONValue fakeResponse;
+
+ string parentPath = dirName(path);
+ if (parentPath != "." 
&& exists(path)) { + foreach (searchDriveId; onlineDriveDetails.keys) { + Item databaseItem; + if (itemDB.selectByPath(parentPath, searchDriveId, databaseItem)) { + fakeDriveId = databaseItem.driveId; + fakeRootId = databaseItem.id; + break; // Exit loop after finding the first match + } + } + } + + fakeResponse = [ + "id": JSONValue(toHexString(fakedOneDriveItemValues)), + "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "fileSystemInfo": JSONValue([ + "createdDateTime": mtime.toISOExtString(), + "lastModifiedDateTime": mtime.toISOExtString() + ]), + "name": JSONValue(baseName(path)), + "parentReference": JSONValue([ + "driveId": JSONValue(fakeDriveId), + "driveType": JSONValue(appConfig.accountType), + "id": JSONValue(fakeRootId) + ]) + ]; + + if (exists(path)) { + if (isDir(path)) { + fakeResponse["folder"] = JSONValue(""); + } else { + string quickXorHash = computeQuickXorHash(path); + fakeResponse["file"] = JSONValue([ + "hashes": JSONValue(["quickXorHash": JSONValue(quickXorHash)]) + ]); + } + } else { + // Assume directory if path does not exist + fakeResponse["folder"] = JSONValue(""); + } + + addLogEntry("Generated Fake OneDrive Response: " ~ to!string(fakeResponse), ["debug"]); + return fakeResponse; + } + + // Save JSON item details into the item database + void saveItem(JSONValue jsonItem) { + // jsonItem has to be a valid object + if (jsonItem.type() == JSONType.object) { + // Check if the response JSON has an 'id', otherwise makeItem() fails with 'Key not found: id' + if (hasId(jsonItem)) { + // Are we in a --upload-only & --remove-source-files scenario? + // We do not want to add the item to the database in this situation as there is no local reference to the file post file deletion + // If the item is a directory, we need to add this to the DB, if this is a file, we dont add this, the parent path is not in DB, thus any new files in this directory are not added + if ((uploadOnly) && (localDeleteAfterUpload) && (isItemFile(jsonItem))) { + // Log that we skipping adding item to the local DB and the reason why + addLogEntry("Skipping adding to database as --upload-only & --remove-source-files configured", ["debug"]); + } else { + // What is the JSON item we are trying to create a DB record with? + addLogEntry("saveItem - creating DB item from this JSON: " ~ to!string(jsonItem), ["debug"]); + + // Takes a JSON input and formats to an item which can be used by the database + Item item = makeItem(jsonItem); + + // Is this JSON item a 'root' item? + if ((isItemRoot(jsonItem)) && (item.name == "root")) { + addLogEntry("Updating DB Item object with correct values as this is a 'root' object", ["debug"]); + item.parentId = null; // ensures that this database entry has no parent + // Check for parentReference + if (hasParentReference(jsonItem)) { + // Set the correct item.driveId + addLogEntry("ROOT JSON Item HAS parentReference .... setting item.driveId = jsonItem['parentReference']['driveId'].str", ["debug"]); + item.driveId = jsonItem["parentReference"]["driveId"].str; + } + + // We only should be adding our account 'root' to the database, not shared folder 'root' items + if (item.driveId != appConfig.defaultDriveId) { + // Shared Folder drive 'root' object .. 
we dont want this item + addLogEntry("NOT adding 'remote root' object to database: " ~ to!string(item), ["debug"]); + return; + } + } + + // Add to the local database + itemDB.upsert(item); + + // If we have a remote drive ID, add this to our list of known drive id's + if (!item.remoteDriveId.empty) { + // Keep the DriveDetailsCache array with unique entries only + DriveDetailsCache cachedOnlineDriveData; + if (!canFindDriveId(item.remoteDriveId, cachedOnlineDriveData)) { + // Add this driveId to the drive cache + addOrUpdateOneDriveOnlineDetails(item.remoteDriveId); + } + } + } + } else { + // log error + addLogEntry("ERROR: OneDrive response missing required 'id' element"); + addLogEntry("ERROR: " ~ to!string(jsonItem)); + } + } else { + // log error + addLogEntry("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object that can be processed."); + addLogEntry("ERROR: Increase logging verbosity to assist determining why."); + } + } + + // Wrapper function for makeDatabaseItem so we can check to ensure that the item has the required hashes + Item makeItem(JSONValue onedriveJSONItem) { + + // Make the DB Item from the JSON data provided + Item newDatabaseItem = makeDatabaseItem(onedriveJSONItem); + + // Is this a 'file' item that has not been deleted? Deleted items have no hash + if ((newDatabaseItem.type == ItemType.file) && (!isItemDeleted(onedriveJSONItem))) { + // Does this item have a file size attribute? + if (hasFileSize(onedriveJSONItem)) { + // Is the file size greater than 0? + if (onedriveJSONItem["size"].integer > 0) { + // Does the DB item have any hashes as per the API provided JSON data? + if ((newDatabaseItem.quickXorHash.empty) && (newDatabaseItem.sha256Hash.empty)) { + // Odd .. there is no hash for this item .. why is that? + // Is there a 'file' JSON element? + if ("file" in onedriveJSONItem) { + // Microsoft OneDrive OneNote objects will report as files but have 'application/msonenote' and 'application/octet-stream' as mime types + if ((isMicrosoftOneNoteMimeType1(onedriveJSONItem)) || (isMicrosoftOneNoteMimeType2(onedriveJSONItem))) { + // Debug log output that this is a potential OneNote object + addLogEntry("This item is potentially an associated Microsoft OneNote Object Item", ["debug"]); + } else { + // Not a Microsoft OneNote Mime Type Object .. + string apiWarningMessage = "WARNING: OneDrive API inconsistency - this file does not have any hash: "; + // This is computationally expensive .. but we are only doing this if there are no hashes provided + bool parentInDatabase = itemDB.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.parentId); + // Is the parent id in the database? + if (parentInDatabase) { + // This is again computationally expensive .. calculate this item path to advise the user the actual path of this item that has no hash + string newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name; + addLogEntry(apiWarningMessage ~ newItemPath); + } else { + // Parent is not in the database .. why? + // Check if the parent item had been skipped .. + if (newDatabaseItem.parentId in skippedItems) { + addLogEntry(apiWarningMessage ~ "newDatabaseItem.parentId listed within skippedItems", ["debug"]); + } else { + // Use the item ID .. 
there is no other reference available, parent is not being skipped, so we should have been able to calculate this - but we could not
+ addLogEntry(apiWarningMessage ~ newDatabaseItem.id);
+ }
+ }
+ }
+ }
+ } else {
+ // zero file size
+ addLogEntry("This item file is zero size - potentially no hash provided by the OneDrive API", ["debug"]);
+ }
+ }
+ }
+
+ // Return the new database item
+ return newDatabaseItem;
+ }
+
+ // Print the fileDownloadFailures and fileUploadFailures arrays if they are not empty
+ void displaySyncFailures() {
+ bool logFailures(string[] failures, string operation) {
+ if (failures.empty) return false;
+
+ addLogEntry();
+ addLogEntry("Failed items to " ~ operation ~ " to/from Microsoft OneDrive: " ~ to!string(failures.length));
+
+ foreach (failedFile; failures) {
+ addLogEntry("Failed to " ~ operation ~ ": " ~ failedFile, ["info", "notify"]);
+
+ foreach (searchDriveId; onlineDriveDetails.keys) {
+ Item dbItem;
+ if (itemDB.selectByPath(failedFile, searchDriveId, dbItem)) {
+ addLogEntry("ERROR: Failed " ~ operation ~ " path found in database, must delete this item from the database .. it should not be in there if the file failed to " ~ operation);
+ itemDB.deleteById(dbItem.driveId, dbItem.id);
+ if (dbItem.remoteDriveId != null) {
+ itemDB.deleteById(dbItem.remoteDriveId, dbItem.remoteId);
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ bool downloadFailuresLogged = logFailures(fileDownloadFailures, "download");
+ bool uploadFailuresLogged = logFailures(fileUploadFailures, "upload");
+ syncFailures = downloadFailuresLogged || uploadFailuresLogged;
+ }
+
+ // Generate a /delta compatible response - for use when we can't actually use /delta
+ // This is required when the application is configured to use National Azure AD deployments as these do not support /delta queries
+ // The same technique can also be used when we are using --single-directory. The parent objects up to the single directory target can be added,
+ // then once the target of the --single-directory request is hit, all of the children of that path can be queried, giving a much more focused
+ // JSON response which can then be processed, negating the need to continuously traverse the tree and 'exclude' items
+ JSONValue generateDeltaResponse(string pathToQuery = null) {
+
+ // JSON value which will be responded with
+ JSONValue selfGeneratedDeltaResponse;
+
+ // Function variables
+ Item searchItem;
+ JSONValue rootData;
+ JSONValue driveData;
+ JSONValue pathData;
+ JSONValue topLevelChildren;
+ JSONValue[] childrenData;
+ string nextLink;
+ OneDriveApi generateDeltaResponseOneDriveApiInstance;
+
+ // Was a path to query passed in?
+ if (pathToQuery.empty) {
+ // Will query for the 'root'
+ pathToQuery = ".";
+ }
+
+ // Create new OneDrive API Instance
+ generateDeltaResponseOneDriveApiInstance = new OneDriveApi(appConfig);
+ generateDeltaResponseOneDriveApiInstance.initialise();
+
+ // Is this a --single-directory invocation?
+ if (!singleDirectoryScope) {
+ // In a --resync scenario, there is no DB data to query, so we have to query the OneDrive API here to get relevant details
+ try {
+ // Query the OneDrive API
+ pathData = generateDeltaResponseOneDriveApiInstance.getPathDetails(pathToQuery);
+ // Is the path on OneDrive local or remote to our account drive id?
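+ // Illustrative only: a 'remote' item response carries a 'remoteItem' facet, shaped roughly as
+ //   { "id": "...", "remoteItem": { "id": "...", "parentReference": { "driveId": "..." } } }
+ // which is why the drive id and item id are lifted from 'remoteItem' in that case below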
+ if (isItemRemote(pathData)) { + // The path we are seeking is remote to our account drive id + searchItem.driveId = pathData["remoteItem"]["parentReference"]["driveId"].str; + searchItem.id = pathData["remoteItem"]["id"].str; + } else { + // The path we are seeking is local to our account drive id + searchItem.driveId = pathData["parentReference"]["driveId"].str; + searchItem.id = pathData["id"].str; + } + } catch (OneDriveException e) { + // Display error message + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + generateDeltaResponseOneDriveApiInstance.releaseCurlEngine(); + generateDeltaResponseOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // Must force exit here, allow logging to be done + forceExit(); + } + } else { + // When setSingleDirectoryScope() was called, the following were set to the correct items, even if the path was remote: + // - singleDirectoryScopeDriveId + // - singleDirectoryScopeItemId + // Reuse these previously set values + searchItem.driveId = singleDirectoryScopeDriveId; + searchItem.id = singleDirectoryScopeItemId; + } + + // Before we get any data from the OneDrive API, flag any child object in the database as out-of-sync for this driveId and object id + // Downgrade ONLY files associated with this driveId and idToQuery + addLogEntry("Downgrading all children for this searchItem.driveId (" ~ searchItem.driveId ~ ") and searchItem.id (" ~ searchItem.id ~ ") to an out-of-sync state", ["debug"]); + + Item[] drivePathChildren = getChildren(searchItem.driveId, searchItem.id); + if (count(drivePathChildren) > 0) { + // Children to process and flag as out-of-sync + foreach (drivePathChild; drivePathChildren) { + // Flag any object in the database as out-of-sync for this driveId and object id + addLogEntry("Downgrading item as out-of-sync: " ~ drivePathChild.id, ["debug"]); + itemDB.downgradeSyncStatusFlag(drivePathChild.driveId, drivePathChild.id); + } + } + // Clear DB response array + drivePathChildren = []; + + // Get drive details for the provided driveId + try { + driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id); + } catch (OneDriveException exception) { + addLogEntry("driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id) generated an OneDriveException", ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + + // Was a valid JSON response for 'driveData' provided?
+ if (driveData.type() == JSONType.object) { + + // Dynamic output for a non-verbose run so that the user knows something is happening + if (appConfig.verbosityCount == 0) { + if (!appConfig.suppressLoggingOutput) { + addProcessingLogHeaderEntry("Fetching items from the OneDrive API for Drive ID: " ~ searchItem.driveId, appConfig.verbosityCount); + } + } else { + addLogEntry("Generating a /delta response from the OneDrive API for Drive ID: " ~ searchItem.driveId, ["verbose"]); + } + + // Process this initial JSON response + if (!isItemRoot(driveData)) { + // Get root details for the provided driveId + try { + rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId); + } catch (OneDriveException exception) { + addLogEntry("rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId) generated an OneDriveException", ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + + } + // Add driveData JSON data to array + addLogEntry("Adding OneDrive root details for processing", ["verbose"]); + childrenData ~= rootData; + } + + // Add driveData JSON data to array + addLogEntry("Adding OneDrive folder details for processing", ["verbose"]); + childrenData ~= driveData; + } else { + // driveData is an invalid JSON object + writeln("CODING TO DO: The query of OneDrive API to getPathDetailsById generated an invalid JSON response - thus we can't build our own /delta simulated response ... how to handle?"); + // Must exit here + generateDeltaResponseOneDriveApiInstance.releaseCurlEngine(); + // Free object and memory + generateDeltaResponseOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // Must force exit here, allow logging to be done + forceExit(); + } + + // For each child object, query the OneDrive API + for (;;) { + // query top level children + try { + topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink); + } catch (OneDriveException exception) { + // OneDrive threw an error + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Query Error: topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)", ["debug"]); + addLogEntry("driveId: " ~ searchItem.driveId, ["debug"]); + addLogEntry("idToQuery: " ~ searchItem.id, ["debug"]); + addLogEntry("nextLink: " ~ nextLink, ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + + } + + // process top level children + addLogEntry("Adding " ~ to!string(count(topLevelChildren["value"].array)) ~ " OneDrive items for processing from the OneDrive 'root' folder", ["verbose"]); + + foreach (child; topLevelChildren["value"].array) { + // Check for any Client Side Filtering here ... we should skip querying the OneDrive API for 'folders' that we are going to just process and skip anyway. + // This avoids needless calls to the OneDrive API, and potentially speeds up this process.
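+ // For example (hypothetical configuration): with skip_dir = "Backups", a child folder named
+ // 'Backups' is matched by checkJSONAgainstClientSideFiltering() and skipped below, so neither it
+ // nor any of its children are queried or added to childrenData - one filter check in place of an
+ // entire subtree of OneDrive API calls.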
+ if (!checkJSONAgainstClientSideFiltering(child)) { + // add this child to the array of objects + childrenData ~= child; + // is this child a folder? + if (isItemFolder(child)) { + // We have to query this folder's children if childCount > 0 + if (child["folder"]["childCount"].integer > 0){ + // This child folder has children + string childIdToQuery = child["id"].str; + string childDriveToQuery = child["parentReference"]["driveId"].str; + auto childParentPath = child["parentReference"]["path"].str.split(":"); + string folderPathToScan = childParentPath[1] ~ "/" ~ child["name"].str; + + string pathForLogging; + // Are we in a --single-directory situation? If we are, the path we are using for logging needs to use the input path as a base + if (singleDirectoryScope) { + pathForLogging = appConfig.getValueString("single_directory") ~ "/" ~ child["name"].str; + } else { + pathForLogging = child["name"].str; + } + + // Query the children of this item + JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, pathForLogging); + foreach (grandChild; grandChildrenData.array) { + // add the grandchild to the array + childrenData ~= grandChild; + } + } + } + } + } + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response + // to indicate more items are available and provide the request URL for the next page of items. + if ("@odata.nextLink" in topLevelChildren) { + // Update nextLink to next changeSet bundle + nextLink = topLevelChildren["@odata.nextLink"].str; + addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + } else break; + } + + if (appConfig.verbosityCount == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.suppressLoggingOutput) { + addLogEntry("\n", ["consoleOnlyNoNewLine"]); + } + } + + // Craft response from all returned JSON elements + selfGeneratedDeltaResponse = [ + "@odata.context": JSONValue("https://graph.microsoft.com/v1.0/$metadata#Collection(driveItem)"), + "value": JSONValue(childrenData.array) + ]; + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + generateDeltaResponseOneDriveApiInstance.releaseCurlEngine(); + generateDeltaResponseOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // Return the generated JSON response + return selfGeneratedDeltaResponse; + } + + // Query the OneDrive API for the specified child id for any children objects + JSONValue[] queryForChildren(string driveId, string idToQuery, string childParentPath, string pathForLogging) { + + // function variables + JSONValue thisLevelChildren; + JSONValue[] thisLevelChildrenData; + string nextLink; - if (existingFileHash != uploadNewFileHash) { - // file was modified by Microsoft post upload to SharePoint site - log.vdebug("Existing Local File Hash: ", existingFileHash); - log.vdebug("New Remote File Hash: ", uploadNewFileHash); + // Create new OneDrive API Instance + OneDriveApi queryChildrenOneDriveApiInstance; + queryChildrenOneDriveApiInstance = new OneDriveApi(appConfig); + queryChildrenOneDriveApiInstance.initialise(); + + for (;;) { + // query this level children + try { + thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance); + } catch (OneDriveException exception) { + // MAY NEED FUTURE WORK HERE ..
YET TO TRIGGER THIS + writeln("CODING TO DO: EXCEPTION HANDLING NEEDED: thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance)"); + } + + if (appConfig.verbosityCount == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.suppressLoggingOutput) { + addProcessingDotEntry(); + } + } + + // Was a valid JSON response for 'thisLevelChildren' provided? + if (thisLevelChildren.type() == JSONType.object) { + // process this level children + if (!childParentPath.empty) { + // We don't use childParentPath to log, as this poses an information leak risk. + // The full parent path of the child, as per the JSON might be: + // /Level 1/Level 2/Level 3/Child Shared Folder/some folder/another folder + // But 'Child Shared Folder' is what is shared, thus '/Level 1/Level 2/Level 3/' is a potential information leak if logged. + // Plus, the application output now shows accurately what is being shared - so that is a good thing. + addLogEntry("Adding " ~ to!string(count(thisLevelChildren["value"].array)) ~ " OneDrive items for processing from " ~ pathForLogging, ["verbose"]); + } + foreach (child; thisLevelChildren["value"].array) { + // Check for any Client Side Filtering here ... we should skip querying the OneDrive API for 'folders' that we are going to just process and skip anyway. + // This avoids needless calls to the OneDrive API, and potentially speeds up this process. + if (!checkJSONAgainstClientSideFiltering(child)) { + // add this child to the array of objects + thisLevelChildrenData ~= child; + // is this child a folder? + if (isItemFolder(child)){ + // We have to query this folder's children if childCount > 0 + if (child["folder"]["childCount"].integer > 0){ + // This child folder has children + string childIdToQuery = child["id"].str; + string childDriveToQuery = child["parentReference"]["driveId"].str; + auto grandchildParentPath = child["parentReference"]["path"].str.split(":"); + string folderPathToScan = grandchildParentPath[1] ~ "/" ~ child["name"].str; + string newLoggingPath = pathForLogging ~ "/" ~ child["name"].str; + JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, newLoggingPath); + foreach (grandChild; grandChildrenData.array) { + // add the grandchild to the array + thisLevelChildrenData ~= grandChild; + } + } + } + } + } + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response + // to indicate more items are available and provide the request URL for the next page of items.
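+ // Sketch of the paging flow (the URL shown is a placeholder, not a real link): a large folder's
+ // first response may carry "@odata.nextLink": "https://graph.microsoft.com/v1.0/...?$skiptoken=...",
+ // and the loop below repeats the listChildren() call with that link until a response arrives
+ // without the property.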
+ if ("@odata.nextLink" in thisLevelChildren) { + // Update nextLink to next changeSet bundle + nextLink = thisLevelChildren["@odata.nextLink"].str; + addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + } else break; - if(!uploadOnly){ - // Download the Microsoft 'modified' file so 'local' is now in sync - log.vlog("Due to Microsoft Sharepoint 'enrichment' of files, downloading 'enriched' file to ensure local file is in-sync"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); - auto fileSize = response["size"].integer; - onedrive.downloadById(response["parentReference"]["driveId"].str, response["id"].str, path, fileSize); } else { - // we are not downloading a file, warn that file differences will exist - log.vlog("WARNING: Due to Microsoft Sharepoint 'enrichment' of files, this file is now technically different to your local copy"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); + // Invalid JSON response when querying this level children + addLogEntry("INVALID JSON response when attempting a retry of parent function - queryForChildren(driveId, idToQuery, childParentPath, pathForLogging)", ["debug"]); + + // retry thisLevelChildren = queryThisLevelChildren + addLogEntry("Thread sleeping for an additional 30 seconds", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + addLogEntry("Retry this call thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance)", ["debug"]); + thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance); } } - // return a JSON response so that it can be used and saved - return response; + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + queryChildrenOneDriveApiInstance.releaseCurlEngine(); + queryChildrenOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // return response + return thisLevelChildrenData; } - - // delete an item on OneDrive - private void uploadDeleteItem(Item item, const(string) path) - { - log.log("Deleting item from OneDrive: ", path); - bool flagAsBigDelete = false; + + // Query the OneDrive API for the child objects for this element + JSONValue queryThisLevelChildren(string driveId, string idToQuery, string nextLink, OneDriveApi queryChildrenOneDriveApiInstance) { - // query the database - how many objects will this remove? - auto children = getChildren(item.driveId, item.id); - long itemsToDelete = count(children); - log.vdebug("Number of items to delete: ", itemsToDelete); + // function variables + JSONValue thisLevelChildren; - // Are we running in monitor mode? A local delete of a file will issue a inotify event, which will trigger the local & remote data immediately - if (!cfg.getValueBool("monitor")) { - // not running in monitor mode - if (itemsToDelete > cfg.getValueLong("classify_as_big_delete")) { - // A big delete detected - flagAsBigDelete = true; - if (!cfg.getValueBool("force")) { - log.error("ERROR: An attempt to remove a large volume of data from OneDrive has been detected. 
Exiting client to preserve data on OneDrive"); - log.error("ERROR: To delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value"); - // Must exit here to preserve data on OneDrive - onedrive.shutdown(); - exit(-1); - } - } + // query children + try { + // attempt API call + addLogEntry("Attempting Query: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)", ["debug"]); + thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink); + addLogEntry("Query 'thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)' performed successfully", ["debug"]); + } catch (OneDriveException exception) { + // OneDrive threw an error + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Query Error: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)", ["debug"]); + addLogEntry("driveId: " ~ driveId, ["debug"]); + addLogEntry("idToQuery: " ~ idToQuery, ["debug"]); + addLogEntry("nextLink: " ~ nextLink, ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + + // return response + return thisLevelChildren; + } + + // Traverses the provided path online, via the OneDrive API, following correct parent driveId and itemId elements across the account + // to determine whether this full path exists. If this path exists online, the last item in the object path will be returned as a full JSON item. + // + // If the createPathIfMissing = false + no path exists online, a null invalid JSON item will be returned. + // If the createPathIfMissing = true + no path exists online, the requested path will be created in the correct location online. The resulting + // response to the directory creation will then be returned. + // + // This function also ensures that each element in the requested path actually matches the requested element, so that the OneDrive API response + // is not a false 'case insensitive' match to the actual request, which would be a POSIX compliance issue. + JSONValue queryOneDriveForSpecificPathAndCreateIfMissing(string thisNewPathToSearch, bool createPathIfMissing) { - if (!dryRun) { - // we are not in a --dry-run situation, process deletion to OneDrive - if ((item.driveId == "") && (item.id == "") && (item.eTag == "")){ - // These are empty ... we cannot delete if this is empty .... - log.vdebug("item.driveId, item.id & item.eTag are empty ... need to query OneDrive for values"); - log.vdebug("Checking OneDrive for path: ", path); - JSONValue onedrivePathDetails = onedrive.getPathDetails(path); // Returns a JSON String for the OneDrive Path - log.vdebug("OneDrive path details: ", onedrivePathDetails); - item.driveId = onedrivePathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1 - item.id = onedrivePathDetails["id"].str; // This item's ID. Should give something like 12345ABCDE1234A1!101 - item.eTag = onedrivePathDetails["eTag"].str; // Should be something like aNjM2NjJFRUVGQjY2NjJFMSE5MzUuMA - } - - // do the delete - try { - // what item are we trying to delete?
- log.vdebug("Attempting to delete item from drive: ", item.driveId); - log.vdebug("Attempting to delete this item id: ", item.id); - // perform the delete via the API - onedrive.deleteById(item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - // item.id, item.eTag could not be found on driveId - log.vlog("OneDrive reported: The resource could not be found."); + // function variables + JSONValue getPathDetailsAPIResponse; + string currentPathTree; + Item parentDetails; + JSONValue topLevelChildren; + string nextLink; + bool directoryFoundOnline = false; + bool posixIssue = false; + + // Create a new API Instance for this thread and initialise it + OneDriveApi queryOneDriveForSpecificPath; + queryOneDriveForSpecificPath = new OneDriveApi(appConfig); + queryOneDriveForSpecificPath.initialise(); + + foreach (thisFolderName; pathSplitter(thisNewPathToSearch)) { + addLogEntry("Testing for the existence online of this folder path: " ~ thisFolderName, ["debug"]); + directoryFoundOnline = false; + + // If this is '.' this is the account root + if (thisFolderName == ".") { + currentPathTree = thisFolderName; + } else { + currentPathTree = currentPathTree ~ "/" ~ thisFolderName; + } + + addLogEntry("Attempting to query OneDrive for this path: " ~ currentPathTree, ["debug"]); + + // What query do we use? + if (thisFolderName == ".") { + // Query the root, set the right details + try { + getPathDetailsAPIResponse = queryOneDriveForSpecificPath.getPathDetails(currentPathTree); + parentDetails = makeItem(getPathDetailsAPIResponse); + // Save item to the database + saveItem(getPathDetailsAPIResponse); + directoryFoundOnline = true; + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + + } + } else { + // Ensure we have a valid driveId to search here + if (parentDetails.driveId.empty) { + parentDetails.driveId = appConfig.defaultDriveId; + } + + // If the prior JSON 'getPathDetailsAPIResponse' is on this account driveId .. then continue to use getPathDetails + if (parentDetails.driveId == appConfig.defaultDriveId) { + + try { + // Query OneDrive API for this path + getPathDetailsAPIResponse = queryOneDriveForSpecificPath.getPathDetails(currentPathTree); + + // Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API + if (hasName(getPathDetailsAPIResponse)) { + performPosixTest(thisFolderName, getPathDetailsAPIResponse["name"].str); + } else { + throw new JsonResponseException("Unable to perform POSIX test as the OneDrive API request generated an invalid JSON response"); + } + + // No POSIX issue with requested path element + parentDetails = makeItem(getPathDetailsAPIResponse); + // Save item to the database + saveItem(getPathDetailsAPIResponse); + directoryFoundOnline = true; + + // Is this JSON a remote object + addLogEntry("Testing if this is a remote Shared Folder", ["debug"]); + if (isItemRemote(getPathDetailsAPIResponse)) { + // Remote Directory .. 
need a DB Tie Record + createDatabaseTieRecordForOnlineSharedFolder(parentDetails); + + // Temp DB Item to bind the 'remote' path to our parent path + Item tempDBItem; + // Set the name + tempDBItem.name = parentDetails.name; + // Set the correct item type + tempDBItem.type = ItemType.dir; + // Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie + tempDBItem.driveId = parentDetails.remoteDriveId; + tempDBItem.id = parentDetails.remoteId; + // Set the correct mtime + tempDBItem.mtime = parentDetails.mtime; + + // Update parentDetails to use this temp record + parentDetails = tempDBItem; + } + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 404) { + directoryFoundOnline = false; + } else { + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + + } + } catch (JsonResponseException e) { + addLogEntry(e.msg, ["debug"]); + } } else { - // Not a 404 response .. is this a 401 response due to some sort of OneDrive Business security policy? - if ((e.httpStatusCode == 401) && (accountType != "personal")) { - log.vdebug("onedrive.deleteById generated a 401 error response when attempting to delete object by item id"); - auto errorArray = splitLines(e.msg); - JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); - if (errorMessage["error"]["message"].str == "Access denied. You do not have permission to perform this action or access this resource.") { - // Issue #1041 - Unable to delete OneDrive content when permissions prevent deletion - try { - log.vdebug("Attempting a reverse delete of all child objects from OneDrive"); - foreach_reverse (Item child; children) { - log.vdebug("Delete child item from drive: ", child.driveId); - log.vdebug("Delete this child item id: ", child.id); - onedrive.deleteById(child.driveId, child.id, child.eTag); - // delete the child reference in the local database - itemdb.deleteById(child.driveId, child.id); + // parentDetails.driveId is not the account drive id - thus will be a remote shared item + addLogEntry("This parent directory is a remote object - this next path will be on a remote drive", ["debug"]); + + // For this parentDetails.driveId, parentDetails.id object, query the OneDrive API for its children + for (;;) { + // Query this remote object for its children + topLevelChildren = queryOneDriveForSpecificPath.listChildren(parentDetails.driveId, parentDetails.id, nextLink); + // Process each child + foreach (child; topLevelChildren["value"].array) { + // Is this child a folder? + if (isItemFolder(child)) { + // Is this the child folder we are looking for, and is a POSIX match? + if (child["name"].str == thisFolderName) { + // EXACT MATCH including case sensitivity: Flag that we found the folder online + directoryFoundOnline = true; + // Use these details for the next entry path + getPathDetailsAPIResponse = child; + parentDetails = makeItem(getPathDetailsAPIResponse); + // Save item to the database + saveItem(getPathDetailsAPIResponse); + // No need to continue searching + break; + } else { + string childAsLower = toLower(child["name"].str); + string thisFolderNameAsLower = toLower(thisFolderName); + if (childAsLower == thisFolderNameAsLower) { + // This is a POSIX 'case-insensitive match' .....
+ // Local item name has a 'case-insensitive match' to an existing item on OneDrive + posixIssue = true; + throw new PosixException(thisFolderName, child["name"].str); + } } - log.vdebug("Delete parent item from drive: ", item.driveId); - log.vdebug("Delete this parent item id: ", item.id); - onedrive.deleteById(item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("A further error was generated when attempting a reverse delete of objects from OneDrive"); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } + } + + if (directoryFoundOnline) { + // We found the folder, no need to continue searching nextLink data + break; + } + + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response + // to indicate more items are available and provide the request URL for the next page of items. + if ("@odata.nextLink" in topLevelChildren) { + // Update nextLink to next changeSet bundle + nextLink = topLevelChildren["@odata.nextLink"].str; + addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + } else break; } - - // Not a 404 response .. is this a 403 response due to OneDrive Business Retention Policy being enabled? - if ((e.httpStatusCode == 403) && (accountType != "personal")) { - log.vdebug("onedrive.deleteById generated a 403 error response when attempting to delete object by item id"); - auto errorArray = splitLines(e.msg); - JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); - if (errorMessage["error"]["message"].str == "Request was cancelled by event received. If attempting to delete a non-empty folder, it's possible that it's on hold") { - // Issue #338 - Unable to delete OneDrive content when OneDrive Business Retention Policy is enabled + } + } + + // If we did not find the folder, we need to create this folder + if (!directoryFoundOnline) { + // Folder not found online + // Set any response to be an invalid JSON item + getPathDetailsAPIResponse = null; + // Was there a POSIX issue?
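+ // For example (illustrative names only): a local folder 'photos' and an online folder 'Photos'
+ // are distinct paths on a POSIX filesystem, but collide on the case-insensitive OneDrive side,
+ // so the search above raises a PosixException rather than silently binding the local path to the
+ // wrong online item.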
+ if (!posixIssue) { + // No POSIX issue + if (createPathIfMissing) { + // Create this path as it is missing on OneDrive online and there is no POSIX issue with a 'case-insensitive match' + addLogEntry("FOLDER NOT FOUND ONLINE AND WE ARE REQUESTED TO CREATE IT", ["debug"]); + addLogEntry("Create folder on this drive: " ~ parentDetails.driveId, ["debug"]); + addLogEntry("Create folder as a child on this object: " ~ parentDetails.id, ["debug"]); + addLogEntry("Create this folder name: " ~ thisFolderName, ["debug"]); + + // Generate the JSON needed to create the folder online + JSONValue newDriveItem = [ + "name": JSONValue(thisFolderName), + "folder": parseJSON("{}") + ]; + + JSONValue createByIdAPIResponse; + // Submit the creation request + // Fix for https://github.com/skilion/onedrive/issues/356 + if (!dryRun) { try { - log.vdebug("Attempting a reverse delete of all child objects from OneDrive"); - foreach_reverse (Item child; children) { - log.vdebug("Delete child item from drive: ", child.driveId); - log.vdebug("Delete this child item id: ", child.id); - onedrive.deleteById(child.driveId, child.id, child.eTag); - // delete the child reference in the local database - itemdb.deleteById(child.driveId, child.id); - } - log.vdebug("Delete parent item from drive: ", item.driveId); - log.vdebug("Delete this parent item id: ", item.id); - onedrive.deleteById(item.driveId, item.id, item.eTag); + // Attempt to create a new folder on the configured parent driveId & parent id + createByIdAPIResponse = queryOneDriveForSpecificPath.createById(parentDetails.driveId, parentDetails.id, newDriveItem); + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(createByIdAPIResponse); + // Set getPathDetailsAPIResponse to createByIdAPIResponse + getPathDetailsAPIResponse = createByIdAPIResponse; } catch (OneDriveException e) { - // display what the error is - log.vdebug("A further error was generated when attempting a reverse delete of objects from OneDrive"); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + // 409 - API Race Condition + if (e.httpStatusCode == 409) { + // When we attempted to create it, OneDrive responded that it now already exists + addLogEntry("OneDrive reported that " ~ thisFolderName ~ " already exists .. 
OneDrive API race condition", ["verbose"]); + } else { + // some other error from OneDrive was returned - display what it is + addLogEntry("OneDrive generated an error when creating this path: " ~ thisFolderName); + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } + } + } else { + // Simulate a successful 'directory create' & save it to the dryRun database copy + // The simulated response has to pass 'makeItem' as part of saveItem + auto fakeResponse = createFakeResponse(thisNewPathToSearch); + // Save item to the database + saveItem(fakeResponse); + } + } + } + } - - // delete the reference in the local database - itemdb.deleteById(item.driveId, item.id); - if (item.remoteId != null) { - // If the item is a remote item, delete the reference in the local database - itemdb.deleteById(item.remoteDriveId, item.remoteId); - } - } + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + queryOneDriveForSpecificPath.releaseCurlEngine(); + queryOneDriveForSpecificPath = null; + // Perform Garbage Collection + GC.collect(); + + // Output our search results + addLogEntry("queryOneDriveForSpecificPathAndCreateIfMissing.getPathDetailsAPIResponse = " ~ to!string(getPathDetailsAPIResponse), ["debug"]); + return getPathDetailsAPIResponse; } - - // get the children of an item id from the database - private Item[] getChildren(string driveId, string id) - { - Item[] children; - children ~= itemdb.selectChildren(driveId, id); - foreach (Item child; children) { - if (child.type != ItemType.file) { - // recursively get the children of this child - children ~= getChildren(child.driveId, child.id); + + // Delete an item by its path + // This function is only used in --monitor mode and by the --remove-directory directive + void deleteByPath(string path) { + + // function variables + Item dbItem; + + // Need to check all driveIds we know about, not just the defaultDriveId + bool itemInDB = false; + foreach (searchDriveId; onlineDriveDetails.keys) { + if (itemDB.selectByPath(path, searchDriveId, dbItem)) { + // item was found in the DB + itemInDB = true; + break; } } - return children; - } - - // update the item's last modified time - private void uploadLastModifiedTime(const(char)[] driveId, const(char)[] id, const(char)[] eTag, SysTime mtime) - { - string itemModifiedTime; - itemModifiedTime = mtime.toISOExtString(); - JSONValue data = [ - "fileSystemInfo": JSONValue([ - "lastModifiedDateTime": itemModifiedTime - ]) - ]; - JSONValue response; + // Was the item found in the database? + if (!itemInDB) { + // path to delete is not in the local database .. + // was this a --remove-directory attempt? + if (!appConfig.getValueBool("monitor")) { + // --remove-directory deletion attempt + addLogEntry("The item to delete is not in the local database - unable to delete online"); + return; + } else { + // normal use ..
--monitor being used + throw new SyncException("The item to delete is not in the local database"); + } + } + + // This needs to be enforced as we have to know the parent id of the object being deleted + if (dbItem.parentId == null) { + // the item is a remote folder, need to do the operation on the parent + enforce(itemDB.selectByPathIncludingRemoteItems(path, appConfig.defaultDriveId, dbItem)); + } + try { - response = onedrive.updateById(driveId, id, data, eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 412) { - // OneDrive threw a 412 error, most likely: ETag does not match current item's value - // Retry without eTag - log.vdebug("File Metadata Update Failed - OneDrive eTag / cTag match issue"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting file time stamp update - gracefully handling error"); - string nullTag = null; - response = onedrive.updateById(driveId, id, data, nullTag); + if (noRemoteDelete) { + // do not process remote delete + addLogEntry("Skipping remote delete as --upload-only & --no-remote-delete configured", ["verbose"]); + } else { + uploadDeletedItem(dbItem, path); } - } - // save the updated response from OneDrive in the database - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } - - // save item details into database - private void saveItem(JSONValue jsonItem) - { - // jsonItem has to be a valid object - if (jsonItem.type() == JSONType.object){ - // Check if the response JSON has an 'id', otherwise makeItem() fails with 'Key not found: id' - if (hasId(jsonItem)) { - // Are we in a --upload-only & --remove-source-files scenario? - // We do not want to add the item to the database in this situation as there is no local reference to the file post file deletion - // If the item is a directory, we need to add this to the DB, if this is a file, we dont add this, the parent path is not in DB, thus any new files in this directory are not added - if ((uploadOnly) && (localDeleteAfterUpload) && (isItemFile(jsonItem))) { - // Log that we skipping adding item to the local DB and the reason why - log.vdebug("Skipping adding to database as --upload-only & --remove-source-files configured"); - } else { - // What is the JSON item we are trying to create a DB record with? 
- log.vdebug("Creating DB item from this JSON: ", jsonItem); - // Takes a JSON input and formats to an item which can be used by the database - Item item = makeItem(jsonItem); - // Add to the local database - log.vdebug("Adding to database: ", item); - itemdb.upsert(item); - - // If we have a remote drive ID, add this to our list of known drive id's - if (!item.remoteDriveId.empty) { - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, item.remoteDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= item.remoteDriveId; - } - } - } + } catch (OneDriveException e) { + if (e.httpStatusCode == 404) { + addLogEntry(e.msg); } else { - // log error - log.error("ERROR: OneDrive response missing required 'id' element"); - log.error("ERROR: ", jsonItem); + // display what the error is + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); } - } else { - // log error - log.error("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object"); - log.error("ERROR: Increase logging verbosity to assist determining why."); } } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_move // This function is only called in monitor mode when an move event is coming from // inotify and we try to move the item. - void uploadMoveItem(string from, string to) - { - log.log("Moving ", from, " to ", to); - - // 'to' file validation .. is the 'to' file valid for upload? - if (isSymlink(to)) { - // if config says so we skip all symlinked items - if (cfg.getValueBool("skip_symlinks")) { - log.vlog("Skipping item - skip symbolic links configured: ", to); - return; - - } - // skip unexisting symbolic links - else if (!exists(readLink(to))) { - log.logAndNotify("Skipping item - invalid symbolic link: ", to); - return; + void uploadMoveItem(string oldPath, string newPath) { + // Log that we are doing a move + addLogEntry("Moving " ~ oldPath ~ " to " ~ newPath); + // Is this move unwanted? 
+ bool unwanted = false; + // Item variables + Item oldItem, newItem, parentItem; + + // This is not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly + // Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252 + if (!unwanted) { + if(!isValid(newPath)) { + // Path is not valid according to https://dlang.org/phobos/std_encoding.html + addLogEntry("Skipping item - invalid character encoding sequence: " ~ newPath, ["info", "notify"]); + unwanted = true; } } - // Check against Microsoft OneDrive restriction and limitations about Windows naming files - if (!isValidName(to)) { - log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", to); - return; + // Check this path against the Client Side Filtering Rules + // - check_nosync + // - skip_dotfiles + // - skip_symlinks + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + if (!unwanted) { + unwanted = checkPathAgainstClientSideFiltering(newPath); } - // Check for bad whitespace items - if (!containsBadWhiteSpace(to)) { - log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", to); - return; + // Check this path against the Microsoft Naming Conventions & Restrictions + // - Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders + // - Check path for bad whitespace items + // - Check path for HTML ASCII Codes + // - Check path for ASCII Control Codes + if (!unwanted) { + unwanted = checkPathAgainstMicrosoftNamingRestrictions(newPath); } - // Check for HTML ASCII Codes as part of file name - if (!containsASCIIHTMLCodes(to)) { - log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", to); - return; - } + // 'newPath' has passed client side filtering validation + if (!unwanted) { - // 'to' file has passed file validation - Item fromItem, toItem, parentItem; - if (!itemdb.selectByPath(from, defaultDriveId, fromItem)) { - if (cfg.getValueBool("skip_dotfiles") && isDotFile(to)){ - log.log("Skipping upload due to skip_dotfile = true"); - return; - } else { - uploadNewFile(to); + if (!itemDB.selectByPath(oldPath, appConfig.defaultDriveId, oldItem)) { + // The old path|item is not synced with the database, upload as a new file + addLogEntry("Moved local item was not in-sync with local database - uploading as new item"); + scanLocalFilesystemPathForNewData(newPath); return; } - } - if (fromItem.parentId == null) { - // the item is a remote folder, need to do the operation on the parent - enforce(itemdb.selectByPathWithoutRemote(from, defaultDriveId, fromItem)); - } - if (itemdb.selectByPath(to, defaultDriveId, toItem)) { - // the destination has been overwritten - uploadDeleteItem(toItem, to); - } - if (!itemdb.selectByPath(dirName(to), defaultDriveId, parentItem)) { - // the parent item is not in the database - - // is the destination a .folder that is being skipped? - if (cfg.getValueBool("skip_dotfiles")) { - if (isDotFile(dirName(to))) { - // target location is a .folder - log.vdebug("Target location is excluded from sync due to skip_dotfiles = true"); - // item will have been moved locally, but as this is now to a location that is not synced, needs to be removed from OneDrive - log.log("Item has been moved to a location that is excluded from sync operations.
Removing item from OneDrive"); - uploadDeleteItem(fromItem, from); - return; - } + + if (oldItem.parentId == null) { + // the item is a remote folder, need to do the operation on the parent + enforce(itemDB.selectByPathIncludingRemoteItems(oldPath, appConfig.defaultDriveId, oldItem)); } - - // some other error - throw new SyncException("Can't move an item to an unsynced directory"); - } - if (cfg.getValueBool("skip_dotfiles") && isDotFile(to)){ - log.log("Removing item from OneDrive due to skip_dotfiles = true"); - uploadDeleteItem(fromItem, from); - return; - } - if (fromItem.driveId != parentItem.driveId) { - // items cannot be moved between drives - uploadDeleteItem(fromItem, from); - uploadNewFile(to); - } else { - if (!exists(to)) { - log.vlog("uploadMoveItem target has disappeared: ", to); - return; + + if (itemDB.selectByPath(newPath, appConfig.defaultDriveId, newItem)) { + // the destination has been overwritten + addLogEntry("Moved local item overwrote an existing item - deleting old online item"); + uploadDeletedItem(newItem, newPath); } - SysTime mtime = timeLastModified(to).toUTC(); - JSONValue diff = [ - "name": JSONValue(baseName(to)), - "parentReference": JSONValue([ - "id": parentItem.id - ]), - "fileSystemInfo": JSONValue([ - "lastModifiedDateTime": mtime.toISOExtString() - ]) - ]; - // Perform the move operation on OneDrive - JSONValue response; - try { - response = onedrive.updateById(fromItem.driveId, fromItem.id, diff, fromItem.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 412) { - // OneDrive threw a 412 error, most likely: ETag does not match current item's value - // Retry without eTag - log.vdebug("File Move Failed - OneDrive eTag / cTag match issue"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error"); - string nullTag = null; - // move the file but without the eTag - response = onedrive.updateById(fromItem.driveId, fromItem.id, diff, nullTag); - } - } - // save the move response from OneDrive in the database - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } - } - - // delete an item by it's path - void deleteByPath(const(string) path) - { - Item item; - // Need to check all driveid's we know about, not just the defaultDriveId - bool itemInDB = false; - foreach (searchDriveId; driveIDsArray) { - if (itemdb.selectByPath(path, searchDriveId, item)) { - // item was found in the DB - itemInDB = true; - break; + if (!itemDB.selectByPath(dirName(newPath), appConfig.defaultDriveId, parentItem)) { + // the parent item is not in the database + throw new SyncException("Can't move an item to an unsynced directory"); } - } - if (!itemInDB) { - throw new SyncException("The item to delete is not in the local database"); - } - if (item.parentId == null) { - // the item is a remote folder, need to do the operation on the parent - enforce(itemdb.selectByPathWithoutRemote(path, defaultDriveId, item)); - } - try { - if (noRemoteDelete) { - // do not process remote delete - log.vlog("Skipping remote delete as --upload-only & --no-remote-delete configured"); + if (oldItem.driveId != parentItem.driveId) { + // items cannot be moved between drives + uploadDeletedItem(oldItem, oldPath); + + // what sort of move is this? 
+ if (isFile(newPath)) { + // newPath is a file + uploadNewFile(newPath); + } else { + // newPath is a directory + scanLocalFilesystemPathForNewData(newPath); + } } else { - uploadDeleteItem(item, path); + if (!exists(newPath)) { + // is this --monitor use? + if (appConfig.getValueBool("monitor")) { + addLogEntry("uploadMoveItem target has disappeared: " ~ newPath, ["verbose"]); + return; + } + } + + // Configure the modification JSON item + SysTime mtime; + if (appConfig.getValueBool("monitor")) { + // Use the newPath modified timestamp + mtime = timeLastModified(newPath).toUTC(); + } else { + // Use the current system time + mtime = Clock.currTime().toUTC(); + } + + JSONValue data = [ + "name": JSONValue(baseName(newPath)), + "parentReference": JSONValue([ + "id": parentItem.id + ]), + "fileSystemInfo": JSONValue([ + "lastModifiedDateTime": mtime.toISOExtString() + ]) + ]; + + // Perform the move operation on OneDrive + bool isMoveSuccess = false; + JSONValue response; + string eTag = oldItem.eTag; + + // Create a new API Instance for this thread and initialise it + OneDriveApi movePathOnlineApiInstance; + movePathOnlineApiInstance = new OneDriveApi(appConfig); + movePathOnlineApiInstance.initialise(); + + // Try the online move + for (int i = 0; i < 3; i++) { + try { + response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, eTag); + isMoveSuccess = true; + break; + } catch (OneDriveException e) { + // Handle a 412 - A precondition provided in the request (such as an if-match header) does not match the resource's current state. + if (e.httpStatusCode == 412) { + // OneDrive threw a 412 error, most likely: ETag does not match current item's value + // Retry without eTag + addLogEntry("File Move Failed - OneDrive eTag / cTag match issue", ["debug"]); + addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error", ["verbose"]); + eTag = null; + // Retry to move the file but without the eTag, via the for() loop + } else if (e.httpStatusCode == 409) { + // Destination item already exists and is a conflict, delete it first + addLogEntry("Moved local item overwrote an existing item - deleting old online item"); + uploadDeletedItem(newItem, newPath); + } else + break; + } + } + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + movePathOnlineApiInstance.releaseCurlEngine(); + movePathOnlineApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // save the move response from OneDrive in the database + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(response); } - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - log.log(e.msg); + } else { + // Moved item is unwanted + addLogEntry("Item has been moved to a location that is excluded from sync operations. 
Removing item from OneDrive"); + uploadDeletedItem(oldItem, oldPath); + } + } + + // Perform integrity validation of the file that was uploaded + bool performUploadIntegrityValidationChecks(JSONValue uploadResponse, string localFilePath, ulong localFileSize, bool raiseWarning=true) { + + bool integrityValid = false; + + if (!disableUploadValidation) { + // Integrity validation has not been disabled (this is the default so we are always integrity checking our uploads) + if (uploadResponse.type() == JSONType.object) { + // Provided JSON is a valid JSON + ulong uploadFileSize = uploadResponse["size"].integer; + string uploadFileHash = uploadResponse["file"]["hashes"]["quickXorHash"].str; + string localFileHash = computeQuickXorHash(localFilePath); + + if ((localFileSize == uploadFileSize) && (localFileHash == uploadFileHash)) { + // Uploaded file integrity intact + addLogEntry("Uploaded local file matches reported online size and hash values", ["debug"]); + integrityValid = true; + } else if (raiseWarning) { + // Upload integrity failure .. what failed? + // There are 2 scenarios where this happens: + // 1. Failed Transfer + // 2. Upload file is going to a SharePoint Site, where Microsoft enriches the file with additional metadata with no way to disable + addLogEntry("WARNING: Online file integrity failure for: " ~ localFilePath, ["info", "notify"]); + + // What integrity failed - size? + if (localFileSize != uploadFileSize) { + addLogEntry("WARNING: Online file integrity failure - Size Mismatch", ["verbose"]); + } + + // What integrity failed - hash? + if (localFileHash != uploadFileHash) { + addLogEntry("WARNING: Online file integrity failure - Hash Mismatch", ["verbose"]); + } + + // What account type is this? + if (appConfig.accountType != "personal") { + // Not a personal account, thus the integrity failure is most likely due to SharePoint + addLogEntry("CAUTION: When you upload files to Microsoft OneDrive that uses SharePoint as its backend, Microsoft OneDrive will alter your files post upload.", ["verbose"]); + addLogEntry("CAUTION: This will lead to technical differences between the version stored online and your local original file, potentially causing issues with the accuracy or consistency of your data.", ["verbose"]); + addLogEntry("CAUTION: Please read https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details.", ["verbose"]); + } + // How can this be disabled? + addLogEntry("To disable the integrity checking of uploaded files use --disable-upload-validation"); + } } else { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + addLogEntry("Online file validation unable to be performed: input JSON was invalid"); + addLogEntry("WARNING: Skipping upload integrity check for: " ~ localFilePath); } - } - } - - // move a OneDrive folder from one name to another - void moveByPath(const(string) source, const(string) destination) - { - log.vlog("Moving remote folder: ", source, " -> ", destination); - - // Source and Destination are relative to ~/OneDrive - string sourcePath = source; - string destinationBasePath = dirName(destination).idup; - - // if destinationBasePath == '.' 
then destinationBasePath needs to be "" - if (destinationBasePath == ".") { - destinationBasePath = ""; + } else { + // We are bypassing integrity checks due to --disable-upload-validation + addLogEntry("Online file validation disabled due to --disable-upload-validation", ["debug"]); + addLogEntry("WARNING: Skipping upload integrity check for: " ~ localFilePath, ["info", "notify"]); } - string newFolderName = baseName(destination).idup; - string destinationPathString = "/drive/root:/" ~ destinationBasePath; - - // Build up the JSON changes - JSONValue moveData = ["name": newFolderName]; - JSONValue destinationPath = ["path": destinationPathString]; - moveData["parentReference"] = destinationPath; - - // Make the change on OneDrive - auto res = onedrive.moveByPath(sourcePath, moveData); + // Is the file integrity online valid? + return integrityValid; } - // Query Office 365 SharePoint Shared Library site to obtain it's Drive ID - void querySiteCollectionForDriveID(string o365SharedLibraryName) - { + // Query Office 365 SharePoint Shared Library site name to obtain its Drive ID + void querySiteCollectionForDriveID(string sharepointLibraryNameToQuery) { // Steps to get the ID: // 1. Query https://graph.microsoft.com/v1.0/sites?search= with the name entered // 2. Evaluate the response. A valid response will contain the description and the id. If the response comes back with nothing, the site name cannot be found or no access @@ -6221,95 +7231,99 @@ final class SyncEngine string nextLink; string[] siteSearchResults; + // Create a new API Instance for this thread and initialise it + OneDriveApi querySharePointLibraryNameApiInstance; + querySharePointLibraryNameApiInstance = new OneDriveApi(appConfig); + querySharePointLibraryNameApiInstance.initialise(); + // The account type must not be a personal account type - if (accountType == "personal"){ - log.error("ERROR: A OneDrive Personal Account cannot be used with --get-O365-drive-id. Please re-authenticate your client using a OneDrive Business Account."); + if (appConfig.accountType == "personal") { + addLogEntry("ERROR: A OneDrive Personal Account cannot be used with --get-sharepoint-drive-id. Please re-authenticate your client using a OneDrive Business Account."); return; } // What query are we performing? - log.log("Office 365 Library Name Query: ", o365SharedLibraryName); + addLogEntry(); + addLogEntry("Office 365 Library Name Query: " ~ sharepointLibraryNameToQuery); for (;;) { try { - siteQuery = onedrive.o365SiteSearch(nextLink); + siteQuery = querySharePointLibraryNameApiInstance.o365SiteSearch(nextLink); } catch (OneDriveException e) { - log.error("ERROR: Query of OneDrive for Office 365 Library Name failed"); + addLogEntry("ERROR: Query of OneDrive for Office 365 Library Name failed"); // Forbidden - most likely authentication scope needs to be updated if (e.httpStatusCode == 403) { - log.error("ERROR: Authentication scope needs to be updated. Use --reauth and re-authenticate client."); + addLogEntry("ERROR: Authentication scope needs to be updated.
Use --reauth and re-authenticate client."); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + querySharePointLibraryNameApiInstance.releaseCurlEngine(); + querySharePointLibraryNameApiInstance = null; + // Perform Garbage Collection + GC.collect(); return; } + // Requested resource cannot be found if (e.httpStatusCode == 404) { string siteSearchUrl; if (nextLink.empty) { - siteSearchUrl = onedrive.getSiteSearchUrl(); + siteSearchUrl = querySharePointLibraryNameApiInstance.getSiteSearchUrl(); } else { siteSearchUrl = nextLink; } // log the error - log.error("ERROR: Your OneDrive Account and Authentication Scope cannot access this OneDrive API: ", siteSearchUrl); - log.error("ERROR: To resolve, please discuss this issue with whomever supports your OneDrive and SharePoint environment."); - return; - } - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); - } - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query Sharepoint Sites - retrying applicable request"); - log.vdebug("siteQuery = onedrive.o365SiteSearch(nextLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: siteQuery = onedrive.o365SiteSearch(nextLink)"); - siteQuery = onedrive.o365SiteSearch(nextLink); - log.vdebug("Query 'siteQuery = onedrive.o365SiteSearch(nextLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: siteQuery = onedrive.o365SiteSearch(nextLink) on re-try after delay"); - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } else { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + addLogEntry("ERROR: Your OneDrive Account and Authentication Scope cannot access this OneDrive API: " ~ siteSearchUrl); + addLogEntry("ERROR: To resolve, please discuss this issue with whomever supports your OneDrive and SharePoint environment."); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + querySharePointLibraryNameApiInstance.releaseCurlEngine(); + querySharePointLibraryNameApiInstance = null; + // Perform Garbage Collection + GC.collect(); return; } + + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + querySharePointLibraryNameApiInstance.releaseCurlEngine(); + querySharePointLibraryNameApiInstance = null; + // Perform Garbage Collection + GC.collect(); + return; } // is siteQuery a valid JSON object & contain data we can use? 
if ((siteQuery.type() == JSONType.object) && ("value" in siteQuery)) { // valid JSON object - log.vdebug("O365 Query Response: ", siteQuery); + addLogEntry("O365 Query Response: " ~ to!string(siteQuery), ["debug"]); foreach (searchResult; siteQuery["value"].array) { - // Need an 'exclusive' match here with o365SharedLibraryName as entered - log.vdebug("Found O365 Site: ", searchResult); + // Need an 'exclusive' match here with sharepointLibraryNameToQuery as entered + addLogEntry("Found O365 Site: " ~ to!string(searchResult), ["debug"]); // 'displayName' and 'id' have to be present in the search result record in order to query the site if (("displayName" in searchResult) && ("id" in searchResult)) { - if (o365SharedLibraryName == searchResult["displayName"].str){ + if (sharepointLibraryNameToQuery == searchResult["displayName"].str){ // 'displayName' matches search request site_id = searchResult["id"].str; JSONValue siteDriveQuery; try { - siteDriveQuery = onedrive.o365SiteDrives(site_id); + siteDriveQuery = querySharePointLibraryNameApiInstance.o365SiteDrives(site_id); } catch (OneDriveException e) { - log.error("ERROR: Query of OneDrive for Office Site ID failed"); + addLogEntry("ERROR: Query of OneDrive for Office Site ID failed"); // display what the error is displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + querySharePointLibraryNameApiInstance.releaseCurlEngine(); + querySharePointLibraryNameApiInstance = null; + // Perform Garbage Collection + GC.collect(); return; } @@ -6319,7 +7333,7 @@ final class SyncEngine foreach (driveResult; siteDriveQuery["value"].array) { // Display results writeln("-----------------------------------------------"); - log.vdebug("Site Details: ", driveResult); + addLogEntry("Site Details: " ~ to!string(driveResult), ["debug"]); found = true; writeln("Site Name: ", searchResult["displayName"].str); writeln("Library Name: ", driveResult["name"].str); @@ -6330,8 +7344,14 @@ final class SyncEngine writeln("-----------------------------------------------"); } else { // not a valid JSON object - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); + addLogEntry("ERROR: There was an error performing this operation on Microsoft OneDrive"); + addLogEntry("ERROR: Increase logging verbosity to assist determining why."); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + querySharePointLibraryNameApiInstance.releaseCurlEngine(); + querySharePointLibraryNameApiInstance = null; + // Perform Garbage Collection + GC.collect(); return; } } @@ -6345,13 +7365,13 @@ final class SyncEngine if ("id" in searchResult) idAvailable = true; // Display error details for this site data - writeln(); - log.error("ERROR: SharePoint Site details not provided for: ", siteNameAvailable); - log.error("ERROR: The SharePoint Site results returned from OneDrive API do not contain the required items to match. 
 Please check your permissions with your site administrator.");
-						log.error("ERROR: Your site security settings is preventing the following details from being accessed: 'displayName' or 'id'");
-						log.vlog(" - Is 'displayName' available = ", displayNameAvailable);
-						log.vlog(" - Is 'id' available = ", idAvailable);
-						log.error("ERROR: To debug this further, please increase verbosity (--verbose or --verbose --verbose) to provide further insight as to what details are actually being returned.");
+						addLogEntry();
+						addLogEntry("ERROR: SharePoint Site details not provided for: " ~ siteNameAvailable);
+						addLogEntry("ERROR: The SharePoint Site results returned from OneDrive API do not contain the required items to match. Please check your permissions with your site administrator.");
+						addLogEntry("ERROR: Your site security settings are preventing the following details from being accessed: 'displayName' or 'id'");
+						addLogEntry(" - Is 'displayName' available = " ~ to!string(displayNameAvailable), ["verbose"]);
+						addLogEntry(" - Is 'id' available = " ~ to!string(idAvailable), ["verbose"]);
+						addLogEntry("ERROR: To debug this further, please increase verbosity (--verbose or --verbose --verbose) to provide further insight as to what details are actually being returned.");
					}
				}
@@ -6372,15 +7392,21 @@ final class SyncEngine
					siteSearchResults ~= siteSearchResultsEntry;
				} else {
					// displayName and id unavailable, display in debug log the entry
-					log.vdebug("Bad SharePoint Data for site: ", searchResult);
+					addLogEntry("Bad SharePoint Data for site: " ~ to!string(searchResult), ["debug"]);
				}
			}
		}
	}
} else {
	// not a valid JSON object
-	log.error("ERROR: There was an error performing this operation on OneDrive");
-	log.error("ERROR: Increase logging verbosity to assist determining why.");
+	addLogEntry("ERROR: There was an error performing this operation on Microsoft OneDrive");
+	addLogEntry("ERROR: Increase logging verbosity to assist determining why.");
+
+	// OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+	querySharePointLibraryNameApiInstance.releaseCurlEngine();
+	querySharePointLibraryNameApiInstance = null;
+	// Perform Garbage Collection
+	GC.collect();
	return;
}
@@ -6389,914 +7415,1231 @@ final class SyncEngine
	if ("@odata.nextLink" in siteQuery) {
		// Update nextLink to next set of SharePoint library names
		nextLink = siteQuery["@odata.nextLink"].str;
-		log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink);
+		addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]);
	} else break;
}

// Was the intended target found?
if(!found) {
-	writeln();
-	log.error("ERROR: The requested SharePoint site could not be found. Please check it's name and your permissions to access the site.");
+	// Was the search a wildcard?
+	if (sharepointLibraryNameToQuery != "*") {
+		// Only print this out if the search was not a wildcard
+		addLogEntry();
+		addLogEntry("ERROR: The requested SharePoint site could not be found. Please check its name and your permissions to access the site.");
+	}
	// List all sites returned to assist user
-	writeln();
-	log.log("The following SharePoint site names were returned:");
+	addLogEntry();
+	addLogEntry("The following SharePoint site names were returned:");
	foreach (searchResultEntry; siteSearchResults) {
		// list the display name that we use to match against the user query
-		log.log(searchResultEntry);
+		addLogEntry(searchResultEntry);
	}
}
+
+// OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+querySharePointLibraryNameApiInstance.releaseCurlEngine();
+querySharePointLibraryNameApiInstance = null;
+// Perform Garbage Collection
+GC.collect();
}

-	// Create an anonymous read-only shareable link for an existing file on OneDrive
-	void createShareableLinkForFile(string filePath, bool writeablePermissions)
-	{
-		JSONValue onedrivePathDetails;
-		JSONValue createShareableLinkResponse;
-		string driveId;
-		string itemId;
-		string fileShareLink;
+	// Query the sync status of the client and the local system
+	void queryOneDriveForSyncStatus(string pathToQueryStatusOn) {
+
+		// Query the account driveId and rootId to get the /delta JSON information
+		// Process that JSON data for relevancy

-		// Get the path details from OneDrive
-		try {
-			onedrivePathDetails = onedrive.getPathDetails(filePath); // Returns a JSON String for the OneDrive Path
-		} catch (OneDriveException e) {
-			log.vdebug("onedrivePathDetails = onedrive.getPathDetails(filePath); generated a OneDriveException");
-			if (e.httpStatusCode == 404) {
-				// Requested path could not be found
-				log.error("ERROR: The requested path to query was not found on OneDrive");
-				log.error("ERROR: Cannot create a shareable link for a file that does not exist on OneDrive");
-				return;
+		// Function variables
+		ulong downloadSize = 0;
+		string deltaLink = null;
+		string driveIdToQuery = appConfig.defaultDriveId;
+		string itemIdToQuery = appConfig.defaultRootId;
+		JSONValue deltaChanges;
+
+		// Array of JSON items
+		JSONValue[] jsonItemsArray;
+
+		// Query Database for a potential deltaLink starting point
+		deltaLink = itemDB.getDeltaLink(driveIdToQuery, itemIdToQuery);
+
+		// Log what we are doing
+		addProcessingLogHeaderEntry("Querying the change status of Drive ID: " ~ driveIdToQuery, appConfig.verbosityCount);
+
+		for (;;) {
+			// Add a processing '.'
+			if (appConfig.verbosityCount == 0) {
+				addProcessingDotEntry();
			}
+
+			// Get the /delta changes via the OneDrive API
+			// getDeltaChangesByItemId has the re-try logic for transient errors
+			deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, deltaLink);

-			if (e.httpStatusCode == 429) {
-				// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
-				handleOneDriveThrottleRequest();
-				// Retry original request by calling function again to avoid replicating any further error handling
-				log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling queryDriveForChanges(path);");
-				createShareableLinkForFile(filePath, writeablePermissions);
-				// return back to original call
-				return;
+			// If the initial deltaChanges response is an invalid JSON object, keep trying until we get a valid response ..
+			if (deltaChanges.type() != JSONType.object) {
+				while (deltaChanges.type() != JSONType.object) {
+					// Handle the invalid JSON response and retry
+					addLogEntry("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response", ["debug"]);
+					deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, deltaLink);
+				}
			}

-			if (e.httpStatusCode == 504) {
-				// HTTP request returned status code 504 (Gateway Timeout)
-				log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying request");
-				// Retry original request by calling function again to avoid replicating any further error handling
-				createShareableLinkForFile(filePath, writeablePermissions);
-				// return back to original call
-				return;
-			} else {
-				// display what the error is
-				displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
-				return;
+			// We have a valid deltaChanges JSON array. This means we have at least 200+ JSON items to process.
+			// The API response, however, cannot be processed in parallel, as the OneDrive API sends the JSON items in the order in which they must be processed
+			foreach (onedriveJSONItem; deltaChanges["value"].array) {
+				// is the JSON a root object - we don't want to count this
+				if (!isItemRoot(onedriveJSONItem)) {
+					// Files are the only item that we want to calculate
+					if (isItemFile(onedriveJSONItem)) {
+						// JSON item is a file
+						// Is the item filtered out due to client side filtering rules?
+						if (!checkJSONAgainstClientSideFiltering(onedriveJSONItem)) {
+							// Is the path of this JSON item 'in-scope' or 'out-of-scope' ?
+							if (pathToQueryStatusOn != "/") {
+								// We need to check the path of this item against pathToQueryStatusOn
+								string thisItemPath = "";
+								if (("path" in onedriveJSONItem["parentReference"]) != null) {
+									// If there is a parent reference path, try and use it
+									string selfBuiltPath = onedriveJSONItem["parentReference"]["path"].str ~ "/" ~ onedriveJSONItem["name"].str;
+
+									// Check for ':' and split if present
+									auto splitIndex = selfBuiltPath.indexOf(":");
+									if (splitIndex != -1) {
+										// Keep only the part after ':'
+										selfBuiltPath = selfBuiltPath[splitIndex + 1 .. $];
+									}
+
+									// Set thisItemPath to the self built path
+									thisItemPath = selfBuiltPath;
+								} else {
+									// no parent reference path available
+									thisItemPath = onedriveJSONItem["name"].str;
+								}
+								// can we find 'pathToQueryStatusOn' in 'thisItemPath' ?
+								if (canFind(thisItemPath, pathToQueryStatusOn)) {
+									// Add this to the array for processing
+									jsonItemsArray ~= onedriveJSONItem;
+								}
+							} else {
+								// We are not doing a --single-directory check
+								// Add this to the array for processing
+								jsonItemsArray ~= onedriveJSONItem;
+							}
+						}
+					}
+				}
			}
-		}
-
-		// Was a valid JSON response received?
-		if (onedrivePathDetails.type() == JSONType.object) {
-			// valid JSON response for the file was received
-			// Configure the required variables
-			driveId = onedrivePathDetails["parentReference"]["driveId"].str;
-			itemId = onedrivePathDetails["id"].str;
-
-			// What sort of shareable link is required?
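// ============================================================================
// Editor's note (illustrative sketch, not part of this patch): the in-scope test
// added above builds a path from the item's parentReference, strips the
// '/drive/root:' style prefix at the first ':', then substring-matches the
// queried path. Reduced to its essentials (the helper name is hypothetical):
import std.algorithm.searching : canFind;
import std.string : indexOf;

bool itemInScope(string parentReferencePath, string itemName, string pathToQueryStatusOn) {
	string thisItemPath = parentReferencePath ~ "/" ~ itemName;
	auto splitIndex = thisItemPath.indexOf(":");
	if (splitIndex != -1) {
		// Keep only the part after ':'
		thisItemPath = thisItemPath[splitIndex + 1 .. $];
	}
	// Substring match, exactly as the canFind() call in the hunk above
	return canFind(thisItemPath, pathToQueryStatusOn);
}
// ============================================================================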
- JSONValue accessScope; - if (writeablePermissions) { - // configure the read-write access scope - accessScope = [ - "type": "edit", - "scope": "anonymous" - ]; - } else { - // configure the read-only access scope (default) - accessScope = [ - "type": "view", - "scope": "anonymous" - ]; + + // The response may contain either @odata.deltaLink or @odata.nextLink + if ("@odata.deltaLink" in deltaChanges) { + deltaLink = deltaChanges["@odata.deltaLink"].str; + addLogEntry("Setting next deltaLink to (@odata.deltaLink): " ~ deltaLink, ["debug"]); + } + + // Update deltaLink to next changeSet bundle + if ("@odata.nextLink" in deltaChanges) { + deltaLink = deltaChanges["@odata.nextLink"].str; + addLogEntry("Setting next deltaLink to (@odata.nextLink): " ~ deltaLink, ["debug"]); } + else break; + } + + // Needed after printing out '....' when fetching changes from OneDrive API + if (appConfig.verbosityCount == 0) { + addLogEntry("\n", ["consoleOnlyNoNewLine"]); + } + + // Are there any JSON items to process? + if (count(jsonItemsArray) != 0) { + // There are items to process + foreach (onedriveJSONItem; jsonItemsArray.array) { - // Create the shareable file link - createShareableLinkResponse = onedrive.createShareableLink(driveId, itemId, accessScope); - if ((createShareableLinkResponse.type() == JSONType.object) && ("link" in createShareableLinkResponse)) { - // Extract the file share link from the JSON response - fileShareLink = createShareableLinkResponse["link"]["webUrl"].str; - writeln("File Shareable Link: ", fileShareLink); - if (writeablePermissions) { - writeln("Shareable Link has read-write permissions - use and provide with caution"); + // variables we need + string thisItemParentDriveId; + string thisItemId; + string thisItemHash; + bool existingDBEntry = false; + + // Is this file a remote item (on a shared folder) ? + if (isItemRemote(onedriveJSONItem)) { + // remote drive item + thisItemParentDriveId = onedriveJSONItem["remoteItem"]["parentReference"]["driveId"].str; + thisItemId = onedriveJSONItem["id"].str; + } else { + // standard drive item + thisItemParentDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + thisItemId = onedriveJSONItem["id"].str; } + // Get the file hash + if (hasHashes(onedriveJSONItem)) { + thisItemHash = onedriveJSONItem["file"]["hashes"]["quickXorHash"].str; + + // Check if the item has been seen before + Item existingDatabaseItem; + existingDBEntry = itemDB.selectById(thisItemParentDriveId, thisItemId, existingDatabaseItem); + + if (existingDBEntry) { + // item exists in database .. do the database details match the JSON record? + if (existingDatabaseItem.quickXorHash != thisItemHash) { + // file hash is different, this will trigger a download event + downloadSize = downloadSize + onedriveJSONItem["size"].integer; + } + } else { + // item does not exist in the database + // this item has already passed client side filtering rules (skip_dir, skip_file, sync_list) + // this will trigger a download event + downloadSize = downloadSize + onedriveJSONItem["size"].integer; + } + } + } + } + + // Was anything detected that would constitute a download? 
+		if (downloadSize > 0) {
+			// we have something to download
+			if (pathToQueryStatusOn != "/") {
+				writeln("The selected local directory via --single-directory is out of sync with Microsoft OneDrive");
			} else {
-				// not a valid JSON object
-				log.error("ERROR: There was an error performing this operation on OneDrive");
-				log.error("ERROR: Increase logging verbosity to assist determining why.");
-				return;
+				writeln("The configured local 'sync_dir' directory is out of sync with Microsoft OneDrive");
			}
+			writeln("Approximate data to download from Microsoft OneDrive: ", (downloadSize/1024), " KB");
		} else {
-			// not a valid JSON object
-			log.error("ERROR: There was an error performing this operation on OneDrive");
-			log.error("ERROR: Increase logging verbosity to assist determining why.");
-			return;
-		}
+			// No changes were returned
+			writeln("There are no pending changes from Microsoft OneDrive; your local directory matches the data online.");
+		}
	}

-	// Query OneDrive for file details of a given path
-	void queryOneDriveForFileDetails(string localFilePath, string syncDir, string outputType)
-	{
+	// Query OneDrive for file details of a given path, returning either the 'webUrl' or 'lastModifiedBy' JSON facet
+	void queryOneDriveForFileDetails(string inputFilePath, string runtimePath, string outputType) {
+
+		OneDriveApi queryOneDriveForFileDetailsApiInstance;
+
+		// Calculate the full local file path
+		string fullLocalFilePath = buildNormalizedPath(buildPath(runtimePath, inputFilePath));
+
		// Query if file is valid locally
-		if (exists(localFilePath)) {
-			// File exists locally, does it exist in the database
-			// Path needs to be relative to sync_dir path
-			Item item;
-			string[] distinctDriveIds = itemdb.selectDistinctDriveIds();
-			string relativePath = relativePath(localFilePath, syncDir);
-			bool fileInDB = false;
+		if (exists(fullLocalFilePath)) {
+			// search drive_id list
+			string[] distinctDriveIds = itemDB.selectDistinctDriveIds();
+			bool pathInDB = false;
+			Item dbItem;
+
			foreach (searchDriveId; distinctDriveIds) {
-				if (itemdb.selectByPath(relativePath, searchDriveId, item)) {
-					// File is in the local database cache
-					fileInDB = true;
-					JSONValue fileDetails;
+				// Does this path exist in the database, use the 'inputFilePath'
+				if (itemDB.selectByPath(inputFilePath, searchDriveId, dbItem)) {
+					// item is in the database
+					pathInDB = true;
+					JSONValue fileDetailsFromOneDrive;
+
+					// Create a new API Instance for this thread and initialise it
+					queryOneDriveForFileDetailsApiInstance = new OneDriveApi(appConfig);
+					queryOneDriveForFileDetailsApiInstance.initialise();
+
					try {
-						fileDetails = onedrive.getFileDetails(item.driveId, item.id);
-					} catch (OneDriveException e) {
+						fileDetailsFromOneDrive = queryOneDriveForFileDetailsApiInstance.getPathDetailsById(dbItem.driveId, dbItem.id);
+						// Don't cleanup here, as if we are creating a shareable file link (below) the API instance is still needed
+
+					} catch (OneDriveException exception) {
						// display what the error is
-						displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
+						displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
+
+						// OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+						queryOneDriveForFileDetailsApiInstance.releaseCurlEngine();
+						queryOneDriveForFileDetailsApiInstance = null;
+						// Perform Garbage Collection
+						GC.collect();
						return;
					}

-					// debug output of response
-					log.vdebug("API Response: ", fileDetails);
+					// Is the API response a valid JSON object?
+					if (fileDetailsFromOneDrive.type() == JSONType.object) {

-					// What sort of response to we generate
-					// --get-file-link response
-					if (outputType == "URL") {
-						if ((fileDetails.type() == JSONType.object) && ("webUrl" in fileDetails)) {
-							// Valid JSON object
-							writeln(fileDetails["webUrl"].str);
+						// debug output of response
+						addLogEntry("API Response: " ~ to!string(fileDetailsFromOneDrive), ["debug"]);
+
+						// What sort of response do we generate
+						// --get-file-link response
+						if (outputType == "URL") {
+							if ((fileDetailsFromOneDrive.type() == JSONType.object) && ("webUrl" in fileDetailsFromOneDrive)) {
+								// Valid JSON object
+								addLogEntry();
+								writeln("WebURL: ", fileDetailsFromOneDrive["webUrl"].str);
+							}
						}
-					}
-
-					// --modified-by response
-					if (outputType == "ModifiedBy") {
-						if ((fileDetails.type() == JSONType.object) && ("lastModifiedBy" in fileDetails)) {
-							// Valid JSON object
-							writeln("Last modified: ", fileDetails["lastModifiedDateTime"].str);
-							writeln("Last modified by: ", fileDetails["lastModifiedBy"]["user"]["displayName"].str);
-							// if 'email' provided, add this to the output
-							if ("email" in fileDetails["lastModifiedBy"]["user"]) {
-								writeln("Email Address: ", fileDetails["lastModifiedBy"]["user"]["email"].str);
+
+						// --modified-by response
+						if (outputType == "ModifiedBy") {
+							if ((fileDetailsFromOneDrive.type() == JSONType.object) && ("lastModifiedBy" in fileDetailsFromOneDrive)) {
+								// Valid JSON object
+								writeln();
+								writeln("Last modified: ", fileDetailsFromOneDrive["lastModifiedDateTime"].str);
+								writeln("Last modified by: ", fileDetailsFromOneDrive["lastModifiedBy"]["user"]["displayName"].str);
+								// if 'email' provided, add this to the output
+								if ("email" in fileDetailsFromOneDrive["lastModifiedBy"]["user"]) {
+									writeln("Email Address: ", fileDetailsFromOneDrive["lastModifiedBy"]["user"]["email"].str);
+								}
							}
						}
-					}
+
+						// --create-share-link response
+						if (outputType == "ShareableLink") {
+
+							JSONValue accessScope;
+							JSONValue createShareableLinkResponse;
+							string thisDriveId = fileDetailsFromOneDrive["parentReference"]["driveId"].str;
+							string thisItemId = fileDetailsFromOneDrive["id"].str;
+							string fileShareLink;
+							bool writeablePermissions = appConfig.getValueBool("with_editing_perms");
+
+							// What sort of shareable link is required?
+							if (writeablePermissions) {
+								// configure the read-write access scope
+								accessScope = [
+									"type": "edit",
+									"scope": "anonymous"
+								];
+							} else {
+								// configure the read-only access scope (default)
+								accessScope = [
+									"type": "view",
+									"scope": "anonymous"
+								];
+							}
+
+							// Try and create the shareable file link
+							try {
+								createShareableLinkResponse = queryOneDriveForFileDetailsApiInstance.createShareableLink(thisDriveId, thisItemId, accessScope);
+							} catch (OneDriveException exception) {
+								// display what the error is
+								displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
+								return;
+							}
+
+							// Is the API response a valid JSON object?
+ if ((createShareableLinkResponse.type() == JSONType.object) && ("link" in createShareableLinkResponse)) { + // Extract the file share link from the JSON response + fileShareLink = createShareableLinkResponse["link"]["webUrl"].str; + writeln("File Shareable Link: ", fileShareLink); + if (writeablePermissions) { + writeln("Shareable Link has read-write permissions - use and provide with caution"); + } + } + } + } + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + queryOneDriveForFileDetailsApiInstance.releaseCurlEngine(); + queryOneDriveForFileDetailsApiInstance = null; + // Perform Garbage Collection + GC.collect(); } } + // was path found? - if (!fileInDB) { + if (!pathInDB) { // File has not been synced with OneDrive - log.error("Path has not been synced with OneDrive: ", localFilePath); + addLogEntry("Selected path has not been synced with Microsoft OneDrive: " ~ inputFilePath); } } else { // File does not exist locally - log.error("Path not found on local system: ", localFilePath); + addLogEntry("Selected path not found on local system: " ~ inputFilePath); } } - // Query the OneDrive 'drive' to determine if we are 'in sync' or if there are pending changes - void queryDriveForChanges(const(string) path) - { - - // Function variables - int validChanges = 0; - long downloadSize = 0; + // Query OneDrive for the quota details + void queryOneDriveForQuotaDetails() { + // This function is similar to getRemainingFreeSpace() but is different in data being analysed and output method + JSONValue currentDriveQuota; string driveId; - string folderId; - string deltaLink; - string thisItemId; - string thisItemParentPath; - string syncFolderName; - string syncFolderPath; - string syncFolderChildPath; - JSONValue changes; - JSONValue onedrivePathDetails; - - // Get the path details from OneDrive - try { - onedrivePathDetails = onedrive.getPathDetails(path); // Returns a JSON String for the OneDrive Path - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = onedrive.getPathDetails(path); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // Requested path could not be found - log.error("ERROR: The requested path to query was not found on OneDrive"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling queryDriveForChanges(path);"); - queryDriveForChanges(path); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying request"); - // Retry original request by calling function again to avoid replicating any further error handling - queryDriveForChanges(path); - // return back to original call - return; - } else { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } - - if(isItemRemote(onedrivePathDetails)){ - // remote changes - driveId = onedrivePathDetails["remoteItem"]["parentReference"]["driveId"].str; // Should give something like 66d53be8a5056eca - folderId = onedrivePathDetails["remoteItem"]["id"].str; // Should give something like BC7D88EC1F539DCF!107 - syncFolderName = onedrivePathDetails["name"].str; - // A remote drive item will not have ["parentReference"]["path"] - syncFolderPath = ""; - syncFolderChildPath = ""; - } else { - driveId = defaultDriveId; - folderId = onedrivePathDetails["id"].str; // Should give something like 12345ABCDE1234A1!101 - syncFolderName = onedrivePathDetails["name"].str; - if (hasParentReferencePath(onedrivePathDetails)) { - syncFolderPath = onedrivePathDetails["parentReference"]["path"].str; - syncFolderChildPath = syncFolderPath ~ "/" ~ syncFolderName ~ "/"; - } else { - // root drive item will not have ["parentReference"]["path"] - syncFolderPath = ""; - syncFolderChildPath = ""; - } - } - - // Query Database for the deltaLink - deltaLink = itemdb.getDeltaLink(driveId, folderId); - - const(char)[] idToQuery; - if (driveId == defaultDriveId) { - // The drive id matches our users default drive id - idToQuery = defaultRootId.dup; + OneDriveApi getCurrentDriveQuotaApiInstance; + + if (appConfig.getValueString("drive_id").length) { + driveId = appConfig.getValueString("drive_id"); } else { - // The drive id does not match our users default drive id - // Potentially the 'path id' we are requesting the details of is a Shared Folder (remote item) - // Use folderId - idToQuery = folderId; + driveId = appConfig.defaultDriveId; } - // Query OneDrive changes try { - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); + // Create a new OneDrive API instance + getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig); + getCurrentDriveQuotaApiInstance.initialise(); + addLogEntry("Seeking available quota for this drive id: " ~ driveId, ["debug"]); + currentDriveQuota = getCurrentDriveQuotaApiInstance.getDriveQuota(driveId); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + getCurrentDriveQuotaApiInstance.releaseCurlEngine(); + getCurrentDriveQuotaApiInstance = null; + // Perform Garbage Collection + GC.collect(); + } catch (OneDriveException e) { - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling queryDriveForChanges(path);"); - queryDriveForChanges(path); - // return back to original call - return; - } else { - // OneDrive threw an error - log.vdebug("Error query: changes = onedrive.viewChangesById(driveId, idToQuery, deltaLink)"); - log.vdebug("OneDrive threw an error when querying for these changes:"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("Previous deltaLink: ", deltaLink); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]); + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + getCurrentDriveQuotaApiInstance.releaseCurlEngine(); + getCurrentDriveQuotaApiInstance = null; + // Perform Garbage Collection + GC.collect(); } - // Are there any changes on OneDrive? - if (count(changes["value"].array) != 0) { - // Were we given a remote path to check if we are in sync for, or the root? - if (path != "/") { - // we were given a directory to check, we need to validate the list of changes against this path only - foreach (item; changes["value"].array) { - // Is this change valid for the 'path' we are checking? - if (hasParentReferencePath(item)) { - thisItemId = item["parentReference"]["id"].str; - thisItemParentPath = item["parentReference"]["path"].str; - } else { - thisItemId = item["id"].str; - // Is the defaultDriveId == driveId - if (driveId == defaultDriveId){ - // 'root' items will not have ["parentReference"]["path"] - if (isItemRoot(item)){ - thisItemParentPath = ""; - } else { - thisItemParentPath = item["parentReference"]["path"].str; - } - } else { - // A remote drive item will not have ["parentReference"]["path"] - thisItemParentPath = ""; - } - } - - if ( (thisItemId == folderId) || (canFind(thisItemParentPath, syncFolderChildPath)) || (canFind(thisItemParentPath, folderId)) ){ - // This is a change we want count - validChanges++; - if ((isItemFile(item)) && (hasFileSize(item))) { - downloadSize = downloadSize + item["size"].integer; - } - } + // validate that currentDriveQuota is a JSON value + if (currentDriveQuota.type() == JSONType.object) { + // was 'quota' in response? + if ("quota" in currentDriveQuota) { + + // debug output of response + addLogEntry("currentDriveQuota: " ~ to!string(currentDriveQuota), ["debug"]); + + // human readable output of response + string deletedValue = "Not Provided"; + string remainingValue = "Not Provided"; + string stateValue = "Not Provided"; + string totalValue = "Not Provided"; + string usedValue = "Not Provided"; + + // Update values + if ("deleted" in currentDriveQuota["quota"]) { + deletedValue = byteToGibiByte(currentDriveQuota["quota"]["deleted"].integer); } - // Are there any valid changes? 
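// ============================================================================
// Editor's note (illustrative sketch, not part of this patch): byteToGibiByte(),
// used for the human-readable quota output in this hunk, is assumed to live in
// src/util.d; its exact rounding may differ. The conversion itself is a
// division by 2^30:
import std.format : format;

string byteToGibiByteSketch(long bytes) {
	// 1 GiB = 2^30 bytes; two decimal places is an assumption for illustration
	return format("%.2f", cast(double) bytes / (1L << 30));
}
// ============================================================================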
- if (validChanges != 0){ - writeln("Selected directory is out of sync with OneDrive"); - if (downloadSize > 0){ - downloadSize = downloadSize / 1000; - writeln("Approximate data to download from OneDrive: ", downloadSize, " KB"); - } - } else { - writeln("No pending remote changes - selected directory is in sync"); + + if ("remaining" in currentDriveQuota["quota"]) { + remainingValue = byteToGibiByte(currentDriveQuota["quota"]["remaining"].integer); } - } else { - writeln("Local directory is out of sync with OneDrive"); - foreach (item; changes["value"].array) { - if ((isItemFile(item)) && (hasFileSize(item))) { - downloadSize = downloadSize + item["size"].integer; - } + + if ("state" in currentDriveQuota["quota"]) { + stateValue = currentDriveQuota["quota"]["state"].str; + } + + if ("total" in currentDriveQuota["quota"]) { + totalValue = byteToGibiByte(currentDriveQuota["quota"]["total"].integer); } - if (downloadSize > 0){ - downloadSize = downloadSize / 1000; - writeln("Approximate data to download from OneDrive: ", downloadSize, " KB"); + + if ("used" in currentDriveQuota["quota"]) { + usedValue = byteToGibiByte(currentDriveQuota["quota"]["used"].integer); } + + writeln("Microsoft OneDrive quota information as reported for this Drive ID: ", driveId); + writeln(); + writeln("Deleted: ", deletedValue, " GB (", currentDriveQuota["quota"]["deleted"].integer, " bytes)"); + writeln("Remaining: ", remainingValue, " GB (", currentDriveQuota["quota"]["remaining"].integer, " bytes)"); + writeln("State: ", stateValue); + writeln("Total: ", totalValue, " GB (", currentDriveQuota["quota"]["total"].integer, " bytes)"); + writeln("Used: ", usedValue, " GB (", currentDriveQuota["quota"]["used"].integer, " bytes)"); + writeln(); + } else { + writeln("Microsoft OneDrive quota information is being restricted for this Drive ID: ", driveId); } - } else { - writeln("No pending remote changes - in sync"); - } + } } - // Create a fake OneDrive response suitable for use with saveItem - JSONValue createFakeResponse(const(string) path) - { - import std.digest.sha; - // Generate a simulated JSON response which can be used - // At a minimum we need: - // 1. eTag - // 2. cTag - // 3. fileSystemInfo - // 4. file or folder. if file, hash of file - // 5. id - // 6. name - // 7. 
parent reference - - string fakeDriveId = defaultDriveId; - string fakeRootId = defaultRootId; - SysTime mtime = timeLastModified(path).toUTC(); - - // Need to update the 'fakeDriveId' & 'fakeRootId' with elements from the --dry-run database - // Otherwise some calls to validate objects will fail as the actual driveId being used is invalid - string parentPath = dirName(path); - Item databaseItem; + // Query the system for session_upload.* files + bool checkForInterruptedSessionUploads() { + + bool interruptedUploads = false; + ulong interruptedUploadsCount; - if (parentPath != ".") { - // Not a 'root' parent - // For each driveid in the existing driveIDsArray - foreach (searchDriveId; driveIDsArray) { - log.vdebug("FakeResponse: searching database for: ", searchDriveId, " ", parentPath); - if (itemdb.selectByPath(parentPath, searchDriveId, databaseItem)) { - log.vdebug("FakeResponse: Found Database Item: ", databaseItem); - fakeDriveId = databaseItem.driveId; - fakeRootId = databaseItem.id; - } - } + // Scan the filesystem for the files we are interested in, build up interruptedUploadsSessionFiles array + foreach (sessionFile; dirEntries(appConfig.configDirName, "session_upload.*", SpanMode.shallow)) { + // calculate the full path + string tempPath = buildNormalizedPath(buildPath(appConfig.configDirName, sessionFile)); + // add to array + interruptedUploadsSessionFiles ~= [tempPath]; } - // real id / eTag / cTag are different format for personal / business account - auto sha1 = new SHA1Digest(); - ubyte[] fakedOneDriveItemValues = sha1.digest(path); - - JSONValue fakeResponse; + // Count all 'session_upload' files in appConfig.configDirName + //interruptedUploadsCount = count(dirEntries(appConfig.configDirName, "session_upload.*", SpanMode.shallow)); + interruptedUploadsCount = count(interruptedUploadsSessionFiles); + if (interruptedUploadsCount != 0) { + interruptedUploads = true; + } - if (isDir(path)) { - // path is a directory - fakeResponse = [ - "id": JSONValue(toHexString(fakedOneDriveItemValues)), - "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "fileSystemInfo": JSONValue([ - "createdDateTime": mtime.toISOExtString(), - "lastModifiedDateTime": mtime.toISOExtString() - ]), - "name": JSONValue(baseName(path)), - "parentReference": JSONValue([ - "driveId": JSONValue(fakeDriveId), - "driveType": JSONValue(accountType), - "id": JSONValue(fakeRootId) - ]), - "folder": JSONValue("") - ]; - } else { - // path is a file - // compute file hash - both business and personal responses use quickXorHash - string quickXorHash = computeQuickXorHash(path); + // return if there are interrupted uploads to process + return interruptedUploads; + } - fakeResponse = [ - "id": JSONValue(toHexString(fakedOneDriveItemValues)), - "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "fileSystemInfo": JSONValue([ - "createdDateTime": mtime.toISOExtString(), - "lastModifiedDateTime": mtime.toISOExtString() - ]), - "name": JSONValue(baseName(path)), - "parentReference": JSONValue([ - "driveId": JSONValue(fakeDriveId), - "driveType": JSONValue(accountType), - "id": JSONValue(fakeRootId) - ]), - "file": JSONValue([ - "hashes":JSONValue([ - "quickXorHash": JSONValue(quickXorHash) - ]) - - ]) - ]; + // Clear any session_upload.* files + void clearInterruptedSessionUploads() { + // Scan the filesystem for the files we are interested in, build up interruptedUploadsSessionFiles array + 
foreach (sessionFile; dirEntries(appConfig.configDirName, "session_upload.*", SpanMode.shallow)) {
+			// calculate the full path
+			string tempPath = buildNormalizedPath(buildPath(appConfig.configDirName, sessionFile));
+			JSONValue sessionFileData = readText(tempPath).parseJSON();
+			addLogEntry("Removing interrupted session upload file due to --resync for: " ~ sessionFileData["localPath"].str, ["info"]);
+
+			// Process removal
+			if (!dryRun) {
+				safeRemove(tempPath);
+			}
		}
-
-		log.vdebug("Generated Fake OneDrive Response: ", fakeResponse);
-		return fakeResponse;
	}

-	void handleOneDriveThrottleRequest()
-	{
-		// If OneDrive sends a status code 429 then this function will be used to process the Retry-After response header which contains the value by which we need to wait
-		log.vdebug("Handling a OneDrive HTTP 429 Response Code (Too Many Requests)");
-		// Read in the Retry-After HTTP header as set and delay as per this value before retrying the request
-		auto retryAfterValue = onedrive.getRetryAfterValue();
-		log.vdebug("Using Retry-After Value = ", retryAfterValue);
-
-		// HTTP request returned status code 429 (Too Many Requests)
-		// https://github.com/abraunegg/onedrive/issues/133
-		// https://github.com/abraunegg/onedrive/issues/815
-
-		ulong delayBeforeRetry = 0;
-		if (retryAfterValue != 0) {
-			// Use the HTTP Response Header Value
-			delayBeforeRetry = retryAfterValue;
-		} else {
-			// Use a 120 second delay as a default given header value was zero
-			// This value is based on log files and data when determining correct process for 429 response handling
-			delayBeforeRetry = 120;
-			// Update that we are over-riding the provided value with a default
-			log.vdebug("HTTP Response Header retry-after value was 0 - Using a preconfigured default of: ", delayBeforeRetry);
+	// Process interrupted 'session_upload' files
+	void processForInterruptedSessionUploads() {
+		// For each upload_session file that has been found, process the data to ensure it is still valid
+		foreach (sessionFilePath; interruptedUploadsSessionFiles) {
+			if (!validateUploadSessionFileData(sessionFilePath)) {
+				// Remove upload_session file as it is invalid
+				// upload_session file contains an error - can't resume this session
+				addLogEntry("Restore file upload session failed - cleaning up resumable session data file: " ~ sessionFilePath, ["verbose"]);
+
+				// cleanup session path
+				if (exists(sessionFilePath)) {
+					if (!dryRun) {
+						remove(sessionFilePath);
+					}
+				}
+			}
		}
-		// Sleep thread as per request
-		log.log("Thread sleeping due to 'HTTP request returned status code 429' - The request has been throttled");
-		log.log("Sleeping for ", delayBeforeRetry, " seconds");
-		Thread.sleep(dur!"seconds"(delayBeforeRetry));
-
-		// Reset retry-after value to zero as we have used this value now and it may be changed in the future to a different value
-		onedrive.resetRetryAfterValue();
+
+		// At this point we should have an array of JSON items to resume uploading
+		if (count(jsonItemsToResumeUpload) > 0) {
+			// there are valid items to resume upload
+			// Let's deal with all the JSON items that need to be resumed for upload in a batch process
+			size_t batchSize = to!int(appConfig.getValueLong("threads"));
+			ulong batchCount = (jsonItemsToResumeUpload.length + batchSize - 1) / batchSize;
+			ulong batchesProcessed = 0;
+
+			foreach (chunk; jsonItemsToResumeUpload.chunks(batchSize)) {
+				// send an array containing 'appConfig.getValueLong("threads")' JSON items to resume upload
+				resumeSessionUploadsInParallel(chunk);
+			}
		}
	}

-	// Generage a /delta compatible response when using National Azure AD deployments that do not support /delta queries
-	// see: https://docs.microsoft.com/en-us/graph/deployments#supported-features
-	JSONValue generateDeltaResponse(const(char)[] driveId, const(char)[] idToQuery)
-	{
-		// JSON value which will be responded with
-		JSONValue deltaResponse;
-		// initial data
-		JSONValue rootData;
-		JSONValue driveData;
-		JSONValue topLevelChildren;
-		JSONValue[] childrenData;
-		string nextLink;
+	// A resume session upload file needs to be valid to be used
+	// This function validates this data
+	bool validateUploadSessionFileData(string sessionFilePath) {

-		// Get drive details for the provided driveId
+		JSONValue sessionFileData;
+		OneDriveApi validateUploadSessionFileDataApiInstance;
+
+		// Try to read the text from the session file and parse it as JSON
		try {
-			driveData = onedrive.getPathDetailsById(driveId, idToQuery);
-		} catch (OneDriveException e) {
-			log.vdebug("driveData = onedrive.getPathDetailsById(driveId, idToQuery) generated a OneDriveException");
-			// HTTP request returned status code 504 (Gateway Timeout) or 429 retry
-			if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) {
-				// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
-				if (e.httpStatusCode == 429) {
-					log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - retrying applicable request");
-					handleOneDriveThrottleRequest();
-				}
-				if (e.httpStatusCode == 504) {
-					log.vdebug("Retrying original request that generated the HTTP 504 (Gateway Timeout) - retrying applicable request");
-					Thread.sleep(dur!"seconds"(30));
-				}
-				// Retry original request by calling function again to avoid replicating any further error handling
-				driveData = onedrive.getPathDetailsById(driveId, idToQuery);
-			} else {
-				// There was a HTTP 5xx Server Side Error
-				displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
-				// Must exit here
-				onedrive.shutdown();
-				exit(-1);
+			sessionFileData = readText(sessionFilePath).parseJSON();
+		} catch (JSONException e) {
+			addLogEntry("SESSION-RESUME: Invalid JSON data in: " ~ sessionFilePath, ["debug"]);
+			return false;
+		}
+
+		// Does the file we wish to resume uploading still exist locally?
+		if ("localPath" in sessionFileData) {
+			string sessionLocalFilePath = sessionFileData["localPath"].str;
+			addLogEntry("SESSION-RESUME: sessionLocalFilePath: " ~ sessionLocalFilePath, ["debug"]);
+
+			// Does the file exist?
+			if (!exists(sessionLocalFilePath)) {
+				addLogEntry("The local file to upload does not exist locally anymore", ["verbose"]);
+				return false;
			}
+
+			// Can we read the file?
+			if (!readLocalFile(sessionLocalFilePath)) {
+				// filesystem error already returned if unable to read
+				return false;
+			}
+
+		} else {
+			addLogEntry("SESSION-RESUME: No localPath data in: " ~ sessionFilePath, ["debug"]);
+			return false;
		}

-		if (!isItemRoot(driveData)) {
-			// Get root details for the provided driveId
-			try {
-				rootData = onedrive.getDriveIdRoot(driveId);
-			} catch (OneDriveException e) {
-				log.vdebug("rootData = onedrive.getDriveIdRoot(driveId) generated a OneDriveException");
-				// HTTP request returned status code 504 (Gateway Timeout) or 429 retry
-				if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) {
-					// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
-					if (e.httpStatusCode == 429) {
-						log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - retrying applicable request");
-						handleOneDriveThrottleRequest();
-					}
-					if (e.httpStatusCode == 504) {
-						log.vdebug("Retrying original request that generated the HTTP 504 (Gateway Timeout) - retrying applicable request");
-						Thread.sleep(dur!"seconds"(30));
-					}
-					// Retry original request by calling function again to avoid replicating any further error handling
-					rootData = onedrive.getDriveIdRoot(driveId);
-
-				} else {
-					// There was a HTTP 5xx Server Side Error
-					displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
-					// Must exit here
-					onedrive.shutdown();
-					exit(-1);
-				}
+		// Check the session data for expirationDateTime
+		if ("expirationDateTime" in sessionFileData) {
+			auto expiration = SysTime.fromISOExtString(sessionFileData["expirationDateTime"].str);
+			if (expiration < Clock.currTime()) {
+				addLogEntry("The upload session has expired for: " ~ sessionFilePath, ["verbose"]);
+				return false;
			}
-			// Add driveData JSON data to array
-			log.vlog("Adding OneDrive root details for processing");
-			childrenData ~= rootData;
+		} else {
+			addLogEntry("SESSION-RESUME: No expirationDateTime data in: " ~ sessionFilePath, ["debug"]);
+			return false;
		}

-		// Add driveData JSON data to array
-		log.vlog("Adding OneDrive folder details for processing");
-		childrenData ~= driveData;
-
-		for (;;) {
-			// query top level children
+		// Check the online upload status, using the uploadUrl in sessionFileData
+		if ("uploadUrl" in sessionFileData) {
+			JSONValue response;
+
			try {
-				topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink);
-			} catch (OneDriveException e) {
-				// OneDrive threw an error
-				log.vdebug("------------------------------------------------------------------");
-				log.vdebug("Query Error: topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)");
-				log.vdebug("driveId: ", driveId);
-				log.vdebug("idToQuery: ", idToQuery);
-				log.vdebug("nextLink: ", nextLink);
+				// Create a new OneDrive API instance
+				validateUploadSessionFileDataApiInstance = new OneDriveApi(appConfig);
+				validateUploadSessionFileDataApiInstance.initialise();

-				// HTTP request returned status code 404 (Not Found)
-				if (e.httpStatusCode == 404) {
-					// Stop application
-					log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'");
-					log.log("The item id to query was not found on OneDrive");
-					log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n");
-				}
+				// Request upload status
+				response = validateUploadSessionFileDataApiInstance.requestUploadStatus(sessionFileData["uploadUrl"].str);

-				// HTTP request returned status code 429 (Too Many Requests)
-				if (e.httpStatusCode == 429) {
-					// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
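// ============================================================================
// Editor's note (illustrative sketch, not part of this patch): the expiry check
// added above, isolated. Upload sessions carry an ISO 8601 'expirationDateTime';
// parsing uses std.datetime's SysTime.fromISOExtString, and a session that has
// expired cannot be resumed.
import std.datetime : Clock, SysTime;

bool sessionStillCurrent(string expirationDateTime) {
	auto expiration = SysTime.fromISOExtString(expirationDateTime);
	return expiration >= Clock.currTime();
}
// ============================================================================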
- handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); - } + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + validateUploadSessionFileDataApiInstance.releaseCurlEngine(); + validateUploadSessionFileDataApiInstance = null; + // Perform Garbage Collection + GC.collect(); - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } + } catch (OneDriveException e) { + // handle any onedrive error response as invalid + addLogEntry("SESSION-RESUME: Invalid response when using uploadUrl in: " ~ sessionFilePath, ["debug"]); - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query OneDrive drive children - retrying applicable request"); - log.vdebug("topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. - log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)"); - topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink); - log.vdebug("Query 'topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink) on re-try after delay"); - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + validateUploadSessionFileDataApiInstance.releaseCurlEngine(); + validateUploadSessionFileDataApiInstance = null; + // Perform Garbage Collection + GC.collect(); + return false; + } + + // Do we have a valid response from OneDrive? 
+			if (response.type() == JSONType.object) {
+				// Valid JSON object was returned
+				if (("expirationDateTime" in response) && ("nextExpectedRanges" in response)) {
+					// The 'uploadUrl' is valid, and the response contains elements we need
+					sessionFileData["expirationDateTime"] = response["expirationDateTime"];
+					sessionFileData["nextExpectedRanges"] = response["nextExpectedRanges"];
+
+					if (sessionFileData["nextExpectedRanges"].array.length == 0) {
+						addLogEntry("The upload session was already completed", ["verbose"]);
+						return false;
					}
				} else {
-					// Default operation if not 404, 410, 429, 500 or 504 errors
-					// display what the error is
-					displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
+					addLogEntry("SESSION-RESUME: No expirationDateTime & nextExpectedRanges data in Microsoft OneDrive API response: " ~ to!string(response), ["debug"]);
+					return false;
				}
+			} else {
+				// not a JSON object
+				addLogEntry("Restore file upload session failed - invalid response from Microsoft OneDrive", ["verbose"]);
+				return false;
			}
+		} else {
+			addLogEntry("SESSION-RESUME: No uploadUrl data in: " ~ sessionFilePath, ["debug"]);
+			return false;
+		}
+
+		// Add 'sessionFilePath' to 'sessionFileData' so that it can be used when we reuse the JSON data to resume the upload
+		sessionFileData["sessionFilePath"] = sessionFilePath;
+
+		// Add sessionFileData to jsonItemsToResumeUpload as it is now valid
+		jsonItemsToResumeUpload ~= sessionFileData;
+		return true;
+	}
+
+	// Resume all resumable session uploads in parallel
+	void resumeSessionUploadsInParallel(JSONValue[] array) {
+		// This function receives an array of JSON items to resume upload; the number of elements is based on appConfig.getValueLong("threads")
+		foreach (i, jsonItemToResume; processPool.parallel(array)) {
+			// Take each JSON item and resume upload using the JSON data

-			// process top level children
-			log.vlog("Adding ", count(topLevelChildren["value"].array), " OneDrive items for processing from OneDrive folder");
-			foreach (child; topLevelChildren["value"].array) {
-				// add this child to the array of objects
-				childrenData ~= child;
-				// is this child a folder?
- if (isItemFolder(child)){ - // We have to query this folders children if childCount > 0 - if (child["folder"]["childCount"].integer > 0){ - // This child folder has children - string childIdToQuery = child["id"].str; - string childDriveToQuery = child["parentReference"]["driveId"].str; - auto childParentPath = child["parentReference"]["path"].str.split(":"); - string folderPathToScan = childParentPath[1] ~ "/" ~ child["name"].str; - string pathForLogging = "/" ~ driveData["name"].str ~ "/" ~ child["name"].str; - JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, pathForLogging); - foreach (grandChild; grandChildrenData.array) { - // add the grandchild to the array - childrenData ~= grandChild; + JSONValue uploadResponse; + OneDriveApi uploadFileOneDriveApiInstance; + + // Create a new API instance + uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); + uploadFileOneDriveApiInstance.initialise(); + + // Pull out data from this JSON element + string threadUploadSessionFilePath = jsonItemToResume["sessionFilePath"].str; + ulong thisFileSizeLocal = getSize(jsonItemToResume["localPath"].str); + + // Try to resume the session upload using the provided data + try { + uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSizeLocal, jsonItemToResume, threadUploadSessionFilePath); + } catch (OneDriveException exception) { + writeln("CODING TO DO: Handle an exception when performing a resume session upload"); + } + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + uploadFileOneDriveApiInstance.releaseCurlEngine(); + uploadFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // Was the response from the OneDrive API a valid JSON item? + if (uploadResponse.type() == JSONType.object) { + // A valid JSON object was returned - session resumption upload successful + + // Are we in an --upload-only & --remove-source-files scenario? + // Use actual config values as we are doing an upload session recovery + if (localDeleteAfterUpload) { + // Log that we are deleting a local item + addLogEntry("Removing local file as --upload-only & --remove-source-files configured"); + // are we in a --dry-run scenario? + if (!dryRun) { + // No --dry-run ... process local file delete + // Only perform the delete if we have a valid file path + if (exists(jsonItemToResume["localPath"].str)) { + // file exists + addLogEntry("Removing local file: " ~ jsonItemToResume["localPath"].str, ["debug"]); + safeRemove(jsonItemToResume["localPath"].str); } } + // as file is removed, we have nothing to add to the local database + addLogEntry("Skipping adding to database as --upload-only & --remove-source-files configured", ["debug"]); + } else { + // Save JSON item in database + saveItem(uploadResponse); } + } else { + // No valid response was returned + addLogEntry("CODING TO DO: what to do when session upload resumption JSON data is not valid ... nothing ? error message ?"); } - // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response - // to indicate more items are available and provide the request URL for the next page of items. 
- if ("@odata.nextLink" in topLevelChildren) { - // Update nextLink to next changeSet bundle - log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); - nextLink = topLevelChildren["@odata.nextLink"].str; - } else break; } + } + + // Function to process the path by removing prefix up to ':' - remove '/drive/root:' from a path string + string processPathToRemoveRootReference(ref string pathToCheck) { + size_t colonIndex = pathToCheck.indexOf(":"); + if (colonIndex != -1) { + addLogEntry("Updating " ~ pathToCheck ~ " to remove prefix up to ':'", ["debug"]); + pathToCheck = pathToCheck[colonIndex + 1 .. $]; + addLogEntry("Updated path for 'skip_dir' check: " ~ pathToCheck, ["debug"]); + } + return pathToCheck; + } + + // Function to find a given DriveId in the onlineDriveDetails associative array that maps driveId to DriveDetailsCache + // If 'true' will return 'driveDetails' containing the struct data 'DriveDetailsCache' + bool canFindDriveId(string driveId, out DriveDetailsCache driveDetails) { + auto ptr = driveId in onlineDriveDetails; + if (ptr !is null) { + driveDetails = *ptr; // Dereference the pointer to get the value + return true; + } else { + return false; + } + } + + // Add this driveId plus relevant details for future reference and use + void addOrUpdateOneDriveOnlineDetails(string driveId) { + + bool quotaRestricted; + bool quotaAvailable; + ulong quotaRemaining; - // craft response from all returned elements - deltaResponse = [ - "@odata.context": JSONValue("https://graph.microsoft.com/v1.0/$metadata#Collection(driveItem)"), - "value": JSONValue(childrenData.array) - ]; + // Get the data from online + auto onlineDriveData = getRemainingFreeSpaceOnline(driveId); + quotaRestricted = to!bool(onlineDriveData[0][0]); + quotaAvailable = to!bool(onlineDriveData[0][1]); + quotaRemaining = to!long(onlineDriveData[0][2]); + onlineDriveDetails[driveId] = DriveDetailsCache(driveId, quotaRestricted, quotaAvailable, quotaRemaining); - // return the generated JSON response - return deltaResponse; + // Debug log what the cached array now contains + addLogEntry("onlineDriveDetails: " ~ to!string(onlineDriveDetails), ["debug"]); + } + + // Return a specific 'driveId' details from 'onlineDriveDetails' + DriveDetailsCache getDriveDetails(string driveId) { + auto ptr = driveId in onlineDriveDetails; + if (ptr !is null) { + return *ptr; // Dereference the pointer to get the value + } else { + // Return a default DriveDetailsCache or handle the case where the driveId is not found + return DriveDetailsCache.init; // Return default-initialised struct + } } - // query child for children - JSONValue[] queryForChildren(const(char)[] driveId, const(char)[] idToQuery, const(char)[] childParentPath, string pathForLogging) - { - // function variables + // Search a given Drive ID, Item ID and filename to see if this exists in the location specified + JSONValue searchDriveItemForFile(string parentItemDriveId, string parentItemId, string fileToUpload) { + + JSONValue onedriveJSONItem; + string searchName = baseName(fileToUpload); JSONValue thisLevelChildren; - JSONValue[] thisLevelChildrenData; + string nextLink; - + + // Create a new API Instance for this thread and initialise it + OneDriveApi checkFileOneDriveApiInstance; + checkFileOneDriveApiInstance = new OneDriveApi(appConfig); + checkFileOneDriveApiInstance.initialise(); + for (;;) { - // query children - thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink); - - // process this level children - if (!childParentPath.empty) { - 
// We dont use childParentPath to log, as this poses an information leak risk. - // The full parent path of the child, as per the JSON might be: - // /Level 1/Level 2/Level 3/Child Shared Folder/some folder/another folder - // But 'Child Shared Folder' is what is shared, thus '/Level 1/Level 2/Level 3/' is a potential information leak if logged. - // Plus, the application output now shows accuratly what is being shared - so that is a good thing. - log.vlog("Adding ", count(thisLevelChildren["value"].array), " OneDrive items for processing from ", pathForLogging); + // query top level children + try { + thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink); + } catch (OneDriveException exception) { + // OneDrive threw an error + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Query Error: thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink)", ["debug"]); + addLogEntry("driveId: " ~ parentItemDriveId, ["debug"]); + addLogEntry("idToQuery: " ~ parentItemId, ["debug"]); + addLogEntry("nextLink: " ~ nextLink, ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // Default operation if not 408,429,503,504 errors + // - 408,429,503,504 errors are handled as a retry within oneDriveApiInstance + // Display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); } + + // process thisLevelChildren response foreach (child; thisLevelChildren["value"].array) { - // add this child to the array of objects - thisLevelChildrenData ~= child; - // is this child a folder? - if (isItemFolder(child)){ - // We have to query this folders children if childCount > 0 - if (child["folder"]["childCount"].integer > 0){ - // This child folder has children - string childIdToQuery = child["id"].str; - string childDriveToQuery = child["parentReference"]["driveId"].str; - auto grandchildParentPath = child["parentReference"]["path"].str.split(":"); - string folderPathToScan = grandchildParentPath[1] ~ "/" ~ child["name"].str; - string newLoggingPath = pathForLogging ~ "/" ~ child["name"].str; - JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, newLoggingPath); - foreach (grandChild; grandChildrenData.array) { - // add the grandchild to the array - thisLevelChildrenData ~= grandChild; - } - } - } - } + // Only looking at files + if ((child["name"].str == searchName) && (("file" in child) != null)) { + // Found the matching file, return its JSON representation + // Operations in this thread are done / complete + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + checkFileOneDriveApiInstance.releaseCurlEngine(); + checkFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // Return child + return child; + } + } + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response // to indicate more items are available and provide the request URL for the next page of items. 
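+ // For example (illustrative values only), a paged response may include:
+ //   "@odata.nextLink": "https://graph.microsoft.com/v1.0/drives/<driveId>/items/<itemId>/children?$skiptoken=..."
+ // and that URL is passed back into listChildren() on the next loop iteration until no '@odata.nextLink' is returned.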
if ("@odata.nextLink" in thisLevelChildren) { // Update nextLink to next changeSet bundle + addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); nextLink = thisLevelChildren["@odata.nextLink"].str; - log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); } else break; } - // return response - return thisLevelChildrenData; + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + checkFileOneDriveApiInstance.releaseCurlEngine(); + checkFileOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + + // return an empty JSON item + return onedriveJSONItem; } - // Query from OneDrive the child objects for this element - JSONValue queryThisLevelChildren(const(char)[] driveId, const(char)[] idToQuery, string nextLink) - { - JSONValue thisLevelChildren; + // Update 'onlineDriveDetails' with the latest data about this drive + void updateDriveDetailsCache(string driveId, bool quotaRestricted, bool quotaAvailable, ulong localFileSize) { - // query children - try { - // attempt API call - log.vdebug("Attempting Query: thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)"); - thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink); - log.vdebug("Query 'thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)' performed successfully"); - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("nextLink: ", nextLink); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); - } - - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - // transient error - try again in 30 seconds - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query OneDrive drive children - retrying applicable request"); - log.vdebug("thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429 and 504 - but loop back calling this function - log.vdebug("Retrying Query: thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink)"); - thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink); + // As each thread is running differently, what is the current 'quotaRemaining' for 'driveId' ? + ulong quotaRemaining; + DriveDetailsCache cachedOnlineDriveData; + cachedOnlineDriveData = getDriveDetails(driveId); + quotaRemaining = cachedOnlineDriveData.quotaRemaining; + + // Update 'quotaRemaining' + quotaRemaining = quotaRemaining - localFileSize; + + // Do the flags get updated? + if (quotaRemaining <= 0) { + if (appConfig.accountType == "personal"){ + // zero space available + addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional space."); + quotaRemaining = 0; + quotaAvailable = false; } else { - // Default operation if not 404, 429 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + // zero space available is being reported, maybe being restricted? + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + quotaRemaining = 0; + quotaRestricted = true; } } - // return response - return thisLevelChildren; + + // Updated the details + onlineDriveDetails[driveId] = DriveDetailsCache(driveId, quotaRestricted, quotaAvailable, quotaRemaining); + } + + // Update all of the known cached driveId quota details + void freshenCachedDriveQuotaDetails() { + foreach (driveId; onlineDriveDetails.keys) { + // Update this driveid quota details + addLogEntry("Freshen Quota Details: " ~ driveId, ["debug"]); + addOrUpdateOneDriveOnlineDetails(driveId); + } + } + + // Create a 'root' DB Tie Record for a Shared Folder from the JSON data + void createDatabaseRootTieRecordForOnlineSharedFolder(JSONValue onedriveJSONItem) { + // Creating|Updating a DB Tie + addLogEntry("Creating|Updating a 'root' DB Tie Record for this Shared Folder: " ~ onedriveJSONItem["name"].str, ["debug"]); + addLogEntry("Raw JSON for 'root' DB Tie Record: " ~ to!string(onedriveJSONItem), ["debug"]); + + // New DB Tie Item to detail the 'root' of the Shared Folder + Item tieDBItem; + tieDBItem.name = "root"; + + // Get the right parentReference details + if (isItemRemote(onedriveJSONItem)) { + tieDBItem.driveId = onedriveJSONItem["remoteItem"]["parentReference"]["driveId"].str; + tieDBItem.id = onedriveJSONItem["remoteItem"]["id"].str; + } else { + if (onedriveJSONItem["name"].str != "root") { + tieDBItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str; + tieDBItem.id = onedriveJSONItem["parentReference"]["id"].str; + } else { + tieDBItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str; + tieDBItem.id = onedriveJSONItem["id"].str; + } + } + + tieDBItem.type = ItemType.dir; + tieDBItem.mtime = SysTime.fromISOExtString(onedriveJSONItem["fileSystemInfo"]["lastModifiedDateTime"].str); + tieDBItem.parentId = null; + + // Add this DB Tie parent record to the local database + addLogEntry("Creating|Updating into local database a 'root' DB Tie record: " ~ to!string(tieDBItem), 
["debug"]); + itemDB.upsert(tieDBItem); + } + + // Create a DB Tie Record for a Shared Folder + void createDatabaseTieRecordForOnlineSharedFolder(Item parentItem) { + // Creating|Updating a DB Tie + addLogEntry("Creating|Updating a DB Tie Record for this Shared Folder: " ~ parentItem.name, ["debug"]); + addLogEntry("Parent Item Record: " ~ to!string(parentItem), ["debug"]); + + // New DB Tie Item to bind the 'remote' path to our parent path + Item tieDBItem; + tieDBItem.name = parentItem.name; + tieDBItem.driveId = parentItem.remoteDriveId; + tieDBItem.id = parentItem.remoteId; + tieDBItem.type = ItemType.dir; + tieDBItem.mtime = parentItem.mtime; + + // What account type is this as this determines what 'tieDBItem.parentId' should be set to + // There is a difference in the JSON responses between 'personal' and 'business' account types for Shared Folders + // Essentially an API inconsistency + if (appConfig.accountType == "personal") { + // Set tieDBItem.parentId to null + tieDBItem.parentId = null; + } else { + // The tieDBItem.parentId needs to be the correct driveId id reference + // Query the DB + Item[] rootDriveItems; + Item dbRecord; + rootDriveItems = itemDB.selectByDriveId(parentItem.remoteDriveId); + dbRecord = rootDriveItems[0]; + tieDBItem.parentId = dbRecord.id; + rootDriveItems = []; + } + + // Add tie DB record to the local database + addLogEntry("Creating|Updating into local database a DB Tie record: " ~ to!string(tieDBItem), ["debug"]); + itemDB.upsert(tieDBItem); } - // OneDrive Business Shared Folder support - void listOneDriveBusinessSharedFolders() - { - // List OneDrive Business Shared Folders - log.log("\nListing available OneDrive Business Shared Folders:"); - // Query the GET /me/drive/sharedWithMe API - JSONValue graphQuery; + // List all the OneDrive Business Shared Items for the user to see + void listBusinessSharedObjects() { + + JSONValue sharedWithMeItems; + + // Create a new API Instance for this thread and initialise it + OneDriveApi sharedWithMeOneDriveApiInstance; + sharedWithMeOneDriveApiInstance = new OneDriveApi(appConfig); + sharedWithMeOneDriveApiInstance.initialise(); + try { - graphQuery = onedrive.getSharedWithMe(); + sharedWithMeItems = sharedWithMeOneDriveApiInstance.getSharedWithMe(); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + sharedWithMeOneDriveApiInstance.releaseCurlEngine(); + sharedWithMeOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest();
- // Retry original request by calling function again to avoid replicating any further error handling
- log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - graphQuery = onedrive.getSharedWithMe();");
- graphQuery = onedrive.getSharedWithMe();
- }
- if (e.httpStatusCode >= 500) {
- // There was a HTTP 5xx Server Side Error
- displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
- // Must exit here
- onedrive.shutdown();
- exit(-1);
- }
+ // Display error message
+ displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
+
+ // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory
+ sharedWithMeOneDriveApiInstance.releaseCurlEngine();
+ sharedWithMeOneDriveApiInstance = null;
+ // Perform Garbage Collection
+ GC.collect();
+ return;
}
- if (graphQuery.type() == JSONType.object) {
- if (count(graphQuery["value"].array) == 0) {
- // no shared folders returned
- write("\nNo OneDrive Business Shared Folders were returned\n");
- } else {
- // shared folders were returned
- log.vdebug("onedrive.getSharedWithMe API Response: ", graphQuery);
- foreach (searchResult; graphQuery["value"].array) {
- // loop variables
- string sharedFolderName;
+ if (sharedWithMeItems.type() == JSONType.object) {
+
+ if (count(sharedWithMeItems["value"].array) > 0) {
+ // Shared items were returned
+ addLogEntry();
+ addLogEntry("Listing available OneDrive Business Shared Items:");
+ addLogEntry();
+
+ // Iterate through the array
+ foreach (searchResult; sharedWithMeItems["value"].array) {
+
+ // loop variables for each item
string sharedByName;
string sharedByEmail;
- // is the shared item with us a 'folder' ?
- // we only handle folders, not files or other items
- if (isItemFolder(searchResult)) {
- // Debug response output
- log.vdebug("shared folder entry: ", searchResult);
- sharedFolderName = searchResult["name"].str;
-
- // configure who this was shared by
- if ("sharedBy" in searchResult["remoteItem"]["shared"]) {
- // we have shared by details we can use
- if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) {
- sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str;
- }
- if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) {
- sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str;
- }
+
+ // Debug response output
+ addLogEntry("shared folder entry: " ~ to!string(searchResult), ["debug"]);
+
+ // Configure 'who' this was shared by
+ if ("sharedBy" in searchResult["remoteItem"]["shared"]) {
+ // we have shared by details we can use
+ if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) {
+ sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str;
}
- // Output query result
- log.log("---------------------------------------");
- log.log("Shared Folder: ", sharedFolderName);
- if ((sharedByName != "") && (sharedByEmail != "")) {
- log.log("Shared By: ", sharedByName, " (", sharedByEmail, ")");
- } else {
- if (sharedByName != "") {
- log.log("Shared By: ", sharedByName);
- }
+ if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) {
+ sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str;
}
- log.vlog("Item Id: ", searchResult["remoteItem"]["id"].str);
- log.vlog("Parent Drive Id: ", searchResult["remoteItem"]["parentReference"]["driveId"].str);
searchResult["remoteItem"]["parentReference"]) { - log.vlog("Parent Item Id: ", searchResult["remoteItem"]["parentReference"]["id"].str); + } + + // Output query result + addLogEntry("-----------------------------------------------------------------------------------"); + if (isItemFile(searchResult)) { + addLogEntry("Shared File: " ~ to!string(searchResult["name"].str)); + } else { + addLogEntry("Shared Folder: " ~ to!string(searchResult["name"].str)); + } + + // Detail 'who' shared this + if ((sharedByName != "") && (sharedByEmail != "")) { + addLogEntry("Shared By: " ~ sharedByName ~ " (" ~ sharedByEmail ~ ")"); + } else { + if (sharedByName != "") { + addLogEntry("Shared By: " ~ sharedByName); } } + + // More detail if --verbose is being used + addLogEntry("Item Id: " ~ searchResult["remoteItem"]["id"].str, ["verbose"]); + addLogEntry("Parent Drive Id: " ~ searchResult["remoteItem"]["parentReference"]["driveId"].str, ["verbose"]); + if ("id" in searchResult["remoteItem"]["parentReference"]) { + addLogEntry("Parent Item Id: " ~ searchResult["remoteItem"]["parentReference"]["id"].str, ["verbose"]); + } } + + // Close out the loop + addLogEntry("-----------------------------------------------------------------------------------"); + addLogEntry(); + + } else { + // No shared items + addLogEntry(); + addLogEntry("No OneDrive Business Shared Folders were returned"); + addLogEntry(); } - write("\n"); - } else { - // Log that an invalid JSON object was returned - log.error("ERROR: onedrive.getSharedWithMe call returned an invalid JSON Object"); } } - // Query itemdb.computePath() and catch potential assert when DB consistency issue occurs - string computeItemPath(string thisDriveId, string thisItemId) - { - static import core.exception; - string calculatedPath; - log.vdebug("Attempting to calculate local filesystem path for ", thisDriveId, " and ", thisItemId); + // Query all the OneDrive Business Shared Objects to sync only Shared Files + void queryBusinessSharedObjects() { + + JSONValue sharedWithMeItems; + Item sharedFilesRootDirectoryDatabaseRecord; + + // Create a new API Instance for this thread and initialise it + OneDriveApi sharedWithMeOneDriveApiInstance; + sharedWithMeOneDriveApiInstance = new OneDriveApi(appConfig); + sharedWithMeOneDriveApiInstance.initialise(); + try { - calculatedPath = itemdb.computePath(thisDriveId, thisItemId); - } catch (core.exception.AssertError) { - // broken tree in the database, we cant compute the path for this item id, exit - log.error("ERROR: A database consistency issue has been caught. A --resync is needed to rebuild the database."); - // Must exit here to preserve data - onedrive.shutdown(); - exit(-1); + sharedWithMeItems = sharedWithMeOneDriveApiInstance.getSharedWithMe(); + + // We cant shutdown the API instance here, as we reuse it below + + } catch (OneDriveException e) { + // Display error message + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + sharedWithMeOneDriveApiInstance.releaseCurlEngine(); + sharedWithMeOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + return; } - // return calculated path as string - return calculatedPath; - } - - void handleClientUnauthorised() - { - // common code for handling when a client is unauthorised - writeln(); - log.errorAndNotify("ERROR: Check your configuration as your refresh_token may be empty or invalid. 
- writeln();
- // Must exit here
- onedrive.shutdown();
- exit(-1);
- }
-
- // Wrapper function for makeDatabaseItem so we can check if the item, if a file, has any hashes
- private Item makeItem(JSONValue onedriveJSONItem)
- {
- Item newDatabaseItem = makeDatabaseItem(onedriveJSONItem);
+ // Valid JSON response
+ if (sharedWithMeItems.type() == JSONType.object) {
- // Check for hashes in this DB item
- if (newDatabaseItem.type == ItemType.file) {
- // Does this file have a size greater than 0 - zero size files will potentially not have a hash
- if (hasFileSize(onedriveJSONItem)) {
- if (onedriveJSONItem["size"].integer > 0) {
- // Does the item have any hashes?
- if ((newDatabaseItem.quickXorHash.empty) && (newDatabaseItem.sha256Hash.empty)) {
- // Odd .. no hash ......
- string apiMessage = "WARNING: OneDrive API inconsistency - this file does not have any hash: ";
- // This is computationally expensive .. but we are only doing this if there are no hashses provided:
- bool parentInDatabase = itemdb.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.parentId);
- if (parentInDatabase) {
- // Calculate this item path
- string newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name;
- log.log(apiMessage, newItemPath);
+ // Get the configuredBusinessSharedFilesDirectoryName DB item
+ // We need this record as we have to 'fake' create all the folders for the shared files
+ // Then 'fake' create the file entries in the database with the correct parent folder, which is the local folder
+ itemDB.selectByPath(baseName(appConfig.configuredBusinessSharedFilesDirectoryName), appConfig.defaultDriveId, sharedFilesRootDirectoryDatabaseRecord);
+
+ // For each item returned, if a file, process it
+ foreach (searchResult; sharedWithMeItems["value"].array) {
+
+ // Shared Business Folders are added to the account using 'Add shortcut to My files'
+ // We only care here about any remaining 'files' that are shared with the user
+
+ if (isItemFile(searchResult)) {
+ // Debug response output
+ addLogEntry("getSharedWithMe Response Shared File JSON: " ~ to!string(searchResult), ["debug"]);
+
+ // Make a DB item from this JSON
+ Item sharedFileOriginalData = makeItem(searchResult);
+
+ // Variables for each item
+ string sharedByName;
+ string sharedByEmail;
+ string sharedByFolderName;
+ string newLocalSharedFilePath;
+ string newItemPath;
+ Item sharedFilesPath;
+ JSONValue fileToDownload;
+ JSONValue detailsToUpdate;
+ JSONValue latestOnlineDetails;
+
+ // Configure 'who' this was shared by
+ if ("sharedBy" in searchResult["remoteItem"]["shared"]) {
+ // we have shared by details we can use
+ if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) {
+ sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str;
+ }
+ if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) {
+ sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str;
+ }
+ }
+
+ // Configure 'who' shared this, so that we can create the directory for that user's shared files with us
+ if ((sharedByName != "") && (sharedByEmail != "")) {
+ sharedByFolderName = sharedByName ~ " (" ~ sharedByEmail ~ ")";
+
+ } else {
+ if (sharedByName != "") {
+ sharedByFolderName = sharedByName;
+ }
+ }
+
+ // Create the local path to store this user's shared files with us
+ newLocalSharedFilePath = buildNormalizedPath(buildPath(appConfig.configuredBusinessSharedFilesDirectoryName, sharedByFolderName));
+
+ // Does this user's local directory to store the shared file(s) exist?
+ if (!exists(newLocalSharedFilePath)) {
+ // Folder does not exist locally and needs to be created
+ addLogEntry("Creating the OneDrive Business Shared File Users Local Directory: " ~ newLocalSharedFilePath);
+
+ // Local folder does not exist, thus needs to be created
+ mkdirRecurse(newLocalSharedFilePath);
+
+ // As this will not be created online, generate a response so it can be saved to the database
+ sharedFilesPath = makeItem(createFakeResponse(baseName(newLocalSharedFilePath)));
+
+ // Update sharedFilesPath parent items to that of sharedFilesRootDirectoryDatabaseRecord
+ sharedFilesPath.parentId = sharedFilesRootDirectoryDatabaseRecord.id;
+
+ // Add DB record to the local database
+ addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]);
+ itemDB.upsert(sharedFilesPath);
+ } else {
+ // Folder exists locally, is the folder in the database?
+ // Query DB for this path
+ Item dbRecord;
+ if (!itemDB.selectByPath(baseName(newLocalSharedFilePath), appConfig.defaultDriveId, dbRecord)) {
+ // As this will not be created online, generate a response so it can be saved to the database
+ sharedFilesPath = makeItem(createFakeResponse(baseName(newLocalSharedFilePath)));
+
+ // Update sharedFilesPath parent items to that of sharedFilesRootDirectoryDatabaseRecord
+ sharedFilesPath.parentId = sharedFilesRootDirectoryDatabaseRecord.id;
+
+ // Add DB record to the local database
+ addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]);
+ itemDB.upsert(sharedFilesPath);
+ } else {
+ // The folder is already in the database, use the existing record so that 'sharedFilesPath.id' is valid below
+ sharedFilesPath = dbRecord;
+ }
+ }
+
+ // The file to download JSON details
+ fileToDownload = searchResult;
+
+ // Get the latest online details
+ latestOnlineDetails = sharedWithMeOneDriveApiInstance.getPathDetailsById(sharedFileOriginalData.remoteDriveId, sharedFileOriginalData.remoteId);
+ Item tempOnlineRecord = makeItem(latestOnlineDetails);
+
+ // With the local folders created, now update 'fileToDownload' to download the file to our location:
+ // "parentReference": {
+ // "driveId": "",
+ // "driveType": "business",
+ // "id": "",
+ // },
+
+ // The getSharedWithMe() JSON response contains an API bug where the 'hash' of the file is not provided
+ // Use the 'latestOnlineDetails' response to obtain the hash
+ // "file": {
+ // "hashes": {
+ // "quickXorHash": ""
+ // }
+ // },
+ //
+
+ // The getSharedWithMe() JSON response also contains an API bug where the 'size' of the file is not the actual size of the file
+ // The getSharedWithMe() JSON response also contains an API bug where the 'eTag' of the file is not present
+ // The getSharedWithMe() JSON response also contains an API bug where the 'lastModifiedDateTime' of the file is the date when the file was shared, not the actual date last modified
+
+ detailsToUpdate = [
+ "parentReference": JSONValue([
+ "driveId": JSONValue(appConfig.defaultDriveId),
+ "driveType": JSONValue("business"),
+ "id": JSONValue(sharedFilesPath.id)
+ ]),
+ "file": JSONValue([
+ "hashes":JSONValue([
+ "quickXorHash": JSONValue(tempOnlineRecord.quickXorHash)
+ ])
+ ]),
+ "eTag": JSONValue(tempOnlineRecord.eTag)
+ ];
+
+ foreach (string key, JSONValue value; detailsToUpdate.object) {
+ fileToDownload[key] = value;
+ }
+
+ // Update specific items
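+ // These fields exist in both the top level JSON item and the nested 'remoteItem' object, so both copies are updated below to keep them consistent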
+ // Update 'size' - use a 64-bit integer so files larger than 2 GiB are handled correctly
+ fileToDownload["size"] = to!long(tempOnlineRecord.size);
+ fileToDownload["remoteItem"]["size"] = to!long(tempOnlineRecord.size);
+ // Update 'lastModifiedDateTime'
+ fileToDownload["lastModifiedDateTime"] = latestOnlineDetails["fileSystemInfo"]["lastModifiedDateTime"].str;
+ fileToDownload["fileSystemInfo"]["lastModifiedDateTime"] = latestOnlineDetails["fileSystemInfo"]["lastModifiedDateTime"].str;
+ fileToDownload["remoteItem"]["lastModifiedDateTime"] = latestOnlineDetails["fileSystemInfo"]["lastModifiedDateTime"].str;
+ fileToDownload["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"] = latestOnlineDetails["fileSystemInfo"]["lastModifiedDateTime"].str;
+
+ // Final JSON that will be used to download the file
+ addLogEntry("Final fileToDownload: " ~ to!string(fileToDownload), ["debug"]);
+
+ // Make the new DB item from the consolidated JSON item
+ Item downloadSharedFileDbItem = makeItem(fileToDownload);
+
+ // Calculate the full local path for this shared file
+ newItemPath = computeItemPath(downloadSharedFileDbItem.driveId, downloadSharedFileDbItem.parentId) ~ "/" ~ downloadSharedFileDbItem.name;
+
+ // Does this potential file exist on disk?
+ if (!exists(newItemPath)) {
+ // The shared file does not exist locally
+ // Is this something we actually want? Check the JSON against Client Side Filtering Rules
+ bool unwanted = checkJSONAgainstClientSideFiltering(fileToDownload);
+ if (!unwanted) {
+ // File has not been excluded via Client Side Filtering
+ // Submit this shared file to be processed further for downloading
+ applyPotentiallyNewLocalItem(downloadSharedFileDbItem, fileToDownload, newItemPath);
+ }
+ } else {
+ // A file with the same name already exists in the desired local location
+ // Is this local file in sync?
+ string itemSource = "remote";
+ if (!isItemSynced(downloadSharedFileDbItem, newItemPath, itemSource)) {
+ // Not in sync ....
+ Item existingDatabaseItem;
+ bool existingDBEntry = itemDB.selectById(downloadSharedFileDbItem.driveId, downloadSharedFileDbItem.id, existingDatabaseItem);
+
+ // Is there a DB entry?
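+ // If a DB record exists this is treated as a potentially changed item; if not, the existing local file is first backed up via safeBackup() before the shared file is downloaded in its place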
+ if (existingDBEntry) { + // Existing DB entry + // Need to be consistent here with how 'newItemPath' was calculated + string existingItemPath = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name; + // Attempt to apply this changed item + applyPotentiallyChangedItem(existingDatabaseItem, existingItemPath, downloadSharedFileDbItem, newItemPath, fileToDownload); + } else { + // File exists locally, it is not in sync, there is no record in the DB of this file + // In case the renamed path is needed + string renamedPath; + // Rename the local file + safeBackup(newItemPath, dryRun, renamedPath); + // Submit this shared file to be processed further for downloading + applyPotentiallyNewLocalItem(downloadSharedFileDbItem, fileToDownload, newItemPath); + } } else { - // Use the item ID - log.log(apiMessage, newDatabaseItem.id); + // Item is in sync, ensure the DB record is the same + itemDB.upsert(downloadSharedFileDbItem); } } } } } - return newDatabaseItem; - } - -} + + // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory + sharedWithMeOneDriveApiInstance.releaseCurlEngine(); + sharedWithMeOneDriveApiInstance = null; + // Perform Garbage Collection + GC.collect(); + } +} \ No newline at end of file diff --git a/src/upload.d b/src/upload.d deleted file mode 100644 index 012598a05..000000000 --- a/src/upload.d +++ /dev/null @@ -1,302 +0,0 @@ -import std.algorithm, std.conv, std.datetime, std.file, std.json; -import std.stdio, core.thread, std.string; -import progress, onedrive, util; -static import log; - -private long fragmentSize = 10 * 2^^20; // 10 MiB - -struct UploadSession -{ - private OneDriveApi onedrive; - private bool verbose; - // https://dev.onedrive.com/resources/uploadSession.htm - private JSONValue session; - // path where to save the session - private string sessionFilePath; - - this(OneDriveApi onedrive, string sessionFilePath) - { - assert(onedrive); - this.onedrive = onedrive; - this.sessionFilePath = sessionFilePath; - this.verbose = verbose; - } - - JSONValue upload(string localPath, const(char)[] parentDriveId, const(char)[] parentId, const(char)[] filename, const(char)[] eTag = null) - { - // Fix https://github.com/abraunegg/onedrive/issues/2 - // More Details https://github.com/OneDrive/onedrive-api-docs/issues/778 - - SysTime localFileLastModifiedTime = timeLastModified(localPath).toUTC(); - localFileLastModifiedTime.fracSecs = Duration.zero; - - JSONValue fileSystemInfo = [ - "item": JSONValue([ - "@name.conflictBehavior": JSONValue("replace"), - "fileSystemInfo": JSONValue([ - "lastModifiedDateTime": localFileLastModifiedTime.toISOExtString() - ]) - ]) - ]; - - // Try to create the upload session for this file - session = onedrive.createUploadSession(parentDriveId, parentId, filename, eTag, fileSystemInfo); - - if ("uploadUrl" in session){ - session["localPath"] = localPath; - save(); - return upload(); - } else { - // there was an error - log.vlog("Create file upload session failed ... skipping file upload"); - // return upload() will return a JSONValue response, create an empty JSONValue response to return - JSONValue response; - return response; - } - } - - /* Restore the previous upload session. - * Returns true if the session is valid. Call upload() to resume it. - * Returns false if there is no session or the session is expired. 
*/ - bool restore() - { - if (exists(sessionFilePath)) { - log.vlog("Trying to restore the upload session ..."); - // We cant use JSONType.object check, as this is currently a string - // We cant use a try & catch block, as it does not catch std.json.JSONException - auto sessionFileText = readText(sessionFilePath); - if(canFind(sessionFileText,"@odata.context")) { - session = readText(sessionFilePath).parseJSON(); - } else { - log.vlog("Upload session resume data is invalid"); - remove(sessionFilePath); - return false; - } - - // Check the session resume file for expirationDateTime - if ("expirationDateTime" in session){ - // expirationDateTime in the file - auto expiration = SysTime.fromISOExtString(session["expirationDateTime"].str); - if (expiration < Clock.currTime()) { - log.vlog("The upload session is expired"); - return false; - } - if (!exists(session["localPath"].str)) { - log.vlog("The file does not exist anymore"); - return false; - } - // Can we read the file - as a permissions issue or file corruption will cause a failure on resume - // https://github.com/abraunegg/onedrive/issues/113 - if (readLocalFile(session["localPath"].str)){ - // able to read the file - // request the session status - JSONValue response; - try { - response = onedrive.requestUploadStatus(session["uploadUrl"].str); - } catch (OneDriveException e) { - // handle any onedrive error response - if (e.httpStatusCode == 400) { - log.vlog("Upload session not found"); - return false; - } - } - - // do we have a valid response from OneDrive? - if (response.type() == JSONType.object){ - // JSON object - if (("expirationDateTime" in response) && ("nextExpectedRanges" in response)){ - // has the elements we need - session["expirationDateTime"] = response["expirationDateTime"]; - session["nextExpectedRanges"] = response["nextExpectedRanges"]; - if (session["nextExpectedRanges"].array.length == 0) { - log.vlog("The upload session is completed"); - return false; - } - } else { - // bad data - log.vlog("Restore file upload session failed - invalid data response from OneDrive"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - } else { - // not a JSON object - log.vlog("Restore file upload session failed - invalid response from OneDrive"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - return true; - } else { - // unable to read the local file - log.vlog("Restore file upload session failed - unable to read the local file"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - } else { - // session file contains an error - cant resume - log.vlog("Restore file upload session failed - cleaning up session resume"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - } - return false; - } - - JSONValue upload() - { - // Response for upload - JSONValue response; - - // session JSON needs to contain valid elements - long offset; - long fileSize; - - if ("nextExpectedRanges" in session){ - offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long; - } - - if ("localPath" in session){ - fileSize = getSize(session["localPath"].str); - } - - if ("uploadUrl" in session){ - // Upload file via session created - // Upload Progress Bar - size_t iteration = (roundTo!int(double(fileSize)/double(fragmentSize)))+1; - Progress p = new Progress(iteration); - p.title = "Uploading"; - long fragmentCount = 0; - long fragSize = 0; - - // Initialise the download bar at 0% - p.next(); - - while 
(true) { - fragmentCount++; - log.vdebugNewLine("Fragment: ", fragmentCount, " of ", iteration); - p.next(); - log.vdebugNewLine("fragmentSize: ", fragmentSize, "offset: ", offset, " fileSize: ", fileSize ); - fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset; - log.vdebugNewLine("Using fragSize: ", fragSize); - - // fragSize must not be a negative value - if (fragSize < 0) { - // Session upload will fail - // not a JSON object - fragment upload failed - log.vlog("File upload session failed - invalid calculation of fragment size"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - // set response to null as error - response = null; - return response; - } - - // If the resume upload fails, we need to check for a return code here - try { - response = onedrive.uploadFragment( - session["uploadUrl"].str, - session["localPath"].str, - offset, - fragSize, - fileSize - ); - } catch (OneDriveException e) { - // if a 100 response is generated, continue - if (e.httpStatusCode == 100) { - continue; - } - // there was an error response from OneDrive when uploading the file fragment - // handle 'HTTP request returned status code 429 (Too Many Requests)' first - if (e.httpStatusCode == 429) { - auto retryAfterValue = onedrive.getRetryAfterValue(); - log.vdebug("Fragment upload failed - received throttle request response from OneDrive"); - log.vdebug("Using Retry-After Value = ", retryAfterValue); - // Sleep thread as per request - log.log("\nThread sleeping due to 'HTTP request returned status code 429' - The request has been throttled"); - log.log("Sleeping for ", retryAfterValue, " seconds"); - Thread.sleep(dur!"seconds"(retryAfterValue)); - log.log("Retrying fragment upload"); - } else { - // insert a new line as well, so that the below error is inserted on the console in the right location - log.vlog("\nFragment upload failed - received an exception response from OneDrive"); - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // retry fragment upload in case error is transient - log.vlog("Retrying fragment upload"); - } - - try { - response = onedrive.uploadFragment( - session["uploadUrl"].str, - session["localPath"].str, - offset, - fragSize, - fileSize - ); - } catch (OneDriveException e) { - // OneDrive threw another error on retry - log.vlog("Retry to upload fragment failed"); - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // set response to null as the fragment upload was in error twice - response = null; - } - } - // was the fragment uploaded without issue? - if (response.type() == JSONType.object){ - offset += fragmentSize; - if (offset >= fileSize) break; - // update the session details - session["expirationDateTime"] = response["expirationDateTime"]; - session["nextExpectedRanges"] = response["nextExpectedRanges"]; - save(); - } else { - // not a JSON object - fragment upload failed - log.vlog("File upload session failed - invalid response from OneDrive"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - // set response to null as error - response = null; - return response; - } - } - // upload complete - p.next(); - writeln(); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return response; - } else { - // session elements were not present - log.vlog("Session has no valid upload URL ... 
skipping this file upload");
- // return an empty JSON response
- response = null;
- return response;
- }
- }
-
- string getUploadSessionLocalFilePath() {
- // return the session file path
- string localPath = "";
- if ("localPath" in session){
- localPath = session["localPath"].str;
- }
- return localPath;
- }
-
- // save session details to temp file
- private void save()
- {
- std.file.write(sessionFilePath, session.toString());
- }
-}
diff --git a/src/util.d b/src/util.d
index cbaa5b8ef..02c0c01d9 100644
--- a/src/util.d
+++ b/src/util.d
@@ -1,6 +1,12 @@
+// What is this module called?
+module util;
+
+// What does this module require to function?
+import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit;
import std.base64;
import std.conv;
-import std.digest.crc, std.digest.sha;
+import std.digest.crc;
+import std.digest.sha;
import std.net.curl;
import std.datetime;
import std.file;
@@ -13,222 +19,391 @@ import std.algorithm;
import std.uri;
import std.json;
import std.traits;
-import qxor;
+import std.utf;
import core.stdc.stdlib;
+import core.thread;
+import core.memory;
+import std.math;
+import std.format;
+import std.random;
+import std.array;
+import std.ascii;
+import std.range;
+import std.exception;
+import core.sys.posix.pwd;
+import core.sys.posix.unistd;
+import core.stdc.string;
+// What other modules that we have created do we need to import?
import log;
import config;
+import qxor;
+import curlEngine;
+// module variables
shared string deviceName;
+ulong previousRSS;
-static this()
-{
+static this() {
deviceName = Socket.hostName;
}
-// gives a new name to the specified file or directory
-void safeRename(const(char)[] path)
-{
- auto ext = extension(path);
- auto newPath = path.chomp(ext) ~ "-" ~ deviceName;
+// Creates a safe backup of the given item, and only performs the function if not in a --dry-run scenario
+void safeBackup(const(char)[] path, bool dryRun, out string renamedPath) {
+ auto ext = extension(path);
+ auto newPath = path.chomp(ext) ~ "-" ~ deviceName;
+ int n = 2;
+
+ // Limit to 1000 iterations .. 1000 file backups
+ while (exists(newPath ~ ext) && n < 1000) {
+ newPath = newPath.chomp("-" ~ (n - 1).to!string) ~ "-" ~ n.to!string;
+ n++;
+ }
+
+ // Check if unique file name was found
if (exists(newPath ~ ext)) {
- int n = 2;
- char[] newPath2;
- do {
- newPath2 = newPath ~ "-" ~ n.to!string;
- n++;
- } while (exists(newPath2 ~ ext));
- newPath = newPath2;
+ // On the 1000th backup of this file, this should be triggered
+ addLogEntry("Failed to backup " ~ to!string(path) ~ ": Unique file name could not be found after 1000 attempts", ["error"]);
+ return; // Exit function as a unique file name could not be found
}
+
+ // Configure the new name
newPath ~= ext;
- rename(path, newPath);
+
+ // Log that we are performing the backup by renaming the file
+ addLogEntry("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: " ~ to!string(path) ~ " -> " ~ to!string(newPath), ["verbose"]);
+
+ if (!dryRun) {
+ // Not a --dry-run scenario - do the file rename
+ //
+ // There are 2 options to rename a file
+ // rename() - https://dlang.org/library/std/file/rename.html
+ // std.file.copy() - https://dlang.org/library/std/file/copy.html
+ //
+ // rename:
+ // It is not possible to rename a file across different mount points or drives. On POSIX, the operation is atomic. That means, if to already exists there will be no time period during the operation where to is missing.
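+ // For example, rename("file.txt", "file-hostname.txt") either completes fully or throws a FileException; because the backup name is always created alongside the original file, both paths reside on the same filesystem.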
+ //
+ // std.file.copy
+ // Copy file 'from' to file 'to'. File timestamps are preserved. File attributes are preserved, if preserve equals Yes.preserveAttributes
+ //
+ // Use rename() as Linux is POSIX compliant, giving an atomic operation where at no point in time is the 'to' path missing.
+ try {
+ rename(path, newPath);
+ renamedPath = to!string(newPath);
+ } catch (Exception e) {
+ // Handle exceptions, e.g., log error
+ addLogEntry("Renaming of local file failed for " ~ to!string(path) ~ ": " ~ e.msg, ["error"]);
+ }
+ } else {
+ addLogEntry("DRY-RUN: Skipping renaming local file to preserve existing file and prevent data loss: " ~ to!string(path) ~ " -> " ~ to!string(newPath), ["debug"]);
+ }
+}
+
+// Rename the given item, and only performs the function if not in a --dry-run scenario
+void safeRename(const(char)[] oldPath, const(char)[] newPath, bool dryRun) {
+ // Perform the rename
+ if (!dryRun) {
+ addLogEntry("Calling rename(oldPath, newPath)", ["debug"]);
+ // Use rename() as Linux is POSIX compliant, giving an atomic operation where at no point in time is the 'to' path missing.
+ rename(oldPath, newPath);
+ } else {
+ addLogEntry("DRY-RUN: Skipping local file rename", ["debug"]);
+ }
}
-// deletes the specified file without throwing an exception if it does not exists
-void safeRemove(const(char)[] path)
-{
+// Deletes the specified file without throwing an exception if it does not exist
+void safeRemove(const(char)[] path) {
if (exists(path)) remove(path);
}
-// returns the quickXorHash base64 string of a file
-string computeQuickXorHash(string path)
-{
+// Returns the SHA1 hash hex string of a file
+string computeSha1Hash(string path) {
+ SHA1 sha;
+ auto file = File(path, "rb");
+ scope(exit) file.close(); // Ensure file is closed post read
+ foreach (ubyte[] data; chunks(file, 4096)) {
+ sha.put(data);
+ }
+
+ // Store the hash in a local variable before converting to string
+ auto hashResult = sha.finish();
+ return toHexString(hashResult).idup; // Convert the hash to a hex string
+}
+
+// Returns the quickXorHash base64 string of a file
+string computeQuickXorHash(string path) {
QuickXor qxor;
auto file = File(path, "rb");
+ scope(exit) file.close(); // Ensure file is closed post read
foreach (ubyte[] data; chunks(file, 4096)) {
qxor.put(data);
}
- return Base64.encode(qxor.finish());
+
+ // Store the hash in a local variable before converting to string
+ auto hashResult = qxor.finish();
+ return Base64.encode(hashResult).idup; // Convert the hash to a base64 string
}
-// returns the SHA256 hex string of a file
+// Returns the SHA256 hex string of a file
string computeSHA256Hash(string path) {
SHA256 sha256;
auto file = File(path, "rb");
+ scope(exit) file.close(); // Ensure file is closed post read
foreach (ubyte[] data; chunks(file, 4096)) {
sha256.put(data);
}
-
- return sha256.finish().toHexString().dup;
-}
-
-// converts wildcards (*, ?)
to regex -Regex!char wild2regex(const(char)[] pattern) -{ - string str; - str.reserve(pattern.length + 2); - str ~= "^"; - foreach (c; pattern) { - switch (c) { - case '*': - str ~= "[^/]*"; - break; - case '.': - str ~= "\\."; - break; - case '?': - str ~= "[^/]"; - break; - case '|': - str ~= "$|^"; - break; - case '+': - str ~= "\\+"; - break; - case ' ': - str ~= "\\s+"; - break; - case '/': - str ~= "\\/"; - break; - case '(': - str ~= "\\("; - break; - case ')': - str ~= "\\)"; - break; - default: - str ~= c; - break; - } - } - str ~= "$"; - return regex(str, "i"); -} - -// returns true if the network connection is available -bool testNetwork(Config cfg) -{ - // Use low level HTTP struct - auto http = HTTP(); - http.url = "https://login.microsoftonline.com"; - // DNS lookup timeout - http.dnsTimeout = (dur!"seconds"(cfg.getValueLong("dns_timeout"))); - // Timeout for connecting - http.connectTimeout = (dur!"seconds"(cfg.getValueLong("connect_timeout"))); - // Data Timeout for HTTPS connections - http.dataTimeout = (dur!"seconds"(cfg.getValueLong("data_timeout"))); - // maximum time any operation is allowed to take - // This includes dns resolution, connecting, data transfer, etc. - http.operationTimeout = (dur!"seconds"(cfg.getValueLong("operation_timeout"))); - // What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6 - http.handle.set(CurlOption.ipresolve,cfg.getValueLong("ip_protocol_version")); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only - - // HTTP connection test method - http.method = HTTP.Method.head; - // Attempt to contact the Microsoft Online Service - try { - log.vdebug("Attempting to contact online service"); + + // Store the hash in a local variable before converting to string + auto hashResult = sha256.finish(); + return toHexString(hashResult).idup; // Convert the hash to a hex string +} + +// Converts wildcards (*, ?) to regex +// The changes here need to be 100% regression tested before full release +Regex!char wild2regex(const(char)[] pattern) { + string str; + str.reserve(pattern.length + 2); + str ~= "^"; + foreach (c; pattern) { + switch (c) { + case '*': + str ~= ".*"; // Changed to match any character. Was: str ~= "[^/]*"; + break; + case '.': + str ~= "\\."; + break; + case '?': + str ~= "."; // Changed to match any single character. Was: str ~= "[^/]"; + break; + case '|': + str ~= "$|^"; + break; + case '+': + str ~= "\\+"; + break; + case ' ': + str ~= "\\s"; // Changed to match exactly one whitespace. 
Was: str ~= "\\s+"; + break; + case '/': + str ~= "\\/"; + break; + case '(': + str ~= "\\("; + break; + case ')': + str ~= "\\)"; + break; + default: + str ~= c; + break; + } + } + str ~= "$"; + return regex(str, "i"); +} + +// Test Internet access to Microsoft OneDrive using a simple HTTP HEAD request +bool testInternetReachability(ApplicationConfig appConfig) { + HTTP http = HTTP(); + http.url = "https://login.microsoftonline.com"; + + // Configure timeouts based on application configuration + http.dnsTimeout = dur!"seconds"(appConfig.getValueLong("dns_timeout")); + http.connectTimeout = dur!"seconds"(appConfig.getValueLong("connect_timeout")); + http.dataTimeout = dur!"seconds"(appConfig.getValueLong("data_timeout")); + http.operationTimeout = dur!"seconds"(appConfig.getValueLong("operation_timeout")); + + // Set IP protocol version + http.handle.set(CurlOption.ipresolve, appConfig.getValueLong("ip_protocol_version")); + + // Set HTTP method to HEAD for minimal data transfer + http.method = HTTP.Method.head; + + // Execute the request and handle exceptions + try { + addLogEntry("Attempting to contact Microsoft OneDrive Login Service"); http.perform(); - log.vdebug("Shutting down HTTP engine as successfully reached OneDrive Online Service"); + + // Check response for HTTP status code + if (http.statusLine.code >= 200 && http.statusLine.code < 400) { + addLogEntry("Successfully reached Microsoft OneDrive Login Service"); + } else { + addLogEntry("Failed to reach Microsoft OneDrive Login Service. HTTP status code: " ~ to!string(http.statusLine.code)); + throw new Exception("HTTP Request Failed with Status Code: " ~ to!string(http.statusLine.code)); + } + http.shutdown(); + object.destroy(http); return true; - } catch (SocketException e) { - // Socket issue - log.vdebug("HTTP Socket Issue"); - log.error("Cannot connect to Microsoft OneDrive Service - Socket Issue"); + } catch (SocketException e) { + addLogEntry("Cannot connect to Microsoft OneDrive Service - Socket Issue: " ~ e.msg); displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + http.shutdown(); + object.destroy(http); return false; - } catch (CurlException e) { - // No network connection to OneDrive Service - log.vdebug("No Network Connection"); - log.error("Cannot connect to Microsoft OneDrive Service - Network Connection Issue"); + } catch (CurlException e) { + addLogEntry("Cannot connect to Microsoft OneDrive Service - Network Connection Issue: " ~ e.msg); displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + http.shutdown(); + object.destroy(http); return false; - } + } catch (Exception e) { + addLogEntry("Unexpected error occurred: " ~ e.toString()); + displayOneDriveErrorMessage(e.toString(), getFunctionName!({})); + http.shutdown(); + object.destroy(http); + return false; + } +} + +// Retry Internet access test to Microsoft OneDrive +bool retryInternetConnectivtyTest(ApplicationConfig appConfig) { + int retryAttempts = 0; + int backoffInterval = 1; // initial backoff interval in seconds + int maxBackoffInterval = 3600; // maximum backoff interval in seconds + int maxRetryCount = 100; // max retry attempts, reduced for practicality + bool isOnline = false; + + while (retryAttempts < maxRetryCount && !isOnline) { + if (backoffInterval < maxBackoffInterval) { + backoffInterval = min(backoffInterval * 2, maxBackoffInterval); // exponential increase + } + + addLogEntry(" Retry Attempt: " ~ to!string(retryAttempts + 1), ["debug"]); + addLogEntry(" Retry In (seconds): " ~ to!string(backoffInterval), ["debug"]); + + 
Thread.sleep(dur!"seconds"(backoffInterval)); + isOnline = testInternetReachability(appConfig); // assuming this function is defined elsewhere + + if (isOnline) { + addLogEntry("Internet connectivity to Microsoft OneDrive service has been restored"); + } + + retryAttempts++; + } + + if (!isOnline) { + addLogEntry("ERROR: Was unable to reconnect to the Microsoft OneDrive service after " ~ to!string(maxRetryCount) ~ " attempts!"); + } + + // Return state + return isOnline; } -// Can we read the file - as a permissions issue or file corruption will cause a failure +// Can we read the local file - as a permissions issue or file corruption will cause a failure // https://github.com/abraunegg/onedrive/issues/113 // returns true if file can be accessed -bool readLocalFile(string path) -{ - try { - // attempt to read up to the first 1 byte of the file - // validates we can 'read' the file based on file permissions - read(path,1); - } catch (std.file.FileException e) { - // unable to read the new local file - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return false; - } - return true; -} +bool readLocalFile(string path) { + // What is the file size + if (getSize(path) != 0) { + try { + // Attempt to read up to the first 1 byte of the file + auto data = read(path, 1); -// calls globMatch for each string in pattern separated by '|' -bool multiGlobMatch(const(char)[] path, const(char)[] pattern) -{ - foreach (glob; pattern.split('|')) { - if (globMatch!(std.path.CaseSensitive.yes)(path, glob)) { - return true; + // Check if the read operation was successful + if (data.length != 1) { + // Read operation not successful + addLogEntry("Failed to read the required amount from the file: " ~ path); + return false; + } + } catch (std.file.FileException e) { + // Unable to read the file, log the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + return false; } + return true; + } else { + // zero byte files cannot be read, return true + return true; } - return false; } -bool isValidName(string path) -{ - // Restriction and limitations about windows naming files +// Calls globMatch for each string in pattern separated by '|' +bool multiGlobMatch(const(char)[] path, const(char)[] pattern) { + if (path.length == 0 || pattern.length == 0) { + return false; + } + + if (!pattern.canFind('|')) { + return globMatch!(std.path.CaseSensitive.yes)(path, pattern); + } + + foreach (glob; pattern.split('|')) { + if (globMatch!(std.path.CaseSensitive.yes)(path, glob)) { + return true; + } + } + return false; +} + +// Does the path pass the Microsoft restriction and limitations about naming files and folders +bool isValidName(string path) { + // Restriction and limitations about windows naming files and folders // https://msdn.microsoft.com/en-us/library/aa365247 // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders - // allow root item - if (path == ".") { - return true; - } + if (path == ".") { + return true; + } - bool matched = true; - string itemName = baseName(path); - - auto invalidNameReg = - ctRegex!( - // Leading whitespace and trailing whitespace/dot - `^\s.*|^.*[\s\.]$|` ~ - // Invalid characters - `.*[<>:"\|\?*/\\].*|` ~ - // Reserved device name and trailing .~ - `(?:^CON|^PRN|^AUX|^NUL|^COM[0-9]|^LPT[0-9])(?:[.].+)?$` - ); - auto m = match(itemName, invalidNameReg); - matched = m.empty; - - // Additional explicit validation checks - if (itemName == ".lock") {matched = false;} - if (itemName == "desktop.ini") 
{matched = false;} - // _vti_ cannot appear anywhere in a file or folder name - if(canFind(itemName, "_vti_")){matched = false;} - // Item name cannot equal '~' - if (itemName == "~") {matched = false;} - - // return response - return matched; -} - -bool containsBadWhiteSpace(string path) -{ - // allow root item - if (path == ".") { - return true; - } + string itemName = baseName(path).toLower(); // Ensure case-insensitivity + + // Check for explicitly disallowed names + // https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#invalidfilefoldernames + string[] disallowedNames = [ + ".lock", "desktop.ini", "CON", "PRN", "AUX", "NUL", + "COM0", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", + "LPT0", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9" + ]; + + // Creating an associative array for faster lookup + bool[string] disallowedSet; + foreach (name; disallowedNames) { + disallowedSet[name.toLower()] = true; // Normalise to lowercase + } + + if (disallowedSet.get(itemName, false) || itemName.startsWith("~$") || canFind(itemName, "_vti_")) { + return false; + } + + // Regular expression for invalid patterns + // https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#invalidcharacters + // Leading whitespace and trailing whitespace + // Invalid characters + // Trailing dot '.' (not documented above) , however see issue https://github.com/abraunegg/onedrive/issues/2678 + + //auto invalidNameReg = ctRegex!(`^\s.*|^.*[\s\.]$|.*[<>:"\|\?*/\\].*`); - original to remove at some point + auto invalidNameReg = ctRegex!(`^\s+|\s$|\.$|[<>:"\|\?*/\\]`); // revised 25/3/2024 + // - ^\s+ matches one or more whitespace characters at the start of the string. The + ensures we match one or more whitespaces, making it more efficient than .* for detecting leading whitespaces. + // - \s$ matches a whitespace character at the end of the string. This is more precise than [\s\.]$ because we'll handle the dot separately. + // - \.$ specifically matches a dot character at the end of the string, addressing the requirement to catch trailing dots as invalid. 
+	// - [<>:"\|\?*/\\] matches any single instance of the specified invalid characters: ", *, :, <, >, ?, /, \, |
+	
+	auto matchResult = match(itemName, invalidNameReg);
+	if (!matchResult.empty) {
+		return false;
+	}
+	
+	// Determine if the path is at the root level, if yes, check that 'forms' is not the first folder
+	auto segments = pathSplitter(path).array;
+	if (segments.length <= 2 && segments.back.toLower() == "forms") { // Check only the last segment, convert to lower as OneDrive is not POSIX compliant, easier to compare
+		return false;
+	}
+	
+	return true;
+}
+
+// Does the path contain any bad whitespace characters
+bool containsBadWhiteSpace(string path) {
+	// Check for null or empty string
+	if (path.length == 0) {
+		return false;
+	}
+	
+	// Check for root item
+	if (path == ".") {
+		return false;
+	}
 	// https://github.com/abraunegg/onedrive/issues/35
 	// Issue #35 presented an interesting issue where the filename contained a newline item
@@ -237,50 +412,131 @@ bool containsBadWhiteSpace(string path)
 	// /v1.0/me/drive/root:/.%2FState-of-the-art%2C%20challenges%2C%20and%20open%20issues%20in%20the%20integration%20of%20Internet%20of%0AThings%20and%20Cloud%20Computing.pdf
 	// The '$'\n'' is translated to %0A which causes the OneDrive query to fail
 	// Check for the presence of '%0A' via regex
-	
-	string itemName = encodeComponent(baseName(path));
-	auto invalidWhitespaceReg =
-		ctRegex!(
-			// Check for \n which is %0A when encoded
-			`%0A`
-		);
-	auto m = match(itemName, invalidWhitespaceReg);
-	return m.empty;
+	
+	string itemName = encodeComponent(baseName(path));
+	// Check for encoded newline character
+	return itemName.indexOf("%0A") != -1;
 }
 
-bool containsASCIIHTMLCodes(string path)
-{
+// Does the path contain any ASCII HTML Codes
+bool containsASCIIHTMLCodes(string path) {
+	// Check for null or empty string
+	if (path.length == 0) {
+		return false;
+	}
+	
+	// Check for root item
+	if (path == ".") {
+		return false;
+	}
+	
 	// https://github.com/abraunegg/onedrive/issues/151
-	// If a filename contains ASCII HTML codes, regardless of if it gets encoded, it generates an error
+	// If a filename contains ASCII HTML codes, it generates an error when attempting to upload this to Microsoft OneDrive
 	// Check if the filename contains an ASCII HTML code sequence
-	auto invalidASCIICode =
-		ctRegex!(
-			// Check to see if &#XXXX is in the filename
-			`(?:&#|&#[0-9][0-9]|&#[0-9][0-9][0-9]|&#[0-9][0-9][0-9][0-9])`
-		);
-	
-	auto m = match(path, invalidASCIICode);
-	return m.empty;
+	// Check for the pattern &# followed by 1 to 4 digits and a semicolon
+	auto invalidASCIICode = ctRegex!(`&#[0-9]{1,4};`);
+	
+	// Use match to search for ASCII HTML codes in the path
+	auto matchResult = match(path, invalidASCIICode);
+	
+	// Return true if ASCII HTML codes are found
+	return !matchResult.empty;
+}
+
+// Does the path contain any ASCII Control Codes
+bool containsASCIIControlCodes(string path) {
+	// Check for null or empty string
+	if (path.length == 0) {
+		return false;
+	}
+	
+	// Check for root item
+	if (path == ".") {
+		return false;
+	}
+	
+	// https://github.com/abraunegg/onedrive/discussions/2553#discussioncomment-7995254
+	// Define a ctRegex pattern for ASCII control codes and specific non-ASCII control characters
+	// This pattern includes the ASCII control range and common non-ASCII control characters
+	// Adjust the pattern as needed to include specific characters of concern
+	auto controlCodePattern = ctRegex!(`[\x00-\x1F\x7F]|\p{Cc}`); // Blocks ƒ†¯~‰ (#2553), allows α (#2598)
+	
+	// Use match to search for ASCII control codes in the path
+	auto matchResult = match(path, controlCodePattern);
+	
+	// Return true if matchResult is not empty (indicating a control code was found)
+	return !matchResult.empty;
+}
+
+// Is the path a valid UTF-16 encoded path?
+bool isValidUTF16(string path) {
+	// Check for null or empty string
+	if (path.length == 0) {
+		return true;
+	}
+	
+	// Check for root item
+	if (path == ".") {
+		return true;
+	}
+	
+	auto wpath = toUTF16(path); // Convert to UTF-16 encoding
+	auto it = wpath.byCodeUnit;
+	
+	while (!it.empty) {
+		ushort current = it.front;
+		
+		// Check for valid single unit
+		if (current <= 0xD7FF || (current >= 0xE000 && current <= 0xFFFF)) {
+			it.popFront();
+		}
+		// Check for valid surrogate pair
+		else if (current >= 0xD800 && current <= 0xDBFF) {
+			it.popFront();
+			if (it.empty || it.front < 0xDC00 || it.front > 0xDFFF) {
+				return false; // Invalid surrogate pair
+			}
+			it.popFront();
+		} else {
+			return false; // Invalid code unit
+		}
+	}
+	
+	return true;
+}
+
+// Does the path contain any HTML URL encoded items (e.g., '%20' for space)
+bool containsURLEncodedItems(string path) {
+	// Check for null or empty string
+	if (path.length == 0) {
+		return false;
+	}
+	
+	// Pattern for percent encoding: % followed by two hexadecimal digits
+	auto urlEncodedPattern = ctRegex!(`%[0-9a-fA-F]{2}`);
+	
+	// Search for URL encoded items in the string
+	auto matchResult = match(path, urlEncodedPattern);
+	
+	// Return true if URL encoded items are found
+	return !matchResult.empty;
 }
 
 // Parse and display error message received from OneDrive
-void displayOneDriveErrorMessage(string message, string callingFunction)
-{
-	writeln();
-	log.error("ERROR: Microsoft OneDrive API returned an error with the following message:");
+void displayOneDriveErrorMessage(string message, string callingFunction) {
+	addLogEntry();
+	addLogEntry("ERROR: Microsoft OneDrive API returned an error with the following message:");
 	auto errorArray = splitLines(message);
-	log.error(" Error Message: ", errorArray[0]);
+	addLogEntry(" Error Message: " ~ to!string(errorArray[0]));
 	// Extract 'message' as the reason
 	JSONValue errorMessage = parseJSON(replace(message, errorArray[0], ""));
-	// extra debug
-	log.vdebug("Raw Error Data: ", message);
-	log.vdebug("JSON Message: ", errorMessage);
 	
 	// What is the reason for the error
 	if (errorMessage.type() == JSONType.object) {
 		// configure the error reason
 		string errorReason;
+		string errorCode;
 		string requestDate;
 		string requestId;
@@ -303,11 +559,20 @@ void displayOneDriveErrorMessage(string message, string callingFunction)
 		// Display the error reason
 		if (errorReason.startsWith(" 0 ? to!string(errorArray[0]) : "No error message available";
+	addLogEntry(" Error Message: " ~ errorMessage);
+	
+	// Log the calling function
+	addLogEntry(" Calling Function: " ~ callingFunction);
+	
+	try {
+		// Safely check for disk space
+		ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace("."));
+		if (localActualFreeSpace == 0) {
+			// Must force exit here, allow logging to be done
+			forceExit();
+		}
+	} catch (Exception e) {
+		// Handle exceptions from disk space check or type conversion
+		addLogEntry(" Exception in disk space check: " ~ e.msg);
+	}
+}
+
+// Display the POSIX Error Message
+void displayPosixErrorMessage(string message) {
+	addLogEntry(); // used rather than writeln
+	addLogEntry("ERROR: Microsoft OneDrive API returned data that highlights a POSIX compliance issue:");
+	addLogEntry(" Error Message: " ~ message);
+}
+
+// Display the Error Message
+void displayGeneralErrorMessage(Exception e, string callingFunction=__FUNCTION__, int lineno=__LINE__) {
+	addLogEntry(); // used rather than writeln
+	addLogEntry("ERROR: Encountered " ~ e.classinfo.name ~ ":");
+	addLogEntry(" Error Message: " ~ e.msg);
+	addLogEntry(" Calling Function: " ~ callingFunction);
+	addLogEntry(" Line number: " ~ to!string(lineno));
 }
 
 // Get the function name that is being called to assist with identifying where an error is being generated
@@ -358,30 +687,60 @@ string getFunctionName(alias func)() {
 	return __traits(identifier, __traits(parent, func)) ~ "()\n";
 }
 
+JSONValue fetchOnlineURLContent(string url) {
+	// Function variables
+	char[] content;
+	JSONValue onlineContent;
+	
+	// Setup HTTP request
+	HTTP http = HTTP();
+	
+	// Exit scope to ensure cleanup
+	scope(exit) {
+		http.shutdown();
+		object.destroy(http);
+	}
+	
+	// Configure the URL to access
+	http.url = url;
+	// HTTP connection method
+	http.method = HTTP.Method.get;
+	
+	// Data receive handler
+	http.onReceive = (ubyte[] data) {
+		content ~= data; // Append data as it's received
+		return data.length;
+	};
+	
+	// Perform HTTP request
+	http.perform();
+	
+	// Parse Content
+	onlineContent = parseJSON(to!string(content));
+	
+	// Return the parsed response; the scope(exit) block above performs the cleanup
+	return onlineContent;
+}
+
 // Get the latest release version from GitHub
 JSONValue getLatestReleaseDetails() {
-	// Import curl just for this function
-	import std.net.curl;
-	char[] content;
 	JSONValue githubLatest;
 	JSONValue versionDetails;
 	string latestTag;
 	string publishedDate;
 	
-	try {
-		content = get("https://api.github.com/repos/abraunegg/onedrive/releases/latest");
-	} catch (CurlException e) {
-		// curl generated an error - meaning we could not query GitHub
-		log.vdebug("Unable to query GitHub for latest release");
-	}
-	
-	try {
-		githubLatest = content.parseJSON();
-	} catch (JSONException e) {
-		// unable to parse the content JSON, set to blank JSON
-		log.vdebug("Unable to parse GitHub JSON response");
-		githubLatest = parseJSON("{}");
-	}
+	// Query GitHub for the 'latest' release details
+	try {
+		githubLatest = fetchOnlineURLContent("https://api.github.com/repos/abraunegg/onedrive/releases/latest");
+	} catch (CurlException e) {
+		addLogEntry("CurlException: Unable to query GitHub for latest release - " ~ e.msg, ["debug"]);
+	} catch (JSONException e) {
+		addLogEntry("JSONException: Unable to parse GitHub JSON response - " ~ e.msg, ["debug"]);
	}
 	
 	// githubLatest has to be a valid JSON object
 	if (githubLatest.type() == JSONType.object){
@@ -392,7 +751,7 @@ JSONValue getLatestReleaseDetails() {
 		latestTag = strip(githubLatest["tag_name"].str, "v");
 	} else {
 		// set to latestTag zeros
-		log.vdebug("'tag_name' unavailable in JSON response. Setting GitHub 'tag_name' release version to 0.0.0");
+		addLogEntry("'tag_name' unavailable in JSON response. Setting GitHub 'tag_name' release version to 0.0.0", ["debug"]);
 		latestTag = "0.0.0";
 	}
 	// use the returned published_at date
@@ -401,15 +760,15 @@ JSONValue getLatestReleaseDetails() {
 		publishedDate = githubLatest["published_at"].str;
 	} else {
 		// set to v2.0.0 release date
-		log.vdebug("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z");
+		addLogEntry("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z", ["debug"]);
 		publishedDate = "2018-07-18T18:00:00Z";
 	}
 } else {
 	// JSONValue is not an object
-	log.vdebug("Invalid JSON Object. Setting GitHub 'tag_name' release version to 0.0.0");
+	addLogEntry("Invalid JSON Object response from GitHub. Setting GitHub 'tag_name' release version to 0.0.0", ["debug"]);
 	latestTag = "0.0.0";
-	log.vdebug("Invalid JSON Object. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z");
-	publishedDate = "2018-07-18T18:00:00Z";
+	addLogEntry("Invalid JSON Object. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z", ["debug"]);
+	publishedDate = "2018-07-18T18:00:00Z";
 }
 
 	// return the latest github version and published date as our own JSON
@@ -424,37 +783,30 @@ JSONValue getLatestReleaseDetails() {
 
 // Get the release details from the 'current' running version
 JSONValue getCurrentVersionDetails(string thisVersion) {
-	// Import curl just for this function
-	import std.net.curl;
-	char[] content;
 	JSONValue githubDetails;
 	JSONValue versionDetails;
 	string versionTag = "v" ~ thisVersion;
 	string publishedDate;
 	
+	// Query GitHub for the release details to match the running version
 	try {
-		content = get("https://api.github.com/repos/abraunegg/onedrive/releases");
+		githubDetails = fetchOnlineURLContent("https://api.github.com/repos/abraunegg/onedrive/releases");
 	} catch (CurlException e) {
-		// curl generated an error - meaning we could not query GitHub
-		log.vdebug("Unable to query GitHub for release details");
-	}
-	
-	try {
-		githubDetails = content.parseJSON();
-	} catch (JSONException e) {
-		// unable to parse the content JSON, set to blank JSON
-		log.vdebug("Unable to parse GitHub JSON response");
-		githubDetails = parseJSON("{}");
-	}
+		addLogEntry("CurlException: Unable to query GitHub for release details - " ~ e.msg, ["debug"]);
+		return parseJSON(`{"Error": "CurlException", "message": "` ~ e.msg ~ `"}`);
+	} catch (JSONException e) {
+		addLogEntry("JSONException: Unable to parse GitHub JSON response - " ~ e.msg, ["debug"]);
+		return parseJSON(`{"Error": "JSONException", "message": "` ~ e.msg ~ `"}`);
+	}
 	
 	// githubDetails has to be a valid JSON array
 	if (githubDetails.type() == JSONType.array){
 		foreach (searchResult; githubDetails.array) {
 			// searchResult["tag_name"].str;
 			if (searchResult["tag_name"].str == versionTag) {
-				log.vdebug("MATCHED version");
-				log.vdebug("tag_name: ", searchResult["tag_name"].str);
-				log.vdebug("published_at: ", searchResult["published_at"].str);
+				addLogEntry("MATCHED version", ["debug"]);
+				addLogEntry("tag_name: " ~ searchResult["tag_name"].str, ["debug"]);
+				addLogEntry("published_at: " ~ searchResult["published_at"].str, ["debug"]);
 				publishedDate = searchResult["published_at"].str;
 			}
 		}
@@ -462,13 +814,13 @@ JSONValue getCurrentVersionDetails(string thisVersion) {
 		
 		if (publishedDate.empty) {
 			// empty .. no version match ?
 			// set to v2.0.0 release date
-			log.vdebug("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z");
+			addLogEntry("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z", ["debug"]);
 			publishedDate = "2018-07-18T18:00:00Z";
 		}
 	} else {
 		// JSONValue is not an Array
-		log.vdebug("Invalid JSON Array. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z");
-		publishedDate = "2018-07-18T18:00:00Z";
+		addLogEntry("Invalid JSON Array. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z", ["debug"]);
+		publishedDate = "2018-07-18T18:00:00Z";
	}
 
 	// return the latest github version and published date as our own JSON
@@ -502,11 +854,11 @@ void checkApplicationVersion() {
 	string applicationVersion = currentVersionArray[0];
 	
 	// debug output
-	log.vdebug("applicationVersion: ", applicationVersion);
-	log.vdebug("latestVersion: ", latestVersion);
-	log.vdebug("publishedDate: ", publishedDate);
-	log.vdebug("currentTime: ", currentTime);
-	log.vdebug("releaseGracePeriod: ", releaseGracePeriod);
+	addLogEntry("applicationVersion: " ~ applicationVersion, ["debug"]);
+	addLogEntry("latestVersion: " ~ latestVersion, ["debug"]);
+	addLogEntry("publishedDate: " ~ to!string(publishedDate), ["debug"]);
+	addLogEntry("currentTime: " ~ to!string(currentTime), ["debug"]);
+	addLogEntry("releaseGracePeriod: " ~ to!string(releaseGracePeriod), ["debug"]);
 	
 	// display details if not current
 	// is application version is older than available on GitHub
@@ -520,14 +872,14 @@ void checkApplicationVersion() {
 			JSONValue thisVersionDetails = getCurrentVersionDetails(applicationVersion);
 			SysTime thisVersionPublishedDate = SysTime.fromISOExtString(thisVersionDetails["publishedDate"].str).toUTC();
 			thisVersionPublishedDate.fracSecs = Duration.zero;
-			log.vdebug("thisVersionPublishedDate: ", thisVersionPublishedDate);
+			addLogEntry("thisVersionPublishedDate: " ~ to!string(thisVersionPublishedDate), ["debug"]);
 			
 			// the running version grace period is its release date + 1 month
 			SysTime thisVersionReleaseGracePeriod = thisVersionPublishedDate;
 			thisVersionReleaseGracePeriod = thisVersionReleaseGracePeriod.add!"months"(1);
-			log.vdebug("thisVersionReleaseGracePeriod: ", thisVersionReleaseGracePeriod);
+			addLogEntry("thisVersionReleaseGracePeriod: " ~ to!string(thisVersionReleaseGracePeriod), ["debug"]);
 			
-			// is this running version obsolete ?
+			// Is this running version obsolete ?
 			if (!displayObsolete) {
 				// if releaseGracePeriod > currentTime
 				// display an information warning that there is a new release available
@@ -541,69 +893,366 @@ void checkApplicationVersion() {
 			}
 			
 			// display version response
-			writeln();
+			addLogEntry();
 			if (!displayObsolete) {
 				// display the new version is available message
-				log.logAndNotify("INFO: A new onedrive client version is available. Please upgrade your client version when possible.");
+				addLogEntry("INFO: A new onedrive client version is available. Please upgrade your client version when possible.", ["info", "notify"]);
 			} else {
 				// display the obsolete message
-				log.logAndNotify("WARNING: Your onedrive client version is now obsolete and unsupported. Please upgrade your client version.");
+				addLogEntry("WARNING: Your onedrive client version is now obsolete and unsupported. Please upgrade your client version.", ["info", "notify"]);
 			}
-			log.log("Current Application Version: ", applicationVersion);
-			log.log("Version Available: ", latestVersion);
-			writeln();
+			addLogEntry("Current Application Version: " ~ applicationVersion);
+			addLogEntry("Version Available: " ~ latestVersion);
+			addLogEntry();
 		}
 	}
 }
 
-// Unit Tests
-unittest
-{
-	assert(multiGlobMatch(".hidden", ".*"));
-	assert(multiGlobMatch(".hidden", "file|.*"));
-	assert(!multiGlobMatch("foo.bar", "foo|bar"));
-	// that should detect invalid file/directory name.
-	assert(isValidName("."));
-	assert(isValidName("./general.file"));
-	assert(!isValidName("./ leading_white_space"));
-	assert(!isValidName("./trailing_white_space "));
-	assert(!isValidName("./trailing_dot."));
-	assert(!isValidName("./includes<in the path"));
-	assert(!isValidName("./includes>in the path"));
-	assert(!isValidName("./includes:in the path"));
-	assert(!isValidName(`./includes"in the path`));
-	assert(!isValidName("./includes|in the path"));
-	assert(!isValidName("./includes?in the path"));
-	assert(!isValidName("./includes*in the path"));
-	assert(!isValidName("./includes / in the path"));
-	assert(!isValidName(`./includes\ in the path`));
-	assert(!isValidName(`./includes\\ in the path`));
-	assert(!isValidName(`./includes\\\\ in the path`));
-	assert(!isValidName("./includes\\ in the path"));
-	assert(!isValidName("./includes\\\\ in the path"));
-	assert(!isValidName("./CON"));
-	assert(!isValidName("./CON.text"));
-	assert(!isValidName("./PRN"));
-	assert(!isValidName("./AUX"));
-	assert(!isValidName("./NUL"));
-	assert(!isValidName("./COM0"));
-	assert(!isValidName("./COM1"));
-	assert(!isValidName("./COM2"));
-	assert(!isValidName("./COM3"));
-	assert(!isValidName("./COM4"));
-	assert(!isValidName("./COM5"));
-	assert(!isValidName("./COM6"));
-	assert(!isValidName("./COM7"));
-	assert(!isValidName("./COM8"));
-	assert(!isValidName("./COM9"));
-	assert(!isValidName("./LPT0"));
-	assert(!isValidName("./LPT1"));
-	assert(!isValidName("./LPT2"));
-	assert(!isValidName("./LPT3"));
-	assert(!isValidName("./LPT4"));
-	assert(!isValidName("./LPT5"));
-	assert(!isValidName("./LPT6"));
-	assert(!isValidName("./LPT7"));
-	assert(!isValidName("./LPT8"));
-	assert(!isValidName("./LPT9"));
+bool hasId(JSONValue item) {
+	return ("id" in item) != null;
+}
+
+bool hasQuota(JSONValue item) {
+	return ("quota" in item) != null;
+}
+
+bool isItemDeleted(JSONValue item) {
+	return ("deleted" in item) != null;
+}
+
+bool isItemRoot(JSONValue item) {
+	return ("root" in item) != null;
+}
+
+bool hasParentReference(const ref JSONValue item) {
+	return ("parentReference" in item) != null;
+}
+
+bool hasParentReferenceId(JSONValue item) {
+	return ("id" in item["parentReference"]) != null;
+}
+
+bool hasParentReferencePath(JSONValue item) {
+	return ("path" in item["parentReference"]) != null;
+}
+
+bool isFolderItem(const ref JSONValue item) {
+	return ("folder" in item) != null;
+}
+
+bool isFileItem(const ref JSONValue item) {
+	return ("file" in item) != null;
+}
+
+bool isItemRemote(const ref JSONValue item) {
+	return ("remoteItem" in item) != null;
+}
+
+bool isItemFile(const ref JSONValue item) {
+	return ("file" in item) != null;
+}
+
+bool isItemFolder(const ref JSONValue item) {
+	return ("folder" in item) != null;
+}
+
+bool hasFileSize(const ref JSONValue item) {
+	return ("size" in item) != null;
+}
+
+// Function to determine if the final component of the provided path is a .file or .folder
+bool isDotFile(const(string) path) {
+	// Check for null or empty path
+	if (path is null || path.length == 0) {
+		return false;
+	}
+	
+	// Special case for root
+	if (path == ".") {
+		return false;
+	}
+	
+	// Extract the last component of the path
+	auto paths = pathSplitter(buildNormalizedPath(path));
+	
+	// Optimised way to fetch the last component
+	string lastComponent = paths.empty ? "" : paths.back;
+	
+	// Check if the last component starts with a dot
+	return startsWith(lastComponent, ".");
+}
+
+bool isMalware(const ref JSONValue item) {
+	return ("malware" in item) != null;
+}
+
+bool hasHashes(const ref JSONValue item) {
+	return ("hashes" in item["file"]) != null;
+}
+
+bool hasQuickXorHash(const ref JSONValue item) {
+	return ("quickXorHash" in item["file"]["hashes"]) != null;
+}
+
+bool hasSHA256Hash(const ref JSONValue item) {
+	return ("sha256Hash" in item["file"]["hashes"]) != null;
+}
+
+bool isMicrosoftOneNoteMimeType1(const ref JSONValue item) {
+	return (item["file"]["mimeType"].str) == "application/msonenote";
+}
+
+bool isMicrosoftOneNoteMimeType2(const ref JSONValue item) {
+	return (item["file"]["mimeType"].str) == "application/octet-stream";
+}
+
+bool hasUploadURL(const ref JSONValue item) {
+	return ("uploadUrl" in item) != null;
+}
+
+bool hasNextExpectedRanges(const ref JSONValue item) {
+	return ("nextExpectedRanges" in item) != null;
+}
+
+bool hasLocalPath(const ref JSONValue item) {
+	return ("localPath" in item) != null;
+}
+
+bool hasETag(const ref JSONValue item) {
+	return ("eTag" in item) != null;
+}
+
+bool hasSharedElement(const ref JSONValue item) {
+	return ("shared" in item) != null;
+}
+
+bool hasName(const ref JSONValue item) {
+	return ("name" in item) != null;
+}
+
+// Convert bytes to GiB
+string byteToGibiByte(ulong bytes) {
+	if (bytes == 0) {
+		return "0.00"; // Zero bytes renders as "0.00"
+	}
+	
+	double gib = bytes / 1073741824.0; // 1024^3 for direct conversion
+	return format("%.2f", gib); // Format to ensure two decimal places
+}
+
+// Test if entrypoint.sh exists on the root filesystem
+bool entrypointExists(string basePath = "/") {
+	try {
+		// Build the path to the entrypoint.sh file
+		string entrypointPath = buildNormalizedPath(buildPath(basePath, "entrypoint.sh"));
+		
+		// Check if the path exists and return the result
+		return exists(entrypointPath);
+	} catch (Exception e) {
+		// Handle any exceptions (e.g., permission issues, invalid path)
+		writeln("An error occurred: ", e.msg);
+		return false;
+	}
+}
+
+// Generate a random alphanumeric string with specified length
+string generateAlphanumericString(size_t length = 16) {
+	// Ensure length is not zero
+	if (length == 0) {
+		throw new Exception("Length must be greater than 0");
+	}
+	
+	auto asciiLetters = to!(dchar[])(letters);
+	auto asciiDigits = to!(dchar[])(digits);
+	dchar[] randomString;
+	randomString.length = length;
+	
+	// Create a random number generator
+	auto rndGen = Random(unpredictableSeed);
+	
+	// Fill the string with random alphanumeric characters
+	fill(randomString[], randomCover(chain(asciiLetters, asciiDigits), rndGen));
+	
+	return to!string(randomString);
+}
+
+// Display internal memory stats pre garbage collection
+void displayMemoryUsagePreGC() {
+	// Display memory usage
+	addLogEntry();
+	addLogEntry("Memory Usage PRE Garbage Collection (KB)");
+	addLogEntry("-----------------------------------------------------");
+	writeMemoryStats();
+	addLogEntry();
+}
+
+// Display internal memory stats post garbage collection + RSS (actual memory being used)
+void displayMemoryUsagePostGC() {
+	// Display memory usage title
+	addLogEntry("Memory Usage POST Garbage Collection (KB)");
+	addLogEntry("-----------------------------------------------------");
+	writeMemoryStats();
+	
+	// Query the actual Resident Set Size (RSS) for the PID
+	pid_t pid = getCurrentPID();
+	ulong rss = getRSS(pid);
+	
+	// Check and log the previous RSS value
+	if (previousRSS != 0) {
+		addLogEntry("previous Resident Set Size (RSS) = " ~ to!string(previousRSS) ~ " KB");
+		
+		// Calculate and log the difference in RSS
+		long difference = rss - previousRSS; // 'difference' can be negative, use 'long' to handle it
+		string sign = difference > 0 ? "+" : ""; // Negative values already carry their '-' sign; zero gets no sign
+		addLogEntry("difference in Resident Set Size (RSS) = " ~ sign ~ to!string(difference) ~ " KB");
+	}
+	
+	// Update previous RSS with the new value
+	previousRSS = rss;
+	
+	// Closeout
+	addLogEntry();
+}
+
+// Write internal memory stats
+void writeMemoryStats() {
+	addLogEntry("current memory usedSize = " ~ to!string((GC.stats.usedSize/1024))); // number of used bytes on the GC heap (might only get updated after a collection)
+	addLogEntry("current memory freeSize = " ~ to!string((GC.stats.freeSize/1024))); // number of free bytes on the GC heap (might only get updated after a collection)
+	addLogEntry("current memory allocatedInCurrentThread = " ~ to!string((GC.stats.allocatedInCurrentThread/1024))); // number of bytes allocated for current thread since program start
+	
+	// Query the actual Resident Set Size (RSS) for the PID
+	pid_t pid = getCurrentPID();
+	ulong rss = getRSS(pid);
+	// The RSS includes all memory that is currently marked as occupied by the process.
+	// Over time, the heap can become fragmented. Even after garbage collection, fragmented memory blocks may not be contiguous enough to be returned to the OS, leading to an increase in the reported memory usage despite having free space.
+	// This includes memory that might not be actively used but has not been returned to the system.
+	// The GC.minimize() function can sometimes cause an increase in RSS due to how memory pages are managed and freed.
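+	// Illustrative example (not from a real run, and assuming the common 4 KiB page size):
+	// a process whose /proc/PID/statm reports 25600 resident pages would be logged below as
+	// 'current Resident Set Size (RSS) = 102400 KB' (25600 pages x 4 KiB per page).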
+	addLogEntry("current Resident Set Size (RSS) = " ~ to!string(rss) ~ " KB"); // actual memory in RAM used by the process at this point in time
+}
+
+// Return the username of the UID running the 'onedrive' process
+string getUserName() {
+	// Retrieve the UID of the current user
+	auto uid = getuid();
+	
+	// Retrieve password file entry for the user
+	auto pw = getpwuid(uid);
+	enforce(pw !is null, "Failed to retrieve user information for UID: " ~ to!string(uid));
+	
+	// Extract username and convert to immutable string
+	string userName = to!string(fromStringz(pw.pw_name));
+	
+	// Log User identifiers from process
+	addLogEntry("passwd entry pointer: " ~ to!string(pw), ["debug"]);
+	addLogEntry("User UID: " ~ to!string(pw.pw_uid), ["debug"]);
+	addLogEntry("User GID: " ~ to!string(pw.pw_gid), ["debug"]);
+	
+	// Check if username is valid
+	if (!userName.empty) {
+		addLogEntry("User Name: " ~ userName, ["debug"]);
+		return userName;
+	} else {
+		// Log and return unknown user
+		addLogEntry("User Name: unknown", ["debug"]);
+		return "unknown";
+	}
+}
+
+// Calculate the ETA for when a 'large file' will be completed (upload & download operations)
+int calc_eta(size_t counter, size_t iterations, ulong start_time) {
+	if (counter == 0) {
+		return 0; // Avoid division by zero
+	}
+	
+	double ratio = cast(double) counter / iterations;
+	auto current_time = Clock.currTime.toUnixTime();
+	ulong duration = (current_time - start_time);
+	
+	// Segments left to transfer
+	auto segments_remaining = (iterations > counter) ? (iterations - counter) : 0;
+	
+	// Calculate the average time per iteration so far
+	double avg_time_per_iteration = cast(double) duration / counter;
+	
+	// Debug output for the ETA calculation
+	addLogEntry("counter: " ~ to!string(counter), ["debug"]);
+	addLogEntry("iterations: " ~ to!string(iterations), ["debug"]);
+	addLogEntry("segments_remaining: " ~ to!string(segments_remaining), ["debug"]);
+	addLogEntry("ratio: " ~ format("%.2f", ratio), ["debug"]);
+	addLogEntry("start_time: " ~ to!string(start_time), ["debug"]);
+	addLogEntry("current_time: " ~ to!string(current_time), ["debug"]);
+	addLogEntry("duration: " ~ to!string(duration), ["debug"]);
+	addLogEntry("avg_time_per_iteration: " ~ format("%.2f", avg_time_per_iteration), ["debug"]);
+	
+	// Return the ETA or duration
+	if (counter != iterations) {
+		auto eta_sec = avg_time_per_iteration * segments_remaining;
+		// ETA Debug
+		addLogEntry("eta_sec: " ~ to!string(eta_sec), ["debug"]);
+		addLogEntry("estimated_total_time: " ~ to!string(avg_time_per_iteration * iterations), ["debug"]);
+		// Return ETA
+		return eta_sec > 0 ? cast(int) ceil(eta_sec) : 0;
+	} else {
+		// Return the average time per iteration for the last iteration
+		return cast(int) ceil(avg_time_per_iteration);
+	}
+}
+
+// Force Exit due to failure
+void forceExit() {
+	// Allow any logging to complete before we force exit
+	Thread.sleep(dur!("msecs")(500));
+	
+	// Shutdown logging, which also flushes all logging buffers
+	(cast() logBuffer).shutdown();
+	object.destroy(logBuffer);
+	
+	// Force Exit
+	exit(EXIT_FAILURE);
+}
+
+// Get the current PID of the application
+pid_t getCurrentPID() {
+	// '/proc/self' is a symlink to the current process's /proc directory
+	string path = "/proc/self/stat";
+	
+	// Read the content of the stat file
+	string content;
+	try {
+		content = readText(path);
+	} catch (Exception e) {
+		writeln("Failed to read stat file: ", e.msg);
+		return 0;
+	}
+	
+	// The first value in the stat file is the PID
+	auto parts = split(content);
+	return to!pid_t(parts[0]); // Convert the first part to pid_t
+}
+
+// Access the Resident Set Size (RSS) based on the PID of the running application
+ulong getRSS(pid_t pid) {
+	// Construct the path to the statm file for the given PID
+	string path = format("/proc/%s/statm", to!string(pid));
+	
+	// Read the content of the file
+	string content;
+	try {
+		content = readText(path);
+	} catch (Exception e) {
+		writeln("Failed to read statm file: ", e.msg);
+		return 0;
+	}
+	
+	// Split the content and get the RSS (second value)
+	auto stats = split(content);
+	if (stats.length < 2) {
+		writeln("Unexpected format in statm file.");
+		return 0;
+	}
+	
+	// RSS is in pages, convert it to kilobytes
+	ulong rssPages = to!ulong(stats[1]);
+	ulong rssKilobytes = rssPages * sysconf(_SC_PAGESIZE) / 1024;
+	return rssKilobytes;
 }
diff --git a/src/webhook.d b/src/webhook.d
new file mode 100644
index 000000000..bd4fb8a0b
--- /dev/null
+++ b/src/webhook.d
@@ -0,0 +1,342 @@
+module webhook;
+
+// What does this module require to function?
+import core.atomic : atomicOp;
+import std.datetime;
+import std.concurrency;
+import std.json;
+
+// What other modules that we have created do we need to import?
+import arsd.cgi;
+import config;
+import onedrive;
+import log;
+import util;
+
+class OneDriveWebhook {
+	private RequestServer server;
+	private string host;
+	private ushort port;
+	private Tid parentTid;
+	private bool started;
+	
+	private ApplicationConfig appConfig;
+	private OneDriveApi oneDriveApiInstance;
+	string subscriptionId = "";
+	SysTime subscriptionExpiration, subscriptionLastErrorAt;
+	Duration subscriptionExpirationInterval, subscriptionRenewalInterval, subscriptionRetryInterval;
+	string notificationUrl = "";
+	
+	private uint count;
+	
+	this(Tid parentTid, ApplicationConfig appConfig) {
+		this.host = appConfig.getValueString("webhook_listening_host");
+		this.port = to!ushort(appConfig.getValueLong("webhook_listening_port"));
+		this.parentTid = parentTid;
+		this.appConfig = appConfig;
+		
+		subscriptionExpiration = Clock.currTime(UTC());
+		subscriptionLastErrorAt = SysTime.fromUnixTime(0);
+		subscriptionExpirationInterval = dur!"seconds"(appConfig.getValueLong("webhook_expiration_interval"));
+		subscriptionRenewalInterval = dur!"seconds"(appConfig.getValueLong("webhook_renewal_interval"));
+		subscriptionRetryInterval = dur!"seconds"(appConfig.getValueLong("webhook_retry_interval"));
+		notificationUrl = appConfig.getValueString("webhook_public_url");
+	}
+	
+	// A static serveImpl() is required because spawn() does not accept instance methods
+	void serve() {
+		if (this.started) {
+			return;
+		}
+		
+		this.started = true;
+		this.count = 0;
+		
+		server.listeningHost = this.host;
+		server.listeningPort = this.port;
+		
+		spawn(&serveImpl, cast(shared) this);
+		addLogEntry("Started webhook server");
+		
+		// Subscriptions
+		oneDriveApiInstance = new OneDriveApi(this.appConfig);
+		oneDriveApiInstance.initialise();
+		createOrRenewSubscription();
+	}
+	
+	void stop() {
+		if (!this.started)
+			return;
+		server.stop();
+		this.started = false;
+		
+		addLogEntry("Stopped webhook server");
+		object.destroy(server);
+		
+		// Delete the subscription if one exists
+		try {
+			deleteSubscription();
+		} catch (OneDriveException e) {
+			logSubscriptionError(e);
+		}
+		// Release API instance back to the pool
+		oneDriveApiInstance.releaseCurlEngine();
+		object.destroy(oneDriveApiInstance);
+		oneDriveApiInstance = null;
+	}
+	
+	private static void handle(shared OneDriveWebhook _this, Cgi cgi) {
+		if (debugHTTPResponseOutput) {
+			addLogEntry("Webhook request: " ~ to!string(cgi.requestMethod) ~ " " ~ to!string(cgi.requestUri));
+			if (!cgi.postBody.empty) {
+				addLogEntry("Webhook post body: " ~ to!string(cgi.postBody));
+			}
+		}
+		
+		cgi.setResponseContentType("text/plain");
+		
+		if ("validationToken" in cgi.get) {
+			// For validation requests, respond with the validation token passed in the query string
+			// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/webhook-receiver-validation-request
+			cgi.write(cgi.get["validationToken"]);
+			addLogEntry("Webhook: handled validation request");
+		} else {
+			// Notifications don't include any information about the changes that triggered them.
+			// Put a refresh signal in the queue and let the main monitor loop process it.
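+			// (The counter value sent below serves only as a wake-up signal for the parent
+			// thread; it carries no data about what actually changed on the OneDrive side.)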
+			// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/using-webhooks
+			_this.count.atomicOp!"+="(1);
+			send(cast()_this.parentTid, to!ulong(_this.count));
+			cgi.write("OK");
+			addLogEntry("Webhook: sent refresh signal #" ~ to!string(_this.count));
+		}
+	}
+	
+	private static void serveImpl(shared OneDriveWebhook _this) {
+		_this.server.serveEmbeddedHttp!(handle, OneDriveWebhook)(_this);
+	}
+	
+	// Create a new subscription or renew the existing subscription
+	void createOrRenewSubscription() {
+		auto elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt;
+		if (elapsed < subscriptionRetryInterval) {
+			return;
+		}
+		
+		try {
+			if (!hasValidSubscription()) {
+				createSubscription();
+			} else if (isSubscriptionUpForRenewal()) {
+				renewSubscription();
+			}
+		} catch (OneDriveException e) {
+			logSubscriptionError(e);
+			subscriptionLastErrorAt = Clock.currTime(UTC());
+			addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval));
+		} catch (JSONException e) {
+			addLogEntry("ERROR: Unexpected JSON error when attempting to validate subscription: " ~ e.msg);
+			subscriptionLastErrorAt = Clock.currTime(UTC());
+			addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval));
+		}
+	}
+	
+	// Return the duration to next subscriptionExpiration check
+	Duration getNextExpirationCheckDuration() {
+		SysTime now = Clock.currTime(UTC());
+		if (hasValidSubscription()) {
+			Duration elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt;
+			// Check if we are waiting for the next retry
+			if (elapsed < subscriptionRetryInterval)
+				return subscriptionRetryInterval - elapsed;
+			else
+				return subscriptionExpiration - now - subscriptionRenewalInterval;
+		}
+		else
+			return subscriptionRetryInterval;
+	}
+	
+	private bool hasValidSubscription() {
+		return !subscriptionId.empty && subscriptionExpiration > Clock.currTime(UTC());
+	}
+	
+	private bool isSubscriptionUpForRenewal() {
+		return subscriptionExpiration < Clock.currTime(UTC()) + subscriptionRenewalInterval;
+	}
+	
+	private void createSubscription() {
+		addLogEntry("Initializing subscription for updates ...");
+		
+		auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval;
+		try {
+			JSONValue response = oneDriveApiInstance.createSubscription(notificationUrl, expirationDateTime);
+			// Save important subscription metadata including id and expiration
+			subscriptionId = response["id"].str;
+			subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str);
+			addLogEntry("Created new subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString()));
+		} catch (OneDriveException e) {
+			if (e.httpStatusCode == 409) {
+				// Take over an existing subscription on HTTP 409.
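+				// (A 409 typically indicates a subscription left behind by a previous client
+				// instance that did not shut down cleanly; we adopt it rather than fail.)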
+				//
+				// Sample 409 error:
+				// {
+				// 	"error": {
+				// 		"code": "ObjectIdentifierInUse",
+				// 		"innerError": {
+				// 			"client-request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d",
+				// 			"date": "2023-09-26T09:27:45",
+				// 			"request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d"
+				// 		},
+				// 		"message": "Subscription Id c0bba80e-57a3-43a7-bac2-e6f525a76e7c already exists for the requested combination"
+				// 	}
+				// }
+				
+				// Make sure the error code is "ObjectIdentifierInUse"
+				try {
+					if (e.error["error"]["code"].str != "ObjectIdentifierInUse") {
+						throw e;
+					}
+				} catch (JSONException jsonEx) {
+					throw e;
+				}
+				
+				// Extract the existing subscription id from the error message
+				import std.regex;
+				auto idReg = ctRegex!(r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", "i");
+				auto m = matchFirst(e.error["error"]["message"].str, idReg);
+				if (!m) {
+					throw e;
+				}
+				
+				// Save the subscription id and renew it immediately since we don't know the expiration timestamp
+				subscriptionId = m[0];
+				addLogEntry("Found existing subscription " ~ subscriptionId);
+				renewSubscription();
+			} else {
+				throw e;
+			}
+		}
+	}
+	
+	private void renewSubscription() {
+		addLogEntry("Renewing subscription for updates ...");
+		
+		auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval;
+		try {
+			JSONValue response = oneDriveApiInstance.renewSubscription(subscriptionId, expirationDateTime);
+			
+			// Update subscription expiration from the response
+			subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str);
+			addLogEntry("Renewed subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString()));
+		} catch (OneDriveException e) {
+			if (e.httpStatusCode == 404) {
+				addLogEntry("The subscription was not found on the server. Recreating subscription ...");
+				subscriptionId = null;
+				subscriptionExpiration = Clock.currTime(UTC());
+				createSubscription();
+			} else {
+				throw e;
+			}
+		}
+	}
+	
+	private void deleteSubscription() {
+		if (!hasValidSubscription()) {
+			return;
+		}
+		oneDriveApiInstance.deleteSubscription(subscriptionId);
+		addLogEntry("Deleted subscription");
+	}
+	
+	private void logSubscriptionError(OneDriveException e) {
+		if (e.httpStatusCode == 400) {
+			// Log known 400 error where Microsoft cannot get a 200 OK from the webhook endpoint
+			//
+			// Sample 400 error:
+			// {
+			// 	"error": {
+			// 		"code": "InvalidRequest",
+			// 		"innerError": {
+			// 			"client-request-id": "",
+			// 			"date": "",
+			// 			"request-id": ""
+			// 		},
+			// 		"message": "Subscription validation request failed. Notification endpoint must respond with 200 OK to validation request."
+			// 	}
+			// }
+			
+			try {
+				if (e.error["error"]["code"].str == "InvalidRequest") {
+					import std.regex;
+					auto msgReg = ctRegex!(r"Subscription validation request failed", "i");
+					auto m = matchFirst(e.error["error"]["message"].str, msgReg);
+					if (m) {
+						addLogEntry("ERROR: Cannot create or renew subscription: Microsoft did not get 200 OK from the webhook endpoint.");
+						return;
+					}
+				}
+			} catch (JSONException) {
+				// fallthrough
+			}
+		} else if (e.httpStatusCode == 401) {
+			// Log known 401 error where authentication failed
+			//
+			// Sample 401 error:
+			// {
+			// 	"error": {
+			// 		"code": "ExtensionError",
+			// 		"innerError": {
+			// 			"client-request-id": "",
+			// 			"date": "",
+			// 			"request-id": ""
+			// 		},
+			// 		"message": "Operation: Create; Exception: [Status Code: Unauthorized; Reason: Authentication failed]"
+			// 	}
+			// }
+			
+			try {
+				if (e.error["error"]["code"].str == "ExtensionError") {
+					import std.regex;
+					auto msgReg = ctRegex!(r"Authentication failed", "i");
+					auto m = matchFirst(e.error["error"]["message"].str, msgReg);
+					if (m) {
+						addLogEntry("ERROR: Cannot create or renew subscription: Authentication failed.");
+						return;
+					}
+				}
+			} catch (JSONException) {
+				// fallthrough
+			}
+		} else if (e.httpStatusCode == 403) {
+			// Log known 403 error where the number of subscriptions on item has exceeded limit
+			//
+			// Sample 403 error:
+			// {
+			// 	"error": {
+			// 		"code": "ExtensionError",
+			// 		"innerError": {
+			// 			"client-request-id": "",
+			// 			"date": "",
+			// 			"request-id": ""
+			// 		},
+			// 		"message": "Operation: Create; Exception: [Status Code: Forbidden; Reason: Number of subscriptions on item has exceeded limit]"
+			// 	}
+			// }
+			try {
+				if (e.error["error"]["code"].str == "ExtensionError") {
+					import std.regex;
+					auto msgReg = ctRegex!(r"Number of subscriptions on item has exceeded limit", "i");
+					auto m = matchFirst(e.error["error"]["message"].str, msgReg);
+					if (m) {
+						addLogEntry("ERROR: Cannot create or renew subscription: Number of subscriptions has exceeded limit.");
+						return;
+					}
+				}
+			} catch (JSONException) {
+				// fallthrough
+			}
+		}
+		
+		// Log detailed message for unknown errors
+		addLogEntry("ERROR: Cannot create or renew subscription.");
+		displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
+	}
+}
\ No newline at end of file