diff --git a/.github/workflows/build-binary-dists.yml b/.github/workflows/build-binary-dists.yml
index f438e25..93355ff 100644
--- a/.github/workflows/build-binary-dists.yml
+++ b/.github/workflows/build-binary-dists.yml
@@ -8,7 +8,7 @@ on:
       - 'configs/**'
       - 'scripts/**'
-  pull_request_target:
+  pull_request:
     branches: [ main ]
     types: [ labeled ]
     paths:
@@ -16,18 +16,38 @@ on:
       - 'configs/**'
       - 'scripts/**'
 
-env:
-  REDIS_VERSION: "8.0-m04"
-
 permissions:
   id-token: write
   contents: read
 
 jobs:
+  set_variables:
+    name: Extract variables from JSON config
+    if: ${{ (github.event.label.name == 'build-binary-dists') || (github.event_name == 'push' && github.ref == 'refs/heads/main') }}
+    runs-on: ubuntu-latest
+    outputs:
+      redis_version: ${{ steps.read-attribute.outputs.redis_version }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - id: version
+        run: |
+          echo "json<<EOF" >> "$GITHUB_OUTPUT"
+          cat ./configs/redis_version.json >> "$GITHUB_OUTPUT"
+          echo >> "$GITHUB_OUTPUT"
+          echo "EOF" >> "$GITHUB_OUTPUT"
+
+      - name: Extract redis_version
+        id: read-attribute
+        run: echo "redis_version=${{ fromJson(steps.version.outputs.json).ref }}" >> "$GITHUB_OUTPUT"
+
   build:
     if: ${{ (github.event.label.name == 'build-binary-dists') || (github.event_name == 'push' && github.ref == 'refs/heads/main') }}
+    needs: [set_variables]
     name: Build Redis CE MacOS Binary Distributions
     strategy:
+      fail-fast: false
       matrix:
         os_version:
           # See: https://github.com/actions/runner-images/blob/main/README.md#available-images
           - macos-13 # macOS 13 x86_64
@@ -40,45 +60,13 @@ jobs:
 
       - name: Install build dependencies
         run: |
-          export HOMEBREW_NO_AUTO_UPDATE=1
-          brew update
-          brew install coreutils
-          brew install make
-          brew install openssl
-          brew install llvm@18
-          brew install cmake
-          brew install gnu-sed
-          brew install make
-          brew install automake
-          brew install libtool
-
-          RUST_INSTALLER=rust-1.80.1-$(if [ "$(uname -m)" = "arm64" ]; then echo "aarch64"; else echo "x86_64"; fi)-apple-darwin
-          echo "Downloading and installing Rust standalone installer: ${RUST_INSTALLER}"
-          wget --quiet -O ${RUST_INSTALLER}.tar.xz https://static.rust-lang.org/dist/${RUST_INSTALLER}.tar.xz
-          tar -xf ${RUST_INSTALLER}.tar.xz
-          (cd ${RUST_INSTALLER} && sudo ./install.sh)
-          rm -rf ${RUST_INSTALLER}
+          scripts/install_deps.sh
 
       - name: Build Redis CE
         id: build
         run: |
-          export HOMEBREW_PREFIX="$(brew --prefix)"
-          export BUILD_WITH_MODULES=yes
-          export BUILD_TLS=yes
-          export DISABLE_WERRORS=yes
-          PATH="$HOMEBREW_PREFIX/opt/libtool/libexec/gnubin:$HOMEBREW_PREFIX/opt/llvm@18/bin:$HOMEBREW_PREFIX/opt/make/libexec/gnubin:$HOMEBREW_PREFIX/opt/gnu-sed/libexec/gnubin:$HOMEBREW_PREFIX/opt/coreutils/libexec/gnubin:$PATH" # Override macOS defaults.
-          export LDFLAGS="-L$HOMEBREW_PREFIX/opt/llvm@18/lib"
-          export CPPFLAGS="-I$HOMEBREW_PREFIX/opt/llvm@18/include"
-
-          curl -L "https://github.com/redis/redis/archive/refs/tags/${{ vars.BINARY_VERSION_TO_BUILD }}.tar.gz" -o redis.tar.gz
-          tar xzf redis.tar.gz
-
-          mkdir -p build_dir/etc
-          make -C redis-${{ vars.BINARY_VERSION_TO_BUILD }} -j "$(nproc)" all OS=macos
-          make -C redis-${{ vars.BINARY_VERSION_TO_BUILD }} install PREFIX=$(pwd)/build_dir OS=macos
-          cp ./configs/redis.conf build_dir/etc/redis.conf
-          (cd build_dir && zip -r ../redis-ce-${{ vars.BINARY_VERSION_TO_BUILD }}-$(uname -m).zip .)
-          echo "UNSIGNED_REDIS_BINARY=unsigned-redis-ce-${{ vars.BINARY_VERSION_TO_BUILD }}-$(uname -m).zip" >> $GITHUB_OUTPUT
+          scripts/build.sh ${{ needs.set_variables.outputs.redis_version }}
+          echo "UNSIGNED_REDIS_BINARY=unsigned-redis-ce-${{ needs.set_variables.outputs.redis_version }}-$(uname -m).zip" >> $GITHUB_OUTPUT
 
       - name: Upload Redis CE Binary Distribution
         uses: actions/upload-artifact@v4
@@ -133,8 +121,8 @@ jobs:
           done
 
           # Create distribution archive
-          (cd build_dir && zip -r ../redis-ce-${{ vars.BINARY_VERSION_TO_BUILD }}-$(uname -m).zip .)
-          echo "REDIS_BINARY=redis-ce-${{ vars.BINARY_VERSION_TO_BUILD }}-$(uname -m).zip" >> $GITHUB_OUTPUT
+          (cd build_dir && zip -r ../redis-ce-${{ needs.set_variables.outputs.redis_version }}-$(uname -m).zip .)
+          echo "REDIS_BINARY=redis-ce-${{ needs.set_variables.outputs.redis_version }}-$(uname -m).zip" >> $GITHUB_OUTPUT
 
       - name: Notarize Redis CE Binary Distribution
         if: github.event_name == 'push' && github.ref == 'refs/heads/main'
@@ -150,4 +138,4 @@ jobs:
       - name: Upload Redis CE Binary Distribution to S3
         if: github.event_name == 'push' && github.ref == 'refs/heads/main'
         run: |
-          aws s3 cp ${{ steps.sign.outputs.REDIS_BINARY }} s3://${{ secrets.S3_BUCKET }}/homebrew/ --acl public-read
\ No newline at end of file
+          aws s3 cp ${{ steps.sign.outputs.REDIS_BINARY }} s3://${{ secrets.S3_BUCKET }}/homebrew/ --acl public-read
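
Note on the set_variables job: $GITHUB_OUTPUT only accepts multiline values through the "key<<DELIMITER ... DELIMITER" syntax, which is what the version step assembles line by line. A minimal local sketch of the same technique (the mktemp file stands in for the runner-managed $GITHUB_OUTPUT path):

    GITHUB_OUTPUT="$(mktemp)"
    {
      echo "json<<EOF"                      # open a multiline output named "json"
      cat ./configs/redis_version.json      # the value itself, possibly several lines
      echo                                  # guarantee the value ends with a newline
      echo "EOF"                            # close the delimiter
    } >> "$GITHUB_OUTPUT"
    cat "$GITHUB_OUTPUT"                    # the key<<EOF ... EOF record the runner parses

Using the literal delimiter EOF is safe here only because the JSON file cannot contain a line consisting solely of "EOF"; a randomized delimiter would be more defensive.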
- echo "UNSIGNED_REDIS_BINARY=unsigned-redis-ce-${{ vars.BINARY_VERSION_TO_BUILD }}-$(uname -m).zip" >> $GITHUB_OUTPUT + scripts/build.sh ${{ needs.set_variables.outputs.redis_version }} + echo "UNSIGNED_REDIS_BINARY=unsigned-redis-ce-${{ needs.set_variables.outputs.redis_version }}-$(uname -m).zip" >> $GITHUB_OUTPUT - name: Upload Redis CE Binary Distribution uses: actions/upload-artifact@v4 @@ -133,8 +121,8 @@ jobs: done # Create distribution archive - (cd build_dir && zip -r ../redis-ce-${{ vars.BINARY_VERSION_TO_BUILD }}-$(uname -m).zip .) - echo "REDIS_BINARY=redis-ce-${{ vars.BINARY_VERSION_TO_BUILD }}-$(uname -m).zip" >> $GITHUB_OUTPUT + (cd build_dir && zip -r ../redis-ce-${{ needs.set_variables.outputs.redis_version }}-$(uname -m).zip .) + echo "REDIS_BINARY=redis-ce-${{ needs.set_variables.outputs.redis_version }}-$(uname -m).zip" >> $GITHUB_OUTPUT - name: Notarize Redis CE Binary Distribution if: github.event_name == 'push' && github.ref == 'refs/heads/main' @@ -150,4 +138,4 @@ jobs: - name: Upload Redis CE Binary Distribution to S3 if: github.event_name == 'push' && github.ref == 'refs/heads/main' run: | - aws s3 cp ${{ steps.sign.outputs.REDIS_BINARY }} s3://${{ secrets.S3_BUCKET }}/homebrew/ --acl public-read \ No newline at end of file + aws s3 cp ${{ steps.sign.outputs.REDIS_BINARY }} s3://${{ secrets.S3_BUCKET }}/homebrew/ --acl public-read diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 51fab9d..319fe99 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,6 +7,9 @@ on: paths: - '.github/workflows/test.yml' - 'Casks/**' + - 'configs/**' + paths-ignore: + - 'configs/redis_version.json' jobs: test: diff --git a/configs/redis.conf b/configs/redis.conf index cc45ed6..892c3b0 100644 --- a/configs/redis.conf +++ b/configs/redis.conf @@ -672,7 +672,7 @@ repl-diskless-sync-max-replicas 0 repl-diskless-load disabled # Master send PINGs to its replicas in a predefined interval. It's possible to -# change this interval with the repl_ping_replica_period option. The default +# change this interval with the repl-ping-replica-period option. The default # value is 10 seconds. # # repl-ping-replica-period 10 @@ -731,6 +731,24 @@ repl-disable-tcp-nodelay no # # repl-backlog-ttl 3600 +# During a fullsync, the master may decide to send both the RDB file and the +# replication stream to the replica in parallel. This approach shifts the +# responsibility of buffering the replication stream to the replica during the +# fullsync process. The replica accumulates the replication stream data until +# the RDB file is fully loaded. Once the RDB delivery is completed and +# successfully loaded, the replica begins processing and applying the +# accumulated replication data to the db. The configuration below controls how +# much replication data the replica can accumulate during a fullsync. +# +# When the replica reaches this limit, it will stop accumulating further data. +# At this point, additional data accumulation may occur on the master side +# depending on the 'client-output-buffer-limit ' config of master. +# +# A value of 0 means replica inherits hard limit of +# 'client-output-buffer-limit ' config to limit accumulation size. +# +# replica-full-sync-buffer-limit 0 + # The replica priority is an integer number published by Redis in the INFO # output. It is used by Redis Sentinel in order to select a replica to promote # into a master if the master is no longer working correctly. 
diff --git a/configs/redis.conf b/configs/redis.conf
index cc45ed6..892c3b0 100644
--- a/configs/redis.conf
+++ b/configs/redis.conf
@@ -672,7 +672,7 @@ repl-diskless-sync-max-replicas 0
 repl-diskless-load disabled
 
 # Master send PINGs to its replicas in a predefined interval. It's possible to
-# change this interval with the repl_ping_replica_period option. The default
+# change this interval with the repl-ping-replica-period option. The default
 # value is 10 seconds.
 #
 # repl-ping-replica-period 10
@@ -731,6 +731,24 @@ repl-disable-tcp-nodelay no
 #
 # repl-backlog-ttl 3600
 
+# During a fullsync, the master may decide to send both the RDB file and the
+# replication stream to the replica in parallel. This approach shifts the
+# responsibility of buffering the replication stream to the replica during the
+# fullsync process. The replica accumulates the replication stream data until
+# the RDB file is fully loaded. Once the RDB delivery is completed and
+# successfully loaded, the replica begins processing and applying the
+# accumulated replication data to the db. The configuration below controls how
+# much replication data the replica can accumulate during a fullsync.
+#
+# When the replica reaches this limit, it will stop accumulating further data.
+# At this point, additional data accumulation may occur on the master side
+# depending on the 'client-output-buffer-limit <replica>' config of the master.
+#
+# A value of 0 means the replica inherits the hard limit of the
+# 'client-output-buffer-limit <replica>' config to limit accumulation size.
+#
+# replica-full-sync-buffer-limit 0
+
 # The replica priority is an integer number published by Redis in the INFO
 # output. It is used by Redis Sentinel in order to select a replica to promote
 # into a master if the master is no longer working correctly.
@@ -1018,6 +1036,14 @@ replica-priority 100
 # * hyperloglog - Data type: hyperloglog related.
 # * geo - Data type: geo related.
 # * stream - Data type: streams related.
+# * search - Query engine related.
+# * json - Data type: JSON related.
+# * timeseries - Data type: time series related.
+# * bloom - Data type: Bloom filter related.
+# * cuckoo - Data type: cuckoo filter related.
+# * topk - Data type: top-k related.
+# * cms - Data type: count-min sketch related.
+# * tdigest - Data type: t-digest related.
 #
 # For more information about ACL configuration please refer to
 # the Redis web site at https://redis.io/docs/latest/operate/oss_and_stack/management/security/acl/
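
The eight new categories mirror the module command families bundled with Redis 8, so ACL rules can scope a user to a single data type. A hypothetical redis-cli session (user name, password, and key pattern are invented for illustration):

    # Grant a metrics user read access plus the time series command family only.
    redis-cli ACL SETUSER metrics on '>s3cret' '~ts:*' +@read +@timeseries
    redis-cli ACL GETUSER metrics   # inspect the resulting rule set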
@@ -2316,4 +2342,365 @@ jemalloc-bg-thread yes
 # by setting the following config which takes a space delimited list of warnings
 # to suppress
 #
-# ignore-warnings ARM64-COW-BUG
\ No newline at end of file
+# ignore-warnings ARM64-COW-BUG
+
+############################## QUERY ENGINE CONFIG ############################
+
+# Keep numeric ranges in numeric tree parent nodes of leaves for `x` generations.
+# numeric, valid range: [0, 2], default: 0
+#
+# search-_numeric-ranges-parents 0
+
+# The number of iterations to run while performing background indexing
+# before we call usleep(1) (sleep for 1 microsecond) and make sure that we
+# allow Redis to process other commands.
+# numeric, valid range: [1, UINT32_MAX], default: 100
+#
+# search-bg-index-sleep-gap 100
+
+# The default dialect used in search queries.
+# numeric, valid range: [1, 4], default: 1
+#
+# search-default-dialect 1
+
+# The fork GC will only start to clean when the number of uncleaned documents
+# exceeds this threshold.
+# numeric, valid range: [1, LLONG_MAX], default: 100
+#
+# search-fork-gc-clean-threshold 100
+
+# Interval (in seconds) in which to retry running the fork GC after failure.
+# numeric, valid range: [1, LLONG_MAX], default: 5
+#
+# search-fork-gc-retry-interval 5
+
+# Interval (in seconds) in which to run the fork GC (relevant only when fork
+# GC is used).
+# numeric, valid range: [1, LLONG_MAX], default: 30
+#
+# search-fork-gc-run-interval 30
+
+# The number of seconds for the fork GC to sleep before exiting.
+# numeric, valid range: [0, LLONG_MAX], default: 0
+#
+# search-fork-gc-sleep-before-exit 0
+
+# Scan this many documents at a time during every GC iteration.
+# numeric, valid range: [1, LLONG_MAX], default: 100
+#
+# search-gc-scan-size 100
+
+# Max number of cursors for a given index that can be opened inside of a shard.
+# numeric, valid range: [0, LLONG_MAX], default: 128
+#
+# search-index-cursor-limit 128
+
+# Maximum number of results from ft.aggregate command.
+# numeric, valid range: [0, 1ULL << 31], default: 1ULL << 31
+#
+# search-max-aggregate-results 2147483648
+
+# Maximum prefix expansions to be used in a query.
+# numeric, valid range: [1, LLONG_MAX], default: 200
+#
+# search-max-prefix-expansions 200
+
+# Maximum runtime document table size (for this process).
+# numeric, valid range: [1, 100000000], default: 1000000
+#
+# search-max-doctablesize 1000000
+
+# Max idle time (in milliseconds) allowed to be set for a cursor; setting it
+# high might cause high memory consumption.
+# numeric, valid range: [1, LLONG_MAX], default: 300000
+#
+# search-cursor-max-idle 300000
+
+# Maximum number of results from ft.search command.
+# numeric, valid range: [0, 1ULL << 31], default: 1000000
+#
+# search-max-search-results 1000000
+
+# Number of worker threads to use for background tasks when the server is
+# in an operation event.
+# numeric, valid range: [1, 16], default: 4
+#
+# search-min-operation-workers 4
+
+# Minimum length of term to be considered for phonetic matching.
+# numeric, valid range: [1, LLONG_MAX], default: 3
+#
+# search-min-phonetic-term-len 3
+
+# The minimum prefix for expansions (`*`).
+# numeric, valid range: [1, LLONG_MAX], default: 2
+#
+# search-min-prefix 2
+
+# The minimum word length to stem.
+# numeric, valid range: [2, UINT32_MAX], default: 4
+#
+# search-min-stem-len 4
+
+# Delta used to increase positional offsets between array
+# slots for multi text values.
+# Can control the level of separation between phrases in different
+# array slots (related to the SLOP parameter of ft.search command).
+# numeric, valid range: [1, UINT32_MAX], default: 100
+#
+# search-multi-text-slop 100
+
+# Used for setting the buffer limit threshold for the vector similarity tiered
+# HNSW index, so that if we are using WORKERS for indexing, and the
+# number of vectors waiting in the buffer to be indexed exceeds this limit,
+# we insert new vectors directly into HNSW.
+# numeric, valid range: [0, LLONG_MAX], default: 1024
+#
+# search-tiered-hnsw-buffer-limit 1024
+
+# Query timeout (in milliseconds).
+# numeric, valid range: [1, LLONG_MAX], default: 500
+#
+# search-timeout 500
+
+# Minimum number of iterators in a union at which the iterator will
+# switch to a heap-based implementation.
+# numeric, valid range: [1, LLONG_MAX], default: 20
+#
+# search-union-iterator-heap 20
+
+# The maximum memory resize for vector similarity indexes (in bytes).
+# numeric, valid range: [0, UINT32_MAX], default: 0
+#
+# search-vss-max-resize 0
+
+# Number of worker threads to use for query processing and background tasks.
+# This configuration also affects the number of connections per shard.
+# numeric, valid range: [0, 16], default: 0
+#
+# search-workers 0
+
+# The number of high priority tasks to be executed at any given time by the
+# worker thread pool, before executing low priority tasks. After this number
+# of high priority tasks have been executed, the worker thread pool will
+# execute high and low priority tasks alternately.
+# numeric, valid range: [0, LLONG_MAX], default: 1
+#
+# search-workers-priority-bias-threshold 1
+
+# Load an extension scoring/expansion module. Immutable.
+# string, default: ""
+#
+# search-ext-load ""
+
+# Path to the Chinese dictionary configuration file (for Chinese tokenization). Immutable.
+# string, default: ""
+#
+# search-friso-ini ""
+
+# Action to perform when the search timeout is exceeded (choose RETURN or FAIL).
+# enum, valid values: ["return", "fail"], default: "fail"
+#
+# search-on-timeout fail
+
+# Determine whether some index resources are freed on a second thread.
+# bool, default: yes
+#
+# search-_free-resource-on-thread yes
+
+# Enable legacy compression of double to float.
+# bool, default: no
+#
+# search-_numeric-compress no
+
+# Disable print of time for ft.profile. For testing only.
+# bool, default: yes
+#
+# search-_print-profile-clock yes
+
+# The intersection iterator orders its child iterators by their relative
+# estimated number of results in ascending order, so that if we see first the
+# iterators with a lower count of results we will skip a larger number of
+# results, which translates into faster iteration. If this flag is set, we use
+# this optimization in a way where union iterators are factorized by the number
+# of their own children, so that we sort by the number of children times the
+# overall estimated number of results instead.
+# bool, default: no
+#
+# search-_prioritize-intersect-union-children no
+
+# Set to run without memory pools.
+# bool, default: no
+#
+# search-no-mem-pools no
+
+# Disable garbage collection (for this process).
+# bool, default: no
+#
+# search-no-gc no
+
+# Enable a commands filter which optimizes indexing on partial hash updates.
+# bool, default: no
+#
+# search-partial-indexed-docs no
+
+# Disable compression for the DocID inverted index. Boosts CPU performance.
+# bool, default: no
+#
+# search-raw-docid-encoding no
+
+# Number of search threads in the coordinator thread pool.
+# numeric, valid range: [1, LLONG_MAX], default: 20
+#
+# search-threads 20
+
+# Timeout for topology validation (in milliseconds). After this timeout,
+# any pending requests will be processed, even if the topology is not fully connected.
+# numeric, valid range: [0, LLONG_MAX], default: 30000
+#
+# search-topology-validation-timeout 30000
+
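
Because these directives live in the regular configuration system, they should respond to the standard CONFIG commands like any other setting (the ones marked Immutable above excepted). A hedged sketch against a running Redis 8 server:

    redis-cli CONFIG GET search-timeout             # 1) "search-timeout" 2) "500"
    redis-cli CONFIG GET 'search-*'                 # enumerate all query engine directives
    redis-cli CONFIG SET search-default-dialect 2   # not marked immutable, so settable at runtime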
+############################## TIME SERIES CONFIG #############################
+
+# The maximal number of per-shard threads for cross-key queries when using cluster mode
+# (TS.MRANGE, TS.MREVRANGE, TS.MGET, and TS.QUERYINDEX).
+# Note: increasing this value may either increase or decrease the performance.
+# This is a load-time configuration parameter.
+# integer, valid range: [1..16], default: 3
+#
+# ts-num-threads 3
+
+# Default compaction rules for keys newly created with TS.ADD, TS.INCRBY, and TS.DECRBY.
+# Has no effect on keys created with TS.CREATE.
+# This default value is applied to each new time series upon its creation.
+# string, see documentation for rules format, default: no compaction rules
+#
+# ts-compaction-policy ""
+
+# Default chunk encoding for automatically-created compacted time series.
+# This default value is applied to each new compacted time series automatically
+# created when ts-compaction-policy is specified.
+# valid values: COMPRESSED, UNCOMPRESSED, default: COMPRESSED
+#
+# ts-encoding COMPRESSED
+
+# Default retention period, in milliseconds. 0 means no expiration.
+# This default value is applied to each new time series upon its creation.
+# If ts-compaction-policy is specified - it is overridden for created
+# compactions as specified in ts-compaction-policy.
+# integer, valid range: [0 .. LLONG_MAX], default: 0
+#
+# ts-retention-policy 0
+
+# Default policy for handling insertion (TS.ADD and TS.MADD) of multiple
+# samples with identical timestamps.
+# This default value is applied to each new time series upon its creation.
+# string, valid values: BLOCK, FIRST, LAST, MIN, MAX, SUM, default: BLOCK
+#
+# ts-duplicate-policy BLOCK
+
+# Default initial allocation size, in bytes, for the data part of each new chunk.
+# This default value is applied to each new time series upon its creation.
+# integer, valid range: [48 .. 1048576]; must be a multiple of 8, default: 4096
+#
+# ts-chunk-size-bytes 4096
+
+# Default values for newly created time series.
+# Many sensors report data periodically. Often, the difference between the measured
+# value and the previous measured value is negligible and related to random noise
+# or to measurement accuracy limitations. In such situations it may be preferable
+# not to add the new measurement to the time series.
+# A new sample is considered a duplicate and is ignored if the following conditions are met:
+# - The time series is not a compaction;
+# - The time series' DUPLICATE_POLICY is LAST;
+# - The sample is added in-order (timestamp ≥ max_timestamp);
+# - The difference of the current timestamp from the previous timestamp
+#   (timestamp - max_timestamp) is less than or equal to ts-ignore-max-time-diff;
+# - The absolute value difference of the current value from the value at the previous maximum timestamp
+#   (abs(value - value_at_max_timestamp)) is less than or equal to ts-ignore-max-val-diff,
+# where max_timestamp is the timestamp of the sample with the largest timestamp in the time series,
+# and value_at_max_timestamp is the value at max_timestamp.
+# ts-ignore-max-time-diff: integer, valid range: [0 .. LLONG_MAX], default: 0
+# ts-ignore-max-val-diff: double, valid range: [0 .. DBL_MAX], default: 0
+#
+# ts-ignore-max-time-diff 0
+# ts-ignore-max-val-diff 0
+
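
The ts-* values are defaults applied at series-creation time; arguments given to TS.CREATE and friends take precedence. A sketch with invented key names:

    redis-cli CONFIG SET ts-duplicate-policy LAST       # future series default to LAST
    redis-cli TS.CREATE temp:kitchen                    # inherits the LAST default
    redis-cli TS.CREATE temp:lab DUPLICATE_POLICY MAX   # per-key option overrides the default
    redis-cli TS.ADD temp:kitchen '*' 21.5              # '*' = server-assigned timestamp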
+########################### BLOOM FILTERS CONFIG ##############################
+
+# Default values for new Bloom filters created with BF.ADD, BF.MADD, BF.INSERT, and BF.RESERVE.
+# These defaults are applied to each new Bloom filter upon its creation.
+
+# Error ratio
+# The desired probability for false positives.
+# For a false positive rate of 0.1% (1 in 1000) the value should be 0.001.
+# double, valid range: (0 .. 1); a value greater than 0.25 is treated as 0.25, default: 0.01
+#
+# bf-error-rate 0.01
+
+# Initial capacity
+# The number of entries intended to be added to the filter.
+# integer, valid range: [1 .. 1GB], default: 100
+#
+# bf-initial-size 100
+
+# Expansion factor
+# When capacity is reached, an additional sub-filter is created.
+# The size of the new sub-filter is the size of the last sub-filter multiplied
+# by expansion.
+# integer, valid range: [0 .. 32768]; 0 is equivalent to NONSCALING, default: 2
+#
+# bf-expansion-factor 2
+
+########################### CUCKOO FILTERS CONFIG #############################
+
+# Default values for new Cuckoo filters created with
+# CF.ADD, CF.ADDNX, CF.INSERT, CF.INSERTNX, and CF.RESERVE.
+# These defaults are applied to each new Cuckoo filter upon its creation.
+
+# Initial capacity
+# A filter will likely not fill up to 100% of its capacity.
+# Make sure to reserve extra capacity if you want to avoid expansions.
+# The value is rounded up to the next 2^n integer.
+# integer, valid range: [2*cf-bucket-size .. 1GB], default: 1024
+#
+# cf-initial-size 1024
+
+# Number of items in each bucket
+# The minimal false positive rate is 2/255 ≈ 0.78% when a bucket size of 1 is used.
+# Larger buckets increase the error rate linearly, but improve the fill rate.
+# integer, valid range: [1 .. 255], default: 2
+#
+# cf-bucket-size 2
+
+# Maximum iterations
+# Number of attempts to swap items between buckets before declaring the filter
+# as full and creating an additional filter.
+# A lower value improves performance. A higher value improves the fill rate.
+# integer, valid range: [1 .. 65535], default: 20
+#
+# cf-max-iterations 20
+
+# Expansion factor
+# When a new filter is created, its size is the size of the current filter
+# multiplied by this factor.
+# integer, valid range: [0 .. 32768]; 0 is equivalent to NONSCALING, default: 1
+#
+# cf-expansion-factor 1
+
+# Maximum expansions
+# integer, valid range: [1 .. 65536], default: 32
+#
+# cf-max-expansions 32
\ No newline at end of file
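
Likewise, the bf-*/cf-* values only seed filters created implicitly; explicit BF.RESERVE/CF.RESERVE arguments take precedence. A sketch with invented key names:

    redis-cli CONFIG SET bf-error-rate 0.001         # tighter default false-positive rate
    redis-cli BF.ADD emails user@example.com         # implicit creation picks up the defaults
    redis-cli BF.RESERVE emails:big 0.0001 1000000   # explicit error rate and capacity win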
diff --git a/configs/redis_version.json b/configs/redis_version.json
new file mode 100644
index 0000000..7ba5417
--- /dev/null
+++ b/configs/redis_version.json
@@ -0,0 +1,3 @@
+{
+  "ref": "8.0-rc1"
+}
\ No newline at end of file
diff --git a/scripts/build.sh b/scripts/build.sh
new file mode 100755
index 0000000..568f032
--- /dev/null
+++ b/scripts/build.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+export HOMEBREW_PREFIX="$(brew --prefix)"
+export BUILD_WITH_MODULES=yes
+export BUILD_TLS=yes
+export DISABLE_WERRORS=yes
+PATH="$HOMEBREW_PREFIX/opt/libtool/libexec/gnubin:$HOMEBREW_PREFIX/opt/llvm@18/bin:$HOMEBREW_PREFIX/opt/make/libexec/gnubin:$HOMEBREW_PREFIX/opt/gnu-sed/libexec/gnubin:$HOMEBREW_PREFIX/opt/coreutils/libexec/gnubin:$PATH" # Override macOS defaults.
+export LDFLAGS="-L$HOMEBREW_PREFIX/opt/llvm@18/lib"
+export CPPFLAGS="-I$HOMEBREW_PREFIX/opt/llvm@18/include"
+
+# Check that a Redis version was provided as an argument
+if [ $# -lt 1 ]; then
+    echo "Usage: $0 <redis_version>"
+    exit 1
+fi
+
+REDIS_VERSION="$1"
+
+curl -L "https://github.com/redis/redis/archive/refs/tags/$REDIS_VERSION.tar.gz" -o redis.tar.gz
+tar xzf redis.tar.gz
+
+mkdir -p build_dir/etc
+make -C "redis-$REDIS_VERSION" -j "$(nproc)" all OS=macos
+make -C "redis-$REDIS_VERSION" install PREFIX="$(pwd)/build_dir" OS=macos
+cp ./configs/redis.conf build_dir/etc/redis.conf
+(cd build_dir && zip -r "../redis-ce-$REDIS_VERSION-$(uname -m).zip" .)
diff --git a/scripts/install_deps.sh b/scripts/install_deps.sh
new file mode 100755
index 0000000..8cf6659
--- /dev/null
+++ b/scripts/install_deps.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+export HOMEBREW_NO_AUTO_UPDATE=1
+brew update
+brew install coreutils
+brew install make
+brew install openssl
+brew install llvm@18
+brew install gnu-sed
+brew install automake
+brew install libtool
+
+rm -f /usr/local/bin/cmake
+CMAKE_VERSION=3.31.6
+mkdir -p ~/Downloads/CMake
+curl --location --retry 3 "https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-macos-universal.dmg" --output ~/Downloads/CMake/cmake-macos.dmg
+hdiutil attach -mountpoint /Volumes/cmake-macos ~/Downloads/CMake/cmake-macos.dmg
+cp -R /Volumes/cmake-macos/CMake.app /Applications/
+hdiutil detach /Volumes/cmake-macos
+sudo "/Applications/CMake.app/Contents/bin/cmake-gui" --install=/usr/local/bin
+cmake --version
+
+RUST_INSTALLER=rust-1.80.1-$(if [ "$(uname -m)" = "arm64" ]; then echo "aarch64"; else echo "x86_64"; fi)-apple-darwin
+echo "Downloading and installing Rust standalone installer: ${RUST_INSTALLER}"
+wget --quiet -O "${RUST_INSTALLER}.tar.xz" "https://static.rust-lang.org/dist/${RUST_INSTALLER}.tar.xz"
+tar -xf "${RUST_INSTALLER}.tar.xz"
+(cd "${RUST_INSTALLER}" && sudo ./install.sh)
+rm -rf "${RUST_INSTALLER}"
diff --git a/scripts/notarize.sh b/scripts/notarize.sh
index cb680dc..0996cae 100755
--- a/scripts/notarize.sh
+++ b/scripts/notarize.sh
@@ -34,4 +34,4 @@ for i in `seq 1 20`; do
 done
 
 echo "No successful notarization found, exiting."
-exit 1
\ No newline at end of file
+exit 1
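
For reference, the refactored pipeline can be exercised outside CI on a macOS machine with Homebrew, mirroring what the build job now runs (version string taken from configs/redis_version.json):

    scripts/install_deps.sh    # Homebrew toolchain, pinned CMake dmg, Rust 1.80.1
    scripts/build.sh 8.0-rc1   # emits redis-ce-8.0-rc1-$(uname -m).zip in the repo root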