diff --git a/dev/subtree_config.xml b/dev/subtree_config.xml index 3a38f54dbc6..c0f65bd509c 100644 --- a/dev/subtree_config.xml +++ b/dev/subtree_config.xml @@ -33,7 +33,7 @@ name="capnproto" internal_path="libs/EXTERNAL/capnproto" external_url="https://github.com/capnproto/capnproto.git" - default_external_ref="v0.7.0"/> + default_external_ref="v0.9.1"/> > $env:GITHUB_PATH + - name: Install dependencies via Conda + run: | + conda update -n base -c defaults -q conda + conda install -n base -c defaults -q ninja openssl zlib + - name: Build and test + shell: cmd + run: | + echo "Activate conda base environment" + call activate base + echo "Building Cap'n Proto with ${{ matrix.target }}" + cmake -Hc++ -Bbuild-output ${{ matrix.arch }} -G "${{ matrix.target }}" -DCMAKE_BUILD_TYPE=debug -DCMAKE_PREFIX_PATH="%CONDA_PREFIX%" -DCMAKE_INSTALL_PREFIX=%CD%\capnproto-c++-install + cmake --build build-output --config debug --target install + + echo "Building Cap'n Proto samples with ${{ matrix.target }}" + cmake -Hc++/samples -Bbuild-output-samples ${{ matrix.arch }} -G "${{ matrix.target }}" -DCMAKE_BUILD_TYPE=debug -DCMAKE_PREFIX_PATH=%CD%\capnproto-c++-install + cmake --build build-output-samples --config debug + + cd build-output\src + ctest -V -C debug + MinGW: + runs-on: windows-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v2 + - name: Build and test + shell: cmd + run: | + echo "Deleting broken Postgres install until https://github.com/actions/virtual-environments/issues/1089 is fixed..." + rmdir /s /q C:\PROGRA~1\POSTGR~1 + + echo "Building Cap'n Proto with MinGW" + cmake -Hc++ -Bbuild-output -G "MinGW Makefiles" -DCMAKE_BUILD_TYPE=debug -DCMAKE_INSTALL_PREFIX=%CD%\capnproto-c++-install -DCMAKE_SH="CMAKE_SH-NOTFOUND" + cmake --build build-output --target install -- -j2 + + echo "Building Cap'n Proto samples with MinGW" + cmake -Hc++/samples -Bbuild-output-samples -G "MinGW Makefiles" -DCMAKE_BUILD_TYPE=debug -DCMAKE_PREFIX_PATH=%CD%\capnproto-c++-install -DCMAKE_SH="CMAKE_SH-NOTFOUND" + cmake --build build-output-samples + + cd build-output\src + ctest -V -C debug + Cygwin: + runs-on: windows-latest + strategy: + fail-fast: false + steps: + - run: git config --global core.autocrlf false + - uses: actions/checkout@v2 + # TODO(someday): If we could cache the Cygwin installation we wouldn't have to spend three + # minutes installing it for every build. Unfortunately, actions/cache@v1 does not preserve + # DOS file attributes, which corrupts the Cygwin install. In particular, Cygwin marks + # symlinks with the "DOS SYSTEM" attribute. We could cache just the downloaded packages, + # but it turns out that only saves a couple seconds; most of the time is spent unpacking.
+ - name: Install Cygwin + run: | + choco config get cacheLocation + choco install --no-progress cygwin + - name: Install Cygwin additional packages + shell: cmd + run: | + C:\tools\cygwin\cygwinsetup.exe -qgnNdO -R C:/tools/cygwin -l C:/tools/cygwin/packages -s http://mirrors.kernel.org/sourceware/cygwin/ -P autoconf,automake,libtool,gcc,gcc-g++,binutils,libssl-devel,make,zlib-devel,pkg-config,cmake,xxd + - name: Build and test + shell: cmd + run: | + C:\tools\cygwin\bin\bash -lc 'export PATH=/usr/local/bin:/usr/bin:/bin; cd /cygdrive/d/a/capnproto/capnproto; ./super-test.sh quick' diff --git a/libs/EXTERNAL/capnproto/.github/workflows/release-test.yml b/libs/EXTERNAL/capnproto/.github/workflows/release-test.yml new file mode 100644 index 00000000000..cdcedfaf73e --- /dev/null +++ b/libs/EXTERNAL/capnproto/.github/workflows/release-test.yml @@ -0,0 +1,120 @@ +name: Release Tests + +on: + push: + branches: + - master + - 'release-*' + - 'fix-release*' + +jobs: + Linux: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + # We can only run extended tests with the default version of g++, because it has to match + # the version of g++-multilib for 32-bit cross-compilation, and alternate versions of + # g++-multilib generally aren't available. Clang is more lenient, but we might as well be + # consistent. The quick tests should be able to catch issues with older and newer compiler + # versions. + compiler: [g++, clang] + steps: + - uses: actions/checkout@v2 + - name: install dependencies + run: | + export DEBIAN_FRONTEND=noninteractive + sudo apt-get update + sudo apt-get install -y build-essential git zlib1g-dev cmake libssl-dev valgrind gcc-multilib g++-multilib ${{ matrix.compiler }} + - name: super-test + run: | + ./super-test.sh ${{ matrix.compiler }} + MacOS: + runs-on: macos-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v2 + - name: install dependencies + run: | + brew install autoconf automake libtool pkg-config + - name: super-test + run: | + ./super-test.sh + MinGW-Wine: + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v2 + - name: install dependencies + run: | + export DEBIAN_FRONTEND=noninteractive + sudo dpkg --add-architecture i386 + sudo apt-get update + sudo apt-get install -y build-essential git cmake mingw-w64 wine-stable wine64 wine32 wine-binfmt + sudo update-binfmts --import wine + - name: 64-bit Build and Test + run: | + ./super-test.sh mingw x86_64-w64-mingw32 + - name: 32-bit Build and Test + run: | + ./super-test.sh mingw i686-w64-mingw32 + cmake-packaging: + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v2 + - name: install dependencies + run: | + export DEBIAN_FRONTEND=noninteractive + sudo apt-get install -y build-essential git cmake + - name: autotools-shared + run: | + ./super-test.sh cmake-package autotools-shared + - name: autotools-static + run: | + ./super-test.sh cmake-package autotools-static + - name: cmake-shared + run: | + ./super-test.sh cmake-package cmake-shared + - name: cmake-static + run: | + ./super-test.sh cmake-package cmake-static + Android: + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v2 + - name: install dependencies + run: | + export DEBIAN_FRONTEND=noninteractive + sudo apt-get install -y build-essential git + - name: fetch Android tools + if: steps.cache-android-sdk.outputs.cache-hit != 'true' + run: | + # The installed Android SDK is broken.
+ unset ANDROID_SDK_ROOT + unset ANDROID_HOME + + mkdir android-sdk + cd android-sdk + curl -o commandlinetools.zip https://dl.google.com/android/repository/commandlinetools-linux-6200805_latest.zip + unzip commandlinetools.zip + (yes || true) | tools/bin/sdkmanager --sdk_root=$PWD platform-tools 'platforms;android-25' 'system-images;android-25;google_apis;armeabi-v7a' emulator 'build-tools;25.0.2' ndk-bundle + - name: 32-bit Build and Test + run: | + # The installed Android SDK is broken. + unset ANDROID_SDK_ROOT + unset ANDROID_HOME + + echo | android-sdk/tools/bin/avdmanager create avd -n capnp -k 'system-images;android-25;google_apis;armeabi-v7a' -b google_apis/armeabi-v7a + + # avdmanager seems to set image.sysdir.1 incorrectly in the AVD's config.ini, which + # causes the emulator to fail. I don't know why. I don't know how to fix it, other than + # to patch the config like so. + sed -i -re 's,^image\.sysdir\.1=android-sdk/,image.sysdir.1=,g' $HOME/.android/avd/capnp.avd/config.ini + + ./super-test.sh android $PWD/android-sdk arm-linux-androideabi armv7a-linux-androideabi24 diff --git a/libs/EXTERNAL/capnproto/.travis.yml b/libs/EXTERNAL/capnproto/.travis.yml deleted file mode 100644 index 7355d1d5d77..00000000000 --- a/libs/EXTERNAL/capnproto/.travis.yml +++ /dev/null @@ -1,77 +0,0 @@ -branches: - only: - - master - - /release-.*/ -language: cpp -dist: trusty -sudo: false -addons: - apt: - packages: - - automake - - autoconf - - libtool - - pkg-config -# limit parallelism due to limited memory on Travis -script: CC=$MATRIX_CC CXX=$MATRIX_CXX ./super-test.sh -j2 quick - -matrix: - include: - # Old GCC - - os: linux - addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - g++-4.9 - env: - - MATRIX_CC=gcc-4.9 - - MATRIX_CXX=g++-4.9 - - # New GCC - - os: linux - addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - g++-7 - env: - - MATRIX_CC=gcc-7 - - MATRIX_CXX=g++-7 - - # Old Clang - - os: linux - addons: - apt: - sources: - - ubuntu-toolchain-r-test - - llvm-toolchain-trusty-3.6 - packages: - - clang-3.6 - - libc++-dev # clang-3.6 can't compile C++14 against libstdc++, apparently. - env: - - MATRIX_CC=clang-3.6 - - MATRIX_CXX=clang++-3.6 - - # New Clang - - os: linux - addons: - apt: - sources: - - ubuntu-toolchain-r-test - - llvm-toolchain-trusty-5.0 - packages: - - clang-5.0 - env: - - MATRIX_CC=clang-5.0 - - MATRIX_CXX=clang++-5.0 - - # Mac. We only test Clang because Mac builds are expensive for Travis and probably any - # compiler-specific problems will be caught on the Linux matrix anyway. 
- - os: osx - osx_image: xcode9.3 - env: - - MATRIX_CC=clang - - MATRIX_CXX=clang++ diff --git a/libs/EXTERNAL/capnproto/CMakeLists.txt b/libs/EXTERNAL/capnproto/CMakeLists.txt index 1a4b12fd0a0..eb40764019a 100644 --- a/libs/EXTERNAL/capnproto/CMakeLists.txt +++ b/libs/EXTERNAL/capnproto/CMakeLists.txt @@ -1,3 +1,3 @@ -cmake_minimum_required(VERSION 3.1) +cmake_minimum_required(VERSION 3.4) project("Cap'n Proto Root" CXX) add_subdirectory(c++) diff --git a/libs/EXTERNAL/capnproto/README.md b/libs/EXTERNAL/capnproto/README.md index 587cd036177..895233f191e 100644 --- a/libs/EXTERNAL/capnproto/README.md +++ b/libs/EXTERNAL/capnproto/README.md @@ -1,9 +1,10 @@ -Unix: [![Unix Build Status](https://travis-ci.org/capnproto/capnproto.svg?branch=master)](https://travis-ci.org/capnproto/capnproto) Windows: [![Windows Build Status](https://ci.appveyor.com/api/projects/status/9rxff2tujkae4hte?svg=true)](https://ci.appveyor.com/project/kentonv/capnproto) +[![Quick Tests](https://github.com/capnproto/capnproto/workflows/Quick%20Tests/badge.svg?branch=master&event=push)](https://github.com/capnproto/capnproto/actions?query=workflow%3A%22Quick+Tests%22) +[![Release Tests](https://github.com/capnproto/capnproto/workflows/Release%20Tests/badge.svg?branch=master&event=push)](https://github.com/capnproto/capnproto/actions?query=workflow%3A%22Release+Tests%22) - + Cap'n Proto is an insanely fast data interchange format and capability-based RPC system. Think JSON, except binary. Or think [Protocol Buffers](https://github.com/google/protobuf), except faster. In fact, in benchmarks, Cap'n Proto is INFINITY TIMES faster than Protocol Buffers. -[Read more...](http://kentonv.github.com/capnproto/) +[Read more...](http://kentonv.github.io/capnproto/) diff --git a/libs/EXTERNAL/capnproto/appveyor.yml b/libs/EXTERNAL/capnproto/appveyor.yml deleted file mode 100644 index d9c261a6641..00000000000 --- a/libs/EXTERNAL/capnproto/appveyor.yml +++ /dev/null @@ -1,78 +0,0 @@ -# Cap'n Proto AppVeyor configuration -# -# See https://www.appveyor.com/docs/appveyor-yml/ for configuration options. -# -# This script configures AppVeyor to: -# - Use CMake to ... -# build Cap'n Proto with VS2017. -# build Cap'n Proto samples with VS2017. -# build Cap'n Proto with MinGW. -# build Cap'n Proto with Cygwin. - -version: "{build}" - -branches: - only: - - master - - /release-.*/ -# Don't build non-master branches (unless they open a pull request). - -image: Visual Studio 2017 -# AppVeyor build worker image (VM template). - -shallow_clone: true -# Fetch repository as zip archive. - -environment: - MINGW_DIR: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64 - BUILD_TYPE: debug - - matrix: - # TODO(someday): Add MSVC x64 builds, MinGW x86 build? - - - CMAKE_GENERATOR: Visual Studio 15 2017 - BUILD_NAME: vs2017 - EXTRA_BUILD_FLAGS: # /maxcpucount - # TODO(someday): Right now /maxcpucount occasionally expresses a filesystem-related race: - # capnp-capnpc++ complains that it can't create test.capnp.h. 
- - - CMAKE_GENERATOR: MinGW Makefiles - BUILD_NAME: mingw - EXTRA_BUILD_FLAGS: -j2 - - - BUILD_NAME: cygwin - -install: - - ps: Get-Command sh.exe -All | Remove-Item - # CMake refuses to generate MinGW Makefiles if sh.exe is in the PATH - -before_build: - - set PATH=%MINGW_DIR%\bin;%PATH% - - set BUILD_DIR=build-%BUILD_NAME% - - set INSTALL_PREFIX=%CD%\capnproto-c++-%BUILD_NAME% - - cmake --version - -build_script: - - echo "Building Cap'n Proto with %CMAKE_GENERATOR%" - - if NOT "%BUILD_NAME%"=="cygwin" cmake -Hc++ -B%BUILD_DIR% -G "%CMAKE_GENERATOR%" -DCMAKE_BUILD_TYPE=%BUILD_TYPE% -DCMAKE_INSTALL_PREFIX=%INSTALL_PREFIX% - - if NOT "%BUILD_NAME%"=="cygwin" cmake --build %BUILD_DIR% --config %BUILD_TYPE% --target install -- %EXTRA_BUILD_FLAGS% - # MinGW wants the build type at configure-time while MSVC wants the build type at build-time. We - # can satisfy both by passing the build type to both cmake invocations. We have to suffer a - # warning, but both generators will work. - - - echo "Building Cap'n Proto samples with %CMAKE_GENERATOR%" - - if NOT "%BUILD_NAME%"=="cygwin" cmake -Hc++/samples -B%BUILD_DIR%-samples -G "%CMAKE_GENERATOR%" -DCMAKE_BUILD_TYPE=%BUILD_TYPE% -DCMAKE_PREFIX_PATH=%INSTALL_PREFIX% - - if NOT "%BUILD_NAME%"=="cygwin" cmake --build %BUILD_DIR%-samples --config %BUILD_TYPE% - - # Cygwin build -- use super-test.sh like other Unix builds. - # But, we need to install Cygwin's cmake package in order to pass the cmake part of super-test. - # Somewhat ridiculously, this requires downloading Cygwin's setup program and running it. - - if "%BUILD_NAME%"=="cygwin" appveyor DownloadFile "https://cygwin.com/setup-x86_64.exe" -FileName "C:\cygwin64\setup-x86_64.exe" - - if "%BUILD_NAME%"=="cygwin" C:\cygwin64\setup-x86_64.exe --quiet-mode --no-shortcuts --upgrade-also --root "C:\cygwin64" --packages cmake - - if "%BUILD_NAME%"=="cygwin" C:\cygwin64\bin\bash -lc 'cd /cygdrive/c/projects/capnproto; ./super-test.sh -j2 quick' - -test_script: - # Sleep a little to prevent interleaving test output with build output. - - if NOT "%BUILD_NAME%"=="cygwin" timeout /t 2 - - if NOT "%BUILD_NAME%"=="cygwin" cd %BUILD_DIR%\src - - if NOT "%BUILD_NAME%"=="cygwin" ctest -V -C %BUILD_TYPE% diff --git a/libs/EXTERNAL/capnproto/c++/CMakeLists.txt b/libs/EXTERNAL/capnproto/c++/CMakeLists.txt index 283d54938eb..548dfd1fe7a 100644 --- a/libs/EXTERNAL/capnproto/c++/CMakeLists.txt +++ b/libs/EXTERNAL/capnproto/c++/CMakeLists.txt @@ -1,8 +1,8 @@ -cmake_minimum_required(VERSION 3.1) +cmake_minimum_required(VERSION 3.4) project("Cap'n Proto" CXX) -set(VERSION 0.7.0) +set(VERSION 0.9.1) -set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(CheckIncludeFileCXX) include(GNUInstallDirs) @@ -41,6 +41,29 @@ else() set(CAPNP_LITE_FLAG) endif() +set(WITH_OPENSSL "AUTO" CACHE STRING + "Whether or not to build libkj-tls by linking against openssl") +# define list of values GUI will offer for the variable +set_property(CACHE WITH_OPENSSL PROPERTY STRINGS AUTO ON OFF) + +# shadow cache variable original value with ON/OFF, +# so from now on OpenSSL-specific code just has to check: +# if (WITH_OPENSSL) +# ... 
+# endif() +if (CAPNP_LITE) + set(WITH_OPENSSL OFF) +elseif (WITH_OPENSSL STREQUAL "AUTO") + find_package(OpenSSL COMPONENTS Crypto SSL) + if (OPENSSL_FOUND) + set(WITH_OPENSSL ON) + else() + set(WITH_OPENSSL OFF) + endif() +elseif (WITH_OPENSSL) + find_package(OpenSSL REQUIRED COMPONENTS Crypto SSL) +endif() + if(MSVC) # TODO(cleanup): Enable higher warning level in MSVC, but make sure to test # build with that warning level and clean out false positives. @@ -129,14 +152,18 @@ if(NOT MSVC) # Don't install pkg-config files when building with MSVC set(CAPNP_PKG_CONFIG_FILES pkgconfig/kj.pc pkgconfig/capnp.pc + pkgconfig/capnpc.pc ) if(NOT CAPNP_LITE) list(APPEND CAPNP_PKG_CONFIG_FILES pkgconfig/kj-async.pc + pkgconfig/kj-gzip.pc pkgconfig/kj-http.pc pkgconfig/kj-test.pc + pkgconfig/kj-tls.pc pkgconfig/capnp-rpc.pc + pkgconfig/capnp-websocket.pc pkgconfig/capnp-json.pc ) endif() diff --git a/libs/EXTERNAL/capnproto/c++/Makefile.am b/libs/EXTERNAL/capnproto/c++/Makefile.am index 39c57b1aa26..1e3fd8e948f 100644 --- a/libs/EXTERNAL/capnproto/c++/Makefile.am +++ b/libs/EXTERNAL/capnproto/c++/Makefile.am @@ -2,7 +2,11 @@ ACLOCAL_AMFLAGS = -I m4 -AUTOMAKE_OPTIONS = foreign subdir-objects +# We use serial-tests so that test output will be written directly to stdout +# which is much preferred in CI environments where the test logs may be hard +# to get at after the fact. Most of our tests are bundled into a single +# executable anyway so cannot easily be parallelized. +AUTOMAKE_OPTIONS = foreign subdir-objects serial-tests # When running distcheck, verify that we've included all the files needed by # the cmake build. @@ -22,10 +26,12 @@ EXTRA_DIST = \ src/capnp/compiler/capnp-test.sh \ src/capnp/testdata/segmented-packed \ src/capnp/testdata/errors.capnp.nobuild \ + src/capnp/testdata/errors2.capnp.nobuild \ src/capnp/testdata/short.txt \ src/capnp/testdata/flat \ src/capnp/testdata/binary \ src/capnp/testdata/errors.txt \ + src/capnp/testdata/errors2.txt \ src/capnp/testdata/segmented \ src/capnp/testdata/packed \ src/capnp/testdata/pretty.txt \ @@ -81,21 +87,24 @@ maintainer-clean-local: public_capnpc_inputs = \ src/capnp/c++.capnp \ src/capnp/schema.capnp \ + src/capnp/stream.capnp \ src/capnp/rpc.capnp \ src/capnp/rpc-twoparty.capnp \ - src/capnp/persistent.capnp \ - src/capnp/compat/json.capnp + src/capnp/persistent.capnp capnpc_inputs = \ $(public_capnpc_inputs) \ src/capnp/compiler/lexer.capnp \ - src/capnp/compiler/grammar.capnp + src/capnp/compiler/grammar.capnp \ + src/capnp/compat/json.capnp capnpc_outputs = \ src/capnp/c++.capnp.c++ \ src/capnp/c++.capnp.h \ src/capnp/schema.capnp.c++ \ src/capnp/schema.capnp.h \ + src/capnp/stream.capnp.c++ \ + src/capnp/stream.capnp.h \ src/capnp/rpc.capnp.c++ \ src/capnp/rpc.capnp.h \ src/capnp/rpc-twoparty.capnp.c++ \ @@ -117,6 +126,7 @@ includekjstddir = $(includekjdir)/std includekjcompatdir = $(includekjdir)/compat dist_includecapnp_DATA = $(public_capnpc_inputs) +dist_includecapnpcompat_DATA = src/capnp/compat/json.capnp pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = $(CAPNP_PKG_CONFIG_FILES) @@ -136,6 +146,7 @@ includekj_HEADERS = \ src/kj/memory.h \ src/kj/refcount.h \ src/kj/array.h \ + src/kj/list.h \ src/kj/vector.h \ src/kj/string.h \ src/kj/string-tree.h \ @@ -151,6 +162,7 @@ includekj_HEADERS = \ src/kj/one-of.h \ src/kj/function.h \ src/kj/mutex.h \ + src/kj/source-location.h \ src/kj/thread.h \ src/kj/threadlocal.h \ src/kj/filesystem.h \ @@ -162,6 +174,7 @@ includekj_HEADERS = \ src/kj/async-unix.h \ src/kj/async-win32.h \ 
src/kj/async-io.h \ + src/kj/async-queue.h \ src/kj/main.h \ src/kj/test.h \ src/kj/windows-sanity.h @@ -194,6 +207,7 @@ includecapnp_HEADERS = \ src/capnp/capability.h \ src/capnp/membrane.h \ src/capnp/schema.capnp.h \ + src/capnp/stream.capnp.h \ src/capnp/schema-lite.h \ src/capnp/schema.h \ src/capnp/schema-loader.h \ @@ -217,7 +231,9 @@ includecapnp_HEADERS = \ includecapnpcompat_HEADERS = \ src/capnp/compat/json.h \ - src/capnp/compat/json.capnp.h + src/capnp/compat/json.capnp.h \ + src/capnp/compat/std-iterator.h \ + src/capnp/compat/websocket-rpc.h if BUILD_KJ_TLS MAYBE_KJ_TLS_LA=libkj-tls.la @@ -229,16 +245,21 @@ MAYBE_KJ_TLS_LA= MAYBE_KJ_TLS_TESTS= endif +if BUILD_KJ_GZIP +MAYBE_KJ_GZIP_LA=libkj-gzip.la +MAYBE_KJ_GZIP_TESTS= \ + src/kj/compat/gzip-test.c++ +else +MAYBE_KJ_TLS_LA= +MAYBE_KJ_TLS_TESTS= +endif + if LITE_MODE lib_LTLIBRARIES = libkj.la libkj-test.la libcapnp.la else -lib_LTLIBRARIES = libkj.la libkj-test.la libkj-async.la libkj-http.la $(MAYBE_KJ_TLS_LA) libcapnp.la libcapnp-rpc.la libcapnp-json.la libcapnpc.la +lib_LTLIBRARIES = libkj.la libkj-test.la libkj-async.la libkj-http.la $(MAYBE_KJ_TLS_LA) $(MAYBE_KJ_GZIP_LA) libcapnp.la libcapnp-rpc.la libcapnp-json.la libcapnp-websocket.la libcapnpc.la endif -# Don't include security release in soname -- we want to replace old binaries -# in this case. -SO_VERSION = $(shell echo $(VERSION) | sed -e 's/^\([0-9]*[.][0-9]*[.][0-9]*\)\([.][0-9]*\)*\(-.*\)*$$/\1\3/g') - libkj_la_LIBADD = $(PTHREAD_LIBS) libkj_la_LDFLAGS = -release $(SO_VERSION) -no-undefined libkj_la_SOURCES= \ @@ -247,8 +268,10 @@ libkj_la_SOURCES= \ src/kj/memory.c++ \ src/kj/refcount.c++ \ src/kj/array.c++ \ + src/kj/list.c++ \ src/kj/string.c++ \ src/kj/string-tree.c++ \ + src/kj/source-location.c++ \ src/kj/hash.c++ \ src/kj/table.c++ \ src/kj/encoding.c++ \ @@ -286,8 +309,7 @@ libkj_http_la_LIBADD = libkj-async.la libkj.la $(ASYNC_LIBS) $(PTHREAD_LIBS) libkj_http_la_LDFLAGS = -release $(SO_VERSION) -no-undefined libkj_http_la_SOURCES= \ src/kj/compat/url.c++ \ - src/kj/compat/http.c++ \ - src/kj/compat/gzip.c++ + src/kj/compat/http.c++ libkj_tls_la_LIBADD = libkj-async.la libkj.la -lssl -lcrypto $(ASYNC_LIBS) $(PTHREAD_LIBS) libkj_tls_la_LDFLAGS = -release $(SO_VERSION) -no-undefined @@ -295,6 +317,11 @@ libkj_tls_la_SOURCES= \ src/kj/compat/readiness-io.c++ \ src/kj/compat/tls.c++ +libkj_gzip_la_LIBADD = libkj-async.la libkj.la -lz $(ASYNC_LIBS) $(PTHREAD_LIBS) +libkj_gzip_la_LDFLAGS = -release $(SO_VERSION) -no-undefined +libkj_gzip_la_SOURCES= \ + src/kj/compat/gzip.c++ + endif !LITE_MODE if !LITE_MODE @@ -317,6 +344,7 @@ libcapnp_la_SOURCES= \ src/capnp/any.c++ \ src/capnp/message.c++ \ src/capnp/schema.capnp.c++ \ + src/capnp/stream.capnp.c++ \ src/capnp/serialize.c++ \ src/capnp/serialize-packed.c++ \ $(heavy_sources) @@ -343,6 +371,11 @@ libcapnp_json_la_SOURCES= \ src/capnp/compat/json.c++ \ src/capnp/compat/json.capnp.c++ +libcapnp_websocket_la_LIBADD = libcapnp.la libcapnp-rpc.la libkj.la libkj-async.la libkj-http.la $(PTHREAD_LIBS) +libcapnp_websocket_la_LDFLAGS = -release $(SO_VERSION) -no-undefined +libcapnp_websocket_la_SOURCES= \ + src/capnp/compat/websocket-rpc.c++ + libcapnpc_la_LIBADD = libcapnp.la libkj.la $(PTHREAD_LIBS) libcapnpc_la_LDFLAGS = -release $(SO_VERSION) -no-undefined libcapnpc_la_SOURCES= \ @@ -358,6 +391,9 @@ libcapnpc_la_SOURCES= \ src/capnp/compiler/grammar.capnp.c++ \ src/capnp/compiler/parser.h \ src/capnp/compiler/parser.c++ \ + src/capnp/compiler/resolver.h \ + src/capnp/compiler/generics.h \ + 
src/capnp/compiler/generics.c++ \ src/capnp/compiler/node-translator.h \ src/capnp/compiler/node-translator.c++ \ src/capnp/compiler/compiler.h \ @@ -385,10 +421,15 @@ capnpc_c___SOURCES = src/capnp/compiler/capnpc-c++.c++ # Also attempt to run ldconfig, because otherwise users get confused. If # it fails (e.g. because the platform doesn't have it, or because the # user doesn't have root privileges), don't worry about it. +# +# We need to specify the path for OpenBSD. install-exec-hook: ln -sf capnp $(DESTDIR)$(bindir)/capnpc - ldconfig < /dev/null > /dev/null 2>&1 || true - + if [ `uname` == 'OpenBSD' ]; then \ + (ldconfig /usr/local/lib /usr/lib /usr/X11R6/lib > /dev/null 2>&1 || true); \ + else \ + ldconfig < /dev/null > /dev/null 2>&1 || true; \ + fi uninstall-hook: rm -f $(DESTDIR)$(bindir)/capnpc @@ -426,14 +467,14 @@ if USE_EXTERNAL_CAPNP test_capnpc_middleman: $(test_capnpc_inputs) @$(MKDIR_P) src - $(CAPNP) compile --src-prefix=$(srcdir)/src -o$(CAPNPC_CXX):src -I$(srcdir)/src $^ + $(CAPNP) compile --src-prefix=$(srcdir)/src -o$(CAPNPC_CXX):src -I$(srcdir)/src $$(for FILE in $(test_capnpc_inputs); do echo $(srcdir)/$$FILE; done) touch test_capnpc_middleman else test_capnpc_middleman: capnp$(EXEEXT) capnpc-c++$(EXEEXT) $(test_capnpc_inputs) @$(MKDIR_P) src - echo $^ | (read CAPNP CAPNPC_CXX SOURCES && ./$$CAPNP compile --src-prefix=$(srcdir)/src -o./$$CAPNPC_CXX:src -I$(srcdir)/src $$SOURCES) + ./capnp$(EXEEXT) compile --src-prefix=$(srcdir)/src -o./capnpc-c++$(EXEEXT):src -I$(srcdir)/src $$(for FILE in $(test_capnpc_inputs); do echo $(srcdir)/$$FILE; done) touch test_capnpc_middleman endif @@ -457,17 +498,24 @@ capnp_test_LDADD = libcapnp-test.a libcapnp.la libkj-test.la libkj.la else !LITE_MODE check_PROGRAMS = capnp-test capnp-evolution-test capnp-afl-testcase +if HAS_FUZZING_ENGINE + check_PROGRAMS += capnp-llvm-fuzzer-testcase +endif heavy_tests = \ src/kj/async-test.c++ \ + src/kj/async-xthread-test.c++ \ src/kj/async-unix-test.c++ \ + src/kj/async-unix-xthread-test.c++ \ src/kj/async-win32-test.c++ \ + src/kj/async-win32-xthread-test.c++ \ src/kj/async-io-test.c++ \ + src/kj/async-queue-test.c++ \ src/kj/parse/common-test.c++ \ src/kj/parse/char-test.c++ \ src/kj/std/iostream-test.c++ \ src/kj/compat/url-test.c++ \ src/kj/compat/http-test.c++ \ - src/kj/compat/gzip-test.c++ \ + $(MAYBE_KJ_GZIP_TESTS) \ $(MAYBE_KJ_TLS_TESTS) \ src/capnp/canonicalize-test.c++ \ src/capnp/capability-test.c++ \ @@ -483,15 +531,18 @@ heavy_tests = \ src/capnp/rpc-twoparty-test.c++ \ src/capnp/ez-rpc-test.c++ \ src/capnp/compat/json-test.c++ \ + src/capnp/compat/websocket-rpc-test.c++ \ src/capnp/compiler/lexer-test.c++ \ src/capnp/compiler/type-id-test.c++ capnp_test_LDADD = \ libcapnp-test.a \ libcapnpc.la \ libcapnp-rpc.la \ + libcapnp-websocket.la \ libcapnp-json.la \ libcapnp.la \ libkj-http.la \ + $(MAYBE_KJ_GZIP_LA) \ $(MAYBE_KJ_TLS_LA) \ libkj-async.la \ libkj-test.la \ @@ -507,6 +558,7 @@ capnp_test_SOURCES = \ src/kj/memory-test.c++ \ src/kj/refcount-test.c++ \ src/kj/array-test.c++ \ + src/kj/list-test.c++ \ src/kj/string-test.c++ \ src/kj/string-tree-test.c++ \ src/kj/table-test.c++ \ @@ -521,8 +573,8 @@ capnp_test_SOURCES = \ src/kj/function-test.c++ \ src/kj/io-test.c++ \ src/kj/mutex-test.c++ \ + src/kj/time-test.c++ \ src/kj/threadlocal-test.c++ \ - src/kj/threadlocal-pthread-test.c++ \ src/kj/filesystem-test.c++ \ src/kj/filesystem-disk-test.c++ \ src/kj/test-test.c++ \ @@ -547,6 +599,12 @@ capnp_evolution_test_SOURCES = src/capnp/compiler/evolution-test.c++ 
capnp_afl_testcase_LDADD = libcapnp-test.a libcapnp-rpc.la libcapnp.la libkj.la libkj-async.la capnp_afl_testcase_SOURCES = src/capnp/afl-testcase.c++ + +if HAS_FUZZING_ENGINE + capnp_llvm_fuzzer_testcase_LDADD = libcapnp-test.a libcapnp-rpc.la libcapnp.la libkj.la libkj-async.la + capnp_llvm_fuzzer_testcase_SOURCES = src/capnp/llvm-fuzzer-testcase.c++ + capnp_llvm_fuzzer_testcase_LDFLAGS = $(LIB_FUZZING_ENGINE) +endif endif !LITE_MODE if LITE_MODE diff --git a/libs/EXTERNAL/capnproto/c++/cmake/CapnProtoConfig.cmake.in b/libs/EXTERNAL/capnproto/c++/cmake/CapnProtoConfig.cmake.in index 94ceec0a739..667f502fb55 100644 --- a/libs/EXTERNAL/capnproto/c++/cmake/CapnProtoConfig.cmake.in +++ b/libs/EXTERNAL/capnproto/c++/cmake/CapnProtoConfig.cmake.in @@ -49,7 +49,18 @@ if(NOT _IMPORT_PREFIX) set(_IMPORT_PREFIX ${PACKAGE_PREFIX_DIR}) endif() - +if (@WITH_OPENSSL@) # WITH_OPENSSL + include(CMakeFindDependencyMacro) + if (CMAKE_VERSION VERSION_LESS 3.9) + # find_dependency() did not support COMPONENTS until CMake 3.9 + # + # in practice, this call can be erroneous + # if the user has only libcrypto installed, but not libssl + find_dependency(OpenSSL) + else() + find_dependency(OpenSSL COMPONENTS Crypto SSL) + endif() +endif() include("${CMAKE_CURRENT_LIST_DIR}/CapnProtoTargets.cmake") include("${CMAKE_CURRENT_LIST_DIR}/CapnProtoMacros.cmake") @@ -65,7 +76,7 @@ set(CAPNP_INCLUDE_DIRS ${CAPNP_INCLUDE_DIRECTORY}) # No need to list all libraries, just the leaves of the dependency tree. set(CAPNP_LIBRARIES_LITE CapnProto::capnp) set(CAPNP_LIBRARIES CapnProto::capnp-rpc CapnProto::capnp-json - CapnProto::kj-http CapnProto::kj-test) + CapnProto::kj-http) set(CAPNP_DEFINITIONS) if(TARGET CapnProto::capnp AND NOT TARGET CapnProto::capnp-rpc) diff --git a/libs/EXTERNAL/capnproto/c++/cmake/CapnProtoMacros.cmake b/libs/EXTERNAL/capnproto/c++/cmake/CapnProtoMacros.cmake index b37cb22c40c..e44b66ea0fb 100644 --- a/libs/EXTERNAL/capnproto/c++/cmake/CapnProtoMacros.cmake +++ b/libs/EXTERNAL/capnproto/c++/cmake/CapnProtoMacros.cmake @@ -30,9 +30,15 @@ function(CAPNP_GENERATE_CPP SOURCES HEADERS) set(tool_depends ${EMPTY_STRING}) #Use cmake targets available if(TARGET capnp_tool) - set(CAPNP_EXECUTABLE capnp_tool) - GET_TARGET_PROPERTY(CAPNPC_CXX_EXECUTABLE capnpc_cpp CAPNPC_CXX_EXECUTABLE) - GET_TARGET_PROPERTY(CAPNP_INCLUDE_DIRECTORY capnp_tool CAPNP_INCLUDE_DIRECTORY) + if(NOT CAPNP_EXECUTABLE) + set(CAPNP_EXECUTABLE $) + endif() + if(NOT CAPNPC_CXX_EXECUTABLE) + get_target_property(CAPNPC_CXX_EXECUTABLE capnpc_cpp CAPNPC_CXX_EXECUTABLE) + endif() + if(NOT CAPNP_INCLUDE_DIRECTORY) + get_target_property(CAPNP_INCLUDE_DIRECTORY capnp_tool CAPNP_INCLUDE_DIRECTORY) + endif() list(APPEND tool_depends capnp_tool capnpc_cpp) endif() if(NOT CAPNP_EXECUTABLE) diff --git a/libs/EXTERNAL/capnproto/c++/configure.ac b/libs/EXTERNAL/capnproto/c++/configure.ac index 39977ea62e2..72fe8456f19 100644 --- a/libs/EXTERNAL/capnproto/c++/configure.ac +++ b/libs/EXTERNAL/capnproto/c++/configure.ac @@ -1,6 +1,6 @@ ## Process this file with autoconf to produce configure. 
-AC_INIT([Capn Proto],[0.7.0],[capnproto@googlegroups.com],[capnproto-c++]) +AC_INIT([Capn Proto],[0.9.1],[capnproto@googlegroups.com],[capnproto-c++]) AC_CONFIG_SRCDIR([src/capnp/layout.c++]) AC_CONFIG_AUX_DIR([build-aux]) @@ -22,6 +22,11 @@ AC_ARG_WITH([external-capnp], one (useful for cross-compiling)])], [external_capnp=yes],[external_capnp=no]) +AC_ARG_WITH([zlib], + [AS_HELP_STRING([--with-zlib], + [build libkj-gzip by linking against zlib @<:@default=check@:>@])], + [],[with_zlib=check]) + AC_ARG_WITH([openssl], [AS_HELP_STRING([--with-openssl], [build libkj-tls by linking against openssl @<:@default=check@:>@])], @@ -112,11 +117,15 @@ CXXFLAGS="$CXXFLAGS $PTHREAD_CFLAGS" AC_DEFUN([CAPNP_PKG_CONFIG_FILES], [ \ pkgconfig/capnp.pc \ + pkgconfig/capnpc.pc \ pkgconfig/capnp-rpc.pc \ pkgconfig/capnp-json.pc \ + pkgconfig/capnp-websocket.pc \ pkgconfig/kj.pc \ pkgconfig/kj-async.pc \ pkgconfig/kj-http.pc \ + pkgconfig/kj-gzip.pc \ + pkgconfig/kj-tls.pc \ pkgconfig/kj-test.pc \ ]) AC_DEFUN([CAPNP_CMAKE_CONFIG_FILES], [ \ @@ -129,6 +138,11 @@ AC_DEFUN([CAPNP_CMAKE_CONFIG_FILES], [ \ AC_SUBST([CAPNP_PKG_CONFIG_FILES]) AC_SUBST([CAPNP_CMAKE_CONFIG_FILES]) +# Don't include security release in soname -- we want to replace old binaries +# in this case. +SO_VERSION=$(echo $VERSION | sed -e 's/^\([0-9]*[.][0-9]*[.][0-9]*\)\([.][0-9]*\)*\(-.*\)*$/\1\3/g') +AC_SUBST([SO_VERSION]) + # CapnProtoConfig.cmake.in needs these PACKAGE_* output variables. PACKAGE_INIT="set([CAPNP_PKG_CONFIG_FILES] CAPNP_PKG_CONFIG_FILES)" PACKAGE_CMAKE_INSTALL_FULL_INCLUDEDIR="\${CMAKE_CURRENT_LIST_DIR}/../../../include" @@ -140,6 +154,25 @@ AC_SUBST([PACKAGE_CMAKE_INSTALL_FULL_INCLUDEDIR]) AC_CHECK_SIZEOF([void *]) AC_SUBST(CMAKE_SIZEOF_VOID_P, $ac_cv_sizeof_void_p) +# Detect presence of zlib, if it was not specified explicitly. +AS_IF([test "$with_zlib" = check], [ + AC_CHECK_LIB(z, deflate, [:], [ + with_zlib=no + ]) + AC_CHECK_HEADER([zlib.h], [:], [ + with_zlib=no + ]) + AS_IF([test "$with_zlib" = no], [ + AC_MSG_WARN("could not find zlib -- won't build libkj-gzip") + ], [ + with_zlib=yes + ]) +]) +AS_IF([test "$with_zlib" != no], [ + CXXFLAGS="$CXXFLAGS -DKJ_HAS_ZLIB" +]) +AM_CONDITIONAL([BUILD_KJ_GZIP], [test "$with_zlib" != no]) + # Detect presence of OpenSSL, if it was not specified explicitly. AS_IF([test "$with_openssl" = check], [ AC_CHECK_LIB(crypto, CRYPTO_new_ex_data, [:], [ @@ -162,5 +195,10 @@ AS_IF([test "$with_openssl" != no], [ ]) AM_CONDITIONAL([BUILD_KJ_TLS], [test "$with_openssl" != no]) +# CapnProtoConfig.cmake.in needs this variable. 
+AC_SUBST(WITH_OPENSSL, $with_openssl) + +AM_CONDITIONAL([HAS_FUZZING_ENGINE], [test "x$LIB_FUZZING_ENGINE" != "x"]) + AC_CONFIG_FILES([Makefile] CAPNP_PKG_CONFIG_FILES CAPNP_CMAKE_CONFIG_FILES) AC_OUTPUT diff --git a/libs/EXTERNAL/capnproto/c++/pkgconfig/capnp-websocket.pc.in b/libs/EXTERNAL/capnproto/c++/pkgconfig/capnp-websocket.pc.in new file mode 100644 index 00000000000..e64a28be152 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/pkgconfig/capnp-websocket.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: Cap'n Proto WebSocket RPC +Description: WebSocket MessageStream for Cap'n Proto +Version: @VERSION@ +Libs: -L${libdir} -lcapnp-websocket +Requires: capnp = @VERSION@ capnp-rpc = @VERSION@ kj = @VERSION@ kj-async = @VERSION@ kj-http = @VERSION@ +Cflags: -I${includedir} diff --git a/libs/EXTERNAL/capnproto/c++/pkgconfig/capnpc.pc.in b/libs/EXTERNAL/capnproto/c++/pkgconfig/capnpc.pc.in new file mode 100644 index 00000000000..4e62944b4b0 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/pkgconfig/capnpc.pc.in @@ -0,0 +1,12 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: Cap'n Proto +Description: Insanely fast serialization system compiler library +Version: @VERSION@ +Libs: -L${libdir} -lcapnpc @PTHREAD_CFLAGS@ @PTHREAD_LIBS@ @STDLIB_FLAG@ +Libs.private: @LIBS@ +Requires: kj = @VERSION@ +Cflags: -I${includedir} @PTHREAD_CFLAGS@ @STDLIB_FLAG@ @CAPNP_LITE_FLAG@ diff --git a/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-async.pc.in b/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-async.pc.in index 765197f34b9..49d5ff69968 100644 --- a/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-async.pc.in +++ b/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-async.pc.in @@ -6,6 +6,6 @@ includedir=@includedir@ Name: KJ Async Framework Library Description: Basic utility library called KJ (async part) Version: @VERSION@ -Libs: -L${libdir} -lkj-async @PTHREAD_CFLAGS@ @PTHREAD_LIBS@ @STDLIB_FLAG@ +Libs: -L${libdir} -lkj-async @ASYNC_LIBS@ @PTHREAD_CFLAGS@ @PTHREAD_LIBS@ @STDLIB_FLAG@ Requires: kj = @VERSION@ -Cflags: -I${includedir} @PTHREAD_CFLAGS@ @STDLIB_FLAG@ @CAPNP_LITE_FLAG@ +Cflags: -I${includedir} @ASYNC_LIBS@ @PTHREAD_CFLAGS@ @STDLIB_FLAG@ @CAPNP_LITE_FLAG@ diff --git a/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-gzip.pc.in b/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-gzip.pc.in new file mode 100644 index 00000000000..cc999a08c5b --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-gzip.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: KJ Gzip Adapters +Description: Basic utility library called KJ (gzip part) +Version: @VERSION@ +Libs: -L${libdir} -lkj-gzip @PTHREAD_CFLAGS@ @PTHREAD_LIBS@ @STDLIB_FLAG@ +Requires: kj-async = @VERSION@ +Cflags: -I${includedir} @PTHREAD_CFLAGS@ @STDLIB_FLAG@ @CAPNP_LITE_FLAG@ diff --git a/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-tls.pc.in b/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-tls.pc.in new file mode 100644 index 00000000000..421255efbef --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/pkgconfig/kj-tls.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: KJ TLS Adapters +Description: Basic utility library called KJ (TLS part) +Version: @VERSION@ +Libs: -L${libdir} -lkj-tls @PTHREAD_CFLAGS@ @PTHREAD_LIBS@ @STDLIB_FLAG@ +Requires: kj-async = @VERSION@ +Cflags: -I${includedir} @PTHREAD_CFLAGS@ @STDLIB_FLAG@ @CAPNP_LITE_FLAG@ diff --git 
a/libs/EXTERNAL/capnproto/c++/regenerate-bootstraps.sh b/libs/EXTERNAL/capnproto/c++/regenerate-bootstraps.sh index e0f350a7501..d806b835eac 100755 --- a/libs/EXTERNAL/capnproto/c++/regenerate-bootstraps.sh +++ b/libs/EXTERNAL/capnproto/c++/regenerate-bootstraps.sh @@ -5,7 +5,7 @@ set -euo pipefail export PATH=$PWD/bin:$PWD:$PATH capnp compile -Isrc --no-standard-import --src-prefix=src -oc++:src \ - src/capnp/c++.capnp src/capnp/schema.capnp \ + src/capnp/c++.capnp src/capnp/schema.capnp src/capnp/stream.capnp \ src/capnp/compiler/lexer.capnp src/capnp/compiler/grammar.capnp \ src/capnp/rpc.capnp src/capnp/rpc-twoparty.capnp src/capnp/persistent.capnp \ src/capnp/compat/json.capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/CMakeLists.txt b/libs/EXTERNAL/capnproto/c++/src/CMakeLists.txt index 621e29aa054..30353014364 100644 --- a/libs/EXTERNAL/capnproto/c++/src/CMakeLists.txt +++ b/libs/EXTERNAL/capnproto/c++/src/CMakeLists.txt @@ -5,7 +5,7 @@ if(BUILD_TESTING) include(CTest) if(EXTERNAL_CAPNP) - # Setup CAPNP_GENERATE_CPP for compiling test schemas + # Set up CAPNP_GENERATE_CPP for compiling test schemas find_package(CapnProto CONFIG QUIET) if(NOT CapnProto_FOUND) # No working installation of Cap'n Proto found, so fall back to searching the environment. diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/CMakeLists.txt b/libs/EXTERNAL/capnproto/c++/src/capnp/CMakeLists.txt index 11cbf5afa1b..3b515507604 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/CMakeLists.txt +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/CMakeLists.txt @@ -10,6 +10,7 @@ set(capnp_sources_lite any.c++ message.c++ schema.capnp.c++ + stream.capnp.c++ serialize.c++ serialize-packed.c++ ) @@ -40,6 +41,7 @@ set(capnp_headers dynamic.h schema.h schema.capnp.h + stream.capnp.h schema-lite.h schema-loader.h schema-parser.h @@ -52,9 +54,13 @@ set(capnp_headers generated-header-support.h raw-schema.h ) +set(capnp_compat_headers + compat/std-iterator.h +) set(capnp_schemas c++.capnp schema.capnp + stream.capnp ) add_library(capnp ${capnp_sources}) add_library(CapnProto::capnp ALIAS capnp) @@ -68,6 +74,7 @@ target_include_directories(capnp INTERFACE set_target_properties(capnp PROPERTIES VERSION ${VERSION}) install(TARGETS capnp ${INSTALL_TARGETS_DEFAULT_ARGS}) install(FILES ${capnp_headers} ${capnp_schemas} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/capnp") +install(FILES ${capnp_compat_headers} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/capnp/compat") set(capnp-rpc_sources serialize-async.c++ @@ -128,6 +135,24 @@ if(NOT CAPNP_LITE) install(FILES ${capnp-json_headers} ${capnp-json_schemas} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/capnp/compat") endif() +# capnp-websocket ======================================================================== + +set(capnp-websocket_sources + compat/websocket-rpc.c++ +) +set(capnp-websocket_headers + compat/websocket-rpc.h +) +if(NOT CAPNP_LITE) + add_library(capnp-websocket ${capnp-websocket_sources}) + add_library(CapnProto::capnp-websocket ALIAS capnp-websocket) + target_link_libraries(capnp-websocket PUBLIC capnp capnp-rpc kj-http kj-async kj) + # Ensure the library has a version set to match autotools build + set_target_properties(capnp-websocket PROPERTIES VERSION ${VERSION}) + install(TARGETS capnp-websocket ${INSTALL_TARGETS_DEFAULT_ARGS}) + install(FILES ${capnp-websocket_headers} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/capnp/compat") +endif() + # Tools/Compilers ============================================================== set(capnpc_sources @@ -137,6 +162,7 @@ set(capnpc_sources 
compiler/lexer.c++ compiler/grammar.capnp.c++ compiler/parser.c++ + compiler/generics.c++ compiler/node-translator.c++ compiler/compiler.c++ schema-parser.c++ @@ -161,6 +187,10 @@ if(NOT CAPNP_LITE) set_target_properties(capnp_tool PROPERTIES CAPNP_INCLUDE_DIRECTORY $,$> ) + target_compile_definitions(capnp_tool PRIVATE + "CAPNP_INCLUDE_DIR=\"${CMAKE_INSTALL_FULL_INCLUDEDIR}\"" + "VERSION=\"${VERSION}\"" + ) add_executable(capnpc_cpp compiler/capnpc-c++.c++ @@ -181,8 +211,14 @@ if(NOT CAPNP_LITE) install(TARGETS capnp_tool capnpc_cpp capnpc_capnp ${INSTALL_TARGETS_DEFAULT_ARGS}) - # Symlink capnpc -> capnp - install(CODE "execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink capnp \"\$ENV{DESTDIR}${CMAKE_INSTALL_FULL_BINDIR}/capnpc\")") + if(WIN32) + # On Windows platforms, symlinks are not guaranteed to be supported, and different versions of CMake handle create_symlink differently. + # The most portable approach in this case is to simply copy the file. + install(CODE "execute_process(COMMAND \"${CMAKE_COMMAND}\" -E copy \"\$ENV{DESTDIR}${CMAKE_INSTALL_FULL_BINDIR}/capnp${CMAKE_EXECUTABLE_SUFFIX}\" \"\$ENV{DESTDIR}${CMAKE_INSTALL_FULL_BINDIR}/capnpc${CMAKE_EXECUTABLE_SUFFIX}\")") + else() + # Symlink capnpc -> capnp + install(CODE "execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink capnp${CMAKE_EXECUTABLE_SUFFIX} \"\$ENV{DESTDIR}${CMAKE_INSTALL_FULL_BINDIR}/capnpc${CMAKE_EXECUTABLE_SUFFIX}\")") + endif() endif() # NOT CAPNP_LITE # Tests ======================================================================== @@ -215,7 +251,7 @@ if(BUILD_TESTING) if(CAPNP_LITE) set(test_libraries capnp kj-test kj) else() - set(test_libraries capnp-json capnp-rpc capnp capnpc kj-async kj-test kj) + set(test_libraries capnp-json capnp-rpc capnp-websocket capnp capnpc kj-http kj-async kj-test kj) endif() add_executable(capnp-tests @@ -260,6 +296,7 @@ if(BUILD_TESTING) compiler/type-id-test.c++ test-util.c++ compat/json-test.c++ + compat/websocket-rpc-test.c++ ${test_capnp_cpp_files} ${test_capnp_h_files} ) @@ -280,3 +317,8 @@ if(BUILD_TESTING) add_test(NAME capnp-evolution-tests-run COMMAND capnp-evolution-tests) endif() # NOT CAPNP_LITE endif() # BUILD_TESTING + +if(DEFINED ENV{LIB_FUZZING_ENGINE}) + add_executable(capnp_llvm_fuzzer_testcase llvm-fuzzer-testcase.c++ test-util.c++ test-util.h ${test_capnp_cpp_files} ${test_capnp_h_files}) + target_link_libraries(capnp_llvm_fuzzer_testcase capnp-rpc capnp kj kj-async capnp-json $ENV{LIB_FUZZING_ENGINE}) +endif() diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/any-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/any-test.c++ index dab6ecd56fd..c5b76a1d2f8 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/any-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/any-test.c++ @@ -130,7 +130,7 @@ TEST(Any, AnyStruct) { EXPECT_EQ(48, b.getDataSection().size()); EXPECT_EQ(20, b.getPointerSection().size()); -#if !_MSC_VER // TODO(msvc): ICE on the necessary constructor; see any.h. +#if !_MSC_VER || defined(__clang__) // TODO(msvc): ICE on the necessary constructor; see any.h. b = root.getAnyPointerField().getAs(); EXPECT_EQ(48, b.getDataSection().size()); EXPECT_EQ(20, b.getPointerSection().size()); @@ -144,7 +144,7 @@ TEST(Any, AnyStruct) { EXPECT_EQ(48, r.getDataSection().size()); EXPECT_EQ(20, r.getPointerSection().size()); -#if !_MSC_VER || defined(__clang__) // TODO(msvc): ICE on the necessary constructor; see any.h.
r = root.getAnyPointerField().getAs().asReader(); EXPECT_EQ(48, r.getDataSection().size()); EXPECT_EQ(20, r.getPointerSection().size()); @@ -201,7 +201,7 @@ TEST(Any, AnyList) { EXPECT_EQ(48, alb.as>()[0].getDataSection().size()); EXPECT_EQ(20, alb.as>()[0].getPointerSection().size()); -#if !_MSC_VER // TODO(msvc): ICE on the necessary constructor; see any.h. +#if !_MSC_VER || defined(__clang__) // TODO(msvc): ICE on the necessary constructor; see any.h. alb = root.getAnyPointerField().getAs>(); EXPECT_EQ(2, alb.size()); EXPECT_EQ(48, alb.as>()[0].getDataSection().size()); @@ -218,7 +218,7 @@ TEST(Any, AnyList) { EXPECT_EQ(48, alr.as>()[0].getDataSection().size()); EXPECT_EQ(20, alr.as>()[0].getPointerSection().size()); -#if !_MSC_VER // TODO(msvc): ICE on the necessary constructor; see any.h. +#if !_MSC_VER || defined(__clang__) // TODO(msvc): ICE on the necessary constructor; see any.h. alr = root.getAnyPointerField().getAs>().asReader(); EXPECT_EQ(2, alr.size()); EXPECT_EQ(48, alr.as>()[0].getDataSection().size()); diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/any.h b/libs/EXTERNAL/capnproto/c++/src/capnp/any.h index 3f29161c748..94b527dc3de 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/any.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/any.h @@ -21,15 +21,14 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "layout.h" #include "pointer-helpers.h" #include "orphan.h" #include "list.h" #include // work-around macro conflict with `VOID` +#include + +CAPNP_BEGIN_HEADER namespace capnp { @@ -509,7 +508,7 @@ class AnyStruct::Builder { inline Builder(decltype(nullptr)) {} inline Builder(_::StructBuilder builder): _builder(builder) {} -#if !_MSC_VER // TODO(msvc): MSVC ICEs on this. Try restoring when compiler improves. +#if !_MSC_VER || defined(__clang__) // TODO(msvc): MSVC ICEs on this. Try restoring when compiler improves. template ) == Kind::STRUCT>> inline Builder(T&& value) : _builder(_::PointerHelpers>::getInternalBuilder(kj::fwd(value))) {} @@ -641,7 +640,7 @@ class AnyList::Reader { inline Reader(): _reader(ElementSize::VOID) {} inline Reader(_::ListReader reader): _reader(reader) {} -#if !_MSC_VER // TODO(msvc): MSVC ICEs on this. Try restoring when compiler improves. +#if !_MSC_VER || defined(__clang__) // TODO(msvc): MSVC ICEs on this. Try restoring when compiler improves. template ) == Kind::LIST>> inline Reader(T&& value) : _reader(_::PointerHelpers>::getInternalReader(kj::fwd(value))) {} @@ -681,7 +680,7 @@ class AnyList::Builder { inline Builder(decltype(nullptr)): _builder(ElementSize::VOID) {} inline Builder(_::ListBuilder builder): _builder(builder) {} -#if !_MSC_VER // TODO(msvc): MSVC ICEs on this. Try restoring when compiler improves. +#if !_MSC_VER || defined(__clang__) // TODO(msvc): MSVC ICEs on this. Try restoring when compiler improves. 
template ) == Kind::LIST>> inline Builder(T&& value) : _builder(_::PointerHelpers>::getInternalBuilder(kj::fwd(value))) {} @@ -737,6 +736,27 @@ struct PipelineOp { }; }; +inline uint KJ_HASHCODE(const PipelineOp& op) { + switch (op.type) { + case PipelineOp::NOOP: return kj::hashCode(op.type); + case PipelineOp::GET_POINTER_FIELD: return kj::hashCode(op.type, op.pointerIndex); + } + KJ_CLANG_KNOWS_THIS_IS_UNREACHABLE_BUT_GCC_DOESNT +} + +inline bool operator==(const PipelineOp& a, const PipelineOp& b) { + if (a.type != b.type) return false; + switch (a.type) { + case PipelineOp::NOOP: return true; + case PipelineOp::GET_POINTER_FIELD: return a.pointerIndex == b.pointerIndex; + } + KJ_CLANG_KNOWS_THIS_IS_UNREACHABLE_BUT_GCC_DOESNT +} + +inline bool operator!=(const PipelineOp& a, const PipelineOp& b) { + return !(a == b); +} + class PipelineHook { // Represents a currently-running call, and implements pipelined requests on its result. @@ -754,6 +774,9 @@ class PipelineHook { template > static inline kj::Own from(Pipeline&& pipeline); + template > + static inline PipelineHook& from(Pipeline& pipeline); + private: template struct FromImpl; }; @@ -1076,6 +1099,9 @@ struct PipelineHook::FromImpl { static inline kj::Own apply(typename T::Pipeline&& pipeline) { return from(kj::mv(pipeline._typeless)); } + static inline PipelineHook& apply(typename T::Pipeline& pipeline) { + return from(pipeline._typeless); + } }; template <> @@ -1083,6 +1109,9 @@ struct PipelineHook::FromImpl { static inline kj::Own apply(AnyPointer::Pipeline&& pipeline) { return kj::mv(pipeline.hook); } + static inline PipelineHook& apply(AnyPointer::Pipeline& pipeline) { + return *pipeline.hook; + } }; template @@ -1090,6 +1119,13 @@ inline kj::Own PipelineHook::from(Pipeline&& pipeline) { return FromImpl::apply(kj::fwd(pipeline)); } +template +inline PipelineHook& PipelineHook::from(Pipeline& pipeline) { + return FromImpl::apply(pipeline); +} + #endif // !CAPNP_LITE } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/arena.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/arena.c++ index eaf195e5686..58dd07faf5a 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/arena.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/arena.c++ @@ -42,10 +42,10 @@ void ReadLimiter::unread(WordCount64 amount) { // Be careful not to overflow here. Since ReadLimiter has no thread-safety, it's possible that // the limit value was not updated correctly for one or more reads, and therefore unread() could // overflow it even if it is only unreading bytes that were actually read. - uint64_t oldValue = limit; + uint64_t oldValue = readLimit(); uint64_t newValue = oldValue + unbound(amount / WORDS); if (newValue > oldValue) { - limit = newValue; + setLimit(newValue); } } @@ -71,6 +71,22 @@ static SegmentWordCount verifySegmentSize(size_t size) { }); } +static SegmentWordCount verifySegment(kj::ArrayPtr segment) { +#if !CAPNP_ALLOW_UNALIGNED + KJ_REQUIRE(reinterpret_cast(segment.begin()) % sizeof(void*) == 0, + "Detected unaligned data in Cap'n Proto message. Messages must be aligned to the " + "architecture's word size. Yes, even on x86: Unaligned access is undefined behavior " + "under the C/C++ language standard, and compilers can and do assume alignment for the " + "purpose of optimizations. Unaligned access may lead to crashes or subtle corruption. " + "For example, GCC will use SIMD instructions in optimizations, and those instrsuctions " + "require alignment. 
If you really insist on taking your changes with unaligned data, " + "compile the Cap'n Proto library with -DCAPNP_ALLOW_UNALIGNED to remove this check.") { + break; + } +#endif + return verifySegmentSize(segment.size()); +} + inline ReaderArena::ReaderArena(MessageReader* message, const word* firstSegment, SegmentWordCount firstSegmentSize) : message(message), @@ -78,13 +94,23 @@ inline ReaderArena::ReaderArena(MessageReader* message, const word* firstSegment segment0(this, SegmentId(0), firstSegment, firstSegmentSize, &readLimiter) {} inline ReaderArena::ReaderArena(MessageReader* message, kj::ArrayPtr firstSegment) - : ReaderArena(message, firstSegment.begin(), verifySegmentSize(firstSegment.size())) {} + : ReaderArena(message, firstSegment.begin(), verifySegment(firstSegment)) {} ReaderArena::ReaderArena(MessageReader* message) : ReaderArena(message, message->getSegment(0)) {} ReaderArena::~ReaderArena() noexcept(false) {} +size_t ReaderArena::sizeInWords() { + size_t total = segment0.getArray().size(); + + for (uint i = 0; ; i++) { + SegmentReader* segment = tryGetSegment(SegmentId(i)); + if (segment == nullptr) return total; + total += unboundAs(segment->getSize() / WORDS); + } +} + SegmentReader* ReaderArena::tryGetSegment(SegmentId id) { if (id == SegmentId(0)) { if (segment0.getArray() == nullptr) { @@ -109,7 +135,7 @@ SegmentReader* ReaderArena::tryGetSegment(SegmentId id) { return nullptr; } - SegmentWordCount newSegmentSize = verifySegmentSize(newSegment.size()); + SegmentWordCount newSegmentSize = verifySegment(newSegment); if (*lock == nullptr) { // OK, the segment exists, so allocate the map. @@ -138,7 +164,7 @@ BuilderArena::BuilderArena(MessageBuilder* message, kj::ArrayPtr segments) : message(message), segment0(this, SegmentId(0), segments[0].space.begin(), - verifySegmentSize(segments[0].space.size()), + verifySegment(segments[0].space), &this->dummyLimiter, verifySegmentSize(segments[0].wordsUsed)) { if (segments.size() > 1) { kj::Vector> builders(segments.size() - 1); @@ -146,7 +172,7 @@ BuilderArena::BuilderArena(MessageBuilder* message, uint i = 1; for (auto& segment: segments.slice(1, segments.size())) { builders.add(kj::heap( - this, SegmentId(i++), segment.space.begin(), verifySegmentSize(segment.space.size()), + this, SegmentId(i++), segment.space.begin(), verifySegment(segment.space), &this->dummyLimiter, verifySegmentSize(segment.wordsUsed))); } @@ -165,6 +191,24 @@ BuilderArena::BuilderArena(MessageBuilder* message, BuilderArena::~BuilderArena() noexcept(false) {} +size_t BuilderArena::sizeInWords() { + KJ_IF_MAYBE(segmentState, moreSegments) { + size_t total = segment0.currentlyAllocated().size(); + for (auto& builder: segmentState->get()->builders) { + total += builder->currentlyAllocated().size(); + } + return total; + } else { + if (segment0.getArena() == nullptr) { + // We haven't actually allocated any segments yet. + return 0; + } else { + // We have only one segment so far. + return segment0.currentlyAllocated().size(); + } + } +} + SegmentBuilder* BuilderArena::getSegment(SegmentId id) { // This method is allowed to fail if the segment ID is not valid. if (id == SegmentId(0)) { @@ -183,7 +227,7 @@ BuilderArena::AllocateResult BuilderArena::allocate(SegmentWordCount amount) { if (segment0.getArena() == nullptr) { // We're allocating the first segment. kj::ArrayPtr ptr = message->allocateSegment(unbound(amount / WORDS)); - auto actualSize = verifySegmentSize(ptr.size()); + auto actualSize = verifySegment(ptr); // Re-allocate segment0 in-place. 
This is a bit of a hack, but we have not returned any // pointers to this segment yet, so it should be fine. @@ -311,28 +355,38 @@ void BuilderArena::reportReadLimitReached() { } } -#if !CAPNP_LITE kj::Maybe> BuilderArena::LocalCapTable::extractCap(uint index) { +#if CAPNP_LITE + KJ_UNIMPLEMENTED("no cap tables in lite mode"); +#else if (index < capTable.size()) { return capTable[index].map([](kj::Own& cap) { return cap->addRef(); }); } else { return nullptr; } +#endif } uint BuilderArena::LocalCapTable::injectCap(kj::Own&& cap) { +#if CAPNP_LITE + KJ_UNIMPLEMENTED("no cap tables in lite mode"); +#else uint result = capTable.size(); capTable.add(kj::mv(cap)); return result; +#endif } void BuilderArena::LocalCapTable::dropCap(uint index) { +#if CAPNP_LITE + KJ_UNIMPLEMENTED("no cap tables in lite mode"); +#else KJ_ASSERT(index < capTable.size(), "Invalid capability descriptor in message.") { return; } capTable[index] = nullptr; +#endif } -#endif // !CAPNP_LITE } // namespace _ (private) } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/arena.h b/libs/EXTERNAL/capnproto/c++/src/capnp/arena.h index eedab3a28ab..be4785c90c4 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/arena.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/arena.h @@ -21,10 +21,6 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #ifndef CAPNP_PRIVATE #error "This header is only meant to be included by Cap'n Proto's own source code." #endif @@ -43,6 +39,8 @@ #include "capability.h" #endif // !CAPNP_LITE +CAPNP_BEGIN_HEADER + namespace capnp { #if !CAPNP_LITE @@ -92,12 +90,32 @@ class ReadLimiter { // some data. private: - volatile uint64_t limit; - // Current limit, decremented each time catRead() is called. Volatile because multiple threads - // could be trying to modify it at once. (This is not real thread-safety, but good enough for - // the purpose of this class. See class comment.) + alignas(8) volatile uint64_t limit; + // Current limit, decremented each time catRead() is called. We modify this variable using atomics + // with "relaxed" thread safety to make TSAN happy (on ARM & x86 this is no different from a + // regular read/write of the variable). See the class comment for why this is OK (previously we + // used a regular volatile variable - this is just to make ASAN happy). + // + // alignas(8) is the default on 64-bit systems, but needed on 32-bit to avoid an expensive + // unaligned atomic operation. KJ_DISALLOW_COPY(ReadLimiter); + + KJ_ALWAYS_INLINE(void setLimit(uint64_t newLimit)) { +#if defined(__GNUC__) || defined(__clang__) + __atomic_store_n(&limit, newLimit, __ATOMIC_RELAXED); +#else + limit = newLimit; +#endif + } + + KJ_ALWAYS_INLINE(uint64_t readLimit() const) { +#if defined(__GNUC__) || defined(__clang__) + return __atomic_load_n(&limit, __ATOMIC_RELAXED); +#else + return limit; +#endif + } }; #if !CAPNP_LITE @@ -160,7 +178,7 @@ class SegmentReader { friend class SegmentBuilder; - static void abortCheckObjectFault(); + [[noreturn]] static void abortCheckObjectFault(); // Called in debug mode in cases that would segfault in opt mode. (Should be impossible!) 
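// Editorial sketch (not part of the diff): the ReadLimiter hunks above replace plain volatile
// reads/writes of `limit` with relaxed atomic accessors (readLimit()/setLimit()) so that
// ThreadSanitizer no longer flags the intentionally approximate, racy counter; per the comment in
// the hunk, the generated code on x86 and ARM is unchanged. The diff uses the GCC/Clang __atomic
// builtins with a plain fallback; the self-contained snippet below illustrates the same idea with
// portable std::atomic, purely as an analogy and not as library code.
#include <atomic>
#include <cstdint>

class ApproximateBudget {
public:
  explicit ApproximateBudget(uint64_t limit): limit(limit) {}

  bool tryConsume(uint64_t amount) {
    // Relaxed ordering: only the individual load and store need to be tear-free; no ordering
    // with other memory operations is required, and lost updates between threads are tolerated,
    // mirroring the "good enough" thread-safety described in the ReadLimiter class comment.
    uint64_t current = limit.load(std::memory_order_relaxed);
    if (amount > current) return false;
    limit.store(current - amount, std::memory_order_relaxed);
    return true;
  }

private:
  std::atomic<uint64_t> limit;
};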
}; @@ -206,7 +224,7 @@ class SegmentBuilder: public SegmentReader { bool readOnly; - void throwNotWritable(); + [[noreturn]] void throwNotWritable(); KJ_DISALLOW_COPY(SegmentBuilder); }; @@ -230,6 +248,8 @@ class ReaderArena final: public Arena { ~ReaderArena() noexcept(false); KJ_DISALLOW_COPY(ReaderArena); + size_t sizeInWords(); + // implements Arena ------------------------------------------------ SegmentReader* tryGetSegment(SegmentId id) override; void reportReadLimitReached() override; @@ -264,6 +284,8 @@ class BuilderArena final: public Arena { ~BuilderArena() noexcept(false); KJ_DISALLOW_COPY(BuilderArena); + size_t sizeInWords(); + inline SegmentBuilder* getRootSegment() { return &segment0; } kj::ArrayPtr> getSegmentsForOutput(); @@ -288,6 +310,10 @@ class BuilderArena final: public Arena { return &localCapTable; } + kj::Own<_::CapTableBuilder> releaseLocalCapTable() { + return kj::heap(kj::mv(localCapTable)); + } + SegmentBuilder* getSegment(SegmentId id); // Get the segment with the given id. Crashes or throws an exception if no such segment exists. @@ -321,13 +347,13 @@ class BuilderArena final: public Arena { MessageBuilder* message; ReadLimiter dummyLimiter; - class LocalCapTable: public CapTableBuilder { -#if !CAPNP_LITE + class LocalCapTable final: public CapTableBuilder { public: kj::Maybe> extractCap(uint index) override; uint injectCap(kj::Own&& cap) override; void dropCap(uint index) override; +#if !CAPNP_LITE private: kj::Vector>> capTable; #endif // ! CAPNP_LITE @@ -360,17 +386,19 @@ inline ReadLimiter::ReadLimiter() inline ReadLimiter::ReadLimiter(WordCount64 limit): limit(unbound(limit / WORDS)) {} -inline void ReadLimiter::reset(WordCount64 limit) { this->limit = unbound(limit / WORDS); } +inline void ReadLimiter::reset(WordCount64 limit) { + setLimit(unbound(limit / WORDS)); +} inline bool ReadLimiter::canRead(WordCount64 amount, Arena* arena) { // Be careful not to store an underflowed value into `limit`, even if multiple threads are // decrementing it. 
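// Editorial sketch (not part of the diff): the arena hunks above add ReaderArena::sizeInWords()
// and BuilderArena::sizeInWords(), which sum the sizes of all segments. A hedged usage sketch
// follows, assuming these helpers back a public MessageBuilder::sizeInWords() accessor (name
// assumed here, not shown in this diff; verify against message.h before relying on it).
#include <capnp/message.h>
#include <capnp/serialize.h>

size_t measureMessage() {
  capnp::MallocMessageBuilder message;
  message.getRoot<capnp::AnyPointer>();  // touch the root so at least one segment is allocated

  size_t heapWords = message.sizeInWords();                          // assumed accessor, see note above
  size_t wireWords = capnp::computeSerializedSizeInWords(message);   // flat wire size (existing API)

  // Both are word counts (1 word == 8 bytes); return the larger as a simple capacity estimate.
  return heapWords > wireWords ? heapWords : wireWords;
}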
- uint64_t current = limit; + uint64_t current = readLimit(); if (KJ_UNLIKELY(unbound(amount / WORDS) > current)) { arena->reportReadLimitReached(); return false; } else { - limit = current - unbound(amount / WORDS); + setLimit(current - unbound(amount / WORDS)); return true; } } @@ -491,3 +519,5 @@ inline bool SegmentBuilder::tryExtend(word* from, word* to) { } // namespace _ (private) } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/blob.h b/libs/EXTERNAL/capnproto/c++/src/capnp/blob.h index 0f2b074ce09..7e24e18cea5 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/blob.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/blob.h @@ -21,15 +21,13 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include #include "common.h" #include +CAPNP_BEGIN_HEADER + namespace capnp { struct Data { @@ -217,3 +215,5 @@ inline kj::ArrayPtr Text::Builder::slice(size_t start, size_t end) { } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/bootstrap-test.ekam-rule b/libs/EXTERNAL/capnproto/c++/src/capnp/bootstrap-test.ekam-rule index e70ee02854a..ae93b9ace80 100755 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/bootstrap-test.ekam-rule +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/bootstrap-test.ekam-rule @@ -51,8 +51,9 @@ fi mkdir -p tmp/capnp/bootstrap-test-tmp -INPUTS="capnp/c++.capnp capnp/schema.capnp capnp/compiler/lexer.capnp capnp/compiler/grammar.capnp \ -capnp/rpc.capnp capnp/rpc-twoparty.capnp capnp/persistent.capnp" +INPUTS="capnp/c++.capnp capnp/schema.capnp capnp/stream.capnp capnp/compiler/lexer.capnp \ +capnp/compiler/grammar.capnp capnp/rpc.capnp capnp/rpc-twoparty.capnp capnp/persistent.capnp \ +capnp/compat/json.capnp" SRC_INPUTS="" for file in $INPUTS; do diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/c++.capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/c++.capnp.h index 69a2bdf37a6..73d35cad904 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/c++.capnp.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/c++.capnp.h @@ -6,11 +6,13 @@ #include #include -#if CAPNP_VERSION != 7000 +#if CAPNP_VERSION != 9001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." 
#endif +CAPNP_BEGIN_HEADER + namespace capnp { namespace schemas { @@ -30,3 +32,5 @@ namespace annotations { } // namespace } // namespace +CAPNP_END_HEADER + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/capability-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/capability-test.c++ index edd034cb29e..6a934a94c83 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/capability-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/capability-test.c++ @@ -80,6 +80,39 @@ TEST(Capability, Basic) { EXPECT_TRUE(barFailed); } +TEST(Capability, CapabilityList) { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + MallocMessageBuilder builder; + auto root = builder.initRoot(); + auto initCapList = root.initCapList(2); + + int callCount0 = 0; + int callCount1 = 0; + initCapList.set(0, kj::heap(callCount0)); + initCapList.set(1, kj::heap(callCount1)); + + auto capList = root.getCapList(); + auto cap0 = capList[0].castAs(); + auto cap1 = capList[1].castAs(); + + EXPECT_EQ(2u, root.getCapList().size()); + + auto request0 = cap0.fooRequest(); + request0.setI(123); + request0.setJ(true); + EXPECT_EQ("foo", request0.send().wait(waitScope).getX()); + + auto request1 = cap1.fooRequest(); + request1.setI(123); + request1.setJ(true); + EXPECT_EQ("foo", request1.send().wait(waitScope).getX()); + + EXPECT_EQ(1, callCount0); + EXPECT_EQ(1, callCount1); +} + TEST(Capability, Inheritance) { kj::EventLoop loop; kj::WaitScope waitScope(loop); @@ -145,6 +178,73 @@ TEST(Capability, Pipelining) { EXPECT_EQ(1, chainedCallCount); } +KJ_TEST("use pipeline after dropping response") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + int callCount = 0; + int chainedCallCount = 0; + test::TestPipeline::Client client(kj::heap(callCount)); + + auto request = client.getCapRequest(); + request.setN(234); + request.setInCap(test::TestInterface::Client(kj::heap(chainedCallCount))); + + auto promise = request.send(); + test::TestPipeline::GetCapResults::Pipeline pipeline = kj::mv(promise); + + { + auto response = promise.wait(waitScope); + KJ_EXPECT(response.getS() == "bar"); + } + + auto pipelineRequest = pipeline.getOutBox().getCap().fooRequest(); + pipelineRequest.setI(321); + auto pipelinePromise = pipelineRequest.send(); + + auto pipelineRequest2 = pipeline.getOutBox().getCap().castAs().graultRequest(); + auto pipelinePromise2 = pipelineRequest2.send(); + + auto response = pipelinePromise.wait(waitScope); + EXPECT_EQ("bar", response.getX()); + + auto response2 = pipelinePromise2.wait(waitScope); + checkTestMessage(response2); + + EXPECT_EQ(3, callCount); + EXPECT_EQ(1, chainedCallCount); +} + +KJ_TEST("context.setPipeline") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + int callCount = 0; + test::TestPipeline::Client client(kj::heap(callCount)); + + auto promise = client.getCapPipelineOnlyRequest().send(); + + auto pipelineRequest = promise.getOutBox().getCap().fooRequest(); + pipelineRequest.setI(321); + auto pipelinePromise = pipelineRequest.send(); + + auto pipelineRequest2 = promise.getOutBox().getCap().castAs().graultRequest(); + auto pipelinePromise2 = pipelineRequest2.send(); + + EXPECT_EQ(0, callCount); + + auto response = pipelinePromise.wait(waitScope); + EXPECT_EQ("bar", response.getX()); + + auto response2 = pipelinePromise2.wait(waitScope); + checkTestMessage(response2); + + EXPECT_EQ(3, callCount); + + // The original promise never completed. 
+ KJ_EXPECT(!promise.poll(waitScope)); +} + TEST(Capability, TailCall) { kj::EventLoop loop; kj::WaitScope waitScope(loop); @@ -558,7 +658,7 @@ public: // in 4.8.x nor in 4.9.4: // https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=781060 // - // Unfortunatley 4.9.2 is present on many Debian Jessie systems.. + // Unfortunately 4.9.2 is present on many Debian Jessie systems.. // // For the moment, we can get away with skipping the last line as the previous line // will set things up in a way that allows the test to complete successfully. @@ -1080,6 +1180,219 @@ KJ_TEST("Promise> automatically reduces to RemotePromise wit EXPECT_EQ(1, chainedCallCount); } +KJ_TEST("clone() with caps") { + int dummy = 0; + MallocMessageBuilder builder(2048); + auto root = builder.getRoot().initAs>(3); + root.set(0, kj::heap(dummy)); + root.set(1, kj::heap(dummy)); + root.set(2, kj::heap(dummy)); + + auto copyPtr = clone(root.asReader()); + auto& copy = *copyPtr; + + KJ_ASSERT(copy.size() == 3); + KJ_EXPECT(ClientHook::from(copy[0]).get() == ClientHook::from(root[0]).get()); + KJ_EXPECT(ClientHook::from(copy[1]).get() == ClientHook::from(root[1]).get()); + KJ_EXPECT(ClientHook::from(copy[2]).get() == ClientHook::from(root[2]).get()); + + KJ_EXPECT(ClientHook::from(copy[0]).get() != ClientHook::from(root[1]).get()); + KJ_EXPECT(ClientHook::from(copy[1]).get() != ClientHook::from(root[2]).get()); + KJ_EXPECT(ClientHook::from(copy[2]).get() != ClientHook::from(root[0]).get()); +} + +KJ_TEST("Streaming calls block subsequent calls") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + auto ownServer = kj::heap(); + auto& server = *ownServer; + test::TestStreaming::Client cap = kj::mv(ownServer); + + kj::Promise promise1 = nullptr, promise2 = nullptr, promise3 = nullptr; + + { + auto req = cap.doStreamIRequest(); + req.setI(123); + promise1 = req.send(); + } + + { + auto req = cap.doStreamJRequest(); + req.setJ(321); + promise2 = req.send(); + } + + { + auto req = cap.doStreamIRequest(); + req.setI(456); + promise3 = req.send(); + } + + auto promise4 = cap.finishStreamRequest().send(); + + KJ_EXPECT(server.iSum == 0); + KJ_EXPECT(server.jSum == 0); + + KJ_EXPECT(!promise1.poll(waitScope)); + KJ_EXPECT(!promise2.poll(waitScope)); + KJ_EXPECT(!promise3.poll(waitScope)); + KJ_EXPECT(!promise4.poll(waitScope)); + + KJ_EXPECT(server.iSum == 123); + KJ_EXPECT(server.jSum == 0); + + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + + KJ_EXPECT(promise1.poll(waitScope)); + KJ_EXPECT(!promise2.poll(waitScope)); + KJ_EXPECT(!promise3.poll(waitScope)); + KJ_EXPECT(!promise4.poll(waitScope)); + + KJ_EXPECT(server.iSum == 123); + KJ_EXPECT(server.jSum == 321); + + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + + KJ_EXPECT(promise1.poll(waitScope)); + KJ_EXPECT(promise2.poll(waitScope)); + KJ_EXPECT(!promise3.poll(waitScope)); + KJ_EXPECT(!promise4.poll(waitScope)); + + KJ_EXPECT(server.iSum == 579); + KJ_EXPECT(server.jSum == 321); + + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + + KJ_EXPECT(promise1.poll(waitScope)); + KJ_EXPECT(promise2.poll(waitScope)); + KJ_EXPECT(promise3.poll(waitScope)); + KJ_EXPECT(promise4.poll(waitScope)); + + auto result = promise4.wait(waitScope); + KJ_EXPECT(result.getTotalI() == 579); + KJ_EXPECT(result.getTotalJ() == 321); +} + +KJ_TEST("Streaming calls can be canceled") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + auto ownServer = kj::heap(); + auto& server = *ownServer; + test::TestStreaming::Client cap = kj::mv(ownServer); + + kj::Promise promise1 = nullptr, 
promise2 = nullptr, promise3 = nullptr; + + { + auto req = cap.doStreamIRequest(); + req.setI(123); + promise1 = req.send(); + } + + { + auto req = cap.doStreamJRequest(); + req.setJ(321); + promise2 = req.send(); + } + + { + auto req = cap.doStreamIRequest(); + req.setI(456); + promise3 = req.send(); + } + + auto promise4 = cap.finishStreamRequest().send(); + + // Cancel the streaming calls. + promise1 = nullptr; + promise2 = nullptr; + promise3 = nullptr; + + KJ_EXPECT(server.iSum == 0); + KJ_EXPECT(server.jSum == 0); + + KJ_EXPECT(!promise4.poll(waitScope)); + + KJ_EXPECT(server.iSum == 123); + KJ_EXPECT(server.jSum == 0); + + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + + KJ_EXPECT(!promise4.poll(waitScope)); + + // The call to doStreamJ() opted into cancellation so the next call to doStreamI() happens + // immediately. + KJ_EXPECT(server.iSum == 579); + KJ_EXPECT(server.jSum == 321); + + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + + KJ_EXPECT(promise4.poll(waitScope)); + + auto result = promise4.wait(waitScope); + KJ_EXPECT(result.getTotalI() == 579); + KJ_EXPECT(result.getTotalJ() == 321); +} + +KJ_TEST("Streaming call throwing cascades to following calls") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + auto ownServer = kj::heap(); + auto& server = *ownServer; + test::TestStreaming::Client cap = kj::mv(ownServer); + + server.jShouldThrow = true; + + kj::Promise promise1 = nullptr, promise2 = nullptr, promise3 = nullptr; + + { + auto req = cap.doStreamIRequest(); + req.setI(123); + promise1 = req.send(); + } + + { + auto req = cap.doStreamJRequest(); + req.setJ(321); + promise2 = req.send(); + } + + { + auto req = cap.doStreamIRequest(); + req.setI(456); + promise3 = req.send(); + } + + auto promise4 = cap.finishStreamRequest().send(); + + KJ_EXPECT(server.iSum == 0); + KJ_EXPECT(server.jSum == 0); + + KJ_EXPECT(!promise1.poll(waitScope)); + KJ_EXPECT(!promise2.poll(waitScope)); + KJ_EXPECT(!promise3.poll(waitScope)); + KJ_EXPECT(!promise4.poll(waitScope)); + + KJ_EXPECT(server.iSum == 123); + KJ_EXPECT(server.jSum == 0); + + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + + KJ_EXPECT(promise1.poll(waitScope)); + KJ_EXPECT(promise2.poll(waitScope)); + KJ_EXPECT(promise3.poll(waitScope)); + KJ_EXPECT(promise4.poll(waitScope)); + + KJ_EXPECT(server.iSum == 123); + KJ_EXPECT(server.jSum == 321); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("throw requested", promise2.wait(waitScope)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("throw requested", promise3.wait(waitScope)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("throw requested", promise4.ignoreResult().wait(waitScope)); +} + } // namespace } // namespace _ } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/capability.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/capability.c++ index e5c830b4d92..dc5f7f2e3bf 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/capability.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/capability.c++ @@ -69,15 +69,38 @@ Capability::Client::Client(decltype(nullptr)) Capability::Client::Client(kj::Exception&& exception) : hook(newBrokenCap(kj::mv(exception))) {} -kj::Promise Capability::Server::internalUnimplemented( +kj::Promise> Capability::Client::getFd() { + auto fd = hook->getFd(); + if (fd != nullptr) { + return fd; + } else KJ_IF_MAYBE(promise, hook->whenMoreResolved()) { + return promise->attach(hook->addRef()).then([](kj::Own newHook) { + return Client(kj::mv(newHook)).getFd(); + }); + } else { + return kj::Maybe(nullptr); + } +} + +kj::Maybe> 
Capability::Server::shortenPath() { + return nullptr; +} + +Capability::Server::DispatchCallResult Capability::Server::internalUnimplemented( const char* actualInterfaceName, uint64_t requestedTypeId) { - return KJ_EXCEPTION(UNIMPLEMENTED, "Requested interface not implemented.", - actualInterfaceName, requestedTypeId); + return { + KJ_EXCEPTION(UNIMPLEMENTED, "Requested interface not implemented.", + actualInterfaceName, requestedTypeId), + false + }; } -kj::Promise Capability::Server::internalUnimplemented( +Capability::Server::DispatchCallResult Capability::Server::internalUnimplemented( const char* interfaceName, uint64_t typeId, uint16_t methodId) { - return KJ_EXCEPTION(UNIMPLEMENTED, "Method not implemented.", interfaceName, typeId, methodId); + return { + KJ_EXCEPTION(UNIMPLEMENTED, "Method not implemented.", interfaceName, typeId, methodId), + false + }; } kj::Promise Capability::Server::internalUnimplemented( @@ -98,6 +121,10 @@ kj::Promise ClientHook::whenResolved() { } } +kj::Promise Capability::Client::whenResolved() { + return hook->whenResolved().attach(hook->addRef()); +} + // ======================================================================================= static inline uint firstSegmentSize(kj::Maybe sizeHint) { @@ -116,7 +143,7 @@ public: MallocMessageBuilder message; }; -class LocalCallContext final: public CallContextHook, public kj::Refcounted { +class LocalCallContext final: public CallContextHook, public ResponseHook, public kj::Refcounted { public: LocalCallContext(kj::Own&& request, kj::Own clientRef, kj::Own> cancelAllowedFulfiller) @@ -141,6 +168,11 @@ public: } return responseBuilder; } + void setPipeline(kj::Own&& pipeline) override { + KJ_IF_MAYBE(f, tailCallPipelineFulfiller) { + f->get()->fulfill(AnyPointer::Pipeline(kj::mv(pipeline))); + } + } kj::Promise tailCall(kj::Own&& request) override { auto result = directTailCall(kj::mv(request)); KJ_IF_MAYBE(f, tailCallPipelineFulfiller) { @@ -189,10 +221,6 @@ public: RemotePromise send() override { KJ_REQUIRE(message.get() != nullptr, "Already called send() on this request."); - // For the lambda capture. - uint64_t interfaceId = this->interfaceId; - uint16_t methodId = this->methodId; - auto cancelPaf = kj::newPromiseAndFulfiller(); auto context = kj::refcounted( @@ -213,8 +241,23 @@ public: // Now the other branch returns the response from the context. auto promise = forked.addBranch().then(kj::mvCapture(context, [](kj::Own&& context) { - context->getResults(MessageSize { 0, 0 }); // force response allocation - return kj::mv(KJ_ASSERT_NONNULL(context->response)); + // force response allocation + auto reader = context->getResults(MessageSize { 0, 0 }).asReader(); + + if (context->isShared()) { + // We can't just move away context->response as `context` itself is still referenced by + // something -- probably a Pipeline object. As a bit of a hack, LocalCallContext itself + // implements ResponseHook so that we can just return a ref on it. + // + // TODO(cleanup): Maybe ResponseHook should be refcounted? Note that context->response + // might not necessarily contain a LocalResponse if it was resolved by a tail call, so + // we'd have to add refcounting to all ResponseHook implementations. + context->releaseParams(); // The call is done so params can definitely be dropped. + context->clientRef = nullptr; // Definitely not using the client cap anymore either. + return Response(reader, kj::mv(context)); + } else { + return kj::mv(KJ_ASSERT_NONNULL(context->response)); + } })); // We return the other branch. 
@@ -222,6 +265,12 @@ public: kj::mv(promise), AnyPointer::Pipeline(kj::mv(promiseAndPipeline.pipeline))); } + kj::Promise<void> sendStreaming() override { + // We don't do any special handling of streaming in RequestHook for local requests, because + // there is no latency to compensate for between the client and server in this case. + return send().ignoreResult(); + } + const void* getBrand() override { return nullptr; } @@ -275,6 +324,49 @@ private: kj::Promise<void> selfResolutionOp; // Represents the operation which will set `redirect` when possible. + + kj::HashMap<kj::Array<PipelineOp>, kj::Own<ClientHook>> clientMap; + // If the same pipelined cap is requested twice, we have to return the same object. This is + // necessary because each ClientHook we create is a QueuedClient which queues up calls. If we + // return a new one each time, there will be several queues, and ordering of calls will be lost + // between the queues. + // + // One case where this is particularly problematic is with promises resolved over RPC. Consider + // this case: + // + // * Alice holds a promise capability P pointing towards Bob. + // * Bob makes a call Q on an object hosted by Alice. + // * Without waiting for Q to complete, Bob obtains a pipelined-promise capability for Q's + // eventual result, P2. + // * Alice invokes a method M on P. The call is sent to Bob. + // * Bob resolves Alice's original promise P to P2. + // * Alice receives a Resolve message from Bob resolving P to Q's eventual result. + // * As a result, Alice calls getPipelinedCap() on the QueuedPipeline for Q's result, which + // returns a QueuedClient for that result, which we'll call QR1. + // * Alice also sends a Disembargo to Bob. + // * Alice calls a method M2 on P. This call is blocked locally waiting for the disembargo to + // complete. + // * Bob receives Alice's first method call, M. Since it's addressed to P, which later resolved + // to Q's result, Bob reflects the call back to Alice. + // * Alice receives the reflected call, which is addressed to Q's result. + // * Alice calls getPipelinedCap() on the QueuedPipeline for Q's result, which returns a + // QueuedClient for that result, which we'll call QR2. + // * Alice enqueues the call M on QR2. + // * Bob receives Alice's Disembargo message, and reflects it back. + // * Alice receives the Disembargo. + // * Alice unblocks the method call M2, which had been blocked on the embargo. + // * The call M2 is then enqueued onto QR1. + // * Finally, the call Q completes. + // * This causes QR1 and QR2 to resolve to their final destinations. But if QR1 and QR2 are + // separate objects, then one of them must resolve first. QR1 was created first, so naturally + // it resolves first, followed by QR2. + // * Because QR1 resolves first, method call M2 is delivered first. + // * QR2 resolves second, so method call M is delivered next. + // * THIS IS THE WRONG ORDER! + // + // In order to avoid this problem, it's necessary for QR1 and QR2 to be the same object, so that + // they share the same call queue. In this case, M2 is correctly enqueued onto QR2 *after* M was + // enqueued on QR1, and so the method calls are delivered in the correct order.
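// A minimal illustration of the invariant `clientMap` provides (the `somePipeline` object and
// the concrete ops below are hypothetical, not taken from the patch):
//
//   auto ops1 = kj::heapArray<PipelineOp>(1);
//   ops1[0] = PipelineOp { PipelineOp::GET_POINTER_FIELD, 0 };
//   auto ops2 = kj::heapArray(ops1.asPtr());                  // equal contents, separate array
//   kj::Own<ClientHook> a = somePipeline.getPipelinedCap(kj::mv(ops1));
//   kj::Own<ClientHook> b = somePipeline.getPipelinedCap(kj::mv(ops2));
//   // `a` and `b` now wrap the same QueuedClient, so calls sent through either are delivered
//   // in the order they were made.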
}; class QueuedClient final: public ClientHook, public kj::Refcounted { @@ -374,6 +466,14 @@ public: return nullptr; } + kj::Maybe getFd() override { + KJ_IF_MAYBE(r, redirect) { + return r->get()->getFd(); + } else { + return nullptr; + } + } + private: typedef kj::ForkedPromise> ClientHookPromiseFork; @@ -407,12 +507,15 @@ kj::Own QueuedPipeline::getPipelinedCap(kj::Array&& ops) KJ_IF_MAYBE(r, redirect) { return r->get()->getPipelinedCap(kj::mv(ops)); } else { - auto clientPromise = promise.addBranch().then(kj::mvCapture(ops, - [](kj::Array&& ops, kj::Own pipeline) { - return pipeline->getPipelinedCap(kj::mv(ops)); - })); - - return kj::refcounted(kj::mv(clientPromise)); + return clientMap.findOrCreate(ops.asPtr(), [&]() { + auto clientPromise = promise.addBranch() + .then([ops = KJ_MAP(op, ops) { return op; }](kj::Own pipeline) { + return pipeline->getPipelinedCap(kj::mv(ops)); + }); + return kj::HashMap, kj::Own>::Entry { + kj::mv(ops), kj::refcounted(kj::mv(clientPromise)) + }; + })->addRef(); } } @@ -442,11 +545,13 @@ public: LocalClient(kj::Own&& serverParam) : server(kj::mv(serverParam)) { server->thisHook = this; + startResolveTask(); } LocalClient(kj::Own&& serverParam, _::CapabilityServerSetBase& capServerSet, void* ptr) : server(kj::mv(serverParam)), capServerSet(&capServerSet), ptr(ptr) { server->thisHook = this; + startResolveTask(); } ~LocalClient() noexcept(false) { @@ -455,6 +560,14 @@ public: Request newCall( uint64_t interfaceId, uint16_t methodId, kj::Maybe sizeHint) override { + KJ_IF_MAYBE(r, resolved) { + // We resolved to a shortened path. New calls MUST go directly to the replacement capability + // so that their ordering is consistent with callers who call getResolved() to get direct + // access to the new capability. In particular it's important that we don't place these calls + // in our streaming queue. + return r->get()->newCall(interfaceId, methodId, sizeHint); + } + auto hook = kj::heap( interfaceId, methodId, sizeHint, kj::addRef(*this)); auto root = hook->message->getRoot(); @@ -463,6 +576,14 @@ public: VoidPromiseAndPipeline call(uint64_t interfaceId, uint16_t methodId, kj::Own&& context) override { + KJ_IF_MAYBE(r, resolved) { + // We resolved to a shortened path. New calls MUST go directly to the replacement capability + // so that their ordering is consistent with callers who call getResolved() to get direct + // access to the new capability. In particular it's important that we don't place these calls + // in our streaming queue. + return r->get()->call(interfaceId, methodId, kj::mv(context)); + } + auto contextPtr = context.get(); // We don't want to actually dispatch the call synchronously, because we don't want the callee @@ -474,8 +595,12 @@ public: // Note also that QueuedClient depends on this evalLater() to ensure that pipelined calls don't // complete before 'whenMoreResolved()' promises resolve. auto promise = kj::evalLater([this,interfaceId,methodId,contextPtr]() { - return server->dispatchCall(interfaceId, methodId, - CallContext(*contextPtr)); + if (blocked) { + return kj::newAdaptedPromise, BlockedCall>( + *this, interfaceId, methodId, *contextPtr); + } else { + return callInternal(interfaceId, methodId, *contextPtr); + } }).attach(kj::addRef(*this)); // We have to fork this promise for the pipeline to receive a copy of the answer. 
@@ -500,36 +625,213 @@ public: } kj::Maybe getResolved() override { - return nullptr; + return resolved.map([](kj::Own& hook) -> ClientHook& { return *hook; }); } kj::Maybe>> whenMoreResolved() override { - return nullptr; + KJ_IF_MAYBE(r, resolved) { + return kj::Promise>(r->get()->addRef()); + } else KJ_IF_MAYBE(t, resolveTask) { + return t->addBranch().then([this]() { + return KJ_ASSERT_NONNULL(resolved)->addRef(); + }); + } else { + return nullptr; + } } kj::Own addRef() override { return kj::addRef(*this); } + static const uint BRAND; + // Value is irrelevant; used for pointer. + const void* getBrand() override { - // We have no need to detect local objects. - return nullptr; + return &BRAND; } - void* getLocalServer(_::CapabilityServerSetBase& capServerSet) override { + kj::Maybe> getLocalServer(_::CapabilityServerSetBase& capServerSet) { + // If this is a local capability created through `capServerSet`, return the underlying Server. + // Otherwise, return nullptr. Default implementation (which everyone except LocalClient should + // use) always returns nullptr. + if (this->capServerSet == &capServerSet) { - return ptr; + if (blocked) { + // If streaming calls are in-flight, it could be the case that they were originally sent + // over RPC and reflected back, before the capability had resolved to a local object. In + // that case, the client may already perceive these calls as "done" because the RPC + // implementation caused the client promise to resolve early. However, the capability is + // now local, and the app is trying to break through the LocalClient wrapper and access + // the server directly, bypassing the stream queue. Since the app thinks that all + // previous calls already completed, it may then try to queue a new call directly on the + // server, jumping the queue. + // + // We can solve this by delaying getLocalServer() until all current streaming calls have + // finished. Note that if a new streaming call is started *after* this point, we need not + // worry about that, because in this case it is presumably a local call and the caller + // won't be informed of completion until the call actually does complete. Thus the caller + // is well-aware that this call is still in-flight. + // + // However, the app still cannot assume that there aren't multiple clients, perhaps even + // a malicious client that tries to send stream requests that overlap with the app's + // direct use of the server... so it's up to the app to check for and guard against + // concurrent calls after using getLocalServer(). + return kj::newAdaptedPromise, BlockedCall>(*this) + .then([this]() { return ptr; }); + } else { + return kj::Promise(ptr); + } } else { return nullptr; } } + kj::Maybe getFd() override { + return server->getFd(); + } + private: kj::Own server; _::CapabilityServerSetBase* capServerSet = nullptr; void* ptr = nullptr; + + kj::Maybe> resolveTask; + kj::Maybe> resolved; + + void startResolveTask() { + resolveTask = server->shortenPath().map([this](kj::Promise promise) { + return promise.then([this](Capability::Client&& cap) { + auto hook = ClientHook::from(kj::mv(cap)); + + if (blocked) { + // This is a streaming interface and we have some calls queued up as a result. We cannot + // resolve directly to the new shorter path because this may allow new calls to hop + // the queue -- we need to embargo new calls until the queue clears out. 
+ auto promise = kj::newAdaptedPromise, BlockedCall>(*this) + .then([hook = kj::mv(hook)]() mutable { return kj::mv(hook); }); + hook = newLocalPromiseClient(kj::mv(promise)); + } + + resolved = kj::mv(hook); + }).fork(); + }); + } + + class BlockedCall { + public: + BlockedCall(kj::PromiseFulfiller>& fulfiller, LocalClient& client, + uint64_t interfaceId, uint16_t methodId, CallContextHook& context) + : fulfiller(fulfiller), client(client), + interfaceId(interfaceId), methodId(methodId), context(context), + prev(client.blockedCallsEnd) { + *prev = *this; + client.blockedCallsEnd = &next; + } + + BlockedCall(kj::PromiseFulfiller>& fulfiller, LocalClient& client) + : fulfiller(fulfiller), client(client), prev(client.blockedCallsEnd) { + *prev = *this; + client.blockedCallsEnd = &next; + } + + ~BlockedCall() noexcept(false) { + unlink(); + } + + void unblock() { + unlink(); + KJ_IF_MAYBE(c, context) { + fulfiller.fulfill(kj::evalNow([&]() { + return client.callInternal(interfaceId, methodId, *c); + })); + } else { + // This is just a barrier. + fulfiller.fulfill(kj::READY_NOW); + } + } + + private: + kj::PromiseFulfiller>& fulfiller; + LocalClient& client; + uint64_t interfaceId; + uint16_t methodId; + kj::Maybe context; + + kj::Maybe next; + kj::Maybe* prev; + + void unlink() { + if (prev != nullptr) { + *prev = next; + KJ_IF_MAYBE(n, next) { + n->prev = prev; + } else { + client.blockedCallsEnd = prev; + } + prev = nullptr; + } + } + }; + + class BlockingScope { + public: + BlockingScope(LocalClient& client): client(client) { client.blocked = true; } + BlockingScope(): client(nullptr) {} + BlockingScope(BlockingScope&& other): client(other.client) { other.client = nullptr; } + KJ_DISALLOW_COPY(BlockingScope); + + ~BlockingScope() noexcept(false) { + KJ_IF_MAYBE(c, client) { + c->unblock(); + } + } + + private: + kj::Maybe client; + }; + + bool blocked = false; + kj::Maybe brokenException; + kj::Maybe blockedCalls; + kj::Maybe* blockedCallsEnd = &blockedCalls; + + void unblock() { + blocked = false; + while (!blocked) { + KJ_IF_MAYBE(t, blockedCalls) { + t->unblock(); + } else { + break; + } + } + } + + kj::Promise callInternal(uint64_t interfaceId, uint16_t methodId, + CallContextHook& context) { + KJ_ASSERT(!blocked); + + KJ_IF_MAYBE(e, brokenException) { + // Previous streaming call threw, so everything fails from now on. 
+ return kj::cp(*e); + } + + auto result = server->dispatchCall(interfaceId, methodId, + CallContext(context)); + if (result.isStreaming) { + return result.promise + .catch_([this](kj::Exception&& e) { + brokenException = kj::cp(e); + kj::throwRecoverableException(kj::mv(e)); + }).attach(BlockingScope(*this)); + } else { + return kj::mv(result.promise); + } + } }; +const uint LocalClient::BRAND = 0; + kj::Own Capability::Client::makeLocalClient(kj::Own&& server) { return kj::refcounted(kj::mv(server)); } @@ -544,6 +846,36 @@ kj::Own newLocalPromisePipeline(kj::Promise> // ======================================================================================= +namespace _ { // private + +class PipelineBuilderHook final: public PipelineHook, public kj::Refcounted { +public: + PipelineBuilderHook(uint firstSegmentWords) + : message(firstSegmentWords), + root(message.getRoot()) {} + + kj::Own addRef() override { + return kj::addRef(*this); + } + + kj::Own getPipelinedCap(kj::ArrayPtr ops) override { + return root.asReader().getPipelinedCap(ops); + } + + MallocMessageBuilder message; + AnyPointer::Builder root; +}; + +PipelineBuilderPair newPipelineBuilder(uint firstSegmentWords) { + auto hook = kj::refcounted(firstSegmentWords); + auto root = hook->root; + return { root, kj::mv(hook) }; +} + +} // namespace _ (private) + +// ======================================================================================= + namespace { class BrokenPipeline final: public PipelineHook, public kj::Refcounted { @@ -570,6 +902,10 @@ public: AnyPointer::Pipeline(kj::refcounted(exception))); } + kj::Promise sendStreaming() override { + return kj::cp(exception); + } + const void* getBrand() override { return nullptr; } @@ -580,9 +916,9 @@ public: class BrokenClient final: public ClientHook, public kj::Refcounted { public: - BrokenClient(const kj::Exception& exception, bool resolved, const void* brand = nullptr) + BrokenClient(const kj::Exception& exception, bool resolved, const void* brand) : exception(exception), resolved(resolved), brand(brand) {} - BrokenClient(const kj::StringPtr description, bool resolved, const void* brand = nullptr) + BrokenClient(const kj::StringPtr description, bool resolved, const void* brand) : exception(kj::Exception::Type::FAILED, "", 0, kj::str(description)), resolved(resolved), brand(brand) {} @@ -616,6 +952,10 @@ public: return brand; } + kj::Maybe getFd() override { + return nullptr; + } + private: kj::Exception exception; bool resolved; @@ -623,7 +963,7 @@ private: }; kj::Own BrokenPipeline::getPipelinedCap(kj::ArrayPtr ops) { - return kj::refcounted(exception, false); + return kj::refcounted(exception, false, &ClientHook::BROKEN_CAPABILITY_BRAND); } kj::Own newNullCap() { @@ -635,11 +975,11 @@ kj::Own newNullCap() { } // namespace kj::Own newBrokenCap(kj::StringPtr reason) { - return kj::refcounted(reason, false); + return kj::refcounted(reason, false, &ClientHook::BROKEN_CAPABILITY_BRAND); } kj::Own newBrokenCap(kj::Exception&& reason) { - return kj::refcounted(kj::mv(reason), false); + return kj::refcounted(kj::mv(reason), false, &ClientHook::BROKEN_CAPABILITY_BRAND); } kj::Own newBrokenPipeline(kj::Exception&& reason) { @@ -708,19 +1048,35 @@ kj::Promise CapabilityServerSetBase::getLocalServerInternal(Capability::C ClientHook* hook = client.hook.get(); // Get the most-resolved-so-far version of the hook. 
- KJ_IF_MAYBE(h, hook->getResolved()) { - hook = h; - }; + for (;;) { + KJ_IF_MAYBE(h, hook->getResolved()) { + hook = h; + } else { + break; + } + } + + // Try to unwrap that. + if (hook->getBrand() == &LocalClient::BRAND) { + KJ_IF_MAYBE(promise, kj::downcast(*hook).getLocalServer(*this)) { + // This is definitely a member of our set and will resolve to non-null. We just have to wait + // for any existing streaming calls to complete. + return kj::mv(*promise); + } + } + // OK, the capability isn't part of this set. KJ_IF_MAYBE(p, hook->whenMoreResolved()) { - // This hook is an unresolved promise. We need to wait for it. + // This hook is an unresolved promise. It might resolve eventually to a local server, so wait + // for it. return p->attach(hook->addRef()) .then([this](kj::Own&& resolved) { Capability::Client client(kj::mv(resolved)); return getLocalServerInternal(client); }); } else { - return hook->getLocalServer(*this); + // Cap is settled, so it definitely will never resolve to a member of this set. + return kj::implicitCast(nullptr); } } diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/capability.h b/libs/EXTERNAL/capnproto/c++/src/capnp/capability.h index 893aa5d6a6a..125d2ccf252 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/capability.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/capability.h @@ -21,10 +21,6 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #if CAPNP_LITE #error "RPC APIs, including this header, are not available in lite mode." #endif @@ -35,6 +31,8 @@ #include "any.h" #include "pointer-helpers.h" +CAPNP_BEGIN_HEADER + namespace capnp { template @@ -61,6 +59,11 @@ class RemotePromise: public kj::Promise>, public T::Pipeline { RemotePromise(RemotePromise&& other) = default; RemotePromise& operator=(RemotePromise&& other) = default; + kj::Promise> dropPipeline() { + // Convenience method to convert this into a plain promise. + return kj::mv(*this); + } + static RemotePromise reducePromise(kj::Promise&& promise); // Hook for KJ so that Promise> automatically reduces to RemotePromise. }; @@ -69,6 +72,7 @@ class LocalClient; namespace _ { // private extern const RawSchema NULL_INTERFACE_SCHEMA; // defined in schema.c++ class CapabilityServerSetBase; +struct PipelineBuilderPair; } // namespace _ (private) struct Capability { @@ -125,6 +129,27 @@ class Request: public Params::Builder { friend class RequestHook; }; +template +class StreamingRequest: public Params::Builder { + // Like `Request` but for streaming requests. + +public: + inline StreamingRequest(typename Params::Builder builder, kj::Own&& hook) + : Params::Builder(builder), hook(kj::mv(hook)) {} + inline StreamingRequest(decltype(nullptr)): Params::Builder(nullptr) {} + + kj::Promise send() KJ_WARN_UNUSED_RESULT; + +private: + kj::Own hook; + + friend class Capability::Client; + friend struct DynamicCapability; + template + friend class CallContext; + friend class RequestHook; +}; + template class Response: public Results::Reader { // A completed call. This class extends a Reader for the call's answer structure. The Response @@ -206,6 +231,19 @@ class Capability::Client { // Make a request without knowing the types of the params or results. You specify the type ID // and method number manually. 
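// A minimal usage sketch for typelessRequest() (the interface ID, method ordinal, and
// `MyParams` type are made-up placeholders):
//
//   capnp::Capability::Client cap = getSomeCapability();
//   auto req = cap.typelessRequest(0xabcdef1234567890ull, 0, nullptr);
//   req.initAs<MyParams>().setValue(123);    // params are filled in via AnyPointer::Builder
//   auto promise = req.send();               // RemotePromise<capnp::AnyPointer>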
+ kj::Promise> getFd(); + // If the capability's server implemented Capability::Server::getFd() returning non-null, and all + // RPC links between the client and server support FD passing, returns a file descriptor pointing + // to the same underlying file description as the server did. Returns null if the server provided + // no FD or if FD passing was unavailable at some intervening link. + // + // This returns a Promise to handle the case of an unresolved promise capability, e.g. a + // pipelined capability. The promise resolves no later than when the capability settles, i.e. + // the same time `whenResolved()` would complete. + // + // The file descriptor will remain open at least as long as the Capability::Client remains alive. + // If you need it to last longer, you will need to `dup()` it. + // TODO(someday): method(s) for Join protected: @@ -214,6 +252,9 @@ class Capability::Client { template Request newCall(uint64_t interfaceId, uint16_t methodId, kj::Maybe sizeHint); + template + StreamingRequest newStreamingCall(uint64_t interfaceId, uint16_t methodId, + kj::Maybe sizeHint); private: kj::Own hook; @@ -275,6 +316,57 @@ class CallContext: public kj::DisallowConstCopy { // should not be included in the size. So, if you are simply going to copy some existing message // directly into the results, just call `.totalSize()` and pass that in. + void setPipeline(typename Results::Pipeline&& pipeline); + void setPipeline(typename Results::Pipeline& pipeline); + // Tells the system where the capabilities in the response will eventually resolve to. This + // allows requests that are promise-pipelined on this call's results to continue their journey + // to the final destination before this call itself has completed. + // + // This is particularly useful when forwarding RPC calls to other remote servers, but where a + // tail call can't be used. For example, imagine Alice calls `foo()` on Bob. In `foo()`'s + // implementation, Bob calls `bar()` on Charlie. `bar()` returns a capability to Bob, and then + // `foo()` returns the same capability on to Alice. Now imagine Alice is actually using promise + // pipelining in a chain like `foo().getCap().baz()`. The `baz()` call will travel to Bob as a + // pipelined call without waiting for `foo()` to return first. But once it gets to Bob, the + // message has to patiently wait until `foo()` has completed there, before it can then be + // forwarded on to Charlie. It would be better if immediately upon Bob calling `bar()` on + // Charlie, then Alice's call to `baz()` could be forwarded to Charlie as a pipelined call, + // without waiting for `bar()` to return. This would avoid a network round trip of latency + // between Bob and Charlie. + // + // To solve this problem, Bob takes the pipeline object from the `bar()` call, transforms it into + // an appropriate pipeline for a `foo()` call, and passes that to `setPipeline()`. This allows + // Alice's pipelined `baz()` call to flow through immediately. The code looks like: + // + // kj::Promise foo(FooContext context) { + // auto barPromise = charlie.barRequest().send(); + // + // // Set up the final pipeline using pipelined capabilities from `barPromise`. + // capnp::PipelineBuilder pipeline; + // pipeline.setResultCap(barPromise.getSomeCap()); + // context.setPipeline(pipeline.build()); + // + // // Now actually wait for the results and process them. 
+ // return barPromise + // .then([context](capnp::Response response) mutable { + // auto results = context.initResults(); + // + // // Make sure to set up the capabilities exactly as we did in the pipeline. + // results.setResultCap(response.getSomeCap()); + // + // // ... do other stuff with the real response ... + // }); + // } + // + // Of course, if `foo()` and `bar()` return exactly the same type, and Bob doesn't intend + // to do anything with `bar()`'s response except pass it through, then `tailCall()` is a better + // choice here. `setPipeline()` is useful when some transformation is needed on the response, + // or the middleman needs to inspect the response for some reason. + // + // Note: This method has an overload that takes an lvalue reference for convenience. This + // overload increments the refcount on the underlying PipelineHook -- it does not keep the + // reference. + template kj::Promise tailCall(Request&& tailRequest); // Resolve the call by making a tail call. `tailRequest` is a request that has been filled in @@ -317,6 +409,30 @@ class CallContext: public kj::DisallowConstCopy { friend struct DynamicCapability; }; +template +class StreamingCallContext: public kj::DisallowConstCopy { + // Like CallContext but for streaming calls. + +public: + explicit StreamingCallContext(CallContextHook& hook); + + typename Params::Reader getParams(); + void releaseParams(); + + // Note: tailCall() is not supported because: + // - It would significantly complicate the implementation of streaming. + // - It wouldn't be particularly useful since streaming calls don't return anything, and they + // already compensate for latency. + + void allowCancellation(); + +private: + CallContextHook* hook; + + friend class Capability::Server; + friend struct DynamicCapability; +}; + class Capability::Server { // Objects implementing a Cap'n Proto interface must subclass this. Typically, such objects // will instead subclass a typed Server interface which will take care of implementing @@ -325,12 +441,44 @@ class Capability::Server { public: typedef Capability Serves; - virtual kj::Promise dispatchCall(uint64_t interfaceId, uint16_t methodId, - CallContext context) = 0; + struct DispatchCallResult { + kj::Promise promise; + // Promise for completion of the call. + + bool isStreaming; + // If true, this method was declared as `-> stream;`. No other calls should be permitted until + // this call finishes, and if this call throws an exception, all future calls will throw the + // same exception. + }; + + virtual DispatchCallResult dispatchCall(uint64_t interfaceId, uint16_t methodId, + CallContext context) = 0; // Call the given method. `params` is the input struct, and should be released as soon as it // is no longer needed. `context` may be used to allocate the output struct and deal with // cancellation. + virtual kj::Maybe getFd() { return nullptr; } + // If this capability is backed by a file descriptor that is safe to directly expose to clients, + // returns that FD. When FD passing has been enabled in the RPC layer, this FD may be sent to + // other processes along with the capability. + + virtual kj::Maybe> shortenPath(); + // If this returns non-null, then it is a promise which, when resolved, points to a new + // capability to which future calls can be sent. Use this in cases where an object implementation + // might discover a more-optimized path some time after it starts. 
+ // + // Implementing this (and returning non-null) will cause the capability to be advertised as a + // promise at the RPC protocol level. Once the promise returned by shortenPath() resolves, the + // remote client will receive a `Resolve` message updating it to point at the new destination. + // + // `shortenPath()` can also be used as a hack to shut up the client. If shortenPath() returns + // a promise that resolves to an exception, then the client will be notified that the capability + // is now broken. Assuming the client is using a correct RPC implemnetation, this should cause + // all further calls initiated by the client to this capability to immediately fail client-side, + // sparing the server's bandwidth. + // + // The default implementation always returns nullptr. + // TODO(someday): Method which can optionally be overridden to implement Join when the object is // a proxy. @@ -349,10 +497,13 @@ class Capability::Server { template CallContext internalGetTypedContext( CallContext typeless); - kj::Promise internalUnimplemented(const char* actualInterfaceName, - uint64_t requestedTypeId); - kj::Promise internalUnimplemented(const char* interfaceName, - uint64_t typeId, uint16_t methodId); + template + StreamingCallContext internalGetTypedStreamingContext( + CallContext typeless); + DispatchCallResult internalUnimplemented(const char* actualInterfaceName, + uint64_t requestedTypeId); + DispatchCallResult internalUnimplemented(const char* interfaceName, + uint64_t typeId, uint16_t methodId); kj::Promise internalUnimplemented(const char* interfaceName, const char* methodName, uint64_t typeId, uint16_t methodId); @@ -363,6 +514,34 @@ class Capability::Server { // ======================================================================================= +template +class PipelineBuilder: public T::Builder { + // Convenience class to build a Pipeline object for use with CallContext::setPipeline(). + // + // Building a pipeline object is like building an RPC result message, except that you only need + // to fill in the capabilities, since the purpose is only to allow pipelined RPC requests to + // flow through. + // + // See the docs for `CallContext::setPipeline()` for an example. + +public: + PipelineBuilder(uint firstSegmentWords = 64); + // Construct a builder, allocating the given number of words for the first segment of the backing + // message. Since `PipelineBuilder` is typically used with small RPC messages, the default size + // here is considerably smaller than with MallocMessageBuilder. + + typename T::Pipeline build(); + // Constructs a `Pipeline` object backed by the current content of this builder. Calling this + // consumes the `PipelineBuilder`; no further methods can be invoked. + +private: + kj::Own hook; + + PipelineBuilder(_::PipelineBuilderPair pair); +}; + +// ======================================================================================= + class ReaderCapabilityTable: private _::CapTableReader { // Class which imbues Readers with the ability to read capabilities. // @@ -469,6 +648,9 @@ class RequestHook { virtual RemotePromise send() = 0; // Send the call and return a promise for the result. + virtual kj::Promise sendStreaming() = 0; + // Send a streaming call. + virtual const void* getBrand() = 0; // Returns a void* that identifies who made this request. 
This can be used by an RPC adapter to // discover when tail call is going to be sent over its own connection and therefore can be @@ -552,16 +734,19 @@ class ClientHook { // therefore it can transfer the capability without proxying. static const uint NULL_CAPABILITY_BRAND; - // Value is irrelevant; used for pointer. + static const uint BROKEN_CAPABILITY_BRAND; + // Values are irrelevant; used for pointers. inline bool isNull() { return getBrand() == &NULL_CAPABILITY_BRAND; } // Returns true if the capability was created as a result of assigning a Client to null or by // reading a null pointer out of a Cap'n Proto message. - virtual void* getLocalServer(_::CapabilityServerSetBase& capServerSet); - // If this is a local capability created through `capServerSet`, return the underlying Server. - // Otherwise, return nullptr. Default implementation (which everyone except LocalClient should - // use) always returns nullptr. + inline bool isError() { return getBrand() == &BROKEN_CAPABILITY_BRAND; } + // Returns true if the capability was created by newBrokenCap(). + + virtual kj::Maybe getFd() = 0; + // Implements Capability::Client::getFd(). If this returns null but whenMoreResolved() returns + // non-null, then Capability::Client::getFd() waits for resolution and tries again. static kj::Own from(Capability::Client client) { return kj::mv(client.hook); } }; @@ -577,6 +762,8 @@ class CallContextHook { virtual kj::Promise tailCall(kj::Own&& request) = 0; virtual void allowCancellation() = 0; + virtual void setPipeline(kj::Own&& pipeline) = 0; + virtual kj::Promise onTailCall() = 0; // If `tailCall()` is called, resolves to the PipelineHook from the tail call. An // implementation of `ClientHook::call()` is allowed to call this at most once. @@ -777,6 +964,13 @@ RemotePromise Request::send() { return RemotePromise(kj::mv(typedPromise), kj::mv(typedPipeline)); } +template +kj::Promise StreamingRequest::send() { + auto promise = hook->sendStreaming(); + hook = nullptr; // prevent reuse + return promise; +} + inline Capability::Client::Client(kj::Own&& hook): hook(kj::mv(hook)) {} template inline Capability::Client::Client(kj::Own&& server) @@ -793,9 +987,6 @@ template inline typename T::Client Capability::Client::castAs() { return typename T::Client(hook->addRef()); } -inline kj::Promise Capability::Client::whenResolved() { - return hook->whenResolved(); -} inline Request Capability::Client::typelessRequest( uint64_t interfaceId, uint16_t methodId, kj::Maybe sizeHint) { @@ -807,17 +998,33 @@ inline Request Capability::Client::newCall( auto typeless = hook->newCall(interfaceId, methodId, sizeHint); return Request(typeless.template getAs(), kj::mv(typeless.hook)); } +template +inline StreamingRequest Capability::Client::newStreamingCall( + uint64_t interfaceId, uint16_t methodId, kj::Maybe sizeHint) { + auto typeless = hook->newCall(interfaceId, methodId, sizeHint); + return StreamingRequest(typeless.template getAs(), kj::mv(typeless.hook)); +} template inline CallContext::CallContext(CallContextHook& hook): hook(&hook) {} +template +inline StreamingCallContext::StreamingCallContext(CallContextHook& hook): hook(&hook) {} template inline typename Params::Reader CallContext::getParams() { return hook->getParams().template getAs(); } +template +inline typename Params::Reader StreamingCallContext::getParams() { + return hook->getParams().template getAs(); +} template inline void CallContext::releaseParams() { hook->releaseParams(); } +template +inline void StreamingCallContext::releaseParams() { + 
hook->releaseParams(); +} template inline typename Results::Builder CallContext::getResults( kj::Maybe sizeHint) { @@ -844,6 +1051,14 @@ inline Orphanage CallContext::getResultsOrphanage( return Orphanage::getForMessageContaining(hook->getResults(sizeHint)); } template +void CallContext::setPipeline(typename Results::Pipeline&& pipeline) { + hook->setPipeline(PipelineHook::from(kj::mv(pipeline))); +} +template +void CallContext::setPipeline(typename Results::Pipeline& pipeline) { + hook->setPipeline(PipelineHook::from(pipeline).addRef()); +} +template template inline kj::Promise CallContext::tailCall( Request&& tailRequest) { @@ -853,6 +1068,10 @@ template inline void CallContext::allowCancellation() { hook->allowCancellation(); } +template +inline void StreamingCallContext::allowCancellation() { + hook->allowCancellation(); +} template CallContext Capability::Server::internalGetTypedContext( @@ -860,10 +1079,45 @@ CallContext Capability::Server::internalGetTypedContext( return CallContext(*typeless.hook); } +template +StreamingCallContext Capability::Server::internalGetTypedStreamingContext( + CallContext typeless) { + return StreamingCallContext(*typeless.hook); +} + Capability::Client Capability::Server::thisCap() { return Client(thisHook->addRef()); } +namespace _ { // private + +struct PipelineBuilderPair { + AnyPointer::Builder root; + kj::Own hook; +}; + +PipelineBuilderPair newPipelineBuilder(uint firstSegmentWords); + +} // namespace _ (private) + +template +PipelineBuilder::PipelineBuilder(uint firstSegmentWords) + : PipelineBuilder(_::newPipelineBuilder(firstSegmentWords)) {} + +template +PipelineBuilder::PipelineBuilder(_::PipelineBuilderPair pair) + : T::Builder(pair.root.initAs()), + hook(kj::mv(pair.hook)) {} + +template +typename T::Pipeline PipelineBuilder::build() { + // Prevent subsequent accidental modification. A good compiler should be able to optimize this + // assignment away assuming the PipelineBuilder is not accessed again after this point. 
+ static_cast(*this) = nullptr; + + return typename T::Pipeline(AnyPointer::Pipeline(kj::mv(hook))); +} + template T ReaderCapabilityTable::imbue(T reader) { return T(_::PointerHelpers>::getInternalReader(reader).imbue(this)); @@ -905,3 +1159,5 @@ struct Orphanage::GetInnerReader { #define CAPNP_CAPABILITY_H_INCLUDED // for testing includes in unit test } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/capnpc.ekam-rule b/libs/EXTERNAL/capnproto/c++/src/capnp/capnpc.ekam-rule index 30bf26dd015..dcecb48077f 100755 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/capnpc.ekam-rule +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/capnpc.ekam-rule @@ -33,6 +33,7 @@ INPUT=$1 case "$INPUT" in *capnp/c++.capnp | \ *capnp/schema.capnp | \ + *capnp/stream.capnp | \ *capnp/rpc.capnp | \ *capnp/rpc-twoparty.capnp | \ *capnp/persistent.capnp | \ diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/common.h b/libs/EXTERNAL/capnproto/c++/src/capnp/common.h index 26d67a6aece..aece4e51808 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/common.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/common.h @@ -25,10 +25,6 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include #include @@ -38,11 +34,21 @@ #include #endif +#if !defined(CAPNP_HEADER_WARNINGS) || !CAPNP_HEADER_WARNINGS +#define CAPNP_BEGIN_HEADER KJ_BEGIN_SYSTEM_HEADER +#define CAPNP_END_HEADER KJ_END_SYSTEM_HEADER +#else +#define CAPNP_BEGIN_HEADER +#define CAPNP_END_HEADER +#endif + +CAPNP_BEGIN_HEADER + namespace capnp { #define CAPNP_VERSION_MAJOR 0 -#define CAPNP_VERSION_MINOR 7 -#define CAPNP_VERSION_MICRO 0 +#define CAPNP_VERSION_MINOR 9 +#define CAPNP_VERSION_MICRO 1 #define CAPNP_VERSION \ (CAPNP_VERSION_MAJOR * 1000000 + CAPNP_VERSION_MINOR * 1000 + CAPNP_VERSION_MICRO) @@ -167,7 +173,7 @@ inline constexpr Kind kind() { return k; } -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #define CAPNP_KIND(T) ::capnp::_::Kind_::kind // Avoid constexpr methods in MSVC (it remains buggy in many situations). @@ -193,7 +199,7 @@ inline constexpr Style style() { template struct List; -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) template struct List {}; @@ -308,7 +314,7 @@ namespace _ { // private template struct PointerHelpers; -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) template struct PointerHelpers {}; @@ -352,7 +358,7 @@ class word { uint64_t content KJ_UNUSED_MEMBER; #if __GNUC__ < 8 || __clang__ // GCC 8's -Wclass-memaccess complains whenever we try to memcpy() a `word` if we've disallowed - // the copy constructor. We don't want to disable the warning becaues it's a useful warning and + // the copy constructor. We don't want to disable the warning because it's a useful warning and // we'd have to disable it for all applications that include this header. Instead we allow `word` // to be copyable on GCC. KJ_DISALLOW_COPY(word); @@ -741,3 +747,5 @@ inline constexpr kj::ArrayPtr arrayPtr(U* ptr, T size) { #endif } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream-test.c++ new file mode 100644 index 00000000000..297165a63cf --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream-test.c++ @@ -0,0 +1,715 @@ +// Copyright (c) 2019 Cloudflare, Inc. 
and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "byte-stream.h" +#include +#include +#include + +namespace capnp { +namespace { + +kj::Promise expectRead(kj::AsyncInputStream& in, kj::StringPtr expected) { + if (expected.size() == 0) return kj::READY_NOW; + + auto buffer = kj::heapArray(expected.size()); + + auto promise = in.tryRead(buffer.begin(), 1, buffer.size()); + return promise.then(kj::mvCapture(buffer, [&in,expected](kj::Array buffer, size_t amount) { + if (amount == 0) { + KJ_FAIL_ASSERT("expected data never sent", expected); + } + + auto actual = buffer.slice(0, amount); + if (memcmp(actual.begin(), expected.begin(), actual.size()) != 0) { + KJ_FAIL_ASSERT("data from stream doesn't match expected", expected, actual); + } + + return expectRead(in, expected.slice(amount)); + })); +} + +kj::String makeString(size_t size) { + auto bytes = kj::heapArray(size); + for (char& c: bytes) { + c = 'a' + rand() % 26; + } + bytes[bytes.size() - 1] = 0; + return kj::String(kj::mv(bytes)); +}; + +KJ_TEST("KJ -> ByteStream -> KJ without shortening") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory factory1; + ByteStreamFactory factory2; + + auto pipe = kj::newOneWayPipe(); + + auto wrapped = factory1.capnpToKj(factory2.kjToCapnp(kj::mv(pipe.out))); + + { + auto promise = wrapped->write("foo", 3); + KJ_EXPECT(!promise.poll(waitScope)); + expectRead(*pipe.in, "foo").wait(waitScope); + promise.wait(waitScope); + } + + { + // Write more than 1 << 16 bytes at once to exercise write splitting. + auto str = makeString(1 << 17); + auto promise = wrapped->write(str.begin(), str.size()); + KJ_EXPECT(!promise.poll(waitScope)); + expectRead(*pipe.in, str).wait(waitScope); + promise.wait(waitScope); + } + + { + // Write more than 1 << 16 bytes via an array to exercise write splitting. + auto str = makeString(1 << 18); + auto pieces = kj::heapArrayBuilder>(4); + + // Two 2^15 pieces will be combined. + pieces.add(kj::arrayPtr(reinterpret_cast(str.begin()), 1 << 15)); + pieces.add(kj::arrayPtr(reinterpret_cast(str.begin() + (1 << 15)), 1 << 15)); + + // One 2^16 piece will be written alone. + pieces.add(kj::arrayPtr(reinterpret_cast( + str.begin() + (1 << 16)), 1 << 16)); + + // One 2^17 piece will be split. 
+ pieces.add(kj::arrayPtr(reinterpret_cast( + str.begin() + (1 << 17)), str.size() - (1 << 17))); + + auto promise = wrapped->write(pieces); + KJ_EXPECT(!promise.poll(waitScope)); + expectRead(*pipe.in, str).wait(waitScope); + promise.wait(waitScope); + } + + wrapped = nullptr; + KJ_EXPECT(pipe.in->readAllText().wait(waitScope) == ""); +} + +class ExactPointerWriter: public kj::AsyncOutputStream { +public: + kj::ArrayPtr receivedBuffer; + + void fulfill() { + KJ_ASSERT_NONNULL(fulfiller)->fulfill(); + fulfiller = nullptr; + receivedBuffer = nullptr; + } + + kj::Promise write(const void* buffer, size_t size) override { + KJ_ASSERT(fulfiller == nullptr); + receivedBuffer = kj::arrayPtr(reinterpret_cast(buffer), size); + auto paf = kj::newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + return kj::mv(paf.promise); + } + kj::Promise write(kj::ArrayPtr> pieces) override { + KJ_UNIMPLEMENTED("not implemented for test"); + } + kj::Promise whenWriteDisconnected() override { + return kj::NEVER_DONE; + } + + void expectBuffer(kj::StringPtr expected) { + KJ_EXPECT(receivedBuffer == expected.asArray(), receivedBuffer, expected); + } + +private: + kj::Maybe>> fulfiller; +}; + +KJ_TEST("KJ -> ByteStream -> KJ with shortening") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory factory; + + auto pipe = kj::newOneWayPipe(); + + ExactPointerWriter exactPointerWriter; + auto pumpPromise = pipe.in->pumpTo(exactPointerWriter); + + auto wrapped = factory.capnpToKj(factory.kjToCapnp(kj::mv(pipe.out))); + + { + char buffer[4] = "foo"; + auto promise = wrapped->write(buffer, 3); + KJ_EXPECT(!promise.poll(waitScope)); + + // This first write won't have been path-shortened because we didn't know about the shorter + // path yet when it started. + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() != buffer); + KJ_EXPECT(kj::str(exactPointerWriter.receivedBuffer) == "foo"); + exactPointerWriter.fulfill(); + promise.wait(waitScope); + } + + { + char buffer[4] = "foo"; + auto promise = wrapped->write(buffer, 3); + KJ_EXPECT(!promise.poll(waitScope)); + + // The second write was path-shortened so passes through the exact buffer! + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 3); + exactPointerWriter.fulfill(); + promise.wait(waitScope); + } + + wrapped = nullptr; + KJ_EXPECT(pipe.in->readAllText().wait(waitScope) == ""); +} + +KJ_TEST("KJ -> ByteStream -> KJ -> ByteStream -> KJ with shortening") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory factory; + + auto pipe = kj::newOneWayPipe(); + + ExactPointerWriter exactPointerWriter; + auto pumpPromise = pipe.in->pumpTo(exactPointerWriter); + + auto wrapped = factory.capnpToKj(factory.kjToCapnp( + factory.capnpToKj(factory.kjToCapnp(kj::mv(pipe.out))))); + + { + char buffer[4] = "foo"; + auto promise = wrapped->write(buffer, 3); + KJ_EXPECT(!promise.poll(waitScope)); + + // This first write won't have been path-shortened because we didn't know about the shorter + // path yet when it started. + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() != buffer); + KJ_EXPECT(kj::str(exactPointerWriter.receivedBuffer) == "foo"); + exactPointerWriter.fulfill(); + promise.wait(waitScope); + } + + { + char buffer[4] = "bar"; + auto promise = wrapped->write(buffer, 3); + KJ_EXPECT(!promise.poll(waitScope)); + + // The second write was path-shortened so passes through the exact buffer! 
+ KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 3); + exactPointerWriter.fulfill(); + promise.wait(waitScope); + } + + wrapped = nullptr; + KJ_EXPECT(pumpPromise.wait(waitScope) == 6); +} + +KJ_TEST("KJ -> ByteStream -> KJ pipe -> ByteStream -> KJ with shortening") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory factory; + + auto backPipe = kj::newOneWayPipe(); + auto middlePipe = kj::newOneWayPipe(); + + ExactPointerWriter exactPointerWriter; + auto backPumpPromise = backPipe.in->pumpTo(exactPointerWriter); + + auto backWrapped = factory.capnpToKj(factory.kjToCapnp(kj::mv(backPipe.out))); + auto midPumpPormise = middlePipe.in->pumpTo(*backWrapped, 3); + + auto wrapped = factory.capnpToKj(factory.kjToCapnp(kj::mv(middlePipe.out))); + + // Poll whenWriteDisconnected(), mainly as a way to let all the path-shortening settle. + auto disconnectPromise = wrapped->whenWriteDisconnected(); + KJ_EXPECT(!disconnectPromise.poll(waitScope)); + + char buffer[7] = "foobar"; + auto writePromise = wrapped->write(buffer, 6); + KJ_EXPECT(!writePromise.poll(waitScope)); + + // The first three bytes will tunnel all the way down to the destination. + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 3); + exactPointerWriter.fulfill(); + + KJ_EXPECT(midPumpPormise.wait(waitScope) == 3); + + ExactPointerWriter exactPointerWriter2; + midPumpPormise = middlePipe.in->pumpTo(exactPointerWriter2, 6); + KJ_EXPECT(!writePromise.poll(waitScope)); + + // The second half of the "foobar" write will have taken a slow path, because the write was + // restarted in the middle of the stream re-resolving itself. + KJ_EXPECT(kj::str(exactPointerWriter2.receivedBuffer) == "bar"); + exactPointerWriter2.fulfill(); + + // Now that write is done. + writePromise.wait(waitScope); + KJ_EXPECT(!midPumpPormise.poll(waitScope)); + + // If we write again, it'll hit the fast path. + char buffer2[4] = "baz"; + writePromise = wrapped->write(buffer2, 3); + KJ_EXPECT(!writePromise.poll(waitScope)); + KJ_EXPECT(exactPointerWriter2.receivedBuffer.begin() == buffer2); + KJ_EXPECT(exactPointerWriter2.receivedBuffer.size() == 3); + exactPointerWriter2.fulfill(); + + KJ_EXPECT(midPumpPormise.wait(waitScope) == 6); + writePromise.wait(waitScope); +} + +KJ_TEST("KJ -> ByteStream RPC -> KJ pipe -> ByteStream RPC -> KJ with shortening") { + // For this test, we're going to verify that if we have ByteStreams over RPC in both directions + // and we pump a ByteStream to another ByteStream at one end of the connection, it gets shortened + // all the way to the other end! 
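
[Editor's note] The tests above and below all repeat one conversion round trip. The following is an illustrative sketch, not part of the upstream change; it uses only the `ByteStreamFactory` API declared in `byte-stream.h` further down in this diff, and the claim about when in-process shortening applies is the editor's reading of the test setups (one shared factory, or factories joined by RPC as in the next test).

```c++
// Minimal sketch of the round trip these tests exercise: wrap a KJ output stream as a
// capnp ByteStream, then re-expose it as a KJ stream. In-process path shortening relies
// on the factory's CapabilityServerSet recognizing its own stream servers, so the tests
// that expect zero-copy writes route both conversions through the same factory.
#include <capnp/compat/byte-stream.h>
#include <kj/async-io.h>

kj::Own<kj::AsyncOutputStream> roundTrip(capnp::ByteStreamFactory& factory,
                                         kj::Own<kj::AsyncOutputStream> sink) {
  capnp::ByteStream::Client wire = factory.kjToCapnp(kj::mv(sink));  // KJ -> capnp
  return factory.capnpToKj(kj::mv(wire));                            // capnp -> KJ
}
```
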
+ + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory clientFactory; + ByteStreamFactory serverFactory; + + auto backPipe = kj::newOneWayPipe(); + auto middlePipe = kj::newOneWayPipe(); + + ExactPointerWriter exactPointerWriter; + auto backPumpPromise = backPipe.in->pumpTo(exactPointerWriter); + + auto rpcConnection = kj::newTwoWayPipe(); + capnp::TwoPartyClient client(*rpcConnection.ends[0], + clientFactory.kjToCapnp(kj::mv(backPipe.out)), + rpc::twoparty::Side::CLIENT); + capnp::TwoPartyClient server(*rpcConnection.ends[1], + serverFactory.kjToCapnp(kj::mv(middlePipe.out)), + rpc::twoparty::Side::CLIENT); + + auto backWrapped = serverFactory.capnpToKj(server.bootstrap().castAs()); + auto midPumpPormise = middlePipe.in->pumpTo(*backWrapped, 3); + + auto wrapped = clientFactory.capnpToKj(client.bootstrap().castAs()); + + // Poll whenWriteDisconnected(), mainly as a way to let all the path-shortening settle. + auto disconnectPromise = wrapped->whenWriteDisconnected(); + KJ_EXPECT(!disconnectPromise.poll(waitScope)); + + char buffer[7] = "foobar"; + auto writePromise = wrapped->write(buffer, 6); + + // The server side did a 3-byte pump. Path-shortening magic kicks in, and the first three bytes + // of the write on the client side go *directly* to the endpoint without a copy! + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 3); + exactPointerWriter.fulfill(); + + KJ_EXPECT(midPumpPormise.wait(waitScope) == 3); + + ExactPointerWriter exactPointerWriter2; + midPumpPormise = middlePipe.in->pumpTo(exactPointerWriter2, 6); + midPumpPormise.poll(waitScope); + + // The second half of the "foobar" write will have taken a slow path, because the write was + // restarted in the middle of the stream re-resolving itself. + KJ_EXPECT(kj::str(exactPointerWriter2.receivedBuffer) == "bar"); + exactPointerWriter2.fulfill(); + + // Now that write is done. + writePromise.wait(waitScope); + KJ_EXPECT(!midPumpPormise.poll(waitScope)); + + // If we write again, it'll finish the server-side pump (but won't be a zero-copy write since + // it has to go over RPC). + char buffer2[4] = "baz"; + writePromise = wrapped->write(buffer2, 3); + KJ_EXPECT(!midPumpPormise.poll(waitScope)); + KJ_EXPECT(kj::str(exactPointerWriter2.receivedBuffer) == "baz"); + exactPointerWriter2.fulfill(); + + KJ_EXPECT(midPumpPormise.wait(waitScope) == 6); + writePromise.wait(waitScope); +} + +KJ_TEST("KJ -> ByteStream RPC -> KJ pipe -> ByteStream RPC -> KJ with concurrent shortening") { + // This is similar to the previous test, but we start writing before the path-shortening has + // settled. This should result in some writes optimistically bouncing back and forth before + // the stream settles in. 
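
[Editor's note] The assertions in this test lean on Cap'n Proto streaming flow control. As a hedged aside (the exact window size is an RPC-layer detail not shown in this diff), this is roughly what a client-side streaming write looks like and why its promise can resolve before the bytes reach the far end.

```c++
// ByteStream.write() is declared `-> stream` in byte-stream.capnp (later in this diff),
// so writeRequest().send() returns a kj::Promise<void> that the RPC layer may resolve as
// soon as the chunk fits into its flow-control window -- not when the remote write
// completes. That is why the test below may legally start a second write "early".
#include <capnp/compat/byte-stream.h>

kj::Promise<void> sendChunk(capnp::ByteStream::Client stream,
                            kj::ArrayPtr<const kj::byte> chunk) {
  auto req = stream.writeRequest();
  req.setBytes(chunk);
  return req.send();  // streaming call: may already be fulfilled when it returns
}
```
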
+ + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory clientFactory; + ByteStreamFactory serverFactory; + + auto backPipe = kj::newOneWayPipe(); + auto middlePipe = kj::newOneWayPipe(); + + ExactPointerWriter exactPointerWriter; + auto backPumpPromise = backPipe.in->pumpTo(exactPointerWriter); + + auto rpcConnection = kj::newTwoWayPipe(); + capnp::TwoPartyClient client(*rpcConnection.ends[0], + clientFactory.kjToCapnp(kj::mv(backPipe.out)), + rpc::twoparty::Side::CLIENT); + capnp::TwoPartyClient server(*rpcConnection.ends[1], + serverFactory.kjToCapnp(kj::mv(middlePipe.out)), + rpc::twoparty::Side::CLIENT); + + auto backWrapped = serverFactory.capnpToKj(server.bootstrap().castAs()); + auto midPumpPormise = middlePipe.in->pumpTo(*backWrapped); + + auto wrapped = clientFactory.capnpToKj(client.bootstrap().castAs()); + + char buffer[7] = "foobar"; + auto writePromise = wrapped->write(buffer, 6); + + // The write went to RPC so it's not immediately received. + KJ_EXPECT(exactPointerWriter.receivedBuffer == nullptr); + + // Write should be received after we turn the event loop. + waitScope.poll(); + KJ_EXPECT(exactPointerWriter.receivedBuffer != nullptr); + + // Note that the promise that write() returned above has already resolved, because it hit RPC + // and went into the streaming window. + KJ_ASSERT(writePromise.poll(waitScope)); + writePromise.wait(waitScope); + + // Let's start a second write. Even though the first write technically isn't done yet, it's + // legal for us to start a second one because the first write's returned promise optimistically + // resolved for streaming window reasons. This ends up being a very tricky case for our code! + char buffer2[7] = "bazqux"; + auto writePromise2 = wrapped->write(buffer2, 6); + + // Now check the first write was correct, and close it out. + KJ_EXPECT(kj::str(exactPointerWriter.receivedBuffer) == "foobar"); + exactPointerWriter.fulfill(); + + // Turn event loop again. Now the second write arrives. + waitScope.poll(); + KJ_EXPECT(kj::str(exactPointerWriter.receivedBuffer) == "bazqux"); + exactPointerWriter.fulfill(); + writePromise2.wait(waitScope); + + // If we do another write now, it should be zero-copy, because everything has settled. + char buffer3[6] = "corge"; + auto writePromise3 = wrapped->write(buffer3, 5); + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer3); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 5); + KJ_EXPECT(!writePromise3.poll(waitScope)); + exactPointerWriter.fulfill(); + writePromise3.wait(waitScope); +} + +KJ_TEST("KJ -> KJ pipe -> ByteStream RPC -> KJ pipe -> ByteStream RPC -> KJ with concurrent shortening") { + // Same as previous test, except we add a KJ pipe at the beginning and pump it into the top of + // the pipe, which invokes tryPumpFrom() on the KjToCapnpStreamAdapter. 
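
[Editor's note] `tryPumpFrom()` is the hook this test exercises. A rough illustration follows (simplified, not part of the change): KJ's generic `AsyncInputStream::pumpTo()` first offers the pump to the destination through this method, and only falls back to a read/write copy loop if the destination declines.

```c++
// A destination stream that knows a shorter route can claim an entire pump in
// tryPumpFrom(). KjToCapnpStreamAdapter overrides this hook (later in this file's diff)
// so a pump whose source is a CapnpToKjStreamAdapter can be collapsed into a substream.
#include <kj/async-io.h>

class NoopSink final: public kj::AsyncOutputStream {
public:
  kj::Promise<void> write(const void* buffer, size_t size) override { return kj::READY_NOW; }
  kj::Promise<void> write(kj::ArrayPtr<const kj::ArrayPtr<const kj::byte>> pieces) override {
    return kj::READY_NOW;
  }
  kj::Maybe<kj::Promise<uint64_t>> tryPumpFrom(
      kj::AsyncInputStream& input, uint64_t amount = kj::maxValue) override {
    // A smarter sink would take over the pump here and return a promise for the
    // number of bytes pumped.
    return nullptr;  // nullptr = "no optimization available, use the generic copy loop"
  }
  kj::Promise<void> whenWriteDisconnected() override { return kj::NEVER_DONE; }
};
```
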
+ + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory clientFactory; + ByteStreamFactory serverFactory; + + auto backPipe = kj::newOneWayPipe(); + auto middlePipe = kj::newOneWayPipe(); + auto frontPipe = kj::newOneWayPipe(); + + ExactPointerWriter exactPointerWriter; + auto backPumpPromise = backPipe.in->pumpTo(exactPointerWriter); + + auto rpcConnection = kj::newTwoWayPipe(); + capnp::TwoPartyClient client(*rpcConnection.ends[0], + clientFactory.kjToCapnp(kj::mv(backPipe.out)), + rpc::twoparty::Side::CLIENT); + capnp::TwoPartyClient server(*rpcConnection.ends[1], + serverFactory.kjToCapnp(kj::mv(middlePipe.out)), + rpc::twoparty::Side::CLIENT); + + auto backWrapped = serverFactory.capnpToKj(server.bootstrap().castAs()); + auto midPumpPormise = middlePipe.in->pumpTo(*backWrapped); + + auto wrapped = clientFactory.capnpToKj(client.bootstrap().castAs()); + auto frontPumpPromise = frontPipe.in->pumpTo(*wrapped); + + char buffer[7] = "foobar"; + auto writePromise = frontPipe.out->write(buffer, 6); + + // The write went to RPC so it's not immediately received. + KJ_EXPECT(exactPointerWriter.receivedBuffer == nullptr); + + // Write should be received after we turn the event loop. + waitScope.poll(); + KJ_EXPECT(exactPointerWriter.receivedBuffer != nullptr); + + // Note that the promise that write() returned above has already resolved, because it hit RPC + // and went into the streaming window. + KJ_ASSERT(writePromise.poll(waitScope)); + writePromise.wait(waitScope); + + // Let's start a second write. Even though the first write technically isn't done yet, it's + // legal for us to start a second one because the first write's returned promise optimistically + // resolved for streaming window reasons. This ends up being a very tricky case for our code! + char buffer2[7] = "bazqux"; + auto writePromise2 = frontPipe.out->write(buffer2, 6); + + // Now check the first write was correct, and close it out. + KJ_EXPECT(kj::str(exactPointerWriter.receivedBuffer) == "foobar"); + exactPointerWriter.fulfill(); + + // Turn event loop again. Now the second write arrives. + waitScope.poll(); + KJ_EXPECT(kj::str(exactPointerWriter.receivedBuffer) == "bazqux"); + exactPointerWriter.fulfill(); + writePromise2.wait(waitScope); + + // If we do another write now, it should be zero-copy, because everything has settled. + char buffer3[6] = "corge"; + auto writePromise3 = frontPipe.out->write(buffer3, 5); + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer3); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 5); + KJ_EXPECT(!writePromise3.poll(waitScope)); + exactPointerWriter.fulfill(); + writePromise3.wait(waitScope); +} + +KJ_TEST("Two Substreams on one destination") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory factory; + + auto backPipe = kj::newOneWayPipe(); + auto middlePipe1 = kj::newOneWayPipe(); + auto middlePipe2 = kj::newOneWayPipe(); + + ExactPointerWriter exactPointerWriter; + auto backPumpPromise = backPipe.in->pumpTo(exactPointerWriter); + + auto backWrapped = factory.capnpToKj(factory.kjToCapnp(kj::mv(backPipe.out))); + + auto wrapped1 = factory.capnpToKj(factory.kjToCapnp(kj::mv(middlePipe1.out))); + auto wrapped2 = factory.capnpToKj(factory.kjToCapnp(kj::mv(middlePipe2.out))); + + // Declare these buffers out here so that they can't possibly end up with the same address. 
+ char buffer1[4] = "foo"; + char buffer2[4] = "bar"; + + { + auto wrapped = kj::mv(wrapped1); + + // First pump 3 bytes from the first stream. + auto midPumpPormise = middlePipe1.in->pumpTo(*backWrapped, 3); + + // Poll whenWriteDisconnected(), mainly as a way to let all the path-shortening settle. + auto disconnectPromise = wrapped->whenWriteDisconnected(); + KJ_EXPECT(!disconnectPromise.poll(waitScope)); + + auto writePromise = wrapped->write(buffer1, 3); + KJ_EXPECT(!writePromise.poll(waitScope)); + + // The first write will tunnel all the way down to the destination. + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer1); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 3); + exactPointerWriter.fulfill(); + + writePromise.wait(waitScope); + KJ_EXPECT(midPumpPormise.wait(waitScope) == 3); + } + + { + auto wrapped = kj::mv(wrapped2); + + // Now pump another 3 bytes from the second stream. + auto midPumpPormise = middlePipe2.in->pumpTo(*backWrapped, 3); + + // Poll whenWriteDisconnected(), mainly as a way to let all the path-shortening settle. + auto disconnectPromise = wrapped->whenWriteDisconnected(); + KJ_EXPECT(!disconnectPromise.poll(waitScope)); + + auto writePromise = wrapped->write(buffer2, 3); + KJ_EXPECT(!writePromise.poll(waitScope)); + + // The second write will also tunnel all the way down to the destination. + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer2); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 3); + exactPointerWriter.fulfill(); + + writePromise.wait(waitScope); + KJ_EXPECT(midPumpPormise.wait(waitScope) == 3); + } +} + +KJ_TEST("Two Substreams on one destination no limits (pump to EOF)") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory factory; + + auto backPipe = kj::newOneWayPipe(); + auto middlePipe1 = kj::newOneWayPipe(); + auto middlePipe2 = kj::newOneWayPipe(); + + ExactPointerWriter exactPointerWriter; + auto backPumpPromise = backPipe.in->pumpTo(exactPointerWriter); + + auto backWrapped = factory.capnpToKj(factory.kjToCapnp(kj::mv(backPipe.out))); + + auto wrapped1 = factory.capnpToKj(factory.kjToCapnp(kj::mv(middlePipe1.out))); + auto wrapped2 = factory.capnpToKj(factory.kjToCapnp(kj::mv(middlePipe2.out))); + + // Declare these buffers out here so that they can't possibly end up with the same address. + char buffer1[4] = "foo"; + char buffer2[4] = "bar"; + + { + auto wrapped = kj::mv(wrapped1); + + // First pump from the first stream until EOF. + auto midPumpPormise = middlePipe1.in->pumpTo(*backWrapped); + + // Poll whenWriteDisconnected(), mainly as a way to let all the path-shortening settle. + auto disconnectPromise = wrapped->whenWriteDisconnected(); + KJ_EXPECT(!disconnectPromise.poll(waitScope)); + + auto writePromise = wrapped->write(buffer1, 3); + KJ_EXPECT(!writePromise.poll(waitScope)); + + // The first write will tunnel all the way down to the destination. + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer1); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 3); + exactPointerWriter.fulfill(); + + writePromise.wait(waitScope); + { auto drop = kj::mv(wrapped); } + KJ_EXPECT(midPumpPormise.wait(waitScope) == 3); + } + + { + auto wrapped = kj::mv(wrapped2); + + // Now pump from the second stream until EOF. + auto midPumpPormise = middlePipe2.in->pumpTo(*backWrapped); + + // Poll whenWriteDisconnected(), mainly as a way to let all the path-shortening settle. 
+ auto disconnectPromise = wrapped->whenWriteDisconnected(); + KJ_EXPECT(!disconnectPromise.poll(waitScope)); + + auto writePromise = wrapped->write(buffer2, 3); + KJ_EXPECT(!writePromise.poll(waitScope)); + + // The second write will also tunnel all the way down to the destination. + KJ_EXPECT(exactPointerWriter.receivedBuffer.begin() == buffer2); + KJ_EXPECT(exactPointerWriter.receivedBuffer.size() == 3); + exactPointerWriter.fulfill(); + + writePromise.wait(waitScope); + { auto drop = kj::mv(wrapped); } + KJ_EXPECT(midPumpPormise.wait(waitScope) == 3); + } +} + +KJ_TEST("KJ -> ByteStream RPC -> KJ promise stream -> ByteStream -> KJ") { + // Test what happens if we queue up several requests on a ByteStream and then it resolves to + // a shorter path. + + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory factory; + ExactPointerWriter exactPointerWriter; + + auto paf = kj::newPromiseAndFulfiller>(); + auto backCap = factory.kjToCapnp(kj::newPromisedStream(kj::mv(paf.promise))); + + auto rpcPipe = kj::newTwoWayPipe(); + capnp::TwoPartyClient client(*rpcPipe.ends[0]); + capnp::TwoPartyClient server(*rpcPipe.ends[1], kj::mv(backCap), rpc::twoparty::Side::SERVER); + auto front = factory.capnpToKj(client.bootstrap().castAs()); + + // These will all queue up in the RPC layer. + front->write("foo", 3).wait(waitScope); + front->write("bar", 3).wait(waitScope); + front->write("baz", 3).wait(waitScope); + front->write("qux", 3).wait(waitScope); + + // Make sure those writes manage to get all the way through the RPC system and queue up in the + // LocalClient wrapping the CapnpToKjStreamAdapter at the other end. + waitScope.poll(); + + // Fulfill the promise. + paf.fulfiller->fulfill(factory.capnpToKj(factory.kjToCapnp(kj::attachRef(exactPointerWriter)))); + waitScope.poll(); + + // Now: + // - "foo" should have made it all the way down to the final output stream. + // - "bar", "baz", and "qux" are queued on the CapnpToKjStreamAdapter immediately wrapping the + // KJ promise stream. + // - But that stream adapter has discovered that there's another capnp stream downstream and has + // resolved itself to the later stream. + // - A new call at this time should NOT be allowed to hop the queue. + + exactPointerWriter.expectBuffer("foo"); + + front->write("corge", 5).wait(waitScope); + waitScope.poll(); + + exactPointerWriter.fulfill(); + + waitScope.poll(); + exactPointerWriter.expectBuffer("bar"); + exactPointerWriter.fulfill(); + + waitScope.poll(); + exactPointerWriter.expectBuffer("baz"); + exactPointerWriter.fulfill(); + + waitScope.poll(); + exactPointerWriter.expectBuffer("qux"); + exactPointerWriter.fulfill(); + + waitScope.poll(); + exactPointerWriter.expectBuffer("corge"); + exactPointerWriter.fulfill(); + + // There may still be some detach()ed promises holding on to some capabilities that transitively + // hold a fake Own pointing at exactPointerWriter, which is actually on the + // stack. We created a fake Own pointing to a stack variable by using + // kj::attachRef(exactPointerWriter), above; it does not actually own the object it points to. + // We need to make sure those Owns are dropped before exactPoniterWriter is destroyed, otherwise + // ASAN will flag some invalid reads (of exactPointerWriter's vtable, in particular). 
+ waitScope.cancelAllDetached(); +} + +// TODO: +// - Parallel writes (requires streaming) +// - Write to KJ -> capnp -> RPC -> capnp -> KJ loopback without shortening, verify we can write +// several things to buffer (requires streaming). +// - Again, but with shortening which only occurs after some promise resolve. + +} // namespace +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream.c++ new file mode 100644 index 00000000000..c7b709dc54f --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream.c++ @@ -0,0 +1,1029 @@ +// Copyright (c) 2019 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "byte-stream.h" +#include +#include + +namespace capnp { + +const uint MAX_BYTES_PER_WRITE = 1 << 16; + +class ByteStreamFactory::StreamServerBase: public capnp::ByteStream::Server { +public: + virtual void returnStream(uint64_t written) = 0; + // Called after the StreamServerBase's internal kj::AsyncOutputStream has been borrowed, to + // indicate that the borrower is done. + // + // A stream becomes borrowed either when getShortestPath() returns a BorrowedStream, or when + // a SubstreamImpl is constructed wrapping an existing stream. + + struct BorrowedStream { + // Represents permission to use the StreamServerBase's inner AsyncOutputStream directly, up + // to some limit of bytes written. + + StreamServerBase& lender; + kj::AsyncOutputStream& stream; + uint64_t limit; + }; + + typedef kj::OneOf, capnp::ByteStream::Client*, BorrowedStream> ShortestPath; + + virtual ShortestPath getShortestPath() = 0; + // Called by KjToCapnpStreamAdapter when it has determined that its inner ByteStream::Client + // actually points back to a StreamServerBase in the same process created by the same + // ByteStreamFactory. Returns the best shortened path to use, or a promise that resolves when the + // shortest path is known. + + virtual void directEnd() = 0; + // Called by KjToCapnpStreamAdapter's destructor when it has determined that its inner + // ByteStream::Client actually points back to a StreamServerBase in the same process created by + // the same ByteStreamFactory. Since destruction of a KJ stream signals EOF, we need to propagate + // that by destroying our underlying stream. + // TODO(cleanup): When KJ streams evolve an end() method, this can go away. 
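
[Editor's note] To make the borrow/return contract above concrete, here is a condensed restatement as a hypothetical free function; it is not part of the diff, and in the real code this pattern lives inside KjToCapnpStreamAdapter::write() further down. The template parameter merely stands in for the private StreamServerBase::BorrowedStream type.

```c++
// Hypothetical illustration of the borrowing protocol: write directly to the lent-out
// KJ stream, stay within `limit`, then report the byte count back via returnStream()
// so the lender can update its own bookkeeping (and possibly notice its limit was hit).
template <typename BorrowedStream>  // stands in for StreamServerBase::BorrowedStream
kj::Promise<void> writeViaBorrow(BorrowedStream borrowed, kj::ArrayPtr<const kj::byte> data) {
  // Caller is responsible for ensuring data.size() <= borrowed.limit.
  return borrowed.stream.write(data.begin(), data.size())
      .then([borrowed, size = data.size()]() mutable {
    borrowed.lender.returnStream(size);  // hand the stream back to the lender
  });
}
```
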
+}; + +class ByteStreamFactory::SubstreamImpl final: public StreamServerBase { +public: + SubstreamImpl(ByteStreamFactory& factory, + StreamServerBase& parent, + capnp::ByteStream::Client ownParent, + kj::AsyncOutputStream& stream, + capnp::ByteStream::SubstreamCallback::Client callback, + uint64_t limit, + kj::PromiseFulfillerPair paf = kj::newPromiseAndFulfiller()) + : factory(factory), + state(Streaming {parent, kj::mv(ownParent), stream, kj::mv(callback)}), + limit(limit), + resolveFulfiller(kj::mv(paf.fulfiller)), + resolvePromise(paf.promise.fork()) {} + + // --------------------------------------------------------------------------- + // implements StreamServerBase + + void returnStream(uint64_t written) override { + completed += written; + KJ_ASSERT(completed <= limit); + auto borrowed = kj::mv(state.get()); + state = kj::mv(borrowed.originalState); + + if (completed == limit) { + limitReached(); + } + } + + ShortestPath getShortestPath() override { + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(redirected, Redirected) { + return &redirected.replacement; + } + KJ_CASE_ONEOF(e, Ended) { + KJ_FAIL_REQUIRE("already called end()"); + } + KJ_CASE_ONEOF(b, Borrowed) { + KJ_FAIL_REQUIRE("can't call other methods while substream is active"); + } + KJ_CASE_ONEOF(streaming, Streaming) { + auto& stream = streaming.stream; + auto oldState = kj::mv(streaming); + state = Borrowed { kj::mv(oldState) }; + return BorrowedStream { *this, stream, limit - completed }; + } + } + KJ_UNREACHABLE; + } + + void directEnd() override { + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(redirected, Redirected) { + // Ugh I guess we need to send a real end() request here. + redirected.replacement.endRequest(MessageSize {2, 0}).send().detach([](kj::Exception&&){}); + } + KJ_CASE_ONEOF(e, Ended) { + // whatever + } + KJ_CASE_ONEOF(b, Borrowed) { + // ... whatever. + } + KJ_CASE_ONEOF(streaming, Streaming) { + auto req = streaming.callback.endedRequest(MessageSize {4, 0}); + req.setByteCount(completed); + req.send().detach([](kj::Exception&&){}); + streaming.parent.returnStream(completed); + state = Ended(); + } + } + } + + // --------------------------------------------------------------------------- + // implements ByteStream::Server RPC interface + + kj::Maybe> shortenPath() override { + return resolvePromise.addBranch() + .then([this]() -> Capability::Client { + return state.get().replacement; + }); + } + + kj::Promise write(WriteContext context) override { + auto params = context.getParams(); + auto data = params.getBytes(); + + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(redirected, Redirected) { + auto req = redirected.replacement.writeRequest(params.totalSize()); + req.setBytes(data); + return req.send(); + } + KJ_CASE_ONEOF(e, Ended) { + KJ_FAIL_REQUIRE("already called end()"); + } + KJ_CASE_ONEOF(b, Borrowed) { + KJ_FAIL_REQUIRE("can't call other methods while stream is borrowed"); + } + KJ_CASE_ONEOF(streaming, Streaming) { + if (completed + data.size() < limit) { + completed += data.size(); + return streaming.stream.write(data.begin(), data.size()); + } else { + // This write passes the limit. + uint64_t remainder = limit - completed; + auto leftover = data.slice(remainder, data.size()); + return streaming.stream.write(data.begin(), remainder) + .then([this, leftover]() -> kj::Promise { + completed = limit; + limitReached(); + + if (leftover.size() > 0) { + // Need to forward the leftover bytes to the next stream. 
+ auto req = state.get().replacement.writeRequest( + MessageSize { 4 + leftover.size() / sizeof(capnp::word), 0 }); + req.setBytes(leftover); + return req.send(); + } else { + return kj::READY_NOW; + } + }); + } + } + } + KJ_UNREACHABLE; + } + + kj::Promise end(EndContext context) override { + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(redirected, Redirected) { + return context.tailCall(redirected.replacement.endRequest(MessageSize {2,0})); + } + KJ_CASE_ONEOF(e, Ended) { + KJ_FAIL_REQUIRE("already called end()"); + } + KJ_CASE_ONEOF(b, Borrowed) { + KJ_FAIL_REQUIRE("can't call other methods while stream is borrowed"); + } + KJ_CASE_ONEOF(streaming, Streaming) { + auto req = streaming.callback.endedRequest(MessageSize {4, 0}); + req.setByteCount(completed); + auto result = req.send().ignoreResult(); + streaming.parent.returnStream(completed); + state = Ended(); + return result; + } + } + KJ_UNREACHABLE; + } + + kj::Promise getSubstream(GetSubstreamContext context) override { + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(redirected, Redirected) { + auto params = context.getParams(); + auto req = redirected.replacement.getSubstreamRequest(params.totalSize()); + req.setCallback(params.getCallback()); + req.setLimit(params.getLimit()); + return context.tailCall(kj::mv(req)); + } + KJ_CASE_ONEOF(e, Ended) { + KJ_FAIL_REQUIRE("already called end()"); + } + KJ_CASE_ONEOF(b, Borrowed) { + KJ_FAIL_REQUIRE("can't call other methods while stream is borrowed"); + } + KJ_CASE_ONEOF(streaming, Streaming) { + auto params = context.getParams(); + auto callback = params.getCallback(); + auto limit = params.getLimit(); + context.releaseParams(); + auto results = context.getResults(MessageSize { 2, 1 }); + results.setSubstream(factory.streamSet.add(kj::heap( + factory, *this, thisCap(), streaming.stream, kj::mv(callback), kj::mv(limit)))); + state = Borrowed { kj::mv(streaming) }; + return kj::READY_NOW; + } + } + KJ_UNREACHABLE; + } + +private: + ByteStreamFactory& factory; + + struct Streaming { + StreamServerBase& parent; + capnp::ByteStream::Client ownParent; + kj::AsyncOutputStream& stream; + capnp::ByteStream::SubstreamCallback::Client callback; + }; + struct Borrowed { + Streaming originalState; + }; + struct Redirected { + capnp::ByteStream::Client replacement; + }; + struct Ended {}; + + kj::OneOf state; + + uint64_t limit; + uint64_t completed = 0; + + kj::Own> resolveFulfiller; + kj::ForkedPromise resolvePromise; + + void limitReached() { + auto& streaming = state.get(); + auto next = streaming.callback.reachedLimitRequest(capnp::MessageSize {2,0}) + .send().getNext(); + + // Set the next stream as our replacement. + streaming.parent.returnStream(limit); + state = Redirected { kj::mv(next) }; + resolveFulfiller->fulfill(); + } +}; + +// ======================================================================================= + +class ByteStreamFactory::CapnpToKjStreamAdapter final: public StreamServerBase { + // Implements Cap'n Proto ByteStream as a wrapper around a KJ stream. 
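
[Editor's note] The adapter's five states are spread over several hundred lines below, and the angle-bracketed template arguments of its `kj::OneOf` are stripped in this diff rendering. The summary below is the editor's reconstruction from the surrounding code; treat the exact spellings as a best reading rather than a verbatim quote.

```c++
// Reconstructed view of CapnpToKjStreamAdapter's state member (declared near the end of
// the class in this diff):
//
//   kj::Own<PathProber>             - still probing the wrapped KJ stream for a shorter path
//   kj::Own<kj::AsyncOutputStream>  - settled: write()/end() act on the KJ stream directly
//   capnp::ByteStream::Client       - settled: calls are forwarded to another capnp stream
//   Borrowed                        - the KJ stream is temporarily lent to a substream
//   Ended                           - end() was called or the stream was dropped
kj::OneOf<kj::Own<PathProber>, kj::Own<kj::AsyncOutputStream>,
          capnp::ByteStream::Client, Borrowed, Ended> state;
```
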
+ + class SubstreamCallbackImpl; + +public: + class PathProber; + + CapnpToKjStreamAdapter(ByteStreamFactory& factory, + kj::Own inner) + : factory(factory), + state(kj::heap(*this, kj::mv(inner))) { + state.get>()->startProbing(); + } + + CapnpToKjStreamAdapter(ByteStreamFactory& factory, + kj::Own pathProber) + : factory(factory), + state(kj::mv(pathProber)) { + state.get>()->setNewParent(*this); + } + + // --------------------------------------------------------------------------- + // implements StreamServerBase + + void returnStream(uint64_t written) override { + auto stream = kj::mv(state.get().stream); + state = kj::mv(stream); + } + + ShortestPath getShortestPath() override { + // Called by KjToCapnpStreamAdapter when it has determined that its inner ByteStream::Client + // actually points back to a CapnpToKjStreamAdapter in the same process. Returns the best + // shortened path to use, or a promise that resolves when the shortest path is known. + + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(prober, kj::Own) { + return prober->whenReady(); + } + KJ_CASE_ONEOF(kjStream, kj::Own) { + auto& streamRef = *kjStream; + state = Borrowed { kj::mv(kjStream) }; + return StreamServerBase::BorrowedStream { *this, streamRef, kj::maxValue }; + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client) { + return &capnpStream; + } + KJ_CASE_ONEOF(b, Borrowed) { + KJ_FAIL_REQUIRE("concurrent streaming calls disallowed") { break; } + return kj::Promise(kj::READY_NOW); + } + KJ_CASE_ONEOF(e, Ended) { + KJ_FAIL_REQUIRE("already ended") { break; } + return kj::Promise(kj::READY_NOW); + } + } + KJ_UNREACHABLE; + } + + void directEnd() override { + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(prober, kj::Own) { + state = Ended(); + } + KJ_CASE_ONEOF(kjStream, kj::Own) { + state = Ended(); + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client) { + // Ugh I guess we need to send a real end() request here. + capnpStream.endRequest(MessageSize {2, 0}).send().detach([](kj::Exception&&){}); + } + KJ_CASE_ONEOF(b, Borrowed) { + // Fine, ignore. + } + KJ_CASE_ONEOF(e, Ended) { + // Fine, ignore. + } + } + } + + // --------------------------------------------------------------------------- + // PathProber + + class PathProber final: public kj::AsyncInputStream { + public: + PathProber(CapnpToKjStreamAdapter& parent, kj::Own inner, + kj::PromiseFulfillerPair paf = kj::newPromiseAndFulfiller()) + : parent(parent), inner(kj::mv(inner)), + readyPromise(paf.promise.fork()), + readyFulfiller(kj::mv(paf.fulfiller)), + task(nullptr) {} + + void startProbing() { + task = probeForShorterPath(); + } + + void setNewParent(CapnpToKjStreamAdapter& newParent) { + KJ_ASSERT(parent == nullptr); + parent = newParent; + auto paf = kj::newPromiseAndFulfiller(); + readyPromise = paf.promise.fork(); + readyFulfiller = kj::mv(paf.fulfiller); + } + + kj::Promise whenReady() { + return readyPromise.addBranch(); + } + + kj::Promise pumpToShorterPath(capnp::ByteStream::Client target, uint64_t limit) { + // If our probe succeeds in finding a KjToCapnpStreamAdapter somewhere down the stack, that + // will call this method to provide the shortened path. + + KJ_IF_MAYBE(currentParent, parent) { + parent = nullptr; + + auto self = kj::mv(currentParent->state.get>()); + currentParent->state = Ended(); // temporary, we'll set this properly below + KJ_ASSERT(self.get() == this); + + // Open a substream on the target stream. 
+ auto req = target.getSubstreamRequest(); + req.setLimit(limit); + auto paf = kj::newPromiseAndFulfiller(); + req.setCallback(kj::heap(currentParent->factory, + kj::mv(self), kj::mv(paf.fulfiller), limit)); + + // Now we hook up the incoming stream adapter to point directly to this substream, yay. + currentParent->state = req.send().getSubstream(); + + // Let the original CapnpToKjStreamAdapter know that it's safe to handle incoming requests. + readyFulfiller->fulfill(); + + // It's now up to the SubstreamCallbackImpl to signal when the pump is done. + return kj::mv(paf.promise); + } else { + // We already completed a path-shortening. Probably SubstreamCallbackImpl::ended() was + // eventually called, meaning the substream was ended without redirecting back to us. So, + // we're at EOF. + return uint64_t(0); + } + } + + kj::Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { + // If this is called, it means the tryPumpFrom() in probeForShorterPath() eventually invoked + // code that tries to read manually from the source. We don't know what this code is doing + // exactly, but we do know for sure that the endpoint is not a KjToCapnpStreamAdapter, so + // we can't optimize. Instead, we pretend that we immediately hit EOF, ending the pump. This + // works because pumps do not propagate EOF -- the destination can still receive further + // writes and pumps. Basically our probing pump becomes a no-op, and then we revert to having + // each write() RPC directly call write() on the inner stream. + return size_t(0); + } + + kj::Promise pumpTo(kj::AsyncOutputStream& output, uint64_t amount) override { + // Call the stream's `tryPumpFrom()` as a way to discover where the data will eventually go, + // in hopes that we find we can shorten the path. + KJ_IF_MAYBE(promise, output.tryPumpFrom(*this, amount)) { + // tryPumpFrom() returned non-null. Either it called `tryRead()` or `pumpTo()` (see + // below), or it plans to do so in the future. + return kj::mv(*promise); + } else { + // There is no shorter path. As with tryRead(), we pretend we get immediate EOF. + return uint64_t(0); + } + } + + private: + kj::Maybe parent; + kj::Own inner; + kj::ForkedPromise readyPromise; + kj::Own> readyFulfiller; + kj::Promise task; + + friend class SubstreamCallbackImpl; + + kj::Promise probeForShorterPath() { + return kj::evalNow([&]() -> kj::Promise { + return pumpTo(*inner, kj::maxValue); + }).then([this](uint64_t actual) { + KJ_IF_MAYBE(currentParent, parent) { + KJ_IF_MAYBE(prober, currentParent->state.tryGet>()) { + // Either we didn't find any shorter path at all during probing and faked an EOF + // to get out of the probe (see comments in tryRead(), or we DID find a shorter path, + // completed a pumpTo() using a substream, and that substream redirected back to us, + // and THEN we couldn't find any further shorter paths for subsequent pumps. + + // HACK: If we overwrite the Probing state now, we'll delete ourselves and delete + // this task promise, which is an error... let the event loop do it later by + // detaching. + task.attach(kj::mv(*prober)).detach([](kj::Exception&&){}); + parent = nullptr; + + // OK, now we can change the parent state and signal it to proceed. + currentParent->state = kj::mv(inner); + readyFulfiller->fulfill(); + } + } + }).eagerlyEvaluate([this](kj::Exception&& exception) mutable { + // Something threw, so propagate the exception to break the parent. 
+ readyFulfiller->reject(kj::mv(exception)); + }); + } + }; + +protected: + // --------------------------------------------------------------------------- + // implements ByteStream::Server RPC interface + + kj::Maybe> shortenPath() override { + return shortenPathImpl(); + } + kj::Promise shortenPathImpl() { + // Called by RPC implementation to find out if a shorter path presents itself. + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(prober, kj::Own) { + return prober->whenReady().then([this]() { + KJ_ASSERT(!state.is>()); + return shortenPathImpl(); + }); + } + KJ_CASE_ONEOF(kjStream, kj::Own) { + // No shortening possible. Pretend we never resolve so that calls continue to be routed + // to us forever. + return kj::NEVER_DONE; + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client) { + return Capability::Client(capnpStream); + } + KJ_CASE_ONEOF(b, Borrowed) { + KJ_FAIL_REQUIRE("concurrent streaming calls disallowed") { break; } + return kj::NEVER_DONE; + } + KJ_CASE_ONEOF(e, Ended) { + // No shortening possible. Pretend we never resolve so that calls continue to be routed + // to us forever. + return kj::NEVER_DONE; + } + } + KJ_UNREACHABLE; + } + + kj::Promise write(WriteContext context) override { + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(prober, kj::Own) { + return prober->whenReady().then([this, context]() mutable { + KJ_ASSERT(!state.is>()); + return write(context); + }); + } + KJ_CASE_ONEOF(kjStream, kj::Own) { + auto data = context.getParams().getBytes(); + return kjStream->write(data.begin(), data.size()); + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client) { + auto params = context.getParams(); + auto req = capnpStream.writeRequest(params.totalSize()); + req.setBytes(params.getBytes()); + return req.send(); + } + KJ_CASE_ONEOF(b, Borrowed) { + KJ_FAIL_REQUIRE("concurrent streaming calls disallowed") { break; } + return kj::READY_NOW; + } + KJ_CASE_ONEOF(e, Ended) { + KJ_FAIL_REQUIRE("already called end()") { break; } + return kj::READY_NOW; + } + } + KJ_UNREACHABLE; + } + + kj::Promise end(EndContext context) override { + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(prober, kj::Own) { + return prober->whenReady().then([this, context]() mutable { + KJ_ASSERT(!state.is>()); + return end(context); + }); + } + KJ_CASE_ONEOF(kjStream, kj::Own) { + // TODO(someday): When KJ adds a proper .end() call, use it here. For now, we must + // drop the stream to close it. 
+ state = Ended(); + return kj::READY_NOW; + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client) { + auto params = context.getParams(); + auto req = capnpStream.endRequest(params.totalSize()); + return context.tailCall(kj::mv(req)); + } + KJ_CASE_ONEOF(b, Borrowed) { + KJ_FAIL_REQUIRE("concurrent streaming calls disallowed") { break; } + return kj::READY_NOW; + } + KJ_CASE_ONEOF(e, Ended) { + KJ_FAIL_REQUIRE("already called end()") { break; } + return kj::READY_NOW; + } + } + KJ_UNREACHABLE; + } + + kj::Promise getSubstream(GetSubstreamContext context) override { + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(prober, kj::Own) { + return prober->whenReady().then([this, context]() mutable { + KJ_ASSERT(!state.is>()); + return getSubstream(context); + }); + } + KJ_CASE_ONEOF(kjStream, kj::Own) { + auto params = context.getParams(); + auto callback = params.getCallback(); + uint64_t limit = params.getLimit(); + context.releaseParams(); + + auto results = context.initResults(MessageSize {2, 1}); + results.setSubstream(factory.streamSet.add(kj::heap( + factory, *this, thisCap(), *kjStream, kj::mv(callback), kj::mv(limit)))); + state = Borrowed { kj::mv(kjStream) }; + return kj::READY_NOW; + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client) { + auto params = context.getParams(); + auto req = capnpStream.getSubstreamRequest(params.totalSize()); + req.setCallback(params.getCallback()); + req.setLimit(params.getLimit()); + return context.tailCall(kj::mv(req)); + } + KJ_CASE_ONEOF(b, Borrowed) { + KJ_FAIL_REQUIRE("concurrent streaming calls disallowed") { break; } + return kj::READY_NOW; + } + KJ_CASE_ONEOF(e, Ended) { + KJ_FAIL_REQUIRE("already called end()") { break; } + return kj::READY_NOW; + } + } + KJ_UNREACHABLE; + } + +private: + ByteStreamFactory& factory; + + struct Borrowed { kj::Own stream; }; + struct Ended {}; + + kj::OneOf, kj::Own, + capnp::ByteStream::Client, Borrowed, Ended> state; + + class SubstreamCallbackImpl final: public capnp::ByteStream::SubstreamCallback::Server { + public: + SubstreamCallbackImpl(ByteStreamFactory& factory, + kj::Own pathProber, + kj::Own> originalPumpfulfiller, + uint64_t originalPumpLimit) + : factory(factory), + pathProber(kj::mv(pathProber)), + originalPumpfulfiller(kj::mv(originalPumpfulfiller)), + originalPumpLimit(originalPumpLimit) {} + + ~SubstreamCallbackImpl() noexcept(false) { + if (!done) { + originalPumpfulfiller->reject(KJ_EXCEPTION(DISCONNECTED, + "stream disconnected because SubstreamCallbackImpl was never called back")); + } + } + + kj::Promise ended(EndedContext context) override { + KJ_REQUIRE(!done); + uint64_t actual = context.getParams().getByteCount(); + KJ_REQUIRE(actual <= originalPumpLimit); + + done = true; + + // EOF before pump completed. Signal a short pump. + originalPumpfulfiller->fulfill(context.getParams().getByteCount()); + + // Give the original pump task a chance to finish up. + return pathProber->task.attach(kj::mv(pathProber)); + } + + kj::Promise reachedLimit(ReachedLimitContext context) override { + KJ_REQUIRE(!done); + done = true; + + // Allow the shortened stream to redirect back to our original underlying stream. + auto results = context.getResults(capnp::MessageSize { 4, 1 }); + results.setNext(factory.streamSet.add( + kj::heap(factory, kj::mv(pathProber)))); + + // The full pump completed. 
Note that it's important that we fulfill this after the + // PathProber has been attached to the new CapnpToKjStreamAdapter, which will have happened + // in CapnpToKjStreamAdapter's constructor, which calls pathProber->setNewParent(). + originalPumpfulfiller->fulfill(kj::cp(originalPumpLimit)); + + return kj::READY_NOW; + } + + private: + ByteStreamFactory& factory; + kj::Own pathProber; + kj::Own> originalPumpfulfiller; + uint64_t originalPumpLimit; + bool done = false; + }; +}; + +// ======================================================================================= + +class ByteStreamFactory::KjToCapnpStreamAdapter final: public kj::AsyncOutputStream { +public: + KjToCapnpStreamAdapter(ByteStreamFactory& factory, capnp::ByteStream::Client innerParam) + : factory(factory), + inner(kj::mv(innerParam)), + findShorterPathTask(findShorterPath(inner).fork()) {} + + ~KjToCapnpStreamAdapter() noexcept(false) { + // HACK: KJ streams are implicitly ended on destruction, but the RPC stream needs a call. We + // use a detached promise for now, which is probably OK since capabilities are refcounted and + // asynchronously destroyed anyway. + // TODO(cleanup): Fix this when KJ streads add an explicit end() method. + KJ_IF_MAYBE(o, optimized) { + o->directEnd(); + } else { + inner.endRequest(MessageSize {2, 0}).send().detach([](kj::Exception&&){}); + } + } + + kj::Promise write(const void* buffer, size_t size) override { + KJ_SWITCH_ONEOF(getShortestPath()) { + KJ_CASE_ONEOF(promise, kj::Promise) { + return promise.then([this,buffer,size]() { + return write(buffer, size); + }); + } + KJ_CASE_ONEOF(kjStream, StreamServerBase::BorrowedStream) { + auto limit = kj::min(kjStream.limit, MAX_BYTES_PER_WRITE); + if (size <= limit) { + auto promise = kjStream.stream.write(buffer, size); + return promise.then([kjStream,size]() mutable { + kjStream.lender.returnStream(size); + }); + } else { + auto promise = kjStream.stream.write(buffer, limit); + return promise.then([this,kjStream,buffer,size,limit]() mutable { + kjStream.lender.returnStream(limit); + return write(reinterpret_cast(buffer) + limit, + size - limit); + }); + } + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client*) { + if (size <= MAX_BYTES_PER_WRITE) { + auto req = capnpStream->writeRequest(MessageSize { 8 + size / sizeof(word), 0 }); + req.setBytes(kj::arrayPtr(reinterpret_cast(buffer), size)); + return req.send(); + } else { + auto req = capnpStream->writeRequest( + MessageSize { 8 + MAX_BYTES_PER_WRITE / sizeof(word), 0 }); + req.setBytes(kj::arrayPtr(reinterpret_cast(buffer), MAX_BYTES_PER_WRITE)); + return req.send().then([this,buffer,size]() mutable { + return write(reinterpret_cast(buffer) + MAX_BYTES_PER_WRITE, + size - MAX_BYTES_PER_WRITE); + }); + } + } + } + KJ_UNREACHABLE; + } + + kj::Promise write(kj::ArrayPtr> pieces) override { + KJ_SWITCH_ONEOF(getShortestPath()) { + KJ_CASE_ONEOF(promise, kj::Promise) { + return promise.then([this,pieces]() { + return write(pieces); + }); + } + KJ_CASE_ONEOF(kjStream, StreamServerBase::BorrowedStream) { + size_t size = 0; + for (auto& piece: pieces) { size += piece.size(); } + auto limit = kj::min(kjStream.limit, MAX_BYTES_PER_WRITE); + if (size <= limit) { + auto promise = kjStream.stream.write(pieces); + return promise.then([kjStream,size]() mutable { + kjStream.lender.returnStream(size); + }); + } else { + // ughhhhhhhhhh, we need to split the pieces. 
+ return splitAndWrite(pieces, kjStream.limit, + [kjStream,limit](kj::ArrayPtr> pieces) mutable { + return kjStream.stream.write(pieces).then([kjStream,limit]() mutable { + kjStream.lender.returnStream(limit); + }); + }); + } + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client*) { + auto writePieces = [capnpStream](kj::ArrayPtr> pieces) { + size_t size = 0; + for (auto& piece: pieces) size += piece.size(); + auto req = capnpStream->writeRequest(MessageSize { 8 + size / sizeof(word), 0 }); + auto out = req.initBytes(size); + byte* ptr = out.begin(); + for (auto& piece: pieces) { + memcpy(ptr, piece.begin(), piece.size()); + ptr += piece.size(); + } + KJ_ASSERT(ptr == out.end()); + return req.send(); + }; + + size_t size = 0; + for (auto& piece: pieces) size += piece.size(); + if (size <= MAX_BYTES_PER_WRITE) { + return writePieces(pieces); + } else { + // ughhhhhhhhhh, we need to split the pieces. + return splitAndWrite(pieces, MAX_BYTES_PER_WRITE, writePieces); + } + } + } + KJ_UNREACHABLE; + } + + kj::Maybe> tryPumpFrom( + kj::AsyncInputStream& input, uint64_t amount = kj::maxValue) override { + KJ_IF_MAYBE(rpc, kj::dynamicDowncastIfAvailable(input)) { + // Oh interesting, it turns we're hosting an incoming ByteStream which is pumping to this + // outgoing ByteStream. We can let the Cap'n Proto RPC layer know that it can shorten the + // path from one to the other. + return rpc->pumpToShorterPath(inner, amount); + } else { + return pumpLoop(input, 0, amount); + } + } + + kj::Promise whenWriteDisconnected() override { + return findShorterPathTask.addBranch(); + } + +private: + ByteStreamFactory& factory; + capnp::ByteStream::Client inner; + kj::Maybe optimized; + + kj::ForkedPromise findShorterPathTask; + // This serves two purposes: + // 1. Waits for the capability to resolve (if it is a promise), and then shortens the path if + // possible. + // 2. Implements whenWriteDisconnected(). + + kj::Promise findShorterPath(capnp::ByteStream::Client& capnpClient) { + // If the capnp stream turns out to resolve back to this process, shorten the path. + // Also, implement whenWriteDisconnected() based on this. + return factory.streamSet.getLocalServer(capnpClient) + .then([this](kj::Maybe server) -> kj::Promise { + KJ_IF_MAYBE(s, server) { + // Yay, we discovered that the ByteStream actually points back to a local KJ stream. + // We can use this to shorten the path by skipping the RPC machinery. + return findShorterPath(kj::downcast(*s)); + } else { + // The capability is fully-resolved. This suggests that the remote implementation is + // NOT a CapnpToKjStreamAdapter at all, because CapnpToKjStreamAdapter is designed to + // always look like a promise. It's some other implementation that doesn't present + // itself as a promise. We have no way to detect when it is disconnected. + return kj::NEVER_DONE; + } + }, [](kj::Exception&& e) -> kj::Promise { + // getLocalServer() thrown when the capability is a promise cap that rejects. We can + // use this to implement whenWriteDisconnected(). + // + // (Note that because this exception handler is passed to the .then(), it does NOT catch + // eoxceptions thrown by the success handler immediately above it. This handler will ONLY + // catch exceptions from getLocalServer() itself.) + return kj::READY_NOW; + }); + } + + kj::Promise findShorterPath(StreamServerBase& capnpServer) { + // We found a shorter path back to this process. Record it. 
+ optimized = capnpServer; + + KJ_SWITCH_ONEOF(capnpServer.getShortestPath()) { + KJ_CASE_ONEOF(promise, kj::Promise) { + return promise.then([this,&capnpServer]() { + return findShorterPath(capnpServer); + }); + } + KJ_CASE_ONEOF(kjStream, StreamServerBase::BorrowedStream) { + // The ByteStream::Server wraps a regular KJ stream that does not wrap another capnp + // stream. + if (kjStream.limit < (uint64_t)kj::maxValue / 2) { + // But it isn't wrapping that stream forever. Eventually it plans to redirect back to + // some other stream. So, let's wait for that, and possibly shorten again. + kjStream.lender.returnStream(0); + return KJ_ASSERT_NONNULL(capnpServer.shortenPath()) + .then([this, &capnpServer](auto&&) { + return findShorterPath(capnpServer); + }); + } else { + // This KJ stream is (effectively) the permanent endpoint. We can't get any shorter + // from here. All we want to do now is watch for disconnect. + auto promise = kjStream.stream.whenWriteDisconnected(); + kjStream.lender.returnStream(0); + return promise; + } + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client*) { + return findShorterPath(*capnpStream); + } + } + KJ_UNREACHABLE; + } + + StreamServerBase::ShortestPath getShortestPath() { + KJ_IF_MAYBE(o, optimized) { + return o->getShortestPath(); + } else { + return &inner; + } + } + + kj::Promise pumpLoop(kj::AsyncInputStream& input, + uint64_t completed, uint64_t remaining) { + if (remaining == 0) return completed; + + KJ_SWITCH_ONEOF(getShortestPath()) { + KJ_CASE_ONEOF(promise, kj::Promise) { + return promise.then([this,&input,completed,remaining]() { + return pumpLoop(input,completed,remaining); + }); + } + KJ_CASE_ONEOF(kjStream, StreamServerBase::BorrowedStream) { + // Oh hell yes, this capability actually points back to a stream in our own thread. We can + // stop sending RPCs and just pump directly. + + if (remaining <= kjStream.limit) { + return input.pumpTo(kjStream.stream, remaining) + .then([kjStream,completed](uint64_t actual) { + kjStream.lender.returnStream(actual); + return actual + completed; + }); + } else { + auto promise = input.pumpTo(kjStream.stream, kjStream.limit); + return promise.then([this,&input,completed,remaining,kjStream] + (uint64_t actual) mutable -> kj::Promise { + kjStream.lender.returnStream(actual); + if (actual < kjStream.limit) { + // EOF reached. + return completed + actual; + } else { + return pumpLoop(input, completed + actual, remaining - actual); + } + }); + } + } + KJ_CASE_ONEOF(capnpStream, capnp::ByteStream::Client*) { + // Pumping from some other kind of steram. Optimize the pump by reading from the input + // directly into outgoing RPC messages. + size_t size = kj::min(remaining, 8192); + auto req = capnpStream->writeRequest(MessageSize { 8 + size / sizeof(word) }); + + auto orphanage = Orphanage::getForMessageContaining( + capnp::ByteStream::WriteParams::Builder(req)); + + auto buffer = orphanage.newOrphan(size); + + struct WriteRequestAndBuffer { + // The order of construction/destruction of lambda captures is unspecified, but we care + // about ordering between these two things that we want to capture, so... we need a + // struct. + StreamingRequest request; + Orphan buffer; // points into `request`... 
+ }; + + WriteRequestAndBuffer wrab = { kj::mv(req), kj::mv(buffer) }; + + return input.tryRead(wrab.buffer.get().begin(), 1, size) + .then([this, &input, completed, remaining, size, wrab = kj::mv(wrab)] + (size_t actual) mutable -> kj::Promise { + if (actual == 0) { + return completed; + } if (actual < size) { + wrab.buffer.truncate(actual); + } + + wrab.request.adoptBytes(kj::mv(wrab.buffer)); + return wrab.request.send() + .then([this, &input, completed, remaining, actual]() { + return pumpLoop(input, completed + actual, remaining - actual); + }); + }); + } + } + KJ_UNREACHABLE; + } + + template + kj::Promise splitAndWrite(kj::ArrayPtr> pieces, + size_t limit, WritePieces&& writeFirstPieces) { + size_t splitByte = limit; + size_t splitPiece = 0; + while (pieces[splitPiece].size() <= splitByte) { + splitByte -= pieces[splitPiece].size(); + ++splitPiece; + } + + if (splitByte == 0) { + // Oh thank god, the split is between two pieces. + auto rest = pieces.slice(splitPiece, pieces.size()); + return writeFirstPieces(pieces.slice(0, splitPiece)) + .then([this,rest]() mutable { + return write(rest); + }); + } else { + // FUUUUUUUU---- we need to split one of the pieces in two. + auto left = kj::heapArray>(splitPiece + 1); + auto right = kj::heapArray>(pieces.size() - splitPiece); + for (auto i: kj::zeroTo(splitPiece)) { + left[i] = pieces[i]; + } + for (auto i: kj::zeroTo(right.size())) { + right[i] = pieces[splitPiece + i]; + } + left.back() = pieces[splitPiece].slice(0, splitByte); + right.front() = pieces[splitPiece].slice(splitByte, pieces[splitPiece].size()); + + return writeFirstPieces(left).attach(kj::mv(left)) + .then([this,right=kj::mv(right)]() mutable { + return write(right).attach(kj::mv(right)); + }); + } + } +}; + +// ======================================================================================= + +capnp::ByteStream::Client ByteStreamFactory::kjToCapnp(kj::Own kjStream) { + return streamSet.add(kj::heap(*this, kj::mv(kjStream))); +} + +kj::Own ByteStreamFactory::capnpToKj(capnp::ByteStream::Client capnpStream) { + return kj::heap(*this, kj::mv(capnpStream)); +} + +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream.capnp new file mode 100644 index 00000000000..b98d85e9fb2 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream.capnp @@ -0,0 +1,39 @@ +@0x8f5d14e1c273738d; + +$import "/capnp/c++.capnp".namespace("capnp"); + +interface ByteStream { + write @0 (bytes :Data) -> stream; + # Write a chunk. + + end @1 (); + # Signals clean EOF. (If the ByteStream is dropped without calling this, then the stream was + # prematurely canceled and so the body should not be considered complete.) + + getSubstream @2 (callback :SubstreamCallback, + limit :UInt64 = 0xffffffffffffffff) -> (substream :ByteStream); + # This method is used to implement path shortening optimization. It is designed in particular + # with KJ streams' pumpTo() in mind. + # + # getSubstream() returns a new stream object that can be used to write to the same destination + # as this stream. The substream will operate until it has received `limit` bytes, or its `end()` + # method has been called, whichever occurs first. At that time, it invokes one of the methods of + # `callback` based on the termination condition. + # + # While a substream is active, it is an error to call write() on the original stream. 
Doing so + # may throw an exception or may arbitrarily interleave bytes with the substream's writes. + + interface SubstreamCallback { + ended @0 (byteCount :UInt64); + # `end()` was called on the substream after writing `byteCount` bytes. The `end()` call was + # NOT forwarded to the underlying stream, which remains open. + + reachedLimit @1 () -> (next :ByteStream); + # The number of bytes specified by the `limit` parameter of `getSubstream()` was reached. + # The substream will "resolve itself" to `next`, so that all future calls to the substream + # are forwarded to `next`. + # + # If the `write()` call which reached the limit included bytes past the limit, then the first + # `write()` call to `next` will be for those leftover bytes. + } +} diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream.h new file mode 100644 index 00000000000..545e6e592b7 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/byte-stream.h @@ -0,0 +1,47 @@ +// Copyright (c) 2019 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once +// Bridges from KJ streams to Cap'n Proto ByteStream RPC protocol. + +#include +#include + +namespace capnp { + +class ByteStreamFactory { + // In order to allow path-shortening through KJ, a common factory must be used for converting + // between RPC ByteStreams and KJ streams. + +public: + capnp::ByteStream::Client kjToCapnp(kj::Own kjStream); + kj::Own capnpToKj(capnp::ByteStream::Client capnpStream); + +private: + CapabilityServerSet streamSet; + + class StreamServerBase; + class SubstreamImpl; + class CapnpToKjStreamAdapter; + class KjToCapnpStreamAdapter; +}; + +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp-test.c++ new file mode 100644 index 00000000000..771e25a9e0b --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp-test.c++ @@ -0,0 +1,648 @@ +// Copyright (c) 2019 Cloudflare, Inc. 
and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "http-over-capnp.h" +#include + +namespace capnp { +namespace { + +KJ_TEST("KJ and RPC HTTP method enums match") { +#define EXPECT_MATCH(METHOD) \ + KJ_EXPECT(static_cast(kj::HttpMethod::METHOD) == \ + static_cast(capnp::HttpMethod::METHOD)); + + KJ_HTTP_FOR_EACH_METHOD(EXPECT_MATCH); +#undef EXPECT_MATCH +} + +// ======================================================================================= + +kj::Promise expectRead(kj::AsyncInputStream& in, kj::StringPtr expected) { + if (expected.size() == 0) return kj::READY_NOW; + + auto buffer = kj::heapArray(expected.size()); + + auto promise = in.tryRead(buffer.begin(), 1, buffer.size()); + return promise.then(kj::mvCapture(buffer, [&in,expected](kj::Array buffer, size_t amount) { + if (amount == 0) { + KJ_FAIL_ASSERT("expected data never sent", expected); + } + + auto actual = buffer.slice(0, amount); + if (memcmp(actual.begin(), expected.begin(), actual.size()) != 0) { + KJ_FAIL_ASSERT("data from stream doesn't match expected", expected, actual); + } + + return expectRead(in, expected.slice(amount)); + })); +} + +enum Direction { + CLIENT_TO_SERVER, + SERVER_TO_CLIENT +}; + +struct TestStep { + Direction direction; + kj::StringPtr send; + kj::StringPtr receive; + + constexpr TestStep(Direction direction, kj::StringPtr send, kj::StringPtr receive) + : direction(direction), send(send), receive(receive) {} + constexpr TestStep(Direction direction, kj::StringPtr data) + : direction(direction), send(data), receive(data) {} +}; + +constexpr TestStep TEST_STEPS[] = { + // Test basic request. 
+ { + CLIENT_TO_SERVER, + + "GET / HTTP/1.1\r\n" + "Host: example.com\r\n" + "\r\n"_kj, + }, + { + SERVER_TO_CLIENT, + + "HTTP/1.1 200 OK\r\n" + "Content-Length: 3\r\n" + "\r\n" + "foo"_kj + }, + + // Try PUT, vary path, vary status + { + CLIENT_TO_SERVER, + + "PUT /foo/bar HTTP/1.1\r\n" + "Content-Length: 5\r\n" + "Host: example.com\r\n" + "\r\n" + "corge"_kj, + }, + { + SERVER_TO_CLIENT, + + "HTTP/1.1 403 Unauthorized\r\n" + "Content-Length: 4\r\n" + "\r\n" + "nope"_kj + }, + + // HEAD request + { + CLIENT_TO_SERVER, + + "HEAD /foo/bar HTTP/1.1\r\n" + "Host: example.com\r\n" + "\r\n"_kj, + }, + { + SERVER_TO_CLIENT, + + "HTTP/1.1 200 OK\r\n" + "Content-Length: 4\r\n" + "\r\n"_kj + }, + + // Empty-body response + { + CLIENT_TO_SERVER, + + "GET /foo/bar HTTP/1.1\r\n" + "Host: example.com\r\n" + "\r\n"_kj, + }, + { + SERVER_TO_CLIENT, + + "HTTP/1.1 304 Not Modified\r\n" + "Server: foo\r\n" + "\r\n"_kj + }, + + // Chonky body + { + CLIENT_TO_SERVER, + + "POST / HTTP/1.1\r\n" + "Transfer-Encoding: chunked\r\n" + "Host: example.com\r\n" + "\r\n" + "3\r\n" + "foo\r\n" + "5\r\n" + "corge\r\n" + "0\r\n" + "\r\n"_kj, + }, + { + SERVER_TO_CLIENT, + + "HTTP/1.1 200 OK\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "6\r\n" + "barbaz\r\n" + "6\r\n" + "garply\r\n" + "0\r\n" + "\r\n"_kj + }, + + // Streaming + { + CLIENT_TO_SERVER, + + "POST / HTTP/1.1\r\n" + "Content-Length: 9\r\n" + "Host: example.com\r\n" + "\r\n"_kj, + }, + { + CLIENT_TO_SERVER, + + "foo"_kj, + }, + { + CLIENT_TO_SERVER, + + "bar"_kj, + }, + { + CLIENT_TO_SERVER, + + "baz"_kj, + }, + { + SERVER_TO_CLIENT, + + "HTTP/1.1 200 OK\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n"_kj, + }, + { + SERVER_TO_CLIENT, + + "6\r\n" + "barbaz\r\n"_kj, + }, + { + SERVER_TO_CLIENT, + + "6\r\n" + "garply\r\n"_kj, + }, + { + SERVER_TO_CLIENT, + + "0\r\n" + "\r\n"_kj + }, + + // Bidirectional. + { + CLIENT_TO_SERVER, + + "POST / HTTP/1.1\r\n" + "Content-Length: 9\r\n" + "Host: example.com\r\n" + "\r\n"_kj, + }, + { + SERVER_TO_CLIENT, + + "HTTP/1.1 200 OK\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n"_kj, + }, + { + CLIENT_TO_SERVER, + + "foo"_kj, + }, + { + SERVER_TO_CLIENT, + + "6\r\n" + "barbaz\r\n"_kj, + }, + { + CLIENT_TO_SERVER, + + "bar"_kj, + }, + { + SERVER_TO_CLIENT, + + "6\r\n" + "garply\r\n"_kj, + }, + { + CLIENT_TO_SERVER, + + "baz"_kj, + }, + { + SERVER_TO_CLIENT, + + "0\r\n" + "\r\n"_kj + }, + + // Test headers being re-ordered by KJ. This isn't necessary behavior, but it does prove that + // we're not testing a pure streaming pass-through... + { + CLIENT_TO_SERVER, + + "GET / HTTP/1.1\r\n" + "Host: example.com\r\n" + "Accept: text/html\r\n" + "Foo-Header: 123\r\n" + "User-Agent: kj\r\n" + "Accept-Language: en\r\n" + "\r\n"_kj, + + "GET / HTTP/1.1\r\n" + "Host: example.com\r\n" + "Accept-Language: en\r\n" + "Accept: text/html\r\n" + "User-Agent: kj\r\n" + "Foo-Header: 123\r\n" + "\r\n"_kj + }, + { + SERVER_TO_CLIENT, + + "HTTP/1.1 200 OK\r\n" + "Server: kj\r\n" + "Bar: 321\r\n" + "Content-Length: 3\r\n" + "\r\n" + "foo"_kj, + + "HTTP/1.1 200 OK\r\n" + "Content-Length: 3\r\n" + "Server: kj\r\n" + "Bar: 321\r\n" + "\r\n" + "foo"_kj + }, + + // We finish up a request with no response, to test cancellation. 
+ { + CLIENT_TO_SERVER, + + "GET / HTTP/1.1\r\n" + "Host: example.com\r\n" + "\r\n"_kj, + }, +}; + +class OneConnectNetworkAddress final: public kj::NetworkAddress { +public: + OneConnectNetworkAddress(kj::Own stream) + : stream(kj::mv(stream)) {} + + kj::Promise> connect() override { + auto result = KJ_ASSERT_NONNULL(kj::mv(stream)); + stream = nullptr; + return kj::mv(result); + } + + kj::Own listen() override { KJ_UNIMPLEMENTED("test"); } + kj::Own clone() override { KJ_UNIMPLEMENTED("test"); } + kj::String toString() override { KJ_UNIMPLEMENTED("test"); } + +private: + kj::Maybe> stream; +}; + +void runEndToEndTests(kj::Timer& timer, kj::HttpHeaderTable& headerTable, + HttpOverCapnpFactory& clientFactory, HttpOverCapnpFactory& serverFactory, + kj::WaitScope& waitScope) { + auto clientPipe = kj::newTwoWayPipe(); + auto serverPipe = kj::newTwoWayPipe(); + + OneConnectNetworkAddress oneConnectAddr(kj::mv(serverPipe.ends[0])); + + auto backHttp = kj::newHttpClient(timer, headerTable, oneConnectAddr); + auto backCapnp = serverFactory.kjToCapnp(kj::newHttpService(*backHttp)); + auto frontCapnp = clientFactory.capnpToKj(backCapnp); + kj::HttpServer frontKj(timer, headerTable, *frontCapnp); + auto listenTask = frontKj.listenHttp(kj::mv(clientPipe.ends[1])) + .eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); + + for (auto& step: TEST_STEPS) { + KJ_CONTEXT(step.send); + + kj::AsyncOutputStream* out; + kj::AsyncInputStream* in; + + switch (step.direction) { + case CLIENT_TO_SERVER: + out = clientPipe.ends[0]; + in = serverPipe.ends[1]; + break; + case SERVER_TO_CLIENT: + out = serverPipe.ends[1]; + in = clientPipe.ends[0]; + break; + } + + auto writePromise = out->write(step.send.begin(), step.send.size()); + auto readPromise = expectRead(*in, step.receive); + if (!writePromise.poll(waitScope)) { + if (readPromise.poll(waitScope)) { + readPromise.wait(waitScope); + KJ_FAIL_ASSERT("write hung, read worked fine"); + } else { + KJ_FAIL_ASSERT("write and read both hung"); + } + } + + writePromise.wait(waitScope); + KJ_ASSERT(readPromise.poll(waitScope), "read hung"); + readPromise.wait(waitScope); + } + + // The last test message was a request with no response. If we now close the client end, this + // should propagate all the way through to close the server end! 
+ clientPipe.ends[0] = nullptr; + auto lastRead = serverPipe.ends[1]->readAllText(); + KJ_ASSERT(lastRead.poll(waitScope), "last read hung"); + KJ_EXPECT(lastRead.wait(waitScope) == nullptr); +} + +KJ_TEST("HTTP-over-Cap'n-Proto E2E, no path shortening") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + kj::TimerImpl timer(kj::origin()); + + ByteStreamFactory streamFactory1; + ByteStreamFactory streamFactory2; + kj::HttpHeaderTable::Builder tableBuilder; + HttpOverCapnpFactory factory1(streamFactory1, tableBuilder); + HttpOverCapnpFactory factory2(streamFactory2, tableBuilder); + auto headerTable = tableBuilder.build(); + + runEndToEndTests(timer, *headerTable, factory1, factory2, waitScope); +} + +KJ_TEST("HTTP-over-Cap'n-Proto E2E, with path shortening") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + kj::TimerImpl timer(kj::origin()); + + ByteStreamFactory streamFactory; + kj::HttpHeaderTable::Builder tableBuilder; + HttpOverCapnpFactory factory(streamFactory, tableBuilder); + auto headerTable = tableBuilder.build(); + + runEndToEndTests(timer, *headerTable, factory, factory, waitScope); +} + +KJ_TEST("HTTP-over-Cap'n-Proto 205 bug with HttpClientAdapter") { + // Test that a 205 with a hanging body doesn't prevent headers from being delivered. (This was + // a bug at one point. See, 205 responses are supposed to have empty bodies. But they must + // explicitly indicate an empty body. http-over-capnp, though, *assumed* an empty body when it + // saw a 205. But, on the client side, when HttpClientAdapter sees an empty body, it blocks + // delivery of the *headers* until the service promise resolves, in order to avoid prematurely + // cancelling the service. But on the server side, the service method is left hanging because + // it's waiting for the 205 to actually produce its empty body. If that didn't make any sense, + // consider yourself lucky.) + + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + kj::TimerImpl timer(kj::origin()); + + ByteStreamFactory streamFactory; + kj::HttpHeaderTable::Builder tableBuilder; + HttpOverCapnpFactory factory(streamFactory, tableBuilder); + auto headerTable = tableBuilder.build(); + + auto pipe = kj::newTwoWayPipe(); + + OneConnectNetworkAddress oneConnectAddr(kj::mv(pipe.ends[0])); + + auto backHttp = kj::newHttpClient(timer, *headerTable, oneConnectAddr); + auto backCapnp = factory.kjToCapnp(kj::newHttpService(*backHttp)); + auto frontCapnp = factory.capnpToKj(backCapnp); + + auto frontClient = kj::newHttpClient(*frontCapnp); + + auto req = frontClient->request(kj::HttpMethod::GET, "/", kj::HttpHeaders(*headerTable)); + + { + auto readPromise = expectRead(*pipe.ends[1], "GET / HTTP/1.1\r\n\r\n"); + KJ_ASSERT(readPromise.poll(waitScope)); + readPromise.wait(waitScope); + } + + KJ_EXPECT(!req.response.poll(waitScope)); + + { + // A 205 response with no content-length or transfer-encoding is terminated by EOF (but also + // the body is required to be empty). We don't send the EOF yet, just the response line and + // empty headers. + kj::StringPtr resp = "HTTP/1.1 205 Reset Content\r\n\r\n"; + pipe.ends[1]->write(resp.begin(), resp.size()).wait(waitScope); + } + + // On the client end, we should get a response now! + KJ_ASSERT(req.response.poll(waitScope)); + + auto resp = req.response.wait(waitScope); + KJ_EXPECT(resp.statusCode == 205); + + // But the body is still blocked. 
+ auto promise = resp.body->readAllText(); + KJ_EXPECT(!promise.poll(waitScope)); + + // OK now send the EOF it's waiting for. + pipe.ends[1]->shutdownWrite(); + + // And now the body is unblocked. + KJ_ASSERT(promise.poll(waitScope)); + KJ_EXPECT(promise.wait(waitScope) == ""); +} + +// ======================================================================================= + +class WebSocketAccepter final: public kj::HttpService { +public: + WebSocketAccepter(kj::HttpHeaderTable& headerTable, + kj::Own>> fulfiller, + kj::Promise done) + : headerTable(headerTable), fulfiller(kj::mv(fulfiller)), done(kj::mv(done)) {} + + kj::Promise request( + kj::HttpMethod method, kj::StringPtr url, const kj::HttpHeaders& headers, + kj::AsyncInputStream& requestBody, Response& response) { + kj::HttpHeaders respHeaders(headerTable); + respHeaders.add("X-Foo", "bar"); + fulfiller->fulfill(response.acceptWebSocket(respHeaders)); + return kj::mv(done); + } + +private: + kj::HttpHeaderTable& headerTable; + kj::Own>> fulfiller; + kj::Promise done; +}; + +void runWebSocketTests(kj::HttpHeaderTable& headerTable, + HttpOverCapnpFactory& clientFactory, HttpOverCapnpFactory& serverFactory, + kj::WaitScope& waitScope) { + // We take a different approach here, because writing out raw WebSocket frames is a pain. + // It's easier to test WebSockets at the KJ API level. + + auto wsPaf = kj::newPromiseAndFulfiller>(); + auto donePaf = kj::newPromiseAndFulfiller(); + + auto back = serverFactory.kjToCapnp(kj::heap( + headerTable, kj::mv(wsPaf.fulfiller), kj::mv(donePaf.promise))); + auto front = clientFactory.capnpToKj(back); + auto client = kj::newHttpClient(*front); + + auto resp = client->openWebSocket("/ws", kj::HttpHeaders(headerTable)).wait(waitScope); + KJ_ASSERT(resp.webSocketOrBody.is>()); + + auto clientWs = kj::mv(resp.webSocketOrBody.get>()); + auto serverWs = wsPaf.promise.wait(waitScope); + + { + auto promise = clientWs->send("foo"_kj); + auto message = serverWs->receive().wait(waitScope); + promise.wait(waitScope); + KJ_ASSERT(message.is()); + KJ_EXPECT(message.get() == "foo"); + } + + { + auto promise = serverWs->send("bar"_kj.asBytes()); + auto message = clientWs->receive().wait(waitScope); + promise.wait(waitScope); + KJ_ASSERT(message.is>()); + KJ_EXPECT(kj::str(message.get>().asChars()) == "bar"); + } + + { + auto promise = clientWs->close(1234, "baz"_kj); + auto message = serverWs->receive().wait(waitScope); + promise.wait(waitScope); + KJ_ASSERT(message.is()); + KJ_EXPECT(message.get().code == 1234); + KJ_EXPECT(message.get().reason == "baz"); + } + + { + auto promise = serverWs->disconnect(); + auto receivePromise = clientWs->receive(); + KJ_EXPECT(receivePromise.poll(waitScope)); + KJ_EXPECT_THROW(DISCONNECTED, receivePromise.wait(waitScope)); + promise.wait(waitScope); + } +} + +KJ_TEST("HTTP-over-Cap'n Proto WebSocket, no path shortening") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory streamFactory1; + ByteStreamFactory streamFactory2; + kj::HttpHeaderTable::Builder tableBuilder; + HttpOverCapnpFactory factory1(streamFactory1, tableBuilder); + HttpOverCapnpFactory factory2(streamFactory2, tableBuilder); + auto headerTable = tableBuilder.build(); + + runWebSocketTests(*headerTable, factory1, factory2, waitScope); +} + +KJ_TEST("HTTP-over-Cap'n Proto WebSocket, with path shortening") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory streamFactory; + kj::HttpHeaderTable::Builder tableBuilder; + HttpOverCapnpFactory 
factory(streamFactory, tableBuilder); + auto headerTable = tableBuilder.build(); + + runWebSocketTests(*headerTable, factory, factory, waitScope); +} + +// ======================================================================================= +// bug fixes + +class HangingHttpService final: public kj::HttpService { +public: + HangingHttpService(bool& called, bool& destroyed) + : called(called), destroyed(destroyed) {} + ~HangingHttpService() noexcept(false) { + destroyed = true; + } + + kj::Promise request( + kj::HttpMethod method, kj::StringPtr url, const kj::HttpHeaders& headers, + kj::AsyncInputStream& requestBody, Response& response) { + called = true; + return kj::NEVER_DONE; + } + +private: + bool& called; + bool& destroyed; +}; + +KJ_TEST("HttpService isn't destroyed while call outstanding") { + kj::EventLoop eventLoop; + kj::WaitScope waitScope(eventLoop); + + ByteStreamFactory streamFactory; + kj::HttpHeaderTable::Builder tableBuilder; + HttpOverCapnpFactory factory(streamFactory, tableBuilder); + auto headerTable = tableBuilder.build(); + + bool called = false; + bool destroyed = false; + auto service = factory.kjToCapnp(kj::heap(called, destroyed)); + + KJ_EXPECT(!called); + KJ_EXPECT(!destroyed); + + auto req = service.startRequestRequest(); + auto httpReq = req.initRequest(); + httpReq.setMethod(capnp::HttpMethod::GET); + httpReq.setUrl("/"); + auto serverContext = req.send().wait(waitScope).getContext(); + service = nullptr; + + auto promise = serverContext.whenResolved(); + KJ_EXPECT(!promise.poll(waitScope)); + + KJ_EXPECT(called); + KJ_EXPECT(!destroyed); +} + +} // namespace +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp.c++ new file mode 100644 index 00000000000..f76e309f69c --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp.c++ @@ -0,0 +1,761 @@ +// Copyright (c) 2019 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +#include "http-over-capnp.h" +#include +#include + +namespace capnp { + +using kj::uint; +using kj::byte; + +class HttpOverCapnpFactory::RequestState final + : public kj::Refcounted, public kj::TaskSet::ErrorHandler { +public: + RequestState() { + tasks.emplace(*this); + } + + template + auto wrap(Func&& func) -> decltype(func()) { + if (tasks == nullptr) { + return KJ_EXCEPTION(DISCONNECTED, "client canceled HTTP request"); + } else { + return canceler.wrap(func()); + } + } + + void cancel() { + if (tasks != nullptr) { + if (!canceler.isEmpty()) { + canceler.cancel(KJ_EXCEPTION(DISCONNECTED, "request canceled")); + } + tasks = nullptr; + webSocket = nullptr; + } + } + + void assertNotCanceled() { + if (tasks == nullptr) { + kj::throwFatalException(KJ_EXCEPTION(DISCONNECTED, "client canceled HTTP request")); + } + } + + void addTask(kj::Promise task) { + KJ_IF_MAYBE(t, tasks) { + t->add(kj::mv(task)); + } else { + // Just drop the task. + } + } + + kj::Promise finishTasks() { + // This is merged into the final promise, so we don't need to worry about wrapping it for + // cancellation. + return KJ_REQUIRE_NONNULL(tasks).onEmpty() + .then([this]() { + KJ_IF_MAYBE(e, error) { + kj::throwRecoverableException(kj::mv(*e)); + } + }); + } + + void taskFailed(kj::Exception&& exception) override { + if (error == nullptr) { + error = kj::mv(exception); + } + } + + void holdWebSocket(kj::Own webSocket) { + // Hold on to this WebSocket until cancellation. + KJ_REQUIRE(this->webSocket == nullptr); + KJ_REQUIRE(tasks != nullptr); + this->webSocket = kj::mv(webSocket); + } + + void disconnectWebSocket() { + KJ_IF_MAYBE(t, tasks) { + t->add(kj::evalNow([&]() { return KJ_ASSERT_NONNULL(webSocket)->disconnect(); })); + } + } + +private: + kj::Maybe error; + kj::Maybe> webSocket; + kj::Canceler canceler; + kj::Maybe tasks; +}; + +// ======================================================================================= + +class HttpOverCapnpFactory::CapnpToKjWebSocketAdapter final: public capnp::WebSocket::Server { +public: + CapnpToKjWebSocketAdapter(kj::Own state, kj::WebSocket& webSocket, + kj::Promise shorteningPromise) + : state(kj::mv(state)), webSocket(webSocket), + shorteningPromise(kj::mv(shorteningPromise)) {} + + ~CapnpToKjWebSocketAdapter() noexcept(false) { + state->disconnectWebSocket(); + } + + kj::Maybe> shortenPath() override { + return kj::mv(shorteningPromise); + } + + kj::Promise sendText(SendTextContext context) override { + return state->wrap([&]() { return webSocket.send(context.getParams().getText()); }); + } + kj::Promise sendData(SendDataContext context) override { + return state->wrap([&]() { return webSocket.send(context.getParams().getData()); }); + } + kj::Promise close(CloseContext context) override { + auto params = context.getParams(); + return state->wrap([&]() { return webSocket.close(params.getCode(), params.getReason()); }); + } + +private: + kj::Own state; + kj::WebSocket& webSocket; + kj::Promise shorteningPromise; +}; + +class HttpOverCapnpFactory::KjToCapnpWebSocketAdapter final: public kj::WebSocket { +public: + KjToCapnpWebSocketAdapter( + kj::Maybe> in, capnp::WebSocket::Client out, + kj::Own>> shorteningFulfiller) + : in(kj::mv(in)), out(kj::mv(out)), shorteningFulfiller(kj::mv(shorteningFulfiller)) {} + ~KjToCapnpWebSocketAdapter() noexcept(false) { + if (shorteningFulfiller->isWaiting()) { + // We want to make sure the fulfiller is not rejected with a bogus "PromiseFulfiller + // destroyed" error, so fulfill it with never-done. 
+ shorteningFulfiller->fulfill(kj::NEVER_DONE); + } + } + + kj::Promise send(kj::ArrayPtr message) override { + auto req = KJ_REQUIRE_NONNULL(out, "already called disconnect()").sendDataRequest( + MessageSize { 8 + message.size() / sizeof(word), 0 }); + req.setData(message); + sentBytes += message.size(); + return req.send(); + } + + kj::Promise send(kj::ArrayPtr message) override { + auto req = KJ_REQUIRE_NONNULL(out, "already called disconnect()").sendTextRequest( + MessageSize { 8 + message.size() / sizeof(word), 0 }); + memcpy(req.initText(message.size()).begin(), message.begin(), message.size()); + sentBytes += message.size(); + return req.send(); + } + + kj::Promise close(uint16_t code, kj::StringPtr reason) override { + auto req = KJ_REQUIRE_NONNULL(out, "already called disconnect()").closeRequest(); + req.setCode(code); + req.setReason(reason); + sentBytes += reason.size() + 2; + return req.send().ignoreResult(); + } + + kj::Promise disconnect() override { + out = nullptr; + return kj::READY_NOW; + } + + void abort() override { + KJ_ASSERT_NONNULL(in)->abort(); + } + + kj::Promise whenAborted() override { + return KJ_ASSERT_NONNULL(out).whenResolved() + .then([]() -> kj::Promise { + // It would seem this capability resolved to an implementation of the WebSocket RPC interface + // that does not support further path-shortening (so, it's not the implementation found in + // this file). Since the path-shortening facility is also how we discover disconnects, we + // apparently have no way to be alerted on disconnect. We have to assume the other end + // never aborts. + return kj::NEVER_DONE; + }, [](kj::Exception&& e) -> kj::Promise { + if (e.getType() == kj::Exception::Type::DISCONNECTED) { + // Looks like we were aborted! + return kj::READY_NOW; + } else { + // Some other error... propagate it. + return kj::mv(e); + } + }); + } + + kj::Promise receive(size_t maxSize) override { + return KJ_ASSERT_NONNULL(in)->receive(maxSize); + } + + kj::Promise pumpTo(WebSocket& other) override { + KJ_IF_MAYBE(optimized, kj::dynamicDowncastIfAvailable(other)) { + shorteningFulfiller->fulfill( + kj::cp(KJ_REQUIRE_NONNULL(optimized->out, "already called disconnect()"))); + + // We expect the `in` pipe will stop receiving messages after the redirect, but we need to + // pump anything already in-flight. + return KJ_ASSERT_NONNULL(in)->pumpTo(other); + } else KJ_IF_MAYBE(promise, other.tryPumpFrom(*this)) { + // We may have unwrapped some layers around `other` leading to a shorter path. + return kj::mv(*promise); + } else { + return KJ_ASSERT_NONNULL(in)->pumpTo(other); + } + } + + uint64_t sentByteCount() override { return sentBytes; } + uint64_t receivedByteCount() override { return KJ_ASSERT_NONNULL(in)->receivedByteCount(); } + +private: + kj::Maybe> in; // One end of a WebSocketPipe, used only for receiving. + kj::Maybe out; // Used only for sending. + kj::Own>> shorteningFulfiller; + uint64_t sentBytes = 0; +}; + +// ======================================================================================= + +class HttpOverCapnpFactory::ClientRequestContextImpl final + : public capnp::HttpService::ClientRequestContext::Server { +public: + ClientRequestContextImpl(HttpOverCapnpFactory& factory, + kj::Own state, + kj::HttpService::Response& kjResponse) + : factory(factory), state(kj::mv(state)), kjResponse(kjResponse) {} + + ~ClientRequestContextImpl() noexcept(false) { + // Note this implicitly cancels the upstream pump task. 
+ } + + kj::Promise startResponse(StartResponseContext context) override { + KJ_REQUIRE(!sent, "already called startResponse() or startWebSocket()"); + sent = true; + state->assertNotCanceled(); + + auto params = context.getParams(); + auto rpcResponse = params.getResponse(); + + auto bodySize = rpcResponse.getBodySize(); + kj::Maybe expectedSize; + bool hasBody = true; + if (bodySize.isFixed()) { + auto size = bodySize.getFixed(); + expectedSize = bodySize.getFixed(); + hasBody = size > 0; + } + + auto bodyStream = kjResponse.send(rpcResponse.getStatusCode(), rpcResponse.getStatusText(), + factory.headersToKj(rpcResponse.getHeaders()), expectedSize); + + auto results = context.getResults(MessageSize { 16, 1 }); + if (hasBody) { + auto pipe = kj::newOneWayPipe(); + results.setBody(factory.streamFactory.kjToCapnp(kj::mv(pipe.out))); + state->addTask(pipe.in->pumpTo(*bodyStream) + .ignoreResult() + .attach(kj::mv(bodyStream), kj::mv(pipe.in))); + } + return kj::READY_NOW; + } + + kj::Promise startWebSocket(StartWebSocketContext context) override { + KJ_REQUIRE(!sent, "already called startResponse() or startWebSocket()"); + sent = true; + state->assertNotCanceled(); + + auto params = context.getParams(); + + auto shorteningPaf = kj::newPromiseAndFulfiller>(); + + auto ownWebSocket = kjResponse.acceptWebSocket(factory.headersToKj(params.getHeaders())); + auto& webSocket = *ownWebSocket; + state->holdWebSocket(kj::mv(ownWebSocket)); + + auto upWrapper = kj::heap( + nullptr, params.getUpSocket(), kj::mv(shorteningPaf.fulfiller)); + state->addTask(webSocket.pumpTo(*upWrapper).attach(kj::mv(upWrapper)) + .catch_([&webSocket=webSocket](kj::Exception&& e) -> kj::Promise { + // The pump in the client -> server direction failed. The error may have originated from + // either the client or the server. In case it came from the server, we want to call .abort() + // to propagate the problem back to the client. If the error came from the client, then + // .abort() probably is a noop. + webSocket.abort(); + return kj::mv(e); + })); + + auto results = context.getResults(MessageSize { 16, 1 }); + results.setDownSocket(kj::heap( + kj::addRef(*state), webSocket, kj::mv(shorteningPaf.promise))); + + return kj::READY_NOW; + } + +private: + HttpOverCapnpFactory& factory; + kj::Own state; + bool sent = false; + + kj::HttpService::Response& kjResponse; + // Must check state->assertNotCanceled() before using this. 
+};
+
+class HttpOverCapnpFactory::KjToCapnpHttpServiceAdapter final: public kj::HttpService {
+public:
+  KjToCapnpHttpServiceAdapter(HttpOverCapnpFactory& factory, capnp::HttpService::Client inner)
+      : factory(factory), inner(kj::mv(inner)) {}
+
+  kj::Promise<void> request(
+      kj::HttpMethod method, kj::StringPtr url, const kj::HttpHeaders& headers,
+      kj::AsyncInputStream& requestBody, kj::HttpService::Response& kjResponse) override {
+    auto rpcRequest = inner.startRequestRequest();
+
+    auto metadata = rpcRequest.initRequest();
+    metadata.setMethod(static_cast<capnp::HttpMethod>(method));
+    metadata.setUrl(url);
+    metadata.adoptHeaders(factory.headersToCapnp(
+        headers, Orphanage::getForMessageContaining(metadata)));
+
+    kj::Maybe<kj::AsyncInputStream&> maybeRequestBody;
+
+    KJ_IF_MAYBE(s, requestBody.tryGetLength()) {
+      metadata.getBodySize().setFixed(*s);
+      if (*s == 0) {
+        maybeRequestBody = nullptr;
+      } else {
+        maybeRequestBody = requestBody;
+      }
+    } else if ((method == kj::HttpMethod::GET || method == kj::HttpMethod::HEAD) &&
+               headers.get(kj::HttpHeaderId::TRANSFER_ENCODING) == nullptr) {
+      maybeRequestBody = nullptr;
+      metadata.getBodySize().setFixed(0);
+    } else {
+      metadata.getBodySize().setUnknown();
+      maybeRequestBody = requestBody;
+    }
+
+    auto state = kj::refcounted<RequestState>();
+    auto deferredCancel = kj::defer([state = kj::addRef(*state)]() mutable {
+      state->cancel();
+    });
+
+    rpcRequest.setContext(
+        kj::heap<ClientRequestContextImpl>(factory, kj::addRef(*state), kjResponse));
+
+    auto pipeline = rpcRequest.send();
+
+    // Pump upstream -- unless we don't expect a request body.
+    kj::Maybe<kj::Promise<void>> pumpRequestTask;
+    KJ_IF_MAYBE(rb, maybeRequestBody) {
+      auto bodyOut = factory.streamFactory.capnpToKj(pipeline.getRequestBody());
+      pumpRequestTask = rb->pumpTo(*bodyOut).attach(kj::mv(bodyOut)).ignoreResult()
+          .eagerlyEvaluate([state = kj::addRef(*state)](kj::Exception&& e) mutable {
+        // A DISCONNECTED exception probably means the server decided not to read the whole request
+        // before responding. In that case we simply want the pump to end, so that on this end it
+        // also appears that the service simply didn't read everything. So we don't propagate the
+        // exception in that case. For any other exception, we want to merge the exception with
+        // the final result.
+        if (e.getType() != kj::Exception::Type::DISCONNECTED) {
+          state->taskFailed(kj::mv(e));
+        }
+      });
+    }
+
+    // Wait for the ServerRequestContext to resolve, which indicates completion. Meanwhile, if the
+    // promise is canceled from the client side, we drop the ServerRequestContext naturally, and we
+    // also call state->cancel().
+    return pipeline.getContext().whenResolved()
+        // Once the server indicates it is done, then we can cancel pumping the request, because
+        // obviously the server won't use it. We should not cancel pumping the response since there
+        // could be data in-flight still.
+        .attach(kj::mv(pumpRequestTask))
+        // finishTasks() will wait for the response to complete.
+        .then([state = kj::mv(state)]() mutable { return state->finishTasks(); })
+        .attach(kj::mv(deferredCancel));
+  }
+
+private:
+  HttpOverCapnpFactory& factory;
+  capnp::HttpService::Client inner;
+};
+
+kj::Own<kj::HttpService> HttpOverCapnpFactory::capnpToKj(capnp::HttpService::Client rpcService) {
+  return kj::heap<KjToCapnpHttpServiceAdapter>(*this, kj::mv(rpcService));
+}
+
+// =======================================================================================
+
+namespace {
+
+class NullInputStream final: public kj::AsyncInputStream {
+  // TODO(cleanup): This class has been replicated in a bunch of places now, make it public
+  // somewhere.
+ +public: + kj::Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { + return size_t(0); + } + + kj::Maybe tryGetLength() override { + return uint64_t(0); + } + + kj::Promise pumpTo(kj::AsyncOutputStream& output, uint64_t amount) override { + return uint64_t(0); + } +}; + +class NullOutputStream final: public kj::AsyncOutputStream { + // TODO(cleanup): This class has been replicated in a bunch of places now, make it public + // somewhere. + +public: + kj::Promise write(const void* buffer, size_t size) override { + return kj::READY_NOW; + } + kj::Promise write(kj::ArrayPtr> pieces) override { + return kj::READY_NOW; + } + kj::Promise whenWriteDisconnected() override { + return kj::NEVER_DONE; + } + + // We can't really optimize tryPumpFrom() unless AsyncInputStream grows a skip() method. +}; + +class ResolvedServerRequestContext final: public capnp::HttpService::ServerRequestContext::Server { +public: + // Nothing! It's done. +}; + +} // namespace + +class HttpOverCapnpFactory::ServerRequestContextImpl final + : public capnp::HttpService::ServerRequestContext::Server, + public kj::HttpService::Response { +public: + ServerRequestContextImpl(HttpOverCapnpFactory& factory, + HttpService::Client serviceCap, + capnp::HttpRequest::Reader request, + capnp::HttpService::ClientRequestContext::Client clientContext, + kj::Own requestBodyIn, + kj::HttpService& kjService) + : factory(factory), serviceCap(kj::mv(serviceCap)), + method(validateMethod(request.getMethod())), + url(kj::str(request.getUrl())), + headers(factory.headersToKj(request.getHeaders()).clone()), + clientContext(kj::mv(clientContext)), + // Note we attach `requestBodyIn` to `task` so that we will implicitly cancel reading + // the request body as soon as the service returns. This is important in particular when + // the request body is not fully consumed, in order to propagate cancellation. + task(kjService.request(method, url, headers, *requestBodyIn, *this) + .attach(kj::mv(requestBodyIn))) {} + + KJ_DISALLOW_COPY(ServerRequestContextImpl); + + kj::Maybe> shortenPath() override { + return task.then([]() -> Capability::Client { + // If all went well, resolve to a settled capability. + // TODO(perf): Could save a message by resolving to a capability hosted by the client, or + // some special "null" capability that isn't an error but is still transmitted by value. + // Otherwise we need a Release message from client -> server just to drop this... 
+ return kj::heap(); + }); + } + + kj::Own send( + uint statusCode, kj::StringPtr statusText, const kj::HttpHeaders& headers, + kj::Maybe expectedBodySize = nullptr) override { + KJ_REQUIRE(replyTask == nullptr, "already called send() or acceptWebSocket()"); + + auto req = clientContext.startResponseRequest(); + + if (method == kj::HttpMethod::HEAD || + statusCode == 204 || statusCode == 304) { + expectedBodySize = uint64_t(0); + } + + auto rpcResponse = req.initResponse(); + rpcResponse.setStatusCode(statusCode); + rpcResponse.setStatusText(statusText); + rpcResponse.adoptHeaders(factory.headersToCapnp( + headers, Orphanage::getForMessageContaining(rpcResponse))); + bool hasBody = true; + KJ_IF_MAYBE(s, expectedBodySize) { + rpcResponse.getBodySize().setFixed(*s); + hasBody = *s > 0; + } + + if (hasBody) { + auto pipeline = req.send(); + auto result = factory.streamFactory.capnpToKj(pipeline.getBody()); + replyTask = pipeline.ignoreResult() + .eagerlyEvaluate([](kj::Exception&& e) { + KJ_LOG(ERROR, "HTTP-over-RPC startResponse() failed", e); + }); + return result; + } else { + replyTask = req.send().ignoreResult() + .eagerlyEvaluate([](kj::Exception&& e) { + KJ_LOG(ERROR, "HTTP-over-RPC startResponse() failed", e); + }); + return kj::heap(); + } + + // We don't actually wait for replyTask anywhere, because we may be all done with this HTTP + // message before the client gets a chance to respond, and we don't want to force an extra + // network round trip. If the client fails this call that's the client's problem, really. + } + + kj::Own acceptWebSocket(const kj::HttpHeaders& headers) override { + KJ_REQUIRE(replyTask == nullptr, "already called send() or acceptWebSocket()"); + + auto req = clientContext.startWebSocketRequest(); + + req.adoptHeaders(factory.headersToCapnp( + headers, Orphanage::getForMessageContaining( + capnp::HttpService::ClientRequestContext::StartWebSocketParams::Builder(req)))); + + auto pipe = kj::newWebSocketPipe(); + auto shorteningPaf = kj::newPromiseAndFulfiller>(); + + // We don't need the RequestState mechanism on the server side because + // CapnpToKjWebSocketAdapter wraps a pipe end, and that pipe end can continue to exist beyond + // the lifetime of the request, because the other end will have been dropped. We only create + // a RequestState here so that we can reuse the implementation of CapnpToKjWebSocketAdapter + // that needs this for the client side. + auto dummyState = kj::refcounted(); + auto& pipeEnd0Ref = *pipe.ends[0]; + dummyState->holdWebSocket(kj::mv(pipe.ends[0])); + req.setUpSocket(kj::heap( + kj::mv(dummyState), pipeEnd0Ref, kj::mv(shorteningPaf.promise))); + + auto pipeline = req.send(); + auto result = kj::heap( + kj::mv(pipe.ends[1]), pipeline.getDownSocket(), kj::mv(shorteningPaf.fulfiller)); + + // Note we need eagerlyEvaluate() here to force proactively discarding the response object, + // since it holds a reference to `downSocket`. 
+ replyTask = pipeline.ignoreResult() + .eagerlyEvaluate([](kj::Exception&& e) { + KJ_LOG(ERROR, "HTTP-over-RPC startWebSocketRequest() failed", e); + }); + + return result; + } + +private: + HttpOverCapnpFactory& factory; + HttpService::Client serviceCap; // ensures the inner kj::HttpService isn't destroyed + kj::HttpMethod method; + kj::String url; + kj::HttpHeaders headers; + capnp::HttpService::ClientRequestContext::Client clientContext; + kj::Maybe> replyTask; + kj::Promise task; + + static kj::HttpMethod validateMethod(capnp::HttpMethod method) { + KJ_REQUIRE(method <= capnp::HttpMethod::UNSUBSCRIBE, "unknown method", method); + return static_cast(method); + } +}; + +class HttpOverCapnpFactory::CapnpToKjHttpServiceAdapter final: public capnp::HttpService::Server { +public: + CapnpToKjHttpServiceAdapter(HttpOverCapnpFactory& factory, kj::Own inner) + : factory(factory), inner(kj::mv(inner)) {} + + kj::Promise startRequest(StartRequestContext context) override { + auto params = context.getParams(); + auto metadata = params.getRequest(); + + auto bodySize = metadata.getBodySize(); + kj::Maybe expectedSize; + bool hasBody = true; + if (bodySize.isFixed()) { + auto size = bodySize.getFixed(); + expectedSize = bodySize.getFixed(); + hasBody = size > 0; + } + + auto results = context.getResults(MessageSize {8, 2}); + kj::Own requestBody; + if (hasBody) { + auto pipe = kj::newOneWayPipe(expectedSize); + results.setRequestBody(factory.streamFactory.kjToCapnp(kj::mv(pipe.out))); + requestBody = kj::mv(pipe.in); + } else { + requestBody = kj::heap(); + } + results.setContext(kj::heap( + factory, thisCap(), metadata, params.getContext(), kj::mv(requestBody), *inner)); + + return kj::READY_NOW; + } + +private: + HttpOverCapnpFactory& factory; + kj::Own inner; +}; + +capnp::HttpService::Client HttpOverCapnpFactory::kjToCapnp(kj::Own service) { + return kj::heap(*this, kj::mv(service)); +} + +// ======================================================================================= + +static constexpr uint64_t COMMON_TEXT_ANNOTATION = 0x857745131db6fc83ull; +// Type ID of `commonText` from `http.capnp`. +// TODO(cleanup): Cap'n Proto should auto-generate constants for these. 
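// Editorial note (illustrative sketch, not part of the upstream file): HeaderIdBundle, defined
// just below, registers every common header name from http-over-capnp.capnp with the
// application's kj::HttpHeaderTable::Builder so that common headers map onto real header IDs.
// The tests pass the Builder straight to HttpOverCapnpFactory via the implicit HeaderIdBundle
// conversion; an explicit setup sharing one bundle between two factories might look like this
// (variable names, including `streamFactory`, are hypothetical):
//
//     kj::HttpHeaderTable::Builder tableBuilder;
//     capnp::HttpOverCapnpFactory::HeaderIdBundle headerIds(tableBuilder);
//     capnp::HttpOverCapnpFactory clientFactory(streamFactory, headerIds.clone());
//     capnp::HttpOverCapnpFactory serverFactory(streamFactory, kj::mv(headerIds));
//     auto headerTable = tableBuilder.build();   // build() only after all factories exist
//
// clone() appears intended for exactly this kind of reuse, since the bundle itself is move-only.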
+ +HttpOverCapnpFactory::HeaderIdBundle::HeaderIdBundle(kj::HttpHeaderTable::Builder& builder) + : table(builder.getFutureTable()) { + auto commonHeaderNames = Schema::from().getEnumerants(); + nameCapnpToKj = kj::heapArray(commonHeaderNames.size()); + for (size_t i = 1; i < commonHeaderNames.size(); i++) { + kj::StringPtr nameText; + for (auto ann: commonHeaderNames[i].getProto().getAnnotations()) { + if (ann.getId() == COMMON_TEXT_ANNOTATION) { + nameText = ann.getValue().getText(); + break; + } + } + KJ_ASSERT(nameText != nullptr); + kj::HttpHeaderId headerId = builder.add(nameText); + nameCapnpToKj[i] = headerId; + maxHeaderId = kj::max(maxHeaderId, headerId.hashCode()); + } +} + +HttpOverCapnpFactory::HeaderIdBundle::HeaderIdBundle( + const kj::HttpHeaderTable& table, kj::Array nameCapnpToKj, size_t maxHeaderId) + : table(table), nameCapnpToKj(kj::mv(nameCapnpToKj)), maxHeaderId(maxHeaderId) {} + +HttpOverCapnpFactory::HeaderIdBundle HttpOverCapnpFactory::HeaderIdBundle::clone() const { + return HeaderIdBundle(table, kj::heapArray(nameCapnpToKj), maxHeaderId); +} + +HttpOverCapnpFactory::HttpOverCapnpFactory(ByteStreamFactory& streamFactory, + HeaderIdBundle headerIds) + : streamFactory(streamFactory), headerTable(headerIds.table), + nameCapnpToKj(kj::mv(headerIds.nameCapnpToKj)) { + auto commonHeaderNames = Schema::from().getEnumerants(); + nameKjToCapnp = kj::heapArray(headerIds.maxHeaderId + 1); + for (auto& slot: nameKjToCapnp) slot = capnp::CommonHeaderName::INVALID; + + for (size_t i = 1; i < commonHeaderNames.size(); i++) { + auto& slot = nameKjToCapnp[nameCapnpToKj[i].hashCode()]; + KJ_ASSERT(slot == capnp::CommonHeaderName::INVALID); + slot = static_cast(i); + } + + auto commonHeaderValues = Schema::from().getEnumerants(); + valueCapnpToKj = kj::heapArray(commonHeaderValues.size()); + for (size_t i = 1; i < commonHeaderValues.size(); i++) { + kj::StringPtr valueText; + for (auto ann: commonHeaderValues[i].getProto().getAnnotations()) { + if (ann.getId() == COMMON_TEXT_ANNOTATION) { + valueText = ann.getValue().getText(); + break; + } + } + KJ_ASSERT(valueText != nullptr); + valueCapnpToKj[i] = valueText; + valueKjToCapnp.insert(valueText, static_cast(i)); + } +} + +Orphan> HttpOverCapnpFactory::headersToCapnp( + const kj::HttpHeaders& headers, Orphanage orphanage) { + auto result = orphanage.newOrphan>(headers.size()); + auto rpcHeaders = result.get(); + uint i = 0; + headers.forEach([&](kj::HttpHeaderId id, kj::StringPtr value) { + auto capnpName = id.hashCode() < nameKjToCapnp.size() + ? 
nameKjToCapnp[id.hashCode()] + : capnp::CommonHeaderName::INVALID; + if (capnpName == capnp::CommonHeaderName::INVALID) { + auto header = rpcHeaders[i++].initUncommon(); + header.setName(id.toString()); + header.setValue(value); + } else { + auto header = rpcHeaders[i++].initCommon(); + header.setName(capnpName); + header.setValue(value); + } + }, [&](kj::StringPtr name, kj::StringPtr value) { + auto header = rpcHeaders[i++].initUncommon(); + header.setName(name); + header.setValue(value); + }); + KJ_ASSERT(i == rpcHeaders.size()); + return result; +} + +kj::HttpHeaders HttpOverCapnpFactory::headersToKj( + List::Reader capnpHeaders) const { + kj::HttpHeaders result(headerTable); + + for (auto header: capnpHeaders) { + switch (header.which()) { + case capnp::HttpHeader::COMMON: { + auto nv = header.getCommon(); + auto nameInt = static_cast(nv.getName()); + KJ_REQUIRE(nameInt < nameCapnpToKj.size(), "unknown common header name", nv.getName()); + + switch (nv.which()) { + case capnp::HttpHeader::Common::COMMON_VALUE: { + auto cvInt = static_cast(nv.getCommonValue()); + KJ_REQUIRE(nameInt < valueCapnpToKj.size(), + "unknown common header value", nv.getCommonValue()); + result.set(nameCapnpToKj[nameInt], valueCapnpToKj[cvInt]); + break; + } + case capnp::HttpHeader::Common::VALUE: { + auto headerId = nameCapnpToKj[nameInt]; + if (result.get(headerId) == nullptr) { + result.set(headerId, nv.getValue()); + } else { + // Unusual: This is a duplicate header, so fall back to add(), which may trigger + // comma-concatenation, except in certain cases where comma-concatentaion would + // be problematic. + result.add(headerId.toString(), nv.getValue()); + } + break; + } + } + break; + } + case capnp::HttpHeader::UNCOMMON: { + auto nv = header.getUncommon(); + result.add(nv.getName(), nv.getValue()); + } + } + } + + return result; +} + +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp.capnp new file mode 100644 index 00000000000..eb8578de696 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp.capnp @@ -0,0 +1,217 @@ +# Copyright (c) 2019 Cloudflare, Inc. and contributors +# Licensed under the MIT License: +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +@0xb665280aaff2e632; +# Cap'n Proto interface for HTTP. 
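# Editorial note (illustrative, not part of the upstream schema): a simple GET with no body, as
# produced by the C++ adapter in http-over-capnp.c++, corresponds roughly to a call like
#
#   startRequest(
#       request = (
#         method = get,
#         url = "/",
#         headers = [(common = (name = host, value = "example.com"))],
#         bodySize = (fixed = 0)),
#       context = <ClientRequestContext capability>)
#
# with the returned `requestBody` left null because the fixed body size is zero; the server then
# answers by calling startResponse() on the supplied context.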
+ +using import "byte-stream.capnp".ByteStream; + +$import "/capnp/c++.capnp".namespace("capnp"); + +interface HttpService { + startRequest @0 (request :HttpRequest, context :ClientRequestContext) + -> (requestBody :ByteStream, context :ServerRequestContext); + # Begin an HTTP request. + # + # The client sends the request method/url/headers. The server responds with a `ByteStream` where + # the client can make calls to stream up the request body. `requestBody` will be null in the case + # that request.bodySize.fixed == 0. + + interface ClientRequestContext { + # Provides callbacks for the server to send the response. + + startResponse @0 (response :HttpResponse) -> (body :ByteStream); + # Server calls this method to send the response status and headers and to begin streaming the + # response body. `body` will be null in the case that response.bodySize.fixed == 0, which is + # required for HEAD responses and status codes 204, 205, and 304. + + startWebSocket @1 (headers :List(HttpHeader), upSocket :WebSocket) + -> (downSocket :WebSocket); + # Server calls this method to indicate that the request is a valid WebSocket handshake and it + # wishes to accept it as a WebSocket. + # + # Client -> Server WebSocket frames will be sent via method calls on `upSocket`, while + # Server -> Client will be sent as calls to `downSocket`. + } + + interface ServerRequestContext { + # Represents execution of a particular request on the server side. + # + # Dropping this object before the request completes will cancel the request. + # + # ServerRequestContext is always a promise capability. The client must wait for it to + # resolve using whenMoreResolved() in order to find out when the server is really done + # processing the request. This will throw an exception if the server failed in some way that + # could not be captured in the HTTP response. Note that it's possible for such an exception to + # be thrown even after the response body has been completely transmitted. + } +} + +interface WebSocket { + sendText @0 (text :Text) -> stream; + sendData @1 (data :Data) -> stream; + # Send a text or data frame. + + close @2 (code :UInt16, reason :Text); + # Send a close frame. +} + +struct HttpRequest { + # Standard HTTP request metadata. + + method @0 :HttpMethod; + url @1 :Text; + headers @2 :List(HttpHeader); + bodySize :union { + unknown @3 :Void; # e.g. due to transfer-encoding: chunked + fixed @4 :UInt64; # e.g. due to content-length + } +} + +struct HttpResponse { + # Standard HTTP response metadata. + + statusCode @0 :UInt16; + statusText @1 :Text; # leave null if it matches the default for statusCode + headers @2 :List(HttpHeader); + bodySize :union { + unknown @3 :Void; # e.g. due to transfer-encoding: chunked + fixed @4 :UInt64; # e.g. due to content-length + } +} + +enum HttpMethod { + # This enum aligns precisely with the kj::HttpMethod enum. However, the backwards-compat + # constraints of a public-facing C++ enum vs. an internal Cap'n Proto interface differ in + # several ways, which could possibly lead to divergence someday. For now, a unit test verifies + # that they match exactly; if that test ever fails, we'll have to figure out what to do about it. 
+ + get @0; + head @1; + post @2; + put @3; + delete @4; + patch @5; + purge @6; + options @7; + trace @8; + + copy @9; + lock @10; + mkcol @11; + move @12; + propfind @13; + proppatch @14; + search @15; + unlock @16; + acl @17; + + report @18; + mkactivity @19; + checkout @20; + merge @21; + + msearch @22; + notify @23; + subscribe @24; + unsubscribe @25; +} + +annotation commonText @0x857745131db6fc83(enumerant) :Text; + +enum CommonHeaderName { + invalid @0; + # Dummy to serve as default value. Should never actually appear on wire. + + acceptCharset @1 $commonText("Accept-Charset"); + acceptEncoding @2 $commonText("Accept-Encoding"); + acceptLanguage @3 $commonText("Accept-Language"); + acceptRanges @4 $commonText("Accept-Ranges"); + accept @5 $commonText("Accept"); + accessControlAllowOrigin @6 $commonText("Access-Control-Allow-Origin"); + age @7 $commonText("Age"); + allow @8 $commonText("Allow"); + authorization @9 $commonText("Authorization"); + cacheControl @10 $commonText("Cache-Control"); + contentDisposition @11 $commonText("Content-Disposition"); + contentEncoding @12 $commonText("Content-Encoding"); + contentLanguage @13 $commonText("Content-Language"); + contentLength @14 $commonText("Content-Length"); + contentLocation @15 $commonText("Content-Location"); + contentRange @16 $commonText("Content-Range"); + contentType @17 $commonText("Content-Type"); + cookie @18 $commonText("Cookie"); + date @19 $commonText("Date"); + etag @20 $commonText("ETag"); + expect @21 $commonText("Expect"); + expires @22 $commonText("Expires"); + from @23 $commonText("From"); + host @24 $commonText("Host"); + ifMatch @25 $commonText("If-Match"); + ifModifiedSince @26 $commonText("If-Modified-Since"); + ifNoneMatch @27 $commonText("If-None-Match"); + ifRange @28 $commonText("If-Range"); + ifUnmodifiedSince @29 $commonText("If-Unmodified-Since"); + lastModified @30 $commonText("Last-Modified"); + link @31 $commonText("Link"); + location @32 $commonText("Location"); + maxForwards @33 $commonText("Max-Forwards"); + proxyAuthenticate @34 $commonText("Proxy-Authenticate"); + proxyAuthorization @35 $commonText("Proxy-Authorization"); + range @36 $commonText("Range"); + referer @37 $commonText("Referer"); + refresh @38 $commonText("Refresh"); + retryAfter @39 $commonText("Retry-After"); + server @40 $commonText("Server"); + setCookie @41 $commonText("Set-Cookie"); + strictTransportSecurity @42 $commonText("Strict-Transport-Security"); + transferEncoding @43 $commonText("Transfer-Encoding"); + userAgent @44 $commonText("User-Agent"); + vary @45 $commonText("Vary"); + via @46 $commonText("Via"); + wwwAuthenticate @47 $commonText("WWW-Authenticate"); +} + +enum CommonHeaderValue { + invalid @0; + + gzipDeflate @1 $commonText("gzip, deflate"); + + # TODO(someday): "gzip, deflate" is the only common header value recognized by HPACK. +} + +struct HttpHeader { + union { + common :group { + name @0 :CommonHeaderName; + union { + commonValue @1 :CommonHeaderValue; + value @2 :Text; + } + } + uncommon @3 :NameValue; + } + + struct NameValue { + name @0 :Text; + value @1 :Text; + } +} diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp.h new file mode 100644 index 00000000000..aedd6f0d835 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/http-over-capnp.h @@ -0,0 +1,84 @@ +// Copyright (c) 2019 Cloudflare, Inc. 
and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once +// Bridges from KJ HTTP to Cap'n Proto HTTP-over-RPC. + +#include +#include +#include +#include "byte-stream.h" + +namespace capnp { + +class HttpOverCapnpFactory { +public: + class HeaderIdBundle { + public: + HeaderIdBundle(kj::HttpHeaderTable::Builder& builder); + + HeaderIdBundle clone() const; + + private: + HeaderIdBundle(const kj::HttpHeaderTable& table, kj::Array nameCapnpToKj, + size_t maxHeaderId); + // Constructor for clone(). + + const kj::HttpHeaderTable& table; + + kj::Array nameCapnpToKj; + size_t maxHeaderId = 0; + + friend class HttpOverCapnpFactory; + }; + + HttpOverCapnpFactory(ByteStreamFactory& streamFactory, HeaderIdBundle headerIds); + + kj::Own capnpToKj(capnp::HttpService::Client rpcService); + capnp::HttpService::Client kjToCapnp(kj::Own service); + +private: + ByteStreamFactory& streamFactory; + const kj::HttpHeaderTable& headerTable; + kj::Array nameKjToCapnp; + kj::Array nameCapnpToKj; + kj::Array valueCapnpToKj; + kj::HashMap valueKjToCapnp; + + class RequestState; + + class CapnpToKjWebSocketAdapter; + class KjToCapnpWebSocketAdapter; + + class ClientRequestContextImpl; + class KjToCapnpHttpServiceAdapter; + + class ServerRequestContextImpl; + class CapnpToKjHttpServiceAdapter; + + kj::HttpHeaders headersToKj(capnp::List::Reader capnpHeaders) const; + // Returned headers may alias into `capnpHeaders`. 
+ + capnp::Orphan> headersToCapnp( + const kj::HttpHeaders& headers, capnp::Orphanage orphanage); +}; + +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc-test.c++ new file mode 100644 index 00000000000..bf78bb577b9 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc-test.c++ @@ -0,0 +1,100 @@ +// Copyright (c) 2018 Kenton Varda and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "json-rpc.h" +#include +#include + +namespace capnp { +namespace _ { // private +namespace { + +KJ_TEST("json-rpc basics") { + auto io = kj::setupAsyncIo(); + auto pipe = kj::newTwoWayPipe(); + + JsonRpc::ContentLengthTransport clientTransport(*pipe.ends[0]); + JsonRpc::ContentLengthTransport serverTransport(*pipe.ends[1]); + + int callCount = 0; + + JsonRpc client(clientTransport); + JsonRpc server(serverTransport, toDynamic(kj::heap(callCount))); + + auto cap = client.getPeer(); + auto req = cap.fooRequest(); + req.setI(123); + req.setJ(true); + auto resp = req.send().wait(io.waitScope); + KJ_EXPECT(resp.getX() == "foo"); + + KJ_EXPECT(callCount == 1); +} + +KJ_TEST("json-rpc error") { + auto io = kj::setupAsyncIo(); + auto pipe = kj::newTwoWayPipe(); + + JsonRpc::ContentLengthTransport clientTransport(*pipe.ends[0]); + JsonRpc::ContentLengthTransport serverTransport(*pipe.ends[1]); + + int callCount = 0; + + JsonRpc client(clientTransport); + JsonRpc server(serverTransport, toDynamic(kj::heap(callCount))); + + auto cap = client.getPeer(); + KJ_EXPECT_THROW_MESSAGE("Method not implemented", cap.barRequest().send().wait(io.waitScope)); +} + +KJ_TEST("json-rpc multiple calls") { + auto io = kj::setupAsyncIo(); + auto pipe = kj::newTwoWayPipe(); + + JsonRpc::ContentLengthTransport clientTransport(*pipe.ends[0]); + JsonRpc::ContentLengthTransport serverTransport(*pipe.ends[1]); + + int callCount = 0; + + JsonRpc client(clientTransport); + JsonRpc server(serverTransport, toDynamic(kj::heap(callCount))); + + auto cap = client.getPeer(); + auto req1 = cap.fooRequest(); + req1.setI(123); + req1.setJ(true); + auto promise1 = req1.send(); + + auto req2 = cap.bazRequest(); + initTestMessage(req2.initS()); + auto promise2 = req2.send(); + + auto resp1 = promise1.wait(io.waitScope); + KJ_EXPECT(resp1.getX() == "foo"); + + auto resp2 = promise2.wait(io.waitScope); + + KJ_EXPECT(callCount == 2); +} + +} // namespace +} 
// namespace _ (private) +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc.c++ new file mode 100644 index 00000000000..bd1fbf125bb --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc.c++ @@ -0,0 +1,340 @@ +// Copyright (c) 2018 Kenton Varda and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "json-rpc.h" +#include +#include + +namespace capnp { + +static constexpr uint64_t JSON_NAME_ANNOTATION_ID = 0xfa5b1fd61c2e7c3dull; +static constexpr uint64_t JSON_NOTIFICATION_ANNOTATION_ID = 0xa0a054dea32fd98cull; + +class JsonRpc::CapabilityImpl final: public DynamicCapability::Server { +public: + CapabilityImpl(JsonRpc& parent, InterfaceSchema schema) + : DynamicCapability::Server(schema), parent(parent) {} + + kj::Promise call(InterfaceSchema::Method method, + CallContext context) override { + auto proto = method.getProto(); + bool isNotification = false; + kj::StringPtr name = proto.getName(); + for (auto annotation: proto.getAnnotations()) { + switch (annotation.getId()) { + case JSON_NAME_ANNOTATION_ID: + name = annotation.getValue().getText(); + break; + case JSON_NOTIFICATION_ANNOTATION_ID: + isNotification = true; + break; + } + } + + capnp::MallocMessageBuilder message; + auto value = message.getRoot(); + auto list = value.initObject(3 + !isNotification); + + uint index = 0; + + auto jsonrpc = list[index++]; + jsonrpc.setName("jsonrpc"); + jsonrpc.initValue().setString("2.0"); + + uint callId = parent.callCount++; + + if (!isNotification) { + auto id = list[index++]; + id.setName("id"); + id.initValue().setNumber(callId); + } + + auto methodName = list[index++]; + methodName.setName("method"); + methodName.initValue().setString(name); + + auto params = list[index++]; + params.setName("params"); + parent.codec.encode(context.getParams(), params.initValue()); + + auto writePromise = parent.queueWrite(parent.codec.encode(value)); + + if (isNotification) { + auto sproto = context.getResultsType().getProto().getStruct(); + MessageSize size { sproto.getDataWordCount(), sproto.getPointerCount() }; + context.initResults(size); + return kj::mv(writePromise); + } else { + auto paf = kj::newPromiseAndFulfiller(); + parent.awaitedResponses.insert(callId, AwaitedResponse { context, kj::mv(paf.fulfiller) }); + auto promise = writePromise.then([p = kj::mv(paf.promise)]() mutable { return kj::mv(p); }); 
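+      // If the caller cancels before a response arrives, the kj::defer attached below still
+      // removes the awaitedResponses entry when the returned promise is destroyed.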
+ auto& parentRef = parent; + return promise.attach(kj::defer([&parentRef,callId]() { + parentRef.awaitedResponses.erase(callId); + })); + } + } + +private: + JsonRpc& parent; +}; + +JsonRpc::JsonRpc(Transport& transport, DynamicCapability::Client interface) + : JsonRpc(transport, kj::mv(interface), kj::newPromiseAndFulfiller()) {} +JsonRpc::JsonRpc(Transport& transport, DynamicCapability::Client interfaceParam, + kj::PromiseFulfillerPair paf) + : transport(transport), + interface(kj::mv(interfaceParam)), + errorPromise(paf.promise.fork()), + errorFulfiller(kj::mv(paf.fulfiller)), + readTask(readLoop().eagerlyEvaluate([this](kj::Exception&& e) { + errorFulfiller->reject(kj::mv(e)); + })), + tasks(*this) { + codec.handleByAnnotation(interface.getSchema()); + codec.handleByAnnotation(); + + for (auto method: interface.getSchema().getMethods()) { + auto proto = method.getProto(); + kj::StringPtr name = proto.getName(); + for (auto annotation: proto.getAnnotations()) { + switch (annotation.getId()) { + case JSON_NAME_ANNOTATION_ID: + name = annotation.getValue().getText(); + break; + } + } + methodMap.insert(name, method); + } +} + +DynamicCapability::Client JsonRpc::getPeer(InterfaceSchema schema) { + codec.handleByAnnotation(interface.getSchema()); + return kj::heap(*this, schema); +} + +static kj::HttpHeaderTable& staticHeaderTable() { + static kj::HttpHeaderTable HEADER_TABLE; + return HEADER_TABLE; +} + +kj::Promise JsonRpc::queueWrite(kj::String text) { + auto fork = writeQueue.then([this, text = kj::mv(text)]() mutable { + auto promise = transport.send(text); + return promise.attach(kj::mv(text)); + }).eagerlyEvaluate([this](kj::Exception&& e) { + errorFulfiller->reject(kj::mv(e)); + }).fork(); + writeQueue = fork.addBranch(); + return fork.addBranch(); +} + +void JsonRpc::queueError(kj::Maybe id, int code, kj::StringPtr message) { + MallocMessageBuilder capnpMessage; + auto jsonResponse = capnpMessage.getRoot(); + jsonResponse.setJsonrpc("2.0"); + KJ_IF_MAYBE(i, id) { + jsonResponse.setId(*i); + } else { + jsonResponse.initId().setNull(); + } + auto error = jsonResponse.initError(); + error.setCode(code); + error.setMessage(message); + + // OK to discard result of queueWrite() since it's just one branch of a fork. + queueWrite(codec.encode(jsonResponse)); +} + +kj::Promise JsonRpc::readLoop() { + return transport.receive().then([this](kj::String message) -> kj::Promise { + MallocMessageBuilder capnpMessage; + auto rpcMessageBuilder = capnpMessage.getRoot(); + + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + codec.decode(message, rpcMessageBuilder); + })) { + queueError(nullptr, -32700, kj::str("Parse error: ", exception->getDescription())); + return readLoop(); + } + + KJ_CONTEXT("decoding JSON-RPC message", message); + + auto rpcMessage = rpcMessageBuilder.asReader(); + + if (!rpcMessage.hasJsonrpc()) { + queueError(nullptr, -32700, kj::str("Missing 'jsonrpc' field.")); + return readLoop(); + } else if (rpcMessage.getJsonrpc() != "2.0") { + queueError(nullptr, -32700, + kj::str("Unknown JSON-RPC version. 
This peer implements version '2.0'.")); + return readLoop(); + } + + switch (rpcMessage.which()) { + case json::RpcMessage::NONE: + queueError(nullptr, -32700, kj::str("message has none of params, result, or error")); + break; + + case json::RpcMessage::PARAMS: { + // a call + KJ_IF_MAYBE(method, methodMap.find(rpcMessage.getMethod())) { + auto req = interface.newRequest(*method); + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + codec.decode(rpcMessage.getParams(), req); + })) { + kj::Maybe id; + if (rpcMessage.hasId()) id = rpcMessage.getId(); + queueError(id, -32602, + kj::str("Type error in method params: ", exception->getDescription())); + break; + } + + if (rpcMessage.hasId()) { + auto id = rpcMessage.getId(); + auto idCopy = kj::heapArray(id.totalSize().wordCount + 1); + memset(idCopy.begin(), 0, idCopy.asBytes().size()); + copyToUnchecked(id, idCopy); + auto idPtr = readMessageUnchecked(idCopy.begin()); + + auto promise = req.send() + .then([this,idPtr](Response response) mutable { + MallocMessageBuilder capnpMessage; + auto jsonResponse = capnpMessage.getRoot(); + jsonResponse.setJsonrpc("2.0"); + jsonResponse.setId(idPtr); + codec.encode(DynamicStruct::Reader(response), jsonResponse.initResult()); + return queueWrite(codec.encode(jsonResponse)); + }, [this,idPtr](kj::Exception&& e) { + MallocMessageBuilder capnpMessage; + auto jsonResponse = capnpMessage.getRoot(); + jsonResponse.setJsonrpc("2.0"); + jsonResponse.setId(idPtr); + auto error = jsonResponse.initError(); + switch (e.getType()) { + case kj::Exception::Type::FAILED: + error.setCode(-32000); + break; + case kj::Exception::Type::DISCONNECTED: + error.setCode(-32001); + break; + case kj::Exception::Type::OVERLOADED: + error.setCode(-32002); + break; + case kj::Exception::Type::UNIMPLEMENTED: + error.setCode(-32601); // method not found + break; + } + error.setMessage(e.getDescription()); + return queueWrite(codec.encode(jsonResponse)); + }); + tasks.add(promise.attach(kj::mv(idCopy))); + } else { + // No 'id', so this is a notification. + tasks.add(req.send().ignoreResult().catch_([](kj::Exception&& exception) { + if (exception.getType() != kj::Exception::Type::UNIMPLEMENTED) { + KJ_LOG(ERROR, "JSON-RPC notification threw exception into the abyss", exception); + } + })); + } + } else { + if (rpcMessage.hasId()) { + queueError(rpcMessage.getId(), -32601, "Method not found"); + } else { + // Ignore notification for unknown method. + } + } + break; + } + + case json::RpcMessage::RESULT: { + auto id = rpcMessage.getId(); + if (!id.isNumber()) { + // JSON-RPC doesn't define what to do if receiving a response with an invalid id. + KJ_LOG(ERROR, "JSON-RPC response has invalid ID"); + } else KJ_IF_MAYBE(awaited, awaitedResponses.find((uint)id.getNumber())) { + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + codec.decode(rpcMessage.getResult(), awaited->context.getResults()); + awaited->fulfiller->fulfill(); + })) { + // Errors always propagate from callee to caller, so we don't want to throw this error + // back to the server. + awaited->fulfiller->reject(kj::mv(*exception)); + } + } else { + // Probably, this is the response to a call that was canceled. + } + break; + } + + case json::RpcMessage::ERROR: { + auto id = rpcMessage.getId(); + if (id.isNull()) { + // Error message will be logged by KJ_CONTEXT, above. + KJ_LOG(ERROR, "peer reports JSON-RPC protocol error"); + } else if (!id.isNumber()) { + // JSON-RPC doesn't define what to do if receiving a response with an invalid id. 
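+            // (This implementation only ever sends numeric ids -- see RpcMessage.id -- so a
+            // non-numeric id cannot correspond to any outstanding call anyway.)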
+ KJ_LOG(ERROR, "JSON-RPC response has invalid ID"); + } else KJ_IF_MAYBE(awaited, awaitedResponses.find((uint)id.getNumber())) { + auto error = rpcMessage.getError(); + auto code = error.getCode(); + kj::Exception::Type type = + code == -32601 ? kj::Exception::Type::UNIMPLEMENTED + : kj::Exception::Type::FAILED; + awaited->fulfiller->reject(kj::Exception( + type, __FILE__, __LINE__, kj::str(error.getMessage()))); + } else { + // Probably, this is the response to a call that was canceled. + } + break; + } + } + + return readLoop(); + }); +} + +void JsonRpc::taskFailed(kj::Exception&& exception) { + errorFulfiller->reject(kj::mv(exception)); +} + +// ======================================================================================= + +JsonRpc::ContentLengthTransport::ContentLengthTransport(kj::AsyncIoStream& stream) + : stream(stream), input(kj::newHttpInputStream(stream, staticHeaderTable())) {} +JsonRpc::ContentLengthTransport::~ContentLengthTransport() noexcept(false) {} + +kj::Promise JsonRpc::ContentLengthTransport::send(kj::StringPtr text) { + auto headers = kj::str("Content-Length: ", text.size(), "\r\n\r\n"); + parts[0] = headers.asBytes(); + parts[1] = text.asBytes(); + return stream.write(parts).attach(kj::mv(headers)); +} + +kj::Promise JsonRpc::ContentLengthTransport::receive() { + return input->readMessage() + .then([](kj::HttpInputStream::Message&& message) { + auto promise = message.body->readAllText(); + return promise.attach(kj::mv(message.body)); + }); +} + +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc.capnp new file mode 100644 index 00000000000..9380788cd74 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc.capnp @@ -0,0 +1,43 @@ +@0xd04299800d6725ba; + +$import "/capnp/c++.capnp".namespace("capnp::json"); + +using Json = import "json.capnp"; + +struct RpcMessage { + jsonrpc @0 :Text; + # Must always be "2.0". + + id @1 :Json.Value; + # Correlates a request to a response. Technically must be a string or number. Our implementation + # will always use a number for calls it initiates, and will reflect IDs of any type for calls + # it receives. + # + # May be omitted when caller doesn't care about the response. The implementation will omit `id` + # and return immediately when calling methods with the annotation `@notification` (defined in + # `json.capnp`). The `@notification` annotation only matters for outgoing calls; for incoming + # calls, it's the client's decision whether it wants to receive the response. + + method @2 :Text; + # Method name. Only expected when `params` is sent. + + union { + none @3 :Void $Json.name("!missing params, result, or error"); + # Dummy default value of union, to detect when none of the fields below were received. + + params @4 :Json.Value; + # Initiates a call. + + result @5 :Json.Value; + # Completes a call. + + error @6 :Error; + # Completes a call throwing an exception. 
+ } + + struct Error { + code @0 :Int32; + message @1 :Text; + data @2 :Json.Value; + } +} diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc.h new file mode 100644 index 00000000000..6954c644ede --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-rpc.h @@ -0,0 +1,112 @@ +// Copyright (c) 2018 Kenton Varda and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once + +#include "json.h" +#include +#include +#include + +namespace kj { class HttpInputStream; } + +namespace capnp { + +class JsonRpc: private kj::TaskSet::ErrorHandler { + // An implementation of JSON-RPC 2.0: https://www.jsonrpc.org/specification + // + // This allows you to use Cap'n Proto interface declarations to implement JSON-RPC protocols. + // Of course, JSON-RPC does not support capabilities. So, the client and server each expose + // exactly one object to the other. + +public: + class Transport; + class ContentLengthTransport; + + JsonRpc(Transport& transport, DynamicCapability::Client interface = {}); + KJ_DISALLOW_COPY(JsonRpc); + + DynamicCapability::Client getPeer(InterfaceSchema schema); + + template + typename T::Client getPeer() { + return getPeer(Schema::from()).template castAs(); + } + + kj::Promise onError() { return errorPromise.addBranch(); } + +private: + JsonCodec codec; + Transport& transport; + DynamicCapability::Client interface; + kj::HashMap methodMap; + uint callCount = 0; + kj::Promise writeQueue = kj::READY_NOW; + kj::ForkedPromise errorPromise; + kj::Own> errorFulfiller; + kj::Promise readTask; + + struct AwaitedResponse { + CallContext context; + kj::Own> fulfiller; + }; + kj::HashMap awaitedResponses; + + kj::TaskSet tasks; + + class CapabilityImpl; + + kj::Promise queueWrite(kj::String text); + void queueError(kj::Maybe id, int code, kj::StringPtr message); + + kj::Promise readLoop(); + + void taskFailed(kj::Exception&& exception) override; + + JsonRpc(Transport& transport, DynamicCapability::Client interface, + kj::PromiseFulfillerPair paf); +}; + +class JsonRpc::Transport { +public: + virtual kj::Promise send(kj::StringPtr text) = 0; + virtual kj::Promise receive() = 0; +}; + +class JsonRpc::ContentLengthTransport: public Transport { + // The transport used by Visual Studio Code: Each message is composed like an HTTP message + // without the first line. 
That is, a list of headers, followed by a blank line, followed by the + // content whose length is determined by the content-length header. +public: + explicit ContentLengthTransport(kj::AsyncIoStream& stream); + ~ContentLengthTransport() noexcept(false); + KJ_DISALLOW_COPY(ContentLengthTransport); + + kj::Promise send(kj::StringPtr text) override; + kj::Promise receive() override; + +private: + kj::AsyncIoStream& stream; + kj::Own input; + kj::ArrayPtr parts[2]; +}; + +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-test.c++ index c7e24901260..7a632ebab83 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-test.c++ @@ -647,6 +647,23 @@ KJ_TEST("maximum nesting depth") { } } +KJ_TEST("unknown fields") { + JsonCodec json; + MallocMessageBuilder message; + auto root = message.initRoot(); + auto valid = R"({"foo": "a"})"_kj; + auto unknown = R"({"foo": "a", "unknown-field": "b"})"_kj; + json.decode(valid, root); + json.decode(unknown, root); + json.setRejectUnknownFields(true); + json.decode(valid, root); + KJ_EXPECT_THROW_MESSAGE("Unknown field", json.decode(unknown, root)); + + // Verify unknown field rejection still works when handling by annotation. + json.handleByAnnotation(); + KJ_EXPECT_THROW_MESSAGE("Unknown field", json.decode(unknown, root)); +} + class TestCallHandler: public JsonCodec::Handler { public: void encode(const JsonCodec& codec, Text::Reader input, @@ -975,6 +992,21 @@ KJ_TEST("rename fields") { } } +KJ_TEST("base64 union encoded correctly") { + // At one point field handlers were not correctly applied when the field was a member of a union + // in a type that was handled by annotation. + + JsonCodec json; + json.handleByAnnotation(); + json.setPrettyPrint(true); + + MallocMessageBuilder message; + auto root = message.getRoot(); + root.initFoo(5); + + KJ_EXPECT(json.encode(root) == "{\"foo\": \"AAAAAAA=\"}", json.encode(root)); +} + } // namespace } // namespace _ (private) } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-test.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-test.capnp index 1446f8a6b22..4406fd24b5f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-test.capnp +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json-test.capnp @@ -114,3 +114,10 @@ enum TestJsonAnnotatedEnum { baz @2 $Json.name("renamed-baz"); qux @3; } + +struct TestBase64Union { + union { + foo @0 :Data $Json.base64; + bar @1 :Text; + } +} diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.c++ index e30ade44c94..dd8b07b0e12 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.c++ @@ -20,9 +20,6 @@ // THE SOFTWARE. 
#include "json.h" -#include // for HUGEVAL to check for overflow in strtod -#include // strtod -#include // for strtod errors #include #include #include @@ -37,6 +34,7 @@ struct JsonCodec::Impl { bool prettyPrint = false; HasMode hasMode = HasMode::NON_NULL; size_t maxNestingDepth = 64; + bool rejectUnknownFields = false; kj::HashMap typeHandlers; kj::HashMap fieldHandlers; @@ -109,7 +107,6 @@ struct JsonCodec::Impl { switch (c) { case '\"': escaped.addAll(kj::StringPtr("\\\"")); break; case '\\': escaped.addAll(kj::StringPtr("\\\\")); break; - case '/' : escaped.addAll(kj::StringPtr("\\/" )); break; case '\b': escaped.addAll(kj::StringPtr("\\b")); break; case '\f': escaped.addAll(kj::StringPtr("\\f")); break; case '\n': escaped.addAll(kj::StringPtr("\\n")); break; @@ -186,6 +183,8 @@ void JsonCodec::setMaxNestingDepth(size_t maxNestingDepth) { void JsonCodec::setHasMode(HasMode mode) { impl->hasMode = mode; } +void JsonCodec::setRejectUnknownFields(bool enabled) { impl->rejectUnknownFields = enabled; } + kj::String JsonCodec::encode(DynamicValue::Reader value, Type type) const { MallocMessageBuilder message; auto json = message.getRoot(); @@ -387,7 +386,7 @@ void JsonCodec::decodeObject(JsonValue::Reader input, StructSchema type, Orphana KJ_IF_MAYBE(fieldSchema, type.findFieldByName(field.getName())) { decodeField(*fieldSchema, field.getValue(), orphanage, output); } else { - // Unknown json fields are ignored to allow schema evolution + KJ_REQUIRE(!impl->rejectUnknownFields, "Unknown field", field.getName()); } } } @@ -926,7 +925,7 @@ public: // * Named unions, which are special cases of named groups. In this case, the union may be // annotated by annotating the field. In this case, we receive a non-null `discriminator` // as a constructor parameter, and schemaProto.getAnnotations() must be empty because - // it's not possible to annotate a group's type (becaues the type is anonymous). + // it's not possible to annotate a group's type (because the type is anonymous). // * Unnamed unions, of which there can only be one in any particular scope. In this case, // the parent struct type itself is annotated. // So if we received `null` as the constructor parameter, check for annotations on the struct @@ -1172,7 +1171,7 @@ private: // The parent struct is a flattened union, and some of the union's members are flattened // structs or groups, and this field is possibly a member of one or more of them. `index` // is not used, because it's possible that the same field name appears in multiple variants. - // Intsead, the parser must find the union tag, and then can descend and attempt to parse + // Instead, the parser must find the union tag, and then can descend and attempt to parse // the field in the context of whichever variant is selected. UNION_VALUE @@ -1255,7 +1254,7 @@ private: // When we have an explicit union discriminant, we don't need to encode void fields. } else { flattenedFields.add(FlattenedField { - prefix, info.name, which->getType(), reader.get(*which) }); + prefix, info.name, *which, reader.get(*which) }); } } } @@ -1317,7 +1316,8 @@ private: KJ_UNREACHABLE; } else { - // Ignore undefined field. + // Ignore undefined field -- unless the flag is set to reject them. 
+ KJ_REQUIRE(!codec.impl->rejectUnknownFields, "Unknown field", name); return true; } } @@ -1384,7 +1384,7 @@ class JsonCodec::JsonValueHandler final: public JsonCodec::Handler b_a0a054dea32fd98c = { + { 0, 0, 0, 0, 5, 0, 6, 0, + 140, 217, 47, 163, 222, 84, 160, 160, + 24, 0, 0, 0, 5, 0, 0, 2, + 52, 94, 58, 164, 151, 146, 249, 142, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 21, 0, 0, 0, 42, 1, 0, 0, + 37, 0, 0, 0, 7, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 32, 0, 0, 0, 3, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 99, 97, 112, 110, 112, 47, 99, 111, + 109, 112, 97, 116, 47, 106, 115, 111, + 110, 46, 99, 97, 112, 110, 112, 58, + 110, 111, 116, 105, 102, 105, 99, 97, + 116, 105, 111, 110, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, } +}; +::capnp::word const* const bp_a0a054dea32fd98c = b_a0a054dea32fd98c.words; +#if !CAPNP_LITE +const ::capnp::_::RawSchema s_a0a054dea32fd98c = { + 0xa0a054dea32fd98c, b_a0a054dea32fd98c.words, 22, nullptr, nullptr, + 0, 0, nullptr, nullptr, nullptr, { &s_a0a054dea32fd98c, nullptr, nullptr, 0, 0, nullptr } +}; +#endif // !CAPNP_LITE } // namespace schemas } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.capnp.h index 1c397d26227..1454e3e6494 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.capnp.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.capnp.h @@ -9,11 +9,13 @@ #include #endif // !CAPNP_LITE -#if CAPNP_VERSION != 7000 +#if CAPNP_VERSION != 9001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif +CAPNP_BEGIN_HEADER + namespace capnp { namespace schemas { @@ -27,6 +29,7 @@ CAPNP_DECLARE_SCHEMA(cfa794e8d19a0162); CAPNP_DECLARE_SCHEMA(c2f8c20c293e5319); CAPNP_DECLARE_SCHEMA(d7d879450a253e4b); CAPNP_DECLARE_SCHEMA(f061e22f0ae5c7b5); +CAPNP_DECLARE_SCHEMA(a0a054dea32fd98c); } // namespace schemas } // namespace capnp @@ -1172,3 +1175,5 @@ inline ::capnp::Orphan< ::capnp::Text> DiscriminatorOptions::Builder::disownValu } // namespace } // namespace +CAPNP_END_HEADER + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.h index dd0c7465bc4..f5dbf38b423 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/json.h @@ -60,9 +60,7 @@ class JsonCodec { // different ways people do this. // - Encoding/decoding capabilities and AnyPointers requires registering a Handler, since there's // no obvious default behavior. - // - When decoding, unrecognized field names are ignored. Note: This means that JSON is NOT a - // good format for receiving input from a human. Consider `capnp eval` or the SchemaParser - // library for human input. + // - When decoding, fields with unknown names are ignored by default to allow schema evolution. public: JsonCodec(); @@ -85,6 +83,11 @@ class JsonCodec { // setHasMode(HasMode::NON_DEFAULT) to specify that default-valued primitive fields should be // omitted as well. + void setRejectUnknownFields(bool enable); + // Choose whether decoding JSON with unknown fields should produce an error. You may trade + // allowing schema evolution against a guarantee that all data is preserved when decoding JSON + // by toggling this option. The default is to ignore unknown fields. 
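+  //
+  // Minimal usage sketch (field names are illustrative; `root` stands for any struct builder):
+  //
+  //   JsonCodec json;
+  //   json.setRejectUnknownFields(true);
+  //   json.decode(R"({"knownField": 1, "bogus": 2})"_kj, root);  // throws "Unknown field"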
+ template kj::String encode(T&& value) const; // Encode any Cap'n Proto value to JSON, including primitives and @@ -303,6 +306,12 @@ void JsonCodec::encode(T&& value, JsonValue::Builder output) const { encode(DynamicValue::Reader(ReaderFor(kj::fwd(value))), Type::from(), output); } +template <> +inline void JsonCodec::encode( + DynamicStruct::Reader&& value, JsonValue::Builder output) const { + encode(DynamicValue::Reader(value), value.getSchema(), output); +} + template inline Orphan JsonCodec::decode(JsonValue::Reader input, Orphanage orphanage) const { return decode(input, Type::from(), orphanage).template releaseAs(); diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/std-iterator.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/std-iterator.h new file mode 100644 index 00000000000..1e6d7b947a0 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/std-iterator.h @@ -0,0 +1,39 @@ +// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once + +// This exposes IndexingIterator as something compatible with std::iterator so that things like +// std::copy work with List::begin/List::end. + +// Make sure that if this header is before list.h by the user it includes it to make +// IndexingIterator visible to avoid brittle header problems. 
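+
+// Purely illustrative usage sketch (`xs` stands for any capnp::List<int32_t>::Reader):
+//
+//   std::vector<int32_t> v(xs.size());
+//   std::copy(xs.begin(), xs.end(), v.begin());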
+#include "../list.h" +#include + +namespace std { + +template +struct iterator_traits> + : public std::iterator {}; + +} // namespace std + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/websocket-rpc-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/websocket-rpc-test.c++ new file mode 100644 index 00000000000..359573a8dc6 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/websocket-rpc-test.c++ @@ -0,0 +1,113 @@ +// Copyright (c) 2021 Ian Denhardt and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "websocket-rpc.h" +#include + +#include + +KJ_TEST("WebSocketMessageStream") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + auto pipe = kj::newWebSocketPipe(); + + auto msgStreamA = capnp::WebSocketMessageStream(*pipe.ends[0]); + auto msgStreamB = capnp::WebSocketMessageStream(*pipe.ends[1]); + + // Make a message, fill it with some stuff + capnp::MallocMessageBuilder originalMsg; + auto object = originalMsg.initRoot().initStructList(10); + object[0].setTextField("Test"); + object[1].initStructField().setTextField("A string"); + object[2].setTextField("Another field"); + object[3].setInt64Field(42); + auto originalSegments = originalMsg.getSegmentsForOutput(); + + // Send the message across the websocket, make sure it comes out unharmed. + auto writePromise = msgStreamA.writeMessage(nullptr, originalSegments); + msgStreamB.tryReadMessage(nullptr) + .then([&](auto maybeResult) -> kj::Promise { + KJ_IF_MAYBE(result, maybeResult) { + KJ_ASSERT(result->fds.size() == 0); + KJ_ASSERT(result->reader->getSegment(originalSegments.size()) == nullptr); + for(size_t i = 0; i < originalSegments.size(); i++) { + auto oldSegment = originalSegments[i]; + auto newSegment = result->reader->getSegment(i); + + KJ_ASSERT(oldSegment.size() == newSegment.size()); + KJ_ASSERT(memcmp( + &oldSegment[0], + &newSegment[0], + oldSegment.size() * sizeof(capnp::word) + ) == 0); + } + return kj::READY_NOW; + } else { + KJ_FAIL_ASSERT("Reading first message failed"); + } + }).wait(waitScope); + writePromise.wait(waitScope); + + // Close the websocket, and make sure the other end gets nullptr when reading. 
+ auto endPromise = msgStreamA.end(); + msgStreamB.tryReadMessage(nullptr).then([](auto maybe) -> kj::Promise { + KJ_IF_MAYBE(segments, maybe) { + KJ_FAIL_ASSERT("Should have gotten nullptr after websocket was closed"); + } + return kj::READY_NOW; + }).wait(waitScope); + endPromise.wait(waitScope); +} + +KJ_TEST("WebSocketMessageStreamByteCount") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + auto pipe1 = kj::newWebSocketPipe(); + auto pipe2 = kj::newWebSocketPipe(); + + auto msgStreamA = capnp::WebSocketMessageStream(*pipe1.ends[0]); + auto msgStreamB = capnp::WebSocketMessageStream(*pipe2.ends[1]); + + auto pumpTask = pipe1.ends[1]->pumpTo(*pipe2.ends[0]); + + capnp::MallocMessageBuilder originalMsg; + auto object = originalMsg.initRoot().initStructList(10); + object[0].setTextField("Test"); + object[1].initStructField().setTextField("A string"); + object[2].setTextField("Another field"); + object[3].setInt64Field(42); + auto originalSegments = originalMsg.getSegmentsForOutput(); + + auto writePromise = msgStreamA.writeMessage(nullptr, originalSegments); + msgStreamB.tryReadMessage(nullptr).wait(waitScope); + writePromise.wait(waitScope); + + auto endPromise = msgStreamA.end(); + msgStreamB.tryReadMessage(nullptr).wait(waitScope); + pumpTask.wait(waitScope); + endPromise.wait(waitScope); + KJ_EXPECT(pipe1.ends[0]->sentByteCount() == 2585); + KJ_EXPECT(pipe1.ends[1]->receivedByteCount() == 2585); + KJ_EXPECT(pipe2.ends[0]->sentByteCount() == 2585); + KJ_EXPECT(pipe2.ends[1]->receivedByteCount() == 2585); +} diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/websocket-rpc.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/websocket-rpc.c++ new file mode 100644 index 00000000000..1db2ebc02ca --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/websocket-rpc.c++ @@ -0,0 +1,128 @@ +// Copyright (c) 2021 Ian Denhardt and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +#include +#include +#include + +namespace capnp { + +WebSocketMessageStream::WebSocketMessageStream(kj::WebSocket& socket) + : socket(socket) + {}; + +kj::Promise> WebSocketMessageStream::tryReadMessage( + kj::ArrayPtr fdSpace, + ReaderOptions options, kj::ArrayPtr scratchSpace) { + return socket.receive(options.traversalLimitInWords * sizeof(word)) + .then([options](auto msg) -> kj::Promise> { + KJ_SWITCH_ONEOF(msg) { + KJ_CASE_ONEOF(closeMsg, kj::WebSocket::Close) { + return kj::Maybe(); + } + KJ_CASE_ONEOF(str, kj::String) { + KJ_FAIL_REQUIRE( + "Unexpected websocket text message; expected only binary messages."); + break; + } + KJ_CASE_ONEOF(bytes, kj::Array) { + kj::Own reader; + size_t sizeInWords = bytes.size() / sizeof(word); + if (reinterpret_cast(bytes.begin()) % alignof(word) == 0) { + reader = kj::heap( + kj::arrayPtr( + reinterpret_cast(bytes.begin()), + sizeInWords + ), + options).attach(kj::mv(bytes)); + } else { + // The array is misaligned, so we need to copy it. + auto words = kj::heapArray(sizeInWords); + + // Note: can't just use bytes.size(), since the the target buffer may + // be shorter due to integer division. + memcpy(words.begin(), bytes.begin(), sizeInWords * sizeof(word)); + reader = kj::heap( + kj::arrayPtr(words.begin(), sizeInWords), + options).attach(kj::mv(words)); + } + return kj::Maybe(MessageReaderAndFds { + kj::mv(reader), + nullptr + }); + } + } + KJ_UNREACHABLE; + }); +} + +kj::Promise WebSocketMessageStream::writeMessage( + kj::ArrayPtr fds, + kj::ArrayPtr> segments) { + // TODO(perf): Right now the WebSocket interface only supports send() for + // contiguous arrays, so we need to copy the whole message into a new buffer + // in order to send it, whereas ideally we could just write each segment + // (and the segment table) in sequence. Perhaps we should extend the WebSocket + // interface to be able to send an ArrayPtr> as one binary + // message, and then use that to avoid an extra copy here. + + auto stream = kj::heap( + computeSerializedSizeInWords(segments) * sizeof(word)); + capnp::writeMessage(*stream, segments); + auto arrayPtr = stream->getArray(); + return socket.send(arrayPtr).attach(kj::mv(stream)); +} + +kj::Promise WebSocketMessageStream::writeMessages( + kj::ArrayPtr>> messages) { + // TODO(perf): Extend WebSocket interface with a way to write multiple messages at once. + + if(messages.size() == 0) { + return kj::READY_NOW; + } + return writeMessage(nullptr, messages[0]) + .then([this, messages = messages.slice(1, messages.size())]() mutable -> kj::Promise { + return writeMessages(messages); + }); +} + +kj::Maybe WebSocketMessageStream::getSendBufferSize() { + return nullptr; +} + +kj::Promise WebSocketMessageStream::end() { + return socket.close( + 1005, // most generic code, indicates "No Status Received." + // Since the MessageStream API doesn't tell us why + // we're closing the connection, this is the best + // we can do. This is consistent with what browser + // implementations do if no status is provided, see: + // + // * https://developer.mozilla.org/en-US/docs/Web/API/WebSocket/close + // * https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent + + "Capnp connection closed" // Similarly not much information to go on here, + // but this at least lets us trace this back to + // capnp. 
+ ); +}; + +}; diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compat/websocket-rpc.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/websocket-rpc.h new file mode 100644 index 00000000000..a94b27adc24 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compat/websocket-rpc.h @@ -0,0 +1,53 @@ +// Copyright (c) 2021 Ian Denhardt and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once + +#include +#include + +namespace capnp { + +class WebSocketMessageStream final : public MessageStream { + // An implementation of MessageStream that sends messages over a websocket. + // + // Each capnproto message is sent in a single binary websocket frame. +public: + WebSocketMessageStream(kj::WebSocket& socket); + + // Implements MessageStream + kj::Promise> tryReadMessage( + kj::ArrayPtr fdSpace, + ReaderOptions options = ReaderOptions(), kj::ArrayPtr scratchSpace = nullptr) override; + kj::Promise writeMessage( + kj::ArrayPtr fds, + kj::ArrayPtr> segments) override + KJ_WARN_UNUSED_RESULT; + kj::Promise writeMessages( + kj::ArrayPtr>> messages) override + KJ_WARN_UNUSED_RESULT; + kj::Maybe getSendBufferSize() override; + kj::Promise end() override; +private: + kj::WebSocket& socket; +}; + +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp-test.ekam-rule b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp-test.ekam-rule index 254401f2ed1..9f3c04e41d2 100755 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp-test.ekam-rule +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp-test.ekam-rule @@ -42,6 +42,8 @@ echo findProvider file:capnp/testdata/pretty.txt; read JUNK echo findProvider file:capnp/testdata/short.txt; read JUNK echo findProvider file:capnp/testdata/errors.capnp.nobuild; read JUNK echo findProvider file:capnp/testdata/errors.txt; read JUNK +echo findProvider file:capnp/testdata/errors2.capnp.nobuild; read JUNK +echo findProvider file:capnp/testdata/errors2.txt; read JUNK # Register our interest in the schema files. 
echo findProvider file:capnp/c++.capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp-test.sh b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp-test.sh index bb8ae101975..b35e5072e3e 100755 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp-test.sh +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp-test.sh @@ -76,8 +76,13 @@ $CAPNP convert binary:json --short $SCHEMA TestAllTypes < $TESTDATA/binary | cmp $CAPNP convert json:binary $SCHEMA TestAllTypes < $TESTDATA/pretty.json | cmp $TESTDATA/binary - || fail json to binary $CAPNP convert json:binary $SCHEMA TestAllTypes < $TESTDATA/short.json | cmp $TESTDATA/binary - || fail short json to binary -$CAPNP convert json:binary $JSON_SCHEMA TestJsonAnnotations -I"$SRCDIR" < $TESTDATA/annotated.json | cmp $TESTDATA/annotated-json.binary || fail annotated json to binary -$CAPNP convert binary:json $JSON_SCHEMA TestJsonAnnotations -I"$SRCDIR" < $TESTDATA/annotated-json.binary | cmp $TESTDATA/annotated.json || fail annotated json to binary +$CAPNP convert json:binary $JSON_SCHEMA TestJsonAnnotations -I"$SRCDIR" < $TESTDATA/annotated.json | cmp $TESTDATA/annotated-json.binary - || fail annotated json to binary +$CAPNP convert binary:json $JSON_SCHEMA TestJsonAnnotations -I"$SRCDIR" < $TESTDATA/annotated-json.binary | cmp $TESTDATA/annotated.json - || fail annotated binary to json + +[ "$(echo '(foo = (text = "abc"))' | $CAPNP convert text:text "$SRCDIR/capnp/test.capnp" BrandedAlias)" = '(foo = (text = "abc"), uv = void)' ] || fail branded alias +[ "$(echo '(foo = (text = "abc"))' | $CAPNP convert text:text "$SRCDIR/capnp/test.capnp" BrandedAlias.Inner)" = '(foo = (text = "abc"))' ] || fail branded alias +[ "$(echo '(foo = (text = "abc"))' | $CAPNP convert text:text "$SRCDIR/capnp/test.capnp" 'TestGenerics(BoxedText, Text)')" = '(foo = (text = "abc"), uv = void)' ] || fail branded alias +[ "$(echo '(baz = (text = "abc"))' | $CAPNP convert text:text "$SRCDIR/capnp/test.capnp" 'TestGenerics(TestAllTypes, List(Int32)).Inner2(BoxedText)')" = '(baz = (text = "abc"))' ] || fail branded alias # ======================================================================================== # DEPRECATED encode/decode @@ -114,5 +119,8 @@ test_eval 'TestListDefaults.lists.int32ListList[2][0]' 12341234 test "x`$CAPNP eval $SCHEMA -ojson globalPrintableStruct | tr -d '\r'`" = "x{\"someText\": \"foo\"}" || fail eval json "globalPrintableStruct == {someText = \"foo\"}" -$CAPNP compile --src-prefix="$PREFIX" -ofoo $TESTDATA/errors.capnp.nobuild 2>&1 | sed -e "s,^.*errors[.]capnp[.]nobuild:,file:,g" | tr -d '\r' | - cmp $TESTDATA/errors.txt - || fail error output +$CAPNP compile --no-standard-import --src-prefix="$PREFIX" -ofoo $TESTDATA/errors.capnp.nobuild 2>&1 | sed -e "s,^.*errors[.]capnp[.]nobuild:,file:,g" | tr -d '\r' | + diff -u $TESTDATA/errors.txt - || fail error output + +$CAPNP compile --no-standard-import --src-prefix="$PREFIX" -ofoo $TESTDATA/errors2.capnp.nobuild 2>&1 | sed -e "s,^.*errors2[.]capnp[.]nobuild:,file:,g" | tr -d '\r' | + diff -u $TESTDATA/errors2.txt - || fail error2 output diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp.c++ index e39db5561e6..79569546dac 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnp.c++ @@ -23,6 +23,10 @@ #define _GNU_SOURCE #endif +#if _WIN32 +#include +#endif + #include "lexer.h" #include "parser.h" #include "compiler.h" @@ 
-50,10 +54,8 @@ #if _WIN32 #include -#define WIN32_LEAN_AND_MEAN // ::eyeroll:: #include #include -#undef VOID #undef CONST #else #include @@ -78,7 +80,7 @@ public: : context(context), disk(kj::newDiskFilesystem()), loader(*this) {} kj::MainFunc getMain() { - if (context.getProgramName().endsWith("capnpc")) { + if (context.getProgramName().endsWith("capnpc") || context.getProgramName().endsWith("capnpc.exe")) { kj::MainBuilder builder(context, VERSION_STRING, "Compiles Cap'n Proto schema files and generates corresponding source code in one or " "more languages."); @@ -131,7 +133,7 @@ public: annotationFlag = Compiler::DROP_ANNOTATIONS; kj::MainBuilder builder(context, VERSION_STRING, - "Convers messages between formats. Reads a stream of messages from stdin in format " + "Converts messages between formats. Reads a stream of messages from stdin in format " " and writes them to stdout in format . Valid formats are:\n" " binary standard binary format\n" " packed packed binary format (deflates zeroes)\n" @@ -355,9 +357,9 @@ public: auto dirPathPair = interpretSourceFile(file); KJ_IF_MAYBE(module, loader.loadModule(dirPathPair.dir, dirPathPair.path)) { - uint64_t id = compiler->add(*module); - compiler->eagerlyCompile(id, compileEagerness); - sourceFiles.add(SourceFile { id, module->getSourceName(), &*module }); + auto compiled = compiler->add(*module); + compiler->eagerlyCompile(compiled.getId(), compileEagerness); + sourceFiles.add(SourceFile { compiled.getId(), compiled, module->getSourceName(), &*module }); } else { return "no such file"; } @@ -902,7 +904,7 @@ private: state = COMMENT; break; } - // fallthrough + KJ_FALLTHROUGH; case NORMAL: switch (c) { case '#': state = COMMENT; break; @@ -1163,44 +1165,88 @@ public: return true; } - kj::MainBuilder::Validity setRootType(kj::StringPtr type) { + kj::MainBuilder::Validity setRootType(kj::StringPtr input) { KJ_ASSERT(sourceFiles.size() == 1); - KJ_IF_MAYBE(schema, resolveName(sourceFiles[0].id, type)) { - if (schema->getProto().which() != schema::Node::STRUCT) { - return "not a struct type"; + class CliArgumentErrorReporter: public ErrorReporter { + public: + void addError(uint32_t startByte, uint32_t endByte, kj::StringPtr message) override { + if (startByte < endByte) { + error = kj::str(startByte + 1, "-", endByte, ": ", message); + } else if (startByte > 0) { + error = kj::str(startByte + 1, ": ", message); + } else { + error = kj::str(message); + } } - rootType = schema->asStruct(); - return true; - } else { - return "no such type"; - } - } -private: - kj::Maybe resolveName(uint64_t scopeId, kj::StringPtr name) { - while (name.size() > 0) { - kj::String temp; - kj::StringPtr part; - KJ_IF_MAYBE(dotpos, name.findFirst('.')) { - temp = kj::heapString(name.slice(0, *dotpos)); - part = temp; - name = name.slice(*dotpos + 1); - } else { - part = name; - name = nullptr; + bool hadErrors() override { + return error != nullptr; } - KJ_IF_MAYBE(childId, compiler->lookup(scopeId, part)) { - scopeId = *childId; + kj::MainBuilder::Validity getValidity() { + KJ_IF_MAYBE(e, error) { + return kj::mv(*e); + } else { + return true; + } + } + + private: + kj::Maybe error; + }; + + CliArgumentErrorReporter errorReporter; + + capnp::MallocMessageBuilder tokenArena; + auto lexedTokens = tokenArena.initRoot(); + lex(input, lexedTokens, errorReporter); + + CapnpParser parser(tokenArena.getOrphanage(), errorReporter); + auto tokens = lexedTokens.asReader().getTokens(); + CapnpParser::ParserInput parserInput(tokens.begin(), tokens.end()); + + bool success = 
false; + + if (parserInput.getPosition() == tokens.end()) { + // Empty argument? + errorReporter.addError(0, 0, "Couldn't parse type name."); + } else { + KJ_IF_MAYBE(expression, parser.getParsers().expression(parserInput)) { + // The input is expected to contain a *single* expression. + if (parserInput.getPosition() == tokens.end()) { + // Hooray, now parse it. + KJ_IF_MAYBE(compiledType, + sourceFiles[0].compiled.evalType(expression->getReader(), errorReporter)) { + KJ_IF_MAYBE(type, compiledType->getSchema()) { + if (type->isStruct()) { + rootType = type->asStruct(); + success = true; + } else { + errorReporter.addError(0, 0, "Type is not a struct."); + } + } else { + // Apparently named a file scope. + errorReporter.addError(0, 0, "Type is not a struct."); + } + } + } else { + errorReporter.addErrorOn(parserInput.current(), "Couldn't parse type name."); + } } else { - return nullptr; + auto best = parserInput.getBest(); + if (best == tokens.end()) { + errorReporter.addError(input.size(), input.size(), "Couldn't parse type name."); + } else { + errorReporter.addErrorOn(*best, "Couldn't parse type name."); + } } } - return compiler->getLoader().get(scopeId); + + KJ_ASSERT(success || errorReporter.hadErrors()); + return errorReporter.getValidity(); } -public: kj::MainBuilder::Validity decode() { convertTo = Format::TEXT; convertFrom = formatFromDeprecatedFlags(Format::BINARY); @@ -1241,8 +1287,13 @@ private: return IMPOSSIBLE; } if ((prefix[3] & 0x80) != 0) { - // Offset is negative (invalid). - return IMPOSSIBLE; + if (prefix[0] == 0xff && prefix[1] == 0xff && prefix[2] == 0xff && prefix[3] == 0xff && + prefix[4] == 0 && prefix[5] == 0 && prefix[6] == 0 && prefix[7] == 0) { + // This is an empty struct with offset of -1. That's valid. + } else { + // Offset is negative (invalid). + return IMPOSSIBLE; + } } if ((prefix[3] & 0xe0) != 0) { // Offset is over a gigabyte (implausible). @@ -1377,7 +1428,7 @@ private: } Plausibility isPlausiblyText(kj::ArrayPtr prefix) { - enum { PREAMBLE, COMMENT, BODY } state; + enum { PREAMBLE, COMMENT, BODY } state = PREAMBLE; for (char c: prefix.asChars()) { switch (state) { @@ -1427,7 +1478,7 @@ private: } Plausibility isPlausiblyJson(kj::ArrayPtr prefix) { - enum { PREAMBLE, COMMENT, BODY } state; + enum { PREAMBLE, COMMENT, BODY } state = PREAMBLE; for (char c: prefix.asChars()) { switch (state) { @@ -1821,6 +1872,7 @@ private: struct SourceFile { uint64_t id; + Compiler::ModuleScope compiled; kj::StringPtr name; Module* module; }; @@ -1899,7 +1951,9 @@ private: auto remainder = path.slice(i, path.size()); KJ_IF_MAYBE(sdir, sourceDirectories.find(prefix)) { - return { *sdir->dir, remainder.clone() }; + if (sdir->isSourcePrefix) { + return { *sdir->dir, remainder.clone() }; + } } } diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnpc-c++.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnpc-c++.c++ index 0ab19808611..853375f4267 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnpc-c++.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnpc-c++.c++ @@ -21,6 +21,10 @@ // This program is a code generator plugin for `capnp compile` which generates C++ code. 
+#if _WIN32 +#include +#endif + #include #include "../serialize.h" #include @@ -37,12 +41,11 @@ #include #include #include +#include #if _WIN32 -#define WIN32_LEAN_AND_MEAN // ::eyeroll:: #include #include -#undef VOID #undef CONST #else #include @@ -301,7 +304,7 @@ kj::String KJ_STRINGIFY(const CppTypeName& typeName) { } } -CppTypeName whichKind(schema::Type::Which which) { +CppTypeName whichKind(Type type) { // Make a CppTypeName representing the capnp::Kind value for the given schema type. This makes // CppTypeName conflate types and values, but this is all just a hack for MSVC's benefit. Its // primary use is as a non-type template parameter to `capnp::List` -- normally the Kind K @@ -309,30 +312,37 @@ CppTypeName whichKind(schema::Type::Which which) { // of `capnp::List` is the return type of a function, and the element type T is a template // instantiation. - switch (which) { - case schema::Type::VOID: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - - case schema::Type::BOOL: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::INT8: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::INT16: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::INT32: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::INT64: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::UINT8: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::UINT16: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::UINT32: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::UINT64: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::FLOAT32: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - case schema::Type::FLOAT64: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); - - case schema::Type::TEXT: return CppTypeName::makePrimitive(" ::capnp::Kind::BLOB"); - case schema::Type::DATA: return CppTypeName::makePrimitive(" ::capnp::Kind::BLOB"); - - case schema::Type::ENUM: return CppTypeName::makePrimitive(" ::capnp::Kind::ENUM"); - case schema::Type::STRUCT: return CppTypeName::makePrimitive(" ::capnp::Kind::STRUCT"); - case schema::Type::INTERFACE: return CppTypeName::makePrimitive(" ::capnp::Kind::INTERFACE"); - - case schema::Type::LIST: return CppTypeName::makePrimitive(" ::capnp::Kind::LIST"); - case schema::Type::ANY_POINTER: return CppTypeName::makePrimitive(" ::capnp::Kind::OTHER"); + switch (type.which()) { + case schema::Type::VOID: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + + case schema::Type::BOOL: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + case schema::Type::INT8: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + case schema::Type::INT16: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + case schema::Type::INT32: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + case schema::Type::INT64: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + case schema::Type::UINT8: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + case schema::Type::UINT16: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + case schema::Type::UINT32: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + case schema::Type::UINT64: return CppTypeName::makePrimitive(" 
::capnp::Kind::PRIMITIVE"); + case schema::Type::FLOAT32: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + case schema::Type::FLOAT64: return CppTypeName::makePrimitive(" ::capnp::Kind::PRIMITIVE"); + + case schema::Type::TEXT: return CppTypeName::makePrimitive(" ::capnp::Kind::BLOB"); + case schema::Type::DATA: return CppTypeName::makePrimitive(" ::capnp::Kind::BLOB"); + + case schema::Type::ENUM: return CppTypeName::makePrimitive(" ::capnp::Kind::ENUM"); + case schema::Type::STRUCT: return CppTypeName::makePrimitive(" ::capnp::Kind::STRUCT"); + case schema::Type::INTERFACE: return CppTypeName::makePrimitive(" ::capnp::Kind::INTERFACE"); + + case schema::Type::LIST: return CppTypeName::makePrimitive(" ::capnp::Kind::LIST"); + case schema::Type::ANY_POINTER: { + switch (type.whichAnyPointerKind()) { + case schema::Type::AnyPointer::Unconstrained::CAPABILITY: + return CppTypeName::makePrimitive(" ::capnp::Kind::INTERFACE"); + default: + return CppTypeName::makePrimitive(" ::capnp::Kind::OTHER"); + } + } } KJ_UNREACHABLE; @@ -513,7 +523,7 @@ private: auto params = kj::heapArrayBuilder(2); auto list = type.asList(); params.add(typeName(list.getElementType(), method)); - params.add(whichKind(list.whichElementType())); + params.add(whichKind(list.getElementType())); result.addMemberTemplate("List", params.finish()); return result; } @@ -538,9 +548,12 @@ private: return CppTypeName::makePrimitive(" ::capnp::AnyStruct"); case schema::Type::AnyPointer::Unconstrained::LIST: return CppTypeName::makePrimitive(" ::capnp::AnyList"); - case schema::Type::AnyPointer::Unconstrained::CAPABILITY: + case schema::Type::AnyPointer::Unconstrained::CAPABILITY: { hasInterfaces = true; // Probably need to #include . - return CppTypeName::makePrimitive(" ::capnp::Capability"); + auto result = CppTypeName::makePrimitive(" ::capnp::Capability"); + result.setHasInterfaces(); + return result; + } } KJ_UNREACHABLE; } @@ -829,7 +842,14 @@ private: } kj::Maybe makeBrandDepInitializer(Schema type) { - return makeBrandDepInitializer(type, cppFullName(type, nullptr)); + // Be careful not to invoke cppFullName() if it would just be thrown away, as doing so will + // add the type's declaring file to `usedImports`. In particular, this causes `stream.capnp.h` + // to be #included unnecessarily. 
+ if (type.isBranded()) { + return makeBrandDepInitializer(type, cppFullName(type, nullptr)); + } else { + return nullptr; + } } kj::Maybe makeBrandDepInitializer( @@ -1777,7 +1797,7 @@ private: " ::capnp::bounded<", offset, ">() * ::capnp::POINTERS), kj::mv(value));\n" "}\n", COND(type.hasDisambiguatedTemplate(), - "#ifndef _MSC_VER\n" + "#if !defined(_MSC_VER) || defined(__clang__)\n" "// Excluded under MSVC because bugs may make it unable to compile this method.\n"), templateContext.allDecls(), "inline ::capnp::Orphan<", type, "> ", scope, "Builder::disown", titleCase, "() {\n", @@ -1785,7 +1805,7 @@ private: " return ::capnp::_::PointerHelpers<", type, ">::disown(_builder.getPointerField(\n" " ::capnp::bounded<", offset, ">() * ::capnp::POINTERS));\n" "}\n", - COND(type.hasDisambiguatedTemplate(), "#endif // !_MSC_VER\n"), + COND(type.hasDisambiguatedTemplate(), "#endif // !_MSC_VER || __clang__\n"), COND(shouldExcludeInLiteMode, "#endif // !CAPNP_LITE\n"), "\n") }; @@ -2139,6 +2159,8 @@ private: auto paramProto = paramSchema.getProto(); auto resultProto = resultSchema.getProto(); + bool isStreaming = method.isStreaming(); + auto implicitParamsReader = proto.getImplicitParameters(); auto implicitParamsBuilder = kj::heapArrayBuilder(implicitParamsReader.size()); for (auto param: implicitParamsReader) { @@ -2175,7 +2197,10 @@ private: } CppTypeName resultType; CppTypeName genericResultType; - if (resultProto.getScopeId() == 0) { + if (isStreaming) { + // We don't use resultType or genericResultType in this case. We want to avoid computing them + // at all so that we don't end up marking stream.capnp.h in usedImports. + } else if (resultProto.getScopeId() == 0) { resultType = interfaceTypeName; if (implicitParams.size() == 0) { resultType.addMemberType(kj::str(titleCase, "Results")); @@ -2193,7 +2218,7 @@ private: kj::String shortParamType = paramProto.getScopeId() == 0 ? kj::str(titleCase, "Params") : kj::str(genericParamType); - kj::String shortResultType = resultProto.getScopeId() == 0 ? + kj::String shortResultType = resultProto.getScopeId() == 0 || isStreaming ? kj::str(titleCase, "Results") : kj::str(genericResultType); auto interfaceProto = method.getContainingInterface().getProto(); @@ -2215,10 +2240,13 @@ private: templateContext.allDecls(), implicitParamsTemplateDecl, templateContext.isGeneric() ? "CAPNP_AUTO_IF_MSVC(" : "", - "::capnp::Request<", paramType, ", ", resultType, ">", + isStreaming ? kj::strTree("::capnp::StreamingRequest<", paramType, ">") + : kj::strTree("::capnp::Request<", paramType, ", ", resultType, ">"), templateContext.isGeneric() ? ")\n" : "\n", - interfaceName, "::Client::", name, "Request(::kj::Maybe< ::capnp::MessageSize> sizeHint) {\n" - " return newCall<", paramType, ", ", resultType, ">(\n" + interfaceName, "::Client::", name, "Request(::kj::Maybe< ::capnp::MessageSize> sizeHint) {\n", + isStreaming + ? kj::strTree(" return newStreamingCall<", paramType, ">(\n") + : kj::strTree(" return newCall<", paramType, ", ", resultType, ">(\n"), " 0x", interfaceIdHex, "ull, ", methodId, ", sizeHint);\n" "}\n"); @@ -2226,7 +2254,8 @@ private: kj::strTree( implicitParamsTemplateDecl.size() == 0 ? "" : " ", implicitParamsTemplateDecl, templateContext.isGeneric() ? " CAPNP_AUTO_IF_MSVC(" : " ", - "::capnp::Request<", paramType, ", ", resultType, ">", + isStreaming ? kj::strTree("::capnp::StreamingRequest<", paramType, ">") + : kj::strTree("::capnp::Request<", paramType, ", ", resultType, ">"), templateContext.isGeneric() ? 
")" : "", " ", name, "Request(\n" " ::kj::Maybe< ::capnp::MessageSize> sizeHint = nullptr);\n"), @@ -2236,8 +2265,11 @@ private: " typedef ", genericParamType, " ", titleCase, "Params;\n"), resultProto.getScopeId() != 0 ? kj::strTree() : kj::strTree( " typedef ", genericResultType, " ", titleCase, "Results;\n"), - " typedef ::capnp::CallContext<", shortParamType, ", ", shortResultType, "> ", - titleCase, "Context;\n" + isStreaming + ? kj::strTree(" typedef ::capnp::StreamingCallContext<", shortParamType, "> ") + : kj::strTree( + " typedef ::capnp::CallContext<", shortParamType, ", ", shortResultType, "> "), + titleCase, "Context;\n" " virtual ::kj::Promise ", identifierName, "(", titleCase, "Context context);\n"), implicitParams.size() == 0 ? kj::strTree() : kj::mv(requestMethodImpl), @@ -2252,9 +2284,29 @@ private: "}\n"), kj::strTree( - " case ", methodId, ":\n" - " return ", identifierName, "(::capnp::Capability::Server::internalGetTypedContext<\n" - " ", genericParamType, ", ", genericResultType, ">(context));\n") + " case ", methodId, ":\n", + isStreaming + ? kj::strTree( + // For streaming calls, we need to add an evalNow() here so that exceptions thrown + // directly from the call can propagate to later calls. If we don't capture the + // exception properly then the caller will never find out that this is a streaming + // call (indicated by the boolean in the return value) so won't know to propagate + // the exception. + " return {\n" + " kj::evalNow([&]() {\n" + " return ", identifierName, "(::capnp::Capability::Server::internalGetTypedStreamingContext<\n" + " ", genericParamType, ">(context));\n" + " }),\n" + " true\n" + " };\n") + : kj::strTree( + // For non-streaming calls we let exceptions just flow through for a little more + // efficiency. 
+ " return {\n" + " ", identifierName, "(::capnp::Capability::Server::internalGetTypedContext<\n" + " ", genericParamType, ", ", genericResultType, ">(context)),\n" + " false\n" + " };\n")) }; } @@ -2403,7 +2455,8 @@ private: "public:\n", " typedef ", name, " Serves;\n" "\n" - " ::kj::Promise dispatchCall(uint64_t interfaceId, uint16_t methodId,\n" + " ::capnp::Capability::Server::DispatchCallResult dispatchCall(\n" + " uint64_t interfaceId, uint16_t methodId,\n" " ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context)\n" " override;\n" "\n" @@ -2415,7 +2468,8 @@ private: " .template castAs<", typeName, ">();\n" " }\n" "\n" - " ::kj::Promise dispatchCallInternal(uint16_t methodId,\n" + " ::capnp::Capability::Server::DispatchCallResult dispatchCallInternal(\n" + " uint16_t methodId,\n" " ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context);\n" "};\n" "#endif // !CAPNP_LITE\n" @@ -2459,7 +2513,7 @@ private: "#if !CAPNP_LITE\n", KJ_MAP(m, methods) { return kj::mv(m.sourceDefs); }, templateContext.allDecls(), - "::kj::Promise ", fullName, "::Server::dispatchCall(\n" + "::capnp::Capability::Server::DispatchCallResult ", fullName, "::Server::dispatchCall(\n" " uint64_t interfaceId, uint16_t methodId,\n" " ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context) {\n" " switch (interfaceId) {\n" @@ -2476,7 +2530,7 @@ private: " }\n" "}\n", templateContext.allDecls(), - "::kj::Promise ", fullName, "::Server::dispatchCallInternal(\n" + "::capnp::Capability::Server::DispatchCallResult ", fullName, "::Server::dispatchCallInternal(\n" " uint16_t methodId,\n" " ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context) {\n" " switch (methodId) {\n", @@ -2531,7 +2585,7 @@ private: scope.size() == 0 ? kj::strTree() : kj::strTree( // TODO(msvc): MSVC doesn't like definitions of constexprs, but other compilers and // the standard require them. - "#ifndef _MSC_VER\n" + "#if !defined(_MSC_VER) || defined(__clang__)\n" "constexpr ", typeName_, ' ', scope, upperCase, ";\n" "#endif\n") }; @@ -3001,6 +3055,8 @@ private: } }, "\n" + "CAPNP_BEGIN_HEADER\n" + "\n" "namespace capnp {\n" "namespace schemas {\n" "\n", @@ -3016,7 +3072,10 @@ private: KJ_MAP(n, nodeTexts) { return kj::mv(n.readerBuilderDefs); }, separator, "\n", KJ_MAP(n, nodeTexts) { return kj::mv(n.inlineMethodDefs); }, - KJ_MAP(n, namespaceParts) { return kj::strTree("} // namespace\n"); }, "\n"), + KJ_MAP(n, namespaceParts) { return kj::strTree("} // namespace\n"); }, + "\n" + "CAPNP_END_HEADER\n" + "\n"), kj::strTree( "// Generated by Cap'n Proto compiler, DO NOT EDIT\n" diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnpc-capnp.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnpc-capnp.c++ index 388145d2014..3660c827df5 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnpc-capnp.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/capnpc-capnp.c++ @@ -39,6 +39,7 @@ #include #include #include +#include #if HAVE_CONFIG_H #include "config.h" @@ -493,7 +494,9 @@ private: kj::StringTree genParamList(InterfaceSchema interface, StructSchema schema, schema::Brand::Reader brand, InterfaceSchema::Method method) { - if (schema.getProto().getScopeId() == 0) { + if (schema.getProto().getId() == typeId()) { + return kj::strTree("stream"); + } else if (schema.getProto().getScopeId() == 0) { // A named parameter list. 
return kj::strTree("(", kj::StringTree( KJ_MAP(field, schema.getFields()) { diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/compiler.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/compiler.c++ index f20ddae7bfa..03eda9fc3c6 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/compiler.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/compiler.c++ @@ -41,18 +41,18 @@ public: Alias(CompiledModule& module, Node& parent, const Expression::Reader& targetName) : module(module), parent(parent), targetName(targetName) {} - kj::Maybe compile(); + kj::Maybe compile(); private: CompiledModule& module; Node& parent; Expression::Reader targetName; - kj::Maybe target; + kj::Maybe target; Orphan brandOrphan; bool initialized = false; }; -class Compiler::Node final: public NodeTranslator::Resolver { +class Compiler::Node final: public Resolver { // Passes through four states: // - Stub: On initial construction, the Node is just a placeholder object. Its ID has been // determined, and it is placed in its parent's member table as well as the compiler's @@ -90,7 +90,7 @@ public: void addError(kj::StringPtr error); // Report an error on this Node. - // implements NodeTranslator::Resolver ----------------------------- + // implements Resolver --------------------------------------------- kj::Maybe resolve(kj::StringPtr name) override; kj::Maybe resolveMember(kj::StringPtr name) override; ResolvedDecl resolveBuiltin(Declaration::Which which) override; @@ -356,7 +356,7 @@ private: // ======================================================================================= -kj::Maybe Compiler::Alias::compile() { +kj::Maybe Compiler::Alias::compile() { if (!initialized) { initialized = true; @@ -520,7 +520,7 @@ kj::Maybe Compiler::Node::getContent(Content::State mi } content.advanceState(Content::EXPANDED); - } // fallthrough + } KJ_FALLTHROUGH; case Content::EXPANDED: { if (minimumState <= Content::EXPANDED) break; @@ -583,19 +583,28 @@ kj::Maybe Compiler::Node::getContent(Content::State mi })); content.advanceState(Content::BOOTSTRAP); - } // fallthrough + } KJ_FALLTHROUGH; case Content::BOOTSTRAP: { if (minimumState <= Content::BOOTSTRAP) break; // Create the final schema. - auto nodeSet = content.translator->finish(); + NodeTranslator::NodeSet nodeSet; + if (content.bootstrapSchema == nullptr) { + // Must have failed in an earlier stage. + KJ_ASSERT(module->getErrorReporter().hadErrors()); + nodeSet = content.translator->getBootstrapNode(); + } else { + nodeSet = content.translator->finish( + module->getCompiler().getWorkspace().bootstrapLoader.getUnbound(id)); + } + content.finalSchema = nodeSet.node; content.auxSchemas = kj::mv(nodeSet.auxNodes); content.sourceInfo = kj::mv(nodeSet.sourceInfo); content.advanceState(Content::FINISHED); - } // fallthrough + } KJ_FALLTHROUGH; case Content::FINISHED: break; @@ -853,7 +862,7 @@ void Compiler::Node::addError(kj::StringPtr error) { module->getErrorReporter().addError(startByte, endByte, error); } -kj::Maybe +kj::Maybe Compiler::Node::resolve(kj::StringPtr name) { // Check members. 
KJ_IF_MAYBE(member, resolveMember(name)) { @@ -883,7 +892,7 @@ Compiler::Node::resolve(kj::StringPtr name) { } } -kj::Maybe +kj::Maybe Compiler::Node::resolveMember(kj::StringPtr name) { if (isBuiltin) return nullptr; @@ -908,25 +917,25 @@ Compiler::Node::resolveMember(kj::StringPtr name) { return nullptr; } -NodeTranslator::Resolver::ResolvedDecl Compiler::Node::resolveBuiltin(Declaration::Which which) { +Resolver::ResolvedDecl Compiler::Node::resolveBuiltin(Declaration::Which which) { auto& b = module->getCompiler().getBuiltin(which); return { b.id, b.genericParamCount, 0, b.kind, &b, nullptr }; } -NodeTranslator::Resolver::ResolvedDecl Compiler::Node::resolveId(uint64_t id) { +Resolver::ResolvedDecl Compiler::Node::resolveId(uint64_t id) { auto& n = KJ_ASSERT_NONNULL(module->getCompiler().findNode(id)); uint64_t parentId = n.parent.map([](Node& n) { return n.id; }).orDefault(0); return { n.id, n.genericParamCount, parentId, n.kind, &n, nullptr }; } -kj::Maybe Compiler::Node::getParent() { +kj::Maybe Compiler::Node::getParent() { return parent.map([](Node& parent) { uint64_t scopeId = parent.parent.map([](Node& gp) { return gp.id; }).orDefault(0); return ResolvedDecl { parent.id, parent.genericParamCount, scopeId, parent.kind, &parent, nullptr }; }); } -NodeTranslator::Resolver::ResolvedDecl Compiler::Node::getTopScope() { +Resolver::ResolvedDecl Compiler::Node::getTopScope() { Node& node = module->getRootNode(); return ResolvedDecl { node.id, 0, 0, node.kind, &node, nullptr }; } @@ -954,7 +963,7 @@ kj::Maybe Compiler::Node::resolveFinalSchema(uint64_t id) } } -kj::Maybe +kj::Maybe Compiler::Node::resolveImport(kj::StringPtr name) { KJ_IF_MAYBE(m, module->importRelative(name)) { Node& root = m->getRootNode(); @@ -1048,6 +1057,25 @@ static void findImports(Expression::Reader exp, std::set& output) } } +static void findImports(Declaration::ParamList::Reader paramList, std::set& output) { + switch (paramList.which()) { + case Declaration::ParamList::NAMED_LIST: + for (auto param: paramList.getNamedList()) { + findImports(param.getType(), output); + for (auto ann: param.getAnnotations()) { + findImports(ann.getName(), output); + } + } + break; + case Declaration::ParamList::TYPE: + findImports(paramList.getType(), output); + break; + case Declaration::ParamList::STREAM: + output.insert("/capnp/stream.capnp"); + break; + } +} + static void findImports(Declaration::Reader decl, std::set& output) { switch (decl.which()) { case Declaration::USING: @@ -1067,30 +1095,9 @@ static void findImports(Declaration::Reader decl, std::set& outpu case Declaration::METHOD: { auto method = decl.getMethod(); - auto params = method.getParams(); - if (params.isNamedList()) { - for (auto param: params.getNamedList()) { - findImports(param.getType(), output); - for (auto ann: param.getAnnotations()) { - findImports(ann.getName(), output); - } - } - } else { - findImports(params.getType(), output); - } - + findImports(method.getParams(), output); if (method.getResults().isExplicit()) { - auto results = method.getResults().getExplicit(); - if (results.isNamedList()) { - for (auto param: results.getNamedList()) { - findImports(param.getType(), output); - for (auto ann: param.getAnnotations()) { - findImports(ann.getName(), output); - } - } - } else { - findImports(results.getType(), output); - } + findImports(method.getResults().getExplicit(), output); } break; } @@ -1227,16 +1234,12 @@ Compiler::Node& Compiler::Impl::getBuiltin(Declaration::Which which) { return *iter->second; } -uint64_t Compiler::Impl::add(Module& 
module) { - return addInternal(module).getRootNode().getId(); -} - kj::Maybe Compiler::Impl::lookup(uint64_t parent, kj::StringPtr childName) { // Looking up members does not use the workspace, so we don't need to lock it. KJ_IF_MAYBE(parentNode, findNode(parent)) { KJ_IF_MAYBE(child, parentNode->resolveMember(childName)) { - if (child->is()) { - return child->get().id; + if (child->is()) { + return child->get().id; } else { // An alias. We don't support looking up aliases with this method. return nullptr; @@ -1319,8 +1322,9 @@ Compiler::Compiler(AnnotationFlag annotationFlag) loader(*this) {} Compiler::~Compiler() noexcept(false) {} -uint64_t Compiler::add(Module& module) const { - return impl.lockExclusive()->get()->add(module); +Compiler::ModuleScope Compiler::add(Module& module) const { + Node& root = impl.lockExclusive()->get()->addInternal(module).getRootNode(); + return ModuleScope(*this, root.getId(), root); } kj::Maybe Compiler::lookup(uint64_t parent, kj::StringPtr childName) const { @@ -1352,5 +1356,116 @@ void Compiler::load(const SchemaLoader& loader, uint64_t id) const { impl.lockExclusive()->get()->loadFinal(loader, id); } +// ----------------------------------------------------------------------------- + +class Compiler::ErrorIgnorer: public ErrorReporter { +public: + void addError(uint32_t startByte, uint32_t endByte, kj::StringPtr message) override {} + bool hadErrors() override { return false; } + + static ErrorIgnorer instance; +}; +Compiler::ErrorIgnorer Compiler::ErrorIgnorer::instance; + +kj::Maybe Compiler::CompiledType::getSchema() { + capnp::word scratch[32]; + memset(&scratch, 0, sizeof(scratch)); + capnp::MallocMessageBuilder message(scratch); + auto builder = message.getRoot(); + + { + auto lock = compiler.impl.lockShared(); + decl.get(lock).compileAsType(ErrorIgnorer::instance, builder); + } + + // No need to pass `scope` as second parameter since CompiledType always represents a type + // expression evaluated free-standing, not in any scope. 
+ return compiler.loader.getType(builder.asReader()); +} + +Compiler::CompiledType Compiler::CompiledType::clone() { + kj::ExternalMutexGuarded newDecl; + { + auto lock = compiler.impl.lockExclusive(); + newDecl.set(lock, kj::cp(decl.get(lock))); + } + return CompiledType(compiler, kj::mv(newDecl)); +} + +kj::Maybe Compiler::CompiledType::getMember(kj::StringPtr name) { + kj::ExternalMutexGuarded newDecl; + bool found = false; + + { + auto lock = compiler.impl.lockShared(); + KJ_IF_MAYBE(member, decl.get(lock).getMember(name, {})) { + newDecl.set(lock, kj::mv(*member)); + found = true; + } + } + + if (found) { + return CompiledType(compiler, kj::mv(newDecl)); + } else { + return nullptr; + } +} + +kj::Maybe Compiler::CompiledType::applyBrand( + kj::Array arguments) { + kj::ExternalMutexGuarded newDecl; + bool found = false; + + { + auto lock = compiler.impl.lockShared(); + auto args = KJ_MAP(arg, arguments) { return kj::mv(arg.decl.get(lock)); }; + KJ_IF_MAYBE(member, decl.get(lock).applyParams(kj::mv(args), {})) { + newDecl.set(lock, kj::mv(*member)); + found = true; + } + } + + if (found) { + return CompiledType(compiler, kj::mv(newDecl)); + } else { + return nullptr; + } +} + +Compiler::CompiledType Compiler::ModuleScope::getRoot() { + kj::ExternalMutexGuarded newDecl; + + { + auto lock = compiler.impl.lockExclusive(); + auto brandScope = kj::refcounted(ErrorIgnorer::instance, node.getId(), 0, node); + Resolver::ResolvedDecl decl { node.getId(), 0, 0, node.getKind(), &node, nullptr }; + newDecl.set(lock, BrandedDecl(kj::mv(decl), kj::mv(brandScope), {})); + } + + return CompiledType(compiler, kj::mv(newDecl)); +} + +kj::Maybe Compiler::ModuleScope::evalType( + Expression::Reader expression, ErrorReporter& errorReporter) { + kj::ExternalMutexGuarded newDecl; + bool found = false; + + { + auto lock = compiler.impl.lockExclusive(); + auto brandScope = kj::refcounted(errorReporter, node.getId(), 0, node); + KJ_IF_MAYBE(result, brandScope->compileDeclExpression( + expression, node, ImplicitParams::none())) { + newDecl.set(lock, kj::mv(*result)); + found = true; + }; + } + + if (found) { + return CompiledType(compiler, kj::mv(newDecl)); + } else { + return nullptr; + } +} + } // namespace compiler } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/compiler.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/compiler.h index 017cbc68df0..36c5dca5f52 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/compiler.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/compiler.h @@ -21,14 +21,13 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include #include #include "error-reporter.h" +#include "generics.h" + +CAPNP_BEGIN_HEADER namespace capnp { namespace compiler { @@ -56,6 +55,8 @@ class Compiler final: private SchemaLoader::LazyLoadCallback { // // This class is thread-safe, hence all its methods are const. + class Node; + public: enum AnnotationFlag { COMPILE_ANNOTATIONS, @@ -77,11 +78,66 @@ class Compiler final: private SchemaLoader::LazyLoadCallback { ~Compiler() noexcept(false); KJ_DISALLOW_COPY(Compiler); - uint64_t add(Module& module) const; - // Add a module to the Compiler, returning the module's file ID. The ID can then be looked up in - // the `SchemaLoader` returned by `getLoader()`. However, the SchemaLoader may behave as if the - // schema node doesn't exist if any compilation errors occur (reported via the module's - // ErrorReporter). 
The module is parsed at the time `add()` is called, but not fully compiled -- + class CompiledType { + // Represents a compiled type expression, from which you can traverse to nested types, apply + // generics, etc. + + public: + CompiledType clone(); + // Make another CompiledType pointing to the same type. + + kj::Maybe getSchema(); + // Evaluate to a type schema. Returns null if this "type" cannot actually be used as a field + // type, e.g. because it's the pseudo-type representing a file's top-level scope. + + kj::Maybe getMember(kj::StringPtr name); + // Look up a nested declaration. Returns null if there is no such member, or if the member is + // not a type. + + kj::Maybe applyBrand(kj::Array arguments); + // If this is a generic type, specializes apply a brand to it. Returns null if this is + // not a generic type or too many arguments were specified. + + private: + const Compiler& compiler; + kj::ExternalMutexGuarded decl; + + CompiledType(const Compiler& compiler, kj::ExternalMutexGuarded decl) + : compiler(compiler), decl(kj::mv(decl)) {} + + friend class Compiler; + }; + + class ModuleScope { + // Result of compiling a module. + + public: + uint64_t getId() { return id; } + + CompiledType getRoot(); + // Get a CompiledType representing the root, which can be used to programmatically look up + // declarations. + + kj::Maybe evalType(Expression::Reader expression, ErrorReporter& errorReporter); + // Evaluate some type expression within the scope of this module. + // + // Returns null if errors prevented evaluation; the errors will have been reported to + // `errorReporter`. + + private: + const Compiler& compiler; + uint64_t id; + Node& node; + + ModuleScope(const Compiler& compiler, uint64_t id, Node& node) + : compiler(compiler), id(id), node(node) {} + + friend class Compiler; + }; + + ModuleScope add(Module& module) const; + // Add a module to the Compiler, returning a CompiledType representing the top-level scope of + // the module. The module is parsed at the time `add()` is called, but not fully compiled -- // individual schema nodes are compiled lazily. If you want to force eager compilation, // see `eagerlyCompile()`, below. @@ -89,6 +145,9 @@ class Compiler final: private SchemaLoader::LazyLoadCallback { // Given the type ID of a schema node, find the ID of a node nested within it. Throws an // exception if the parent ID is not recognized; returns null if the parent has no child of the // given name. Neither the parent nor the child schema node is actually compiled. + // + // TODO(cleanup): This interface does not handle generics correctly. Use the + // ModuleScope/CompiledType interface instead. kj::Maybe getSourceInfo(uint64_t id) const; // Get the SourceInfo for the given type ID, if available. 
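(Since ModuleScope and CompiledType are new in this header, a short usage sketch may help. It is only a sketch: `module` stands for any capnp::compiler::Module implementation, the helper name is invented, and the payload types behind the kj::Maybe returns are left to `auto` because they are not spelled out in this hunk.)

#include "compiler.h"
#include <kj/debug.h>

namespace capnp {
namespace compiler {

void inspectModule(const Compiler& compiler, Module& module,
                   kj::StringPtr memberName, Expression::Reader typeExpr,
                   ErrorReporter& errorReporter) {
  auto scope = compiler.add(module);   // parses now; nodes are compiled lazily

  // Programmatic lookup through the file's top-level scope.
  auto member = scope.getRoot().getMember(memberName);
  KJ_IF_MAYBE(m, member) {
    if (m->getSchema() != nullptr) {
      KJ_LOG(INFO, "member can be used as a field type", memberName);
    }
  } else {
    KJ_LOG(INFO, "no such member, or it is not a type", memberName);
  }

  // Evaluate a user-written type expression in the module's scope; errors go to
  // errorReporter and null comes back if they prevented evaluation.
  if (scope.evalType(typeExpr, errorReporter) == nullptr) {
    KJ_LOG(WARNING, "type expression did not evaluate", memberName);
  }
}

}  // namespace compiler
}  // namespace capnp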
@@ -191,11 +250,13 @@ class Compiler final: private SchemaLoader::LazyLoadCallback { SchemaLoader loader; class CompiledModule; - class Node; class Alias; + class ErrorIgnorer; void load(const SchemaLoader& loader, uint64_t id) const override; }; } // namespace compiler } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/error-reporter.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/error-reporter.h index 1c32abf3b03..e3bf6acf6ff 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/error-reporter.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/error-reporter.h @@ -21,16 +21,14 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "../common.h" #include #include #include #include +CAPNP_BEGIN_HEADER + namespace capnp { namespace compiler { @@ -94,3 +92,5 @@ class LineBreakTable { } // namespace compiler } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/evolution-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/evolution-test.c++ index 9d41c23523a..48fe66bf534 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/evolution-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/evolution-test.c++ @@ -682,7 +682,7 @@ static kj::Maybe loadFile( uint sharedOrdinalCount) { Compiler compiler; ModuleImpl module(file); - KJ_ASSERT(compiler.add(module) == 0x8123456789abcdefllu); + KJ_ASSERT(compiler.add(module).getId() == 0x8123456789abcdefllu); if (allNodes) { // Eagerly compile and load the whole thing. diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/generics.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/generics.c++ new file mode 100644 index 00000000000..71df4e14da8 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/generics.c++ @@ -0,0 +1,656 @@ +// Copyright (c) 2013-2020 Sandstorm Development Group, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +#include "generics.h" +#include "parser.h" // for expressionString() + +namespace capnp { +namespace compiler { + +BrandedDecl::BrandedDecl(BrandedDecl& other) + : body(other.body), + source(other.source) { + if (body.is()) { + brand = kj::addRef(*other.brand); + } +} + +BrandedDecl& BrandedDecl::operator=(BrandedDecl& other) { + body = other.body; + source = other.source; + if (body.is()) { + brand = kj::addRef(*other.brand); + } + return *this; +} + +kj::Maybe BrandedDecl::applyParams( + kj::Array params, Expression::Reader subSource) { + if (body.is()) { + return nullptr; + } else { + return brand->setParams(kj::mv(params), body.get().kind, subSource) + .map([&](kj::Own&& scope) { + BrandedDecl result = *this; + result.brand = kj::mv(scope); + result.source = subSource; + return result; + }); + } +} + +kj::Maybe BrandedDecl::getMember( + kj::StringPtr memberName, Expression::Reader subSource) { + if (body.is()) { + return nullptr; + } else KJ_IF_MAYBE(r, body.get().resolver->resolveMember(memberName)) { + return brand->interpretResolve(*body.get().resolver, *r, subSource); + } else { + return nullptr; + } +} + +kj::Maybe BrandedDecl::getKind() { + if (body.is()) { + return nullptr; + } else { + return body.get().kind; + } +} + +kj::Maybe BrandedDecl::getListParam() { + KJ_REQUIRE(body.is()); + + auto& decl = body.get(); + KJ_REQUIRE(decl.kind == Declaration::BUILTIN_LIST); + + auto params = KJ_ASSERT_NONNULL(brand->getParams(decl.id)); + if (params.size() != 1) { + return nullptr; + } else { + return params[0]; + } +} + +Resolver::ResolvedParameter BrandedDecl::asVariable() { + KJ_REQUIRE(body.is()); + + return body.get(); +} + +bool BrandedDecl::compileAsType( + ErrorReporter& errorReporter, schema::Type::Builder target) { + KJ_IF_MAYBE(kind, getKind()) { + switch (*kind) { + case Declaration::ENUM: { + auto enum_ = target.initEnum(); + enum_.setTypeId(getIdAndFillBrand([&]() { return enum_.initBrand(); })); + return true; + } + + case Declaration::STRUCT: { + auto struct_ = target.initStruct(); + struct_.setTypeId(getIdAndFillBrand([&]() { return struct_.initBrand(); })); + return true; + } + + case Declaration::INTERFACE: { + auto interface = target.initInterface(); + interface.setTypeId(getIdAndFillBrand([&]() { return interface.initBrand(); })); + return true; + } + + case Declaration::BUILTIN_LIST: { + auto elementType = target.initList().initElementType(); + + KJ_IF_MAYBE(param, getListParam()) { + if (!param->compileAsType(errorReporter, elementType)) { + return false; + } + } else { + addError(errorReporter, "'List' requires exactly one parameter."); + return false; + } + + if (elementType.isAnyPointer()) { + auto unconstrained = elementType.getAnyPointer().getUnconstrained(); + + if (unconstrained.isAnyKind()) { + addError(errorReporter, "'List(AnyPointer)' is not supported."); + // Seeing List(AnyPointer) later can mess things up, so change the type to Void. + elementType.setVoid(); + return false; + } else if (unconstrained.isStruct()) { + addError(errorReporter, "'List(AnyStruct)' is not supported."); + // Seeing List(AnyStruct) later can mess things up, so change the type to Void. 
+ elementType.setVoid(); + return false; + } + } + + return true; + } + + case Declaration::BUILTIN_VOID: target.setVoid(); return true; + case Declaration::BUILTIN_BOOL: target.setBool(); return true; + case Declaration::BUILTIN_INT8: target.setInt8(); return true; + case Declaration::BUILTIN_INT16: target.setInt16(); return true; + case Declaration::BUILTIN_INT32: target.setInt32(); return true; + case Declaration::BUILTIN_INT64: target.setInt64(); return true; + case Declaration::BUILTIN_U_INT8: target.setUint8(); return true; + case Declaration::BUILTIN_U_INT16: target.setUint16(); return true; + case Declaration::BUILTIN_U_INT32: target.setUint32(); return true; + case Declaration::BUILTIN_U_INT64: target.setUint64(); return true; + case Declaration::BUILTIN_FLOAT32: target.setFloat32(); return true; + case Declaration::BUILTIN_FLOAT64: target.setFloat64(); return true; + case Declaration::BUILTIN_TEXT: target.setText(); return true; + case Declaration::BUILTIN_DATA: target.setData(); return true; + + case Declaration::BUILTIN_OBJECT: + addError(errorReporter, + "As of Cap'n Proto 0.4, 'Object' has been renamed to 'AnyPointer'. Sorry for the " + "inconvenience, and thanks for being an early adopter. :)"); + KJ_FALLTHROUGH; + case Declaration::BUILTIN_ANY_POINTER: + target.initAnyPointer().initUnconstrained().setAnyKind(); + return true; + case Declaration::BUILTIN_ANY_STRUCT: + target.initAnyPointer().initUnconstrained().setStruct(); + return true; + case Declaration::BUILTIN_ANY_LIST: + target.initAnyPointer().initUnconstrained().setList(); + return true; + case Declaration::BUILTIN_CAPABILITY: + target.initAnyPointer().initUnconstrained().setCapability(); + return true; + + case Declaration::FILE: + case Declaration::USING: + case Declaration::CONST: + case Declaration::ENUMERANT: + case Declaration::FIELD: + case Declaration::UNION: + case Declaration::GROUP: + case Declaration::METHOD: + case Declaration::ANNOTATION: + case Declaration::NAKED_ID: + case Declaration::NAKED_ANNOTATION: + addError(errorReporter, kj::str("'", toString(), "' is not a type.")); + return false; + } + + KJ_UNREACHABLE; + } else { + // Oh, this is a type variable. + auto var = asVariable(); + if (var.id == 0) { + // This is actually a method implicit parameter. + auto builder = target.initAnyPointer().initImplicitMethodParameter(); + builder.setParameterIndex(var.index); + return true; + } else { + auto builder = target.initAnyPointer().initParameter(); + builder.setScopeId(var.id); + builder.setParameterIndex(var.index); + return true; + } + } +} + +Resolver::ResolveResult BrandedDecl::asResolveResult( + uint64_t scopeId, schema::Brand::Builder brandBuilder) { + auto result = body; + if (result.is()) { + // May need to compile our context as the "brand". 
+ + result.get().scopeId = scopeId; + + getIdAndFillBrand([&]() { + result.get().brand = brandBuilder.asReader(); + return brandBuilder; + }); + } + return result; +} + +kj::String BrandedDecl::toString() { + return expressionString(source); +} + +kj::String BrandedDecl::toDebugString() { + if (body.is()) { + auto variable = body.get(); + return kj::str("variable(", variable.id, ", ", variable.index, ")"); + } else { + auto decl = body.get(); + return kj::str("decl(", decl.id, ", ", (uint)decl.kind, "')"); + } +} + +BrandScope::BrandScope(ErrorReporter& errorReporter, uint64_t startingScopeId, + uint startingScopeParamCount, Resolver& startingScope) + : errorReporter(errorReporter), parent(nullptr), leafId(startingScopeId), + leafParamCount(startingScopeParamCount), inherited(true) { + // Create all lexical parent scopes, all with no brand bindings. + KJ_IF_MAYBE(p, startingScope.getParent()) { + parent = kj::refcounted( + errorReporter, p->id, p->genericParamCount, *p->resolver); + } +} + +bool BrandScope::isGeneric() { + if (leafParamCount > 0) return true; + + KJ_IF_MAYBE(p, parent) { + return p->get()->isGeneric(); + } else { + return false; + } +} + +kj::Own BrandScope::push(uint64_t typeId, uint paramCount) { + return kj::refcounted(kj::addRef(*this), typeId, paramCount); +} + +kj::Maybe> BrandScope::setParams( + kj::Array params, Declaration::Which genericType, Expression::Reader source) { + if (this->params.size() != 0) { + errorReporter.addErrorOn(source, "Double-application of generic parameters."); + return nullptr; + } else if (params.size() > leafParamCount) { + if (leafParamCount == 0) { + errorReporter.addErrorOn(source, "Declaration does not accept generic parameters."); + } else { + errorReporter.addErrorOn(source, "Too many generic parameters."); + } + return nullptr; + } else if (params.size() < leafParamCount) { + errorReporter.addErrorOn(source, "Not enough generic parameters."); + return nullptr; + } else { + if (genericType != Declaration::BUILTIN_LIST) { + for (auto& param: params) { + KJ_IF_MAYBE(kind, param.getKind()) { + switch (*kind) { + case Declaration::BUILTIN_LIST: + case Declaration::BUILTIN_TEXT: + case Declaration::BUILTIN_DATA: + case Declaration::BUILTIN_ANY_POINTER: + case Declaration::STRUCT: + case Declaration::INTERFACE: + break; + + default: + param.addError(errorReporter, + "Sorry, only pointer types can be used as generic parameters."); + break; + } + } + } + } + + return kj::refcounted(*this, kj::mv(params)); + } +} + +kj::Own BrandScope::pop(uint64_t newLeafId) { + if (leafId == newLeafId) { + return kj::addRef(*this); + } + KJ_IF_MAYBE(p, parent) { + return (*p)->pop(newLeafId); + } else { + // Looks like we're moving into a whole top-level scope. + return kj::refcounted(errorReporter, newLeafId); + } +} + +kj::Maybe BrandScope::lookupParameter( + Resolver& resolver, uint64_t scopeId, uint index) { + // Returns null if the param should be inherited from the client scope. + + if (scopeId == leafId) { + if (index < params.size()) { + return params[index]; + } else if (inherited) { + return nullptr; + } else { + // Unbound and not inherited, so return AnyPointer. 
+ auto decl = resolver.resolveBuiltin(Declaration::BUILTIN_ANY_POINTER); + return BrandedDecl(decl, + evaluateBrand(resolver, decl, List::Reader()), + Expression::Reader()); + } + } else KJ_IF_MAYBE(p, parent) { + return p->get()->lookupParameter(resolver, scopeId, index); + } else { + KJ_FAIL_REQUIRE("scope is not a parent"); + } +} + +kj::Maybe> BrandScope::getParams(uint64_t scopeId) { + // Returns null if params at the requested scope should be inherited from the client scope. + + if (scopeId == leafId) { + if (inherited) { + return nullptr; + } else { + return params.asPtr(); + } + } else KJ_IF_MAYBE(p, parent) { + return p->get()->getParams(scopeId); + } else { + KJ_FAIL_REQUIRE("scope is not a parent"); + } +} + +BrandedDecl BrandScope::interpretResolve( + Resolver& resolver, Resolver::ResolveResult& result, Expression::Reader source) { + if (result.is()) { + auto& decl = result.get(); + + auto scope = pop(decl.scopeId); + KJ_IF_MAYBE(brand, decl.brand) { + scope = scope->evaluateBrand(resolver, decl, brand->getScopes()); + } else { + scope = scope->push(decl.id, decl.genericParamCount); + } + + return BrandedDecl(decl, kj::mv(scope), source); + } else { + auto& param = result.get(); + KJ_IF_MAYBE(p, lookupParameter(resolver, param.id, param.index)) { + return *p; + } else { + return BrandedDecl(param, source); + } + } +} + +kj::Own BrandScope::evaluateBrand( + Resolver& resolver, Resolver::ResolvedDecl decl, + List::Reader brand, uint index) { + auto result = kj::refcounted(errorReporter, decl.id); + result->leafParamCount = decl.genericParamCount; + + // Fill in `params`. + if (index < brand.size()) { + auto nextScope = brand[index]; + if (decl.id == nextScope.getScopeId()) { + // Initialize our parameters. + + switch (nextScope.which()) { + case schema::Brand::Scope::BIND: { + auto bindings = nextScope.getBind(); + auto params = kj::heapArrayBuilder(bindings.size()); + for (auto binding: bindings) { + switch (binding.which()) { + case schema::Brand::Binding::UNBOUND: { + // Build an AnyPointer-equivalent. + auto anyPointerDecl = resolver.resolveBuiltin(Declaration::BUILTIN_ANY_POINTER); + params.add(BrandedDecl(anyPointerDecl, + kj::refcounted(errorReporter, anyPointerDecl.scopeId), + Expression::Reader())); + break; + } + + case schema::Brand::Binding::TYPE: + // Reverse this schema::Type back into a BrandedDecl. + params.add(decompileType(resolver, binding.getType())); + break; + } + } + result->params = params.finish(); + break; + } + + case schema::Brand::Scope::INHERIT: + KJ_IF_MAYBE(p, getParams(decl.id)) { + result->params = kj::heapArray(*p); + } else { + result->inherited = true; + } + break; + } + + // Parent should start one level deeper in the list. + ++index; + } + } + + // Fill in `parent`. 
+ KJ_IF_MAYBE(parent, decl.resolver->getParent()) { + result->parent = evaluateBrand(resolver, *parent, brand, index); + } + + return result; +} + +BrandedDecl BrandScope::decompileType( + Resolver& resolver, schema::Type::Reader type) { + auto builtin = [&](Declaration::Which which) -> BrandedDecl { + auto decl = resolver.resolveBuiltin(which); + return BrandedDecl(decl, + evaluateBrand(resolver, decl, List::Reader()), + Expression::Reader()); + }; + + switch (type.which()) { + case schema::Type::VOID: return builtin(Declaration::BUILTIN_VOID); + case schema::Type::BOOL: return builtin(Declaration::BUILTIN_BOOL); + case schema::Type::INT8: return builtin(Declaration::BUILTIN_INT8); + case schema::Type::INT16: return builtin(Declaration::BUILTIN_INT16); + case schema::Type::INT32: return builtin(Declaration::BUILTIN_INT32); + case schema::Type::INT64: return builtin(Declaration::BUILTIN_INT64); + case schema::Type::UINT8: return builtin(Declaration::BUILTIN_U_INT8); + case schema::Type::UINT16: return builtin(Declaration::BUILTIN_U_INT16); + case schema::Type::UINT32: return builtin(Declaration::BUILTIN_U_INT32); + case schema::Type::UINT64: return builtin(Declaration::BUILTIN_U_INT64); + case schema::Type::FLOAT32: return builtin(Declaration::BUILTIN_FLOAT32); + case schema::Type::FLOAT64: return builtin(Declaration::BUILTIN_FLOAT64); + case schema::Type::TEXT: return builtin(Declaration::BUILTIN_TEXT); + case schema::Type::DATA: return builtin(Declaration::BUILTIN_DATA); + + case schema::Type::ENUM: { + auto enumType = type.getEnum(); + Resolver::ResolvedDecl decl = resolver.resolveId(enumType.getTypeId()); + return BrandedDecl(decl, + evaluateBrand(resolver, decl, enumType.getBrand().getScopes()), + Expression::Reader()); + } + + case schema::Type::INTERFACE: { + auto interfaceType = type.getInterface(); + Resolver::ResolvedDecl decl = resolver.resolveId(interfaceType.getTypeId()); + return BrandedDecl(decl, + evaluateBrand(resolver, decl, interfaceType.getBrand().getScopes()), + Expression::Reader()); + } + + case schema::Type::STRUCT: { + auto structType = type.getStruct(); + Resolver::ResolvedDecl decl = resolver.resolveId(structType.getTypeId()); + return BrandedDecl(decl, + evaluateBrand(resolver, decl, structType.getBrand().getScopes()), + Expression::Reader()); + } + + case schema::Type::LIST: { + auto elementType = decompileType(resolver, type.getList().getElementType()); + return KJ_ASSERT_NONNULL(builtin(Declaration::BUILTIN_LIST) + .applyParams(kj::heapArray(&elementType, 1), Expression::Reader())); + } + + case schema::Type::ANY_POINTER: { + auto anyPointer = type.getAnyPointer(); + switch (anyPointer.which()) { + case schema::Type::AnyPointer::UNCONSTRAINED: + return builtin(Declaration::BUILTIN_ANY_POINTER); + + case schema::Type::AnyPointer::PARAMETER: { + auto param = anyPointer.getParameter(); + auto id = param.getScopeId(); + uint index = param.getParameterIndex(); + KJ_IF_MAYBE(binding, lookupParameter(resolver, id, index)) { + return *binding; + } else { + return BrandedDecl(Resolver::ResolvedParameter {id, index}, Expression::Reader()); + } + } + + case schema::Type::AnyPointer::IMPLICIT_METHOD_PARAMETER: + KJ_FAIL_ASSERT("Alias pointed to implicit method type parameter?"); + } + + KJ_UNREACHABLE; + } + } + + KJ_UNREACHABLE; +} + +kj::Maybe BrandScope::compileDeclExpression( + Expression::Reader source, Resolver& resolver, + ImplicitParams implicitMethodParams) { + switch (source.which()) { + case Expression::UNKNOWN: + // Error reported earlier. 
+ return nullptr; + + case Expression::POSITIVE_INT: + case Expression::NEGATIVE_INT: + case Expression::FLOAT: + case Expression::STRING: + case Expression::BINARY: + case Expression::LIST: + case Expression::TUPLE: + case Expression::EMBED: + errorReporter.addErrorOn(source, "Expected name."); + return nullptr; + + case Expression::RELATIVE_NAME: { + auto name = source.getRelativeName(); + auto nameValue = name.getValue(); + + // Check implicit method params first. + for (auto i: kj::indices(implicitMethodParams.params)) { + if (implicitMethodParams.params[i].getName() == nameValue) { + if (implicitMethodParams.scopeId == 0) { + return BrandedDecl::implicitMethodParam(i); + } else { + return BrandedDecl(Resolver::ResolvedParameter { + implicitMethodParams.scopeId, static_cast(i) }, + Expression::Reader()); + } + } + } + + KJ_IF_MAYBE(r, resolver.resolve(nameValue)) { + return interpretResolve(resolver, *r, source); + } else { + errorReporter.addErrorOn(name, kj::str("Not defined: ", nameValue)); + return nullptr; + } + } + + case Expression::ABSOLUTE_NAME: { + auto name = source.getAbsoluteName(); + KJ_IF_MAYBE(r, resolver.getTopScope().resolver->resolveMember(name.getValue())) { + return interpretResolve(resolver, *r, source); + } else { + errorReporter.addErrorOn(name, kj::str("Not defined: ", name.getValue())); + return nullptr; + } + } + + case Expression::IMPORT: { + auto filename = source.getImport(); + KJ_IF_MAYBE(decl, resolver.resolveImport(filename.getValue())) { + // Import is always a root scope, so create a fresh BrandScope. + return BrandedDecl(*decl, kj::refcounted( + errorReporter, decl->id, decl->genericParamCount, *decl->resolver), source); + } else { + errorReporter.addErrorOn(filename, kj::str("Import failed: ", filename.getValue())); + return nullptr; + } + } + + case Expression::APPLICATION: { + auto app = source.getApplication(); + KJ_IF_MAYBE(decl, compileDeclExpression(app.getFunction(), resolver, implicitMethodParams)) { + // Compile all params. + auto params = app.getParams(); + auto compiledParams = kj::heapArrayBuilder(params.size()); + bool paramFailed = false; + for (auto param: params) { + if (param.isNamed()) { + errorReporter.addErrorOn(param.getNamed(), "Named parameter not allowed here."); + } + + KJ_IF_MAYBE(d, compileDeclExpression(param.getValue(), resolver, implicitMethodParams)) { + compiledParams.add(kj::mv(*d)); + } else { + // Param failed to compile. Error was already reported. + paramFailed = true; + } + }; + + if (paramFailed) { + return kj::mv(*decl); + } + + // Add the parameters to the brand. + KJ_IF_MAYBE(applied, decl->applyParams(compiledParams.finish(), source)) { + return kj::mv(*applied); + } else { + // Error already reported. Ignore parameters. 
+ return kj::mv(*decl); + } + } else { + // error already reported + return nullptr; + } + } + + case Expression::MEMBER: { + auto member = source.getMember(); + KJ_IF_MAYBE(decl, compileDeclExpression(member.getParent(), resolver, implicitMethodParams)) { + auto name = member.getName(); + KJ_IF_MAYBE(memberDecl, decl->getMember(name.getValue(), source)) { + return kj::mv(*memberDecl); + } else { + errorReporter.addErrorOn(name, kj::str( + "'", expressionString(member.getParent()), + "' has no member named '", name.getValue(), "'")); + return nullptr; + } + } else { + // error already reported + return nullptr; + } + } + } + + KJ_UNREACHABLE; +} + +} // namespace compiler +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/generics.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/generics.h new file mode 100644 index 00000000000..fbdbddb488d --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/generics.h @@ -0,0 +1,310 @@ +// Copyright (c) 2013-2020 Sandstorm Development Group, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include "error-reporter.h" +#include "resolver.h" + +CAPNP_BEGIN_HEADER + +namespace capnp { +namespace compiler { + +class BrandedDecl; +class BrandScope; + +struct ImplicitParams { + // Represents a set of implicit brand parameters visible in the current context. + // + // As of this writing, implicit parameters occur only in the context of RPC methods. That is, + // like this: + // + // makeBox @0 [T] (value :T) -> Box(T); + // + // Here, `T` is an implicit parameter. + + uint64_t scopeId; + // If zero, then any reference to an implicit param in this context should be compiled to a + // `implicitMethodParam` AnyPointer. If non-zero, it should be compiled to a `parameter` + // AnyPointer using this scopeId. This comes into play when compiling the implicitly-generated + // struct types corresponding to a method's params or results; these implicitly-generated types + // themselves have *explicit* brand parameters corresponding to the *implicit* brand parameters + // of the method. + // + // TODO(cleanup): Unclear why ImplicitParams is even used when compiling the implicit structs + // with explicit params. Missing abstraction? + + List::Reader params; + // Name and metadata about the parameter declaration. 
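  // Illustrative aside, not part of the patch: a helper like this shows how the
  // scopeId rule documented above plays out when a reference to implicit
  // parameter `index` is encoded into a schema::Type. It mirrors the
  // type-variable branch of BrandedDecl::compileAsType() in generics.c++.
  static void encodeImplicitParamRef(const ImplicitParams& ctx, uint index,
                                     schema::Type::Builder target) {
    if (ctx.scopeId == 0) {
      // Free-standing method context: use the implicitMethodParameter encoding.
      target.initAnyPointer().initImplicitMethodParameter().setParameterIndex(index);
    } else {
      // Compiling the method's implicit Params/Results struct: the implicit
      // parameter becomes an explicit brand parameter of that struct's scope.
      auto param = target.initAnyPointer().initParameter();
      param.setScopeId(ctx.scopeId);
      param.setParameterIndex(index);
    }
  }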
+ + static inline ImplicitParams none() { + // Convenience helper to create an empty `ImplicitParams`. + return { 0, List::Reader() }; + } +}; + +class BrandedDecl { + // Represents a declaration possibly with generic parameter bindings. + +public: + inline BrandedDecl(Resolver::ResolvedDecl decl, + kj::Own&& brand, + Expression::Reader source) + : brand(kj::mv(brand)), source(source) { + // `source`, is the expression which specified this branded decl. It is provided so that errors + // can be reported against it. It is acceptable to pass a default-initialized reader if there's + // no source expression; errors will then be reported at 0, 0. + + body.init(kj::mv(decl)); + } + inline BrandedDecl(Resolver::ResolvedParameter variable, Expression::Reader source) + : source(source) { + body.init(kj::mv(variable)); + } + inline BrandedDecl(decltype(nullptr)) {} + inline BrandedDecl() {} // exists only for ExternalMutexGuarded to work... + + static BrandedDecl implicitMethodParam(uint index) { + // Get a BrandedDecl referring to an implicit method parameter. + // (As a hack, we internally represent this as a ResolvedParameter. Sorry.) + return BrandedDecl(Resolver::ResolvedParameter { 0, index }, Expression::Reader()); + } + + BrandedDecl(BrandedDecl& other); + BrandedDecl(BrandedDecl&& other) = default; + + BrandedDecl& operator=(BrandedDecl& other); + BrandedDecl& operator=(BrandedDecl&& other) = default; + + kj::Maybe applyParams(kj::Array params, Expression::Reader subSource); + // Treat the declaration as a generic and apply it to the given parameter list. + + kj::Maybe getMember(kj::StringPtr memberName, Expression::Reader subSource); + // Get a member of this declaration. + + kj::Maybe getKind(); + // Returns the kind of declaration, or null if this is an unbound generic variable. + + template + uint64_t getIdAndFillBrand(InitBrandFunc&& initBrand); + // Returns the type ID of this node. `initBrand` is a zero-arg functor which returns + // schema::Brand::Builder; this will be called if this decl has brand bindings, and + // the returned builder filled in to reflect those bindings. + // + // It is an error to call this when `getKind()` returns null. + + kj::Maybe getListParam(); + // Only if the kind is BUILTIN_LIST: Get the list's type parameter. + + Resolver::ResolvedParameter asVariable(); + // If this is an unbound generic variable (i.e. `getKind()` returns null), return information + // about the variable. + // + // It is an error to call this when `getKind()` does not return null. + + bool compileAsType(ErrorReporter& errorReporter, schema::Type::Builder target); + // Compile this decl to a schema::Type. + + inline void addError(ErrorReporter& errorReporter, kj::StringPtr message) { + errorReporter.addErrorOn(source, message); + } + + Resolver::ResolveResult asResolveResult(uint64_t scopeId, schema::Brand::Builder brandBuilder); + // Reverse this into a ResolveResult. If necessary, use `brandBuilder` to fill in + // ResolvedDecl.brand. + + kj::String toString(); + kj::String toDebugString(); + +private: + Resolver::ResolveResult body; + kj::Own brand; // null if parameter + Expression::Reader source; +}; + +class BrandScope: public kj::Refcounted { + // Tracks the brand parameter bindings affecting the scope specified by some expression. 
For + // example, if we are interpreting the type expression "Foo(Text).Bar", we would start with the + // current scope's BrandScope, create a new child BrandScope representing "Foo", add the "(Text)" + // parameter bindings to it, then create a further child scope for "Bar". Thus the BrandScope for + // Bar knows that Foo's parameter list has been bound to "(Text)". + +public: + BrandScope(ErrorReporter& errorReporter, uint64_t startingScopeId, + uint startingScopeParamCount, Resolver& startingScope); + // TODO(bug): Passing an `errorReporter` to the constructor of `BrandScope` turns out not to + // make a ton of sense, as an `errorReporter` is meant to report errors in a specific module, + // but `BrandScope` might be constructed while compiling one module but then used when + // compiling a different module, or not compiling a module at all. Note, though, that it DOES + // make sense for BrandedDecl to have an ErrorReporter, specifically associated with its + // `source` expression. + + bool isGeneric(); + // Returns true if this scope or any parent scope is a generic (has brand parameters). + + kj::Own push(uint64_t typeId, uint paramCount); + // Creates a new child scope with the given type ID and number of brand parameters. + + kj::Maybe> setParams( + kj::Array params, Declaration::Which genericType, Expression::Reader source); + // Create a new BrandScope representing the same scope, but with parameters filled in. + // + // This should only be called on the generic version of the scope. If called on a branded + // version, an error will be reported. + // + // Returns null if an error occurred that prevented creating the BrandScope; the error will have + // been reported to the ErrorReporter. + + kj::Own pop(uint64_t newLeafId); + // Return the parent scope. + + kj::Maybe lookupParameter(Resolver& resolver, uint64_t scopeId, uint index); + // Search up the scope chain for the scope matching `scopeId`, and return its `index`th parameter + // binding. Returns null if the parameter is from a scope that we are currently compiling, and + // hasn't otherwise been bound to any argument (see Brand.Scope.inherit in schema.capnp). + // + // In the case that a parameter wasn't specified, but isn't part of the current scope, this + // returns the declaration for `AnyPointer`. + // + // TODO(cleanup): Should be called lookupArgument()? + + kj::Maybe> getParams(uint64_t scopeId); + // Get the whole list of parameter bindings at the given scope. Returns null if the scope is + // currently be compiled and the parameters are unbound. + // + // Note that it's possible that not all declared parameters were actually specified for a given + // scope. For example, if you declare a generic `Foo(T, U)`, and then you intiantiate it + // somewhere as `Foo(Text)`, then `U` is unspecified -- this is not an error, because Cap'n + // Proto allows new type parameters to be added over time. `U` should be treated as `AnyPointer` + // in this case, but `getParams()` doesn't know how many parameters are expected, so it will + // return an array that only contains one item. Use `lookupParameter()` if you want unspecified + // parameters to be filled in with `AnyPointer` automatically. + // + // TODO(cleanup): Should be called getArguments()? + + template + void compile(InitBrandFunc&& initBrand); + // Constructs the schema::Brand corresponding to this brand scope. + // + // `initBrand` is a zero-arg functor which returns an empty schema::Brand::Builder, into which + // the brand is constructed. 
If no generics are present, then `initBrand` is never called. + // + // TODO(cleanup): Should this return Maybe> instead? + + kj::Maybe compileDeclExpression( + Expression::Reader source, Resolver& resolver, + ImplicitParams implicitMethodParams); + // Interpret a type expression within this branded scope. + + BrandedDecl interpretResolve( + Resolver& resolver, Resolver::ResolveResult& result, Expression::Reader source); + // After using a Resolver to resolve a symbol, call interpretResolve() to interpret the result + // within the current brand scope. For example, if a name resolved to a brand parameter, this + // replaces it with the appropriate argument from the scope. + + inline uint64_t getScopeId() { return leafId; } + +private: + ErrorReporter& errorReporter; + kj::Maybe> parent; + uint64_t leafId; // zero = this is the root + uint leafParamCount; // number of generic parameters on this leaf + bool inherited; + kj::Array params; + + BrandScope(kj::Own parent, uint64_t leafId, uint leafParamCount) + : errorReporter(parent->errorReporter), + parent(kj::mv(parent)), leafId(leafId), leafParamCount(leafParamCount), + inherited(false) {} + BrandScope(BrandScope& base, kj::Array params) + : errorReporter(base.errorReporter), + leafId(base.leafId), leafParamCount(base.leafParamCount), + inherited(false), params(kj::mv(params)) { + KJ_IF_MAYBE(p, base.parent) { + parent = kj::addRef(**p); + } + } + BrandScope(ErrorReporter& errorReporter, uint64_t scopeId) + : errorReporter(errorReporter), leafId(scopeId), leafParamCount(0), inherited(false) {} + + kj::Own evaluateBrand( + Resolver& resolver, Resolver::ResolvedDecl decl, + List::Reader brand, uint index = 0); + + BrandedDecl decompileType(Resolver& resolver, schema::Type::Reader type); + + template + friend kj::Own kj::refcounted(Params&&... params); + friend class BrandedDecl; +}; + +template +uint64_t BrandedDecl::getIdAndFillBrand(InitBrandFunc&& initBrand) { + KJ_REQUIRE(body.is()); + + brand->compile(kj::fwd(initBrand)); + return body.get().id; +} + +template +void BrandScope::compile(InitBrandFunc&& initBrand) { + kj::Vector levels; + BrandScope* ptr = this; + for (;;) { + if (ptr->params.size() > 0 || (ptr->inherited && ptr->leafParamCount > 0)) { + levels.add(ptr); + } + KJ_IF_MAYBE(p, ptr->parent) { + ptr = *p; + } else { + break; + } + } + + if (levels.size() > 0) { + auto scopes = initBrand().initScopes(levels.size()); + for (uint i: kj::indices(levels)) { + auto scope = scopes[i]; + scope.setScopeId(levels[i]->leafId); + + if (levels[i]->inherited) { + scope.setInherit(); + } else { + auto bindings = scope.initBind(levels[i]->params.size()); + for (uint j: kj::indices(bindings)) { + levels[i]->params[j].compileAsType(errorReporter, bindings[j].initType()); + } + } + } + } +} + +} // namespace compiler +} // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp index 209a3279012..4434f2d9447 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp @@ -244,6 +244,9 @@ struct Declaration { type @1 :Expression; # Specified some other struct type instead of a named list. + + stream @4 :Void; + # The keyword "stream". 
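The new `stream` variant records the streaming-method syntax that this compiler change supports, where a method's result list is the bare keyword, e.g. `write @0 (bytes :Data) -> stream;`. As a sketch of how a consumer of the parsed AST can branch on it, using only the Declaration::ParamList accessors generated later in this patch (the function itself is hypothetical):

    // Classify a parsed parameter list; accessor and enumerant names follow the generated
    // grammar.capnp.h shown further down, everything else is illustrative.
    void describeParamList(capnp::compiler::Declaration::ParamList::Reader paramList) {
      switch (paramList.which()) {
        case capnp::compiler::Declaration::ParamList::NAMED_LIST:
          break;  // an ordinary "(a :T, b :U)" list of named parameters
        case capnp::compiler::Declaration::ParamList::TYPE:
          break;  // some other struct type was named instead of a parameter list
        case capnp::compiler::Declaration::ParamList::STREAM:
          break;  // the keyword "stream"; the compiler rejects it on the left of "->"
      }
    }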
} startByte @2 :UInt32; diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp.c++ index f84b43703f6..65b20e9bf96 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp.c++ @@ -1675,17 +1675,17 @@ const ::capnp::_::RawSchema s_94099c3f9eb32d6b = { 0, 0, nullptr, nullptr, nullptr, { &s_94099c3f9eb32d6b, nullptr, nullptr, 0, 0, nullptr } }; #endif // !CAPNP_LITE -static const ::capnp::_::AlignedData<87> b_b3f66e7a79d81bcd = { +static const ::capnp::_::AlignedData<102> b_b3f66e7a79d81bcd = { { 0, 0, 0, 0, 5, 0, 6, 0, 205, 27, 216, 121, 122, 110, 246, 179, 41, 0, 0, 0, 1, 0, 2, 0, 187, 131, 126, 193, 135, 231, 239, 150, - 1, 0, 7, 0, 0, 0, 2, 0, + 1, 0, 7, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 154, 1, 0, 0, 45, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 41, 0, 0, 0, 231, 0, 0, 0, + 41, 0, 0, 0, 31, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99, 97, 112, 110, 112, 47, 99, 111, @@ -1696,35 +1696,42 @@ static const ::capnp::_::AlignedData<87> b_b3f66e7a79d81bcd = { 46, 80, 97, 114, 97, 109, 76, 105, 115, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, - 16, 0, 0, 0, 3, 0, 4, 0, + 20, 0, 0, 0, 3, 0, 4, 0, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 97, 0, 0, 0, 82, 0, 0, 0, + 125, 0, 0, 0, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 96, 0, 0, 0, 3, 0, 1, 0, - 124, 0, 0, 0, 2, 0, 1, 0, + 124, 0, 0, 0, 3, 0, 1, 0, + 152, 0, 0, 0, 2, 0, 1, 0, 1, 0, 254, 255, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 121, 0, 0, 0, 42, 0, 0, 0, + 149, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 116, 0, 0, 0, 3, 0, 1, 0, - 128, 0, 0, 0, 2, 0, 1, 0, - 2, 0, 0, 0, 1, 0, 0, 0, + 144, 0, 0, 0, 3, 0, 1, 0, + 156, 0, 0, 0, 2, 0, 1, 0, + 3, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 125, 0, 0, 0, 82, 0, 0, 0, + 153, 0, 0, 0, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 124, 0, 0, 0, 3, 0, 1, 0, - 136, 0, 0, 0, 2, 0, 1, 0, - 3, 0, 0, 0, 2, 0, 0, 0, + 152, 0, 0, 0, 3, 0, 1, 0, + 164, 0, 0, 0, 2, 0, 1, 0, + 4, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 133, 0, 0, 0, 66, 0, 0, 0, + 161, 0, 0, 0, 66, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 156, 0, 0, 0, 3, 0, 1, 0, + 168, 0, 0, 0, 2, 0, 1, 0, + 2, 0, 253, 255, 0, 0, 0, 0, + 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 128, 0, 0, 0, 3, 0, 1, 0, - 140, 0, 0, 0, 2, 0, 1, 0, + 165, 0, 0, 0, 58, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 160, 0, 0, 0, 3, 0, 1, 0, + 172, 0, 0, 0, 2, 0, 1, 0, 110, 97, 109, 101, 100, 76, 105, 115, 116, 0, 0, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, @@ -1762,6 +1769,14 @@ static const ::capnp::_::AlignedData<87> b_b3f66e7a79d81bcd = { 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 115, 116, 114, 101, 97, 109, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }; ::capnp::word const* const bp_b3f66e7a79d81bcd = b_b3f66e7a79d81bcd.words; @@ -1770,11 +1785,11 @@ static const ::capnp::_::RawSchema* const d_b3f66e7a79d81bcd[] = { &s_8e207d4dfe54d0de, &s_fffe08a9a697d2a5, }; -static const uint16_t m_b3f66e7a79d81bcd[] = {3, 0, 2, 1}; -static const uint16_t i_b3f66e7a79d81bcd[] = {0, 1, 2, 3}; +static const uint16_t m_b3f66e7a79d81bcd[] = {3, 0, 2, 4, 1}; +static const uint16_t 
i_b3f66e7a79d81bcd[] = {0, 1, 4, 2, 3}; const ::capnp::_::RawSchema s_b3f66e7a79d81bcd = { - 0xb3f66e7a79d81bcd, b_b3f66e7a79d81bcd.words, 87, d_b3f66e7a79d81bcd, m_b3f66e7a79d81bcd, - 2, 4, i_b3f66e7a79d81bcd, nullptr, nullptr, { &s_b3f66e7a79d81bcd, nullptr, nullptr, 0, 0, nullptr } + 0xb3f66e7a79d81bcd, b_b3f66e7a79d81bcd.words, 102, d_b3f66e7a79d81bcd, m_b3f66e7a79d81bcd, + 2, 5, i_b3f66e7a79d81bcd, nullptr, nullptr, { &s_b3f66e7a79d81bcd, nullptr, nullptr, 0, 0, nullptr } }; #endif // !CAPNP_LITE static const ::capnp::_::AlignedData<110> b_fffe08a9a697d2a5 = { diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp.h index 77cdecdce91..34825b31c2b 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/grammar.capnp.h @@ -6,11 +6,13 @@ #include #include -#if CAPNP_VERSION != 7000 +#if CAPNP_VERSION != 9001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif +CAPNP_BEGIN_HEADER + namespace capnp { namespace schemas { @@ -295,6 +297,7 @@ struct Declaration::ParamList { enum Which: uint16_t { NAMED_LIST, TYPE, + STREAM, }; struct _capnpPrivate { @@ -1968,6 +1971,9 @@ class Declaration::ParamList::Reader { inline ::uint32_t getEndByte() const; + inline bool isStream() const; + inline ::capnp::Void getStream() const; + private: ::capnp::_::StructReader _reader; template @@ -2019,6 +2025,10 @@ class Declaration::ParamList::Builder { inline ::uint32_t getEndByte(); inline void setEndByte( ::uint32_t value); + inline bool isStream(); + inline ::capnp::Void getStream(); + inline void setStream( ::capnp::Void value = ::capnp::VOID); + private: ::capnp::_::StructBuilder _builder; template @@ -5758,6 +5768,32 @@ inline void Declaration::ParamList::Builder::setEndByte( ::uint32_t value) { ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } +inline bool Declaration::ParamList::Reader::isStream() const { + return which() == Declaration::ParamList::STREAM; +} +inline bool Declaration::ParamList::Builder::isStream() { + return which() == Declaration::ParamList::STREAM; +} +inline ::capnp::Void Declaration::ParamList::Reader::getStream() const { + KJ_IREQUIRE((which() == Declaration::ParamList::STREAM), + "Must check which() before get()ing a union member."); + return _reader.getDataField< ::capnp::Void>( + ::capnp::bounded<0>() * ::capnp::ELEMENTS); +} + +inline ::capnp::Void Declaration::ParamList::Builder::getStream() { + KJ_IREQUIRE((which() == Declaration::ParamList::STREAM), + "Must check which() before get()ing a union member."); + return _builder.getDataField< ::capnp::Void>( + ::capnp::bounded<0>() * ::capnp::ELEMENTS); +} +inline void Declaration::ParamList::Builder::setStream( ::capnp::Void value) { + _builder.setDataField( + ::capnp::bounded<0>() * ::capnp::ELEMENTS, Declaration::ParamList::STREAM); + _builder.setDataField< ::capnp::Void>( + ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); +} + inline bool Declaration::Param::Reader::hasName() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); @@ -6834,3 +6870,5 @@ inline ::capnp::Orphan< ::capnp::compiler::Declaration> ParsedFile::Builder::dis } // namespace } // namespace +CAPNP_END_HEADER + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/lexer.capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/lexer.capnp.h index 
3ab84ea3664..a93065a2c29 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/lexer.capnp.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/lexer.capnp.h @@ -6,11 +6,13 @@ #include #include -#if CAPNP_VERSION != 7000 +#if CAPNP_VERSION != 9001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif +CAPNP_BEGIN_HEADER + namespace capnp { namespace schemas { @@ -1239,3 +1241,5 @@ inline ::capnp::Orphan< ::capnp::List< ::capnp::compiler::Statement, ::capnp::K } // namespace } // namespace +CAPNP_END_HEADER + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/lexer.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/lexer.h index 23c97166e96..a1c05418b67 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/lexer.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/lexer.h @@ -21,15 +21,13 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include #include #include "error-reporter.h" +CAPNP_BEGIN_HEADER + namespace capnp { namespace compiler { @@ -97,3 +95,5 @@ class Lexer { } // namespace compiler } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/module-loader.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/module-loader.c++ index ae12e75b4e3..803d39238ed 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/module-loader.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/module-loader.c++ @@ -115,7 +115,7 @@ struct FileKeyHash { key.size * 103 + (key.lastModified - kj::UNIX_EPOCH) / kj::MILLISECONDS * 73; } else { return key.hashCode + key.size * 103 + - (key.lastModified - kj::UNIX_EPOCH) / kj::NANOSECONDS * 73; + (key.lastModified - kj::UNIX_EPOCH) / kj::NANOSECONDS * 73ull; } } }; diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/module-loader.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/module-loader.h index d530b6d824c..5e51ac488b5 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/module-loader.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/module-loader.h @@ -21,10 +21,6 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "compiler.h" #include "error-reporter.h" #include @@ -32,6 +28,8 @@ #include #include +CAPNP_BEGIN_HEADER + namespace capnp { namespace compiler { @@ -60,3 +58,5 @@ class ModuleLoader { } // namespace compiler } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/node-translator.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/node-translator.c++ index a59185302a4..d7227386fd1 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/node-translator.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/node-translator.c++ @@ -20,7 +20,7 @@ // THE SOFTWARE. #include "node-translator.h" -#include "parser.h" // only for generateGroupId() +#include "parser.h" // only for generateGroupId() and expressionString() #include #include #include @@ -28,6 +28,7 @@ #include #include #include +#include namespace capnp { namespace compiler { @@ -129,6 +130,11 @@ public: // No expansion requested. return true; } + if (oldLgSize == kj::size(holes)) { + // Old value is already a full word. Further expansion is impossible. 
+ return false; + } + KJ_ASSERT(oldLgSize < kj::size(holes)); if (holes[oldLgSize] != oldOffset + 1) { // The space immediately after the location is not a hole. return false; @@ -423,7 +429,7 @@ public: // in cases involving unions nested in other unions. The bug could lead to multiple // fields in a group incorrectly being assigned overlapping offsets. Although the bug // is now fixed by adding the `newHoles` parameter, this silently breaks - // backwards-compatibilty with affected schemas. Therefore, for now, we throw an + // backwards-compatibility with affected schemas. Therefore, for now, we throw an // exception to alert developers of the problem. // // TODO(cleanup): Once sufficient time has elapsed, remove this assert. @@ -572,802 +578,6 @@ private: // ======================================================================================= -class NodeTranslator::BrandedDecl { - // Represents a declaration possibly with generic parameter bindings. - // - // TODO(cleaup): This is too complicated to live here. We should refactor this class and - // BrandScope out into their own file, independent of NodeTranslator. - -public: - inline BrandedDecl(Resolver::ResolvedDecl decl, - kj::Own&& brand, - Expression::Reader source) - : brand(kj::mv(brand)), source(source) { - body.init(kj::mv(decl)); - } - inline BrandedDecl(Resolver::ResolvedParameter variable, Expression::Reader source) - : source(source) { - body.init(kj::mv(variable)); - } - inline BrandedDecl(decltype(nullptr)) {} - - static BrandedDecl implicitMethodParam(uint index) { - // Get a BrandedDecl referring to an implicit method parameter. - // (As a hack, we internally represent this as a ResolvedParameter. Sorry.) - return BrandedDecl(Resolver::ResolvedParameter { 0, index }, Expression::Reader()); - } - - BrandedDecl(BrandedDecl& other); - BrandedDecl(BrandedDecl&& other) = default; - - BrandedDecl& operator=(BrandedDecl& other); - BrandedDecl& operator=(BrandedDecl&& other) = default; - - // TODO(cleanup): A lot of the methods below are actually only called within compileAsType(), - // which was originally a method on NodeTranslator, but now is a method here and thus doesn't - // need these to be public. We should privatize most of these. - - kj::Maybe applyParams(kj::Array params, Expression::Reader subSource); - // Treat the declaration as a generic and apply it to the given parameter list. - - kj::Maybe getMember(kj::StringPtr memberName, Expression::Reader subSource); - // Get a member of this declaration. - - kj::Maybe getKind(); - // Returns the kind of declaration, or null if this is an unbound generic variable. - - template - uint64_t getIdAndFillBrand(InitBrandFunc&& initBrand); - // Returns the type ID of this node. `initBrand` is a zero-arg functor which returns - // schema::Brand::Builder; this will be called if this decl has brand bindings, and - // the returned builder filled in to reflect those bindings. - // - // It is an error to call this when `getKind()` returns null. - - kj::Maybe getListParam(); - // Only if the kind is BUILTIN_LIST: Get the list's type parameter. - - Resolver::ResolvedParameter asVariable(); - // If this is an unbound generic variable (i.e. `getKind()` returns null), return information - // about the variable. - // - // It is an error to call this when `getKind()` does not return null. - - bool compileAsType(ErrorReporter& errorReporter, schema::Type::Builder target); - // Compile this decl to a schema::Type. 
- - inline void addError(ErrorReporter& errorReporter, kj::StringPtr message) { - errorReporter.addErrorOn(source, message); - } - - Resolver::ResolveResult asResolveResult(uint64_t scopeId, schema::Brand::Builder brandBuilder); - // Reverse this into a ResolveResult. If necessary, use `brandBuilder` to fill in - // ResolvedDecl.brand. - - kj::String toString(); - kj::String toDebugString(); - -private: - Resolver::ResolveResult body; - kj::Own brand; // null if parameter - Expression::Reader source; -}; - -class NodeTranslator::BrandScope: public kj::Refcounted { - // Tracks the brand parameter bindings affecting the current scope. For example, if we are - // interpreting the type expression "Foo(Text).Bar", we would start with the current scopes - // BrandScope, create a new child BrandScope representing "Foo", add the "(Text)" parameter - // bindings to it, then create a further child scope for "Bar". Thus the BrandScope for Bar - // knows that Foo's parameter list has been bound to "(Text)". - // - // TODO(cleanup): This is too complicated to live here. We should refactor this class and - // BrandedDecl out into their own file, independent of NodeTranslator. - -public: - BrandScope(ErrorReporter& errorReporter, uint64_t startingScopeId, - uint startingScopeParamCount, Resolver& startingScope) - : errorReporter(errorReporter), parent(nullptr), leafId(startingScopeId), - leafParamCount(startingScopeParamCount), inherited(true) { - // Create all lexical parent scopes, all with no brand bindings. - KJ_IF_MAYBE(p, startingScope.getParent()) { - parent = kj::refcounted( - errorReporter, p->id, p->genericParamCount, *p->resolver); - } - } - - bool isGeneric() { - if (leafParamCount > 0) return true; - - KJ_IF_MAYBE(p, parent) { - return p->get()->isGeneric(); - } else { - return false; - } - } - - kj::Own push(uint64_t typeId, uint paramCount) { - return kj::refcounted(kj::addRef(*this), typeId, paramCount); - } - - kj::Maybe> setParams( - kj::Array params, Declaration::Which genericType, Expression::Reader source) { - if (this->params.size() != 0) { - errorReporter.addErrorOn(source, "Double-application of generic parameters."); - return nullptr; - } else if (params.size() > leafParamCount) { - if (leafParamCount == 0) { - errorReporter.addErrorOn(source, "Declaration does not accept generic parameters."); - } else { - errorReporter.addErrorOn(source, "Too many generic parameters."); - } - return nullptr; - } else if (params.size() < leafParamCount) { - errorReporter.addErrorOn(source, "Not enough generic parameters."); - return nullptr; - } else { - if (genericType != Declaration::BUILTIN_LIST) { - for (auto& param: params) { - KJ_IF_MAYBE(kind, param.getKind()) { - switch (*kind) { - case Declaration::BUILTIN_LIST: - case Declaration::BUILTIN_TEXT: - case Declaration::BUILTIN_DATA: - case Declaration::BUILTIN_ANY_POINTER: - case Declaration::STRUCT: - case Declaration::INTERFACE: - break; - - default: - param.addError(errorReporter, - "Sorry, only pointer types can be used as generic parameters."); - break; - } - } - } - } - - return kj::refcounted(*this, kj::mv(params)); - } - } - - kj::Own pop(uint64_t newLeafId) { - if (leafId == newLeafId) { - return kj::addRef(*this); - } - KJ_IF_MAYBE(p, parent) { - return (*p)->pop(newLeafId); - } else { - // Looks like we're moving into a whole top-level scope. 
- return kj::refcounted(errorReporter, newLeafId); - } - } - - kj::Maybe lookupParameter(Resolver& resolver, uint64_t scopeId, uint index) { - // Returns null if the param should be inherited from the client scope. - - if (scopeId == leafId) { - if (index < params.size()) { - return params[index]; - } else if (inherited) { - return nullptr; - } else { - // Unbound and not inherited, so return AnyPointer. - auto decl = resolver.resolveBuiltin(Declaration::BUILTIN_ANY_POINTER); - return BrandedDecl(decl, - evaluateBrand(resolver, decl, List::Reader()), - Expression::Reader()); - } - } else KJ_IF_MAYBE(p, parent) { - return p->get()->lookupParameter(resolver, scopeId, index); - } else { - KJ_FAIL_REQUIRE("scope is not a parent"); - } - } - - kj::Maybe> getParams(uint64_t scopeId) { - // Returns null if params at the requested scope should be inherited from the client scope. - - if (scopeId == leafId) { - if (inherited) { - return nullptr; - } else { - return params.asPtr(); - } - } else KJ_IF_MAYBE(p, parent) { - return p->get()->getParams(scopeId); - } else { - KJ_FAIL_REQUIRE("scope is not a parent"); - } - } - - template - void compile(InitBrandFunc&& initBrand) { - kj::Vector levels; - BrandScope* ptr = this; - for (;;) { - if (ptr->params.size() > 0 || (ptr->inherited && ptr->leafParamCount > 0)) { - levels.add(ptr); - } - KJ_IF_MAYBE(p, ptr->parent) { - ptr = *p; - } else { - break; - } - } - - if (levels.size() > 0) { - auto scopes = initBrand().initScopes(levels.size()); - for (uint i: kj::indices(levels)) { - auto scope = scopes[i]; - scope.setScopeId(levels[i]->leafId); - - if (levels[i]->inherited) { - scope.setInherit(); - } else { - auto bindings = scope.initBind(levels[i]->params.size()); - for (uint j: kj::indices(bindings)) { - levels[i]->params[j].compileAsType(errorReporter, bindings[j].initType()); - } - } - } - } - } - - kj::Maybe compileDeclExpression( - Expression::Reader source, Resolver& resolver, - ImplicitParams implicitMethodParams); - - NodeTranslator::BrandedDecl interpretResolve( - Resolver& resolver, Resolver::ResolveResult& result, Expression::Reader source); - - kj::Own evaluateBrand( - Resolver& resolver, Resolver::ResolvedDecl decl, - List::Reader brand, uint index = 0); - - BrandedDecl decompileType(Resolver& resolver, schema::Type::Reader type); - - inline uint64_t getScopeId() { return leafId; } - -private: - ErrorReporter& errorReporter; - kj::Maybe> parent; - uint64_t leafId; // zero = this is the root - uint leafParamCount; // number of generic parameters on this leaf - bool inherited; - kj::Array params; - - BrandScope(kj::Own parent, uint64_t leafId, uint leafParamCount) - : errorReporter(parent->errorReporter), - parent(kj::mv(parent)), leafId(leafId), leafParamCount(leafParamCount), - inherited(false) {} - BrandScope(BrandScope& base, kj::Array params) - : errorReporter(base.errorReporter), - leafId(base.leafId), leafParamCount(base.leafParamCount), - inherited(false), params(kj::mv(params)) { - KJ_IF_MAYBE(p, base.parent) { - parent = kj::addRef(**p); - } - } - BrandScope(ErrorReporter& errorReporter, uint64_t scopeId) - : errorReporter(errorReporter), leafId(scopeId), leafParamCount(0), inherited(false) {} - - template - friend kj::Own kj::refcounted(Params&&... 
params); -}; - -NodeTranslator::BrandedDecl::BrandedDecl(BrandedDecl& other) - : body(other.body), - source(other.source) { - if (body.is()) { - brand = kj::addRef(*other.brand); - } -} - -NodeTranslator::BrandedDecl& NodeTranslator::BrandedDecl::operator=(BrandedDecl& other) { - body = other.body; - source = other.source; - if (body.is()) { - brand = kj::addRef(*other.brand); - } - return *this; -} - -kj::Maybe NodeTranslator::BrandedDecl::applyParams( - kj::Array params, Expression::Reader subSource) { - if (body.is()) { - return nullptr; - } else { - return brand->setParams(kj::mv(params), body.get().kind, subSource) - .map([&](kj::Own&& scope) { - BrandedDecl result = *this; - result.brand = kj::mv(scope); - result.source = subSource; - return result; - }); - } -} - -kj::Maybe NodeTranslator::BrandedDecl::getMember( - kj::StringPtr memberName, Expression::Reader subSource) { - if (body.is()) { - return nullptr; - } else KJ_IF_MAYBE(r, body.get().resolver->resolveMember(memberName)) { - return brand->interpretResolve(*body.get().resolver, *r, subSource); - } else { - return nullptr; - } -} - -kj::Maybe NodeTranslator::BrandedDecl::getKind() { - if (body.is()) { - return nullptr; - } else { - return body.get().kind; - } -} - -template -uint64_t NodeTranslator::BrandedDecl::getIdAndFillBrand(InitBrandFunc&& initBrand) { - KJ_REQUIRE(body.is()); - - brand->compile(kj::fwd(initBrand)); - return body.get().id; -} - -kj::Maybe NodeTranslator::BrandedDecl::getListParam() { - KJ_REQUIRE(body.is()); - - auto& decl = body.get(); - KJ_REQUIRE(decl.kind == Declaration::BUILTIN_LIST); - - auto params = KJ_ASSERT_NONNULL(brand->getParams(decl.id)); - if (params.size() != 1) { - return nullptr; - } else { - return params[0]; - } -} - -NodeTranslator::Resolver::ResolvedParameter NodeTranslator::BrandedDecl::asVariable() { - KJ_REQUIRE(body.is()); - - return body.get(); -} - -bool NodeTranslator::BrandedDecl::compileAsType( - ErrorReporter& errorReporter, schema::Type::Builder target) { - KJ_IF_MAYBE(kind, getKind()) { - switch (*kind) { - case Declaration::ENUM: { - auto enum_ = target.initEnum(); - enum_.setTypeId(getIdAndFillBrand([&]() { return enum_.initBrand(); })); - return true; - } - - case Declaration::STRUCT: { - auto struct_ = target.initStruct(); - struct_.setTypeId(getIdAndFillBrand([&]() { return struct_.initBrand(); })); - return true; - } - - case Declaration::INTERFACE: { - auto interface = target.initInterface(); - interface.setTypeId(getIdAndFillBrand([&]() { return interface.initBrand(); })); - return true; - } - - case Declaration::BUILTIN_LIST: { - auto elementType = target.initList().initElementType(); - - KJ_IF_MAYBE(param, getListParam()) { - if (!param->compileAsType(errorReporter, elementType)) { - return false; - } - } else { - addError(errorReporter, "'List' requires exactly one parameter."); - return false; - } - - if (elementType.isAnyPointer()) { - addError(errorReporter, "'List(AnyPointer)' is not supported."); - // Seeing List(AnyPointer) later can mess things up, so change the type to Void. 
- elementType.setVoid(); - return false; - } - - return true; - } - - case Declaration::BUILTIN_VOID: target.setVoid(); return true; - case Declaration::BUILTIN_BOOL: target.setBool(); return true; - case Declaration::BUILTIN_INT8: target.setInt8(); return true; - case Declaration::BUILTIN_INT16: target.setInt16(); return true; - case Declaration::BUILTIN_INT32: target.setInt32(); return true; - case Declaration::BUILTIN_INT64: target.setInt64(); return true; - case Declaration::BUILTIN_U_INT8: target.setUint8(); return true; - case Declaration::BUILTIN_U_INT16: target.setUint16(); return true; - case Declaration::BUILTIN_U_INT32: target.setUint32(); return true; - case Declaration::BUILTIN_U_INT64: target.setUint64(); return true; - case Declaration::BUILTIN_FLOAT32: target.setFloat32(); return true; - case Declaration::BUILTIN_FLOAT64: target.setFloat64(); return true; - case Declaration::BUILTIN_TEXT: target.setText(); return true; - case Declaration::BUILTIN_DATA: target.setData(); return true; - - case Declaration::BUILTIN_OBJECT: - addError(errorReporter, - "As of Cap'n Proto 0.4, 'Object' has been renamed to 'AnyPointer'. Sorry for the " - "inconvenience, and thanks for being an early adopter. :)"); - // fallthrough - case Declaration::BUILTIN_ANY_POINTER: - target.initAnyPointer().initUnconstrained().setAnyKind(); - return true; - case Declaration::BUILTIN_ANY_STRUCT: - target.initAnyPointer().initUnconstrained().setStruct(); - return true; - case Declaration::BUILTIN_ANY_LIST: - target.initAnyPointer().initUnconstrained().setList(); - return true; - case Declaration::BUILTIN_CAPABILITY: - target.initAnyPointer().initUnconstrained().setCapability(); - return true; - - case Declaration::FILE: - case Declaration::USING: - case Declaration::CONST: - case Declaration::ENUMERANT: - case Declaration::FIELD: - case Declaration::UNION: - case Declaration::GROUP: - case Declaration::METHOD: - case Declaration::ANNOTATION: - case Declaration::NAKED_ID: - case Declaration::NAKED_ANNOTATION: - addError(errorReporter, kj::str("'", toString(), "' is not a type.")); - return false; - } - - KJ_UNREACHABLE; - } else { - // Oh, this is a type variable. - auto var = asVariable(); - if (var.id == 0) { - // This is actually a method implicit parameter. - auto builder = target.initAnyPointer().initImplicitMethodParameter(); - builder.setParameterIndex(var.index); - return true; - } else { - auto builder = target.initAnyPointer().initParameter(); - builder.setScopeId(var.id); - builder.setParameterIndex(var.index); - return true; - } - } -} - -NodeTranslator::Resolver::ResolveResult NodeTranslator::BrandedDecl::asResolveResult( - uint64_t scopeId, schema::Brand::Builder brandBuilder) { - auto result = body; - if (result.is()) { - // May need to compile our context as the "brand". 
- - result.get().scopeId = scopeId; - - getIdAndFillBrand([&]() { - result.get().brand = brandBuilder.asReader(); - return brandBuilder; - }); - } - return result; -} - -static kj::String expressionString(Expression::Reader name); // defined later - -kj::String NodeTranslator::BrandedDecl::toString() { - return expressionString(source); -} - -kj::String NodeTranslator::BrandedDecl::toDebugString() { - if (body.is()) { - auto variable = body.get(); - return kj::str("variable(", variable.id, ", ", variable.index, ")"); - } else { - auto decl = body.get(); - return kj::str("decl(", decl.id, ", ", (uint)decl.kind, "')"); - } -} - -NodeTranslator::BrandedDecl NodeTranslator::BrandScope::interpretResolve( - Resolver& resolver, Resolver::ResolveResult& result, Expression::Reader source) { - if (result.is()) { - auto& decl = result.get(); - - auto scope = pop(decl.scopeId); - KJ_IF_MAYBE(brand, decl.brand) { - scope = scope->evaluateBrand(resolver, decl, brand->getScopes()); - } else { - scope = scope->push(decl.id, decl.genericParamCount); - } - - return BrandedDecl(decl, kj::mv(scope), source); - } else { - auto& param = result.get(); - KJ_IF_MAYBE(p, lookupParameter(resolver, param.id, param.index)) { - return *p; - } else { - return BrandedDecl(param, source); - } - } -} - -kj::Own NodeTranslator::BrandScope::evaluateBrand( - Resolver& resolver, Resolver::ResolvedDecl decl, - List::Reader brand, uint index) { - auto result = kj::refcounted(errorReporter, decl.id); - result->leafParamCount = decl.genericParamCount; - - // Fill in `params`. - if (index < brand.size()) { - auto nextScope = brand[index]; - if (decl.id == nextScope.getScopeId()) { - // Initialize our parameters. - - switch (nextScope.which()) { - case schema::Brand::Scope::BIND: { - auto bindings = nextScope.getBind(); - auto params = kj::heapArrayBuilder(bindings.size()); - for (auto binding: bindings) { - switch (binding.which()) { - case schema::Brand::Binding::UNBOUND: { - // Build an AnyPointer-equivalent. - auto anyPointerDecl = resolver.resolveBuiltin(Declaration::BUILTIN_ANY_POINTER); - params.add(BrandedDecl(anyPointerDecl, - kj::refcounted(errorReporter, anyPointerDecl.scopeId), - Expression::Reader())); - break; - } - - case schema::Brand::Binding::TYPE: - // Reverse this schema::Type back into a BrandedDecl. - params.add(decompileType(resolver, binding.getType())); - break; - } - } - result->params = params.finish(); - break; - } - - case schema::Brand::Scope::INHERIT: - KJ_IF_MAYBE(p, getParams(decl.id)) { - result->params = kj::heapArray(*p); - } else { - result->inherited = true; - } - break; - } - - // Parent should start one level deeper in the list. - ++index; - } - } - - // Fill in `parent`. 
- KJ_IF_MAYBE(parent, decl.resolver->getParent()) { - result->parent = evaluateBrand(resolver, *parent, brand, index); - } - - return result; -} - -NodeTranslator::BrandedDecl NodeTranslator::BrandScope::decompileType( - Resolver& resolver, schema::Type::Reader type) { - auto builtin = [&](Declaration::Which which) -> BrandedDecl { - auto decl = resolver.resolveBuiltin(which); - return BrandedDecl(decl, - evaluateBrand(resolver, decl, List::Reader()), - Expression::Reader()); - }; - - switch (type.which()) { - case schema::Type::VOID: return builtin(Declaration::BUILTIN_VOID); - case schema::Type::BOOL: return builtin(Declaration::BUILTIN_BOOL); - case schema::Type::INT8: return builtin(Declaration::BUILTIN_INT8); - case schema::Type::INT16: return builtin(Declaration::BUILTIN_INT16); - case schema::Type::INT32: return builtin(Declaration::BUILTIN_INT32); - case schema::Type::INT64: return builtin(Declaration::BUILTIN_INT64); - case schema::Type::UINT8: return builtin(Declaration::BUILTIN_U_INT8); - case schema::Type::UINT16: return builtin(Declaration::BUILTIN_U_INT16); - case schema::Type::UINT32: return builtin(Declaration::BUILTIN_U_INT32); - case schema::Type::UINT64: return builtin(Declaration::BUILTIN_U_INT64); - case schema::Type::FLOAT32: return builtin(Declaration::BUILTIN_FLOAT32); - case schema::Type::FLOAT64: return builtin(Declaration::BUILTIN_FLOAT64); - case schema::Type::TEXT: return builtin(Declaration::BUILTIN_TEXT); - case schema::Type::DATA: return builtin(Declaration::BUILTIN_DATA); - - case schema::Type::ENUM: { - auto enumType = type.getEnum(); - Resolver::ResolvedDecl decl = resolver.resolveId(enumType.getTypeId()); - return BrandedDecl(decl, - evaluateBrand(resolver, decl, enumType.getBrand().getScopes()), - Expression::Reader()); - } - - case schema::Type::INTERFACE: { - auto interfaceType = type.getInterface(); - Resolver::ResolvedDecl decl = resolver.resolveId(interfaceType.getTypeId()); - return BrandedDecl(decl, - evaluateBrand(resolver, decl, interfaceType.getBrand().getScopes()), - Expression::Reader()); - } - - case schema::Type::STRUCT: { - auto structType = type.getStruct(); - Resolver::ResolvedDecl decl = resolver.resolveId(structType.getTypeId()); - return BrandedDecl(decl, - evaluateBrand(resolver, decl, structType.getBrand().getScopes()), - Expression::Reader()); - } - - case schema::Type::LIST: { - auto elementType = decompileType(resolver, type.getList().getElementType()); - return KJ_ASSERT_NONNULL(builtin(Declaration::BUILTIN_LIST) - .applyParams(kj::heapArray(&elementType, 1), Expression::Reader())); - } - - case schema::Type::ANY_POINTER: { - auto anyPointer = type.getAnyPointer(); - switch (anyPointer.which()) { - case schema::Type::AnyPointer::UNCONSTRAINED: - return builtin(Declaration::BUILTIN_ANY_POINTER); - - case schema::Type::AnyPointer::PARAMETER: { - auto param = anyPointer.getParameter(); - auto id = param.getScopeId(); - uint index = param.getParameterIndex(); - KJ_IF_MAYBE(binding, lookupParameter(resolver, id, index)) { - return *binding; - } else { - return BrandedDecl(Resolver::ResolvedParameter {id, index}, Expression::Reader()); - } - } - - case schema::Type::AnyPointer::IMPLICIT_METHOD_PARAMETER: - KJ_FAIL_ASSERT("Alias pointed to implicit method type parameter?"); - } - - KJ_UNREACHABLE; - } - } - - KJ_UNREACHABLE; -} - -kj::Maybe NodeTranslator::BrandScope::compileDeclExpression( - Expression::Reader source, Resolver& resolver, - ImplicitParams implicitMethodParams) { - switch (source.which()) { - case Expression::UNKNOWN: 
- // Error reported earlier. - return nullptr; - - case Expression::POSITIVE_INT: - case Expression::NEGATIVE_INT: - case Expression::FLOAT: - case Expression::STRING: - case Expression::BINARY: - case Expression::LIST: - case Expression::TUPLE: - case Expression::EMBED: - errorReporter.addErrorOn(source, "Expected name."); - return nullptr; - - case Expression::RELATIVE_NAME: { - auto name = source.getRelativeName(); - auto nameValue = name.getValue(); - - // Check implicit method params first. - for (auto i: kj::indices(implicitMethodParams.params)) { - if (implicitMethodParams.params[i].getName() == nameValue) { - if (implicitMethodParams.scopeId == 0) { - return BrandedDecl::implicitMethodParam(i); - } else { - return BrandedDecl(Resolver::ResolvedParameter { - implicitMethodParams.scopeId, static_cast(i) }, - Expression::Reader()); - } - } - } - - KJ_IF_MAYBE(r, resolver.resolve(nameValue)) { - return interpretResolve(resolver, *r, source); - } else { - errorReporter.addErrorOn(name, kj::str("Not defined: ", nameValue)); - return nullptr; - } - } - - case Expression::ABSOLUTE_NAME: { - auto name = source.getAbsoluteName(); - KJ_IF_MAYBE(r, resolver.getTopScope().resolver->resolveMember(name.getValue())) { - return interpretResolve(resolver, *r, source); - } else { - errorReporter.addErrorOn(name, kj::str("Not defined: ", name.getValue())); - return nullptr; - } - } - - case Expression::IMPORT: { - auto filename = source.getImport(); - KJ_IF_MAYBE(decl, resolver.resolveImport(filename.getValue())) { - // Import is always a root scope, so create a fresh BrandScope. - return BrandedDecl(*decl, kj::refcounted( - errorReporter, decl->id, decl->genericParamCount, *decl->resolver), source); - } else { - errorReporter.addErrorOn(filename, kj::str("Import failed: ", filename.getValue())); - return nullptr; - } - } - - case Expression::APPLICATION: { - auto app = source.getApplication(); - KJ_IF_MAYBE(decl, compileDeclExpression(app.getFunction(), resolver, implicitMethodParams)) { - // Compile all params. - auto params = app.getParams(); - auto compiledParams = kj::heapArrayBuilder(params.size()); - bool paramFailed = false; - for (auto param: params) { - if (param.isNamed()) { - errorReporter.addErrorOn(param.getNamed(), "Named parameter not allowed here."); - } - - KJ_IF_MAYBE(d, compileDeclExpression(param.getValue(), resolver, implicitMethodParams)) { - compiledParams.add(kj::mv(*d)); - } else { - // Param failed to compile. Error was already reported. - paramFailed = true; - } - }; - - if (paramFailed) { - return kj::mv(*decl); - } - - // Add the parameters to the brand. - KJ_IF_MAYBE(applied, decl->applyParams(compiledParams.finish(), source)) { - return kj::mv(*applied); - } else { - // Error already reported. Ignore parameters. 
- return kj::mv(*decl); - } - } else { - // error already reported - return nullptr; - } - } - - case Expression::MEMBER: { - auto member = source.getMember(); - KJ_IF_MAYBE(decl, compileDeclExpression(member.getParent(), resolver, implicitMethodParams)) { - auto name = member.getName(); - KJ_IF_MAYBE(memberDecl, decl->getMember(name.getValue(), source)) { - return kj::mv(*memberDecl); - } else { - errorReporter.addErrorOn(name, kj::str( - "'", expressionString(member.getParent()), - "' has no member named '", name.getValue(), "'")); - return nullptr; - } - } else { - // error already reported - return nullptr; - } - } - } - - KJ_UNREACHABLE; -} - -// ======================================================================================= - NodeTranslator::NodeTranslator( Resolver& resolver, ErrorReporter& errorReporter, const Declaration::Reader& decl, Orphan wipNodeParam, @@ -1412,12 +622,13 @@ NodeTranslator::NodeSet NodeTranslator::getBootstrapNode() { } } -NodeTranslator::NodeSet NodeTranslator::finish() { +NodeTranslator::NodeSet NodeTranslator::finish(Schema selfBootstrapSchema) { // Careful about iteration here: compileFinalValue() may actually add more elements to // `unfinishedValues`, invalidating iterators in the process. for (size_t i = 0; i < unfinishedValues.size(); i++) { auto& value = unfinishedValues[i]; - compileValue(value.source, value.type, value.typeScope, value.target, false); + compileValue(value.source, value.type, value.typeScope.orDefault(selfBootstrapSchema), + value.target, false); } return getBootstrapNode(); @@ -1648,14 +859,14 @@ void NodeTranslator::DuplicateNameDetector::check( void NodeTranslator::compileConst(Declaration::Const::Reader decl, schema::Node::Const::Builder builder) { auto typeBuilder = builder.initType(); - if (compileType(decl.getType(), typeBuilder, noImplicitParams())) { + if (compileType(decl.getType(), typeBuilder, ImplicitParams::none())) { compileBootstrapValue(decl.getValue(), typeBuilder.asReader(), builder.initValue()); } } void NodeTranslator::compileAnnotation(Declaration::Annotation::Reader decl, schema::Node::Annotation::Builder builder) { - compileType(decl.getType(), builder.initType(), noImplicitParams()); + compileType(decl.getType(), builder.initType(), ImplicitParams::none()); // Dynamically copy over the values of all of the "targets" members. 
DynamicStruct::Reader src = decl; @@ -2309,13 +1520,12 @@ private: void NodeTranslator::compileStruct(Void decl, List::Reader members, schema::Node::Builder builder) { - StructTranslator(*this, noImplicitParams()).translate(decl, members, builder, sourceInfo.get()); + StructTranslator(*this, ImplicitParams::none()) + .translate(decl, members, builder, sourceInfo.get()); } // ------------------------------------------------------------------- -static kj::String expressionString(Expression::Reader name); - void NodeTranslator::compileInterface(Declaration::Interface::Reader decl, List::Reader members, schema::Node::Builder builder) { @@ -2326,7 +1536,7 @@ void NodeTranslator::compileInterface(Declaration::Interface::Reader decl, for (uint i: kj::indices(superclassesDecl)) { auto superclass = superclassesDecl[i]; - KJ_IF_MAYBE(decl, compileDeclExpression(superclass, noImplicitParams())) { + KJ_IF_MAYBE(decl, compileDeclExpression(superclass, ImplicitParams::none())) { KJ_IF_MAYBE(kind, decl->getKind()) { if (*kind == Declaration::INTERFACE) { auto s = superclassesBuilder[i]; @@ -2384,9 +1594,13 @@ void NodeTranslator::compileInterface(Declaration::Interface::Reader decl, implicitsBuilder[i].setName(implicits[i].getName()); } + auto params = methodReader.getParams(); + if (params.isStream()) { + errorReporter.addErrorOn(params, "'stream' can only appear after '->', not before."); + } methodBuilder.setParamStructType(compileParamList( methodDecl.getName().getValue(), ordinal, false, - methodReader.getParams(), implicits, + params, implicits, [&]() { return methodBuilder.initParamBrand(); })); auto results = methodReader.getResults(); @@ -2478,113 +1692,37 @@ uint64_t NodeTranslator::compileParamList( } } return 0; - } - KJ_UNREACHABLE; -} - -// ------------------------------------------------------------------- - -static const char HEXDIGITS[] = "0123456789abcdef"; - -static kj::StringTree stringLiteral(kj::StringPtr chars) { - return kj::strTree('"', kj::encodeCEscape(chars), '"'); -} - -static kj::StringTree binaryLiteral(Data::Reader data) { - kj::Vector escaped(data.size() * 3); - - for (byte b: data) { - escaped.add(HEXDIGITS[b % 16]); - escaped.add(HEXDIGITS[b / 16]); - escaped.add(' '); - } - - escaped.removeLast(); - return kj::strTree("0x\"", escaped, '"'); -} - -static kj::StringTree expressionStringTree(Expression::Reader exp); - -static kj::StringTree tupleLiteral(List::Reader params) { - auto parts = kj::heapArrayBuilder(params.size()); - for (auto param: params) { - auto part = expressionStringTree(param.getValue()); - if (param.isNamed()) { - part = kj::strTree(param.getNamed().getValue(), " = ", kj::mv(part)); - } - parts.add(kj::mv(part)); - } - return kj::strTree("( ", kj::StringTree(parts.finish(), ", "), " )"); -} - -static kj::StringTree expressionStringTree(Expression::Reader exp) { - switch (exp.which()) { - case Expression::UNKNOWN: - return kj::strTree(""); - case Expression::POSITIVE_INT: - return kj::strTree(exp.getPositiveInt()); - case Expression::NEGATIVE_INT: - return kj::strTree('-', exp.getNegativeInt()); - case Expression::FLOAT: - return kj::strTree(exp.getFloat()); - case Expression::STRING: - return stringLiteral(exp.getString()); - case Expression::BINARY: - return binaryLiteral(exp.getBinary()); - case Expression::RELATIVE_NAME: - return kj::strTree(exp.getRelativeName().getValue()); - case Expression::ABSOLUTE_NAME: - return kj::strTree('.', exp.getAbsoluteName().getValue()); - case Expression::IMPORT: - return kj::strTree("import ", 
stringLiteral(exp.getImport().getValue())); - case Expression::EMBED: - return kj::strTree("embed ", stringLiteral(exp.getEmbed().getValue())); - - case Expression::LIST: { - auto list = exp.getList(); - auto parts = kj::heapArrayBuilder(list.size()); - for (auto element: list) { - parts.add(expressionStringTree(element)); + case Declaration::ParamList::STREAM: + KJ_IF_MAYBE(streamCapnp, resolver.resolveImport("/capnp/stream.capnp")) { + if (streamCapnp->resolver->resolveMember("StreamResult") == nullptr) { + errorReporter.addErrorOn(paramList, + "The version of '/capnp/stream.capnp' found in your import path does not appear " + "to be the official one; it is missing the declaration of StreamResult."); + } + } else { + errorReporter.addErrorOn(paramList, + "A method declaration uses streaming, but '/capnp/stream.capnp' is not found " + "in the import path. This is a standard file that should always be installed " + "with the Cap'n Proto compiler."); } - return kj::strTree("[ ", kj::StringTree(parts.finish(), ", "), " ]"); - } - - case Expression::TUPLE: - return tupleLiteral(exp.getTuple()); - - case Expression::APPLICATION: { - auto app = exp.getApplication(); - return kj::strTree(expressionStringTree(app.getFunction()), - '(', tupleLiteral(app.getParams()), ')'); - } - - case Expression::MEMBER: { - auto member = exp.getMember(); - return kj::strTree(expressionStringTree(member.getParent()), '.', - member.getName().getValue()); - } + return typeId(); } - KJ_UNREACHABLE; } -static kj::String expressionString(Expression::Reader name) { - return expressionStringTree(name).flatten(); -} - // ------------------------------------------------------------------- -kj::Maybe +kj::Maybe NodeTranslator::compileDeclExpression( Expression::Reader source, ImplicitParams implicitMethodParams) { return localBrand->compileDeclExpression(source, resolver, implicitMethodParams); } -/* static */ kj::Maybe NodeTranslator::compileDecl( +/* static */ kj::Maybe NodeTranslator::compileDecl( uint64_t scopeId, uint scopeParameterCount, Resolver& resolver, ErrorReporter& errorReporter, Expression::Reader expression, schema::Brand::Builder brandBuilder) { auto scope = kj::refcounted(errorReporter, scopeId, scopeParameterCount, resolver); - KJ_IF_MAYBE(decl, scope->compileDeclExpression(expression, resolver, noImplicitParams())) { + KJ_IF_MAYBE(decl, scope->compileDeclExpression(expression, resolver, ImplicitParams::none())) { return decl->asResolveResult(scope->getScopeId(), brandBuilder); } else { return nullptr; @@ -2632,7 +1770,7 @@ void NodeTranslator::compileDefaultDefaultValue( void NodeTranslator::compileBootstrapValue( Expression::Reader source, schema::Type::Reader type, schema::Value::Builder target, - Schema typeScope) { + kj::Maybe typeScope) { // Start by filling in a default default value so that if for whatever reason we don't end up // initializing the value, this won't cause schema validation to fail. compileDefaultDefaultValue(type, target); @@ -2646,8 +1784,9 @@ void NodeTranslator::compileBootstrapValue( break; default: - // Primitive value. - compileValue(source, type, typeScope, target, true); + // Primitive value. (Note that the scope can't possibly matter since primitives are not + // generic.) 
+ compileValue(source, type, typeScope.orDefault(Schema()), target, true); break; } } @@ -2691,8 +1830,21 @@ void NodeTranslator::compileValue(Expression::Reader source, schema::Type::Reade } kj::Maybe> ValueTranslator::compileValue(Expression::Reader src, Type type) { + if (type.isAnyPointer()) { + if (type.getBrandParameter() != nullptr || type.getImplicitParameter() != nullptr) { + errorReporter.addErrorOn(src, + "Cannot interpret value because the type is a generic type parameter which is not " + "yet bound. We don't know what type to expect here."); + return nullptr; + } + } + Orphan result = compileValueInner(src, type); + // compileValueInner() evaluated `src` and only used `type` as a hint in interpreting `src` if + // `src`'s type wasn't already obvious. So, now we need to check that the resulting value + // actually matches `type`. + switch (result.getType()) { case DynamicValue::UNKNOWN: // Error already reported. @@ -2741,7 +1893,7 @@ kj::Maybe> ValueTranslator::compileValue(Expression::Reader return kj::mv(result); } - } // fallthrough -- value is positive, so we can just go on to the uint case below. + } KJ_FALLTHROUGH; // value is positive, so we can just go on to the uint case below. case DynamicValue::UINT: { uint64_t maxValue = 0; @@ -3082,8 +2234,8 @@ kj::String ValueTranslator::makeTypeName(Type type) { kj::Maybe NodeTranslator::readConstant( Expression::Reader source, bool isBootstrap) { // Look up the constant decl. - NodeTranslator::BrandedDecl constDecl = nullptr; - KJ_IF_MAYBE(decl, compileDeclExpression(source, noImplicitParams())) { + BrandedDecl constDecl = nullptr; + KJ_IF_MAYBE(decl, compileDeclExpression(source, ImplicitParams::none())) { constDecl = *decl; } else { // Lookup will have reported an error. @@ -3204,7 +2356,7 @@ Orphan> NodeTranslator::compileAnnotationApplications( annotationBuilder.initValue().setVoid(); auto name = annotation.getName(); - KJ_IF_MAYBE(decl, compileDeclExpression(name, noImplicitParams())) { + KJ_IF_MAYBE(decl, compileDeclExpression(name, ImplicitParams::none())) { KJ_IF_MAYBE(kind, decl->getKind()) { if (*kind != Declaration::ANNOTATION) { errorReporter.addErrorOn(name, kj::str( @@ -3243,7 +2395,7 @@ Orphan> NodeTranslator::compileAnnotationApplications( } } } - } else if (*kind != Declaration::ANNOTATION) { + } else { errorReporter.addErrorOn(name, kj::str( "'", expressionString(name), "' is not an annotation.")); } diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/node-translator.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/node-translator.h index 6c89a38d443..6365fa005ea 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/node-translator.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/node-translator.h @@ -21,10 +21,6 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include #include @@ -32,8 +28,12 @@ #include #include #include "error-reporter.h" +#include "resolver.h" +#include "generics.h" #include +CAPNP_BEGIN_HEADER + namespace capnp { namespace compiler { @@ -41,84 +41,7 @@ class NodeTranslator { // Translates one node in the schema from AST form to final schema form. A "node" is anything // that has a unique ID, such as structs, enums, constants, and annotations, but not fields, // unions, enumerants, or methods (the latter set have 16-bit ordinals but not 64-bit global IDs). - public: - class Resolver { - // Callback class used to find other nodes relative to this one. 
- // - // TODO(cleanup): This has evolved into being a full interface for traversing the node tree. - // Maybe we should rename it as such, and move it out of NodeTranslator. See also - // TODO(cleanup) on NodeTranslator::BrandedDecl. - - public: - struct ResolvedDecl { - uint64_t id; - uint genericParamCount; - uint64_t scopeId; - Declaration::Which kind; - Resolver* resolver; - - kj::Maybe brand; - // If present, then it is necessary to replace the brand scope with the given brand before - // using the target type. This happens when the decl resolved to an alias; all other fields - // of `ResolvedDecl` refer to the target of the alias, except for `scopeId` which is the - // scope that contained the alias. - }; - - struct ResolvedParameter { - uint64_t id; // ID of the node declaring the parameter. - uint index; // Index of the parameter. - }; - - typedef kj::OneOf ResolveResult; - - virtual kj::Maybe resolve(kj::StringPtr name) = 0; - // Look up the given name, relative to this node, and return basic information about the - // target. - - virtual kj::Maybe resolveMember(kj::StringPtr name) = 0; - // Look up a member of this node. - - virtual ResolvedDecl resolveBuiltin(Declaration::Which which) = 0; - virtual ResolvedDecl resolveId(uint64_t id) = 0; - - virtual kj::Maybe getParent() = 0; - // Returns the parent of this scope, or null if this is the top scope. - - virtual ResolvedDecl getTopScope() = 0; - // Get the top-level scope containing this node. - - virtual kj::Maybe resolveBootstrapSchema(uint64_t id, schema::Brand::Reader brand) = 0; - // Get the schema for the given ID. If a schema is returned, it must be safe to traverse its - // dependencies via the Schema API. A schema that is only at the bootstrap stage is - // acceptable. - // - // Throws an exception if the id is not one that was found by calling resolve() or by - // traversing other schemas. Returns null if the ID is recognized, but the corresponding - // schema node failed to be built for reasons that were already reported. - - virtual kj::Maybe resolveFinalSchema(uint64_t id) = 0; - // Get the final schema for the given ID. A bootstrap schema is not acceptable. A raw - // node reader is returned rather than a Schema object because using a Schema object built - // by the final schema loader could trigger lazy initialization of dependencies which could - // lead to a cycle and deadlock. - // - // Throws an exception if the id is not one that was found by calling resolve() or by - // traversing other schemas. Returns null if the ID is recognized, but the corresponding - // schema node failed to be built for reasons that were already reported. - - virtual kj::Maybe resolveImport(kj::StringPtr name) = 0; - // Get the ID of an imported file given the import path. - - virtual kj::Maybe> readEmbed(kj::StringPtr name) = 0; - // Read and return the contents of a file for an `embed` expression. - - virtual kj::Maybe resolveBootstrapType(schema::Type::Reader type, Schema scope) = 0; - // Compile a schema::Type into a Type whose dependencies may safely be traversed via the schema - // API. These dependencies may have only bootstrap schemas. Returns null if the type could not - // be constructed due to already-reported errors. 
- }; - NodeTranslator(Resolver& resolver, ErrorReporter& errorReporter, const Declaration::Reader& decl, Orphan wipNode, bool compileAnnotations); @@ -150,9 +73,12 @@ class NodeTranslator { // If the final node has already been built, this will actually return the final node (in fact, // it's the same node object). - NodeSet finish(); + NodeSet finish(Schema selfUnboundBootstrap); // Finish translating the node (including filling in all the pieces that are missing from the // bootstrap node) and return it. + // + // `selfUnboundBootstrap` is a Schema build using the Node returned by getBootstrapNode(), and + // with generic parameters "unbound", i.e. it was returned by SchemaLoader::getUnbound(). static kj::Maybe compileDecl( uint64_t scopeId, uint scopeParameterCount, Resolver& resolver, ErrorReporter& errorReporter, @@ -168,8 +94,6 @@ class NodeTranslator { class DuplicateOrdinalDetector; class StructLayout; class StructTranslator; - class BrandedDecl; - class BrandScope; Resolver& resolver; ErrorReporter& errorReporter; @@ -198,7 +122,7 @@ class NodeTranslator { struct UnfinishedValue { Expression::Reader source; schema::Type::Reader type; - Schema typeScope; + kj::Maybe typeScope; schema::Value::Builder target; }; kj::Vector unfinishedValues; @@ -223,21 +147,6 @@ class NodeTranslator { // The `members` arrays contain only members with ordinal numbers, in code order. Other members // are handled elsewhere. - struct ImplicitParams { - // Represents a set of implicit parameters visible in the current context. - - uint64_t scopeId; - // If zero, then any reference to an implciit param in this context should be compiled to a - // `implicitMethodParam` AnyPointer. If non-zero, it should be compiled to a `parameter` - // AnyPointer. - - List::Reader params; - }; - - static inline ImplicitParams noImplicitParams() { - return { 0, List::Reader() }; - } - template uint64_t compileParamList(kj::StringPtr methodName, uint16_t ordinal, bool isResults, Declaration::ParamList::Reader paramList, @@ -259,15 +168,13 @@ class NodeTranslator { void compileBootstrapValue( Expression::Reader source, schema::Type::Reader type, schema::Value::Builder target, - Schema typeScope = Schema()); + kj::Maybe typeScope = nullptr); // Calls compileValue() if this value should be interpreted at bootstrap time. Otheriwse, // adds the value to `unfinishedValues` for later evaluation. // - // If `type` comes from some other node, `typeScope` is the schema for that node. This is only - // really needed for looking up generic parameter bindings, therefore if the type comes from - // the node being built, an empty "Schema" (the default) works here because the node being built - // is of course being built for all possible bindings and thus none of its generic parameters are - // bound. + // If `type` comes from some other node, `typeScope` is the schema for that node. Otherwise the + // scope of the type expression is assumed to be this node (meaning, in particular, that no + // generic type parameters are bound). 
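The effect of making `typeScope` optional is easiest to see at the point where deferred values are finally compiled. The following sketch mirrors the finish() change earlier in this patch; the variable names follow the declarations above and the loop body is illustrative:

    // A null typeScope means "the node currently being compiled", so the unbound bootstrap
    // schema handed to finish() is substituted. An index loop is used because the vector may
    // grow while values are being compiled.
    for (size_t i = 0; i < unfinishedValues.size(); i++) {
      auto& value = unfinishedValues[i];
      compileValue(value.source, value.type,
                   value.typeScope.orDefault(selfUnboundBootstrap),
                   value.target, /*isBootstrap=*/false);
    }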
void compileValue(Expression::Reader source, schema::Type::Reader type, Schema typeScope, schema::Value::Builder target, bool isBootstrap); @@ -318,3 +225,5 @@ class ValueTranslator { } // namespace compiler } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/parser.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/parser.c++ index b6f069f25bd..eadb279577f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/parser.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/parser.c++ @@ -19,10 +19,15 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +#if _WIN32 +#include +#endif + #include "parser.h" #include "type-id.h" #include #include +#include #if !_MSC_VER #include #endif @@ -31,10 +36,10 @@ #include #if _WIN32 -#define WIN32_LEAN_AND_MEAN #include #include -#undef VOID +#undef CONST +#include #endif namespace capnp { @@ -218,6 +223,27 @@ constexpr auto op(const char* expected) return p::transformOrReject(operatorToken, ExactString(expected)); } +class LocatedExactString { +public: + constexpr LocatedExactString(const char* expected): expected(expected) {} + + kj::Maybe> operator()(Located&& text) const { + if (text.value == expected) { + return kj::mv(text); + } else { + return nullptr; + } + } + +private: + const char* expected; +}; + +constexpr auto locatedKeyword(const char* expected) + -> decltype(p::transformOrReject(identifier, LocatedExactString(expected))) { + return p::transformOrReject(identifier, LocatedExactString(expected)); +} + // ======================================================================================= template @@ -856,6 +882,14 @@ CapnpParser::CapnpParser(Orphanage orphanageParam, ErrorReporter& errorReporterP } return decl; }), + p::transform(locatedKeyword("stream"), + [this](Located&& kw) -> Orphan { + auto decl = orphanage.newOrphan(); + auto builder = decl.get(); + kw.copyLocationTo(builder); + builder.setStream(); + return decl; + }), p::transform(parsers.expression, [this](Orphan&& name) -> Orphan { auto decl = orphanage.newOrphan(); @@ -1059,5 +1093,95 @@ kj::Maybe> CapnpParser::parseStatement( } } +// ======================================================================================= + +static const char HEXDIGITS[] = "0123456789abcdef"; + +static kj::StringTree stringLiteralStringTree(kj::StringPtr chars) { + return kj::strTree('"', kj::encodeCEscape(chars), '"'); +} + +static kj::StringTree binaryLiteralStringTree(Data::Reader data) { + kj::Vector escaped(data.size() * 3); + + for (byte b: data) { + escaped.add(HEXDIGITS[b % 16]); + escaped.add(HEXDIGITS[b / 16]); + escaped.add(' '); + } + + escaped.removeLast(); + return kj::strTree("0x\"", escaped, '"'); +} + +static kj::StringTree expressionStringTree(Expression::Reader exp); + +static kj::StringTree tupleLiteral(List::Reader params) { + auto parts = kj::heapArrayBuilder(params.size()); + for (auto param: params) { + auto part = expressionStringTree(param.getValue()); + if (param.isNamed()) { + part = kj::strTree(param.getNamed().getValue(), " = ", kj::mv(part)); + } + parts.add(kj::mv(part)); + } + return kj::strTree("( ", kj::StringTree(parts.finish(), ", "), " )"); +} + +static kj::StringTree expressionStringTree(Expression::Reader exp) { + switch (exp.which()) { + case Expression::UNKNOWN: + return kj::strTree(""); + case Expression::POSITIVE_INT: + return kj::strTree(exp.getPositiveInt()); + case Expression::NEGATIVE_INT: + return kj::strTree('-', exp.getNegativeInt()); + case 
Expression::FLOAT: + return kj::strTree(exp.getFloat()); + case Expression::STRING: + return stringLiteralStringTree(exp.getString()); + case Expression::BINARY: + return binaryLiteralStringTree(exp.getBinary()); + case Expression::RELATIVE_NAME: + return kj::strTree(exp.getRelativeName().getValue()); + case Expression::ABSOLUTE_NAME: + return kj::strTree('.', exp.getAbsoluteName().getValue()); + case Expression::IMPORT: + return kj::strTree("import ", stringLiteralStringTree(exp.getImport().getValue())); + case Expression::EMBED: + return kj::strTree("embed ", stringLiteralStringTree(exp.getEmbed().getValue())); + + case Expression::LIST: { + auto list = exp.getList(); + auto parts = kj::heapArrayBuilder(list.size()); + for (auto element: list) { + parts.add(expressionStringTree(element)); + } + return kj::strTree("[ ", kj::StringTree(parts.finish(), ", "), " ]"); + } + + case Expression::TUPLE: + return tupleLiteral(exp.getTuple()); + + case Expression::APPLICATION: { + auto app = exp.getApplication(); + return kj::strTree(expressionStringTree(app.getFunction()), + '(', tupleLiteral(app.getParams()), ')'); + } + + case Expression::MEMBER: { + auto member = exp.getMember(); + return kj::strTree(expressionStringTree(member.getParent()), '.', + member.getName().getValue()); + } + } + + KJ_UNREACHABLE; +} + +kj::String expressionString(Expression::Reader name) { + return expressionStringTree(name).flatten(); +} + } // namespace compiler } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/parser.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/parser.h index 966425abbfa..f881a0d9345 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/parser.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/parser.h @@ -21,16 +21,14 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include #include #include #include "error-reporter.h" +CAPNP_BEGIN_HEADER + namespace capnp { namespace compiler { @@ -144,5 +142,10 @@ class CapnpParser { Parsers parsers; }; +kj::String expressionString(Expression::Reader name); +// Stringify the expression as code. + } // namespace compiler } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/resolver.h b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/resolver.h new file mode 100644 index 00000000000..b4a39423ba6 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/compiler/resolver.h @@ -0,0 +1,132 @@ +// Copyright (c) 2013-2020 Sandstorm Development Group, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once + +#include +#include +#include +#include + +CAPNP_BEGIN_HEADER + +namespace capnp { +namespace compiler { + +class Resolver { + // Callback class used to find other nodes relative to some existing node. + // + // `Resolver` is used when compiling one declaration requires inspecting the compiled versions + // of other declarations it depends on. For example, if struct type Foo contains a field of type + // Bar, and specifies a default value for that field, then to parse that default value we need + // the compiled version of `Bar`. Or, more commonly, if a struct type Foo refers to some other + // type `Bar.Baz`, this requires doing a lookup that depends on at least partial compilation of + // `Bar`, in order to discover its nested type `Baz`. + // + // Note that declarations are often compiled just-in-time the first time they are resolved. So, + // the methods of Resolver may recurse back into other parts of the compiler. It must detect when + // a dependency cycle occurs and report an error in order to prevent an infinite loop. + +public: + struct ResolvedDecl { + // Information about a resolved declaration. + + uint64_t id; + // Type ID / node ID of the resolved declaration. + + uint genericParamCount; + // If non-zero, the declaration is a generic with the given number of parameters. + + uint64_t scopeId; + // The ID of the parent scope of this declaration. + + Declaration::Which kind; + // What basic kind of declaration is this? E.g. struct, interface, const, etc. + + Resolver* resolver; + // `Resolver` instance that can be used to further resolve other declarations relative to this + // one. + + kj::Maybe brand; + // If present, then it is necessary to replace the brand scope with the given brand before + // using the target type. This happens when the decl resolved to an alias; all other fields + // of `ResolvedDecl` refer to the target of the alias, except for `scopeId` which is the + // scope that contained the alias. + }; + + struct ResolvedParameter { + uint64_t id; // ID of the node declaring the parameter. + uint index; // Index of the parameter. + }; + + typedef kj::OneOf ResolveResult; + + virtual kj::Maybe resolve(kj::StringPtr name) = 0; + // Look up the given name, relative to this node, and return basic information about the + // target. + + virtual kj::Maybe resolveMember(kj::StringPtr name) = 0; + // Look up a member of this node. + + virtual ResolvedDecl resolveBuiltin(Declaration::Which which) = 0; + virtual ResolvedDecl resolveId(uint64_t id) = 0; + + virtual kj::Maybe getParent() = 0; + // Returns the parent of this scope, or null if this is the top scope. + + virtual ResolvedDecl getTopScope() = 0; + // Get the top-level scope containing this node. + + virtual kj::Maybe resolveBootstrapSchema(uint64_t id, schema::Brand::Reader brand) = 0; + // Get the schema for the given ID. If a schema is returned, it must be safe to traverse its + // dependencies via the Schema API. A schema that is only at the bootstrap stage is + // acceptable. + // + // Throws an exception if the id is not one that was found by calling resolve() or by + // traversing other schemas. 
Returns null if the ID is recognized, but the corresponding + // schema node failed to be built for reasons that were already reported. + + virtual kj::Maybe resolveFinalSchema(uint64_t id) = 0; + // Get the final schema for the given ID. A bootstrap schema is not acceptable. A raw + // node reader is returned rather than a Schema object because using a Schema object built + // by the final schema loader could trigger lazy initialization of dependencies which could + // lead to a cycle and deadlock. + // + // Throws an exception if the id is not one that was found by calling resolve() or by + // traversing other schemas. Returns null if the ID is recognized, but the corresponding + // schema node failed to be built for reasons that were already reported. + + virtual kj::Maybe resolveImport(kj::StringPtr name) = 0; + // Get the ID of an imported file given the import path. + + virtual kj::Maybe> readEmbed(kj::StringPtr name) = 0; + // Read and return the contents of a file for an `embed` expression. + + virtual kj::Maybe resolveBootstrapType(schema::Type::Reader type, Schema scope) = 0; + // Compile a schema::Type into a Type whose dependencies may safely be traversed via the schema + // API. These dependencies may have only bootstrap schemas. Returns null if the type could not + // be constructed due to already-reported errors. +}; + +} // namespace compiler +} // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic-capability.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic-capability.c++ index e56d8fadc63..5a5cb3570ba 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic-capability.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic-capability.c++ @@ -52,15 +52,19 @@ Request DynamicCapability::Client::newRequest( return newRequest(schema.getMethodByName(methodName), sizeHint); } -kj::Promise DynamicCapability::Server::dispatchCall( +Capability::Server::DispatchCallResult DynamicCapability::Server::dispatchCall( uint64_t interfaceId, uint16_t methodId, CallContext context) { KJ_IF_MAYBE(interface, schema.findSuperclass(interfaceId)) { auto methods = interface->getMethods(); if (methodId < methods.size()) { auto method = methods[methodId]; - return call(method, CallContext(*context.hook, - method.getParamType(), method.getResultType())); + auto resultType = method.getResultType(); + return { + call(method, CallContext(*context.hook, + method.getParamType(), resultType)), + resultType.isStreamResult() + }; } else { return internalUnimplemented( interface->getProto().getDisplayName().cStr(), interfaceId, methodId); @@ -72,6 +76,7 @@ kj::Promise DynamicCapability::Server::dispatchCall( RemotePromise Request::send() { auto typelessPromise = hook->send(); + hook = nullptr; // prevent reuse auto resultSchemaCopy = resultSchema; // Convert the Promise to return the correct response type. 
@@ -90,4 +95,12 @@ RemotePromise Request::send() { return RemotePromise(kj::mv(typedPromise), kj::mv(typedPipeline)); } +kj::Promise Request::sendStreaming() { + KJ_REQUIRE(resultSchema.isStreamResult()); + + auto promise = hook->sendStreaming(); + hook = nullptr; // prevent reuse + return promise; +} + } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic.c++ index a35dca5e1c9..5983db47b1c 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic.c++ @@ -1467,6 +1467,14 @@ DynamicValue::Reader::Reader(ConstSchema constant): type(VOID) { } } +#if __GNUC__ && !__clang__ && __GNUC__ >= 9 +// In the copy constructors below, we use memcpy() to copy only after verifying that it is safe. +// But GCC 9 doesn't know we've checked, and whines. I suppose GCC is probably right: our checks +// probably don't technically make memcpy safe according to the standard. But it works in practice, +// and if it ever stops working, the tests will catch it. +#pragma GCC diagnostic ignored "-Wclass-memaccess" +#endif + DynamicValue::Reader::Reader(const Reader& other) { switch (other.type) { case UNKNOWN: @@ -1725,15 +1733,26 @@ int64_t unsignedToSigned(unsigned long long value) { template T checkRoundTrip(U value) { -#if __aarch64__ - // Work around an apparently broken compiler optimization on Clang / arm64. It appears that - // for T = int8_t, U = double, and value = 128, the compiler incorrectly believes that the - // round-trip does not change the value, where in fact it should change to -128. Similar problems - // exist for various other types and inputs -- json-test seems to exercise several problem cases. - // The problem only exists when compiling with optimization. In any case, declaring the variable - // `volatile` kills the optimization. - volatile -#endif + T result = value; + KJ_REQUIRE(U(result) == value, "Value out-of-range for requested type.", value) { + // Use it anyway. + break; + } + return result; +} + +template +T checkRoundTripFromFloat(U value) { + // When `U` is `float` or `double`, we have to use a different approach, because casting an + // out-of-range float to an integer is, surprisingly, UB. + constexpr T MIN = kj::minValue; + constexpr T MAX = kj::maxValue; + KJ_REQUIRE(value >= U(MIN), "Value out-of-range for requested type.", value) { + return MIN; + } + KJ_REQUIRE(value <= U(MAX), "Value out-of-range for requested type.", value) { + return MAX; + } T result = value; KJ_REQUIRE(U(result) == value, "Value out-of-range for requested type.", value) { // Use it anyway. 
@@ -1774,14 +1793,14 @@ typeName DynamicValue::Builder::AsImpl::apply(Builder& builder) { \ } \ } -HANDLE_NUMERIC_TYPE(int8_t, checkRoundTrip, unsignedToSigned, checkRoundTrip) -HANDLE_NUMERIC_TYPE(int16_t, checkRoundTrip, unsignedToSigned, checkRoundTrip) -HANDLE_NUMERIC_TYPE(int32_t, checkRoundTrip, unsignedToSigned, checkRoundTrip) -HANDLE_NUMERIC_TYPE(int64_t, kj::implicitCast, unsignedToSigned, checkRoundTrip) -HANDLE_NUMERIC_TYPE(uint8_t, signedToUnsigned, checkRoundTrip, checkRoundTrip) -HANDLE_NUMERIC_TYPE(uint16_t, signedToUnsigned, checkRoundTrip, checkRoundTrip) -HANDLE_NUMERIC_TYPE(uint32_t, signedToUnsigned, checkRoundTrip, checkRoundTrip) -HANDLE_NUMERIC_TYPE(uint64_t, signedToUnsigned, kj::implicitCast, checkRoundTrip) +HANDLE_NUMERIC_TYPE(int8_t, checkRoundTrip, unsignedToSigned, checkRoundTripFromFloat) +HANDLE_NUMERIC_TYPE(int16_t, checkRoundTrip, unsignedToSigned, checkRoundTripFromFloat) +HANDLE_NUMERIC_TYPE(int32_t, checkRoundTrip, unsignedToSigned, checkRoundTripFromFloat) +HANDLE_NUMERIC_TYPE(int64_t, kj::implicitCast, unsignedToSigned, checkRoundTripFromFloat) +HANDLE_NUMERIC_TYPE(uint8_t, signedToUnsigned, checkRoundTrip, checkRoundTripFromFloat) +HANDLE_NUMERIC_TYPE(uint16_t, signedToUnsigned, checkRoundTrip, checkRoundTripFromFloat) +HANDLE_NUMERIC_TYPE(uint32_t, signedToUnsigned, checkRoundTrip, checkRoundTripFromFloat) +HANDLE_NUMERIC_TYPE(uint64_t, signedToUnsigned, kj::implicitCast, checkRoundTripFromFloat) HANDLE_NUMERIC_TYPE(float, kj::implicitCast, kj::implicitCast, kj::implicitCast) HANDLE_NUMERIC_TYPE(double, kj::implicitCast, kj::implicitCast, kj::implicitCast) diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic.h b/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic.h index 2e3d28f15c2..3fd6cf2d76f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/dynamic.h @@ -32,10 +32,6 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "schema.h" #include "layout.h" #include "message.h" @@ -43,6 +39,8 @@ #include "capability.h" #include // work-around macro conflict with `VOID` +CAPNP_BEGIN_HEADER + namespace capnp { class MessageReader; @@ -531,8 +529,8 @@ class DynamicCapability::Server: public Capability::Server { virtual kj::Promise call(InterfaceSchema::Method method, CallContext context) = 0; - kj::Promise dispatchCall(uint64_t interfaceId, uint16_t methodId, - CallContext context) override final; + DispatchCallResult dispatchCall(uint64_t interfaceId, uint16_t methodId, + CallContext context) override final; inline InterfaceSchema getSchema() const { return schema; } @@ -552,6 +550,10 @@ class Request: public DynamicStruct::Builder { RemotePromise send(); // Send the call and return a promise for the results. + kj::Promise sendStreaming(); + // Use when the caller is aware that the response type is StreamResult and wants to invoke + // streaming behavior. It is an error to call this if the response type is not StreamResult. 
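// [editor's illustration, not part of the patch] Minimal usage sketch of the new sendStreaming()
// entry point on dynamic requests. The capability, the method name "write", and the field name
// "bytes" are placeholders, not APIs defined by this change; the method must be declared with
// `-> stream` in the schema.
#include <capnp/dynamic.h>
#include <kj/async.h>

static kj::Promise<void> writeChunk(capnp::DynamicCapability::Client cap,
                                    capnp::Data::Reader chunk) {
  auto req = cap.newRequest("write");
  req.set("bytes", chunk);
  // sendStreaming() is only valid when the result type is StreamResult; the RPC layer then
  // applies streaming flow control instead of returning a response struct.
  return req.sendStreaming();
}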
+ private: kj::Own hook; StructSchema resultSchema; @@ -584,6 +586,9 @@ class CallContext: public kj::DisallowConstCopy { kj::Promise tailCall(Request&& tailRequest); void allowCancellation(); + StructSchema getParamsType() const { return paramType; } + StructSchema getResultsType() const { return resultType; } + private: CallContextHook* hook; StructSchema paramType; @@ -1671,3 +1676,5 @@ ReaderFor ConstSchema::as() const { } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/encoding-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/encoding-test.c++ index b7ac5f65177..6b71c5e1c22 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/encoding-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/encoding-test.c++ @@ -1471,6 +1471,57 @@ TEST(Encoding, ListSetters) { dst.set(0, src[0]); dst.set(1, src[1]); } + + checkTestMessage(root2); + + // Now let's do some adopting and disowning. + auto adopter = builder2.getOrphanage().newOrphan(); + auto disowner = root2.disownLists(); + + adopter.get().adoptList0(disowner.get().disownList0()); + adopter.get().adoptList1(disowner.get().disownList1()); + adopter.get().adoptList8(disowner.get().disownList8()); + adopter.get().adoptList16(disowner.get().disownList16()); + adopter.get().adoptList32(disowner.get().disownList32()); + adopter.get().adoptList64(disowner.get().disownList64()); + adopter.get().adoptListP(disowner.get().disownListP()); + + { + auto dst = adopter.get().initInt32ListList(3); + auto src = disowner.get().getInt32ListList(); + + auto orphan = src.disown(0); + checkList(orphan.getReader(), {1, 2, 3}); + dst.adopt(0, kj::mv(orphan)); + dst.adopt(1, src.disown(1)); + dst.adopt(2, src.disown(2)); + } + + { + auto dst = adopter.get().initTextListList(3); + auto src = disowner.get().getTextListList(); + + auto orphan = src.disown(0); + checkList(orphan.getReader(), {"foo", "bar"}); + dst.adopt(0, kj::mv(orphan)); + dst.adopt(1, src.disown(1)); + dst.adopt(2, src.disown(2)); + } + + { + auto dst = adopter.get().initStructListList(2); + auto src = disowner.get().getStructListList(); + + auto orphan = src.disown(0); + KJ_EXPECT(orphan.getReader()[0].getInt32Field() == 123); + KJ_EXPECT(orphan.getReader()[1].getInt32Field() == 456); + dst.adopt(0, kj::mv(orphan)); + dst.adopt(1, src.disown(1)); + } + + root2.adoptLists(kj::mv(adopter)); + + checkTestMessage(root2); } } diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/endian.h b/libs/EXTERNAL/capnproto/c++/src/capnp/endian.h index 096120a84f3..c0e3d758407 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/endian.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/endian.h @@ -21,14 +21,12 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "common.h" #include #include // memcpy +CAPNP_BEGIN_HEADER + namespace capnp { namespace _ { // private @@ -304,3 +302,5 @@ using WireValue = ShiftingWireValue; } // namespace _ (private) } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/ez-rpc.h b/libs/EXTERNAL/capnproto/c++/src/capnp/ez-rpc.h index 025c66bfaaa..ef2649239a8 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/ez-rpc.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/ez-rpc.h @@ -21,13 +21,11 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "rpc.h" #include "message.h" +CAPNP_BEGIN_HEADER + struct sockaddr; namespace kj { class AsyncIoProvider; class LowLevelAsyncIoProvider; } @@ -249,3 
+247,5 @@ inline typename Type::Client EzRpcClient::importCap(kj::StringPtr name) { } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/generated-header-support.h b/libs/EXTERNAL/capnproto/c++/src/capnp/generated-header-support.h index a4d27c08117..21f73126e7a 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/generated-header-support.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/generated-header-support.h @@ -23,10 +23,6 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "raw-schema.h" #include "layout.h" #include "list.h" @@ -37,6 +33,8 @@ #include #include +CAPNP_BEGIN_HEADER + namespace capnp { class MessageBuilder; // So that it can be declared a friend. @@ -317,7 +315,7 @@ inline constexpr uint sizeInWords() { } // namespace capnp -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) // MSVC doesn't understand floating-point constexpr yet. // // TODO(msvc): Remove this hack when MSVC is fixed. @@ -328,7 +326,7 @@ inline constexpr uint sizeInWords() { #define CAPNP_NON_INT_CONSTEXPR_DEF_INIT(value) #endif -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) // TODO(msvc): A little hack to allow MSVC to use C++14 return type deduction in cases where the // explicit type exposes bugs in the compiler. #define CAPNP_AUTO_IF_MSVC(...) auto @@ -351,8 +349,8 @@ inline constexpr uint sizeInWords() { static inline ::capnp::word const* encodedSchema() { return bp_##id; } \ } -#if _MSC_VER -// TODO(msvc): MSVC dosen't expect constexprs to have definitions. +#if _MSC_VER && !defined(__clang__) +// TODO(msvc): MSVC doesn't expect constexprs to have definitions. #define CAPNP_DEFINE_ENUM(type, id) #else #define CAPNP_DEFINE_ENUM(type, id) \ @@ -403,3 +401,13 @@ inline constexpr uint sizeInWords() { static constexpr ::capnp::_::RawSchema const* schema = &::capnp::schemas::s_##id; #endif // CAPNP_LITE, else + +namespace capnp { +namespace schemas { +CAPNP_DECLARE_SCHEMA(995f9a3377c0b16e); +// HACK: Forward-declare the RawSchema for StreamResult, from stream.capnp. This allows capnp +// files which declare streaming methods to avoid including stream.capnp.h. +} +} + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/layout.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/layout.c++ index 8f5c92047be..7fa5b4e85f2 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/layout.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/layout.c++ @@ -34,32 +34,38 @@ namespace capnp { namespace _ { // private #if !CAPNP_LITE -static BrokenCapFactory* brokenCapFactory = nullptr; +static BrokenCapFactory* globalBrokenCapFactory = nullptr; // Horrible hack: We need to be able to construct broken caps without any capability context, // but we can't have a link-time dependency on libcapnp-rpc. void setGlobalBrokenCapFactoryForLayoutCpp(BrokenCapFactory& factory) { // Called from capability.c++ when the capability API is used, to make sure that layout.c++ // is ready for it. May be called multiple times but always with the same value. 
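// [editor's illustration, not part of the patch] The same publish/read pattern expressed with
// std::atomic, shown only to clarify why relaxed ordering suffices here: every store writes the
// same pointer, and the atomic load exists mainly so ThreadSanitizer can see the access is
// intentional. `Factory` stands in for BrokenCapFactory; layout.c++ itself uses compiler
// builtins as shown below.
#include <atomic>

struct Factory;

static std::atomic<Factory*> globalFactory{nullptr};

void publishFactory(Factory& f) {
  globalFactory.store(&f, std::memory_order_relaxed);    // idempotent: always the same value
}

Factory* readFactory() {
  return globalFactory.load(std::memory_order_relaxed);  // atomic load keeps TSan happy
}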
-#if __GNUC__ - __atomic_store_n(&brokenCapFactory, &factory, __ATOMIC_RELAXED); +#if __GNUC__ || defined(__clang__) + __atomic_store_n(&globalBrokenCapFactory, &factory, __ATOMIC_RELAXED); #elif _MSC_VER - *static_cast(&brokenCapFactory) = &factory; + *static_cast(&globalBrokenCapFactory) = &factory; #else #error "Platform not supported" #endif } +static BrokenCapFactory* readGlobalBrokenCapFactoryForLayoutCpp() { +#if __GNUC__ || defined(__clang__) + // Thread-sanitizer doesn't have the right information to know this is safe without doing an + // atomic read. https://groups.google.com/g/capnproto/c/634juhn5ap0/m/pyRiwWl1AAAJ + return __atomic_load_n(&globalBrokenCapFactory, __ATOMIC_RELAXED); +#else + return globalBrokenCapFactory; +#endif +} + } // namespace _ (private) const uint ClientHook::NULL_CAPABILITY_BRAND = 0; +const uint ClientHook::BROKEN_CAPABILITY_BRAND = 0; // Defined here rather than capability.c++ so that we can safely call isNull() in this file. -void* ClientHook::getLocalServer(_::CapabilityServerSetBase& capServerSet) { - // Defined here rather than capability.c++ because otherwise building with -fsanitize=vptr fails. - return nullptr; -} - namespace _ { // private #endif // !CAPNP_LITE @@ -74,7 +80,7 @@ namespace _ { // private #if __GNUC__ >= 8 && !__clang__ // GCC 8 introduced a warning which complains whenever we try to memset() or memcpy() a -// WirePointer, becaues we deleted the regular copy constructor / assignment operator. Weirdly, if +// WirePointer, because we deleted the regular copy constructor / assignment operator. Weirdly, if // I remove those deletions, GCC *still* complains that WirePointer is non-trivial. I don't // understand why -- maybe because WireValue has private members? We don't want to make WireValue's // member public, but memset() and memcpy() on it are certainly valid and desirable, so we'll just @@ -446,7 +452,9 @@ struct WireHelpers { static KJ_ALWAYS_INLINE(word* allocate( WirePointer*& ref, SegmentBuilder*& segment, CapTableBuilder* capTable, SegmentWordCount amount, WirePointer::Kind kind, BuilderArena* orphanArena)) { - // Allocate space in the message for a new object, creating far pointers if necessary. + // Allocate space in the message for a new object, creating far pointers if necessary. The + // space is guaranteed to be zero'd (because MessageBuilder implementations are required to + // return zero'd memory). // // * `ref` starts out being a reference to the pointer which shall be assigned to point at the // new object. On return, `ref` points to a pointer which needs to be initialized with @@ -1614,7 +1622,8 @@ struct WireHelpers { // Initialize the pointer. ref->listRef.set(ElementSize::BYTE, byteSize * (ONE * ELEMENTS / BYTES)); - // Build the Text::Builder. This will initialize the NUL terminator. + // Build the Text::Builder. Note that since allocate()ed memory is pre-zero'd, we don't need + // to initialize the NUL terminator. return { segment, Text::Builder(reinterpret_cast(ptr), unbound(size / BYTES)) }; } @@ -2189,6 +2198,8 @@ struct WireHelpers { const WirePointer* ref, int nestingLimit)) { kj::Maybe> maybeCap; + auto brokenCapFactory = readGlobalBrokenCapFactoryForLayoutCpp(); + KJ_REQUIRE(brokenCapFactory != nullptr, "Trying to read capabilities without ever having created a capability context. 
" "To read capabilities from a message, you must imbue it with CapReaderContext, or " @@ -2773,12 +2784,23 @@ bool PointerReader::isCanonical(const word **readHead) { // The pointer is null, we are canonical and do not read return true; case PointerType::STRUCT: { - bool dataTrunc, ptrTrunc; + bool dataTrunc = false, ptrTrunc = false; auto structReader = this->getStruct(nullptr); if (structReader.getDataSectionSize() == ZERO * BITS && structReader.getPointerSectionSize() == ZERO * POINTERS) { return reinterpret_cast(this->pointer) == structReader.getLocation(); } else { + // Fun fact: Once this call to isCanonical() returns, Clang may re-order the evaluation of + // the && operators. In theory this is wrong because && is short-circuiting, but Clang + // apparently sees that there are no side effects to the right of &&, so decides it is + // safe to skip short-circuiting. It turns out, though, this is observable under + // valgrind: if we don't initialize `dataTrunc` when declaring it above, then valgrind + // reports "Conditional jump or move depends on uninitialised value(s)". Specifically + // this happens in cases where structReader.isCanonical() returns false -- it is allowed + // to skip initializing `dataTrunc` in that case. The short-circuiting && should mean + // that we don't read `dataTrunc` in that case, except Clang's optimizations. Ultimately + // the uninitialized read is fine because eventually the whole expression evaluates false + // either way. But, to make valgrind happy, we initialize the bools above... return structReader.isCanonical(readHead, readHead, &dataTrunc, &ptrTrunc) && dataTrunc && ptrTrunc; } } @@ -3501,8 +3523,6 @@ OrphanBuilder OrphanBuilder::concat( } OrphanBuilder OrphanBuilder::referenceExternalData(BuilderArena* arena, Data::Reader data) { - // TODO(someday): We now allow unaligned segments on architectures thata support it. We could - // consider relaxing this check as well? KJ_REQUIRE(reinterpret_cast(data.begin()) % sizeof(void*) == 0, "Cannot referenceExternalData() that is not aligned."); diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/layout.h b/libs/EXTERNAL/capnproto/c++/src/capnp/layout.h index 756de77d489..c8d533cff1b 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/layout.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/layout.h @@ -28,10 +28,6 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include #include "common.h" @@ -39,6 +35,8 @@ #include "endian.h" #include // work-around macro conflict with `VOID` +CAPNP_BEGIN_HEADER + #if (defined(__mips__) || defined(__hppa__)) && !defined(CAPNP_CANONICALIZE_NAN) #define CAPNP_CANONICALIZE_NAN 1 // Explicitly detect NaNs and canonicalize them to the quiet NaN value as would be returned by @@ -60,9 +58,7 @@ namespace capnp { -#if !CAPNP_LITE class ClientHook; -#endif // !CAPNP_LITE namespace _ { // private @@ -314,15 +310,12 @@ inline double unmask(uint64_t value, uint64_t mask) { class CapTableReader { public: -#if !CAPNP_LITE virtual kj::Maybe> extractCap(uint index) = 0; // Extract the capability at the given index. If the index is invalid, returns null. -#endif // !CAPNP_LITE }; class CapTableBuilder: public CapTableReader { public: -#if !CAPNP_LITE virtual uint injectCap(kj::Own&& cap) = 0; // Add the capability to the message and return its index. 
If the same ClientHook is injected // twice, this may return the same index both times, but in this case dropCap() needs to be @@ -330,7 +323,6 @@ class CapTableBuilder: public CapTableReader { virtual void dropCap(uint index) = 0; // Remove a capability injected earlier. Called when the pointer is overwritten or zero'd out. -#endif // !CAPNP_LITE }; // ------------------------------------------------------------------- @@ -1271,3 +1263,5 @@ inline OrphanBuilder& OrphanBuilder::operator=(OrphanBuilder&& other) { } // namespace _ (private) } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/list.h b/libs/EXTERNAL/capnproto/c++/src/capnp/list.h index 56e874a6a3a..2c777f8179a 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/list.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/list.h @@ -21,16 +21,11 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "layout.h" #include "orphan.h" #include -#ifdef KJ_STD_COMPAT -#include -#endif // KJ_STD_COMPAT + +CAPNP_BEGIN_HEADER namespace capnp { namespace _ { // private @@ -51,6 +46,8 @@ class TemporaryPointer { T value; }; +// By default this isn't compatible with STL algorithms. To add STL support either define +// KJ_STD_COMPAT at the top of your compilation unit or include capnp/compat/std-iterator.h. template class IndexingIterator { public: @@ -399,13 +396,13 @@ struct List, Kind::LIST> { l.set(i++, element); } } - inline void adopt(uint index, Orphan&& value) { + inline void adopt(uint index, Orphan>&& value) { KJ_IREQUIRE(index < size()); builder.getPointerElement(bounded(index) * ELEMENTS).adopt(kj::mv(value.builder)); } - inline Orphan disown(uint index) { + inline Orphan> disown(uint index) { KJ_IREQUIRE(index < size()); - return Orphan(builder.getPointerElement(bounded(index) * ELEMENTS).disown()); + return Orphan>(builder.getPointerElement(bounded(index) * ELEMENTS).disown()); } typedef _::IndexingIterator::Builder> Iterator; @@ -549,11 +546,7 @@ struct List { } // namespace capnp #ifdef KJ_STD_COMPAT -namespace std { - -template -struct iterator_traits> - : public std::iterator {}; - -} // namespace std +#include "compat/std-iterator.h" #endif // KJ_STD_COMPAT + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/llvm-fuzzer-testcase.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/llvm-fuzzer-testcase.c++ new file mode 100644 index 00000000000..1d47e276c77 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/llvm-fuzzer-testcase.c++ @@ -0,0 +1,26 @@ +#include "test-util.h" +#include +#include "serialize.h" +#include +#include + +/* This is the entry point of a fuzz target to be used with libFuzzer + * or another fuzz driver. 
+ * Such a fuzz driver is used by the autotools target capnp-llvm-fuzzer-testcase + * when the environment variable LIB_FUZZING_ENGINE is defined + * for instance LIB_FUZZING_ENGINE=-fsanitize=fuzzer for libFuzzer + */ +extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size) { + kj::ArrayPtr array(Data, Size); + kj::ArrayInputStream ais(array); + + KJ_IF_MAYBE(e, kj::runCatchingExceptions([&]() { + capnp::InputStreamMessageReader reader(ais); + capnp::_::checkTestMessage(reader.getRoot()); + capnp::_::checkDynamicTestMessage(reader.getRoot(capnp::Schema::from())); + kj::str(reader.getRoot()); + })) { + KJ_LOG(ERROR, "threw"); + } + return 0; +} diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/membrane-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/membrane-test.c++ index 9adaa4a0071..4fa928e45eb 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/membrane-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/membrane-test.c++ @@ -277,15 +277,16 @@ KJ_TEST("apply membrane using copyOutOfMembrane() on AnyPointer") { } struct TestRpcEnv { - kj::AsyncIoContext io; + kj::EventLoop loop; + kj::WaitScope waitScope; kj::TwoWayPipe pipe; TwoPartyClient client; TwoPartyClient server; test::TestMembrane::Client membraned; TestRpcEnv(kj::Maybe> revokePromise = nullptr) - : io(kj::setupAsyncIo()), - pipe(io.provider->newTwoWayPipe()), + : waitScope(loop), + pipe(kj::newTwoWayPipe()), client(*pipe.ends[0]), server(*pipe.ends[1], membrane(kj::heap(), @@ -296,7 +297,7 @@ struct TestRpcEnv { void testThing(kj::Function makeThing, kj::StringPtr localPassThrough, kj::StringPtr localIntercept, kj::StringPtr remotePassThrough, kj::StringPtr remoteIntercept) { - testThingImpl(io.waitScope, membraned, kj::mv(makeThing), + testThingImpl(waitScope, membraned, kj::mv(makeThing), localPassThrough, localIntercept, remotePassThrough, remoteIntercept); } }; @@ -304,7 +305,7 @@ struct TestRpcEnv { KJ_TEST("call remote object inside membrane") { TestRpcEnv env; env.testThing([&]() { - return env.membraned.makeThingRequest().send().wait(env.io.waitScope).getThing(); + return env.membraned.makeThingRequest().send().wait(env.waitScope).getThing(); }, "inside", "inbound", "inside", "inside"); } @@ -336,7 +337,7 @@ KJ_TEST("call remote capability that has passed into and back out of membrane") env.testThing([&]() { auto req = env.membraned.loopbackRequest(); req.setThing(kj::heap("outside")); - return req.send().wait(env.io.waitScope).getThing(); + return req.send().wait(env.waitScope).getThing(); }, "outside", "outside", "outside", "outbound"); } @@ -354,11 +355,11 @@ KJ_TEST("revoke membrane") { TestRpcEnv env(kj::mv(paf.promise)); - auto thing = env.membraned.makeThingRequest().send().wait(env.io.waitScope).getThing(); + auto thing = env.membraned.makeThingRequest().send().wait(env.waitScope).getThing(); auto callPromise = env.membraned.waitForeverRequest().send(); - KJ_EXPECT(!callPromise.poll(env.io.waitScope)); + KJ_EXPECT(!callPromise.poll(env.waitScope)); paf.fulfiller->reject(KJ_EXCEPTION(DISCONNECTED, "foobar")); @@ -368,14 +369,14 @@ KJ_TEST("revoke membrane") { // involves fork()ing the process to run the code so if it has side effects on file descriptors // then we'll get in a bad state... 
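// [editor's illustration, not part of the patch] The TestRpcEnv change above swaps real OS async
// I/O (kj::setupAsyncIo) for a purely in-process setup. A standalone sketch of that pattern;
// MyInterface and MyImpl are placeholders for a generated interface and its server implementation.
#include <capnp/rpc-twoparty.h>
#include <kj/async-io.h>

static void inProcessRpcSketch() {
  kj::EventLoop loop;
  kj::WaitScope waitScope(loop);
  auto pipe = kj::newTwoWayPipe();                       // in-memory AsyncIoStream pair
  capnp::TwoPartyClient client(*pipe.ends[0]);
  capnp::TwoPartyClient server(*pipe.ends[1], kj::heap<MyImpl>(),
                               capnp::rpc::twoparty::Side::SERVER);
  auto cap = client.bootstrap().castAs<MyInterface>();
  // ... build requests on `cap` and .wait(waitScope) on their promises ...
}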
- KJ_ASSERT(callPromise.poll(env.io.waitScope)); - KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("foobar", callPromise.ignoreResult().wait(env.io.waitScope)); + KJ_ASSERT(callPromise.poll(env.waitScope)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("foobar", callPromise.ignoreResult().wait(env.waitScope)); KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("foobar", - env.membraned.makeThingRequest().send().ignoreResult().wait(env.io.waitScope)); + env.membraned.makeThingRequest().send().ignoreResult().wait(env.waitScope)); KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("foobar", - thing.passThroughRequest().send().ignoreResult().wait(env.io.waitScope)); + thing.passThroughRequest().send().ignoreResult().wait(env.waitScope)); } } // namespace diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/membrane.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/membrane.c++ index 1655ec28e11..732f062fd9b 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/membrane.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/membrane.c++ @@ -219,6 +219,18 @@ public: return RemotePromise(kj::mv(newPromise), kj::mv(newPipeline)); } + kj::Promise sendStreaming() override { + auto promise = inner->sendStreaming(); + + KJ_IF_MAYBE(r, policy->onRevoked()) { + promise = promise.exclusiveJoin(r->then([]() { + KJ_FAIL_REQUIRE("onRevoked() promise resolved; it should only reject"); + })); + } + + return promise; + } + const void* getBrand() override { return MEMBRANE_BRAND; } @@ -250,7 +262,7 @@ public: } void releaseParams() override { - KJ_REQUIRE(!releasedParams); + // Note that releaseParams() is idempotent -- it can be called multiple times. releasedParams = true; inner->releaseParams(); } @@ -265,6 +277,11 @@ public: } } + void setPipeline(kj::Own&& pipeline) override { + inner->setPipeline(kj::refcounted( + kj::mv(pipeline), policy->addRef(), !reverse)); + } + kj::Promise tailCall(kj::Own&& request) override { return inner->tailCall(MembraneRequestHook::wrap(kj::mv(request), *policy, !reverse)); } @@ -368,13 +385,15 @@ public: ? policy->outboundCall(interfaceId, methodId, Capability::Client(inner->addRef())) : policy->inboundCall(interfaceId, methodId, Capability::Client(inner->addRef())); KJ_IF_MAYBE(r, redirect) { - // The policy says that *if* this capability points into the membrane, then we want to - // redirect the call. However, if this capability is a promise, then it could resolve to - // something outside the membrane later. We have to wait before we actually redirect, - // otherwise behavior will differ depending on whether the promise is resolved. - KJ_IF_MAYBE(p, whenMoreResolved()) { - return newLocalPromiseClient(p->attach(addRef())) - ->newCall(interfaceId, methodId, sizeHint); + if (policy->shouldResolveBeforeRedirecting()) { + // The policy says that *if* this capability points into the membrane, then we want to + // redirect the call. However, if this capability is a promise, then it could resolve to + // something outside the membrane later. We have to wait before we actually redirect, + // otherwise behavior will differ depending on whether the promise is resolved. + KJ_IF_MAYBE(p, whenMoreResolved()) { + return newLocalPromiseClient(p->attach(addRef())) + ->newCall(interfaceId, methodId, sizeHint); + } } return ClientHook::from(kj::mv(*r))->newCall(interfaceId, methodId, sizeHint); @@ -396,13 +415,15 @@ public: ? 
policy->outboundCall(interfaceId, methodId, Capability::Client(inner->addRef())) : policy->inboundCall(interfaceId, methodId, Capability::Client(inner->addRef())); KJ_IF_MAYBE(r, redirect) { - // The policy says that *if* this capability points into the membrane, then we want to - // redirect the call. However, if this capability is a promise, then it could resolve to - // something outside the membrane later. We have to wait before we actually redirect, - // otherwise behavior will differ depending on whether the promise is resolved. - KJ_IF_MAYBE(p, whenMoreResolved()) { - return newLocalPromiseClient(p->attach(addRef())) - ->call(interfaceId, methodId, kj::mv(context)); + if (policy->shouldResolveBeforeRedirecting()) { + // The policy says that *if* this capability points into the membrane, then we want to + // redirect the call. However, if this capability is a promise, then it could resolve to + // something outside the membrane later. We have to wait before we actually redirect, + // otherwise behavior will differ depending on whether the promise is resolved. + KJ_IF_MAYBE(p, whenMoreResolved()) { + return newLocalPromiseClient(p->attach(addRef())) + ->call(interfaceId, methodId, kj::mv(context)); + } } return ClientHook::from(kj::mv(*r))->call(interfaceId, methodId, kj::mv(context)); @@ -469,6 +490,13 @@ public: return MEMBRANE_BRAND; } + kj::Maybe getFd() override { + // We can't let FDs pass over membranes because we have no way to enforce the membrane policy + // on them. If the MembranePolicy wishes to explicitly permit certain FDs to pass, it can + // always do so by overriding the appropriate policy methods. + return nullptr; + } + private: kj::Own inner; kj::Own policy; diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/membrane.h b/libs/EXTERNAL/capnproto/c++/src/capnp/membrane.h index a6f8b219b9c..d51e2308585 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/membrane.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/membrane.h @@ -114,6 +114,24 @@ class MembranePolicy { // invoked for new calls, but the `target` passed to them will be a capability that always // rethrows the revocation exception. + virtual bool shouldResolveBeforeRedirecting() { return true; } + // If this returns true, then when inboundCall() or outboundCall() returns a redirect, but the + // original target is a promise, then the membrane will discard the redirect and instead wait + // for the promise to become more resolved and try again. + // + // This behavior is important in particular when implementing a membrane that wants to intercept + // calls that would otherwise terminate inside the membrane, but needs to be careful not to + // intercept calls that might be reflected back out of the membrane. If the promise eventually + // resolves to a capability outside the membrane, then the call will be forwarded to that + // capability without applying the policy at all. + // + // However, some membranes don't need this behavior, and may be negatively impacted by the + // unnecessary waiting. Such membranes should override this to return false. + // + // TODO(cleanup): Consider a backwards-incompatible revamp of the MembranePolicy API with a + // better design here. Maybe we should more carefully distinguish between MembranePolicies + // which are reversible vs. those which are one-way? + // --------------------------------------------------------------------------- // Control over importing and exporting. 
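// [editor's illustration, not part of the patch] A minimal MembranePolicy sketch that opts out of
// the resolve-before-redirect wait described above. The class name is made up and the redirect
// decisions are elided (returning nullptr means "do not redirect").
#include <capnp/membrane.h>
#include <kj/refcount.h>

class PassThroughPolicy final: public capnp::MembranePolicy, public kj::Refcounted {
public:
  kj::Maybe<capnp::Capability::Client> inboundCall(
      uint64_t interfaceId, uint16_t methodId, capnp::Capability::Client target) override {
    return nullptr;
  }
  kj::Maybe<capnp::Capability::Client> outboundCall(
      uint64_t interfaceId, uint16_t methodId, capnp::Capability::Client target) override {
    return nullptr;
  }
  kj::Own<MembranePolicy> addRef() override { return kj::addRef(*this); }

  bool shouldResolveBeforeRedirecting() override {
    // This policy never reflects capabilities back out of the membrane, so it can skip the extra
    // wait on promise resolution and avoid the added latency.
    return false;
  }
};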
// diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/message-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/message-test.c++ index e7a65ebf7af..4d7362e3ff6 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/message-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/message-test.c++ @@ -168,6 +168,30 @@ TEST(Message, ReadWriteDataStruct) { checkTestMessageAllZero(defaultValue()); } +KJ_TEST("clone()") { + MallocMessageBuilder builder(2048); + initTestMessage(builder.getRoot()); + + auto copy = clone(builder.getRoot().asReader()); + checkTestMessage(*copy); +} + +#if !CAPNP_ALLOW_UNALIGNED +KJ_TEST("disallow unaligned") { + union { + char buffer[16]; + word align; + }; + memset(buffer, 0, sizeof(buffer)); + + auto unaligned = kj::arrayPtr(reinterpret_cast(buffer + 1), 1); + + kj::ArrayPtr segments[1] = {unaligned}; + SegmentArrayMessageReader message(segments); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("unaligned", message.getRoot()); +} +#endif + // TODO(test): More tests. } // namespace diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/message.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/message.c++ index 74a760608c8..90a66cca1fc 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/message.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/message.c++ @@ -33,11 +33,13 @@ namespace { class DummyCapTableReader: public _::CapTableReader { public: -#if !CAPNP_LITE kj::Maybe> extractCap(uint index) override { +#if CAPNP_LITE + KJ_UNIMPLEMENTED("no cap tables in lite mode"); +#else return nullptr; - } #endif + } }; static KJ_CONSTEXPR(const) DummyCapTableReader dummyCapTableReader = DummyCapTableReader(); @@ -80,6 +82,9 @@ bool MessageReader::isCanonical() { return rootIsCanonical && allWordsConsumed; } +size_t MessageReader::sizeInWords() { + return arena()->sizeInWords(); +} AnyPointer::Reader MessageReader::getRootInternal() { if (!allocatedArena) { @@ -178,6 +183,14 @@ bool MessageBuilder::isCanonical() { .isCanonical(&readHead); } +size_t MessageBuilder::sizeInWords() { + return arena()->sizeInWords(); +} + +kj::Own<_::CapTableBuilder> MessageBuilder::releaseBuiltinCapTable() { + return arena()->releaseLocalCapTable(); +} + // ======================================================================================= SegmentArrayMessageReader::SegmentArrayMessageReader( diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/message.h b/libs/EXTERNAL/capnproto/c++/src/capnp/message.h index 8157a0b761c..55a8b2e98d2 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/message.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/message.h @@ -19,6 +19,8 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +#pragma once + #include #include #include @@ -28,17 +30,14 @@ #include "layout.h" #include "any.h" -#pragma once - -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif +CAPNP_BEGIN_HEADER namespace capnp { namespace _ { // private class ReaderArena; class BuilderArena; + struct CloneImpl; } class StructSchema; @@ -105,10 +104,6 @@ class MessageReader { virtual kj::ArrayPtr getSegment(uint id) = 0; // Gets the segment with the given ID, or returns null if no such segment exists. This method // will be called at most once for each segment ID. - // - // The returned array must be aligned properly for the host architecture. This means that on - // x86/x64, alignment is optional, though recommended for performance, whereas on many other - // architectures, alignment is required. 
inline const ReaderOptions& getOptions(); // Get the options passed to the constructor. @@ -126,14 +121,23 @@ class MessageReader { bool isCanonical(); // Returns whether the message encoded in the reader is in canonical form. + size_t sizeInWords(); + // Add up the size of all segments. + private: ReaderOptions options; +#if defined(__EMSCRIPTEN__) + static constexpr size_t arenaSpacePadding = 19; +#else + static constexpr size_t arenaSpacePadding = 18; +#endif + // Space in which we can construct a ReaderArena. We don't use ReaderArena directly here // because we don't want clients to have to #include arena.h, which itself includes a bunch of // other headers. We don't use a pointer to a ReaderArena because that would require an // extra malloc on every message which could be expensive when processing small messages. - void* arenaSpace[18 + sizeof(kj::MutexGuarded) / sizeof(void*)]; + alignas(8) void* arenaSpace[arenaSpacePadding + sizeof(kj::MutexGuarded) / sizeof(void*)]; bool allocatedArena; _::ReaderArena* arena() { return reinterpret_cast<_::ReaderArena*>(arenaSpace); } @@ -196,10 +200,6 @@ class MessageBuilder { // allocateSegment() is responsible for zeroing the memory before returning. This is required // because otherwise the Cap'n Proto implementation would have to zero the memory anyway, and // many allocators are able to provide already-zero'd memory more efficiently. - // - // The returned array must be aligned properly for the host architecture. This means that on - // x86/x64, alignment is optional, though recommended for performance, whereas on many other - // architectures, alignment is required. template typename RootType::Builder initRoot(); @@ -237,8 +237,11 @@ class MessageBuilder { bool isCanonical(); // Check whether the message builder is in canonical form + size_t sizeInWords(); + // Add up the allocated space from all segments. + private: - void* arenaSpace[22]; + alignas(8) void* arenaSpace[22]; // Space in which we can construct a BuilderArena. We don't use BuilderArena directly here // because we don't want clients to have to #include arena.h, which itself includes a bunch of // big STL headers. We don't use a pointer to a BuilderArena because that would require an @@ -254,6 +257,14 @@ class MessageBuilder { _::BuilderArena* arena() { return reinterpret_cast<_::BuilderArena*>(arenaSpace); } _::SegmentBuilder* getRootSegment(); AnyPointer::Builder getRootInternal(); + + kj::Own<_::CapTableBuilder> releaseBuiltinCapTable(); + // Hack for clone() to extract the cap table. + + friend struct _::CloneImpl; + // We can't declare clone() as a friend directly because old versions of GCC incorrectly demand + // that the first declaration (even if it is a friend declaration) specify the default type args, + // whereas correct compilers do not permit default type args to be specified on a friend decl. }; template @@ -315,6 +326,10 @@ static typename Type::Reader defaultValue(); // // TODO(cleanup): Find a better home for this function? +template > +kj::Own> clone(Reader&& reader); +// Make a deep copy of the given Reader on the heap, producing an owned pointer. 
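// [editor's illustration, not part of the patch] Minimal usage sketch of the clone() helper
// declared above. MyStruct and its foo field are placeholders for any generated struct type.
#include <capnp/message.h>
#include <kj/debug.h>

static void cloneSketch() {
  capnp::MallocMessageBuilder builder;
  auto root = builder.initRoot<MyStruct>();
  root.setFoo(123);
  kj::Own<MyStruct::Reader> copy = capnp::clone(root.asReader());
  // `copy` owns its backing buffer, so it remains valid after `builder` is destroyed.
  KJ_ASSERT(copy->getFoo() == 123);
}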
+ // ======================================================================================= class SegmentArrayMessageReader: public MessageReader { @@ -506,9 +521,38 @@ static typename Type::Reader defaultValue() { return typename Type::Reader(_::StructReader()); } +namespace _ { + struct CloneImpl { + static inline kj::Own<_::CapTableBuilder> releaseBuiltinCapTable(MessageBuilder& message) { + return message.releaseBuiltinCapTable(); + } + }; +}; + +template +kj::Own> clone(Reader&& reader) { + auto size = reader.totalSize(); + auto buffer = kj::heapArray(size.wordCount + 1); + memset(buffer.asBytes().begin(), 0, buffer.asBytes().size()); + if (size.capCount == 0) { + copyToUnchecked(reader, buffer); + auto result = readMessageUnchecked>(buffer.begin()); + return kj::attachVal(result, kj::mv(buffer)); + } else { + FlatMessageBuilder builder(buffer); + builder.setRoot(kj::fwd(reader)); + builder.requireFilled(); + auto capTable = _::CloneImpl::releaseBuiltinCapTable(builder); + AnyPointer::Reader raw(_::PointerReader::getRootUnchecked(buffer.begin()).imbue(capTable)); + return kj::attachVal(raw.getAs>(), kj::mv(buffer), kj::mv(capTable)); + } +} + template kj::Array canonicalize(T&& reader) { return _::PointerHelpers>::getInternalReader(reader).canonicalize(); } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/orphan.h b/libs/EXTERNAL/capnproto/c++/src/capnp/orphan.h index fe3e16e8e16..ab226500db8 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/orphan.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/orphan.h @@ -21,12 +21,10 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "layout.h" +CAPNP_BEGIN_HEADER + namespace capnp { class StructSchema; @@ -435,3 +433,5 @@ inline Orphan Orphanage::referenceExternalData(Data::Reader data) const { } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp index a13b47168a4..fefd188aef3 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp @@ -108,23 +108,6 @@ interface Persistent@0xc8cb212fcd9f5691(SturdyRef, Owner) { } } -interface RealmGateway(InternalRef, ExternalRef, InternalOwner, ExternalOwner) { - # Interface invoked when a SturdyRef is about to cross realms. The RPC system supports providing - # a RealmGateway as a callback hook when setting up RPC over some VatNetwork. - - import @0 (cap :Persistent(ExternalRef, ExternalOwner), - params :Persistent(InternalRef, InternalOwner).SaveParams) - -> Persistent(InternalRef, InternalOwner).SaveResults; - # Given an external capability, save it and return an internal reference. Used when someone - # inside the realm tries to save a capability from outside the realm. - - export @1 (cap :Persistent(InternalRef, InternalOwner), - params :Persistent(ExternalRef, ExternalOwner).SaveParams) - -> Persistent(ExternalRef, ExternalOwner).SaveResults; - # Given an internal capability, save it and return an external reference. Used when someone - # outside the realm tries to save a capability from inside the realm. 
-} - annotation persistent(interface, field) :Void; # Apply this annotation to interfaces for objects that will always be persistent, instead of # extending the Persistent capability, since the correct type parameters to Persistent depend on diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp.c++ index 028aefe7888..17ee6f45101 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp.c++ @@ -170,336 +170,6 @@ const ::capnp::_::RawSchema s_b76848c18c40efbf = { 0, 1, i_b76848c18c40efbf, nullptr, nullptr, { &s_b76848c18c40efbf, nullptr, nullptr, 0, 0, nullptr } }; #endif // !CAPNP_LITE -static const ::capnp::_::AlignedData<99> b_84ff286cd00a3ed4 = { - { 0, 0, 0, 0, 5, 0, 6, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 23, 0, 0, 0, 3, 0, 0, 0, - 215, 238, 63, 152, 54, 8, 99, 184, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 21, 0, 0, 0, 34, 1, 0, 0, - 37, 0, 0, 0, 7, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 33, 0, 0, 0, 135, 0, 0, 0, - 41, 1, 0, 0, 7, 0, 0, 0, - 41, 1, 0, 0, 39, 0, 0, 0, - 99, 97, 112, 110, 112, 47, 112, 101, - 114, 115, 105, 115, 116, 101, 110, 116, - 46, 99, 97, 112, 110, 112, 58, 82, - 101, 97, 108, 109, 71, 97, 116, 101, - 119, 97, 121, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 1, 0, - 8, 0, 0, 0, 3, 0, 5, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 77, 87, 9, 57, 29, 204, 194, 240, - 191, 239, 64, 140, 193, 72, 104, 183, - 49, 0, 0, 0, 58, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 44, 0, 0, 0, 0, 0, 1, 0, - 60, 0, 0, 0, 0, 0, 1, 0, - 129, 0, 0, 0, 7, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 170, 163, 45, 72, 139, 161, 175, 236, - 191, 239, 64, 140, 193, 72, 104, 183, - 117, 0, 0, 0, 58, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 112, 0, 0, 0, 0, 0, 1, 0, - 128, 0, 0, 0, 0, 0, 1, 0, - 197, 0, 0, 0, 7, 0, 0, 0, - 105, 109, 112, 111, 114, 116, 0, 0, - 1, 0, 0, 0, 31, 0, 0, 0, - 4, 0, 0, 0, 2, 0, 1, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 31, 0, 0, 0, - 4, 0, 0, 0, 2, 0, 1, 0, - 145, 86, 159, 205, 47, 33, 203, 200, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 39, 0, 0, 0, - 8, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 8, 0, 0, 0, 3, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 16, 0, 0, 0, 3, 0, 1, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 2, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0, - 101, 120, 112, 111, 114, 116, 0, 0, - 1, 0, 0, 0, 31, 0, 0, 0, - 4, 0, 0, 0, 2, 0, 1, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 31, 0, 0, 0, - 4, 0, 0, 0, 2, 0, 1, 0, - 145, 86, 159, 205, 47, 33, 203, 200, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 39, 0, 0, 0, - 8, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 8, 0, 0, 0, 3, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 16, 0, 0, 0, 3, 0, 1, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 1, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 3, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0, - 0, 0, 0, 0, 1, 0, 1, 0, - 16, 0, 0, 0, 0, 0, 1, 0, - 13, 0, 0, 0, 98, 0, 0, 0, - 17, 0, 0, 0, 98, 0, 0, 0, - 21, 0, 0, 0, 114, 0, 0, 0, - 25, 0, 0, 0, 114, 0, 0, 0, - 73, 110, 116, 101, 114, 110, 97, 108, - 82, 101, 102, 0, 0, 0, 0, 
0, - 69, 120, 116, 101, 114, 110, 97, 108, - 82, 101, 102, 0, 0, 0, 0, 0, - 73, 110, 116, 101, 114, 110, 97, 108, - 79, 119, 110, 101, 114, 0, 0, 0, - 69, 120, 116, 101, 114, 110, 97, 108, - 79, 119, 110, 101, 114, 0, 0, 0, } -}; -::capnp::word const* const bp_84ff286cd00a3ed4 = b_84ff286cd00a3ed4.words; -#if !CAPNP_LITE -static const ::capnp::_::RawSchema* const d_84ff286cd00a3ed4[] = { - &s_b76848c18c40efbf, - &s_ecafa18b482da3aa, - &s_f0c2cc1d3909574d, -}; -static const uint16_t m_84ff286cd00a3ed4[] = {1, 0}; -KJ_CONSTEXPR(const) ::capnp::_::RawBrandedSchema::Dependency bd_84ff286cd00a3ed4[] = { - { 33554432, ::capnp::RealmGateway< ::capnp::AnyPointer, ::capnp::AnyPointer, ::capnp::AnyPointer, ::capnp::AnyPointer>::ImportParams::_capnpPrivate::brand() }, - { 33554433, ::capnp::RealmGateway< ::capnp::AnyPointer, ::capnp::AnyPointer, ::capnp::AnyPointer, ::capnp::AnyPointer>::ExportParams::_capnpPrivate::brand() }, - { 50331648, ::capnp::Persistent< ::capnp::AnyPointer, ::capnp::AnyPointer>::SaveResults::_capnpPrivate::brand() }, - { 50331649, ::capnp::Persistent< ::capnp::AnyPointer, ::capnp::AnyPointer>::SaveResults::_capnpPrivate::brand() }, -}; -const ::capnp::_::RawSchema s_84ff286cd00a3ed4 = { - 0x84ff286cd00a3ed4, b_84ff286cd00a3ed4.words, 99, d_84ff286cd00a3ed4, m_84ff286cd00a3ed4, - 3, 2, nullptr, nullptr, nullptr, { &s_84ff286cd00a3ed4, nullptr, bd_84ff286cd00a3ed4, 0, sizeof(bd_84ff286cd00a3ed4) / sizeof(bd_84ff286cd00a3ed4[0]), nullptr } -}; -#endif // !CAPNP_LITE -static const ::capnp::_::AlignedData<86> b_f0c2cc1d3909574d = { - { 0, 0, 0, 0, 5, 0, 6, 0, - 77, 87, 9, 57, 29, 204, 194, 240, - 36, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 2, 0, 7, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 21, 0, 0, 0, 146, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 37, 0, 0, 0, 119, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 99, 97, 112, 110, 112, 47, 112, 101, - 114, 115, 105, 115, 116, 101, 110, 116, - 46, 99, 97, 112, 110, 112, 58, 82, - 101, 97, 108, 109, 71, 97, 116, 101, - 119, 97, 121, 46, 105, 109, 112, 111, - 114, 116, 36, 80, 97, 114, 97, 109, - 115, 0, 0, 0, 0, 0, 0, 0, - 8, 0, 0, 0, 3, 0, 4, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 41, 0, 0, 0, 34, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 36, 0, 0, 0, 3, 0, 1, 0, - 120, 0, 0, 0, 2, 0, 1, 0, - 1, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 1, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 117, 0, 0, 0, 58, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 112, 0, 0, 0, 3, 0, 1, 0, - 196, 0, 0, 0, 2, 0, 1, 0, - 99, 97, 112, 0, 0, 0, 0, 0, - 17, 0, 0, 0, 0, 0, 0, 0, - 145, 86, 159, 205, 47, 33, 203, 200, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0, - 1, 0, 0, 0, 31, 0, 0, 0, - 4, 0, 0, 0, 2, 0, 1, 0, - 145, 86, 159, 205, 47, 33, 203, 200, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 39, 0, 0, 0, - 8, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 8, 0, 0, 0, 3, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 16, 0, 0, 0, 3, 0, 1, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 1, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 3, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 17, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 112, 97, 114, 97, 109, 115, 0, 0, - 16, 0, 0, 0, 0, 0, 0, 0, - 165, 115, 48, 24, 89, 186, 111, 247, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0, - 1, 0, 0, 0, 31, 0, 0, 0, - 4, 0, 0, 0, 2, 0, 1, 0, - 145, 86, 159, 205, 47, 33, 203, 200, - 0, 
0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 39, 0, 0, 0, - 8, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 8, 0, 0, 0, 3, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 16, 0, 0, 0, 3, 0, 1, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 2, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 16, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, } -}; -::capnp::word const* const bp_f0c2cc1d3909574d = b_f0c2cc1d3909574d.words; -#if !CAPNP_LITE -static const ::capnp::_::RawSchema* const d_f0c2cc1d3909574d[] = { - &s_c8cb212fcd9f5691, - &s_f76fba59183073a5, -}; -static const uint16_t m_f0c2cc1d3909574d[] = {0, 1}; -static const uint16_t i_f0c2cc1d3909574d[] = {0, 1}; -KJ_CONSTEXPR(const) ::capnp::_::RawBrandedSchema::Dependency bd_f0c2cc1d3909574d[] = { - { 16777216, ::capnp::Persistent< ::capnp::AnyPointer, ::capnp::AnyPointer>::_capnpPrivate::brand() }, - { 16777217, ::capnp::Persistent< ::capnp::AnyPointer, ::capnp::AnyPointer>::SaveParams::_capnpPrivate::brand() }, -}; -const ::capnp::_::RawSchema s_f0c2cc1d3909574d = { - 0xf0c2cc1d3909574d, b_f0c2cc1d3909574d.words, 86, d_f0c2cc1d3909574d, m_f0c2cc1d3909574d, - 2, 2, i_f0c2cc1d3909574d, nullptr, nullptr, { &s_f0c2cc1d3909574d, nullptr, bd_f0c2cc1d3909574d, 0, sizeof(bd_f0c2cc1d3909574d) / sizeof(bd_f0c2cc1d3909574d[0]), nullptr } -}; -#endif // !CAPNP_LITE -static const ::capnp::_::AlignedData<86> b_ecafa18b482da3aa = { - { 0, 0, 0, 0, 5, 0, 6, 0, - 170, 163, 45, 72, 139, 161, 175, 236, - 36, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 2, 0, 7, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 21, 0, 0, 0, 146, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 37, 0, 0, 0, 119, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 99, 97, 112, 110, 112, 47, 112, 101, - 114, 115, 105, 115, 116, 101, 110, 116, - 46, 99, 97, 112, 110, 112, 58, 82, - 101, 97, 108, 109, 71, 97, 116, 101, - 119, 97, 121, 46, 101, 120, 112, 111, - 114, 116, 36, 80, 97, 114, 97, 109, - 115, 0, 0, 0, 0, 0, 0, 0, - 8, 0, 0, 0, 3, 0, 4, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 41, 0, 0, 0, 34, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 36, 0, 0, 0, 3, 0, 1, 0, - 120, 0, 0, 0, 2, 0, 1, 0, - 1, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 1, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 117, 0, 0, 0, 58, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 112, 0, 0, 0, 3, 0, 1, 0, - 196, 0, 0, 0, 2, 0, 1, 0, - 99, 97, 112, 0, 0, 0, 0, 0, - 17, 0, 0, 0, 0, 0, 0, 0, - 145, 86, 159, 205, 47, 33, 203, 200, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0, - 1, 0, 0, 0, 31, 0, 0, 0, - 4, 0, 0, 0, 2, 0, 1, 0, - 145, 86, 159, 205, 47, 33, 203, 200, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 39, 0, 0, 0, - 8, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 8, 0, 0, 0, 3, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 16, 0, 0, 0, 3, 0, 1, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 2, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 17, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 112, 97, 114, 97, 109, 115, 0, 0, - 16, 0, 0, 0, 0, 0, 0, 0, - 165, 115, 48, 24, 89, 186, 111, 247, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0, - 1, 0, 0, 0, 31, 0, 0, 0, - 4, 0, 0, 0, 2, 0, 1, 0, - 145, 86, 159, 205, 47, 33, 203, 200, - 0, 0, 0, 0, 0, 0, 0, 0, - 
1, 0, 0, 0, 39, 0, 0, 0, - 8, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 8, 0, 0, 0, 3, 0, 1, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 16, 0, 0, 0, 3, 0, 1, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 1, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 18, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 3, 0, 0, 0, 0, 0, - 212, 62, 10, 208, 108, 40, 255, 132, - 0, 0, 0, 0, 0, 0, 0, 0, - 16, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, } -}; -::capnp::word const* const bp_ecafa18b482da3aa = b_ecafa18b482da3aa.words; -#if !CAPNP_LITE -static const ::capnp::_::RawSchema* const d_ecafa18b482da3aa[] = { - &s_c8cb212fcd9f5691, - &s_f76fba59183073a5, -}; -static const uint16_t m_ecafa18b482da3aa[] = {0, 1}; -static const uint16_t i_ecafa18b482da3aa[] = {0, 1}; -KJ_CONSTEXPR(const) ::capnp::_::RawBrandedSchema::Dependency bd_ecafa18b482da3aa[] = { - { 16777216, ::capnp::Persistent< ::capnp::AnyPointer, ::capnp::AnyPointer>::_capnpPrivate::brand() }, - { 16777217, ::capnp::Persistent< ::capnp::AnyPointer, ::capnp::AnyPointer>::SaveParams::_capnpPrivate::brand() }, -}; -const ::capnp::_::RawSchema s_ecafa18b482da3aa = { - 0xecafa18b482da3aa, b_ecafa18b482da3aa.words, 86, d_ecafa18b482da3aa, m_ecafa18b482da3aa, - 2, 2, i_ecafa18b482da3aa, nullptr, nullptr, { &s_ecafa18b482da3aa, nullptr, bd_ecafa18b482da3aa, 0, sizeof(bd_ecafa18b482da3aa) / sizeof(bd_ecafa18b482da3aa[0]), nullptr } -}; -#endif // !CAPNP_LITE static const ::capnp::_::AlignedData<22> b_f622595091cafb67 = { { 0, 0, 0, 0, 5, 0, 6, 0, 103, 251, 202, 145, 80, 89, 34, 246, diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp.h index d481a426b96..60ea65b24fe 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/persistent.capnp.h @@ -9,20 +9,19 @@ #include #endif // !CAPNP_LITE -#if CAPNP_VERSION != 7000 +#if CAPNP_VERSION != 9001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." 
#endif +CAPNP_BEGIN_HEADER + namespace capnp { namespace schemas { CAPNP_DECLARE_SCHEMA(c8cb212fcd9f5691); CAPNP_DECLARE_SCHEMA(f76fba59183073a5); CAPNP_DECLARE_SCHEMA(b76848c18c40efbf); -CAPNP_DECLARE_SCHEMA(84ff286cd00a3ed4); -CAPNP_DECLARE_SCHEMA(f0c2cc1d3909574d); -CAPNP_DECLARE_SCHEMA(ecafa18b482da3aa); CAPNP_DECLARE_SCHEMA(f622595091cafb67); } // namespace schemas @@ -92,70 +91,6 @@ struct Persistent::SaveResults { }; }; -template -struct RealmGateway { - RealmGateway() = delete; - -#if !CAPNP_LITE - class Client; - class Server; -#endif // !CAPNP_LITE - - struct ImportParams; - struct ExportParams; - - #if !CAPNP_LITE - struct _capnpPrivate { - CAPNP_DECLARE_INTERFACE_HEADER(84ff286cd00a3ed4) - static const ::capnp::_::RawBrandedSchema::Scope brandScopes[]; - static const ::capnp::_::RawBrandedSchema::Binding brandBindings[]; - static const ::capnp::_::RawBrandedSchema::Dependency brandDependencies[]; - static const ::capnp::_::RawBrandedSchema specificBrand; - static constexpr ::capnp::_::RawBrandedSchema const* brand() { return ::capnp::_::ChooseBrand<_capnpPrivate, InternalRef, ExternalRef, InternalOwner, ExternalOwner>::brand(); } - }; - #endif // !CAPNP_LITE -}; - -template -struct RealmGateway::ImportParams { - ImportParams() = delete; - - class Reader; - class Builder; - class Pipeline; - - struct _capnpPrivate { - CAPNP_DECLARE_STRUCT_HEADER(f0c2cc1d3909574d, 0, 2) - #if !CAPNP_LITE - static const ::capnp::_::RawBrandedSchema::Scope brandScopes[]; - static const ::capnp::_::RawBrandedSchema::Binding brandBindings[]; - static const ::capnp::_::RawBrandedSchema::Dependency brandDependencies[]; - static const ::capnp::_::RawBrandedSchema specificBrand; - static constexpr ::capnp::_::RawBrandedSchema const* brand() { return ::capnp::_::ChooseBrand<_capnpPrivate, InternalRef, ExternalRef, InternalOwner, ExternalOwner>::brand(); } - #endif // !CAPNP_LITE - }; -}; - -template -struct RealmGateway::ExportParams { - ExportParams() = delete; - - class Reader; - class Builder; - class Pipeline; - - struct _capnpPrivate { - CAPNP_DECLARE_STRUCT_HEADER(ecafa18b482da3aa, 0, 2) - #if !CAPNP_LITE - static const ::capnp::_::RawBrandedSchema::Scope brandScopes[]; - static const ::capnp::_::RawBrandedSchema::Binding brandBindings[]; - static const ::capnp::_::RawBrandedSchema::Dependency brandDependencies[]; - static const ::capnp::_::RawBrandedSchema specificBrand; - static constexpr ::capnp::_::RawBrandedSchema const* brand() { return ::capnp::_::ChooseBrand<_capnpPrivate, InternalRef, ExternalRef, InternalOwner, ExternalOwner>::brand(); } - #endif // !CAPNP_LITE - }; -}; - // ======================================================================================= #if !CAPNP_LITE @@ -196,7 +131,8 @@ class Persistent::Server public: typedef Persistent Serves; - ::kj::Promise dispatchCall(uint64_t interfaceId, uint16_t methodId, + ::capnp::Capability::Server::DispatchCallResult dispatchCall( + uint64_t interfaceId, uint16_t methodId, ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context) override; @@ -209,7 +145,8 @@ class Persistent::Server .template castAs< ::capnp::Persistent>(); } - ::kj::Promise dispatchCallInternal(uint16_t methodId, + ::capnp::Capability::Server::DispatchCallResult dispatchCallInternal( + uint16_t methodId, ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context); }; #endif // !CAPNP_LITE @@ -406,288 +343,6 @@ class Persistent::SaveResults::Pipeline { }; #endif // !CAPNP_LITE -#if !CAPNP_LITE -template -class RealmGateway::Client - : 
public virtual ::capnp::Capability::Client { -public: - typedef RealmGateway Calls; - typedef RealmGateway Reads; - - Client(decltype(nullptr)); - explicit Client(::kj::Own< ::capnp::ClientHook>&& hook); - template ()>> - Client(::kj::Own<_t>&& server); - template ()>> - Client(::kj::Promise<_t>&& promise); - Client(::kj::Exception&& exception); - Client(Client&) = default; - Client(Client&&) = default; - Client& operator=(Client& other); - Client& operator=(Client&& other); - - template - typename RealmGateway::Client asGeneric() { - return castAs>(); - } - - CAPNP_AUTO_IF_MSVC(::capnp::Request::ImportParams, typename ::capnp::Persistent::SaveResults>) importRequest( - ::kj::Maybe< ::capnp::MessageSize> sizeHint = nullptr); - CAPNP_AUTO_IF_MSVC(::capnp::Request::ExportParams, typename ::capnp::Persistent::SaveResults>) exportRequest( - ::kj::Maybe< ::capnp::MessageSize> sizeHint = nullptr); - -protected: - Client() = default; -}; - -template -class RealmGateway::Server - : public virtual ::capnp::Capability::Server { -public: - typedef RealmGateway Serves; - - ::kj::Promise dispatchCall(uint64_t interfaceId, uint16_t methodId, - ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context) - override; - -protected: - typedef typename ::capnp::RealmGateway::ImportParams ImportParams; - typedef ::capnp::CallContext::SaveResults> ImportContext; - virtual ::kj::Promise import(ImportContext context); - typedef typename ::capnp::RealmGateway::ExportParams ExportParams; - typedef ::capnp::CallContext::SaveResults> ExportContext; - virtual ::kj::Promise export_(ExportContext context); - - inline typename ::capnp::RealmGateway::Client thisCap() { - return ::capnp::Capability::Server::thisCap() - .template castAs< ::capnp::RealmGateway>(); - } - - ::kj::Promise dispatchCallInternal(uint16_t methodId, - ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context); -}; -#endif // !CAPNP_LITE - -template -class RealmGateway::ImportParams::Reader { -public: - typedef ImportParams Reads; - - Reader() = default; - inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} - - inline ::capnp::MessageSize totalSize() const { - return _reader.totalSize().asPublic(); - } - -#if !CAPNP_LITE - inline ::kj::StringTree toString() const { - return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); - } -#endif // !CAPNP_LITE - - template - typename RealmGateway::ImportParams::Reader asRealmGatewayGeneric() { - return typename RealmGateway::ImportParams::Reader(_reader); - } - - inline bool hasCap() const; -#if !CAPNP_LITE - inline typename ::capnp::Persistent::Client getCap() const; -#endif // !CAPNP_LITE - - inline bool hasParams() const; - inline typename ::capnp::Persistent::SaveParams::Reader getParams() const; - -private: - ::capnp::_::StructReader _reader; - template - friend struct ::capnp::ToDynamic_; - template - friend struct ::capnp::_::PointerHelpers; - template - friend struct ::capnp::List; - friend class ::capnp::MessageBuilder; - friend class ::capnp::Orphanage; -}; - -template -class RealmGateway::ImportParams::Builder { -public: - typedef ImportParams Builds; - - Builder() = delete; // Deleted to discourage incorrect usage. - // You can explicitly initialize to nullptr instead. 
- inline Builder(decltype(nullptr)) {} - inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} - inline operator Reader() const { return Reader(_builder.asReader()); } - inline Reader asReader() const { return *this; } - - inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } -#if !CAPNP_LITE - inline ::kj::StringTree toString() const { return asReader().toString(); } -#endif // !CAPNP_LITE - - template - typename RealmGateway::ImportParams::Builder asRealmGatewayGeneric() { - return typename RealmGateway::ImportParams::Builder(_builder); - } - - inline bool hasCap(); -#if !CAPNP_LITE - inline typename ::capnp::Persistent::Client getCap(); - inline void setCap(typename ::capnp::Persistent::Client&& value); - inline void setCap(typename ::capnp::Persistent::Client& value); - inline void adoptCap(::capnp::Orphan< ::capnp::Persistent>&& value); - inline ::capnp::Orphan< ::capnp::Persistent> disownCap(); -#endif // !CAPNP_LITE - - inline bool hasParams(); - inline typename ::capnp::Persistent::SaveParams::Builder getParams(); - inline void setParams(typename ::capnp::Persistent::SaveParams::Reader value); - inline typename ::capnp::Persistent::SaveParams::Builder initParams(); - inline void adoptParams(::capnp::Orphan::SaveParams>&& value); - inline ::capnp::Orphan::SaveParams> disownParams(); - -private: - ::capnp::_::StructBuilder _builder; - template - friend struct ::capnp::ToDynamic_; - friend class ::capnp::Orphanage; - template - friend struct ::capnp::_::PointerHelpers; -}; - -#if !CAPNP_LITE -template -class RealmGateway::ImportParams::Pipeline { -public: - typedef ImportParams Pipelines; - - inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} - inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) - : _typeless(kj::mv(typeless)) {} - - inline typename ::capnp::Persistent::Client getCap(); - inline typename ::capnp::Persistent::SaveParams::Pipeline getParams(); -private: - ::capnp::AnyPointer::Pipeline _typeless; - friend class ::capnp::PipelineHook; - template - friend struct ::capnp::ToDynamic_; -}; -#endif // !CAPNP_LITE - -template -class RealmGateway::ExportParams::Reader { -public: - typedef ExportParams Reads; - - Reader() = default; - inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} - - inline ::capnp::MessageSize totalSize() const { - return _reader.totalSize().asPublic(); - } - -#if !CAPNP_LITE - inline ::kj::StringTree toString() const { - return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); - } -#endif // !CAPNP_LITE - - template - typename RealmGateway::ExportParams::Reader asRealmGatewayGeneric() { - return typename RealmGateway::ExportParams::Reader(_reader); - } - - inline bool hasCap() const; -#if !CAPNP_LITE - inline typename ::capnp::Persistent::Client getCap() const; -#endif // !CAPNP_LITE - - inline bool hasParams() const; - inline typename ::capnp::Persistent::SaveParams::Reader getParams() const; - -private: - ::capnp::_::StructReader _reader; - template - friend struct ::capnp::ToDynamic_; - template - friend struct ::capnp::_::PointerHelpers; - template - friend struct ::capnp::List; - friend class ::capnp::MessageBuilder; - friend class ::capnp::Orphanage; -}; - -template -class RealmGateway::ExportParams::Builder { -public: - typedef ExportParams Builds; - - Builder() = delete; // Deleted to discourage incorrect usage. - // You can explicitly initialize to nullptr instead. 
- inline Builder(decltype(nullptr)) {} - inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} - inline operator Reader() const { return Reader(_builder.asReader()); } - inline Reader asReader() const { return *this; } - - inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } -#if !CAPNP_LITE - inline ::kj::StringTree toString() const { return asReader().toString(); } -#endif // !CAPNP_LITE - - template - typename RealmGateway::ExportParams::Builder asRealmGatewayGeneric() { - return typename RealmGateway::ExportParams::Builder(_builder); - } - - inline bool hasCap(); -#if !CAPNP_LITE - inline typename ::capnp::Persistent::Client getCap(); - inline void setCap(typename ::capnp::Persistent::Client&& value); - inline void setCap(typename ::capnp::Persistent::Client& value); - inline void adoptCap(::capnp::Orphan< ::capnp::Persistent>&& value); - inline ::capnp::Orphan< ::capnp::Persistent> disownCap(); -#endif // !CAPNP_LITE - - inline bool hasParams(); - inline typename ::capnp::Persistent::SaveParams::Builder getParams(); - inline void setParams(typename ::capnp::Persistent::SaveParams::Reader value); - inline typename ::capnp::Persistent::SaveParams::Builder initParams(); - inline void adoptParams(::capnp::Orphan::SaveParams>&& value); - inline ::capnp::Orphan::SaveParams> disownParams(); - -private: - ::capnp::_::StructBuilder _builder; - template - friend struct ::capnp::ToDynamic_; - friend class ::capnp::Orphanage; - template - friend struct ::capnp::_::PointerHelpers; -}; - -#if !CAPNP_LITE -template -class RealmGateway::ExportParams::Pipeline { -public: - typedef ExportParams Pipelines; - - inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} - inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) - : _typeless(kj::mv(typeless)) {} - - inline typename ::capnp::Persistent::Client getCap(); - inline typename ::capnp::Persistent::SaveParams::Pipeline getParams(); -private: - ::capnp::AnyPointer::Pipeline _typeless; - friend class ::capnp::PipelineHook; - template - friend struct ::capnp::ToDynamic_; -}; -#endif // !CAPNP_LITE - // ======================================================================================= #if !CAPNP_LITE @@ -893,7 +548,7 @@ ::kj::Promise Persistent::Server::save(SaveContext) { 0xc8cb212fcd9f5691ull, 0); } template -::kj::Promise Persistent::Server::dispatchCall( +::capnp::Capability::Server::DispatchCallResult Persistent::Server::dispatchCall( uint64_t interfaceId, uint16_t methodId, ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context) { switch (interfaceId) { @@ -904,13 +559,16 @@ ::kj::Promise Persistent::Server::dispatchCall( } } template -::kj::Promise Persistent::Server::dispatchCallInternal( +::capnp::Capability::Server::DispatchCallResult Persistent::Server::dispatchCallInternal( uint16_t methodId, ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context) { switch (methodId) { case 0: - return save(::capnp::Capability::Server::internalGetTypedContext< - typename ::capnp::Persistent::SaveParams, typename ::capnp::Persistent::SaveResults>(context)); + return { + save(::capnp::Capability::Server::internalGetTypedContext< + typename ::capnp::Persistent::SaveParams, typename ::capnp::Persistent::SaveResults>(context)), + false + }; default: (void)context; return ::capnp::Capability::Server::internalUnimplemented( @@ -947,381 +605,7 @@ const ::capnp::_::RawBrandedSchema Persistent::_capnpPrivate:: }; #endif // !CAPNP_LITE -#if !CAPNP_LITE -template -inline 
RealmGateway::Client::Client(decltype(nullptr)) - : ::capnp::Capability::Client(nullptr) {} -template -inline RealmGateway::Client::Client( - ::kj::Own< ::capnp::ClientHook>&& hook) - : ::capnp::Capability::Client(::kj::mv(hook)) {} -template -template -inline RealmGateway::Client::Client(::kj::Own<_t>&& server) - : ::capnp::Capability::Client(::kj::mv(server)) {} -template -template -inline RealmGateway::Client::Client(::kj::Promise<_t>&& promise) - : ::capnp::Capability::Client(::kj::mv(promise)) {} -template -inline RealmGateway::Client::Client(::kj::Exception&& exception) - : ::capnp::Capability::Client(::kj::mv(exception)) {} -template -inline typename ::capnp::RealmGateway::Client& RealmGateway::Client::operator=(Client& other) { - ::capnp::Capability::Client::operator=(other); - return *this; -} -template -inline typename ::capnp::RealmGateway::Client& RealmGateway::Client::operator=(Client&& other) { - ::capnp::Capability::Client::operator=(kj::mv(other)); - return *this; -} - -#endif // !CAPNP_LITE -template -inline bool RealmGateway::ImportParams::Reader::hasCap() const { - return !_reader.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); -} -template -inline bool RealmGateway::ImportParams::Builder::hasCap() { - return !_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); -} -#if !CAPNP_LITE -template -inline typename ::capnp::Persistent::Client RealmGateway::ImportParams::Reader::getCap() const { - return ::capnp::_::PointerHelpers< ::capnp::Persistent>::get(_reader.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS)); -} -template -inline typename ::capnp::Persistent::Client RealmGateway::ImportParams::Builder::getCap() { - return ::capnp::_::PointerHelpers< ::capnp::Persistent>::get(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS)); -} -template -inline typename ::capnp::Persistent::Client RealmGateway::ImportParams::Pipeline::getCap() { - return typename ::capnp::Persistent::Client(_typeless.getPointerField(0).asCap()); -} -template -inline void RealmGateway::ImportParams::Builder::setCap(typename ::capnp::Persistent::Client&& cap) { - ::capnp::_::PointerHelpers< ::capnp::Persistent>::set(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(cap)); -} -template -inline void RealmGateway::ImportParams::Builder::setCap(typename ::capnp::Persistent::Client& cap) { - ::capnp::_::PointerHelpers< ::capnp::Persistent>::set(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS), cap); -} -template -inline void RealmGateway::ImportParams::Builder::adoptCap( - ::capnp::Orphan< ::capnp::Persistent>&& value) { - ::capnp::_::PointerHelpers< ::capnp::Persistent>::adopt(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); -} -template -inline ::capnp::Orphan< ::capnp::Persistent> RealmGateway::ImportParams::Builder::disownCap() { - return ::capnp::_::PointerHelpers< ::capnp::Persistent>::disown(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS)); -} -#endif // !CAPNP_LITE - -template -inline bool RealmGateway::ImportParams::Reader::hasParams() const { - return !_reader.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); -} -template -inline bool RealmGateway::ImportParams::Builder::hasParams() { - return !_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); -} -template -inline typename ::capnp::Persistent::SaveParams::Reader 
RealmGateway::ImportParams::Reader::getParams() const { - return ::capnp::_::PointerHelpers::SaveParams>::get(_reader.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS)); -} -template -inline typename ::capnp::Persistent::SaveParams::Builder RealmGateway::ImportParams::Builder::getParams() { - return ::capnp::_::PointerHelpers::SaveParams>::get(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS)); -} -#if !CAPNP_LITE -template -inline typename ::capnp::Persistent::SaveParams::Pipeline RealmGateway::ImportParams::Pipeline::getParams() { - return typename ::capnp::Persistent::SaveParams::Pipeline(_typeless.getPointerField(1)); -} -#endif // !CAPNP_LITE -template -inline void RealmGateway::ImportParams::Builder::setParams(typename ::capnp::Persistent::SaveParams::Reader value) { - ::capnp::_::PointerHelpers::SaveParams>::set(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS), value); -} -template -inline typename ::capnp::Persistent::SaveParams::Builder RealmGateway::ImportParams::Builder::initParams() { - return ::capnp::_::PointerHelpers::SaveParams>::init(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS)); -} -template -inline void RealmGateway::ImportParams::Builder::adoptParams( - ::capnp::Orphan::SaveParams>&& value) { - ::capnp::_::PointerHelpers::SaveParams>::adopt(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); -} -template -inline ::capnp::Orphan::SaveParams> RealmGateway::ImportParams::Builder::disownParams() { - return ::capnp::_::PointerHelpers::SaveParams>::disown(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS)); -} - -// RealmGateway::ImportParams -template -constexpr uint16_t RealmGateway::ImportParams::_capnpPrivate::dataWordSize; -template -constexpr uint16_t RealmGateway::ImportParams::_capnpPrivate::pointerCount; -#if !CAPNP_LITE -template -constexpr ::capnp::Kind RealmGateway::ImportParams::_capnpPrivate::kind; -template -constexpr ::capnp::_::RawSchema const* RealmGateway::ImportParams::_capnpPrivate::schema; -template -const ::capnp::_::RawBrandedSchema::Scope RealmGateway::ImportParams::_capnpPrivate::brandScopes[] = { - { 0x84ff286cd00a3ed4, brandBindings + 0, 4, false}, -}; -template -const ::capnp::_::RawBrandedSchema::Binding RealmGateway::ImportParams::_capnpPrivate::brandBindings[] = { - ::capnp::_::brandBindingFor(), - ::capnp::_::brandBindingFor(), - ::capnp::_::brandBindingFor(), - ::capnp::_::brandBindingFor(), -}; -template -const ::capnp::_::RawBrandedSchema::Dependency RealmGateway::ImportParams::_capnpPrivate::brandDependencies[] = { - { 16777216, ::capnp::Persistent::_capnpPrivate::brand() }, - { 16777217, ::capnp::Persistent::SaveParams::_capnpPrivate::brand() }, -}; -template -const ::capnp::_::RawBrandedSchema RealmGateway::ImportParams::_capnpPrivate::specificBrand = { - &::capnp::schemas::s_f0c2cc1d3909574d, brandScopes, brandDependencies, - 1, 2, nullptr -}; -#endif // !CAPNP_LITE - -template -inline bool RealmGateway::ExportParams::Reader::hasCap() const { - return !_reader.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); -} -template -inline bool RealmGateway::ExportParams::Builder::hasCap() { - return !_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); -} -#if !CAPNP_LITE -template -inline typename ::capnp::Persistent::Client RealmGateway::ExportParams::Reader::getCap() const { - return ::capnp::_::PointerHelpers< 
::capnp::Persistent>::get(_reader.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS)); -} -template -inline typename ::capnp::Persistent::Client RealmGateway::ExportParams::Builder::getCap() { - return ::capnp::_::PointerHelpers< ::capnp::Persistent>::get(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS)); -} -template -inline typename ::capnp::Persistent::Client RealmGateway::ExportParams::Pipeline::getCap() { - return typename ::capnp::Persistent::Client(_typeless.getPointerField(0).asCap()); -} -template -inline void RealmGateway::ExportParams::Builder::setCap(typename ::capnp::Persistent::Client&& cap) { - ::capnp::_::PointerHelpers< ::capnp::Persistent>::set(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(cap)); -} -template -inline void RealmGateway::ExportParams::Builder::setCap(typename ::capnp::Persistent::Client& cap) { - ::capnp::_::PointerHelpers< ::capnp::Persistent>::set(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS), cap); -} -template -inline void RealmGateway::ExportParams::Builder::adoptCap( - ::capnp::Orphan< ::capnp::Persistent>&& value) { - ::capnp::_::PointerHelpers< ::capnp::Persistent>::adopt(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); -} -template -inline ::capnp::Orphan< ::capnp::Persistent> RealmGateway::ExportParams::Builder::disownCap() { - return ::capnp::_::PointerHelpers< ::capnp::Persistent>::disown(_builder.getPointerField( - ::capnp::bounded<0>() * ::capnp::POINTERS)); -} -#endif // !CAPNP_LITE - -template -inline bool RealmGateway::ExportParams::Reader::hasParams() const { - return !_reader.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); -} -template -inline bool RealmGateway::ExportParams::Builder::hasParams() { - return !_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); -} -template -inline typename ::capnp::Persistent::SaveParams::Reader RealmGateway::ExportParams::Reader::getParams() const { - return ::capnp::_::PointerHelpers::SaveParams>::get(_reader.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS)); -} -template -inline typename ::capnp::Persistent::SaveParams::Builder RealmGateway::ExportParams::Builder::getParams() { - return ::capnp::_::PointerHelpers::SaveParams>::get(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS)); -} -#if !CAPNP_LITE -template -inline typename ::capnp::Persistent::SaveParams::Pipeline RealmGateway::ExportParams::Pipeline::getParams() { - return typename ::capnp::Persistent::SaveParams::Pipeline(_typeless.getPointerField(1)); -} -#endif // !CAPNP_LITE -template -inline void RealmGateway::ExportParams::Builder::setParams(typename ::capnp::Persistent::SaveParams::Reader value) { - ::capnp::_::PointerHelpers::SaveParams>::set(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS), value); -} -template -inline typename ::capnp::Persistent::SaveParams::Builder RealmGateway::ExportParams::Builder::initParams() { - return ::capnp::_::PointerHelpers::SaveParams>::init(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS)); -} -template -inline void RealmGateway::ExportParams::Builder::adoptParams( - ::capnp::Orphan::SaveParams>&& value) { - ::capnp::_::PointerHelpers::SaveParams>::adopt(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); -} -template -inline ::capnp::Orphan::SaveParams> RealmGateway::ExportParams::Builder::disownParams() { - 
return ::capnp::_::PointerHelpers::SaveParams>::disown(_builder.getPointerField( - ::capnp::bounded<1>() * ::capnp::POINTERS)); -} - -// RealmGateway::ExportParams -template -constexpr uint16_t RealmGateway::ExportParams::_capnpPrivate::dataWordSize; -template -constexpr uint16_t RealmGateway::ExportParams::_capnpPrivate::pointerCount; -#if !CAPNP_LITE -template -constexpr ::capnp::Kind RealmGateway::ExportParams::_capnpPrivate::kind; -template -constexpr ::capnp::_::RawSchema const* RealmGateway::ExportParams::_capnpPrivate::schema; -template -const ::capnp::_::RawBrandedSchema::Scope RealmGateway::ExportParams::_capnpPrivate::brandScopes[] = { - { 0x84ff286cd00a3ed4, brandBindings + 0, 4, false}, -}; -template -const ::capnp::_::RawBrandedSchema::Binding RealmGateway::ExportParams::_capnpPrivate::brandBindings[] = { - ::capnp::_::brandBindingFor(), - ::capnp::_::brandBindingFor(), - ::capnp::_::brandBindingFor(), - ::capnp::_::brandBindingFor(), -}; -template -const ::capnp::_::RawBrandedSchema::Dependency RealmGateway::ExportParams::_capnpPrivate::brandDependencies[] = { - { 16777216, ::capnp::Persistent::_capnpPrivate::brand() }, - { 16777217, ::capnp::Persistent::SaveParams::_capnpPrivate::brand() }, -}; -template -const ::capnp::_::RawBrandedSchema RealmGateway::ExportParams::_capnpPrivate::specificBrand = { - &::capnp::schemas::s_ecafa18b482da3aa, brandScopes, brandDependencies, - 1, 2, nullptr -}; -#endif // !CAPNP_LITE - -#if !CAPNP_LITE -template -CAPNP_AUTO_IF_MSVC(::capnp::Request::ImportParams, typename ::capnp::Persistent::SaveResults>) -RealmGateway::Client::importRequest(::kj::Maybe< ::capnp::MessageSize> sizeHint) { - return newCall::ImportParams, typename ::capnp::Persistent::SaveResults>( - 0x84ff286cd00a3ed4ull, 0, sizeHint); -} -template -::kj::Promise RealmGateway::Server::import(ImportContext) { - return ::capnp::Capability::Server::internalUnimplemented( - "capnp/persistent.capnp:RealmGateway", "import", - 0x84ff286cd00a3ed4ull, 0); -} -template -CAPNP_AUTO_IF_MSVC(::capnp::Request::ExportParams, typename ::capnp::Persistent::SaveResults>) -RealmGateway::Client::exportRequest(::kj::Maybe< ::capnp::MessageSize> sizeHint) { - return newCall::ExportParams, typename ::capnp::Persistent::SaveResults>( - 0x84ff286cd00a3ed4ull, 1, sizeHint); -} -template -::kj::Promise RealmGateway::Server::export_(ExportContext) { - return ::capnp::Capability::Server::internalUnimplemented( - "capnp/persistent.capnp:RealmGateway", "export", - 0x84ff286cd00a3ed4ull, 1); -} -template -::kj::Promise RealmGateway::Server::dispatchCall( - uint64_t interfaceId, uint16_t methodId, - ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context) { - switch (interfaceId) { - case 0x84ff286cd00a3ed4ull: - return dispatchCallInternal(methodId, context); - default: - return internalUnimplemented("capnp/persistent.capnp:RealmGateway", interfaceId); - } -} -template -::kj::Promise RealmGateway::Server::dispatchCallInternal( - uint16_t methodId, - ::capnp::CallContext< ::capnp::AnyPointer, ::capnp::AnyPointer> context) { - switch (methodId) { - case 0: - return import(::capnp::Capability::Server::internalGetTypedContext< - typename ::capnp::RealmGateway::ImportParams, typename ::capnp::Persistent::SaveResults>(context)); - case 1: - return export_(::capnp::Capability::Server::internalGetTypedContext< - typename ::capnp::RealmGateway::ExportParams, typename ::capnp::Persistent::SaveResults>(context)); - default: - (void)context; - return ::capnp::Capability::Server::internalUnimplemented( - 
"capnp/persistent.capnp:RealmGateway", - 0x84ff286cd00a3ed4ull, methodId); - } -} -#endif // !CAPNP_LITE - -// RealmGateway -#if !CAPNP_LITE -template -constexpr ::capnp::Kind RealmGateway::_capnpPrivate::kind; -template -constexpr ::capnp::_::RawSchema const* RealmGateway::_capnpPrivate::schema; -template -const ::capnp::_::RawBrandedSchema::Scope RealmGateway::_capnpPrivate::brandScopes[] = { - { 0x84ff286cd00a3ed4, brandBindings + 0, 4, false}, -}; -template -const ::capnp::_::RawBrandedSchema::Binding RealmGateway::_capnpPrivate::brandBindings[] = { - ::capnp::_::brandBindingFor(), - ::capnp::_::brandBindingFor(), - ::capnp::_::brandBindingFor(), - ::capnp::_::brandBindingFor(), -}; -template -const ::capnp::_::RawBrandedSchema::Dependency RealmGateway::_capnpPrivate::brandDependencies[] = { - { 33554432, ::capnp::RealmGateway::ImportParams::_capnpPrivate::brand() }, - { 33554433, ::capnp::RealmGateway::ExportParams::_capnpPrivate::brand() }, - { 50331648, ::capnp::Persistent::SaveResults::_capnpPrivate::brand() }, - { 50331649, ::capnp::Persistent::SaveResults::_capnpPrivate::brand() }, -}; -template -const ::capnp::_::RawBrandedSchema RealmGateway::_capnpPrivate::specificBrand = { - &::capnp::schemas::s_84ff286cd00a3ed4, brandScopes, brandDependencies, - 1, 4, nullptr -}; -#endif // !CAPNP_LITE - } // namespace +CAPNP_END_HEADER + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/pointer-helpers.h b/libs/EXTERNAL/capnproto/c++/src/capnp/pointer-helpers.h index ea95b63f492..c5ce574527d 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/pointer-helpers.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/pointer-helpers.h @@ -21,13 +21,11 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "layout.h" #include "list.h" +CAPNP_BEGIN_HEADER + namespace capnp { namespace _ { // private @@ -155,3 +153,5 @@ struct PointerHelpers { } // namespace _ (private) } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/pretty-print.h b/libs/EXTERNAL/capnproto/c++/src/capnp/pretty-print.h index cafc932438e..f3c6ced82f1 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/pretty-print.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/pretty-print.h @@ -21,13 +21,11 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "dynamic.h" #include +CAPNP_BEGIN_HEADER + namespace capnp { kj::StringTree prettyPrint(DynamicStruct::Reader value); @@ -42,3 +40,5 @@ kj::StringTree prettyPrint(DynamicList::Builder value); // any of the KJ debug macros, etc.). } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/raw-schema.h b/libs/EXTERNAL/capnproto/c++/src/capnp/raw-schema.h index 984e632e9eb..88101692a20 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/raw-schema.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/raw-schema.h @@ -21,16 +21,14 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "common.h" // for uint and friends -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #include #endif +CAPNP_BEGIN_HEADER + namespace capnp { namespace _ { // private @@ -147,7 +145,7 @@ struct RawBrandedSchema { // is required in particular when traversing the dependency list. RawSchemas for compiled-in // types are always initialized; only dynamically-loaded schemas may be lazy. 
-#if __GNUC__ +#if __GNUC__ || defined(__clang__) const Initializer* i = __atomic_load_n(&lazyInitializer, __ATOMIC_ACQUIRE); #elif _MSC_VER const Initializer* i = *static_cast(&lazyInitializer); @@ -213,7 +211,7 @@ struct RawSchema { // is required in particular when traversing the dependency list. RawSchemas for compiled-in // types are always initialized; only dynamically-loaded schemas may be lazy. -#if __GNUC__ +#if __GNUC__ || defined(__clang__) const Initializer* i = __atomic_load_n(&lazyInitializer, __ATOMIC_ACQUIRE); #elif _MSC_VER const Initializer* i = *static_cast(&lazyInitializer); @@ -237,3 +235,5 @@ inline bool RawBrandedSchema::isUnbound() const { } // namespace _ (private) } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/reconnect-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/reconnect-test.c++ new file mode 100644 index 00000000000..492f5489be3 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/reconnect-test.c++ @@ -0,0 +1,221 @@ +// Copyright (c) 2020 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +#include "reconnect.h" +#include "test-util.h" +#include +#include +#include +#include "rpc-twoparty.h" + +namespace capnp { +namespace _ { +namespace { + +class TestInterfaceImpl final: public test::TestInterface::Server { +public: + TestInterfaceImpl(uint generation): generation(generation) {} + + void setError(kj::Exception e) { + error = kj::mv(e); + } + + kj::Own> block() { + auto paf = kj::newPromiseAndFulfiller(); + blocker = paf.promise.fork(); + return kj::mv(paf.fulfiller); + } + +protected: + kj::Promise foo(FooContext context) override { + KJ_IF_MAYBE(e, error) { + return kj::cp(*e); + } + auto params = context.getParams(); + context.initResults().setX(kj::str(params.getI(), ' ', params.getJ(), ' ', generation)); + return blocker.addBranch(); + } + +private: + uint generation; + kj::Maybe error; + kj::ForkedPromise blocker = kj::Promise(kj::READY_NOW).fork(); +}; + +void doAutoReconnectTest(kj::WaitScope& ws, + kj::Function wrapClient) { + TestInterfaceImpl* currentServer = nullptr; + uint connectCount = 0; + + test::TestInterface::Client client = wrapClient(autoReconnect([&]() { + auto server = kj::heap(connectCount++); + currentServer = server; + return test::TestInterface::Client(kj::mv(server)); + })); + + auto testPromise = [&](uint i, bool j) { + auto req = client.fooRequest(); + req.setI(i); + req.setJ(j); + return req.send(); + }; + + auto test = [&](uint i, bool j) { + return kj::str(testPromise(i, j).wait(ws).getX()); + }; + + KJ_EXPECT(test(123, true) == "123 true 0"); + + currentServer->setError(KJ_EXCEPTION(DISCONNECTED, "test1 disconnect")); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test1 disconnect", test(456, true)); + + KJ_EXPECT(test(789, false) == "789 false 1"); + KJ_EXPECT(test(21, true) == "21 true 1"); + + { + // We cause two disconnect promises to be thrown concurrently. This should only cause the + // reconnector to reconnect once, not twice. + auto fulfiller = currentServer->block(); + auto promise1 = testPromise(32, false); + auto promise2 = testPromise(43, true); + KJ_EXPECT(!promise1.poll(ws)); + KJ_EXPECT(!promise2.poll(ws)); + fulfiller->reject(KJ_EXCEPTION(DISCONNECTED, "test2 disconnect")); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test2 disconnect", promise1.wait(ws)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test2 disconnect", promise2.wait(ws)); + } + + KJ_EXPECT(test(43, false) == "43 false 2"); + + // Start a couple calls that will block at the server end, plus an unsent request. + auto fulfiller = currentServer->block(); + + auto promise1 = testPromise(1212, true); + auto promise2 = testPromise(3434, false); + auto req3 = client.fooRequest(); + req3.setI(5656); + req3.setJ(true); + KJ_EXPECT(!promise1.poll(ws)); + KJ_EXPECT(!promise2.poll(ws)); + + // Now force a reconnect. + currentServer->setError(KJ_EXCEPTION(DISCONNECTED, "test3 disconnect")); + + // Initiate a request that will fail with DISCONNECTED. + auto promise4 = testPromise(7878, false); + + // And throw away our capability entirely, just to make sure that anyone who needs it is holding + // onto their own ref. + client = nullptr; + + // Everything we initiated should still finish. + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test3 disconnect", promise4.wait(ws)); + + // Send the request which we created before the disconnect. There are two behaviors we accept + // as correct here: it may throw the disconnect exception, or it may automatically redirect to + // the newly-reconnected destination. 
+ req3.send().then([](Response resp) { + KJ_EXPECT(resp.getX() == "5656 true 3"); + }, [](kj::Exception e) { + KJ_EXPECT(e.getDescription().endsWith("test3 disconnect")); + }).wait(ws); + + KJ_EXPECT(!promise1.poll(ws)); + KJ_EXPECT(!promise2.poll(ws)); + fulfiller->fulfill(); + KJ_EXPECT(promise1.wait(ws).getX() == "1212 true 2"); + KJ_EXPECT(promise2.wait(ws).getX() == "3434 false 2"); +} + +KJ_TEST("autoReconnect() direct call (exercises newCall() / RequestHook)") { + kj::EventLoop loop; + kj::WaitScope ws(loop); + + doAutoReconnectTest(ws, [](auto c) {return kj::mv(c);}); +} + +KJ_TEST("autoReconnect() through RPC (exercises call() / CallContextHook)") { + kj::EventLoop loop; + kj::WaitScope ws(loop); + + auto paf = kj::newPromiseAndFulfiller(); + + auto pipe = kj::newTwoWayPipe(); + TwoPartyClient client(*pipe.ends[0]); + TwoPartyClient server(*pipe.ends[1], kj::mv(paf.promise), rpc::twoparty::Side::SERVER); + + doAutoReconnectTest(ws, [&](test::TestInterface::Client c) { + paf.fulfiller->fulfill(kj::mv(c)); + return client.bootstrap().castAs(); + }); +} + +KJ_TEST("lazyAutoReconnect() direct call (exercises newCall() / RequestHook)") { + kj::EventLoop loop; + kj::WaitScope ws(loop); + + doAutoReconnectTest(ws, [](auto c) {return kj::mv(c);}); +} + +KJ_TEST("lazyAutoReconnect() initialies lazily") { + kj::EventLoop loop; + kj::WaitScope ws(loop); + + int connectCount = 0; + TestInterfaceImpl* currentServer = nullptr; + auto connectCounter = [&]() { + auto server = kj::heap(connectCount++); + currentServer = server; + return test::TestInterface::Client(kj::mv(server)); + }; + + test::TestInterface::Client client = autoReconnect(connectCounter); + + auto test = [&](uint i, bool j) { + auto req = client.fooRequest(); + req.setI(i); + req.setJ(j); + return kj::str(req.send().wait(ws).getX()); + }; + + KJ_EXPECT(connectCount == 1); + KJ_EXPECT(test(123, true) == "123 true 0"); + KJ_EXPECT(connectCount == 1); + + client = lazyAutoReconnect(connectCounter); + KJ_EXPECT(connectCount == 1); + KJ_EXPECT(test(123, true) == "123 true 1"); + KJ_EXPECT(connectCount == 2); + KJ_EXPECT(test(234, false) == "234 false 1"); + KJ_EXPECT(connectCount == 2); + + currentServer->setError(KJ_EXCEPTION(DISCONNECTED, "test1 disconnect")); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test1 disconnect", test(345, true)); + + // lazyAutoReconnect is only lazy on the first request, not on reconnects. + KJ_EXPECT(connectCount == 3); + KJ_EXPECT(test(456, false) == "456 false 2"); + KJ_EXPECT(connectCount == 3); +} + +} // namespace +} // namespace _ +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/reconnect.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/reconnect.c++ new file mode 100644 index 00000000000..fe2bd07f8dc --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/reconnect.c++ @@ -0,0 +1,141 @@ +// Copyright (c) 2020 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "reconnect.h" + +namespace capnp { + +namespace { + +class ReconnectHook final: public ClientHook, public kj::Refcounted { +public: + ReconnectHook(kj::Function connectParam, bool lazy = false) + : connect(kj::mv(connectParam)), + current(lazy ? kj::Maybe>() : ClientHook::from(connect())) {} + + Request newCall( + uint64_t interfaceId, uint16_t methodId, kj::Maybe sizeHint) override { + auto result = getCurrent().newCall(interfaceId, methodId, sizeHint); + AnyPointer::Builder builder = result; + auto hook = kj::heap(kj::addRef(*this), RequestHook::from(kj::mv(result))); + return { builder, kj::mv(hook) }; + } + + VoidPromiseAndPipeline call(uint64_t interfaceId, uint16_t methodId, + kj::Own&& context) override { + auto result = getCurrent().call(interfaceId, methodId, kj::mv(context)); + wrap(result.promise); + return result; + } + + kj::Maybe getResolved() override { + // We can't let people resolve to the underlying capability because then we wouldn't be able + // to redirect them later. + return nullptr; + } + + kj::Maybe>> whenMoreResolved() override { + return nullptr; + } + + kj::Own addRef() override { + return kj::addRef(*this); + } + + const void* getBrand() override { + return nullptr; + } + + kj::Maybe getFd() override { + // It's not safe to return current->getFd() because normally callers wouldn't expect the FD to + // change or go away over time, but this one could whenever we reconnect. If there's a use + // case for being able to access the FD here, we'll need a different interface to do it. 
+ return nullptr; + } + +private: + kj::Function connect; + kj::Maybe> current; + uint generation = 0; + + template + void wrap(kj::Promise& promise) { + promise = promise.catch_( + [self = kj::addRef(*this), startGeneration = generation] + (kj::Exception&& exception) mutable -> kj::Promise { + if (exception.getType() == kj::Exception::Type::DISCONNECTED && + self->generation == startGeneration) { + self->generation++; + KJ_IF_MAYBE(e2, kj::runCatchingExceptions([&]() { + self->current = ClientHook::from(self->connect()); + })) { + self->current = newBrokenCap(kj::mv(*e2)); + } + } + return kj::mv(exception); + }); + } + + ClientHook& getCurrent() { + KJ_IF_MAYBE(c, current) { + return **c; + } else { + return *current.emplace(ClientHook::from(connect())); + } + } + + class RequestImpl final: public RequestHook { + public: + RequestImpl(kj::Own parent, kj::Own inner) + : parent(kj::mv(parent)), inner(kj::mv(inner)) {} + + RemotePromise send() override { + auto result = inner->send(); + parent->wrap(result); + return result; + } + + kj::Promise sendStreaming() override { + auto result = inner->sendStreaming(); + parent->wrap(result); + return result; + } + + const void* getBrand() override { + return nullptr; + } + + private: + kj::Own parent; + kj::Own inner; + }; +}; + +} // namespace + +Capability::Client autoReconnect(kj::Function connect) { + return Capability::Client(kj::refcounted(kj::mv(connect))); +} + +Capability::Client lazyAutoReconnect(kj::Function connect) { + return Capability::Client(kj::refcounted(kj::mv(connect), true)); +} +} // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/reconnect.h b/libs/EXTERNAL/capnproto/c++/src/capnp/reconnect.h new file mode 100644 index 00000000000..6f7d3d62d0d --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/reconnect.h @@ -0,0 +1,80 @@ +// Copyright (c) 2020 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once + +#include "capability.h" +#include + +CAPNP_BEGIN_HEADER + +namespace capnp { + +template +auto autoReconnect(ConnectFunc&& connect); +// Creates a capability that reconstructs itself every time it becomes disconnected. +// +// `connect()` is a function which is invoked to initially construct the capability, and then +// invoked again each time the capability is found to be disconnected. `connect()` may return +// any capability `Client` type. 
+// +// Example usage might look like: +// +// Foo::Client foo = autoReconnect([&rpcSystem, vatId]() { +// return rpcSystem.bootstrap(vatId).castAs().getFooRequest().send().getFoo(); +// }); +// +// The given function is initially called synchronously, and the returned `foo` is a wrapper +// around what the function returned. But any time this capability becomes disconnected, the +// function is invoked again, and future calls are directed to the new result. +// +// Any call that is in-flight when the capability becomes disconnected still fails with a +// DISCONNECTED exception. The caller should respond by retrying, as a retry will target the +// newly-reconnected capability. However, the caller should limit the number of times it retries, +// to avoid an infinite loop in the case that the DISCONNECTED exception actually represents a +// permanent problem. Consider using `kj::retryOnDisconnect()` to implement this behavior. + +template +auto lazyAutoReconnect(ConnectFunc&& connect); +// The same as autoReconnect, but doesn't call the provided connect function until the first +// time the capability is used. Note that only the initial connection is lazy -- upon +// disconnected errors this will still reconnect eagerly. + +// ======================================================================================= +// inline implementation details + +Capability::Client autoReconnect(kj::Function connect); +template +auto autoReconnect(ConnectFunc&& connect) { + return autoReconnect(kj::Function(kj::fwd(connect))) + .castAs>>(); +} + +Capability::Client lazyAutoReconnect(kj::Function connect); +template +auto lazyAutoReconnect(ConnectFunc&& connect) { + return lazyAutoReconnect(kj::Function(kj::fwd(connect))) + .castAs>>(); +} + +} // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-prelude.h b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-prelude.h index 0541e0b1b84..c6165d13233 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-prelude.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-prelude.h @@ -24,17 +24,16 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "capability.h" #include "persistent.capnp.h" +CAPNP_BEGIN_HEADER + namespace capnp { class OutgoingRpcMessage; class IncomingRpcMessage; +class RpcFlowController; template class RpcSystem; @@ -59,6 +58,7 @@ class VatNetworkBase { virtual kj::Promise>> receiveIncomingMessage() = 0; virtual kj::Promise shutdown() = 0; virtual AnyStruct::Reader baseGetPeerVatId() = 0; + virtual kj::Own newStream() = 0; }; virtual kj::Maybe> baseConnect(AnyStruct::Reader vatId) = 0; virtual kj::Promise> baseAccept() = 0; @@ -79,14 +79,16 @@ class RpcSystemBase { // Non-template version of RpcSystem. Ignore this class; see RpcSystem in rpc.h. 
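The reconnect.h header above documents autoReconnect() with an example; the lazy variant declared alongside it is used the same way. A sketch assuming the same placeholder names (Foo, rpcSystem, vatId) as that example:

// Sketch only: the connect lambda is not invoked until `foo` is first used;
// after a DISCONNECTED error, reconnection is still eager.
Foo::Client foo = capnp::lazyAutoReconnect([&rpcSystem, vatId]() {
  return rpcSystem.bootstrap(vatId).castAs<Foo>();
});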
public: - RpcSystemBase(VatNetworkBase& network, kj::Maybe bootstrapInterface, - kj::Maybe::Client> gateway); - RpcSystemBase(VatNetworkBase& network, BootstrapFactoryBase& bootstrapFactory, - kj::Maybe::Client> gateway); + RpcSystemBase(VatNetworkBase& network, kj::Maybe bootstrapInterface); + RpcSystemBase(VatNetworkBase& network, BootstrapFactoryBase& bootstrapFactory); RpcSystemBase(VatNetworkBase& network, SturdyRefRestorerBase& restorer); RpcSystemBase(RpcSystemBase&& other) noexcept; ~RpcSystemBase() noexcept(false); + void setTraceEncoder(kj::Function func); + + kj::Promise run(); + private: class Impl; kj::Own impl; @@ -99,29 +101,7 @@ class RpcSystemBase { friend class capnp::RpcSystem; }; -template struct InternalRefFromRealmGateway_; -template -struct InternalRefFromRealmGateway_> { - typedef InternalRef Type; -}; -template -using InternalRefFromRealmGateway = typename InternalRefFromRealmGateway_::Type; -template -using InternalRefFromRealmGatewayClient = InternalRefFromRealmGateway; - -template struct ExternalRefFromRealmGateway_; -template -struct ExternalRefFromRealmGateway_> { - typedef ExternalRef Type; -}; -template -using ExternalRefFromRealmGateway = typename ExternalRefFromRealmGateway_::Type; -template -using ExternalRefFromRealmGatewayClient = ExternalRefFromRealmGateway; - } // namespace _ (private) } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-test.c++ index 0ca78a8a249..6211d7e3649 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-test.c++ @@ -198,7 +198,7 @@ typedef VatNetwork< class TestNetworkAdapter final: public TestNetworkAdapterBase { public: - TestNetworkAdapter(TestNetwork& network): network(network) {} + TestNetworkAdapter(TestNetwork& network, kj::StringPtr self): network(network), self(self) {} ~TestNetworkAdapter() { kj::Exception exception = KJ_EXCEPTION(FAILED, "Network was destroyed."); @@ -210,6 +210,12 @@ public: uint getSentCount() { return sent; } uint getReceivedCount() { return received; } + void onSend(kj::Function callback) { + // Invokes the given callback every time a message is sent. Callback can return false to cause + // send() to do nothing. 
+ sendCallback = kj::mv(callback); + } + typedef TestNetworkAdapterBase::Connection Connection; class ConnectionImpl final @@ -246,6 +252,10 @@ public: return message.getRoot(); } + size_t sizeInWords() override { + return data.size(); + } + kj::Array data; FlatArrayMessageReader message; }; @@ -262,6 +272,8 @@ public: } void send() override { + if (!connection.network.sendCallback(message)) return; + if (connection.networkException != nullptr) { return; } @@ -291,6 +303,10 @@ public: }))); } + size_t sizeInWords() override { + return message.sizeInWords(); + } + private: ConnectionImpl& connection; MallocMessageBuilder message; @@ -354,6 +370,10 @@ public: }; kj::Maybe> connect(test::TestSturdyRefHostId::Reader hostId) override { + if (hostId.getHost() == self) { + return nullptr; + } + TestNetworkAdapter& dst = KJ_REQUIRE_NONNULL(network.find(hostId.getHost())); auto iter = connections.find(&dst); @@ -392,18 +412,21 @@ public: private: TestNetwork& network; + kj::StringPtr self; uint sent = 0; uint received = 0; std::map> connections; std::queue>>> fulfillerQueue; std::queue> connectionQueue; + + kj::Function sendCallback = [](MessageBuilder&) { return true; }; }; TestNetwork::~TestNetwork() noexcept(false) {} TestNetworkAdapter& TestNetwork::add(kj::StringPtr name) { - return *(map[name] = kj::heap(*this)); + return *(map[name] = kj::heap(*this, name)); } // ======================================================================================= @@ -448,21 +471,12 @@ struct TestContext { serverNetwork(network.add("server")), rpcClient(makeRpcClient(clientNetwork)), rpcServer(makeRpcServer(serverNetwork, restorer)) {} - TestContext(Capability::Client bootstrap, - RealmGateway::Client gateway) - : waitScope(loop), - clientNetwork(network.add("client")), - serverNetwork(network.add("server")), - rpcClient(makeRpcClient(clientNetwork, gateway)), - rpcServer(makeRpcServer(serverNetwork, bootstrap)) {} - TestContext(Capability::Client bootstrap, - RealmGateway::Client gateway, - bool) + TestContext(Capability::Client bootstrap) : waitScope(loop), clientNetwork(network.add("client")), serverNetwork(network.add("server")), rpcClient(makeRpcClient(clientNetwork)), - rpcServer(makeRpcServer(serverNetwork, bootstrap, gateway)) {} + rpcServer(makeRpcServer(serverNetwork, bootstrap)) {} Capability::Client connect(test::TestSturdyRefObjectId::Tag tag) { MallocMessageBuilder refMessage(128); @@ -552,6 +566,35 @@ TEST(Rpc, Pipelining) { EXPECT_EQ(1, chainedCallCount); } +KJ_TEST("RPC context.setPipeline") { + TestContext context; + + auto client = context.connect(test::TestSturdyRefObjectId::Tag::TEST_PIPELINE) + .castAs(); + + auto promise = client.getCapPipelineOnlyRequest().send(); + + auto pipelineRequest = promise.getOutBox().getCap().fooRequest(); + pipelineRequest.setI(321); + auto pipelinePromise = pipelineRequest.send(); + + auto pipelineRequest2 = promise.getOutBox().getCap().castAs().graultRequest(); + auto pipelinePromise2 = pipelineRequest2.send(); + + EXPECT_EQ(0, context.restorer.callCount); + + auto response = pipelinePromise.wait(context.waitScope); + EXPECT_EQ("bar", response.getX()); + + auto response2 = pipelinePromise2.wait(context.waitScope); + checkTestMessage(response2); + + EXPECT_EQ(3, context.restorer.callCount); + + // The original promise never completed. 
+ KJ_EXPECT(!promise.poll(context.waitScope)); +} + TEST(Rpc, Release) { TestContext context; @@ -633,16 +676,128 @@ TEST(Rpc, TailCall) { auto dependentCall1 = promise.getC().getCallSequenceRequest().send(); - auto dependentCall2 = response.getC().getCallSequenceRequest().send(); - EXPECT_EQ(0, dependentCall0.wait(context.waitScope).getN()); EXPECT_EQ(1, dependentCall1.wait(context.waitScope).getN()); + + // TODO(someday): We used to initiate dependentCall2 here before waiting on the first two calls, + // and the ordering was still "correct". But this was apparently by accident. Calling getC() on + // the final response returns a different capability from calling getC() on the promise. There + // are no guarantees on the ordering of calls on the response capability vs. the earlier + // promise. When ordering matters, applications should take the original promise capability and + // keep using that. In theory the RPC system could create continuity here, but it would be + // annoying: for each capability that had been fetched on the promise, it would need to + // traverse to the same capability in the final response and swap it out in-place for the + // pipelined cap returned earlier. Maybe we'll determine later that that's really needed but + // for now I'm not gonna do it. + auto dependentCall2 = response.getC().getCallSequenceRequest().send(); + EXPECT_EQ(2, dependentCall2.wait(context.waitScope).getN()); EXPECT_EQ(1, calleeCallCount); EXPECT_EQ(1, context.restorer.callCount); } +class TestHangingTailCallee final: public test::TestTailCallee::Server { +public: + TestHangingTailCallee(int& callCount, int& cancelCount) + : callCount(callCount), cancelCount(cancelCount) {} + + kj::Promise foo(FooContext context) override { + context.allowCancellation(); + ++callCount; + return kj::Promise(kj::NEVER_DONE) + .attach(kj::defer([&cancelCount = cancelCount]() { ++cancelCount; })); + } + +private: + int& callCount; + int& cancelCount; +}; + +class TestRacingTailCaller final: public test::TestTailCaller::Server { +public: + TestRacingTailCaller(kj::Promise unblock): unblock(kj::mv(unblock)) {} + + kj::Promise foo(FooContext context) override { + return unblock.then([context]() mutable { + auto tailRequest = context.getParams().getCallee().fooRequest(); + return context.tailCall(kj::mv(tailRequest)); + }); + } + +private: + kj::Promise unblock; +}; + +TEST(Rpc, TailCallCancel) { + TestContext context; + + auto caller = context.connect(test::TestSturdyRefObjectId::Tag::TEST_TAIL_CALLER) + .castAs(); + + int callCount = 0, cancelCount = 0; + + test::TestTailCallee::Client callee(kj::heap(callCount, cancelCount)); + + { + auto request = caller.fooRequest(); + request.setCallee(callee); + + auto promise = request.send(); + + KJ_ASSERT(callCount == 0); + KJ_ASSERT(cancelCount == 0); + + KJ_ASSERT(!promise.poll(context.waitScope)); + + KJ_ASSERT(callCount == 1); + KJ_ASSERT(cancelCount == 0); + } + + kj::Promise(kj::NEVER_DONE).poll(context.waitScope); + + KJ_ASSERT(callCount == 1); + KJ_ASSERT(cancelCount == 1); +} + +TEST(Rpc, TailCallCancelRace) { + auto paf = kj::newPromiseAndFulfiller(); + TestContext context(kj::heap(kj::mv(paf.promise))); + + MallocMessageBuilder serverHostIdBuilder; + auto serverHostId = serverHostIdBuilder.getRoot(); + serverHostId.setHost("server"); + + auto caller = context.rpcClient.bootstrap(serverHostId).castAs(); + + int callCount = 0, cancelCount = 0; + + test::TestTailCallee::Client callee(kj::heap(callCount, cancelCount)); + + { + auto request = caller.fooRequest(); + 
request.setCallee(callee); + + auto promise = request.send(); + + KJ_ASSERT(callCount == 0); + KJ_ASSERT(cancelCount == 0); + + KJ_ASSERT(!promise.poll(context.waitScope)); + + KJ_ASSERT(callCount == 0); + KJ_ASSERT(cancelCount == 0); + + // Unblock the server and at the same time cancel the client. + paf.fulfiller->fulfill(); + } + + kj::Promise(kj::NEVER_DONE).poll(context.waitScope); + + KJ_ASSERT(callCount == 1); + KJ_ASSERT(cancelCount == 1); +} + TEST(Rpc, Cancelation) { // Tests allowCancellation(). @@ -901,6 +1056,55 @@ TEST(Rpc, Embargo) { EXPECT_EQ(5, call5.wait(context.waitScope).getN()); } +TEST(Rpc, EmbargoUnwrap) { + // Test that embargos properly block unwraping a capability using CapabilityServerSet. + + TestContext context; + + capnp::CapabilityServerSet capSet; + + auto client = context.connect(test::TestSturdyRefObjectId::Tag::TEST_MORE_STUFF) + .castAs(); + + auto cap = capSet.add(kj::heap()); + + auto earlyCall = client.getCallSequenceRequest().send(); + + auto echoRequest = client.echoRequest(); + echoRequest.setCap(cap); + auto echo = echoRequest.send(); + + auto pipeline = echo.getCap(); + + auto unwrap = capSet.getLocalServer(pipeline) + .then([](kj::Maybe unwrapped) { + return kj::downcast(KJ_ASSERT_NONNULL(unwrapped)).getCount(); + }).eagerlyEvaluate(nullptr); + + auto call0 = getCallSequence(pipeline, 0); + auto call1 = getCallSequence(pipeline, 1); + + earlyCall.wait(context.waitScope); + + auto call2 = getCallSequence(pipeline, 2); + + auto resolved = echo.wait(context.waitScope).getCap(); + + auto call3 = getCallSequence(pipeline, 4); + auto call4 = getCallSequence(pipeline, 4); + auto call5 = getCallSequence(pipeline, 5); + + EXPECT_EQ(0, call0.wait(context.waitScope).getN()); + EXPECT_EQ(1, call1.wait(context.waitScope).getN()); + EXPECT_EQ(2, call2.wait(context.waitScope).getN()); + EXPECT_EQ(3, call3.wait(context.waitScope).getN()); + EXPECT_EQ(4, call4.wait(context.waitScope).getN()); + EXPECT_EQ(5, call5.wait(context.waitScope).getN()); + + uint unwrappedAt = unwrap.wait(context.waitScope); + KJ_EXPECT(unwrappedAt >= 3, unwrappedAt); +} + template void expectPromiseThrows(kj::Promise&& promise, kj::WaitScope& waitScope) { EXPECT_TRUE(promise.then([](T&&) { return false; }, [](kj::Exception&&) { return true; }) @@ -1064,196 +1268,261 @@ TEST(Rpc, Abort) { EXPECT_TRUE(conn->receiveIncomingMessage().wait(context.waitScope) == nullptr); } -// ======================================================================================= - -typedef RealmGateway TestRealmGateway; - -class TestGateway final: public TestRealmGateway::Server { -public: - kj::Promise import(ImportContext context) override { - auto cap = context.getParams().getCap(); - context.releaseParams(); - return cap.saveRequest().send() - .then([KJ_CPCAP(context)](Response::SaveResults> response) mutable { - context.getResults().initSturdyRef().getObjectId().setAs( - kj::str("imported-", response.getSturdyRef())); - }); - } - - kj::Promise export_(ExportContext context) override { - auto cap = context.getParams().getCap(); - context.releaseParams(); - return cap.saveRequest().send() - .then([KJ_CPCAP(context)] - (Response::SaveResults> response) mutable { - context.getResults().setSturdyRef(kj::str("exported-", - response.getSturdyRef().getObjectId().getAs())); - }); - } -}; - -class TestPersistent final: public Persistent::Server { -public: - TestPersistent(kj::StringPtr name): name(name) {} - - kj::Promise save(SaveContext context) override { - 
context.initResults().initSturdyRef().getObjectId().setAs(name); - return kj::READY_NOW; - } - -private: - kj::StringPtr name; -}; +KJ_TEST("loopback bootstrap()") { + int callCount = 0; + test::TestInterface::Client bootstrap = kj::heap(callCount); -class TestPersistentText final: public Persistent::Server { -public: - TestPersistentText(kj::StringPtr name): name(name) {} + MallocMessageBuilder hostIdBuilder; + auto hostId = hostIdBuilder.getRoot(); + hostId.setHost("server"); - kj::Promise save(SaveContext context) override { - context.initResults().setSturdyRef(name); - return kj::READY_NOW; - } + TestContext context(bootstrap); + auto client = context.rpcServer.bootstrap(hostId).castAs(); -private: - kj::StringPtr name; -}; + auto request = client.fooRequest(); + request.setI(123); + request.setJ(true); + auto response = request.send().wait(context.waitScope); -TEST(Rpc, RealmGatewayImport) { - TestRealmGateway::Client gateway = kj::heap(); - Persistent::Client bootstrap = kj::heap("foo"); + KJ_EXPECT(response.getX() == "foo"); + KJ_EXPECT(callCount == 1); +} - MallocMessageBuilder hostIdBuilder; - auto hostId = hostIdBuilder.getRoot(); - hostId.setHost("server"); +KJ_TEST("method throws exception") { + TestContext context; - TestContext context(bootstrap, gateway); - auto client = context.rpcClient.bootstrap(hostId).castAs>(); + auto client = context.connect(test::TestSturdyRefObjectId::Tag::TEST_MORE_STUFF) + .castAs(); - auto response = client.saveRequest().send().wait(context.waitScope); + kj::Maybe maybeException; + client.throwExceptionRequest().send().ignoreResult() + .catch_([&](kj::Exception&& e) { + maybeException = kj::mv(e); + }).wait(context.waitScope); - EXPECT_EQ("imported-foo", response.getSturdyRef().getObjectId().getAs()); + auto exception = KJ_ASSERT_NONNULL(maybeException); + KJ_EXPECT(exception.getDescription() == "remote exception: test exception"); + KJ_EXPECT(exception.getRemoteTrace() == nullptr); } -TEST(Rpc, RealmGatewayExport) { - TestRealmGateway::Client gateway = kj::heap(); - Persistent::Client bootstrap = kj::heap("foo"); +KJ_TEST("method throws exception with trace encoder") { + TestContext context; - MallocMessageBuilder hostIdBuilder; - auto hostId = hostIdBuilder.getRoot(); - hostId.setHost("server"); + context.rpcServer.setTraceEncoder([](const kj::Exception& e) { + return kj::str("trace for ", e.getDescription()); + }); - TestContext context(bootstrap, gateway, true); - auto client = context.rpcClient.bootstrap(hostId).castAs>(); + auto client = context.connect(test::TestSturdyRefObjectId::Tag::TEST_MORE_STUFF) + .castAs(); - auto response = client.saveRequest().send().wait(context.waitScope); + kj::Maybe maybeException; + client.throwExceptionRequest().send().ignoreResult() + .catch_([&](kj::Exception&& e) { + maybeException = kj::mv(e); + }).wait(context.waitScope); - EXPECT_EQ("exported-foo", response.getSturdyRef()); + auto exception = KJ_ASSERT_NONNULL(maybeException); + KJ_EXPECT(exception.getDescription() == "remote exception: test exception"); + KJ_EXPECT(exception.getRemoteTrace() == "trace for test exception"); } -TEST(Rpc, RealmGatewayImportExport) { - // Test that a save request which leaves the realm, bounces through a promise capability, and - // then comes back into the realm, does not actually get translated both ways. 
+KJ_TEST("when OutgoingRpcMessage::send() throws, we don't leak exports") { + // When OutgoingRpcMessage::send() throws an exception on a Call message, we need to clean up + // anything that had been added to the export table as part of the call. At one point this + // cleanup was missing, so exports would leak. - TestRealmGateway::Client gateway = kj::heap(); - Persistent::Client bootstrap = kj::heap("foo"); - - MallocMessageBuilder serverHostIdBuilder; - auto serverHostId = serverHostIdBuilder.getRoot(); - serverHostId.setHost("server"); + TestContext context; - MallocMessageBuilder clientHostIdBuilder; - auto clientHostId = clientHostIdBuilder.getRoot(); - clientHostId.setHost("client"); + uint32_t expectedExportNumber = 0; + uint interceptCount = 0; + bool shouldThrowFromSend = false; + context.clientNetwork.onSend([&](MessageBuilder& builder) { + auto message = builder.getRoot().asReader(); + if (message.isCall()) { + auto call = message.getCall(); + if (call.getInterfaceId() == capnp::typeId() && + call.getMethodId() == 0) { + // callFoo() request, expect a capability in the param caps. Specifically we expect a + // promise, because that's what we send below. + auto capTable = call.getParams().getCapTable(); + KJ_ASSERT(capTable.size() == 1); + auto desc = capTable[0]; + KJ_ASSERT(desc.isSenderPromise()); + KJ_ASSERT(desc.getSenderPromise() == expectedExportNumber); + + ++interceptCount; + if (shouldThrowFromSend) { + kj::throwRecoverableException(KJ_EXCEPTION(FAILED, "intercepted")); + return false; // only matters when -fno-exceptions + } + } + } + return true; + }); - kj::EventLoop loop; - kj::WaitScope waitScope(loop); - TestNetwork network; - TestNetworkAdapter& clientNetwork = network.add("client"); - TestNetworkAdapter& serverNetwork = network.add("server"); - RpcSystem rpcClient = - makeRpcServer(clientNetwork, bootstrap, gateway); - auto paf = kj::newPromiseAndFulfiller(); - RpcSystem rpcServer = - makeRpcServer(serverNetwork, kj::mv(paf.promise)); - - auto client = rpcClient.bootstrap(serverHostId).castAs>(); - - bool responseReady = false; - auto responsePromise = client.saveRequest().send() - .then([&](Response::SaveResults>&& response) { - responseReady = true; - return kj::mv(response); - }).eagerlyEvaluate(nullptr); + auto client = context.connect(test::TestSturdyRefObjectId::Tag::TEST_MORE_STUFF) + .castAs(); - // Crank the event loop to give the message time to reach the server and block on the promise - // resolution. - kj::evalLater([]() {}).wait(waitScope); - kj::evalLater([]() {}).wait(waitScope); - kj::evalLater([]() {}).wait(waitScope); - kj::evalLater([]() {}).wait(waitScope); + { + shouldThrowFromSend = true; + auto req = client.callFooRequest(); + req.setCap(kj::Promise(kj::NEVER_DONE)); + req.send().then([](auto&&) { + KJ_FAIL_ASSERT("should have thrown"); + }, [](kj::Exception&& e) { + KJ_EXPECT(e.getDescription() == "intercepted", e); + }).wait(context.waitScope); + } - EXPECT_FALSE(responseReady); + KJ_EXPECT(interceptCount == 1); - paf.fulfiller->fulfill(rpcServer.bootstrap(clientHostId)); + // Sending again should use the same export number, because the export table entry should have + // been released when send() threw. (At one point, this was a bug...) 
+ { + shouldThrowFromSend = true; + auto req = client.callFooRequest(); + req.setCap(kj::Promise(kj::NEVER_DONE)); + req.send().then([](auto&&) { + KJ_FAIL_ASSERT("should have thrown"); + }, [](kj::Exception&& e) { + KJ_EXPECT(e.getDescription() == "intercepted", e); + }).wait(context.waitScope); + } - auto response = responsePromise.wait(waitScope); + KJ_EXPECT(interceptCount == 2); - // Should have the original value. If it went through export and re-import, though, then this - // will be "imported-exported-foo", which is wrong. - EXPECT_EQ("foo", response.getSturdyRef().getObjectId().getAs()); -} + // Now lets start a call that doesn't throw. The export number should still be zero because + // the previous exports were released. + { + shouldThrowFromSend = false; + auto req = client.callFooRequest(); + req.setCap(kj::Promise(kj::NEVER_DONE)); + auto promise = req.send(); + KJ_EXPECT(!promise.poll(context.waitScope)); -TEST(Rpc, RealmGatewayImportExport) { - // Test that a save request which enters the realm, bounces through a promise capability, and - // then goes back out of the realm, does not actually get translated both ways. + KJ_EXPECT(interceptCount == 3); + } - TestRealmGateway::Client gateway = kj::heap(); - Persistent::Client bootstrap = kj::heap("foo"); + // We canceled the previous call, BUT the exported capability is still present until the other + // side drops it, which it won't because the call isn't marked cancelable and never completes. + // Now, let's send another call. This time, we expect a new export number will actually be + // allocated. + { + shouldThrowFromSend = false; + expectedExportNumber = 1; + auto req = client.callFooRequest(); + auto paf = kj::newPromiseAndFulfiller(); + req.setCap(kj::mv(paf.promise)); + auto promise = req.send(); + KJ_EXPECT(!promise.poll(context.waitScope)); + + KJ_EXPECT(interceptCount == 4); + + // Now let's actually let the RPC complete so we can verify the RPC system isn't broken or + // anything. + int callCount = 0; + paf.fulfiller->fulfill(kj::heap(callCount)); + auto resp = promise.wait(context.waitScope); + KJ_EXPECT(resp.getS() == "bar"); + KJ_EXPECT(callCount == 1); + } - MallocMessageBuilder serverHostIdBuilder; - auto serverHostId = serverHostIdBuilder.getRoot(); - serverHostId.setHost("server"); + // Now if we do yet another call, it'll reuse export number 1. 
+ { + shouldThrowFromSend = false; + expectedExportNumber = 1; + auto req = client.callFooRequest(); + req.setCap(kj::Promise(kj::NEVER_DONE)); + auto promise = req.send(); + KJ_EXPECT(!promise.poll(context.waitScope)); + + KJ_EXPECT(interceptCount == 5); + } +} - MallocMessageBuilder clientHostIdBuilder; - auto clientHostId = clientHostIdBuilder.getRoot(); - clientHostId.setHost("client"); +KJ_TEST("export the same promise twice") { + TestContext context; - kj::EventLoop loop; - kj::WaitScope waitScope(loop); - TestNetwork network; - TestNetworkAdapter& clientNetwork = network.add("client"); - TestNetworkAdapter& serverNetwork = network.add("server"); - RpcSystem rpcClient = - makeRpcServer(clientNetwork, bootstrap); - auto paf = kj::newPromiseAndFulfiller(); - RpcSystem rpcServer = - makeRpcServer(serverNetwork, kj::mv(paf.promise), gateway); - - auto client = rpcClient.bootstrap(serverHostId).castAs>(); - - bool responseReady = false; - auto responsePromise = client.saveRequest().send() - .then([&](Response::SaveResults>&& response) { - responseReady = true; - return kj::mv(response); - }).eagerlyEvaluate(nullptr); + bool exportIsPromise; + uint32_t expectedExportNumber; + uint interceptCount = 0; + context.clientNetwork.onSend([&](MessageBuilder& builder) { + auto message = builder.getRoot().asReader(); + if (message.isCall()) { + auto call = message.getCall(); + if (call.getInterfaceId() == capnp::typeId() && + call.getMethodId() == 0) { + // callFoo() request, expect a capability in the param caps. Specifically we expect a + // promise, because that's what we send below. + auto capTable = call.getParams().getCapTable(); + KJ_ASSERT(capTable.size() == 1); + auto desc = capTable[0]; + if (exportIsPromise) { + KJ_ASSERT(desc.isSenderPromise()); + KJ_ASSERT(desc.getSenderPromise() == expectedExportNumber); + } else { + KJ_ASSERT(desc.isSenderHosted()); + KJ_ASSERT(desc.getSenderHosted() == expectedExportNumber); + } - // Crank the event loop to give the message time to reach the server and block on the promise - // resolution. - kj::evalLater([]() {}).wait(waitScope); - kj::evalLater([]() {}).wait(waitScope); - kj::evalLater([]() {}).wait(waitScope); - kj::evalLater([]() {}).wait(waitScope); + ++interceptCount; + } + } + return true; + }); - EXPECT_FALSE(responseReady); + auto client = context.connect(test::TestSturdyRefObjectId::Tag::TEST_MORE_STUFF) + .castAs(); - paf.fulfiller->fulfill(rpcServer.bootstrap(clientHostId)); + auto sendReq = [&](test::TestInterface::Client cap) { + auto req = client.callFooRequest(); + req.setCap(kj::mv(cap)); + return req.send(); + }; - auto response = responsePromise.wait(waitScope); + auto expectNeverDone = [&](auto& promise) { + if (promise.poll(context.waitScope)) { + promise.wait(context.waitScope); // let it throw if it's going to + KJ_FAIL_ASSERT("promise finished without throwing"); + } + }; - // Should have the original value. If it went through import and re-export, though, then this - // will be "exported-imported-foo", which is wrong. - EXPECT_EQ("foo", response.getSturdyRef()); + int callCount = 0; + test::TestInterface::Client normalCap = kj::heap(callCount); + test::TestInterface::Client promiseCap = kj::Promise(kj::NEVER_DONE); + + // Send request with a promise capability in the params. + exportIsPromise = true; + expectedExportNumber = 0; + auto promise1 = sendReq(promiseCap); + expectNeverDone(promise1); + KJ_EXPECT(interceptCount == 1); + + // Send a second request with the same promise should use the same export table entry. 
+ auto promise2 = sendReq(promiseCap); + expectNeverDone(promise2); + KJ_EXPECT(interceptCount == 2); + + // Sending a request with a different promise should use a different export table entry. + expectedExportNumber = 1; + auto promise3 = sendReq(kj::Promise(kj::NEVER_DONE)); + expectNeverDone(promise3); + KJ_EXPECT(interceptCount == 3); + + // Now try sending a non-promise cap. We'll send all these requests at once before waiting on + // any of them since these will acutally complete.k + exportIsPromise = false; + expectedExportNumber = 2; + auto promise4 = sendReq(normalCap); + auto promise5 = sendReq(normalCap); + expectedExportNumber = 3; + auto promise6 = sendReq(kj::heap(callCount)); + KJ_EXPECT(interceptCount == 6); + + KJ_EXPECT(promise4.wait(context.waitScope).getS() == "bar"); + KJ_EXPECT(promise5.wait(context.waitScope).getS() == "bar"); + KJ_EXPECT(promise6.wait(context.waitScope).getS() == "bar"); + KJ_EXPECT(callCount == 3); } } // namespace diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty-test.c++ index 9a001bcba2d..a3e5749c7c0 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty-test.c++ @@ -21,12 +21,30 @@ #define CAPNP_TESTING_CAPNP 1 +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +// Includes just for need SOL_SOCKET and SO_SNDBUF +#if _WIN32 +#include +#endif + #include "rpc-twoparty.h" #include "test-util.h" #include #include #include #include +#include + +#if _WIN32 +#include +#include +#include +#else +#include +#endif // TODO(cleanup): Auto-generate stringification functions for union discriminants. namespace capnp { @@ -69,6 +87,18 @@ private: int& handleCount; }; +class TestMonotonicClock final: public kj::MonotonicClock { +public: + kj::TimePoint now() const override { + return time; + } + + void reset() { time = kj::systemCoarseMonotonicClock().now(); } + void increment(kj::Duration d) { time += d; } +private: + kj::TimePoint time = kj::systemCoarseMonotonicClock().now(); +}; + kj::AsyncIoProvider::PipeThread runServer(kj::AsyncIoProvider& ioProvider, int& callCount, int& handleCount) { return ioProvider.newPipeThread( @@ -99,16 +129,27 @@ Capability::Client getPersistentCap(RpcSystem& client, TEST(TwoPartyNetwork, Basic) { auto ioContext = kj::setupAsyncIo(); + TestMonotonicClock clock; int callCount = 0; int handleCount = 0; auto serverThread = runServer(*ioContext.provider, callCount, handleCount); - TwoPartyVatNetwork network(*serverThread.pipe, rpc::twoparty::Side::CLIENT); + TwoPartyVatNetwork network(*serverThread.pipe, rpc::twoparty::Side::CLIENT, capnp::ReaderOptions(), clock); auto rpcClient = makeRpcClient(network); + KJ_EXPECT(network.getCurrentQueueCount() == 0); + KJ_EXPECT(network.getCurrentQueueSize() == 0); + KJ_EXPECT(network.getOutgoingMessageWaitTime() == 0 * kj::SECONDS); + // Request the particular capability from the server. auto client = getPersistentCap(rpcClient, rpc::twoparty::Side::SERVER, test::TestSturdyRefObjectId::Tag::TEST_INTERFACE).castAs(); + clock.increment(1 * kj::SECONDS); + + KJ_EXPECT(network.getCurrentQueueCount() == 1); + KJ_EXPECT(network.getCurrentQueueSize() > 0); + KJ_EXPECT(network.getOutgoingMessageWaitTime() == 1 * kj::SECONDS); + size_t oldSize = network.getCurrentQueueSize(); // Use the capability. 
auto request1 = client.fooRequest(); @@ -116,10 +157,21 @@ TEST(TwoPartyNetwork, Basic) { request1.setJ(true); auto promise1 = request1.send(); + KJ_EXPECT(network.getCurrentQueueCount() == 2); + KJ_EXPECT(network.getCurrentQueueSize() > oldSize); + KJ_EXPECT(network.getOutgoingMessageWaitTime() == 1 * kj::SECONDS); + oldSize = network.getCurrentQueueSize(); + auto request2 = client.bazRequest(); initTestMessage(request2.initS()); auto promise2 = request2.send(); + KJ_EXPECT(network.getCurrentQueueCount() == 3); + KJ_EXPECT(network.getCurrentQueueSize() > oldSize); + oldSize = network.getCurrentQueueSize(); + + clock.increment(1 * kj::SECONDS); + bool barFailed = false; auto request3 = client.barRequest(); auto promise3 = request3.send().then( @@ -131,6 +183,12 @@ TEST(TwoPartyNetwork, Basic) { EXPECT_EQ(0, callCount); + KJ_EXPECT(network.getCurrentQueueCount() == 4); + KJ_EXPECT(network.getCurrentQueueSize() > oldSize); + // Oldest message is now 2 seconds old + KJ_EXPECT(network.getOutgoingMessageWaitTime() == 2 * kj::SECONDS); + oldSize = network.getCurrentQueueSize(); + auto response1 = promise1.wait(ioContext.waitScope); EXPECT_EQ("foo", response1.getX()); @@ -141,6 +199,20 @@ TEST(TwoPartyNetwork, Basic) { EXPECT_EQ(2, callCount); EXPECT_TRUE(barFailed); + + // There's still a `Finish` message queued. + KJ_EXPECT(network.getCurrentQueueCount() > 0); + KJ_EXPECT(network.getCurrentQueueSize() > 0); + // Oldest message was sent, next oldest should be 0 seconds old since we haven't incremented + // the clock yet. + KJ_EXPECT(network.getOutgoingMessageWaitTime() == 0 * kj::SECONDS); + + // Let any I/O finish. + kj::Promise(kj::NEVER_DONE).poll(ioContext.waitScope); + + // Now nothing is queued. + KJ_EXPECT(network.getCurrentQueueCount() == 0); + KJ_EXPECT(network.getCurrentQueueSize() == 0); } TEST(TwoPartyNetwork, Pipelining) { @@ -195,6 +267,10 @@ TEST(TwoPartyNetwork, Pipelining) { EXPECT_FALSE(disconnected); // What if we disconnect? + // TODO(cleanup): This is kind of cheating, we are shutting down the underlying socket to + // simulate a disconnect, but it's weird to pull the rug out from under our VatNetwork like + // this and it causes a bit of a race between write failures and read failures. This part of + // the test should maybe be restructured. serverThread.pipe->shutdownWrite(); // The other side should also disconnect. @@ -216,8 +292,19 @@ TEST(TwoPartyNetwork, Pipelining) { .castAs().graultRequest(); auto pipelinePromise2 = pipelineRequest2.send(); - EXPECT_ANY_THROW(pipelinePromise.wait(ioContext.waitScope)); - EXPECT_ANY_THROW(pipelinePromise2.wait(ioContext.waitScope)); + pipelinePromise.then([](auto) { + KJ_FAIL_EXPECT("should have thrown"); + }, [](kj::Exception&& e) { + KJ_EXPECT(e.getType() == kj::Exception::Type::DISCONNECTED); + // I wish we could test stack traces somehow... oh well. + }).wait(ioContext.waitScope); + + pipelinePromise2.then([](auto) { + KJ_FAIL_EXPECT("should have thrown"); + }, [](kj::Exception&& e) { + KJ_EXPECT(e.getType() == kj::Exception::Type::DISCONNECTED); + // I wish we could test stack traces somehow... oh well. + }).wait(ioContext.waitScope); EXPECT_EQ(3, callCount); EXPECT_EQ(1, reverseCallCount); @@ -248,7 +335,7 @@ TEST(TwoPartyNetwork, Release) { // There once was a bug where the last outgoing message (and any capabilities attached) would // not get cleaned up (until a new message was sent). 
This appeared to be a bug in Release, - // becaues if a client received a message and then released a capability from it but then did + // because if a client received a message and then released a capability from it but then did // not make any further calls, then the capability would not be released because the message // introducing it remained the last server -> client message (because a "Release" message has // no reply). Here we are explicitly trying to catch this bug. This proves tricky, because when @@ -419,6 +506,391 @@ TEST(TwoPartyNetwork, BootstrapFactory) { EXPECT_TRUE(bootstrapFactory.called); } +// ======================================================================================= + +#if !_WIN32 && !__CYGWIN__ // Windows and Cygwin don't support SCM_RIGHTS. +KJ_TEST("send FD over RPC") { + auto io = kj::setupAsyncIo(); + + int callCount = 0; + int handleCount = 0; + TwoPartyServer server(kj::heap(callCount, handleCount)); + auto pipe = io.provider->newCapabilityPipe(); + server.accept(kj::mv(pipe.ends[0]), 2); + TwoPartyClient client(*pipe.ends[1], 2); + + auto cap = client.bootstrap().castAs(); + + int pipeFds[2]; + KJ_SYSCALL(kj::miniposix::pipe(pipeFds)); + kj::AutoCloseFd in1(pipeFds[0]); + kj::AutoCloseFd out1(pipeFds[1]); + KJ_SYSCALL(kj::miniposix::pipe(pipeFds)); + kj::AutoCloseFd in2(pipeFds[0]); + kj::AutoCloseFd out2(pipeFds[1]); + + capnp::RemotePromise promise = nullptr; + { + auto req = cap.writeToFdRequest(); + + // Order reversal intentional, just trying to mix things up. + req.setFdCap1(kj::heap(kj::mv(out2))); + req.setFdCap2(kj::heap(kj::mv(out1))); + + promise = req.send(); + } + + int in3 = KJ_ASSERT_NONNULL(promise.getFdCap3().getFd().wait(io.waitScope)); + KJ_EXPECT(io.lowLevelProvider->wrapInputFd(kj::mv(in3))->readAllText().wait(io.waitScope) + == "baz"); + + { + auto promise2 = kj::mv(promise); // make sure the PipelineHook also goes out of scope + auto response = promise2.wait(io.waitScope); + KJ_EXPECT(response.getSecondFdPresent()); + } + + KJ_EXPECT(io.lowLevelProvider->wrapInputFd(kj::mv(in1))->readAllText().wait(io.waitScope) + == "bar"); + KJ_EXPECT(io.lowLevelProvider->wrapInputFd(kj::mv(in2))->readAllText().wait(io.waitScope) + == "foo"); +} + +KJ_TEST("FD per message limit") { + auto io = kj::setupAsyncIo(); + + int callCount = 0; + int handleCount = 0; + TwoPartyServer server(kj::heap(callCount, handleCount)); + auto pipe = io.provider->newCapabilityPipe(); + server.accept(kj::mv(pipe.ends[0]), 1); + TwoPartyClient client(*pipe.ends[1], 1); + + auto cap = client.bootstrap().castAs(); + + int pipeFds[2]; + KJ_SYSCALL(kj::miniposix::pipe(pipeFds)); + kj::AutoCloseFd in1(pipeFds[0]); + kj::AutoCloseFd out1(pipeFds[1]); + KJ_SYSCALL(kj::miniposix::pipe(pipeFds)); + kj::AutoCloseFd in2(pipeFds[0]); + kj::AutoCloseFd out2(pipeFds[1]); + + capnp::RemotePromise promise = nullptr; + { + auto req = cap.writeToFdRequest(); + + // Order reversal intentional, just trying to mix things up. 
+ req.setFdCap1(kj::heap(kj::mv(out2))); + req.setFdCap2(kj::heap(kj::mv(out1))); + + promise = req.send(); + } + + int in3 = KJ_ASSERT_NONNULL(promise.getFdCap3().getFd().wait(io.waitScope)); + KJ_EXPECT(io.lowLevelProvider->wrapInputFd(kj::mv(in3))->readAllText().wait(io.waitScope) + == "baz"); + + { + auto promise2 = kj::mv(promise); // make sure the PipelineHook also goes out of scope + auto response = promise2.wait(io.waitScope); + KJ_EXPECT(!response.getSecondFdPresent()); + } + + KJ_EXPECT(io.lowLevelProvider->wrapInputFd(kj::mv(in1))->readAllText().wait(io.waitScope) + == ""); + KJ_EXPECT(io.lowLevelProvider->wrapInputFd(kj::mv(in2))->readAllText().wait(io.waitScope) + == "foo"); +} +#endif // !_WIN32 && !__CYGWIN__ + +// ======================================================================================= + +class MockSndbufStream final: public kj::AsyncIoStream { +public: + MockSndbufStream(kj::Own inner, size_t& window, size_t& written) + : inner(kj::mv(inner)), window(window), written(written) {} + + kj::Promise read(void* buffer, size_t minBytes, size_t maxBytes) override { + return inner->read(buffer, minBytes, maxBytes); + } + kj::Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { + return inner->tryRead(buffer, minBytes, maxBytes); + } + kj::Maybe tryGetLength() override { + return inner->tryGetLength(); + } + kj::Promise pumpTo(AsyncOutputStream& output, uint64_t amount) override { + return inner->pumpTo(output, amount); + } + kj::Promise write(const void* buffer, size_t size) override { + written += size; + return inner->write(buffer, size); + } + kj::Promise write(kj::ArrayPtr> pieces) override { + for (auto& piece: pieces) written += piece.size(); + return inner->write(pieces); + } + kj::Maybe> tryPumpFrom( + kj::AsyncInputStream& input, uint64_t amount) override { + return inner->tryPumpFrom(input, amount); + } + kj::Promise whenWriteDisconnected() override { return inner->whenWriteDisconnected(); } + void shutdownWrite() override { return inner->shutdownWrite(); } + void abortRead() override { return inner->abortRead(); } + + void getsockopt(int level, int option, void* value, uint* length) override { + if (level == SOL_SOCKET && option == SO_SNDBUF) { + KJ_ASSERT(*length == sizeof(int)); + *reinterpret_cast(value) = window; + } else { + KJ_UNIMPLEMENTED("not implemented for test", level, option); + } + } + +private: + kj::Own inner; + size_t& window; + size_t& written; +}; + +KJ_TEST("Streaming over RPC") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + auto pipe = kj::newTwoWayPipe(); + + size_t window = 1024; + size_t clientWritten = 0; + size_t serverWritten = 0; + + pipe.ends[0] = kj::heap(kj::mv(pipe.ends[0]), window, clientWritten); + pipe.ends[1] = kj::heap(kj::mv(pipe.ends[1]), window, serverWritten); + + auto ownServer = kj::heap(); + auto& server = *ownServer; + test::TestStreaming::Client serverCap(kj::mv(ownServer)); + + TwoPartyClient tpClient(*pipe.ends[0]); + TwoPartyClient tpServer(*pipe.ends[1], serverCap, rpc::twoparty::Side::SERVER); + + auto cap = tpClient.bootstrap().castAs(); + + // Send stream requests until we can't anymore. + kj::Promise promise = kj::READY_NOW; + uint count = 0; + while (promise.poll(waitScope)) { + promise.wait(waitScope); + + auto req = cap.doStreamIRequest(); + req.setI(++count); + promise = req.send(); + } + + // We should have sent... several. 
+ KJ_EXPECT(count > 5); + + // Now, cause calls to finish server-side one-at-a-time and check that this causes the client + // side to be willing to send more. + uint countReceived = 0; + for (uint i = 0; i < 50; i++) { + KJ_EXPECT(server.iSum == ++countReceived); + server.iSum = 0; + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + + KJ_ASSERT(promise.poll(waitScope)); + promise.wait(waitScope); + + auto req = cap.doStreamIRequest(); + req.setI(++count); + promise = req.send(); + if (promise.poll(waitScope)) { + // We'll see a couple of instances where completing one request frees up space to make two + // more. This is because the first few requests we made are a little bit larger than the + // rest due to being pipelined on the bootstrap. Once the bootstrap resolves, the request + // size gets smaller. + promise.wait(waitScope); + req = cap.doStreamIRequest(); + req.setI(++count); + promise = req.send(); + + // We definitely shouldn't have freed up stream space for more than two additional requests! + KJ_ASSERT(!promise.poll(waitScope)); + } + } +} + +KJ_TEST("Streaming over RPC then unwrap with CapabilitySet") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + + auto pipe = kj::newTwoWayPipe(); + + CapabilityServerSet capSet; + + auto ownServer = kj::heap(); + auto& server = *ownServer; + auto serverCap = capSet.add(kj::mv(ownServer)); + + auto paf = kj::newPromiseAndFulfiller(); + + TwoPartyClient tpClient(*pipe.ends[0], serverCap); + TwoPartyClient tpServer(*pipe.ends[1], kj::mv(paf.promise), rpc::twoparty::Side::SERVER); + + auto clientCap = tpClient.bootstrap().castAs(); + + // Send stream requests until we can't anymore. + kj::Promise promise = kj::READY_NOW; + uint count = 0; + while (promise.poll(waitScope)) { + promise.wait(waitScope); + + auto req = clientCap.doStreamIRequest(); + req.setI(++count); + promise = req.send(); + } + + // We should have sent... several. + KJ_EXPECT(count > 10); + + // Now try to unwrap. + auto unwrapPromise = capSet.getLocalServer(clientCap); + + // It won't work yet, obviously, because we haven't resolved the promise. + KJ_EXPECT(!unwrapPromise.poll(waitScope)); + + // So do that. + paf.fulfiller->fulfill(tpServer.bootstrap().castAs()); + clientCap.whenResolved().wait(waitScope); + + // But the unwrap still doesn't resolve because streaming requests are queued up. + KJ_EXPECT(!unwrapPromise.poll(waitScope)); + + // OK, let's resolve a streaming request. + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + + // All of our call promises have now completed from the client's perspective. + promise.wait(waitScope); + + // But we still can't unwrap, because calls are queued server-side. + KJ_EXPECT(!unwrapPromise.poll(waitScope)); + + // Let's even make one more call now. But this is actually a local call since the promise + // resolved. + { + auto req = clientCap.doStreamIRequest(); + req.setI(++count); + promise = req.send(); + } + + // Because it's a local call, it doesn't resolve early. The window is no longer in effect. + KJ_EXPECT(!promise.poll(waitScope)); + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + KJ_EXPECT(!promise.poll(waitScope)); + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + KJ_EXPECT(!promise.poll(waitScope)); + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + KJ_EXPECT(!promise.poll(waitScope)); + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + KJ_EXPECT(!promise.poll(waitScope)); + + // Our unwrap promise is also still not resolved. 
+ KJ_EXPECT(!unwrapPromise.poll(waitScope)); + + // Close out stream calls until it does resolve! + while (!unwrapPromise.poll(waitScope)) { + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + } + + // Now we can unwrap! + KJ_EXPECT(&KJ_ASSERT_NONNULL(unwrapPromise.wait(waitScope)) == &server); + + // But our last stream call still isn't done. + KJ_EXPECT(!promise.poll(waitScope)); + + // Finish it. + KJ_ASSERT_NONNULL(server.fulfiller)->fulfill(); + promise.wait(waitScope); +} + +KJ_TEST("promise cap resolves between starting request and sending it") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + auto pipe = kj::newTwoWayPipe(); + + // Client exports TestCallOrderImpl as its bootstrap. + TwoPartyClient client(*pipe.ends[0], kj::heap(), rpc::twoparty::Side::CLIENT); + + // Server exports a promise, which will later resolve to loop back to the capability the client + // exported. + auto paf = kj::newPromiseAndFulfiller(); + TwoPartyClient server(*pipe.ends[1], kj::mv(paf.promise), rpc::twoparty::Side::SERVER); + + // Create a request but don't send it yet. + auto cap = client.bootstrap().castAs(); + auto req1 = cap.getCallSequenceRequest(); + + // Fulfill the promise now so that the server's bootstrap loops back to the client's bootstrap. + paf.fulfiller->fulfill(server.bootstrap()); + cap.whenResolved().wait(waitScope); + + // Send the request we created earlier, and also create and send a second request. + auto promise1 = req1.send(); + auto promise2 = cap.getCallSequenceRequest().send(); + + // They should arrive in order of send()s. + auto n1 = promise1.wait(waitScope).getN(); + KJ_EXPECT(n1 == 0, n1); + auto n2 = promise2.wait(waitScope).getN(); + KJ_EXPECT(n2 == 1, n2); +} + +KJ_TEST("write error propagates to read error") { + kj::EventLoop loop; + kj::WaitScope waitScope(loop); + auto frontPipe = kj::newTwoWayPipe(); + auto backPipe = kj::newTwoWayPipe(); + + TwoPartyClient client(*frontPipe.ends[0]); + + int callCount; + TwoPartyClient server(*backPipe.ends[1], kj::heap(callCount), + rpc::twoparty::Side::SERVER); + + auto pumpUpTask = frontPipe.ends[1]->pumpTo(*backPipe.ends[0]); + auto pumpDownTask = backPipe.ends[0]->pumpTo(*frontPipe.ends[1]); + + auto cap = client.bootstrap().castAs(); + + // Make sure the connections work. + { + auto req = cap.fooRequest(); + req.setI(123); + req.setJ(true); + auto resp = req.send().wait(waitScope); + EXPECT_EQ("foo", resp.getX()); + } + + // Disconnect upstream task in such a way that future writes on the client will fail, but the + // server doesn't notice the disconnect and so won't react. 
+ pumpUpTask = nullptr; + frontPipe.ends[1]->abortRead(); // causes write() on ends[0] to fail in the future + + { + auto req = cap.fooRequest(); + req.setI(123); + req.setJ(true); + auto promise = req.send().then([](auto) { + KJ_FAIL_EXPECT("expected exception"); + }, [](kj::Exception&& e) { + KJ_ASSERT(e.getDescription() == "abortRead() has been called"); + }); + + KJ_ASSERT(promise.poll(waitScope)); + promise.wait(waitScope); + } +} + } // namespace } // namespace _ } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.c++ index 5839025e3e8..f1bede8cb8a 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.c++ @@ -22,13 +22,25 @@ #include "rpc-twoparty.h" #include "serialize-async.h" #include +#include namespace capnp { -TwoPartyVatNetwork::TwoPartyVatNetwork(kj::AsyncIoStream& stream, rpc::twoparty::Side side, - ReaderOptions receiveOptions) - : stream(stream), side(side), peerVatId(4), - receiveOptions(receiveOptions), previousWrite(kj::READY_NOW) { +TwoPartyVatNetwork::TwoPartyVatNetwork( + kj::OneOf>&& stream, + uint maxFdsPerMessage, + rpc::twoparty::Side side, + ReaderOptions receiveOptions, + const kj::MonotonicClock& clock) + + : stream(kj::mv(stream)), + maxFdsPerMessage(maxFdsPerMessage), + side(side), + peerVatId(4), + receiveOptions(receiveOptions), + previousWrite(kj::READY_NOW), + clock(clock), + currentOutgoingMessageSendTime(clock.now()) { peerVatId.initRoot().setSide( side == rpc::twoparty::Side::CLIENT ? rpc::twoparty::Side::SERVER : rpc::twoparty::Side::CLIENT); @@ -38,6 +50,43 @@ TwoPartyVatNetwork::TwoPartyVatNetwork(kj::AsyncIoStream& stream, rpc::twoparty: disconnectFulfiller.fulfiller = kj::mv(paf.fulfiller); } +TwoPartyVatNetwork::TwoPartyVatNetwork(capnp::MessageStream& stream, + rpc::twoparty::Side side, ReaderOptions receiveOptions, + const kj::MonotonicClock& clock) + : TwoPartyVatNetwork(stream, 0, side, receiveOptions, clock) {} + +TwoPartyVatNetwork::TwoPartyVatNetwork( + capnp::MessageStream& stream, + uint maxFdsPerMessage, + rpc::twoparty::Side side, + ReaderOptions receiveOptions, + const kj::MonotonicClock& clock) + : TwoPartyVatNetwork(&stream, maxFdsPerMessage, side, receiveOptions, clock) {} + +TwoPartyVatNetwork::TwoPartyVatNetwork(kj::AsyncIoStream& stream, rpc::twoparty::Side side, + ReaderOptions receiveOptions, + const kj::MonotonicClock& clock) + : TwoPartyVatNetwork(kj::Own(kj::heap(stream)), + 0, side, receiveOptions, clock) {} + +TwoPartyVatNetwork::TwoPartyVatNetwork(kj::AsyncCapabilityStream& stream, uint maxFdsPerMessage, + rpc::twoparty::Side side, ReaderOptions receiveOptions, + const kj::MonotonicClock& clock) + : TwoPartyVatNetwork(kj::Own(kj::heap(stream)), + maxFdsPerMessage, side, receiveOptions, clock) {} + +MessageStream& TwoPartyVatNetwork::getStream() { + KJ_SWITCH_ONEOF(stream) { + KJ_CASE_ONEOF(s, MessageStream*) { + return *s; + } + KJ_CASE_ONEOF(s, kj::Own) { + return *s; + } + } + KJ_UNREACHABLE; +} + void TwoPartyVatNetwork::FulfillerDisposer::disposeImpl(void* pointer) const { if (--refcount == 0) { fulfiller->fulfill(); @@ -81,6 +130,12 @@ public: return message.getRoot(); } + void setFds(kj::Array fds) override { + if (network.maxFdsPerMessage > 0) { + this->fds = kj::mv(fds); + } + } + void send() override { size_t size = 0; for (auto& segment: message.getSegmentsForOutput()) { @@ -93,36 +148,121 @@ public: return; } + network.currentQueueSize += size * 
sizeof(capnp::word); + ++network.currentQueueCount; + auto deferredSizeUpdate = kj::defer([&network = network, size]() mutable { + network.currentQueueSize -= size * sizeof(capnp::word); + --network.currentQueueCount; + }); + + auto sendTime = network.clock.now(); network.previousWrite = KJ_ASSERT_NONNULL(network.previousWrite, "already shut down") - .then([&]() { - // Note that if the write fails, all further writes will be skipped due to the exception. - // We never actually handle this exception because we assume the read end will fail as well - // and it's cleaner to handle the failure there. - return writeMessage(network.stream, message); - }).attach(kj::addRef(*this)) + .then([this, sendTime]() { + return kj::evalNow([&]() { + network.currentOutgoingMessageSendTime = sendTime; + return network.getStream().writeMessage(fds, message); + }).catch_([this](kj::Exception&& e) { + // Since no one checks write failures, we need to propagate them into read failures, + // otherwise we might get stuck sending all messages into a black hole and wondering why + // the peer never replies. + network.readCancelReason = kj::cp(e); + if (!network.readCanceler.isEmpty()) { + network.readCanceler.cancel(kj::cp(e)); + } + kj::throwRecoverableException(kj::mv(e)); + }); + }).attach(kj::addRef(*this), kj::mv(deferredSizeUpdate)) // Note that it's important that the eagerlyEvaluate() come *after* the attach() because // otherwise the message (and any capabilities in it) will not be released until a new // message is written! (Kenton once spent all afternoon tracking this down...) .eagerlyEvaluate(nullptr); } + size_t sizeInWords() override { + return message.sizeInWords(); + } + private: TwoPartyVatNetwork& network; MallocMessageBuilder message; + kj::Array fds; }; +kj::Duration TwoPartyVatNetwork::getOutgoingMessageWaitTime() { + if (currentQueueCount > 0) { + return clock.now() - currentOutgoingMessageSendTime; + } else { + return 0 * kj::SECONDS; + } +} + class TwoPartyVatNetwork::IncomingMessageImpl final: public IncomingRpcMessage { public: IncomingMessageImpl(kj::Own message): message(kj::mv(message)) {} + IncomingMessageImpl(MessageReaderAndFds init, kj::Array fdSpace) + : message(kj::mv(init.reader)), + fdSpace(kj::mv(fdSpace)), + fds(init.fds) { + KJ_DASSERT(this->fds.begin() == this->fdSpace.begin()); + } + AnyPointer::Reader getBody() override { return message->getRoot(); } + kj::ArrayPtr getAttachedFds() override { + return fds; + } + + size_t sizeInWords() override { + return message->sizeInWords(); + } + private: kj::Own message; + kj::Array fdSpace; + kj::ArrayPtr fds; }; +kj::Own TwoPartyVatNetwork::newStream() { + return RpcFlowController::newVariableWindowController(*this); +} + +size_t TwoPartyVatNetwork::getWindow() { + // The socket's send buffer size -- as returned by getsockopt(SO_SNDBUF) -- tells us how much + // data the kernel itself is willing to buffer. The kernel will increase the send buffer size if + // needed to fill the connection's congestion window. So we can cheat and use it as our stream + // window, too, to make sure we saturate said congestion window. + // + // TODO(perf): Unfortunately, this hack breaks down in the presence of proxying. What we really + // want is the window all the way to the endpoint, which could cross multiple connections. 
The + // first-hop window could be either too big or too small: it's too big if the first hop has + // much higher bandwidth than the full path (causing buffering at the bottleneck), and it's + // too small if the first hop has much lower latency than the full path (causing not enough + // data to be sent to saturate the connection). To handle this, we could either: + // 1. Have proxies be aware of streaming, by flagging streaming calls in the RPC protocol. The + // proxies would then handle backpressure at each hop. This seems simple to implement but + // requires base RPC protocol changes and might require thinking carefully about e-ordering + // implications. Also, it only fixes underutilization; it does not fix buffer bloat. + // 2. Do our own BBR-like computation, where the client measures the end-to-end latency and + // bandwidth based on the observed sends and returns, and then compute the window based on + // that. This seems complicated, but avoids the need for any changes to the RPC protocol. + // In theory it solves both underutilization and buffer bloat. Note that this approach would + // require the RPC system to use a clock, which feels dirty and adds non-determinism. + + if (solSndbufUnimplemented) { + return RpcFlowController::DEFAULT_WINDOW_SIZE; + } else { + KJ_IF_MAYBE(bufSize, getStream().getSendBufferSize()) { + return *bufSize; + } else { + solSndbufUnimplemented = true; + return RpcFlowController::DEFAULT_WINDOW_SIZE; + } + } +} + rpc::twoparty::VatId::Reader TwoPartyVatNetwork::getPeerVatId() { return peerVatId.getRoot(); } @@ -132,12 +272,27 @@ kj::Own TwoPartyVatNetwork::newOutgoingMessage(uint firstSeg } kj::Promise>> TwoPartyVatNetwork::receiveIncomingMessage() { - return kj::evalLater([&]() { - return tryReadMessage(stream, receiveOptions) - .then([&](kj::Maybe>&& message) - -> kj::Maybe> { - KJ_IF_MAYBE(m, message) { - return kj::Own(kj::heap(kj::mv(*m))); + return kj::evalLater([this]() -> kj::Promise>> { + KJ_IF_MAYBE(e, readCancelReason) { + // A previous write failed; propagate the failure to reads, too. 
+ return kj::cp(*e); + } + + kj::Array fdSpace = nullptr; + if(maxFdsPerMessage > 0) { + fdSpace = kj::heapArray(maxFdsPerMessage); + } + auto promise = readCanceler.wrap(getStream().tryReadMessage(fdSpace, receiveOptions)); + return promise.then([fdSpace = kj::mv(fdSpace)] + (kj::Maybe&& messageAndFds) mutable + -> kj::Maybe> { + KJ_IF_MAYBE(m, messageAndFds) { + if (m->fds.size() > 0) { + return kj::Own( + kj::heap(kj::mv(*m), kj::mv(fdSpace))); + } else { + return kj::Own(kj::heap(kj::mv(m->reader))); + } } else { return nullptr; } @@ -147,7 +302,7 @@ kj::Promise>> TwoPartyVatNetwork::receiveI kj::Promise TwoPartyVatNetwork::shutdown() { kj::Promise result = KJ_ASSERT_NONNULL(previousWrite, "already shut down").then([this]() { - stream.shutdownWrite(); + return getStream().end(); }); previousWrite = nullptr; return kj::mv(result); @@ -168,6 +323,14 @@ struct TwoPartyServer::AcceptedConnection { : connection(kj::mv(connectionParam)), network(*connection, rpc::twoparty::Side::SERVER), rpcSystem(makeRpcServer(network, kj::mv(bootstrapInterface))) {} + + explicit AcceptedConnection(Capability::Client bootstrapInterface, + kj::Own&& connectionParam, + uint maxFdsPerMessage) + : connection(kj::mv(connectionParam)), + network(kj::downcast(*connection), + maxFdsPerMessage, rpc::twoparty::Side::SERVER), + rpcSystem(makeRpcServer(network, kj::mv(bootstrapInterface))) {} }; void TwoPartyServer::accept(kj::Own&& connection) { @@ -178,6 +341,36 @@ void TwoPartyServer::accept(kj::Own&& connection) { tasks.add(promise.attach(kj::mv(connectionState))); } +void TwoPartyServer::accept( + kj::Own&& connection, uint maxFdsPerMessage) { + auto connectionState = kj::heap( + bootstrapInterface, kj::mv(connection), maxFdsPerMessage); + + // Run the connection until disconnect. + auto promise = connectionState->network.onDisconnect(); + tasks.add(promise.attach(kj::mv(connectionState))); +} + +kj::Promise TwoPartyServer::accept(kj::AsyncIoStream& connection) { + auto connectionState = kj::heap(bootstrapInterface, + kj::Own(&connection, kj::NullDisposer::instance)); + + // Run the connection until disconnect. + auto promise = connectionState->network.onDisconnect(); + return promise.attach(kj::mv(connectionState)); +} + +kj::Promise TwoPartyServer::accept( + kj::AsyncCapabilityStream& connection, uint maxFdsPerMessage) { + auto connectionState = kj::heap(bootstrapInterface, + kj::Own(&connection, kj::NullDisposer::instance), + maxFdsPerMessage); + + // Run the connection until disconnect. 
+ auto promise = connectionState->network.onDisconnect(); + return promise.attach(kj::mv(connectionState)); +} + kj::Promise TwoPartyServer::listen(kj::ConnectionReceiver& listener) { return listener.accept() .then([this,&listener](kj::Own&& connection) mutable { @@ -186,6 +379,15 @@ kj::Promise TwoPartyServer::listen(kj::ConnectionReceiver& listener) { }); } +kj::Promise TwoPartyServer::listenCapStreamReceiver( + kj::ConnectionReceiver& listener, uint maxFdsPerMessage) { + return listener.accept() + .then([this,&listener,maxFdsPerMessage](kj::Own&& connection) mutable { + accept(connection.downcast(), maxFdsPerMessage); + return listenCapStreamReceiver(listener, maxFdsPerMessage); + }); +} + void TwoPartyServer::taskFailed(kj::Exception&& exception) { KJ_LOG(ERROR, exception); } @@ -195,14 +397,26 @@ TwoPartyClient::TwoPartyClient(kj::AsyncIoStream& connection) rpcSystem(makeRpcClient(network)) {} +TwoPartyClient::TwoPartyClient(kj::AsyncCapabilityStream& connection, uint maxFdsPerMessage) + : network(connection, maxFdsPerMessage, rpc::twoparty::Side::CLIENT), + rpcSystem(makeRpcClient(network)) {} + TwoPartyClient::TwoPartyClient(kj::AsyncIoStream& connection, Capability::Client bootstrapInterface, rpc::twoparty::Side side) : network(connection, side), rpcSystem(network, bootstrapInterface) {} +TwoPartyClient::TwoPartyClient(kj::AsyncCapabilityStream& connection, uint maxFdsPerMessage, + Capability::Client bootstrapInterface, + rpc::twoparty::Side side) + : network(connection, maxFdsPerMessage, side), + rpcSystem(network, bootstrapInterface) {} + Capability::Client TwoPartyClient::bootstrap() { - MallocMessageBuilder message(4); + capnp::word scratch[4]; + memset(&scratch, 0, sizeof(scratch)); + capnp::MallocMessageBuilder message(scratch); auto vatId = message.getRoot(); vatId.setSide(network.getSide() == rpc::twoparty::Side::CLIENT ? rpc::twoparty::Side::SERVER @@ -210,4 +424,8 @@ Capability::Client TwoPartyClient::bootstrap() { return rpcSystem.bootstrap(vatId); } +void TwoPartyClient::setTraceEncoder(kj::Function func) { + rpcSystem.setTraceEncoder(kj::mv(func)); +} + } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.capnp.h index e161940fa2e..d447706c922 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.capnp.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.capnp.h @@ -6,11 +6,13 @@ #include #include -#if CAPNP_VERSION != 7000 +#if CAPNP_VERSION != 9001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." 
#endif +CAPNP_BEGIN_HEADER + namespace capnp { namespace schemas { @@ -723,3 +725,5 @@ inline ::capnp::AnyPointer::Builder JoinResult::Builder::initCap() { } // namespace } // namespace +CAPNP_END_HEADER + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.h b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.h index f2dc7b3ae1e..58fed747615 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc-twoparty.h @@ -21,14 +21,14 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "rpc.h" #include "message.h" #include +#include #include +#include + +CAPNP_BEGIN_HEADER namespace capnp { @@ -43,7 +43,8 @@ typedef VatNetwork onDisconnect() { return disconnectPromise.addBranch(); } @@ -60,6 +86,18 @@ class TwoPartyVatNetwork: public TwoPartyVatNetworkBase, rpc::twoparty::Side getSide() { return side; } + size_t getCurrentQueueSize() { return currentQueueSize; } + // Get the number of bytes worth of outgoing messages that are currently queued in memory waiting + // to be sent on this connection. This may be useful for backpressure. + + size_t getCurrentQueueCount() { return currentQueueCount; } + // Get the count of outgoing messages that are currently queued in memory waiting + // to be sent on this connection. This may be useful for backpressure. + + kj::Duration getOutgoingMessageWaitTime(); + // Get how long the current outgoing message has been waiting to be sent on this connection. + // Returns 0 if the queue is empty. This may be useful for backpressure. + // implements VatNetwork ----------------------------------------------------- kj::Maybe> connect( @@ -70,12 +108,23 @@ class TwoPartyVatNetwork: public TwoPartyVatNetworkBase, class OutgoingMessageImpl; class IncomingMessageImpl; - kj::AsyncIoStream& stream; + kj::OneOf> stream; + // The underlying stream, which we may or may not own. Get a reference to + // this with getStream, rather than reading it directly. + + uint maxFdsPerMessage; rpc::twoparty::Side side; MallocMessageBuilder peerVatId; ReaderOptions receiveOptions; bool accepted = false; + bool solSndbufUnimplemented = false; + // Whether stream.getsockopt(SO_SNDBUF) has been observed to throw UNIMPLEMENTED. + + kj::Canceler readCanceler; + kj::Maybe readCancelReason; + // Used to propagate write errors into (permanent) read errors. + kj::Maybe> previousWrite; // Resolves when the previous write completes. This effectively serves as the write queue. // Becomes null when shutdown() is called. @@ -86,6 +135,11 @@ class TwoPartyVatNetwork: public TwoPartyVatNetworkBase, kj::ForkedPromise disconnectPromise = nullptr; + size_t currentQueueSize = 0; + size_t currentQueueCount = 0; + const kj::MonotonicClock& clock; + kj::TimePoint currentOutgoingMessageSendTime; + class FulfillerDisposer: public kj::Disposer { // Hack: TwoPartyVatNetwork is both a VatNetwork and a VatNetwork::Connection. When the RPC // system detects (or initiates) a disconnection, it drops its reference to the Connection. @@ -101,15 +155,30 @@ class TwoPartyVatNetwork: public TwoPartyVatNetworkBase, }; FulfillerDisposer disconnectFulfiller; + + TwoPartyVatNetwork( + kj::OneOf>&& stream, + uint maxFdsPerMessage, + rpc::twoparty::Side side, + ReaderOptions receiveOptions, + const kj::MonotonicClock& clock); + + MessageStream& getStream(); + kj::Own asConnection(); // Returns a pointer to this with the disposer set to disconnectFulfiller. 
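A usage-level sketch of the queue accessors declared above (getCurrentQueueSize(), getCurrentQueueCount(), getOutgoingMessageWaitTime()), as one might use them to apply backpressure before enqueuing more outgoing calls. The thresholds are illustrative, not part of the change:

    #include <capnp/rpc-twoparty.h>
    #include <kj/time.h>

    // Returns true if the connection looks congested and the caller should pause before
    // issuing more calls on it.
    bool shouldThrottle(capnp::TwoPartyVatNetwork& network) {
      return network.getCurrentQueueSize() > 1024 * 1024             // >1 MiB of queued bytes
          || network.getCurrentQueueCount() > 100                    // >100 queued messages
          || network.getOutgoingMessageWaitTime() > 1 * kj::SECONDS; // oldest message stuck >1s
    }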
// implements Connection ----------------------------------------------------- + kj::Own newStream() override; rpc::twoparty::VatId::Reader getPeerVatId() override; kj::Own newOutgoingMessage(uint firstSegmentWordSize) override; kj::Promise>> receiveIncomingMessage() override; kj::Promise shutdown() override; + + // implements WindowGetter --------------------------------------------------- + + size_t getWindow() override; }; class TwoPartyServer: private kj::TaskSet::ErrorHandler { @@ -120,13 +189,35 @@ class TwoPartyServer: private kj::TaskSet::ErrorHandler { explicit TwoPartyServer(Capability::Client bootstrapInterface); void accept(kj::Own&& connection); + void accept(kj::Own&& connection, uint maxFdsPerMessage); // Accepts the connection for servicing. + kj::Promise accept(kj::AsyncIoStream& connection) KJ_WARN_UNUSED_RESULT; + kj::Promise accept(kj::AsyncCapabilityStream& connection, uint maxFdsPerMessage) + KJ_WARN_UNUSED_RESULT; + // Accept connection without taking ownership. The returned promise resolves when the client + // disconnects. Dropping the promise forcefully cancels the RPC protocol. + // + // You probably can't do anything with `connection` after the RPC protocol has terminated, other + // than to close it. The main reason to use these methods rather than the ownership-taking ones + // is if your stream object becomes invalid outside some scope, so you want to make sure to + // cancel all usage of it before that by cancelling the promise. + kj::Promise listen(kj::ConnectionReceiver& listener); // Listens for connections on the given listener. The returned promise never resolves unless an // exception is thrown while trying to accept. You may discard the returned promise to cancel // listening. + kj::Promise listenCapStreamReceiver( + kj::ConnectionReceiver& listener, uint maxFdsPerMessage); + // Listen with support for FD transfers. `listener.accept()` must return instances of + // AsyncCapabilityStream, otherwise this will crash. + + kj::Promise drain() { return tasks.onEmpty(); } + // Resolves when all clients have disconnected. + // + // Only considers clients whose connections TwoPartyServer took ownership of. + private: Capability::Client bootstrapInterface; kj::TaskSet tasks; @@ -141,17 +232,30 @@ class TwoPartyClient { public: explicit TwoPartyClient(kj::AsyncIoStream& connection); + explicit TwoPartyClient(kj::AsyncCapabilityStream& connection, uint maxFdsPerMessage); TwoPartyClient(kj::AsyncIoStream& connection, Capability::Client bootstrapInterface, rpc::twoparty::Side side = rpc::twoparty::Side::CLIENT); + TwoPartyClient(kj::AsyncCapabilityStream& connection, uint maxFdsPerMessage, + Capability::Client bootstrapInterface, + rpc::twoparty::Side side = rpc::twoparty::Side::CLIENT); Capability::Client bootstrap(); // Get the server's bootstrap interface. inline kj::Promise onDisconnect() { return network.onDisconnect(); } + void setTraceEncoder(kj::Function func); + // Forwarded to rpcSystem.setTraceEncoder(). 
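Tying the new TwoPartyClient surface together, a minimal sketch of constructing a client over an FD-capable stream and installing a trace encoder. The stream (e.g. a Unix-domain socket wrapped as a kj::AsyncCapabilityStream) and the string produced by the lambda are assumptions for illustration; the constructor and setTraceEncoder() are the APIs added in this diff:

    #include <capnp/rpc-twoparty.h>
    #include <kj/async-io.h>
    #include <kj/string.h>

    void runClient(kj::AsyncCapabilityStream& stream) {
      // Allow up to 4 file descriptors to be attached to any single RPC message.
      capnp::TwoPartyClient client(stream, 4);

      // Whatever string this returns is carried in Exception.trace when an exception
      // crosses this connection; the format is entirely application-defined.
      client.setTraceEncoder([](const kj::Exception& e) {
        return kj::str(e.getFile(), ":", e.getLine());
      });

      capnp::Capability::Client bootstrap = client.bootstrap();
      // ... make calls on `bootstrap`; `client` must stay alive while the connection is in use ...
    }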
+ + size_t getCurrentQueueSize() { return network.getCurrentQueueSize(); } + size_t getCurrentQueueCount() { return network.getCurrentQueueCount(); } + kj::Duration getOutgoingMessageWaitTime() { return network.getOutgoingMessageWaitTime(); } + private: TwoPartyVatNetwork network; RpcSystem rpcSystem; }; } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.c++ index e8b40de72d2..a75fd72199e 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.c++ @@ -31,6 +31,8 @@ #include #include #include +#include +#include namespace capnp { namespace _ { // private @@ -55,7 +57,9 @@ constexpr const uint CAP_DESCRIPTOR_SIZE_HINT = sizeInWords( constexpr const uint64_t MAX_SIZE_HINT = 1 << 20; uint copySizeHint(MessageSize size) { - uint64_t sizeHint = size.wordCount + size.capCount * CAP_DESCRIPTOR_SIZE_HINT; + uint64_t sizeHint = size.wordCount + size.capCount * CAP_DESCRIPTOR_SIZE_HINT + // if capCount > 0, the cap descriptor list has a 1-word tag + + (size.capCount > 0); return kj::min(MAX_SIZE_HINT, sizeHint); } @@ -108,14 +112,16 @@ Orphan> fromPipelineOps( } kj::Exception toException(const rpc::Exception::Reader& exception) { - return kj::Exception(static_cast(exception.getType()), + kj::Exception result(static_cast(exception.getType()), "(remote)", 0, kj::str("remote exception: ", exception.getReason())); + if (exception.hasTrace()) { + result.setRemoteTrace(kj::str(exception.getTrace())); + } + return result; } -void fromException(const kj::Exception& exception, rpc::Exception::Builder builder) { - // TODO(someday): Indicate the remote server name as part of the stack trace. Maybe even - // transmit stack traces? - +void fromException(const kj::Exception& exception, rpc::Exception::Builder builder, + kj::Maybe&> traceEncoder) { kj::StringPtr description = exception.getDescription(); // Include context, if any. @@ -137,6 +143,10 @@ void fromException(const kj::Exception& exception, rpc::Exception::Builder build builder.setReason(description); builder.setType(static_cast(exception.getType())); + KJ_IF_MAYBE(t, traceEncoder) { + builder.setTrace((*t)(exception)); + } + if (exception.getType() == kj::Exception::Type::FAILED && !exception.getDescription().startsWith("remote exception:")) { KJ_LOG(INFO, "returning failure over rpc", exception); @@ -265,14 +275,14 @@ public: }; RpcConnectionState(BootstrapFactoryBase& bootstrapFactory, - kj::Maybe::Client> gateway, kj::Maybe restorer, kj::Own&& connectionParam, kj::Own>&& disconnectFulfiller, - size_t flowLimit) - : bootstrapFactory(bootstrapFactory), gateway(kj::mv(gateway)), + size_t flowLimit, + kj::Maybe&> traceEncoder) + : bootstrapFactory(bootstrapFactory), restorer(restorer), disconnectFulfiller(kj::mv(disconnectFulfiller)), flowLimit(flowLimit), - tasks(*this) { + traceEncoder(traceEncoder), tasks(*this) { connection.init(kj::mv(connectionParam)); tasks.add(messageLoop()); } @@ -315,6 +325,11 @@ public: } void disconnect(kj::Exception&& exception) { + // After disconnect(), the RpcSystem could be destroyed, making `traceEncoder` a dangling + // reference, so null it out before we return from here. We don't need it anymore once + // disconnected anyway. + KJ_DEFER(traceEncoder = nullptr); + if (!connection.is()) { // Already disconnected. 
return; @@ -323,6 +338,18 @@ public: kj::Exception networkException(kj::Exception::Type::DISCONNECTED, exception.getFile(), exception.getLine(), kj::heapString(exception.getDescription())); + // Don't throw away the stack trace. + if (exception.getRemoteTrace() != nullptr) { + networkException.setRemoteTrace(kj::str(exception.getRemoteTrace())); + } + for (void* addr: exception.getStackTrace()) { + networkException.addTrace(addr); + } + // If your stack trace points here, it means that the exception became the reason that the + // RPC connection was disconnected. The exception was then thrown by all in-flight calls and + // all future calls on this connection. + networkException.addTraceHere(); + KJ_IF_MAYBE(newException, kj::runCatchingExceptions([&]() { // Carefully pull all the objects out of the tables prior to releasing them because their // destructors could come back and mess with the tables. @@ -355,7 +382,9 @@ public: exports.forEach([&](ExportId id, Export& exp) { clientsToRelease.add(kj::mv(exp.clientHook)); - resolveOpsToRelease.add(kj::mv(exp.resolveOp)); + KJ_IF_MAYBE(op, exp.resolveOp) { + resolveOpsToRelease.add(kj::mv(*op)); + } exp = Export(); }); @@ -389,15 +418,22 @@ public: auto shutdownPromise = connection.get()->shutdown() .attach(kj::mv(connection.get())) .then([]() -> kj::Promise { return kj::READY_NOW; }, - [](kj::Exception&& e) -> kj::Promise { + [origException = kj::mv(exception)](kj::Exception&& e) -> kj::Promise { // Don't report disconnects as an error. - if (e.getType() != kj::Exception::Type::DISCONNECTED) { - return kj::mv(e); + if (e.getType() == kj::Exception::Type::DISCONNECTED) { + return kj::READY_NOW; } - return kj::READY_NOW; + // If the error is just what was passed in to disconnect(), don't report it back out + // since it shouldn't be anything the caller doesn't already know about. + if (e.getType() == origException.getType() && + e.getDescription() == origException.getDescription()) { + return kj::READY_NOW; + } + return kj::mv(e); }); disconnectFulfiller->fulfill(DisconnectInfo { kj::mv(shutdownPromise) }); connection.init(kj::mv(networkException)); + canceler.cancel(networkException); } void setFlowLimit(size_t words) { @@ -490,7 +526,7 @@ private: kj::Own clientHook; - kj::Promise resolveOp = nullptr; + kj::Maybe> resolveOp = nullptr; // If this export is a promise (not a settled capability), the `resolveOp` represents the // ongoing operation to wait for that promise to resolve and then send a `Resolve` message. @@ -533,7 +569,6 @@ private: // OK, now we can define RpcConnectionState's member data. BootstrapFactoryBase& bootstrapFactory; - kj::Maybe::Client> gateway; kj::Maybe restorer; typedef kj::Own Connected; @@ -542,6 +577,11 @@ private: // Once the connection has failed, we drop it and replace it with an exception, which will be // thrown from all further calls. + kj::Canceler canceler; + // Will be canceled if and when `connection` is changed from `Connected` to `Disconnected`. + // TODO(cleanup): `Connected` should be a struct that contains the connection and the Canceler, + // but that's more refactoring than I want to do right now. + kj::Own> disconnectFulfiller; ExportTable exports; @@ -565,6 +605,8 @@ private: // If non-null, we're currently blocking incoming messages waiting for callWordsInFlight to drop // below flowLimit. Fulfill this to un-block. 
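For context on the flow-limit plumbing referenced here: the limit is configured by the application on the RpcSystem, which forwards to the per-connection setFlowLimit() above. A minimal sketch, assuming RpcSystem::setFlowLimit() as the public entry point and an illustrative 64 MiB budget:

    #include <capnp/rpc-twoparty.h>

    void serve(capnp::TwoPartyVatNetwork& network, capnp::Capability::Client bootstrap) {
      auto rpcSystem = capnp::makeRpcServer(network, kj::mv(bootstrap));

      // Cap the total size of incoming calls that may be in flight at once. When the cap is
      // reached, the message loop stops reading from the connection until results go out.
      rpcSystem.setFlowLimit(64 * 1024 * 1024 / sizeof(capnp::word));  // limit is given in words

      // ... run the event loop, e.g. wait on network.onDisconnect() ...
    }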
+ kj::Maybe&> traceEncoder; + kj::TaskSet tasks; // ===================================================================================== @@ -575,7 +617,8 @@ private: RpcClient(RpcConnectionState& connectionState) : connectionState(kj::addRef(connectionState)) {} - virtual kj::Maybe writeDescriptor(rpc::CapDescriptor::Builder descriptor) = 0; + virtual kj::Maybe writeDescriptor(rpc::CapDescriptor::Builder descriptor, + kj::Vector& fds) = 0; // Writes a CapDescriptor referencing this client. The CapDescriptor must be sent as part of // the very next message sent on the connection, as it may become invalid if other things // happen. @@ -598,42 +641,29 @@ private: // that other client -- return a reference to the other client, transitively. Otherwise, // return a new reference to *this. + virtual void adoptFlowController(kj::Own flowController) { + // Called when a PromiseClient resolves to another RpcClient. If streaming calls were + // outstanding on the old client, we'd like to keep using the same FlowController on the new + // client, so as to keep the flow steady. + + if (this->flowController == nullptr) { + // We don't have any existing flowController so we can adopt this one, yay! + this->flowController = kj::mv(flowController); + } else { + // Apparently, there is an existing flowController. This is an unusual scenario: Apparently + // we had two stream capabilities, we were streaming to both of them, and they later + // resolved to the same capability. This probably never happens because streaming use cases + // normally call for there to be only one client. But, it's certainly possible, and we need + // to handle it. We'll do the conservative thing and just make sure that all the calls + // finish. This may mean we'll over-buffer temporarily; oh well. + connectionState->tasks.add(flowController->waitAllAcked().attach(kj::mv(flowController))); + } + } + // implements ClientHook ----------------------------------------- Request newCall( uint64_t interfaceId, uint16_t methodId, kj::Maybe sizeHint) override { - if (interfaceId == typeId>() && methodId == 0) { - KJ_IF_MAYBE(g, connectionState->gateway) { - // Wait, this is a call to Persistent.save() and we need to translate it through our - // gateway. - // - // We pull a neat trick here: We actually end up returning a RequestHook for an import - // request on the gateway cap, but with the "root" of the request actually pointing - // to the "params" field of the real request. - - sizeHint = sizeHint.map([](MessageSize hint) { - ++hint.capCount; - hint.wordCount += sizeInWords::ImportParams>(); - return hint; - }); - - auto request = g->importRequest(sizeHint); - request.setCap(Persistent<>::Client(kj::refcounted(*this))); - - // Awkwardly, request.initParams() would return a SaveParams struct, but to construct - // the Request to return we need an AnyPointer::Builder, and you - // can't go backwards from a struct builder to an AnyPointer builder. So instead we - // manually get at the pointer by converting the outer request to AnyStruct and then - // pulling the pointer from the pointer section. 
- auto pointers = toAny(request).getPointerSection(); - KJ_ASSERT(pointers.size() >= 2); - auto paramsPtr = pointers[1]; - KJ_ASSERT(paramsPtr.isNull()); - - return Request(paramsPtr, RequestHook::from(kj::mv(request))); - } - } - return newCallNoIntercept(interfaceId, methodId, sizeHint); } @@ -657,26 +687,6 @@ private: VoidPromiseAndPipeline call(uint64_t interfaceId, uint16_t methodId, kj::Own&& context) override { - if (interfaceId == typeId>() && methodId == 0) { - KJ_IF_MAYBE(g, connectionState->gateway) { - // Wait, this is a call to Persistent.save() and we need to translate it through our - // gateway. - auto params = context->getParams().getAs::SaveParams>(); - - auto requestSize = params.totalSize(); - ++requestSize.capCount; - requestSize.wordCount += sizeInWords::ImportParams>(); - - auto request = g->importRequest(requestSize); - request.setCap(Persistent<>::Client(kj::refcounted(*this))); - request.setParams(params); - - context->allowCancellation(); - context->releaseParams(); - return context->directTailCall(RequestHook::from(kj::mv(request))); - } - } - return callNoIntercept(interfaceId, methodId, kj::mv(context)); } @@ -704,14 +714,18 @@ private: } kj::Own connectionState; + + kj::Maybe> flowController; + // Becomes non-null the first time a streaming call is made on this capability. }; class ImportClient final: public RpcClient { // A ClientHook that wraps an entry in the import table. public: - ImportClient(RpcConnectionState& connectionState, ImportId importId) - : RpcClient(connectionState), importId(importId) {} + ImportClient(RpcConnectionState& connectionState, ImportId importId, + kj::Maybe fd) + : RpcClient(connectionState), importId(importId), fd(kj::mv(fd)) {} ~ImportClient() noexcept(false) { unwindDetector.catchExceptionsIfUnwinding([&]() { @@ -736,12 +750,19 @@ private: }); } + void setFdIfMissing(kj::Maybe newFd) { + if (fd == nullptr) { + fd = kj::mv(newFd); + } + } + void addRemoteRef() { // Add a new RemoteRef and return a new ref to this client representing it. ++remoteRefcount; } - kj::Maybe writeDescriptor(rpc::CapDescriptor::Builder descriptor) override { + kj::Maybe writeDescriptor(rpc::CapDescriptor::Builder descriptor, + kj::Vector& fds) override { descriptor.setReceiverHosted(importId); return nullptr; } @@ -766,8 +787,13 @@ private: return nullptr; } + kj::Maybe getFd() override { + return fd.map([](auto& f) { return f.get(); }); + } + private: ImportId importId; + kj::Maybe fd; uint remoteRefcount = 0; // Number of times we've received this import from the peer. 
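The fd member and getFd() override above are the import-side half of FD passing. At the application level this is surfaced roughly as in the sketch below, assuming Capability::Server::getFd() and Capability::Client::getFd() as the public hooks corresponding to the ClientHook::getFd() plumbing in this diff, and a hypothetical `File` interface from the application's schema:

    #include <capnp/capability.h>
    #include <kj/io.h>
    #include "file.capnp.h"   // hypothetical schema defining `interface File {}`

    class FileImpl final: public File::Server {
    public:
      explicit FileImpl(kj::AutoCloseFd fd): fd(kj::mv(fd)) {}

      kj::Maybe<int> getFd() override { return fd.get(); }
      // When this capability travels over an FD-capable (Unix socket) connection, the RPC
      // layer attaches the descriptor to the outgoing CapDescriptor, as implemented above.

    private:
      kj::AutoCloseFd fd;
    };

    void useFile(File::Client file) {
      KJ_IF_MAYBE(fd, file.getFd()) {
        // *fd arrived attached to the capability. It remains owned by the capability, so
        // dup() it if it needs to outlive `file`.
      }
    }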
@@ -784,7 +810,8 @@ private: kj::Array&& ops) : RpcClient(connectionState), questionRef(kj::mv(questionRef)), ops(kj::mv(ops)) {} - kj::Maybe writeDescriptor(rpc::CapDescriptor::Builder descriptor) override { + kj::Maybe writeDescriptor(rpc::CapDescriptor::Builder descriptor, + kj::Vector& fds) override { auto promisedAnswer = descriptor.initReceiverAnswer(); promisedAnswer.setQuestionId(questionRef->getId()); promisedAnswer.adoptTransform(fromPipelineOps( @@ -814,6 +841,10 @@ private: return nullptr; } + kj::Maybe getFd() override { + return nullptr; + } + private: kj::Own questionRef; kj::Array ops; @@ -825,31 +856,25 @@ private: public: PromiseClient(RpcConnectionState& connectionState, - kj::Own initial, + kj::Own initial, kj::Promise> eventual, kj::Maybe importId) : RpcClient(connectionState), - isResolved(false), cap(kj::mv(initial)), importId(importId), - fork(eventual.fork()), - resolveSelfPromise(fork.addBranch().then( + fork(eventual.then( [this](kj::Own&& resolution) { - resolve(kj::mv(resolution), false); + return resolve(kj::mv(resolution)); }, [this](kj::Exception&& exception) { - resolve(newBrokenCap(kj::mv(exception)), true); - }).eagerlyEvaluate([&](kj::Exception&& e) { + return resolve(newBrokenCap(kj::mv(exception))); + }).catch_([&](kj::Exception&& e) { // Make any exceptions thrown from resolve() go to the connection's TaskSet which // will cause the connection to be terminated. - connectionState.tasks.add(kj::mv(e)); - })) { - // Create a client that starts out forwarding all calls to `initial` but, once `eventual` - // resolves, will forward there instead. In addition, `whenMoreResolved()` will return a fork - // of `eventual`. Note that this means the application could hold on to `eventual` even after - // the `PromiseClient` is destroyed; `eventual` must therefore make sure to hold references to - // anything that needs to stay alive in order to resolve it correctly (such as making sure the - // import ID is not released). - } + connectionState.tasks.add(kj::cp(e)); + return newBrokenCap(kj::mv(e)); + }).fork()) {} + // Create a client that starts out forwarding all calls to `initial` but, once `eventual` + // resolves, will forward there instead. ~PromiseClient() noexcept(false) { KJ_IF_MAYBE(id, importId) { @@ -867,9 +892,10 @@ private: } } - kj::Maybe writeDescriptor(rpc::CapDescriptor::Builder descriptor) override { + kj::Maybe writeDescriptor(rpc::CapDescriptor::Builder descriptor, + kj::Vector& fds) override { receivedCall = true; - return connectionState->writeDescriptor(*cap, descriptor); + return connectionState->writeDescriptor(*cap, descriptor, fds); } kj::Maybe> writeTarget( @@ -883,52 +909,37 @@ private: return connectionState->getInnermostClient(*cap); } + void adoptFlowController(kj::Own flowController) override { + if (cap->getBrand() == connectionState.get()) { + // Pass the flow controller on to our inner cap. + kj::downcast(*cap).adoptFlowController(kj::mv(flowController)); + } else { + // We resolved to a capability that isn't another RPC capability. We should simply make + // sure that all the calls complete. 
+ connectionState->tasks.add(flowController->waitAllAcked().attach(kj::mv(flowController))); + } + } + // implements ClientHook ----------------------------------------- Request newCall( uint64_t interfaceId, uint16_t methodId, kj::Maybe sizeHint) override { - if (!isResolved && interfaceId == typeId>() && methodId == 0 && - connectionState->gateway != nullptr) { - // This is a call to Persistent.save(), and we're not resolved yet, and the underlying - // remote capability will perform a gateway translation. This isn't right if the promise - // ultimately resolves to a local capability. Instead, we'll need to queue the call until - // the promise resolves. - return newLocalPromiseClient(fork.addBranch()) - ->newCall(interfaceId, methodId, sizeHint); - } - receivedCall = true; - return cap->newCall(interfaceId, methodId, sizeHint); + + // IMPORTANT: We must call our superclass's version of newCall(), NOT cap->newCall(), because + // the Request object we create needs to check at send() time whether the promise has + // resolved and, if so, redirect to the new target. + return RpcClient::newCall(interfaceId, methodId, sizeHint); } VoidPromiseAndPipeline call(uint64_t interfaceId, uint16_t methodId, kj::Own&& context) override { - if (!isResolved && interfaceId == typeId>() && methodId == 0 && - connectionState->gateway != nullptr) { - // This is a call to Persistent.save(), and we're not resolved yet, and the underlying - // remote capability will perform a gateway translation. This isn't right if the promise - // ultimately resolves to a local capability. Instead, we'll need to queue the call until - // the promise resolves. - - auto vpapPromises = fork.addBranch().then(kj::mvCapture(context, - [interfaceId,methodId](kj::Own&& context, - kj::Own resolvedCap) { - auto vpap = resolvedCap->call(interfaceId, methodId, kj::mv(context)); - return kj::tuple(kj::mv(vpap.promise), kj::mv(vpap.pipeline)); - })).split(); - - return { - kj::mv(kj::get<0>(vpapPromises)), - newLocalPromisePipeline(kj::mv(kj::get<1>(vpapPromises))), - }; - } - receivedCall = true; return cap->call(interfaceId, methodId, kj::mv(context)); } kj::Maybe getResolved() override { - if (isResolved) { + if (isResolved()) { return *cap; } else { return nullptr; @@ -939,24 +950,133 @@ private: return fork.addBranch(); } + kj::Maybe getFd() override { + if (isResolved()) { + return cap->getFd(); + } else { + // In theory, before resolution, the ImportClient for the promise could have an FD + // attached, if the promise itself was presented with an attached FD. However, we can't + // really return that one here because it may be closed when we get the Resolve message + // later. In theory we could have the PromiseClient itself take ownership of an FD that + // arrived attached to a promise cap, but the use case for that is questionable. I'm + // keeping it simple for now. + return nullptr; + } + } + private: - bool isResolved; kj::Own cap; kj::Maybe importId; kj::ForkedPromise> fork; - // Keep this last, because the continuation uses *this, so it should be destroyed first to - // ensure the continuation is not still running. - kj::Promise resolveSelfPromise; - bool receivedCall = false; - void resolve(kj::Own replacement, bool isError) { + enum { + UNRESOLVED, + // Not resolved at all yet. + + REMOTE, + // Remote promise resolved to a remote settled capability (or null/error). + + REFLECTED, + // Remote promise resolved to one of our own exports. 
+ + MERGED, + // Remote promise resolved to another remote promise which itself wasn't resolved yet, so we + // merged them. In this case, `cap` is guaranteed to point to another PromiseClient. + + BROKEN + // Resolved to null or error. + } resolutionType = UNRESOLVED; + + inline bool isResolved() { + return resolutionType != UNRESOLVED; + } + + kj::Promise> resolve(kj::Own replacement) { + KJ_DASSERT(!isResolved()); + const void* replacementBrand = replacement->getBrand(); - if (replacementBrand != connectionState.get() && - replacementBrand != &ClientHook::NULL_CAPABILITY_BRAND && - receivedCall && !isError && connectionState->connection.is()) { + bool isSameConnection = replacementBrand == connectionState.get(); + if (isSameConnection) { + // We resolved to some other RPC capability hosted by the same peer. + KJ_IF_MAYBE(promise, replacement->whenMoreResolved()) { + // We resolved to another remote promise. If *that* promise eventually resolves back + // to us, we'll need a disembargo. Possibilities: + // 1. The other promise hasn't resolved at all yet. In that case we can simply set its + // `receivedCall` flag and let it handle the disembargo later. + // 2. The other promise has received a Resolve message and decided to initiate a + // disembargo which it is still waiting for. In that case we will certainly also need + // a disembargo for the same reason that the other promise did. And, we can't simply + // wait for their disembargo; we need to start a new one of our own. + // 3. The other promise has resolved already (with or without a disembargo). In this + // case we should treat it as if we resolved directly to the other promise's result, + // possibly requiring a disembargo under the same conditions. + + // We know the other object is a PromiseClient because it's the only ClientHook + // type in the RPC implementation which returns non-null for `whenMoreResolved()`. + PromiseClient* other = &kj::downcast(*replacement); + while (other->resolutionType == MERGED) { + // There's no need to resolve to a thing that's just going to resolve to another thing. + replacement = other->cap->addRef(); + other = &kj::downcast(*replacement); + + // Note that replacementBrand is unchanged since we'd only merge with other + // PromiseClients on the same connection. + KJ_DASSERT(replacement->getBrand() == replacementBrand); + } + + if (other->isResolved()) { + // The other capability resolved already. If it determined that it resolved as + // relfected, then we determine the same. + resolutionType = other->resolutionType; + } else { + // The other capability hasn't resolved yet, so we can safely merge with it and do a + // single combined disembargo if needed later. + other->receivedCall = other->receivedCall || receivedCall; + resolutionType = MERGED; + } + } else { + resolutionType = REMOTE; + } + } else { + if (replacementBrand == &ClientHook::NULL_CAPABILITY_BRAND || + replacementBrand == &ClientHook::BROKEN_CAPABILITY_BRAND) { + // We don't consider null or broken capabilities as "reflected" because they may have + // been communicated to us literally as a null pointer or an exception on the wire, + // rather than as a reference to one of our exports, in which case a disembargo won't + // work. But also, call ordering is completely irrelevant with these so there's no need + // to disembargo anyway. + resolutionType = BROKEN; + } else { + resolutionType = REFLECTED; + } + } + + // Every branch above ends by setting resolutionType to something other than UNRESOLVED. 
+ KJ_DASSERT(isResolved()); + + // If the original capability was used for streaming calls, it will have a + // `flowController` that might still be shepherding those calls. We'll need make sure that + // it doesn't get thrown away. Note that we know that *cap is an RpcClient because resolve() + // is only called once and our constructor required that the initial capability is an + // RpcClient. + KJ_IF_MAYBE(f, kj::downcast(*cap).flowController) { + if (isSameConnection) { + // The new target is on the same connection. It would make a lot of sense to keep using + // the same flow controller if possible. + kj::downcast(*replacement).adoptFlowController(kj::mv(*f)); + } else { + // The new target is something else. The best we can do is wait for the controller to + // drain. New calls will be flow-controlled in a new way without knowing about the old + // controller. + connectionState->tasks.add(f->get()->waitAllAcked().attach(kj::mv(*f))); + } + } + + if (resolutionType == REFLECTED && receivedCall && + connectionState->connection.is()) { // The new capability is hosted locally, not on the remote machine. And, we had made calls // to the promise. We need to make sure those calls echo back to us before we allow new // calls to go directly to the local capability, so we need to set a local embargo and send @@ -982,10 +1102,9 @@ private: embargo.fulfiller = kj::mv(paf.fulfiller); // Make a promise which resolves to `replacement` as soon as the `Disembargo` comes back. - auto embargoPromise = paf.promise.then( - kj::mvCapture(replacement, [](kj::Own&& replacement) { - return kj::mv(replacement); - })); + auto embargoPromise = paf.promise.then([replacement = kj::mv(replacement)]() mutable { + return kj::mv(replacement); + }); // We need to queue up calls in the meantime, so we'll resolve ourselves to a local promise // client instead. @@ -995,61 +1114,14 @@ private: message->send(); } - cap = kj::mv(replacement); - isResolved = true; - } - }; - - class NoInterceptClient final: public RpcClient { - // A wrapper around an RpcClient which bypasses special handling of "save" requests. When we - // intercept a "save" request and invoke a RealmGateway, we give it a version of the capability - // with intercepting disabled, since usually the first thing the RealmGateway will do is turn - // around and call save() again. - // - // This is admittedly sort of backwards: the interception of "save" ought to be the part - // implemented by a wrapper. However, that would require placing a wrapper around every - // RpcClient we create whereas NoInterceptClient only needs to be injected after a save() - // request occurs and is intercepted. 
- - public: - NoInterceptClient(RpcClient& inner) - : RpcClient(*inner.connectionState), - inner(kj::addRef(inner)) {} + cap = replacement->addRef(); - kj::Maybe writeDescriptor(rpc::CapDescriptor::Builder descriptor) override { - return inner->writeDescriptor(descriptor); - } - - kj::Maybe> writeTarget(rpc::MessageTarget::Builder target) override { - return inner->writeTarget(target); - } - - kj::Own getInnermostClient() override { - return inner->getInnermostClient(); + return kj::mv(replacement); } - - Request newCall( - uint64_t interfaceId, uint16_t methodId, kj::Maybe sizeHint) override { - return inner->newCallNoIntercept(interfaceId, methodId, sizeHint); - } - VoidPromiseAndPipeline call(uint64_t interfaceId, uint16_t methodId, - kj::Own&& context) override { - return inner->callNoIntercept(interfaceId, methodId, kj::mv(context)); - } - - kj::Maybe getResolved() override { - return nullptr; - } - - kj::Maybe>> whenMoreResolved() override { - return nullptr; - } - - private: - kj::Own inner; }; - kj::Maybe writeDescriptor(ClientHook& cap, rpc::CapDescriptor::Builder descriptor) { + kj::Maybe writeDescriptor(ClientHook& cap, rpc::CapDescriptor::Builder descriptor, + kj::Vector& fds) { // Write a descriptor for the given capability. // Find the innermost wrapped capability. @@ -1062,15 +1134,24 @@ private: } } + KJ_IF_MAYBE(fd, inner->getFd()) { + descriptor.setAttachedFd(fds.size()); + fds.add(kj::mv(*fd)); + } + if (inner->getBrand() == this) { - return kj::downcast(*inner).writeDescriptor(descriptor); + return kj::downcast(*inner).writeDescriptor(descriptor, fds); } else { auto iter = exportsByCap.find(inner); if (iter != exportsByCap.end()) { // We've already seen and exported this capability before. Just up the refcount. auto& exp = KJ_ASSERT_NONNULL(exports.find(iter->second)); ++exp.refcount; - descriptor.setSenderHosted(iter->second); + if (exp.resolveOp == nullptr) { + descriptor.setSenderHosted(iter->second); + } else { + descriptor.setSenderPromise(iter->second); + } return iter->second; } else { // This is the first time we've seen this capability. @@ -1094,12 +1175,17 @@ private: } kj::Array writeDescriptors(kj::ArrayPtr>> capTable, - rpc::Payload::Builder payload) { + rpc::Payload::Builder payload, kj::Vector& fds) { + if (capTable.size() == 0) { + // Calling initCapTable(0) will still allocate a 1-word tag, which we'd like to avoid... 
+ return nullptr; + } + auto capTableBuilder = payload.initCapTable(capTable.size()); kj::Vector exports(capTable.size()); for (uint i: kj::indices(capTable)) { KJ_IF_MAYBE(cap, capTable[i]) { - KJ_IF_MAYBE(exportId, writeDescriptor(**cap, capTableBuilder[i])) { + KJ_IF_MAYBE(exportId, writeDescriptor(**cap, capTableBuilder[i], fds)) { exports.add(*exportId); } } else { @@ -1199,7 +1285,9 @@ private: messageSizeHint() + sizeInWords() + 16); auto resolve = message->getBody().initAs().initResolve(); resolve.setPromiseId(exportId); - writeDescriptor(*exp.clientHook, resolve.initCap()); + kj::Vector fds; + writeDescriptor(*exp.clientHook, resolve.initCap(), fds); + message->setFds(fds.releaseAsArray()); message->send(); return kj::READY_NOW; @@ -1217,10 +1305,14 @@ private: }); } + void fromException(const kj::Exception& exception, rpc::Exception::Builder builder) { + _::fromException(exception, builder, traceEncoder); + } + // ===================================================================================== // Interpreting CapDescriptor - kj::Own import(ImportId importId, bool isPromise) { + kj::Own import(ImportId importId, bool isPromise, kj::Maybe fd) { // Receive a new import. auto& import = imports[importId]; @@ -1229,8 +1321,17 @@ private: // Create the ImportClient, or if one already exists, use it. KJ_IF_MAYBE(c, import.importClient) { importClient = kj::addRef(*c); + + // If the same import is introduced multiple times, and it is missing an FD the first time, + // but it has one on a later attempt, we want to attach the later one. This could happen + // because the first introduction was part of a message that had too many other FDs and went + // over the per-message limit. Perhaps the protocol design is such that this other message + // doesn't really care if the FDs are transferred or not, but the later message really does + // care; it would be bad if the previous message blocked later messages from delivering the + // FD just because it happened to reference the same capability. + importClient->setFdIfMissing(kj::mv(fd)); } else { - importClient = kj::refcounted(*this, importId); + importClient = kj::refcounted(*this, importId, kj::mv(fd)); import.importClient = *importClient; } @@ -1262,19 +1363,114 @@ private: } } - kj::Maybe> receiveCap(rpc::CapDescriptor::Reader descriptor) { + class TribbleRaceBlocker: public ClientHook, public kj::Refcounted { + // Hack to work around a problem that arises during the Tribble 4-way Race Condition as + // described in rpc.capnp in the documentation for the `Disembargo` message. + // + // Consider a remote promise that is resolved by a `Resolve` message. PromiseClient::resolve() + // is eventually called and given the `ClientHook` for the resolution. Imagine that the + // `ClientHook` it receives turns out to be an `ImportClient`. There are two ways this could + // have happened: + // + // 1. The `Resolve` message contained a `CapDescriptor` of type `senderHosted`, naming an entry + // in the sender's export table, and the `ImportClient` refers to the corresponding slot on + // the receiver's import table. In this case, no embargo is needed, because messages to the + // resolved location traverse the same path as messages to the promise would have. + // + // 2. The `Resolve` message contained a `CapDescriptor` of type `receiverHosted`, naming an + // entry in the receiver's export table. That entry just happened to contain an + // `ImportClient` refering back to the sender. 
This specifically happens when the entry + // in question had previously itself referred to a promise, and that promise has since + // resolved to a remote capability, at which point the export table entry was replaced by + // the appropriate `ImportClient` representing that. Presumably, the peer *did not yet know* + // about this resolution, which is why it sent a `receiverHosted` pointing to something that + // reflects back to the sender, rather than sending `senderHosted` in the first place. + // + // In this case, an embargo *is* required, because peer may still be reflecting messages + // sent to this promise back to us. In fact, the peer *must* continue reflecting messages, + // even when it eventually learns that the eventual destination is one of its own + // capabilities, due to the Tribble 4-way Race Condition rule. + // + // Since this case requires an embargo, somehow PromiseClient::resolve() must be able to + // distinguish it from the case (1). One solution would be for us to pass some extra flag + // all the way from where the `Resolve` messages is received to `PromiseClient::resolve()`. + // That solution is reasonably easy in the `Resolve` case, but gets notably more difficult + // in the case of `Return`s, which also resolve promises and are subject to all the same + // problems. In the case of a `Return`, some non-RPC-specific code is involved in the + // resolution, making it harder to pass along a flag. + // + // Instead, we use this hack: When we read an entry in the export table and discover that + // it actually contains an `ImportClient` or a `PipelineClient` reflecting back over our + // own connection, then we wrap it in a `TribbleRaceBlocker`. This wrapper prevents + // `PromiseClient` from recognizing the capability as being remote, so it instead treats it + // as local. That causes it to set up an embargo as desired. + // + // TODO(perf): This actually blocks further promise resolution in the case where the + // ImportClient or PipelineClient itself ends up being yet another promise that resolves + // back over the connection again. What we probably really need to do here is, instead of + // placing `ImportClient` or `PipelineClient` on the export table, place a special type there + // that both knows what to do with future incoming messages to that export ID, but also knows + // what to do when that export is the subject of a `Resolve`. + + public: + TribbleRaceBlocker(kj::Own inner): inner(kj::mv(inner)) {} + + Request newCall( + uint64_t interfaceId, uint16_t methodId, kj::Maybe sizeHint) override { + return inner->newCall(interfaceId, methodId, sizeHint); + } + VoidPromiseAndPipeline call(uint64_t interfaceId, uint16_t methodId, + kj::Own&& context) override { + return inner->call(interfaceId, methodId, kj::mv(context)); + } + kj::Maybe getResolved() override { + // We always wrap either PipelineClient or ImportClient, both of which return null for this + // anyway. + return nullptr; + } + kj::Maybe>> whenMoreResolved() override { + // We always wrap either PipelineClient or ImportClient, both of which return null for this + // anyway. 
+ return nullptr; + } + kj::Own addRef() override { + return kj::addRef(*this); + } + const void* getBrand() override { + return nullptr; + } + kj::Maybe getFd() override { + return inner->getFd(); + } + + private: + kj::Own inner; + }; + + kj::Maybe> receiveCap(rpc::CapDescriptor::Reader descriptor, + kj::ArrayPtr fds) { + uint fdIndex = descriptor.getAttachedFd(); + kj::Maybe fd; + if (fdIndex < fds.size() && fds[fdIndex] != nullptr) { + fd = kj::mv(fds[fdIndex]); + } + switch (descriptor.which()) { case rpc::CapDescriptor::NONE: return nullptr; case rpc::CapDescriptor::SENDER_HOSTED: - return import(descriptor.getSenderHosted(), false); + return import(descriptor.getSenderHosted(), false, kj::mv(fd)); case rpc::CapDescriptor::SENDER_PROMISE: - return import(descriptor.getSenderPromise(), true); + return import(descriptor.getSenderPromise(), true, kj::mv(fd)); case rpc::CapDescriptor::RECEIVER_HOSTED: KJ_IF_MAYBE(exp, exports.find(descriptor.getReceiverHosted())) { - return exp->clientHook->addRef(); + auto result = exp->clientHook->addRef(); + if (result->getBrand() == this) { + result = kj::refcounted(kj::mv(result)); + } + return kj::mv(result); } else { return newBrokenCap("invalid 'receiverHosted' export ID"); } @@ -1286,7 +1482,11 @@ private: if (answer->active) { KJ_IF_MAYBE(pipeline, answer->pipeline) { KJ_IF_MAYBE(ops, toPipelineOps(promisedAnswer.getTransform())) { - return pipeline->get()->getPipelinedCap(*ops); + auto result = pipeline->get()->getPipelinedCap(*ops); + if (result->getBrand() == this) { + result = kj::refcounted(kj::mv(result)); + } + return kj::mv(result); } else { return newBrokenCap("unrecognized pipeline ops"); } @@ -1299,7 +1499,7 @@ private: case rpc::CapDescriptor::THIRD_PARTY_HOSTED: // We don't support third-party caps, so use the vine instead. - return import(descriptor.getThirdPartyHosted().getVineId(), false); + return import(descriptor.getThirdPartyHosted().getVineId(), false, kj::mv(fd)); default: KJ_FAIL_REQUIRE("unknown CapDescriptor type") { break; } @@ -1307,10 +1507,11 @@ private: } } - kj::Array>> receiveCaps(List::Reader capTable) { + kj::Array>> receiveCaps(List::Reader capTable, + kj::ArrayPtr fds) { auto result = kj::heapArrayBuilder>>(capTable.size()); for (auto cap: capTable) { - result.add(receiveCap(cap)); + result.add(receiveCap(cap, fds)); } return result.finish(); } @@ -1328,13 +1529,17 @@ private: kj::Own>>> fulfiller) : connectionState(kj::addRef(connectionState)), id(id), fulfiller(kj::mv(fulfiller)) {} - ~QuestionRef() { - unwindDetector.catchExceptionsIfUnwinding([&]() { - auto& question = KJ_ASSERT_NONNULL( - connectionState->questions.find(id), "Question ID no longer on table?"); + ~QuestionRef() noexcept { + // Contrary to KJ style, we declare this destructor `noexcept` because if anything in here + // throws (without being caught) we're probably in pretty bad shape and going to be crashing + // later anyway. Better to abort now. - // Send the "Finish" message (if the connection is not already broken). - if (connectionState->connection.is() && !question.skipFinish) { + auto& question = KJ_ASSERT_NONNULL( + connectionState->questions.find(id), "Question ID no longer on table?"); + + // Send the "Finish" message (if the connection is not already broken). 
+ if (connectionState->connection.is() && !question.skipFinish) { + KJ_IF_MAYBE(e, kj::runCatchingExceptions([&]() { auto message = connectionState->connection.get()->newOutgoingMessage( messageSizeHint()); auto builder = message->getBody().getAs().initFinish(); @@ -1345,19 +1550,21 @@ private: // will send Release messages when those are destroyed. builder.setReleaseResultCaps(question.isAwaitingReturn); message->send(); + })) { + connectionState->disconnect(kj::mv(*e)); } + } - // Check if the question has returned and, if so, remove it from the table. - // Remove question ID from the table. Must do this *after* sending `Finish` to ensure that - // the ID is not re-allocated before the `Finish` message can be sent. - if (question.isAwaitingReturn) { - // Still waiting for return, so just remove the QuestionRef pointer from the table. - question.selfRef = nullptr; - } else { - // Call has already returned, so we can now remove it from the table. - connectionState->questions.erase(id, question); - } - }); + // Check if the question has returned and, if so, remove it from the table. + // Remove question ID from the table. Must do this *after* sending `Finish` to ensure that + // the ID is not re-allocated before the `Finish` message can be sent. + if (question.isAwaitingReturn) { + // Still waiting for return, so just remove the QuestionRef pointer from the table. + question.selfRef = nullptr; + } else { + // Call has already returned, so we can now remove it from the table. + connectionState->questions.erase(id, question); + } } inline QuestionId getId() const { return id; } @@ -1378,7 +1585,6 @@ private: kj::Own connectionState; QuestionId id; kj::Own>>> fulfiller; - kj::UnwindDetector unwindDetector; }; class RpcRequest final: public RequestHook { @@ -1438,6 +1644,25 @@ private: } } + kj::Promise sendStreaming() override { + if (!connectionState->connection.is()) { + // Connection is broken. + return kj::cp(connectionState->connection.get()); + } + + KJ_IF_MAYBE(redirect, target->writeTarget(callBuilder.getTarget())) { + // Whoops, this capability has been redirected while we were building the request! + // We'll have to make a new request and do a copy. Ick. + + auto replacement = redirect->get()->newCall( + callBuilder.getInterfaceId(), callBuilder.getMethodId(), paramsBuilder.targetSize()); + replacement.set(paramsBuilder); + return RequestHook::from(kj::mv(replacement))->sendStreaming(); + } else { + return sendStreamingInternal(false); + } + } + struct TailInfo { QuestionId questionId; kj::Promise promise; @@ -1495,10 +1720,21 @@ private: kj::Promise> promise = nullptr; }; - SendInternalResult sendInternal(bool isTailCall) { + struct SetupSendResult: public SendInternalResult { + QuestionId questionId; + Question& question; + + SetupSendResult(SendInternalResult&& super, QuestionId questionId, Question& question) + : SendInternalResult(kj::mv(super)), questionId(questionId), question(question) {} + // TODO(cleanup): This constructor is implicit in C++17. + }; + + SetupSendResult setupSend(bool isTailCall) { // Build the cap table. + kj::Vector fds; auto exports = connectionState->writeDescriptors( - capTable.getTable(), callBuilder.getParams()); + capTable.getTable(), callBuilder.getParams(), fds); + message->setFds(fds.releaseAsArray()); // Init the question table. Do this after writing descriptors to avoid interference. 
QuestionId questionId; @@ -1515,8 +1751,14 @@ private: question.selfRef = *result.questionRef; result.promise = paf.promise.attach(kj::addRef(*result.questionRef)); + return { kj::mv(result), questionId, question }; + } + + SendInternalResult sendInternal(bool isTailCall) { + auto result = setupSend(isTailCall); + // Finish and send. - callBuilder.setQuestionId(questionId); + callBuilder.setQuestionId(result.questionId); if (isTailCall) { callBuilder.getSendResultsTo().setYourself(); } @@ -1527,14 +1769,47 @@ private: })) { // We can't safely throw the exception from here since we've already modified the question // table state. We'll have to reject the promise instead. - question.isAwaitingReturn = false; - question.skipFinish = true; + result.question.isAwaitingReturn = false; + result.question.skipFinish = true; + connectionState->releaseExports(result.question.paramExports); result.questionRef->reject(kj::mv(*exception)); } // Send and return. return kj::mv(result); } + + kj::Promise sendStreamingInternal(bool isTailCall) { + auto setup = setupSend(isTailCall); + + // Finish and send. + callBuilder.setQuestionId(setup.questionId); + if (isTailCall) { + callBuilder.getSendResultsTo().setYourself(); + } + kj::Promise flowPromise = nullptr; + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + KJ_CONTEXT("sending RPC call", + callBuilder.getInterfaceId(), callBuilder.getMethodId()); + RpcFlowController* flow; + KJ_IF_MAYBE(f, target->flowController) { + flow = *f; + } else { + flow = target->flowController.emplace( + connectionState->connection.get()->newStream()); + } + flowPromise = flow->send(kj::mv(message), setup.promise.ignoreResult()); + })) { + // We can't safely throw the exception from here since we've already modified the question + // table state. We'll have to reject the promise instead. + setup.question.isAwaitingReturn = false; + setup.question.skipFinish = true; + setup.questionRef->reject(kj::cp(*exception)); + return kj::mv(*exception); + } + + return kj::mv(flowPromise); + } }; class RpcPipeline final: public PipelineHook, public kj::Refcounted { @@ -1581,28 +1856,40 @@ private: } kj::Own getPipelinedCap(kj::Array&& ops) override { - if (state.is()) { - // Wrap a PipelineClient in a PromiseClient. - auto pipelineClient = kj::refcounted( - *connectionState, kj::addRef(*state.get()), kj::heapArray(ops.asPtr())); - - KJ_IF_MAYBE(r, redirectLater) { - auto resolutionPromise = r->addBranch().then(kj::mvCapture(ops, - [](kj::Array ops, kj::Own&& response) { - return response->getResults().getPipelinedCap(ops); - })); - - return kj::refcounted( - *connectionState, kj::mv(pipelineClient), kj::mv(resolutionPromise), nullptr); + return clientMap.findOrCreate(ops.asPtr(), [&]() { + if (state.is()) { + // Wrap a PipelineClient in a PromiseClient. + auto pipelineClient = kj::refcounted( + *connectionState, kj::addRef(*state.get()), kj::heapArray(ops.asPtr())); + + KJ_IF_MAYBE(r, redirectLater) { + auto resolutionPromise = r->addBranch().then( + [ops = kj::heapArray(ops.asPtr())](kj::Own&& response) { + return response->getResults().getPipelinedCap(kj::mv(ops)); + }); + + return kj::HashMap, kj::Own>::Entry { + kj::mv(ops), + kj::refcounted( + *connectionState, kj::mv(pipelineClient), kj::mv(resolutionPromise), nullptr) + }; + } else { + // Oh, this pipeline will never get redirected, so just return the PipelineClient. 
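The sendStreaming()/sendStreamingInternal() path above is what backs `-> stream` methods at the application level. A usage sketch over a hypothetical schema; the key assumption is that the generated send() for a streaming method returns kj::Promise<void>, with the per-capability RpcFlowController deciding how many such sends may be outstanding:

    // Hypothetical schema:
    //   interface Sink {
    //     write @0 (chunk :Data) -> stream;
    //     end @1 ();
    //   }

    kj::Promise<void> pump(Sink::Client sink,
                           kj::ArrayPtr<const kj::ArrayPtr<const kj::byte>> chunks) {
      auto promise = kj::Promise<void>(kj::READY_NOW);
      for (auto& chunk: chunks) {
        auto req = sink.writeRequest();
        req.setChunk(chunk);
        // Streaming send() resolves when the flow controller is willing to accept more data,
        // not when the call itself completes.
        promise = promise.then([req = kj::mv(req)]() mutable { return req.send(); });
      }
      // A final non-streaming call confirms that every write was actually processed.
      return promise.then([sink]() mutable { return sink.endRequest().send().ignoreResult(); });
    }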
+ return kj::HashMap, kj::Own>::Entry { + kj::mv(ops), kj::mv(pipelineClient) + }; + } + } else if (state.is()) { + auto pipelineClient = state.get()->getResults().getPipelinedCap(ops); + return kj::HashMap, kj::Own>::Entry { + kj::mv(ops), kj::mv(pipelineClient) + }; } else { - // Oh, this pipeline will never get redirected, so just return the PipelineClient. - return kj::mv(pipelineClient); + return kj::HashMap, kj::Own>::Entry { + kj::mv(ops), newBrokenCap(kj::cp(state.get())) + }; } - } else if (state.is()) { - return state.get()->getResults().getPipelinedCap(ops); - } else { - return newBrokenCap(kj::cp(state.get())); - } + })->addRef(); } private: @@ -1614,6 +1901,12 @@ private: typedef kj::Exception Broken; kj::OneOf state; + kj::HashMap, kj::Own> clientMap; + // See QueuedPipeline::clientMap in capability.c++ for a discussion of why we must memoize + // the results of getPipelinedCap(). RpcPipeline has a similar problem when a capability we + // return is later subject to an embargo. It's important that the embargo is correctly applied + // across all calls to the same capability. + // Keep this last, because the continuation uses *this, so it should be destroyed first to // ensure the continuation is not still running. kj::Promise resolveSelfPromise; @@ -1691,7 +1984,9 @@ private: // Build the cap table. auto capTable = this->capTable.getTable(); - auto exports = connectionState.writeDescriptors(capTable, payload); + kj::Vector fds; + auto exports = connectionState.writeDescriptors(capTable, payload, fds); + message->setFds(fds.releaseAsArray()); // Capabilities that we are returning are subject to embargos. See `Disembargo` in rpc.capnp. // As explained there, in order to deal with the Tribble 4-way race condition, we need to @@ -1754,7 +2049,7 @@ private: answerId(answerId), interfaceId(interfaceId), methodId(methodId), - requestSize(request->getBody().targetSize().wordCount), + requestSize(request->sizeInWords()), request(kj::mv(request)), paramsCapTable(kj::mv(capTableArray)), params(paramsCapTable.imbue(params)), @@ -1769,6 +2064,7 @@ private: // We haven't sent a return yet, so we must have been canceled. Send a cancellation return. unwindDetector.catchExceptionsIfUnwinding([&]() { // Don't send anything if the connection is broken. + bool shouldFreePipeline = true; if (connectionState->connection.is()) { auto message = connectionState->connection.get()->newOutgoingMessage( messageSizeHint() + sizeInWords()); @@ -1781,6 +2077,9 @@ private: // The reason we haven't sent a return is because the results were sent somewhere // else. builder.setResultsSentElsewhere(); + + // The pipeline could still be valid and in-use in this case. 
+ shouldFreePipeline = false; } else { builder.setCanceled(); } @@ -1788,7 +2087,7 @@ private: message->send(); } - cleanupAnswerTable(nullptr, true); + cleanupAnswerTable(nullptr, shouldFreePipeline); }); } } @@ -1849,7 +2148,7 @@ private: builder.setAnswerId(answerId); builder.setReleaseParamCaps(false); - fromException(exception, builder.initException()); + connectionState->fromException(exception, builder.initException()); message->send(); } @@ -1859,6 +2158,23 @@ private: cleanupAnswerTable(nullptr, false); } } + void sendRedirectReturn() { + KJ_ASSERT(redirectResults); + + if (isFirstResponder()) { + auto message = connectionState->connection.get()->newOutgoingMessage( + messageSizeHint()); + auto builder = message->getBody().initAs().initReturn(); + + builder.setAnswerId(answerId); + builder.setReleaseParamCaps(false); + builder.setResultsSentElsewhere(); + + message->send(); + + cleanupAnswerTable(nullptr, false); + } + } void requestCancel() { // Hints that the caller wishes to cancel this call. At the next time when cancellation is @@ -1908,6 +2224,11 @@ private: return results; } } + void setPipeline(kj::Own&& pipeline) override { + KJ_IF_MAYBE(f, tailCallPipelineFulfiller) { + f->get()->fulfill(AnyPointer::Pipeline(kj::mv(pipeline))); + } + } kj::Promise tailCall(kj::Own&& request) override { auto result = directTailCall(kj::mv(request)); KJ_IF_MAYBE(f, tailCallPipelineFulfiller) { @@ -2083,7 +2404,7 @@ private: }); } - return connection.get()->receiveIncomingMessage().then( + return canceler.wrap(connection.get()->receiveIncomingMessage()).then( [this](kj::Maybe>&& message) { KJ_IF_MAYBE(m, message) { handleMessage(kj::mv(*m)); @@ -2097,7 +2418,19 @@ private: // // (We do this in a separate continuation to handle the case where exceptions are // disabled.) - if (keepGoing) tasks.add(messageLoop()); + // + // TODO(perf): We add an evalLater() here so that anything we needed to do in reaction to + // the previous message has a chance to complete before the next message is handled. In + // paticular, without this, I observed an ordering problem: I saw a case where a `Return` + // message was followed by a `Resolve` message, but the `PromiseClient` associated with the + // `Resolve` had its `resolve()` method invoked _before_ any `PromiseClient`s associated + // with pipelined capabilities resolved by the `Return`. This could lead to an + // incorrectly-ordered interaction between `PromiseClient`s when they resolve to each + // other. This is probably really a bug in the way `Return`s are handled -- apparently, + // resolution of `PromiseClient`s based on returned capabilites does not occur in a + // depth-first way, when it should. If we could fix that then we can probably remove this + // `evalLater()`. However, the `evalLater()` is not that bad and solves the problem... 
+ if (keepGoing) tasks.add(kj::evalLater([this]() { return messageLoop(); })); }); } @@ -2130,7 +2463,7 @@ private: break; case rpc::Message::RESOLVE: - handleResolve(reader.getResolve()); + handleResolve(kj::mv(message), reader.getResolve()); break; case rpc::Message::RELEASE: @@ -2262,7 +2595,9 @@ private: auto capTableArray = capTable.getTable(); KJ_DASSERT(capTableArray.size() == 1); - resultExports = writeDescriptors(capTableArray, payload); + kj::Vector fds; + resultExports = writeDescriptors(capTableArray, payload, fds); + response->setFds(fds.releaseAsArray()); capHook = KJ_ASSERT_NONNULL(capTableArray[0])->addRef(); })) { fromException(*exception, ret.initException()); @@ -2307,7 +2642,7 @@ private: } auto payload = call.getParams(); - auto capTableArray = receiveCaps(payload.getCapTable()); + auto capTableArray = receiveCaps(payload.getCapTable(), message->getAttachedFds()); auto cancelPaf = kj::newPromiseAndFulfiller(); AnswerId answerId = call.getQuestionId(); @@ -2380,52 +2715,6 @@ private: ClientHook::VoidPromiseAndPipeline startCall( uint64_t interfaceId, uint64_t methodId, kj::Own&& capability, kj::Own&& context) { - if (interfaceId == typeId>() && methodId == 0) { - KJ_IF_MAYBE(g, gateway) { - // Wait, this is a call to Persistent.save() and we need to translate it through our - // gateway. - - KJ_IF_MAYBE(resolvedPromise, capability->whenMoreResolved()) { - // The plot thickens: We're looking at a promise capability. It could end up resolving - // to a capability outside the gateway, in which case we don't want to translate at all. - - auto promises = resolvedPromise->then(kj::mvCapture(context, - [this,interfaceId,methodId](kj::Own&& context, - kj::Own resolvedCap) { - auto vpap = startCall(interfaceId, methodId, kj::mv(resolvedCap), kj::mv(context)); - return kj::tuple(kj::mv(vpap.promise), kj::mv(vpap.pipeline)); - })).attach(addRef(*this), kj::mv(capability)).split(); - - return { - kj::mv(kj::get<0>(promises)), - newLocalPromisePipeline(kj::mv(kj::get<1>(promises))), - }; - } - - if (capability->getBrand() == this) { - // This capability is one of our own, pointing back out over the network. That means - // that it would be inappropriate to apply the gateway transformation. We just want to - // reflect the call back. 
- return kj::downcast(*capability) - .callNoIntercept(interfaceId, methodId, kj::mv(context)); - } - - auto params = context->getParams().getAs::SaveParams>(); - - auto requestSize = params.totalSize(); - ++requestSize.capCount; - requestSize.wordCount += sizeInWords::ExportParams>(); - - auto request = g->exportRequest(requestSize); - request.setCap(Persistent<>::Client(capability->addRef())); - request.setParams(params); - - context->allowCancellation(); - context->releaseParams(); - return context->directTailCall(RequestHook::from(kj::mv(request))); - } - } - return capability->call(interfaceId, methodId, kj::mv(context)); } @@ -2500,7 +2789,7 @@ private: } auto payload = ret.getResults(); - auto capTableArray = receiveCaps(payload.getCapTable()); + auto capTableArray = receiveCaps(payload.getCapTable(), message->getAttachedFds()); questionRef->fulfill(kj::refcounted( *this, kj::addRef(*questionRef), kj::mv(message), kj::mv(capTableArray), payload.getContent())); @@ -2534,6 +2823,23 @@ private: KJ_IF_MAYBE(answer, answers.find(ret.getTakeFromOtherQuestion())) { KJ_IF_MAYBE(response, answer->redirectedResults) { questionRef->fulfill(kj::mv(*response)); + answer->redirectedResults = nullptr; + + KJ_IF_MAYBE(context, answer->callContext) { + // Send the `Return` message for the call of which we're taking ownership, so + // that the peer knows it can now tear down the call state. + context->sendRedirectReturn(); + + // There are three conditions, all of which must be true, before a call is + // canceled: + // 1. The RPC opts in by calling context->allowCancellation(). + // 2. We request cancellation with context->requestCancel(). + // 3. The final response promise -- which we passed to questionRef->fulfill() + // above -- must be dropped. + // + // We would like #3 to imply #2. So... we can just make #2 be true. + context->requestCancel(); + } } else { KJ_FAIL_REQUIRE("`Return.takeFromOtherQuestion` referenced a call that did not " "use `sendResultsTo.yourself`.") { return; } @@ -2548,10 +2854,27 @@ private: KJ_FAIL_REQUIRE("Unknown 'Return' type.") { return; } } } else { + // This is a response to a question that we canceled earlier. + if (ret.isTakeFromOtherQuestion()) { - // Be sure to release the tail call's promise. + // This turned out to be a tail call back to us! We now take ownership of the tail call. + // Since the caller canceled, we need to cancel out the tail call, if it still exists. + KJ_IF_MAYBE(answer, answers.find(ret.getTakeFromOtherQuestion())) { + // Indeed, it does still exist. + + // Throw away the result promise. promiseToRelease = kj::mv(answer->redirectedResults); + + KJ_IF_MAYBE(context, answer->callContext) { + // Send the `Return` message for the call of which we're taking ownership, so + // that the peer knows it can now tear down the call state. + context->sendRedirectReturn(); + + // Since the caller has been canceled, make sure the callee that we're tailing to + // gets canceled. 
+ context->requestCancel(); + } } } @@ -2593,21 +2916,21 @@ private: answerToRelease = answers.erase(finish.getQuestionId()); } } else { - KJ_REQUIRE(answer->active, "'Finish' for invalid question ID.") { return; } + KJ_FAIL_REQUIRE("'Finish' for invalid question ID.") { return; } } } // --------------------------------------------------------------------------- // Level 1 - void handleResolve(const rpc::Resolve::Reader& resolve) { + void handleResolve(kj::Own&& message, const rpc::Resolve::Reader& resolve) { kj::Own replacement; kj::Maybe exception; // Extract the replacement capability. switch (resolve.which()) { case rpc::Resolve::CAP: - KJ_IF_MAYBE(cap, receiveCap(resolve.getCap())) { + KJ_IF_MAYBE(cap, receiveCap(resolve.getCap(), message->getAttachedFds())) { replacement = kj::mv(*cap); } else { KJ_FAIL_REQUIRE("'Resolve' contained 'CapDescriptor.none'.") { return; } @@ -2699,9 +3022,9 @@ private: EmbargoId embargoId = context.getSenderLoopback(); - // We need to insert an evalLater() here to make sure that any pending calls towards this + // We need to insert an evalLast() here to make sure that any pending calls towards this // cap have had time to find their way through the event loop. - tasks.add(kj::evalLater(kj::mvCapture( + tasks.add(canceler.wrap(kj::evalLast(kj::mvCapture( target, [this,embargoId](kj::Own&& target) { if (!connection.is()) { return; @@ -2731,7 +3054,7 @@ private: builder.getContext().setReceiverLoopback(embargoId); message->send(); - }))); + })))); break; } @@ -2761,21 +3084,18 @@ private: class RpcSystemBase::Impl final: private BootstrapFactoryBase, private kj::TaskSet::ErrorHandler { public: - Impl(VatNetworkBase& network, kj::Maybe bootstrapInterface, - kj::Maybe::Client> gateway) + Impl(VatNetworkBase& network, kj::Maybe bootstrapInterface) : network(network), bootstrapInterface(kj::mv(bootstrapInterface)), - bootstrapFactory(*this), gateway(kj::mv(gateway)), tasks(*this) { - tasks.add(acceptLoop()); + bootstrapFactory(*this), tasks(*this) { + acceptLoopPromise = acceptLoop().eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); } - Impl(VatNetworkBase& network, BootstrapFactoryBase& bootstrapFactory, - kj::Maybe::Client> gateway) - : network(network), bootstrapFactory(bootstrapFactory), - gateway(kj::mv(gateway)), tasks(*this) { - tasks.add(acceptLoop()); + Impl(VatNetworkBase& network, BootstrapFactoryBase& bootstrapFactory) + : network(network), bootstrapFactory(bootstrapFactory), tasks(*this) { + acceptLoopPromise = acceptLoop().eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); } Impl(VatNetworkBase& network, SturdyRefRestorerBase& restorer) : network(network), bootstrapFactory(*this), restorer(restorer), tasks(*this) { - tasks.add(acceptLoop()); + acceptLoopPromise = acceptLoop().eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); } ~Impl() noexcept(false) { @@ -2803,11 +3123,16 @@ public: KJ_IF_MAYBE(connection, network.baseConnect(vatId)) { auto& state = getConnectionState(kj::mv(*connection)); return Capability::Client(state.restore(objectId)); + } else if (objectId.isNull()) { + // Turns out `vatId` refers to ourselves, so we can also pass it as the client ID for + // baseCreateFor(). 
+ return bootstrapFactory.baseCreateFor(vatId); } else KJ_IF_MAYBE(r, restorer) { return r->baseRestore(objectId); } else { return Capability::Client(newBrokenCap( - "SturdyRef referred to a local object but there is no local SturdyRef restorer.")); + "This vat only supports a bootstrap interface, not the old Cap'n-Proto-0.4-style " + "named exports.")); } } @@ -2819,13 +3144,20 @@ public: } } + void setTraceEncoder(kj::Function func) { + traceEncoder = kj::mv(func); + } + + kj::Promise run() { return kj::mv(acceptLoopPromise); } + private: VatNetworkBase& network; kj::Maybe bootstrapInterface; BootstrapFactoryBase& bootstrapFactory; - kj::Maybe::Client> gateway; kj::Maybe restorer; size_t flowLimit = kj::maxValue; + kj::Maybe> traceEncoder; + kj::Promise acceptLoopPromise = nullptr; kj::TaskSet tasks; typedef std::unordered_map> @@ -2845,8 +3177,8 @@ private: tasks.add(kj::mv(info.shutdownPromise)); })); auto newState = kj::refcounted( - bootstrapFactory, gateway, restorer, kj::mv(connection), - kj::mv(onDisconnect.fulfiller), flowLimit); + bootstrapFactory, restorer, kj::mv(connection), + kj::mv(onDisconnect.fulfiller), flowLimit, traceEncoder); RpcConnectionState& result = *newState; connections.insert(std::make_pair(connectionPtr, kj::mv(newState))); return result; @@ -2856,16 +3188,10 @@ private: } kj::Promise acceptLoop() { - auto receive = network.baseAccept().then( + return network.baseAccept().then( [this](kj::Own&& connection) { getConnectionState(kj::mv(connection)); - }); - return receive.then([this]() { - // No exceptions; continue loop. - // - // (We do this in a separate continuation to handle the case where exceptions are - // disabled.) - tasks.add(acceptLoop()); + return acceptLoop(); }); } @@ -2888,13 +3214,11 @@ private: }; RpcSystemBase::RpcSystemBase(VatNetworkBase& network, - kj::Maybe bootstrapInterface, - kj::Maybe::Client> gateway) - : impl(kj::heap(network, kj::mv(bootstrapInterface), kj::mv(gateway))) {} + kj::Maybe bootstrapInterface) + : impl(kj::heap(network, kj::mv(bootstrapInterface))) {} RpcSystemBase::RpcSystemBase(VatNetworkBase& network, - BootstrapFactoryBase& bootstrapFactory, - kj::Maybe::Client> gateway) - : impl(kj::heap(network, bootstrapFactory, kj::mv(gateway))) {} + BootstrapFactoryBase& bootstrapFactory) + : impl(kj::heap(network, bootstrapFactory)) {} RpcSystemBase::RpcSystemBase(VatNetworkBase& network, SturdyRefRestorerBase& restorer) : impl(kj::heap(network, restorer)) {} RpcSystemBase::RpcSystemBase(RpcSystemBase&& other) noexcept = default; @@ -2913,5 +3237,155 @@ void RpcSystemBase::baseSetFlowLimit(size_t words) { return impl->setFlowLimit(words); } +void RpcSystemBase::setTraceEncoder(kj::Function func) { + impl->setTraceEncoder(kj::mv(func)); +} + +kj::Promise RpcSystemBase::run() { + return impl->run(); +} + } // namespace _ (private) + +// ======================================================================================= + +namespace { + +class WindowFlowController final: public RpcFlowController, private kj::TaskSet::ErrorHandler { +public: + WindowFlowController(RpcFlowController::WindowGetter& windowGetter) + : windowGetter(windowGetter), tasks(*this) { + state.init(); + } + + kj::Promise send(kj::Own message, kj::Promise ack) override { + auto size = message->sizeInWords() * sizeof(capnp::word); + maxMessageSize = kj::max(size, maxMessageSize); + + // We are REQUIRED to send the message NOW to maintain correct ordering. 
+ message->send(); + + inFlight += size; + tasks.add(ack.then([this, size]() { + inFlight -= size; + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(blockedSends, Running) { + if (isReady()) { + // Release all fulfillers. + for (auto& fulfiller: blockedSends) { + fulfiller->fulfill(); + } + blockedSends.clear(); + + } + + KJ_IF_MAYBE(f, emptyFulfiller) { + if (inFlight == 0) { + f->get()->fulfill(tasks.onEmpty()); + } + } + } + KJ_CASE_ONEOF(exception, kj::Exception) { + // A previous call failed, but this one -- which was already in-flight at the time -- + // ended up succeeding. That may indicate that the server side is not properly + // handling streaming error propagation. Nothing much we can do about it here though. + } + } + })); + + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(blockedSends, Running) { + if (isReady()) { + return kj::READY_NOW; + } else { + auto paf = kj::newPromiseAndFulfiller(); + blockedSends.add(kj::mv(paf.fulfiller)); + return kj::mv(paf.promise); + } + } + KJ_CASE_ONEOF(exception, kj::Exception) { + return kj::cp(exception); + } + } + KJ_UNREACHABLE; + } + + kj::Promise waitAllAcked() override { + KJ_IF_MAYBE(q, state.tryGet()) { + if (!q->empty()) { + auto paf = kj::newPromiseAndFulfiller>(); + emptyFulfiller = kj::mv(paf.fulfiller); + return kj::mv(paf.promise); + } + } + return tasks.onEmpty(); + } + +private: + RpcFlowController::WindowGetter& windowGetter; + size_t inFlight = 0; + size_t maxMessageSize = 0; + + typedef kj::Vector>> Running; + kj::OneOf state; + + kj::Maybe>>> emptyFulfiller; + + kj::TaskSet tasks; + + void taskFailed(kj::Exception&& exception) override { + KJ_SWITCH_ONEOF(state) { + KJ_CASE_ONEOF(blockedSends, Running) { + // Fail out all pending sends. + for (auto& fulfiller: blockedSends) { + fulfiller->reject(kj::cp(exception)); + } + // Fail out all future sends. + state = kj::mv(exception); + } + KJ_CASE_ONEOF(exception, kj::Exception) { + // ignore redundant exception + } + } + } + + bool isReady() { + // We extend the window by maxMessageSize to avoid a pathological situation when a message + // is larger than the window size. Otherwise, after sending that message, we would end up + // not sending any others until the ack was received, wasting a round trip's worth of + // bandwidth. 
+ return inFlight <= maxMessageSize // avoid getWindow() call if unnecessary + || inFlight < windowGetter.getWindow() + maxMessageSize; + } +}; + +class FixedWindowFlowController final + : public RpcFlowController, public RpcFlowController::WindowGetter { +public: + FixedWindowFlowController(size_t windowSize): windowSize(windowSize), inner(*this) {} + + kj::Promise send(kj::Own message, kj::Promise ack) override { + return inner.send(kj::mv(message), kj::mv(ack)); + } + + kj::Promise waitAllAcked() override { + return inner.waitAllAcked(); + } + + size_t getWindow() override { return windowSize; } + +private: + size_t windowSize; + WindowFlowController inner; +}; + +} // namespace + +kj::Own RpcFlowController::newFixedWindowController(size_t windowSize) { + return kj::heap(windowSize); +} +kj::Own RpcFlowController::newVariableWindowController(WindowGetter& getter) { + return kj::heap(getter); +} + } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp index 86e86ea2d90..50aa496369b 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp @@ -614,7 +614,7 @@ struct Resolve { # # The sender promises that from this point forth, until `promiseId` is released, it shall # simply forward all messages to the capability designated by `cap`. This is true even if - # `cap` itself happens to desigate another promise, and that other promise later resolves -- + # `cap` itself happens to designate another promise, and that other promise later resolves -- # messages sent to `promiseId` shall still go to that other promise, not to its resolution. # This is important in the case that the receiver of the `Resolve` ends up sending a # `Disembargo` message towards `promiseId` in order to control message ordering -- that @@ -855,7 +855,7 @@ struct Join { # - Dana receives the first request and sees that the JoinKeyPart is one of two. She notes that # she doesn't have the other part yet, so she records the request and responds with a # JoinResult. - # - Alice relays the JoinAswer back to Bob. + # - Alice relays the JoinAnswer back to Bob. # - Carol is also proxying a capability from Dana, and so forwards her Join request to Dana as # well. # - Dana receives Carol's request and notes that she now has both parts of a JoinKey. She @@ -988,6 +988,63 @@ struct CapDescriptor { # Level 1 and 2 implementations that receive a `thirdPartyHosted` may simply send calls to its # `vine` instead. } + + attachedFd @6 :UInt8 = 0xff; + # If the RPC message in which this CapDescriptor was delivered also had file descriptors + # attached, and `fd` is a valid index into the list of attached file descriptors, then + # that file descriptor should be attached to this capability. If `attachedFd` is out-of-bounds + # for said list, then no FD is attached. + # + # For example, if the RPC message arrived over a Unix socket, then file descriptors may be + # attached by sending an SCM_RIGHTS ancillary message attached to the data bytes making up the + # raw message. Receivers who wish to opt into FD passing should arrange to receive SCM_RIGHTS + # whenever receiving an RPC message. Senders who wish to send FDs need not verify whether the + # receiver knows how to receive them, because the operating system will automatically discard + # ancillary messages like SCM_RIGHTS if the receiver doesn't ask to receive them, including + # automatically closing any FDs. 
+ # + # It is up to the application protocol to define what capabilities are expected to have file + # descriptors attached, and what those FDs mean. But, for example, an application could use this + # to open a file on disk and then transmit the open file descriptor to a sandboxed process that + # does not otherwise have permission to access the filesystem directly. This is usually an + # optimization: the sending process could instead provide an RPC interface supporting all the + # operations needed (such as reading and writing a file), but by passing the file descriptor + # directly, the recipient can often perform operations much more efficiently. Application + # designers are encouraged to provide such RPC interfaces and automatically fall back to them + # when FD passing is not available, so that the application can still work when the parties are + # remote over a network. + # + # An attached FD is most often associated with a `senderHosted` descriptor. It could also make + # sense in the case of `thirdPartyHosted`: in this case, the sender is forwarding the FD that + # they received from the third party, so that the receiver can start using it without first + # interacting with the third party. This is an optional optimization -- the middleman may choose + # not to forward capabilities, in which case the receiver will need to complete the handshake + # with the third party directly before receiving the FD. If an implementation receives a second + # attached FD after having already received one previously (e.g. both in a `thirdPartyHosted` + # CapDescriptor and then later again when receiving the final capability directly from the + # third party), the implementation should discard the later FD and stick with the original. At + # present, there is no known reason why other capability types (e.g. `receiverHosted`) would want + # to carry an attached FD, but we reserve the right to define a meaning for this in the future. + # + # Each file descriptor attached to the message must be used in no more than one CapDescriptor, + # so that the receiver does not need to use dup() or refcounting to handle the possibility of + # multiple capabilities using the same descriptor. If multiple CapDescriptors do point to the + # same FD index, then the receiver can arbitrarily choose which capability ends up having the + # FD attached. + # + # To mitigate DoS attacks, RPC implementations should limit the number of FDs they are willing to + # receive in a single message to a small value. If a message happens to contain more than that, + # the list is truncated. Moreover, in some cases, FD passing needs to be blocked entirely for + # security or implementation reasons, in which case the list may be truncated to zero. Hence, + # `attachedFd` might point past the end of the list, which the implementation should treat as if + # no FD was attached at all. + # + # The type of this field was chosen to be UInt8 because Linux supports sending only a maximum + # of 253 file descriptors in an SCM_RIGHTS message anyway, and CapDescriptor had two bytes of + # padding left -- so after adding this, there is still one byte for a future feature. + # Conveniently, this also means we're able to use 0xff as the default value, which will always + # be out-of-range (of course, the implementation should explicitly enforce that 255 descriptors + # cannot be sent at once, rather than relying on Linux to do so). } struct PromisedAnswer { @@ -1129,7 +1186,7 @@ struct Exception { # start over. 
This should in turn cause the server to obtain a new copy of the capability that # it lost, thus making everything work. # - # If the client receives another `disconnencted` error in the process of rebuilding the + # If the client receives another `disconnected` error in the process of rebuilding the # capability and retrying the call, it should treat this as an `overloaded` error: the network # is currently unreliable, possibly due to load or other temporary issues. @@ -1144,6 +1201,11 @@ struct Exception { obsoleteDurability @2 :UInt16; # OBSOLETE. See `type` instead. + + trace @4 :Text; + # Stack trace text from the remote server. The format is not specified. By default, + # implementations do not provide stack traces; the application must explicitly enable them + # when desired. } # ======================================================================================== @@ -1231,7 +1293,7 @@ using SturdyRef = AnyPointer; # - How to authenticate the vat after connecting (e.g. a public key fingerprint). # - The identity of a specific object hosted by the vat. Generally, this is an opaque pointer whose # format is defined by the specific vat -- the client has no need to inspect the object ID. -# It is important that the objec ID be unguessable if the object is not public (and objects +# It is important that the object ID be unguessable if the object is not public (and objects # should almost never be public). # # The above are only suggestions. Some networks might work differently. For example, a private @@ -1256,6 +1318,11 @@ using RecipientId = AnyPointer; # # In a network where each vat has a public/private key pair, this could simply be the public key # fingerprint of the recipient along with a nonce matching the one in the `ProvisionId`. +# +# As another example, when communicating between processes on the same machine over Unix sockets, +# RecipientId could simply refer to a file descriptor attached to the message via SCM_RIGHTS. +# This file descriptor would be one end of a newly-created socketpair, with the other end having +# been sent to the capability's recipient in ThirdPartyCapId. using ThirdPartyCapId = AnyPointer; # **(level 3)** @@ -1266,6 +1333,11 @@ using ThirdPartyCapId = AnyPointer; # third party's public key fingerprint, hints on how to connect to the third party (e.g. an IP # address), and the nonce used in the corresponding `Provide` message's `RecipientId` as sent # to that third party (used to identify which capability to pick up). +# +# As another example, when communicating between processes on the same machine over Unix sockets, +# ThirdPartyCapId could simply refer to a file descriptor attached to the message via SCM_RIGHTS. +# This file descriptor would be one end of a newly-created socketpair, with the other end having +# been sent to the process hosting the capability in RecipientId. 
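Editor's note: the `attachedFd` and Unix-socket comments above describe FD passing in terms of SCM_RIGHTS ancillary messages. The following is a minimal POSIX-level sketch, not part of the Cap'n Proto API, of how a receiver opts into SCM_RIGHTS; the helper name `receiveBytesWithFds` and the `kMaxFds` limit are illustrative assumptions (the real logic lives in the KJ/capnp transport layer).

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <vector>

    static constexpr int kMaxFds = 16;  // small cap, per the DoS note above (assumption)

    // Reads data bytes into `buf` and appends any FDs delivered via SCM_RIGHTS to `fds`.
    // If the sender attached more FDs than fit in the control buffer, the kernel truncates
    // the list (MSG_CTRUNC), which matches the "treat as no FD attached" guidance above.
    ssize_t receiveBytesWithFds(int sock, void* buf, size_t len, std::vector<int>& fds) {
      alignas(struct cmsghdr) char cmsgBuf[CMSG_SPACE(sizeof(int) * kMaxFds)];

      struct iovec iov;
      iov.iov_base = buf;
      iov.iov_len = len;

      struct msghdr msg = {};
      msg.msg_iov = &iov;
      msg.msg_iovlen = 1;
      msg.msg_control = cmsgBuf;           // providing a control buffer = opting in
      msg.msg_controllen = sizeof(cmsgBuf);

      ssize_t n = recvmsg(sock, &msg, 0);
      if (n < 0) return n;

      for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); cmsg != nullptr;
           cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
          int* fdPtr = reinterpret_cast<int*>(CMSG_DATA(cmsg));
          size_t count = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
          for (size_t i = 0; i < count; i++) fds.push_back(fdPtr[i]);
        }
      }
      return n;
    }

On the Cap'n Proto side, the receiver then checks each CapDescriptor's `attachedFd` against the size of this list and treats any out-of-range value (including the 0xff default) as "no FD attached".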
using JoinKeyPart = AnyPointer; # **(level 4)** diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp.c++ index 0135f9b624a..553e72d6991 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp.c++ @@ -1413,7 +1413,7 @@ const ::capnp::_::RawSchema s_9a0e61223d96743b = { 1, 2, i_9a0e61223d96743b, nullptr, nullptr, { &s_9a0e61223d96743b, nullptr, nullptr, 0, 0, nullptr } }; #endif // !CAPNP_LITE -static const ::capnp::_::AlignedData<114> b_8523ddc40b86b8b0 = { +static const ::capnp::_::AlignedData<130> b_8523ddc40b86b8b0 = { { 0, 0, 0, 0, 5, 0, 6, 0, 176, 184, 134, 11, 196, 221, 35, 133, 16, 0, 0, 0, 1, 0, 1, 0, @@ -1423,7 +1423,7 @@ static const ::capnp::_::AlignedData<114> b_8523ddc40b86b8b0 = { 21, 0, 0, 0, 242, 0, 0, 0, 33, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 29, 0, 0, 0, 87, 1, 0, 0, + 29, 0, 0, 0, 143, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99, 97, 112, 110, 112, 47, 114, 112, @@ -1431,49 +1431,56 @@ static const ::capnp::_::AlignedData<114> b_8523ddc40b86b8b0 = { 67, 97, 112, 68, 101, 115, 99, 114, 105, 112, 116, 111, 114, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, - 24, 0, 0, 0, 3, 0, 4, 0, + 28, 0, 0, 0, 3, 0, 4, 0, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 153, 0, 0, 0, 42, 0, 0, 0, + 181, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 148, 0, 0, 0, 3, 0, 1, 0, - 160, 0, 0, 0, 2, 0, 1, 0, + 176, 0, 0, 0, 3, 0, 1, 0, + 188, 0, 0, 0, 2, 0, 1, 0, 1, 0, 254, 255, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 157, 0, 0, 0, 106, 0, 0, 0, + 185, 0, 0, 0, 106, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 156, 0, 0, 0, 3, 0, 1, 0, - 168, 0, 0, 0, 2, 0, 1, 0, + 184, 0, 0, 0, 3, 0, 1, 0, + 196, 0, 0, 0, 2, 0, 1, 0, 2, 0, 253, 255, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 165, 0, 0, 0, 114, 0, 0, 0, + 193, 0, 0, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 164, 0, 0, 0, 3, 0, 1, 0, - 176, 0, 0, 0, 2, 0, 1, 0, + 192, 0, 0, 0, 3, 0, 1, 0, + 204, 0, 0, 0, 2, 0, 1, 0, 3, 0, 252, 255, 1, 0, 0, 0, 0, 0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 173, 0, 0, 0, 122, 0, 0, 0, + 201, 0, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 172, 0, 0, 0, 3, 0, 1, 0, - 184, 0, 0, 0, 2, 0, 1, 0, + 200, 0, 0, 0, 3, 0, 1, 0, + 212, 0, 0, 0, 2, 0, 1, 0, 4, 0, 251, 255, 0, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 181, 0, 0, 0, 122, 0, 0, 0, + 209, 0, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 180, 0, 0, 0, 3, 0, 1, 0, - 192, 0, 0, 0, 2, 0, 1, 0, + 208, 0, 0, 0, 3, 0, 1, 0, + 220, 0, 0, 0, 2, 0, 1, 0, 5, 0, 250, 255, 0, 0, 0, 0, 0, 0, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 189, 0, 0, 0, 138, 0, 0, 0, + 217, 0, 0, 0, 138, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 192, 0, 0, 0, 3, 0, 1, 0, - 204, 0, 0, 0, 2, 0, 1, 0, + 220, 0, 0, 0, 3, 0, 1, 0, + 232, 0, 0, 0, 2, 0, 1, 0, + 6, 0, 0, 0, 2, 0, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 229, 0, 0, 0, 90, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 3, 0, 1, 0, + 240, 0, 0, 0, 2, 0, 1, 0, 110, 111, 110, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1526,6 +1533,15 @@ static const ::capnp::_::AlignedData<114> b_8523ddc40b86b8b0 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 97, 116, 116, 97, 99, 104, 101, 100, + 70, 100, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 6, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }; @@ -1535,11 +1551,11 @@ static const ::capnp::_::RawSchema* const d_8523ddc40b86b8b0[] = { &s_d37007fde1f0027d, &s_d800b1d6cd6f1ca0, }; -static const uint16_t m_8523ddc40b86b8b0[] = {0, 4, 3, 1, 2, 5}; -static const uint16_t i_8523ddc40b86b8b0[] = {0, 1, 2, 3, 4, 5}; +static const uint16_t m_8523ddc40b86b8b0[] = {6, 0, 4, 3, 1, 2, 5}; +static const uint16_t i_8523ddc40b86b8b0[] = {0, 1, 2, 3, 4, 5, 6}; const ::capnp::_::RawSchema s_8523ddc40b86b8b0 = { - 0x8523ddc40b86b8b0, b_8523ddc40b86b8b0.words, 114, d_8523ddc40b86b8b0, m_8523ddc40b86b8b0, - 2, 6, i_8523ddc40b86b8b0, nullptr, nullptr, { &s_8523ddc40b86b8b0, nullptr, nullptr, 0, 0, nullptr } + 0x8523ddc40b86b8b0, b_8523ddc40b86b8b0.words, 130, d_8523ddc40b86b8b0, m_8523ddc40b86b8b0, + 2, 7, i_8523ddc40b86b8b0, nullptr, nullptr, { &s_8523ddc40b86b8b0, nullptr, nullptr, 0, 0, nullptr } }; #endif // !CAPNP_LITE static const ::capnp::_::AlignedData<57> b_d800b1d6cd6f1ca0 = { @@ -1734,17 +1750,17 @@ const ::capnp::_::RawSchema s_d37007fde1f0027d = { 0, 2, i_d37007fde1f0027d, nullptr, nullptr, { &s_d37007fde1f0027d, nullptr, nullptr, 0, 0, nullptr } }; #endif // !CAPNP_LITE -static const ::capnp::_::AlignedData<85> b_d625b7063acf691a = { +static const ::capnp::_::AlignedData<100> b_d625b7063acf691a = { { 0, 0, 0, 0, 5, 0, 6, 0, 26, 105, 207, 58, 6, 183, 37, 214, 16, 0, 0, 0, 1, 0, 1, 0, 80, 162, 82, 37, 27, 152, 18, 179, - 1, 0, 7, 0, 0, 0, 0, 0, + 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 210, 0, 0, 0, 33, 0, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 41, 0, 0, 0, 231, 0, 0, 0, + 41, 0, 0, 0, 31, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99, 97, 112, 110, 112, 47, 114, 112, @@ -1755,35 +1771,42 @@ static const ::capnp::_::AlignedData<85> b_d625b7063acf691a = { 88, 189, 76, 63, 226, 150, 140, 178, 1, 0, 0, 0, 42, 0, 0, 0, 84, 121, 112, 101, 0, 0, 0, 0, - 16, 0, 0, 0, 3, 0, 4, 0, + 20, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 97, 0, 0, 0, 58, 0, 0, 0, + 125, 0, 0, 0, 58, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 92, 0, 0, 0, 3, 0, 1, 0, - 104, 0, 0, 0, 2, 0, 1, 0, + 120, 0, 0, 0, 3, 0, 1, 0, + 132, 0, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 101, 0, 0, 0, 186, 0, 0, 0, + 129, 0, 0, 0, 186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 104, 0, 0, 0, 3, 0, 1, 0, - 116, 0, 0, 0, 2, 0, 1, 0, + 132, 0, 0, 0, 3, 0, 1, 0, + 144, 0, 0, 0, 2, 0, 1, 0, 3, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 113, 0, 0, 0, 154, 0, 0, 0, + 141, 0, 0, 0, 154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 116, 0, 0, 0, 3, 0, 1, 0, - 128, 0, 0, 0, 2, 0, 1, 0, + 144, 0, 0, 0, 3, 0, 1, 0, + 156, 0, 0, 0, 2, 0, 1, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 125, 0, 0, 0, 42, 0, 0, 0, + 153, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 120, 0, 0, 0, 3, 0, 1, 0, - 132, 0, 0, 0, 2, 0, 1, 0, + 148, 0, 0, 0, 3, 0, 1, 0, + 160, 0, 0, 0, 2, 0, 1, 0, + 4, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 4, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 157, 0, 0, 0, 50, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 152, 0, 0, 0, 3, 0, 1, 0, + 164, 0, 0, 0, 2, 0, 1, 0, 114, 101, 97, 115, 111, 110, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1818,6 +1841,14 @@ static const ::capnp::_::AlignedData<85> b_d625b7063acf691a = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 116, 114, 97, 99, 101, 0, 0, 0, + 12, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }; @@ -1826,11 +1857,11 @@ static const ::capnp::_::AlignedData<85> b_d625b7063acf691a = { static const ::capnp::_::RawSchema* const d_d625b7063acf691a[] = { &s_b28c96e23f4cbd58, }; -static const uint16_t m_d625b7063acf691a[] = {2, 1, 0, 3}; -static const uint16_t i_d625b7063acf691a[] = {0, 1, 2, 3}; +static const uint16_t m_d625b7063acf691a[] = {2, 1, 0, 4, 3}; +static const uint16_t i_d625b7063acf691a[] = {0, 1, 2, 3, 4}; const ::capnp::_::RawSchema s_d625b7063acf691a = { - 0xd625b7063acf691a, b_d625b7063acf691a.words, 85, d_d625b7063acf691a, m_d625b7063acf691a, - 1, 4, i_d625b7063acf691a, nullptr, nullptr, { &s_d625b7063acf691a, nullptr, nullptr, 0, 0, nullptr } + 0xd625b7063acf691a, b_d625b7063acf691a.words, 100, d_d625b7063acf691a, m_d625b7063acf691a, + 1, 5, i_d625b7063acf691a, nullptr, nullptr, { &s_d625b7063acf691a, nullptr, nullptr, 0, 0, nullptr } }; #endif // !CAPNP_LITE static const ::capnp::_::AlignedData<37> b_b28c96e23f4cbd58 = { diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp.h index 7cbb2bcfa08..58eb6a2dd38 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.capnp.h @@ -6,11 +6,13 @@ #include #include -#if CAPNP_VERSION != 7000 +#if CAPNP_VERSION != 9001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif +CAPNP_BEGIN_HEADER + namespace capnp { namespace schemas { @@ -402,7 +404,7 @@ struct Exception { struct _capnpPrivate { - CAPNP_DECLARE_STRUCT_HEADER(d625b7063acf691a, 1, 1) + CAPNP_DECLARE_STRUCT_HEADER(d625b7063acf691a, 1, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE @@ -2028,6 +2030,8 @@ class CapDescriptor::Reader { inline bool hasThirdPartyHosted() const; inline ::capnp::rpc::ThirdPartyCapDescriptor::Reader getThirdPartyHosted() const; + inline ::uint8_t getAttachedFd() const; + private: ::capnp::_::StructReader _reader; template @@ -2089,6 +2093,9 @@ class CapDescriptor::Builder { inline void adoptThirdPartyHosted(::capnp::Orphan< ::capnp::rpc::ThirdPartyCapDescriptor>&& value); inline ::capnp::Orphan< ::capnp::rpc::ThirdPartyCapDescriptor> disownThirdPartyHosted(); + inline ::uint8_t getAttachedFd(); + inline void setAttachedFd( ::uint8_t value); + private: ::capnp::_::StructBuilder _builder; template @@ -2397,6 +2404,9 @@ class Exception::Reader { inline ::capnp::rpc::Exception::Type getType() const; + inline bool hasTrace() const; + inline ::capnp::Text::Reader getTrace() const; + private: ::capnp::_::StructReader _reader; template @@ -2441,6 +2451,13 @@ class Exception::Builder { inline ::capnp::rpc::Exception::Type getType(); inline void setType( ::capnp::rpc::Exception::Type value); + inline bool hasTrace(); + inline ::capnp::Text::Builder getTrace(); + inline void setTrace( ::capnp::Text::Reader value); + inline ::capnp::Text::Builder initTrace(unsigned int size); + inline void adoptTrace(::capnp::Orphan< ::capnp::Text>&& value); + inline ::capnp::Orphan< ::capnp::Text> disownTrace(); + private: ::capnp::_::StructBuilder _builder; template @@ -4670,6 +4687,20 @@ inline ::capnp::Orphan< ::capnp::rpc::ThirdPartyCapDescriptor> 
CapDescriptor::Bu ::capnp::bounded<0>() * ::capnp::POINTERS)); } +inline ::uint8_t CapDescriptor::Reader::getAttachedFd() const { + return _reader.getDataField< ::uint8_t>( + ::capnp::bounded<2>() * ::capnp::ELEMENTS, 255u); +} + +inline ::uint8_t CapDescriptor::Builder::getAttachedFd() { + return _builder.getDataField< ::uint8_t>( + ::capnp::bounded<2>() * ::capnp::ELEMENTS, 255u); +} +inline void CapDescriptor::Builder::setAttachedFd( ::uint8_t value) { + _builder.setDataField< ::uint8_t>( + ::capnp::bounded<2>() * ::capnp::ELEMENTS, value, 255u); +} + inline ::uint32_t PromisedAnswer::Reader::getQuestionId() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); @@ -4892,6 +4923,42 @@ inline void Exception::Builder::setType( ::capnp::rpc::Exception::Type value) { ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } +inline bool Exception::Reader::hasTrace() const { + return !_reader.getPointerField( + ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); +} +inline bool Exception::Builder::hasTrace() { + return !_builder.getPointerField( + ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); +} +inline ::capnp::Text::Reader Exception::Reader::getTrace() const { + return ::capnp::_::PointerHelpers< ::capnp::Text>::get(_reader.getPointerField( + ::capnp::bounded<1>() * ::capnp::POINTERS)); +} +inline ::capnp::Text::Builder Exception::Builder::getTrace() { + return ::capnp::_::PointerHelpers< ::capnp::Text>::get(_builder.getPointerField( + ::capnp::bounded<1>() * ::capnp::POINTERS)); +} +inline void Exception::Builder::setTrace( ::capnp::Text::Reader value) { + ::capnp::_::PointerHelpers< ::capnp::Text>::set(_builder.getPointerField( + ::capnp::bounded<1>() * ::capnp::POINTERS), value); +} +inline ::capnp::Text::Builder Exception::Builder::initTrace(unsigned int size) { + return ::capnp::_::PointerHelpers< ::capnp::Text>::init(_builder.getPointerField( + ::capnp::bounded<1>() * ::capnp::POINTERS), size); +} +inline void Exception::Builder::adoptTrace( + ::capnp::Orphan< ::capnp::Text>&& value) { + ::capnp::_::PointerHelpers< ::capnp::Text>::adopt(_builder.getPointerField( + ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); +} +inline ::capnp::Orphan< ::capnp::Text> Exception::Builder::disownTrace() { + return ::capnp::_::PointerHelpers< ::capnp::Text>::disown(_builder.getPointerField( + ::capnp::bounded<1>() * ::capnp::POINTERS)); +} + } // namespace } // namespace +CAPNP_END_HEADER + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.h b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.h index df44e889880..8a0dede4985 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/rpc.h @@ -21,13 +21,13 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "capability.h" #include "rpc-prelude.h" +CAPNP_BEGIN_HEADER + +namespace kj { class AutoCloseFd; } + namespace capnp { template RpcSystem( VatNetwork& network, - kj::Maybe bootstrapInterface, - kj::Maybe::Client> gateway = nullptr); + kj::Maybe bootstrapInterface); template RpcSystem( VatNetwork& network, - BootstrapFactory& bootstrapFactory, - kj::Maybe::Client> gateway = nullptr); + BootstrapFactory& bootstrapFactory); template func); + // + // (Inherited from _::RpcSystemBase) + // + // Set a function to call to encode exception stack traces for transmission to remote parties. + // By default, traces are not transmitted at all. 
If a callback is provided, then the returned + // string will be sent with the exception. If the remote end is KJ/C++ based, then this trace + // text ends up being accessible as kj::Exception::getRemoteTrace(). + // + // Stack traces can sometimes contain sensitive information, so you should think carefully about + // what information you are willing to reveal to the remote party. + + kj::Promise run() { return RpcSystemBase::run(); } + // Listens for incoming RPC connections and handles them. Never returns normally, but could throw + // an exception if the system becomes unable to accept new connections (e.g. because the + // underlying listen socket becomes broken somehow). + // + // For historical reasons, the RpcSystem will actually run itself even if you do not call this. + // However, if an exception is thrown, the RpcSystem will log the exception to the console and + // then cease accepting new connections. In this case, your server may be in a broken state, but + // without restarting. All servers should therefore call run() and handle failures in some way. }; template makeRpcServer( // See also ez-rpc.h, which has simpler instructions for the common case of a two-party // client-server RPC connection. -template , - typename ExternalRef = _::ExternalRefFromRealmGatewayClient> -RpcSystem makeRpcServer( - VatNetwork& network, - Capability::Client bootstrapInterface, RealmGatewayClient gateway) - CAPNP_DEPRECATED("Please transition to using MembranePolicy instead of RealmGateway."); -// ** DEPRECATED ** -// -// This uses a RealmGateway to create a membrane between the external network and internal -// capabilites to translate save() requests. However, MembranePolicy (membrane.h) allows for the -// creation of much more powerful membranes and doesn't need to be tied to an RpcSystem. -// Applications should transition to using membranes instead of RealmGateway. RealmGateway will be -// removed in a future version of Cap'n Proto. -// -// Original description: -// -// Make an RPC server for a VatNetwork that resides in a different realm from the application. -// The given RealmGateway is used to translate SturdyRefs between the app's ("internal") format -// and the network's ("external") format. - template RpcSystem makeRpcServer( @@ -190,28 +188,6 @@ RpcSystem makeRpcServer( // Make an RPC server that can serve different bootstrap interfaces to different clients via a // BootstrapInterface. -template , - typename ExternalRef = _::ExternalRefFromRealmGatewayClient> -RpcSystem makeRpcServer( - VatNetwork& network, - BootstrapFactory& bootstrapFactory, RealmGatewayClient gateway) - CAPNP_DEPRECATED("Please transition to using MembranePolicy instead of RealmGateway."); -// ** DEPRECATED ** -// -// This uses a RealmGateway to create a membrane between the external network and internal -// capabilites to translate save() requests. However, MembranePolicy (membrane.h) allows for the -// creation of much more powerful membranes and doesn't need to be tied to an RpcSystem. -// Applications should transition to using membranes instead of RealmGateway. RealmGateway will be -// removed in a future version of Cap'n Proto. -// -// Original description: -// -// Make an RPC server that can serve different bootstrap interfaces to different clients via a -// BootstrapInterface and communicates with a different realm than the application is in via a -// RealmGateway. 
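Editor's note: a hedged sketch of how the `setTraceEncoder()` and `run()` members documented above might be wired up on a two-party server. The function name, the use of `getDescription()` as the trace text, and the disconnect handling are assumptions for illustration; only the `RpcSystem` calls themselves come from this header.

    #include <capnp/rpc-twoparty.h>
    #include <kj/async-io.h>

    void serveOneClient(kj::AsyncIoStream& stream, capnp::Capability::Client bootstrap,
                        kj::WaitScope& waitScope) {
      capnp::TwoPartyVatNetwork network(stream, capnp::rpc::twoparty::Side::SERVER);
      auto rpcSystem = capnp::makeRpcServer(network, kj::mv(bootstrap));

      // Opt in to sending trace text with exceptions; by default nothing is transmitted.
      rpcSystem.setTraceEncoder([](const kj::Exception& e) {
        return kj::str(e.getDescription());  // a real encoder might include a stack trace
      });

      // run() surfaces accept-loop failures instead of the legacy log-and-continue behavior;
      // stop waiting once this client disconnects.
      rpcSystem.run().exclusiveJoin(network.onDisconnect()).wait(waitScope);
    }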
- template RpcSystem makeRpcServer( @@ -247,28 +223,6 @@ RpcSystem makeRpcClient( // See also ez-rpc.h, which has simpler instructions for the common case of a two-party // client-server RPC connection. -template , - typename ExternalRef = _::ExternalRefFromRealmGatewayClient> -RpcSystem makeRpcClient( - VatNetwork& network, - RealmGatewayClient gateway) - CAPNP_DEPRECATED("Please transition to using MembranePolicy instead of RealmGateway."); -// ** DEPRECATED ** -// -// This uses a RealmGateway to create a membrane between the external network and internal -// capabilites to translate save() requests. However, MembranePolicy (membrane.h) allows for the -// creation of much more powerful membranes and doesn't need to be tied to an RpcSystem. -// Applications should transition to using membranes instead of RealmGateway. RealmGateway will be -// removed in a future version of Cap'n Proto. -// -// Original description: -// -// Make an RPC client for a VatNetwork that resides in a different realm from the application. -// The given RealmGateway is used to translate SturdyRefs between the app's ("internal") format -// and the network's ("external") format. - template class SturdyRefRestorer: public _::SturdyRefRestorerBase { // ** DEPRECATED ** @@ -305,9 +259,18 @@ class OutgoingRpcMessage { // Get the message body, which the caller may fill in any way it wants. (The standard RPC // implementation initializes it as a Message as defined in rpc.capnp.) + virtual void setFds(kj::Array fds) {} + // Set the list of file descriptors to send along with this message, if FD passing is supported. + // An implementation may ignore this. + virtual void send() = 0; // Send the message, or at least put it in a queue to be sent later. Note that the builder // returned by `getBody()` remains valid at least until the `OutgoingRpcMessage` is destroyed. + + virtual size_t sizeInWords() = 0; + // Get the total size of the message, for flow control purposes. Although the caller could + // also call getBody().targetSize(), doing that would walk the message tree, whereas typical + // implementations can compute the size more cheaply by summing segment sizes. }; class IncomingRpcMessage { @@ -317,6 +280,71 @@ class IncomingRpcMessage { virtual AnyPointer::Reader getBody() = 0; // Get the message body, to be interpreted by the caller. (The standard RPC implementation // interprets it as a Message as defined in rpc.capnp.) + + virtual kj::ArrayPtr getAttachedFds() { return nullptr; } + // If the transport supports attached file descriptors and some were attached to this message, + // returns them. Otherwise returns an empty array. It is intended that the caller will move the + // FDs out of this table when they are consumed, possibly leaving behind a null slot. Callers + // should be careful to check if an FD was already consumed by comparing the slot with `nullptr`. + // (We don't use Maybe here because moving from a Maybe doesn't make it null, so it would only + // add confusion. Moving from an AutoCloseFd does in fact make it null.) + + virtual size_t sizeInWords() = 0; + // Get the total size of the message, for flow control purposes. Although the caller could + // also call getBody().targetSize(), doing that would walk the message tree, whereas typical + // implementations can compute the size more cheaply by summing segment sizes. +}; + +class RpcFlowController { + // Tracks a particular RPC stream in order to implement a flow control algorithm. 
+ +public: + virtual kj::Promise send(kj::Own message, kj::Promise ack) = 0; + // Like calling message->send(), but the promise resolves when it's a good time to send the + // next message. + // + // `ack` is a promise that resolves when the message has been acknowledged from the other side. + // In practice, `message` is typically a `Call` message and `ack` is a `Return`. Note that this + // means `ack` counts not only time to transmit the message but also time for the remote + // application to process the message. The flow controller is expected to apply backpressure if + // the remote application responds slowly. If `ack` rejects, then all outstanding and future + // sends will propagate the exception. + // + // Note that messages sent with this method must still be delivered in the same order as if they + // had been sent with `message->send()`; they cannot be delayed until later. This is important + // because the message may introduce state changes in the RPC system that later messages rely on, + // such as introducing a new Question ID that a later message may reference. Thus, the controller + // can only create backpressure by having the returned promise resolve slowly. + // + // Dropping the returned promise does not cancel the send. Once send() is called, there's no way + // to stop it. + + virtual kj::Promise waitAllAcked() = 0; + // Wait for all `ack`s previously passed to send() to finish. It is an error to call send() again + // after this. + + // --------------------------------------------------------------------------- + // Common implementations. + + static kj::Own newFixedWindowController(size_t windowSize); + // Constructs a flow controller that implements a strict fixed window of the given size. In other + // words, the controller will throttle the stream when the total bytes in-flight exceeds the + // window. + + class WindowGetter { + public: + virtual size_t getWindow() = 0; + }; + + static kj::Own newVariableWindowController(WindowGetter& getter); + // Like newFixedWindowController(), but the window size is allowed to vary over time. Useful if + // you have a technique for estimating one good window size for the connection as a whole but not + // for individual streams. Keep in mind, though, that in situations where the other end of the + // connection is merely proxying capabilities from a variety of final destinations across a + // variety of networks, no single window will be appropriate for all streams. + + static constexpr size_t DEFAULT_WINDOW_SIZE = 65536; + // The window size used by the default implementation of Connection::newStream(). }; template newStream() override + { return RpcFlowController::newFixedWindowController(65536); } + // Construct a flow controller for a new stream on this connection. The controller can be + // passed into OutgoingRpcMessage::sendStreaming(). + // + // The default implementation returns a dummy stream controller that just applies a fixed + // window of 64k to everything. This always works but may constrain throughput on networks + // where the bandwidth-delay product is high, while conversely providing too much buffer when + // the bandwidth-delay product is low. + // + // WARNING: The RPC system may keep the `RpcFlowController` object alive past the lifetime of + // the `Connection` itself. However, it will not call `send()` any more after the + // `Connection` is destroyed. 
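Editor's note: an illustrative sketch of a WindowGetter for use with `RpcFlowController::newVariableWindowController()` declared above. The class name and the way the estimate is updated are assumptions, not part of the API; only `getWindow()`, `newVariableWindowController()`, and `DEFAULT_WINDOW_SIZE` come from this header.

    #include <capnp/rpc.h>

    class AdjustableWindowGetter final: public capnp::RpcFlowController::WindowGetter {
    public:
      size_t getWindow() override { return windowBytes; }

      void setWindow(size_t newSize) { windowBytes = newSize; }
      // e.g. grown or shrunk based on observed round-trip time or measured bandwidth.

    private:
      size_t windowBytes = capnp::RpcFlowController::DEFAULT_WINDOW_SIZE;
    };

    // Usage (the getter must outlive the controller):
    //   AdjustableWindowGetter getter;
    //   auto controller = capnp::RpcFlowController::newVariableWindowController(getter);
    //
    // Note that the built-in window implementation also extends the window by the largest
    // message seen so far, so a single oversized message does not stall the stream.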
+ // + // TODO(perf): We should introduce a flow controller implementation that uses a clock to + // measure RTT and bandwidth and dynamically update the window size, like BBR. + // Level 0 features ---------------------------------------------- virtual typename VatId::Reader getPeerVatId() = 0; @@ -376,10 +421,17 @@ class VatNetwork: public _::VatNetworkBase { // If `firstSegmentWordSize` is non-zero, it should be treated as a hint suggesting how large // to make the first segment. This is entirely a hint and the connection may adjust it up or // down. If it is zero, the connection should choose the size itself. + // + // WARNING: The RPC system may keep the `OutgoingRpcMessage` object alive past the lifetime of + // the `Connection` itself. However, it will not call `send()` any more after the + // `Connection` is destroyed. virtual kj::Promise>> receiveIncomingMessage() override = 0; // Wait for a message to be received and return it. If the read stream cleanly terminates, // return null. If any other problem occurs, throw an exception. + // + // WARNING: The RPC system may keep the `IncomingRpcMessage` object alive past the lifetime of + // the `Connection` itself. virtual kj::Promise shutdown() override KJ_WARN_UNUSED_RESULT = 0; // Waits until all outgoing messages have been sent, then shuts down the outgoing stream. The @@ -466,18 +518,16 @@ template RpcSystem::RpcSystem( VatNetwork& network, - kj::Maybe bootstrap, - kj::Maybe::Client> gateway) - : _::RpcSystemBase(network, kj::mv(bootstrap), kj::mv(gateway)) {} + kj::Maybe bootstrap) + : _::RpcSystemBase(network, kj::mv(bootstrap)) {} template template RpcSystem::RpcSystem( VatNetwork& network, - BootstrapFactory& bootstrapFactory, - kj::Maybe::Client> gateway) - : _::RpcSystemBase(network, bootstrapFactory, kj::mv(gateway)) {} + BootstrapFactory& bootstrapFactory) + : _::RpcSystemBase(network, bootstrapFactory) {} template template makeRpcServer( return RpcSystem(network, kj::mv(bootstrapInterface)); } -template -RpcSystem makeRpcServer( - VatNetwork& network, - Capability::Client bootstrapInterface, RealmGatewayClient gateway) { - return RpcSystem(network, kj::mv(bootstrapInterface), - gateway.template castAs>()); -} - template RpcSystem makeRpcServer( @@ -530,15 +570,6 @@ RpcSystem makeRpcServer( return RpcSystem(network, bootstrapFactory); } -template -RpcSystem makeRpcServer( - VatNetwork& network, - BootstrapFactory& bootstrapFactory, RealmGatewayClient gateway) { - return RpcSystem(network, bootstrapFactory, gateway.template castAs>()); -} - template RpcSystem makeRpcServer( @@ -554,13 +585,6 @@ RpcSystem makeRpcClient( return RpcSystem(network, nullptr); } -template -RpcSystem makeRpcClient( - VatNetwork& network, - RealmGatewayClient gateway) { - return RpcSystem(network, nullptr, gateway.template castAs>()); -} - } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-lite.h b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-lite.h index 59582278c5e..0d7b9156792 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-lite.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-lite.h @@ -21,13 +21,11 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include "message.h" +CAPNP_BEGIN_HEADER + namespace capnp { template @@ -43,3 +41,5 @@ inline schema::Node::Reader schemaProto() { } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader-test.c++ 
b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader-test.c++ index 228e00374d4..8e7c2d749c9 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader-test.c++ @@ -389,6 +389,17 @@ TEST(SchemaLoader, Generics) { } } +TEST(SchemaLoader, LoadStreaming) { + SchemaLoader loader; + + InterfaceSchema schema = + loader.load(Schema::from().getProto()).asInterface(); + + auto results = schema.getMethodByName("doStreamI").getResultType(); + KJ_EXPECT(results.isStreamResult()); + KJ_EXPECT(results.getShortDisplayName() == "StreamResult", results.getShortDisplayName()); +} + } // namespace } // namespace _ (private) } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader.c++ index a40c4684301..154f37de0ca 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader.c++ @@ -29,8 +29,9 @@ #include #include #include +#include -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #include #endif @@ -1294,7 +1295,7 @@ _::RawSchema* SchemaLoader::Impl::load(const schema::Node::Reader& reader, bool // If this schema is not newly-allocated, it may already be in the wild, specifically in the // dependency list of other schemas. Once the initializer is null, it is live, so we must do // a release-store here. -#if __GNUC__ +#if __GNUC__ || defined(__clang__) __atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE); __atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE); #elif _MSC_VER @@ -1391,7 +1392,7 @@ _::RawSchema* SchemaLoader::Impl::loadNative(const _::RawSchema* nativeSchema) { // If this schema is not newly-allocated, it may already be in the wild, specifically in the // dependency list of other schemas. Once the initializer is null, it is live, so we must do // a release-store here. -#if __GNUC__ +#if __GNUC__ || defined(__clang__) __atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE); __atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE); #elif _MSC_VER @@ -1507,14 +1508,8 @@ const _::RawBrandedSchema* SchemaLoader::Impl::makeBranded( const _::RawBrandedSchema* SchemaLoader::Impl::makeBranded( const _::RawSchema* schema, kj::ArrayPtr bindings) { - // Note that even if `bindings` is empty, we never want to return defaultBrand here because - // defaultBrand has special status. Normally, the lack of bindings means all parameters are - // "unspecified", which means their bindings are unknown and should be treated as AnyPointer. - // But defaultBrand represents a special case where all parameters are still parameters -- they - // haven't been bound in the first place. defaultBrand is used to represent the unbranded generic - // type, while a no-binding brand is equivalent to binding all parameters to AnyPointer. - if (bindings.size() == 0) { + // `defaultBrand` is the version where all type parameters are bound to `AnyPointer`. 
return &schema->defaultBrand; } @@ -1728,9 +1723,17 @@ void SchemaLoader::Impl::makeDep(_::RawBrandedSchema::Binding& result, uint64_t typeId, schema::Type::Which whichType, schema::Node::Which expectedKind, schema::Brand::Reader brand, kj::StringPtr scopeName, kj::Maybe> brandBindings) { - const _::RawSchema* schema = loadEmpty(typeId, - kj::str("(unknown type; seen as dependency of ", scopeName, ")"), - expectedKind, true); + const _::RawSchema* schema; + if (typeId == capnp::typeId()) { + // StreamResult is a very special type that is used to mark when a method is declared as + // streaming ("foo @0 () -> stream;"). We like to auto-load it if we see it as someone's + // dependency. + schema = loadNative(&_::rawSchema()); + } else { + schema = loadEmpty(typeId, + kj::str("(unknown type; seen as dependency of ", scopeName, ")"), + expectedKind, true); + } result.which = static_cast(whichType); result.schema = makeBranded(schema, brand, brandBindings); } @@ -1909,7 +1912,7 @@ void SchemaLoader::InitializerImpl::init(const _::RawSchema* schema) const { "A schema not belonging to this loader used its initializer."); // Disable the initializer. -#if __GNUC__ +#if __GNUC__ || defined(__clang__) __atomic_store_n(&mutableSchema->lazyInitializer, nullptr, __ATOMIC_RELEASE); __atomic_store_n(&mutableSchema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE); #elif _MSC_VER @@ -1946,7 +1949,7 @@ void SchemaLoader::BrandedInitializerImpl::init(const _::RawBrandedSchema* schem mutableSchema->dependencyCount = deps.size(); // It's initialized now, so disable the initializer. -#if __GNUC__ +#if __GNUC__ || defined(__clang__) __atomic_store_n(&mutableSchema->lazyInitializer, nullptr, __ATOMIC_RELEASE); #elif _MSC_VER std::atomic_thread_fence(std::memory_order_release); @@ -1986,7 +1989,10 @@ kj::Maybe SchemaLoader::tryGet( if (getResult.schema != nullptr && getResult.schema->lazyInitializer == nullptr) { if (brand.getScopes().size() > 0) { auto brandedSchema = impl.lockExclusive()->get()->makeBranded( - getResult.schema, brand, kj::arrayPtr(scope.raw->scopes, scope.raw->scopeCount)); + getResult.schema, brand, + scope.raw->isUnbound() + ? 
kj::Maybe>(nullptr) + : kj::arrayPtr(scope.raw->scopes, scope.raw->scopeCount)); brandedSchema->ensureInitialized(); return Schema(brandedSchema); } else { diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader.h b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader.h index 9479d3f92e0..90533158eb9 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-loader.h @@ -21,14 +21,12 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "schema.h" #include #include +CAPNP_BEGIN_HEADER + namespace capnp { class SchemaLoader { @@ -168,3 +166,5 @@ inline void SchemaLoader::loadCompiledTypeAndDependencies() { } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser-test.c++ index 4b1a32a7edd..c9435943ffd 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser-test.c++ @@ -112,6 +112,14 @@ TEST(SchemaParser, Basic) { EXPECT_EQ("garply", barFields[3].getProto().getName()); EXPECT_EQ(0x856789abcdef1234ull, getFieldTypeFileId(barFields[3])); + auto barStructs = barSchema.getAllNested(); + ASSERT_EQ(1, barStructs.size()); + EXPECT_EQ("Bar", barStructs[0].getUnqualifiedName()); + barFields = barStructs[0].asStruct().getFields(); + ASSERT_EQ(4u, barFields.size()); + EXPECT_EQ("baz", barFields[0].getProto().getName()); + EXPECT_EQ(0x823456789abcdef1ull, getFieldTypeFileId(barFields[0])); + auto bazSchema = parser.parseDiskFile( "not/used/because/already/loaded", "src/foo/baz.capnp", importPath); diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser.c++ index 6368ad131ec..909be25cc8b 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser.c++ @@ -287,7 +287,7 @@ void SchemaParser::setDiskFilesystem(kj::Filesystem& fs) { ParsedSchema SchemaParser::parseFile(kj::Own&& file) const { KJ_DEFER(impl->compiler.clearWorkspace()); - uint64_t id = impl->compiler.add(getModuleImpl(kj::mv(file))); + uint64_t id = impl->compiler.add(getModuleImpl(kj::mv(file))).getId(); impl->compiler.eagerlyCompile(id, compiler::Compiler::NODE | compiler::Compiler::CHILDREN | compiler::Compiler::DEPENDENCIES | compiler::Compiler::DEPENDENCY_DEPENDENCIES); @@ -314,6 +314,9 @@ SchemaLoader& SchemaParser::getLoader() { } kj::Maybe ParsedSchema::findNested(kj::StringPtr name) const { + // TODO(someday): lookup() doesn't handle generics correctly. Use the ModuleScope/CompiledType + // interface instead. We can also add an applybrand() method to ParsedSchema using those + // interfaces, which would allow us to expose generics more explicitly to e.g. Python. 
return parser->impl->compiler.lookup(getProto().getId(), name).map( [this](uint64_t childId) { return ParsedSchema(parser->impl->compiler.getLoader().get(childId), *parser); @@ -328,12 +331,24 @@ ParsedSchema ParsedSchema::getNested(kj::StringPtr nestedName) const { } } +ParsedSchema::ParsedSchemaList ParsedSchema::getAllNested() const { + return ParsedSchemaList(*this, getProto().getNestedNodes()); +} + schema::Node::SourceInfo::Reader ParsedSchema::getSourceInfo() const { return KJ_ASSERT_NONNULL(parser->getSourceInfo(*this)); } // ------------------------------------------------------------------- +ParsedSchema ParsedSchema::ParsedSchemaList::operator[](uint index) const { + return ParsedSchema( + parent.parser->impl->compiler.getLoader().get(list[index].getId()), + *parent.parser); +} + +// ------------------------------------------------------------------- + class SchemaFile::DiskSchemaFile final: public SchemaFile { public: DiskSchemaFile(const kj::ReadableDirectory& baseDir, kj::Path pathParam, diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser.h b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser.h index 7e54ddf3241..283b8cea638 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-parser.h @@ -21,14 +21,12 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "schema-loader.h" #include #include +CAPNP_BEGIN_HEADER + namespace capnp { class ParsedSchema; @@ -46,7 +44,7 @@ class SchemaParser { ParsedSchema parseFromDirectory( const kj::ReadableDirectory& baseDir, kj::Path path, kj::ArrayPtr importPath) const; - // Parse a file from the KJ filesystem API. Throws an exception if the file dosen't exist. + // Parse a file from the KJ filesystem API. Throws an exception if the file doesn't exist. // // `baseDir` and `path` are used together to resolve relative imports. `path` is the source // file's path within `baseDir`. Relative imports will be interpreted relative to `path` and @@ -60,7 +58,7 @@ class SchemaParser { // the `importPath` array must remain valid. `path` will be copied; it need not remain valid. // // This method is a shortcut, equivalent to: - // parser.parseFromDirectory(SchemaFile::newDiskFile(baseDir, path, importPath))`; + // parser.parseFile(SchemaFile::newDiskFile(baseDir, path, importPath))`; // // This method throws an exception if any errors are encountered in the file or in anything the // file depends on. 
Note that merely importing another file does not count as a dependency on @@ -73,7 +71,7 @@ class SchemaParser { // // auto fs = kj::newDiskFilesystem(); // SchemaParser parser; - // auto schema = parser->parseFromDirectory(fs->getCurrent(), + // auto schema = parser.parseFromDirectory(fs->getCurrent(), // kj::Path::parse("foo/bar.capnp"), nullptr); // // Hint: To use in-memory data rather than real disk, you can use kj::newInMemoryDirectory(), @@ -83,7 +81,7 @@ class SchemaParser { // auto path = kj::Path::parse("foo/bar.capnp"); // dir->openFile(path, kj::WriteMode::CREATE | kj::WriteMode::CREATE_PARENT) // ->writeAll("struct Foo {}"); - // auto schema = parser->parseFromDirectory(*dir, path, nullptr); + // auto schema = parser.parseFromDirectory(*dir, path, nullptr); // // Hint: You can create an in-memory directory but then populate it with real files from disk, // in order to control what is visible while also avoiding reading files yourself or making @@ -95,7 +93,7 @@ class SchemaParser { // auto realPath = kj::Path::parse("path/to/some/file.capnp"); // dir->transfer(fakePath, kj::WriteMode::CREATE | kj::WriteMode::CREATE_PARENT, // fs->getCurrent(), realPath, kj::TransferMode::LINK); - // auto schema = parser->parseFromDirectory(*dir, fakePath, nullptr); + // auto schema = parser.parseFromDirectory(*dir, fakePath, nullptr); // // In this example, note that any imports in the file will fail, since the in-memory directory // you created contains no files except the specific one you linked in. @@ -160,6 +158,9 @@ class ParsedSchema: public Schema { // ParsedSchema is an extension of Schema which also has the ability to look up nested nodes // by name. See `SchemaParser`. + class ParsedSchemaList; + friend class ParsedSchemaList; + public: inline ParsedSchema(): parser(nullptr) {} @@ -171,6 +172,9 @@ class ParsedSchema: public Schema { // Gets the nested node with the given name, or throws an exception if there is no such nested // declaration. + ParsedSchemaList getAllNested() const; + // Get all the nested nodes + schema::Node::SourceInfo::Reader getSourceInfo() const; // Get the source info for this schema. 
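To make the new getAllNested() accessor above concrete, here is a minimal usage sketch that combines it with the in-memory-directory hint from the comments above. The schema text, file name, and the kj::newInMemoryDirectory()/kj::nullClock() setup are illustrative assumptions rather than part of this patch; only parseFromDirectory(), getAllNested(), and getUnqualifiedName() come from the API shown here.

    #include <capnp/schema-parser.h>
    #include <kj/filesystem.h>
    #include <kj/debug.h>

    void listTopLevelDeclarations() {
      // Hypothetical in-memory schema file; any readable directory works the same way.
      auto dir = kj::newInMemoryDirectory(kj::nullClock());
      auto path = kj::Path::parse("example.capnp");
      dir->openFile(path, kj::WriteMode::CREATE | kj::WriteMode::CREATE_PARENT)
          ->writeAll("@0xdf98a67655f29cb7; struct Foo { struct Bar { x @0 :Int32; } }");

      capnp::SchemaParser parser;
      capnp::ParsedSchema fileSchema = parser.parseFromDirectory(*dir, kj::mv(path), nullptr);

      // getAllNested() yields a ParsedSchemaList covering every nested declaration,
      // so callers no longer need to know declaration names up front to enumerate them.
      for (capnp::ParsedSchema nested: fileSchema.getAllNested()) {
        KJ_LOG(INFO, nested.getUnqualifiedName());
      }
    }

Keeping `dir` alive for the lifetime of `parser` is the safe choice here, since compilation is lazy and the parser may still read from the directory after parseFromDirectory() returns.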
@@ -181,6 +185,27 @@ class ParsedSchema: public Schema { friend class SchemaParser; }; +class ParsedSchema::ParsedSchemaList { +public: + ParsedSchemaList() = default; // empty list + + inline uint size() const { return list.size(); } + ParsedSchema operator[](uint index) const; + + typedef _::IndexingIterator Iterator; + inline Iterator begin() const { return Iterator(this, 0); } + inline Iterator end() const { return Iterator(this, size()); } + +private: + ParsedSchema parent; + List::Reader list; + + inline ParsedSchemaList(ParsedSchema parent, List::Reader list) + : parent(parent), list(list) {} + + friend class ParsedSchema; +}; + // ======================================================================================= // Advanced API @@ -243,3 +268,5 @@ class SchemaFile { }; } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-test.c++ index 31962499d95..f33152e2c52 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema-test.c++ @@ -52,6 +52,7 @@ TEST(Schema, Structs) { EXPECT_TRUE(schema.asStruct() == schema); EXPECT_NONFATAL_FAILURE(schema.asEnum()); EXPECT_NONFATAL_FAILURE(schema.asInterface()); + ASSERT_EQ("TestAllTypes", schema.getUnqualifiedName()); ASSERT_EQ(schema.getFields().size(), schema.getProto().getStruct().getFields().size()); StructSchema::Field field = schema.getFields()[0]; diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.c++ index 5c883abc0b6..8df65313a62 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.c++ @@ -22,6 +22,7 @@ #include "schema.h" #include "message.h" #include +#include namespace capnp { @@ -301,6 +302,11 @@ kj::StringPtr Schema::getShortDisplayName() const { return proto.getDisplayName().slice(proto.getDisplayNamePrefixLength()); } +const kj::StringPtr Schema::getUnqualifiedName() const { + auto proto = getProto(); + return proto.getDisplayName().slice(proto.getDisplayNamePrefixLength()); +} + void Schema::requireUsableAs(const _::RawSchema* expected) const { KJ_REQUIRE(raw->generic == expected || (expected != nullptr && raw->generic->canCastTo == expected), @@ -503,6 +509,11 @@ kj::Maybe StructSchema::getFieldByDiscriminant(uint16_t dis } } +bool StructSchema::isStreamResult() const { + auto& streamRaw = _::rawSchema(); + return raw->generic == &streamRaw || raw->generic->canCastTo == &streamRaw; +} + Type StructSchema::Field::getType() const { auto proto = getProto(); uint location = _::RawBrandedSchema::makeDepLocation(_::RawBrandedSchema::DepKind::FIELD, index); @@ -890,12 +901,24 @@ uint Type::hashCode() const { case schema::Type::FLOAT64: case schema::Type::TEXT: case schema::Type::DATA: - return kj::hashCode(baseType, listDepth); + if (listDepth == 0) { + // Make sure that hashCode(Type(baseType)) == hashCode(baseType), otherwise HashMap lookups + // keyed by `Type` won't work when the caller passes `baseType` as the key. + return kj::hashCode(baseType); + } else { + return kj::hashCode(baseType, listDepth); + } case schema::Type::STRUCT: case schema::Type::ENUM: case schema::Type::INTERFACE: - return kj::hashCode(schema, listDepth); + if (listDepth == 0) { + // Make sure that hashCode(Type(schema)) == hashCode(schema), otherwise HashMap lookups + // keyed by `Type` won't work when the caller passes `schema` as the key. 
+ return kj::hashCode(schema); + } else { + return kj::hashCode(schema, listDepth); + } case schema::Type::LIST: KJ_UNREACHABLE; @@ -905,7 +928,7 @@ uint Type::hashCode() const { // both branches compile to the same instructions and can optimize it away. uint16_t val = scopeId != 0 || isImplicitParam ? paramIndex : static_cast(anyPointerKind); - return kj::hashCode(val, isImplicitParam, scopeId); + return kj::hashCode(val, isImplicitParam, scopeId, listDepth); } } diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp index f9fbe615335..a47c1517c00 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp @@ -398,8 +398,21 @@ struct Brand { # List of parameter bindings. inherit @2 :Void; - # The place where this Brand appears is actually within this scope or a sub-scope, - # and the bindings for this scope should be inherited from the reference point. + # The place where the Brand appears is within this scope or a sub-scope, and bindings + # for this scope are deferred to later Brand applications. This is equivalent to a + # pass-through binding list, where each of this scope's parameters is bound to itself. + # For example: + # + # struct Outer(T) { + # struct Inner { + # value @0 :T; + # } + # innerInherit @0 :Inner; # Outer Brand.Scope is `inherit`. + # innerBindSelf @1 :Outer(T).Inner; # Outer Brand.Scope explicitly binds T to T. + # } + # + # The innerInherit and innerBindSelf fields have equivalent types, but different Brand + # styles. } } diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp.c++ index c144fda67a8..efe699fd067 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp.c++ @@ -3647,7 +3647,7 @@ constexpr ::capnp::Kind Field::_capnpPrivate::kind; constexpr ::capnp::_::RawSchema const* Field::_capnpPrivate::schema; #endif // !CAPNP_LITE -#ifndef _MSC_VER +#if !defined(_MSC_VER) || defined(__clang__) constexpr ::uint16_t Field::NO_DISCRIMINANT; #endif // Field::Slot diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp.h index f6efdd743a9..114d83af05f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.capnp.h @@ -6,11 +6,13 @@ #include #include -#if CAPNP_VERSION != 7000 +#if CAPNP_VERSION != 9001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif +CAPNP_BEGIN_HEADER + namespace capnp { namespace schemas { @@ -8229,3 +8231,5 @@ inline ::capnp::Orphan< ::capnp::Text> CodeGeneratorRequest::RequestedFile::Impo } // namespace } // namespace +CAPNP_END_HEADER + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.h b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.h index c46233af8aa..5cc20b5e2ea 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/schema.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/schema.h @@ -21,18 +21,27 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #if CAPNP_LITE #error "Reflection APIs, including this header, are not available in lite mode." #endif +#undef CONST +// For some ridiculous reason, Windows defines CONST to const. 
We have an enum value called CONST +// in schema.capnp.h, so if this is defined, compilation is gonna fail. So we undef it because +// that seems strictly better than failing entirely. But this could cause trouble for people later +// on if they, say, include windows.h, then include schema.h, then include another windows API +// header that uses CONST. I suppose they may have to re-#define CONST in between, or change the +// header ordering. Sorry. +// +// Please don't file a bug report telling us to change our enum naming style. You are at least +// seven years too late. + #include #include #include // work-around macro conflict with `VOID` +CAPNP_BEGIN_HEADER + namespace capnp { class Schema; @@ -143,6 +152,9 @@ class Schema { kj::StringPtr getShortDisplayName() const; // Get the short version of the node's display name. + const kj::StringPtr getUnqualifiedName() const; + // Get the display name "nickname" of this node minus the prefix + private: const _::RawBrandedSchema* raw; @@ -260,6 +272,9 @@ class StructSchema: public Schema { // there is no such field. (If the schema does not represent a union or a struct containing // an unnamed union, then this always returns null.) + bool isStreamResult() const; + // Convenience method to check if this is the result type of a streaming RPC method. + private: StructSchema(Schema base): Schema(base) {} template static inline StructSchema fromImpl() { @@ -491,6 +506,9 @@ class InterfaceSchema::Method { inline uint16_t getOrdinal() const { return ordinal; } inline uint getIndex() const { return ordinal; } + bool isStreaming() const { return getResultType().isStreamResult(); } + // Check if this is a streaming method. + StructSchema getParamType() const; StructSchema getResultType() const; // Get the parameter and result types, including substituting generic parameters. 
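Since isStreaming() and isStreamResult() above are new reflection entry points, a short hedged sketch may help: given any InterfaceSchema (for example obtained via capnp::Schema::from<MyInterface>() for a generated interface, or from a SchemaLoader at runtime), streaming methods can be identified as shown below. The function name and log message are illustrative only.

    #include <capnp/schema.h>
    #include <kj/debug.h>

    void reportStreamingMethods(capnp::InterfaceSchema schema) {
      for (capnp::InterfaceSchema::Method method: schema.getMethods()) {
        if (method.isStreaming()) {
          // isStreaming() is shorthand for getResultType().isStreamResult(), i.e. the
          // method was declared as "foo @N (...) -> stream;" in the schema file.
          KJ_LOG(INFO, "streaming method", method.getProto().getName());
        }
      }
    }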
@@ -974,3 +992,5 @@ inline Type Type::wrapInList(uint depth) const { } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async-test.c++ index d69e2e91ee0..380b142a067 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async-test.c++ @@ -23,6 +23,10 @@ #define _GNU_SOURCE #endif +#if _WIN32 +#include +#endif + #include "serialize-async.h" #include "serialize.h" #include @@ -33,7 +37,6 @@ #include #if _WIN32 -#define WIN32_LEAN_AND_MEAN #include #include namespace kj { @@ -343,6 +346,42 @@ TEST(SerializeAsyncTest, WriteAsyncEvenSegmentCount) { writeMessage(*output, message).wait(ioContext.waitScope); } +TEST(SerializeAsyncTest, WriteMultipleMessagesAsync) { + PipeWithSmallBuffer fds; + auto ioContext = kj::setupAsyncIo(); + auto output = ioContext.lowLevelProvider->wrapOutputFd(fds[1]); + + const int numMessages = 5; + const int baseListSize = 16; + auto messages = kj::heapArrayBuilder(numMessages); + for (int i = 0; i < numMessages; ++i) { + messages.add(i+1); + auto root = messages[i].getRoot(); + auto list = root.initStructList(baseListSize+i); + for (auto element: list) { + initTestMessage(element); + } + } + + kj::Thread thread([&]() { + SocketInputStream input(fds[0]); + for (int i = 0; i < numMessages; ++i) { + InputStreamMessageReader reader(input); + auto listReader = reader.getRoot().getStructList(); + EXPECT_EQ(baseListSize+i, listReader.size()); + for (auto element: listReader) { + checkTestMessage(element); + } + } + }); + + auto msgs = kj::heapArray(numMessages); + for (int i = 0; i < numMessages; ++i) { + msgs[i] = &messages[i]; + } + writeMessages(*output, msgs).wait(ioContext.waitScope); +} + } // namespace } // namespace _ (private) } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async.c++ index c23f562df82..53b30e9ac0f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async.c++ @@ -19,8 +19,20 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +// Includes just for need SOL_SOCKET and SO_SNDBUF +#if _WIN32 +#include + +#include +#include +#include +#else +#include +#endif + #include "serialize-async.h" #include +#include namespace capnp { @@ -35,6 +47,10 @@ public: kj::Promise read(kj::AsyncInputStream& inputStream, kj::ArrayPtr scratchSpace); + kj::Promise> readWithFds( + kj::AsyncCapabilityStream& inputStream, + kj::ArrayPtr fds, kj::ArrayPtr scratchSpace); + // implements MessageReader ---------------------------------------- kj::ArrayPtr getSegment(uint id) override { @@ -71,15 +87,35 @@ kj::Promise AsyncMessageReader::read(kj::AsyncInputStream& inputStream, return false; } else if (n < sizeof(firstWord)) { // EOF in first word. 
- KJ_FAIL_REQUIRE("Premature EOF.") { - return false; - } + kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, "Premature EOF.")); + return false; } return readAfterFirstWord(inputStream, scratchSpace).then([]() { return true; }); }); } +kj::Promise> AsyncMessageReader::readWithFds( + kj::AsyncCapabilityStream& inputStream, kj::ArrayPtr fds, + kj::ArrayPtr scratchSpace) { + return inputStream.tryReadWithFds(firstWord, sizeof(firstWord), sizeof(firstWord), + fds.begin(), fds.size()) + .then([this,&inputStream,KJ_CPCAP(scratchSpace)] + (kj::AsyncCapabilityStream::ReadResult result) mutable + -> kj::Promise> { + if (result.byteCount == 0) { + return kj::Maybe(nullptr); + } else if (result.byteCount < sizeof(firstWord)) { + // EOF in first word. + kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, "Premature EOF.")); + return kj::Maybe(nullptr); + } + + return readAfterFirstWord(inputStream, scratchSpace) + .then([result]() -> kj::Maybe { return result.capCount; }); + }); +} + kj::Promise AsyncMessageReader::readAfterFirstWord(kj::AsyncInputStream& inputStream, kj::ArrayPtr scratchSpace) { if (segmentCount() == 0) { @@ -152,24 +188,57 @@ kj::Promise> readMessage( kj::AsyncInputStream& input, ReaderOptions options, kj::ArrayPtr scratchSpace) { auto reader = kj::heap(options); auto promise = reader->read(input, scratchSpace); - return promise.then(kj::mvCapture(reader, [](kj::Own&& reader, bool success) { - KJ_REQUIRE(success, "Premature EOF.") { break; } + return promise.then([reader = kj::mv(reader)](bool success) mutable -> kj::Own { + if (!success) { + kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, "Premature EOF.")); + } return kj::mv(reader); - })); + }); } kj::Promise>> tryReadMessage( kj::AsyncInputStream& input, ReaderOptions options, kj::ArrayPtr scratchSpace) { auto reader = kj::heap(options); auto promise = reader->read(input, scratchSpace); - return promise.then(kj::mvCapture(reader, - [](kj::Own&& reader, bool success) -> kj::Maybe> { + return promise.then([reader = kj::mv(reader)](bool success) mutable + -> kj::Maybe> { if (success) { return kj::mv(reader); } else { return nullptr; } - })); + }); +} + +kj::Promise readMessage( + kj::AsyncCapabilityStream& input, kj::ArrayPtr fdSpace, + ReaderOptions options, kj::ArrayPtr scratchSpace) { + auto reader = kj::heap(options); + auto promise = reader->readWithFds(input, fdSpace, scratchSpace); + return promise.then([reader = kj::mv(reader), fdSpace](kj::Maybe nfds) mutable + -> MessageReaderAndFds { + KJ_IF_MAYBE(n, nfds) { + return { kj::mv(reader), fdSpace.slice(0, *n) }; + } else { + kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, "Premature EOF.")); + return { kj::mv(reader), nullptr }; + } + }); +} + +kj::Promise> tryReadMessage( + kj::AsyncCapabilityStream& input, kj::ArrayPtr fdSpace, + ReaderOptions options, kj::ArrayPtr scratchSpace) { + auto reader = kj::heap(options); + auto promise = reader->readWithFds(input, fdSpace, scratchSpace); + return promise.then([reader = kj::mv(reader), fdSpace](kj::Maybe nfds) mutable + -> kj::Maybe { + KJ_IF_MAYBE(n, nfds) { + return MessageReaderAndFds { kj::mv(reader), fdSpace.slice(0, *n) }; + } else { + return nullptr; + } + }); } // ======================================================================================= @@ -183,38 +252,254 @@ struct WriteArrays { kj::Array> pieces; }; -} // namespace +inline size_t tableSizeForSegments(size_t segmentsSize) { + return (segmentsSize + 2) & ~size_t(1); +} -kj::Promise writeMessage(kj::AsyncOutputStream& output, - 
kj::ArrayPtr> segments) { +// Helper function that allocates and fills the pointed-to table with info about the segments and +// populates the pieces array with pointers to the segments. +void fillWriteArraysWithMessage(kj::ArrayPtr> segments, + kj::ArrayPtr<_::WireValue> table, + kj::ArrayPtr> pieces) { KJ_REQUIRE(segments.size() > 0, "Tried to serialize uninitialized message."); - WriteArrays arrays; - arrays.table = kj::heapArray<_::WireValue>((segments.size() + 2) & ~size_t(1)); - // We write the segment count - 1 because this makes the first word zero for single-segment // messages, improving compression. We don't bother doing this with segment sizes because // one-word segments are rare anyway. - arrays.table[0].set(segments.size() - 1); + table[0].set(segments.size() - 1); for (uint i = 0; i < segments.size(); i++) { - arrays.table[i + 1].set(segments[i].size()); + table[i + 1].set(segments[i].size()); } if (segments.size() % 2 == 0) { // Set padding byte. - arrays.table[segments.size() + 1].set(0); + table[segments.size() + 1].set(0); } - arrays.pieces = kj::heapArray>(segments.size() + 1); - arrays.pieces[0] = arrays.table.asBytes(); - + KJ_ASSERT(pieces.size() == segments.size() + 1, "incorrectly sized pieces array during write"); + pieces[0] = table.asBytes(); for (uint i = 0; i < segments.size(); i++) { - arrays.pieces[i + 1] = segments[i].asBytes(); + pieces[i + 1] = segments[i].asBytes(); } +} + +template +kj::Promise writeMessageImpl(kj::ArrayPtr> segments, + WriteFunc&& writeFunc) { + KJ_REQUIRE(segments.size() > 0, "Tried to serialize uninitialized message."); + + WriteArrays arrays; + arrays.table = kj::heapArray<_::WireValue>(tableSizeForSegments(segments.size())); + arrays.pieces = kj::heapArray>(segments.size() + 1); + fillWriteArraysWithMessage(segments, arrays.table, arrays.pieces); - auto promise = output.write(arrays.pieces); + auto promise = writeFunc(arrays.pieces); // Make sure the arrays aren't freed until the write completes. return promise.then(kj::mvCapture(arrays, [](WriteArrays&&) {})); } +template +kj::Promise writeMessagesImpl( + kj::ArrayPtr>> messages, WriteFunc&& writeFunc) { + KJ_REQUIRE(messages.size() > 0, "Tried to serialize zero messages."); + + // Determine how large the shared table and pieces arrays needs to be. 
+ size_t tableSize = 0; + size_t piecesSize = 0; + for (auto& segments : messages) { + tableSize += tableSizeForSegments(segments.size()); + piecesSize += segments.size() + 1; + } + auto table = kj::heapArray<_::WireValue>(tableSize); + auto pieces = kj::heapArray>(piecesSize); + + size_t tableValsWritten = 0; + size_t piecesWritten = 0; + for (int i = 0; i < messages.size(); ++i) { + const size_t tableValsToWrite = tableSizeForSegments(messages[i].size()); + const size_t piecesToWrite = messages[i].size() + 1; + fillWriteArraysWithMessage( + messages[i], + table.slice(tableValsWritten, tableValsWritten + tableValsToWrite), + pieces.slice(piecesWritten, piecesWritten + piecesToWrite)); + tableValsWritten += tableValsToWrite; + piecesWritten += piecesToWrite; + } + + auto promise = writeFunc(pieces); + return promise.attach(kj::mv(table), kj::mv(pieces)); +} + +} // namespace + +kj::Promise writeMessage(kj::AsyncOutputStream& output, + kj::ArrayPtr> segments) { + return writeMessageImpl(segments, + [&](kj::ArrayPtr> pieces) { + return output.write(pieces); + }); +} + +kj::Promise writeMessage(kj::AsyncCapabilityStream& output, kj::ArrayPtr fds, + kj::ArrayPtr> segments) { + return writeMessageImpl(segments, + [&](kj::ArrayPtr> pieces) { + return output.writeWithFds(pieces[0], pieces.slice(1, pieces.size()), fds); + }); +} + +kj::Promise writeMessages( + kj::AsyncOutputStream& output, + kj::ArrayPtr>> messages) { + return writeMessagesImpl(messages, + [&](kj::ArrayPtr> pieces) { + return output.write(pieces); + }); +} + +kj::Promise writeMessages( + kj::AsyncOutputStream& output, kj::ArrayPtr builders) { + auto messages = kj::heapArray>>(builders.size()); + for (int i = 0; i < builders.size(); ++i) { + messages[i] = builders[i]->getSegmentsForOutput(); + } + return writeMessages(output, messages); +} + +kj::Promise MessageStream::writeMessages(kj::ArrayPtr builders) { + auto messages = kj::heapArray>>(builders.size()); + for (int i = 0; i < builders.size(); ++i) { + messages[i] = builders[i]->getSegmentsForOutput(); + } + return writeMessages(messages); +} + +AsyncIoMessageStream::AsyncIoMessageStream(kj::AsyncIoStream& stream) + : stream(stream) {}; + +kj::Promise> AsyncIoMessageStream::tryReadMessage( + kj::ArrayPtr fdSpace, + ReaderOptions options, + kj::ArrayPtr scratchSpace) { + return capnp::tryReadMessage(stream, options, scratchSpace) + .then([](kj::Maybe> maybeReader) -> kj::Maybe { + KJ_IF_MAYBE(reader, maybeReader) { + return MessageReaderAndFds { kj::mv(*reader), nullptr }; + } else { + return nullptr; + } + }); +} + +kj::Promise AsyncIoMessageStream::writeMessage( + kj::ArrayPtr fds, + kj::ArrayPtr> segments) { + return capnp::writeMessage(stream, segments); +} + +kj::Promise AsyncIoMessageStream::writeMessages( + kj::ArrayPtr>> messages) { + return capnp::writeMessages(stream, messages); +} + +kj::Maybe getSendBufferSize(kj::AsyncIoStream& stream) { + // TODO(perf): It might be nice to have a tryGetsockopt() that doesn't require catching + // exceptions? + int bufSize = 0; + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + uint len = sizeof(int); + stream.getsockopt(SOL_SOCKET, SO_SNDBUF, &bufSize, &len); + KJ_ASSERT(len == sizeof(bufSize)) { break; } + })) { + if (exception->getType() != kj::Exception::Type::UNIMPLEMENTED) { + // TODO(someday): Figure out why getting SO_SNDBUF sometimes throws EINVAL. 
I suspect it + // happens when the remote side has closed their read end, meaning we no longer have + // a send buffer, but I don't know what is the best way to verify that that was actually + // the reason. I'd prefer not to ignore EINVAL errors in general. + + // kj::throwRecoverableException(kj::mv(*exception)); + } + return nullptr; + } + return bufSize; +} + +kj::Promise AsyncIoMessageStream::end() { + stream.shutdownWrite(); + return kj::READY_NOW; +} + +kj::Maybe AsyncIoMessageStream::getSendBufferSize() { + return capnp::getSendBufferSize(stream); +} + +AsyncCapabilityMessageStream::AsyncCapabilityMessageStream(kj::AsyncCapabilityStream& stream) + : stream(stream) {}; + +kj::Promise> AsyncCapabilityMessageStream::tryReadMessage( + kj::ArrayPtr fdSpace, + ReaderOptions options, + kj::ArrayPtr scratchSpace) { + return capnp::tryReadMessage(stream, fdSpace, options, scratchSpace); +} + +kj::Promise AsyncCapabilityMessageStream::writeMessage( + kj::ArrayPtr fds, + kj::ArrayPtr> segments) { + return capnp::writeMessage(stream, fds, segments); +} + +kj::Promise AsyncCapabilityMessageStream::writeMessages( + kj::ArrayPtr>> messages) { + return capnp::writeMessages(stream, messages); +} + +kj::Maybe AsyncCapabilityMessageStream::getSendBufferSize() { + return capnp::getSendBufferSize(stream); +} + +kj::Promise AsyncCapabilityMessageStream::end() { + stream.shutdownWrite(); + return kj::READY_NOW; +} + +kj::Promise> MessageStream::readMessage( + ReaderOptions options, + kj::ArrayPtr scratchSpace) { + return tryReadMessage(options, scratchSpace).then([](kj::Maybe> maybeResult) { + KJ_IF_MAYBE(result, maybeResult) { + return kj::mv(*result); + } else { + kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, "Premature EOF.")); + KJ_UNREACHABLE; + } + }); +} + +kj::Promise>> MessageStream::tryReadMessage( + ReaderOptions options, + kj::ArrayPtr scratchSpace) { + return tryReadMessage(nullptr, options, scratchSpace) + .then([](auto maybeReaderAndFds) -> kj::Maybe> { + KJ_IF_MAYBE(readerAndFds, maybeReaderAndFds) { + return kj::mv(readerAndFds->reader); + } else { + return nullptr; + } + }); +} + +kj::Promise MessageStream::readMessage( + kj::ArrayPtr fdSpace, + ReaderOptions options, kj::ArrayPtr scratchSpace) { + return tryReadMessage(fdSpace, options, scratchSpace).then([](auto maybeResult) { + KJ_IF_MAYBE(result, maybeResult) { + return kj::mv(*result); + } else { + kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, "Premature EOF.")); + KJ_UNREACHABLE; + } + }); +} + } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async.h b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async.h index 944dc8b1305..9bee9ce4c3c 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-async.h @@ -21,35 +21,180 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include "message.h" +CAPNP_BEGIN_HEADER + namespace capnp { +struct MessageReaderAndFds { + kj::Own reader; + kj::ArrayPtr fds; +}; + +class MessageStream { + // Interface over which messages can be sent and received; virtualizes + // the functionality above. +public: + virtual kj::Promise> tryReadMessage( + kj::ArrayPtr fdSpace, + ReaderOptions options = ReaderOptions(), kj::ArrayPtr scratchSpace = nullptr) = 0; + // Read a message that may also have file descriptors attached, e.g. from a Unix socket with + // SCM_RIGHTS. Returns null on EOF. 
+ // + // `scratchSpace`, if provided, must remain valid until the returned MessageReader is destroyed. + + kj::Promise>> tryReadMessage( + ReaderOptions options = ReaderOptions(), + kj::ArrayPtr scratchSpace = nullptr); + // Equivalent to the above with fdSpace = nullptr. + + kj::Promise readMessage( + kj::ArrayPtr fdSpace, + ReaderOptions options = ReaderOptions(), kj::ArrayPtr scratchSpace = nullptr); + kj::Promise> readMessage( + ReaderOptions options = ReaderOptions(), + kj::ArrayPtr scratchSpace = nullptr); + // Like tryReadMessage, but throws an exception on EOF. + + virtual kj::Promise writeMessage( + kj::ArrayPtr fds, + kj::ArrayPtr> segments) + KJ_WARN_UNUSED_RESULT = 0; + kj::Promise writeMessage( + kj::ArrayPtr fds, + MessageBuilder& builder) + KJ_WARN_UNUSED_RESULT; + // Write a message with FDs attached, e.g. to a Unix socket with SCM_RIGHTS. + // The parameters must remain valid until the returned promise resolves. + + kj::Promise writeMessage( + kj::ArrayPtr> segments) + KJ_WARN_UNUSED_RESULT; + kj::Promise writeMessage(MessageBuilder& builder) + KJ_WARN_UNUSED_RESULT; + // Equivalent to the above with fds = nullptr. + + virtual kj::Promise writeMessages( + kj::ArrayPtr>> messages) + KJ_WARN_UNUSED_RESULT = 0; + kj::Promise writeMessages(kj::ArrayPtr builders) + KJ_WARN_UNUSED_RESULT; + // Similar to the above, but for writing multiple messages at a time in a batch. + + virtual kj::Maybe getSendBufferSize() = 0; + // Get the size of the underlying send buffer, if applicable. The RPC + // system uses this as a hint for flow control purposes; see: + // + // https://capnproto.org/news/2020-04-23-capnproto-0.8.html#multi-stream-flow-control + // + // ...for a more thorough explanation of how this is used. Implementations + // may return nullptr if they do not have access to this information, or if + // the underlying transport does not use a congestion window. + + virtual kj::Promise end() = 0; + // Cleanly shut down just the write end of the transport, while keeping the read end open. + +}; + +class AsyncIoMessageStream final: public MessageStream { + // A MessageStream that wraps an AsyncIoStream. +public: + explicit AsyncIoMessageStream(kj::AsyncIoStream& stream); + + // Implements MessageStream + kj::Promise> tryReadMessage( + kj::ArrayPtr fdSpace, + ReaderOptions options = ReaderOptions(), kj::ArrayPtr scratchSpace = nullptr) override; + kj::Promise writeMessage( + kj::ArrayPtr fds, + kj::ArrayPtr> segments) override; + kj::Promise writeMessages( + kj::ArrayPtr>> messages) override; + kj::Maybe getSendBufferSize() override; + + kj::Promise end() override; +private: + kj::AsyncIoStream& stream; +}; + +class AsyncCapabilityMessageStream final: public MessageStream { + // A MessageStream that wraps an AsyncCapabilityStream. +public: + explicit AsyncCapabilityMessageStream(kj::AsyncCapabilityStream& stream); + + // Implements MessageStream + kj::Promise> tryReadMessage( + kj::ArrayPtr fdSpace, + ReaderOptions options = ReaderOptions(), kj::ArrayPtr scratchSpace = nullptr) override; + kj::Promise writeMessage( + kj::ArrayPtr fds, + kj::ArrayPtr> segments) override; + kj::Promise writeMessages( + kj::ArrayPtr>> messages) override; + kj::Maybe getSendBufferSize() override; + kj::Promise end() override; +private: + kj::AsyncCapabilityStream& stream; +}; + +// ----------------------------------------------------------------------------- +// Stand-alone functions for reading & writing messages on AsyncInput/AsyncOutputStreams. +// +// In general, foo(stream, ...) 
is equivalent to +// AsyncIoMessageStream(stream).foo(...), whenever the latter would type check. +// +// The first argument must remain valid until the returned promise resolves +// (or is canceled). + kj::Promise> readMessage( kj::AsyncInputStream& input, ReaderOptions options = ReaderOptions(), kj::ArrayPtr scratchSpace = nullptr); -// Read a message asynchronously. -// -// `input` must remain valid until the returned promise resolves (or is canceled). -// -// `scratchSpace`, if provided, must remain valid until the returned MessageReader is destroyed. kj::Promise>> tryReadMessage( kj::AsyncInputStream& input, ReaderOptions options = ReaderOptions(), kj::ArrayPtr scratchSpace = nullptr); -// Like `readMessage` but returns null on EOF. kj::Promise writeMessage(kj::AsyncOutputStream& output, kj::ArrayPtr> segments) KJ_WARN_UNUSED_RESULT; + kj::Promise writeMessage(kj::AsyncOutputStream& output, MessageBuilder& builder) KJ_WARN_UNUSED_RESULT; -// Write asynchronously. The parameters must remain valid until the returned promise resolves. + +// ----------------------------------------------------------------------------- +// Stand-alone versions that support FD passing. +// +// For each of these, `foo(stream, ...)` is equivalent to +// `AsyncCapabilityMessageStream(stream).foo(...)`. + +kj::Promise readMessage( + kj::AsyncCapabilityStream& input, kj::ArrayPtr fdSpace, + ReaderOptions options = ReaderOptions(), kj::ArrayPtr scratchSpace = nullptr); + +kj::Promise> tryReadMessage( + kj::AsyncCapabilityStream& input, kj::ArrayPtr fdSpace, + ReaderOptions options = ReaderOptions(), kj::ArrayPtr scratchSpace = nullptr); + +kj::Promise writeMessage(kj::AsyncCapabilityStream& output, kj::ArrayPtr fds, + kj::ArrayPtr> segments) + KJ_WARN_UNUSED_RESULT; +kj::Promise writeMessage(kj::AsyncCapabilityStream& output, kj::ArrayPtr fds, + MessageBuilder& builder) + KJ_WARN_UNUSED_RESULT; + + +// ----------------------------------------------------------------------------- +// Stand-alone functions for writing multiple messages at once on AsyncOutputStreams. 
+ +kj::Promise writeMessages(kj::AsyncOutputStream& output, + kj::ArrayPtr>> messages) + KJ_WARN_UNUSED_RESULT; + +kj::Promise writeMessages( + kj::AsyncOutputStream& output, kj::ArrayPtr builders) + KJ_WARN_UNUSED_RESULT; // ======================================================================================= // inline implementation details @@ -57,5 +202,24 @@ kj::Promise writeMessage(kj::AsyncOutputStream& output, MessageBuilder& bu inline kj::Promise writeMessage(kj::AsyncOutputStream& output, MessageBuilder& builder) { return writeMessage(output, builder.getSegmentsForOutput()); } +inline kj::Promise writeMessage( + kj::AsyncCapabilityStream& output, kj::ArrayPtr fds, MessageBuilder& builder) { + return writeMessage(output, fds, builder.getSegmentsForOutput()); +} + +inline kj::Promise MessageStream::writeMessage(kj::ArrayPtr> segments) { + return writeMessage(nullptr, segments); +} + +inline kj::Promise MessageStream::writeMessage(MessageBuilder& builder) { + return writeMessage(builder.getSegmentsForOutput()); +} + +inline kj::Promise MessageStream::writeMessage( + kj::ArrayPtr fds, MessageBuilder& builder) { + return writeMessage(fds, builder.getSegmentsForOutput()); +} } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-packed.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-packed.c++ index 262ec100b8b..04fd42d91cc 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-packed.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-packed.c++ @@ -140,7 +140,7 @@ size_t PackedInputStream::tryRead(void* dst, size_t minBytes, size_t maxBytes) { return out - reinterpret_cast(dst); } - uint inRemaining = BUFFER_REMAINING; + size_t inRemaining = BUFFER_REMAINING; if (inRemaining >= runLength) { // Fast path. memcpy(out, in, runLength); @@ -266,7 +266,7 @@ void PackedInputStream::skip(size_t bytes) { bytes -= runLength; - uint inRemaining = BUFFER_REMAINING; + size_t inRemaining = BUFFER_REMAINING; if (inRemaining > runLength) { // Fast path. in += runLength; diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-packed.h b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-packed.h index cea1fb4016a..99131f4e8fb 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-packed.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-packed.h @@ -21,12 +21,10 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "serialize.h" +CAPNP_BEGIN_HEADER + namespace capnp { namespace _ { // private @@ -126,3 +124,5 @@ inline void writePackedMessageToFd(int fd, MessageBuilder& builder) { } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-test.c++ index 6badc206000..d114358abd6 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-test.c++ @@ -102,20 +102,6 @@ TEST(Serialize, FlatArray) { EXPECT_EQ(serializedWithSuffix.end() - 5, reader.getEnd()); } -#if __i386__ || __x86_64__ || __aarch64__ || _MSC_VER - // Try unaligned. 
- { - auto bytes = kj::heapArray(serializedWithSuffix.size() * sizeof(word) + 1); - auto unalignedWords = kj::arrayPtr( - reinterpret_cast(bytes.begin() + 1), serializedWithSuffix.size()); - memcpy(unalignedWords.asBytes().begin(), serializedWithSuffix.asBytes().begin(), - serializedWithSuffix.asBytes().size()); - UnalignedFlatArrayMessageReader reader(unalignedWords); - checkTestMessage(reader.getRoot()); - EXPECT_EQ(unalignedWords.end() - 5, reader.getEnd()); - } -#endif - { MallocMessageBuilder builder2; auto remaining = initMessageBuilderFromFlatArrayCopy(serializedWithSuffix, builder2); diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text-test.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text-test.c++ index 5ee12a3cbb0..c92838c2ffd 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text-test.c++ @@ -32,7 +32,7 @@ namespace capnp { namespace _ { // private namespace { -KJ_TEST("TestAllTypes") { +KJ_TEST("TextCodec TestAllTypes") { MallocMessageBuilder builder; initTestMessage(builder.initRoot()); @@ -66,7 +66,7 @@ KJ_TEST("TestAllTypes") { } } -KJ_TEST("TestDefaults") { +KJ_TEST("TextCodec TestDefaults") { MallocMessageBuilder builder; initTestMessage(builder.initRoot()); @@ -79,7 +79,7 @@ KJ_TEST("TestDefaults") { checkTestMessage(structReader); } -KJ_TEST("TestListDefaults") { +KJ_TEST("TextCodec TestListDefaults") { MallocMessageBuilder builder; initTestMessage(builder.initRoot()); @@ -92,7 +92,7 @@ KJ_TEST("TestListDefaults") { checkTestMessage(structReader); } -KJ_TEST("raw text") { +KJ_TEST("TextCodec raw text") { using TestType = capnproto_test::capnp::test::TestLateUnion; kj::String message = @@ -126,6 +126,22 @@ KJ_TEST("raw text") { KJ_EXPECT(reader.getAnotherUnion().getCorge()[2] == 9); } +KJ_TEST("TextCodec parse error") { + auto message = "\n (,)"_kj; + + MallocMessageBuilder builder; + auto root = builder.initRoot(); + + TextCodec codec; + auto exception = KJ_ASSERT_NONNULL(kj::runCatchingExceptions( + [&]() { codec.decode(message, root); })); + + KJ_EXPECT(exception.getFile() == "(capnp text input)"_kj); + KJ_EXPECT(exception.getLine() == 2); + KJ_EXPECT(exception.getDescription() == "3-6: Parse error: Empty list item.", + exception.getDescription()); +} + } // namespace } // namespace _ (private) } // namespace capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text.c++ index 738005f258f..4583e5dfe14 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text.c++ @@ -29,16 +29,36 @@ #include "compiler/node-translator.h" #include "compiler/parser.h" +namespace capnp { + namespace { class ThrowingErrorReporter final: public capnp::compiler::ErrorReporter { // Throws all errors as assertion failures. public: + ThrowingErrorReporter(kj::StringPtr input): input(input) {} + void addError(uint32_t startByte, uint32_t endByte, kj::StringPtr message) override { - KJ_FAIL_REQUIRE(kj::str(message, " (", startByte, ":", endByte, ").")); + // Note: Line and column numbers are usually 1-based. + uint line = 1; + uint32_t lineStart = 0; + for (auto i: kj::zeroTo(startByte)) { + if (input[i] == '\n') { + ++line; + lineStart = i; // Omit +1 so that column is 1-based. 
+ } + } + + kj::throwRecoverableException(kj::Exception( + kj::Exception::Type::FAILED, "(capnp text input)", line, + kj::str(startByte - lineStart, "-", endByte - lineStart, ": ", message) + )); } bool hadErrors() override { return false; } + +private: + kj::StringPtr input; }; class ExternalResolver final: public capnp::compiler::ValueTranslator::Resolver { @@ -59,7 +79,7 @@ template void lexAndParseExpression(kj::StringPtr input, Function f) { // Parses a single expression from the input and calls `f(expression)`. - ThrowingErrorReporter errorReporter; + ThrowingErrorReporter errorReporter(input); capnp::MallocMessageBuilder tokenArena; auto lexedTokens = tokenArena.initRoot(); @@ -90,8 +110,6 @@ void lexAndParseExpression(kj::StringPtr input, Function f) { } // namespace -namespace capnp { - TextCodec::TextCodec() : prettyPrint(false) {} TextCodec::~TextCodec() noexcept(true) {} @@ -112,10 +130,10 @@ kj::String TextCodec::encode(DynamicValue::Reader value) const { } void TextCodec::decode(kj::StringPtr input, DynamicStruct::Builder output) const { - lexAndParseExpression(input, [&output](compiler::Expression::Reader expression) { - KJ_REQUIRE(expression.isTuple(), "Input does not contain a struct."); + lexAndParseExpression(input, [&](compiler::Expression::Reader expression) { + KJ_REQUIRE(expression.isTuple(), "Input does not contain a struct.") { return; } - ThrowingErrorReporter errorReporter; + ThrowingErrorReporter errorReporter(input); ExternalResolver nullResolver; Orphanage orphanage = Orphanage::getForMessageContaining(output); @@ -126,9 +144,9 @@ void TextCodec::decode(kj::StringPtr input, DynamicStruct::Builder output) const Orphan TextCodec::decode(kj::StringPtr input, Type type, Orphanage orphanage) const { Orphan output; - - lexAndParseExpression(input, [&type, &orphanage, &output](compiler::Expression::Reader expression) { - ThrowingErrorReporter errorReporter; + + lexAndParseExpression(input, [&](compiler::Expression::Reader expression) { + ThrowingErrorReporter errorReporter(input); ExternalResolver nullResolver; compiler::ValueTranslator translator(nullResolver, errorReporter, orphanage); @@ -138,7 +156,7 @@ Orphan TextCodec::decode(kj::StringPtr input, Type type, Orphanage // An error should have already been given to the errorReporter. 
} }); - + return output; } diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text.h b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text.h index 9f01c0de942..8acd9be844d 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize-text.h @@ -21,15 +21,13 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include "dynamic.h" #include "orphan.h" #include "schema.h" +CAPNP_BEGIN_HEADER + namespace capnp { class TextCodec { @@ -91,3 +89,5 @@ inline Orphan TextCodec::decode(kj::StringPtr input, Orphanage orphanage) con } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize.c++ index 666e007fa46..df7e45e0304 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize.c++ @@ -26,7 +26,7 @@ namespace capnp { -UnalignedFlatArrayMessageReader::UnalignedFlatArrayMessageReader( +FlatArrayMessageReader::FlatArrayMessageReader( kj::ArrayPtr array, ReaderOptions options) : MessageReader(options), end(array.end()) { if (array.size() < 1) { @@ -98,7 +98,7 @@ size_t expectedSizeInWordsFromPrefix(kj::ArrayPtr array) { return totalSize; } -kj::ArrayPtr UnalignedFlatArrayMessageReader::getSegment(uint id) { +kj::ArrayPtr FlatArrayMessageReader::getSegment(uint id) { if (id == 0) { return segment0; } else if (id <= moreSegments.size()) { @@ -108,15 +108,6 @@ kj::ArrayPtr UnalignedFlatArrayMessageReader::getSegment(uint id) { } } -kj::ArrayPtr FlatArrayMessageReader::checkAlignment(kj::ArrayPtr array) { - KJ_REQUIRE((uintptr_t)array.begin() % sizeof(void*) == 0, - "Input to FlatArrayMessageReader is not aligned. If your architecture supports unaligned " - "access (e.g. x86/x64/modern ARM), you may use UnalignedFlatArrayMessageReader instead, " - "though this may harm performance."); - - return array; -} - kj::ArrayPtr initMessageBuilderFromFlatArrayCopy( kj::ArrayPtr array, MessageBuilder& target, ReaderOptions options) { FlatArrayMessageReader reader(array, options); diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize.h b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize.h index fd91111d0be..b79dae935f9 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/serialize.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/serialize.h @@ -40,39 +40,14 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include "message.h" #include -namespace capnp { - -class UnalignedFlatArrayMessageReader: public MessageReader { - // Like FlatArrayMessageReader, but skips checking that the array is properly-aligned. - // - // WARNING: This only works on architectures that support unaligned reads, like x86/x64 and - // modern ARM. Unaligned access may incur a performance penalty on these platforms. On many - // other platforms, the program will simply crash on unaligned reads. Also note that unaligned - // data access may be considered undefined behavior by compilers; use at your own risk. If at - // all possible, try to ensure your data ends up in aligned buffers rather than rely on this - // class. - -public: - UnalignedFlatArrayMessageReader( - kj::ArrayPtr array, ReaderOptions options = ReaderOptions()); - kj::ArrayPtr getSegment(uint id) override; - const word* getEnd() const { return end; } +CAPNP_BEGIN_HEADER -private: - // Optimize for single-segment case. 
- kj::ArrayPtr segment0; - kj::Array> moreSegments; - const word* end; -}; +namespace capnp { -class FlatArrayMessageReader: public UnalignedFlatArrayMessageReader { +class FlatArrayMessageReader: public MessageReader { // Parses a message from a flat array. Note that it makes sense to use this together with mmap() // for extremely fast parsing. @@ -80,14 +55,19 @@ class FlatArrayMessageReader: public UnalignedFlatArrayMessageReader { FlatArrayMessageReader(kj::ArrayPtr array, ReaderOptions options = ReaderOptions()); // The array must remain valid until the MessageReader is destroyed. - const word* getEnd() const { return UnalignedFlatArrayMessageReader::getEnd(); } + kj::ArrayPtr getSegment(uint id) override; + + const word* getEnd() const { return end; } // Get a pointer just past the end of the message as determined by reading the message header. // This could actually be before the end of the input array. This pointer is useful e.g. if // you know that the input array has extra stuff appended after the message and you want to // get at it. private: - static kj::ArrayPtr checkAlignment(kj::ArrayPtr array); + // Optimize for single-segment case. + kj::ArrayPtr segment0; + kj::Array> moreSegments; + const word* end; }; kj::ArrayPtr initMessageBuilderFromFlatArrayCopy( @@ -189,7 +169,7 @@ void writeMessage(kj::OutputStream& output, kj::ArrayPtr> segme // ======================================================================================= // inline stuff -inline FlatArrayMessageReader::FlatArrayMessageReader( - kj::ArrayPtr array, ReaderOptions options) -#ifdef KJ_DEBUG - : UnalignedFlatArrayMessageReader(checkAlignment(array), options) {} -#else - : UnalignedFlatArrayMessageReader(array, options) {} -#endif - inline kj::Array messageToFlatArray(MessageBuilder& builder) { return messageToFlatArray(builder.getSegmentsForOutput()); } @@ -258,3 +230,5 @@ inline void writeMessageToFd(int fd, MessageBuilder& builder) { } } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/stream.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/stream.capnp new file mode 100644 index 00000000000..bfcb72bc43e --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/stream.capnp @@ -0,0 +1,50 @@ +# Copyright (c) 2019 Cloudflare, Inc. and contributors +# Licensed under the MIT License: +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+ +@0x86c366a91393f3f8; +# Defines placeholder types used to provide backwards-compatibility while introducing streaming +# to the language. The goal is that old code generators that don't know about streaming can still +# generate code that functions, leaving it up to the application to implement flow control +# manually. + +$import "/capnp/c++.capnp".namespace("capnp"); + +struct StreamResult @0x995f9a3377c0b16e { + # Empty struct that serves as the return type for "streaming" methods. + # + # Defining a method like: + # + # write @0 (bytes :Data) -> stream; + # + # Is equivalent to: + # + # write @0 (bytes :Data) -> import "/capnp/stream.capnp".StreamResult; + # + # However, implementations that recognize streaming will elide the reference to StreamResult + # and instead give write() a different signature appropriate for streaming. + # + # Streaming methods do not return a result -- that is, they return Promise. This promise + # resolves not to indicate that the call was actually delivered, but instead to provide + # backpressure. When the previous call's promise resolves, it is time to make another call. On + # the client side, the RPC system will resolve promises immediately until an appropriate number + # of requests are in-flight, and then will delay promise resolution to apply back-pressure. + # On the server side, the RPC system will deliver one call at a time. +} diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/stream.capnp.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/stream.capnp.c++ new file mode 100644 index 00000000000..a5937b2557d --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/stream.capnp.c++ @@ -0,0 +1,51 @@ +// Generated by Cap'n Proto compiler, DO NOT EDIT +// source: stream.capnp + +#include "stream.capnp.h" + +namespace capnp { +namespace schemas { +static const ::capnp::_::AlignedData<17> b_995f9a3377c0b16e = { + { 0, 0, 0, 0, 5, 0, 6, 0, + 110, 177, 192, 119, 51, 154, 95, 153, + 19, 0, 0, 0, 1, 0, 0, 0, + 248, 243, 147, 19, 169, 102, 195, 134, + 0, 0, 7, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 21, 0, 0, 0, 2, 1, 0, 0, + 33, 0, 0, 0, 7, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 99, 97, 112, 110, 112, 47, 115, 116, + 114, 101, 97, 109, 46, 99, 97, 112, + 110, 112, 58, 83, 116, 114, 101, 97, + 109, 82, 101, 115, 117, 108, 116, 0, + 0, 0, 0, 0, 1, 0, 1, 0, } +}; +::capnp::word const* const bp_995f9a3377c0b16e = b_995f9a3377c0b16e.words; +#if !CAPNP_LITE +const ::capnp::_::RawSchema s_995f9a3377c0b16e = { + 0x995f9a3377c0b16e, b_995f9a3377c0b16e.words, 17, nullptr, nullptr, + 0, 0, nullptr, nullptr, nullptr, { &s_995f9a3377c0b16e, nullptr, nullptr, 0, 0, nullptr } +}; +#endif // !CAPNP_LITE +} // namespace schemas +} // namespace capnp + +// ======================================================================================= + +namespace capnp { + +// StreamResult +constexpr uint16_t StreamResult::_capnpPrivate::dataWordSize; +constexpr uint16_t StreamResult::_capnpPrivate::pointerCount; +#if !CAPNP_LITE +constexpr ::capnp::Kind StreamResult::_capnpPrivate::kind; +constexpr ::capnp::_::RawSchema const* StreamResult::_capnpPrivate::schema; +#endif // !CAPNP_LITE + + +} // namespace + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/stream.capnp.h b/libs/EXTERNAL/capnproto/c++/src/capnp/stream.capnp.h new file mode 100644 index 00000000000..4ac41404d4b --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/stream.capnp.h @@ -0,0 +1,119 @@ +// Generated by Cap'n Proto compiler, DO 
NOT EDIT +// source: stream.capnp + +#pragma once + +#include +#include + +#if CAPNP_VERSION != 9001 +#error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." +#endif + + +CAPNP_BEGIN_HEADER + +namespace capnp { +namespace schemas { + +CAPNP_DECLARE_SCHEMA(995f9a3377c0b16e); + +} // namespace schemas +} // namespace capnp + +namespace capnp { + +struct StreamResult { + StreamResult() = delete; + + class Reader; + class Builder; + class Pipeline; + + struct _capnpPrivate { + CAPNP_DECLARE_STRUCT_HEADER(995f9a3377c0b16e, 0, 0) + #if !CAPNP_LITE + static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } + #endif // !CAPNP_LITE + }; +}; + +// ======================================================================================= + +class StreamResult::Reader { +public: + typedef StreamResult Reads; + + Reader() = default; + inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} + + inline ::capnp::MessageSize totalSize() const { + return _reader.totalSize().asPublic(); + } + +#if !CAPNP_LITE + inline ::kj::StringTree toString() const { + return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); + } +#endif // !CAPNP_LITE + +private: + ::capnp::_::StructReader _reader; + template + friend struct ::capnp::ToDynamic_; + template + friend struct ::capnp::_::PointerHelpers; + template + friend struct ::capnp::List; + friend class ::capnp::MessageBuilder; + friend class ::capnp::Orphanage; +}; + +class StreamResult::Builder { +public: + typedef StreamResult Builds; + + Builder() = delete; // Deleted to discourage incorrect usage. + // You can explicitly initialize to nullptr instead. + inline Builder(decltype(nullptr)) {} + inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} + inline operator Reader() const { return Reader(_builder.asReader()); } + inline Reader asReader() const { return *this; } + + inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } +#if !CAPNP_LITE + inline ::kj::StringTree toString() const { return asReader().toString(); } +#endif // !CAPNP_LITE + +private: + ::capnp::_::StructBuilder _builder; + template + friend struct ::capnp::ToDynamic_; + friend class ::capnp::Orphanage; + template + friend struct ::capnp::_::PointerHelpers; +}; + +#if !CAPNP_LITE +class StreamResult::Pipeline { +public: + typedef StreamResult Pipelines; + + inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} + inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) + : _typeless(kj::mv(typeless)) {} + +private: + ::capnp::AnyPointer::Pipeline _typeless; + friend class ::capnp::PipelineHook; + template + friend struct ::capnp::ToDynamic_; +}; +#endif // !CAPNP_LITE + +// ======================================================================================= + +} // namespace + +CAPNP_END_HEADER + diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/stringify.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/stringify.c++ index ad00b6785f5..e17d47faf53 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/stringify.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/stringify.c++ @@ -139,17 +139,14 @@ static kj::StringTree print(const DynamicValue::Reader& value, } else { return kj::strTree(value.as()); } - case DynamicValue::TEXT: + case DynamicValue::TEXT: { + kj::ArrayPtr chars = value.as(); + return kj::strTree('"', kj::encodeCEscape(chars), '"'); + } case DynamicValue::DATA: { // TODO(someday): Maybe data 
should be printed as binary literal. - kj::ArrayPtr chars; - if (value.getType() == DynamicValue::DATA) { - chars = value.as().asChars(); - } else { - chars = value.as(); - } - - return kj::strTree('"', kj::encodeCEscape(chars), '"'); + kj::ArrayPtr bytes = value.as().asBytes(); + return kj::strTree('"', kj::encodeCEscape(bytes), '"'); } case DynamicValue::LIST: { auto listValue = value.as(); diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/test-util.c++ b/libs/EXTERNAL/capnproto/c++/src/capnp/test-util.c++ index c4a2f8e4f7e..d16d955e950 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/test-util.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/test-util.c++ @@ -19,9 +19,15 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + #include "test-util.h" #include #include +#include +#include namespace capnp { namespace _ { // private @@ -975,6 +981,14 @@ kj::Promise TestPipelineImpl::getAnyCap(GetAnyCapContext context) { }); } +kj::Promise TestPipelineImpl::getCapPipelineOnly(GetCapPipelineOnlyContext context) { + ++callCount; + PipelineBuilder pb; + pb.initOutBox().setCap(kj::heap(callCount)); + context.setPipeline(pb.build()); + return kj::NEVER_DONE; +} + kj::Promise TestCallOrderImpl::getCallSequence(GetCallSequenceContext context) { auto result = context.getResults(); result.setN(count++); @@ -1144,6 +1158,38 @@ kj::Promise TestMoreStuffImpl::getEnormousString(GetEnormousStringContext return kj::READY_NOW; } +kj::Promise TestMoreStuffImpl::writeToFd(WriteToFdContext context) { + auto params = context.getParams(); + + auto promises = kj::heapArrayBuilder>(2); + + promises.add(params.getFdCap1().getFd() + .then([](kj::Maybe fd) { + kj::FdOutputStream(KJ_ASSERT_NONNULL(fd)).write("foo", 3); + })); + promises.add(params.getFdCap2().getFd() + .then([context](kj::Maybe fd) mutable { + context.getResults().setSecondFdPresent(fd != nullptr); + KJ_IF_MAYBE(f, fd) { + kj::FdOutputStream(*f).write("bar", 3); + } + })); + + int pair[2]; + KJ_SYSCALL(kj::miniposix::pipe(pair)); + kj::AutoCloseFd in(pair[0]); + kj::AutoCloseFd out(pair[1]); + + kj::FdOutputStream(kj::mv(out)).write("baz", 3); + context.getResults().setFdCap3(kj::heap(kj::mv(in))); + + return kj::joinPromises(promises.finish()); +} + +kj::Promise TestMoreStuffImpl::throwException(ThrowExceptionContext context) { + return KJ_EXCEPTION(FAILED, "test exception"); +} + #endif // !CAPNP_LITE } // namespace _ (private) diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/test-util.h b/libs/EXTERNAL/capnproto/c++/src/capnp/test-util.h index 67d0ba423bb..2cf47cb8465 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/test-util.h +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/test-util.h @@ -21,10 +21,6 @@ #pragma once -#if defined(__GNUC__) && !defined(CAPNP_HEADER_WARNINGS) -#pragma GCC system_header -#endif - #include #include #include "blob.h" @@ -32,8 +28,11 @@ #if !CAPNP_LITE #include "dynamic.h" +#include #endif // !CAPNP_LITE +CAPNP_BEGIN_HEADER + // TODO(cleanup): Auto-generate stringification functions for union discriminants. 
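
[Editor's note on the stringify.c++ hunk above, not part of the patch: after the split, Text values are escaped as characters and Data values as raw bytes, both wrapped in double quotes. A minimal sketch of observing this through the dynamic API; the reader argument is hypothetical.]

  #include <capnp/dynamic.h>
  #include <kj/string.h>

  kj::String dump(capnp::DynamicStruct::Reader reader) {
    // Produces the text format, with Text and Data fields quoted and C-escaped
    // by the code paths shown in the hunk above.
    return kj::str(reader);
  }
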
namespace capnproto_test { namespace capnp { @@ -213,6 +212,7 @@ class TestPipelineImpl final: public test::TestPipeline::Server { kj::Promise getCap(GetCapContext context) override; kj::Promise getAnyCap(GetAnyCapContext context) override; + kj::Promise getCapPipelineOnly(GetCapPipelineOnlyContext context) override; private: int& callCount; @@ -222,6 +222,8 @@ class TestCallOrderImpl final: public test::TestCallOrder::Server { public: kj::Promise getCallSequence(GetCallSequenceContext context) override; + uint getCount() { return count; } + private: uint count = 0; }; @@ -274,6 +276,10 @@ class TestMoreStuffImpl final: public test::TestMoreStuff::Server { kj::Promise getEnormousString(GetEnormousStringContext context) override; + kj::Promise writeToFd(WriteToFdContext context) override; + + kj::Promise throwException(ThrowExceptionContext context) override; + private: int& callCount; int& handleCount; @@ -303,7 +309,57 @@ class TestCapDestructor final: public test::TestInterface::Server { TestInterfaceImpl impl; }; +class TestFdCap final: public test::TestInterface::Server { + // Implementation of TestInterface that wraps a file descriptor. + +public: + TestFdCap(kj::AutoCloseFd fd): fd(kj::mv(fd)) {} + + kj::Maybe getFd() override { return fd.get(); } + +private: + kj::AutoCloseFd fd; +}; + +class TestStreamingImpl final: public test::TestStreaming::Server { +public: + uint iSum = 0; + uint jSum = 0; + kj::Maybe>> fulfiller; + bool jShouldThrow = false; + + kj::Promise doStreamI(DoStreamIContext context) override { + iSum += context.getParams().getI(); + auto paf = kj::newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + return kj::mv(paf.promise); + } + + kj::Promise doStreamJ(DoStreamJContext context) override { + context.allowCancellation(); + jSum += context.getParams().getJ(); + + if (jShouldThrow) { + KJ_FAIL_ASSERT("throw requested") { break; } + return kj::READY_NOW; + } + + auto paf = kj::newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + return kj::mv(paf.promise); + } + + kj::Promise finishStream(FinishStreamContext context) override { + auto results = context.getResults(); + results.setTotalI(iSum); + results.setTotalJ(jSum); + return kj::READY_NOW; + } +}; + #endif // !CAPNP_LITE } // namespace _ (private) } // namespace capnp + +CAPNP_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/test.capnp b/libs/EXTERNAL/capnproto/c++/src/capnp/test.capnp index 0a4a5076368..7efe6aee447 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/test.capnp +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/test.capnp @@ -587,6 +587,9 @@ struct TestGenerics(Foo, Bar) { } } +struct BoxedText { text @0 :Text; } +using BrandedAlias = TestGenerics(BoxedText, Text); + struct TestGenericsWrapper(Foo, Bar) { value @0 :TestGenerics(Foo, Bar); } @@ -760,7 +763,6 @@ struct TestAnyPointerConstants { anyStructAsStruct @1 :AnyStruct; anyKindAsList @2 :AnyPointer; anyListAsList @3 :AnyList; - } const anyPointerConstants :TestAnyPointerConstants = ( @@ -770,6 +772,11 @@ const anyPointerConstants :TestAnyPointerConstants = ( anyListAsList = TestConstants.int32ListConst, ); +struct TestListOfAny { + capList @0 :List(Capability); + #listList @1 :List(AnyList); # TODO(0.10): Make List(AnyList) work correctly in C++ generated code. 
+} + interface TestInterface { foo @0 (i :UInt32, j :Bool) -> (x :Text); bar @1 () -> (); @@ -789,6 +796,9 @@ interface TestPipeline { testPointers @1 (cap :TestInterface, obj :AnyPointer, list :List(TestInterface)) -> (); getAnyCap @2 (n: UInt32, inCap :Capability) -> (s: Text, outBox :AnyBox); + getCapPipelineOnly @3 () -> (outBox :Box); + # Never returns, but uses setPipeline() to make the pipeline work. + struct Box { cap @0 :TestInterface; } @@ -818,6 +828,13 @@ interface TestTailCaller { foo @0 (i :Int32, callee :TestTailCallee) -> TestTailCallee.TailResult; } +interface TestStreaming { + doStreamI @0 (i :UInt32) -> stream; + doStreamJ @1 (j :UInt32) -> stream; + finishStream @2 () -> (totalI :UInt32, totalJ :UInt32); + # Test streaming. finishStream() returns the totals of the values streamed to the other calls. +} + interface TestHandle {} interface TestMoreStuff extends(TestCallOrder) { @@ -860,6 +877,13 @@ interface TestMoreStuff extends(TestCallOrder) { getEnormousString @11 () -> (str :Text); # Attempts to return an 100MB string. Should always fail. + + writeToFd @13 (fdCap1 :TestInterface, fdCap2 :TestInterface) + -> (fdCap3 :TestInterface, secondFdPresent :Bool); + # Expects fdCap1 and fdCap2 wrap socket file descriptors. Writes "foo" to the first and "bar" to + # the second. Also creates a socketpair, writes "baz" to one end, and returns the other end. + + throwException @14 (); } interface TestMembrane { diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors.capnp.nobuild b/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors.capnp.nobuild index a909e970a00..9cd3beb5414 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors.capnp.nobuild +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors.capnp.nobuild @@ -97,6 +97,7 @@ struct Foo { listWithoutParam @31 :List; listWithTooManyParams @32 :List(Int32, Int64); listAnyPointer @33 :List(AnyPointer); + listAnyStruct @48 :List(AnyStruct); notAType @34 :notType; noParams @35 :Foo(Int32); @@ -141,6 +142,7 @@ enum DupEnumerants { const recursive: UInt32 = .recursive; struct Generic(T, U) { + foo @0 :UInt32 $T; } struct UseGeneric { @@ -158,4 +160,5 @@ using Baz = import "nosuchfile-unused.capnp".Baz; interface TestInterface { foo @0 (a :UInt32 = null); + bar @1 stream -> (); } diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors.txt b/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors.txt index ed238e482cc..a455cacab7e 100644 --- a/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors.txt +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors.txt @@ -2,7 +2,7 @@ file:74:30-32: error: As of Cap'n Proto v0.3, it is no longer necessary to assig file:74:30-32: error: As of Cap'n Proto v0.3, the 'union' keyword should be prefixed with a colon for named unions, e.g. `foo :union {`. file:79:23-25: error: As of Cap'n Proto v0.3, it is no longer necessary to assign numbers to unions. However, removing the number will break binary compatibility. If this is an old protocol and you need to retain compatibility, please add an exclamation point after the number to indicate that it is really needed, e.g. `foo @1! :union {`. If this is a new protocol or compatibility doesn't matter, just remove the @n entirely. Sorry for the inconvenience, and thanks for being an early adopter! :) file:84:17-19: error: As of Cap'n Proto v0.3, the 'union' keyword should be prefixed with a colon for named unions, e.g. `foo :union {`. 
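
[Editor's sketch, not part of the patch: how a client drives the new `-> stream` methods declared on TestStreaming above. It assumes the generated client API from this schema (header path and namespace taken from the schema's $Cxx.namespace annotation); for streaming methods, send() returns kj::Promise<void> and the RPC layer applies flow control between calls.]

  #include <kj/async.h>
  #include <kj/debug.h>
  #include "test.capnp.h"   // generated from the schema above; path assumed

  using capnproto_test::capnp::test::TestStreaming;

  kj::Promise<void> streamThree(TestStreaming::Client cap) {
    auto req1 = cap.doStreamIRequest(); req1.setI(1);
    auto req2 = cap.doStreamIRequest(); req2.setI(2);
    auto req3 = cap.doStreamJRequest(); req3.setJ(3);

    return req1.send()   // Promise<void> for a streaming method
        .then([req2 = kj::mv(req2)]() mutable { return req2.send(); })
        .then([req3 = kj::mv(req3)]() mutable { return req3.send(); })
        .then([cap]() mutable { return cap.finishStreamRequest().send(); })
        .then([](auto results) {
          // Server-reported totals: 1 + 2 streamed to doStreamI, 3 to doStreamJ.
          KJ_ASSERT(results.getTotalI() == 3);
          KJ_ASSERT(results.getTotalJ() == 3);
        });
  }
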
-file:132:7-10: error: 'using' declaration without '=' must specify a named declaration from a different scope. +file:133:7-10: error: 'using' declaration without '=' must specify a named declaration from a different scope. file:37:3-10: error: 'dupName' is already defined in this scope. file:36:3-10: error: 'dupName' previously defined here. file:52:5-12: error: 'dupName' is already defined in this scope. @@ -21,40 +21,44 @@ file:39:15-16: error: Duplicate ordinal number. file:38:15-16: error: Ordinal @2 originally used here. file:41:18-19: error: Skipped ordinal @3. Ordinals must be sequential with no holes. file:69:15-17: error: Union ordinal, if specified, must be greater than no more than one of its member ordinals (i.e. there can only be one field retroactively unionized). -file:116:31-50: error: Import failed: noshuchfile.capnp -file:118:26-32: error: Not defined: NoSuch -file:119:28-34: error: 'Foo' has no member named 'NoSuch' +file:117:31-50: error: Import failed: noshuchfile.capnp +file:119:26-32: error: Not defined: NoSuch +file:120:28-34: error: 'Foo' has no member named 'NoSuch' file:97:25-29: error: 'List' requires exactly one parameter. file:98:30-48: error: Too many generic parameters. file:98:30-34: error: 'List' requires exactly one parameter. file:99:23-39: error: 'List(AnyPointer)' is not supported. -file:100:17-24: error: 'notType' is not a type. -file:101:17-27: error: Declaration does not accept generic parameters. -file:103:34-41: error: Integer value out of range. -file:104:37-38: error: Integer value out of range. -file:105:32-35: error: Type mismatch; expected Text. -file:106:33-38: error: Type mismatch; expected Text. -file:107:33-55: error: Type mismatch; expected Text. -file:108:43-61: error: Integer is too big to be negative. -file:109:35-39: error: '.Foo' does not refer to a constant. -file:110:44-51: error: Constant names must be qualified to avoid confusion. Please replace 'notType' with '.notType', if that's what you intended. -file:117:28-34: error: Not defined: NoSuch -file:112:29-32: error: 'Foo' is not an annotation. -file:113:29-47: error: 'notFieldAnnotation' cannot be applied to this kind of declaration. -file:114:33-48: error: 'fieldAnnotation' requires a value. -file:126:35-46: error: Struct has no field named 'nosuchfield'. -file:127:49-52: error: Type mismatch; expected group. -file:125:52-55: error: Missing field name. -file:136:3-10: error: 'dupName' is already defined in this scope. -file:135:3-10: error: 'dupName' previously defined here. -file:138:15-16: error: Duplicate ordinal number. -file:137:15-16: error: Ordinal @2 originally used here. -file:141:7-16: error: Declaration recursively depends on itself. -file:147:14-27: error: Not enough generic parameters. -file:148:15-47: error: Too many generic parameters. -file:149:18-49: error: Double-application of generic parameters. -file:150:38-43: error: Sorry, only pointer types can be used as generic parameters. -file:153:30-44: error: Embeds can only be used when Text, Data, or a struct is expected. -file:154:37-51: error: Couldn't read file for embed: no-such-file -file:160:23-27: error: Only pointer parameters can declare their default as 'null'. -file:156:20-45: error: Import failed: nosuchfile-unused.capnp +file:101:17-24: error: 'notType' is not a type. +file:102:17-27: error: Declaration does not accept generic parameters. +file:104:34-41: error: Integer value out of range. +file:105:37-38: error: Integer value out of range. +file:106:32-35: error: Type mismatch; expected Text. 
+file:107:33-38: error: Type mismatch; expected Text. +file:108:33-55: error: Type mismatch; expected Text. +file:109:43-61: error: Integer is too big to be negative. +file:110:35-39: error: '.Foo' does not refer to a constant. +file:111:44-51: error: Constant names must be qualified to avoid confusion. Please replace 'notType' with '.notType', if that's what you intended. +file:118:28-34: error: Not defined: NoSuch +file:100:22-37: error: 'List(AnyStruct)' is not supported. +file:113:29-32: error: 'Foo' is not an annotation. +file:114:29-47: error: 'notFieldAnnotation' cannot be applied to this kind of declaration. +file:115:33-48: error: 'fieldAnnotation' requires a value. +file:127:35-46: error: Struct has no field named 'nosuchfield'. +file:128:49-52: error: Type mismatch; expected group. +file:126:52-55: error: Missing field name. +file:137:3-10: error: 'dupName' is already defined in this scope. +file:136:3-10: error: 'dupName' previously defined here. +file:139:15-16: error: Duplicate ordinal number. +file:138:15-16: error: Ordinal @2 originally used here. +file:142:7-16: error: Declaration recursively depends on itself. +file:145:19-20: error: 'T' is not an annotation. +file:149:14-27: error: Not enough generic parameters. +file:150:15-47: error: Too many generic parameters. +file:151:18-49: error: Double-application of generic parameters. +file:152:38-43: error: Sorry, only pointer types can be used as generic parameters. +file:155:30-44: error: Embeds can only be used when Text, Data, or a struct is expected. +file:156:37-51: error: Couldn't read file for embed: no-such-file +file:162:23-27: error: Only pointer parameters can declare their default as 'null'. +file:163:10-16: error: 'stream' can only appear after '->', not before. +file:163:10-16: error: A method declaration uses streaming, but '/capnp/stream.capnp' is not found in the import path. This is a standard file that should always be installed with the Cap'n Proto compiler. +file:158:20-45: error: Import failed: nosuchfile-unused.capnp diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors2.capnp.nobuild b/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors2.capnp.nobuild new file mode 100644 index 00000000000..e2fbf016b4f --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors2.capnp.nobuild @@ -0,0 +1,37 @@ +# Copyright (c) 2020 Cloudflare, Inc. and contributors +# Licensed under the MIT License: +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+ +@0xea7dcf0ca9acfa97; +# This is much like errors.capnp.nobuild but expresses errors that occur in a later phase of +# compilation, which is never reached when building errors.capnp.nobuild because the compiler +# bails out after other errors. + +struct DummyType {} +const dummyValue :DummyType = (); + +struct TestDefaultValueForGeneric(A) { + single @0 :A = .dummyValue; + nested @1 :Box(A) = (val = .dummyValue); +} + +struct Box(B) { + val @0 :B; +} diff --git a/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors2.txt b/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors2.txt new file mode 100644 index 00000000000..ed65d05e881 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/capnp/testdata/errors2.txt @@ -0,0 +1,2 @@ +file:31:18-29: error: Cannot interpret value because the type is a generic type parameter which is not yet bound. We don't know what type to expect here. +file:32:30-41: error: Cannot interpret value because the type is a generic type parameter which is not yet bound. We don't know what type to expect here. diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/CMakeLists.txt b/libs/EXTERNAL/capnproto/c++/src/kj/CMakeLists.txt index 58009b464be..813fac4deed 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/CMakeLists.txt +++ b/libs/EXTERNAL/capnproto/c++/src/kj/CMakeLists.txt @@ -3,6 +3,7 @@ set(kj_sources_lite array.c++ + list.c++ common.c++ debug.c++ exception.c++ @@ -10,6 +11,7 @@ set(kj_sources_lite memory.c++ mutex.c++ string.c++ + source-location.c++ hash.c++ table.c++ thread.c++ @@ -40,9 +42,11 @@ set(kj_headers memory.h refcount.h array.h + list.h vector.h string.h string-tree.h + source-location.h hash.h table.h map.h @@ -125,6 +129,7 @@ set(kj-async_headers async-unix.h async-win32.h async-io.h + async-queue.h timer.h ) if(NOT CAPNP_LITE) @@ -163,6 +168,29 @@ if(NOT CAPNP_LITE) install(FILES ${kj-http_headers} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/kj/compat") endif() +# kj-tls ====================================================================== +set(kj-tls_sources + compat/readiness-io.c++ + compat/tls.c++ +) +set(kj-tls_headers + compat/readiness-io.h + compat/tls.h +) +if(NOT CAPNP_LITE) + add_library(kj-tls ${kj-tls_sources}) + add_library(CapnProto::kj-tls ALIAS kj-tls) + target_link_libraries(kj-tls PUBLIC kj-async) + if (WITH_OPENSSL) + target_compile_definitions(kj-tls PRIVATE KJ_HAS_OPENSSL) + target_link_libraries(kj-tls PRIVATE OpenSSL::SSL OpenSSL::Crypto) + endif() + # Ensure the library has a version set to match autotools build + set_target_properties(kj-tls PROPERTIES VERSION ${VERSION}) + install(TARGETS kj-tls ${INSTALL_TARGETS_DEFAULT_ARGS}) + install(FILES ${kj-tls_headers} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/kj/compat") +endif() + # kj-gzip ====================================================================== set(kj-gzip_sources @@ -195,6 +223,7 @@ if(BUILD_TESTING) common-test.c++ memory-test.c++ array-test.c++ + list-test.c++ string-test.c++ table-test.c++ map-test.c++ @@ -202,6 +231,7 @@ if(BUILD_TESTING) debug-test.c++ io-test.c++ mutex-test.c++ + time-test.c++ threadlocal-test.c++ test-test.c++ std/iostream-test.c++ @@ -214,9 +244,13 @@ if(BUILD_TESTING) if(NOT CAPNP_LITE) add_executable(kj-heavy-tests async-test.c++ + async-xthread-test.c++ async-unix-test.c++ + async-unix-xthread-test.c++ async-win32-test.c++ + async-win32-xthread-test.c++ async-io-test.c++ + async-queue-test.c++ refcount-test.c++ string-tree-test.c++ encoding-test.c++ @@ -225,7 +259,6 @@ if(BUILD_TESTING) tuple-test.c++ one-of-test.c++ function-test.c++ - 
threadlocal-pthread-test.c++ filesystem-test.c++ filesystem-disk-test.c++ parse/common-test.c++ @@ -233,8 +266,15 @@ if(BUILD_TESTING) compat/url-test.c++ compat/http-test.c++ compat/gzip-test.c++ + compat/tls-test.c++ ) - target_link_libraries(kj-heavy-tests kj-http kj-gzip kj-async kj-test kj) + target_link_libraries(kj-heavy-tests kj-http kj-gzip kj-tls kj-async kj-test kj) + if (WITH_OPENSSL) + set_source_files_properties(compat/tls-test.c++ + PROPERTIES + COMPILE_DEFINITIONS KJ_HAS_OPENSSL + ) + endif() add_dependencies(check kj-heavy-tests) add_test(NAME kj-heavy-tests-run COMMAND kj-heavy-tests) endif() # NOT CAPNP_LITE diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/arena.h b/libs/EXTERNAL/capnproto/c++/src/kj/arena.h index 3a8a7e423bb..63e0e31ed03 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/arena.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/arena.h @@ -21,14 +21,12 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "memory.h" #include "array.h" #include "string.h" +KJ_BEGIN_HEADER + namespace kj { class Arena { @@ -208,3 +206,5 @@ ArrayBuilder Arena::allocateOwnArrayBuilder(size_t capacity) { } } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/array-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/array-test.c++ index 518d40623dc..d361b65cab1 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/array-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/array-test.c++ @@ -383,6 +383,20 @@ KJ_TEST("kj::arr()") { kj::Array array = kj::arr(kj::str("foo"), kj::str(123)); KJ_EXPECT(array == kj::ArrayPtr({"foo", "123"})); } + +struct ImmovableInt { + ImmovableInt(int i): i(i) {} + KJ_DISALLOW_COPY(ImmovableInt); + int i; +}; + +KJ_TEST("kj::arrOf()") { + kj::Array array = kj::arrOf(123, 456, 789); + KJ_ASSERT(array.size() == 3); + KJ_EXPECT(array[0].i == 123); + KJ_EXPECT(array[1].i == 456); + KJ_EXPECT(array[2].i == 789); +} #endif struct DestructionOrderRecorder { @@ -447,6 +461,7 @@ TEST(Array, AttachNested) { Array> combined = arr.attach(kj::mv(obj2)).attach(kj::mv(obj3)); KJ_EXPECT(combined.begin() == ptr); + KJ_EXPECT(combined.size() == 1); KJ_EXPECT(obj1.get() == nullptr); KJ_EXPECT(obj2.get() == nullptr); diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/array.h b/libs/EXTERNAL/capnproto/c++/src/kj/array.h index a1a4937a3d3..fdd7d4cf504 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/array.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/array.h @@ -21,14 +21,12 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "memory.h" #include #include +KJ_BEGIN_HEADER + namespace kj { // ======================================================================================= @@ -142,58 +140,62 @@ class Array { other.ptr = nullptr; other.size_ = 0; } - inline Array(T* firstElement, size_t size, const ArrayDisposer& disposer) + inline Array(T* firstElement KJ_LIFETIMEBOUND, size_t size, const ArrayDisposer& disposer) : ptr(firstElement), size_(size), disposer(&disposer) {} KJ_DISALLOW_COPY(Array); inline ~Array() noexcept { dispose(); } - inline operator ArrayPtr() { + inline operator ArrayPtr() KJ_LIFETIMEBOUND { return ArrayPtr(ptr, size_); } - inline operator ArrayPtr() const { + inline operator ArrayPtr() const KJ_LIFETIMEBOUND { return ArrayPtr(ptr, size_); } - inline ArrayPtr asPtr() { + inline ArrayPtr asPtr() KJ_LIFETIMEBOUND { return ArrayPtr(ptr, size_); } - inline ArrayPtr asPtr() const { + inline ArrayPtr asPtr() const KJ_LIFETIMEBOUND { return 
ArrayPtr(ptr, size_); } inline size_t size() const { return size_; } - inline T& operator[](size_t index) const { + inline T& operator[](size_t index) KJ_LIFETIMEBOUND { + KJ_IREQUIRE(index < size_, "Out-of-bounds Array access."); + return ptr[index]; + } + inline const T& operator[](size_t index) const KJ_LIFETIMEBOUND { KJ_IREQUIRE(index < size_, "Out-of-bounds Array access."); return ptr[index]; } - inline const T* begin() const { return ptr; } - inline const T* end() const { return ptr + size_; } - inline const T& front() const { return *ptr; } - inline const T& back() const { return *(ptr + size_ - 1); } - inline T* begin() { return ptr; } - inline T* end() { return ptr + size_; } - inline T& front() { return *ptr; } - inline T& back() { return *(ptr + size_ - 1); } + inline const T* begin() const KJ_LIFETIMEBOUND { return ptr; } + inline const T* end() const KJ_LIFETIMEBOUND { return ptr + size_; } + inline const T& front() const KJ_LIFETIMEBOUND { return *ptr; } + inline const T& back() const KJ_LIFETIMEBOUND { return *(ptr + size_ - 1); } + inline T* begin() KJ_LIFETIMEBOUND { return ptr; } + inline T* end() KJ_LIFETIMEBOUND { return ptr + size_; } + inline T& front() KJ_LIFETIMEBOUND { return *ptr; } + inline T& back() KJ_LIFETIMEBOUND { return *(ptr + size_ - 1); } template inline bool operator==(const U& other) const { return asPtr() == other; } template inline bool operator!=(const U& other) const { return asPtr() != other; } - inline ArrayPtr slice(size_t start, size_t end) { + inline ArrayPtr slice(size_t start, size_t end) KJ_LIFETIMEBOUND { KJ_IREQUIRE(start <= end && end <= size_, "Out-of-bounds Array::slice()."); return ArrayPtr(ptr + start, end - start); } - inline ArrayPtr slice(size_t start, size_t end) const { + inline ArrayPtr slice(size_t start, size_t end) const KJ_LIFETIMEBOUND { KJ_IREQUIRE(start <= end && end <= size_, "Out-of-bounds Array::slice()."); return ArrayPtr(ptr + start, end - start); } - inline ArrayPtr asBytes() const { return asPtr().asBytes(); } - inline ArrayPtr> asBytes() { return asPtr().asBytes(); } - inline ArrayPtr asChars() const { return asPtr().asChars(); } - inline ArrayPtr> asChars() { return asPtr().asChars(); } + inline ArrayPtr asBytes() const KJ_LIFETIMEBOUND { return asPtr().asBytes(); } + inline ArrayPtr> asBytes() KJ_LIFETIMEBOUND { return asPtr().asBytes(); } + inline ArrayPtr asChars() const KJ_LIFETIMEBOUND { return asPtr().asChars(); } + inline ArrayPtr> asChars() KJ_LIFETIMEBOUND { return asPtr().asChars(); } inline Array> releaseAsBytes() { // Like asBytes() but transfers ownership. 
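
[Editor's illustration, not part of the patch: the KJ_LIFETIMEBOUND annotations added throughout kj/array.h, in the hunk above and the ArrayBuilder hunk that follows, let clang flag references and ArrayPtrs that escape the container that owns them, on compilers where the macro expands to [[clang::lifetimebound]]. The bug class it catches looks like this:]

  #include <kj/array.h>

  kj::ArrayPtr<int> dangling() {
    kj::Array<int> local = kj::heapArray<int>(3);  // `local` owns the storage
    return local.asPtr();  // with KJ_LIFETIMEBOUND, clang warns that the returned
  }                        // ArrayPtr refers to `local`, which is destroyed here
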
@@ -341,34 +343,38 @@ class ArrayBuilder { KJ_DISALLOW_COPY(ArrayBuilder); inline ~ArrayBuilder() noexcept(false) { dispose(); } - inline operator ArrayPtr() { + inline operator ArrayPtr() KJ_LIFETIMEBOUND { return arrayPtr(ptr, pos); } - inline operator ArrayPtr() const { + inline operator ArrayPtr() const KJ_LIFETIMEBOUND { return arrayPtr(ptr, pos); } - inline ArrayPtr asPtr() { + inline ArrayPtr asPtr() KJ_LIFETIMEBOUND { return arrayPtr(ptr, pos); } - inline ArrayPtr asPtr() const { + inline ArrayPtr asPtr() const KJ_LIFETIMEBOUND { return arrayPtr(ptr, pos); } inline size_t size() const { return pos - ptr; } inline size_t capacity() const { return endPtr - ptr; } - inline T& operator[](size_t index) const { + inline T& operator[](size_t index) KJ_LIFETIMEBOUND { + KJ_IREQUIRE(index < implicitCast(pos - ptr), "Out-of-bounds Array access."); + return ptr[index]; + } + inline const T& operator[](size_t index) const KJ_LIFETIMEBOUND { KJ_IREQUIRE(index < implicitCast(pos - ptr), "Out-of-bounds Array access."); return ptr[index]; } - inline const T* begin() const { return ptr; } - inline const T* end() const { return pos; } - inline const T& front() const { return *ptr; } - inline const T& back() const { return *(pos - 1); } - inline T* begin() { return ptr; } - inline T* end() { return pos; } - inline T& front() { return *ptr; } - inline T& back() { return *(pos - 1); } + inline const T* begin() const KJ_LIFETIMEBOUND { return ptr; } + inline const T* end() const KJ_LIFETIMEBOUND { return pos; } + inline const T& front() const KJ_LIFETIMEBOUND { return *ptr; } + inline const T& back() const KJ_LIFETIMEBOUND { return *(pos - 1); } + inline T* begin() KJ_LIFETIMEBOUND { return ptr; } + inline T* end() KJ_LIFETIMEBOUND { return pos; } + inline T& front() KJ_LIFETIMEBOUND { return *ptr; } + inline T& back() KJ_LIFETIMEBOUND { return *(pos - 1); } ArrayBuilder& operator=(ArrayBuilder&& other) { dispose(); @@ -387,7 +393,7 @@ class ArrayBuilder { } template - T& add(Params&&... params) { + T& add(Params&&... params) KJ_LIFETIMEBOUND { KJ_IREQUIRE(pos < endPtr, "Added too many elements to ArrayBuilder."); ctor(*pos, kj::fwd(params)...); return *pos++; @@ -457,7 +463,7 @@ class ArrayBuilder { Array finish() { // We could safely remove this check if we assume that the disposer implementation doesn't - // need to know the original capacity, as is thes case with HeapArrayDisposer since it uses + // need to know the original capacity, as is the case with HeapArrayDisposer since it uses // operator new() or if we created a custom disposer for ArrayBuilder which stores the capacity // in a prefix. But that would make it hard to write cleverer heap allocators, and anyway this // check might catch bugs. Probably people should use Vector if they want to build arrays @@ -478,7 +484,7 @@ class ArrayBuilder { T* ptr; RemoveConst* pos; T* endPtr; - const ArrayDisposer* disposer; + const ArrayDisposer* disposer = &NullArrayDisposer::instance; inline void dispose() { // Make sure that if an exception is thrown, we are left with a null ptr, so we won't possibly @@ -512,21 +518,23 @@ class FixedArray { // A fixed-width array whose storage is allocated inline rather than on the heap. 
public: - inline size_t size() const { return fixedSize; } - inline T* begin() { return content; } - inline T* end() { return content + fixedSize; } - inline const T* begin() const { return content; } - inline const T* end() const { return content + fixedSize; } + inline constexpr size_t size() const { return fixedSize; } + inline constexpr T* begin() KJ_LIFETIMEBOUND { return content; } + inline constexpr T* end() KJ_LIFETIMEBOUND { return content + fixedSize; } + inline constexpr const T* begin() const KJ_LIFETIMEBOUND { return content; } + inline constexpr const T* end() const KJ_LIFETIMEBOUND { return content + fixedSize; } - inline operator ArrayPtr() { + inline constexpr operator ArrayPtr() KJ_LIFETIMEBOUND { return arrayPtr(content, fixedSize); } - inline operator ArrayPtr() const { + inline constexpr operator ArrayPtr() const KJ_LIFETIMEBOUND { return arrayPtr(content, fixedSize); } - inline T& operator[](size_t index) { return content[index]; } - inline const T& operator[](size_t index) const { return content[index]; } + inline constexpr T& operator[](size_t index) KJ_LIFETIMEBOUND { return content[index]; } + inline constexpr const T& operator[](size_t index) const KJ_LIFETIMEBOUND { + return content[index]; + } private: T content[fixedSize]; @@ -545,20 +553,20 @@ class CappedArray { inline size_t size() const { return currentSize; } inline void setSize(size_t s) { KJ_IREQUIRE(s <= fixedSize); currentSize = s; } - inline T* begin() { return content; } - inline T* end() { return content + currentSize; } - inline const T* begin() const { return content; } - inline const T* end() const { return content + currentSize; } + inline T* begin() KJ_LIFETIMEBOUND { return content; } + inline T* end() KJ_LIFETIMEBOUND { return content + currentSize; } + inline const T* begin() const KJ_LIFETIMEBOUND { return content; } + inline const T* end() const KJ_LIFETIMEBOUND { return content + currentSize; } - inline operator ArrayPtr() { + inline operator ArrayPtr() KJ_LIFETIMEBOUND { return arrayPtr(content, currentSize); } - inline operator ArrayPtr() const { + inline operator ArrayPtr() const KJ_LIFETIMEBOUND { return arrayPtr(content, currentSize); } - inline T& operator[](size_t index) { return content[index]; } - inline const T& operator[](size_t index) const { return content[index]; } + inline T& operator[](size_t index) KJ_LIFETIMEBOUND { return content[index]; } + inline const T& operator[](size_t index) const KJ_LIFETIMEBOUND { return content[index]; } private: size_t currentSize; @@ -631,7 +639,8 @@ struct ArrayDisposer::Dispose_ { static void dispose(T* firstElement, size_t elementCount, size_t capacity, const ArrayDisposer& disposer) { - disposer.disposeImpl(firstElement, sizeof(T), elementCount, capacity, &destruct); + disposer.disposeImpl(const_cast*>(firstElement), + sizeof(T), elementCount, capacity, &destruct); } }; @@ -846,6 +855,12 @@ inline Array> arr(T&& param1, Params&&... params) { (builder.add(kj::fwd(param1)), ... , builder.add(kj::fwd(params))); return builder.finish(); } +template +inline Array> arrOf(Params&&... params) { + ArrayBuilder> builder = heapArrayBuilder>(sizeof...(params)); + (... , builder.add(kj::fwd(params))); + return builder.finish(); +} #endif namespace _ { // private @@ -862,6 +877,7 @@ template template Array Array::attach(Attachments&&... attachments) { T* ptrCopy = ptr; + auto sizeCopy = size_; KJ_IREQUIRE(ptrCopy != nullptr, "cannot attach to null pointer"); @@ -872,7 +888,7 @@ Array Array::attach(Attachments&&... 
attachments) { auto bundle = new _::ArrayDisposableOwnedBundle, Attachments...>( kj::mv(*this), kj::fwd(attachments)...); - return Array(ptrCopy, size_, *bundle); + return Array(ptrCopy, sizeCopy, *bundle); } template @@ -893,3 +909,5 @@ Array ArrayPtr::attach(Attachments&&... attachments) const { } } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-inl.h b/libs/EXTERNAL/capnproto/c++/src/kj/async-inl.h index 61afa28ddfa..55ab97a335a 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-inl.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-inl.h @@ -26,15 +26,15 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #ifndef KJ_ASYNC_H_INCLUDED #error "Do not include this directly; include kj/async.h." #include "async.h" // help IDE parse this file #endif +KJ_BEGIN_HEADER + +#include "list.h" + namespace kj { namespace _ { // private @@ -78,12 +78,69 @@ class ExceptionOr: public ExceptionOrValue { Maybe value; }; +template +inline T convertToReturn(ExceptionOr&& result) { + KJ_IF_MAYBE(value, result.value) { + KJ_IF_MAYBE(exception, result.exception) { + throwRecoverableException(kj::mv(*exception)); + } + return _::returnMaybeVoid(kj::mv(*value)); + } else KJ_IF_MAYBE(exception, result.exception) { + throwFatalException(kj::mv(*exception)); + } else { + // Result contained neither a value nor an exception? + KJ_UNREACHABLE; + } +} + +inline void convertToReturn(ExceptionOr&& result) { + // Override case to use throwRecoverableException(). + + if (result.value != nullptr) { + KJ_IF_MAYBE(exception, result.exception) { + throwRecoverableException(kj::mv(*exception)); + } + } else KJ_IF_MAYBE(exception, result.exception) { + throwRecoverableException(kj::mv(*exception)); + } else { + // Result contained neither a value nor an exception? + KJ_UNREACHABLE; + } +} + +class TraceBuilder { + // Helper for methods that build a call trace. +public: + TraceBuilder(ArrayPtr space) + : start(space.begin()), current(space.begin()), limit(space.end()) {} + + inline void add(void* addr) { + if (current < limit) { + *current++ = addr; + } + } + + inline bool full() const { return current == limit; } + + ArrayPtr finish() { + return arrayPtr(start, current); + } + + String toString(); + +private: + void** start; + void** current; + void** limit; +}; + class Event { // An event waiting to be executed. Not for direct use by applications -- promises use this // internally. public: Event(); + Event(kj::EventLoop& loop); ~Event() noexcept(false); KJ_DISALLOW_COPY(Event); @@ -105,12 +162,25 @@ class Event { void armBreadthFirst(); // Like `armDepthFirst()` except that the event is placed at the end of the queue. - kj::String trace(); - // Dump debug info about this event. + void armLast(); + // Enqueues this event to happen after all other events have run to completion and there is + // really nothing left to do except wait for I/O. + + void disarm(); + // If the event is armed but hasn't fired, cancel it. (Destroying the event does this + // implicitly.) - virtual _::PromiseNode* getInnerForTrace(); - // If this event wraps a PromiseNode, get that node. Used for debug tracing. - // Default implementation returns nullptr. + virtual void traceEvent(TraceBuilder& builder) = 0; + // Build a trace of the callers leading up to this event. `builder` will be populated with + // "return addresses" of the promise chain waiting on this event. 
The return addresses may + // actually the addresses of lambdas passed to .then(), but in any case, feeding them into + // addr2line should produce useful source code locations. + // + // `traceEvent()` may be called from an async signal handler while `fire()` is executing. It + // must not allocate nor take locks. + + String traceEvent(); + // Helper that builds a trace and stringifies it. protected: virtual Maybe> fire() = 0; @@ -153,9 +223,43 @@ class PromiseNode { // Can only be called once, and only after the node is ready. Must be called directly from the // event loop, with no application code on the stack. - virtual PromiseNode* getInnerForTrace(); - // If this node wraps some other PromiseNode, get the wrapped node. Used for debug tracing. - // Default implementation returns nullptr. + virtual void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) = 0; + // Build a trace of this promise chain, showing what it is currently waiting on. + // + // Since traces are ordered callee-before-caller, PromiseNode::tracePromise() should typically + // recurse to its child first, then after the child returns, add itself to the trace. + // + // If `stopAtNextEvent` is true, then the trace should stop as soon as it hits a PromiseNode that + // also implements Event, and should not trace that node or its children. This is used in + // conjuction with Event::traceEvent(). The chain of Events is often more sparse than the chain + // of PromiseNodes, because a TransformPromiseNode (which implements .then()) is not itself an + // Event. TransformPromiseNode instead tells its child node to directly notify its *parent* node + // when it is ready, and then TransformPromiseNode applies the .then() transformation during the + // call to .get(). + // + // So, when we trace the chain of Events backwards, we end up hoping over segments of + // TransformPromiseNodes (and other similar types). In order to get those added to the trace, + // each Event must call back down the PromiseNode chain in the opposite direction, using this + // method. + // + // `tracePromise()` may be called from an async signal handler while `get()` is executing. It + // must not allocate nor take locks. + + template + static Own from(T&& promise) { + // Given a Promise, extract the PromiseNode. + return kj::mv(promise.node); + } + template + static PromiseNode& from(T& promise) { + // Given a Promise, extract the PromiseNode. + return *promise.node; + } + template + static T to(Own&& node) { + // Construct a Promise from a PromiseNode. (T should be a Promise type.) + return T(false, kj::mv(node)); + } protected: class OnReadyEvent { @@ -165,9 +269,14 @@ class PromiseNode { void init(Event* newEvent); void arm(); + void armBreadthFirst(); // Arms the event if init() has already been called and makes future calls to init() // automatically arm the event. 
+ inline void traceEvent(TraceBuilder& builder) { + if (event != nullptr && !builder.full()) event->traceEvent(builder); + } + private: Event* event = nullptr; }; @@ -175,12 +284,20 @@ class PromiseNode { // ------------------------------------------------------------------- +template +inline NeverDone::operator Promise() const { + return PromiseNode::to>(neverDone()); +} + +// ------------------------------------------------------------------- + class ImmediatePromiseNodeBase: public PromiseNode { public: ImmediatePromiseNodeBase(); ~ImmediatePromiseNodeBase() noexcept(false); void onReady(Event* event) noexcept override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; }; template @@ -216,7 +333,7 @@ class AttachmentPromiseNodeBase: public PromiseNode { void onReady(Event* event) noexcept override; void get(ExceptionOrValue& output) noexcept override; - PromiseNode* getInnerForTrace() override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; private: Own dependency; @@ -251,17 +368,40 @@ class AttachmentPromiseNode final: public AttachmentPromiseNodeBase { #if __GNUC__ >= 8 && !__clang__ // GCC 8's class-memaccess warning rightly does not like the memcpy()'s below, but there's no -// "legal" way for us to extract the contetn of a PTMF so too bad. +// "legal" way for us to extract the content of a PTMF so too bad. #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wclass-memaccess" +#if __GNUC__ >= 11 +// GCC 11's array-bounds is similarly upset with us for digging into "private" implementation +// details. But the format is well-defined by the ABI which cannot change so please just let us +// do it kthx. +#pragma GCC diagnostic ignored "-Warray-bounds" #endif +#endif + +template +void* getMethodStartAddress(T& obj, ReturnType (T::*method)(ParamTypes...)); +template +void* getMethodStartAddress(const T& obj, ReturnType (T::*method)(ParamTypes...) const); +// Given an object and a pointer-to-method, return the start address of the method's code. The +// intent is that this address can be used in a trace; addr2line should map it to the start of +// the function's definition. For virtual methods, this does a vtable lookup on `obj` to determine +// the address of the specific implementation (otherwise, `obj` wouldn't be needed). +// +// Note that if the method is overloaded or is a template, you will need to explicitly specify +// the param and return types, otherwise the compiler won't know which overload / template +// specialization you are requesting. class PtmfHelper { - // This class is a private helper for GetFunctorStartAddress. The class represents the internal - // representation of a pointer-to-member-function. + // This class is a private helper for GetFunctorStartAddress and getMethodStartAddress(). The + // class represents the internal representation of a pointer-to-member-function. template friend struct GetFunctorStartAddress; + template + friend void* getMethodStartAddress(T& obj, ReturnType (T::*method)(ParamTypes...)); + template + friend void* getMethodStartAddress(const T& obj, ReturnType (T::*method)(ParamTypes...) const); #if __GNUG__ @@ -269,7 +409,7 @@ class PtmfHelper { ptrdiff_t adj; // Layout of a pointer-to-member-function used by GCC and compatible compilers. 
- void* apply(void* obj) { + void* apply(const void* obj) { #if defined(__arm__) || defined(__mips__) || defined(__aarch64__) if (adj & 1) { ptrdiff_t voff = (ptrdiff_t)ptr; @@ -292,7 +432,7 @@ class PtmfHelper { #else // __GNUG__ - void* apply(void* obj) { return nullptr; } + void* apply(const void* obj) { return nullptr; } // TODO(port): PTMF instruction address extraction #define BODY return PtmfHelper{} @@ -321,6 +461,15 @@ class PtmfHelper { #pragma GCC diagnostic pop #endif +template +void* getMethodStartAddress(T& obj, ReturnType (T::*method)(ParamTypes...)) { + return PtmfHelper::from(method).apply(&obj); +} +template +void* getMethodStartAddress(const T& obj, ReturnType (T::*method)(ParamTypes...) const) { + return PtmfHelper::from(method).apply(&obj); +} + template struct GetFunctorStartAddress { // Given a functor (any object defining operator()), return the start address of the function, @@ -353,7 +502,7 @@ class TransformPromiseNodeBase: public PromiseNode { void onReady(Event* event) noexcept override; void get(ExceptionOrValue& output) noexcept override; - PromiseNode* getInnerForTrace() override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; private: Own dependency; @@ -374,9 +523,9 @@ class TransformPromiseNode final: public TransformPromiseNodeBase { // function (implements `then()`). public: - TransformPromiseNode(Own&& dependency, Func&& func, ErrorFunc&& errorHandler) - : TransformPromiseNodeBase(kj::mv(dependency), - GetFunctorStartAddress::apply(func)), + TransformPromiseNode(Own&& dependency, Func&& func, ErrorFunc&& errorHandler, + void* continuationTracePtr) + : TransformPromiseNodeBase(kj::mv(dependency), continuationTracePtr), func(kj::fwd(func)), errorHandler(kj::fwd(errorHandler)) {} ~TransformPromiseNode() noexcept(false) { @@ -424,7 +573,7 @@ class ForkBranchBase: public PromiseNode { // implements PromiseNode ------------------------------------------ void onReady(Event* event) noexcept override; - PromiseNode* getInnerForTrace() override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; protected: inline ExceptionOrValue& getHubResultRef(); @@ -444,6 +593,11 @@ class ForkBranchBase: public PromiseNode { template T copyOrAddRef(T& t) { return t; } template Own copyOrAddRef(Own& t) { return t->addRef(); } +template Maybe> copyOrAddRef(Maybe>& t) { + return t.map([](Own& ptr) { + return ptr->addRef(); + }); +} template class ForkBranch final: public ForkBranchBase { @@ -504,7 +658,7 @@ class ForkHubBase: public Refcounted, protected Event { // Tail becomes null once the inner promise is ready and all branches have been notified. 
Maybe> fire() override; - _::PromiseNode* getInnerForTrace() override; + void traceEvent(TraceBuilder& builder) override; friend class ForkBranchBase; }; @@ -519,7 +673,7 @@ class ForkHub final: public ForkHubBase { ForkHub(Own&& inner): ForkHubBase(kj::mv(inner), result) {} Promise<_::UnfixVoid> addBranch() { - return Promise<_::UnfixVoid>(false, kj::heap>(addRef(*this))); + return _::PromiseNode::to>>(kj::heap>(addRef(*this))); } _::SplitTuplePromise split() { @@ -536,9 +690,9 @@ class ForkHub final: public ForkHubBase { template ReducePromises::Element> addSplit() { - return ReducePromises::Element>( - false, maybeChain(kj::heap>(addRef(*this)), - implicitCast::Element*>(nullptr))); + return _::PromiseNode::to::Element>>( + maybeChain(kj::heap>(addRef(*this)), + implicitCast::Element*>(nullptr))); } }; @@ -561,7 +715,7 @@ class ChainPromiseNode final: public PromiseNode, public Event { void onReady(Event* event) noexcept override; void setSelfPointer(Own* selfPtr) noexcept override; void get(ExceptionOrValue& output) noexcept override; - PromiseNode* getInnerForTrace() override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; private: enum State { @@ -579,6 +733,7 @@ class ChainPromiseNode final: public PromiseNode, public Event { Own* selfPtr = nullptr; Maybe> fire() override; + void traceEvent(TraceBuilder& builder) override; }; template @@ -610,7 +765,7 @@ class ExclusiveJoinPromiseNode final: public PromiseNode { void onReady(Event* event) noexcept override; void get(ExceptionOrValue& output) noexcept override; - PromiseNode* getInnerForTrace() override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; private: class Branch: public Event { @@ -622,11 +777,13 @@ class ExclusiveJoinPromiseNode final: public PromiseNode { // Returns true if this is the side that finished. Maybe> fire() override; - _::PromiseNode* getInnerForTrace() override; + void traceEvent(TraceBuilder& builder) override; private: ExclusiveJoinPromiseNode& joinNode; Own dependency; + + friend class ExclusiveJoinPromiseNode; }; Branch left; @@ -644,7 +801,7 @@ class ArrayJoinPromiseNodeBase: public PromiseNode { void onReady(Event* event) noexcept override final; void get(ExceptionOrValue& output) noexcept override final; - PromiseNode* getInnerForTrace() override final; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override final; protected: virtual void getNoError(ExceptionOrValue& output) noexcept = 0; @@ -661,7 +818,7 @@ class ArrayJoinPromiseNodeBase: public PromiseNode { ~Branch() noexcept(false); Maybe> fire() override; - _::PromiseNode* getInnerForTrace() override; + void traceEvent(TraceBuilder& builder) override; Maybe getPart(); // Calls dependency->get(output). If there was an exception, return it. 
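
[Editor's sketch, not part of the patch: the Fork* and ArrayJoin* node changes in the hunks above are the internals behind the public Promise::fork() and kj::joinPromises() APIs. For orientation, typical usage looks like this:]

  #include <kj/async.h>
  #include <kj/array.h>

  kj::Promise<int> forkAndJoin() {
    // fork() lets several consumers await one result; T must be copyable or an
    // Own<T>, which each branch addRef()s (see the new copyOrAddRef overload).
    kj::Promise<int> source = 123;
    kj::ForkedPromise<int> fork = source.fork();

    // joinPromises() (backed by ArrayJoinPromiseNode) waits for all of them.
    auto builder = kj::heapArrayBuilder<kj::Promise<int>>(2);
    builder.add(fork.addBranch());
    builder.add(fork.addBranch());
    return kj::joinPromises(builder.finish())
        .then([](kj::Array<int> results) { return results[0] + results[1]; });
  }
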
@@ -670,6 +827,8 @@ class ArrayJoinPromiseNodeBase: public PromiseNode { ArrayJoinPromiseNodeBase& joinNode; Own dependency; ExceptionOrValue& output; + + friend class ArrayJoinPromiseNodeBase; }; Array branches; @@ -722,7 +881,7 @@ class EagerPromiseNodeBase: public PromiseNode, protected Event { EagerPromiseNodeBase(Own&& dependency, ExceptionOrValue& resultRef); void onReady(Event* event) noexcept override; - PromiseNode* getInnerForTrace() override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; private: Own dependency; @@ -731,6 +890,7 @@ class EagerPromiseNodeBase: public PromiseNode, protected Event { ExceptionOrValue& resultRef; Maybe> fire() override; + void traceEvent(TraceBuilder& builder) override; }; template @@ -759,6 +919,7 @@ Own spark(Own&& node) { class AdapterPromiseNodeBase: public PromiseNode { public: void onReady(Event* event) noexcept override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; protected: inline void setReady() { @@ -810,6 +971,73 @@ class AdapterPromiseNode final: public AdapterPromiseNodeBase, } }; +// ------------------------------------------------------------------- + +class FiberBase: public PromiseNode, private Event { + // Base class for the outer PromiseNode representing a fiber. + +public: + explicit FiberBase(size_t stackSize, _::ExceptionOrValue& result); + explicit FiberBase(const FiberPool& pool, _::ExceptionOrValue& result); + ~FiberBase() noexcept(false); + + void start() { armDepthFirst(); } + // Call immediately after construction to begin executing the fiber. + + class WaitDoneEvent; + + void onReady(_::Event* event) noexcept override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; + +protected: + bool isFinished() { return state == FINISHED; } + void destroy(); + +private: + enum { WAITING, RUNNING, CANCELED, FINISHED } state; + + _::PromiseNode* currentInner = nullptr; + OnReadyEvent onReadyEvent; + Own stack; + _::ExceptionOrValue& result; + + void run(); + virtual void runImpl(WaitScope& waitScope) = 0; + + Maybe> fire() override; + void traceEvent(TraceBuilder& builder) override; + // Implements Event. Each time the event is fired, switchToFiber() is called. 
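
[Editor's illustration, not part of the patch: FiberBase below, together with the Fiber, startFiber() and FiberPool::startFiber() templates later in this header, adds fiber support. A callback running on a fiber receives its own WaitScope and may call .wait() without blocking the thread's event loop. The 64 kB stack size here is arbitrary; a kj::FiberPool constructed with the same size can be used instead to recycle stacks across many short-lived fibers.]

  #include <kj/async.h>

  kj::Promise<int> sumOnFiber(kj::Promise<int> a, kj::Promise<int> b) {
    return kj::startFiber(64 * 1024,
        [a = kj::mv(a), b = kj::mv(b)](kj::WaitScope& scope) mutable {
      // Inside the fiber, .wait() suspends only this fiber, not the event loop.
      return a.wait(scope) + b.wait(scope);
    });
  }
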
+ + friend class FiberStack; + friend void _::waitImpl(Own<_::PromiseNode>&& node, _::ExceptionOrValue& result, + WaitScope& waitScope); + friend bool _::pollImpl(_::PromiseNode& node, WaitScope& waitScope); +}; + +template +class Fiber final: public FiberBase { +public: + explicit Fiber(size_t stackSize, Func&& func): FiberBase(stackSize, result), func(kj::fwd(func)) {} + explicit Fiber(const FiberPool& pool, Func&& func): FiberBase(pool, result), func(kj::fwd(func)) {} + ~Fiber() noexcept(false) { destroy(); } + + typedef FixVoid()(kj::instance()))> ResultType; + + void get(ExceptionOrValue& output) noexcept override { + KJ_IREQUIRE(isFinished()); + output.as() = kj::mv(result); + } + +private: + Func func; + ExceptionOr result; + + void runImpl(WaitScope& waitScope) override { + result.template as() = + MaybeVoidCaller::apply(func, waitScope); + } +}; + } // namespace _ (private) // ======================================================================================= @@ -827,10 +1055,12 @@ template PromiseForResult Promise::then(Func&& func, ErrorFunc&& errorHandler) { typedef _::FixVoid<_::ReturnType> ResultT; + void* continuationTracePtr = _::GetFunctorStartAddress<_::FixVoid&&>::apply(func); Own<_::PromiseNode> intermediate = heap<_::TransformPromiseNode, Func, ErrorFunc>>( - kj::mv(node), kj::fwd(func), kj::fwd(errorHandler)); - auto result = _::ChainPromises<_::ReturnType>(false, + kj::mv(node), kj::fwd(func), kj::fwd(errorHandler), + continuationTracePtr); + auto result = _::PromiseNode::to<_::ChainPromises<_::ReturnType>>( _::maybeChain(kj::mv(intermediate), implicitCast(nullptr))); return _::maybeReduce(kj::mv(result), false); } @@ -870,47 +1100,25 @@ Promise Promise::catch_(ErrorFunc&& errorHandler) { // Func is being filled in automatically. We want to make sure ErrorFunc can return a Promise, // but we don't want the extra overhead of promise chaining if ErrorFunc doesn't actually // return a promise. So we make our Func return match ErrorFunc. - return then(_::IdentityFunc()))>(), - kj::fwd(errorHandler)); + typedef _::IdentityFunc()))> Func; + typedef _::FixVoid<_::ReturnType> ResultT; + + // The reason catch_() isn't simply implemented in terms of then() is because we want the trace + // pointer to be based on ErrorFunc rather than Func. + void* continuationTracePtr = _::GetFunctorStartAddress::apply(errorHandler); + Own<_::PromiseNode> intermediate = + heap<_::TransformPromiseNode, Func, ErrorFunc>>( + kj::mv(node), Func(), kj::fwd(errorHandler), continuationTracePtr); + auto result = _::PromiseNode::to<_::ChainPromises<_::ReturnType>>( + _::maybeChain(kj::mv(intermediate), implicitCast(nullptr))); + return _::maybeReduce(kj::mv(result), false); } template T Promise::wait(WaitScope& waitScope) { _::ExceptionOr<_::FixVoid> result; - _::waitImpl(kj::mv(node), result, waitScope); - - KJ_IF_MAYBE(value, result.value) { - KJ_IF_MAYBE(exception, result.exception) { - throwRecoverableException(kj::mv(*exception)); - } - return _::returnMaybeVoid(kj::mv(*value)); - } else KJ_IF_MAYBE(exception, result.exception) { - throwFatalException(kj::mv(*exception)); - } else { - // Result contained neither a value nor an exception? - KJ_UNREACHABLE; - } -} - -template <> -inline void Promise::wait(WaitScope& waitScope) { - // Override case to use throwRecoverableException(). 
- - _::ExceptionOr<_::Void> result; - - _::waitImpl(kj::mv(node), result, waitScope); - - if (result.value != nullptr) { - KJ_IF_MAYBE(exception, result.exception) { - throwRecoverableException(kj::mv(*exception)); - } - } else KJ_IF_MAYBE(exception, result.exception) { - throwRecoverableException(kj::mv(*exception)); - } else { - // Result contained neither a value nor an exception? - KJ_UNREACHABLE; - } + return convertToReturn(kj::mv(result)); } template @@ -928,6 +1136,11 @@ Promise ForkedPromise::addBranch() { return hub->addBranch(); } +template +bool ForkedPromise::hasBranches() { + return hub->isShared(); +} + template _::SplitTuplePromise Promise::split() { return refcounted<_::ForkHub<_::FixVoid>>(kj::mv(node))->split(); @@ -969,6 +1182,11 @@ inline PromiseForResult evalLater(Func&& func) { return _::yield().then(kj::fwd(func), _::PropagateException()); } +template +inline PromiseForResult evalLast(Func&& func) { + return _::yieldHarder().then(kj::fwd(func), _::PropagateException()); +} + template inline PromiseForResult evalNow(Func&& func) { PromiseForResult result = nullptr; @@ -980,6 +1198,64 @@ inline PromiseForResult evalNow(Func&& func) { return result; } +template +struct RetryOnDisconnect_ { + static inline PromiseForResult apply(Func&& func) { + return evalLater([func = kj::mv(func)]() mutable -> PromiseForResult { + auto promise = evalNow(func); + return promise.catch_([func = kj::mv(func)](kj::Exception&& e) mutable -> PromiseForResult { + if (e.getType() == kj::Exception::Type::DISCONNECTED) { + return func(); + } else { + return kj::mv(e); + } + }); + }); + } +}; +template +struct RetryOnDisconnect_ { + // Specialization for references. Needed because the syntax for capturing references in a + // lambda is different. :( + static inline PromiseForResult apply(Func& func) { + auto promise = evalLater(func); + return promise.catch_([&func](kj::Exception&& e) -> PromiseForResult { + if (e.getType() == kj::Exception::Type::DISCONNECTED) { + return func(); + } else { + return kj::mv(e); + } + }); + } +}; + +template +inline PromiseForResult retryOnDisconnect(Func&& func) { + return RetryOnDisconnect_::apply(kj::fwd(func)); +} + +template +inline PromiseForResult startFiber(size_t stackSize, Func&& func) { + typedef _::FixVoid<_::ReturnType> ResultT; + + Own<_::FiberBase> intermediate = kj::heap<_::Fiber>(stackSize, kj::fwd(func)); + intermediate->start(); + auto result = _::PromiseNode::to<_::ChainPromises<_::ReturnType>>( + _::maybeChain(kj::mv(intermediate), implicitCast(nullptr))); + return _::maybeReduce(kj::mv(result), false); +} + +template +inline PromiseForResult FiberPool::startFiber(Func&& func) const { + typedef _::FixVoid<_::ReturnType> ResultT; + + Own<_::FiberBase> intermediate = kj::heap<_::Fiber>(*this, kj::fwd(func)); + intermediate->start(); + auto result = _::PromiseNode::to<_::ChainPromises<_::ReturnType>>( + _::maybeChain(kj::mv(intermediate), implicitCast(nullptr))); + return _::maybeReduce(kj::mv(result), false); +} + template template void Promise::detach(ErrorFunc&& errorHandler) { @@ -994,8 +1270,8 @@ void Promise::detach(ErrorFunc&& errorHandler) { template Promise> joinPromises(Array>&& promises) { - return Promise>(false, kj::heap<_::ArrayJoinPromiseNode>( - KJ_MAP(p, promises) { return kj::mv(p.node); }, + return _::PromiseNode::to>>(kj::heap<_::ArrayJoinPromiseNode>( + KJ_MAP(p, promises) { return _::PromiseNode::from(kj::mv(p)); }, heapArray<_::ExceptionOr>(promises.size()))); } @@ -1003,8 +1279,28 @@ Promise> joinPromises(Array>&& 
promises) { namespace _ { // private +class WeakFulfillerBase: protected kj::Disposer { +protected: + WeakFulfillerBase(): inner(nullptr) {} + virtual ~WeakFulfillerBase() noexcept(false) {} + + template + inline PromiseFulfiller* getInner() { + return static_cast*>(inner); + }; + template + inline void setInner(PromiseFulfiller* ptr) { + inner = ptr; + }; + +private: + mutable PromiseRejector* inner; + + void disposeImpl(void* pointer) const override; +}; + template -class WeakFulfiller final: public PromiseFulfiller, private kj::Disposer { +class WeakFulfiller final: public PromiseFulfiller, public WeakFulfillerBase { // A wrapper around PromiseFulfiller which can be detached. // // There are a couple non-trivialities here: @@ -1027,54 +1323,37 @@ class WeakFulfiller final: public PromiseFulfiller, private kj::Disposer { } void fulfill(FixVoid&& value) override { - if (inner != nullptr) { - inner->fulfill(kj::mv(value)); + if (getInner() != nullptr) { + getInner()->fulfill(kj::mv(value)); } } void reject(Exception&& exception) override { - if (inner != nullptr) { - inner->reject(kj::mv(exception)); + if (getInner() != nullptr) { + getInner()->reject(kj::mv(exception)); } } bool isWaiting() override { - return inner != nullptr && inner->isWaiting(); + return getInner() != nullptr && getInner()->isWaiting(); } void attach(PromiseFulfiller& newInner) { - inner = &newInner; + setInner(&newInner); } void detach(PromiseFulfiller& from) { - if (inner == nullptr) { + if (getInner() == nullptr) { // Already disposed. delete this; } else { - KJ_IREQUIRE(inner == &from); - inner = nullptr; + KJ_IREQUIRE(getInner() == &from); + setInner(nullptr); } } private: - mutable PromiseFulfiller* inner; - - WeakFulfiller(): inner(nullptr) {} - - void disposeImpl(void* pointer) const override { - // TODO(perf): Factor some of this out so it isn't regenerated for every fulfiller type? - - if (inner == nullptr) { - // Already detached. - delete this; - } else { - if (inner->isWaiting()) { - inner->reject(kj::Exception(kj::Exception::Type::FAILED, __FILE__, __LINE__, - kj::heapString("PromiseFulfiller was destroyed without fulfilling the promise."))); - } - inner = nullptr; - } - } + WeakFulfiller() {} }; template @@ -1119,9 +1398,12 @@ bool PromiseFulfiller::rejectIfThrows(Func&& func) { } template -Promise newAdaptedPromise(Params&&... adapterConstructorParams) { - return Promise(false, heap<_::AdapterPromiseNode<_::FixVoid, Adapter>>( - kj::fwd(adapterConstructorParams)...)); +_::ReducePromises newAdaptedPromise(Params&&... 
adapterConstructorParams) { + Own<_::PromiseNode> intermediate( + heap<_::AdapterPromiseNode<_::FixVoid, Adapter>>( + kj::fwd(adapterConstructorParams)...)); + return _::PromiseNode::to<_::ReducePromises>( + _::maybeChain(kj::mv(intermediate), implicitCast(nullptr))); } template @@ -1130,10 +1412,360 @@ PromiseFulfillerPair newPromiseAndFulfiller() { Own<_::PromiseNode> intermediate( heap<_::AdapterPromiseNode<_::FixVoid, _::PromiseAndFulfillerAdapter>>(*wrapper)); - _::ReducePromises promise(false, + auto promise = _::PromiseNode::to<_::ReducePromises>( _::maybeChain(kj::mv(intermediate), implicitCast(nullptr))); return PromiseFulfillerPair { kj::mv(promise), kj::mv(wrapper) }; } +// ======================================================================================= +// cross-thread stuff + +namespace _ { // (private) + +class XThreadEvent: private Event, // it's an event in the target thread + public PromiseNode { // it's a PromiseNode in the requesting thread +public: + XThreadEvent(ExceptionOrValue& result, const Executor& targetExecutor, void* funcTracePtr); + + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; + +protected: + void ensureDoneOrCanceled(); + // MUST be called in destructor of subclasses to make sure the object is not destroyed while + // still being accessed by the other thread. (This can't be placed in ~XThreadEvent() because + // that destructor doesn't run until the subclass has already been destroyed.) + + virtual kj::Maybe> execute() = 0; + // Run the function. If the function returns a promise, returns the inner PromiseNode, otherwise + // returns null. + + // implements PromiseNode ---------------------------------------------------- + void onReady(Event* event) noexcept override; + +private: + ExceptionOrValue& result; + void* funcTracePtr; + + kj::Own targetExecutor; + Maybe replyExecutor; // If executeAsync() was used. + + kj::Maybe> promiseNode; + // Accessed only in target thread. + + ListLink targetLink; + // Membership in one of the linked lists in the target Executor's work list or cancel list. These + // fields are protected by the target Executor's mutex. + + enum { + UNUSED, + // Object was never queued on another thread. + + QUEUED, + // Target thread has not yet dequeued the event from the state.start list. The requesting + // thread can cancel execution by removing the event from the list. + + EXECUTING, + // Target thread has dequeued the event from state.start and moved it to state.executing. To + // cancel, the requesting thread must add the event to the state.cancel list and change the + // state to CANCELING. + + CANCELING, + // Requesting thread is trying to cancel this event. The target thread will change the state to + // `DONE` once canceled. + + DONE + // Target thread has completed handling this event and will not touch it again. The requesting + // thread can safely delete the object. The `state` is updated to `DONE` using an atomic + // release operation after ensuring that the event will not be touched again, so that the + // requesting can safely skip locking if it observes the state is already DONE. + } state = UNUSED; + // State, which is also protected by `targetExecutor`'s mutex. + + ListLink replyLink; + // Membership in `replyExecutor`'s reply list. Protected by `replyExecutor`'s mutex. The + // executing thread places the event in the reply list near the end of the `EXECUTING` state. 
+ // Because the thread cannot lock two mutexes at once, it's possible that the reply executor + // will receive the reply while the event is still listed in the EXECUTING state, but it can + // ignore the state and proceed with the result. + + OnReadyEvent onReadyEvent; + // Accessed only in requesting thread. + + friend class kj::Executor; + + void done(); + // Sets the state to `DONE` and notifies the originating thread that this event is done. Do NOT + // call under lock. + + void sendReply(); + // Notifies the originating thread that this event is done, but doesn't set the state to DONE + // yet. Do NOT call under lock. + + void setDoneState(); + // Assigns `state` to `DONE`, being careful to use an atomic-release-store if needed. This must + // only be called in the destination thread, and must either be called under lock, or the thread + // must take the lock and release it again shortly after setting the state (because some threads + // may be waiting on the DONE state using a conditional wait on the mutex). After calling + // setDoneState(), the destination thread MUST NOT touch this object ever again; it now belongs + // solely to the requesting thread. + + void setDisconnected(); + // Sets the result to a DISCONNECTED exception indicating that the target event loop exited. + + class DelayedDoneHack; + + // implements Event ---------------------------------------------------------- + Maybe> fire() override; + // If called with promiseNode == nullptr, it's time to call execute(). If promiseNode != nullptr, + // then it just indicated readiness and we need to get its result. + + void traceEvent(TraceBuilder& builder) override; +}; + +template >> +class XThreadEventImpl final: public XThreadEvent { + // Implementation for a function that does not return a Promise. +public: + XThreadEventImpl(Func&& func, const Executor& target) + : XThreadEvent(result, target, GetFunctorStartAddress<>::apply(func)), + func(kj::fwd(func)) {} + ~XThreadEventImpl() noexcept(false) { ensureDoneOrCanceled(); } + + typedef _::FixVoid<_::ReturnType> ResultT; + + kj::Maybe> execute() override { + result.value = MaybeVoidCaller>::apply(func, Void()); + return nullptr; + } + + // implements PromiseNode ---------------------------------------------------- + void get(ExceptionOrValue& output) noexcept override { + output.as() = kj::mv(result); + } + +private: + Func func; + ExceptionOr result; + friend Executor; +}; + +template +class XThreadEventImpl> final: public XThreadEvent { + // Implementation for a function that DOES return a Promise. 
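The UNUSED → QUEUED → EXECUTING → DONE state machine described above (with the CANCELING escape hatch) is what Executor::executeSync() and executeAsync() ride on: the requesting thread queues an XThreadEvent on the target loop and either blocks (executeSync) or receives a promise (executeAsync) that resolves once the DONE handshake completes. Below is a minimal sketch of the synchronous form, not part of the patch (it assumes kj::getCurrentThreadExecutor() and kj::Thread from KJ proper; the 40 + 2 payload is invented); the XThreadEventImpl declaration resumes right after it:

#include <kj/async.h>
#include <kj/thread.h>
#include <kj/debug.h>

int main() {
  kj::EventLoop loop;
  kj::WaitScope waitScope(loop);
  const kj::Executor& executor = kj::getCurrentThreadExecutor();

  auto paf = kj::newPromiseAndFulfiller<int>();
  kj::PromiseFulfiller<int>& fulfiller = *paf.fulfiller;

  kj::Thread worker([&executor, &fulfiller]() {
    // Queues a cross-thread event on the main thread's loop and blocks this
    // thread until that loop has executed the lambda and signaled DONE.
    int echoed = executor.executeSync([&fulfiller]() {
      fulfiller.fulfill(40 + 2);   // runs on the loop-owning thread
      return 40 + 2;
    });
    KJ_ASSERT(echoed == 42);
  });

  // Running the loop here is what lets the queued cross-thread event fire.
  KJ_ASSERT(paf.promise.wait(waitScope) == 42);
  return 0;
}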
+public: + XThreadEventImpl(Func&& func, const Executor& target) + : XThreadEvent(result, target, GetFunctorStartAddress<>::apply(func)), + func(kj::fwd(func)) {} + ~XThreadEventImpl() noexcept(false) { ensureDoneOrCanceled(); } + + typedef _::FixVoid<_::UnwrapPromise>> ResultT; + + kj::Maybe> execute() override { + auto result = _::PromiseNode::from(func()); + KJ_IREQUIRE(result.get() != nullptr); + return kj::mv(result); + } + + // implements PromiseNode ---------------------------------------------------- + void get(ExceptionOrValue& output) noexcept override { + output.as() = kj::mv(result); + } + +private: + Func func; + ExceptionOr result; + friend Executor; +}; + +} // namespace _ (private) + +template +_::UnwrapPromise> Executor::executeSync(Func&& func) const { + _::XThreadEventImpl event(kj::fwd(func), *this); + send(event, true); + return convertToReturn(kj::mv(event.result)); +} + +template +PromiseForResult Executor::executeAsync(Func&& func) const { + auto event = kj::heap<_::XThreadEventImpl>(kj::fwd(func), *this); + send(*event, false); + return _::PromiseNode::to>(kj::mv(event)); +} + +// ----------------------------------------------------------------------------- + +namespace _ { // (private) + +template +class XThreadFulfiller; + +class XThreadPaf: public PromiseNode { +public: + XThreadPaf(); + virtual ~XThreadPaf() noexcept(false); + + class Disposer: public kj::Disposer { + public: + void disposeImpl(void* pointer) const override; + }; + static const Disposer DISPOSER; + + // implements PromiseNode ---------------------------------------------------- + void onReady(Event* event) noexcept override; + void tracePromise(TraceBuilder& builder, bool stopAtNextEvent) override; + +private: + enum { + WAITING, + // Not yet fulfilled, and the waiter is still waiting. + // + // Starting from this state, the state may transition to either FULFILLING or CANCELED + // using an atomic compare-and-swap. + + FULFILLING, + // The fulfiller thread atomically transitions the state from WAITING to FULFILLING when it + // wishes to fulfill the promise. By doing so, it guarantees that the `executor` will not + // disappear out from under it. It then fills in the result value, locks the executor mutex, + // adds the object to the executor's list of fulfilled XThreadPafs, changes the state to + // FULFILLED, and finally unlocks the mutex. + // + // If the waiting thread tries to cancel but discovers the object in this state, then it + // must perform a conditional wait on the executor mutex to await the state becoming FULFILLED. + // It can then delete the object. + + FULFILLED, + // The fulfilling thread has completed filling in the result value and inserting the object + // into the waiting thread's executor event queue. Moreover, the fulfilling thread no longer + // holds any pointers to this object. The waiting thread is responsible for deleting it. + + DISPATCHED, + // The object reached FULFILLED state, and then was dispatched from the waiting thread's + // executor's event queue. Therefore, the object is completely owned by the waiting thread with + // no need to lock anything. + + CANCELED + // The waiting thread atomically transitions the state from WAITING to CANCELED if it is no + // longer listening. In this state, it is the fulfiller thread's responsibility to destroy the + // object. + } state; + + const Executor& executor; + // Executor of the waiting thread. Only guaranteed to be valid when state is `WAITING` or + // `FULFILLING`. 
After any other state has been reached, this reference may be invalidated. + + ListLink link; + // In the FULFILLING/FULFILLED states, the object is placed in a linked list within the waiting + // thread's executor. In those states, these pointers are guarded by said executor's mutex. + + OnReadyEvent onReadyEvent; + + class FulfillScope; + + static kj::Exception unfulfilledException(); + // Construct appropriate exception to use to reject an unfulfilled XThreadPaf. + + template + friend class XThreadFulfiller; + friend Executor; +}; + +template +class XThreadPafImpl final: public XThreadPaf { +public: + // implements PromiseNode ---------------------------------------------------- + void get(ExceptionOrValue& output) noexcept override { + output.as>() = kj::mv(result); + } + +private: + ExceptionOr> result; + + friend class XThreadFulfiller; +}; + +class XThreadPaf::FulfillScope { + // Create on stack while setting `XThreadPafImpl::result`. + // + // This ensures that: + // - Only one call is carried out, even if multiple threads try to fulfill concurrently. + // - The waiting thread is correctly signaled. +public: + FulfillScope(XThreadPaf** pointer); + // Atomically nulls out *pointer and takes ownership of the pointer. + + ~FulfillScope() noexcept(false); + + KJ_DISALLOW_COPY(FulfillScope); + + bool shouldFulfill() { return obj != nullptr; } + + template + XThreadPafImpl* getTarget() { return static_cast*>(obj); } + +private: + XThreadPaf* obj; +}; + +template +class XThreadFulfiller final: public CrossThreadPromiseFulfiller { +public: + XThreadFulfiller(XThreadPafImpl* target): target(target) {} + + ~XThreadFulfiller() noexcept(false) { + if (target != nullptr) { + reject(XThreadPaf::unfulfilledException()); + } + } + void fulfill(FixVoid&& value) const override { + XThreadPaf::FulfillScope scope(&target); + if (scope.shouldFulfill()) { + scope.getTarget()->result = kj::mv(value); + } + } + void reject(Exception&& exception) const override { + XThreadPaf::FulfillScope scope(&target); + if (scope.shouldFulfill()) { + scope.getTarget()->result.addException(kj::mv(exception)); + } + } + bool isWaiting() const override { + KJ_IF_MAYBE(t, target) { +#if _MSC_VER && !__clang__ + // Just assume 1-byte loads are atomic... on what kind of absurd platform would they not be? + return t->state == XThreadPaf::WAITING; +#else + return __atomic_load_n(&t->state, __ATOMIC_RELAXED) == XThreadPaf::WAITING; +#endif + } else { + return false; + } + } + +private: + mutable XThreadPaf* target; // accessed using atomic ops +}; + +template +class XThreadFulfiller> { +public: + static_assert(sizeof(T) < 0, + "newCrosssThreadPromiseAndFulfiller>() is not currently supported"); + // TODO(someday): Is this worth supporting? Presumably, when someone calls `fulfill(somePromise)`, + // then `somePromise` should be assumed to be a promise owned by the fulfilling thread, not + // the waiting thread. 
+}; + +} // namespace _ (private) + +template +PromiseCrossThreadFulfillerPair newPromiseAndCrossThreadFulfiller() { + kj::Own<_::XThreadPafImpl> node(new _::XThreadPafImpl, _::XThreadPaf::DISPOSER); + auto fulfiller = kj::heap<_::XThreadFulfiller>(node); + return { _::PromiseNode::to<_::ReducePromises>(kj::mv(node)), kj::mv(fulfiller) }; +} + } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-io-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-io-test.c++ index d8c30430b1b..dc454f4107a 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-io-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-io-test.c++ @@ -21,14 +21,18 @@ #if _WIN32 // Request Vista-level APIs. -#define WINVER 0x0600 -#define _WIN32_WINNT 0x0600 +#include "win32-api-version.h" +#elif !defined(_GNU_SOURCE) +#define _GNU_SOURCE #endif #include "async-io.h" #include "async-io-internal.h" #include "debug.h" +#include "io.h" +#include "miniposix.h" #include +#include #include #if _WIN32 #include @@ -85,6 +89,282 @@ TEST(AsyncIo, SimpleNetwork) { EXPECT_EQ("foo", result); } +#if !_WIN32 // TODO(0.10): Implement NetworkPeerIdentity for Win32. +TEST(AsyncIo, SimpleNetworkAuthentication) { + auto ioContext = setupAsyncIo(); + auto& network = ioContext.provider->getNetwork(); + + Own listener; + Own server; + Own client; + + char receiveBuffer[4]; + + auto port = newPromiseAndFulfiller(); + + port.promise.then([&](uint portnum) { + return network.parseAddress("localhost", portnum); + }).then([&](Own&& addr) { + auto promise = addr->connectAuthenticated(); + return promise.then([&,addr=kj::mv(addr)](AuthenticatedStream result) mutable { + auto id = result.peerIdentity.downcast(); + + // `addr` was resolved from `localhost` and may contain multiple addresses, but + // result.peerIdentity tells us the specific address that was used. So it should be one + // of the ones on the list, but only one. + KJ_EXPECT(strstr(addr->toString().cStr(), id->getAddress().toString().cStr()) != nullptr); + KJ_EXPECT(id->getAddress().toString().findFirst(',') == nullptr); + + client = kj::mv(result.stream); + + // `id` should match client->getpeername(). + union { + struct sockaddr generic; + struct sockaddr_in ip4; + struct sockaddr_in6 ip6; + } rawAddr; + uint len = sizeof(rawAddr); + client->getpeername(&rawAddr.generic, &len); + auto peername = network.getSockaddr(&rawAddr.generic, len); + KJ_EXPECT(id->toString() == peername->toString()); + + return client->write("foo", 3); + }); + }).detach([](kj::Exception&& exception) { + KJ_FAIL_EXPECT(exception); + }); + + kj::String result = network.parseAddress("*").then([&](Own&& result) { + listener = result->listen(); + port.fulfiller->fulfill(listener->getPort()); + return listener->acceptAuthenticated(); + }).then([&](AuthenticatedStream result) { + auto id = result.peerIdentity.downcast(); + server = kj::mv(result.stream); + + // `id` should match server->getpeername(). + union { + struct sockaddr generic; + struct sockaddr_in ip4; + struct sockaddr_in6 ip6; + } addr; + uint len = sizeof(addr); + server->getpeername(&addr.generic, &len); + auto peername = network.getSockaddr(&addr.generic, len); + KJ_EXPECT(id->toString() == peername->toString()); + + return server->tryRead(receiveBuffer, 3, 4); + }).then([&](size_t n) { + EXPECT_EQ(3u, n); + return heapString(receiveBuffer, n); + }).wait(ioContext.waitScope); + + EXPECT_EQ("foo", result); +} +#endif + +#if !_WIN32 && !__CYGWIN__ // TODO(someday): Debug why this deadlocks on Cygwin. 
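Stepping back to the kj async header changes above: the XThreadPaf/XThreadFulfiller machinery is surfaced as kj::newPromiseAndCrossThreadFulfiller(). A minimal usage sketch (not part of the patch; the value 42 is arbitrary):

#include <kj/async.h>
#include <kj/thread.h>
#include <kj/debug.h>

int main() {
  kj::EventLoop loop;
  kj::WaitScope waitScope(loop);

  auto paf = kj::newPromiseAndCrossThreadFulfiller<int>();

  // Unlike a plain PromiseFulfiller, this one may be used from any thread;
  // the WAITING -> FULFILLING -> FULFILLED transitions described above hand
  // the result back to the event loop of the thread that created the pair.
  kj::Thread worker([fulfiller = kj::mv(paf.fulfiller)]() mutable {
    fulfiller->fulfill(42);
  });

  KJ_ASSERT(paf.promise.wait(waitScope) == 42);
  return 0;
}

Note the static_assert specialization above: fulfilling with a Promise<T> across threads is deliberately left unsupported for now.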
+ +#if __ANDROID__ +#define TMPDIR "/data/local/tmp" +#else +#define TMPDIR "/tmp" +#endif + +TEST(AsyncIo, UnixSocket) { + auto ioContext = setupAsyncIo(); + auto& network = ioContext.provider->getNetwork(); + + auto path = kj::str(TMPDIR "/kj-async-io-test.", getpid()); + KJ_DEFER(unlink(path.cStr())); + + Own listener; + Own server; + Own client; + + char receiveBuffer[4]; + + auto ready = newPromiseAndFulfiller(); + + ready.promise.then([&]() { + return network.parseAddress(kj::str("unix:", path)); + }).then([&](Own&& addr) { + auto promise = addr->connectAuthenticated(); + return promise.then([&,addr=kj::mv(addr)](AuthenticatedStream result) mutable { + auto id = result.peerIdentity.downcast(); + auto creds = id->getCredentials(); + KJ_IF_MAYBE(p, creds.pid) { + KJ_EXPECT(*p == getpid()); +#if __linux__ || __APPLE__ + } else { + KJ_FAIL_EXPECT("LocalPeerIdentity for unix socket had null PID"); +#endif + } + KJ_IF_MAYBE(u, creds.uid) { + KJ_EXPECT(*u == getuid()); + } else { + KJ_FAIL_EXPECT("LocalPeerIdentity for unix socket had null UID"); + } + + client = kj::mv(result.stream); + return client->write("foo", 3); + }); + }).detach([](kj::Exception&& exception) { + KJ_FAIL_EXPECT(exception); + }); + + kj::String result = network.parseAddress(kj::str("unix:", path)) + .then([&](Own&& result) { + listener = result->listen(); + ready.fulfiller->fulfill(); + return listener->acceptAuthenticated(); + }).then([&](AuthenticatedStream result) { + auto id = result.peerIdentity.downcast(); + auto creds = id->getCredentials(); + KJ_IF_MAYBE(p, creds.pid) { + KJ_EXPECT(*p == getpid()); +#if __linux__ || __APPLE__ + } else { + KJ_FAIL_EXPECT("LocalPeerIdentity for unix socket had null PID"); +#endif + } + KJ_IF_MAYBE(u, creds.uid) { + KJ_EXPECT(*u == getuid()); + } else { + KJ_FAIL_EXPECT("LocalPeerIdentity for unix socket had null UID"); + } + + server = kj::mv(result.stream); + return server->tryRead(receiveBuffer, 3, 4); + }).then([&](size_t n) { + EXPECT_EQ(3u, n); + return heapString(receiveBuffer, n); + }).wait(ioContext.waitScope); + + EXPECT_EQ("foo", result); +} + +TEST(AsyncIo, AncillaryMessageHandlerNoMsg) { + auto ioContext = setupAsyncIo(); + auto& network = ioContext.provider->getNetwork(); + + Own listener; + Own server; + Own client; + + char receiveBuffer[4]; + + bool clientHandlerCalled = false; + kj::Function)> clientHandler = + [&](kj::ArrayPtr) { + clientHandlerCalled = true; + }; + bool serverHandlerCalled = false; + kj::Function)> serverHandler = + [&](kj::ArrayPtr) { + serverHandlerCalled = true; + }; + + auto port = newPromiseAndFulfiller(); + + port.promise.then([&](uint portnum) { + return network.parseAddress("localhost", portnum); + }).then([&](Own&& addr) { + auto promise = addr->connectAuthenticated(); + return promise.then([&,addr=kj::mv(addr)](AuthenticatedStream result) mutable { + client = kj::mv(result.stream); + client->registerAncillaryMessageHandler(kj::mv(clientHandler)); + return client->write("foo", 3); + }); + }).detach([](kj::Exception&& exception) { + KJ_FAIL_EXPECT(exception); + }); + + kj::String result = network.parseAddress("*").then([&](Own&& result) { + listener = result->listen(); + port.fulfiller->fulfill(listener->getPort()); + return listener->acceptAuthenticated(); + }).then([&](AuthenticatedStream result) { + server = kj::mv(result.stream); + server->registerAncillaryMessageHandler(kj::mv(serverHandler)); + return server->tryRead(receiveBuffer, 3, 4); + }).then([&](size_t n) { + EXPECT_EQ(3u, n); + return heapString(receiveBuffer, n); + 
}).wait(ioContext.waitScope); + + EXPECT_EQ("foo", result); + EXPECT_FALSE(clientHandlerCalled); + EXPECT_FALSE(serverHandlerCalled); +} +#endif + +// This test uses SO_TIMESTAMP on a SOCK_STREAM, which is only supported by Linux. Ideally we'd +// rewrite the test to use some other message type that is widely supported on streams. But for +// now we just limit the test to Linux. Also, it doesn't work on Android for some reason, and it +// isn't worth investigating, so we skip it there. +#if __linux__ && !__ANDROID__ +TEST(AsyncIo, AncillaryMessageHandler) { + auto ioContext = setupAsyncIo(); + auto& network = ioContext.provider->getNetwork(); + + Own listener; + Own server; + Own client; + + char receiveBuffer[4]; + + bool clientHandlerCalled = false; + kj::Function)> clientHandler = + [&](kj::ArrayPtr) { + clientHandlerCalled = true; + }; + bool serverHandlerCalled = false; + kj::Function)> serverHandler = + [&](kj::ArrayPtr msgs) { + serverHandlerCalled = true; + EXPECT_EQ(1, msgs.size()); + EXPECT_EQ(SOL_SOCKET, msgs[0].getLevel()); + EXPECT_EQ(SO_TIMESTAMP, msgs[0].getType()); + }; + + auto port = newPromiseAndFulfiller(); + + port.promise.then([&](uint portnum) { + return network.parseAddress("localhost", portnum); + }).then([&](Own&& addr) { + auto promise = addr->connectAuthenticated(); + return promise.then([&,addr=kj::mv(addr)](AuthenticatedStream result) mutable { + client = kj::mv(result.stream); + client->registerAncillaryMessageHandler(kj::mv(clientHandler)); + return client->write("foo", 3); + }); + }).detach([](kj::Exception&& exception) { + KJ_FAIL_EXPECT(exception); + }); + + kj::String result = network.parseAddress("*").then([&](Own&& result) { + listener = result->listen(); + // Register interest in having the timestamp delivered via cmsg on each recvmsg. + int yes = 1; + listener->setsockopt(SOL_SOCKET, SO_TIMESTAMP, &yes, sizeof(yes)); + port.fulfiller->fulfill(listener->getPort()); + return listener->acceptAuthenticated(); + }).then([&](AuthenticatedStream result) { + server = kj::mv(result.stream); + server->registerAncillaryMessageHandler(kj::mv(serverHandler)); + return server->tryRead(receiveBuffer, 3, 4); + }).then([&](size_t n) { + EXPECT_EQ(3u, n); + return heapString(receiveBuffer, n); + }).wait(ioContext.waitScope); + + EXPECT_EQ("foo", result); + EXPECT_FALSE(clientHandlerCalled); + EXPECT_TRUE(serverHandlerCalled); +} +#endif + String tryParse(WaitScope& waitScope, Network& network, StringPtr text, uint portHint = 0) { return network.parseAddress(text, portHint).wait(waitScope)->toString(); } @@ -194,6 +474,46 @@ TEST(AsyncIo, TwoWayPipe) { EXPECT_EQ("bar", result2); } +TEST(AsyncIo, InMemoryCapabilityPipe) { + EventLoop loop; + WaitScope waitScope(loop); + + auto pipe = newCapabilityPipe(); + auto pipe2 = newCapabilityPipe(); + char receiveBuffer1[4]; + char receiveBuffer2[4]; + + // Expect to receive a stream, then read "foo" from it, then write "bar" to it. + Own receivedStream; + auto promise = pipe2.ends[1]->receiveStream() + .then([&](Own stream) { + receivedStream = kj::mv(stream); + return receivedStream->tryRead(receiveBuffer2, 3, 4); + }).then([&](size_t n) { + EXPECT_EQ(3u, n); + return receivedStream->write("bar", 3).then([&receiveBuffer2,n]() { + return heapString(receiveBuffer2, n); + }); + }); + + // Send a stream, then write "foo" to the other end of the sent stream, then receive "bar" + // from it. 
+ kj::String result = pipe2.ends[0]->sendStream(kj::mv(pipe.ends[1])) + .then([&]() { + return pipe.ends[0]->write("foo", 3); + }).then([&]() { + return pipe.ends[0]->tryRead(receiveBuffer1, 3, 4); + }).then([&](size_t n) { + EXPECT_EQ(3u, n); + return heapString(receiveBuffer1, n); + }).wait(waitScope); + + kj::String result2 = promise.wait(waitScope); + + EXPECT_EQ("bar", result); + EXPECT_EQ("foo", result2); +} + #if !_WIN32 && !__CYGWIN__ TEST(AsyncIo, CapabilityPipe) { auto ioContext = setupAsyncIo(); @@ -233,7 +553,240 @@ TEST(AsyncIo, CapabilityPipe) { EXPECT_EQ("bar", result); EXPECT_EQ("foo", result2); } -#endif + +TEST(AsyncIo, CapabilityPipeBlockedSendStream) { + // Check for a bug that existed at one point where if a sendStream() call couldn't complete + // immediately, it would fail. + + auto io = setupAsyncIo(); + + auto pipe = io.provider->newCapabilityPipe(); + + Promise promise = nullptr; + Own endpoint1; + uint nonBlockedCount = 0; + for (;;) { + auto pipe2 = io.provider->newCapabilityPipe(); + promise = pipe.ends[0]->sendStream(kj::mv(pipe2.ends[0])); + if (promise.poll(io.waitScope)) { + // Send completed immediately, because there was enough space in the stream. + ++nonBlockedCount; + promise.wait(io.waitScope); + } else { + // Send blocked! Let's continue with this promise then! + endpoint1 = kj::mv(pipe2.ends[1]); + break; + } + } + + for (uint i KJ_UNUSED: kj::zeroTo(nonBlockedCount)) { + // Receive and ignore all the streams that were sent without blocking. + pipe.ends[1]->receiveStream().wait(io.waitScope); + } + + // Now that write that blocked should have been able to complete. + promise.wait(io.waitScope); + + // Now get the one that blocked. + auto endpoint2 = pipe.ends[1]->receiveStream().wait(io.waitScope); + + endpoint1->write("foo", 3).wait(io.waitScope); + endpoint1->shutdownWrite(); + KJ_EXPECT(endpoint2->readAllText().wait(io.waitScope) == "foo"); +} + +TEST(AsyncIo, CapabilityPipeMultiStreamMessage) { + auto ioContext = setupAsyncIo(); + + auto pipe = ioContext.provider->newCapabilityPipe(); + auto pipe2 = ioContext.provider->newCapabilityPipe(); + auto pipe3 = ioContext.provider->newCapabilityPipe(); + + auto streams = heapArrayBuilder>(2); + streams.add(kj::mv(pipe2.ends[0])); + streams.add(kj::mv(pipe3.ends[0])); + + ArrayPtr secondBuf = "bar"_kj.asBytes(); + pipe.ends[0]->writeWithStreams("foo"_kj.asBytes(), arrayPtr(&secondBuf, 1), streams.finish()) + .wait(ioContext.waitScope); + + char receiveBuffer[7]; + Own receiveStreams[3]; + auto result = pipe.ends[1]->tryReadWithStreams(receiveBuffer, 6, 7, receiveStreams, 3) + .wait(ioContext.waitScope); + + KJ_EXPECT(result.byteCount == 6); + receiveBuffer[6] = '\0'; + KJ_EXPECT(kj::StringPtr(receiveBuffer) == "foobar"); + + KJ_ASSERT(result.capCount == 2); + + receiveStreams[0]->write("baz", 3).wait(ioContext.waitScope); + receiveStreams[0] = nullptr; + KJ_EXPECT(pipe2.ends[1]->readAllText().wait(ioContext.waitScope) == "baz"); + + pipe3.ends[1]->write("qux", 3).wait(ioContext.waitScope); + pipe3.ends[1] = nullptr; + KJ_EXPECT(receiveStreams[1]->readAllText().wait(ioContext.waitScope) == "qux"); +} + +TEST(AsyncIo, ScmRightsTruncatedOdd) { + // Test that if we send two FDs over a unix socket, but the receiving end only receives one, we + // don't leak the other FD. 
+ + auto io = setupAsyncIo(); + + auto capPipe = io.provider->newCapabilityPipe(); + + int pipeFds[2]; + KJ_SYSCALL(miniposix::pipe(pipeFds)); + kj::AutoCloseFd in1(pipeFds[0]); + kj::AutoCloseFd out1(pipeFds[1]); + + KJ_SYSCALL(miniposix::pipe(pipeFds)); + kj::AutoCloseFd in2(pipeFds[0]); + kj::AutoCloseFd out2(pipeFds[1]); + + { + AutoCloseFd sendFds[2] = { kj::mv(out1), kj::mv(out2) }; + capPipe.ends[0]->writeWithFds("foo"_kj.asBytes(), nullptr, sendFds).wait(io.waitScope); + } + + { + char buffer[4]; + AutoCloseFd fdBuffer[1]; + auto result = capPipe.ends[1]->tryReadWithFds(buffer, 3, 3, fdBuffer, 1).wait(io.waitScope); + KJ_ASSERT(result.capCount == 1); + kj::FdOutputStream(fdBuffer[0].get()).write("bar", 3); + } + + // We want to carefully verify that out1 and out2 were closed, without deadlocking if they + // weren't. So we manually set nonblocking mode and then issue read()s. + KJ_SYSCALL(fcntl(in1, F_SETFL, O_NONBLOCK)); + KJ_SYSCALL(fcntl(in2, F_SETFL, O_NONBLOCK)); + + char buffer[4]; + ssize_t n; + + // First we read "bar" from in1. + KJ_NONBLOCKING_SYSCALL(n = read(in1, buffer, 4)); + KJ_ASSERT(n == 3); + buffer[3] = '\0'; + KJ_ASSERT(kj::StringPtr(buffer) == "bar"); + + // Now it should be EOF. + KJ_NONBLOCKING_SYSCALL(n = read(in1, buffer, 4)); + if (n < 0) { + KJ_FAIL_ASSERT("out1 was not closed"); + } + KJ_ASSERT(n == 0); + + // Second pipe should have been closed implicitly because we didn't provide space to receive it. + KJ_NONBLOCKING_SYSCALL(n = read(in2, buffer, 4)); + if (n < 0) { + KJ_FAIL_ASSERT("out2 was not closed. This could indicate that your operating system kernel is " + "buggy and leaks file descriptors when an SCM_RIGHTS message is truncated. FreeBSD was " + "known to do this until late 2018, while MacOS still has this bug as of this writing in " + "2019. However, KJ works around the problem on those platforms. You need to enable the " + "same work-around for your OS -- search for 'SCM_RIGHTS' in src/kj/async-io-unix.c++."); + } + KJ_ASSERT(n == 0); +} + +#if !__aarch64__ +// This test fails under qemu-user, probably due to a bug in qemu's syscall emulation rather than +// a bug in the kernel. We don't have a good way to detect qemu so we just skip the test on aarch64 +// in general. + +TEST(AsyncIo, ScmRightsTruncatedEven) { + // Test that if we send three FDs over a unix socket, but the receiving end only receives two, we + // don't leak the third FD. This is different from the send-two-receive-one case in that + // CMSG_SPACE() on many systems rounds up such that there is always space for an even number of + // FDs. In that case the other test only verifies that our userspace code to close unwanted FDs + // is correct, whereas *this* test really verifies that the *kernel* properly closes truncated + // FDs. 
+ + auto io = setupAsyncIo(); + + auto capPipe = io.provider->newCapabilityPipe(); + + int pipeFds[2]; + KJ_SYSCALL(miniposix::pipe(pipeFds)); + kj::AutoCloseFd in1(pipeFds[0]); + kj::AutoCloseFd out1(pipeFds[1]); + + KJ_SYSCALL(miniposix::pipe(pipeFds)); + kj::AutoCloseFd in2(pipeFds[0]); + kj::AutoCloseFd out2(pipeFds[1]); + + KJ_SYSCALL(miniposix::pipe(pipeFds)); + kj::AutoCloseFd in3(pipeFds[0]); + kj::AutoCloseFd out3(pipeFds[1]); + + { + AutoCloseFd sendFds[3] = { kj::mv(out1), kj::mv(out2), kj::mv(out3) }; + capPipe.ends[0]->writeWithFds("foo"_kj.asBytes(), nullptr, sendFds).wait(io.waitScope); + } + + { + char buffer[4]; + AutoCloseFd fdBuffer[2]; + auto result = capPipe.ends[1]->tryReadWithFds(buffer, 3, 3, fdBuffer, 2).wait(io.waitScope); + KJ_ASSERT(result.capCount == 2); + kj::FdOutputStream(fdBuffer[0].get()).write("bar", 3); + kj::FdOutputStream(fdBuffer[1].get()).write("baz", 3); + } + + // We want to carefully verify that out1, out2, and out3 were closed, without deadlocking if they + // weren't. So we manually set nonblocking mode and then issue read()s. + KJ_SYSCALL(fcntl(in1, F_SETFL, O_NONBLOCK)); + KJ_SYSCALL(fcntl(in2, F_SETFL, O_NONBLOCK)); + KJ_SYSCALL(fcntl(in3, F_SETFL, O_NONBLOCK)); + + char buffer[4]; + ssize_t n; + + // First we read "bar" from in1. + KJ_NONBLOCKING_SYSCALL(n = read(in1, buffer, 4)); + KJ_ASSERT(n == 3); + buffer[3] = '\0'; + KJ_ASSERT(kj::StringPtr(buffer) == "bar"); + + // Now it should be EOF. + KJ_NONBLOCKING_SYSCALL(n = read(in1, buffer, 4)); + if (n < 0) { + KJ_FAIL_ASSERT("out1 was not closed"); + } + KJ_ASSERT(n == 0); + + // Next we read "baz" from in2. + KJ_NONBLOCKING_SYSCALL(n = read(in2, buffer, 4)); + KJ_ASSERT(n == 3); + buffer[3] = '\0'; + KJ_ASSERT(kj::StringPtr(buffer) == "baz"); + + // Now it should be EOF. + KJ_NONBLOCKING_SYSCALL(n = read(in2, buffer, 4)); + if (n < 0) { + KJ_FAIL_ASSERT("out2 was not closed"); + } + KJ_ASSERT(n == 0); + + // Third pipe should have been closed implicitly because we didn't provide space to receive it. + KJ_NONBLOCKING_SYSCALL(n = read(in3, buffer, 4)); + if (n < 0) { + KJ_FAIL_ASSERT("out3 was not closed. This could indicate that your operating system kernel is " + "buggy and leaks file descriptors when an SCM_RIGHTS message is truncated. FreeBSD was " + "known to do this until late 2018, while MacOS still has this bug as of this writing in " + "2019. However, KJ works around the problem on those platforms. You need to enable the " + "same work-around for your OS -- search for 'SCM_RIGHTS' in src/kj/async-io-unix.c++."); + } + KJ_ASSERT(n == 0); +} + +#endif // !__aarch64__ + +#endif // !_WIN32 && !__CYGWIN__ TEST(AsyncIo, PipeThread) { auto ioContext = setupAsyncIo(); @@ -466,6 +1019,10 @@ TEST(AsyncIo, Udp) { } } +#if __APPLE__ +// On MacOS, `CMSG_SPACE(0)` triggers a bogus warning. +#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic" +#endif // See what happens if there's not enough space even for the cmsghdr. 
capacity.ancillary = CMSG_SPACE(0) - 8; recv1 = port1->makeReceiver(capacity); @@ -495,8 +1052,10 @@ TEST(AsyncIo, Udp) { TEST(AsyncIo, AbstractUnixSocket) { auto ioContext = setupAsyncIo(); auto& network = ioContext.provider->getNetwork(); + auto elapsedSinceEpoch = systemPreciseMonotonicClock().now() - kj::origin(); + auto address = kj::str("unix-abstract:foo", getpid(), elapsedSinceEpoch / kj::NANOSECONDS); - Own addr = network.parseAddress("unix-abstract:foo").wait(ioContext.waitScope); + Own addr = network.parseAddress(address).wait(ioContext.waitScope); Own listener = addr->listen(); // chdir proves no filesystem dependence. Test fails for regular unix socket @@ -681,7 +1240,7 @@ kj::Promise expectRead(kj::AsyncInputStream& in, kj::StringPtr expected) { })); } -class MockAsyncInputStream: public AsyncInputStream { +class MockAsyncInputStream final: public AsyncInputStream { public: MockAsyncInputStream(kj::ArrayPtr bytes, size_t blockSize) : bytes(bytes), blockSize(blockSize) {} @@ -914,7 +1473,8 @@ KJ_TEST("Userland pipe with limit") { } // Further writes throw and reads return EOF. - KJ_EXPECT_THROW_MESSAGE("abortRead() has been called", pipe.out->write("baz", 3).wait(ws)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "abortRead() has been called", pipe.out->write("baz", 3).wait(ws)); KJ_EXPECT(pipe.in->readAllText().wait(ws) == ""); } @@ -943,29 +1503,56 @@ KJ_TEST("Userland pipe pumpTo with limit") { } // Further writes throw. - KJ_EXPECT_THROW_MESSAGE("abortRead() has been called", pipe.out->write("baz", 3).wait(ws)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "abortRead() has been called", pipe.out->write("baz", 3).wait(ws)); } -KJ_TEST("Userland pipe gather write") { +KJ_TEST("Userland pipe pump into zero-limited pipe, no data to pump") { kj::EventLoop loop; WaitScope ws(loop); auto pipe = newOneWayPipe(); + auto pipe2 = newOneWayPipe(uint64_t(0)); + auto pumpPromise = KJ_ASSERT_NONNULL(pipe2.out->tryPumpFrom(*pipe.in)); - ArrayPtr parts[] = { "foo"_kj.asBytes(), "bar"_kj.asBytes() }; - auto promise = pipe.out->write(parts); - KJ_EXPECT(!promise.poll(ws)); - expectRead(*pipe.in, "foobar").wait(ws); - promise.wait(ws); - - auto promise2 = pipe.in->readAllText(); - KJ_EXPECT(!promise2.poll(ws)); - + expectRead(*pipe2.in, ""); pipe.out = nullptr; - KJ_EXPECT(promise2.wait(ws) == ""); + KJ_EXPECT(pumpPromise.wait(ws) == 0); } -KJ_TEST("Userland pipe gather write split on buffer boundary") { +KJ_TEST("Userland pipe pump into zero-limited pipe, data is pumped") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto pipe2 = newOneWayPipe(uint64_t(0)); + auto pumpPromise = KJ_ASSERT_NONNULL(pipe2.out->tryPumpFrom(*pipe.in)); + + expectRead(*pipe2.in, ""); + auto writePromise = pipe.out->write("foo", 3); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("abortRead() has been called", pumpPromise.wait(ws)); +} + +KJ_TEST("Userland pipe gather write") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + + ArrayPtr parts[] = { "foo"_kj.asBytes(), "bar"_kj.asBytes() }; + auto promise = pipe.out->write(parts); + KJ_EXPECT(!promise.poll(ws)); + expectRead(*pipe.in, "foobar").wait(ws); + promise.wait(ws); + + auto promise2 = pipe.in->readAllText(); + KJ_EXPECT(!promise2.poll(ws)); + + pipe.out = nullptr; + KJ_EXPECT(promise2.wait(ws) == ""); +} + +KJ_TEST("Userland pipe gather write split on buffer boundary") { kj::EventLoop loop; WaitScope ws(loop); @@ -1375,5 +1962,928 @@ KJ_TEST("Userland pipe pumpFrom EOF on abortRead()") { pipe2.out = nullptr; } 
+KJ_TEST("Userland pipe EOF fulfills pumpFrom promise") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto pipe2 = newOneWayPipe(); + auto pumpPromise = KJ_ASSERT_NONNULL(pipe2.out->tryPumpFrom(*pipe.in)); + + auto writePromise = pipe.out->write("foobar", 6); + KJ_EXPECT(!writePromise.poll(ws)); + auto pipe3 = newOneWayPipe(); + auto pumpPromise2 = pipe2.in->pumpTo(*pipe3.out); + KJ_EXPECT(!pumpPromise2.poll(ws)); + expectRead(*pipe3.in, "foobar").wait(ws); + writePromise.wait(ws); + + KJ_EXPECT(!pumpPromise.poll(ws)); + pipe.out = nullptr; + KJ_EXPECT(pumpPromise.wait(ws) == 6); + + KJ_EXPECT(!pumpPromise2.poll(ws)); + pipe2.out = nullptr; + KJ_EXPECT(pumpPromise2.wait(ws) == 6); +} + +KJ_TEST("Userland pipe tryPumpFrom to pumpTo for same amount fulfills simultaneously") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto pipe2 = newOneWayPipe(); + auto pumpPromise = KJ_ASSERT_NONNULL(pipe2.out->tryPumpFrom(*pipe.in, 6)); + + auto writePromise = pipe.out->write("foobar", 6); + KJ_EXPECT(!writePromise.poll(ws)); + auto pipe3 = newOneWayPipe(); + auto pumpPromise2 = pipe2.in->pumpTo(*pipe3.out, 6); + KJ_EXPECT(!pumpPromise2.poll(ws)); + expectRead(*pipe3.in, "foobar").wait(ws); + writePromise.wait(ws); + + KJ_EXPECT(pumpPromise.wait(ws) == 6); + KJ_EXPECT(pumpPromise2.wait(ws) == 6); +} + +KJ_TEST("Userland pipe multi-part write doesn't quit early") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + + auto readPromise = expectRead(*pipe.in, "foo"); + + kj::ArrayPtr pieces[2] = { "foobar"_kj.asBytes(), "baz"_kj.asBytes() }; + auto writePromise = pipe.out->write(pieces); + + readPromise.wait(ws); + KJ_EXPECT(!writePromise.poll(ws)); + expectRead(*pipe.in, "bar").wait(ws); + KJ_EXPECT(!writePromise.poll(ws)); + expectRead(*pipe.in, "baz").wait(ws); + writePromise.wait(ws); +} + +KJ_TEST("Userland pipe BlockedRead gets empty tryPumpFrom") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto pipe2 = newOneWayPipe(); + + // First start a read from the back end. + char buffer[4]; + auto readPromise = pipe2.in->tryRead(buffer, 1, 4); + + // Now arrange a pump between the pipes, using tryPumpFrom(). + auto pumpPromise = KJ_ASSERT_NONNULL(pipe2.out->tryPumpFrom(*pipe.in)); + + // Disconnect the front pipe, causing EOF on the pump. + pipe.out = nullptr; + + // The pump should have produced zero bytes. + KJ_EXPECT(pumpPromise.wait(ws) == 0); + + // The read is incomplete. + KJ_EXPECT(!readPromise.poll(ws)); + + // A subsequent write() completes the read. 
+ pipe2.out->write("foo", 3).wait(ws); + KJ_EXPECT(readPromise.wait(ws) == 3); + buffer[3] = '\0'; + KJ_EXPECT(kj::StringPtr(buffer, 3) == "foo"); +} + +constexpr static auto TEE_MAX_CHUNK_SIZE = 1 << 14; +// AsyncTee::MAX_CHUNK_SIZE, 16k as of this writing + +KJ_TEST("Userland tee") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto tee = newTee(kj::mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto writePromise = pipe.out->write("foobar", 6); + + expectRead(*left, "foobar").wait(ws); + writePromise.wait(ws); + expectRead(*right, "foobar").wait(ws); +} + +KJ_TEST("Userland tee concurrent read") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto tee = newTee(kj::mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + uint8_t leftBuf[6] = { 0 }; + uint8_t rightBuf[6] = { 0 }; + auto leftPromise = left->tryRead(leftBuf, 6, 6); + auto rightPromise = right->tryRead(rightBuf, 6, 6); + KJ_EXPECT(!leftPromise.poll(ws)); + KJ_EXPECT(!rightPromise.poll(ws)); + + pipe.out->write("foobar", 6).wait(ws); + + KJ_EXPECT(leftPromise.wait(ws) == 6); + KJ_EXPECT(rightPromise.wait(ws) == 6); + + KJ_EXPECT(memcmp(leftBuf, "foobar", 6) == 0); + KJ_EXPECT(memcmp(leftBuf, "foobar", 6) == 0); +} + +KJ_TEST("Userland tee cancel and restart read") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto tee = newTee(kj::mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto writePromise = pipe.out->write("foobar", 6); + + { + // Initiate a read and immediately cancel it. + uint8_t buf[6] = { 0 }; + auto promise = left->tryRead(buf, 6, 6); + } + + // Subsequent reads still see the full data. + expectRead(*left, "foobar").wait(ws); + writePromise.wait(ws); + expectRead(*right, "foobar").wait(ws); +} + +KJ_TEST("Userland tee cancel read and destroy branch then read other branch") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto tee = newTee(kj::mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto writePromise = pipe.out->write("foobar", 6); + + { + // Initiate a read and immediately cancel it. + uint8_t buf[6] = { 0 }; + auto promise = left->tryRead(buf, 6, 6); + } + + // And destroy the branch for good measure. + left = nullptr; + + // Subsequent reads on the other branch still see the full data. + expectRead(*right, "foobar").wait(ws); + writePromise.wait(ws); +} + +KJ_TEST("Userland tee subsequent other-branch reads are READY_NOW") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto tee = newTee(kj::mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + uint8_t leftBuf[6] = { 0 }; + auto leftPromise = left->tryRead(leftBuf, 6, 6); + // This is the first read, so there should NOT be buffered data. + KJ_EXPECT(!leftPromise.poll(ws)); + pipe.out->write("foobar", 6).wait(ws); + leftPromise.wait(ws); + KJ_EXPECT(memcmp(leftBuf, "foobar", 6) == 0); + + uint8_t rightBuf[6] = { 0 }; + auto rightPromise = right->tryRead(rightBuf, 6, 6); + // The left read promise was fulfilled, so there SHOULD be buffered data. 
+ KJ_EXPECT(rightPromise.poll(ws)); + rightPromise.wait(ws); + KJ_EXPECT(memcmp(rightBuf, "foobar", 6) == 0); +} + +KJ_TEST("Userland tee read EOF propagation") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + auto writePromise = pipe.out->write("foobar", 6); + auto tee = newTee(mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + // Lengthless pipe, so ... + KJ_EXPECT(left->tryGetLength() == nullptr); + KJ_EXPECT(right->tryGetLength() == nullptr); + + uint8_t leftBuf[7] = { 0 }; + auto leftPromise = left->tryRead(leftBuf, size(leftBuf), size(leftBuf)); + writePromise.wait(ws); + // Destroying the output side should force a short read. + pipe.out = nullptr; + + KJ_EXPECT(leftPromise.wait(ws) == 6); + KJ_EXPECT(memcmp(leftBuf, "foobar", 6) == 0); + + // And we should see a short read here, too. + uint8_t rightBuf[7] = { 0 }; + auto rightPromise = right->tryRead(rightBuf, size(rightBuf), size(rightBuf)); + KJ_EXPECT(rightPromise.wait(ws) == 6); + KJ_EXPECT(memcmp(rightBuf, "foobar", 6) == 0); + + // Further reads should all be short. + KJ_EXPECT(left->tryRead(leftBuf, 1, size(leftBuf)).wait(ws) == 0); + KJ_EXPECT(right->tryRead(rightBuf, 1, size(rightBuf)).wait(ws) == 0); +} + +KJ_TEST("Userland tee read exception propagation") { + kj::EventLoop loop; + WaitScope ws(loop); + + // Make a pipe expecting to read more than we're actually going to write. This will force a "pipe + // ended prematurely" exception when we destroy the output side early. + auto pipe = newOneWayPipe(7); + auto writePromise = pipe.out->write("foobar", 6); + auto tee = newTee(mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + // Test tryGetLength() while we're at it. + KJ_EXPECT(KJ_ASSERT_NONNULL(left->tryGetLength()) == 7); + KJ_EXPECT(KJ_ASSERT_NONNULL(right->tryGetLength()) == 7); + + uint8_t leftBuf[7] = { 0 }; + auto leftPromise = left->tryRead(leftBuf, 6, size(leftBuf)); + writePromise.wait(ws); + // Destroying the output side should force a fulfillment of the read (since we reached minBytes). + pipe.out = nullptr; + KJ_EXPECT(leftPromise.wait(ws) == 6); + KJ_EXPECT(memcmp(leftBuf, "foobar", 6) == 0); + + // The next read sees the exception. + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("pipe ended prematurely", + left->tryRead(leftBuf, 1, size(leftBuf)).ignoreResult().wait(ws)); + + // Test tryGetLength() here -- the unread branch still sees the original length value. + KJ_EXPECT(KJ_ASSERT_NONNULL(left->tryGetLength()) == 1); + KJ_EXPECT(KJ_ASSERT_NONNULL(right->tryGetLength()) == 7); + + // We should see the buffered data on the other side, even though we don't reach our minBytes. + uint8_t rightBuf[7] = { 0 }; + auto rightPromise = right->tryRead(rightBuf, size(rightBuf), size(rightBuf)); + KJ_EXPECT(rightPromise.wait(ws) == 6); + KJ_EXPECT(memcmp(rightBuf, "foobar", 6) == 0); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("pipe ended prematurely", + right->tryRead(rightBuf, 1, size(leftBuf)).ignoreResult().wait(ws)); + + // Further reads should all see the exception again. 
+ KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("pipe ended prematurely", + left->tryRead(leftBuf, 1, size(leftBuf)).ignoreResult().wait(ws)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("pipe ended prematurely", + right->tryRead(rightBuf, 1, size(leftBuf)).ignoreResult().wait(ws)); +} + +KJ_TEST("Userland tee read exception propagation w/ data loss") { + kj::EventLoop loop; + WaitScope ws(loop); + + // Make a pipe expecting to read more than we're actually going to write. This will force a "pipe + // ended prematurely" exception once the pipe sees a short read. + auto pipe = newOneWayPipe(7); + auto writePromise = pipe.out->write("foobar", 6); + auto tee = newTee(mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + uint8_t leftBuf[7] = { 0 }; + auto leftPromise = left->tryRead(leftBuf, 7, 7); + writePromise.wait(ws); + // Destroying the output side should force an exception, since we didn't reach our minBytes. + pipe.out = nullptr; + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "pipe ended prematurely", leftPromise.ignoreResult().wait(ws)); + + // And we should see a short read here, too. In fact, we shouldn't see anything: the short read + // above read all of the pipe's data, but then failed to buffer it because it encountered an + // exception. It buffered the exception, instead. + uint8_t rightBuf[7] = { 0 }; + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("pipe ended prematurely", + right->tryRead(rightBuf, 1, 1).ignoreResult().wait(ws)); +} + +KJ_TEST("Userland tee read into different buffer sizes") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto tee = newTee(heap("foo bar baz"_kj.asBytes(), 11)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + uint8_t leftBuf[5] = { 0 }; + uint8_t rightBuf[11] = { 0 }; + + auto leftPromise = left->tryRead(leftBuf, 5, 5); + auto rightPromise = right->tryRead(rightBuf, 11, 11); + + KJ_EXPECT(leftPromise.wait(ws) == 5); + KJ_EXPECT(rightPromise.wait(ws) == 11); +} + +KJ_TEST("Userland tee reads see max(minBytes...) and min(maxBytes...)") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto tee = newTee(heap("foo bar baz"_kj.asBytes(), 11)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + { + uint8_t leftBuf[5] = { 0 }; + uint8_t rightBuf[11] = { 0 }; + + // Subrange of another range. The smaller maxBytes should win. + auto leftPromise = left->tryRead(leftBuf, 3, 5); + auto rightPromise = right->tryRead(rightBuf, 1, 11); + + KJ_EXPECT(leftPromise.wait(ws) == 5); + KJ_EXPECT(rightPromise.wait(ws) == 5); + } + + { + uint8_t leftBuf[5] = { 0 }; + uint8_t rightBuf[11] = { 0 }; + + // Disjoint ranges. The larger minBytes should win. 
+ auto leftPromise = left->tryRead(leftBuf, 3, 5); + auto rightPromise = right->tryRead(rightBuf, 6, 11); + + KJ_EXPECT(leftPromise.wait(ws) == 5); + KJ_EXPECT(rightPromise.wait(ws) == 6); + + KJ_EXPECT(left->tryRead(leftBuf, 1, 2).wait(ws) == 1); + } +} + +KJ_TEST("Userland tee read stress test") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto bigText = strArray(kj::repeat("foo bar baz"_kj, 12345), ","); + + auto tee = newTee(heap(bigText.asBytes(), bigText.size())); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto leftBuffer = heapArray(bigText.size()); + + { + auto leftSlice = leftBuffer.slice(0, leftBuffer.size()); + while (leftSlice.size() > 0) { + for (size_t blockSize: { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59 }) { + if (leftSlice.size() == 0) break; + auto maxBytes = min(blockSize, leftSlice.size()); + auto amount = left->tryRead(leftSlice.begin(), 1, maxBytes).wait(ws); + leftSlice = leftSlice.slice(amount, leftSlice.size()); + } + } + } + + KJ_EXPECT(memcmp(leftBuffer.begin(), bigText.begin(), leftBuffer.size()) == 0); + KJ_EXPECT(right->readAllText().wait(ws) == bigText); +} + +KJ_TEST("Userland tee pump") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto bigText = strArray(kj::repeat("foo bar baz"_kj, 12345), ","); + + auto tee = newTee(heap(bigText.asBytes(), bigText.size())); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto leftPipe = newOneWayPipe(); + auto rightPipe = newOneWayPipe(); + + auto leftPumpPromise = left->pumpTo(*leftPipe.out, 7); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + + auto rightPumpPromise = right->pumpTo(*rightPipe.out); + // Neither are ready yet, because the left pump's backpressure has blocked the AsyncTee's pull + // loop until we read from leftPipe. + KJ_EXPECT(!leftPumpPromise.poll(ws)); + KJ_EXPECT(!rightPumpPromise.poll(ws)); + + expectRead(*leftPipe.in, "foo bar").wait(ws); + KJ_EXPECT(leftPumpPromise.wait(ws) == 7); + KJ_EXPECT(!rightPumpPromise.poll(ws)); + + // We should be able to read up to how far the left side pumped, and beyond. The left side will + // now have data in its buffer. + expectRead(*rightPipe.in, "foo bar baz,foo bar baz,foo").wait(ws); + + // Consume the left side buffer. + expectRead(*left, " baz,foo bar").wait(ws); + + // We can destroy the left branch entirely and the right branch will still see all data. + left = nullptr; + KJ_EXPECT(!rightPumpPromise.poll(ws)); + auto allTextPromise = rightPipe.in->readAllText(); + KJ_EXPECT(rightPumpPromise.wait(ws) == bigText.size()); + // Need to force an EOF in the right pipe to check the result. + rightPipe.out = nullptr; + KJ_EXPECT(allTextPromise.wait(ws) == bigText.slice(27)); +} + +KJ_TEST("Userland tee pump slows down reads") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto bigText = strArray(kj::repeat("foo bar baz"_kj, 12345), ","); + + auto tee = newTee(heap(bigText.asBytes(), bigText.size())); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto leftPipe = newOneWayPipe(); + auto leftPumpPromise = left->pumpTo(*leftPipe.out); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + + // The left pump will cause some data to be buffered on the right branch, which we can read. + auto rightExpectation0 = kj::str(bigText.slice(0, TEE_MAX_CHUNK_SIZE)); + expectRead(*right, rightExpectation0).wait(ws); + + // But the next right branch read is blocked by the left pipe's backpressure. 
+ auto rightExpectation1 = kj::str(bigText.slice(TEE_MAX_CHUNK_SIZE, TEE_MAX_CHUNK_SIZE + 10)); + auto rightPromise = expectRead(*right, rightExpectation1); + KJ_EXPECT(!rightPromise.poll(ws)); + + // The right branch read finishes when we relieve the pressure in the left pipe. + auto allTextPromise = leftPipe.in->readAllText(); + rightPromise.wait(ws); + KJ_EXPECT(leftPumpPromise.wait(ws) == bigText.size()); + leftPipe.out = nullptr; + KJ_EXPECT(allTextPromise.wait(ws) == bigText); +} + +KJ_TEST("Userland tee pump EOF propagation") { + kj::EventLoop loop; + WaitScope ws(loop); + + { + // EOF encountered by two pump operations. + auto pipe = newOneWayPipe(); + auto writePromise = pipe.out->write("foo bar", 7); + auto tee = newTee(mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto leftPipe = newOneWayPipe(); + auto rightPipe = newOneWayPipe(); + + // Pump the first bit, and block. + + auto leftPumpPromise = left->pumpTo(*leftPipe.out); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + auto rightPumpPromise = right->pumpTo(*rightPipe.out); + writePromise.wait(ws); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + KJ_EXPECT(!rightPumpPromise.poll(ws)); + + // Induce an EOF. We should see it propagated to both pump promises. + + pipe.out = nullptr; + + // Relieve backpressure. + auto leftAllPromise = leftPipe.in->readAllText(); + auto rightAllPromise = rightPipe.in->readAllText(); + KJ_EXPECT(leftPumpPromise.wait(ws) == 7); + KJ_EXPECT(rightPumpPromise.wait(ws) == 7); + + // Make sure we got the data on the pipes that were being pumped to. + KJ_EXPECT(!leftAllPromise.poll(ws)); + KJ_EXPECT(!rightAllPromise.poll(ws)); + leftPipe.out = nullptr; + rightPipe.out = nullptr; + KJ_EXPECT(leftAllPromise.wait(ws) == "foo bar"); + KJ_EXPECT(rightAllPromise.wait(ws) == "foo bar"); + } + + { + // EOF encountered by a read and pump operation. + auto pipe = newOneWayPipe(); + auto writePromise = pipe.out->write("foo bar", 7); + auto tee = newTee(mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto leftPipe = newOneWayPipe(); + auto rightPipe = newOneWayPipe(); + + // Pump one branch, read another. + + auto leftPumpPromise = left->pumpTo(*leftPipe.out); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + expectRead(*right, "foo bar").wait(ws); + writePromise.wait(ws); + uint8_t dummy = 0; + auto rightReadPromise = right->tryRead(&dummy, 1, 1); + + // Induce an EOF. We should see it propagated to both the read and pump promises. + + pipe.out = nullptr; + + // Relieve backpressure in the tee to see the EOF. + auto leftAllPromise = leftPipe.in->readAllText(); + KJ_EXPECT(leftPumpPromise.wait(ws) == 7); + KJ_EXPECT(rightReadPromise.wait(ws) == 0); + + // Make sure we got the data on the pipe that was being pumped to. + KJ_EXPECT(!leftAllPromise.poll(ws)); + leftPipe.out = nullptr; + KJ_EXPECT(leftAllPromise.wait(ws) == "foo bar"); + } +} + +KJ_TEST("Userland tee pump EOF on chunk boundary") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto bigText = strArray(kj::repeat("foo bar baz"_kj, 12345), ","); + + // Conjure an EOF right on the boundary of the tee's internal chunk. 
+ auto chunkText = kj::str(bigText.slice(0, TEE_MAX_CHUNK_SIZE)); + auto tee = newTee(heap(chunkText.asBytes(), chunkText.size())); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto leftPipe = newOneWayPipe(); + auto rightPipe = newOneWayPipe(); + + auto leftPumpPromise = left->pumpTo(*leftPipe.out); + auto rightPumpPromise = right->pumpTo(*rightPipe.out); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + KJ_EXPECT(!rightPumpPromise.poll(ws)); + + auto leftAllPromise = leftPipe.in->readAllText(); + auto rightAllPromise = rightPipe.in->readAllText(); + + // The pumps should see the EOF and stop. + KJ_EXPECT(leftPumpPromise.wait(ws) == TEE_MAX_CHUNK_SIZE); + KJ_EXPECT(rightPumpPromise.wait(ws) == TEE_MAX_CHUNK_SIZE); + + // Verify that we saw the data on the other end of the destination pipes. + leftPipe.out = nullptr; + rightPipe.out = nullptr; + KJ_EXPECT(leftAllPromise.wait(ws) == chunkText); + KJ_EXPECT(rightAllPromise.wait(ws) == chunkText); +} + +KJ_TEST("Userland tee pump read exception propagation") { + kj::EventLoop loop; + WaitScope ws(loop); + + { + // Exception encountered by two pump operations. + auto pipe = newOneWayPipe(14); + auto writePromise = pipe.out->write("foo bar", 7); + auto tee = newTee(mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto leftPipe = newOneWayPipe(); + auto rightPipe = newOneWayPipe(); + + // Pump the first bit, and block. + + auto leftPumpPromise = left->pumpTo(*leftPipe.out); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + auto rightPumpPromise = right->pumpTo(*rightPipe.out); + writePromise.wait(ws); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + KJ_EXPECT(!rightPumpPromise.poll(ws)); + + // Induce a read exception. We should see it propagated to both pump promises. + + pipe.out = nullptr; + + // Both promises must exist before the backpressure in the tee is relieved, and the tee pull + // loop actually sees the exception. + auto leftAllPromise = leftPipe.in->readAllText(); + auto rightAllPromise = rightPipe.in->readAllText(); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "pipe ended prematurely", leftPumpPromise.ignoreResult().wait(ws)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "pipe ended prematurely", rightPumpPromise.ignoreResult().wait(ws)); + + // Make sure we got the data on the destination pipes. + KJ_EXPECT(!leftAllPromise.poll(ws)); + KJ_EXPECT(!rightAllPromise.poll(ws)); + leftPipe.out = nullptr; + rightPipe.out = nullptr; + KJ_EXPECT(leftAllPromise.wait(ws) == "foo bar"); + KJ_EXPECT(rightAllPromise.wait(ws) == "foo bar"); + } + + { + // Exception encountered by a read and pump operation. + auto pipe = newOneWayPipe(14); + auto writePromise = pipe.out->write("foo bar", 7); + auto tee = newTee(mv(pipe.in)); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto leftPipe = newOneWayPipe(); + auto rightPipe = newOneWayPipe(); + + // Pump one branch, read another. + + auto leftPumpPromise = left->pumpTo(*leftPipe.out); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + expectRead(*right, "foo bar").wait(ws); + writePromise.wait(ws); + uint8_t dummy = 0; + auto rightReadPromise = right->tryRead(&dummy, 1, 1); + + // Induce a read exception. We should see it propagated to both the read and pump promises. + + pipe.out = nullptr; + + // Relieve backpressure in the tee to see the exceptions. 
+ auto leftAllPromise = leftPipe.in->readAllText(); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "pipe ended prematurely", leftPumpPromise.ignoreResult().wait(ws)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "pipe ended prematurely", rightReadPromise.ignoreResult().wait(ws)); + + // Make sure we got the data on the destination pipe. + KJ_EXPECT(!leftAllPromise.poll(ws)); + leftPipe.out = nullptr; + KJ_EXPECT(leftAllPromise.wait(ws) == "foo bar"); + } +} + +KJ_TEST("Userland tee pump write exception propagation") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto bigText = strArray(kj::repeat("foo bar baz"_kj, 12345), ","); + + auto tee = newTee(heap(bigText.asBytes(), bigText.size())); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + // Set up two pumps and let them block. + auto leftPipe = newOneWayPipe(); + auto rightPipe = newOneWayPipe(); + auto leftPumpPromise = left->pumpTo(*leftPipe.out); + auto rightPumpPromise = right->pumpTo(*rightPipe.out); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + KJ_EXPECT(!rightPumpPromise.poll(ws)); + + // Induce a write exception in the right branch pump. It should propagate to the right pump + // promise. + rightPipe.in = nullptr; + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "read end of pipe was aborted", rightPumpPromise.ignoreResult().wait(ws)); + + // The left pump promise does not see the right branch's write exception. + KJ_EXPECT(!leftPumpPromise.poll(ws)); + auto allTextPromise = leftPipe.in->readAllText(); + KJ_EXPECT(leftPumpPromise.wait(ws) == bigText.size()); + leftPipe.out = nullptr; + KJ_EXPECT(allTextPromise.wait(ws) == bigText); +} + +KJ_TEST("Userland tee pump cancellation implies write cancellation") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto text = "foo bar baz"_kj; + + auto tee = newTee(heap(text.asBytes(), text.size())); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + auto leftPipe = newOneWayPipe(); + auto leftPumpPromise = left->pumpTo(*leftPipe.out); + + // Arrange to block the left pump on its write operation. + expectRead(*right, "foo ").wait(ws); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + + // Then cancel the pump, while it's still blocked. + leftPumpPromise = nullptr; + // It should cancel its write operations, so it should now be safe to destroy the output stream to + // which it was pumping. + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + leftPipe.out = nullptr; + })) { + KJ_FAIL_EXPECT("write promises were not canceled", exception); + } +} + +KJ_TEST("Userland tee buffer size limit") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto text = "foo bar baz"_kj; + + { + // We can carefully read data to stay under our ridiculously low limit. + + auto tee = newTee(heap(text.asBytes(), text.size()), 2); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + expectRead(*left, "fo").wait(ws); + expectRead(*right, "foo ").wait(ws); + expectRead(*left, "o ba").wait(ws); + expectRead(*right, "bar ").wait(ws); + expectRead(*left, "r ba").wait(ws); + expectRead(*right, "baz").wait(ws); + expectRead(*left, "z").wait(ws); + } + + { + // Exceeding the limit causes both branches to see the exception after exhausting their buffers. 
+ + auto tee = newTee(heap(text.asBytes(), text.size()), 2); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + + expectRead(*left, "fo").wait(ws); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("tee buffer size limit exceeded", + expectRead(*left, "o").wait(ws)); + expectRead(*right, "fo").wait(ws); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("tee buffer size limit exceeded", + expectRead(*right, "o").wait(ws)); + } + + { + // We guarantee that two pumps started simultaneously will never exceed our buffer size limit. + + auto tee = newTee(heap(text.asBytes(), text.size()), 2); + auto left = kj::mv(tee.branches[0]); + auto right = kj::mv(tee.branches[1]); + auto leftPipe = kj::newOneWayPipe(); + auto rightPipe = kj::newOneWayPipe(); + + auto leftPumpPromise = left->pumpTo(*leftPipe.out); + auto rightPumpPromise = right->pumpTo(*rightPipe.out); + KJ_EXPECT(!leftPumpPromise.poll(ws)); + KJ_EXPECT(!rightPumpPromise.poll(ws)); + + uint8_t leftBuf[11] = { 0 }; + uint8_t rightBuf[11] = { 0 }; + + // The first read on the left pipe will succeed. + auto leftPromise = leftPipe.in->tryRead(leftBuf, 1, 11); + KJ_EXPECT(leftPromise.wait(ws) == 2); + KJ_EXPECT(memcmp(leftBuf, text.begin(), 2) == 0); + + // But the second will block until we relieve pressure on the right pipe. + leftPromise = leftPipe.in->tryRead(leftBuf + 2, 1, 9); + KJ_EXPECT(!leftPromise.poll(ws)); + + // Relieve the right pipe pressure ... + auto rightPromise = rightPipe.in->tryRead(rightBuf, 1, 11); + KJ_EXPECT(rightPromise.wait(ws) == 2); + KJ_EXPECT(memcmp(rightBuf, text.begin(), 2) == 0); + + // Now the second left pipe read will complete. + KJ_EXPECT(leftPromise.wait(ws) == 2); + KJ_EXPECT(memcmp(leftBuf, text.begin(), 4) == 0); + + // Leapfrog the left branch with the right. There should be 2 bytes in the buffer, so we can + // demand a total of 4. + rightPromise = rightPipe.in->tryRead(rightBuf + 2, 4, 9); + KJ_EXPECT(rightPromise.wait(ws) == 4); + KJ_EXPECT(memcmp(rightBuf, text.begin(), 6) == 0); + + // Leapfrog the right with the left. We demand the entire rest of the stream, so this should + // block. Note that a regular read for this amount on one of the tee branches directly would + // exceed our buffer size limit, but this one does not, because we have the pipe to regulate + // backpressure for us. + leftPromise = leftPipe.in->tryRead(leftBuf + 4, 7, 7); + KJ_EXPECT(!leftPromise.poll(ws)); + + // Ask for the entire rest of the stream on the right branch and wrap things up. + rightPromise = rightPipe.in->tryRead(rightBuf + 6, 5, 5); + + KJ_EXPECT(leftPromise.wait(ws) == 7); + KJ_EXPECT(memcmp(leftBuf, text.begin(), 11) == 0); + + KJ_EXPECT(rightPromise.wait(ws) == 5); + KJ_EXPECT(memcmp(rightBuf, text.begin(), 11) == 0); + } +} + +KJ_TEST("Userspace OneWayPipe whenWriteDisconnected()") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newOneWayPipe(); + + auto abortedPromise = pipe.out->whenWriteDisconnected(); + KJ_ASSERT(!abortedPromise.poll(ws)); + + pipe.in = nullptr; + + KJ_ASSERT(abortedPromise.poll(ws)); + abortedPromise.wait(ws); +} + +KJ_TEST("Userspace TwoWayPipe whenWriteDisconnected()") { + kj::EventLoop loop; + WaitScope ws(loop); + + auto pipe = newTwoWayPipe(); + + auto abortedPromise = pipe.ends[0]->whenWriteDisconnected(); + KJ_ASSERT(!abortedPromise.poll(ws)); + + pipe.ends[1] = nullptr; + + KJ_ASSERT(abortedPromise.poll(ws)); + abortedPromise.wait(ws); +} + +#if !_WIN32 // We don't currently support detecting disconnect with IOCP. 
+#if !__CYGWIN__ // TODO(someday): Figure out why whenWriteDisconnected() doesn't work on Cygwin. + +KJ_TEST("OS OneWayPipe whenWriteDisconnected()") { + auto io = setupAsyncIo(); + + auto pipe = io.provider->newOneWayPipe(); + + pipe.out->write("foo", 3).wait(io.waitScope); + auto abortedPromise = pipe.out->whenWriteDisconnected(); + KJ_ASSERT(!abortedPromise.poll(io.waitScope)); + + pipe.in = nullptr; + + KJ_ASSERT(abortedPromise.poll(io.waitScope)); + abortedPromise.wait(io.waitScope); +} + +KJ_TEST("OS TwoWayPipe whenWriteDisconnected()") { + auto io = setupAsyncIo(); + + auto pipe = io.provider->newTwoWayPipe(); + + pipe.ends[0]->write("foo", 3).wait(io.waitScope); + pipe.ends[1]->write("bar", 3).wait(io.waitScope); + + auto abortedPromise = pipe.ends[0]->whenWriteDisconnected(); + KJ_ASSERT(!abortedPromise.poll(io.waitScope)); + + pipe.ends[1] = nullptr; + + KJ_ASSERT(abortedPromise.poll(io.waitScope)); + abortedPromise.wait(io.waitScope); + + char buffer[4]; + KJ_ASSERT(pipe.ends[0]->tryRead(&buffer, 3, 3).wait(io.waitScope) == 3); + buffer[3] = '\0'; + KJ_EXPECT(buffer == "bar"_kj); + + // Note: Reading any further in pipe.ends[0] would throw "connection reset". +} + +KJ_TEST("import socket FD that's already broken") { + auto io = setupAsyncIo(); + + int fds[2]; + KJ_SYSCALL(socketpair(AF_UNIX, SOCK_STREAM, 0, fds)); + KJ_SYSCALL(write(fds[1], "foo", 3)); + KJ_SYSCALL(close(fds[1])); + + auto stream = io.lowLevelProvider->wrapSocketFd(fds[0], LowLevelAsyncIoProvider::TAKE_OWNERSHIP); + + auto abortedPromise = stream->whenWriteDisconnected(); + KJ_ASSERT(abortedPromise.poll(io.waitScope)); + abortedPromise.wait(io.waitScope); + + char buffer[4]; + KJ_ASSERT(stream->tryRead(&buffer, sizeof(buffer), sizeof(buffer)).wait(io.waitScope) == 3); + buffer[3] = '\0'; + KJ_EXPECT(buffer == "foo"_kj); +} + +#endif // !__CYGWIN__ +#endif // !_WIN32 + } // namespace } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-io-unix.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-io-unix.c++ index 4950930dcc0..551f4c44449 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-io-unix.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-io-unix.c++ @@ -51,6 +51,15 @@ #include #include +#if !defined(SO_PEERCRED) && defined(LOCAL_PEERCRED) +#include +#endif + +#if !defined(SOL_LOCAL) && (__FreeBSD__ || __DragonflyBSD__) +// On DragonFly or FreeBSD < 12.2 you're supposed to use 0 for SOL_LOCAL. 
+#define SOL_LOCAL 0 +#endif + namespace kj { namespace { @@ -135,12 +144,34 @@ public: virtual ~AsyncStreamFd() noexcept(false) {} Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { - return tryReadInternal(buffer, minBytes, maxBytes, 0); + return tryReadInternal(buffer, minBytes, maxBytes, nullptr, 0, {0,0}) + .then([](ReadResult r) { return r.byteCount; }); + } + + Promise tryReadWithFds(void* buffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) override { + return tryReadInternal(buffer, minBytes, maxBytes, fdBuffer, maxFds, {0,0}); + } + + Promise tryReadWithStreams( + void* buffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) override { + auto fdBuffer = kj::heapArray(maxStreams); + auto promise = tryReadInternal(buffer, minBytes, maxBytes, fdBuffer.begin(), maxStreams, {0,0}); + + return promise.then([this, fdBuffer = kj::mv(fdBuffer), streamBuffer] + (ReadResult result) mutable { + for (auto i: kj::zeroTo(result.capCount)) { + streamBuffer[i] = kj::heap(eventPort, fdBuffer[i].release(), + LowLevelAsyncIoProvider::TAKE_OWNERSHIP | LowLevelAsyncIoProvider::ALREADY_CLOEXEC); + } + return result; + }); } Promise write(const void* buffer, size_t size) override { - ssize_t writeResult; - KJ_NONBLOCKING_SYSCALL(writeResult = ::write(fd, buffer, size)) { + ssize_t n; + KJ_NONBLOCKING_SYSCALL(n = ::write(fd, buffer, size)) { // Error. // We can't "return kj::READY_NOW;" inside this block because it causes a memory leak due to @@ -154,28 +185,57 @@ public: return kj::READY_NOW; } - // A negative result means EAGAIN, which we can treat the same as having written zero bytes. - size_t n = writeResult < 0 ? 0 : writeResult; - - if (n == size) { + if (n < 0) { + // EAGAIN -- need to wait for writability and try again. + return observer.whenBecomesWritable().then([=]() { + return write(buffer, size); + }); + } else if (n == size) { + // All done. return READY_NOW; - } - - // Fewer than `size` bytes were written, therefore we must be out of buffer space. Wait until - // the fd becomes writable again. - buffer = reinterpret_cast(buffer) + n; - size -= n; - - return observer.whenBecomesWritable().then([=]() { + } else { + // Fewer than `size` bytes were written, but we CANNOT assume we're out of buffer space, as + // Linux is known to return partial reads/writes when interrupted by a signal -- yes, even + // for non-blocking operations. So, we'll need to write() again now, even though it will + // almost certainly fail with EAGAIN. See comments in the read path for more info. 
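+      // For example, if a 64 kB write() is interrupted after copying 100 bytes, we advance the
+      // pointer by those 100 bytes and call write() again; that second call will most likely
+      // return -1 with EAGAIN, which the n < 0 branch above turns into a whenBecomesWritable()
+      // wait before retrying.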
+ buffer = reinterpret_cast(buffer) + n; + size -= n; return write(buffer, size); - }); + } } Promise write(ArrayPtr> pieces) override { if (pieces.size() == 0) { - return writeInternal(nullptr, nullptr); + return writeInternal(nullptr, nullptr, nullptr); } else { - return writeInternal(pieces[0], pieces.slice(1, pieces.size())); + return writeInternal(pieces[0], pieces.slice(1, pieces.size()), nullptr); + } + } + + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) override { + return writeInternal(data, moreData, fds); + } + + Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) override { + auto fds = KJ_MAP(stream, streams) { + return downcast(*stream).fd; + }; + auto promise = writeInternal(data, moreData, fds); + return promise.attach(kj::mv(fds), kj::mv(streams)); + } + + Promise whenWriteDisconnected() override { + KJ_IF_MAYBE(p, writeDisconnectedPromise) { + return p->addBranch(); + } else { + auto fork = observer.whenWriteDisconnected().fork(); + auto result = fork.addBranch(); + writeDisconnectedPromise = kj::mv(fork); + return kj::mv(result); } } @@ -213,55 +273,13 @@ public: *length = socklen; } - kj::Promise>> tryReceiveStream() override { - return tryReceiveFdImpl>(); - } - - kj::Promise sendStream(Own stream) override { - auto downcasted = stream.downcast(); - auto promise = sendFd(downcasted->fd); - return promise.attach(kj::mv(downcasted)); - } - - kj::Promise> tryReceiveFd() override { - return tryReceiveFdImpl(); + kj::Maybe getFd() const override { + return fd; } - kj::Promise sendFd(int fdToSend) override { - struct msghdr msg; - struct iovec iov; - union { - struct cmsghdr cmsg; - char cmsgSpace[CMSG_LEN(sizeof(int))]; - }; - memset(&msg, 0, sizeof(msg)); - memset(&iov, 0, sizeof(iov)); - memset(cmsgSpace, 0, sizeof(cmsgSpace)); - - char c = 0; - iov.iov_base = &c; - iov.iov_len = 1; - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - msg.msg_control = &cmsg; - msg.msg_controllen = sizeof(cmsgSpace); - - cmsg.cmsg_len = sizeof(cmsgSpace); - cmsg.cmsg_level = SOL_SOCKET; - cmsg.cmsg_type = SCM_RIGHTS; - *reinterpret_cast(CMSG_DATA(&cmsg)) = fdToSend; - - ssize_t n; - KJ_NONBLOCKING_SYSCALL(n = sendmsg(fd, &msg, 0)); - if (n < 0) { - return observer.whenBecomesWritable().then([this,fdToSend]() { - return sendFd(fdToSend); - }); - } else { - KJ_ASSERT(n == 1); - return kj::READY_NOW; - } + void registerAncillaryMessageHandler( + kj::Function)> fn) override { + ancillaryMsgCallback = kj::mv(fn); } Promise waitConnected() { @@ -290,23 +308,162 @@ public: private: UnixEventPort& eventPort; UnixEventPort::FdObserver observer; + Maybe> writeDisconnectedPromise; + Maybe)>> ancillaryMsgCallback; - Promise tryReadInternal(void* buffer, size_t minBytes, size_t maxBytes, - size_t alreadyRead) { + Promise tryReadInternal(void* buffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds, + ReadResult alreadyRead) { // `alreadyRead` is the number of bytes we have already received via previous reads -- minBytes, // maxBytes, and buffer have already been adjusted to account for them, but this count must // be included in the final return value. ssize_t n; - KJ_NONBLOCKING_SYSCALL(n = ::read(fd, buffer, maxBytes)) { - // Error. + if (maxFds == 0 && ancillaryMsgCallback == nullptr) { + KJ_NONBLOCKING_SYSCALL(n = ::read(fd, buffer, maxBytes)) { + // Error. 
+ + // We can't "return kj::READY_NOW;" inside this block because it causes a memory leak due to + // a bug that exists in both Clang and GCC: + // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33799 + // http://llvm.org/bugs/show_bug.cgi?id=12286 + goto error; + } + } else { + struct msghdr msg; + memset(&msg, 0, sizeof(msg)); + + struct iovec iov; + memset(&iov, 0, sizeof(iov)); + iov.iov_base = buffer; + iov.iov_len = maxBytes; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + // Allocate space to receive a cmsg. + size_t msgBytes; + if (ancillaryMsgCallback == nullptr) { +#if __APPLE__ || __FreeBSD__ + // Until very recently (late 2018 / early 2019), FreeBSD suffered from a bug in which when + // an SCM_RIGHTS message was truncated on delivery, it would not close the FDs that weren't + // delivered -- they would simply leak: https://bugs.freebsd.org/131876 + // + // My testing indicates that MacOS has this same bug as of today (April 2019). I don't know + // if they plan to fix it or are even aware of it. + // + // To handle both cases, we will always provide space to receive 512 FDs. Hopefully, this is + // greater than the maximum number of FDs that these kernels will transmit in one message + // PLUS enough space for any other ancillary messages that could be sent before the + // SCM_RIGHTS message to push it back in the buffer. I couldn't find any firm documentation + // on these limits, though -- I only know that Linux is limited to 253, and I saw a hint in + // a comment in someone else's application that suggested FreeBSD is the same. Hopefully, + // then, this is sufficient to prevent attacks. But if not, there's nothing more we can do; + // it's really up to the kernel to fix this. + msgBytes = CMSG_SPACE(sizeof(int) * 512); +#else + msgBytes = CMSG_SPACE(sizeof(int) * maxFds); +#endif + } else { + // If we want room for ancillary messages instead of or in addition to FDs, just use the + // same amount of cushion as in the MacOS/FreeBSD case above. + // Someday we may want to allow customization here, but there's no immediate use for it. + msgBytes = CMSG_SPACE(sizeof(int) * 512); + } - // We can't "return kj::READY_NOW;" inside this block because it causes a memory leak due to - // a bug that exists in both Clang and GCC: - // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33799 - // http://llvm.org/bugs/show_bug.cgi?id=12286 - goto error; + // On Linux, CMSG_SPACE will align to a word-size boundary, but on Mac it always aligns to a + // 32-bit boundary. I guess aligning to 32 bits helps avoid the problem where you + // surprisingly end up with space for two file descriptors when you only wanted one. However, + // cmsghdr's preferred alignment is word-size (it contains a size_t). If we stack-allocate + // the buffer, we need to make sure it is aligned properly (maybe not on x64, but maybe on + // other platforms), so we want to allocate an array of words (we use void*). So... we use + // CMSG_SPACE() and then additionally round up to deal with Mac. + size_t msgWords = (msgBytes + sizeof(void*) - 1) / sizeof(void*); + KJ_STACK_ARRAY(void*, cmsgSpace, msgWords, 16, 256); + auto cmsgBytes = cmsgSpace.asBytes(); + memset(cmsgBytes.begin(), 0, cmsgBytes.size()); + msg.msg_control = cmsgBytes.begin(); + msg.msg_controllen = msgBytes; + +#ifdef MSG_CMSG_CLOEXEC + static constexpr int RECVMSG_FLAGS = MSG_CMSG_CLOEXEC; +#else + static constexpr int RECVMSG_FLAGS = 0; +#endif + + KJ_NONBLOCKING_SYSCALL(n = ::recvmsg(fd, &msg, RECVMSG_FLAGS)) { + // Error. 
+ + // We can't "return kj::READY_NOW;" inside this block because it causes a memory leak due to + // a bug that exists in both Clang and GCC: + // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33799 + // http://llvm.org/bugs/show_bug.cgi?id=12286 + goto error; + } + + if (n >= 0) { + // Process all messages. + // + // WARNING DANGER: We have to be VERY careful not to miss a file descriptor here, because + // if we do, then that FD will never be closed, and a malicious peer could exploit this to + // fill up our FD table, creating a DoS attack. Some things to keep in mind: + // - CMSG_SPACE() could have rounded up the space for alignment purposes, and this could + // mean we permitted the kernel to deliver more file descriptors than `maxFds`. We need + // to close the extras. + // - We can receive multiple ancillary messages at once. In particular, there is also + // SCM_CREDENTIALS. The sender decides what to send. They could send SCM_CREDENTIALS + // first followed by SCM_RIGHTS. We need to make sure we see both. + size_t nfds = 0; + size_t spaceLeft = msg.msg_controllen; + Vector ancillaryMessages; + for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + cmsg != nullptr; cmsg = CMSG_NXTHDR(&msg, cmsg)) { + if (spaceLeft >= CMSG_LEN(0) && + cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { + // Some operating systems (like MacOS) do not adjust csmg_len when the message is + // truncated. We must do so ourselves or risk overrunning the buffer. + auto len = kj::min(cmsg->cmsg_len, spaceLeft); + auto data = arrayPtr(reinterpret_cast(CMSG_DATA(cmsg)), + (len - CMSG_LEN(0)) / sizeof(int)); + kj::Vector trashFds; + for (auto fd: data) { + kj::AutoCloseFd ownFd(fd); + if (nfds < maxFds) { + fdBuffer[nfds++] = kj::mv(ownFd); + } else { + trashFds.add(kj::mv(ownFd)); + } + } + } else if (spaceLeft >= CMSG_LEN(0) && ancillaryMsgCallback != nullptr) { + auto len = kj::min(cmsg->cmsg_len, spaceLeft); + auto data = ArrayPtr(CMSG_DATA(cmsg), len - CMSG_LEN(0)); + ancillaryMessages.add(cmsg->cmsg_level, cmsg->cmsg_type, data); + } + + if (spaceLeft >= CMSG_LEN(0) && spaceLeft >= cmsg->cmsg_len) { + spaceLeft -= cmsg->cmsg_len; + } else { + spaceLeft = 0; + } + } + +#ifndef MSG_CMSG_CLOEXEC + for (size_t i = 0; i < nfds; i++) { + setCloseOnExec(fdBuffer[i]); + } +#endif + + if (ancillaryMessages.size() > 0) { + KJ_IF_MAYBE(fn, ancillaryMsgCallback) { + (*fn)(ancillaryMessages.asPtr()); + } + } + + alreadyRead.capCount += nfds; + fdBuffer += nfds; + maxFds -= nfds; + } } + if (false) { error: return alreadyRead; @@ -315,51 +472,41 @@ private: if (n < 0) { // Read would block. return observer.whenBecomesReadable().then([=]() { - return tryReadInternal(buffer, minBytes, maxBytes, alreadyRead); + return tryReadInternal(buffer, minBytes, maxBytes, fdBuffer, maxFds, alreadyRead); }); } else if (n == 0) { // EOF -OR- maxBytes == 0. return alreadyRead; } else if (implicitCast(n) >= minBytes) { // We read enough to stop here. - return alreadyRead + n; + alreadyRead.byteCount += n; + return alreadyRead; } else { // The kernel returned fewer bytes than we asked for (and fewer than we need). buffer = reinterpret_cast(buffer) + n; minBytes -= n; maxBytes -= n; - alreadyRead += n; - - KJ_IF_MAYBE(atEnd, observer.atEndHint()) { - if (*atEnd) { - // We've already received an indication that the next read() will return EOF, so there's - // nothing to wait for. 
- return alreadyRead; - } else { - // As of the last time the event queue was checked, the kernel reported that we were - // *not* at the end of the stream. It's unlikely that this has changed in the short time - // it took to handle the event, therefore calling read() now will almost certainly fail - // with EAGAIN. Moreover, since EOF had not been received as of the last check, we know - // that even if it was received since then, whenBecomesReadable() will catch that. So, - // let's go ahead and skip calling read() here and instead go straight to waiting for - // more input. - return observer.whenBecomesReadable().then([=]() { - return tryReadInternal(buffer, minBytes, maxBytes, alreadyRead); - }); - } - } else { - // The kernel has not indicated one way or the other whether we are likely to be at EOF. - // In this case we *must* keep calling read() until we either get a return of zero or - // EAGAIN. - return tryReadInternal(buffer, minBytes, maxBytes, alreadyRead); - } + alreadyRead.byteCount += n; + + // According to David Klempner, who works on Stubby at Google, we sadly CANNOT assume that + // we've consumed the whole read buffer here. If a signal is delivered in the middle of a + // read() -- yes, even a non-blocking read -- it can cause the kernel to return a partial + // result, with data still in the buffer. + // https://bugzilla.kernel.org/show_bug.cgi?id=199131 + // https://twitter.com/CaptainSegfault/status/1112622245531144194 + // + // Unfortunately, we have no choice but to issue more read()s until it either tells us EOF + // or EAGAIN. We used to have an optimization here using observer.atEndHint() (when it is + // non-null) to avoid a redundant call to read(). Alas... + return tryReadInternal(buffer, minBytes, maxBytes, fdBuffer, maxFds, alreadyRead); } } Promise writeInternal(ArrayPtr firstPiece, - ArrayPtr> morePieces) { - const size_t iovmax = kj::miniposix::iovMax(1 + morePieces.size()); + ArrayPtr> morePieces, + ArrayPtr fds) { + const size_t iovmax = kj::miniposix::iovMax(); // If there are more than IOV_MAX pieces, we'll only write the first IOV_MAX for now, and // then we'll loop later. KJ_STACK_ARRAY(struct iovec, iov, kj::min(1 + morePieces.size(), iovmax), 16, 128); @@ -375,23 +522,87 @@ private: iovTotal += iov[i].iov_len; } - ssize_t writeResult; - KJ_NONBLOCKING_SYSCALL(writeResult = ::writev(fd, iov.begin(), iov.size())) { - // Error. + if (iovTotal == 0) { + KJ_REQUIRE(fds.size() == 0, "can't write FDs without bytes"); + return kj::READY_NOW; + } - // We can't "return kj::READY_NOW;" inside this block because it causes a memory leak due to - // a bug that exists in both Clang and GCC: - // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33799 - // http://llvm.org/bugs/show_bug.cgi?id=12286 - goto error; + ssize_t n; + if (fds.size() == 0) { + KJ_NONBLOCKING_SYSCALL(n = ::writev(fd, iov.begin(), iov.size()), iovTotal, iov.size()) { + // Error. + + // We can't "return kj::READY_NOW;" inside this block because it causes a memory leak due to + // a bug that exists in both Clang and GCC: + // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33799 + // http://llvm.org/bugs/show_bug.cgi?id=12286 + goto error; + } + } else { + struct msghdr msg; + memset(&msg, 0, sizeof(msg)); + msg.msg_iov = iov.begin(); + msg.msg_iovlen = iov.size(); + + // Allocate space to send a cmsg. + size_t msgBytes = CMSG_SPACE(sizeof(int) * fds.size()); + // On Linux, CMSG_SPACE will align to a word-size boundary, but on Mac it always aligns to a + // 32-bit boundary. 
I guess aligning to 32 bits helps avoid the problem where you + // surprisingly end up with space for two file descriptors when you only wanted one. However, + // cmsghdr's preferred alignment is word-size (it contains a size_t). If we stack-allocate + // the buffer, we need to make sure it is aligned properly (maybe not on x64, but maybe on + // other platforms), so we want to allocate an array of words (we use void*). So... we use + // CMSG_SPACE() and then additionally round up to deal with Mac. + size_t msgWords = (msgBytes + sizeof(void*) - 1) / sizeof(void*); + KJ_STACK_ARRAY(void*, cmsgSpace, msgWords, 16, 256); + auto cmsgBytes = cmsgSpace.asBytes(); + memset(cmsgBytes.begin(), 0, cmsgBytes.size()); + msg.msg_control = cmsgBytes.begin(); + msg.msg_controllen = msgBytes; + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + cmsg->cmsg_len = CMSG_LEN(sizeof(int) * fds.size()); + memcpy(CMSG_DATA(cmsg), fds.begin(), fds.asBytes().size()); + + KJ_NONBLOCKING_SYSCALL(n = ::sendmsg(fd, &msg, 0)) { + // Error. + + // We can't "return kj::READY_NOW;" inside this block because it causes a memory leak due to + // a bug that exists in both Clang and GCC: + // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33799 + // http://llvm.org/bugs/show_bug.cgi?id=12286 + goto error; + } } + if (false) { error: return kj::READY_NOW; } - // A negative result means EAGAIN, which we can treat the same as having written zero bytes. - size_t n = writeResult < 0 ? 0 : writeResult; + if (n < 0) { + // Got EAGAIN. Nothing was written. + return observer.whenBecomesWritable().then([=]() { + return writeInternal(firstPiece, morePieces, fds); + }); + } else if (n == 0) { + // Why would a sendmsg() with a non-empty message ever return 0 when writing to a stream + // socket? If there's no room in the send buffer, it should fail with EAGAIN. If the + // connection is closed, it should fail with EPIPE. Various documents and forum posts around + // the internet claim this can happen but no one seems to know when. My guess is it can only + // happen if we try to send an empty message -- which we didn't. So I think this is + // impossible. If it is possible, we need to figure out how to correctly handle it, which + // depends on what caused it. + // + // Note in particular that if 0 is a valid return here, and we sent an SCM_RIGHTS message, + // we need to know whether the message was sent or not, in order to decide whether to retry + // sending it! + KJ_FAIL_ASSERT("non-empty sendmsg() returned 0"); + } + + // Non-zero bytes were written. This also implies that *all* FDs were written. // Discard all data that was written, then issue a new write for what's left (if any). for (;;) { @@ -402,12 +613,12 @@ private: if (iovTotal == 0) { // Oops, what actually happened is that we hit the IOV_MAX limit. Don't wait. - return writeInternal(firstPiece, morePieces); + return writeInternal(firstPiece, morePieces, nullptr); } - return observer.whenBecomesWritable().then([=]() { - return writeInternal(firstPiece, morePieces); - }); + // As with read(), we cannot assume that a short write() really means the write buffer is + // full (see comments in the read path above). We have to write again. + return writeInternal(firstPiece, morePieces, nullptr); } else if (morePieces.size() == 0) { // First piece was fully-consumed and there are no more pieces, so we're done. 
KJ_DASSERT(n == firstPiece.size(), n); @@ -421,71 +632,6 @@ private: } } } - - template - kj::Promise> tryReceiveFdImpl() { - struct msghdr msg; - memset(&msg, 0, sizeof(msg)); - - struct iovec iov; - memset(&iov, 0, sizeof(iov)); - char c; - iov.iov_base = &c; - iov.iov_len = 1; - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - // Allocate space to receive a cmsg. - union { - struct cmsghdr cmsg; - char cmsgSpace[CMSG_SPACE(sizeof(int))]; - }; - msg.msg_control = &cmsg; - msg.msg_controllen = sizeof(cmsgSpace); - -#ifdef MSG_CMSG_CLOEXEC - int recvmsgFlags = MSG_CMSG_CLOEXEC; -#else - int recvmsgFlags = 0; -#endif - - ssize_t n; - KJ_NONBLOCKING_SYSCALL(n = recvmsg(fd, &msg, recvmsgFlags)); - if (n < 0) { - return observer.whenBecomesReadable().then([this]() { - return tryReceiveFdImpl(); - }); - } else if (n == 0) { - return kj::Maybe(nullptr); - } else { - KJ_REQUIRE(msg.msg_controllen >= sizeof(cmsg), - "expected to receive FD over socket; received data instead"); - - // We expect an SCM_RIGHTS message with a single FD. - KJ_REQUIRE(cmsg.cmsg_level == SOL_SOCKET); - KJ_REQUIRE(cmsg.cmsg_type == SCM_RIGHTS); - KJ_REQUIRE(cmsg.cmsg_len == CMSG_LEN(sizeof(int))); - - int receivedFd; - memcpy(&receivedFd, CMSG_DATA(&cmsg), sizeof(receivedFd)); - return kj::Maybe(wrapFd(receivedFd, (T*)nullptr)); - } - } - - AutoCloseFd wrapFd(int newFd, AutoCloseFd*) { - auto result = AutoCloseFd(newFd); -#ifndef MSG_CMSG_CLOEXEC - setCloseOnExec(result); -#endif - return result; - } - Own wrapFd(int newFd, Own*) { - return kj::heap(eventPort, newFd, -#ifdef MSG_CMSG_CLOEXEC - LowLevelAsyncIoProvider::ALREADY_CLOEXEC | -#endif - LowLevelAsyncIoProvider::TAKE_OWNERSHIP); - } }; // ======================================================================================= @@ -781,6 +927,10 @@ public: return filter.shouldAllowParse(&addr.generic, addrlen); } + kj::Own getIdentity(LowLevelAsyncIoProvider& llaiop, + LowLevelAsyncIoProvider::NetworkFilter& filter, + AsyncIoStream& stream) const; + private: SocketAddress() { // We need to memset the whole object 0 otherwise Valgrind gets unhappy when we write it to a @@ -952,12 +1102,21 @@ Promise> SocketAddress::lookupHost( class FdConnectionReceiver final: public ConnectionReceiver, public OwnedFileDescriptor { public: - FdConnectionReceiver(UnixEventPort& eventPort, int fd, + FdConnectionReceiver(LowLevelAsyncIoProvider& lowLevel, + UnixEventPort& eventPort, int fd, LowLevelAsyncIoProvider::NetworkFilter& filter, uint flags) - : OwnedFileDescriptor(fd, flags), eventPort(eventPort), filter(filter), + : OwnedFileDescriptor(fd, flags), lowLevel(lowLevel), eventPort(eventPort), filter(filter), observer(eventPort, fd, UnixEventPort::FdObserver::OBSERVE_READ) {} Promise> accept() override { + return acceptImpl(false).then([](AuthenticatedStream&& a) { return kj::mv(a.stream); }); + } + + Promise acceptAuthenticated() override { + return acceptImpl(true); + } + + Promise acceptImpl(bool authenticated) { int newFd; struct sockaddr_storage addr; @@ -972,12 +1131,36 @@ public: #endif if (newFd >= 0) { + kj::AutoCloseFd ownFd(newFd); if (!filter.shouldAllow(reinterpret_cast(&addr), addrlen)) { - // Drop disallowed address. - close(newFd); - return accept(); + // Ignore disallowed address. + return acceptImpl(authenticated); } else { - return Own(heap(eventPort, newFd, NEW_FD_FLAGS)); + // TODO(perf): As a hack for the 0.4 release we are always setting + // TCP_NODELAY because Nagle's algorithm pretty much kills Cap'n Proto's + // RPC protocol. 
Later, we should extend the interface to provide more + // control over this. Perhaps write() should have a flag which + // specifies whether to pass MSG_MORE. + int one = 1; + KJ_SYSCALL_HANDLE_ERRORS(::setsockopt( + ownFd.get(), IPPROTO_TCP, TCP_NODELAY, (char*)&one, sizeof(one))) { + case EOPNOTSUPP: + case ENOPROTOOPT: // (returned for AF_UNIX in cygwin) +#if __FreeBSD__ + case EINVAL: // (returned for AF_UNIX in FreeBSD) +#endif + break; + default: + KJ_FAIL_SYSCALL("setsocketopt(IPPROTO_TCP, TCP_NODELAY)", error); + } + + AuthenticatedStream result; + result.stream = heap(eventPort, ownFd.release(), NEW_FD_FLAGS); + if (authenticated) { + result.peerIdentity = SocketAddress(reinterpret_cast(&addr), addrlen) + .getIdentity(lowLevel, filter, *result.stream); + } + return kj::mv(result); } } else { int error = errno; @@ -988,8 +1171,8 @@ public: case EWOULDBLOCK: #endif // Not ready yet. - return observer.whenBecomesReadable().then([this]() { - return accept(); + return observer.whenBecomesReadable().then([this,authenticated]() { + return acceptImpl(authenticated); }); case EINTR: @@ -1028,8 +1211,14 @@ public: void setsockopt(int level, int option, const void* value, uint length) override { KJ_SYSCALL(::setsockopt(fd, level, option, value, length)); } + void getsockname(struct sockaddr* addr, uint* length) override { + socklen_t socklen = *length; + KJ_SYSCALL(::getsockname(fd, addr, &socklen)); + *length = socklen; + } public: + LowLevelAsyncIoProvider& lowLevel; UnixEventPort& eventPort; LowLevelAsyncIoProvider::NetworkFilter& filter; UnixEventPort::FdObserver observer; @@ -1127,7 +1316,7 @@ public: } Own wrapListenSocketFd( int fd, NetworkFilter& filter, uint flags = 0) override { - return heap(eventPort, fd, filter, flags); + return heap(*this, eventPort, fd, filter, flags); } Own wrapDatagramSocketFd( int fd, NetworkFilter& filter, uint flags = 0) override { @@ -1155,7 +1344,14 @@ public: Promise> connect() override { auto addrsCopy = heapArray(addrs.asPtr()); - auto promise = connectImpl(lowLevel, filter, addrsCopy); + auto promise = connectImpl(lowLevel, filter, addrsCopy, false); + return promise.attach(kj::mv(addrsCopy)) + .then([](AuthenticatedStream&& a) { return kj::mv(a.stream); }); + } + + Promise connectAuthenticated() override { + auto addrsCopy = heapArray(addrs.asPtr()); + auto promise = connectImpl(lowLevel, filter, addrsCopy, true); return promise.attach(kj::mv(addrsCopy)); } @@ -1227,10 +1423,11 @@ private: Array addrs; uint counter = 0; - static Promise> connectImpl( + static Promise connectImpl( LowLevelAsyncIoProvider& lowLevel, LowLevelAsyncIoProvider::NetworkFilter& filter, - ArrayPtr addrs) { + ArrayPtr addrs, + bool authenticated) { KJ_ASSERT(addrs.size() > 0); return kj::evalNow([&]() -> Promise> { @@ -1241,14 +1438,21 @@ private: return lowLevel.wrapConnectingSocketFd( fd, addrs[0].getRaw(), addrs[0].getRawSize(), NEW_FD_FLAGS); } - }).then([](Own&& stream) -> Promise> { + }).then([&lowLevel,&filter,addrs,authenticated](Own&& stream) + -> Promise { // Success, pass along. - return kj::mv(stream); - }, [&lowLevel,&filter,addrs](Exception&& exception) mutable -> Promise> { + AuthenticatedStream result; + result.stream = kj::mv(stream); + if (authenticated) { + result.peerIdentity = addrs[0].getIdentity(lowLevel, filter, *result.stream); + } + return kj::mv(result); + }, [&lowLevel,&filter,addrs,authenticated](Exception&& exception) mutable + -> Promise { // Connect failed. if (addrs.size() > 1) { // Try the next address instead. 
- return connectImpl(lowLevel, filter, addrs.slice(1, addrs.size())); + return connectImpl(lowLevel, filter, addrs.slice(1, addrs.size()), authenticated); } else { // No more addresses to try, so propagate the exception. return kj::mv(exception); @@ -1257,6 +1461,66 @@ private: } }; +kj::Own SocketAddress::getIdentity(kj::LowLevelAsyncIoProvider& llaiop, + LowLevelAsyncIoProvider::NetworkFilter& filter, + AsyncIoStream& stream) const { + switch (addr.generic.sa_family) { + case AF_INET: + case AF_INET6: { + auto builder = kj::heapArrayBuilder(1); + builder.add(*this); + return NetworkPeerIdentity::newInstance( + kj::heap(llaiop, filter, builder.finish())); + } + case AF_UNIX: { + LocalPeerIdentity::Credentials result; + + // There is little documentation on what happens when the uid/pid can't be obtained, but I've + // seen vague references on the internet saying that a PID of 0 and a UID of uid_t(-1) are used + // as invalid values. + +// OpenBSD defines SO_PEERCRED but uses a different interface for it +// hence we're falling back to LOCAL_PEERCRED +#if defined(SO_PEERCRED) && !__OpenBSD__ + struct ucred creds; + uint length = sizeof(creds); + stream.getsockopt(SOL_SOCKET, SO_PEERCRED, &creds, &length); + if (creds.pid > 0) { + result.pid = creds.pid; + } + if (creds.uid != static_cast(-1)) { + result.uid = creds.uid; + } + +#elif defined(LOCAL_PEERCRED) + // MacOS / FreeBSD / OpenBSD + struct xucred creds; + uint length = sizeof(creds); + stream.getsockopt(SOL_LOCAL, LOCAL_PEERCRED, &creds, &length); + KJ_ASSERT(length == sizeof(creds)); + if (creds.cr_uid != static_cast(-1)) { + result.uid = creds.cr_uid; + } + +#if defined(LOCAL_PEERPID) + // MacOS only? + pid_t pid; + length = sizeof(pid); + stream.getsockopt(SOL_LOCAL, LOCAL_PEERPID, &pid, &length); + KJ_ASSERT(length == sizeof(pid)); + if (pid > 0) { + result.pid = pid; + } +#endif +#endif + + return LocalPeerIdentity::newInstance(result); + } + default: + return UnknownPeerIdentity::newInstance(); + } +} + class SocketNetwork final: public Network { public: explicit SocketNetwork(LowLevelAsyncIoProvider& lowLevel): lowLevel(lowLevel) {} @@ -1320,7 +1584,7 @@ Promise DatagramPortImpl::send( msg.msg_name = const_cast(implicitCast(addr.getRaw())); msg.msg_namelen = addr.getRawSize(); - const size_t iovmax = kj::miniposix::iovMax(pieces.size()); + const size_t iovmax = kj::miniposix::iovMax(); KJ_STACK_ARRAY(struct iovec, iov, kj::min(pieces.size(), iovmax), 16, 64); for (size_t i: kj::indices(pieces)) { @@ -1333,7 +1597,7 @@ Promise DatagramPortImpl::send( // Too many pieces, but we can't use multiple syscalls because they'd send separate // datagrams. We'll have to copy the trailing pieces into a temporary array. // - // TODO(perf): On Linux we could use multiple syscalls via MSG_MORE. + // TODO(perf): On Linux we could use multiple syscalls via MSG_MORE or sendmsg/sendmmsg. size_t extraSize = 0; for (size_t i = iovmax - 1; i < pieces.size(); i++) { extraSize += pieces[i].size(); @@ -1344,8 +1608,8 @@ Promise DatagramPortImpl::send( memcpy(extra.begin() + extraSize, pieces[i].begin(), pieces[i].size()); extraSize += pieces[i].size(); } - iov[iovmax - 1].iov_base = extra.begin(); - iov[iovmax - 1].iov_len = extra.size(); + iov.back().iov_base = extra.begin(); + iov.back().iov_len = extra.size(); } msg.msg_iov = iov.begin(); @@ -1419,6 +1683,10 @@ public: // when truncated. On other platforms (Linux) the length in cmsghdr will itself be // truncated to fit within the buffer. 
+#if __APPLE__ +// On MacOS, `CMSG_SPACE(0)` triggers a bogus warning. +#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic" +#endif const byte* pos = reinterpret_cast(cmsg); size_t available = ancillaryBuffer.end() - pos; if (available < CMSG_SPACE(0)) { diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-io-win32.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-io-win32.c++ index d85fe89f9f9..aaa65a20d39 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-io-win32.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-io-win32.c++ @@ -23,8 +23,7 @@ // For Unix implementation, see async-io-unix.c++. // Request Vista-level APIs. -#define WINVER 0x0600 -#define _WIN32_WINNT 0x0600 +#include "win32-api-version.h" #include "async-io.h" #include "async-io-internal.h" @@ -189,17 +188,6 @@ int win32Socketpair(SOCKET socks[2]) { namespace { -bool detectWine() { - HMODULE hntdll = GetModuleHandle("ntdll.dll"); - if(hntdll == NULL) return false; - return GetProcAddress(hntdll, "wine_get_version") != nullptr; -} - -bool isWine() { - static bool result = detectWine(); - return result; -} - // ======================================================================================= static constexpr uint NEW_FD_FLAGS = LowLevelAsyncIoProvider::TAKE_OWNERSHIP; @@ -311,6 +299,23 @@ public: }); } + Promise whenWriteDisconnected() override { + // Windows IOCP does not provide a direct, documented way to detect when the socket disconnects + // without actually doing a read or write. However, there is an undocoumented-but-stable + // ioctl called IOCTL_AFD_POLL which can be used for this purpose. In fact, select() is + // implemented in terms of this ioctl -- performed synchronously -- but it's entirely possible + // to put only one socket into the list and perform the ioctl asynchronously. Here's the + // source code for select() in Windows 2000 (not sure how this became public...): + // + // https://github.com/pustladi/Windows-2000/blob/661d000d50637ed6fab2329d30e31775046588a9/private/net/sockets/winsock2/wsp/msafd/select.c#L59-L655 + // + // And here's an interesting discussion: https://github.com/python-trio/trio/issues/52 + // + // TODO(someday): Implement this with IOCTL_AFD_POLL. For now I'm leaving it unimplemented + // because I added this method for a Linux-only use case. + return NEVER_DONE; + } + void shutdownWrite() override { // There's no legitimate way to get an AsyncStreamFd that isn't a socket through the // Win32AsyncIoProvider interface. @@ -786,7 +791,7 @@ Promise> SocketAddress::lookupHost( // - Not implemented in Wine. // - Doesn't seem compatible with I/O completion ports, in particular because it's not associated // with a handle. Could signal completion as an APC instead, but that requires the IOCP code - // to use GetQueuedCompletionStatusEx() which it doesn't right now becaues it's not available + // to use GetQueuedCompletionStatusEx() which it doesn't right now because it's not available // in Wine. // - Requires Unicode, for some reason. Only GetAddrInfoExW() supports async, according to the // docs. Never mind that DNS itself is ASCII... 
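The whenWriteDisconnected() hook added in this patch (fully wired up on Unix, stubbed as NEVER_DONE here on Windows pending an IOCTL_AFD_POLL-based implementation) is intended to let callers abandon in-flight work when the peer goes away. A minimal sketch of that usage follows; handleRequest() is a hypothetical application routine returning kj::Promise<void>, while whenWriteDisconnected() and exclusiveJoin() are existing KJ APIs:

kj::Promise<void> serveOne(kj::AsyncIoStream& stream) {
  auto work = handleRequest(stream);             // hypothetical application logic
  auto hangup = stream.whenWriteDisconnected();  // resolves once the peer's read end is gone
  // Whichever settles first wins; a hang-up cancels the still-pending request work.
  return work.exclusiveJoin(kj::mv(hangup));
}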
@@ -949,6 +954,11 @@ public: KJ_WINSOCK(::setsockopt(fd, level, option, reinterpret_cast(value), length)); } + void getsockname(struct sockaddr* addr, uint* length) override { + socklen_t socklen = *length; + KJ_WINSOCK(::getsockname(fd, addr, &socklen)); + *length = socklen; + } public: Win32EventPort& eventPort; diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-io.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-io.c++ index a966442f520..ec9fc9d71fb 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-io.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-io.c++ @@ -21,8 +21,7 @@ #if _WIN32 // Request Vista-level APIs. -#define WINVER 0x0600 -#define _WIN32_WINNT 0x0600 +#include "win32-api-version.h" #endif #include "async-io.h" @@ -31,6 +30,7 @@ #include "vector.h" #include "io.h" #include "one-of.h" +#include #if _WIN32 #include @@ -39,11 +39,14 @@ #include "windows-sanity.h" #define inet_pton InetPtonA #define inet_ntop InetNtopA +#include +#define dup _dup #else #include #include #include #include +#include #endif namespace kj { @@ -67,6 +70,11 @@ Promise AsyncInputStream::read(void* buffer, size_t minBytes, size_t max Maybe AsyncInputStream::tryGetLength() { return nullptr; } +void AsyncInputStream::registerAncillaryMessageHandler( + Function)> fn) { + KJ_UNIMPLEMENTED("registerAncillaryMsgHandler is not implemented by this AsyncInputStream"); +} + namespace { class AsyncPump { @@ -190,7 +198,7 @@ Maybe> AsyncOutputStream::tryPumpFrom( namespace { -class AsyncPipe final: public AsyncIoStream, public Refcounted { +class AsyncPipe final: public AsyncCapabilityStream, public Refcounted { public: ~AsyncPipe() noexcept(false) { KJ_REQUIRE(state == nullptr || ownState.get() != nullptr, @@ -206,8 +214,36 @@ public: } else KJ_IF_MAYBE(s, state) { return s->tryRead(buffer, minBytes, maxBytes); } else { - return newAdaptedPromise( - *this, arrayPtr(reinterpret_cast(buffer), maxBytes), minBytes); + return newAdaptedPromise( + *this, arrayPtr(reinterpret_cast(buffer), maxBytes), minBytes) + .then([](ReadResult r) { return r.byteCount; }); + } + } + + Promise tryReadWithFds(void* buffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) override { + if (minBytes == 0) { + return ReadResult { 0, 0 }; + } else KJ_IF_MAYBE(s, state) { + return s->tryReadWithFds(buffer, minBytes, maxBytes, fdBuffer, maxFds); + } else { + return newAdaptedPromise( + *this, arrayPtr(reinterpret_cast(buffer), maxBytes), minBytes, + kj::arrayPtr(fdBuffer, maxFds)); + } + } + + Promise tryReadWithStreams( + void* buffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) override { + if (minBytes == 0) { + return ReadResult { 0, 0 }; + } else KJ_IF_MAYBE(s, state) { + return s->tryReadWithStreams(buffer, minBytes, maxBytes, streamBuffer, maxStreams); + } else { + return newAdaptedPromise( + *this, arrayPtr(reinterpret_cast(buffer), maxBytes), minBytes, + kj::arrayPtr(streamBuffer, maxStreams)); } } @@ -227,6 +263,12 @@ public: } else { ownState = kj::heap(); state = *ownState; + + readAborted = true; + KJ_IF_MAYBE(f, readAbortFulfiller) { + f->get()->fulfill(); + readAbortFulfiller = nullptr; + } } } @@ -256,6 +298,42 @@ public: } } + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) override { + while (data.size() == 0 && moreData.size() > 0) { + data = moreData.front(); + moreData = moreData.slice(1, moreData.size()); + } + + if (data.size() == 0) { + KJ_REQUIRE(fds.size() == 0, "can't attach FDs to empty message"); + return READY_NOW; + } else 
KJ_IF_MAYBE(s, state) { + return s->writeWithFds(data, moreData, fds); + } else { + return newAdaptedPromise(*this, data, moreData, fds); + } + } + + Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) override { + while (data.size() == 0 && moreData.size() > 0) { + data = moreData.front(); + moreData = moreData.slice(1, moreData.size()); + } + + if (data.size() == 0) { + KJ_REQUIRE(streams.size() == 0, "can't attach capabilities to empty message"); + return READY_NOW; + } else KJ_IF_MAYBE(s, state) { + return s->writeWithStreams(data, moreData, kj::mv(streams)); + } else { + return newAdaptedPromise(*this, data, moreData, kj::mv(streams)); + } + } + Maybe> tryPumpFrom( AsyncInputStream& input, uint64_t amount) override { if (amount == 0) { @@ -267,6 +345,21 @@ public: } } + Promise whenWriteDisconnected() override { + if (readAborted) { + return kj::READY_NOW; + } else KJ_IF_MAYBE(p, readAbortPromise) { + return p->addBranch(); + } else { + auto paf = newPromiseAndFulfiller(); + readAbortFulfiller = kj::mv(paf.fulfiller); + auto fork = paf.promise.fork(); + auto result = fork.addBranch(); + readAbortPromise = kj::mv(fork); + return result; + } + } + void shutdownWrite() override { KJ_IF_MAYBE(s, state) { s->shutdownWrite(); @@ -277,12 +370,16 @@ public: } private: - Maybe state; + Maybe state; // Object-oriented state! If any method call is blocked waiting on activity from the other end, // then `state` is non-null and method calls should be forwarded to it. If no calls are // outstanding, `state` is null. - kj::Own ownState; + kj::Own ownState; + + bool readAborted = false; + Maybe>> readAbortFulfiller = nullptr; + Maybe> readAbortPromise = nullptr; void endState(AsyncIoStream& obj) { KJ_IF_MAYBE(s, state) { @@ -292,14 +389,45 @@ private: } } - class BlockedWrite final: public AsyncIoStream { + template + static auto teeExceptionVoid(F& fulfiller) { + // Returns a functor that can be passed as the second parameter to .then() to propagate the + // exception to a given fulfiller. The functor's return type is void. + return [&fulfiller](kj::Exception&& e) { + fulfiller.reject(kj::cp(e)); + kj::throwRecoverableException(kj::mv(e)); + }; + } + template + static auto teeExceptionSize(F& fulfiller) { + // Returns a functor that can be passed as the second parameter to .then() to propagate the + // exception to a given fulfiller. The functor's return type is size_t. + return [&fulfiller](kj::Exception&& e) -> size_t { + fulfiller.reject(kj::cp(e)); + kj::throwRecoverableException(kj::mv(e)); + return 0; + }; + } + template + static auto teeExceptionPromise(F& fulfiller) { + // Returns a functor that can be passed as the second parameter to .then() to propagate the + // exception to a given fulfiller. The functor's return type is Promise. + return [&fulfiller](kj::Exception&& e) -> kj::Promise { + fulfiller.reject(kj::cp(e)); + return kj::mv(e); + }; + } + + class BlockedWrite final: public AsyncCapabilityStream { // AsyncPipe state when a write() is currently waiting for a corresponding read(). 
public: BlockedWrite(PromiseFulfiller& fulfiller, AsyncPipe& pipe, ArrayPtr writeBuffer, - ArrayPtr> morePieces) - : fulfiller(fulfiller), pipe(pipe), writeBuffer(writeBuffer), morePieces(morePieces) { + ArrayPtr> morePieces, + kj::OneOf, Array>> capBuffer = {}) + : fulfiller(fulfiller), pipe(pipe), writeBuffer(writeBuffer), morePieces(morePieces), + capBuffer(kj::mv(capBuffer)) { KJ_REQUIRE(pipe.state == nullptr); pipe.state = *this; } @@ -308,53 +436,118 @@ private: pipe.endState(*this); } - Promise tryRead(void* readBufferPtr, size_t minBytes, size_t maxBytes) override { - KJ_REQUIRE(canceler.isEmpty(), "already pumping"); + Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { + KJ_SWITCH_ONEOF(tryReadImpl(buffer, minBytes, maxBytes)) { + KJ_CASE_ONEOF(done, Done) { + return done.result; + } + KJ_CASE_ONEOF(retry, Retry) { + return pipe.tryRead(retry.buffer, retry.minBytes, retry.maxBytes) + .then([n = retry.alreadyRead](size_t amount) { return amount + n; }); + } + } + KJ_UNREACHABLE; + } - auto readBuffer = arrayPtr(reinterpret_cast(readBufferPtr), maxBytes); + Promise tryReadWithFds(void* buffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) override { + size_t capCount = 0; + { // TODO(cleanup): Remove redundant braces when we update to C++17. + KJ_SWITCH_ONEOF(capBuffer) { + KJ_CASE_ONEOF(fds, ArrayPtr) { + capCount = kj::max(fds.size(), maxFds); + // Unfortunately, we have to dup() each FD, because the writer doesn't release ownership + // by default. + // TODO(perf): Should we add an ownership-releasing version of writeWithFds()? + for (auto i: kj::zeroTo(capCount)) { + int duped; + KJ_SYSCALL(duped = dup(fds[i])); + fdBuffer[i] = kj::AutoCloseFd(fds[i]); + } + fdBuffer += capCount; + maxFds -= capCount; + } + KJ_CASE_ONEOF(streams, Array>) { + if (streams.size() > 0 && maxFds > 0) { + // TODO(someday): We could let people pass a LowLevelAsyncIoProvider to + // newTwoWayPipe() if we wanted to auto-wrap FDs, but does anyone care? + KJ_FAIL_REQUIRE( + "async pipe message was written with streams attached, but corresponding read " + "asked for FDs, and we don't know how to convert here"); + } + } + } + } - size_t totalRead = 0; - while (readBuffer.size() >= writeBuffer.size()) { - // The whole current write buffer can be copied into the read buffer. + // Drop any unclaimed caps. This mirrors the behavior of unix sockets, where if we didn't + // provide enough buffer space for all the written FDs, the remaining ones are lost. + capBuffer = {}; - { - auto n = writeBuffer.size(); - memcpy(readBuffer.begin(), writeBuffer.begin(), n); - totalRead += n; - readBuffer = readBuffer.slice(n, readBuffer.size()); + KJ_SWITCH_ONEOF(tryReadImpl(buffer, minBytes, maxBytes)) { + KJ_CASE_ONEOF(done, Done) { + return ReadResult { done.result, capCount }; } + KJ_CASE_ONEOF(retry, Retry) { + return pipe.tryReadWithFds( + retry.buffer, retry.minBytes, retry.maxBytes, fdBuffer, maxFds) + .then([byteCount = retry.alreadyRead, capCount](ReadResult result) { + result.byteCount += byteCount; + result.capCount += capCount; + return result; + }); + } + } + KJ_UNREACHABLE; + } - if (morePieces.size() == 0) { - // All done writing. - fulfiller.fulfill(); - pipe.endState(*this); - - if (totalRead >= minBytes) { - // Also all done reading. 
- return totalRead; - } else { - return pipe.tryRead(readBuffer.begin(), minBytes - totalRead, readBuffer.size()) - .then([totalRead](size_t amount) { return amount + totalRead; }); + Promise tryReadWithStreams( + void* buffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) override { + size_t capCount = 0; + { // TODO(cleanup): Remove redundant braces when we update to C++17. + KJ_SWITCH_ONEOF(capBuffer) { + KJ_CASE_ONEOF(fds, ArrayPtr) { + if (fds.size() > 0 && maxStreams > 0) { + // TODO(someday): Use AsyncIoStream's `Maybe getFd()` method? + KJ_FAIL_REQUIRE( + "async pipe message was written with FDs attached, but corresponding read " + "asked for streams, and we don't know how to convert here"); + } + } + KJ_CASE_ONEOF(streams, Array>) { + capCount = kj::max(streams.size(), maxStreams); + for (auto i: kj::zeroTo(capCount)) { + streamBuffer[i] = kj::mv(streams[i]); + } + streamBuffer += capCount; + maxStreams -= capCount; } } - - writeBuffer = morePieces[0]; - morePieces = morePieces.slice(1, morePieces.size()); } - // At this point, the read buffer is smaller than the current write buffer, so we can fill - // it completely. - { - auto n = readBuffer.size(); - memcpy(readBuffer.begin(), writeBuffer.begin(), n); - writeBuffer = writeBuffer.slice(n, writeBuffer.size()); - totalRead += n; - } + // Drop any unclaimed caps. This mirrors the behavior of unix sockets, where if we didn't + // provide enough buffer space for all the written FDs, the remaining ones are lost. + capBuffer = {}; - return totalRead; + KJ_SWITCH_ONEOF(tryReadImpl(buffer, minBytes, maxBytes)) { + KJ_CASE_ONEOF(done, Done) { + return ReadResult { done.result, capCount }; + } + KJ_CASE_ONEOF(retry, Retry) { + return pipe.tryReadWithStreams( + retry.buffer, retry.minBytes, retry.maxBytes, streamBuffer, maxStreams) + .then([byteCount = retry.alreadyRead, capCount](ReadResult result) { + result.byteCount += byteCount; + result.capCount += capCount; + return result; + }); + } + } + KJ_UNREACHABLE; } Promise pumpTo(AsyncOutputStream& output, uint64_t amount) override { + // Note: Pumps drop all capabilities. KJ_REQUIRE(canceler.isEmpty(), "already pumping"); if (amount < writeBuffer.size()) { @@ -364,7 +557,7 @@ private: writeBuffer = writeBuffer.slice(amount, writeBuffer.size()); // We pumped the full amount, so we're done pumping. return amount; - })); + }, teeExceptionSize(fulfiller))); } // First piece doesn't cover the whole pump. Figure out how many more pieces to add. @@ -398,7 +591,7 @@ private: return pipe.pumpTo(output, amount - actual) .then([actual](uint64_t actual2) { return actual + actual2; }); } - })); + }, teeExceptionPromise(fulfiller))); } else { // Pump ends mid-piece. Write the last, partial piece. 
auto n = amount - actual; @@ -418,7 +611,7 @@ private: morePieces = newMorePieces; canceler.release(); return amount; - })); + }, teeExceptionSize(fulfiller))); } } @@ -435,6 +628,16 @@ private: Promise write(ArrayPtr> pieces) override { KJ_FAIL_REQUIRE("can't write() again until previous write() completes"); } + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) override { + KJ_FAIL_REQUIRE("can't write() again until previous write() completes"); + } + Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) override { + KJ_FAIL_REQUIRE("can't write() again until previous write() completes"); + } Maybe> tryPumpFrom(AsyncInputStream& input, uint64_t amount) override { KJ_FAIL_REQUIRE("can't tryPumpFrom() again until previous write() completes"); } @@ -442,15 +645,68 @@ private: KJ_FAIL_REQUIRE("can't shutdownWrite() until previous write() completes"); } + Promise whenWriteDisconnected() override { + KJ_FAIL_ASSERT("can't get here -- implemented by AsyncPipe"); + } + private: PromiseFulfiller& fulfiller; AsyncPipe& pipe; ArrayPtr writeBuffer; ArrayPtr> morePieces; + kj::OneOf, Array>> capBuffer; Canceler canceler; + + struct Done { size_t result; }; + struct Retry { void* buffer; size_t minBytes; size_t maxBytes; size_t alreadyRead; }; + + OneOf tryReadImpl(void* readBufferPtr, size_t minBytes, size_t maxBytes) { + KJ_REQUIRE(canceler.isEmpty(), "already pumping"); + + auto readBuffer = arrayPtr(reinterpret_cast(readBufferPtr), maxBytes); + + size_t totalRead = 0; + while (readBuffer.size() >= writeBuffer.size()) { + // The whole current write buffer can be copied into the read buffer. + + { + auto n = writeBuffer.size(); + memcpy(readBuffer.begin(), writeBuffer.begin(), n); + totalRead += n; + readBuffer = readBuffer.slice(n, readBuffer.size()); + } + + if (morePieces.size() == 0) { + // All done writing. + fulfiller.fulfill(); + pipe.endState(*this); + + if (totalRead >= minBytes) { + // Also all done reading. + return Done { totalRead }; + } else { + return Retry { readBuffer.begin(), minBytes - totalRead, readBuffer.size(), totalRead }; + } + } + + writeBuffer = morePieces[0]; + morePieces = morePieces.slice(1, morePieces.size()); + } + + // At this point, the read buffer is smaller than the current write buffer, so we can fill + // it completely. + { + auto n = readBuffer.size(); + memcpy(readBuffer.begin(), writeBuffer.begin(), n); + writeBuffer = writeBuffer.slice(n, writeBuffer.size()); + totalRead += n; + } + + return Done { totalRead }; + } }; - class BlockedPumpFrom final: public AsyncIoStream { + class BlockedPumpFrom final: public AsyncCapabilityStream { // AsyncPipe state when a tryPumpFrom() is currently waiting for a corresponding read(). public: @@ -490,7 +746,24 @@ private: minBytes - actual, maxBytes - actual) .then([actual](size_t actual2) { return actual + actual2; }); } - })); + }, teeExceptionPromise(fulfiller))); + } + + Promise tryReadWithFds(void* readBuffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) override { + // Pumps drop all capabilities, so fall back to regular read. (We don't even know if the + // destination is an AsyncCapabilityStream...) + return tryRead(readBuffer, minBytes, maxBytes) + .then([](size_t n) { return ReadResult { n, 0 }; }); + } + + Promise tryReadWithStreams( + void* readBuffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) override { + // Pumps drop all capabilities, so fall back to regular read. 
(We don't even know if the + // destination is an AsyncCapabilityStream...) + return tryRead(readBuffer, minBytes, maxBytes) + .then([](size_t n) { return ReadResult { n, 0 }; }); } Promise pumpTo(AsyncOutputStream& output, uint64_t amount2) override { @@ -502,24 +775,18 @@ private: canceler.release(); pumpedSoFar += actual; KJ_ASSERT(pumpedSoFar <= amount); - if (pumpedSoFar == amount) { - fulfiller.fulfill(kj::cp(amount)); + if (pumpedSoFar == amount || actual < n) { + // Either we pumped all we wanted or we hit EOF. + fulfiller.fulfill(kj::cp(pumpedSoFar)); pipe.endState(*this); + return pipe.pumpTo(output, amount2 - actual) + .then([actual](uint64_t actual2) { return actual + actual2; }); } - KJ_ASSERT(actual <= amount2); - if (actual == amount2) { - // Completed entire pumpTo amount. - return amount2; - } else if (actual < n) { - // Received less than requested, presumably because EOF. - return actual; - } else { - // We received all the bytes that were requested but it didn't complete the pump. - KJ_ASSERT(pumpedSoFar == amount); - return pipe.pumpTo(output, amount2 - actual); - } - })); + // Completed entire pumpTo amount. + KJ_ASSERT(actual == amount2); + return amount2; + }, teeExceptionSize(fulfiller))); } void abortRead() override { @@ -554,6 +821,16 @@ private: Promise write(ArrayPtr> pieces) override { KJ_FAIL_REQUIRE("can't write() again until previous tryPumpFrom() completes"); } + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) override { + KJ_FAIL_REQUIRE("can't write() again until previous tryPumpFrom() completes"); + } + Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) override { + KJ_FAIL_REQUIRE("can't write() again until previous tryPumpFrom() completes"); + } Maybe> tryPumpFrom(AsyncInputStream& input, uint64_t amount) override { KJ_FAIL_REQUIRE("can't tryPumpFrom() again until previous tryPumpFrom() completes"); } @@ -561,6 +838,10 @@ private: KJ_FAIL_REQUIRE("can't shutdownWrite() until previous tryPumpFrom() completes"); } + Promise whenWriteDisconnected() override { + KJ_FAIL_ASSERT("can't get here -- implemented by AsyncPipe"); + } + private: PromiseFulfiller& fulfiller; AsyncPipe& pipe; @@ -571,13 +852,16 @@ private: kj::Promise checkEofTask = nullptr; }; - class BlockedRead final: public AsyncIoStream { + class BlockedRead final: public AsyncCapabilityStream { // AsyncPipe state when a tryRead() is currently waiting for a corresponding write(). 
public: - BlockedRead(PromiseFulfiller& fulfiller, AsyncPipe& pipe, - ArrayPtr readBuffer, size_t minBytes) - : fulfiller(fulfiller), pipe(pipe), readBuffer(readBuffer), minBytes(minBytes) { + BlockedRead( + PromiseFulfiller& fulfiller, AsyncPipe& pipe, + ArrayPtr readBuffer, size_t minBytes, + kj::OneOf, ArrayPtr>> capBuffer = {}) + : fulfiller(fulfiller), pipe(pipe), readBuffer(readBuffer), minBytes(minBytes), + capBuffer(capBuffer) { KJ_REQUIRE(pipe.state == nullptr); pipe.state = *this; } @@ -589,6 +873,15 @@ private: Promise tryRead(void* readBuffer, size_t minBytes, size_t maxBytes) override { KJ_FAIL_REQUIRE("can't read() again until previous read() completes"); } + Promise tryReadWithFds(void* readBuffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) override { + KJ_FAIL_REQUIRE("can't read() again until previous read() completes"); + } + Promise tryReadWithStreams( + void* readBuffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) override { + KJ_FAIL_REQUIRE("can't read() again until previous read() completes"); + } Promise pumpTo(AsyncOutputStream& output, uint64_t amount) override { KJ_FAIL_REQUIRE("can't read() again until previous read() completes"); } @@ -603,126 +896,187 @@ private: Promise write(const void* writeBuffer, size_t size) override { KJ_REQUIRE(canceler.isEmpty(), "already pumping"); - if (size < readBuffer.size()) { - // Consume a portion of the read buffer. - memcpy(readBuffer.begin(), writeBuffer, size); - readSoFar += size; - readBuffer = readBuffer.slice(size, readBuffer.size()); - if (readSoFar >= minBytes) { - // We've read enough to close out this read. - fulfiller.fulfill(kj::cp(readSoFar)); - pipe.endState(*this); - } - return READY_NOW; - } else { - // Consume entire read buffer. - auto n = readBuffer.size(); - fulfiller.fulfill(readSoFar + n); - pipe.endState(*this); - memcpy(readBuffer.begin(), writeBuffer, n); - if (n == size) { - // That's it. + auto data = arrayPtr(reinterpret_cast(writeBuffer), size); + KJ_SWITCH_ONEOF(writeImpl(data, nullptr)) { + KJ_CASE_ONEOF(done, Done) { return READY_NOW; - } else { - return pipe.write(reinterpret_cast(writeBuffer) + n, size - n); + } + KJ_CASE_ONEOF(retry, Retry) { + KJ_ASSERT(retry.moreData == nullptr); + return pipe.write(retry.data.begin(), retry.data.size()); } } + KJ_UNREACHABLE; } Promise write(ArrayPtr> pieces) override { KJ_REQUIRE(canceler.isEmpty(), "already pumping"); - while (pieces.size() > 0) { - if (pieces[0].size() < readBuffer.size()) { - // Consume a portion of the read buffer. - auto n = pieces[0].size(); - memcpy(readBuffer.begin(), pieces[0].begin(), n); - readSoFar += n; - readBuffer = readBuffer.slice(n, readBuffer.size()); - pieces = pieces.slice(1, pieces.size()); - // loop - } else { - // Consume entire read buffer. - auto n = readBuffer.size(); - fulfiller.fulfill(readSoFar + n); - pipe.endState(*this); - memcpy(readBuffer.begin(), pieces[0].begin(), n); - - auto restOfPiece = pieces[0].slice(n, pieces[0].size()); - pieces = pieces.slice(1, pieces.size()); - if (restOfPiece.size() == 0) { + KJ_SWITCH_ONEOF(writeImpl(pieces[0], pieces.slice(1, pieces.size()))) { + KJ_CASE_ONEOF(done, Done) { + return READY_NOW; + } + KJ_CASE_ONEOF(retry, Retry) { + if (retry.data.size() == 0) { // We exactly finished the current piece, so just issue a write for the remaining // pieces. - if (pieces.size() == 0) { + if (retry.moreData.size() == 0) { // Nothing left. return READY_NOW; } else { // Write remaining pieces. 
- return pipe.write(pieces); + return pipe.write(retry.moreData); } } else { // Unfortunately we have to execute a separate write() for the remaining part of this // piece, because we can't modify the pieces array. - auto promise = pipe.write(restOfPiece.begin(), restOfPiece.size()); - if (pieces.size() > 0) { + auto promise = pipe.write(retry.data.begin(), retry.data.size()); + if (retry.moreData.size() == 0) { // No more pieces so that's it. return kj::mv(promise); } else { // Also need to write the remaining pieces. auto& pipeRef = pipe; - return promise.then([pieces,&pipeRef]() { + return promise.then([pieces=retry.moreData,&pipeRef]() { return pipeRef.write(pieces); }); } } } } + KJ_UNREACHABLE; + } + + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) override { +#if __GNUC__ && !__clang__ && __GNUC__ >= 7 +// GCC 7 decides the open-brace below is "misleadingly indented" as if it were guarded by the `for` +// that appears in the implementation of KJ_REQUIRE(). Shut up shut up shut up. +#pragma GCC diagnostic ignored "-Wmisleading-indentation" +#endif + KJ_REQUIRE(canceler.isEmpty(), "already pumping"); + + { // TODO(cleanup): Remove redundant braces when we update to C++17. + KJ_SWITCH_ONEOF(capBuffer) { + KJ_CASE_ONEOF(fdBuffer, ArrayPtr) { + size_t count = kj::max(fdBuffer.size(), fds.size()); + // Unfortunately, we have to dup() each FD, because the writer doesn't release ownership + // by default. + // TODO(perf): Should we add an ownership-releasing version of writeWithFds()? + for (auto i: kj::zeroTo(count)) { + int duped; + KJ_SYSCALL(duped = dup(fds[i])); + fdBuffer[i] = kj::AutoCloseFd(duped); + } + capBuffer = fdBuffer.slice(count, fdBuffer.size()); + readSoFar.capCount += count; + } + KJ_CASE_ONEOF(streamBuffer, ArrayPtr>) { + if (streamBuffer.size() > 0 && fds.size() > 0) { + // TODO(someday): Use AsyncIoStream's `Maybe getFd()` method? + KJ_FAIL_REQUIRE( + "async pipe message was written with FDs attached, but corresponding read " + "asked for streams, and we don't know how to convert here"); + } + } + } + } - // Consumed all written pieces. - if (readSoFar >= minBytes) { - // We've read enough to close out this read. - fulfiller.fulfill(kj::cp(readSoFar)); - pipe.endState(*this); + KJ_SWITCH_ONEOF(writeImpl(data, moreData)) { + KJ_CASE_ONEOF(done, Done) { + return READY_NOW; + } + KJ_CASE_ONEOF(retry, Retry) { + // Any leftover fds in `fds` are dropped on the floor, per contract. + // TODO(cleanup): We use another writeWithFds() call here only because it accepts `data` + // and `moreData` directly. After the stream API refactor, we should be able to avoid + // this. + return pipe.writeWithFds(retry.data, retry.moreData, nullptr); + } } + KJ_UNREACHABLE; + } - return READY_NOW; + Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) override { + KJ_REQUIRE(canceler.isEmpty(), "already pumping"); + + { // TODO(cleanup): Remove redundant braces when we update to C++17. + KJ_SWITCH_ONEOF(capBuffer) { + KJ_CASE_ONEOF(fdBuffer, ArrayPtr) { + if (fdBuffer.size() > 0 && streams.size() > 0) { + // TODO(someday): We could let people pass a LowLevelAsyncIoProvider to newTwoWayPipe() + // if we wanted to auto-wrap FDs, but does anyone care? 
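// A short writer-side sketch of the ownership rule noted above: the pipe dup()s
// every descriptor it is handed, so the caller keeps ownership and must close the
// original. Using AutoCloseFd with the AutoCloseFd overload of writeWithFds()
// declared in async-io.h makes that explicit; the path is purely illustrative.
#include <fcntl.h>

kj::Promise<void> sendNullFdSketch(kj::AsyncCapabilityStream& stream) {
  int raw;
  KJ_SYSCALL(raw = open("/dev/null", O_WRONLY));

  static constexpr kj::byte b = 0;
  auto fds = kj::heapArray<kj::AutoCloseFd>(1);
  fds[0] = kj::AutoCloseFd(raw);
  // The stream dup()s fds[0]; keep the original alive until the write completes,
  // after which AutoCloseFd closes it for us.
  return stream.writeWithFds(kj::arrayPtr(&b, 1), nullptr, fds).attach(kj::mv(fds));
}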
+ KJ_FAIL_REQUIRE( + "async pipe message was written with streams attached, but corresponding read " + "asked for FDs, and we don't know how to convert here"); + } + } + KJ_CASE_ONEOF(streamBuffer, ArrayPtr>) { + size_t count = kj::max(streamBuffer.size(), streams.size()); + for (auto i: kj::zeroTo(count)) { + streamBuffer[i] = kj::mv(streams[i]); + } + capBuffer = streamBuffer.slice(count, streamBuffer.size()); + readSoFar.capCount += count; + } + } + } + + KJ_SWITCH_ONEOF(writeImpl(data, moreData)) { + KJ_CASE_ONEOF(done, Done) { + return READY_NOW; + } + KJ_CASE_ONEOF(retry, Retry) { + // Any leftover fds in `fds` are dropped on the floor, per contract. + // TODO(cleanup): We use another writeWithStreams() call here only because it accepts + // `data` and `moreData` directly. After the stream API refactor, we should be able to + // avoid this. + return pipe.writeWithStreams(retry.data, retry.moreData, nullptr); + } + } + KJ_UNREACHABLE; } Maybe> tryPumpFrom(AsyncInputStream& input, uint64_t amount) override { + // Note: Pumps drop all capabilities. KJ_REQUIRE(canceler.isEmpty(), "already pumping"); - KJ_ASSERT(minBytes > readSoFar); - auto minToRead = kj::min(amount, minBytes - readSoFar); + KJ_ASSERT(minBytes > readSoFar.byteCount); + auto minToRead = kj::min(amount, minBytes - readSoFar.byteCount); auto maxToRead = kj::min(amount, readBuffer.size()); return canceler.wrap(input.tryRead(readBuffer.begin(), minToRead, maxToRead) - .then([this,&input,amount,minToRead](size_t actual) -> Promise { + .then([this,&input,amount](size_t actual) -> Promise { readBuffer = readBuffer.slice(actual, readBuffer.size()); - readSoFar += actual; + readSoFar.byteCount += actual; - if (readSoFar >= minBytes || actual < minToRead) { - // We've read enough to close out this read (readSoFar >= minBytes) - // OR we reached EOF and couldn't complete the read (actual < minToRead) - // Either way, we want to close out this read. + if (readSoFar.byteCount >= minBytes) { + // We've read enough to close out this read (readSoFar >= minBytes). canceler.release(); fulfiller.fulfill(kj::cp(readSoFar)); pipe.endState(*this); if (actual < amount) { - // We din't complete pumping. Restart from the pipe. + // We didn't read as much data as the pump requested, but we did fulfill the read, so + // we don't know whether we reached EOF on the input. We need to continue the pump, + // replacing the BlockedRead state. return input.pumpTo(pipe, amount - actual) .then([actual](uint64_t actual2) -> uint64_t { return actual + actual2; }); + } else { + // We pumped as much data as was requested, so we can return that now. + return actual; } + } else { + // The pump completed without fulfilling the read. This either means that the pump + // reached EOF or the `amount` requested was not enough to satisfy the read in the first + // place. Pumps do not propagate EOF, so either way we want to leave the BlockedRead in + // place waiting for more data. + return actual; } - - // If we read less than `actual`, but more than `minToRead`, it can only have been - // because we reached `minBytes`, so the conditional above would have executed. So, here - // we know that actual == amount. - KJ_ASSERT(actual == amount); - - // We pumped the full amount, so we're done pumping. 
- return amount; - })); + }, teeExceptionPromise(fulfiller))); } void shutdownWrite() override { @@ -732,16 +1086,66 @@ private: pipe.shutdownWrite(); } + Promise whenWriteDisconnected() override { + KJ_FAIL_ASSERT("can't get here -- implemented by AsyncPipe"); + } + private: - PromiseFulfiller& fulfiller; + PromiseFulfiller& fulfiller; AsyncPipe& pipe; ArrayPtr readBuffer; size_t minBytes; - size_t readSoFar = 0; + kj::OneOf, ArrayPtr>> capBuffer; + ReadResult readSoFar = {0, 0}; Canceler canceler; + + struct Done {}; + struct Retry { ArrayPtr data; ArrayPtr> moreData; }; + + OneOf writeImpl(ArrayPtr data, + ArrayPtr> moreData) { + for (;;) { + if (data.size() < readBuffer.size()) { + // First write segment consumes a portion of the read buffer but not all of it. + auto n = data.size(); + memcpy(readBuffer.begin(), data.begin(), n); + readSoFar.byteCount += n; + readBuffer = readBuffer.slice(n, readBuffer.size()); + if (moreData.size() == 0) { + // Consumed all written pieces. + if (readSoFar.byteCount >= minBytes) { + // We've read enough to close out this read. + fulfiller.fulfill(kj::cp(readSoFar)); + pipe.endState(*this); + } + return Done(); + } + data = moreData[0]; + moreData = moreData.slice(1, moreData.size()); + // loop + } else { + // First write segment consumes entire read buffer. + auto n = readBuffer.size(); + readSoFar.byteCount += n; + fulfiller.fulfill(kj::cp(readSoFar)); + pipe.endState(*this); + memcpy(readBuffer.begin(), data.begin(), n); + + data = data.slice(n, data.size()); + if (data.size() == 0 && moreData.size() == 0) { + return Done(); + } else { + // Note: Even if `data` is empty, we don't replace it with moreData[0], because the + // retry might need to use write(ArrayPtr>) which doesn't allow + // passing a separate first segment. + return Retry { data, moreData }; + } + } + } + } }; - class BlockedPumpTo final: public AsyncIoStream { + class BlockedPumpTo final: public AsyncCapabilityStream { // AsyncPipe state when a pumpTo() is currently waiting for a corresponding write(). public: @@ -759,6 +1163,15 @@ private: Promise tryRead(void* readBuffer, size_t minBytes, size_t maxBytes) override { KJ_FAIL_REQUIRE("can't read() again until previous pumpTo() completes"); } + Promise tryReadWithFds(void* readBuffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) override { + KJ_FAIL_REQUIRE("can't read() again until previous pumpTo() completes"); + } + Promise tryReadWithStreams( + void* readBuffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) override { + KJ_FAIL_REQUIRE("can't read() again until previous pumpTo() completes"); + } Promise pumpTo(AsyncOutputStream& output, uint64_t amount) override { KJ_FAIL_REQUIRE("can't read() again until previous pumpTo() completes"); } @@ -794,7 +1207,7 @@ private: KJ_ASSERT(pumpedSoFar == amount); return pipe.write(reinterpret_cast(writeBuffer) + actual, size - actual); } - })); + }, teeExceptionPromise(fulfiller))); } Promise write(ArrayPtr> pieces) override { @@ -821,7 +1234,7 @@ private: fulfiller.fulfill(kj::cp(amount)); pipe.endState(*this); return pipe.write(partial2.begin(), partial2.size()); - })); + }, teeExceptionPromise(fulfiller))); ++i; } else { // The pump ends exactly at the end of a piece, how nice. 
@@ -829,7 +1242,7 @@ private: canceler.release(); fulfiller.fulfill(kj::cp(amount)); pipe.endState(*this); - })); + }, teeExceptionVoid(fulfiller))); } auto remainder = pieces.slice(i, pieces.size()); @@ -858,17 +1271,51 @@ private: fulfiller.fulfill(kj::cp(amount)); pipe.endState(*this); } - })); + }, teeExceptionVoid(fulfiller))); } - Maybe> tryPumpFrom(AsyncInputStream& input, uint64_t amount2) override { - KJ_REQUIRE(canceler.isEmpty(), "already pumping"); + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) override { + // Pumps drop all capabilities, so fall back to regular write(). - auto n = kj::min(amount2, amount - pumpedSoFar); - return output.tryPumpFrom(input, n) - .map([&](Promise subPump) { - return canceler.wrap(subPump - .then([this,&input,amount2,n](uint64_t actual) -> Promise { + // TODO(cleaunp): After stream API refactor, regular write() methods will take + // (data, moreData) and we can clean this up. + if (moreData.size() == 0) { + return write(data.begin(), data.size()); + } else { + auto pieces = kj::heapArrayBuilder>(moreData.size() + 1); + pieces.add(data); + pieces.addAll(moreData); + return write(pieces.finish()); + } + } + + Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) override { + // Pumps drop all capabilities, so fall back to regular write(). + + // TODO(cleaunp): After stream API refactor, regular write() methods will take + // (data, moreData) and we can clean this up. + if (moreData.size() == 0) { + return write(data.begin(), data.size()); + } else { + auto pieces = kj::heapArrayBuilder>(moreData.size() + 1); + pieces.add(data); + pieces.addAll(moreData); + return write(pieces.finish()); + } + } + + Maybe> tryPumpFrom(AsyncInputStream& input, uint64_t amount2) override { + KJ_REQUIRE(canceler.isEmpty(), "already pumping"); + + auto n = kj::min(amount2, amount - pumpedSoFar); + return output.tryPumpFrom(input, n) + .map([&](Promise subPump) { + return canceler.wrap(subPump + .then([this,&input,amount2,n](uint64_t actual) -> Promise { canceler.release(); pumpedSoFar += actual; KJ_ASSERT(pumpedSoFar <= amount); @@ -889,7 +1336,7 @@ private: KJ_ASSERT(pumpedSoFar == amount); return input.pumpTo(pipe, amount2 - actual); } - })); + }, teeExceptionPromise(fulfiller))); }); } @@ -900,6 +1347,10 @@ private: pipe.shutdownWrite(); } + Promise whenWriteDisconnected() override { + KJ_FAIL_ASSERT("can't get here -- implemented by AsyncPipe"); + } + private: PromiseFulfiller& fulfiller; AsyncPipe& pipe; @@ -909,42 +1360,95 @@ private: Canceler canceler; }; - class AbortedRead final: public AsyncIoStream { + class AbortedRead final: public AsyncCapabilityStream { // AsyncPipe state when abortRead() has been called. 
public: Promise tryRead(void* readBufferPtr, size_t minBytes, size_t maxBytes) override { - KJ_FAIL_REQUIRE("abortRead() has been called"); + return KJ_EXCEPTION(DISCONNECTED, "abortRead() has been called"); + } + Promise tryReadWithFds(void* readBuffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) override { + return KJ_EXCEPTION(DISCONNECTED, "abortRead() has been called"); + } + Promise tryReadWithStreams( + void* readBuffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) override { + return KJ_EXCEPTION(DISCONNECTED, "abortRead() has been called"); } Promise pumpTo(AsyncOutputStream& output, uint64_t amount) override { - KJ_FAIL_REQUIRE("abortRead() has been called"); + return KJ_EXCEPTION(DISCONNECTED, "abortRead() has been called"); } void abortRead() override { // ignore repeated abort } Promise write(const void* buffer, size_t size) override { - KJ_FAIL_REQUIRE("abortRead() has been called"); + return KJ_EXCEPTION(DISCONNECTED, "abortRead() has been called"); } Promise write(ArrayPtr> pieces) override { - KJ_FAIL_REQUIRE("abortRead() has been called"); + return KJ_EXCEPTION(DISCONNECTED, "abortRead() has been called"); + } + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) override { + return KJ_EXCEPTION(DISCONNECTED, "abortRead() has been called"); + } + Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) override { + return KJ_EXCEPTION(DISCONNECTED, "abortRead() has been called"); } Maybe> tryPumpFrom(AsyncInputStream& input, uint64_t amount) override { - KJ_FAIL_REQUIRE("abortRead() has been called"); + // There might not actually be any data in `input`, in which case a pump wouldn't actually + // write anything and wouldn't fail. + + if (input.tryGetLength().orDefault(1) == 0) { + // Yeah a pump would pump nothing. + return Promise(uint64_t(0)); + } else { + // While we *could* just return nullptr here, it would probably then fall back to a normal + // buffered pump, which would allocate a big old buffer just to find there's nothing to + // read. Let's try reading 1 byte to avoid that allocation. + static char c; + return input.tryRead(&c, 1, 1).then([](size_t n) { + if (n == 0) { + // Yay, we're at EOF as hoped. + return uint64_t(0); + } else { + // There was data in the input. The pump would have thrown. + kj::throwRecoverableException( + KJ_EXCEPTION(DISCONNECTED, "abortRead() has been called")); + return uint64_t(0); + } + }); + } } void shutdownWrite() override { // ignore -- currently shutdownWrite() actually means that the PipeWriteEnd was dropped, // which is not an error even if reads have been aborted. } + Promise whenWriteDisconnected() override { + KJ_FAIL_ASSERT("can't get here -- implemented by AsyncPipe"); + } }; - class ShutdownedWrite final: public AsyncIoStream { + class ShutdownedWrite final: public AsyncCapabilityStream { // AsyncPipe state when shutdownWrite() has been called. 
public: Promise tryRead(void* readBufferPtr, size_t minBytes, size_t maxBytes) override { return size_t(0); } + Promise tryReadWithFds(void* readBuffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) override { + return ReadResult { 0, 0 }; + } + Promise tryReadWithStreams( + void* readBuffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) override { + return ReadResult { 0, 0 }; + } Promise pumpTo(AsyncOutputStream& output, uint64_t amount) override { return uint64_t(0); } @@ -958,6 +1462,16 @@ private: Promise write(ArrayPtr> pieces) override { KJ_FAIL_REQUIRE("shutdownWrite() has been called"); } + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) override { + KJ_FAIL_REQUIRE("shutdownWrite() has been called"); + } + Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) override { + KJ_FAIL_REQUIRE("shutdownWrite() has been called"); + } Maybe> tryPumpFrom(AsyncInputStream& input, uint64_t amount) override { KJ_FAIL_REQUIRE("shutdownWrite() has been called"); } @@ -965,6 +1479,9 @@ private: // ignore -- currently shutdownWrite() actually means that the PipeWriteEnd was dropped, // so it will only be called once anyhow. } + Promise whenWriteDisconnected() override { + KJ_FAIL_ASSERT("can't get here -- implemented by AsyncPipe"); + } }; }; @@ -1012,12 +1529,16 @@ public: return pipe->tryPumpFrom(input, amount); } + Promise whenWriteDisconnected() override { + return pipe->whenWriteDisconnected(); + } + private: Own pipe; UnwindDetector unwind; }; -class TwoWayPipeEnd final: public AsyncIoStream { +class TwoWayPipeEnd final: public AsyncCapabilityStream { public: TwoWayPipeEnd(kj::Own in, kj::Own out) : in(kj::mv(in)), out(kj::mv(out)) {} @@ -1031,6 +1552,15 @@ public: Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { return in->tryRead(buffer, minBytes, maxBytes); } + Promise tryReadWithFds(void* buffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) override { + return in->tryReadWithFds(buffer, minBytes, maxBytes, fdBuffer, maxFds); + } + Promise tryReadWithStreams( + void* buffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) override { + return in->tryReadWithStreams(buffer, minBytes, maxBytes, streamBuffer, maxStreams); + } Promise pumpTo(AsyncOutputStream& output, uint64_t amount) override { return in->pumpTo(output, amount); } @@ -1044,10 +1574,23 @@ public: Promise write(ArrayPtr> pieces) override { return out->write(pieces); } + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) override { + return out->writeWithFds(data, moreData, fds); + } + Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) override { + return out->writeWithStreams(data, moreData, kj::mv(streams)); + } Maybe> tryPumpFrom( AsyncInputStream& input, uint64_t amount) override { return out->tryPumpFrom(input, amount); } + Promise whenWriteDisconnected() override { + return out->whenWriteDisconnected(); + } void shutdownWrite() override { out->shutdownWrite(); } @@ -1063,7 +1606,7 @@ public: LimitedInputStream(kj::Own inner, uint64_t limit) : inner(kj::mv(inner)), limit(limit) { if (limit == 0) { - inner = nullptr; + this->inner = nullptr; } } @@ -1100,7 +1643,8 @@ private: if (limit == 0) { inner = nullptr; } else if (amount < requested) { - KJ_FAIL_REQUIRE("pipe ended prematurely"); + kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, + "fixed-length pipe ended prematurely")); 
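// A minimal sketch of the behavior changed above: if the write end of a
// fixed-length one-way pipe goes away before the promised bytes arrive, the reader
// now observes a DISCONNECTED exception rather than a generic failure. Assumes a KJ
// event loop and the usual kj/async-io.h and kj/debug.h includes.
void prematureEofSketch(kj::WaitScope& waitScope) {
  auto pipe = kj::newOneWayPipe(uint64_t(16));  // reader is promised exactly 16 bytes
  pipe.out = nullptr;                           // writer is dropped after zero bytes

  bool disconnected = pipe.in->readAllBytes().then(
      [](kj::Array<kj::byte>&&) { return false; },
      [](kj::Exception&& e) {
        return e.getType() == kj::Exception::Type::DISCONNECTED;
      }).wait(waitScope);
  KJ_ASSERT(disconnected);
}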
} } }; @@ -1125,6 +1669,860 @@ TwoWayPipe newTwoWayPipe() { return { { kj::mv(end1), kj::mv(end2) } }; } +CapabilityPipe newCapabilityPipe() { + auto pipe1 = kj::refcounted(); + auto pipe2 = kj::refcounted(); + auto end1 = kj::heap(kj::addRef(*pipe1), kj::addRef(*pipe2)); + auto end2 = kj::heap(kj::mv(pipe2), kj::mv(pipe1)); + return { { kj::mv(end1), kj::mv(end2) } }; +} + +namespace { + +class AsyncTee final: public Refcounted { +public: + using BranchId = uint; + + explicit AsyncTee(Own inner, uint64_t bufferSizeLimit) + : inner(mv(inner)), bufferSizeLimit(bufferSizeLimit), length(this->inner->tryGetLength()) {} + ~AsyncTee() noexcept(false) { + bool hasBranches = false; + for (auto& branch: branches) { + hasBranches = hasBranches || branch != nullptr; + } + KJ_ASSERT(!hasBranches, "destroying AsyncTee with branch still alive") { + // Don't std::terminate(). + break; + } + } + + void addBranch(BranchId branch) { + KJ_REQUIRE(branches[branch] == nullptr, "branch already exists"); + branches[branch] = Branch(); + } + + void removeBranch(BranchId branch) { + auto& state = KJ_REQUIRE_NONNULL(branches[branch], "branch was already destroyed"); + KJ_REQUIRE(state.sink == nullptr, + "destroying tee branch with operation still in-progress; probably going to segfault") { + // Don't std::terminate(). + break; + } + + branches[branch] = nullptr; + } + + Promise tryRead(BranchId branch, void* buffer, size_t minBytes, size_t maxBytes) { + auto& state = KJ_ASSERT_NONNULL(branches[branch]); + KJ_ASSERT(state.sink == nullptr); + + // If there is excess data in the buffer for us, slurp that up. + auto readBuffer = arrayPtr(reinterpret_cast(buffer), maxBytes); + auto readSoFar = state.buffer.consume(readBuffer, minBytes); + + if (minBytes == 0) { + return readSoFar; + } + + if (state.buffer.empty()) { + KJ_IF_MAYBE(reason, stoppage) { + // Prefer a short read to an exception. The exception prevents the pull loop from adding any + // data to the buffer, so `readSoFar` will be zero the next time someone calls `tryRead()`, + // and the caller will see the exception. + if (reason->is() || readSoFar > 0) { + return readSoFar; + } + return cp(reason->get()); + } + } + + auto promise = newAdaptedPromise(state.sink, readBuffer, minBytes, readSoFar); + ensurePulling(); + return mv(promise); + } + + Maybe tryGetLength(BranchId branch) { + auto& state = KJ_ASSERT_NONNULL(branches[branch]); + + return length.map([&state](uint64_t amount) { + return amount + state.buffer.size(); + }); + } + + Promise pumpTo(BranchId branch, AsyncOutputStream& output, uint64_t amount) { + auto& state = KJ_ASSERT_NONNULL(branches[branch]); + KJ_ASSERT(state.sink == nullptr); + + if (amount == 0) { + return amount; + } + + if (state.buffer.empty()) { + KJ_IF_MAYBE(reason, stoppage) { + if (reason->is()) { + return uint64_t(0); + } + return cp(reason->get()); + } + } + + auto promise = newAdaptedPromise(state.sink, output, amount); + ensurePulling(); + return mv(promise); + } + +private: + struct Eof {}; + using Stoppage = OneOf; + + class Buffer { + public: + uint64_t consume(ArrayPtr& readBuffer, size_t& minBytes); + // Consume as many bytes as possible, copying them into `readBuffer`. Return the number of bytes + // consumed. + // + // `readBuffer` and `minBytes` are both assigned appropriate new values, such that after any + // call to `consume()`, `readBuffer` will point to the remaining slice of unwritten space, and + // `minBytes` will have been decremented (clamped to zero) by the amount of bytes read. 
That is, + // the read can be considered fulfilled if `minBytes` is zero after a call to `consume()`. + + Array> asArray(uint64_t minBytes, uint64_t& amount); + // Consume the first `minBytes` of the buffer (or the entire buffer) and return it in an Array + // of ArrayPtrs, suitable for passing to AsyncOutputStream.write(). The outer Array + // owns the underlying data. + + void produce(Array bytes); + // Enqueue a byte array to the end of the buffer list. + + bool empty() const; + uint64_t size() const; + + private: + std::deque> bufferList; + }; + + class Sink { + public: + struct Need { + // We use uint64_t here because: + // - pumpTo() accepts it as the `amount` parameter. + // - all practical values of tryRead()'s `maxBytes` parameter (a size_t) should also fit into + // a uint64_t, unless we're on a machine with multiple exabytes of memory ... + + uint64_t minBytes = 0; + + uint64_t maxBytes = kj::maxValue; + }; + + virtual Promise fill(Buffer& inBuffer, const Maybe& stoppage) = 0; + // Attempt to fill the sink with bytes andreturn a promise which must resolve before any inner + // read may be attempted. If a sink requires backpressure to be respected, this is how it should + // be communicated. + // + // If the sink is full, it must detach from the tee before the returned promise is resolved. + // + // The returned promise must not result in an exception. + + virtual Need need() = 0; + + virtual void reject(Exception&& exception) = 0; + // Inform this sink of a catastrophic exception and detach it. Regular read exceptions should be + // propagated through `fill()`'s stoppage parameter instead. + }; + + template + class SinkBase: public Sink { + // Registers itself with the tee as a sink on construction, detaches from the tee on + // fulfillment, rejection, or destruction. + // + // A bit of a Frankenstein, avert your eyes. For one thing, it's more of a mixin than a base... + + public: + explicit SinkBase(PromiseFulfiller& fulfiller, Maybe& sinkLink) + : fulfiller(fulfiller), sinkLink(sinkLink) { + KJ_ASSERT(sinkLink == nullptr, "sink initiated with sink already in flight"); + sinkLink = *this; + } + KJ_DISALLOW_COPY(SinkBase); + ~SinkBase() noexcept(false) { detach(); } + + void reject(Exception&& exception) override { + // The tee is allowed to reject this sink if it needs to, e.g. to propagate a non-inner read + // exception from the pull loop. Only the derived class is allowed to fulfill() directly, + // though -- the tee must keep calling fill(). + + fulfiller.reject(mv(exception)); + detach(); + } + + protected: + template + void fulfill(U value) { + fulfiller.fulfill(fwd(value)); + detach(); + } + + private: + void detach() { + KJ_IF_MAYBE(sink, sinkLink) { + if (sink == this) { + sinkLink = nullptr; + } + } + } + + PromiseFulfiller& fulfiller; + Maybe& sinkLink; + }; + + struct Branch { + Buffer buffer; + Maybe sink; + }; + + class ReadSink final: public SinkBase { + public: + explicit ReadSink(PromiseFulfiller& fulfiller, Maybe& registration, + ArrayPtr buffer, size_t minBytes, size_t readSoFar) + : SinkBase(fulfiller, registration), buffer(buffer), + minBytes(minBytes), readSoFar(readSoFar) {} + + Promise fill(Buffer& inBuffer, const Maybe& stoppage) override { + auto amount = inBuffer.consume(buffer, minBytes); + readSoFar += amount; + + if (minBytes == 0) { + // We satisfied the read request. + fulfill(readSoFar); + return READY_NOW; + } + + if (amount == 0 && inBuffer.empty()) { + // We made no progress on the read request and the buffer is tapped out. 
+ KJ_IF_MAYBE(reason, stoppage) { + if (reason->is() || readSoFar > 0) { + // Prefer short read to exception. + fulfill(readSoFar); + } else { + reject(cp(reason->get())); + } + return READY_NOW; + } + } + + return READY_NOW; + } + + Need need() override { return Need { minBytes, buffer.size() }; } + + private: + ArrayPtr buffer; + size_t minBytes; + // Arguments to the outer tryRead() call, sliced/decremented after every buffer consumption. + + size_t readSoFar; + // End result of the outer tryRead(). + }; + + class PumpSink final: public SinkBase { + public: + explicit PumpSink(PromiseFulfiller& fulfiller, Maybe& registration, + AsyncOutputStream& output, uint64_t limit) + : SinkBase(fulfiller, registration), output(output), limit(limit) {} + + ~PumpSink() noexcept(false) { + canceler.cancel("This pump has been canceled."); + } + + Promise fill(Buffer& inBuffer, const Maybe& stoppage) override { + KJ_ASSERT(limit > 0); + + uint64_t amount = 0; + + // TODO(someday): This consumes data from the buffer, but we cannot know if the stream to + // which we're pumping will accept it until after the write() promise completes. If the + // write() promise rejects, we lose this data. We should consume the data from the buffer + // only after successful writes. + auto writeBuffer = inBuffer.asArray(limit, amount); + KJ_ASSERT(limit >= amount); + if (amount > 0) { + Promise promise = kj::evalNow([&]() { + return output.write(writeBuffer).attach(mv(writeBuffer)); + }).then([this, amount]() { + limit -= amount; + pumpedSoFar += amount; + if (limit == 0) { + fulfill(pumpedSoFar); + } + }).eagerlyEvaluate([this](Exception&& exception) { + reject(mv(exception)); + }); + + return canceler.wrap(mv(promise)).catch_([](kj::Exception&&) {}); + } else KJ_IF_MAYBE(reason, stoppage) { + if (reason->is()) { + // Unlike in the read case, it makes more sense to immediately propagate exceptions to the + // pump promise rather than show it a "short pump". + fulfill(pumpedSoFar); + } else { + reject(cp(reason->get())); + } + } + + return READY_NOW; + } + + Need need() override { return Need { 1, limit }; } + + private: + AsyncOutputStream& output; + uint64_t limit; + // Arguments to the outer pumpTo() call, decremented after every buffer consumption. + // + // Equal to zero once fulfiller has been fulfilled/rejected. + + uint64_t pumpedSoFar = 0; + // End result of the outer pumpTo(). + + Canceler canceler; + // When the pump is canceled, we also need to cancel any write operations in flight. + }; + + // ===================================================================================== + + Maybe analyzeSinks() { + // Return nullptr if there are no sinks at all. Otherwise, return the largest `minBytes` and the + // smallest `maxBytes` requested by any sink. The pull loop will use these values to calculate + // the optimal buffer size for the next inner read, so that a minimum amount of data is buffered + // at any given time. + + uint64_t minBytes = 0; + uint64_t maxBytes = kj::maxValue; + + uint nBranches = 0; + uint nSinks = 0; + + for (auto& state: branches) { + KJ_IF_MAYBE(s, state) { + ++nBranches; + KJ_IF_MAYBE(sink, s->sink) { + ++nSinks; + auto need = sink->need(); + minBytes = kj::max(minBytes, need.minBytes); + maxBytes = kj::min(maxBytes, need.maxBytes); + } + } + } + + if (nSinks > 0) { + KJ_ASSERT(minBytes > 0); + KJ_ASSERT(maxBytes > 0, "sink was filled but did not detach"); + + // Sinks may report non-overlapping needs. 
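// Worked example of the aggregation above, with illustrative numbers: if branch 0
// has a pump sink reporting Need{minBytes = 1, maxBytes = 8192} and branch 1 has a
// read sink reporting Need{minBytes = 512, maxBytes = 1024}, analyzeSinks() yields
// Need{512, 1024} -- the largest minBytes so one inner read makes progress for every
// sink, and the smallest maxBytes so no sink is forced to over-buffer. If the ranges
// do not overlap at all (say {2048, maxValue} vs. {1, 1024}), the clamp below raises
// maxBytes up to minBytes, producing Need{2048, 2048}.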
+ maxBytes = kj::max(minBytes, maxBytes); + + return Sink::Need { minBytes, maxBytes }; + } + + // No active sinks. + return nullptr; + } + + void ensurePulling() { + if (!pulling) { + pulling = true; + UnwindDetector unwind; + KJ_DEFER(if (unwind.isUnwinding()) pulling = false); + pullPromise = pull(); + } + } + + Promise pull() { + return pullLoop().eagerlyEvaluate([this](Exception&& exception) { + // Exception from our loop, not from inner tryRead(). Something is broken; tell everybody! + pulling = false; + for (auto& state: branches) { + KJ_IF_MAYBE(s, state) { + KJ_IF_MAYBE(sink, s->sink) { + sink->reject(KJ_EXCEPTION(FAILED, "Exception in tee loop", exception)); + } + } + } + }); + } + + constexpr static size_t MAX_BLOCK_SIZE = 1 << 14; // 16k + + Own inner; + const uint64_t bufferSizeLimit = kj::maxValue; + Maybe length; + Maybe branches[2]; + Maybe stoppage; + Promise pullPromise = READY_NOW; + bool pulling = false; + +private: + Promise pullLoop() { + // Use evalLater() so that two pump sinks added on the same turn of the event loop will not + // cause buffering. + return evalLater([this] { + // Attempt to fill any sinks that exist. + + Vector> promises; + + for (auto& state: branches) { + KJ_IF_MAYBE(s, state) { + KJ_IF_MAYBE(sink, s->sink) { + promises.add(sink->fill(s->buffer, stoppage)); + } + } + } + + // Respect the greatest of the sinks' backpressures. + return joinPromises(promises.releaseAsArray()); + }).then([this]() -> Promise { + // Check to see whether we need to perform an inner read. + + auto need = analyzeSinks(); + + if (need == nullptr) { + // No more sinks, stop pulling. + pulling = false; + return READY_NOW; + } + + if (stoppage != nullptr) { + // We're eof or errored, don't read, but loop so we can fill the sink(s). + return pullLoop(); + } + + auto& n = KJ_ASSERT_NONNULL(need); + + KJ_ASSERT(n.minBytes > 0); + + // We must perform an inner read. + + // We'd prefer not to explode our buffer, if that's cool. We cap `maxBytes` to the buffer size + // limit or our builtin MAX_BLOCK_SIZE, whichever is smaller. But, we make sure `maxBytes` is + // still >= `minBytes`. + n.maxBytes = kj::min(n.maxBytes, MAX_BLOCK_SIZE); + n.maxBytes = kj::min(n.maxBytes, bufferSizeLimit); + n.maxBytes = kj::max(n.minBytes, n.maxBytes); + for (auto& state: branches) { + KJ_IF_MAYBE(s, state) { + // TODO(perf): buffer.size() is O(n) where n = # of individual heap-allocated byte arrays. + if (s->buffer.size() + n.maxBytes > bufferSizeLimit) { + stoppage = Stoppage(KJ_EXCEPTION(FAILED, "tee buffer size limit exceeded")); + return pullLoop(); + } + } + } + auto heapBuffer = heapArray(n.maxBytes); + + // gcc 4.9 quirk: If I don't hoist this into a separate variable and instead call + // + // inner->tryRead(heapBuffer.begin(), n.minBytes, heapBuffer.size()) + // + // `heapBuffer` seems to get moved into the lambda capture before the arguments to `tryRead()` + // are evaluated, meaning `inner` sees a nullptr destination. Bizarrely, `inner` sees the + // correct value for `heapBuffer.size()`... I dunno, man. 
+ auto destination = heapBuffer.begin(); + + return kj::evalNow([&]() { return inner->tryRead(destination, n.minBytes, n.maxBytes); }) + .then([this, heapBuffer = mv(heapBuffer), minBytes = n.minBytes](size_t amount) mutable + -> Promise { + length = length.map([amount](uint64_t n) { + KJ_ASSERT(n >= amount); + return n - amount; + }); + + if (amount < heapBuffer.size()) { + heapBuffer = heapBuffer.slice(0, amount).attach(mv(heapBuffer)); + } + + KJ_ASSERT(stoppage == nullptr); + Maybe> bufferPtr = nullptr; + for (auto& state: branches) { + KJ_IF_MAYBE(s, state) { + // Prefer to move the buffer into the receiving branch's deque, rather than memcpy. + // + // TODO(perf): For the 2-branch case, this is fine, since the majority of the time + // only one buffer will be in use. If we generalize to the n-branch case, this would + // become memcpy-heavy. + KJ_IF_MAYBE(ptr, bufferPtr) { + s->buffer.produce(heapArray(*ptr)); + } else { + bufferPtr = ArrayPtr(heapBuffer); + s->buffer.produce(mv(heapBuffer)); + } + } + } + + if (amount < minBytes) { + // Short read, EOF. + stoppage = Stoppage(Eof()); + } + + return pullLoop(); + }, [this](Exception&& exception) { + // Exception from the inner tryRead(). Propagate. + stoppage = Stoppage(mv(exception)); + return pullLoop(); + }); + }); + } +}; + +constexpr size_t AsyncTee::MAX_BLOCK_SIZE; + +uint64_t AsyncTee::Buffer::consume(ArrayPtr& readBuffer, size_t& minBytes) { + uint64_t totalAmount = 0; + + while (readBuffer.size() > 0 && !bufferList.empty()) { + auto& bytes = bufferList.front(); + auto amount = kj::min(bytes.size(), readBuffer.size()); + memcpy(readBuffer.begin(), bytes.begin(), amount); + totalAmount += amount; + + readBuffer = readBuffer.slice(amount, readBuffer.size()); + minBytes -= kj::min(amount, minBytes); + + if (amount == bytes.size()) { + bufferList.pop_front(); + } else { + bytes = heapArray(bytes.slice(amount, bytes.size())); + return totalAmount; + } + } + + return totalAmount; +} + +void AsyncTee::Buffer::produce(Array bytes) { + bufferList.push_back(mv(bytes)); +} + +Array> AsyncTee::Buffer::asArray( + uint64_t maxBytes, uint64_t& amount) { + amount = 0; + + Vector> buffers; + Vector> ownBuffers; + + while (maxBytes > 0 && !bufferList.empty()) { + auto& bytes = bufferList.front(); + + if (bytes.size() <= maxBytes) { + amount += bytes.size(); + maxBytes -= bytes.size(); + + buffers.add(bytes); + ownBuffers.add(mv(bytes)); + + bufferList.pop_front(); + } else { + auto ownBytes = heapArray(bytes.slice(0, maxBytes)); + buffers.add(ownBytes); + ownBuffers.add(mv(ownBytes)); + + bytes = heapArray(bytes.slice(maxBytes, bytes.size())); + + amount += maxBytes; + maxBytes = 0; + } + } + + + if (buffers.size() > 0) { + return buffers.releaseAsArray().attach(mv(ownBuffers)); + } + + return {}; +} + +bool AsyncTee::Buffer::empty() const { + return bufferList.empty(); +} + +uint64_t AsyncTee::Buffer::size() const { + uint64_t result = 0; + + for (auto& bytes: bufferList) { + result += bytes.size(); + } + + return result; +} + +class TeeBranch final: public AsyncInputStream { +public: + TeeBranch(Own tee, uint8_t branch): tee(mv(tee)), branch(branch) { + this->tee->addBranch(branch); + } + ~TeeBranch() noexcept(false) { + unwind.catchExceptionsIfUnwinding([&]() { + tee->removeBranch(branch); + }); + } + + Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { + return tee->tryRead(branch, buffer, minBytes, maxBytes); + } + + Promise pumpTo(AsyncOutputStream& output, uint64_t amount) override { + return tee->pumpTo(branch, 
output, amount); + } + + Maybe tryGetLength() override { + return tee->tryGetLength(branch); + } + +private: + Own tee; + const uint8_t branch; + UnwindDetector unwind; +}; + +} // namespace + +Tee newTee(Own input, uint64_t limit) { + auto impl = refcounted(mv(input), limit); + Own branch1 = heap(addRef(*impl), 0); + Own branch2 = heap(mv(impl), 1); + return { { mv(branch1), mv(branch2) } }; +} + +namespace { + +class PromisedAsyncIoStream final: public kj::AsyncIoStream, private kj::TaskSet::ErrorHandler { + // An AsyncIoStream which waits for a promise to resolve then forwards all calls to the promised + // stream. + +public: + PromisedAsyncIoStream(kj::Promise> promise) + : promise(promise.then([this](kj::Own result) { + stream = kj::mv(result); + }).fork()), + tasks(*this) {} + + kj::Promise read(void* buffer, size_t minBytes, size_t maxBytes) override { + KJ_IF_MAYBE(s, stream) { + return s->get()->read(buffer, minBytes, maxBytes); + } else { + return promise.addBranch().then([this,buffer,minBytes,maxBytes]() { + return KJ_ASSERT_NONNULL(stream)->read(buffer, minBytes, maxBytes); + }); + } + } + kj::Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { + KJ_IF_MAYBE(s, stream) { + return s->get()->tryRead(buffer, minBytes, maxBytes); + } else { + return promise.addBranch().then([this,buffer,minBytes,maxBytes]() { + return KJ_ASSERT_NONNULL(stream)->tryRead(buffer, minBytes, maxBytes); + }); + } + } + + kj::Maybe tryGetLength() override { + KJ_IF_MAYBE(s, stream) { + return s->get()->tryGetLength(); + } else { + return nullptr; + } + } + + kj::Promise pumpTo(kj::AsyncOutputStream& output, uint64_t amount) override { + KJ_IF_MAYBE(s, stream) { + return s->get()->pumpTo(output, amount); + } else { + return promise.addBranch().then([this,&output,amount]() { + return KJ_ASSERT_NONNULL(stream)->pumpTo(output, amount); + }); + } + } + + kj::Promise write(const void* buffer, size_t size) override { + KJ_IF_MAYBE(s, stream) { + return s->get()->write(buffer, size); + } else { + return promise.addBranch().then([this,buffer,size]() { + return KJ_ASSERT_NONNULL(stream)->write(buffer, size); + }); + } + } + kj::Promise write(kj::ArrayPtr> pieces) override { + KJ_IF_MAYBE(s, stream) { + return s->get()->write(pieces); + } else { + return promise.addBranch().then([this,pieces]() { + return KJ_ASSERT_NONNULL(stream)->write(pieces); + }); + } + } + + kj::Maybe> tryPumpFrom( + kj::AsyncInputStream& input, uint64_t amount = kj::maxValue) override { + KJ_IF_MAYBE(s, stream) { + // Call input.pumpTo() on the resolved stream instead, so that if it does some dynamic_casts + // or whatnot to detect stream types it can retry those on the inner stream. + return input.pumpTo(**s, amount); + } else { + return promise.addBranch().then([this,&input,amount]() { + // Here we actually have no choice but to call input.pumpTo() because if we called + // tryPumpFrom(input, amount) and it returned nullptr, what would we do? It's too late for + // us to return nullptr. But the thing about dynamic_cast also applies. 
+ return input.pumpTo(*KJ_ASSERT_NONNULL(stream), amount); + }); + } + } + + Promise whenWriteDisconnected() override { + KJ_IF_MAYBE(s, stream) { + return s->get()->whenWriteDisconnected(); + } else { + return promise.addBranch().then([this]() { + return KJ_ASSERT_NONNULL(stream)->whenWriteDisconnected(); + }, [](kj::Exception&& e) -> kj::Promise { + if (e.getType() == kj::Exception::Type::DISCONNECTED) { + return kj::READY_NOW; + } else { + return kj::mv(e); + } + }); + } + } + + void shutdownWrite() override { + KJ_IF_MAYBE(s, stream) { + return s->get()->shutdownWrite(); + } else { + tasks.add(promise.addBranch().then([this]() { + return KJ_ASSERT_NONNULL(stream)->shutdownWrite(); + })); + } + } + + void abortRead() override { + KJ_IF_MAYBE(s, stream) { + return s->get()->abortRead(); + } else { + tasks.add(promise.addBranch().then([this]() { + return KJ_ASSERT_NONNULL(stream)->abortRead(); + })); + } + } + + kj::Maybe getFd() const override { + KJ_IF_MAYBE(s, stream) { + return s->get()->getFd(); + } else { + return nullptr; + } + } + +private: + kj::ForkedPromise promise; + kj::Maybe> stream; + kj::TaskSet tasks; + + void taskFailed(kj::Exception&& exception) override { + KJ_LOG(ERROR, exception); + } +}; + +class PromisedAsyncOutputStream final: public kj::AsyncOutputStream { + // An AsyncOutputStream which waits for a promise to resolve then forwards all calls to the + // promised stream. + // + // TODO(cleanup): Can this share implementation with PromiseIoStream? Seems hard. + +public: + PromisedAsyncOutputStream(kj::Promise> promise) + : promise(promise.then([this](kj::Own result) { + stream = kj::mv(result); + }).fork()) {} + + kj::Promise write(const void* buffer, size_t size) override { + KJ_IF_MAYBE(s, stream) { + return s->get()->write(buffer, size); + } else { + return promise.addBranch().then([this,buffer,size]() { + return KJ_ASSERT_NONNULL(stream)->write(buffer, size); + }); + } + } + kj::Promise write(kj::ArrayPtr> pieces) override { + KJ_IF_MAYBE(s, stream) { + return s->get()->write(pieces); + } else { + return promise.addBranch().then([this,pieces]() { + return KJ_ASSERT_NONNULL(stream)->write(pieces); + }); + } + } + + kj::Maybe> tryPumpFrom( + kj::AsyncInputStream& input, uint64_t amount = kj::maxValue) override { + KJ_IF_MAYBE(s, stream) { + return s->get()->tryPumpFrom(input, amount); + } else { + return promise.addBranch().then([this,&input,amount]() { + // Call input.pumpTo() on the resolved stream instead. + return input.pumpTo(*KJ_ASSERT_NONNULL(stream), amount); + }); + } + } + + Promise whenWriteDisconnected() override { + KJ_IF_MAYBE(s, stream) { + return s->get()->whenWriteDisconnected(); + } else { + return promise.addBranch().then([this]() { + return KJ_ASSERT_NONNULL(stream)->whenWriteDisconnected(); + }, [](kj::Exception&& e) -> kj::Promise { + if (e.getType() == kj::Exception::Type::DISCONNECTED) { + return kj::READY_NOW; + } else { + return kj::mv(e); + } + }); + } + } + +private: + kj::ForkedPromise promise; + kj::Maybe> stream; +}; + +} // namespace + +Own newPromisedStream(Promise> promise) { + return heap(kj::mv(promise)); +} +Own newPromisedStream(Promise> promise) { + return heap(kj::mv(promise)); +} + +Promise AsyncCapabilityStream::writeWithFds( + ArrayPtr data, ArrayPtr> moreData, + ArrayPtr fds) { + // HACK: AutoCloseFd actually contains an `int` under the hood. We can reinterpret_cast to avoid + // unnecessary memory allocation. 
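// A minimal usage sketch for the promised-stream wrappers above: wrap the result of
// NetworkAddress::connect() so the caller can start writing before the connection
// has actually been established; the write is queued until the promise resolves.
// `addr` is assumed to be supplied by the caller.
kj::Promise<void> connectAndGreetSketch(kj::Own<kj::NetworkAddress> addr) {
  kj::Own<kj::AsyncIoStream> stream = kj::newPromisedStream(addr->connect());
  return stream->write("hello\n", 6).attach(kj::mv(stream));
}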
+ static_assert(sizeof(AutoCloseFd) == sizeof(int), "this optimization won't work"); + auto intArray = arrayPtr(reinterpret_cast(fds.begin()), fds.size()); + + // Be extra-paranoid about aliasing rules by injecting a compiler barrier here. Probably + // not necessary but also probably doesn't hurt. +#if _MSC_VER + _ReadWriteBarrier(); +#else + __asm__ __volatile__("": : :"memory"); +#endif + + return writeWithFds(data, moreData, intArray); +} + Promise> AsyncCapabilityStream::receiveStream() { return tryReceiveStream() .then([](Maybe>&& result) @@ -1137,6 +2535,35 @@ Promise> AsyncCapabilityStream::receiveStream() { }); } +kj::Promise>> AsyncCapabilityStream::tryReceiveStream() { + struct ResultHolder { + byte b; + Own stream; + }; + auto result = kj::heap(); + auto promise = tryReadWithStreams(&result->b, 1, 1, &result->stream, 1); + return promise.then([result = kj::mv(result)](ReadResult actual) mutable + -> Maybe> { + if (actual.byteCount == 0) { + return nullptr; + } + + KJ_REQUIRE(actual.capCount == 1, + "expected to receive a capability (e.g. file descirptor via SCM_RIGHTS), but didn't") { + return nullptr; + } + + return kj::mv(result->stream); + }); +} + +Promise AsyncCapabilityStream::sendStream(Own stream) { + static constexpr byte b = 0; + auto streams = kj::heapArray>(1); + streams[0] = kj::mv(stream); + return writeWithStreams(arrayPtr(&b, 1), nullptr, kj::mv(streams)); +} + Promise AsyncCapabilityStream::receiveFd() { return tryReceiveFd().then([](Maybe&& result) -> Promise { KJ_IF_MAYBE(r, result) { @@ -1146,36 +2573,63 @@ Promise AsyncCapabilityStream::receiveFd() { } }); } -Promise> AsyncCapabilityStream::tryReceiveFd() { - return KJ_EXCEPTION(UNIMPLEMENTED, "this stream cannot receive file descriptors"); + +kj::Promise> AsyncCapabilityStream::tryReceiveFd() { + struct ResultHolder { + byte b; + AutoCloseFd fd; + }; + auto result = kj::heap(); + auto promise = tryReadWithFds(&result->b, 1, 1, &result->fd, 1); + return promise.then([result = kj::mv(result)](ReadResult actual) mutable + -> Maybe { + if (actual.byteCount == 0) { + return nullptr; + } + + KJ_REQUIRE(actual.capCount == 1, + "expected to receive a file descriptor (e.g. 
via SCM_RIGHTS), but didn't") { + return nullptr; + } + + return kj::mv(result->fd); + }); } + Promise AsyncCapabilityStream::sendFd(int fd) { - return KJ_EXCEPTION(UNIMPLEMENTED, "this stream cannot send file descriptors"); + static constexpr byte b = 0; + auto fds = kj::heapArray(1); + fds[0] = fd; + auto promise = writeWithFds(arrayPtr(&b, 1), nullptr, fds); + return promise.attach(kj::mv(fds)); } void AsyncIoStream::getsockopt(int level, int option, void* value, uint* length) { - KJ_UNIMPLEMENTED("Not a socket."); + KJ_UNIMPLEMENTED("Not a socket.") { *length = 0; break; } } void AsyncIoStream::setsockopt(int level, int option, const void* value, uint length) { - KJ_UNIMPLEMENTED("Not a socket."); + KJ_UNIMPLEMENTED("Not a socket.") { break; } } void AsyncIoStream::getsockname(struct sockaddr* addr, uint* length) { - KJ_UNIMPLEMENTED("Not a socket."); + KJ_UNIMPLEMENTED("Not a socket.") { *length = 0; break; } } void AsyncIoStream::getpeername(struct sockaddr* addr, uint* length) { - KJ_UNIMPLEMENTED("Not a socket."); + KJ_UNIMPLEMENTED("Not a socket.") { *length = 0; break; } } void ConnectionReceiver::getsockopt(int level, int option, void* value, uint* length) { - KJ_UNIMPLEMENTED("Not a socket."); + KJ_UNIMPLEMENTED("Not a socket.") { *length = 0; break; } } void ConnectionReceiver::setsockopt(int level, int option, const void* value, uint length) { - KJ_UNIMPLEMENTED("Not a socket."); + KJ_UNIMPLEMENTED("Not a socket.") { break; } +} +void ConnectionReceiver::getsockname(struct sockaddr* addr, uint* length) { + KJ_UNIMPLEMENTED("Not a socket.") { *length = 0; break; } } void DatagramPort::getsockopt(int level, int option, void* value, uint* length) { - KJ_UNIMPLEMENTED("Not a socket."); + KJ_UNIMPLEMENTED("Not a socket.") { *length = 0; break; } } void DatagramPort::setsockopt(int level, int option, const void* value, uint length) { - KJ_UNIMPLEMENTED("Not a socket."); + KJ_UNIMPLEMENTED("Not a socket.") { break; } } Own NetworkAddress::bindDatagramPort() { KJ_UNIMPLEMENTED("Datagram sockets not implemented."); @@ -1251,18 +2705,34 @@ Promise> CapabilityStreamConnectionReceiver::accept() { }); } +Promise CapabilityStreamConnectionReceiver::acceptAuthenticated() { + return accept().then([](Own&& stream) { + return AuthenticatedStream { kj::mv(stream), UnknownPeerIdentity::newInstance() }; + }); +} + uint CapabilityStreamConnectionReceiver::getPort() { return 0; } Promise> CapabilityStreamNetworkAddress::connect() { - auto pipe = provider.newCapabilityPipe(); + CapabilityPipe pipe; + KJ_IF_MAYBE(p, provider) { + pipe = p->newCapabilityPipe(); + } else { + pipe = kj::newCapabilityPipe(); + } auto result = kj::mv(pipe.ends[0]); return inner.sendStream(kj::mv(pipe.ends[1])) .then(kj::mvCapture(result, [](Own&& result) { return kj::mv(result); })); } +Promise CapabilityStreamNetworkAddress::connectAuthenticated() { + return connect().then([](Own&& stream) { + return AuthenticatedStream { kj::mv(stream), UnknownPeerIdentity::newInstance() }; + }); +} Own CapabilityStreamNetworkAddress::listen() { return kj::heap(inner); } @@ -1616,4 +3086,81 @@ bool NetworkFilter::shouldAllowParse(const struct sockaddr* addr, uint addrlen) } } // namespace _ (private) + +// ======================================================================================= +// PeerIdentity implementations + +namespace { + +class NetworkPeerIdentityImpl final: public NetworkPeerIdentity { +public: + NetworkPeerIdentityImpl(kj::Own addr): addr(kj::mv(addr)) {} + + kj::String toString() override { return 
addr->toString(); } + NetworkAddress& getAddress() override { return *addr; } + +private: + kj::Own addr; +}; + +class LocalPeerIdentityImpl final: public LocalPeerIdentity { +public: + LocalPeerIdentityImpl(Credentials creds): creds(creds) {} + + kj::String toString() override { + char pidBuffer[16]; + kj::StringPtr pidStr = nullptr; + KJ_IF_MAYBE(p, creds.pid) { + pidStr = strPreallocated(pidBuffer, " pid:", *p); + } + + char uidBuffer[16]; + kj::StringPtr uidStr = nullptr; + KJ_IF_MAYBE(u, creds.uid) { + uidStr = strPreallocated(uidBuffer, " uid:", *u); + } + + return kj::str("(local peer", pidStr, uidStr, ")"); + } + + Credentials getCredentials() override { return creds; } + +private: + Credentials creds; +}; + +class UnknownPeerIdentityImpl final: public UnknownPeerIdentity { +public: + kj::String toString() override { + return kj::str("(unknown peer)"); + } +}; + +} // namespace + +kj::Own NetworkPeerIdentity::newInstance(kj::Own addr) { + return kj::heap(kj::mv(addr)); +} + +kj::Own LocalPeerIdentity::newInstance(LocalPeerIdentity::Credentials creds) { + return kj::heap(creds); +} + +kj::Own UnknownPeerIdentity::newInstance() { + static UnknownPeerIdentityImpl instance; + return { &instance, NullDisposer::instance }; +} + +Promise ConnectionReceiver::acceptAuthenticated() { + return accept().then([](Own stream) { + return AuthenticatedStream { kj::mv(stream), UnknownPeerIdentity::newInstance() }; + }); +} + +Promise NetworkAddress::connectAuthenticated() { + return connect().then([](Own stream) { + return AuthenticatedStream { kj::mv(stream), UnknownPeerIdentity::newInstance() }; + }); +} + } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-io.h b/libs/EXTERNAL/capnproto/c++/src/kj/async-io.h index 798069462f0..3be72377a91 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-io.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-io.h @@ -21,15 +21,13 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "async.h" #include "function.h" #include "thread.h" #include "timer.h" +KJ_BEGIN_HEADER + struct sockaddr; namespace kj { @@ -45,6 +43,7 @@ class AutoCloseFd; class NetworkAddress; class AsyncOutputStream; class AsyncIoStream; +class AncillaryMessage; // ======================================================================================= // Streaming I/O @@ -86,6 +85,12 @@ class AsyncInputStream { // // To prevent runaway memory allocation, consider using a more conservative value for `limit` than // the default, particularly on untrusted data streams which may never see EOF. + + virtual void registerAncillaryMessageHandler(Function)> fn); + // Register interest in checking for ancillary messages (aka control messages) when reading. + // The provided callback will be called whenever any are encountered. The messages passed to + // the function do not live beyond when function returns. + // Only supported on Unix (the default impl throws UNIMPLEMENTED). Most apps will not use this. }; class AsyncOutputStream { @@ -106,6 +111,20 @@ class AsyncOutputStream { // output stream. If it finds one, it performs the pump. Otherwise, it returns null. // // The default implementation always returns null. + + virtual Promise whenWriteDisconnected() = 0; + // Returns a promise that resolves when the stream has become disconnected such that new write()s + // will fail with a DISCONNECTED exception. This is particularly useful, for example, to cancel + // work early when it is detected that no one will receive the result. 
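// A minimal sketch of that use case: abandon a response once the peer has
// disconnected. `stream` and the `response` promise are assumed to be supplied by
// the caller.
kj::Promise<void> respondUnlessDisconnectedSketch(
    kj::AsyncIoStream& stream, kj::Promise<kj::String> response) {
  auto work = response.then([&stream](kj::String text) {
    return stream.write(text.begin(), text.size()).attach(kj::mv(text));
  });
  // Whichever settles first wins: the completed write, or a DISCONNECTED error.
  return work.exclusiveJoin(stream.whenWriteDisconnected()
      .then([]() -> kj::Promise<void> {
        return KJ_EXCEPTION(DISCONNECTED, "peer disconnected before the response was written");
      }));
}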
+ // + // Note that not all streams are able to detect this condition without actually performing a + // write(); such stream implementations may return a promise that never resolves. (In particular, + // as of this writing, whenWriteDisconnected() is not implemented on Windows. Also, for TCP + // streams, not all disconnects are detectable -- a power or network failure may lead the + // connection to hang forever, or until configured socket options lead to a timeout.) + // + // Unlike most other asynchronous stream methods, it is safe to call whenWriteDisconnected() + // multiple times without canceling the previous promises. }; class AsyncIoStream: public AsyncInputStream, public AsyncOutputStream { @@ -134,18 +153,25 @@ class AsyncIoStream: public AsyncInputStream, public AsyncOutputStream { // Note that we don't provide methods that return NetworkAddress because it usually wouldn't // be useful. You can't connect() to or listen() on these addresses, obviously, because they are // ephemeral addresses for a single connection. + + virtual kj::Maybe getFd() const { return nullptr; } + // Get the underlying Unix file descriptor, if any. Returns nullptr if this object actually + // isn't wrapping a file descriptor. }; class AsyncCapabilityStream: public AsyncIoStream { - // An AsyncIoStream that also allows sending and receiving new connections or other kinds of - // capabilities, in addition to simple data. + // An AsyncIoStream that also allows transmitting new stream objects and file descriptors + // (capabilities, in the object-capability model sense), in addition to bytes. // - // For correct functioning, a protocol must be designed such that the receiver knows when to - // expect a capability transfer. The receiver must not read() when a capability is expected, and - // must not receiveStream() when data is expected -- if it does, an exception may be thrown or - // invalid data may be returned. This implies that data sent over an AsyncCapabilityStream must - // be framed such that the receiver knows exactly how many bytes to read before receiving a - // capability. + // Capabilities can be attached to bytes when they are written. On the receiving end, the read() + // that receives the first byte of such a message will also receive the capabilities. + // + // Note that AsyncIoStream's regular byte-oriented methods can be used on AsyncCapabilityStream, + // with the effect of silently dropping any capabilities attached to the respective bytes. E.g. + // using `AsyncIoStream::tryRead()` to read bytes that had been sent with `writeWithFds()` will + // silently drop the FDs (closing them if appropriate). Also note that pumping a stream with + // `pumpTo()` always drops all capabilities attached to the pumped data. (TODO(someday): Do we + // want a version of pumpTo() that preserves capabilities?) // // On Unix, KJ provides an implementation based on Unix domain sockets and file descriptor // passing via SCM_RIGHTS. Due to the nature of SCM_RIGHTS, if the application accidentally @@ -153,23 +179,65 @@ class AsyncCapabilityStream: public AsyncIoStream { // and the capability will be discarded. Of course, an application should not depend on this // behavior; it should avoid read()ing through a capability. // - // KJ does not provide any implementation of this type on Windows, as there's no obvious - // implementation there. 
Handle passing on Windows requires at least one of the processes + // KJ does not provide any inter-process implementation of this type on Windows, as there's no + // obvious implementation there. Handle passing on Windows requires at least one of the processes // involved to have permission to modify the other's handle table, which is effectively full // control. Handle passing between mutually non-trusting processes would require a trusted // broker process to facilitate. One could possibly implement this type in terms of such a // broker, or in terms of direct handle passing if at least one process trusts the other. public: + virtual Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds) = 0; + Promise writeWithFds(ArrayPtr data, + ArrayPtr> moreData, + ArrayPtr fds); + // Write some data to the stream with some file descriptors attached to it. + // + // The maximum number of FDs that can be sent at a time is usually subject to an OS-imposed + // limit. On Linux, this is 253. In practice, sending more than a handful of FDs at once is + // probably a bad idea. + + struct ReadResult { + size_t byteCount; + size_t capCount; + }; + + virtual Promise tryReadWithFds(void* buffer, size_t minBytes, size_t maxBytes, + AutoCloseFd* fdBuffer, size_t maxFds) = 0; + // Read data from the stream that may have file descriptors attached. Any attached descriptors + // will be placed in `fdBuffer`. If multiple bundles of FDs are encountered in the course of + // reading the amount of data requested by minBytes/maxBytes, then they will be concatenated. If + // more FDs are received than fit in the buffer, then the excess will be discarded and closed -- + // this behavior, while ugly, is important to defend against denial-of-service attacks that may + // fill up the FD table with garbage. Applications must think carefully about how many FDs they + // really need to receive at once and set a well-defined limit. + + virtual Promise writeWithStreams(ArrayPtr data, + ArrayPtr> moreData, + Array> streams) = 0; + virtual Promise tryReadWithStreams( + void* buffer, size_t minBytes, size_t maxBytes, + Own* streamBuffer, size_t maxStreams) = 0; + // Like above, but passes AsyncCapabilityStream objects. The stream implementations must be from + // the same AsyncIoProvider. + + // --------------------------------------------------------------------------- + // Helpers for sending individual capabilities. + // + // These are equivalent to the above methods with the constraint that only one FD is + // sent/received at a time and the corresponding data is a single zero-valued byte. + Promise> receiveStream(); - virtual Promise>> tryReceiveStream() = 0; - virtual Promise sendStream(Own stream) = 0; - // Transfer a stream. + Promise>> tryReceiveStream(); + Promise sendStream(Own stream); + // Transfer a single stream. Promise receiveFd(); - virtual Promise> tryReceiveFd(); - virtual Promise sendFd(int fd); - // Transfer a raw file descriptor. Default implementation throws UNIMPLEMENTED. + Promise> tryReceiveFd(); + Promise sendFd(int fd); + // Transfer a single raw file descriptor. }; struct OneWayPipe { @@ -203,6 +271,140 @@ struct CapabilityPipe { Own ends[2]; }; +CapabilityPipe newCapabilityPipe(); +// Like newTwoWayPipe() but creates a capability pipe. +// +// The requirement of `writeWithStreams()` that "The stream implementations must be from the same +// AsyncIoProvider." does not apply to this pipe; any kind of AsyncCapabilityStream implementation +// is supported. 
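// [Editor's illustration -- not part of the upstream patch] A sketch combining the in-process
// capability pipe with the single-FD helpers above; `someFd` is a hypothetical open descriptor.
//
//   auto pipe = kj::newCapabilityPipe();
//   kj::Promise<void> sent = pipe.ends[0]->sendFd(someFd);
//   kj::Promise<kj::AutoCloseFd> received = pipe.ends[1]->receiveFd();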
+// +// This implementation does not know how to convert streams to FDs or vice versa; if you write FDs +// you must read FDs, and if you write streams you must read streams. + +struct Tee { + // Two AsyncInputStreams which each read the same data from some wrapped inner AsyncInputStream. + + Own branches[2]; +}; + +Tee newTee(Own input, uint64_t limit = kj::maxValue); +// Constructs a Tee that operates in-process. The tee buffers data if any read or pump operations is +// called on one of the two input ends. If a read or pump operation is subsequently called on the +// other input end, the buffered data is consumed. +// +// `pumpTo()` operations on the input ends will proactively read from the inner stream and block +// while writing to the output stream. While one branch has an active `pumpTo()` operation, any +// `tryRead()` operation on the other branch will not be allowed to read faster than allowed by the +// pump's backpressure. (In other words, it will never cause buffering on the pump.) Similarly, if +// there are `pumpTo()` operations active on both branches, the greater of the two backpressures is +// respected -- the two pumps progress in lockstep, and there is no buffering. +// +// At no point will a branch's buffer be allowed to grow beyond `limit` bytes. If the buffer would +// grow beyond the limit, an exception is generated, which both branches see once they have +// exhausted their buffers. +// +// It is recommended that you use a more conservative value for `limit` than the default. + +Own newPromisedStream(Promise> promise); +Own newPromisedStream(Promise> promise); +// Constructs an Async*Stream which waits for a promise to resolve, then forwards all calls to the +// promised stream. + +// ======================================================================================= +// Authenticated streams + +class PeerIdentity { + // PeerIdentity provides information about a connecting client. Various subclasses exist to + // address different network types. +public: + virtual kj::String toString() = 0; + // Returns a human-readable string identifying the peer. Where possible, this string will be + // in the same format as the addresses you could pass to `kj::Network::parseAddress()`. However, + // only certain subclasses of `PeerIdentity` guarantee this property. +}; + +struct AuthenticatedStream { + // A pair of an `AsyncIoStream` and a `PeerIdentity`. This is used as the return type of + // `NetworkAddress::connectAuthenticated()` and `ConnectionReceiver::acceptAuthenticated()`. + + Own stream; + // The byte stream. + + Own peerIdentity; + // An object indicating who is at the other end of the stream. + // + // Different subclasses of `PeerIdentity` are used in different situations: + // - TCP connections will use NetworkPeerIdentity, which gives the network address of the client. + // - Local (unix) socket connections will use LocalPeerIdentity, which identifies the UID + // and PID of the process that initiated the connection. + // - TLS connections will use TlsPeerIdentity which provides details of the client certificate, + // if any was provided. + // - When no meaningful peer identity can be provided, `UnknownPeerIdentity` is returned. + // + // Implementations of `Network`, `ConnectionReceiver`, `NetworkAddress`, etc. should document the + // specific assumptions the caller can make about the type of `PeerIdentity`s used, allowing for + // identities to be statically downcast if the right conditions are met. 
In the absence of + // documented promises, RTTI may be needed to query the type. +}; + +class NetworkPeerIdentity: public PeerIdentity { + // PeerIdentity used for network protocols like TCP/IP. This identifies the remote peer. + // + // This is only "authenticated" to the extent that we know data written to the stream will be + // routed to the given address. This does not preclude the possibility of man-in-the-middle + // attacks by attackers who are able to manipulate traffic along the route. +public: + virtual NetworkAddress& getAddress() = 0; + // Obtain the peer's address as a NetworkAddress object. The returned reference's lifetime is the + // same as the `NetworkPeerIdentity`, but you can always call `clone()` on it to get a copy that + // lives longer. + + static kj::Own newInstance(kj::Own addr); + // Construct an instance of this interface wrapping the given address. +}; + +class LocalPeerIdentity: public PeerIdentity { + // PeerIdentity used for connections between processes on the local machine -- in particular, + // Unix sockets. + // + // (This interface probably isn't useful on Windows.) +public: + struct Credentials { + kj::Maybe pid; + kj::Maybe uid; + + // We don't cover groups at present because some systems produce a list of groups while others + // only provide the peer's main group, the latter being pretty useless. + }; + + virtual Credentials getCredentials() = 0; + // Get the PID and UID of the peer process, if possible. + // + // Either ID may be null if the peer could not be identified. Some operating systems do not + // support retrieving these credentials, or can only provide one or the other. Some situations + // (like user and PID namespaces on Linux) may also make it impossible to represent the peer's + // credentials accurately. + // + // Note the meaning here can be subtle. Multiple processes can potentially have the socket in + // their file descriptor tables. The identified process is the one who called `connect()` or + // `listen()`. + // + // On Linux this is implemented with SO_PEERCRED. + + static kj::Own newInstance(Credentials creds); + // Construct an instance of this interface wrapping the given credentials. +}; + +class UnknownPeerIdentity: public PeerIdentity { +public: + static kj::Own newInstance(); + // Get an instance of this interface. This actually always returns the same instance with no + // memory allocation. +}; + +// ======================================================================================= +// Accepting connections + class ConnectionReceiver { // Represents a server socket listening on a port. @@ -210,6 +412,13 @@ class ConnectionReceiver { virtual Promise> accept() = 0; // Accept the next incoming connection. + virtual Promise acceptAuthenticated(); + // Accept the next incoming connection, and also provide a PeerIdentity with any information + // about the client. + // + // For backwards-compatibility, the default implementation of this method calls `accept()` and + // then adds `UnknownPeerIdentity`. + virtual uint getPort() = 0; // Gets the port number, if applicable (i.e. if listening on IP). This is useful if you didn't // specify a port when constructing the NetworkAddress -- one will have been assigned @@ -217,6 +426,7 @@ class ConnectionReceiver { virtual void getsockopt(int level, int option, void* value, uint* length); virtual void setsockopt(int level, int option, const void* value, uint length); + virtual void getsockname(struct sockaddr* addr, uint* length); // Same as the methods of AsyncIoStream. 
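  // [Editor's illustration -- not part of the upstream patch] A sketch of acceptAuthenticated()
  // on a receiver that is documented to produce LocalPeerIdentity (e.g. a Unix socket
  // listener); `receiver` is hypothetical.
  //
  //   receiver->acceptAuthenticated().then([](kj::AuthenticatedStream auth) {
  //     auto& id = kj::downcast<kj::LocalPeerIdentity>(*auth.peerIdentity);
  //     auto creds = id.getCredentials();
  //     KJ_IF_MAYBE(pid, creds.pid) {
  //       KJ_LOG(INFO, "accepted connection", *pid);
  //     }
  //     return kj::mv(auth.stream);
  //   });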
}; @@ -238,14 +448,14 @@ class AncillaryMessage { // Protocol-specific message type. template - inline Maybe as(); + inline Maybe as() const; // Interpret the ancillary message as the given struct type. Most ancillary messages are some // sort of struct, so this is a convenient way to access it. Returns nullptr if the message // is smaller than the struct -- this can happen if the message was truncated due to // insufficient ancillary buffer space. template - inline ArrayPtr asArray(); + inline ArrayPtr asArray() const; // Interpret the ancillary message as an array of items. If the message size does not evenly // divide into elements of type T, the remainder is discarded -- this can happen if the message // was truncated due to insufficient ancillary buffer space. @@ -280,7 +490,7 @@ class DatagramReceiver { // Get the content of the datagram. virtual MaybeTruncated> getAncillary() = 0; - // Ancilarry messages received with the datagram. See the recvmsg() system call and the cmsghdr + // Ancillary messages received with the datagram. See the recvmsg() system call and the cmsghdr // struct. Most apps don't need this. // // If the returned value is truncated, then the last message in the array may itself be @@ -335,6 +545,14 @@ class NetworkAddress { // // The address must not be a wildcard ("*"). If it is an IP address, it must have a port number. + virtual Promise connectAuthenticated(); + // Connect to the address and return both the connection and information about the peer identity. + // This is especially useful when using TLS, to get certificate details. + // + // For backwards-compatibility, the default implementation of this method calls `connect()` and + // then uses a `NetworkPeerIdentity` wrapping a clone of this `NetworkAddress` -- which is not + // particularly useful. + virtual Own listen() = 0; // Listen for incoming connections on this address. // @@ -711,6 +929,11 @@ class CapabilityStreamConnectionReceiver final: public ConnectionReceiver { Promise> accept() override; uint getPort() override; + Promise acceptAuthenticated() override; + // Always produces UnknownIdentity. Capability-based security patterns should not rely on + // authenticating peers; the other end of the capability stream should only be given to + // authorized parties in the first place. + private: AsyncCapabilityStream& inner; }; @@ -719,15 +942,18 @@ class CapabilityStreamNetworkAddress final: public NetworkAddress { // Trivial wrapper which allows an AsyncCapabilityStream to act as a NetworkAddress. // // connect() is implemented by calling provider.newCapabilityPipe(), sending one end over the - // original capability stream, and returning the other end. + // original capability stream, and returning the other end. If `provider` is null, then the + // global kj::newCapabilityPipe() will be used, but this ONLY works if `inner` itself is agnostic + // to the type of streams it receives, e.g. because it was also created using + // kj::NewCapabilityPipe(). // // listen().accept() is implemented by receiving new streams over the original stream. // - // Note that clone() dosen't work (due to ownership issues) and toString() returns a static + // Note that clone() doesn't work (due to ownership issues) and toString() returns a static // string. 
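  // [Editor's illustration -- not part of the upstream patch] With `provider` now optional, an
  // existing capability stream can be wrapped directly; `capStream` is a hypothetical
  // kj::AsyncCapabilityStream&.
  //
  //   kj::CapabilityStreamNetworkAddress addr(nullptr, capStream);
  //   kj::Promise<kj::Own<kj::AsyncIoStream>> conn = addr.connect();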
public: - CapabilityStreamNetworkAddress(AsyncIoProvider& provider, AsyncCapabilityStream& inner) + CapabilityStreamNetworkAddress(kj::Maybe provider, AsyncCapabilityStream& inner) : provider(provider), inner(inner) {} Promise> connect() override; @@ -736,8 +962,13 @@ class CapabilityStreamNetworkAddress final: public NetworkAddress { Own clone() override; String toString() override; + Promise connectAuthenticated() override; + // Always produces UnknownIdentity. Capability-based security patterns should not rely on + // authenticating peers; the other end of the capability stream should only be given to + // authorized parties in the first place. + private: - AsyncIoProvider& provider; + kj::Maybe provider; AsyncCapabilityStream& inner; }; @@ -752,7 +983,7 @@ inline int AncillaryMessage::getLevel() const { return level; } inline int AncillaryMessage::getType() const { return type; } template -inline Maybe AncillaryMessage::as() { +inline Maybe AncillaryMessage::as() const { if (data.size() >= sizeof(T)) { return *reinterpret_cast(data.begin()); } else { @@ -761,8 +992,10 @@ inline Maybe AncillaryMessage::as() { } template -inline ArrayPtr AncillaryMessage::asArray() { +inline ArrayPtr AncillaryMessage::asArray() const { return arrayPtr(reinterpret_cast(data.begin()), data.size() / sizeof(T)); } } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-prelude.h b/libs/EXTERNAL/capnproto/c++/src/kj/async-prelude.h index 6d1751c9edd..c3fd4b19455 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-prelude.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-prelude.h @@ -24,13 +24,11 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "exception.h" #include "tuple.h" +KJ_BEGIN_HEADER + namespace kj { class EventLoop; @@ -68,6 +66,12 @@ using ReducePromises = decltype(reducePromiseType((T*)nullptr, false)); // reduces Promise to something else. In particular this allows Promise> // to reduce to capnp::RemotePromise. +template struct UnwrapPromise_; +template struct UnwrapPromise_> { typedef T Type; }; + +template +using UnwrapPromise = typename UnwrapPromise_::Type; + class PropagateException { // A functor which accepts a kj::Exception as a parameter and returns a broken promise of // arbitrary type which simply propagates the exception. 
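// [Editor's note -- not part of the upstream patch] The UnwrapPromise helper introduced above
// strips one Promise layer from a type, e.g. (illustration only):
//
//   static_assert(std::is_same<kj::_::UnwrapPromise<kj::Promise<int>>, int>::value, "");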
@@ -184,8 +188,12 @@ class PromiseNode; class ChainPromiseNode; template class ForkHub; +class FiberStack; +class FiberBase; class Event; +class XThreadEvent; +class XThreadPaf; class PromiseBase { public: @@ -198,31 +206,27 @@ class PromiseBase { PromiseBase() = default; PromiseBase(Own&& node): node(kj::mv(node)) {} - friend class kj::EventLoop; - friend class ChainPromiseNode; template friend class kj::Promise; - friend class kj::TaskSet; - template - friend Promise> kj::joinPromises(Array>&& promises); - friend Promise kj::joinPromises(Array>&& promises); + friend class PromiseNode; }; void detach(kj::Promise&& promise); void waitImpl(Own<_::PromiseNode>&& node, _::ExceptionOrValue& result, WaitScope& waitScope); bool pollImpl(_::PromiseNode& node, WaitScope& waitScope); Promise yield(); +Promise yieldHarder(); Own neverDone(); class NeverDone { public: template - operator Promise() const { - return Promise(false, neverDone()); - } + operator Promise() const; KJ_NORETURN(void wait(WaitScope& waitScope) const); }; } // namespace _ (private) } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-queue-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-queue-test.c++ new file mode 100644 index 00000000000..c82aadd6465 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-queue-test.c++ @@ -0,0 +1,151 @@ +// Copyright (c) 2021 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +#include "async-queue.h" + +#include +#include +#include + +namespace kj { +namespace { + +struct QueueTest { + kj::AsyncIoContext io = setupAsyncIo(); + ProducerConsumerQueue queue; + + QueueTest() = default; + QueueTest(QueueTest&&) = delete; + QueueTest(const QueueTest&) = delete; + QueueTest& operator=(QueueTest&&) = delete; + QueueTest& operator=(const QueueTest&) = delete; + + struct Producer { + QueueTest& test; + Promise promise = kj::READY_NOW; + + Producer(QueueTest& test): test(test) {} + + void push(size_t i) { + auto push = [&, i]() -> Promise { + test.queue.push(i); + return kj::READY_NOW; + }; + promise = promise.then(kj::mv(push)); + } + }; + + struct Consumer { + QueueTest& test; + Promise promise = kj::READY_NOW; + + Consumer(QueueTest& test): test(test) {} + + void pop(Vector& bits) { + auto pop = [&]() { + return test.queue.pop(); + }; + auto checkPop = [&](size_t j) -> Promise { + bits[j] = true; + return kj::READY_NOW; + }; + promise = promise.then(kj::mv(pop)).then(kj::mv(checkPop)); + } + }; +}; + +KJ_TEST("ProducerConsumerQueue with various amounts of producers and consumers") { + QueueTest test; + + size_t constexpr kItemCount = 1000; + for (auto producerCount: { 1, 5, 10 }) { + for (auto consumerCount: { 1, 5, 10 }) { + KJ_LOG(INFO, "Testing a new set of Producers and Consumers", // + producerCount, consumerCount, kItemCount); + // Make a vector to track our entries. + auto bits = Vector(kItemCount); + for (size_t i = 0; i < kItemCount; ++i) { + bits.add(false); + } + + // Make enough producers. + auto producers = Vector(); + for (size_t i = 0; i < producerCount; ++i) { + producers.add(test); + } + + // Make enough consumers. + auto consumers = Vector(); + for (size_t i = 0; i < consumerCount; ++i) { + consumers.add(test); + } + + for (size_t i = 0; i < kItemCount; ++i) { + // Use a producer and a consumer for each entry. + + auto& producer = producers[i % producerCount]; + producer.push(i); + + auto& consumer = consumers[i % consumerCount]; + consumer.pop(bits); + } + + // Confirm that all entries are produced and consumed. + auto promises = Vector>(); + for (auto& producer: producers) { + promises.add(kj::mv(producer.promise)); + } + for (auto& consumer: consumers) { + promises.add(kj::mv(consumer.promise)); + } + joinPromises(promises.releaseAsArray()).wait(test.io.waitScope); + for (auto i = 0; i < kItemCount; ++i) { + KJ_ASSERT(bits[i], i); + } + } + } +} + +KJ_TEST("ProducerConsumerQueue with rejectAll()") { + QueueTest test; + + for (auto consumerCount: { 1, 5, 10 }) { + KJ_LOG(INFO, "Testing a new set of consumers with rejection", consumerCount); + + // Make enough consumers. + auto promises = Vector>(); + for (size_t i = 0; i < consumerCount; ++i) { + promises.add(test.queue.pop().ignoreResult()); + } + + for (auto& promise: promises) { + KJ_EXPECT(!promise.poll(test.io.waitScope), "All of our consumers should be waiting"); + } + test.queue.rejectAll(KJ_EXCEPTION(FAILED, "Total rejection")); + + // We should have finished and swallowed the errors. + auto promise = joinPromises(promises.releaseAsArray()); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("Total rejection", promise.wait(test.io.waitScope)); + } +} + +} // namespace +} // namespace kj \ No newline at end of file diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-queue.h b/libs/EXTERNAL/capnproto/c++/src/kj/async-queue.h new file mode 100644 index 00000000000..8e6f84c11ae --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-queue.h @@ -0,0 +1,156 @@ +// Copyright (c) 2021 Cloudflare, Inc. 
and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once + +#include "async.h" +#include "common.h" +#include "debug.h" +#include "list.h" +#include "memory.h" + +#include + +KJ_BEGIN_HEADER + +namespace kj { + +template +class WaiterQueue { +public: + // A WaiterQueue creates Nodes that blend newAdaptedPromise and List. + + WaiterQueue() = default; + KJ_DISALLOW_COPY(WaiterQueue); + + Promise wait() { + return newAdaptedPromise(queue); + } + + void fulfill(T&& value) { + KJ_IREQUIRE(!empty()); + + auto& node = static_cast(queue.front()); + node.fulfiller.fulfill(kj::mv(value)); + node.remove(); + } + + void reject(Exception&& exception) { + KJ_IREQUIRE(!empty()); + + auto& node = static_cast(queue.front()); + node.fulfiller.reject(kj::mv(exception)); + node.remove(); + } + + bool empty() const { + return queue.empty(); + } + +private: + struct BaseNode { + // This is a separate structure because List requires a predefined memory layout but + // newAdaptedPromise() only provides access to the Adaptor type in the ctor. + + BaseNode(PromiseFulfiller& fulfiller): fulfiller(fulfiller) {} + + PromiseFulfiller& fulfiller; + ListLink link; + }; + + using Queue = List; + + struct Node: public BaseNode { + Node(PromiseFulfiller& fulfiller, Queue& queue): BaseNode(fulfiller), queue(queue) { + queue.add(*this); + } + + ~Node() noexcept(false) { + // When the associated Promise is destructed, so is the Node thus we should leave the queue. + remove(); + } + + void remove() { + if(BaseNode::link.isLinked()){ + queue.remove(*this); + } + } + + Queue& queue; + }; + + Queue queue; +}; + +template +class ProducerConsumerQueue { + // ProducerConsumerQueue is an async FIFO queue. + +public: + void push(T v) { + // Push an existing value onto the queue. + + if (!waiters.empty()) { + // We have at least one waiter, give the value to the oldest. + KJ_IASSERT(values.empty()); + + // Fulfill the first waiter and return without store our value. + waiters.fulfill(kj::mv(v)); + } else { + // We don't have any waiters, store the value. + values.push_front(kj::mv(v)); + } + } + + void rejectAll(Exception e) { + // Reject all waiters with a given exception. + + while (!waiters.empty()) { + auto newE = Exception(e); + waiters.reject(kj::mv(newE)); + } + } + + Promise pop() { + // Eventually pop a value from the queue. + // Note that if your sinks lag your sources, the promise will always be ready. 
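    // [Editor's illustration -- not part of the upstream patch] Typical push()/pop() interplay
    // (standalone sketch, names hypothetical):
    //
    //   kj::ProducerConsumerQueue<int> queue;
    //   queue.push(42);                          // push() never blocks
    //   kj::Promise<int> a = queue.pop();        // already fulfilled: a value was waiting
    //   kj::Promise<int> b = queue.pop();        // fulfilled later, by the next push()
    //   queue.push(43);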
+ + if (!values.empty()) { + // We have at least one value, get the oldest. + KJ_IASSERT(waiters.empty()); + + auto value = kj::mv(values.back()); + values.pop_back(); + return kj::mv(value); + } else { + // We don't have any values, add ourselves to the waiting queue. + return waiters.wait(); + } + } + +private: + std::list values; + WaiterQueue waiters; +}; + +} // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-test.c++ index f2bbb651737..fe11665de77 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-test.c++ @@ -22,11 +22,17 @@ #include "async.h" #include "debug.h" #include +#include "mutex.h" +#include "thread.h" + +#if !KJ_USE_FIBERS +#include +#endif namespace kj { namespace { -#if !_MSC_VER +#if !_MSC_VER || defined(__clang__) // TODO(msvc): GetFunctorStartAddress is not supported on MSVC currently, so skip the test. TEST(Async, GetFunctorStartAddress) { EXPECT_TRUE(nullptr != _::GetFunctorStartAddress<>::apply([](){return 0;})); @@ -364,12 +370,30 @@ TEST(Async, SeparateFulfillerDiscarded) { EventLoop loop; WaitScope waitScope(loop); - auto pair = newPromiseAndFulfiller(); + auto pair = newPromiseAndFulfiller(); pair.fulfiller = nullptr; - EXPECT_ANY_THROW(pair.promise.wait(waitScope)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "PromiseFulfiller was destroyed without fulfilling the promise", + pair.promise.wait(waitScope)); } +#if !KJ_NO_EXCEPTIONS +TEST(Async, SeparateFulfillerDiscardedDuringUnwind) { + EventLoop loop; + WaitScope waitScope(loop); + + auto pair = newPromiseAndFulfiller(); + kj::runCatchingExceptions([&]() { + auto fulfillerToDrop = kj::mv(pair.fulfiller); + kj::throwFatalException(KJ_EXCEPTION(FAILED, "test exception")); + }); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "test exception", pair.promise.wait(waitScope)); +} +#endif + TEST(Async, SeparateFulfillerMemoryLeak) { auto paf = kj::newPromiseAndFulfiller(); paf.fulfiller->fulfill(); @@ -379,57 +403,75 @@ TEST(Async, Ordering) { EventLoop loop; WaitScope waitScope(loop); + class ErrorHandlerImpl: public TaskSet::ErrorHandler { + public: + void taskFailed(kj::Exception&& exception) override { + KJ_FAIL_EXPECT(exception); + } + }; + int counter = 0; - Promise promises[6] = {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}; + ErrorHandlerImpl errorHandler; + kj::TaskSet tasks(errorHandler); - promises[1] = evalLater([&]() { + tasks.add(evalLater([&]() { EXPECT_EQ(0, counter++); { // Use a promise and fulfiller so that we can fulfill the promise after waiting on it in // order to induce depth-first scheduling. auto paf = kj::newPromiseAndFulfiller(); - promises[2] = paf.promise.then([&]() { + tasks.add(paf.promise.then([&]() { EXPECT_EQ(1, counter++); - }).eagerlyEvaluate(nullptr); + })); paf.fulfiller->fulfill(); } // .then() is scheduled breadth-first if the promise has already resolved, but depth-first // if the promise resolves later. 
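    // [Editor's note -- not part of the upstream patch] Roughly speaking: the continuation of
    // Promise<void>(READY_NOW).then(f) is queued behind events that are already pending
    // (breadth-first), whereas fulfilling paf.fulfiller after paf.promise.then(f) has been set
    // up runs f ahead of those pending events (depth-first), which the counter checks below
    // rely on.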
- promises[3] = Promise(READY_NOW).then([&]() { + tasks.add(Promise(READY_NOW).then([&]() { EXPECT_EQ(4, counter++); }).then([&]() { EXPECT_EQ(5, counter++); - }).eagerlyEvaluate(nullptr); + tasks.add(kj::evalLast([&]() { + EXPECT_EQ(7, counter++); + tasks.add(kj::evalLater([&]() { + EXPECT_EQ(8, counter++); + })); + })); + })); { auto paf = kj::newPromiseAndFulfiller(); - promises[4] = paf.promise.then([&]() { + tasks.add(paf.promise.then([&]() { EXPECT_EQ(2, counter++); - }).eagerlyEvaluate(nullptr); + tasks.add(kj::evalLast([&]() { + EXPECT_EQ(9, counter++); + tasks.add(kj::evalLater([&]() { + EXPECT_EQ(10, counter++); + })); + })); + })); paf.fulfiller->fulfill(); } // evalLater() is like READY_NOW.then(). - promises[5] = evalLater([&]() { + tasks.add(evalLater([&]() { EXPECT_EQ(6, counter++); - }).eagerlyEvaluate(nullptr); - }).eagerlyEvaluate(nullptr); + })); + })); - promises[0] = evalLater([&]() { + tasks.add(evalLater([&]() { EXPECT_EQ(3, counter++); - // Making this a chain should NOT cause it to preempt promises[1]. (This was a problem at one - // point.) + // Making this a chain should NOT cause it to preempt the first promise. (This was a problem + // at one point.) return Promise(READY_NOW); - }).eagerlyEvaluate(nullptr); + })); - for (auto i: indices(promises)) { - kj::mv(promises[i]).wait(waitScope); - } + tasks.onEmpty().wait(waitScope); - EXPECT_EQ(7, counter); + EXPECT_EQ(11, counter); } TEST(Async, Fork) { @@ -440,14 +482,28 @@ TEST(Async, Fork) { auto fork = promise.fork(); +#if __GNUC__ && !__clang__ && __GNUC__ >= 7 +// GCC 7 decides the open-brace below is "misleadingly indented" as if it were guarded by the `for` +// that appears in the implementation of KJ_REQUIRE(). Shut up shut up shut up. +#pragma GCC diagnostic ignored "-Wmisleading-indentation" +#endif + KJ_ASSERT(!fork.hasBranches()); + { + auto cancelBranch = fork.addBranch(); + KJ_ASSERT(fork.hasBranches()); + } + KJ_ASSERT(!fork.hasBranches()); + auto branch1 = fork.addBranch().then([](int i) { EXPECT_EQ(123, i); return 456; }); + KJ_ASSERT(fork.hasBranches()); auto branch2 = fork.addBranch().then([](int i) { EXPECT_EQ(123, i); return 789; }); + KJ_ASSERT(fork.hasBranches()); { auto releaseFork = kj::mv(fork); @@ -490,6 +546,34 @@ TEST(Async, ForkRef) { EXPECT_EQ(789, branch2.wait(waitScope)); } +TEST(Async, ForkMaybeRef) { + EventLoop loop; + WaitScope waitScope(loop); + + Promise>> promise = evalLater([&]() { + return Maybe>(refcounted(123)); + }); + + auto fork = promise.fork(); + + auto branch1 = fork.addBranch().then([](Maybe>&& i) { + EXPECT_EQ(123, KJ_REQUIRE_NONNULL(i)->i); + return 456; + }); + auto branch2 = fork.addBranch().then([](Maybe>&& i) { + EXPECT_EQ(123, KJ_REQUIRE_NONNULL(i)->i); + return 789; + }); + + { + auto releaseFork = kj::mv(fork); + } + + EXPECT_EQ(456, branch1.wait(waitScope)); + EXPECT_EQ(789, branch2.wait(waitScope)); +} + + TEST(Async, Split) { EventLoop loop; WaitScope waitScope(loop); @@ -603,6 +687,16 @@ TEST(Async, Canceler) { KJ_EXPECT(nowI.wait(waitScope) == 123u); } +TEST(Async, CancelerDoubleWrap) { + EventLoop loop; + WaitScope waitScope(loop); + + // This used to crash. 
+ Canceler canceler; + auto promise = canceler.wrap(canceler.wrap(kj::Promise(kj::NEVER_DONE))); + canceler.cancel("whoops"); +} + class ErrorHandlerImpl: public TaskSet::ErrorHandler { public: uint exceptionCount = 0; @@ -643,6 +737,75 @@ TEST(Async, TaskSet) { EXPECT_EQ(1u, errorHandler.exceptionCount); } +TEST(Async, LargeTaskSetDestruction) { + static constexpr size_t stackSize = 200 * 1024; + + static auto testBody = [] { + + ErrorHandlerImpl errorHandler; + TaskSet tasks(errorHandler); + + for (int i = 0; i < stackSize / sizeof(void*); i++) { + tasks.add(kj::NEVER_DONE); + } + }; + +#if KJ_USE_FIBERS + EventLoop loop; + WaitScope waitScope(loop); + + startFiber(stackSize, + [](WaitScope&) mutable { + testBody(); + }).wait(waitScope); + +#else + pthread_attr_t attr; + KJ_REQUIRE(0 == pthread_attr_init(&attr)); + KJ_DEFER(KJ_REQUIRE(0 == pthread_attr_destroy(&attr))); + + KJ_REQUIRE(0 == pthread_attr_setstacksize(&attr, stackSize)); + pthread_t thread; + KJ_REQUIRE(0 == pthread_create(&thread, &attr, [](void*) -> void* { + EventLoop loop; + WaitScope waitScope(loop); + testBody(); + return nullptr; + }, nullptr)); + KJ_REQUIRE(0 == pthread_join(thread, nullptr)); +#endif +} + +TEST(Async, TaskSet) { + EventLoop loop; + WaitScope waitScope(loop); + + bool destroyed = false; + + { + ErrorHandlerImpl errorHandler; + TaskSet tasks(errorHandler); + + tasks.add(kj::Promise(kj::NEVER_DONE) + .attach(kj::defer([&]() { + // During cancellation, append another task! + // It had better be canceled too! + tasks.add(kj::Promise(kj::READY_NOW) + .then([]() { KJ_FAIL_EXPECT("shouldn't get here"); }, + [](auto) { KJ_FAIL_EXPECT("shouldn't get here"); }) + .attach(kj::defer([&]() { + destroyed = true; + }))); + }))); + } + + KJ_EXPECT(destroyed); + + // Give a chance for the "shouldn't get here" asserts to execute, if the event is still running, + // which it shouldn't be. + waitScope.poll(); +} + TEST(Async, TaskSetOnEmpty) { EventLoop loop; WaitScope waitScope(loop); @@ -809,5 +972,462 @@ TEST(Async, Poll) { paf.promise.wait(waitScope); } +KJ_TEST("exclusiveJoin both events complete simultaneously") { + // Previously, if both branches of an exclusiveJoin() completed simultaneously, then the parent + // event could be armed twice. This is an error, but the exact results of this error depend on + // the parent PromiseNode type. One case where it matters is ArrayJoinPromiseNode, which counts + // events and decides it is done when it has received exactly the number of events expected. 
+ + EventLoop loop; + WaitScope waitScope(loop); + + auto builder = kj::heapArrayBuilder>(2); + builder.add(kj::Promise(123).exclusiveJoin(kj::Promise(456))); + builder.add(kj::NEVER_DONE); + auto joined = kj::joinPromises(builder.finish()); + + KJ_EXPECT(!joined.poll(waitScope)); +} + +#if KJ_USE_FIBERS +KJ_TEST("start a fiber") { + EventLoop loop; + WaitScope waitScope(loop); + + auto paf = newPromiseAndFulfiller(); + + Promise fiber = startFiber(65536, + [promise = kj::mv(paf.promise)](WaitScope& fiberScope) mutable { + int i = promise.wait(fiberScope); + KJ_EXPECT(i == 123); + return "foo"_kj; + }); + + KJ_EXPECT(!fiber.poll(waitScope)); + + paf.fulfiller->fulfill(123); + + KJ_ASSERT(fiber.poll(waitScope)); + KJ_EXPECT(fiber.wait(waitScope) == "foo"); +} + +KJ_TEST("fiber promise chaining") { + EventLoop loop; + WaitScope waitScope(loop); + + auto paf = newPromiseAndFulfiller(); + bool ran = false; + + Promise fiber = startFiber(65536, + [promise = kj::mv(paf.promise), &ran](WaitScope& fiberScope) mutable { + ran = true; + return kj::mv(promise); + }); + + KJ_EXPECT(!ran); + KJ_EXPECT(!fiber.poll(waitScope)); + KJ_EXPECT(ran); + + paf.fulfiller->fulfill(123); + + KJ_ASSERT(fiber.poll(waitScope)); + KJ_EXPECT(fiber.wait(waitScope) == 123); +} + +KJ_TEST("throw from a fiber") { + EventLoop loop; + WaitScope waitScope(loop); + + auto paf = newPromiseAndFulfiller(); + + Promise fiber = startFiber(65536, + [promise = kj::mv(paf.promise)](WaitScope& fiberScope) mutable { + promise.wait(fiberScope); + KJ_FAIL_EXPECT("wait() should have thrown"); + }); + + KJ_EXPECT(!fiber.poll(waitScope)); + + paf.fulfiller->reject(KJ_EXCEPTION(FAILED, "test exception")); + + KJ_ASSERT(fiber.poll(waitScope)); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test exception", fiber.wait(waitScope)); +} + +#if !__MINGW32__ || __MINGW64__ +// This test fails on MinGW 32-bit builds due to a compiler bug with exceptions + fibers: +// https://sourceforge.net/p/mingw-w64/bugs/835/ +KJ_TEST("cancel a fiber") { + EventLoop loop; + WaitScope waitScope(loop); + + // When exceptions are disabled we can't wait() on a non-void promise that throws. 
+ auto paf = newPromiseAndFulfiller(); + + bool exited = false; + bool canceled = false; + + { + Promise fiber = startFiber(65536, + [promise = kj::mv(paf.promise), &exited, &canceled](WaitScope& fiberScope) mutable { + KJ_DEFER(exited = true); + try { + promise.wait(fiberScope); + } catch (kj::CanceledException) { + canceled = true; + throw; + } + return "foo"_kj; + }); + + KJ_EXPECT(!fiber.poll(waitScope)); + KJ_EXPECT(!exited); + KJ_EXPECT(!canceled); + } + + KJ_EXPECT(exited); + KJ_EXPECT(canceled); +} +#endif + +KJ_TEST("fiber pool") { + EventLoop loop; + WaitScope waitScope(loop); + FiberPool pool(65536); + + int* i1_local = nullptr; + int* i2_local = nullptr; + + auto run = [&]() mutable { + auto paf1 = newPromiseAndFulfiller(); + auto paf2 = newPromiseAndFulfiller(); + + { + Promise fiber1 = pool.startFiber([&, promise = kj::mv(paf1.promise)](WaitScope& scope) mutable { + int i = promise.wait(scope); + KJ_EXPECT(i == 123); + if (i1_local == nullptr) { + i1_local = &i; + } else { + KJ_ASSERT(i1_local == &i); + } + return i; + }); + { + Promise fiber2 = pool.startFiber([&, promise = kj::mv(paf2.promise)](WaitScope& scope) mutable { + int i = promise.wait(scope); + KJ_EXPECT(i == 456); + if (i2_local == nullptr) { + i2_local = &i; + } else { + KJ_ASSERT(i2_local == &i); + } + return i; + }); + + KJ_EXPECT(!fiber1.poll(waitScope)); + KJ_EXPECT(!fiber2.poll(waitScope)); + + KJ_EXPECT(pool.getFreelistSize() == 0); + + paf2.fulfiller->fulfill(456); + + KJ_EXPECT(!fiber1.poll(waitScope)); + KJ_ASSERT(fiber2.poll(waitScope)); + KJ_EXPECT(fiber2.wait(waitScope) == 456); + + KJ_EXPECT(pool.getFreelistSize() == 1); + } + + paf1.fulfiller->fulfill(123); + + KJ_ASSERT(fiber1.poll(waitScope)); + KJ_EXPECT(fiber1.wait(waitScope) == 123); + + KJ_EXPECT(pool.getFreelistSize() == 2); + } + }; + run(); + KJ_ASSERT_NONNULL(i1_local); + KJ_ASSERT_NONNULL(i2_local); + // run the same thing and reuse the fibers + run(); +} + +bool onOurStack(char* p) { + // If p points less than 64k away from a random stack variable, then it must be on the same + // stack, since we never allocate stacks smaller than 64k. + char c; + ptrdiff_t diff = p - &c; + return diff < 65536 && diff > -65536; +} + +KJ_TEST("fiber pool runSynchronously()") { + FiberPool pool(65536); + + { + char c; + KJ_EXPECT(onOurStack(&c)); // sanity check... + } + + char* ptr1 = nullptr; + char* ptr2 = nullptr; + + pool.runSynchronously([&]() { + char c; + ptr1 = &c; + }); + KJ_ASSERT(ptr1 != nullptr); + + pool.runSynchronously([&]() { + char c; + ptr2 = &c; + }); + KJ_ASSERT(ptr2 != nullptr); + + // Should have used the same stack both times, so local var would be in the same place. + KJ_EXPECT(ptr1 == ptr2); + + // Should have been on a different stack from the main stack. + KJ_EXPECT(!onOurStack(ptr1)); + + KJ_EXPECT_THROW_MESSAGE("test exception", + pool.runSynchronously([&]() { KJ_FAIL_ASSERT("test exception"); })); +} + +KJ_TEST("fiber pool limit") { + FiberPool pool(65536); + + pool.setMaxFreelist(1); + + kj::MutexGuarded state; + + char* ptr1; + char* ptr2; + + // Run some code that uses two stacks in separate threads at the same time. 
+ { + kj::Thread thread([&]() noexcept { + auto lock = state.lockExclusive(); + lock.wait([](uint val) { return val == 1; }); + + pool.runSynchronously([&]() { + char c; + ptr2 = &c; + + *lock = 2; + lock.wait([](uint val) { return val == 3; }); + }); + }); + + ([&]() noexcept { + auto lock = state.lockExclusive(); + + pool.runSynchronously([&]() { + char c; + ptr1 = &c; + + *lock = 1; + lock.wait([](uint val) { return val == 2; }); + }); + + *lock = 3; + })(); + } + + KJ_EXPECT(pool.getFreelistSize() == 1); + + // We expect that if we reuse a stack from the pool, it will be the last one that exited, which + // is the one from the thread. + pool.runSynchronously([&]() { + KJ_EXPECT(onOurStack(ptr2)); + KJ_EXPECT(!onOurStack(ptr1)); + + KJ_EXPECT(pool.getFreelistSize() == 0); + }); + + KJ_EXPECT(pool.getFreelistSize() == 1); + + // Note that it would NOT work to try to allocate two stacks at the same time again and verify + // that the second stack doesn't match the previously-deleted stack, because there's a high + // likelihood that the new stack would be allocated in the same location. +} + +KJ_TEST("run event loop on freelisted stacks") { + FiberPool pool(65536); + + class MockEventPort: public EventPort { + public: + bool wait() override { + char c; + waitStack = &c; + KJ_IF_MAYBE(f, fulfiller) { + f->get()->fulfill(); + fulfiller = nullptr; + } + return false; + } + bool poll() override { + char c; + pollStack = &c; + KJ_IF_MAYBE(f, fulfiller) { + f->get()->fulfill(); + fulfiller = nullptr; + } + return false; + } + + char* waitStack = nullptr; + char* pollStack = nullptr; + + kj::Maybe>> fulfiller; + }; + + MockEventPort port; + EventLoop loop(port); + WaitScope waitScope(loop); + waitScope.runEventCallbacksOnStackPool(pool); + + { + auto paf = newPromiseAndFulfiller(); + port.fulfiller = kj::mv(paf.fulfiller); + + char* ptr1 = nullptr; + char* ptr2 = nullptr; + kj::evalLater([&]() { + char c; + ptr1 = &c; + return kj::mv(paf.promise); + }).then([&]() { + char c; + ptr2 = &c; + }).wait(waitScope); + + KJ_EXPECT(ptr1 != nullptr); + KJ_EXPECT(ptr2 != nullptr); + KJ_EXPECT(port.waitStack != nullptr); + KJ_EXPECT(port.pollStack == nullptr); + + // The event callbacks should have run on a different stack, but the wait should have been on + // the main stack. + KJ_EXPECT(!onOurStack(ptr1)); + KJ_EXPECT(!onOurStack(ptr2)); + KJ_EXPECT(onOurStack(port.waitStack)); + + pool.runSynchronously([&]() { + // This should run on the same stack where the event callbacks ran. + KJ_EXPECT(onOurStack(ptr1)); + KJ_EXPECT(onOurStack(ptr2)); + KJ_EXPECT(!onOurStack(port.waitStack)); + }); + } + + port.waitStack = nullptr; + port.pollStack = nullptr; + + // Now try poll() instead of wait(). Note that since poll() doesn't block, we let it run on the + // event stack. + { + auto paf = newPromiseAndFulfiller(); + port.fulfiller = kj::mv(paf.fulfiller); + + char* ptr1 = nullptr; + char* ptr2 = nullptr; + auto promise = kj::evalLater([&]() { + char c; + ptr1 = &c; + return kj::mv(paf.promise); + }).then([&]() { + char c; + ptr2 = &c; + }); + + KJ_EXPECT(promise.poll(waitScope)); + + KJ_EXPECT(ptr1 != nullptr); + KJ_EXPECT(ptr2 == nullptr); // didn't run because of lazy continuation evaluation + KJ_EXPECT(port.waitStack == nullptr); + KJ_EXPECT(port.pollStack != nullptr); + + // The event callback should have run on a different stack, and poll() should have run on + // a separate stack too. 
+ KJ_EXPECT(!onOurStack(ptr1)); + KJ_EXPECT(!onOurStack(port.pollStack)); + + pool.runSynchronously([&]() { + // This should run on the same stack where the event callbacks ran. + KJ_EXPECT(onOurStack(ptr1)); + KJ_EXPECT(onOurStack(port.pollStack)); + }); + } +} +#endif + +KJ_TEST("retryOnDisconnect") { + EventLoop loop; + WaitScope waitScope(loop); + + { + uint i = 0; + auto promise = retryOnDisconnect([&]() -> Promise { + i++; + return 123; + }); + KJ_EXPECT(i == 0); + KJ_EXPECT(promise.wait(waitScope) == 123); + KJ_EXPECT(i == 1); + } + + { + uint i = 0; + auto promise = retryOnDisconnect([&]() -> Promise { + if (i++ == 0) { + return KJ_EXCEPTION(DISCONNECTED, "test disconnect"); + } else { + return 123; + } + }); + KJ_EXPECT(i == 0); + KJ_EXPECT(promise.wait(waitScope) == 123); + KJ_EXPECT(i == 2); + } + + + { + uint i = 0; + auto promise = retryOnDisconnect([&]() -> Promise { + if (i++ <= 1) { + return KJ_EXCEPTION(DISCONNECTED, "test disconnect", i); + } else { + return 123; + } + }); + KJ_EXPECT(i == 0); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test disconnect; i = 2", + promise.ignoreResult().wait(waitScope)); + KJ_EXPECT(i == 2); + } + + { + // Test passing a reference to a function. + struct Func { + uint i = 0; + Promise operator()() { + if (i++ == 0) { + return KJ_EXCEPTION(DISCONNECTED, "test disconnect"); + } else { + return 123; + } + } + }; + Func func; + + auto promise = retryOnDisconnect(func); + KJ_EXPECT(func.i == 0); + KJ_EXPECT(promise.wait(waitScope) == 123); + KJ_EXPECT(func.i == 2); + } +} + } // namespace } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-unix-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-unix-test.c++ index 38d4a3fadb1..c8012e4ecb8 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-unix-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-unix-test.c++ @@ -37,6 +37,13 @@ #include #include #include +#include "mutex.h" + +#if __BIONIC__ +// Android's Bionic defines SIGRTMIN but using it in sigaddset() throws EINVAL, which means we +// definitely can't actually use RT signals. +#undef SIGRTMIN +#endif namespace kj { namespace { @@ -53,15 +60,19 @@ inline void delay() { usleep(10000); } void captureSignals() { static bool captured = false; if (!captured) { - captured = true; - // We use SIGIO and SIGURG as our test signals because they're two signals that we can be // reasonably confident won't otherwise be delivered to any KJ or Cap'n Proto test. We can't // use SIGUSR1 because it is reserved by UnixEventPort and SIGUSR2 is used by Valgrind on OSX. UnixEventPort::captureSignal(SIGURG); UnixEventPort::captureSignal(SIGIO); +#ifdef SIGRTMIN + UnixEventPort::captureSignal(SIGRTMIN); +#endif + UnixEventPort::captureChildExit(); + + captured = true; } } @@ -99,7 +110,14 @@ TEST(AsyncUnixTest, SignalWithValue) { union sigval value; memset(&value, 0, sizeof(value)); value.sival_int = 123; - sigqueue(getpid(), SIGURG, value); + KJ_SYSCALL_HANDLE_ERRORS(sigqueue(getpid(), SIGURG, value)) { + case ENOSYS: + // sigqueue() not supported. Maybe running on WSL. 
+ KJ_LOG(WARNING, "sigqueue() is not implemented by your system; skipping test"); + return; + default: + KJ_FAIL_SYSCALL("sigqueue(getpid(), SIGURG, value)", error); + } siginfo_t info = port.onSignal(SIGURG).wait(waitScope); EXPECT_EQ(SIGURG, info.si_signo); @@ -127,7 +145,14 @@ TEST(AsyncUnixTest, SignalWithPointerValue) { union sigval value; memset(&value, 0, sizeof(value)); value.sival_ptr = &port; - sigqueue(getpid(), SIGURG, value); + KJ_SYSCALL_HANDLE_ERRORS(sigqueue(getpid(), SIGURG, value)) { + case ENOSYS: + // sigqueue() not supported. Maybe running on WSL. + KJ_LOG(WARNING, "sigqueue() is not implemented by your system; skipping test"); + return; + default: + KJ_FAIL_SYSCALL("sigqueue(getpid(), SIGURG, value)", error); + } siginfo_t info = port.onSignal(SIGURG).wait(waitScope); EXPECT_EQ(SIGURG, info.si_signo); @@ -489,6 +514,14 @@ TEST(AsyncUnixTest, UrgentObserver) { KJ_SYSCALL(getsockname(serverFd, reinterpret_cast(&saddr), &saddrLen)); KJ_SYSCALL(listen(serverFd, 1)); + // Create a pipe that we'll use to signal if MSG_OOB return EINVAL. + int failpipe[2]; + KJ_SYSCALL(pipe(failpipe)); + KJ_DEFER({ + close(failpipe[0]); + close(failpipe[1]); + }); + // Accept one connection, send in-band and OOB byte, wait for a quit message Thread thread([&]() { int tmpFd; @@ -504,7 +537,14 @@ TEST(AsyncUnixTest, UrgentObserver) { c = 'i'; KJ_SYSCALL(send(clientFd, &c, 1, 0)); c = 'o'; - KJ_SYSCALL(send(clientFd, &c, 1, MSG_OOB)); + KJ_SYSCALL_HANDLE_ERRORS(send(clientFd, &c, 1, MSG_OOB)) { + case EINVAL: + // Looks like MSG_OOB is not supported. (This is the case e.g. on WSL.) + KJ_SYSCALL(write(failpipe[1], &c, 1)); + break; + default: + KJ_FAIL_SYSCALL("send(..., MSG_OOB)", error); + } KJ_SYSCALL(recv(clientFd, &c, 1, 0)); EXPECT_EQ('q', c); @@ -517,24 +557,32 @@ TEST(AsyncUnixTest, UrgentObserver) { UnixEventPort::FdObserver observer(port, clientFd, UnixEventPort::FdObserver::OBSERVE_READ | UnixEventPort::FdObserver::OBSERVE_URGENT); + UnixEventPort::FdObserver failObserver(port, failpipe[0], + UnixEventPort::FdObserver::OBSERVE_READ | UnixEventPort::FdObserver::OBSERVE_URGENT); - observer.whenUrgentDataAvailable().wait(waitScope); + auto promise = observer.whenUrgentDataAvailable().then([]() { return true; }); + auto failPromise = failObserver.whenBecomesReadable().then([]() { return false; }); + bool oobSupported = promise.exclusiveJoin(kj::mv(failPromise)).wait(waitScope); + if (oobSupported) { #if __CYGWIN__ - // On Cygwin, reading the urgent byte first causes the subsequent regular read to block until - // such a time as the connection closes -- and then the byte is successfully returned. This - // seems to be a cygwin bug. - KJ_SYSCALL(recv(clientFd, &c, 1, 0)); - EXPECT_EQ('i', c); - KJ_SYSCALL(recv(clientFd, &c, 1, MSG_OOB)); - EXPECT_EQ('o', c); + // On Cygwin, reading the urgent byte first causes the subsequent regular read to block until + // such a time as the connection closes -- and then the byte is successfully returned. This + // seems to be a cygwin bug. + KJ_SYSCALL(recv(clientFd, &c, 1, 0)); + EXPECT_EQ('i', c); + KJ_SYSCALL(recv(clientFd, &c, 1, MSG_OOB)); + EXPECT_EQ('o', c); #else - // Attempt to read the urgent byte prior to reading the in-band byte. - KJ_SYSCALL(recv(clientFd, &c, 1, MSG_OOB)); - EXPECT_EQ('o', c); - KJ_SYSCALL(recv(clientFd, &c, 1, 0)); - EXPECT_EQ('i', c); + // Attempt to read the urgent byte prior to reading the in-band byte. 
+ KJ_SYSCALL(recv(clientFd, &c, 1, MSG_OOB)); + EXPECT_EQ('o', c); + KJ_SYSCALL(recv(clientFd, &c, 1, 0)); + EXPECT_EQ('i', c); #endif + } else { + KJ_LOG(WARNING, "MSG_OOB doesn't seem to be supported on your platform."); + } // Allow server thread to let its clientFd go out of scope. c = 'q'; @@ -641,18 +689,53 @@ TEST(AsyncUnixTest, Wake) { EXPECT_FALSE(port.wait()); } - bool woken = false; - Thread thread([&]() { + // Test wake() when already wait()ing. + { + Thread thread([&]() { + delay(); + port.wake(); + }); + + EXPECT_TRUE(port.wait()); + } + + // Test wait() after wake() already happened. + { + Thread thread([&]() { + port.wake(); + }); + delay(); - woken = true; - port.wake(); - }); + EXPECT_TRUE(port.wait()); + } - EXPECT_TRUE(port.wait()); + // Test wake() during poll() busy loop. + { + Thread thread([&]() { + delay(); + port.wake(); + }); + + EXPECT_FALSE(port.poll()); + while (!port.poll()) {} + } + + // Test poll() when wake() already delivered. + { + EXPECT_FALSE(port.poll()); + + Thread thread([&]() { + port.wake(); + }); + + do { + delay(); + } while (!port.poll()); + } } int exitCodeForSignal = 0; -void exitSignalHandler(int) { +[[noreturn]] void exitSignalHandler(int) { _exit(exitCodeForSignal); } @@ -670,7 +753,7 @@ struct TestChild { sigset_t sigs; sigemptyset(&sigs); sigaddset(&sigs, SIGTERM); - sigprocmask(SIG_UNBLOCK, &sigs, nullptr); + pthread_sigmask(SIG_UNBLOCK, &sigs, nullptr); for (;;) pause(); } @@ -703,8 +786,8 @@ TEST(AsyncUnixTest, ChildProcess) { sigset_t sigs, oldsigs; KJ_SYSCALL(sigemptyset(&sigs)); KJ_SYSCALL(sigaddset(&sigs, SIGTERM)); - KJ_SYSCALL(sigprocmask(SIG_BLOCK, &sigs, &oldsigs)); - KJ_DEFER(KJ_SYSCALL(sigprocmask(SIG_SETMASK, &oldsigs, nullptr)) { break; }); + KJ_SYSCALL(pthread_sigmask(SIG_BLOCK, &sigs, &oldsigs)); + KJ_DEFER(KJ_SYSCALL(pthread_sigmask(SIG_SETMASK, &oldsigs, nullptr)) { break; }); TestChild child1(port, 123); KJ_EXPECT(!child1.promise.poll(waitScope)); @@ -737,6 +820,148 @@ TEST(AsyncUnixTest, ChildProcess) { // child3 will be killed and synchronously waited on the way out. } +#if !__CYGWIN__ +// TODO(someday): Figure out why whenWriteDisconnected() never resolves on Cygwin. + +KJ_TEST("UnixEventPort whenWriteDisconnected()") { + captureSignals(); + UnixEventPort port; + EventLoop loop(port); + WaitScope waitScope(loop); + + int fds_[2]; + KJ_SYSCALL(socketpair(AF_UNIX, SOCK_STREAM, 0, fds_)); + kj::AutoCloseFd fds[2] = { kj::AutoCloseFd(fds_[0]), kj::AutoCloseFd(fds_[1]) }; + + UnixEventPort::FdObserver observer(port, fds[0], UnixEventPort::FdObserver::OBSERVE_READ); + + // At one point, the poll()-based version of UnixEventPort had a bug where if some other event + // had completed previously, whenWriteDisconnected() would stop being watched for. So we watch + // for readability as well and check that that goes away first. 
+ auto readablePromise = observer.whenBecomesReadable(); + auto hupPromise = observer.whenWriteDisconnected(); + + KJ_EXPECT(!readablePromise.poll(waitScope)); + KJ_EXPECT(!hupPromise.poll(waitScope)); + + KJ_SYSCALL(write(fds[1], "foo", 3)); + + KJ_ASSERT(readablePromise.poll(waitScope)); + readablePromise.wait(waitScope); + + { + char junk[16]; + ssize_t n; + KJ_SYSCALL(n = read(fds[0], junk, 16)); + KJ_EXPECT(n == 3); + } + + KJ_EXPECT(!hupPromise.poll(waitScope)); + + fds[1] = nullptr; + KJ_ASSERT(hupPromise.poll(waitScope)); + hupPromise.wait(waitScope); +} + +KJ_TEST("UnixEventPort FdObserver(..., flags=0)::whenWriteDisconnected()") { + // Verifies that given `0' as a `flags' argument, + // FdObserver still observes whenWriteDisconnected(). + // + // This can be useful to watch disconnection on a blocking file descriptor. + // See discussion: https://github.com/capnproto/capnproto/issues/924 + + captureSignals(); + UnixEventPort port; + EventLoop loop(port); + WaitScope waitScope(loop); + + int pipefds[2]; + KJ_SYSCALL(pipe(pipefds)); + kj::AutoCloseFd infd(pipefds[0]), outfd(pipefds[1]); + + UnixEventPort::FdObserver observer(port, outfd, 0); + + auto hupPromise = observer.whenWriteDisconnected(); + + KJ_EXPECT(!hupPromise.poll(waitScope)); + + infd = nullptr; + KJ_ASSERT(hupPromise.poll(waitScope)); + hupPromise.wait(waitScope); +} + +#endif + +KJ_TEST("UnixEventPort poll for signals") { + captureSignals(); + UnixEventPort port; + EventLoop loop(port); + WaitScope waitScope(loop); + + auto promise1 = port.onSignal(SIGURG); + auto promise2 = port.onSignal(SIGIO); + + KJ_EXPECT(!promise1.poll(waitScope)); + KJ_EXPECT(!promise2.poll(waitScope)); + + KJ_SYSCALL(raise(SIGURG)); + KJ_SYSCALL(raise(SIGIO)); + port.wake(); + + KJ_EXPECT(port.poll()); + KJ_EXPECT(promise1.poll(waitScope)); + KJ_EXPECT(promise2.poll(waitScope)); + + promise1.wait(waitScope); + promise2.wait(waitScope); +} + +#if defined(SIGRTMIN) && !__CYGWIN__ && !__aarch64__ +// TODO(someday): Figure out why RT signals don't seem to work correctly on Cygwin. It looks like +// only the first signal is delivered, like how non-RT signals work. Is it possible Cygwin +// advertites RT signal support but doesn't actually implement them correctly? I can't find any +// information on the internet about this and TBH I don't care about Cygwin enough to dig in. +// TODO(someday): Figure out why RT signals don't work under qemu-user emulating aarch64 on +// Debian Buster. + +void testRtSignals(UnixEventPort& port, WaitScope& waitScope, bool doPoll) { + union sigval value; + memset(&value, 0, sizeof(value)); + + // Queue three copies of the signal upfront. + for (uint i = 0; i < 3; i++) { + value.sival_int = 123 + i; + KJ_SYSCALL(sigqueue(getpid(), SIGRTMIN, value)); + } + + // Now wait for them. + for (uint i = 0; i < 3; i++) { + auto promise = port.onSignal(SIGRTMIN); + if (doPoll) { + KJ_ASSERT(promise.poll(waitScope)); + } + auto info = promise.wait(waitScope); + KJ_EXPECT(info.si_value.sival_int == 123 + i); + } + + KJ_EXPECT(!port.onSignal(SIGRTMIN).poll(waitScope)); +} + +KJ_TEST("UnixEventPort can receive multiple queued instances of an RT signal") { + captureSignals(); + UnixEventPort port; + EventLoop loop(port); + WaitScope waitScope(loop); + + testRtSignals(port, waitScope, true); + + // Test again, but don't poll() the promises. This may test a different code path, if poll() and + // wait() are very different in how they read signals. 
(For the poll(2)-based implementation of + // UnixEventPort, they are indeed pretty different.) + testRtSignals(port, waitScope, false); +} +#endif + } // namespace } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/threadlocal-pthread-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-unix-xthread-test.c++ similarity index 81% rename from libs/EXTERNAL/capnproto/c++/src/kj/threadlocal-pthread-test.c++ rename to libs/EXTERNAL/capnproto/c++/src/kj/async-unix-xthread-test.c++ index d4c270ea295..e57a8d84a06 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/threadlocal-pthread-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-unix-xthread-test.c++ @@ -1,4 +1,4 @@ -// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors +// Copyright (c) 2019 Cloudflare, Inc. and contributors // Licensed under the MIT License: // // Permission is hereby granted, free of charge, to any person obtaining a copy @@ -20,6 +20,13 @@ // THE SOFTWARE. #if !_WIN32 -#define KJ_USE_PTHREAD_TLS 1 -#include "threadlocal-test.c++" -#endif + +#include "async-unix.h" + +#define KJ_XTHREAD_TEST_SETUP_LOOP \ + UnixEventPort port; \ + EventLoop loop(port); \ + WaitScope waitScope(loop) +#include "async-xthread-test.c++" + +#endif // !_WIN32 diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-unix.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-unix.c++ index 7e94c1a5f78..796f629182e 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-unix.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-unix.c++ @@ -28,30 +28,22 @@ #include #include #include -#include #include #include #include +#include #if KJ_USE_EPOLL -#include #include #include #include #else #include +#include #endif namespace kj { -// ======================================================================================= -// Timer code common to multiple implementations - -TimePoint UnixEventPort::readClock() { - return origin() + std::chrono::duration_cast( - std::chrono::steady_clock::now().time_since_epoch()).count() * NANOSECONDS; -} - // ======================================================================================= // Signal code common to multiple implementations @@ -65,6 +57,30 @@ bool threadClaimedChildExits = false; struct SignalCapture { sigjmp_buf jumpTo; siginfo_t siginfo; + +#if __APPLE__ + sigset_t originalMask; + // The signal mask to be restored when jumping out of the signal handler. + // + // "But wait!" you say, "Isn't the whole point of siglongjmp() that it does this for you?" Well, + // yes, that is supposed to be the point. However, Apple implemented it wrong. On macOS, + // siglongjmp() uses sigprocmask() -- not pthread_sigmask() -- to restore the signal mask. + // Unfortunately, sigprocmask() on macOS affects threads other than the current thread. Arguably + // this is conformant: sigprocmask() is documented as having unspecified behavior in the presence + // of threads, and pthread_sigmask() must be used instead. However, this means siglongjmp() + // cannot be used in the presence of threads. + // + // We'll just have to restore the signal mask ourselves, rather than rely on siglongjmp()... + // + // ... but we ONLY do that on Apple systems, because it turns out, ironically, on Android, this + // hack breaks signal delivery. pthread_sigmask() vs. sigprocmask() is not the issue; we + // apparently MUST let siglongjmp() itself deal with the signal mask, otherwise various tests in + // async-unix-test.c++ end up hanging (I haven't gotten to the bottom of why).
Note that on stock + Linux, _either_ strategy works fine; this appears to be a problem with Android's Bionic libc. + // Since letting siglongjmp() do the work _seems_ more "correct", we'll make it the default and + // only do something different on Apple platforms. +#define KJ_BROKEN_SIGLONGJMP 1 +#endif }; #if !KJ_USE_EPOLL // on Linux we'll use signalfd @@ -74,7 +90,18 @@ void signalHandler(int, siginfo_t* siginfo, void*) { SignalCapture* capture = threadCapture; if (capture != nullptr) { capture->siginfo = *siginfo; - siglongjmp(capture->jumpTo, 1); + +#if KJ_BROKEN_SIGLONGJMP + // See comments on SignalCapture::originalMask, above: We can't rely on siglongjmp() to restore + // the signal mask; we must do it ourselves using pthread_sigmask(). We pass false as the + // second parameter to siglongjmp() so that it skips changing the signal mask. This makes it + // equivalent to `longjmp()` on Linux or `_longjmp()` on BSD/macOS. See comments on + // SignalCapture::originalMask for explanation. + pthread_sigmask(SIG_SETMASK, &capture->originalMask, nullptr); + siglongjmp(capture->jumpTo, false); +#else + siglongjmp(capture->jumpTo, true); +#endif } } #endif @@ -85,7 +112,7 @@ void registerSignalHandler(int signum) { sigset_t mask; KJ_SYSCALL(sigemptyset(&mask)); KJ_SYSCALL(sigaddset(&mask, signum)); - KJ_SYSCALL(sigprocmask(SIG_BLOCK, &mask, nullptr)); + KJ_SYSCALL(pthread_sigmask(SIG_BLOCK, &mask, nullptr)); #if !KJ_USE_EPOLL // on Linux we'll use signalfd struct sigaction action; @@ -97,10 +124,14 @@ void registerSignalHandler(int signum) { #endif } +#if !KJ_USE_EPOLL && !KJ_USE_PIPE_FOR_WAKEUP void registerReservedSignal() { registerSignalHandler(reservedSignal); +} +#endif - // We also disable SIGPIPE because users of UnixEventPort almost certainly don't want it. +void ignoreSigpipe() { + // We disable SIGPIPE because users of UnixEventPort almost certainly don't want it.
while (signal(SIGPIPE, SIG_IGN) == SIG_ERR) { int error = errno; if (error != EINTR) { @@ -109,8 +140,6 @@ void registerReservedSignal() { } } -pthread_once_t registerReservedSignalOnce = PTHREAD_ONCE_INIT; - } // namespace struct UnixEventPort::ChildSet { @@ -284,16 +313,19 @@ void UnixEventPort::gotSignal(const siginfo_t& siginfo) { // epoll FdObserver implementation UnixEventPort::UnixEventPort() - : timerImpl(readClock()), + : clock(systemPreciseMonotonicClock()), + timerImpl(clock.now()), epollFd(-1), signalFd(-1), eventFd(-1) { - pthread_once(®isterReservedSignalOnce, ®isterReservedSignal); + ignoreSigpipe(); int fd; KJ_SYSCALL(fd = epoll_create1(EPOLL_CLOEXEC)); epollFd = AutoCloseFd(fd); + memset(&signalFdSigset, 0, sizeof(signalFdSigset)); + KJ_SYSCALL(sigemptyset(&signalFdSigset)); KJ_SYSCALL(fd = signalfd(-1, &signalFdSigset, SFD_NONBLOCK | SFD_CLOEXEC)); signalFd = AutoCloseFd(fd); @@ -365,6 +397,13 @@ void UnixEventPort::FdObserver::fire(short events) { } } + if (events & (EPOLLHUP | EPOLLERR)) { + KJ_IF_MAYBE(f, hupFulfiller) { + f->get()->fulfill(); + hupFulfiller = nullptr; + } + } + if (events & EPOLLPRI) { KJ_IF_MAYBE(f, urgentFulfiller) { f->get()->fulfill(); @@ -398,9 +437,15 @@ Promise UnixEventPort::FdObserver::whenUrgentDataAvailable() { return kj::mv(paf.promise); } +Promise UnixEventPort::FdObserver::whenWriteDisconnected() { + auto paf = newPromiseAndFulfiller(); + hupFulfiller = kj::mv(paf.fulfiller); + return kj::mv(paf.promise); +} + bool UnixEventPort::wait() { return doEpollWait( - timerImpl.timeoutToNextEvent(readClock(), MILLISECONDS, int(maxValue)) + timerImpl.timeoutToNextEvent(clock.now(), MILLISECONDS, int(maxValue)) .map([](uint64_t t) -> int { return t; }) .orDefault(-1)); } @@ -526,6 +571,7 @@ static siginfo_t toRegularSiginfo(const struct signalfd_siginfo& siginfo) { bool UnixEventPort::doEpollWait(int timeout) { sigset_t newMask; + memset(&newMask, 0, sizeof(newMask)); sigemptyset(&newMask); { @@ -573,6 +619,19 @@ bool UnixEventPort::doEpollWait(int timeout) { KJ_ASSERT(n == sizeof(siginfo)); gotSignal(toRegularSiginfo(siginfo)); + +#ifdef SIGRTMIN + if (siginfo.ssi_signo >= SIGRTMIN) { + // This is an RT signal. There could be multiple copies queued. We need to remove it from + // the signalfd's signal mask before we continue, to avoid accidentally reading and + // discarding the extra copies. + // TODO(perf): If high throughput of RT signals is desired then perhaps we should read + // them all into userspace and queue them here. Maybe we even need a better interface + // than onSignal() for receiving high-volume RT signals. + KJ_SYSCALL(sigdelset(&signalFdSigset, siginfo.ssi_signo)); + KJ_SYSCALL(signalfd(signalFd, &signalFdSigset, SFD_NONBLOCK | SFD_CLOEXEC)); + } +#endif } } else if (events[i].data.u64 == 1) { // Someone called wake() from another thread. Consume the event. @@ -589,7 +648,7 @@ bool UnixEventPort::doEpollWait(int timeout) { } } - timerImpl.advanceTo(readClock()); + timerImpl.advanceTo(clock.now()); return woken; } @@ -603,12 +662,33 @@ bool UnixEventPort::doEpollWait(int timeout) { #endif UnixEventPort::UnixEventPort() - : timerImpl(readClock()) { + : clock(systemPreciseMonotonicClock()), + timerImpl(clock.now()) { +#if KJ_USE_PIPE_FOR_WAKEUP + // Allocate a pipe to which we'll write a byte in order to wake this thread. 
+ int fds[2]; + KJ_SYSCALL(pipe(fds)); + wakePipeIn = kj::AutoCloseFd(fds[0]); + wakePipeOut = kj::AutoCloseFd(fds[1]); + KJ_SYSCALL(fcntl(wakePipeIn, F_SETFD, FD_CLOEXEC)); + KJ_SYSCALL(fcntl(wakePipeOut, F_SETFD, FD_CLOEXEC)); +#else static_assert(sizeof(threadId) >= sizeof(pthread_t), "pthread_t is larger than a long long on your platform. Please port."); *reinterpret_cast(&threadId) = pthread_self(); - pthread_once(&registerReservedSignalOnce, &registerReservedSignal); + // Note: We used to use a pthread_once to call registerReservedSignal() only once per process. + // This didn't work correctly because registerReservedSignal() not only registers the + // (process-wide) signal handler, but also sets the (per-thread) signal mask to block the + // signal. Thus, if threads were spawned before the first UnixEventPort was created, and then + // multiple threads created UnixEventPorts, only one of them would have the signal properly + // blocked. We could have changed things so that only the handler registration was protected + // by the pthread_once and the mask update happened in every thread, but registering a signal + // handler is not an expensive operation, so whatever... we'll do it in every thread. + registerReservedSignal(); +#endif + + ignoreSigpipe(); } UnixEventPort::~UnixEventPort() noexcept(false) {} @@ -652,6 +732,13 @@ void UnixEventPort::FdObserver::fire(short events) { } } + if (events & (POLLHUP | POLLERR | POLLNVAL)) { + KJ_IF_MAYBE(f, hupFulfiller) { + f->get()->fulfill(); + hupFulfiller = nullptr; + } + } + if (events & POLLPRI) { KJ_IF_MAYBE(f, urgentFulfiller) { f->get()->fulfill(); @@ -659,7 +746,8 @@ void UnixEventPort::FdObserver::fire(short events) { } } - if (readFulfiller == nullptr && writeFulfiller == nullptr && urgentFulfiller == nullptr) { + if (readFulfiller == nullptr && writeFulfiller == nullptr && urgentFulfiller == nullptr && + hupFulfiller == nullptr) { // Remove from list. if (next == nullptr) { eventPort.observersTail = prev; @@ -675,7 +763,16 @@ void UnixEventPort::FdObserver::fire(short events) { short UnixEventPort::FdObserver::getEventMask() { return (readFulfiller == nullptr ? 0 : (POLLIN | POLLRDHUP)) | (writeFulfiller == nullptr ? 0 : POLLOUT) | - (urgentFulfiller == nullptr ? 0 : POLLPRI); + (urgentFulfiller == nullptr ? 0 : POLLPRI) | + // The POSIX standard says POLLHUP and POLLERR will be reported even if not requested. + // But on MacOS, if `events` is 0, then POLLHUP apparently will not be reported: + // https://openradar.appspot.com/37537852 + // It seems that by setting any non-zero value -- even one documented as ignored -- we + // cause POLLHUP to be reported. Both POLLHUP and POLLERR are documented as being ignored. + // So, we'll go ahead and set them. This has no effect on non-broken OSs, causes MacOS to + // do the right thing, and sort of looks as if we're explicitly requesting notification of + // these two conditions, which we do after all want to know about.
+ POLLHUP | POLLERR; } Promise UnixEventPort::FdObserver::whenBecomesReadable() { @@ -724,18 +821,40 @@ Promise UnixEventPort::FdObserver::whenUrgentDataAvailable() { return kj::mv(paf.promise); } +Promise UnixEventPort::FdObserver::whenWriteDisconnected() { + if (prev == nullptr) { + KJ_DASSERT(next == nullptr); + prev = eventPort.observersTail; + *prev = this; + eventPort.observersTail = &next; + } + + auto paf = newPromiseAndFulfiller(); + hupFulfiller = kj::mv(paf.fulfiller); + return kj::mv(paf.promise); +} + class UnixEventPort::PollContext { public: - PollContext(FdObserver* ptr) { - while (ptr != nullptr) { + PollContext(UnixEventPort& port) { + for (FdObserver* ptr = port.observersHead; ptr != nullptr; ptr = ptr->next) { struct pollfd pollfd; memset(&pollfd, 0, sizeof(pollfd)); pollfd.fd = ptr->fd; pollfd.events = ptr->getEventMask(); pollfds.add(pollfd); pollEvents.add(ptr); - ptr = ptr->next; } + +#if KJ_USE_PIPE_FOR_WAKEUP + { + struct pollfd pollfd; + memset(&pollfd, 0, sizeof(pollfd)); + pollfd.fd = port.wakePipeIn; + pollfd.events = POLLIN; + pollfds.add(pollfd); + } +#endif } void run(int timeout) { @@ -751,19 +870,36 @@ public: } } - void processResults() { + bool processResults() { if (pollResult < 0) { KJ_FAIL_SYSCALL("poll()", pollError); } + bool woken = false; for (auto i: indices(pollfds)) { if (pollfds[i].revents != 0) { - pollEvents[i]->fire(pollfds[i].revents); +#if KJ_USE_PIPE_FOR_WAKEUP + if (i == pollEvents.size()) { + // The last pollfd is our cross-thread wake pipe. + woken = true; + // Discard junk in the wake pipe. + char junk[256]; + ssize_t n; + do { + KJ_NONBLOCKING_SYSCALL(n = read(pollfds[i].fd, junk, sizeof(junk))); + } while (n >= 256); + } else { +#endif + pollEvents[i]->fire(pollfds[i].revents); +#if KJ_USE_PIPE_FOR_WAKEUP + } +#endif if (--pollResult <= 0) { break; } } } + return woken; } private: @@ -776,7 +912,10 @@ private: bool UnixEventPort::wait() { sigset_t newMask; sigemptyset(&newMask); + +#if !KJ_USE_PIPE_FOR_WAKEUP sigaddset(&newMask, reservedSignal); +#endif { auto ptr = signalHead; @@ -789,41 +928,53 @@ bool UnixEventPort::wait() { } } - PollContext pollContext(observersHead); + PollContext pollContext(*this); // Capture signals. SignalCapture capture; +#if KJ_BROKEN_SIGLONGJMP + if (sigsetjmp(capture.jumpTo, false)) { +#else if (sigsetjmp(capture.jumpTo, true)) { +#endif // We received a signal and longjmp'd back out of the signal handler. threadCapture = nullptr; +#if !KJ_USE_PIPE_FOR_WAKEUP if (capture.siginfo.si_signo == reservedSignal) { return true; } else { +#endif gotSignal(capture.siginfo); return false; +#if !KJ_USE_PIPE_FOR_WAKEUP } +#endif } // Enable signals, run the poll, then mask them again. - sigset_t origMask; +#if KJ_BROKEN_SIGLONGJMP + auto& originalMask = capture.originalMask; +#else + sigset_t originalMask; +#endif threadCapture = &capture; - sigprocmask(SIG_UNBLOCK, &newMask, &origMask); + pthread_sigmask(SIG_UNBLOCK, &newMask, &originalMask); pollContext.run( - timerImpl.timeoutToNextEvent(readClock(), MILLISECONDS, int(maxValue)) + timerImpl.timeoutToNextEvent(clock.now(), MILLISECONDS, int(maxValue)) .map([](uint64_t t) -> int { return t; }) .orDefault(-1)); - sigprocmask(SIG_SETMASK, &origMask, nullptr); + pthread_sigmask(SIG_SETMASK, &originalMask, nullptr); threadCapture = nullptr; // Queue events. 
- pollContext.processResults(); - timerImpl.advanceTo(readClock()); + bool result = pollContext.processResults(); + timerImpl.advanceTo(clock.now()); - return false; + return result; } bool UnixEventPort::poll() { @@ -839,11 +990,13 @@ bool UnixEventPort::poll() { KJ_SYSCALL(sigpending(&pending)); uint signalCount = 0; +#if !KJ_USE_PIPE_FOR_WAKEUP if (sigismember(&pending, reservedSignal)) { ++signalCount; sigdelset(&pending, reservedSignal); sigdelset(&waitMask, reservedSignal); } +#endif { auto ptr = signalHead; @@ -859,40 +1012,81 @@ bool UnixEventPort::poll() { // Wait for each pending signal. It would be nice to use sigtimedwait() here but it is not // available on OSX. :( Instead, we call sigsuspend() once per expected signal. - while (signalCount-- > 0) { + { SignalCapture capture; +#if KJ_BROKEN_SIGLONGJMP + pthread_sigmask(SIG_SETMASK, nullptr, &capture.originalMask); +#endif threadCapture = &capture; - if (sigsetjmp(capture.jumpTo, true)) { - // We received a signal and longjmp'd back out of the signal handler. - sigdelset(&waitMask, capture.siginfo.si_signo); - if (capture.siginfo.si_signo == reservedSignal) { - woken = true; + KJ_DEFER(threadCapture = nullptr); + while (signalCount-- > 0) { +#if KJ_BROKEN_SIGLONGJMP + if (sigsetjmp(capture.jumpTo, false)) { +#else + if (sigsetjmp(capture.jumpTo, true)) { +#endif + // We received a signal and longjmp'd back out of the signal handler. + sigdelset(&waitMask, capture.siginfo.si_signo); +#if !KJ_USE_PIPE_FOR_WAKEUP + if (capture.siginfo.si_signo == reservedSignal) { + woken = true; + } else { +#endif + gotSignal(capture.siginfo); +#if !KJ_USE_PIPE_FOR_WAKEUP + } +#endif } else { - gotSignal(capture.siginfo); +#if __CYGWIN__ + // Cygwin's sigpending() incorrectly reports signals pending for any thread, not just our + // own thread. As a work-around, instead of using sigsuspend() (which would block forever + // if the signal is not pending on *this* thread), we un-mask the signals and immediately + // mask them again. If any signals are pending, they *should* be delivered before the first + // sigprocmask() returns, and the handler will then longjmp() to the block above. If it + // turns out no signal is pending, we'll block the signals again and break out of the + // loop. + // + // Bug reported here: https://cygwin.com/ml/cygwin/2019-07/msg00051.html + sigset_t origMask; + sigprocmask(SIG_SETMASK, &waitMask, &origMask); + sigprocmask(SIG_SETMASK, &origMask, nullptr); + break; +#else + sigsuspend(&waitMask); + KJ_FAIL_ASSERT("sigsuspend() shouldn't return because the signal handler should " + "have siglongjmp()ed."); +#endif } - } else { - sigsuspend(&waitMask); - KJ_FAIL_ASSERT("sigsuspend() shouldn't return because the signal handler should " - "have siglongjmp()ed."); } - threadCapture = nullptr; } { - PollContext pollContext(observersHead); + PollContext pollContext(*this); pollContext.run(0); - pollContext.processResults(); + if (pollContext.processResults()) { + woken = true; + } } - timerImpl.advanceTo(readClock()); + timerImpl.advanceTo(clock.now()); return woken; } void UnixEventPort::wake() const { +#if KJ_USE_PIPE_FOR_WAKEUP + // We're going to write() a single byte to our wake pipe in order to cause poll() to complete in + // the target thread. + // + // If this write() fails with EWOULDBLOCK, we don't care, because the target thread is already + // scheduled to wake up. 
+ char c = 0; + KJ_NONBLOCKING_SYSCALL(write(wakePipeOut, &c, 1)); +#else int error = pthread_kill(*reinterpret_cast(&threadId), reservedSignal); if (error != 0) { KJ_FAIL_SYSCALL("pthread_kill", error); } +#endif } #endif // KJ_USE_EPOLL, else diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-unix.h b/libs/EXTERNAL/capnproto/c++/src/kj/async-unix.h index 45544d17b4c..63fe92790f0 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-unix.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-unix.h @@ -25,21 +25,27 @@ #error "This file is Unix-specific. On Windows, include async-win32.h instead." #endif -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "async.h" #include "timer.h" #include "vector.h" #include "io.h" #include +KJ_BEGIN_HEADER + #if __linux__ && !__BIONIC__ && !defined(KJ_USE_EPOLL) // Default to epoll on Linux, except on Bionic (Android) which doesn't have signalfd.h. #define KJ_USE_EPOLL 1 #endif +#if __CYGWIN__ && !defined(KJ_USE_PIPE_FOR_WAKEUP) +// Cygwin has serious issues with the intersection of signals and threads, reported here: +// https://cygwin.com/ml/cygwin/2019-07/msg00052.html +// On Cygwin, therefore, we do not use signals to wake threads. Instead, each thread allocates a +// pipe, and we write a byte to the pipe to wake the thread... ick. +#define KJ_USE_PIPE_FOR_WAKEUP 1 +#endif + namespace kj { class UnixEventPort: public EventPort { @@ -142,12 +148,12 @@ class UnixEventPort: public EventPort { class SignalPromiseAdapter; class ChildExitPromiseAdapter; + const MonotonicClock& clock; TimerImpl timerImpl; SignalPromiseAdapter* signalHead = nullptr; SignalPromiseAdapter** signalTail = &signalHead; - TimePoint readClock(); void gotSignal(const siginfo_t& siginfo); friend class TimerPromiseAdapter; @@ -169,7 +175,12 @@ class UnixEventPort: public EventPort { FdObserver* observersHead = nullptr; FdObserver** observersTail = &observersHead; +#if KJ_USE_PIPE_FOR_WAKEUP + AutoCloseFd wakePipeIn; + AutoCloseFd wakePipeOut; +#else unsigned long long threadId; // actually pthread_t +#endif #endif struct ChildSet; @@ -278,6 +289,9 @@ class UnixEventPort::FdObserver { // WARNING: This has some known weird behavior on macOS. See // https://github.com/sandstorm-io/capnproto/issues/374. + Promise whenWriteDisconnected(); + // Resolves when poll() on the file descriptor reports POLLHUP or POLLERR. + private: UnixEventPort& eventPort; int fd; @@ -286,6 +300,7 @@ class UnixEventPort::FdObserver { kj::Maybe>> readFulfiller; kj::Maybe>> writeFulfiller; kj::Maybe>> urgentFulfiller; + kj::Maybe>> hupFulfiller; // Replaced each time `whenBecomesReadable()` or `whenBecomesWritable()` is called. Reverted to // null every time an event is fired. 
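A rough usage sketch (not part of the patch above; the helper name onPeerDisconnect and the flags=0 choice are illustrative assumptions, mirroring the flags=0 test added earlier in this change) of how application code might consume the new whenWriteDisconnected() API:

#include <kj/async-unix.h>
#include <kj/memory.h>

// Hypothetical helper: returns a promise that resolves once the peer closes its end of `fd`
// (i.e. poll() reports POLLHUP or POLLERR). The FdObserver must stay alive while the promise
// is pending, so it is attached to the promise before being returned.
kj::Promise<void> onPeerDisconnect(kj::UnixEventPort& port, int fd) {
  auto observer = kj::heap<kj::UnixEventPort::FdObserver>(port, fd, 0);
  auto promise = observer->whenWriteDisconnected();
  return promise.attach(kj::mv(observer));
}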
@@ -306,3 +321,5 @@ class UnixEventPort::FdObserver { }; } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-win32-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-win32-test.c++ index 6866bda9b74..3dd6e5bc981 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-win32-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-win32-test.c++ @@ -24,6 +24,7 @@ #include "async-win32.h" #include "thread.h" #include "test.h" +#include "mutex.h" namespace kj { namespace { diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-win32-xthread-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-win32-xthread-test.c++ new file mode 100644 index 00000000000..c93be7fe99a --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-win32-xthread-test.c++ @@ -0,0 +1,32 @@ +// Copyright (c) 2019 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#if _WIN32 + +#include "async-win32.h" + +#define KJ_XTHREAD_TEST_SETUP_LOOP \ + Win32IocpEventPort port; \ + EventLoop loop(port); \ + WaitScope waitScope(loop) +#include "async-xthread-test.c++" + +#endif // _WIN32 diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-win32.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-win32.c++ index 6a5d470fd15..ba0ee88b8e2 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-win32.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-win32.c++ @@ -22,13 +22,12 @@ #if _WIN32 // Request Vista-level APIs. 
-#define WINVER 0x0600 -#define _WIN32_WINNT 0x0600 +#include "win32-api-version.h" #include "async-win32.h" #include "debug.h" #include -#include +#include "time.h" #include "refcount.h" #include // NTSTATUS #include // STATUS_SUCCESS @@ -38,7 +37,8 @@ namespace kj { Win32IocpEventPort::Win32IocpEventPort() - : iocp(newIocpHandle()), thread(openCurrentThread()), timerImpl(readClock()) {} + : clock(systemPreciseMonotonicClock()), + iocp(newIocpHandle()), thread(openCurrentThread()), timerImpl(clock.now()) {} Win32IocpEventPort::~Win32IocpEventPort() noexcept(false) {} @@ -157,17 +157,12 @@ Own Win32IocpEventPort::observeSignalState(HANDL return waitThreads.observeSignalState(handle); } -TimePoint Win32IocpEventPort::readClock() { - return origin() + std::chrono::duration_cast( - std::chrono::steady_clock::now().time_since_epoch()).count() * NANOSECONDS; -} - bool Win32IocpEventPort::wait() { - waitIocp(timerImpl.timeoutToNextEvent(readClock(), MILLISECONDS, INFINITE - 1) + waitIocp(timerImpl.timeoutToNextEvent(clock.now(), MILLISECONDS, INFINITE - 1) .map([](uint64_t t) -> DWORD { return t; }) .orDefault(INFINITE)); - timerImpl.advanceTo(readClock()); + timerImpl.advanceTo(clock.now()); return receivedWake(); } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-win32.h b/libs/EXTERNAL/capnproto/c++/src/kj/async-win32.h index 489a9a7ba96..2085118c792 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async-win32.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-win32.h @@ -25,18 +25,16 @@ #error "This file is Windows-specific. On Unix, include async-unix.h instead." #endif +// Include windows.h as lean as possible. (If you need more of the Windows API for your app, +// #include windows.h yourself before including this header.) +#include "win32-api-version.h" + #include "async.h" #include "timer.h" #include "io.h" #include #include -// Include windows.h as lean as possible. (If you need more of the Windows API for your app, -// #include windows.h yourself before including this header.) -#define WIN32_LEAN_AND_MEAN 1 -#define NOSERVICE 1 -#define NOMCX 1 -#define NOIME 1 #include #include "windows-sanity.h" @@ -127,7 +125,7 @@ class Win32EventPort: public EventPort { // an exception. virtual Promise onSignaledOrAbandoned() = 0; - // Like onSingaled(), but instead of throwing when a mutex is abandoned, resolves to `true`. + // Like onSignaled(), but instead of throwing when a mutex is abandoned, resolves to `true`. // Resolves to `false` for non-abandoned signals. }; @@ -209,6 +207,8 @@ class Win32IocpEventPort final: public Win32EventPort { class IoOperationImpl; class IoObserverImpl; + const MonotonicClock& clock; + AutoCloseHandle iocp; AutoCloseHandle thread; Win32WaitObjectThreadPool waitThreads; @@ -216,8 +216,6 @@ class Win32IocpEventPort final: public Win32EventPort { mutable std::atomic sentWake {false}; bool isAllowApc = false; - static TimePoint readClock(); - void waitIocp(DWORD timeoutMs); // Wait on the I/O completion port for up to timeoutMs and pump events. Does not advance the // timer; caller must do that. diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async-xthread-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-xthread-test.c++ new file mode 100644 index 00000000000..7d6fa80d6e5 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async-xthread-test.c++ @@ -0,0 +1,1044 @@ +// Copyright (c) 2019 Cloudflare, Inc. 
and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#if _WIN32 +#include "win32-api-version.h" +#endif + +#include "async.h" +#include "debug.h" +#include "thread.h" +#include "mutex.h" +#include + +#if _WIN32 +#include +#include "windows-sanity.h" +inline void delay() { Sleep(10); } +#else +#include +inline void delay() { usleep(10000); } +#endif + +// This file is #included from async-unix-xthread-test.c++ and async-win32-xthread-test.c++ after +// defining KJ_XTHREAD_TEST_SETUP_LOOP to set up a loop with the corresponding EventPort. +#ifndef KJ_XTHREAD_TEST_SETUP_LOOP +#define KJ_XTHREAD_TEST_SETUP_LOOP \ + EventLoop loop; \ + WaitScope waitScope(loop) +#endif + +namespace kj { +namespace { + +KJ_TEST("synchonous simple cross-thread events") { + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + thread_local bool isChild = false; // to assert which thread we're in + + // We use `noexcept` so that any uncaught exceptions immediately terminate the process without + // unwinding. Otherwise, the unwind would likely deadlock waiting for some synchronization with + // the other thread. + Thread thread([&]() noexcept { + isChild = true; + + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + KJ_ASSERT(paf.promise.wait(waitScope) == 123); + + // Wait until parent thread sets executor to null, as a way to tell us to quit. 
+ executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + const Executor* exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + KJ_ASSERT(!isChild); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test exception", exec->executeSync([&]() { + KJ_ASSERT(isChild); + KJ_FAIL_ASSERT("test exception") { break; } + })); + + uint i = exec->executeSync([&]() { + KJ_ASSERT(isChild); + fulfiller->fulfill(123); + return 456; + }); + KJ_EXPECT(i == 456); + + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("asynchonous simple cross-thread events") { + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + thread_local bool isChild = false; // to assert which thread we're in + + // We use `noexcept` so that any uncaught exceptions immediately terminate the process without + // unwinding. Otherwise, the unwind would likely deadlock waiting for some synchronization with + // the other thread. + Thread thread([&]() noexcept { + isChild = true; + + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + KJ_ASSERT(paf.promise.wait(waitScope) == 123); + + // Wait until parent thread sets executor to null, as a way to tell us to quit. + executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + const Executor* exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + KJ_ASSERT(!isChild); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test exception", exec->executeAsync([&]() { + KJ_ASSERT(isChild); + KJ_FAIL_ASSERT("test exception") { break; } + }).wait(waitScope)); + + Promise promise = exec->executeAsync([&]() { + KJ_ASSERT(isChild); + fulfiller->fulfill(123); + return 456u; + }); + KJ_EXPECT(promise.wait(waitScope) == 456); + + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("synchonous promise cross-thread events") { + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + Promise promise = nullptr; // accessed only from the subthread + thread_local bool isChild = false; // to assert which thread we're in + + // We use `noexcept` so that any uncaught exceptions immediately terminate the process without + // unwinding. Otherwise, the unwind would likely deadlock waiting for some synchronization with + // the other thread. + Thread thread([&]() noexcept { + isChild = true; + + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + auto paf2 = newPromiseAndFulfiller(); + promise = kj::mv(paf2.promise); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + KJ_ASSERT(paf.promise.wait(waitScope) == 123); + + paf2.fulfiller->fulfill(321); + + // Make sure reply gets sent. + loop.run(); + + // Wait until parent thread sets executor to null, as a way to tell us to quit. 
+ executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + const Executor* exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + KJ_ASSERT(!isChild); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test exception", exec->executeSync([&]() { + KJ_ASSERT(isChild); + return kj::Promise(KJ_EXCEPTION(FAILED, "test exception")); + })); + + uint i = exec->executeSync([&]() { + KJ_ASSERT(isChild); + fulfiller->fulfill(123); + return kj::mv(promise); + }); + KJ_EXPECT(i == 321); + + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("asynchonous promise cross-thread events") { + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + Promise promise = nullptr; // accessed only from the subthread + thread_local bool isChild = false; // to assert which thread we're in + + // We use `noexcept` so that any uncaught exceptions immediately terminate the process without + // unwinding. Otherwise, the unwind would likely deadlock waiting for some synchronization with + // the other thread. + Thread thread([&]() noexcept { + isChild = true; + + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + auto paf2 = newPromiseAndFulfiller(); + promise = kj::mv(paf2.promise); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + KJ_ASSERT(paf.promise.wait(waitScope) == 123); + + paf2.fulfiller->fulfill(321); + + // Make sure reply gets sent. + loop.run(); + + // Wait until parent thread sets executor to null, as a way to tell us to quit. + executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + const Executor* exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + KJ_ASSERT(!isChild); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("test exception", exec->executeAsync([&]() { + KJ_ASSERT(isChild); + return kj::Promise(KJ_EXCEPTION(FAILED, "test exception")); + }).wait(waitScope)); + + Promise promise2 = exec->executeAsync([&]() { + KJ_ASSERT(isChild); + fulfiller->fulfill(123); + return kj::mv(promise); + }); + KJ_EXPECT(promise2.wait(waitScope) == 321); + + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("cancel cross-thread event before it runs") { + MutexGuarded> executor; // to get the Executor from the other thread + + // We use `noexcept` so that any uncaught exceptions immediately terminate the process without + // unwinding. Otherwise, the unwind would likely deadlock waiting for some synchronization with + // the other thread. + Thread thread([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + // We never run the loop here, so that when the event is canceled, it's still queued. + + // Wait until parent thread sets executor to null, as a way to tell us to quit. 
+ executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + const Executor* exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + volatile bool called = false; + { + Promise promise = exec->executeAsync([&]() { called = true; return 123u; }); + delay(); + KJ_EXPECT(!promise.poll(waitScope)); + } + KJ_EXPECT(!called); + + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("cancel cross-thread event while it runs") { + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + + // We use `noexcept` so that any uncaught exceptions immediately terminate the process without + // unwinding. Otherwise, the unwind would likely deadlock waiting for some synchronization with + // the other thread. + Thread thread([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + paf.promise.wait(waitScope); + + // Wait until parent thread sets executor to null, as a way to tell us to quit. + executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + const Executor* exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + { + volatile bool called = false; + Promise promise = exec->executeAsync([&]() -> kj::Promise { + called = true; + return kj::NEVER_DONE; + }); + while (!called) { + delay(); + } + KJ_EXPECT(!promise.poll(waitScope)); + } + + exec->executeSync([&]() { fulfiller->fulfill(); }); + + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("cross-thread cancellation in both directions at once") { + MutexGuarded> childExecutor; + MutexGuarded> parentExecutor; + + MutexGuarded readyCount(0); + + thread_local uint threadNumber = 0; + thread_local bool receivedFinalCall = false; + + // Code to execute simultaneously in two threads... + // We mark this noexcept so that any exceptions thrown will immediately invoke the termination + // handler, skipping any destructors that would deadlock. + auto simultaneous = [&](MutexGuarded>& selfExecutor, + MutexGuarded>& otherExecutor, + uint threadCount) noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + *selfExecutor.lockExclusive() = getCurrentThreadExecutor(); + + const Executor* exec; + { + auto lock = otherExecutor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + // Create a ton of cross-thread promises to cancel. + Vector> promises; + for (uint i = 0; i < 1000; i++) { + promises.add(exec->executeAsync([&]() -> kj::Promise { + return kj::Promise(kj::NEVER_DONE) + .attach(kj::defer([wasThreadNumber = threadNumber]() { + // Make sure destruction happens in the correct thread. + KJ_ASSERT(threadNumber == wasThreadNumber); + })); + })); + } + + // Signal other thread that we're done queueing, and wait for it to signal same. + { + auto lock = readyCount.lockExclusive(); + ++*lock; + lock.wait([&](uint i) { return i >= threadCount; }); + } + + // Run event loop to start all executions queued by the other thread. + waitScope.poll(); + loop.run(); + + // Signal other thread that we've run the loop, and wait for it to signal same. 
+ { + auto lock = readyCount.lockExclusive(); + ++*lock; + lock.wait([&](uint i) { return i >= threadCount * 2; }); + } + + // Cancel all the promises. + promises.clear(); + + // All our cancellations completed, but the other thread may still be waiting for some + // cancellations from us. We need to pump our event loop to make sure we continue handling + // those cancellation requests. In particular we'll queue a function to the other thread and + // wait for it to complete. The other thread will queue its own function to this thread just + // before completing the function we queued to it. + receivedFinalCall = false; + exec->executeAsync([&]() { receivedFinalCall = true; }).wait(waitScope); + + // To be safe, make sure we've actually executed the function that the other thread queued to + // us by repeatedly polling until `receivedFinalCall` becomes true in this thread. + while (!receivedFinalCall) { + waitScope.poll(); + loop.run(); + } + + // OK, signal other that we're all done. + *otherExecutor.lockExclusive() = nullptr; + + // Wait until other thread sets executor to null, as a way to tell us to quit. + selfExecutor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }; + + { + Thread thread([&]() { + threadNumber = 1; + simultaneous(childExecutor, parentExecutor, 2); + }); + + threadNumber = 0; + simultaneous(parentExecutor, childExecutor, 2); + } + + // Let's even have a three-thread version, with cyclic cancellation requests. + MutexGuarded> child2Executor; + *readyCount.lockExclusive() = 0; + + { + Thread thread1([&]() { + threadNumber = 1; + simultaneous(childExecutor, child2Executor, 3); + }); + + Thread thread2([&]() { + threadNumber = 2; + simultaneous(child2Executor, parentExecutor, 3); + }); + + threadNumber = 0; + simultaneous(parentExecutor, childExecutor, 3); + } +} + +KJ_TEST("cross-thread cancellation cycle") { + // Another multi-way cancellation test where we set up an actual cycle between three threads + // waiting on each other to complete a single event. + + MutexGuarded> child1Executor, child2Executor; + + Own> fulfiller1, fulfiller2; + + auto threadMain = [](MutexGuarded>& executor, + Own>& fulfiller) noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + paf.promise.wait(waitScope); + + // Wait until parent thread sets executor to null, as a way to tell us to quit. + executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }; + + Thread thread1([&]() noexcept { threadMain(child1Executor, fulfiller1); }); + Thread thread2([&]() noexcept { threadMain(child2Executor, fulfiller2); }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + auto& parentExecutor = getCurrentThreadExecutor(); + + const Executor* exec1; + { + auto lock = child1Executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec1 = &KJ_ASSERT_NONNULL(*lock); + } + const Executor* exec2; + { + auto lock = child2Executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec2 = &KJ_ASSERT_NONNULL(*lock); + } + + // Create an event that cycles through both threads and back to this one, and then cancel it. 
+ bool cycleAllDestroyed = false; + { + auto paf = kj::newPromiseAndFulfiller(); + Promise promise = exec1->executeAsync([&]() -> kj::Promise { + return exec2->executeAsync([&]() -> kj::Promise { + return parentExecutor.executeAsync([&]() -> kj::Promise { + paf.fulfiller->fulfill(); + return kj::Promise(kj::NEVER_DONE).attach(kj::defer([&]() { + cycleAllDestroyed = true; + })); + }); + }); + }); + + // Wait until the cycle has come all the way around. + paf.promise.wait(waitScope); + + KJ_EXPECT(!promise.poll(waitScope)); + } + + KJ_EXPECT(cycleAllDestroyed); + + exec1->executeSync([&]() { fulfiller1->fulfill(); }); + exec2->executeSync([&]() { fulfiller2->fulfill(); }); + + *child1Executor.lockExclusive() = nullptr; + *child2Executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("call own thread's executor") { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto& executor = getCurrentThreadExecutor(); + + { + uint i = executor.executeSync([]() { + return 123u; + }); + KJ_EXPECT(i == 123); + } + + KJ_EXPECT_THROW_MESSAGE( + "can't call executeSync() on own thread's executor with a promise-returning function", + executor.executeSync([]() { return kj::evalLater([]() {}); })); + + { + uint i = executor.executeAsync([]() { + return 123u; + }).wait(waitScope); + KJ_EXPECT(i == 123); + } +} + +KJ_TEST("synchronous cross-thread event disconnected") { + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + thread_local bool isChild = false; // to assert which thread we're in + + Thread thread([&]() noexcept { + isChild = true; + + { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + paf.promise.wait(waitScope); + + // Exit the event loop! + } + + // Wait until parent thread sets executor to null, as a way to tell us to quit. + executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + Own exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = KJ_ASSERT_NONNULL(*lock).addRef(); + } + + KJ_EXPECT(!isChild); + + KJ_EXPECT(exec->isLive()); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "Executor's event loop exited before cross-thread event could complete", + exec->executeSync([&]() -> Promise { + fulfiller->fulfill(); + return kj::NEVER_DONE; + })); + + KJ_EXPECT(!exec->isLive()); + + KJ_EXPECT_THROW_MESSAGE( + "Executor's event loop has exited", + exec->executeSync([&]() {})); + + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("asynchronous cross-thread event disconnected") { + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + thread_local bool isChild = false; // to assert which thread we're in + + Thread thread([&]() noexcept { + isChild = true; + + { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + paf.promise.wait(waitScope); + + // Exit the event loop! + } + + // Wait until parent thread sets executor to null, as a way to tell us to quit. 
+ executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + Own exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = KJ_ASSERT_NONNULL(*lock).addRef(); + } + + KJ_EXPECT(!isChild); + + KJ_EXPECT(exec->isLive()); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "Executor's event loop exited before cross-thread event could complete", + exec->executeAsync([&]() -> Promise { + fulfiller->fulfill(); + return kj::NEVER_DONE; + }).wait(waitScope)); + + KJ_EXPECT(!exec->isLive()); + + KJ_EXPECT_THROW_MESSAGE( + "Executor's event loop has exited", + exec->executeAsync([&]() {}).wait(waitScope)); + + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("cross-thread event disconnected before it runs") { + MutexGuarded> executor; // to get the Executor from the other thread + thread_local bool isChild = false; // to assert which thread we're in + + Thread thread([&]() noexcept { + isChild = true; + + KJ_XTHREAD_TEST_SETUP_LOOP; + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + // Don't actually run the event loop. Destroy it when the other thread signals us to. + executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + Own exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = KJ_ASSERT_NONNULL(*lock).addRef(); + } + + KJ_EXPECT(!isChild); + + KJ_EXPECT(exec->isLive()); + + auto promise = exec->executeAsync([&]() { + KJ_LOG(ERROR, "shouldn't have executed"); + }); + KJ_EXPECT(!promise.poll(waitScope)); + + *executor.lockExclusive() = nullptr; + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "Executor's event loop exited before cross-thread event could complete", + promise.wait(waitScope)); + + KJ_EXPECT(!exec->isLive()); + })(); +} + +KJ_TEST("cross-thread event disconnected without holding Executor ref") { + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + thread_local bool isChild = false; // to assert which thread we're in + + Thread thread([&]() noexcept { + isChild = true; + + { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + paf.promise.wait(waitScope); + + // Exit the event loop! + } + + // Wait until parent thread sets executor to null, as a way to tell us to quit. + executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + const Executor* exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + KJ_EXPECT(!isChild); + + KJ_EXPECT(exec->isLive()); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "Executor's event loop exited before cross-thread event could complete", + exec->executeSync([&]() -> Promise { + fulfiller->fulfill(); + return kj::NEVER_DONE; + })); + + // Can't check `exec->isLive()` because it's been destroyed by now. 
+ + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("detached cross-thread event doesn't cause crash") { + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + + Thread thread([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + paf.promise.wait(waitScope); + + // Without this poll(), we don't attempt to reply to the other thread? But this isn't required + // in other tests, for some reason? Oh well. + waitScope.poll(); + + executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + }); + + ([&]() noexcept { + { + KJ_XTHREAD_TEST_SETUP_LOOP; + + const Executor* exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + exec->executeAsync([&]() -> kj::Promise { + // Make sure other thread gets time to exit its EventLoop. + delay(); + delay(); + delay(); + fulfiller->fulfill(); + return kj::READY_NOW; + }).detach([&](kj::Exception&& e) { + KJ_LOG(ERROR, e); + }); + + // Give the other thread a chance to wake up and start working on the event. + delay(); + + // Now we'll destroy our EventLoop. That *should* cause detached promises to be destroyed, + // thereby cancelling it, before disabling our own executor. However, at one point in the + // past, our executor was shut down first, followed by destroying detached promises, which + // led to an abort because the other thread had no way to reply back to this thread. + } + + *executor.lockExclusive() = nullptr; + })(); +} + +KJ_TEST("cross-thread event cancel requested while destination thread being destroyed") { + // This exercises the code in Executor::Impl::disconnect() which tears down the list of + // cross-thread events which have already been canceled. At one point this code had a bug which + // would cause it to throw if any events were present in the cancel list. + + MutexGuarded> executor; // to get the Executor from the other thread + Own> fulfiller; // accessed only from the subthread + + Thread thread([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = newPromiseAndFulfiller(); + fulfiller = kj::mv(paf.fulfiller); + + *executor.lockExclusive() = getCurrentThreadExecutor(); + + // Wait for other thread to start a cross-thread task. + paf.promise.wait(waitScope); + + // Let the other thread know, out-of-band, that the task is running, so that it can now request + // cancellation. We do this by setting `executor` to null (but we could also use some separate + // MutexGuarded conditional variable instead). + *executor.lockExclusive() = nullptr; + + // Give other thread a chance to request cancellation of the promise. + delay(); + + // now we exit the event loop + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + const Executor* exec; + { + auto lock = executor.lockExclusive(); + lock.wait([&](kj::Maybe value) { return value != nullptr; }); + exec = &KJ_ASSERT_NONNULL(*lock); + } + + KJ_EXPECT(exec->isLive()); + + auto promise = exec->executeAsync([&]() -> Promise { + fulfiller->fulfill(); + return kj::NEVER_DONE; + }); + + // Wait for the other thread to signal to us that it has indeed started executing our task. + executor.lockExclusive().wait([](auto& val) { return val == nullptr; }); + + // Cancel the promise. 
+ promise = nullptr; + })(); +} + +KJ_TEST("cross-thread fulfiller") { + MutexGuarded>>> fulfillerMutex; + + Thread thread([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = kj::newPromiseAndCrossThreadFulfiller(); + *fulfillerMutex.lockExclusive() = kj::mv(paf.fulfiller); + + int result = paf.promise.wait(waitScope); + KJ_EXPECT(result == 123); + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + Own> fulfiller; + { + auto lock = fulfillerMutex.lockExclusive(); + lock.wait([&](auto& value) { return value != nullptr; }); + fulfiller = kj::mv(KJ_ASSERT_NONNULL(*lock)); + } + + fulfiller->fulfill(123); + })(); +} + +KJ_TEST("cross-thread fulfiller rejects") { + MutexGuarded>>> fulfillerMutex; + + Thread thread([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = kj::newPromiseAndCrossThreadFulfiller(); + *fulfillerMutex.lockExclusive() = kj::mv(paf.fulfiller); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("foo exception", paf.promise.wait(waitScope)); + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + Own> fulfiller; + { + auto lock = fulfillerMutex.lockExclusive(); + lock.wait([&](auto& value) { return value != nullptr; }); + fulfiller = kj::mv(KJ_ASSERT_NONNULL(*lock)); + } + + fulfiller->reject(KJ_EXCEPTION(FAILED, "foo exception")); + })(); +} + +KJ_TEST("cross-thread fulfiller destroyed") { + MutexGuarded>>> fulfillerMutex; + + Thread thread([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = kj::newPromiseAndCrossThreadFulfiller(); + *fulfillerMutex.lockExclusive() = kj::mv(paf.fulfiller); + + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "cross-thread PromiseFulfiller was destroyed without fulfilling the promise", + paf.promise.wait(waitScope)); + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + Own> fulfiller; + { + auto lock = fulfillerMutex.lockExclusive(); + lock.wait([&](auto& value) { return value != nullptr; }); + fulfiller = kj::mv(KJ_ASSERT_NONNULL(*lock)); + } + + fulfiller = nullptr; + })(); +} + +KJ_TEST("cross-thread fulfiller canceled") { + MutexGuarded>>> fulfillerMutex; + MutexGuarded done; + + Thread thread([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = kj::newPromiseAndCrossThreadFulfiller(); + { + auto lock = fulfillerMutex.lockExclusive(); + *lock = kj::mv(paf.fulfiller); + lock.wait([](auto& value) { return value == nullptr; }); + } + + // cancel + paf.promise = nullptr; + + { + auto lock = done.lockExclusive(); + lock.wait([](bool value) { return value; }); + } + }); + + ([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + Own> fulfiller; + { + auto lock = fulfillerMutex.lockExclusive(); + lock.wait([&](auto& value) { return value != nullptr; }); + fulfiller = kj::mv(KJ_ASSERT_NONNULL(*lock)); + KJ_ASSERT(fulfiller->isWaiting()); + *lock = nullptr; + } + + // Should eventually show not waiting. 
+ while (fulfiller->isWaiting()) { + delay(); + } + + *done.lockExclusive() = true; + })(); +} + +KJ_TEST("cross-thread fulfiller multiple fulfills") { + MutexGuarded>>> fulfillerMutex; + + Thread thread([&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + auto paf = kj::newPromiseAndCrossThreadFulfiller(); + *fulfillerMutex.lockExclusive() = kj::mv(paf.fulfiller); + + int result = paf.promise.wait(waitScope); + KJ_EXPECT(result == 123); + }); + + auto func = [&]() noexcept { + KJ_XTHREAD_TEST_SETUP_LOOP; + + PromiseFulfiller* fulfiller; + { + auto lock = fulfillerMutex.lockExclusive(); + lock.wait([&](auto& value) { return value != nullptr; }); + fulfiller = KJ_ASSERT_NONNULL(*lock).get(); + } + + fulfiller->fulfill(123); + }; + + kj::Thread thread1(func); + kj::Thread thread2(func); + kj::Thread thread3(func); + kj::Thread thread4(func); +} + +} // namespace +} // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/async.c++ index 67445f01e4a..0d2db1a1f4e 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async.c++ @@ -19,25 +19,86 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +#undef _FORTIFY_SOURCE +// If _FORTIFY_SOURCE is defined, longjmp will complain when it detects the stack +// pointer moving in the "wrong direction", thinking you're jumping to a non-existent +// stack frame. But we use longjmp to jump between different stacks to implement fibers, +// so this check isn't appropriate for us. + +#if _WIN32 || __CYGWIN__ +#include "win32-api-version.h" +#elif __APPLE__ +// getcontext() and friends are marked deprecated on MacOS but seemingly no replacement is +// provided. It appears as if they deprecated it solely because the standards bodies deprecated it, +// which they seemingly did mainly because the proper semantics are too difficult for them to +// define. I doubt MacOS would actually remove these functions as they are widely used. But if they +// do, then I guess we'll need to fall back to using setjmp()/longjmp(), and some sort of hack +// involving sigaltstack() (and generating a fake signal I guess) in order to initialize the fiber +// in the first place. Or we could use assembly, I suppose. Either way, ick. +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#define _XOPEN_SOURCE // Must be defined to see getcontext() on MacOS. +#endif + #include "async.h" #include "debug.h" #include "vector.h" #include "threadlocal.h" +#include "mutex.h" +#include "one-of.h" +#include "function.h" +#include "list.h" +#include + +#if _WIN32 || __CYGWIN__ +#include // for Sleep(0) and fibers +#include "windows-sanity.h" +#else + +#if KJ_USE_FIBERS +#include +#include // for fibers +#endif + +#include // mmap(), for allocating new stacks +#include // sysconf() +#include +#endif -#if KJ_USE_FUTEX -#include -#include -#include +#if !_WIN32 +#include // just for sched_yield() #endif #if !KJ_NO_RTTI #include #if __GNUC__ #include -#include #endif #endif +#include + +#if _MSC_VER && !__clang__ +// MSVC's atomic intrinsics are weird and different, whereas the C++ standard atomics match the GCC +// builtins -- except for requiring the obnoxious std::atomic wrapper. So, on MSVC let's just +// #define the builtins based on the C++ library, reinterpret-casting native types to +// std::atomic... this is cheating but ugh, whatever.
+#include +template +static std::atomic* reinterpretAtomic(T* ptr) { return reinterpret_cast*>(ptr); } +#define __atomic_store_n(ptr, val, order) \ + std::atomic_store_explicit(reinterpretAtomic(ptr), val, order) +#define __atomic_load_n(ptr, order) \ + std::atomic_load_explicit(reinterpretAtomic(ptr), order) +#define __atomic_compare_exchange_n(ptr, expected, desired, weak, succ, fail) \ + std::atomic_compare_exchange_strong_explicit( \ + reinterpretAtomic(ptr), expected, desired, succ, fail) +#define __atomic_exchange_n(ptr, val, order) \ + std::atomic_exchange_explicit(reinterpretAtomic(ptr), val, order) +#define __ATOMIC_RELAXED std::memory_order_relaxed +#define __ATOMIC_ACQUIRE std::memory_order_acquire +#define __ATOMIC_RELEASE std::memory_order_release +#endif + namespace kj { namespace { @@ -52,14 +113,29 @@ EventLoop& currentEventLoop() { return *loop; } -class BoolEvent: public _::Event { +class RootEvent: public _::Event { public: + RootEvent(_::PromiseNode* node, void* traceAddr): node(node), traceAddr(traceAddr) {} + bool fired = false; Maybe> fire() override { fired = true; return nullptr; } + + void traceEvent(_::TraceBuilder& builder) override { + node->tracePromise(builder, true); + builder.add(traceAddr); + } + +private: + _::PromiseNode* node; + void* traceAddr; +}; + +struct DummyFunctor { + void operator()() {}; }; class YieldPromiseNode final: public _::PromiseNode { @@ -70,6 +146,22 @@ public: void get(_::ExceptionOrValue& output) noexcept override { output.as<_::Void>() = _::Void(); } + void tracePromise(_::TraceBuilder& builder, bool stopAtNextEvent) override { + builder.add(reinterpret_cast(&kj::evalLater)); + } +}; + +class YieldHarderPromiseNode final: public _::PromiseNode { +public: + void onReady(_::Event* event) noexcept override { + if (event) event->armLast(); + } + void get(_::ExceptionOrValue& output) noexcept override { + output.as<_::Void>() = _::Void(); + } + void tracePromise(_::TraceBuilder& builder, bool stopAtNextEvent) override { + builder.add(reinterpret_cast(&kj::evalLast)); + } }; class NeverDonePromiseNode final: public _::PromiseNode { @@ -80,27 +172,38 @@ public: void get(_::ExceptionOrValue& output) noexcept override { KJ_FAIL_REQUIRE("Not ready."); } + void tracePromise(_::TraceBuilder& builder, bool stopAtNextEvent) override { + builder.add(_::getMethodStartAddress(kj::NEVER_DONE, &_::NeverDone::wait)); + } }; } // namespace // ======================================================================================= +void END_CANCELER_STACK_START_CANCELEE_STACK() {} +// Dummy symbol used when reporting how a Canceler was canceled. We end up combining two stack +// traces into one and we use this as a separator. + Canceler::~Canceler() noexcept(false) { - cancel("operation canceled"); + if (isEmpty()) return; + cancel(getDestructionReason( + reinterpret_cast(&END_CANCELER_STACK_START_CANCELEE_STACK), + Exception::Type::DISCONNECTED, __FILE__, __LINE__, "operation canceled"_kj)); } void Canceler::cancel(StringPtr cancelReason) { if (isEmpty()) return; - cancel(Exception(Exception::Type::FAILED, __FILE__, __LINE__, kj::str(cancelReason))); + // We can't use getDestructionReason() here because if an exception is in-flight, it would use + // that exception, totally discarding the reason given by the caller. This would probably be + // unexpected. The caller can always use getDestructionReason() themselves if desired. 
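From the caller's side, the Canceler behavior described in this comment can be sketched as follows (illustrative; `guarded` and `shutDown` are hypothetical helper names): promises wrapped by a Canceler are rejected with a DISCONNECTED exception once cancel() runs or the Canceler itself is destroyed.

#include <kj/async.h>

// Wrap a promise so it can be proactively canceled later. If canceler.cancel()
// runs (or the Canceler is destroyed) before `inner` resolves, the returned
// promise rejects with a DISCONNECTED exception carrying the given reason.
kj::Promise<void> guarded(kj::Canceler& canceler, kj::Promise<void> inner) {
  return canceler.wrap(kj::mv(inner));
}

void shutDown(kj::Canceler& canceler) {
  canceler.cancel("shutting down");
}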
+ cancel(Exception(Exception::Type::DISCONNECTED, __FILE__, __LINE__, kj::str(cancelReason))); } void Canceler::cancel(const Exception& exception) { for (;;) { KJ_IF_MAYBE(a, list) { - list = a->next; - a->prev = nullptr; - a->next = nullptr; + a->unlink(); a->cancel(kj::cp(exception)); } else { break; @@ -111,9 +214,7 @@ void Canceler::cancel(const Exception& exception) { void Canceler::release() { for (;;) { KJ_IF_MAYBE(a, list) { - list = a->next; - a->prev = nullptr; - a->next = nullptr; + a->unlink(); } else { break; } @@ -130,12 +231,18 @@ Canceler::AdapterBase::AdapterBase(Canceler& canceler) } Canceler::AdapterBase::~AdapterBase() noexcept(false) { + unlink(); +} + +void Canceler::AdapterBase::unlink() { KJ_IF_MAYBE(p, prev) { *p = next; } KJ_IF_MAYBE(n, next) { n->prev = prev; } + next = nullptr; + prev = nullptr; } Canceler::AdapterImpl::AdapterImpl(kj::PromiseFulfiller& fulfiller, @@ -156,9 +263,6 @@ void Canceler::AdapterImpl::cancel(kj::Exception&& e) { TaskSet::TaskSet(TaskSet::ErrorHandler& errorHandler) : errorHandler(errorHandler) {} - -TaskSet::~TaskSet() noexcept(false) {} - class TaskSet::Task final: public _::Event { public: Task(TaskSet& taskSet, Own<_::PromiseNode>&& nodeParam) @@ -167,9 +271,26 @@ public: node->onReady(this); } + Own pop() { + KJ_IF_MAYBE(n, next) { n->get()->prev = prev; } + Own self = kj::mv(KJ_ASSERT_NONNULL(*prev)); + KJ_ASSERT(self.get() == this); + *prev = kj::mv(next); + next = nullptr; + prev = nullptr; + return self; + } + Maybe> next; Maybe>* prev = nullptr; + kj::String trace() { + void* space[32]; + _::TraceBuilder builder(space); + node->tracePromise(builder, false); + return kj::str("task: ", builder); + } + protected: Maybe> fire() override { // Get the result. @@ -189,14 +310,7 @@ protected: } // Remove from the task list. - KJ_IF_MAYBE(n, next) { - n->get()->prev = prev; - } - Own self = kj::mv(KJ_ASSERT_NONNULL(*prev)); - KJ_ASSERT(self.get() == this); - *prev = kj::mv(next); - next = nullptr; - prev = nullptr; + auto self = pop(); KJ_IF_MAYBE(f, taskSet.emptyFulfiller) { if (taskSet.tasks == nullptr) { @@ -208,84 +322,1313 @@ protected: return mv(self); } - _::PromiseNode* getInnerForTrace() override { - return node; - } + void traceEvent(_::TraceBuilder& builder) override { + // Pointing out the ErrorHandler's taskFailed() implementation will usually identify the + // particular TaskSet that contains this event. + builder.add(_::getMethodStartAddress(taskSet.errorHandler, &ErrorHandler::taskFailed)); + } + +private: + TaskSet& taskSet; + Own<_::PromiseNode> node; +}; + +TaskSet::~TaskSet() noexcept(false) { + // You could argue it is dubious, but some applications would like for the destructor of a + // task to be able to schedule new tasks. So when we cancel our tasks... we might find new + // tasks added! We'll have to repeatedly cancel. Additionally, we need to make sure that we destroy + // the items in a loop to prevent any issues with stack overflow. 
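For reference, a TaskSet is typically driven along these lines (a sketch; `LogErrors` and `drainBackgroundWork` are hypothetical names): failed tasks are reported through the ErrorHandler, and onEmpty() resolves once every added task has completed.

#include <kj/async.h>
#include <kj/debug.h>

class LogErrors final: public kj::TaskSet::ErrorHandler {
public:
  void taskFailed(kj::Exception&& exception) override {
    KJ_LOG(ERROR, "background task failed", exception);
  }
};

void drainBackgroundWork(kj::WaitScope& waitScope) {
  LogErrors errorHandler;
  kj::TaskSet tasks(errorHandler);

  tasks.add(kj::evalLater([]() { /* some fire-and-forget work */ }));
  tasks.add(kj::evalLater([]() { KJ_FAIL_REQUIRE("oops"); }));  // reported via taskFailed()

  // Resolves once the set is empty; only one onEmpty() waiter may exist at a time.
  tasks.onEmpty().wait(waitScope);
}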
+ while (tasks != nullptr) { + auto removed = KJ_REQUIRE_NONNULL(tasks)->pop(); + } +} + +void TaskSet::add(Promise&& promise) { + auto task = heap(*this, _::PromiseNode::from(kj::mv(promise))); + KJ_IF_MAYBE(head, tasks) { + head->get()->prev = &task->next; + task->next = kj::mv(tasks); + } + task->prev = &tasks; + tasks = kj::mv(task); +} + +kj::String TaskSet::trace() { + kj::Vector traces; + + Maybe>* ptr = &tasks; + for (;;) { + KJ_IF_MAYBE(task, *ptr) { + traces.add(task->get()->trace()); + ptr = &task->get()->next; + } else { + break; + } + } + + return kj::strArray(traces, "\n"); +} + +Promise TaskSet::onEmpty() { + KJ_IF_MAYBE(fulfiller, emptyFulfiller) { + if (fulfiller->get()->isWaiting()) { + KJ_FAIL_REQUIRE("onEmpty() can only be called once at a time"); + } + } + + if (tasks == nullptr) { + return READY_NOW; + } else { + auto paf = newPromiseAndFulfiller(); + emptyFulfiller = kj::mv(paf.fulfiller); + return kj::mv(paf.promise); + } +} + +// ======================================================================================= + +namespace { + +#if _WIN32 || __CYGWIN__ +thread_local void* threadMainFiber = nullptr; + +void* getMainWin32Fiber() { + return threadMainFiber; +} +#endif + +inline void ensureThreadCanRunFibers() { +#if _WIN32 || __CYGWIN__ + // Make sure the current thread has been converted to a fiber. + void* fiber = threadMainFiber; + if (fiber == nullptr) { + // Thread not initialized. Convert it to a fiber now. + // Note: Unfortunately, if the application has already converted the thread to a fiber, I + // guess this will fail. But trying to call GetCurrentFiber() when the thread isn't a fiber + // doesn't work (it returns null on WINE but not on real windows, ugh). So I guess we're + // just incompatible with the application doing anything with fibers, which is sad. + threadMainFiber = fiber = ConvertThreadToFiber(nullptr); + } +#endif +} + +} // namespace + +namespace _ { + +class FiberStack final { + // A class containing a fiber stack impl. This is separate from fiber + // promises since it lets us move the stack itself around and reuse it. + +public: + FiberStack(size_t stackSize); + ~FiberStack() noexcept(false); + + struct SynchronousFunc { + kj::FunctionParam& func; + kj::Maybe exception; + }; + + void initialize(FiberBase& fiber); + void initialize(SynchronousFunc& syncFunc); + + void reset() { + main = {}; + } + + void switchToFiber(); + void switchToMain(); + + void trace(TraceBuilder& builder) { + // TODO(someday): Trace through fiber stack? Can it be done??? + builder.add(getMethodStartAddress(*this, &FiberStack::trace)); + } + +private: + size_t stackSize; + OneOf main; + + friend class FiberBase; + friend class FiberPool::Impl; + + struct StartRoutine; + +#if KJ_USE_FIBERS +#if _WIN32 || __CYGWIN__ + void* osFiber; +#else + struct Impl; + Impl* impl; +#endif +#endif + + [[noreturn]] void run(); + + bool isReset() { return main == nullptr; } +}; + +} // namespace _ + +#if __linux__ +// TODO(someday): Support core-local freelists on OSs other than Linux. The only tricky part is +// finding what to use instead of sched_getcpu() to get the current CPU ID. +#define USE_CORE_LOCAL_FREELISTS 1 +#endif + +#if USE_CORE_LOCAL_FREELISTS +static const size_t CACHE_LINE_SIZE = 64; +// Most modern architectures have 64-byte cache lines. 
+#endif + +class FiberPool::Impl final: private Disposer { +public: + Impl(size_t stackSize): stackSize(stackSize) {} + ~Impl() noexcept(false) { +#if USE_CORE_LOCAL_FREELISTS + if (coreLocalFreelists != nullptr) { + KJ_DEFER(free(coreLocalFreelists)); + + for (uint i: kj::zeroTo(nproc)) { + for (auto stack: coreLocalFreelists[i].stacks) { + if (stack != nullptr) { + delete stack; + } + } + } + } +#endif + + // Make sure we're not leaking anything from the global freelist either. + auto lock = freelist.lockExclusive(); + auto dangling = kj::mv(*lock); + for (auto& stack: dangling) { + delete stack; + } + } + + void setMaxFreelist(size_t count) { + maxFreelist = count; + } + + size_t getFreelistSize() const { + return freelist.lockShared()->size(); + } + + void useCoreLocalFreelists() { +#if USE_CORE_LOCAL_FREELISTS + if (coreLocalFreelists != nullptr) { + // Ignore repeat call. + return; + } + + int nproc_; + KJ_SYSCALL(nproc_ = sysconf(_SC_NPROCESSORS_CONF)); + nproc = nproc_; + + void* allocPtr; + size_t totalSize = nproc * sizeof(CoreLocalFreelist); + int error = posix_memalign(&allocPtr, CACHE_LINE_SIZE, totalSize); + if (error != 0) { + KJ_FAIL_SYSCALL("posix_memalign", error); + } + memset(allocPtr, 0, totalSize); + coreLocalFreelists = reinterpret_cast(allocPtr); +#endif + } + + Own<_::FiberStack> takeStack() const { + // Get a stack from the pool. The disposer on the returned Own pointer will return the stack + // to the pool, provided that reset() has been called to indicate that the stack is not in + // a weird state. + +#if USE_CORE_LOCAL_FREELISTS + KJ_IF_MAYBE(core, lookupCoreLocalFreelist()) { + for (auto& stackPtr: core->stacks) { + _::FiberStack* result = __atomic_exchange_n(&stackPtr, nullptr, __ATOMIC_ACQUIRE); + if (result != nullptr) { + // Found a stack in this slot! + return { result, *this }; + } + } + // No stacks found, fall back to global freelist. + } +#endif + + { + auto lock = freelist.lockExclusive(); + if (!lock->empty()) { + _::FiberStack* result = lock->back(); + lock->pop_back(); + return { result, *this }; + } + } + + _::FiberStack* result = new _::FiberStack(stackSize); + return { result, *this }; + } + +private: + size_t stackSize; + size_t maxFreelist = kj::maxValue; + MutexGuarded> freelist; + +#if USE_CORE_LOCAL_FREELISTS + struct CoreLocalFreelist { + union { + _::FiberStack* stacks[2]; + // For now, we don't try to freelist more than 2 stacks per core. If you have three or more + // threads interleaved on a core, chances are you have bigger problems... + + byte padToCacheLine[CACHE_LINE_SIZE]; + // We don't want two core-local freelists to live in the same cache line, otherwise the + // cores will fight over ownership of that line. + }; + }; + + uint nproc; + CoreLocalFreelist* coreLocalFreelists = nullptr; + + kj::Maybe lookupCoreLocalFreelist() const { + if (coreLocalFreelists == nullptr) { + return nullptr; + } else { + int cpu = sched_getcpu(); + if (cpu >= 0) { + // TODO(perf): Perhaps two hyperthreads on the same physical core should share a freelist? + // But I don't know how to find out if the system uses hyperthreading. 
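The pool's public surface, as used later in this file, can be sketched roughly as follows (illustrative; it assumes a platform where KJ_USE_FIBERS is enabled, since the FiberStack constructor otherwise reports the feature as unimplemented):

#include <kj/async.h>

void runOnPooledStack() {
  // 64 KiB requested; the FiberStack constructor clamps the size to a sane minimum.
  kj::FiberPool pool(64 * 1024);

  // Optional: per-CPU freelists (currently a no-op on non-Linux platforms).
  pool.useCoreLocalFreelists();

  pool.runSynchronously([]() {
    // Runs on a pooled fiber stack and returns when done; the stack is then
    // recycled through the freelists rather than being freed.
  });
}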
+ return coreLocalFreelists[cpu]; + } else { + static bool logged = false; + if (!logged) { + KJ_LOG(ERROR, "invalid cpu number from sched_getcpu()?", cpu, nproc); + logged = true; + } + return nullptr; + } + } + } +#endif + + void disposeImpl(void* pointer) const { + _::FiberStack* stack = reinterpret_cast<_::FiberStack*>(pointer); + KJ_DEFER(delete stack); + + // Verify that the stack was reset before returning, otherwise it might be in a weird state + // where we don't want to reuse it. + if (stack->isReset()) { +#if USE_CORE_LOCAL_FREELISTS + KJ_IF_MAYBE(core, lookupCoreLocalFreelist()) { + for (auto& stackPtr: core->stacks) { + stack = __atomic_exchange_n(&stackPtr, stack, __ATOMIC_RELEASE); + if (stack == nullptr) { + // Cool, we inserted the stack into an unused slot. We're done. + return; + } + } + // All slots were occupied, so we inserted the new stack in the front, pushed the rest back, + // and now `stack` refers to the stack that fell off the end of the core-local list. That + // needs to go into the global freelist. + } +#endif + + auto lock = freelist.lockExclusive(); + lock->push_back(stack); + if (lock->size() > maxFreelist) { + stack = lock->front(); + lock->pop_front(); + } else { + stack = nullptr; + } + } + } +}; + +FiberPool::FiberPool(size_t stackSize) : impl(kj::heap(stackSize)) {} +FiberPool::~FiberPool() noexcept(false) {} + +void FiberPool::setMaxFreelist(size_t count) { + impl->setMaxFreelist(count); +} + +size_t FiberPool::getFreelistSize() const { + return impl->getFreelistSize(); +} + +void FiberPool::useCoreLocalFreelists() { + impl->useCoreLocalFreelists(); +} + +void FiberPool::runSynchronously(kj::FunctionParam func) const { + ensureThreadCanRunFibers(); + + _::FiberStack::SynchronousFunc syncFunc { func, nullptr }; + + { + auto stack = impl->takeStack(); + stack->initialize(syncFunc); + stack->switchToFiber(); + stack->reset(); // safe to reuse + } + + KJ_IF_MAYBE(e, syncFunc.exception) { + kj::throwRecoverableException(kj::mv(*e)); + } +} + +namespace _ { // private + +class LoggingErrorHandler: public TaskSet::ErrorHandler { +public: + static LoggingErrorHandler instance; + + void taskFailed(kj::Exception&& exception) override { + KJ_LOG(ERROR, "Uncaught exception in daemonized task.", exception); + } +}; + +LoggingErrorHandler LoggingErrorHandler::instance = LoggingErrorHandler(); + +} // namespace _ (private) + +// ======================================================================================= + +struct Executor::Impl { + Impl(EventLoop& loop): state(loop) {} + + struct State { + // Queues of notifications from other threads that need this thread's attention. + + State(EventLoop& loop): loop(loop) {} + + kj::Maybe loop; + // Becomes null when the loop is destroyed. + + List<_::XThreadEvent, &_::XThreadEvent::targetLink> start; + List<_::XThreadEvent, &_::XThreadEvent::targetLink> cancel; + List<_::XThreadEvent, &_::XThreadEvent::replyLink> replies; + // Lists of events that need actioning by this thread. + + List<_::XThreadEvent, &_::XThreadEvent::targetLink> executing; + // Events that have already been dispatched and are happily executing. This list is maintained + // so that they can be canceled if the event loop exits. + + List<_::XThreadPaf, &_::XThreadPaf::link> fulfilled; + // Set of XThreadPafs that have been fulfilled by another thread. + + bool waitingForCancel = false; + // True if this thread is currently blocked waiting for some other thread to pump its + // cancellation queue. 
If that other thread tries to block on *this* thread, then it could + // deadlock -- it must take precautions against this. + + bool isDispatchNeeded() const { + return !start.empty() || !cancel.empty() || !replies.empty() || !fulfilled.empty(); + } + + void dispatchAll(Vector<_::XThreadEvent*>& eventsToCancelOutsideLock) { + for (auto& event: start) { + start.remove(event); + executing.add(event); + event.state = _::XThreadEvent::EXECUTING; + event.armBreadthFirst(); + } + + dispatchCancels(eventsToCancelOutsideLock); + + for (auto& event: replies) { + replies.remove(event); + event.onReadyEvent.armBreadthFirst(); + } + + for (auto& event: fulfilled) { + fulfilled.remove(event); + event.state = _::XThreadPaf::DISPATCHED; + event.onReadyEvent.armBreadthFirst(); + } + } + + void dispatchCancels(Vector<_::XThreadEvent*>& eventsToCancelOutsideLock) { + for (auto& event: cancel) { + cancel.remove(event); + + if (event.promiseNode == nullptr) { + event.setDoneState(); + } else { + // We can't destroy the promiseNode while the mutex is locked, because we don't know + // what the destructor might do. But, we *must* destroy it before acknowledging + // cancellation. So we have to add it to a list to destroy later. + eventsToCancelOutsideLock.add(&event); + } + } + } + }; + + kj::MutexGuarded state; + // After modifying state from another thread, the loop's port.wake() must be called. + + void processAsyncCancellations(Vector<_::XThreadEvent*>& eventsToCancelOutsideLock) { + // After calling dispatchAll() or dispatchCancels() with the lock held, it may be that some + // cancellations require dropping the lock before destroying the promiseNode. In that case + // those cancellations will be added to the eventsToCancelOutsideLock Vector passed to the + // method. That vector must then be passed to processAsyncCancellations() as soon as the lock + // is released. + + for (auto& event: eventsToCancelOutsideLock) { + event->promiseNode = nullptr; + event->disarm(); + } + + // Now we need to mark all the events "done" under lock. + auto lock = state.lockExclusive(); + for (auto& event: eventsToCancelOutsideLock) { + event->setDoneState(); + } + } + + void disconnect() { + state.lockExclusive()->loop = nullptr; + + // Now that `loop` is set null in `state`, other threads will no longer try to manipulate our + // lists, so we can access them without a lock. That's convenient because a bunch of the things + // we want to do with them would require dropping the lock to avoid deadlocks. We'd end up + // copying all the lists over into separate vectors first, dropping the lock, operating on + // them, and then locking again. + auto& s = state.getWithoutLock(); + + // We do, however, take and release the lock on the way out, to make sure anyone performing + // a conditional wait for state changes gets a chance to have their wait condition re-checked. 
+ KJ_DEFER(state.lockExclusive()); + + for (auto& event: s.start) { + KJ_ASSERT(event.state == _::XThreadEvent::QUEUED, event.state) { break; } + s.start.remove(event); + event.setDisconnected(); + event.sendReply(); + event.setDoneState(); + } + + for (auto& event: s.executing) { + KJ_ASSERT(event.state == _::XThreadEvent::EXECUTING, event.state) { break; } + s.executing.remove(event); + event.promiseNode = nullptr; + event.setDisconnected(); + event.sendReply(); + event.setDoneState(); + } + + for (auto& event: s.cancel) { + KJ_ASSERT(event.state == _::XThreadEvent::CANCELING, event.state) { break; } + s.cancel.remove(event); + event.promiseNode = nullptr; + event.setDoneState(); + } + + // The replies list "should" be empty, because any locally-initiated tasks should have been + // canceled before destroying the EventLoop. + if (!s.replies.empty()) { + KJ_LOG(ERROR, "EventLoop destroyed with cross-thread event replies outstanding"); + for (auto& event: s.replies) { + s.replies.remove(event); + } + } + + // Similarly for cross-thread fulfillers. The waiting tasks should have been canceled. + if (!s.fulfilled.empty()) { + KJ_LOG(ERROR, "EventLoop destroyed with cross-thread fulfiller replies outstanding"); + for (auto& event: s.fulfilled) { + s.fulfilled.remove(event); + event.state = _::XThreadPaf::DISPATCHED; + } + } + }}; + +namespace _ { // (private) + +XThreadEvent::XThreadEvent( + ExceptionOrValue& result, const Executor& targetExecutor, void* funcTracePtr) + : Event(targetExecutor.getLoop()), result(result), funcTracePtr(funcTracePtr), + targetExecutor(targetExecutor.addRef()) {} + +void XThreadEvent::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + // We can't safely trace into another thread, so we'll stop here. + builder.add(funcTracePtr); +} + +void XThreadEvent::ensureDoneOrCanceled() { + if (__atomic_load_n(&state, __ATOMIC_ACQUIRE) != DONE) { + auto lock = targetExecutor->impl->state.lockExclusive(); + + const EventLoop* loop; + KJ_IF_MAYBE(l, lock->loop) { + loop = l; + } else { + // Target event loop is already dead, so we know it's already working on transitioning all + // events to the DONE state. We can just wait. + lock.wait([&](auto&) { return state == DONE; }); + return; + } + + switch (state) { + case UNUSED: + // Nothing to do. + break; + case QUEUED: + lock->start.remove(*this); + // No wake needed since we removed work rather than adding it. + state = DONE; + break; + case EXECUTING: { + lock->executing.remove(*this); + lock->cancel.add(*this); + state = CANCELING; + KJ_IF_MAYBE(p, loop->port) { + p->wake(); + } + + Maybe maybeSelfExecutor = nullptr; + if (threadLocalEventLoop != nullptr) { + KJ_IF_MAYBE(e, threadLocalEventLoop->executor) { + maybeSelfExecutor = **e; + } + } + + KJ_IF_MAYBE(selfExecutor, maybeSelfExecutor) { + // If, while waiting for other threads to process our cancellation request, we have + // cancellation requests queued back to this thread, we must process them. Otherwise, + // we could deadlock with two threads waiting on each other to process cancellations. + // + // We don't have a terribly good way to detect this, except to check if the remote + // thread is itself waiting for cancellations and, if so, wake ourselves up to check for + // cancellations to process. This will busy-loop but at least it should eventually + // resolve assuming fair scheduling. 
+ // + // To make things extra-annoying, in order to update our waitingForCancel flag, we have + // to lock our own executor state, but we can't take both locks at once, so we have to + // release the other lock in the meantime. + + // Make sure we unset waitingForCancel on the way out. + KJ_DEFER({ + lock = {}; + + Vector<_::XThreadEvent*> eventsToCancelOutsideLock; + KJ_DEFER(selfExecutor->impl->processAsyncCancellations(eventsToCancelOutsideLock)); + + auto selfLock = selfExecutor->impl->state.lockExclusive(); + selfLock->waitingForCancel = false; + selfLock->dispatchCancels(eventsToCancelOutsideLock); + + // We don't need to re-take the lock on the other executor here; it's not used again + // after this scope. + }); + + while (state != DONE) { + bool otherThreadIsWaiting = lock->waitingForCancel; + + // Make sure our waitingForCancel is on and dispatch any pending cancellations on this + // thread. + lock = {}; + { + Vector<_::XThreadEvent*> eventsToCancelOutsideLock; + KJ_DEFER(selfExecutor->impl->processAsyncCancellations(eventsToCancelOutsideLock)); + + auto selfLock = selfExecutor->impl->state.lockExclusive(); + selfLock->waitingForCancel = true; + + // Note that we don't have to proactively delete the PromiseNodes extracted from + // the canceled events because those nodes belong to this thread and can't possibly + // continue executing while we're blocked here. + selfLock->dispatchCancels(eventsToCancelOutsideLock); + } + + if (otherThreadIsWaiting) { + // We know the other thread was waiting for cancellations to complete a moment ago. + // We may have just processed the necessary cancellations in this thread, in which + // case the other thread needs a chance to receive control and notice this. Or, it + // may be that the other thread is waiting for some third thread to take action. + // Either way, we should yield control here to give things a chance to settle. + // Otherwise we could end up in a tight busy loop. +#if _WIN32 + Sleep(0); +#else + sched_yield(); +#endif + } + + // OK now we can take the original lock again. + lock = targetExecutor->impl->state.lockExclusive(); + + // OK, now we can wait for the other thread to either process our cancellation or + // indicate that it is waiting for remote cancellation. + lock.wait([&](const Executor::Impl::State& executorState) { + return state == DONE || executorState.waitingForCancel; + }); + } + } else { + // We have no executor of our own so we don't have to worry about cancellation cycles + // causing deadlock. + // + // NOTE: I don't think we can actually get here, because it implies that this is a + // synchronous execution, which means there's no way to cancel it. + lock.wait([&](auto&) { return state == DONE; }); + } + KJ_DASSERT(!targetLink.isLinked()); + break; + } + case CANCELING: + KJ_FAIL_ASSERT("impossible state: CANCELING should only be set within the above case"); + case DONE: + // Became done while we waited for lock. Nothing to do. + break; + } + } + + KJ_IF_MAYBE(e, replyExecutor) { + // Since we know we reached the DONE state (or never left UNUSED), we know that the remote + // thread is all done playing with our `replyPrev` pointer. Only the current thread could + // possibly modify it after this point. So we can skip the lock if it's already null. + if (replyLink.isLinked()) { + auto lock = e->impl->state.lockExclusive(); + lock->replies.remove(*this); + } + } +} + +void XThreadEvent::sendReply() { + KJ_IF_MAYBE(e, replyExecutor) { + // Queue the reply. 
+ const EventLoop* replyLoop; + { + auto lock = e->impl->state.lockExclusive(); + KJ_IF_MAYBE(l, lock->loop) { + lock->replies.add(*this); + replyLoop = l; + } else { + // Calling thread exited without cancelling the promise. This is UB. In fact, + // `replyExecutor` is probably already destroyed and we are in use-after-free territory + // already. Better abort. + KJ_LOG(FATAL, + "the thread which called kj::Executor::executeAsync() apparently exited its own " + "event loop without canceling the cross-thread promise first; this is undefined " + "behavior so I will crash now"); + abort(); + } + } + + // Note that it's safe to assume `replyLoop` still exists even though we dropped the lock + // because that thread would have had to cancel any promises before destroying its own + // EventLoop, and when it tries to destroy this promise, it will wait for `state` to become + // `DONE`, which we don't set until later on. That's nice because wake() probably makes a + // syscall and we'd rather not hold the lock through syscalls. + KJ_IF_MAYBE(p, replyLoop->port) { + p->wake(); + } + } +} + +void XThreadEvent::done() { + KJ_ASSERT(targetExecutor.get() == ¤tEventLoop().getExecutor(), + "calling done() from wrong thread?"); + + sendReply(); + + { + auto lock = targetExecutor->impl->state.lockExclusive(); + + switch (state) { + case EXECUTING: + lock->executing.remove(*this); + break; + case CANCELING: + // Sending thread requested cancelation, but we're done anyway, so it doesn't matter at this + // point. + lock->cancel.remove(*this); + break; + default: + KJ_FAIL_ASSERT("can't call done() from this state", (uint)state); + } + + setDoneState(); + } +} + +inline void XThreadEvent::setDoneState() { + __atomic_store_n(&state, DONE, __ATOMIC_RELEASE); +} + +void XThreadEvent::setDisconnected() { + result.addException(KJ_EXCEPTION(DISCONNECTED, + "Executor's event loop exited before cross-thread event could complete")); +} + +class XThreadEvent::DelayedDoneHack: public Disposer { + // Crazy hack: In fire(), we want to call done() if the event is finished. But done() signals + // the requesting thread to wake up and possibly delete the XThreadEvent. But the caller (the + // EventLoop) still has to set `event->firing = false` after `fire()` returns, so this would be + // a race condition use-after-free. + // + // It just so happens, though, that fire() is allowed to return an optional `Own` to drop, + // and the caller drops that pointer immediately after setting event->firing = false. So we + // return a pointer whose disposer calls done(). + // + // It's not quite as much of a hack as it seems: The whole reason fire() returns an Own is + // so that the event can delete itself, but do so after the caller sets event->firing = false. + // It just happens to be that in this case, the event isn't deleting itself, but rather releasing + // itself back to the other thread. 
+ +protected: + void disposeImpl(void* pointer) const override { + reinterpret_cast(pointer)->done(); + } +}; + +Maybe> XThreadEvent::fire() { + static constexpr DelayedDoneHack DISPOSER {}; + + KJ_IF_MAYBE(n, promiseNode) { + n->get()->get(result); + promiseNode = nullptr; // make sure to destroy in the thread that created it + return Own(this, DISPOSER); + } else { + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + promiseNode = execute(); + })) { + result.addException(kj::mv(*exception)); + }; + KJ_IF_MAYBE(n, promiseNode) { + n->get()->onReady(this); + } else { + return Own(this, DISPOSER); + } + } + + return nullptr; +} + +void XThreadEvent::traceEvent(TraceBuilder& builder) { + KJ_IF_MAYBE(n, promiseNode) { + n->get()->tracePromise(builder, true); + } + + // We can't safely trace into another thread, so we'll stop here. + builder.add(funcTracePtr); +} + +void XThreadEvent::onReady(Event* event) noexcept { + onReadyEvent.init(event); +} + +XThreadPaf::XThreadPaf() + : state(WAITING), executor(getCurrentThreadExecutor()) {} +XThreadPaf::~XThreadPaf() noexcept(false) {} + +void XThreadPaf::Disposer::disposeImpl(void* pointer) const { + XThreadPaf* obj = reinterpret_cast(pointer); + auto oldState = WAITING; + + if (__atomic_load_n(&obj->state, __ATOMIC_ACQUIRE) == DISPATCHED) { + // Common case: Promise was fully fulfilled and dispatched, no need for locking. + delete obj; + } else if (__atomic_compare_exchange_n(&obj->state, &oldState, CANCELED, false, + __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) { + // State transitioned from WAITING to CANCELED, so now it's the fulfiller's job to destroy the + // object. + } else { + // Whoops, another thread is already in the process of fulfilling this promise. We'll have to + // wait for it to finish and transition the state to FULFILLED. + obj->executor.impl->state.when([&](auto&) { + return obj->state == FULFILLED || obj->state == DISPATCHED; + }, [&](Executor::Impl::State& exState) { + if (obj->state == FULFILLED) { + // The object is on the queue but was not yet dispatched. Remove it. + exState.fulfilled.remove(*obj); + } + }); + + // It's ours now, delete it. + delete obj; + } +} + +const XThreadPaf::Disposer XThreadPaf::DISPOSER; + +void XThreadPaf::onReady(Event* event) noexcept { + onReadyEvent.init(event); +} + +void XThreadPaf::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + // We can't safely trace into another thread, so we'll stop here. + // Maybe returning the address of get() will give us a function name with meaningful type + // information. + builder.add(getMethodStartAddress(implicitCast(*this), &PromiseNode::get)); +} + +XThreadPaf::FulfillScope::FulfillScope(XThreadPaf** pointer) { + obj = __atomic_exchange_n(pointer, static_cast(nullptr), __ATOMIC_ACQUIRE); + auto oldState = WAITING; + if (obj == nullptr) { + // Already fulfilled (possibly by another thread). + } else if (__atomic_compare_exchange_n(&obj->state, &oldState, FULFILLING, false, + __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) { + // Transitioned to FULFILLING, good. + } else { + // The waiting thread must have canceled. + KJ_ASSERT(oldState == CANCELED); + + // It's our responsibility to clean up, then. + delete obj; + + // Set `obj` null so that we don't try to fill it in or delete it later. 
+ obj = nullptr; + } +} +XThreadPaf::FulfillScope::~FulfillScope() noexcept(false) { + if (obj != nullptr) { + auto lock = obj->executor.impl->state.lockExclusive(); + KJ_IF_MAYBE(l, lock->loop) { + lock->fulfilled.add(*obj); + __atomic_store_n(&obj->state, FULFILLED, __ATOMIC_RELEASE); + KJ_IF_MAYBE(p, l->port) { + // TODO(perf): It's annoying we have to call wake() with the lock held, but we have to + // prevent the destination EventLoop from being destroyed first. + p->wake(); + } + } else { + KJ_LOG(FATAL, + "the thread which called kj::newPromiseAndCrossThreadFulfiller() apparently exited " + "its own event loop without canceling the cross-thread promise first; this is " + "undefined behavior so I will crash now"); + abort(); + } + } +} + +kj::Exception XThreadPaf::unfulfilledException() { + // TODO(cleanup): Share code with regular PromiseAndFulfiller for stack tracing here. + return kj::Exception(kj::Exception::Type::FAILED, __FILE__, __LINE__, kj::heapString( + "cross-thread PromiseFulfiller was destroyed without fulfilling the promise.")); +} + +class ExecutorImpl: public Executor, public AtomicRefcounted { +public: + using Executor::Executor; + + kj::Own addRef() const override { + return kj::atomicAddRef(*this); + } +}; + +} // namespace _ + +Executor::Executor(EventLoop& loop, Badge): impl(kj::heap(loop)) {} +Executor::~Executor() noexcept(false) {} + +bool Executor::isLive() const { + return impl->state.lockShared()->loop != nullptr; +} + +void Executor::send(_::XThreadEvent& event, bool sync) const { + KJ_ASSERT(event.state == _::XThreadEvent::UNUSED); + + if (sync) { + EventLoop* thisThread = threadLocalEventLoop; + if (thisThread != nullptr && + thisThread->executor.map([this](auto& e) { return e == this; }).orDefault(false)) { + // Invoking a sync request on our own thread. Just execute it directly; if we try to queue + // it to the loop, we'll deadlock. + auto promiseNode = event.execute(); + + // If the function returns a promise, we have no way to pump the event loop to wait for it, + // because the event loop may already be pumping somewhere up the stack. + KJ_ASSERT(promiseNode == nullptr, + "can't call executeSync() on own thread's executor with a promise-returning function"); + + return; + } + } else { + event.replyExecutor = getCurrentThreadExecutor(); + + // Note that async requests will "just work" even if the target executor is our own thread's + // executor. In theory we could detect this case to avoid some locking and signals but that + // would be extra code complexity for probably little benefit. + } + + auto lock = impl->state.lockExclusive(); + const EventLoop* loop; + KJ_IF_MAYBE(l, lock->loop) { + loop = l; + } else { + event.setDisconnected(); + return; + } + + event.state = _::XThreadEvent::QUEUED; + lock->start.add(event); + + KJ_IF_MAYBE(p, loop->port) { + p->wake(); + } else { + // Event loop will be waiting on executor.wait(), which will be woken when we unlock the mutex. 
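For orientation, the calling side of this queueing machinery looks roughly like the sketch below (illustrative; it assumes `exec` was obtained via getCurrentThreadExecutor() on a thread that keeps running its event loop):

#include <kj/async.h>

// Blocks the calling thread until the lambda has run on the target loop and
// copies the result back. The caller does not need an event loop of its own.
int addOnTargetLoop(const kj::Executor& exec, int a, int b) {
  return exec.executeSync([a, b]() { return a + b; });
}

// Queues the lambda on the target loop and delivers the result as a promise
// resolving on the *calling* thread's event loop, so the caller must have one.
kj::Promise<int> addOnTargetLoopAsync(const kj::Executor& exec, int a, int b) {
  return exec.executeAsync([a, b]() { return a + b; });
}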
+ } + + if (sync) { + lock.wait([&](auto&) { return event.state == _::XThreadEvent::DONE; }); + } +} + +void Executor::wait() { + Vector<_::XThreadEvent*> eventsToCancelOutsideLock; + KJ_DEFER(impl->processAsyncCancellations(eventsToCancelOutsideLock)); + + auto lock = impl->state.lockExclusive(); + + lock.wait([](const Impl::State& state) { + return state.isDispatchNeeded(); + }); + + lock->dispatchAll(eventsToCancelOutsideLock); +} + +bool Executor::poll() { + Vector<_::XThreadEvent*> eventsToCancelOutsideLock; + KJ_DEFER(impl->processAsyncCancellations(eventsToCancelOutsideLock)); + + auto lock = impl->state.lockExclusive(); + if (lock->isDispatchNeeded()) { + lock->dispatchAll(eventsToCancelOutsideLock); + return true; + } else { + return false; + } +} + +EventLoop& Executor::getLoop() const { + KJ_IF_MAYBE(l, impl->state.lockShared()->loop) { + return *l; + } else { + kj::throwFatalException(KJ_EXCEPTION(DISCONNECTED, "Executor's event loop has exited")); + } +} + +const Executor& getCurrentThreadExecutor() { + return currentEventLoop().getExecutor(); +} + +// ======================================================================================= +// Fiber implementation. + +namespace _ { // private + +#if KJ_USE_FIBERS +#if !(_WIN32 || __CYGWIN__) +struct FiberStack::Impl { + // This struct serves two purposes: + // - It contains OS-specific state that we don't want to declare in the header. + // - It is allocated at the top of the fiber's stack area, so the Impl pointer also serves to + // track where the stack was allocated. + + jmp_buf fiberJmpBuf; + jmp_buf originalJmpBuf; + + static Impl* alloc(size_t stackSize, ucontext_t* context) { +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +#ifndef MAP_STACK +#define MAP_STACK 0 +#endif + + size_t pageSize = getPageSize(); + size_t allocSize = stackSize + pageSize; // size plus guard page + + // Allocate virtual address space for the stack but make it inaccessible initially. + // TODO(someday): Does it make sense to use MAP_GROWSDOWN on Linux? It's a kind of bizarre flag + // that causes the mapping to automatically allocate extra pages (beyond the range specified) + // until it hits something... + void* stack = mmap(nullptr, allocSize, PROT_NONE, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) { + KJ_FAIL_SYSCALL("mmap(new stack)", errno); + } + KJ_ON_SCOPE_FAILURE({ + KJ_SYSCALL(munmap(stack, allocSize)) { break; } + }); + + // Now mark everything except the guard page as read-write. We assume the stack grows down, so + // the guard page is at the beginning. No modern architecture uses stacks that grow up. + KJ_SYSCALL(mprotect(reinterpret_cast(stack) + pageSize, + stackSize, PROT_READ | PROT_WRITE)); + + // Stick `Impl` at the top of the stack. + Impl* impl = (reinterpret_cast(reinterpret_cast(stack) + allocSize) - 1); + + // Note: mmap() allocates zero'd pages so we don't have to memset() anything here. + + KJ_SYSCALL(getcontext(context)); + context->uc_stack.ss_size = allocSize - sizeof(Impl); + context->uc_stack.ss_sp = reinterpret_cast(stack); + context->uc_stack.ss_flags = 0; + // We don't use uc_link since our fiber start routine runs forever in a loop to allow for + // reuse. When we're done with the fiber, we just destroy it, without switching to it's + // stack. This is safe since the start routine doesn't allocate any memory or RAII objects + // before looping. 
+ context->uc_link = 0; + + return impl; + } + + static void free(Impl* impl, size_t stackSize) { + size_t allocSize = stackSize + getPageSize(); + void* stack = reinterpret_cast(impl + 1) - allocSize; + KJ_SYSCALL(munmap(stack, allocSize)) { break; } + } + + static size_t getPageSize() { +#ifndef _SC_PAGESIZE +#define _SC_PAGESIZE _SC_PAGE_SIZE +#endif + static size_t result = sysconf(_SC_PAGE_SIZE); + return result; + } +}; +#endif +#endif + +struct FiberStack::StartRoutine { +#if _WIN32 || __CYGWIN__ + static void WINAPI run(LPVOID ptr) { + // This is the static C-style function we pass to CreateFiber(). + reinterpret_cast(ptr)->run(); + } +#else + [[noreturn]] static void run(int arg1, int arg2) { + // This is the static C-style function we pass to makeContext(). + + // POSIX says the arguments are ints, not pointers. So we split our pointer in half in order to + // work correctly on 64-bit machines. Gross. + uintptr_t ptr = static_cast(arg1); + ptr |= static_cast(static_cast(arg2)) << (sizeof(ptr) * 4); + + auto& stack = *reinterpret_cast(ptr); + + // We first switch to the fiber inside of the FiberStack constructor. This is just for + // initialization purposes, and we're expected to switch back immediately. + stack.switchToMain(); + + // OK now have a real job. + stack.run(); + } +#endif +}; + +void FiberStack::run() { + // Loop forever so that the fiber can be reused. + for (;;) { + KJ_SWITCH_ONEOF(main) { + KJ_CASE_ONEOF(event, FiberBase*) { + event->run(); + } + KJ_CASE_ONEOF(func, SynchronousFunc*) { + KJ_IF_MAYBE(exception, kj::runCatchingExceptions(func->func)) { + func->exception.emplace(kj::mv(*exception)); + } + } + } + + // Wait for the fiber to be used again. Note the fiber might simply be destroyed without this + // ever returning. That's OK because we don't have any nontrivial destructors on the stack + // at this point. + switchToMain(); + } +} + +FiberStack::FiberStack(size_t stackSizeParam) + // Force stackSize to a reasonable minimum. + : stackSize(kj::max(stackSizeParam, 65536)) +{ + +#if KJ_USE_FIBERS +#if _WIN32 || __CYGWIN__ + // We can create fibers before we convert the main thread into a fiber in FiberBase + KJ_WIN32(osFiber = CreateFiber(stackSize, &StartRoutine::run, this)); + +#else + // Note: Nothing below here can throw. If that changes then we need to call Impl::free(impl) + // on exceptions... + ucontext_t context; + impl = Impl::alloc(stackSize, &context); + + // POSIX says the arguments are ints, not pointers. So we split our pointer in half in order to + // work correctly on 64-bit machines. Gross. + uintptr_t ptr = reinterpret_cast(this); + int arg1 = ptr & ((uintptr_t(1) << (sizeof(ptr) * 4)) - 1); + int arg2 = ptr >> (sizeof(ptr) * 4); + + makecontext(&context, reinterpret_cast(&StartRoutine::run), 2, arg1, arg2); + + if (_setjmp(impl->originalJmpBuf) == 0) { + setcontext(&context); + } +#endif +#else +#if KJ_NO_EXCEPTIONS + KJ_UNIMPLEMENTED("Fibers are not implemented because exceptions are disabled"); +#else + KJ_UNIMPLEMENTED( + "Fibers are not implemented on this platform because its C library lacks setcontext() " + "and friends. If you'd like to see fiber support added, file a bug to let us know. 
" + "We can likely make it happen using assembly, but didn't want to try unless it was " + "actually needed."); +#endif +#endif +} + +FiberStack::~FiberStack() noexcept(false) { +#if KJ_USE_FIBERS +#if _WIN32 || __CYGWIN__ + DeleteFiber(osFiber); +#else + Impl::free(impl, stackSize); +#endif +#endif +} + +void FiberStack::initialize(FiberBase& fiber) { + KJ_REQUIRE(this->main == nullptr); + this->main = &fiber; +} + +void FiberStack::initialize(SynchronousFunc& func) { + KJ_REQUIRE(this->main == nullptr); + this->main = &func; +} + +FiberBase::FiberBase(size_t stackSize, _::ExceptionOrValue& result) + : state(WAITING), stack(kj::heap(stackSize)), result(result) { + stack->initialize(*this); + ensureThreadCanRunFibers(); +} + +FiberBase::FiberBase(const FiberPool& pool, _::ExceptionOrValue& result) + : state(WAITING), result(result) { + stack = pool.impl->takeStack(); + stack->initialize(*this); + ensureThreadCanRunFibers(); +} + +FiberBase::~FiberBase() noexcept(false) {} -private: - TaskSet& taskSet; - Own<_::PromiseNode> node; -}; +void FiberBase::destroy() { + // Called by `~Fiber()` to begin teardown. We can't do this work in `~FiberBase()` because the + // `Fiber` subclass contains members that may still be in-use until the fiber stops. -void TaskSet::add(Promise&& promise) { - auto task = heap(*this, kj::mv(promise.node)); - KJ_IF_MAYBE(head, tasks) { - head->get()->prev = &task->next; - task->next = kj::mv(tasks); - } - task->prev = &tasks; - tasks = kj::mv(task); -} + switch (state) { + case WAITING: + // We can't just free the stack while the fiber is running. We need to force it to execute + // until finished, so we cause it to throw an exception. + state = CANCELED; + stack->switchToFiber(); + + // The fiber should only switch back to the main stack on completion, because any further + // calls to wait() would throw before trying to switch. + KJ_ASSERT(state == FINISHED); + + // The fiber shut down properly so the stack is safe to reuse. + stack->reset(); + break; -kj::String TaskSet::trace() { - kj::Vector traces; + case RUNNING: + case CANCELED: + // Bad news. + KJ_LOG(FATAL, "fiber tried to destroy itself"); + ::abort(); + break; - Maybe>* ptr = &tasks; - for (;;) { - KJ_IF_MAYBE(task, *ptr) { - traces.add(task->get()->trace()); - ptr = &task->get()->next; - } else { + case FINISHED: + // Normal completion, yay. + stack->reset(); break; - } } - - return kj::strArray(traces, "\n============================================\n"); } -Promise TaskSet::onEmpty() { - KJ_REQUIRE(emptyFulfiller == nullptr, "onEmpty() can only be called once at a time"); +Maybe> FiberBase::fire() { + KJ_ASSERT(state == WAITING); + state = RUNNING; + stack->switchToFiber(); + return nullptr; +} - if (tasks == nullptr) { - return READY_NOW; - } else { - auto paf = newPromiseAndFulfiller(); - emptyFulfiller = kj::mv(paf.fulfiller); - return kj::mv(paf.promise); +void FiberStack::switchToFiber() { + // Switch from the main stack to the fiber. Returns once the fiber either calls switchToMain() + // or returns from its main function. +#if KJ_USE_FIBERS +#if _WIN32 || __CYGWIN__ + SwitchToFiber(osFiber); +#else + if (_setjmp(impl->originalJmpBuf) == 0) { + _longjmp(impl->fiberJmpBuf, 1); + } +#endif +#endif +} +void FiberStack::switchToMain() { + // Switch from the fiber to the main stack. Returns the next time the main stack calls + // switchToFiber(). 
+#if KJ_USE_FIBERS +#if _WIN32 || __CYGWIN__ + SwitchToFiber(getMainWin32Fiber()); +#else + if (_setjmp(impl->fiberJmpBuf) == 0) { + _longjmp(impl->originalJmpBuf, 1); } +#endif +#endif } -namespace _ { // private +void FiberBase::run() { +#if !KJ_NO_EXCEPTIONS + bool caughtCanceled = false; + state = RUNNING; + KJ_DEFER(state = FINISHED); -class LoggingErrorHandler: public TaskSet::ErrorHandler { -public: - static LoggingErrorHandler instance; + WaitScope waitScope(currentEventLoop(), *this); - void taskFailed(kj::Exception&& exception) override { - KJ_LOG(ERROR, "Uncaught exception in daemonized task.", exception); + try { + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + runImpl(waitScope); + })) { + result.addException(kj::mv(*exception)); + } + } catch (CanceledException) { + if (state != CANCELED) { + // no idea who would throw this but it's not really our problem + result.addException(KJ_EXCEPTION(FAILED, "Caught CanceledException, but fiber wasn't canceled")); + } + caughtCanceled = true; } -}; - -LoggingErrorHandler LoggingErrorHandler::instance = LoggingErrorHandler(); -class NullEventPort: public EventPort { -public: - bool wait() override { - KJ_FAIL_REQUIRE("Nothing to wait for; this thread would hang forever."); + if (state == CANCELED && !caughtCanceled) { + KJ_LOG(ERROR, "Canceled fiber apparently caught CanceledException and didn't rethrow it. " + "Generally, applications should not catch CanceledException, but if they do, they must always rethrow."); } - bool poll() override { return false; } + onReadyEvent.arm(); +#endif +} - void wake() const override { - // TODO(someday): Implement using condvar. - kj::throwRecoverableException(KJ_EXCEPTION(UNIMPLEMENTED, - "Cross-thread events are not yet implemented for EventLoops with no EventPort.")); - } +void FiberBase::onReady(_::Event* event) noexcept { + onReadyEvent.init(event); +} - static NullEventPort instance; -}; +void FiberBase::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + if (stopAtNextEvent) return; + currentInner->tracePromise(builder, false); + stack->trace(builder); +} -NullEventPort NullEventPort::instance = NullEventPort(); +void FiberBase::traceEvent(TraceBuilder& builder) { + currentInner->tracePromise(builder, true); + stack->trace(builder); + onReadyEvent.traceEvent(builder); +} } // namespace _ (private) @@ -299,22 +1642,29 @@ void EventPort::wake() const { } EventLoop::EventLoop() - : port(_::NullEventPort::instance), - daemons(kj::heap(_::LoggingErrorHandler::instance)) {} + : daemons(kj::heap(_::LoggingErrorHandler::instance)) {} EventLoop::EventLoop(EventPort& port) : port(port), daemons(kj::heap(_::LoggingErrorHandler::instance)) {} EventLoop::~EventLoop() noexcept(false) { - // Destroy all "daemon" tasks, noting that their destructors might try to access the EventLoop - // some more. + // Destroy all "daemon" tasks, noting that their destructors might register more daemon tasks. + while (!daemons->isEmpty()) { + auto oldDaemons = kj::mv(daemons); + daemons = kj::heap(_::LoggingErrorHandler::instance); + } daemons = nullptr; + KJ_IF_MAYBE(e, executor) { + // Cancel all outstanding cross-thread events. + e->get()->impl->disconnect(); + } + // The application _should_ destroy everything using the EventLoop before destroying the // EventLoop itself, so if there are events on the loop, this indicates a memory leak. KJ_REQUIRE(head == nullptr, "EventLoop destroyed with events still in the queue. 
Memory leak?", - head->trace()) { + head->traceEvent()) { // Unlink all the events and hope that no one ever fires them... _::Event* event = head; while (event != nullptr) { @@ -359,6 +1709,9 @@ bool EventLoop::turn() { } depthFirstInsertPoint = &head; + if (breadthFirstInsertPoint == &event->next) { + breadthFirstInsertPoint = &head; + } if (tail == &event->next) { tail = &head; } @@ -370,6 +1723,8 @@ bool EventLoop::turn() { { event->firing = true; KJ_DEFER(event->firing = false); + currentlyFiring = event; + KJ_DEFER(currentlyFiring = nullptr); eventToDestroy = event->fire(); } @@ -382,9 +1737,19 @@ bool EventLoop::isRunnable() { return head != nullptr; } +const Executor& EventLoop::getExecutor() { + KJ_IF_MAYBE(e, executor) { + return **e; + } else { + return *executor.emplace(kj::atomicRefcounted<_::ExecutorImpl>(*this, Badge())); + } +} + void EventLoop::setRunnable(bool runnable) { if (runnable != lastRunnableState) { - port.setRunnable(runnable); + KJ_IF_MAYBE(p, port) { + p->setRunnable(runnable); + } lastRunnableState = runnable; } } @@ -402,6 +1767,34 @@ void EventLoop::leaveScope() { threadLocalEventLoop = nullptr; } +void EventLoop::wait() { + KJ_IF_MAYBE(p, port) { + if (p->wait()) { + // Another thread called wake(). Check for cross-thread events. + KJ_IF_MAYBE(e, executor) { + e->get()->poll(); + } + } + } else KJ_IF_MAYBE(e, executor) { + e->get()->wait(); + } else { + KJ_FAIL_REQUIRE("Nothing to wait for; this thread would hang forever."); + } +} + +void EventLoop::poll() { + KJ_IF_MAYBE(p, port) { + if (p->poll()) { + // Another thread called wake(). Check for cross-thread events. + KJ_IF_MAYBE(e, executor) { + e->get()->poll(); + } + } + } else KJ_IF_MAYBE(e, executor) { + e->get()->poll(); + } +} + void WaitScope::poll() { KJ_REQUIRE(&loop == threadLocalEventLoop, "WaitScope not valid for this thread."); KJ_REQUIRE(!loop.running, "poll() is not allowed from within event callbacks."); @@ -409,73 +1802,151 @@ void WaitScope::poll() { loop.running = true; KJ_DEFER(loop.running = false); - for (;;) { - if (!loop.turn()) { - // No events in the queue. Poll for I/O. - loop.port.poll(); + runOnStackPool([&]() { + for (;;) { + if (!loop.turn()) { + // No events in the queue. Poll for I/O. + loop.poll(); - if (!loop.isRunnable()) { - // Still no events in the queue. We're done. - return; + if (!loop.isRunnable()) { + // Still no events in the queue. We're done. + return; + } } } + }); +} + +void WaitScope::cancelAllDetached() { + KJ_REQUIRE(fiber == nullptr, + "can't call cancelAllDetached() on a fiber WaitScope, only top-level"); + + while (!loop.daemons->isEmpty()) { + auto oldDaemons = kj::mv(loop.daemons); + loop.daemons = kj::heap(_::LoggingErrorHandler::instance); + // Destroying `oldDaemons` could theoretically add new ones. } } namespace _ { // private +#if !KJ_NO_EXCEPTIONS +static kj::CanceledException fiberCanceledException() { + // Construct the exception to throw from wait() when the fiber has been canceled (because the + // promise returned by startFiber() was dropped before completion). 
+ return kj::CanceledException { }; +}; +#endif + void waitImpl(Own<_::PromiseNode>&& node, _::ExceptionOrValue& result, WaitScope& waitScope) { EventLoop& loop = waitScope.loop; KJ_REQUIRE(&loop == threadLocalEventLoop, "WaitScope not valid for this thread."); - KJ_REQUIRE(!loop.running, "wait() is not allowed from within event callbacks."); - BoolEvent doneEvent; - node->setSelfPointer(&node); - node->onReady(&doneEvent); +#if !KJ_NO_EXCEPTIONS + // we don't support fibers when running without exceptions, so just remove the whole block + KJ_IF_MAYBE(fiber, waitScope.fiber) { + if (fiber->state == FiberBase::CANCELED) { + throw fiberCanceledException(); + } + KJ_REQUIRE(fiber->state == FiberBase::RUNNING, + "This WaitScope can only be used within the fiber that created it."); - loop.running = true; - KJ_DEFER(loop.running = false); + node->setSelfPointer(&node); + node->onReady(fiber); - while (!doneEvent.fired) { - if (!loop.turn()) { - // No events in the queue. Wait for callback. - loop.port.wait(); + fiber->currentInner = node; + KJ_DEFER(fiber->currentInner = nullptr); + + // Switch to the main stack to run the event loop. + fiber->state = FiberBase::WAITING; + fiber->stack->switchToMain(); + + // The main stack switched back to us, meaning either the event we registered with + // node->onReady() fired, or we are being canceled by FiberBase's destructor. + + if (fiber->state == FiberBase::CANCELED) { + throw fiberCanceledException(); } - } - loop.setRunnable(loop.isRunnable()); + KJ_ASSERT(fiber->state == FiberBase::RUNNING); + } else { +#endif + KJ_REQUIRE(!loop.running, "wait() is not allowed from within event callbacks."); - node->get(result); - KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { - node = nullptr; - })) { - result.addException(kj::mv(*exception)); + RootEvent doneEvent(node, reinterpret_cast(&waitImpl)); + node->setSelfPointer(&node); + node->onReady(&doneEvent); + + loop.running = true; + KJ_DEFER(loop.running = false); + + for (;;) { + waitScope.runOnStackPool([&]() { + uint counter = 0; + while (!doneEvent.fired) { + if (!loop.turn()) { + // No events in the queue. Wait for callback. + return; + } else if (++counter > waitScope.busyPollInterval) { + // Note: It's intentional that if busyPollInterval is kj::maxValue, we never poll. + counter = 0; + loop.poll(); + } + } + }); + + if (doneEvent.fired) { + break; + } else { + loop.wait(); + } + } + + loop.setRunnable(loop.isRunnable()); +#if !KJ_NO_EXCEPTIONS } +#endif + + waitScope.runOnStackPool([&]() { + node->get(result); + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + node = nullptr; + })) { + result.addException(kj::mv(*exception)); + } + }); } bool pollImpl(_::PromiseNode& node, WaitScope& waitScope) { EventLoop& loop = waitScope.loop; KJ_REQUIRE(&loop == threadLocalEventLoop, "WaitScope not valid for this thread."); + KJ_REQUIRE(waitScope.fiber == nullptr, "poll() is not supported in fibers."); KJ_REQUIRE(!loop.running, "poll() is not allowed from within event callbacks."); - BoolEvent doneEvent; + RootEvent doneEvent(&node, reinterpret_cast(&pollImpl)); node.onReady(&doneEvent); loop.running = true; KJ_DEFER(loop.running = false); - while (!doneEvent.fired) { - if (!loop.turn()) { - // No events in the queue. Poll for I/O. - loop.port.poll(); - - if (!doneEvent.fired && !loop.isRunnable()) { - // No progress. Give up. 
- node.onReady(nullptr); - loop.setRunnable(false); - return false; + waitScope.runOnStackPool([&]() { + while (!doneEvent.fired) { + if (!loop.turn()) { + // No events in the queue. Poll for I/O. + loop.poll(); + + if (!doneEvent.fired && !loop.isRunnable()) { + // No progress. Give up. + node.onReady(nullptr); + loop.setRunnable(false); + break; + } } } + }); + + if (!doneEvent.fired) { + return false; } loop.setRunnable(loop.isRunnable()); @@ -483,7 +1954,11 @@ bool pollImpl(_::PromiseNode& node, WaitScope& waitScope) { } Promise yield() { - return Promise(false, kj::heap()); + return _::PromiseNode::to>(kj::heap()); +} + +Promise yieldHarder() { + return _::PromiseNode::to>(kj::heap()); } Own neverDone() { @@ -505,30 +1980,19 @@ void detach(kj::Promise&& promise) { Event::Event() : loop(currentEventLoop()), next(nullptr), prev(nullptr) {} -Event::~Event() noexcept(false) { - if (prev != nullptr) { - if (loop.tail == &next) { - loop.tail = prev; - } - if (loop.depthFirstInsertPoint == &next) { - loop.depthFirstInsertPoint = prev; - } +Event::Event(kj::EventLoop& loop) + : loop(loop), next(nullptr), prev(nullptr) {} - *prev = next; - if (next != nullptr) { - next->prev = prev; - } - } +Event::~Event() noexcept(false) { + disarm(); KJ_REQUIRE(!firing, "Promise callback destroyed itself."); - KJ_REQUIRE(threadLocalEventLoop == &loop || threadLocalEventLoop == nullptr, - "Promise destroyed from a different thread than it was created in."); } void Event::armDepthFirst() { KJ_REQUIRE(threadLocalEventLoop == &loop || threadLocalEventLoop == nullptr, "Event armed from different thread than it was created in. You must use " - "the thread-safe work queue to queue events cross-thread."); + "Executor to queue events cross-thread."); if (prev == nullptr) { next = *loop.depthFirstInsertPoint; @@ -540,6 +2004,9 @@ void Event::armDepthFirst() { loop.depthFirstInsertPoint = &next; + if (loop.breadthFirstInsertPoint == prev) { + loop.breadthFirstInsertPoint = &next; + } if (loop.tail == prev) { loop.tail = &next; } @@ -551,79 +2018,122 @@ void Event::armDepthFirst() { void Event::armBreadthFirst() { KJ_REQUIRE(threadLocalEventLoop == &loop || threadLocalEventLoop == nullptr, "Event armed from different thread than it was created in. You must use " - "the thread-safe work queue to queue events cross-thread."); + "Executor to queue events cross-thread."); if (prev == nullptr) { - next = *loop.tail; - prev = loop.tail; + next = *loop.breadthFirstInsertPoint; + prev = loop.breadthFirstInsertPoint; *prev = this; if (next != nullptr) { next->prev = &next; } - loop.tail = &next; + loop.breadthFirstInsertPoint = &next; + + if (loop.tail == prev) { + loop.tail = &next; + } loop.setRunnable(true); } } -_::PromiseNode* Event::getInnerForTrace() { - return nullptr; -} +void Event::armLast() { + KJ_REQUIRE(threadLocalEventLoop == &loop || threadLocalEventLoop == nullptr, + "Event armed from different thread than it was created in. You must use " + "Executor to queue events cross-thread."); -#if !KJ_NO_RTTI -#if __GNUC__ -static kj::String demangleTypeName(const char* name) { - int status; - char* buf = abi::__cxa_demangle(name, nullptr, nullptr, &status); - kj::String result = kj::heapString(buf == nullptr ? 
name : buf); - free(buf); - return kj::mv(result); -} -#else -static kj::String demangleTypeName(const char* name) { - return kj::heapString(name); -} -#endif -#endif + if (prev == nullptr) { + next = *loop.breadthFirstInsertPoint; + prev = loop.breadthFirstInsertPoint; + *prev = this; + if (next != nullptr) { + next->prev = &next; + } -static kj::String traceImpl(Event* event, _::PromiseNode* node) { -#if KJ_NO_RTTI - return heapString("Trace not available because RTTI is disabled."); -#else - kj::Vector trace; + // We don't update loop.breadthFirstInsertPoint because we want further inserts to go *before* + // this event. - if (event != nullptr) { - trace.add(demangleTypeName(typeid(*event).name())); + if (loop.tail == prev) { + loop.tail = &next; + } + + loop.setRunnable(true); } +} + +void Event::disarm() { + if (prev != nullptr) { + if (threadLocalEventLoop != &loop && threadLocalEventLoop != nullptr) { + KJ_LOG(FATAL, "Promise destroyed from a different thread than it was created in."); + // There's no way out of this place without UB, so abort now. + abort(); + } + + if (loop.tail == &next) { + loop.tail = prev; + } + if (loop.depthFirstInsertPoint == &next) { + loop.depthFirstInsertPoint = prev; + } + if (loop.breadthFirstInsertPoint == &next) { + loop.breadthFirstInsertPoint = prev; + } + + *prev = next; + if (next != nullptr) { + next->prev = prev; + } - while (node != nullptr) { - trace.add(demangleTypeName(typeid(*node).name())); - node = node->getInnerForTrace(); + prev = nullptr; + next = nullptr; } +} - return strArray(trace, "\n"); -#endif +String Event::traceEvent() { + void* space[32]; + TraceBuilder builder(space); + traceEvent(builder); + return kj::str(builder); } -kj::String Event::trace() { - return traceImpl(this, getInnerForTrace()); +String TraceBuilder::toString() { + auto result = finish(); + return kj::str(stringifyStackTraceAddresses(result), + stringifyStackTrace(result)); } } // namespace _ (private) +ArrayPtr getAsyncTrace(ArrayPtr space) { + EventLoop* loop = threadLocalEventLoop; + if (loop == nullptr) return nullptr; + if (loop->currentlyFiring == nullptr) return nullptr; + + _::TraceBuilder builder(space); + loop->currentlyFiring->traceEvent(builder); + return builder.finish(); +} + +kj::String getAsyncTrace() { + void* space[32]; + auto trace = getAsyncTrace(space); + return kj::str(stringifyStackTraceAddresses(trace), stringifyStackTrace(trace)); +} + // ======================================================================================= namespace _ { // private kj::String PromiseBase::trace() { - return traceImpl(nullptr, node); + void* space[32]; + TraceBuilder builder(space); + node->tracePromise(builder, false); + return kj::str(builder); } void PromiseNode::setSelfPointer(Own* selfPtr) noexcept {} -PromiseNode* PromiseNode::getInnerForTrace() { return nullptr; } - void PromiseNode::OnReadyEvent::init(Event* newEvent) { if (event == _kJ_ALREADY_READY) { // A new continuation was added to a promise that was already ready. In this case, we schedule @@ -648,6 +2158,17 @@ void PromiseNode::OnReadyEvent::arm() { event = _kJ_ALREADY_READY; } +void PromiseNode::OnReadyEvent::armBreadthFirst() { + KJ_ASSERT(event != _kJ_ALREADY_READY, "armBreadthFirst() should only be called once"); + + if (event != nullptr) { + // A promise resolved and an event is already waiting on it. 
+ event->armBreadthFirst(); + } + + event = _kJ_ALREADY_READY; +} + // ------------------------------------------------------------------- ImmediatePromiseNodeBase::ImmediatePromiseNodeBase() {} @@ -657,6 +2178,12 @@ void ImmediatePromiseNodeBase::onReady(Event* event) noexcept { if (event) event->armBreadthFirst(); } +void ImmediatePromiseNodeBase::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + // Maybe returning the address of get() will give us a function name with meaningful type + // information. + builder.add(getMethodStartAddress(implicitCast(*this), &PromiseNode::get)); +} + ImmediateBrokenPromiseNode::ImmediateBrokenPromiseNode(Exception&& exception) : exception(kj::mv(exception)) {} @@ -679,8 +2206,11 @@ void AttachmentPromiseNodeBase::get(ExceptionOrValue& output) noexcept { dependency->get(output); } -PromiseNode* AttachmentPromiseNodeBase::getInnerForTrace() { - return dependency; +void AttachmentPromiseNodeBase::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + dependency->tracePromise(builder, stopAtNextEvent); + + // TODO(debug): Maybe use __builtin_return_address to get the locations that called fork() and + // addBranch()? } void AttachmentPromiseNodeBase::dropDependency() { @@ -708,8 +2238,15 @@ void TransformPromiseNodeBase::get(ExceptionOrValue& output) noexcept { } } -PromiseNode* TransformPromiseNodeBase::getInnerForTrace() { - return dependency; +void TransformPromiseNodeBase::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + // Note that we null out the dependency just before calling our own continuation, which + // conveniently means that if we're currently executing the continuation when the trace is + // requested, it won't trace into the obsolete dependency. Nice. + if (dependency.get() != nullptr) { + dependency->tracePromise(builder, stopAtNextEvent); + } + + builder.add(continuationTracePtr); } void TransformPromiseNodeBase::dropDependency() { @@ -767,8 +2304,15 @@ void ForkBranchBase::onReady(Event* event) noexcept { onReadyEvent.init(event); } -PromiseNode* ForkBranchBase::getInnerForTrace() { - return hub->getInnerForTrace(); +void ForkBranchBase::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + if (stopAtNextEvent) return; + + if (hub.get() != nullptr) { + hub->inner->tracePromise(builder, false); + } + + // TODO(debug): Maybe use __builtin_return_address to get the locations that called fork() and + // addBranch()? } // ------------------------------------------------------------------- @@ -801,8 +2345,15 @@ Maybe> ForkHubBase::fire() { return nullptr; } -_::PromiseNode* ForkHubBase::getInnerForTrace() { - return inner; +void ForkHubBase::traceEvent(TraceBuilder& builder) { + if (inner.get() != nullptr) { + inner->tracePromise(builder, true); + } + + if (headBranch != nullptr) { + // We'll trace down the first branch, I guess. + headBranch->onReadyEvent.traceEvent(builder); + } } // ------------------------------------------------------------------- @@ -841,8 +2392,15 @@ void ChainPromiseNode::get(ExceptionOrValue& output) noexcept { return inner->get(output); } -PromiseNode* ChainPromiseNode::getInnerForTrace() { - return inner; +void ChainPromiseNode::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + if (stopAtNextEvent && state == STEP1) { + // In STEP1, we are an Event -- when the inner node resolves, it will arm *this* object. + // In STEP2, we are not an Event -- when the inner node resolves, it directly arms our parent + // event. 
+ return; + } + + inner->tracePromise(builder, stopAtNextEvent); } Maybe> ChainPromiseNode::fire() { @@ -868,7 +2426,7 @@ Maybe> ChainPromiseNode::fire() { } else KJ_IF_MAYBE(value, intermediate.value) { // There is a value and no exception. The value is itself a promise. Adopt it as our // step2. - inner = kj::mv(value->node); + inner = _::PromiseNode::from(kj::mv(*value)); } else { // We can only get here if inner->get() returned neither an exception nor a // value, which never actually happens. @@ -897,6 +2455,25 @@ Maybe> ChainPromiseNode::fire() { } } +void ChainPromiseNode::traceEvent(TraceBuilder& builder) { + switch (state) { + case STEP1: + if (inner.get() != nullptr) { + inner->tracePromise(builder, true); + } + if (!builder.full() && onReadyEvent != nullptr) { + onReadyEvent->traceEvent(builder); + } + break; + case STEP2: + // This probably never happens -- a trace being generated after the meat of fire() already + // executed. If it does, though, we probably can't do anything here. We don't know if + // `onReadyEvent` is still valid because we passed it on to the phase-2 promise, and tracing + // just `inner` would probably be confusing. Let's just do nothing. + break; + } +} + // ------------------------------------------------------------------- ExclusiveJoinPromiseNode::ExclusiveJoinPromiseNode(Own left, Own right) @@ -912,12 +2489,18 @@ void ExclusiveJoinPromiseNode::get(ExceptionOrValue& output) noexcept { KJ_REQUIRE(left.get(output) || right.get(output), "get() called before ready."); } -PromiseNode* ExclusiveJoinPromiseNode::getInnerForTrace() { - auto result = left.getInnerForTrace(); - if (result == nullptr) { - result = right.getInnerForTrace(); +void ExclusiveJoinPromiseNode::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + // TODO(debug): Maybe use __builtin_return_address to get the locations that called + // exclusiveJoin()? + + if (stopAtNextEvent) return; + + // Trace the left branch I guess. + if (left.dependency.get() != nullptr) { + left.dependency->tracePromise(builder, false); + } else if (right.dependency.get() != nullptr) { + right.dependency->tracePromise(builder, false); } - return result; } ExclusiveJoinPromiseNode::Branch::Branch( @@ -939,19 +2522,27 @@ bool ExclusiveJoinPromiseNode::Branch::get(ExceptionOrValue& output) { } Maybe> ExclusiveJoinPromiseNode::Branch::fire() { - // Cancel the branch that didn't return first. Ignore exceptions caused by cancellation. - if (this == &joinNode.left) { - kj::runCatchingExceptions([&]() { joinNode.right.dependency = nullptr; }); + if (dependency) { + // Cancel the branch that didn't return first. Ignore exceptions caused by cancellation. + if (this == &joinNode.left) { + kj::runCatchingExceptions([&]() { joinNode.right.dependency = nullptr; }); + } else { + kj::runCatchingExceptions([&]() { joinNode.left.dependency = nullptr; }); + } + + joinNode.onReadyEvent.arm(); } else { - kj::runCatchingExceptions([&]() { joinNode.left.dependency = nullptr; }); + // The other branch already fired, and this branch was canceled. It's possible for both + // branches to fire if both were armed simultaneously. 
} - - joinNode.onReadyEvent.arm(); return nullptr; } -PromiseNode* ExclusiveJoinPromiseNode::Branch::getInnerForTrace() { - return dependency; +void ExclusiveJoinPromiseNode::Branch::traceEvent(TraceBuilder& builder) { + if (dependency.get() != nullptr) { + dependency->tracePromise(builder, true); + } + joinNode.onReadyEvent.traceEvent(builder); } // ------------------------------------------------------------------- @@ -992,8 +2583,16 @@ void ArrayJoinPromiseNodeBase::get(ExceptionOrValue& output) noexcept { } } -PromiseNode* ArrayJoinPromiseNodeBase::getInnerForTrace() { - return branches.size() == 0 ? nullptr : branches[0].getInnerForTrace(); +void ArrayJoinPromiseNodeBase::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + // TODO(debug): Maybe use __builtin_return_address to get the locations that called + // joinPromises()? + + if (stopAtNextEvent) return; + + // Trace the first branch I guess. + if (branches != nullptr) { + branches[0].dependency->tracePromise(builder, false); + } } ArrayJoinPromiseNodeBase::Branch::Branch( @@ -1012,8 +2611,9 @@ Maybe> ArrayJoinPromiseNodeBase::Branch::fire() { return nullptr; } -_::PromiseNode* ArrayJoinPromiseNodeBase::Branch::getInnerForTrace() { - return dependency->getInnerForTrace(); +void ArrayJoinPromiseNodeBase::Branch::traceEvent(TraceBuilder& builder) { + dependency->tracePromise(builder, true); + joinNode.onReadyEvent.traceEvent(builder); } Maybe ArrayJoinPromiseNodeBase::Branch::getPart() { @@ -1035,8 +2635,8 @@ void ArrayJoinPromiseNode::getNoError(ExceptionOrValue& output) noexcept { } // namespace _ (private) Promise joinPromises(Array>&& promises) { - return Promise(false, kj::heap<_::ArrayJoinPromiseNode>( - KJ_MAP(p, promises) { return kj::mv(p.node); }, + return _::PromiseNode::to>(kj::heap<_::ArrayJoinPromiseNode>( + KJ_MAP(p, promises) { return _::PromiseNode::from(kj::mv(p)); }, heapArray<_::ExceptionOr<_::Void>>(promises.size()))); } @@ -1055,8 +2655,22 @@ void EagerPromiseNodeBase::onReady(Event* event) noexcept { onReadyEvent.init(event); } -PromiseNode* EagerPromiseNodeBase::getInnerForTrace() { - return dependency; +void EagerPromiseNodeBase::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + // TODO(debug): Maybe use __builtin_return_address to get the locations that called + // eagerlyEvaluate()? But note that if a non-null exception handler was passed to it, that + // creates a TransformPromiseNode which will report the location anyhow. + + if (stopAtNextEvent) return; + if (dependency.get() != nullptr) { + dependency->tracePromise(builder, stopAtNextEvent); + } +} + +void EagerPromiseNodeBase::traceEvent(TraceBuilder& builder) { + if (dependency.get() != nullptr) { + dependency->tracePromise(builder, true); + } + onReadyEvent.traceEvent(builder); } Maybe> EagerPromiseNodeBase::fire() { @@ -1077,8 +2691,39 @@ void AdapterPromiseNodeBase::onReady(Event* event) noexcept { onReadyEvent.init(event); } +void AdapterPromiseNodeBase::tracePromise(TraceBuilder& builder, bool stopAtNextEvent) { + // Maybe returning the address of get() will give us a function name with meaningful type + // information. + builder.add(getMethodStartAddress(implicitCast(*this), &PromiseNode::get)); +} + +void END_FULFILLER_STACK_START_LISTENER_STACK() {} +// Dummy symbol used when reporting how a PromiseFulfiller was destroyed without fulfilling the +// promise. We end up combining two stack traces into one and we use this as a separator. 
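// Illustrative sketch (editorial, not part of the patch itself): the fulfiller plumbing in this
// hunk rejects a promise whose PromiseFulfiller is destroyed without ever being fulfilled,
// using the dummy separator symbol above when building the combined stack trace. A minimal
// caller-side sketch of that behavior, assuming only the public KJ API; startSomething() and
// demo() are hypothetical names, and the no-I/O EventLoop setup is just for demonstration.
#include <kj/async.h>
#include <kj/debug.h>

kj::Promise<int> startSomething() {
  auto paf = kj::newPromiseAndFulfiller<int>();
  paf.fulfiller = nullptr;   // oops: dropped before fulfill()/reject() was ever called
  return kj::mv(paf.promise);
}

void demo() {
  kj::EventLoop loop;
  kj::WaitScope waitScope(loop);
  KJ_IF_MAYBE(e, kj::runCatchingExceptions([&]() { startSomething().wait(waitScope); })) {
    // The exception's description reports that the PromiseFulfiller was destroyed without
    // fulfilling the promise, instead of the wait() hanging forever.
    KJ_LOG(INFO, *e);
  }
}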
+ +void WeakFulfillerBase::disposeImpl(void* pointer) const { + if (inner == nullptr) { + // Already detached. + delete this; + } else { + if (inner->isWaiting()) { + // Let's find out if there's an exception being thrown. If so, we'll use it to reject the + // promise. + inner->reject(getDestructionReason( + reinterpret_cast(&END_FULFILLER_STACK_START_LISTENER_STACK), + kj::Exception::Type::FAILED, __FILE__, __LINE__, + "PromiseFulfiller was destroyed without fulfilling the promise."_kj)); + } + inner = nullptr; + } +} + +} // namespace _ (private) + // ------------------------------------------------------------------- +namespace _ { // (private) + Promise IdentityFunc>::operator()() const { return READY_NOW; } } // namespace _ (private) diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/async.h b/libs/EXTERNAL/capnproto/c++/src/kj/async.h index cdb640e81b0..d6e503a724f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/async.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/async.h @@ -21,14 +21,25 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "async-prelude.h" #include "exception.h" #include "refcount.h" +KJ_BEGIN_HEADER + +#ifndef KJ_USE_FIBERS + #if __BIONIC__ || __FreeBSD__ || __OpenBSD__ || KJ_NO_EXCEPTIONS + // These platforms don't support fibers. + #define KJ_USE_FIBERS 0 + #else + #define KJ_USE_FIBERS 1 + #endif +#else + #if KJ_NO_EXCEPTIONS && KJ_USE_FIBERS + #error "Fibers cannot be enabled when exceptions are disabled." + #endif +#endif + namespace kj { class EventLoop; @@ -43,6 +54,9 @@ class PromiseFulfiller; template struct PromiseFulfillerPair; +template +class FunctionParam; + template using PromiseForResult = _::ReducePromises<_::ReturnType>; // Evaluates to the type of Promise for the result of calling functor type Func with parameter type @@ -231,8 +245,16 @@ class Promise: protected _::PromiseBase { // around them in arbitrary ways. Therefore, callers really need to know if a function they // are calling might wait(), and the `WaitScope&` parameter makes this clear. // - // TODO(someday): Implement fibers, and let them call wait() even when they are handling an - // event. + // Usually, there is only one `WaitScope` for each `EventLoop`, and it can only be used at the + // top level of the thread owning the loop. Calling `wait()` with this `WaitScope` is what + // actually causes the event loop to run at all. This top-level `WaitScope` cannot be used + // recursively, so cannot be used within an event callback. + // + // However, it is possible to obtain a `WaitScope` in lower-level code by using fibers. Use + // kj::startFiber() to start some code executing on an alternate call stack. That code will get + // its own `WaitScope` allowing it to operate in a synchronous style. In this case, `wait()` + // switches back to the main stack in order to run the event loop, returning to the fiber's stack + // once the awaited promise resolves. bool poll(WaitScope& waitScope); // Returns true if a call to wait() would complete without blocking, false if it would block. @@ -246,6 +268,8 @@ class Promise: protected _::PromiseBase { // The first poll() verifies that the promise doesn't resolve early, which would otherwise be // hard to do deterministically. The second poll() allows you to check that the promise has // resolved and avoid a wait() that might deadlock in the case that it hasn't. + // + // poll() is not supported in fibers; it will throw an exception. 
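// Illustrative sketch (editorial, not part of the patch itself): the wait()/poll() documentation
// above explains that kj::startFiber() provides a WaitScope below the thread's top level, so
// promises can be awaited in a blocking style from an alternate call stack. A minimal sketch of
// that usage, assuming fiber support is enabled (KJ_USE_FIBERS); fetchId(), fetchName() and
// lookupName() are hypothetical helpers, and the 64 KiB stack size is an arbitrary guess.
#include <kj/async.h>
#include <kj/string.h>

// Two small async helpers standing in for real I/O.
kj::Promise<int> fetchId() { return kj::evalLater([]() { return 42; }); }
kj::Promise<kj::String> fetchName(int id) {
  return kj::evalLater([id]() { return kj::str("item-", id); });
}

kj::Promise<kj::String> lookupName() {
  return kj::startFiber(64 * 1024, [](kj::WaitScope& ws) -> kj::String {
    // On the fiber's own stack, .wait() is allowed even though the surrounding code is driven
    // by the event loop; each wait() switches back to the main stack until the promise resolves.
    int id = fetchId().wait(ws);
    return fetchName(id).wait(ws);   // sequential, synchronous-looking flow over async calls
  });
}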
ForkedPromise fork() KJ_WARN_UNUSED_RESULT; // Forks the promise, so that multiple different clients can independently wait on the result. @@ -307,21 +331,7 @@ class Promise: protected _::PromiseBase { Promise(bool, Own<_::PromiseNode>&& node): PromiseBase(kj::mv(node)) {} // Second parameter prevent ambiguity with immediate-value constructor. - template - friend class Promise; - friend class EventLoop; - template - friend Promise newAdaptedPromise(Params&&... adapterConstructorParams); - template - friend PromiseFulfillerPair newPromiseAndFulfiller(); - template - friend class _::ForkHub; - friend class TaskSet; - friend Promise _::yield(); - friend class _::NeverDone; - template - friend Promise> joinPromises(Array>&& promises); - friend Promise joinPromises(Array>&& promises); + friend class _::PromiseNode; }; template @@ -335,6 +345,9 @@ class ForkedPromise { Promise addBranch(); // Add a new branch to the fork. The branch is equivalent to the original promise. + bool hasBranches(); + // Returns true if there are any branches that haven't been canceled. + private: Own<_::ForkHub<_::FixVoid>> hub; @@ -377,6 +390,111 @@ PromiseForResult evalNow(Func&& func) KJ_WARN_UNUSED_RESULT; // If `func()` throws an exception, the exception is caught and wrapped in a promise -- this is the // main reason why `evalNow()` is useful. +template +PromiseForResult evalLast(Func&& func) KJ_WARN_UNUSED_RESULT; +// Like `evalLater()`, except that the function doesn't run until the event queue is otherwise +// completely empty and the thread is about to suspend waiting for I/O. +// +// This is useful when you need to perform some disruptive action and you want to make sure that +// you don't interrupt some other task between two .then() continuations. For example, say you want +// to cancel a read() operation on a socket and know for sure that if any bytes were read, you saw +// them. It could be that a read() has completed and bytes have been transferred to the target +// buffer, but the .then() callback that handles the read result hasn't executed yet. If you +// cancel the promise at this inopportune moment, the bytes in the buffer are lost. If you do +// evalLast(), then you can be sure that any pending .then() callbacks had a chance to finish out +// and if you didn't receive the read result yet, then you know nothing has been read, and you can +// simply drop the promise. +// +// If evalLast() is called multiple times, functions are executed in LIFO order. If the first +// callback enqueues new events, then latter callbacks will not execute until those events are +// drained. + +ArrayPtr getAsyncTrace(ArrayPtr space); +kj::String getAsyncTrace(); +// If the event loop is currently running in this thread, get a trace back through the promise +// chain leading to the currently-executing event. The format is the same as kj::getStackTrace() +// from exception.c++. + +template +PromiseForResult retryOnDisconnect(Func&& func) KJ_WARN_UNUSED_RESULT; +// Promises to run `func()` asynchronously, retrying once if it fails with a DISCONNECTED exception. +// If the retry also fails, the exception is passed through. +// +// `func()` should return a `Promise`. `retryOnDisconnect(func)` returns the same promise, except +// with the retry logic added. + +template +PromiseForResult startFiber(size_t stackSize, Func&& func) KJ_WARN_UNUSED_RESULT; +// Executes `func()` in a fiber, returning a promise for the eventual reseult. 
`func()` will be +// passed a `WaitScope&` as its parameter, allowing it to call `.wait()` on promises. Thus, `func()` +// can be written in a synchronous, blocking style, instead of using `.then()`. This is often much +// easier to write and read, and may even be significantly faster if it allows the use of stack +// allocation rather than heap allocation. +// +// However, fibers have a major disadvantage: memory must be allocated for the fiber's call stack. +// The entire stack must be allocated at once, making it necessary to choose a stack size upfront +// that is big enough for whatever the fiber needs to do. Estimating this is often difficult. That +// said, over-estimating is not too terrible since pages of the stack will actually be allocated +// lazily when first accessed; actual memory usage will correspond to the "high watermark" of the +// actual stack usage. That said, this lazy allocation forces page faults, which can be quite slow. +// Worse, freeing a stack forces a TLB flush and shootdown -- all currently-executing threads will +// have to be interrupted to flush their CPU cores' TLB caches. +// +// In short, when performance matters, you should try to avoid creating fibers very frequently. + +class FiberPool final { + // A freelist pool of fibers with a set stack size. This improves CPU usage with fibers at + // the expense of memory usage. Fibers in this pool will always use the max amount of memory + // used until the pool is destroyed. + +public: + explicit FiberPool(size_t stackSize); + ~FiberPool() noexcept(false); + KJ_DISALLOW_COPY(FiberPool); + + void setMaxFreelist(size_t count); + // Set the maximum number of stacks to add to the freelist. If the freelist is full, stacks will + // be deleted rather than returned to the freelist. + + void useCoreLocalFreelists(); + // EXPERIMENTAL: Call to tell FiberPool to try to use core-local stack freelists, which + // in theory should increase L1/L2 cache efficacy for freelisted stacks. In practice, as of + // this writing, no performance advantage has yet been demonstrated. Note that currently this + // feature is only supported on Linux (the flag has no effect on other operating systems). + + template + PromiseForResult startFiber(Func&& func) const KJ_WARN_UNUSED_RESULT; + // Executes `func()` in a fiber from this pool, returning a promise for the eventual result. + // `func()` will be passed a `WaitScope&` as its parameter, allowing it to call `.wait()` on + // promises. Thus, `func()` can be written in a synchronous, blocking style, instead of + // using `.then()`. This is often much easier to write and read, and may even be significantly + // faster if it allows the use of stack allocation rather than heap allocation. + + void runSynchronously(kj::FunctionParam func) const; + // Use one of the stacks in the pool to synchronously execute func(), returning the result that + // func() returns. This is not the usual use case for fibers, but can be a nice optimization + // in programs that have many threads that mostly only need small stacks, but occasionally need + // a much bigger stack to run some deeply recursive algorithm. If the algorithm is run on each + // thread's normal call stack, then every thread's stack will tend to grow to be very big + // (usually, stacks automatically grow as needed, but do not shrink until the thread exits + // completely). 
If the thread can share a small set of big stacks that they use only when calling + // the deeply recursive algorithm, and use small stacks for everything else, overall memory usage + // is reduced. + // + // TODO(someday): If func() returns a value, return it from runSynchronously? Current use case + // doesn't need it. + + size_t getFreelistSize() const; + // Get the number of stacks currently in the freelist. Does not count stacks that are active. + +private: + class Impl; + Own impl; + + friend class _::FiberStack; + friend class _::FiberBase; +}; + template Promise> joinPromises(Array>&& promises); // Join an array of promises into a promise for an array. @@ -419,8 +537,16 @@ inline CaptureByMove> mvCapture(MovedParam&& param, Func // ======================================================================================= // Advanced promise construction +class PromiseRejector { + // Superclass of PromiseFulfiller containing the non-typed methods. Useful when you only really + // need to be able to reject a promise, and you need to operate on fulfillers of different types. +public: + virtual void reject(Exception&& exception) = 0; + virtual bool isWaiting() = 0; +}; + template -class PromiseFulfiller { +class PromiseFulfiller: public PromiseRejector { // A callback which can be used to fulfill a promise. Only the first call to fulfill() or // reject() matters; subsequent calls are ignored. @@ -444,7 +570,7 @@ class PromiseFulfiller { }; template <> -class PromiseFulfiller { +class PromiseFulfiller: public PromiseRejector { // Specialization of PromiseFulfiller for void promises. See PromiseFulfiller. public: @@ -460,7 +586,7 @@ class PromiseFulfiller { }; template -Promise newAdaptedPromise(Params&&... adapterConstructorParams); +_::ReducePromises newAdaptedPromise(Params&&... adapterConstructorParams); // Creates a new promise which owns an instance of `Adapter` which encapsulates the operation // that will eventually fulfill the promise. This is primarily useful for adapting non-KJ // asynchronous APIs to use promises. @@ -500,6 +626,55 @@ PromiseFulfillerPair newPromiseAndFulfiller(); // fulfiller will be of type `PromiseFulfiller>`. Thus you pass a `Promise` to the // `fulfill()` callback, and the promises are chained. +template +class CrossThreadPromiseFulfiller: public kj::PromiseFulfiller { + // Like PromiseFulfiller but the methods are `const`, indicating they can safely be called + // from another thread. + +public: + virtual void fulfill(T&& value) const = 0; + virtual void reject(Exception&& exception) const = 0; + virtual bool isWaiting() const = 0; + + void fulfill(T&& value) override { return constThis()->fulfill(kj::fwd(value)); } + void reject(Exception&& exception) override { return constThis()->reject(kj::mv(exception)); } + bool isWaiting() override { return constThis()->isWaiting(); } + +private: + const CrossThreadPromiseFulfiller* constThis() { return this; } +}; + +template <> +class CrossThreadPromiseFulfiller: public kj::PromiseFulfiller { + // Specialization of CrossThreadPromiseFulfiller for void promises. See + // CrossThreadPromiseFulfiller. 
+ +public: + virtual void fulfill(_::Void&& value = _::Void()) const = 0; + virtual void reject(Exception&& exception) const = 0; + virtual bool isWaiting() const = 0; + + void fulfill(_::Void&& value) override { return constThis()->fulfill(kj::mv(value)); } + void reject(Exception&& exception) override { return constThis()->reject(kj::mv(exception)); } + bool isWaiting() override { return constThis()->isWaiting(); } + +private: + const CrossThreadPromiseFulfiller* constThis() { return this; } +}; + +template +struct PromiseCrossThreadFulfillerPair { + _::ReducePromises promise; + Own> fulfiller; +}; + +template +PromiseCrossThreadFulfillerPair newPromiseAndCrossThreadFulfiller(); +// Like `newPromiseAndFulfiller()`, but the fulfiller is allowed to be invoked from any thread, +// not just the one that called this method. Note that the Promise is still tied to the calling +// thread's event loop and *cannot* be used from another thread -- only the PromiseFulfiller is +// cross-thread. + // ======================================================================================= // Canceler @@ -547,7 +722,7 @@ class Canceler { // happens to this Canceler. bool isEmpty() const { return list == nullptr; } - // Indicates if any previously-wrapped promises are still executing. (If this returns false, then + // Indicates if any previously-wrapped promises are still executing. (If this returns true, then // cancel() would be a no-op.) private: @@ -558,6 +733,8 @@ class Canceler { virtual void cancel(Exception&& e) = 0; + void unlink(); + private: Maybe&> prev; Maybe next; @@ -649,6 +826,119 @@ class TaskSet { Maybe>> emptyFulfiller; }; +// ======================================================================================= +// Cross-thread execution. + +class Executor { + // Executes code on another thread's event loop. + // + // Use `kj::getCurrentThreadExecutor()` to get an executor that schedules calls on the current + // thread's event loop. You may then pass the reference to other threads to enable them to call + // back to this one. + +public: + Executor(EventLoop& loop, Badge); + ~Executor() noexcept(false); + + virtual kj::Own addRef() const = 0; + // Add a reference to this Executor. The Executor will not be destroyed until all references are + // dropped. This uses atomic refcounting for thread-safety. + // + // Use this when you can't guarantee that the target thread's event loop won't concurrently exit + // (including due to an uncaught exception!) while another thread is still using the Executor. + // Otherwise, the Executor object is destroyed when the owning event loop exits. + // + // If the target event loop has exited, then `execute{Async,Sync}` will throw DISCONNECTED + // exceptions. + + bool isLive() const; + // Returns true if the remote event loop still exists, false if it has been destroyed. In the + // latter case, `execute{Async,Sync}()` will definitely throw. Of course, if this returns true, + // it could still change to false at any moment, and `execute{Async,Sync}()` could still throw as + // a result. + // + // TODO(cleanup): Should we have tryExecute{Async,Sync}() that return Maybes that are null if + // the remote event loop exited? Currently there are multiple known use cases that check + // isLive() after catching a DISCONNECTED exception to decide whether it is due to the executor + // exiting, and then handling that case. 
This is borderline in violation of KJ exception + // philosophy, but right now I'm not excited about the extra template metaprogramming needed + // for "try" versions... + + template + PromiseForResult executeAsync(Func&& func) const; + // Call from any thread to request that the given function be executed on the executor's thread, + // returning a promise for the result. + // + // The Promise returned by executeAsync() belongs to the requesting thread, not the executor + // thread. Hence, for example, continuations added to this promise with .then() will execute in + // the requesting thread. + // + // If func() itself returns a Promise, that Promise is *not* returned verbatim to the requesting + // thread -- after all, Promise objects cannot be used cross-thread. Instead, the executor thread + // awaits the promise. Once it resolves to a final result, that result is transferred to the + // requesting thread, resolving the promise that executeAsync() returned earlier. + // + // `func` will be destroyed in the requesting thread, after the final result has been returned + // from the executor thread. This means that it is safe for `func` to capture objects that cannot + // safely be destroyed from another thread. It is also safe for `func` to be an lvalue reference, + // so long as the functor remains live until the promise completes or is canceled, and the + // function is thread-safe. + // + // Of course, the body of `func` must be careful that any access it makes on these objects is + // safe cross-thread. For example, it must not attempt to access Promise-related objects + // cross-thread; you cannot create a `PromiseFulfiller` in one thread and then `fulfill()` it + // from another. Unfortunately, the usual convention of using const-correctness to enforce + // thread-safety does not work here, because applications can often ensure that `func` has + // exclusive access to captured objects, and thus can safely mutate them even in non-thread-safe + // ways; the const qualifier is not sufficient to express this. + // + // The final return value of `func` is transferred between threads, and hence is constructed and + // destroyed in separate threads. It is the app's responsibility to make sure this is OK. + // Alternatively, the app can perhaps arrange to send the return value back to the original + // thread for destruction, if needed. + // + // If the requesting thread destroys the returned Promise, the destructor will block waiting for + // the executor thread to acknowledge cancellation. This ensures that `func` can be destroyed + // before the Promise's destructor returns. + // + // Multiple calls to executeAsync() from the same requesting thread to the same target thread + // will be delivered in the same order in which they were requested. (However, if func() returns + // a promise, delivery of subsequent calls is not blocked on that promise. In other words, this + // call provides E-Order in the same way as Cap'n Proto.) + + template + _::UnwrapPromise> executeSync(Func&& func) const; + // Schedules `func()` to execute on the executor thread, and then blocks the requesting thread + // until `func()` completes. If `func()` returns a Promise, then the wait will continue until + // that promise resolves, and the final result will be returned to the requesting thread. + // + // The requesting thread does not need to have an EventLoop. If it does have an EventLoop, that + // loop will *not* execute while the thread is blocked. 
This method is particularly useful to + // allow non-event-loop threads to perform I/O via a separate event-loop thread. + // + // As with `executeAsync()`, `func` is always destroyed on the requesting thread, after the + // executor thread has signaled completion. The return value is transferred between threads. + +private: + struct Impl; + Own impl; + // To avoid including mutex.h... + + friend class EventLoop; + friend class _::XThreadEvent; + friend class _::XThreadPaf; + + void send(_::XThreadEvent& event, bool sync) const; + void wait(); + bool poll(); + + EventLoop& getLoop() const; +}; + +const Executor& getCurrentThreadExecutor(); +// Get the executor for the current thread's event loop. This reference can then be passed to other +// threads. + // ======================================================================================= // The EventLoop class @@ -753,8 +1043,19 @@ class EventLoop { bool isRunnable(); // Returns true if run() would currently do anything, or false if the queue is empty. + const Executor& getExecutor(); + // Returns an Executor that can be used to schedule events on this EventLoop from another thread. + // + // Use the global function kj::getCurrentThreadExecutor() to get the current thread's EventLoop's + // Executor. + // + // Note that this is only needed for cross-thread scheduling. To schedule code to run later in + // the current thread, use `kj::evalLater()`, which will be more efficient. + private: - EventPort& port; + kj::Maybe port; + // If null, this thread doesn't receive I/O events from the OS. It can potentially receive + // events from other threads via the Executor. bool running = false; // True while looping -- wait() is then not allowed. @@ -765,20 +1066,35 @@ class EventLoop { _::Event* head = nullptr; _::Event** tail = &head; _::Event** depthFirstInsertPoint = &head; + _::Event** breadthFirstInsertPoint = &head; + + kj::Maybe> executor; + // Allocated the first time getExecutor() is requested, making cross-thread request possible. Own daemons; + _::Event* currentlyFiring = nullptr; + bool turn(); void setRunnable(bool runnable); void enterScope(); void leaveScope(); + void wait(); + void poll(); + friend void _::detach(kj::Promise&& promise); friend void _::waitImpl(Own<_::PromiseNode>&& node, _::ExceptionOrValue& result, WaitScope& waitScope); friend bool _::pollImpl(_::PromiseNode& node, WaitScope& waitScope); friend class _::Event; friend class WaitScope; + friend class Executor; + friend class _::XThreadEvent; + friend class _::XThreadPaf; + friend class _::FiberBase; + friend class _::FiberStack; + friend ArrayPtr getAsyncTrace(ArrayPtr space); }; class WaitScope { @@ -792,15 +1108,70 @@ class WaitScope { public: inline explicit WaitScope(EventLoop& loop): loop(loop) { loop.enterScope(); } - inline ~WaitScope() { loop.leaveScope(); } + inline ~WaitScope() { if (fiber == nullptr) loop.leaveScope(); } KJ_DISALLOW_COPY(WaitScope); void poll(); // Pumps the event queue and polls for I/O until there's nothing left to do (without blocking). + // + // Not supported in fibers. + + void setBusyPollInterval(uint count) { busyPollInterval = count; } + // Set the maximum number of events to run in a row before calling poll() on the EventPort to + // check for new I/O. + // + // This has no effect when used in a fiber. + + void runEventCallbacksOnStackPool(kj::Maybe pool) { runningStacksPool = pool; } + // Arranges to switch stacks while event callbacks are executing. 
This is an optimization that + // is useful for programs that use extremely high thread counts, where each thread has its own + // event loop, but each thread has relatively low event throughput, i.e. each thread spends + // most of its time waiting for I/O. Normally, the biggest problem with having lots of threads + // is that each thread must allocate a stack, and stacks can take a lot of memory if the + // application commonly makes deep calls. But, most of that stack space is only needed while + // the thread is executing, not while it's sleeping. So, if threads only switch to a big stack + // during execution, switching back when it's time to sleep, and if those stacks are freelisted + // so that they can be shared among threads, then a lot of memory is saved. + // + // We use the `FiberPool` type here because it implements a freelist of stacks, which is exactly + // what we happen to want! In our case, though, we don't use those stacks to implement fibers; + // we use them as the main thread stack. + // + // This has no effect if this WaitScope itself is for a fiber. + // + // Pass `nullptr` as the parameter to go back to running events on the main stack. + + void cancelAllDetached(); + // HACK: Immediately cancel all detached promises. + // + // New code should not use detached promises, and therefore should not need this. + // + // This method exists to help existing code deal with the problems of detached promises, + // especially at teardown time. + // + // This method may be removed in the future. private: EventLoop& loop; + uint busyPollInterval = kj::maxValue; + + kj::Maybe<_::FiberBase&> fiber; + kj::Maybe runningStacksPool; + + explicit WaitScope(EventLoop& loop, _::FiberBase& fiber) + : loop(loop), fiber(fiber) {} + + template + inline void runOnStackPool(Func&& func) { + KJ_IF_MAYBE(pool, runningStacksPool) { + pool->runSynchronously(kj::fwd(func)); + } else { + func(); + } + } + friend class EventLoop; + friend class _::FiberBase; friend void _::waitImpl(Own<_::PromiseNode>&& node, _::ExceptionOrValue& result, WaitScope& waitScope); friend bool _::pollImpl(_::PromiseNode& node, WaitScope& waitScope); @@ -810,3 +1181,5 @@ class WaitScope { #define KJ_ASYNC_H_INCLUDED #include "async-inl.h" + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/common-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/common-test.c++ index aa844950194..0924d41dabc 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/common-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/common-test.c++ @@ -45,6 +45,20 @@ struct ImplicitToInt { } }; +struct Immovable { + Immovable() = default; + KJ_DISALLOW_COPY(Immovable); +}; + +struct CopyOrMove { + // Type that detects the difference between copy and move. 
+ CopyOrMove(int i): i(i) {} + CopyOrMove(CopyOrMove&& other): i(other.i) { other.i = -1; } + CopyOrMove(const CopyOrMove&) = default; + + int i; +}; + TEST(Common, Maybe) { { Maybe m = 123; @@ -61,6 +75,61 @@ TEST(Common, Maybe) { ADD_FAILURE(); } EXPECT_EQ(123, m.orDefault(456)); + bool ranLazy = false; + EXPECT_EQ(123, m.orDefault([&] { + ranLazy = true; + return 456; + })); + EXPECT_FALSE(ranLazy); + + KJ_IF_MAYBE(v, m) { + int notUsedForRef = 5; + const int& ref = m.orDefault([&]() -> int& { return notUsedForRef; }); + + EXPECT_EQ(ref, *v); + EXPECT_EQ(&ref, v); + + const int& ref2 = m.orDefault([notUsed = 5]() -> int { return notUsed; }); + EXPECT_NE(&ref, &ref2); + EXPECT_EQ(ref2, 123); + } else { + ADD_FAILURE(); + } + } + + { + Maybe empty; + int defaultValue = 5; + auto& ref1 = empty.orDefault([&defaultValue]() -> int& { + return defaultValue; + }); + EXPECT_EQ(&ref1, &defaultValue); + + auto ref2 = empty.orDefault([&]() -> int { return defaultValue; }); + EXPECT_NE(&ref2, &defaultValue); + } + + { + Maybe m = 0; + EXPECT_FALSE(m == nullptr); + EXPECT_TRUE(m != nullptr); + KJ_IF_MAYBE(v, m) { + EXPECT_EQ(0, *v); + } else { + ADD_FAILURE(); + } + KJ_IF_MAYBE(v, mv(m)) { + EXPECT_EQ(0, *v); + } else { + ADD_FAILURE(); + } + EXPECT_EQ(0, m.orDefault(456)); + bool ranLazy = false; + EXPECT_EQ(0, m.orDefault([&] { + ranLazy = true; + return 456; + })); + EXPECT_FALSE(ranLazy); } { @@ -76,6 +145,12 @@ TEST(Common, Maybe) { EXPECT_EQ(0, *v); // avoid unused warning } EXPECT_EQ(456, m.orDefault(456)); + bool ranLazy = false; + EXPECT_EQ(456, m.orDefault([&] { + ranLazy = true; + return 456; + })); + EXPECT_TRUE(ranLazy); } int i = 234; @@ -162,42 +237,67 @@ TEST(Common, Maybe) { } { - // Verify orDefault() works with move-only types. - Maybe m = nullptr; - kj::String s = kj::mv(m).orDefault(kj::str("foo")); - EXPECT_EQ("foo", s); - } - - { - Maybe m = &i; + Maybe mi = i; + Maybe m = mi; EXPECT_FALSE(m == nullptr); EXPECT_TRUE(m != nullptr); KJ_IF_MAYBE(v, m) { - EXPECT_NE(v, &i); - EXPECT_EQ(234, *v); + EXPECT_EQ(&KJ_ASSERT_NONNULL(mi), v); } else { ADD_FAILURE(); } KJ_IF_MAYBE(v, mv(m)) { - EXPECT_NE(v, &i); - EXPECT_EQ(234, *v); + EXPECT_EQ(&KJ_ASSERT_NONNULL(mi), v); } else { ADD_FAILURE(); } + EXPECT_EQ(234, m.orDefault(456)); } { - Maybe m = implicitCast(nullptr); + Maybe mi = nullptr; + Maybe m = mi; EXPECT_TRUE(m == nullptr); - EXPECT_FALSE(m != nullptr); KJ_IF_MAYBE(v, m) { + KJ_FAIL_EXPECT(*v); + } + } + + { + const Maybe mi = i; + Maybe m = mi; + EXPECT_FALSE(m == nullptr); + EXPECT_TRUE(m != nullptr); + KJ_IF_MAYBE(v, m) { + EXPECT_EQ(&KJ_ASSERT_NONNULL(mi), v); + } else { ADD_FAILURE(); - EXPECT_EQ(0, *v); // avoid unused warning } KJ_IF_MAYBE(v, mv(m)) { + EXPECT_EQ(&KJ_ASSERT_NONNULL(mi), v); + } else { ADD_FAILURE(); - EXPECT_EQ(0, *v); // avoid unused warning } + EXPECT_EQ(234, m.orDefault(456)); + } + + { + const Maybe mi = nullptr; + Maybe m = mi; + EXPECT_TRUE(m == nullptr); + KJ_IF_MAYBE(v, m) { + KJ_FAIL_EXPECT(*v); + } + } + + { + // Verify orDefault() works with move-only types. + Maybe m = nullptr; + kj::String s = kj::mv(m).orDefault(kj::str("foo")); + EXPECT_EQ("foo", s); + EXPECT_EQ("foo", kj::mv(m).orDefault([] { + return kj::str("foo"); + })); } { @@ -216,6 +316,82 @@ TEST(Common, Maybe) { ADD_FAILURE(); } } + + { + // Test usage of immovable types. + Maybe m; + KJ_EXPECT(m == nullptr); + m.emplace(); + KJ_EXPECT(m != nullptr); + m = nullptr; + KJ_EXPECT(m == nullptr); + } + + { + // Test that initializing Maybe from Maybe&& does a copy, not a move. 
+ CopyOrMove x(123); + Maybe m(x); + Maybe m2 = kj::mv(m); + KJ_EXPECT(m == nullptr); // m is moved out of and cleared + KJ_EXPECT(x.i == 123); // but what m *referenced* was not moved out of + KJ_EXPECT(KJ_ASSERT_NONNULL(m2).i == 123); // m2 is a copy of what m referenced + } + + { + // Test that a moved-out-of Maybe is left empty after move constructor. + Maybe m = 123; + KJ_EXPECT(m != nullptr); + + Maybe n(kj::mv(m)); + KJ_EXPECT(m == nullptr); + KJ_EXPECT(n != nullptr); + } + + { + // Test that a moved-out-of Maybe is left empty after move constructor. + Maybe m = 123; + KJ_EXPECT(m != nullptr); + + Maybe n = kj::mv(m); + KJ_EXPECT(m == nullptr); + KJ_EXPECT(n != nullptr); + } + + { + // Test that a moved-out-of Maybe is left empty when moved to a Maybe. + int x = 123; + Maybe m = x; + KJ_EXPECT(m != nullptr); + + Maybe n(kj::mv(m)); + KJ_EXPECT(m == nullptr); + KJ_EXPECT(n != nullptr); + } + + { + // Test that a moved-out-of Maybe is left empty when moved to another Maybe. + int x = 123; + Maybe m = x; + KJ_EXPECT(m != nullptr); + + Maybe n(kj::mv(m)); + KJ_EXPECT(m == nullptr); + KJ_EXPECT(n != nullptr); + } + + { + Maybe m1 = 123; + Maybe m2 = 123; + Maybe m3 = 456; + Maybe m4 = nullptr; + Maybe m5 = nullptr; + + KJ_EXPECT(m1 == m2); + KJ_EXPECT(m1 != m3); + KJ_EXPECT(m1 != m4); + KJ_EXPECT(m4 == m5); + KJ_EXPECT(m4 != m1); + } } TEST(Common, MaybeConstness) { @@ -524,5 +700,33 @@ KJ_TEST("kj::range()") { KJ_EXPECT(expected == 8); } +KJ_TEST("kj::defer()") { + bool executed; + + // rvalue reference + { + executed = false; + auto deferred = kj::defer([&executed]() { + executed = true; + }); + KJ_EXPECT(!executed); + } + + KJ_EXPECT(executed); + + // lvalue reference + auto executor = [&executed]() { + executed = true; + }; + + { + executed = false; + auto deferred = kj::defer(executor); + KJ_EXPECT(!executed); + } + + KJ_EXPECT(executed); +} + } // namespace } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/common.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/common.c++ index 7d875871129..8960b2316a3 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/common.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/common.c++ @@ -22,9 +22,6 @@ #include "common.h" #include "debug.h" #include -#ifdef _MSC_VER -#include -#endif namespace kj { namespace _ { // private @@ -44,15 +41,9 @@ void unreachable() { KJ_FAIL_ASSERT("Supposedly-unreachable branch executed."); // Really make sure we abort. 
- abort(); + KJ_KNOWN_UNREACHABLE(abort()); } } // namespace _ (private) -#if _MSC_VER && !__clang__ - -float nan() { return std::numeric_limits::quiet_NaN(); } - -#endif - } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/common.h b/libs/EXTERNAL/capnproto/c++/src/kj/common.h index b6be5339703..2caba972f5f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/common.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/common.h @@ -25,10 +25,43 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header +#if defined(__GNUC__) || defined(__clang__) +#define KJ_BEGIN_SYSTEM_HEADER _Pragma("GCC system_header") +#elif defined(_MSC_VER) +#define KJ_BEGIN_SYSTEM_HEADER __pragma(warning(push, 0)) +#define KJ_END_SYSTEM_HEADER __pragma(warning(pop)) +#endif + +#ifndef KJ_BEGIN_SYSTEM_HEADER +#define KJ_BEGIN_SYSTEM_HEADER +#endif + +#ifndef KJ_END_SYSTEM_HEADER +#define KJ_END_SYSTEM_HEADER +#endif + +#if !defined(KJ_HEADER_WARNINGS) || !KJ_HEADER_WARNINGS +#define KJ_BEGIN_HEADER KJ_BEGIN_SYSTEM_HEADER +#define KJ_END_HEADER KJ_END_SYSTEM_HEADER +#else +#define KJ_BEGIN_HEADER +#define KJ_END_HEADER +#endif + +#ifdef __has_cpp_attribute +#define KJ_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define KJ_HAS_CPP_ATTRIBUTE(x) 0 #endif +#ifdef __has_feature +#define KJ_HAS_COMPILER_FEATURE(x) __has_feature(x) +#else +#define KJ_HAS_COMPILER_FEATURE(x) 0 +#endif + +KJ_BEGIN_HEADER + #ifndef KJ_NO_COMPILER_CHECK // Technically, __cplusplus should be 201402L for C++14, but GCC 4.9 -- which is supported -- still // had it defined to 201300L even with -std=c++14. @@ -42,20 +75,20 @@ #ifdef __GNUC__ #if __clang__ - #if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 4) - #warning "This library requires at least Clang 3.4." + #if __clang_major__ < 5 + #warning "This library requires at least Clang 5.0." #elif __cplusplus >= 201402L && !__has_include() #warning "Your compiler supports C++14 but your C++ standard library does not. If your "\ "system has libc++ installed (as should be the case on e.g. Mac OSX), try adding "\ "-stdlib=libc++ to your CXXFLAGS." #endif #else - #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9) - #warning "This library requires at least GCC 4.9." + #if __GNUC__ < 5 + #warning "This library requires at least GCC 5.0." #endif #endif #elif defined(_MSC_VER) - #if _MSC_VER < 1910 + #if _MSC_VER < 1910 && !defined(__clang__) #error "You need Visual Studio 2017 or better to compile this code." #endif #else @@ -96,7 +129,19 @@ typedef unsigned char byte; // Detect whether RTTI and exceptions are enabled, assuming they are unless we have specific // evidence to the contrary. Clients can always define KJ_NO_RTTI or KJ_NO_EXCEPTIONS explicitly // to override these checks. -#ifdef __GNUC__ + +// TODO: Ideally we'd use __cpp_exceptions/__cpp_rtti not being defined as the first pass since +// that is the standard compliant way. However, it's unclear how to use those macros (or any +// others) to distinguish between the compiler supporting feature detection and the feature being +// disabled vs the compiler not supporting feature detection at all. +#if defined(__has_feature) + #if !defined(KJ_NO_RTTI) && !__has_feature(cxx_rtti) + #define KJ_NO_RTTI 1 + #endif + #if !defined(KJ_NO_EXCEPTIONS) && !__has_feature(cxx_exceptions) + #define KJ_NO_EXCEPTIONS 1 + #endif +#elif defined(__GNUC__) #if !defined(KJ_NO_RTTI) && !__GXX_RTTI #define KJ_NO_RTTI 1 #endif @@ -146,7 +191,7 @@ typedef unsigned char byte; #define KJ_ALWAYS_INLINE(...) 
inline __VA_ARGS__ // Don't force inline in debug mode. #else -#if defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__clang__) #define KJ_ALWAYS_INLINE(...) __forceinline __VA_ARGS__ #else #define KJ_ALWAYS_INLINE(...) inline __VA_ARGS__ __attribute__((always_inline)) @@ -154,7 +199,7 @@ typedef unsigned char byte; // Force a function to always be inlined. Apply only to the prototype, not to the definition. #endif -#if defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__clang__) #define KJ_NOINLINE __declspec(noinline) #else #define KJ_NOINLINE __attribute__((noinline)) @@ -173,6 +218,29 @@ typedef unsigned char byte; #define KJ_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) #endif +#if KJ_HAS_CPP_ATTRIBUTE(clang::lifetimebound) +// If this is generating too many false-positives, the user is responsible for disabling the +// problematic warning at the compiler switch level or by suppressing the place where the +// false-positive is reported through compiler-specific pragmas if available. +#define KJ_LIFETIMEBOUND [[clang::lifetimebound]] +#else +#define KJ_LIFETIMEBOUND +#endif +// Annotation that indicates the returned value is referencing a resource owned by this type (e.g. +// cStr() on a std::string). Unfortunately this lifetime can only be superficial currently & cannot +// track further. For example, there's no way to get `array.asPtr().slice(5, 6))` to warn if the +// last slice exceeds the lifetime of `array`. That's because in the general case `ArrayPtr::slice` +// can't have the lifetime bound annotation since it's not wrong to do something like: +// ArrayPtr doSomething(ArrayPtr foo) { +// ... +// return foo.slice(5, 6); +// } +// If `ArrayPtr::slice` had a lifetime bound then the compiler would warn about this perfectly +// legitimate method. Really there needs to be 2 more annotations. One to inherit the lifetime bound +// and another to inherit the lifetime bound from a parameter (which really could be the same thing +// by allowing a syntax like `[[clang::lifetimebound(*this)]]`. +// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound + #if __clang__ #define KJ_UNUSED_MEMBER __attribute__((unused)) // Inhibits "unused" warning for member variables. Only Clang produces such a warning, while GCC @@ -181,6 +249,20 @@ typedef unsigned char byte; #define KJ_UNUSED_MEMBER #endif +#if __cplusplus > 201703L || (__clang__ && __clang_major__ >= 9 && __cplusplus >= 201103L) +// Technically this was only added to C++20 but Clang allows it for >= C++11 and spelunking the +// attributes manual indicates it first came in with Clang 9. +#define KJ_NO_UNIQUE_ADDRESS [[no_unique_address]] +#else +#define KJ_NO_UNIQUE_ADDRESS +#endif + +#if KJ_HAS_COMPILER_FEATURE(thread_sanitizer) || defined(__SANITIZE_THREAD__) +#define KJ_DISABLE_TSAN __attribute__((no_sanitize("thread"), noinline)) +#else +#define KJ_DISABLE_TSAN +#endif + #if __clang__ #define KJ_DEPRECATED(reason) \ __attribute__((deprecated(reason))) @@ -212,7 +294,7 @@ KJ_NORETURN(void unreachable()); } // namespace _ (private) #ifdef KJ_DEBUG -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #define KJ_IREQUIRE(condition, ...) 
\ if (KJ_LIKELY(condition)); else ::kj::_::inlineRequireFailure( \ __FILE__, __LINE__, #condition, "" #__VA_ARGS__, __VA_ARGS__) @@ -245,6 +327,26 @@ KJ_NORETURN(void unreachable()); #define KJ_CLANG_KNOWS_THIS_IS_UNREACHABLE_BUT_GCC_DOESNT KJ_UNREACHABLE #endif +#if __clang__ +#define KJ_KNOWN_UNREACHABLE(code) \ + do { \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wunreachable-code\"") \ + code; \ + _Pragma("clang diagnostic pop") \ + } while (false) +// Suppress "unreachable code" warnings on intentionally unreachable code. +#else +// TODO(someday): Add support for non-clang compilers. +#define KJ_KNOWN_UNREACHABLE(code) do {code;} while(false) +#endif + +#if KJ_HAS_CPP_ATTRIBUTE(fallthrough) +#define KJ_FALLTHROUGH [[fallthrough]] +#else +#define KJ_FALLTHROUGH +#endif + // #define KJ_STACK_ARRAY(type, name, size, minStack, maxStack) // // Allocate an array, preferably on the stack, unless it is too big. On GCC this will use @@ -277,7 +379,7 @@ KJ_NORETURN(void unreachable()); // Create a unique identifier name. We use concatenate __LINE__ rather than __COUNTER__ so that // the name can be used multiple times in the same macro. -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #define KJ_CONSTEXPR(...) __VA_ARGS__ // Use in cases where MSVC barfs on constexpr. A replacement keyword (e.g. "const") can be @@ -303,14 +405,6 @@ KJ_NORETURN(void unreachable()); #define KJ_CONSTEXPR(...) constexpr #endif -#if defined(_MSC_VER) && _MSC_VER < 1910 -// TODO(msvc): Visual Studio 2015 mishandles declaring the no-arg constructor `= default` for -// certain template types -- it fails to call member constructors. -#define KJ_DEFAULT_CONSTRUCTOR_VS2015_BUGGY {} -#else -#define KJ_DEFAULT_CONSTRUCTOR_VS2015_BUGGY = default; -#endif - // ======================================================================================= // Template metaprogramming helpers. @@ -344,7 +438,7 @@ template <> struct EnableIf_ { typedef void Type; }; template using EnableIf = typename EnableIf_::Type; // Use like: // -// template ()> +// template ()>> // void func(T&& t); template struct VoidSfinae_ { using Type = void; }; @@ -391,7 +485,7 @@ struct DisallowConstCopy { #endif }; -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #define KJ_CPCAP(obj) obj=::kj::cp(obj) // TODO(msvc): MSVC refuses to invoke non-const versions of copy constructors in by-value lambda @@ -503,6 +597,35 @@ constexpr bool canMemcpy() { static_assert(kj::canMemcpy(), "this code expects this type to be memcpy()-able"); #endif +template +class Badge { + // A pattern for marking individual methods such that they can only be called from a specific + // caller class: Make the method public but give it a parameter of type `Badge`. Only + // `Caller` can construct one, so only `Caller` can call the method. + // + // // We only allow calls from the class `Bar`. + // void foo(Badge) + // + // The call site looks like: + // + // foo({}); + // + // This pattern also works well for declaring private constructors, but still being able to use + // them with `kj::heap()`, etc. + // + // Idea from: https://awesomekling.github.io/Serenity-C++-patterns-The-Badge/ + // + // Note that some forms of this idea make the copy constructor private as well, in order to + // prohibit `Badge(*(Badge*)nullptr)`. However, that would prevent badges from + // being passed through forwarding functions like `kj::heap()`, which would ruin one of the main + // use cases for this pattern in KJ. 
In any case, dereferencing a null pointer is UB; there are + // plenty of other ways to get access to private members if you're willing to go UB. For one-off + // debugging purposes, you might as well use `#define private public` at the top of the file. +private: + Badge() {} + friend T; +}; + // ======================================================================================= // Equivalents to std::move() and std::forward(), since these are very commonly needed and the // std header pulls in lots of other stuff. @@ -634,29 +757,13 @@ struct ThrowOverflow { // Functor which throws an exception complaining about integer overflow. Usually this is used // with the interfaces in units.h, but is defined here because Cap'n Proto wants to avoid // including units.h when not using CAPNP_DEBUG_TYPES. - void operator()() const; + [[noreturn]] void operator()() const; }; -#if __GNUC__ || __clang__ +#if __GNUC__ || __clang__ || _MSC_VER inline constexpr float inf() { return __builtin_huge_valf(); } inline constexpr float nan() { return __builtin_nanf(""); } -#elif _MSC_VER - -// Do what MSVC math.h does -#pragma warning(push) -#pragma warning(disable: 4756) // "overflow in constant arithmetic" -inline constexpr float inf() { return (float)(1e300 * 1e300); } -#pragma warning(pop) - -float nan(); -// Unfortunatley, inf() * 0.0f produces a NaN with the sign bit set, whereas our preferred -// canonical NaN should not have the sign bit set. std::numeric_limits::quiet_NaN() -// returns the correct NaN, but we don't want to #include that here. So, we give up and make -// this out-of-line on MSVC. -// -// TODO(msvc): Can we do better? - #else #error "Not sure how to support your compiler." #endif @@ -665,7 +772,7 @@ inline constexpr bool isNaN(float f) { return f != f; } inline constexpr bool isNaN(double f) { return f != f; } inline int popCount(unsigned int x) { -#if defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__clang__) return __popcnt(x); // Note: __popcnt returns unsigned int, but the value is clearly guaranteed to fit into an int #else @@ -803,6 +910,68 @@ inline constexpr Repeat> repeat(T&& value, size_t count) { return Repeat>(value, count); } +template +class MappedIterator: private Mapping { + // An iterator that wraps some other iterator and maps the values through a mapping function. + // The type `Mapping` must define a method `map()` which performs this mapping. + +public: + template + MappedIterator(Inner inner, Params&&... 
params) + : Mapping(kj::fwd(params)...), inner(inner) {} + + inline auto operator->() const { return &Mapping::map(*inner); } + inline decltype(auto) operator* () const { return Mapping::map(*inner); } + inline decltype(auto) operator[](size_t index) const { return Mapping::map(inner[index]); } + inline MappedIterator& operator++() { ++inner; return *this; } + inline MappedIterator operator++(int) { return MappedIterator(inner++, *this); } + inline MappedIterator& operator--() { --inner; return *this; } + inline MappedIterator operator--(int) { return MappedIterator(inner--, *this); } + inline MappedIterator& operator+=(ptrdiff_t amount) { inner += amount; return *this; } + inline MappedIterator& operator-=(ptrdiff_t amount) { inner -= amount; return *this; } + inline MappedIterator operator+ (ptrdiff_t amount) const { + return MappedIterator(inner + amount, *this); + } + inline MappedIterator operator- (ptrdiff_t amount) const { + return MappedIterator(inner - amount, *this); + } + inline ptrdiff_t operator- (const MappedIterator& other) const { return inner - other.inner; } + + inline bool operator==(const MappedIterator& other) const { return inner == other.inner; } + inline bool operator!=(const MappedIterator& other) const { return inner != other.inner; } + inline bool operator<=(const MappedIterator& other) const { return inner <= other.inner; } + inline bool operator>=(const MappedIterator& other) const { return inner >= other.inner; } + inline bool operator< (const MappedIterator& other) const { return inner < other.inner; } + inline bool operator> (const MappedIterator& other) const { return inner > other.inner; } + +private: + Inner inner; +}; + +template +class MappedIterable: private Mapping { + // An iterable that wraps some other iterable and maps the values through a mapping function. + // The type `Mapping` must define a method `map()` which performs this mapping. + +public: + template + MappedIterable(Inner inner, Params&&... params) + : Mapping(kj::fwd(params)...), inner(inner) {} + + typedef Decay().begin())> InnerIterator; + typedef MappedIterator Iterator; + typedef Decay().begin())> InnerConstIterator; + typedef MappedIterator ConstIterator; + + inline Iterator begin() { return { inner.begin(), (Mapping&)*this }; } + inline Iterator end() { return { inner.end(), (Mapping&)*this }; } + inline ConstIterator begin() const { return { inner.begin(), (const Mapping&)*this }; } + inline ConstIterator end() const { return { inner.end(), (const Mapping&)*this }; } + +private: + Inner inner; +}; + // ======================================================================================= // Manually invoking constructors and destructors // @@ -870,7 +1039,7 @@ class NullableValue { // boolean flag indicating nullness. public: - inline NullableValue(NullableValue&& other) noexcept(noexcept(T(instance()))) + inline NullableValue(NullableValue&& other) : isSet(other.isSet) { if (isSet) { ctor(value, kj::mv(other.value)); @@ -889,7 +1058,7 @@ class NullableValue { } } inline ~NullableValue() -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) // TODO(msvc): MSVC has a hard time with noexcept specifier expressions that are more complex // than `true` or `false`. We had a workaround for VS2015, but VS2017 regressed. 
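The MappedIterator and MappedIterable helpers introduced in this hunk wrap another iterable and pass every element through Mapping::map() on dereference. A minimal sketch of the intended usage, with a hypothetical Doubler functor that is not part of the patch:

    #include <kj/common.h>

    struct Doubler {
      // Mapping functor: MappedIterator calls map() on each dereference.
      int map(int x) const { return x * 2; }
    };

    void example() {
      int raw[] = {1, 2, 3};
      auto nums = kj::arrayPtr(raw, 3);
      // Iterating `doubled` yields 2, 4, 6 without copying the underlying array.
      kj::MappedIterable<kj::ArrayPtr<int>, Doubler> doubled(nums);
      for (int x: doubled) {
        (void)x;  // x takes the values 2, 4, 6
      }
    }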
noexcept(false) @@ -922,8 +1091,8 @@ class NullableValue { return value; } - inline NullableValue() noexcept: isSet(false) {} - inline NullableValue(T&& t) noexcept(noexcept(T(instance()))) + inline NullableValue(): isSet(false) {} + inline NullableValue(T&& t) : isSet(true) { ctor(value, kj::mv(t)); } @@ -935,12 +1104,8 @@ class NullableValue { : isSet(true) { ctor(value, t); } - inline NullableValue(const T* t) - : isSet(t != nullptr) { - if (isSet) ctor(value, *t); - } template - inline NullableValue(NullableValue&& other) noexcept(noexcept(T(instance()))) + inline NullableValue(NullableValue&& other) : isSet(other.isSet) { if (isSet) { ctor(value, kj::mv(other.value)); @@ -1007,13 +1172,56 @@ class NullableValue { return *this; } + inline NullableValue& operator=(T&& other) { emplace(kj::mv(other)); return *this; } + inline NullableValue& operator=(T& other) { emplace(other); return *this; } + inline NullableValue& operator=(const T& other) { emplace(other); return *this; } + template + inline NullableValue& operator=(NullableValue&& other) { + if (other.isSet) { + emplace(kj::mv(other.value)); + } else { + *this = nullptr; + } + return *this; + } + template + inline NullableValue& operator=(const NullableValue& other) { + if (other.isSet) { + emplace(other.value); + } else { + *this = nullptr; + } + return *this; + } + template + inline NullableValue& operator=(const NullableValue& other) { + if (other.isSet) { + emplace(other.value); + } else { + *this = nullptr; + } + return *this; + } + inline NullableValue& operator=(decltype(nullptr)) { + if (isSet) { + isSet = false; + dtor(value); + } + return *this; + } + inline bool operator==(decltype(nullptr)) const { return !isSet; } inline bool operator!=(decltype(nullptr)) const { return isSet; } + NullableValue(const T* t) = delete; + NullableValue& operator=(const T* other) = delete; + // We used to permit assigning a Maybe directly from a T*, and the assignment would check for + // nullness. This turned out never to be useful, and sometimes to be dangerous. + private: bool isSet; -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #pragma warning(push) #pragma warning(disable: 4624) // Warns that the anonymous union has a deleted destructor when T is non-trivial. This warning @@ -1024,7 +1232,7 @@ class NullableValue { T value; }; -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #pragma warning(pop) #endif @@ -1060,18 +1268,25 @@ class Maybe { public: Maybe(): ptr(nullptr) {} - Maybe(T&& t) noexcept(noexcept(T(instance()))): ptr(kj::mv(t)) {} + Maybe(T&& t): ptr(kj::mv(t)) {} Maybe(T& t): ptr(t) {} Maybe(const T& t): ptr(t) {} - Maybe(const T* t) noexcept: ptr(t) {} - Maybe(Maybe&& other) noexcept(noexcept(T(instance()))): ptr(kj::mv(other.ptr)) {} + Maybe(Maybe&& other): ptr(kj::mv(other.ptr)) { other = nullptr; } Maybe(const Maybe& other): ptr(other.ptr) {} Maybe(Maybe& other): ptr(other.ptr) {} template - Maybe(Maybe&& other) noexcept(noexcept(T(instance()))) { + Maybe(Maybe&& other) { KJ_IF_MAYBE(val, kj::mv(other)) { ptr.emplace(kj::mv(*val)); + other = nullptr; + } + } + template + Maybe(Maybe&& other) { + KJ_IF_MAYBE(val, other) { + ptr.emplace(*val); + other = nullptr; } } template @@ -1081,24 +1296,64 @@ class Maybe { } } - Maybe(decltype(nullptr)) noexcept: ptr(nullptr) {} + Maybe(decltype(nullptr)): ptr(nullptr) {} template inline T& emplace(Params&&... 
params) { - // Replace this Maybe's content with a new value constructed by passing the given parametrs to + // Replace this Maybe's content with a new value constructed by passing the given parameters to // T's constructor. This can be used to initialize a Maybe without copying or even moving a T. // Returns a reference to the newly-constructed value. return ptr.emplace(kj::fwd(params)...); } - inline Maybe& operator=(Maybe&& other) { ptr = kj::mv(other.ptr); return *this; } + inline Maybe& operator=(T&& other) { ptr = kj::mv(other); return *this; } + inline Maybe& operator=(T& other) { ptr = other; return *this; } + inline Maybe& operator=(const T& other) { ptr = other; return *this; } + + inline Maybe& operator=(Maybe&& other) { ptr = kj::mv(other.ptr); other = nullptr; return *this; } inline Maybe& operator=(Maybe& other) { ptr = other.ptr; return *this; } inline Maybe& operator=(const Maybe& other) { ptr = other.ptr; return *this; } + template + Maybe& operator=(Maybe&& other) { + KJ_IF_MAYBE(val, kj::mv(other)) { + ptr.emplace(kj::mv(*val)); + other = nullptr; + } else { + ptr = nullptr; + } + return *this; + } + template + Maybe& operator=(const Maybe& other) { + KJ_IF_MAYBE(val, other) { + ptr.emplace(*val); + } else { + ptr = nullptr; + } + return *this; + } + + inline Maybe& operator=(decltype(nullptr)) { ptr = nullptr; return *this; } + inline bool operator==(decltype(nullptr)) const { return ptr == nullptr; } inline bool operator!=(decltype(nullptr)) const { return ptr != nullptr; } + inline bool operator==(const Maybe& other) const { + if (ptr == nullptr) { + return other == nullptr; + } else { + return other.ptr != nullptr && *ptr == *other.ptr; + } + } + inline bool operator!=(const Maybe& other) const { return !(*this == other); } + + Maybe(const T* t) = delete; + Maybe& operator=(const T* other) = delete; + // We used to permit assigning a Maybe directly from a T*, and the assignment would check for + // nullness. This turned out never to be useful, and sometimes to be dangerous. + T& orDefault(T& defaultValue) & { if (ptr == nullptr) { return defaultValue; @@ -1128,6 +1383,46 @@ class Maybe { } } + template () ? instance() : instance()())> + Result orDefault(F&& lazyDefaultValue) & { + if (ptr == nullptr) { + return lazyDefaultValue(); + } else { + return *ptr; + } + } + + template () ? instance() : instance()())> + Result orDefault(F&& lazyDefaultValue) const & { + if (ptr == nullptr) { + return lazyDefaultValue(); + } else { + return *ptr; + } + } + + template () ? instance() : instance()())> + Result orDefault(F&& lazyDefaultValue) && { + if (ptr == nullptr) { + return lazyDefaultValue(); + } else { + return kj::mv(*ptr); + } + } + + template () ? instance() : instance()())> + Result orDefault(F&& lazyDefaultValue) const && { + if (ptr == nullptr) { + return lazyDefaultValue(); + } else { + return kj::mv(*ptr); + } + } + template auto map(Func&& f) & -> Maybe()))> { if (ptr == nullptr) { @@ -1178,24 +1473,51 @@ class Maybe { }; template -class Maybe: public DisallowConstCopyIfNotConst { +class Maybe { public: - Maybe() noexcept: ptr(nullptr) {} - Maybe(T& t) noexcept: ptr(&t) {} - Maybe(T* t) noexcept: ptr(t) {} + constexpr Maybe(): ptr(nullptr) {} + constexpr Maybe(T& t): ptr(&t) {} + constexpr Maybe(T* t): ptr(t) {} + + inline constexpr Maybe(PropagateConst& other): ptr(other.ptr) {} + // Allow const copy only if `T` itself is const. Otherwise allow only non-const copy, to + // protect transitive constness. 
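The lazy orDefault() overloads added in this hunk take a callable that is invoked only when the Maybe is empty, so an expensive default is never computed when a value is present. A small sketch under that assumption (the values and lambda below are illustrative, not from the patch):

    #include <kj/common.h>

    void example() {
      kj::Maybe<int> maybeCount = nullptr;

      // maybeCount is null, so the lambda runs and count == 42.
      int count = maybeCount.orDefault([]() { return 42; });

      // Direct assignment from a value is also newly allowed in this hunk; once a value
      // is stored, the lambda is never invoked and orDefault() returns 7.
      maybeCount = 7;
      count = maybeCount.orDefault([]() { return 42; });
      (void)count;
    }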
Clang is happy for this constructor to be declared `= default` + // since, after evaluation of `PropagateConst`, it does end up being a default-able constructor. + // But, GCC and MSVC both complain about that, claiming this constructor cannot be declared + // default. I don't know who is correct, but whatever, we'll write out an implementation, fine. + // + // Note that we can't solve this by inheriting DisallowConstCopyIfNotConst because we want + // to override the move constructor, and if we override the move constructor then we must define + // the copy constructor here. + + inline constexpr Maybe(Maybe&& other): ptr(other.ptr) { other.ptr = nullptr; } template - inline Maybe(Maybe& other) noexcept: ptr(other.ptr) {} + inline constexpr Maybe(Maybe& other): ptr(other.ptr) {} template - inline Maybe(const Maybe& other) noexcept: ptr(const_cast(other.ptr)) {} - inline Maybe(decltype(nullptr)) noexcept: ptr(nullptr) {} - - inline Maybe& operator=(T& other) noexcept { ptr = &other; return *this; } - inline Maybe& operator=(T* other) noexcept { ptr = other; return *this; } + inline constexpr Maybe(const Maybe& other): ptr(const_cast(other.ptr)) {} + template + inline constexpr Maybe(Maybe&& other): ptr(other.ptr) { other.ptr = nullptr; } template - inline Maybe& operator=(Maybe& other) noexcept { ptr = other.ptr; return *this; } + inline constexpr Maybe(const Maybe&& other) = delete; + template ()>> + constexpr Maybe(Maybe& other): ptr(other.ptr.operator U*()) {} + template ()>> + constexpr Maybe(const Maybe& other): ptr(other.ptr.operator const U*()) {} + inline constexpr Maybe(decltype(nullptr)): ptr(nullptr) {} + + inline Maybe& operator=(T& other) { ptr = &other; return *this; } + inline Maybe& operator=(T* other) { ptr = other; return *this; } + inline Maybe& operator=(PropagateConst& other) { ptr = other.ptr; return *this; } + inline Maybe& operator=(Maybe&& other) { ptr = other.ptr; other.ptr = nullptr; return *this; } template - inline Maybe& operator=(const Maybe& other) noexcept { ptr = other.ptr; return *this; } + inline Maybe& operator=(Maybe& other) { ptr = other.ptr; return *this; } + template + inline Maybe& operator=(const Maybe& other) { ptr = other.ptr; return *this; } + template + inline Maybe& operator=(Maybe&& other) { ptr = other.ptr; other.ptr = nullptr; return *this; } + template + inline Maybe& operator=(const Maybe&& other) = delete; inline bool operator==(decltype(nullptr)) const { return ptr == nullptr; } inline bool operator!=(decltype(nullptr)) const { return ptr != nullptr; } @@ -1261,13 +1583,47 @@ class ArrayPtr: public DisallowConstCopyIfNotConst { public: inline constexpr ArrayPtr(): ptr(nullptr), size_(0) {} inline constexpr ArrayPtr(decltype(nullptr)): ptr(nullptr), size_(0) {} - inline constexpr ArrayPtr(T* ptr, size_t size): ptr(ptr), size_(size) {} - inline constexpr ArrayPtr(T* begin, T* end): ptr(begin), size_(end - begin) {} - inline KJ_CONSTEXPR() ArrayPtr(::std::initializer_list> init) + inline constexpr ArrayPtr(T* ptr KJ_LIFETIMEBOUND, size_t size): ptr(ptr), size_(size) {} + inline constexpr ArrayPtr(T* begin KJ_LIFETIMEBOUND, T* end KJ_LIFETIMEBOUND) + : ptr(begin), size_(end - begin) {} + ArrayPtr& operator=(Array&&) = delete; + ArrayPtr& operator=(decltype(nullptr)) { + ptr = nullptr; + size_ = 0; + return *this; + } + +#if __GNUC__ && !__clang__ && __GNUC__ >= 9 +// GCC 9 added a warning when we take an initializer_list as a constructor parameter and save a +// pointer to its content in a class member. 
GCC apparently imagines we're going to do something +// dumb like this: +// ArrayPtr ptr = { 1, 2, 3 }; +// foo(ptr[1]); // undefined behavior! +// Any KJ programmer should be able to recognize that this is UB, because an ArrayPtr does not own +// its content. That's not what this constructor is for, though. This constructor is meant to allow +// code like this: +// int foo(ArrayPtr p); +// // ... later ... +// foo({1, 2, 3}); +// In this case, the initializer_list's backing array, like any temporary, lives until the end of +// the statement `foo({1, 2, 3});`. Therefore, it lives at least until the call to foo() has +// returned, which is exactly what we care about. This usage is fine! GCC is wrong to warn. +// +// Amusingly, Clang's implementation has a similar type that they call ArrayRef which apparently +// triggers this same GCC warning. My guess is that Clang will not introduce a similar warning +// given that it triggers on their own, legitimate code. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Winit-list-lifetime" +#endif + inline KJ_CONSTEXPR() ArrayPtr( + ::std::initializer_list> init KJ_LIFETIMEBOUND) : ptr(init.begin()), size_(init.size()) {} +#if __GNUC__ && !__clang__ && __GNUC__ >= 9 +#pragma GCC diagnostic pop +#endif template - inline constexpr ArrayPtr(T (&native)[size]): ptr(native), size_(size) { + inline constexpr ArrayPtr(KJ_LIFETIMEBOUND T (&native)[size]): ptr(native), size_(size) { // Construct an ArrayPtr from a native C-style array. // // We disable this constructor for const char arrays because otherwise you would be able to @@ -1375,13 +1731,13 @@ class ArrayPtr: public DisallowConstCopyIfNotConst { }; template -inline constexpr ArrayPtr arrayPtr(T* ptr, size_t size) { +inline constexpr ArrayPtr arrayPtr(T* ptr KJ_LIFETIMEBOUND, size_t size) { // Use this function to construct ArrayPtrs without writing out the type name. return ArrayPtr(ptr, size); } template -inline constexpr ArrayPtr arrayPtr(T* begin, T* end) { +inline constexpr ArrayPtr arrayPtr(T* begin KJ_LIFETIMEBOUND, T* end KJ_LIFETIMEBOUND) { // Use this function to construct ArrayPtrs without writing out the type name. return ArrayPtr(begin, end); } @@ -1448,7 +1804,7 @@ class Deferred { KJ_DISALLOW_COPY(Deferred); // This move constructor is usually optimized away by the compiler. - inline Deferred(Deferred&& other): func(kj::mv(other.func)), canceled(false) { + inline Deferred(Deferred&& other): func(kj::fwd(other.func)), canceled(false) { other.canceled = true; } private: @@ -1474,3 +1830,5 @@ _::Deferred defer(Func&& func) { // Run the given code when the function exits, whether by return or exception.
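The Deferred move constructor changed above backs kj::defer(), which runs a callback when the returned guard leaves scope, whether by normal return or by exception. A minimal illustrative sketch, not taken from the patch:

    #include <kj/common.h>

    void example() {
      int cleanupCount = 0;
      {
        auto guard = kj::defer([&]() { ++cleanupCount; });
        // ... work that may return early or throw ...
      }  // guard destroyed here: the lambda runs exactly once, so cleanupCount == 1
    }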
} // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip-test.c++ index 080f1cc1f2c..09bb0c05be5 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip-test.c++ @@ -126,6 +126,8 @@ public: } return kj::READY_NOW; } + + Promise whenWriteDisconnected() override { KJ_UNIMPLEMENTED("not used"); } }; KJ_TEST("gzip decompression") { diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip.c++ index 15df96b8890..60d5a8f09d2 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip.c++ @@ -163,7 +163,9 @@ void GzipOutputStream::pump(int flush) { auto result = ctx.pumpOnce(flush); ok = get<0>(result); auto chunk = get<1>(result); - inner.write(chunk.begin(), chunk.size()); + if (chunk.size() > 0) { + inner.write(chunk.begin(), chunk.size()); + } } while (ok); } @@ -254,11 +256,20 @@ kj::Promise GzipAsyncOutputStream::pump(int flush) { auto result = ctx.pumpOnce(flush); auto ok = get<0>(result); auto chunk = get<1>(result); - auto promise = inner.write(chunk.begin(), chunk.size()); - if (ok) { - promise = promise.then([this, flush]() { return pump(flush); }); + + if (chunk.size() == 0) { + if (ok) { + return pump(flush); + } else { + return kj::READY_NOW; + } + } else { + auto promise = inner.write(chunk.begin(), chunk.size()); + if (ok) { + promise = promise.then([this, flush]() { return pump(flush); }); + } + return promise; } - return promise; } } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip.h b/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip.h index 304a87e7300..5045e11757d 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/gzip.h @@ -43,7 +43,7 @@ class GzipOutputContext final { z_stream ctx = {}; byte buffer[4096]; - void fail(int result); + [[noreturn]] void fail(int result); }; } // namespace _ (private) @@ -118,6 +118,8 @@ class GzipAsyncOutputStream final: public AsyncOutputStream { Promise write(const void* buffer, size_t size) override; Promise write(ArrayPtr> pieces) override; + Promise whenWriteDisconnected() override { return inner.whenWriteDisconnected(); } + inline Promise flush() { return pump(Z_SYNC_FLUSH); } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/http-socketpair-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/http-socketpair-test.c++ new file mode 100644 index 00000000000..67c53b79b21 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/http-socketpair-test.c++ @@ -0,0 +1,25 @@ +// Copyright (c) 2019 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Run http-test, but use real OS socketpairs to connect rather than using in-process pipes. +// This is essentially an integration test between KJ HTTP and KJ OS socket handling. +#define KJ_HTTP_TEST_USE_OS_PIPE 1 +#include "http-test.c++" diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/http-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/http-test.c++ index 0aaee1af9ba..87395534e2d 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/http-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/http-test.c++ @@ -24,8 +24,34 @@ #include "http.h" #include #include +#include #include +#if KJ_HTTP_TEST_USE_OS_PIPE +// Run the test using OS-level socketpairs. (See http-socketpair-test.c++.) +#define KJ_HTTP_TEST_SETUP_IO \ + auto io = kj::setupAsyncIo(); \ + auto& waitScope KJ_UNUSED = io.waitScope +#define KJ_HTTP_TEST_SETUP_LOOPBACK_LISTENER_AND_ADDR \ + auto listener = io.provider->getNetwork().parseAddress("localhost", 0) \ + .wait(waitScope)->listen(); \ + auto addr = io.provider->getNetwork().parseAddress("localhost", listener->getPort()) \ + .wait(waitScope) +#define KJ_HTTP_TEST_CREATE_2PIPE \ + io.provider->newTwoWayPipe() +#else +// Run the test using in-process two-way pipes. +#define KJ_HTTP_TEST_SETUP_IO \ + kj::EventLoop eventLoop; \ + kj::WaitScope waitScope(eventLoop) +#define KJ_HTTP_TEST_SETUP_LOOPBACK_LISTENER_AND_ADDR \ + auto capPipe = newCapabilityPipe(); \ + auto listener = kj::heap(*capPipe.ends[0]); \ + auto addr = kj::heap(nullptr, *capPipe.ends[1]) +#define KJ_HTTP_TEST_CREATE_2PIPE \ + kj::newTwoWayPipe() +#endif + namespace kj { namespace { @@ -109,8 +135,9 @@ KJ_TEST("HttpHeaders::parseRequest") { "Content-Length: 123\r\n" "DATE: early\r\n" "other-Header: yep\r\n" + "with.dots: sure\r\n" "\r\n"); - auto result = KJ_ASSERT_NONNULL(headers.tryParseRequest(text.asArray())); + auto result = headers.tryParseRequest(text.asArray()).get(); KJ_EXPECT(result.method == HttpMethod::POST); KJ_EXPECT(result.url == "/some/path"); @@ -126,12 +153,13 @@ KJ_TEST("HttpHeaders::parseRequest") { headers.forEach([&](kj::StringPtr name, kj::StringPtr value) { KJ_EXPECT(unpackedHeaders.insert(std::make_pair(name, value)).second); }); - KJ_EXPECT(unpackedHeaders.size() == 5); + KJ_EXPECT(unpackedHeaders.size() == 6); KJ_EXPECT(unpackedHeaders["Content-Length"] == "123"); KJ_EXPECT(unpackedHeaders["Host"] == "example.com"); KJ_EXPECT(unpackedHeaders["Date"] == "early"); KJ_EXPECT(unpackedHeaders["Foo-Bar"] == "Baz"); KJ_EXPECT(unpackedHeaders["other-Header"] == "yep"); + KJ_EXPECT(unpackedHeaders["with.dots"] == "sure"); KJ_EXPECT(headers.serializeRequest(result.method, result.url) == "POST /some/path HTTP/1.1\r\n" @@ -140,6 +168,7 @@ KJ_TEST("HttpHeaders::parseRequest") { "Date: early\r\n" "Foo-Bar: Baz\r\n" "other-Header: yep\r\n" + "with.dots: sure\r\n" "\r\n"); } @@ -160,7 +189,7 @@ KJ_TEST("HttpHeaders::parseResponse") { "DATE: early\r\n" "other-Header: yep\r\n" "\r\n"); - auto result = KJ_ASSERT_NONNULL(headers.tryParseResponse(text.asArray())); + auto result = 
headers.tryParseResponse(text.asArray()).get(); KJ_EXPECT(result.statusCode == 418); KJ_EXPECT(result.statusText == "I'm a teapot"); @@ -199,40 +228,72 @@ KJ_TEST("HttpHeaders parse invalid") { HttpHeaders headers(*table); // NUL byte in request. - KJ_EXPECT(headers.tryParseRequest(kj::heapString( - "POST \0 /some/path \t HTTP/1.1\r\n" - "Foo-BaR: Baz\r\n" - "Host: example.com\r\n" - "DATE: early\r\n" - "other-Header: yep\r\n" - "\r\n")) == nullptr); + { + auto input = kj::heapString( + "POST \0 /some/path \t HTTP/1.1\r\n" + "Foo-BaR: Baz\r\n" + "Host: example.com\r\n" + "DATE: early\r\n" + "other-Header: yep\r\n" + "\r\n"); + + auto protocolError = headers.tryParseRequest(input).get(); + + KJ_EXPECT(protocolError.description == "Request headers have no terminal newline.", + protocolError.description); + KJ_EXPECT(protocolError.rawContent.asChars() == input); + } // Control character in header name. - KJ_EXPECT(headers.tryParseRequest(kj::heapString( - "POST /some/path \t HTTP/1.1\r\n" - "Foo-BaR: Baz\r\n" - "Cont\001ent-Length: 123\r\n" - "DATE: early\r\n" - "other-Header: yep\r\n" - "\r\n")) == nullptr); + { + auto input = kj::heapString( + "POST /some/path \t HTTP/1.1\r\n" + "Foo-BaR: Baz\r\n" + "Cont\001ent-Length: 123\r\n" + "DATE: early\r\n" + "other-Header: yep\r\n" + "\r\n"); + + auto protocolError = headers.tryParseRequest(input).get(); + + KJ_EXPECT(protocolError.description == "The headers sent by your client are not valid.", + protocolError.description); + KJ_EXPECT(protocolError.rawContent.asChars() == input); + } // Separator character in header name. - KJ_EXPECT(headers.tryParseRequest(kj::heapString( - "POST /some/path \t HTTP/1.1\r\n" - "Foo-BaR: Baz\r\n" - "Host: example.com\r\n" - "DATE/: early\r\n" - "other-Header: yep\r\n" - "\r\n")) == nullptr); + { + auto input = kj::heapString( + "POST /some/path \t HTTP/1.1\r\n" + "Foo-BaR: Baz\r\n" + "Host: example.com\r\n" + "DATE/: early\r\n" + "other-Header: yep\r\n" + "\r\n"); + + auto protocolError = headers.tryParseRequest(input).get(); + + KJ_EXPECT(protocolError.description == "The headers sent by your client are not valid.", + protocolError.description); + KJ_EXPECT(protocolError.rawContent.asChars() == input); + } // Response status code not numeric. 
- KJ_EXPECT(headers.tryParseResponse(kj::heapString( + { + auto input = kj::heapString( "HTTP/1.1\t\t abc\t I'm a teapot\r\n" "Foo-BaR: Baz\r\n" "Host: example.com\r\n" "DATE: early\r\n" "other-Header: yep\r\n" - "\r\n")) == nullptr); + "\r\n"); + + auto protocolError = headers.tryParseRequest(input).get(); + + KJ_EXPECT(protocolError.description == "Unrecognized request method.", + protocolError.description); + KJ_EXPECT(protocolError.rawContent.asChars() == input); + } } KJ_TEST("HttpHeaders validation") { @@ -258,6 +319,29 @@ KJ_TEST("HttpHeaders validation") { KJ_EXPECT_THROW_MESSAGE("invalid header value", headers.add("Valid-Name", "in\nvalid")); } +KJ_TEST("HttpHeaders Set-Cookie handling") { + HttpHeaderTable::Builder builder; + auto hCookie = builder.add("Cookie"); + auto hSetCookie = builder.add("Set-Cookie"); + auto table = builder.build(); + + HttpHeaders headers(*table); + headers.set(hCookie, "Foo"); + headers.add("Cookie", "Bar"); + headers.add("Cookie", "Baz"); + headers.set(hSetCookie, "Foo"); + headers.add("Set-Cookie", "Bar"); + headers.add("Set-Cookie", "Baz"); + + auto text = headers.toString(); + KJ_EXPECT(text == + "Cookie: Foo, Bar, Baz\r\n" + "Set-Cookie: Foo\r\n" + "Set-Cookie: Bar\r\n" + "Set-Cookie: Baz\r\n" + "\r\n", text); +} + // ======================================================================================= class ReadFragmenter final: public kj::AsyncIoStream { @@ -288,6 +372,10 @@ public: return inner.tryPumpFrom(input, amount); } + Promise whenWriteDisconnected() override { + return inner.whenWriteDisconnected(); + } + void shutdownWrite() override { return inner.shutdownWrite(); } @@ -428,8 +516,17 @@ kj::Promise expectRead(kj::AsyncInputStream& in, kj::ArrayPtr })); } -void testHttpClientRequest(kj::WaitScope& waitScope, const HttpRequestTestCase& testCase) { - auto pipe = kj::newTwoWayPipe(); +kj::Promise expectEnd(kj::AsyncInputStream& in) { + static char buffer; + + auto promise = in.tryRead(&buffer, 1, 1); + return promise.then([](size_t amount) { + KJ_ASSERT(amount == 0, "expected EOF"); + }); +} + +void testHttpClientRequest(kj::WaitScope& waitScope, const HttpRequestTestCase& testCase, + kj::TwoWayPipe pipe) { auto serverTask = expectRead(*pipe.ends[1], testCase.raw).then([&]() { static const char SIMPLE_RESPONSE[] = @@ -469,8 +566,7 @@ void testHttpClientRequest(kj::WaitScope& waitScope, const HttpRequestTestCase& } void testHttpClientResponse(kj::WaitScope& waitScope, const HttpResponseTestCase& testCase, - size_t readFragmentSize) { - auto pipe = kj::newTwoWayPipe(); + size_t readFragmentSize, kj::TwoWayPipe pipe) { ReadFragmenter fragmenter(*pipe.ends[0], readFragmentSize); auto expectedReqText = testCase.method == HttpMethod::GET || testCase.method == HttpMethod::HEAD @@ -610,9 +706,8 @@ private: void testHttpServerRequest(kj::WaitScope& waitScope, kj::Timer& timer, const HttpRequestTestCase& requestCase, - const HttpResponseTestCase& responseCase) { - auto pipe = kj::newTwoWayPipe(); - + const HttpResponseTestCase& responseCase, + kj::TwoWayPipe pipe) { HttpHeaderTable table; TestHttpService service(requestCase, responseCase, table); HttpServer server(timer, table, service); @@ -782,6 +877,36 @@ kj::ArrayPtr responseTestCases() { CLIENT_ONLY, // Server never sends connection: close }, + { + "HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Transfer-Encoding: identity\r\n" + "Content-Length: foobar\r\n" // intentionally wrong + "\r\n" + "baz qux", + + 200, "OK", + {{HttpHeaderId::CONTENT_TYPE, "text/plain"}}, + nullptr, {"baz 
qux"}, + + HttpMethod::GET, + CLIENT_ONLY, // Server never sends transfer-encoding: identity + }, + + { + "HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "\r\n" + "baz qux", + + 200, "OK", + {{HttpHeaderId::CONTENT_TYPE, "text/plain"}}, + nullptr, {"baz qux"}, + + HttpMethod::GET, + CLIENT_ONLY, // Server never sends non-delimited message + }, + { "HTTP/1.1 200 OK\r\n" "Content-Length: 123\r\n" @@ -809,6 +934,46 @@ kj::ArrayPtr responseTestCases() { HttpMethod::HEAD, }, + // Zero-length expected size response to HEAD request has no Content-Length header. + { + "HTTP/1.1 200 OK\r\n" + "\r\n", + + 200, "OK", + {}, + uint64_t(0), {}, + + HttpMethod::HEAD, + }, + + { + "HTTP/1.1 204 No Content\r\n" + "\r\n", + + 204, "No Content", + {}, + uint64_t(0), {}, + }, + + { + "HTTP/1.1 205 Reset Content\r\n" + "Content-Length: 0\r\n" + "\r\n", + + 205, "Reset Content", + {}, + uint64_t(0), {}, + }, + + { + "HTTP/1.1 304 Not Modified\r\n" + "\r\n", + + 304, "Not Modified", + {}, + uint64_t(0), {}, + }, + { "HTTP/1.1 200 OK\r\n" "Content-Length: 8\r\n" @@ -845,35 +1010,32 @@ kj::ArrayPtr responseTestCases() { } KJ_TEST("HttpClient requests") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; for (auto& testCase: requestTestCases()) { if (testCase.side == SERVER_ONLY) continue; KJ_CONTEXT(testCase.raw); - testHttpClientRequest(waitScope, testCase); + testHttpClientRequest(waitScope, testCase, KJ_HTTP_TEST_CREATE_2PIPE); } } KJ_TEST("HttpClient responses") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; size_t FRAGMENT_SIZES[] = { 1, 2, 3, 4, 5, 6, 7, 8, 16, 31, kj::maxValue }; for (auto& testCase: responseTestCases()) { if (testCase.side == SERVER_ONLY) continue; for (size_t fragmentSize: FRAGMENT_SIZES) { KJ_CONTEXT(testCase.raw, fragmentSize); - testHttpClientResponse(waitScope, testCase, fragmentSize); + testHttpClientResponse(waitScope, testCase, fragmentSize, KJ_HTTP_TEST_CREATE_2PIPE); } } } KJ_TEST("HttpClient canceled write") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto serverPromise = pipe.ends[1]->readAllText(); @@ -886,6 +1048,10 @@ KJ_TEST("HttpClient canceled write") { auto req = client->request(HttpMethod::POST, "/", HttpHeaders(table), uint64_t(4096)); + // Note: This poll() forces the server to receive what was written so far. Otherwise, + // cancelling the write below may in fact cancel the previous write as well. + KJ_EXPECT(!serverPromise.poll(waitScope)); + // Start a write and immediately cancel it. 
{ auto ignore KJ_UNUSED = req.body->write(body.begin(), body.size()); @@ -906,10 +1072,9 @@ KJ_TEST("HttpClient canceled write") { } KJ_TEST("HttpClient chunked body gather-write") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto serverPromise = pipe.ends[1]->readAllText(); @@ -957,10 +1122,9 @@ KJ_TEST("HttpClient chunked body pump from fixed length stream") { kj::StringPtr body = "foo bar baz"; }; - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto serverPromise = pipe.ends[1]->readAllText(); @@ -1009,15 +1173,15 @@ KJ_TEST("HttpServer requests") { 3, {"foo"} }; - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); for (auto& testCase: requestTestCases()) { if (testCase.side == CLIENT_ONLY) continue; KJ_CONTEXT(testCase.raw); testHttpServerRequest(waitScope, timer, testCase, - testCase.method == HttpMethod::HEAD ? HEAD_RESPONSE : RESPONSE); + testCase.method == HttpMethod::HEAD ? HEAD_RESPONSE : RESPONSE, + KJ_HTTP_TEST_CREATE_2PIPE); } } @@ -1042,15 +1206,15 @@ KJ_TEST("HttpServer responses") { uint64_t(0), {}, }; - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); for (auto& testCase: responseTestCases()) { if (testCase.side == CLIENT_ONLY) continue; KJ_CONTEXT(testCase.raw); testHttpServerRequest(waitScope, timer, - testCase.method == HttpMethod::HEAD ? HEAD_REQUEST : REQUEST, testCase); + testCase.method == HttpMethod::HEAD ? HEAD_REQUEST : REQUEST, testCase, + KJ_HTTP_TEST_CREATE_2PIPE); } } @@ -1179,6 +1343,22 @@ kj::ArrayPtr pipelineTestCases() { 200, "OK", {}, 7, { "foo bar" } }, }, + + // Zero-length expected size response to HEAD request has no Content-Length header. 
+ { + { + "HEAD / HTTP/1.1\r\n" + "\r\n", + + HttpMethod::HEAD, "/", {}, uint64_t(0), {}, + }, + { + "HTTP/1.1 200 OK\r\n" + "\r\n", + + 200, "OK", {}, uint64_t(0), {}, HttpMethod::HEAD, + }, + }, }; // TODO(cleanup): A bug in GCC 4.8, fixed in 4.9, prevents RESPONSE_TEST_CASES from implicitly @@ -1189,9 +1369,8 @@ kj::ArrayPtr pipelineTestCases() { KJ_TEST("HttpClient pipeline") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; kj::Promise writeResponsesPromise = kj::READY_NOW; for (auto& testCase: PIPELINE_TESTS) { @@ -1219,9 +1398,8 @@ KJ_TEST("HttpClient pipeline") { KJ_TEST("HttpClient parallel pipeline") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; kj::Promise readRequestsPromise = kj::READY_NOW; kj::Promise writeResponsesPromise = kj::READY_NOW; @@ -1283,10 +1461,9 @@ KJ_TEST("HttpClient parallel pipeline") { KJ_TEST("HttpServer pipeline") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; TestHttpService service(PIPELINE_TESTS, table); @@ -1312,10 +1489,9 @@ KJ_TEST("HttpServer pipeline") { KJ_TEST("HttpServer parallel pipeline") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto allRequestText = kj::strArray(KJ_MAP(testCase, PIPELINE_TESTS) { return testCase.request.raw; }, ""); @@ -1342,10 +1518,9 @@ KJ_TEST("HttpServer parallel pipeline") { KJ_TEST("HttpClient <-> HttpServer") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; TestHttpService service(PIPELINE_TESTS, table); @@ -1366,10 +1541,141 @@ KJ_TEST("HttpClient <-> HttpServer") { // ----------------------------------------------------------------------------- +KJ_TEST("HttpInputStream requests") { + KJ_HTTP_TEST_SETUP_IO; + + kj::HttpHeaderTable table; + + auto pipe = kj::newOneWayPipe(); + auto input = newHttpInputStream(*pipe.in, table); + + kj::Promise writeQueue = kj::READY_NOW; + + for (auto& testCase: requestTestCases()) { + writeQueue = writeQueue.then([&]() { + return pipe.out->write(testCase.raw.begin(), testCase.raw.size()); + }); + } + writeQueue = writeQueue.then([&]() { + pipe.out = nullptr; + }); + + for (auto& testCase: requestTestCases()) { + KJ_CONTEXT(testCase.raw); + + KJ_ASSERT(input->awaitNextMessage().wait(waitScope)); + + auto req = input->readRequest().wait(waitScope); + KJ_EXPECT(req.method == testCase.method); + KJ_EXPECT(req.url == testCase.path); + for (auto& header: testCase.requestHeaders) { + KJ_EXPECT(KJ_ASSERT_NONNULL(req.headers.get(header.id)) == header.value); + } + auto body = req.body->readAllText().wait(waitScope); + KJ_EXPECT(body == kj::strArray(testCase.requestBodyParts, "")); + } + + writeQueue.wait(waitScope); + 
KJ_EXPECT(!input->awaitNextMessage().wait(waitScope)); +} + +KJ_TEST("HttpInputStream responses") { + KJ_HTTP_TEST_SETUP_IO; + + kj::HttpHeaderTable table; + + auto pipe = kj::newOneWayPipe(); + auto input = newHttpInputStream(*pipe.in, table); + + kj::Promise writeQueue = kj::READY_NOW; + + for (auto& testCase: responseTestCases()) { + if (testCase.side == CLIENT_ONLY) continue; // skip Connection: close case. + writeQueue = writeQueue.then([&]() { + return pipe.out->write(testCase.raw.begin(), testCase.raw.size()); + }); + } + writeQueue = writeQueue.then([&]() { + pipe.out = nullptr; + }); + + for (auto& testCase: responseTestCases()) { + if (testCase.side == CLIENT_ONLY) continue; // skip Connection: close case. + KJ_CONTEXT(testCase.raw); + + KJ_ASSERT(input->awaitNextMessage().wait(waitScope)); + + auto resp = input->readResponse(testCase.method).wait(waitScope); + KJ_EXPECT(resp.statusCode == testCase.statusCode); + KJ_EXPECT(resp.statusText == testCase.statusText); + for (auto& header: testCase.responseHeaders) { + KJ_EXPECT(KJ_ASSERT_NONNULL(resp.headers.get(header.id)) == header.value); + } + auto body = resp.body->readAllText().wait(waitScope); + KJ_EXPECT(body == kj::strArray(testCase.responseBodyParts, "")); + } + + writeQueue.wait(waitScope); + KJ_EXPECT(!input->awaitNextMessage().wait(waitScope)); +} + +KJ_TEST("HttpInputStream bare messages") { + KJ_HTTP_TEST_SETUP_IO; + + kj::HttpHeaderTable table; + + auto pipe = kj::newOneWayPipe(); + auto input = newHttpInputStream(*pipe.in, table); + + kj::StringPtr messages = + "Content-Length: 6\r\n" + "\r\n" + "foobar" + "Content-Length: 11\r\n" + "Content-Type: some/type\r\n" + "\r\n" + "bazquxcorge" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "6\r\n" + "grault\r\n" + "b\r\n" + "garplywaldo\r\n" + "0\r\n" + "\r\n"_kj; + + kj::Promise writeTask = pipe.out->write(messages.begin(), messages.size()) + .then([&]() { pipe.out = nullptr; }); + + { + KJ_ASSERT(input->awaitNextMessage().wait(waitScope)); + auto message = input->readMessage().wait(waitScope); + KJ_EXPECT(KJ_ASSERT_NONNULL(message.headers.get(HttpHeaderId::CONTENT_LENGTH)) == "6"); + KJ_EXPECT(message.body->readAllText().wait(waitScope) == "foobar"); + } + { + KJ_ASSERT(input->awaitNextMessage().wait(waitScope)); + auto message = input->readMessage().wait(waitScope); + KJ_EXPECT(KJ_ASSERT_NONNULL(message.headers.get(HttpHeaderId::CONTENT_LENGTH)) == "11"); + KJ_EXPECT(KJ_ASSERT_NONNULL(message.headers.get(HttpHeaderId::CONTENT_TYPE)) == "some/type"); + KJ_EXPECT(message.body->readAllText().wait(waitScope) == "bazquxcorge"); + } + { + KJ_ASSERT(input->awaitNextMessage().wait(waitScope)); + auto message = input->readMessage().wait(waitScope); + KJ_EXPECT(KJ_ASSERT_NONNULL(message.headers.get(HttpHeaderId::TRANSFER_ENCODING)) == "chunked"); + KJ_EXPECT(message.body->readAllText().wait(waitScope) == "graultgarplywaldo"); + } + + writeTask.wait(waitScope); + KJ_EXPECT(!input->awaitNextMessage().wait(waitScope)); +} + +// ----------------------------------------------------------------------------- + KJ_TEST("WebSocket core protocol") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto client = newWebSocket(kj::mv(pipe.ends[0]), nullptr); auto server = newWebSocket(kj::mv(pipe.ends[1]), nullptr); @@ -1381,7 +1687,8 @@ KJ_TEST("WebSocket core protocol") { .then([&]() { return client->send(mediumString); }) .then([&]() { return client->send(bigString); }) 
.then([&]() { return client->send(kj::StringPtr("world").asBytes()); }) - .then([&]() { return client->close(1234, "bored"); }); + .then([&]() { return client->close(1234, "bored"); }) + .then([&]() { KJ_EXPECT(client->sentByteCount() == 90307)}); { auto message = server->receive().wait(waitScope); @@ -1412,6 +1719,7 @@ KJ_TEST("WebSocket core protocol") { KJ_ASSERT(message.is()); KJ_EXPECT(message.get().code == 1234); KJ_EXPECT(message.get().reason == "bored"); + KJ_EXPECT(server->receivedByteCount() == 90307); } auto serverTask = server->close(4321, "whatever"); @@ -1421,6 +1729,7 @@ KJ_TEST("WebSocket core protocol") { KJ_ASSERT(message.is()); KJ_EXPECT(message.get().code == 4321); KJ_EXPECT(message.get().reason == "whatever"); + KJ_EXPECT(client->receivedByteCount() == 12); } clientTask.wait(waitScope); @@ -1428,9 +1737,8 @@ KJ_TEST("WebSocket core protocol") { } KJ_TEST("WebSocket fragmented") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto client = kj::mv(pipe.ends[0]); auto server = newWebSocket(kj::mv(pipe.ends[1]), nullptr); @@ -1466,9 +1774,8 @@ public: }; KJ_TEST("WebSocket masked") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; FakeEntropySource maskGenerator; auto client = kj::mv(pipe.ends[0]); @@ -1494,9 +1801,8 @@ KJ_TEST("WebSocket masked") { } KJ_TEST("WebSocket unsolicited pong") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto client = kj::mv(pipe.ends[0]); auto server = newWebSocket(kj::mv(pipe.ends[1]), nullptr); @@ -1521,9 +1827,8 @@ KJ_TEST("WebSocket unsolicited pong") { } KJ_TEST("WebSocket ping") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto client = kj::mv(pipe.ends[0]); auto server = newWebSocket(kj::mv(pipe.ends[1]), nullptr); @@ -1559,9 +1864,8 @@ KJ_TEST("WebSocket ping") { } KJ_TEST("WebSocket ping mid-send") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto client = kj::mv(pipe.ends[0]); auto server = newWebSocket(kj::mv(pipe.ends[1]), nullptr); @@ -1628,6 +1932,10 @@ public: return out->tryPumpFrom(input, amount); } + Promise whenWriteDisconnected() override { + return out->whenWriteDisconnected(); + } + void shutdownWrite() override { out = nullptr; } @@ -1638,8 +1946,7 @@ private: }; KJ_TEST("WebSocket double-ping mid-send") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; auto upPipe = newOneWayPipe(); auto downPipe = newOneWayPipe(); @@ -1676,9 +1983,8 @@ KJ_TEST("WebSocket double-ping mid-send") { } KJ_TEST("WebSocket ping received during pong send") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto client = kj::mv(pipe.ends[0]); auto server = newWebSocket(kj::mv(pipe.ends[1]), nullptr); @@ -1711,15 +2017,56 @@ KJ_TEST("WebSocket ping received during pong send") { clientTask.wait(waitScope); } +KJ_TEST("WebSocket pump byte counting") { + KJ_HTTP_TEST_SETUP_IO; + auto pipe1 = 
KJ_HTTP_TEST_CREATE_2PIPE; + auto pipe2 = KJ_HTTP_TEST_CREATE_2PIPE; + + FakeEntropySource maskGenerator; + auto server1 = newWebSocket(kj::mv(pipe1.ends[1]), nullptr); + auto client2 = newWebSocket(kj::mv(pipe2.ends[0]), maskGenerator); + auto server2 = newWebSocket(kj::mv(pipe2.ends[1]), nullptr); + + auto pumpTask = server1->pumpTo(*client2); + auto receiveTask = server2->receive(); + + // Client sends three bytes of a valid message then disconnects. + const char DATA[] = {0x01, 0x06, 'h'}; + pipe1.ends[0]->write(DATA, 3).wait(waitScope); + pipe1.ends[0] = nullptr; + + // The pump completes successfully, forwarding the disconnect. + pumpTask.wait(waitScope); + + // The eventual receiver gets a disconnect exception. + // (Note: We don't use KJ_EXPECT_THROW here because under -fno-exceptions it forks and we lose + // state.) + receiveTask.then([](auto) { + KJ_FAIL_EXPECT("expected exception"); + }, [](kj::Exception&& e) { + KJ_EXPECT(e.getType() == kj::Exception::Type::DISCONNECTED); + }).wait(waitScope); + + KJ_EXPECT(server1->receivedByteCount() == 3); +#if KJ_NO_RTTI + // Optimized socket pump will be disabled, so only whole messages are counted by client2/server2. + KJ_EXPECT(client2->sentByteCount() == 0); + KJ_EXPECT(server2->receivedByteCount() == 0); +#else + KJ_EXPECT(client2->sentByteCount() == 3); + KJ_EXPECT(server2->receivedByteCount() == 3); +#endif +} + KJ_TEST("WebSocket pump disconnect on send") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe1 = kj::newTwoWayPipe(); - auto pipe2 = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe1 = KJ_HTTP_TEST_CREATE_2PIPE; + auto pipe2 = KJ_HTTP_TEST_CREATE_2PIPE; - auto client1 = newWebSocket(kj::mv(pipe1.ends[0]), nullptr); + FakeEntropySource maskGenerator; + auto client1 = newWebSocket(kj::mv(pipe1.ends[0]), maskGenerator); auto server1 = newWebSocket(kj::mv(pipe1.ends[1]), nullptr); - auto client2 = newWebSocket(kj::mv(pipe2.ends[0]), nullptr); + auto client2 = newWebSocket(kj::mv(pipe2.ends[0]), maskGenerator); auto pumpTask = server1->pumpTo(*client2); auto sendTask = client1->send("hello"_kj); @@ -1732,18 +2079,20 @@ KJ_TEST("WebSocket pump disconnect on send") { // Pump throws disconnected. KJ_EXPECT_THROW_RECOVERABLE(DISCONNECTED, pumpTask.wait(waitScope)); - // client1 managed to send its whole message into the pump, though. - sendTask.wait(waitScope); + // client1 may or may not have been able to send its whole message depending on buffering.
+ sendTask.then([]() {}, [](kj::Exception&& e) { + KJ_EXPECT(e.getType() == kj::Exception::Type::DISCONNECTED); + }).wait(waitScope); } KJ_TEST("WebSocket pump disconnect on receive") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe1 = kj::newTwoWayPipe(); - auto pipe2 = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe1 = KJ_HTTP_TEST_CREATE_2PIPE; + auto pipe2 = KJ_HTTP_TEST_CREATE_2PIPE; + FakeEntropySource maskGenerator; auto server1 = newWebSocket(kj::mv(pipe1.ends[1]), nullptr); - auto client2 = newWebSocket(kj::mv(pipe2.ends[0]), nullptr); + auto client2 = newWebSocket(kj::mv(pipe2.ends[0]), maskGenerator); auto server2 = newWebSocket(kj::mv(pipe2.ends[1]), nullptr); auto pumpTask = server1->pumpTo(*client2); @@ -1889,20 +2238,23 @@ void testWebSocketClient(kj::WaitScope& waitScope, HttpHeaderTable& headerTable, } } +inline kj::Promise writeA(kj::AsyncOutputStream& out, kj::ArrayPtr data) { + return out.write(data.begin(), data.size()); +} + KJ_TEST("HttpClient WebSocket handshake") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto request = kj::str("GET /websocket", WEBSOCKET_REQUEST_HANDSHAKE); auto serverTask = expectRead(*pipe.ends[1], request) - .then([&]() { return pipe.ends[1]->write({asBytes(WEBSOCKET_RESPONSE_HANDSHAKE)}); }) - .then([&]() { return pipe.ends[1]->write({WEBSOCKET_FIRST_MESSAGE_INLINE}); }) + .then([&]() { return writeA(*pipe.ends[1], asBytes(WEBSOCKET_RESPONSE_HANDSHAKE)); }) + .then([&]() { return writeA(*pipe.ends[1], WEBSOCKET_FIRST_MESSAGE_INLINE); }) .then([&]() { return expectRead(*pipe.ends[1], WEBSOCKET_SEND_MESSAGE); }) - .then([&]() { return pipe.ends[1]->write({WEBSOCKET_REPLY_MESSAGE}); }) + .then([&]() { return writeA(*pipe.ends[1], WEBSOCKET_REPLY_MESSAGE); }) .then([&]() { return expectRead(*pipe.ends[1], WEBSOCKET_SEND_CLOSE); }) - .then([&]() { return pipe.ends[1]->write({WEBSOCKET_REPLY_CLOSE}); }) + .then([&]() { return writeA(*pipe.ends[1], WEBSOCKET_REPLY_CLOSE); }) .eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); HttpHeaderTable::Builder tableBuilder; @@ -1921,16 +2273,15 @@ KJ_TEST("HttpClient WebSocket handshake") { } KJ_TEST("HttpClient WebSocket error") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); - auto pipe = kj::newTwoWayPipe(); + KJ_HTTP_TEST_SETUP_IO; + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; auto request = kj::str("GET /websocket", WEBSOCKET_REQUEST_HANDSHAKE); auto serverTask = expectRead(*pipe.ends[1], request) - .then([&]() { return pipe.ends[1]->write({asBytes(WEBSOCKET_RESPONSE_HANDSHAKE_ERROR)}); }) + .then([&]() { return writeA(*pipe.ends[1], asBytes(WEBSOCKET_RESPONSE_HANDSHAKE_ERROR)); }) .then([&]() { return expectRead(*pipe.ends[1], request); }) - .then([&]() { return pipe.ends[1]->write({asBytes(WEBSOCKET_RESPONSE_HANDSHAKE_ERROR)}); }) + .then([&]() { return writeA(*pipe.ends[1], asBytes(WEBSOCKET_RESPONSE_HANDSHAKE_ERROR)); }) .eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); HttpHeaderTable::Builder tableBuilder; @@ -1968,10 +2319,9 @@ KJ_TEST("HttpClient WebSocket error") { } KJ_TEST("HttpServer WebSocket handshake") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable::Builder tableBuilder; HttpHeaderId hMyHeader = 
tableBuilder.add("My-Header"); @@ -1982,23 +2332,22 @@ KJ_TEST("HttpServer WebSocket handshake") { auto listenTask = server.listenHttp(kj::mv(pipe.ends[0])); auto request = kj::str("GET /websocket", WEBSOCKET_REQUEST_HANDSHAKE); - pipe.ends[1]->write({request.asBytes()}).wait(waitScope); + writeA(*pipe.ends[1], request.asBytes()).wait(waitScope); expectRead(*pipe.ends[1], WEBSOCKET_RESPONSE_HANDSHAKE).wait(waitScope); expectRead(*pipe.ends[1], WEBSOCKET_FIRST_MESSAGE_INLINE).wait(waitScope); - pipe.ends[1]->write({WEBSOCKET_SEND_MESSAGE}).wait(waitScope); + writeA(*pipe.ends[1], WEBSOCKET_SEND_MESSAGE).wait(waitScope); expectRead(*pipe.ends[1], WEBSOCKET_REPLY_MESSAGE).wait(waitScope); - pipe.ends[1]->write({WEBSOCKET_SEND_CLOSE}).wait(waitScope); + writeA(*pipe.ends[1], WEBSOCKET_SEND_CLOSE).wait(waitScope); expectRead(*pipe.ends[1], WEBSOCKET_REPLY_CLOSE).wait(waitScope); listenTask.wait(waitScope); } KJ_TEST("HttpServer WebSocket handshake error") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable::Builder tableBuilder; HttpHeaderId hMyHeader = tableBuilder.add("My-Header"); @@ -2009,11 +2358,11 @@ KJ_TEST("HttpServer WebSocket handshake error") { auto listenTask = server.listenHttp(kj::mv(pipe.ends[0])); auto request = kj::str("GET /return-error", WEBSOCKET_REQUEST_HANDSHAKE); - pipe.ends[1]->write({request.asBytes()}).wait(waitScope); + writeA(*pipe.ends[1], request.asBytes()).wait(waitScope); expectRead(*pipe.ends[1], WEBSOCKET_RESPONSE_HANDSHAKE_ERROR).wait(waitScope); // Can send more requests! - pipe.ends[1]->write({request.asBytes()}).wait(waitScope); + writeA(*pipe.ends[1], request.asBytes()).wait(waitScope); expectRead(*pipe.ends[1], WEBSOCKET_RESPONSE_HANDSHAKE_ERROR).wait(waitScope); pipe.ends[1]->shutdownWrite(); @@ -2026,10 +2375,9 @@ KJ_TEST("HttpServer WebSocket handshake error") { KJ_TEST("HttpServer request timeout") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; TestHttpService service(PIPELINE_TESTS, table); @@ -2052,10 +2400,9 @@ KJ_TEST("HttpServer request timeout") { KJ_TEST("HttpServer pipeline timeout") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; TestHttpService service(PIPELINE_TESTS, table); @@ -2106,10 +2453,9 @@ private: KJ_TEST("HttpServer no response") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; BrokenHttpService service; @@ -2134,10 +2480,9 @@ KJ_TEST("HttpServer no response") { KJ_TEST("HttpServer disconnected") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; BrokenHttpService service(KJ_EXCEPTION(DISCONNECTED, 
"disconnected")); @@ -2156,10 +2501,9 @@ KJ_TEST("HttpServer disconnected") { KJ_TEST("HttpServer overloaded") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; BrokenHttpService service(KJ_EXCEPTION(OVERLOADED, "overloaded")); @@ -2178,10 +2522,9 @@ KJ_TEST("HttpServer overloaded") { KJ_TEST("HttpServer unimplemented") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; BrokenHttpService service(KJ_EXCEPTION(UNIMPLEMENTED, "unimplemented")); @@ -2200,10 +2543,9 @@ KJ_TEST("HttpServer unimplemented") { KJ_TEST("HttpServer threw exception") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; BrokenHttpService service(KJ_EXCEPTION(FAILED, "failed")); @@ -2219,69 +2561,255 @@ KJ_TEST("HttpServer threw exception") { KJ_EXPECT(text.startsWith("HTTP/1.1 500 Internal Server Error"), text); } -class PartialResponseService final: public HttpService { - // HttpService that sends a partial response then throws. -public: - kj::Promise request( - HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, - kj::AsyncInputStream& requestBody, Response& response) override { - return requestBody.readAllBytes() - .then([this,&response](kj::Array&&) -> kj::Promise { - HttpHeaders headers(table); - auto body = response.send(200, "OK", headers, 32); - auto promise = body->write("foo", 3); - return promise.attach(kj::mv(body)).then([]() -> kj::Promise { - return KJ_EXCEPTION(FAILED, "failed"); - }); - }); - } +KJ_TEST("HttpServer bad request") { + KJ_HTTP_TEST_SETUP_IO; + kj::TimerImpl timer(kj::origin()); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; -private: - kj::Maybe exception; HttpHeaderTable table; -}; + BrokenHttpService service; + HttpServer server(timer, table, service); -KJ_TEST("HttpServer threw exception after starting response") { - auto PIPELINE_TESTS = pipelineTestCases(); + auto listenTask = server.listenHttp(kj::mv(pipe.ends[0])); + + static constexpr auto request = "GET / HTTP/1.1\r\nbad request\r\n\r\n"_kj; + auto writePromise = pipe.ends[1]->write(request.begin(), request.size()); + auto response = pipe.ends[1]->readAllText().wait(waitScope); + KJ_EXPECT(writePromise.poll(waitScope)); + writePromise.wait(waitScope); + + static constexpr auto expectedResponse = + "HTTP/1.1 400 Bad Request\r\n" + "Connection: close\r\n" + "Content-Length: 53\r\n" + "Content-Type: text/plain\r\n" + "\r\n" + "ERROR: The headers sent by your client are not valid."_kj; + + KJ_EXPECT(expectedResponse == response, expectedResponse, response); +} - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); +KJ_TEST("HttpServer invalid method") { + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; - PartialResponseService service; + BrokenHttpService service; HttpServer server(timer, table, service); auto listenTask = server.listenHttp(kj::mv(pipe.ends[0])); - 
KJ_EXPECT_LOG(ERROR, "HttpService threw exception after generating a partial response"); - - // Do one request. - pipe.ends[1]->write(PIPELINE_TESTS[0].request.raw.begin(), PIPELINE_TESTS[0].request.raw.size()) - .wait(waitScope); - auto text = pipe.ends[1]->readAllText().wait(waitScope); + static constexpr auto request = "bad request\r\n\r\n"_kj; + auto writePromise = pipe.ends[1]->write(request.begin(), request.size()); + auto response = pipe.ends[1]->readAllText().wait(waitScope); + KJ_EXPECT(writePromise.poll(waitScope)); + writePromise.wait(waitScope); - KJ_EXPECT(text == - "HTTP/1.1 200 OK\r\n" - "Content-Length: 32\r\n" + static constexpr auto expectedResponse = + "HTTP/1.1 501 Not Implemented\r\n" + "Connection: close\r\n" + "Content-Length: 35\r\n" + "Content-Type: text/plain\r\n" "\r\n" - "foo", text); + "ERROR: Unrecognized request method."_kj; + + KJ_EXPECT(expectedResponse == response, expectedResponse, response); } -class PartialResponseNoThrowService final: public HttpService { - // HttpService that sends a partial response then returns without throwing. +// Ensure that HttpServerSettings can continue to be constexpr. +KJ_UNUSED static constexpr HttpServerSettings STATIC_CONSTEXPR_SETTINGS {}; + +class TestErrorHandler: public HttpServerErrorHandler { public: - kj::Promise request( - HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, - kj::AsyncInputStream& requestBody, Response& response) override { - return requestBody.readAllBytes() - .then([this,&response](kj::Array&&) -> kj::Promise { - HttpHeaders headers(table); - auto body = response.send(200, "OK", headers, 32); - auto promise = body->write("foo", 3); - return promise.attach(kj::mv(body)); - }); + kj::Promise handleClientProtocolError( + HttpHeaders::ProtocolError protocolError, kj::HttpService::Response& response) override { + // In a real error handler, you should redact `protocolError.rawContent`. 
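The comment above points out that a production error handler should redact `protocolError.rawContent` before echoing it back, since it is untrusted client data. As a standalone illustration (plain C++, not the KJ API; the helper name and the 128-byte cap are arbitrary), one way to do that is to escape non-printable bytes and truncate:

```c++
// Hypothetical redaction helper: cap the length and escape control bytes before the raw
// header content is placed into an error message or a log line.
#include <cctype>
#include <cstddef>
#include <cstdio>
#include <string>

std::string redactRawContent(const std::string& raw, std::size_t maxLen = 128) {
  std::string out;
  std::size_t n = raw.size() < maxLen ? raw.size() : maxLen;
  for (std::size_t i = 0; i < n; i++) {
    unsigned char c = static_cast<unsigned char>(raw[i]);
    if (std::isprint(c)) {
      out += static_cast<char>(c);
    } else {
      char buf[8];
      std::snprintf(buf, sizeof(buf), "\\x%02x", static_cast<unsigned>(c));  // escape control bytes
      out += buf;
    }
  }
  if (raw.size() > maxLen) out += "...(truncated)";
  return out;
}
```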
+ auto message = kj::str("Saw protocol error: ", protocolError.description, "; rawContent = ", + encodeCEscape(protocolError.rawContent)); + return sendError(400, "Bad Request", kj::mv(message), response); + } + + kj::Promise handleApplicationError( + kj::Exception exception, kj::Maybe response) override { + return sendError(500, "Internal Server Error", + kj::str("Saw application error: ", exception.getDescription()), response); + } + + kj::Promise handleNoResponse(kj::HttpService::Response& response) override { + return sendError(500, "Internal Server Error", kj::str("Saw no response."), response); + } + + static TestErrorHandler instance; + +private: + kj::Promise sendError(uint statusCode, kj::StringPtr statusText, String message, + Maybe response) { + KJ_IF_MAYBE(r, response) { + HttpHeaderTable headerTable; + HttpHeaders headers(headerTable); + auto body = r->send(statusCode, statusText, headers, message.size()); + return body->write(message.begin(), message.size()).attach(kj::mv(body), kj::mv(message)); + } else { + KJ_LOG(ERROR, "Saw an error but too late to report to client."); + return kj::READY_NOW; + } + } +}; + +TestErrorHandler TestErrorHandler::instance {}; + +KJ_TEST("HttpServer no response, custom error handler") { + auto PIPELINE_TESTS = pipelineTestCases(); + + KJ_HTTP_TEST_SETUP_IO; + kj::TimerImpl timer(kj::origin()); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; + + HttpServerSettings settings {}; + settings.errorHandler = TestErrorHandler::instance; + + HttpHeaderTable table; + BrokenHttpService service; + HttpServer server(timer, table, service, settings); + + auto listenTask = server.listenHttp(kj::mv(pipe.ends[0])); + + // Do one request. + pipe.ends[1]->write(PIPELINE_TESTS[0].request.raw.begin(), PIPELINE_TESTS[0].request.raw.size()) + .wait(waitScope); + auto text = pipe.ends[1]->readAllText().wait(waitScope); + + KJ_EXPECT(text == + "HTTP/1.1 500 Internal Server Error\r\n" + "Connection: close\r\n" + "Content-Length: 16\r\n" + "\r\n" + "Saw no response.", text); +} + +KJ_TEST("HttpServer threw exception, custom error handler") { + auto PIPELINE_TESTS = pipelineTestCases(); + + KJ_HTTP_TEST_SETUP_IO; + kj::TimerImpl timer(kj::origin()); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; + + HttpServerSettings settings {}; + settings.errorHandler = TestErrorHandler::instance; + + HttpHeaderTable table; + BrokenHttpService service(KJ_EXCEPTION(FAILED, "failed")); + HttpServer server(timer, table, service, settings); + + auto listenTask = server.listenHttp(kj::mv(pipe.ends[0])); + + // Do one request. 
+ pipe.ends[1]->write(PIPELINE_TESTS[0].request.raw.begin(), PIPELINE_TESTS[0].request.raw.size()) + .wait(waitScope); + auto text = pipe.ends[1]->readAllText().wait(waitScope); + + KJ_EXPECT(text == + "HTTP/1.1 500 Internal Server Error\r\n" + "Connection: close\r\n" + "Content-Length: 29\r\n" + "\r\n" + "Saw application error: failed", text); +} + +KJ_TEST("HttpServer bad request, custom error handler") { + KJ_HTTP_TEST_SETUP_IO; + kj::TimerImpl timer(kj::origin()); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; + + HttpServerSettings settings {}; + settings.errorHandler = TestErrorHandler::instance; + + HttpHeaderTable table; + BrokenHttpService service; + HttpServer server(timer, table, service, settings); + + auto listenTask = server.listenHttp(kj::mv(pipe.ends[0])); + + static constexpr auto request = "bad request\r\n\r\n"_kj; + auto writePromise = pipe.ends[1]->write(request.begin(), request.size()); + auto response = pipe.ends[1]->readAllText().wait(waitScope); + KJ_EXPECT(writePromise.poll(waitScope)); + writePromise.wait(waitScope); + + static constexpr auto expectedResponse = + "HTTP/1.1 400 Bad Request\r\n" + "Connection: close\r\n" + "Content-Length: 80\r\n" + "\r\n" + "Saw protocol error: Unrecognized request method.; " + "rawContent = bad request\\000\\n"_kj; + + KJ_EXPECT(expectedResponse == response, expectedResponse, response); +} + +class PartialResponseService final: public HttpService { + // HttpService that sends a partial response then throws. +public: + kj::Promise request( + HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, + kj::AsyncInputStream& requestBody, Response& response) override { + return requestBody.readAllBytes() + .then([this,&response](kj::Array&&) -> kj::Promise { + HttpHeaders headers(table); + auto body = response.send(200, "OK", headers, 32); + auto promise = body->write("foo", 3); + return promise.attach(kj::mv(body)).then([]() -> kj::Promise { + return KJ_EXCEPTION(FAILED, "failed"); + }); + }); + } + +private: + kj::Maybe exception; + HttpHeaderTable table; +}; + +KJ_TEST("HttpServer threw exception after starting response") { + auto PIPELINE_TESTS = pipelineTestCases(); + + KJ_HTTP_TEST_SETUP_IO; + kj::TimerImpl timer(kj::origin()); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; + + HttpHeaderTable table; + PartialResponseService service; + HttpServer server(timer, table, service); + + auto listenTask = server.listenHttp(kj::mv(pipe.ends[0])); + + KJ_EXPECT_LOG(ERROR, "HttpService threw exception after generating a partial response"); + + // Do one request. + pipe.ends[1]->write(PIPELINE_TESTS[0].request.raw.begin(), PIPELINE_TESTS[0].request.raw.size()) + .wait(waitScope); + auto text = pipe.ends[1]->readAllText().wait(waitScope); + + KJ_EXPECT(text == + "HTTP/1.1 200 OK\r\n" + "Content-Length: 32\r\n" + "\r\n" + "foo", text); +} + +class PartialResponseNoThrowService final: public HttpService { + // HttpService that sends a partial response then returns without throwing. 
+public: + kj::Promise request( + HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, + kj::AsyncInputStream& requestBody, Response& response) override { + return requestBody.readAllBytes() + .then([this,&response](kj::Array&&) -> kj::Promise { + HttpHeaders headers(table); + auto body = response.send(200, "OK", headers, 32); + auto promise = body->write("foo", 3); + return promise.attach(kj::mv(body)); + }); } private: @@ -2292,10 +2820,9 @@ private: KJ_TEST("HttpServer failed to write complete response but didn't throw") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; PartialResponseNoThrowService service; @@ -2363,10 +2890,9 @@ private: KJ_TEST("HttpFixedLengthEntityWriter correctly implements tryPumpFrom") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable table; PumpResponseService service; @@ -2387,16 +2913,72 @@ KJ_TEST("HttpFixedLengthEntityWriter correctly implements tryPumpFrom") { "Hello, World!", text); } +class HangingHttpService final: public HttpService { + // HttpService that hangs forever. +public: + kj::Promise request( + HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, + kj::AsyncInputStream& requestBody, Response& responseSender) override { + kj::Promise result = kj::NEVER_DONE; + ++inFlight; + return result.attach(kj::defer([this]() { + if (--inFlight == 0) { + KJ_IF_MAYBE(f, onCancelFulfiller) { + f->get()->fulfill(); + } + } + })); + } + + kj::Promise onCancel() { + auto paf = kj::newPromiseAndFulfiller(); + onCancelFulfiller = kj::mv(paf.fulfiller); + return kj::mv(paf.promise); + } + + uint inFlight = 0; + +private: + kj::Maybe exception; + kj::Maybe>> onCancelFulfiller; +}; + +KJ_TEST("HttpServer cancels request when client disconnects") { + KJ_HTTP_TEST_SETUP_IO; + kj::TimerImpl timer(kj::origin()); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; + + HttpHeaderTable table; + HangingHttpService service; + HttpServer server(timer, table, service); + + auto listenTask = server.listenHttp(kj::mv(pipe.ends[0])); + + KJ_EXPECT(service.inFlight == 0); + + static constexpr auto request = "GET / HTTP/1.1\r\n\r\n"_kj; + pipe.ends[1]->write(request.begin(), request.size()).wait(waitScope); + + auto cancelPromise = service.onCancel(); + KJ_EXPECT(!cancelPromise.poll(waitScope)); + KJ_EXPECT(service.inFlight == 1); + + // Disconnect client and verify server cancels. 
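HangingHttpService above relies on `kj::defer` so that destroying the never-resolving promise (which is what cancellation amounts to) decrements the in-flight counter and notifies the test. The same RAII idea can be shown with a small standalone scope guard; this is illustrative only, not the KJ API:

```c++
// Minimal scope guard: attach a callback to a pending operation so that whenever the
// operation is destroyed (including when it is cancelled), bookkeeping runs exactly once.
#include <functional>
#include <iostream>
#include <utility>

class ScopeGuard {
public:
  explicit ScopeGuard(std::function<void()> fn): fn(std::move(fn)) {}
  ~ScopeGuard() { if (fn) fn(); }
  ScopeGuard(ScopeGuard&& other) noexcept: fn(std::move(other.fn)) { other.fn = nullptr; }
  ScopeGuard(const ScopeGuard&) = delete;
private:
  std::function<void()> fn;
};

int main() {
  int inFlight = 0;
  {
    ++inFlight;
    ScopeGuard guard([&]() {
      if (--inFlight == 0) std::cout << "last request cancelled or finished\n";
    });
    // The "hanging" work would live here; leaving the scope simulates cancellation.
  }
  return inFlight;  // 0
}
```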
+ pipe.ends[1] = nullptr; + KJ_ASSERT(cancelPromise.poll(waitScope)); + KJ_EXPECT(service.inFlight == 0); + cancelPromise.wait(waitScope); +} + // ----------------------------------------------------------------------------- KJ_TEST("newHttpService from HttpClient") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto frontPipe = kj::newTwoWayPipe(); - auto backPipe = kj::newTwoWayPipe(); + auto frontPipe = KJ_HTTP_TEST_CREATE_2PIPE; + auto backPipe = KJ_HTTP_TEST_CREATE_2PIPE; kj::Promise writeResponsesPromise = kj::READY_NOW; for (auto& testCase: PIPELINE_TESTS) { @@ -2433,27 +3015,21 @@ KJ_TEST("newHttpService from HttpClient") { } KJ_TEST("newHttpService from HttpClient WebSockets") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto frontPipe = kj::newTwoWayPipe(); - auto backPipe = kj::newTwoWayPipe(); + auto frontPipe = KJ_HTTP_TEST_CREATE_2PIPE; + auto backPipe = KJ_HTTP_TEST_CREATE_2PIPE; auto request = kj::str("GET /websocket", WEBSOCKET_REQUEST_HANDSHAKE); auto writeResponsesPromise = expectRead(*backPipe.ends[1], request) - .then([&]() { return backPipe.ends[1]->write({asBytes(WEBSOCKET_RESPONSE_HANDSHAKE)}); }) - .then([&]() { return backPipe.ends[1]->write({WEBSOCKET_FIRST_MESSAGE_INLINE}); }) + .then([&]() { return writeA(*backPipe.ends[1], asBytes(WEBSOCKET_RESPONSE_HANDSHAKE)); }) + .then([&]() { return writeA(*backPipe.ends[1], WEBSOCKET_FIRST_MESSAGE_INLINE); }) .then([&]() { return expectRead(*backPipe.ends[1], WEBSOCKET_SEND_MESSAGE); }) - .then([&]() { return backPipe.ends[1]->write({WEBSOCKET_REPLY_MESSAGE}); }) + .then([&]() { return writeA(*backPipe.ends[1], WEBSOCKET_REPLY_MESSAGE); }) .then([&]() { return expectRead(*backPipe.ends[1], WEBSOCKET_SEND_CLOSE); }) - .then([&]() { return backPipe.ends[1]->write({WEBSOCKET_REPLY_CLOSE}); }) - // expect EOF - .then([&]() { return backPipe.ends[1]->readAllBytes(); }) - .then([&](kj::ArrayPtr content) { - KJ_EXPECT(content.size() == 0); - // Send EOF. 
- backPipe.ends[1]->shutdownWrite(); - }) + .then([&]() { return writeA(*backPipe.ends[1], WEBSOCKET_REPLY_CLOSE); }) + .then([&]() { return expectEnd(*backPipe.ends[1]); }) + .then([&]() { backPipe.ends[1]->shutdownWrite(); }) .eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); { @@ -2461,18 +3037,19 @@ KJ_TEST("newHttpService from HttpClient WebSockets") { FakeEntropySource entropySource; HttpClientSettings clientSettings; clientSettings.entropySource = entropySource; - auto backClient = newHttpClient(table, *backPipe.ends[0], clientSettings); + auto backClientStream = kj::mv(backPipe.ends[0]); + auto backClient = newHttpClient(table, *backClientStream, clientSettings); auto frontService = newHttpService(*backClient); HttpServer frontServer(timer, table, *frontService); auto listenTask = frontServer.listenHttp(kj::mv(frontPipe.ends[1])); - frontPipe.ends[0]->write({request.asBytes()}).wait(waitScope); + writeA(*frontPipe.ends[0], request.asBytes()).wait(waitScope); expectRead(*frontPipe.ends[0], WEBSOCKET_RESPONSE_HANDSHAKE).wait(waitScope); expectRead(*frontPipe.ends[0], WEBSOCKET_FIRST_MESSAGE_INLINE).wait(waitScope); - frontPipe.ends[0]->write({WEBSOCKET_SEND_MESSAGE}).wait(waitScope); + writeA(*frontPipe.ends[0], WEBSOCKET_SEND_MESSAGE).wait(waitScope); expectRead(*frontPipe.ends[0], WEBSOCKET_REPLY_MESSAGE).wait(waitScope); - frontPipe.ends[0]->write({WEBSOCKET_SEND_CLOSE}).wait(waitScope); + writeA(*frontPipe.ends[0], WEBSOCKET_SEND_CLOSE).wait(waitScope); expectRead(*frontPipe.ends[0], WEBSOCKET_REPLY_CLOSE).wait(waitScope); frontPipe.ends[0]->shutdownWrite(); @@ -2483,16 +3060,15 @@ KJ_TEST("newHttpService from HttpClient WebSockets") { } KJ_TEST("newHttpService from HttpClient WebSockets disconnect") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto frontPipe = kj::newTwoWayPipe(); - auto backPipe = kj::newTwoWayPipe(); + auto frontPipe = KJ_HTTP_TEST_CREATE_2PIPE; + auto backPipe = KJ_HTTP_TEST_CREATE_2PIPE; auto request = kj::str("GET /websocket", WEBSOCKET_REQUEST_HANDSHAKE); auto writeResponsesPromise = expectRead(*backPipe.ends[1], request) - .then([&]() { return backPipe.ends[1]->write({asBytes(WEBSOCKET_RESPONSE_HANDSHAKE)}); }) - .then([&]() { return backPipe.ends[1]->write({WEBSOCKET_FIRST_MESSAGE_INLINE}); }) + .then([&]() { return writeA(*backPipe.ends[1], asBytes(WEBSOCKET_RESPONSE_HANDSHAKE)); }) + .then([&]() { return writeA(*backPipe.ends[1], WEBSOCKET_FIRST_MESSAGE_INLINE); }) .then([&]() { return expectRead(*backPipe.ends[1], WEBSOCKET_SEND_MESSAGE); }) .then([&]() { backPipe.ends[1]->shutdownWrite(); }) .eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); @@ -2507,11 +3083,11 @@ KJ_TEST("newHttpService from HttpClient WebSockets disconnect") { HttpServer frontServer(timer, table, *frontService); auto listenTask = frontServer.listenHttp(kj::mv(frontPipe.ends[1])); - frontPipe.ends[0]->write({request.asBytes()}).wait(waitScope); + writeA(*frontPipe.ends[0], request.asBytes()).wait(waitScope); expectRead(*frontPipe.ends[0], WEBSOCKET_RESPONSE_HANDSHAKE).wait(waitScope); expectRead(*frontPipe.ends[0], WEBSOCKET_FIRST_MESSAGE_INLINE).wait(waitScope); - frontPipe.ends[0]->write({WEBSOCKET_SEND_MESSAGE}).wait(waitScope); + writeA(*frontPipe.ends[0], WEBSOCKET_SEND_MESSAGE).wait(waitScope); KJ_EXPECT(frontPipe.ends[0]->readAllText().wait(waitScope) == ""); @@ -2527,8 +3103,7 @@ KJ_TEST("newHttpService from HttpClient WebSockets disconnect") { KJ_TEST("newHttpClient 
from HttpService") { auto PIPELINE_TESTS = pipelineTestCases(); - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); HttpHeaderTable table; @@ -2541,10 +3116,9 @@ KJ_TEST("newHttpClient from HttpService") { } KJ_TEST("newHttpClient from HttpService WebSockets") { - kj::EventLoop eventLoop; - kj::WaitScope waitScope(eventLoop); + KJ_HTTP_TEST_SETUP_IO; kj::TimerImpl timer(kj::origin()); - auto pipe = kj::newTwoWayPipe(); + auto pipe = KJ_HTTP_TEST_CREATE_2PIPE; HttpHeaderTable::Builder tableBuilder; HttpHeaderId hMyHeader = tableBuilder.add("My-Header"); @@ -2555,6 +3129,226 @@ KJ_TEST("newHttpClient from HttpService WebSockets") { testWebSocketClient(waitScope, *headerTable, hMyHeader, *client); } +KJ_TEST("adapted client/server propagates request exceptions like non-adapted client") { + KJ_HTTP_TEST_SETUP_IO; + + HttpHeaderTable table; + HttpHeaders headers(table); + + class FailingHttpClient final: public HttpClient { + public: + Request request( + HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, + kj::Maybe expectedBodySize = nullptr) override { + KJ_FAIL_ASSERT("request_fail"); + } + + kj::Promise openWebSocket( + kj::StringPtr url, const HttpHeaders& headers) override { + KJ_FAIL_ASSERT("websocket_fail"); + } + }; + + auto rawClient = kj::heap(); + + auto innerClient = kj::heap(); + auto adaptedService = kj::newHttpService(*innerClient).attach(kj::mv(innerClient)); + auto adaptedClient = kj::newHttpClient(*adaptedService).attach(kj::mv(adaptedService)); + + KJ_EXPECT_THROW_MESSAGE("request_fail", rawClient->request(HttpMethod::POST, "/"_kj, headers)); + KJ_EXPECT_THROW_MESSAGE("request_fail", adaptedClient->request(HttpMethod::POST, "/"_kj, headers)); + + KJ_EXPECT_THROW_MESSAGE("websocket_fail", rawClient->openWebSocket("/"_kj, headers)); + KJ_EXPECT_THROW_MESSAGE("websocket_fail", adaptedClient->openWebSocket("/"_kj, headers)); +} + +class DelayedCompletionHttpService final: public HttpService { +public: + DelayedCompletionHttpService(HttpHeaderTable& table, kj::Maybe expectedLength) + : table(table), expectedLength(expectedLength) {} + + kj::Promise request( + HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, + kj::AsyncInputStream& requestBody, Response& response) override { + auto stream = response.send(200, "OK", HttpHeaders(table), expectedLength); + auto promise = stream->write("foo", 3); + return promise.attach(kj::mv(stream)).then([this]() { + return kj::mv(paf.promise); + }); + } + + kj::PromiseFulfiller& getFulfiller() { return *paf.fulfiller; } + +private: + HttpHeaderTable& table; + kj::Maybe expectedLength; + kj::PromiseFulfillerPair paf = kj::newPromiseAndFulfiller(); +}; + +void doDelayedCompletionTest(bool exception, kj::Maybe expectedLength) noexcept { + KJ_HTTP_TEST_SETUP_IO; + + HttpHeaderTable table; + + DelayedCompletionHttpService service(table, expectedLength); + auto client = newHttpClient(service); + + auto resp = client->request(HttpMethod::GET, "/", HttpHeaders(table), uint64_t(0)) + .response.wait(waitScope); + KJ_EXPECT(resp.statusCode == 200); + + // Read "foo" from the response body: works + char buffer[16]; + KJ_ASSERT(resp.body->tryRead(buffer, 1, sizeof(buffer)).wait(waitScope) == 3); + buffer[3] = '\0'; + KJ_EXPECT(buffer == "foo"_kj); + + // But reading any more hangs. + auto promise = resp.body->tryRead(buffer, 1, sizeof(buffer)); + + KJ_EXPECT(!promise.poll(waitScope)); + + // Until we cause the service to return. 
+ if (exception) { + service.getFulfiller().reject(KJ_EXCEPTION(FAILED, "service-side failure")); + } else { + service.getFulfiller().fulfill(); + } + + KJ_ASSERT(promise.poll(waitScope)); + + if (exception) { + KJ_EXPECT_THROW_MESSAGE("service-side failure", promise.wait(waitScope)); + } else { + promise.wait(waitScope); + } +}; + +KJ_TEST("adapted client waits for service to complete before returning EOF on response stream") { + doDelayedCompletionTest(false, uint64_t(3)); +} + +KJ_TEST("adapted client waits for service to complete before returning EOF on chunked response") { + doDelayedCompletionTest(false, nullptr); +} + +KJ_TEST("adapted client propagates throw from service after complete response body sent") { + doDelayedCompletionTest(true, uint64_t(3)); +} + +KJ_TEST("adapted client propagates throw from service after incomplete response body sent") { + doDelayedCompletionTest(true, uint64_t(6)); +} + +KJ_TEST("adapted client propagates throw from service after chunked response body sent") { + doDelayedCompletionTest(true, nullptr); +} + +class DelayedCompletionWebSocketHttpService final: public HttpService { +public: + DelayedCompletionWebSocketHttpService(HttpHeaderTable& table, bool closeUpstreamFirst) + : table(table), closeUpstreamFirst(closeUpstreamFirst) {} + + kj::Promise request( + HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, + kj::AsyncInputStream& requestBody, Response& response) override { + KJ_ASSERT(headers.isWebSocket()); + + auto ws = response.acceptWebSocket(HttpHeaders(table)); + kj::Promise promise = kj::READY_NOW; + if (closeUpstreamFirst) { + // Wait for a close message from the client before starting. + promise = promise.then([&ws = *ws]() { return ws.receive(); }).ignoreResult(); + } + promise = promise + .then([&ws = *ws]() { return ws.send("foo"_kj); }) + .then([&ws = *ws]() { return ws.close(1234, "closed"_kj); }); + if (!closeUpstreamFirst) { + // Wait for a close message from the client at the end. + promise = promise.then([&ws = *ws]() { return ws.receive(); }).ignoreResult(); + } + return promise.attach(kj::mv(ws)).then([this]() { + return kj::mv(paf.promise); + }); + } + + kj::PromiseFulfiller& getFulfiller() { return *paf.fulfiller; } + +private: + HttpHeaderTable& table; + bool closeUpstreamFirst; + kj::PromiseFulfillerPair paf = kj::newPromiseAndFulfiller(); +}; + +void doDelayedCompletionWebSocketTest(bool exception, bool closeUpstreamFirst) noexcept { + KJ_HTTP_TEST_SETUP_IO; + + HttpHeaderTable table; + + DelayedCompletionWebSocketHttpService service(table, closeUpstreamFirst); + auto client = newHttpClient(service); + + auto resp = client->openWebSocket("/", HttpHeaders(table)).wait(waitScope); + auto ws = kj::mv(KJ_ASSERT_NONNULL(resp.webSocketOrBody.tryGet>())); + + if (closeUpstreamFirst) { + // Send "close" immediately. + ws->close(1234, "whatever"_kj).wait(waitScope); + } + + // Read "foo" from the WebSocket: works + { + auto msg = ws->receive().wait(waitScope); + KJ_ASSERT(msg.is()); + KJ_ASSERT(msg.get() == "foo"); + } + + kj::Promise promise = nullptr; + if (closeUpstreamFirst) { + // Receiving the close hangs. + promise = ws->receive() + .then([](WebSocket::Message&& msg) { KJ_EXPECT(msg.is()); }); + } else { + auto msg = ws->receive().wait(waitScope); + KJ_ASSERT(msg.is()); + + // Sending a close hangs. + promise = ws->close(1234, "whatever"_kj); + } + KJ_EXPECT(!promise.poll(waitScope)); + + // Until we cause the service to return. 
+ if (exception) { + service.getFulfiller().reject(KJ_EXCEPTION(FAILED, "service-side failure")); + } else { + service.getFulfiller().fulfill(); + } + + KJ_ASSERT(promise.poll(waitScope)); + + if (exception) { + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE("service-side failure", promise.wait(waitScope)); + } else { + promise.wait(waitScope); + } +}; + +KJ_TEST("adapted client waits for service to complete before completing upstream close on WebSocket") { + doDelayedCompletionWebSocketTest(false, false); +} + +KJ_TEST("adapted client waits for service to complete before returning downstream close on WebSocket") { + doDelayedCompletionWebSocketTest(false, true); +} + +KJ_TEST("adapted client propagates throw from service after WebSocket upstream close sent") { + doDelayedCompletionWebSocketTest(true, false); +} + +KJ_TEST("adapted client propagates throw from service after WebSocket downstream close sent") { + doDelayedCompletionWebSocketTest(true, true); +} + // ----------------------------------------------------------------------------- class CountingIoStream final: public kj::AsyncIoStream { @@ -2590,6 +3384,9 @@ public: kj::AsyncInputStream& input, uint64_t amount = kj::maxValue) override { return inner->tryPumpFrom(input, amount); } + Promise whenWriteDisconnected() override { + return inner->whenWriteDisconnected(); + } void shutdownWrite() override { return inner->shutdownWrite(); } @@ -2604,16 +3401,18 @@ public: class CountingNetworkAddress final: public kj::NetworkAddress { public: - CountingNetworkAddress(kj::NetworkAddress& inner, uint& count) - : inner(inner), count(count), addrCount(ownAddrCount) {} + CountingNetworkAddress(kj::NetworkAddress& inner, uint& count, uint& cumulative) + : inner(inner), count(count), addrCount(ownAddrCount), cumulative(cumulative) {} CountingNetworkAddress(kj::Own inner, uint& count, uint& addrCount) - : inner(*inner), ownInner(kj::mv(inner)), count(count), addrCount(addrCount) {} + : inner(*inner), ownInner(kj::mv(inner)), count(count), addrCount(addrCount), + cumulative(ownCumulative) {} ~CountingNetworkAddress() noexcept(false) { --addrCount; } kj::Promise> connect() override { ++count; + ++cumulative; return inner.connect() .then([this](kj::Own stream) -> kj::Own { return kj::heap(kj::mv(stream), count); @@ -2630,6 +3429,8 @@ private: uint& count; uint ownAddrCount = 1; uint& addrCount; + uint ownCumulative = 0; + uint& cumulative; }; class ConnectionCountingNetwork final: public kj::Network { @@ -2694,23 +3495,21 @@ private: }; KJ_TEST("HttpClient connection management") { - auto io = kj::setupAsyncIo(); + KJ_HTTP_TEST_SETUP_IO; + KJ_HTTP_TEST_SETUP_LOOPBACK_LISTENER_AND_ADDR; kj::TimerImpl serverTimer(kj::origin()); kj::TimerImpl clientTimer(kj::origin()); HttpHeaderTable headerTable; - auto listener = io.provider->getNetwork().parseAddress("localhost", 0) - .wait(io.waitScope)->listen(); DummyService service(headerTable); HttpServerSettings serverSettings; HttpServer server(serverTimer, headerTable, service, serverSettings); auto listenTask = server.listenHttp(*listener); - auto addr = io.provider->getNetwork().parseAddress("localhost", listener->getPort()) - .wait(io.waitScope); uint count = 0; - CountingNetworkAddress countingAddr(*addr, count); + uint cumulative = 0; + CountingNetworkAddress countingAddr(*addr, count, cumulative); FakeEntropySource entropySource; HttpClientSettings clientSettings; @@ -2718,6 +3517,7 @@ KJ_TEST("HttpClient connection management") { auto client = newHttpClient(clientTimer, headerTable, countingAddr, 
clientSettings); KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 0); uint i = 0; auto doRequest = [&]() { @@ -2732,105 +3532,325 @@ KJ_TEST("HttpClient connection management") { }; // We can do several requests in a row and only have one connection. - doRequest().wait(io.waitScope); - doRequest().wait(io.waitScope); - doRequest().wait(io.waitScope); + doRequest().wait(waitScope); + doRequest().wait(waitScope); + doRequest().wait(waitScope); KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 1); // But if we do two in parallel, we'll end up with two connections. auto req1 = doRequest(); auto req2 = doRequest(); - req1.wait(io.waitScope); - req2.wait(io.waitScope); + req1.wait(waitScope); + req2.wait(waitScope); KJ_EXPECT(count == 2); + KJ_EXPECT(cumulative == 2); // We can reuse after a POST, provided we write the whole POST body properly. { auto req = client->request( HttpMethod::POST, kj::str("/foo"), HttpHeaders(headerTable), size_t(6)); - req.body->write("foobar", 6).wait(io.waitScope); - req.response.wait(io.waitScope).body->readAllBytes().wait(io.waitScope); + req.body->write("foobar", 6).wait(waitScope); + req.response.wait(waitScope).body->readAllBytes().wait(waitScope); } KJ_EXPECT(count == 2); - doRequest().wait(io.waitScope); + KJ_EXPECT(cumulative == 2); + doRequest().wait(waitScope); KJ_EXPECT(count == 2); + KJ_EXPECT(cumulative == 2); // Advance time for half the timeout, then exercise one of the connections. - clientTimer.advanceTo(clientTimer.now() + clientSettings.idleTimout / 2); - doRequest().wait(io.waitScope); - doRequest().wait(io.waitScope); - io.waitScope.poll(); + clientTimer.advanceTo(clientTimer.now() + clientSettings.idleTimeout / 2); + doRequest().wait(waitScope); + doRequest().wait(waitScope); + waitScope.poll(); KJ_EXPECT(count == 2); + KJ_EXPECT(cumulative == 2); // Advance time past when the other connection should time out. It should be dropped. - clientTimer.advanceTo(clientTimer.now() + clientSettings.idleTimout * 3 / 4); - io.waitScope.poll(); + clientTimer.advanceTo(clientTimer.now() + clientSettings.idleTimeout * 3 / 4); + waitScope.poll(); KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 2); // Wait for the other to drop. - clientTimer.advanceTo(clientTimer.now() + clientSettings.idleTimout / 2); - io.waitScope.poll(); + clientTimer.advanceTo(clientTimer.now() + clientSettings.idleTimeout / 2); + waitScope.poll(); KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 2); // New request creates a new connection again. - doRequest().wait(io.waitScope); + doRequest().wait(waitScope); KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 3); // WebSocket connections are not reused. client->openWebSocket(kj::str("/websocket"), HttpHeaders(headerTable)) - .wait(io.waitScope); + .wait(waitScope); KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 3); // Errored connections are not reused. - doRequest().wait(io.waitScope); + doRequest().wait(waitScope); KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 4); client->request(HttpMethod::GET, kj::str("/throw"), HttpHeaders(headerTable)).response - .wait(io.waitScope).body->readAllBytes().wait(io.waitScope); + .wait(waitScope).body->readAllBytes().wait(waitScope); KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 4); // Connections where we failed to read the full response body are not reused. 
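The assertions in this test pin down the reuse policy: a connection goes back to the pool only after a request/response completed cleanly, idle connections are dropped after `idleTimeout`, and `cumulative` counts every connection ever opened. A minimal standalone pool with those rules (hypothetical names, not the KJ implementation) could look like this; the remaining sub-cases of the test continue below.

```c++
// Sketch of an idle-connection pool with an expiry time and a "reusable" flag on check-in.
#include <cassert>
#include <chrono>
#include <vector>

using Clock = std::chrono::steady_clock;

struct PooledConnection { int id; Clock::time_point idleSince; };

class ConnectionPool {
public:
  explicit ConnectionPool(std::chrono::milliseconds idleTimeout): idleTimeout(idleTimeout) {}

  int checkout(Clock::time_point now) {
    expire(now);
    if (!idle.empty()) {
      int id = idle.back().id;    // reuse the most recently idled connection
      idle.pop_back();
      return id;
    }
    return ++cumulative;          // open a brand new connection
  }

  void checkin(int id, bool reusable, Clock::time_point now) {
    // Errored connections, unfinished bodies, and WebSocket upgrades are simply not returned.
    if (reusable) idle.push_back({id, now});
  }

  void expire(Clock::time_point now) {
    std::vector<PooledConnection> keep;
    for (auto& c: idle) {
      if (now - c.idleSince < idleTimeout) keep.push_back(c);
    }
    idle.swap(keep);
  }

  int cumulativeCount() const { return cumulative; }

private:
  std::chrono::milliseconds idleTimeout;
  std::vector<PooledConnection> idle;
  int cumulative = 0;
};

int main() {
  ConnectionPool pool(std::chrono::milliseconds(5000));
  auto t0 = Clock::now();
  int a = pool.checkout(t0);                 // opens connection 1
  pool.checkin(a, /*reusable=*/true, t0);
  int b = pool.checkout(t0);                 // reuses connection 1
  assert(a == b && pool.cumulativeCount() == 1);
  return 0;
}
```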
- doRequest().wait(io.waitScope); + doRequest().wait(waitScope); KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 5); client->request(HttpMethod::GET, kj::str("/foo"), HttpHeaders(headerTable)).response - .wait(io.waitScope); + .wait(waitScope); KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 5); // Connections where we didn't even wait for the response headers are not reused. - doRequest().wait(io.waitScope); + doRequest().wait(waitScope); KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 6); client->request(HttpMethod::GET, kj::str("/foo"), HttpHeaders(headerTable)); KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 6); // Connections where we failed to write the full request body are not reused. - doRequest().wait(io.waitScope); + doRequest().wait(waitScope); KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 7); client->request(HttpMethod::POST, kj::str("/foo"), HttpHeaders(headerTable), size_t(6)).response - .wait(io.waitScope).body->readAllBytes().wait(io.waitScope); + .wait(waitScope).body->readAllBytes().wait(waitScope); KJ_EXPECT(count == 0); - -#if __linux__ - // TODO(someday): Figure out why this doesn't work on Windows and is flakey on Mac. My guess is - // that the closing of the TCP connection propagates synchronously on Linux so that by the time - // we poll() the EventPort it reports the client end of the connection has reached EOF, whereas - // on Mac and Windows this propagation probably involves some concurrent process which may or - // may not complete before we poll(). A solution in this case would be to use a dummy in-memory - // ConnectionReceiver that returns in-memory pipes (see UnbufferedPipe earlier in this file), - // so that we don't rely on any non-local behavior. Another solution would be to pause for - // a short time, maybe. + KJ_EXPECT(cumulative == 7); // If the server times out the connection, we figure it out on the client. - doRequest().wait(io.waitScope); + doRequest().wait(waitScope); + + // TODO(someday): Figure out why the following poll is necessary for the test to pass on Windows + // and Mac. Without it, it seems that the request's connection never starts, so the + // subsequent advanceTo() does not actually time out the connection. + waitScope.poll(); + KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 8); serverTimer.advanceTo(serverTimer.now() + serverSettings.pipelineTimeout * 2); - io.waitScope.poll(); + waitScope.poll(); KJ_EXPECT(count == 0); -#endif + KJ_EXPECT(cumulative == 8); // Can still make requests. 
- doRequest().wait(io.waitScope); + doRequest().wait(waitScope); KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 9); } +KJ_TEST("HttpClient disable connection reuse") { + KJ_HTTP_TEST_SETUP_IO; + KJ_HTTP_TEST_SETUP_LOOPBACK_LISTENER_AND_ADDR; + + kj::TimerImpl serverTimer(kj::origin()); + kj::TimerImpl clientTimer(kj::origin()); + HttpHeaderTable headerTable; + + DummyService service(headerTable); + HttpServerSettings serverSettings; + HttpServer server(serverTimer, headerTable, service, serverSettings); + auto listenTask = server.listenHttp(*listener); + + uint count = 0; + uint cumulative = 0; + CountingNetworkAddress countingAddr(*addr, count, cumulative); + + FakeEntropySource entropySource; + HttpClientSettings clientSettings; + clientSettings.entropySource = entropySource; + clientSettings.idleTimeout = 0 * kj::SECONDS; + auto client = newHttpClient(clientTimer, headerTable, countingAddr, clientSettings); + + KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 0); + + uint i = 0; + auto doRequest = [&]() { + uint n = i++; + return client->request(HttpMethod::GET, kj::str("/", n), HttpHeaders(headerTable)).response + .then([](HttpClient::Response&& response) { + auto promise = response.body->readAllText(); + return promise.attach(kj::mv(response.body)); + }).then([n](kj::String body) { + KJ_EXPECT(body == kj::str("null:/", n)); + }); + }; + + // Each serial request gets its own connection. + doRequest().wait(waitScope); + doRequest().wait(waitScope); + doRequest().wait(waitScope); + KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 3); + + // Each parallel request gets its own connection. + auto req1 = doRequest(); + auto req2 = doRequest(); + req1.wait(waitScope); + req2.wait(waitScope); + KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 5); +} + +KJ_TEST("HttpClient concurrency limiting") { +#if KJ_HTTP_TEST_USE_OS_PIPE && !__linux__ + // On Windows and Mac, OS event delivery is not always immediate, and that seems to make this + // test flakey. On Linux, events are always immediately delivered. For now, we compile the test + // but we don't run it outside of Linux. We do run the in-memory-pipes version on all OSs since + // that mode shouldn't depend on kernel behavior at all. 
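The test that follows asserts a specific sequence of `CallbackEvent{runningCount, pendingCount}` values from `newConcurrencyLimitingHttpClient`. A standalone sketch of that bookkeeping, a slot counter plus a queue of deferred starts with a report after every state change, could look like the following (illustration only, not the KJ implementation):

```c++
// Concurrency limiter: at most `maxConcurrent` tasks run; the rest queue up, and the
// observer callback is invoked with (running, pending) on every transition.
#include <cstddef>
#include <functional>
#include <iostream>
#include <queue>
#include <utility>

class ConcurrencyLimiter {
public:
  ConcurrencyLimiter(std::size_t maxConcurrent, std::function<void(std::size_t, std::size_t)> report)
      : maxConcurrent(maxConcurrent), report(std::move(report)) {}

  void submit(std::function<void()> task) {
    if (running < maxConcurrent) {
      ++running;
      report(running, pending.size());
      task();
    } else {
      pending.push(std::move(task));
      report(running, pending.size());
    }
  }

  void taskFinished() {
    // Called when a running request's response has been fully consumed.
    if (!pending.empty()) {
      auto next = std::move(pending.front());
      pending.pop();
      report(running, pending.size());  // running count unchanged, one fewer pending
      next();
    } else {
      --running;
      report(running, pending.size());
    }
  }

private:
  std::size_t maxConcurrent;
  std::size_t running = 0;
  std::queue<std::function<void()>> pending;
  std::function<void(std::size_t, std::size_t)> report;
};

int main() {
  ConcurrencyLimiter limiter(1, [](std::size_t running, std::size_t pending) {
    std::cout << "{" << running << ", " << pending << "}\n";
  });
  limiter.submit([] {});   // {1, 0}
  limiter.submit([] {});   // {1, 1}: queued behind the first
  limiter.taskFinished();  // {1, 0}: second one starts
  limiter.taskFinished();  // {0, 0}
  return 0;
}
```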
+ return; +#endif + + KJ_HTTP_TEST_SETUP_IO; + KJ_HTTP_TEST_SETUP_LOOPBACK_LISTENER_AND_ADDR; + + kj::TimerImpl serverTimer(kj::origin()); + kj::TimerImpl clientTimer(kj::origin()); + HttpHeaderTable headerTable; + + DummyService service(headerTable); + HttpServerSettings serverSettings; + HttpServer server(serverTimer, headerTable, service, serverSettings); + auto listenTask = server.listenHttp(*listener); + + uint count = 0; + uint cumulative = 0; + CountingNetworkAddress countingAddr(*addr, count, cumulative); + + FakeEntropySource entropySource; + HttpClientSettings clientSettings; + clientSettings.entropySource = entropySource; + clientSettings.idleTimeout = 0 * kj::SECONDS; + auto innerClient = newHttpClient(clientTimer, headerTable, countingAddr, clientSettings); + + struct CallbackEvent { + uint runningCount; + uint pendingCount; + + bool operator==(const CallbackEvent& other) const { + return runningCount == other.runningCount && pendingCount == other.pendingCount; + } + bool operator!=(const CallbackEvent& other) const { return !(*this == other); } + // TODO(someday): Can use default spaceship operator in C++20: + //auto operator<=>(const CallbackEvent&) const = default; + }; + + kj::Vector callbackEvents; + auto callback = [&](uint runningCount, uint pendingCount) { + callbackEvents.add(CallbackEvent{runningCount, pendingCount}); + }; + auto client = newConcurrencyLimitingHttpClient(*innerClient, 1, kj::mv(callback)); + + KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 0); + + uint i = 0; + auto doRequest = [&]() { + uint n = i++; + return client->request(HttpMethod::GET, kj::str("/", n), HttpHeaders(headerTable)).response + .then([](HttpClient::Response&& response) { + auto promise = response.body->readAllText(); + return promise.attach(kj::mv(response.body)); + }).then([n](kj::String body) { + KJ_EXPECT(body == kj::str("null:/", n)); + }); + }; + + // Second connection blocked by first. + auto req1 = doRequest(); + + KJ_EXPECT(callbackEvents == kj::ArrayPtr({ {1, 0} })); + callbackEvents.clear(); + + auto req2 = doRequest(); + + // TODO(someday): Figure out why this poll() is necessary on Windows and macOS. + waitScope.poll(); + + KJ_EXPECT(req1.poll(waitScope)); + KJ_EXPECT(!req2.poll(waitScope)); + KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 1); + KJ_EXPECT(callbackEvents == kj::ArrayPtr({ {1, 1} })); + callbackEvents.clear(); + + // Releasing first connection allows second to start. + req1.wait(waitScope); + KJ_EXPECT(req2.poll(waitScope)); + KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 2); + KJ_EXPECT(callbackEvents == kj::ArrayPtr({ {1, 0} })); + callbackEvents.clear(); + + req2.wait(waitScope); + KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 2); + KJ_EXPECT(callbackEvents == kj::ArrayPtr({ {0, 0} })); + callbackEvents.clear(); + + // Using body stream after releasing blocked response promise throws no exception + auto req3 = doRequest(); + { + kj::Own req4Body; + { + auto req4 = client->request(HttpMethod::GET, kj::str("/", ++i), HttpHeaders(headerTable)); + waitScope.poll(); + req4Body = kj::mv(req4.body); + } + auto writePromise = req4Body->write("a", 1); + KJ_EXPECT(!writePromise.poll(waitScope)); + } + req3.wait(waitScope); + KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 3); + + // Similar connection limiting for web sockets + // TODO(someday): Figure out why the sequencing of websockets events does + // not work correctly on Windows (and maybe macOS?). 
The solution is not as + // simple as inserting poll()s as above, since doing so puts the websocket in + // a state that trips a "previous HTTP message body incomplete" assertion, + // while trying to write 500 network response. + callbackEvents.clear(); + auto ws1 = kj::heap(client->openWebSocket(kj::str("/websocket"), HttpHeaders(headerTable))); + KJ_EXPECT(callbackEvents == kj::ArrayPtr({ {1, 0} })); + callbackEvents.clear(); + auto ws2 = kj::heap(client->openWebSocket(kj::str("/websocket"), HttpHeaders(headerTable))); + KJ_EXPECT(ws1->poll(waitScope)); + KJ_EXPECT(!ws2->poll(waitScope)); + KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 4); + KJ_EXPECT(callbackEvents == kj::ArrayPtr({ {1, 1} })); + callbackEvents.clear(); + + { + auto response1 = ws1->wait(waitScope); + KJ_EXPECT(!ws2->poll(waitScope)); + KJ_EXPECT(callbackEvents == kj::ArrayPtr({})); + } + KJ_EXPECT(ws2->poll(waitScope)); + KJ_EXPECT(count == 1); + KJ_EXPECT(cumulative == 5); + KJ_EXPECT(callbackEvents == kj::ArrayPtr({ {1, 0} })); + callbackEvents.clear(); + { + auto response2 = ws2->wait(waitScope); + KJ_EXPECT(callbackEvents == kj::ArrayPtr({})); + } + KJ_EXPECT(count == 0); + KJ_EXPECT(cumulative == 5); + KJ_EXPECT(callbackEvents == kj::ArrayPtr({ {0, 0} })); +} + +#if KJ_HTTP_TEST_USE_OS_PIPE +// TODO(someday): Implement mock kj::Network for userspace version of this test? KJ_TEST("HttpClient multi host") { auto io = kj::setupAsyncIo(); @@ -2861,14 +3881,16 @@ KJ_TEST("HttpClient multi host") { uint i = 0; auto doRequest = [&](bool tls, uint port) { uint n = i++; + // We stick a double-slash in the URL to test that it doesn't get coalesced into one slash, + // which was a bug in the past. return client->request(HttpMethod::GET, - kj::str((tls ? "https://localhost:" : "http://localhost:"), port, '/', n), + kj::str((tls ? "https://localhost:" : "http://localhost:"), port, "//", n), HttpHeaders(headerTable)).response .then([](HttpClient::Response&& response) { auto promise = response.body->readAllText(); return promise.attach(kj::mv(response.body)); }).then([n, port](kj::String body) { - KJ_EXPECT(body == kj::str("localhost:", port, ":/", n), body, port, n); + KJ_EXPECT(body == kj::str("localhost:", port, "://", n), body, port, n); }); }; @@ -2916,7 +3938,7 @@ KJ_TEST("HttpClient multi host") { KJ_EXPECT(tlsAddrCount == 1); // Let everything expire. - clientTimer.advanceTo(clientTimer.now() + clientSettings.idleTimout * 2); + clientTimer.advanceTo(clientTimer.now() + clientSettings.idleTimeout * 2); io.waitScope.poll(); KJ_EXPECT(count == 0); KJ_EXPECT(tlsCount == 0); @@ -2930,9 +3952,12 @@ KJ_TEST("HttpClient multi host") { KJ_EXPECT(addrCount == 1); KJ_EXPECT(tlsAddrCount == 0); } +#endif // ----------------------------------------------------------------------------- +#if KJ_HTTP_TEST_USE_OS_PIPE +// This test only makes sense using the real network. KJ_TEST("HttpClient to capnproto.org") { auto io = kj::setupAsyncIo(); @@ -2965,6 +3990,142 @@ KJ_TEST("HttpClient to capnproto.org") { auto body = response.body->readAllText().wait(io.waitScope); } } +#endif + +// ======================================================================================= +// Misc bugfix tests + +class ReadCancelHttpService final: public HttpService { + // HttpService that tries to read all request data but cancels after 1ms and sends a response. 
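ReadCancelHttpService (defined just below) races the body read against `timer.afterDelay(1 * kj::MILLISECONDS)` via `exclusiveJoin`, answering 408 if the timer wins. A loose standalone analogy using `std::future::wait_for` as the deadline mechanism (not the KJ promise machinery):

```c++
// Give a slow body read a deadline; if the deadline wins, answer 408 instead of waiting.
#include <chrono>
#include <future>
#include <iostream>
#include <string>
#include <thread>

int main() {
  using namespace std::chrono_literals;

  std::future<std::string> body = std::async(std::launch::async, []() -> std::string {
    std::this_thread::sleep_for(50ms);   // simulates a client that never finishes its body
    return "complete body";
  });

  if (body.wait_for(1ms) == std::future_status::ready) {
    std::cout << "200 OK, got: " << body.get() << "\n";
  } else {
    std::cout << "408 Request Timeout\n";  // the read "loses" the race
  }
  return 0;
}
```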
+public:
+  ReadCancelHttpService(kj::Timer& timer, HttpHeaderTable& headerTable)
+      : timer(timer), headerTable(headerTable) {}
+
+  kj::Promise<void> request(
+      HttpMethod method, kj::StringPtr url, const HttpHeaders& headers,
+      kj::AsyncInputStream& requestBody, Response& responseSender) override {
+    if (method == HttpMethod::POST) {
+      // Try to read all content, but cancel after 1ms.
+      return requestBody.readAllBytes().ignoreResult()
+          .exclusiveJoin(timer.afterDelay(1 * kj::MILLISECONDS))
+          .then([this, &responseSender]() {
+        responseSender.send(408, "Request Timeout", kj::HttpHeaders(headerTable), uint64_t(0));
+      });
+    } else {
+      responseSender.send(200, "OK", kj::HttpHeaders(headerTable), uint64_t(0));
+      return kj::READY_NOW;
+    }
+  }
+
+private:
+  kj::Timer& timer;
+  HttpHeaderTable& headerTable;
+};
+
+KJ_TEST("canceling a length stream mid-read correctly discards rest of request") {
+  KJ_HTTP_TEST_SETUP_IO;
+  kj::TimerImpl timer(kj::origin());
+  auto pipe = KJ_HTTP_TEST_CREATE_2PIPE;
+
+  HttpHeaderTable table;
+  ReadCancelHttpService service(timer, table);
+  HttpServer server(timer, table, service);
+
+  auto listenTask = server.listenHttp(kj::mv(pipe.ends[0]));
+
+  {
+    static constexpr kj::StringPtr REQUEST =
+        "POST / HTTP/1.1\r\n"
+        "Content-Length: 6\r\n"
+        "\r\n"
+        "fooba"_kj;  // incomplete
+    pipe.ends[1]->write(REQUEST.begin(), REQUEST.size()).wait(waitScope);
+
+    auto promise = expectRead(*pipe.ends[1],
+        "HTTP/1.1 408 Request Timeout\r\n"
+        "Content-Length: 0\r\n"
+        "\r\n"_kj);
+
+    KJ_EXPECT(!promise.poll(waitScope));
+
+    // Trigger timeout, then response should be sent.
+    timer.advanceTo(timer.now() + 1 * kj::MILLISECONDS);
+    KJ_ASSERT(promise.poll(waitScope));
+    promise.wait(waitScope);
+  }
+
+  // We left our request stream hanging. The server will try to read and discard the request body.
+  // Let's give it the rest of the data, followed by a second request.
+  {
+    static constexpr kj::StringPtr REQUEST =
+        "r"
+        "GET / HTTP/1.1\r\n"
+        "\r\n"_kj;
+    pipe.ends[1]->write(REQUEST.begin(), REQUEST.size()).wait(waitScope);
+
+    auto promise = expectRead(*pipe.ends[1],
+        "HTTP/1.1 200 OK\r\n"
+        "Content-Length: 0\r\n"
+        "\r\n"_kj);
+    KJ_ASSERT(promise.poll(waitScope));
+    promise.wait(waitScope);
+  }
+}
+
+KJ_TEST("canceling a chunked stream mid-read correctly discards rest of request") {
+  KJ_HTTP_TEST_SETUP_IO;
+  kj::TimerImpl timer(kj::origin());
+  auto pipe = KJ_HTTP_TEST_CREATE_2PIPE;
+
+  HttpHeaderTable table;
+  ReadCancelHttpService service(timer, table);
+  HttpServer server(timer, table, service);
+
+  auto listenTask = server.listenHttp(kj::mv(pipe.ends[0]));
+
+  {
+    static constexpr kj::StringPtr REQUEST =
+        "POST / HTTP/1.1\r\n"
+        "Transfer-Encoding: chunked\r\n"
+        "\r\n"
+        "6\r\n"
+        "fooba"_kj;  // incomplete chunk
+    pipe.ends[1]->write(REQUEST.begin(), REQUEST.size()).wait(waitScope);
+
+    auto promise = expectRead(*pipe.ends[1],
+        "HTTP/1.1 408 Request Timeout\r\n"
+        "Content-Length: 0\r\n"
+        "\r\n"_kj);
+
+    KJ_EXPECT(!promise.poll(waitScope));
+
+    // Trigger timeout, then response should be sent.
+    timer.advanceTo(timer.now() + 1 * kj::MILLISECONDS);
+    KJ_ASSERT(promise.poll(waitScope));
+    promise.wait(waitScope);
+  }
+
+  // We left our request stream hanging. The server will try to read and discard the request body.
+  // Let's give it the rest of the data, followed by a second request.
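For the fixed-length variant above, "discard the rest of the request body" concretely means consuming the one outstanding byte ("r") of the 6-byte Content-Length body before the next pipelined request can be parsed off the same connection. A synchronous standalone sketch of that step, with std::istream standing in for the async stream (the chunked test then continues below):

```c++
// Discard the advertised-but-unread remainder of a fixed-length body so the connection
// can be reused for the next pipelined request.
#include <cstddef>
#include <iostream>
#include <istream>
#include <sstream>
#include <string>

void discardRemaining(std::istream& in, std::size_t remaining) {
  char buf[4096];
  while (remaining > 0 && in) {
    std::size_t n = remaining < sizeof(buf) ? remaining : sizeof(buf);
    in.read(buf, static_cast<std::streamsize>(n));
    remaining -= static_cast<std::size_t>(in.gcount());
  }
}

int main() {
  // The handler consumed 5 of 6 body bytes ("fooba"); 1 byte ("r") is still pending,
  // followed immediately by the next request line.
  std::istringstream conn("rGET / HTTP/1.1\r\n\r\n");
  discardRemaining(conn, 1);

  std::string nextRequestLine;
  std::getline(conn, nextRequestLine);
  std::cout << nextRequestLine << "\n";  // "GET / HTTP/1.1" (plus a trailing '\r')
  return 0;
}
```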
+ { + static constexpr kj::StringPtr REQUEST = + "r\r\n" + "4a\r\n" + "this is some text that is the body of a chunk and not a valid chunk header\r\n" + "0\r\n" + "\r\n" + "GET / HTTP/1.1\r\n" + "\r\n"_kj; + pipe.ends[1]->write(REQUEST.begin(), REQUEST.size()).wait(waitScope); + + auto promise = expectRead(*pipe.ends[1], + "HTTP/1.1 200 OK\r\n" + "Content-Length: 0\r\n" + "\r\n"_kj); + KJ_ASSERT(promise.poll(waitScope)); + promise.wait(waitScope); + } +} } // namespace } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/http.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/http.c++ index 6e0b94bdce8..f8bacf4e92a 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/http.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/http.c++ @@ -27,6 +27,7 @@ #include #include #include +#include #include namespace kj { @@ -40,7 +41,7 @@ namespace kj { // verbatim. But NO, they decided to throw a whole complicated hash algorithm in there, AND // THEY CHOSE A BROKEN ONE THAT WE OTHERWISE WOULDN'T NEED ANYMORE. // -// TODO(cleanup): Move this to a shared hashing library. Maybe. Or maybe don't, becaues no one +// TODO(cleanup): Move this to a shared hashing library. Maybe. Or maybe don't, because no one // should be using SHA-1 anymore. // // THIS USAGE IS NOT SECURITY SENSITIVE. IF YOU REPORT A SECURITY ISSUE BECAUSE YOU SAW SHA1 IN THE @@ -461,13 +462,8 @@ static void requireValidHeaderName(kj::StringPtr name) { } static void requireValidHeaderValue(kj::StringPtr value) { - for (char c: value) { - // While the HTTP spec suggests that only printable ASCII characters are allowed in header - // values, reality has a different opinion. See: https://github.com/httpwg/http11bis/issues/19 - // We follow the browsers' lead. - KJ_REQUIRE(c != '\0' && c != '\r' && c != '\n', "invalid header value", - kj::encodeCEscape(value)); - } + KJ_REQUIRE(HttpHeaders::isValidHeaderValue(value), "invalid header value", + kj::encodeCEscape(value)); } static const char* BUILTIN_HEADER_NAMES[] = { @@ -565,6 +561,19 @@ kj::Maybe HttpHeaderTable::stringToId(kj::StringPtr name) const { // ======================================================================================= +bool HttpHeaders::isValidHeaderValue(kj::StringPtr value) { + for (char c: value) { + // While the HTTP spec suggests that only printable ASCII characters are allowed in header + // values, reality has a different opinion. See: https://github.com/httpwg/http11bis/issues/19 + // We follow the browsers' lead. + if (c == '\0' || c == '\r' || c == '\n') { + return false; + } + } + + return true; +} + HttpHeaders::HttpHeaders(const HttpHeaderTable& table) : table(&table), indexedHeaders(kj::heapArray(table.idCount())) {} @@ -577,6 +586,16 @@ void HttpHeaders::clear() { unindexedHeaders.clear(); } +size_t HttpHeaders::size() const { + size_t result = unindexedHeaders.size(); + for (auto i: kj::indices(indexedHeaders)) { + if (indexedHeaders[i] != nullptr) { + ++result; + } + } + return result; +} + HttpHeaders HttpHeaders::clone() const { HttpHeaders result(*table); @@ -668,9 +687,21 @@ void HttpHeaders::addNoCheck(kj::StringPtr name, kj::StringPtr value) { indexedHeaders[id->id] = value; } else { // Duplicate HTTP headers are equivalent to the values being separated by a comma. 
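The Set-Cookie special case added just below exists because the comma-folding rule quoted above is unsafe for that particular header: cookie attributes such as `Expires` legitimately contain commas, so a folded value cannot be split apart again. A tiny standalone demonstration:

```c++
// Why folding duplicate Set-Cookie headers corrupts them.
#include <iostream>
#include <string>

int main() {
  std::string first  = "session=abc; Expires=Thu, 01 Jan 2026 00:00:00 GMT";
  std::string second = "theme=dark";

  // What the old concatenation behavior produced for a repeated indexed header:
  std::string folded = first + ", " + second;
  std::cout << folded << "\n";
  // "session=abc; Expires=Thu, 01 Jan 2026 00:00:00 GMT, theme=dark"
  // A naive split on ", " now yields three fragments, none of which round-trips correctly,
  // which is why the patch keeps repeated Set-Cookie entries as separate unindexed headers.
  return 0;
}
```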
- auto concat = kj::str(indexedHeaders[id->id], ", ", value); - indexedHeaders[id->id] = concat; - ownedStrings.add(concat.releaseArray()); + +#if _MSC_VER + if (_stricmp(name.cStr(), "set-cookie") == 0) { +#else + if (strcasecmp(name.cStr(), "set-cookie") == 0) { +#endif + // Uh-oh, Set-Cookie will be corrupted if we try to concatenate it. We'll make it an + // unindexed header, which is weird, but the alternative is guaranteed corruption, so... + // TODO(cleanup): Maybe HttpHeaders should just special-case set-cookie in general? + unindexedHeaders.add(Header {name, value}); + } else { + auto concat = kj::str(indexedHeaders[id->id], ", ", value); + indexedHeaders[id->id] = concat; + ownedStrings.add(concat.releaseArray()); + } } } else { unindexedHeaders.add(Header {name, value}); @@ -844,9 +875,12 @@ static char* trimHeaderEnding(kj::ArrayPtr content) { return end; } -kj::Maybe HttpHeaders::tryParseRequest(kj::ArrayPtr content) { +HttpHeaders::RequestOrProtocolError HttpHeaders::tryParseRequest(kj::ArrayPtr content) { char* end = trimHeaderEnding(content); - if (end == nullptr) return nullptr; + if (end == nullptr) { + return ProtocolError { 400, "Bad Request", + "Request headers have no terminal newline.", content }; + } char* ptr = content.begin(); @@ -855,54 +889,79 @@ kj::Maybe HttpHeaders::tryParseRequest(kj::ArrayPtr KJ_IF_MAYBE(method, consumeHttpMethod(ptr)) { request.method = *method; if (*ptr != ' ' && *ptr != '\t') { - return nullptr; + return ProtocolError { 501, "Not Implemented", + "Unrecognized request method.", content }; } ++ptr; } else { - return nullptr; + return ProtocolError { 501, "Not Implemented", + "Unrecognized request method.", content }; } KJ_IF_MAYBE(path, consumeWord(ptr)) { request.url = *path; } else { - return nullptr; + return ProtocolError { 400, "Bad Request", + "Invalid request line.", content }; } // Ignore rest of line. Don't care about "HTTP/1.1" or whatever. 
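Throughout this hunk the parser now returns a structured `ProtocolError` (status code, status text, description, raw content) instead of a bare `nullptr`, which is what lets the server and the custom error handlers earlier in the test file produce meaningful 400/501/502 responses. A standalone sketch of that "result or protocol error" shape, using `std::variant` purely for illustration (the real type is declared in the KJ headers, and the method table here is a tiny stand-in):

```c++
// Parse a request line into either a result or a structured protocol error.
#include <iostream>
#include <string>
#include <variant>

struct ParsedRequest { std::string method; std::string url; };
struct ProtocolError {
  int statusCode;
  std::string statusText;
  std::string description;
};
using RequestOrProtocolError = std::variant<ParsedRequest, ProtocolError>;

bool isKnownMethod(const std::string& m) {
  // Stand-in for the full HTTP method table.
  return m == "GET" || m == "POST" || m == "PUT" || m == "DELETE" || m == "HEAD";
}

RequestOrProtocolError parseRequestLine(const std::string& line) {
  auto firstSpace = line.find(' ');
  std::string method = line.substr(0, firstSpace);            // npos -> whole line
  if (!isKnownMethod(method)) {
    return ProtocolError{501, "Not Implemented", "Unrecognized request method."};
  }
  if (firstSpace == std::string::npos) {
    return ProtocolError{400, "Bad Request", "Invalid request line."};
  }
  auto secondSpace = line.find(' ', firstSpace + 1);
  if (secondSpace == std::string::npos) {
    return ProtocolError{400, "Bad Request", "Invalid request line."};
  }
  return ParsedRequest{method, line.substr(firstSpace + 1, secondSpace - firstSpace - 1)};
}

int main() {
  auto result = parseRequestLine("bad request HTTP/1.1");
  if (auto* err = std::get_if<ProtocolError>(&result)) {
    std::cout << err->statusCode << " " << err->statusText
              << ": " << err->description << "\n";   // 501 Not Implemented: ...
  }
  return 0;
}
```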
consumeLine(ptr); - if (!parseHeaders(ptr, end)) return nullptr; + if (!parseHeaders(ptr, end)) { + return ProtocolError { 400, "Bad Request", + "The headers sent by your client are not valid.", content }; + } return request; } -kj::Maybe HttpHeaders::tryParseResponse(kj::ArrayPtr content) { +HttpHeaders::ResponseOrProtocolError HttpHeaders::tryParseResponse(kj::ArrayPtr content) { char* end = trimHeaderEnding(content); - if (end == nullptr) return nullptr; + if (end == nullptr) { + return ProtocolError { 502, "Bad Gateway", + "Response headers have no terminal newline.", content }; + } char* ptr = content.begin(); HttpHeaders::Response response; KJ_IF_MAYBE(version, consumeWord(ptr)) { - if (!version->startsWith("HTTP/")) return nullptr; + if (!version->startsWith("HTTP/")) { + return ProtocolError { 502, "Bad Gateway", + "Invalid response status line (invalid protocol).", content }; + } } else { - return nullptr; + return ProtocolError { 502, "Bad Gateway", + "Invalid response status line (no spaces).", content }; } KJ_IF_MAYBE(code, consumeNumber(ptr)) { response.statusCode = *code; } else { - return nullptr; + return ProtocolError { 502, "Bad Gateway", + "Invalid response status line (invalid status code).", content }; } response.statusText = consumeLine(ptr); - if (!parseHeaders(ptr, end)) return nullptr; + if (!parseHeaders(ptr, end)) { + return ProtocolError { 502, "Bad Gateway", + "The headers sent by the server are not valid.", content }; + } return response; } +bool HttpHeaders::tryParse(kj::ArrayPtr content) { + char* end = trimHeaderEnding(content); + if (end == nullptr) return false; + + char* ptr = content.begin(); + return parseHeaders(ptr, end); +} + bool HttpHeaders::parseHeaders(char* ptr, char* end) { while (*ptr != '\0') { KJ_IF_MAYBE(name, consumeHeaderName(ptr)) { @@ -985,12 +1044,12 @@ kj::String HttpHeaders::toString() const { namespace { static constexpr size_t MIN_BUFFER = 4096; -static constexpr size_t MAX_BUFFER = 65536; +static constexpr size_t MAX_BUFFER = 128 * 1024; static constexpr size_t MAX_CHUNK_HEADER_SIZE = 32; -class HttpInputStream { +class HttpInputStreamImpl final: public HttpInputStream { public: - explicit HttpInputStream(AsyncIoStream& inner, HttpHeaderTable& table) + explicit HttpInputStreamImpl(AsyncInputStream& inner, const HttpHeaderTable& table) : inner(inner), headerBuffer(kj::heapArray(MIN_BUFFER)), headers(table) { } @@ -998,6 +1057,45 @@ public: return !broken && pendingMessageCount == 0; } + // --------------------------------------------------------------------------- + // public interface + + kj::Promise readRequest() override { + return readRequestHeaders() + .then([this](HttpHeaders::RequestOrProtocolError&& requestOrProtocolError) + -> HttpInputStream::Request { + auto request = KJ_REQUIRE_NONNULL( + requestOrProtocolError.tryGet(), "bad request"); + auto body = getEntityBody(HttpInputStreamImpl::REQUEST, request.method, 0, headers); + + return { request.method, request.url, headers, kj::mv(body) }; + }); + } + + kj::Promise readResponse(HttpMethod requestMethod) override { + return readResponseHeaders() + .then([this,requestMethod](HttpHeaders::ResponseOrProtocolError&& responseOrProtocolError) + -> HttpInputStream::Response { + auto response = KJ_REQUIRE_NONNULL( + responseOrProtocolError.tryGet(), "bad response"); + auto body = getEntityBody(HttpInputStreamImpl::RESPONSE, requestMethod, + response.statusCode, headers); + + return { response.statusCode, response.statusText, headers, kj::mv(body) }; + }); + } + + kj::Promise 
readMessage() override { + return readMessageHeaders() + .then([this](kj::ArrayPtr text) -> HttpInputStream::Message { + headers.clear(); + KJ_REQUIRE(headers.tryParse(text), "bad message"); + auto body = getEntityBody(HttpInputStreamImpl::RESPONSE, HttpMethod::GET, 0, headers); + + return { headers, kj::mv(body) }; + }); + } + // --------------------------------------------------------------------------- // Stream locking: While an entity-body is being read, the body stream "locks" the underlying // HTTP stream. Once the entity-body is complete, we can read the next pipelined message. @@ -1022,7 +1120,7 @@ public: // --------------------------------------------------------------------------- - kj::Promise awaitNextMessage() { + kj::Promise awaitNextMessage() override { // Waits until more data is available, but doesn't consume it. Returns false on EOF. // // Used on the server after a request is handled, to check for pipelined requests. @@ -1104,14 +1202,14 @@ public: }); } - inline kj::Promise> readRequestHeaders() { + inline kj::Promise readRequestHeaders() { return readMessageHeaders().then([this](kj::ArrayPtr text) { headers.clear(); return headers.tryParseRequest(text); }); } - inline kj::Promise> readResponseHeaders() { + inline kj::Promise readResponseHeaders() { // Note: readResponseHeaders() could be called multiple times concurrently when pipelining // requests. readMessageHeaders() will serialize these, but it's important not to mess with // state (like calling headers.clear()) before said serialization has taken place. @@ -1172,7 +1270,7 @@ public: } private: - AsyncIoStream& inner; + AsyncInputStream& inner; kj::Array headerBuffer; size_t messageHeaderEnd = 0; @@ -1186,7 +1284,7 @@ private: // Parsed headers, after a call to parseAwaited*(). bool lineBreakBeforeNextHeader = false; - // If true, the next await should expect to start with a spurrious '\n' or '\r\n'. This happens + // If true, the next await should expect to start with a spurious '\n' or '\r\n'. This happens // as a side-effect of HTTP chunked encoding, where such a newline is added to the end of each // chunk, for no good reason. @@ -1232,7 +1330,7 @@ private: readPromise = leftover.size(); leftover = nullptr; } else { - // Need to read more data from the unfderlying stream. + // Need to read more data from the underlying stream. if (bufferEnd == headerBuffer.size()) { // Out of buffer space. @@ -1367,7 +1465,7 @@ private: class HttpEntityBodyReader: public kj::AsyncInputStream { public: - HttpEntityBodyReader(HttpInputStream& inner): inner(inner) {} + HttpEntityBodyReader(HttpInputStreamImpl& inner): inner(inner) {} ~HttpEntityBodyReader() noexcept(false) { if (!finished) { inner.abortRead(); @@ -1375,7 +1473,7 @@ public: } protected: - HttpInputStream& inner; + HttpInputStreamImpl& inner; void doneReading() { KJ_REQUIRE(!finished); @@ -1394,7 +1492,7 @@ class HttpNullEntityReader final: public HttpEntityBodyReader { // may indicate non-zero in the special case of a response to a HEAD request. public: - HttpNullEntityReader(HttpInputStream& inner, kj::Maybe length) + HttpNullEntityReader(HttpInputStreamImpl& inner, kj::Maybe length) : HttpEntityBodyReader(inner), length(length) { // `length` is what to return from tryGetLength(). For a response to a HEAD request, this may // be non-zero. @@ -1417,7 +1515,7 @@ class HttpConnectionCloseEntityReader final: public HttpEntityBodyReader { // Stream which reads until EOF. 
public: - HttpConnectionCloseEntityReader(HttpInputStream& inner) + HttpConnectionCloseEntityReader(HttpInputStreamImpl& inner) : HttpEntityBodyReader(inner) {} Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { @@ -1437,7 +1535,7 @@ class HttpFixedLengthEntityReader final: public HttpEntityBodyReader { // Stream which reads only up to a fixed length from the underlying stream, then emulates EOF. public: - HttpFixedLengthEntityReader(HttpInputStream& inner, size_t length) + HttpFixedLengthEntityReader(HttpInputStreamImpl& inner, size_t length) : HttpEntityBodyReader(inner), length(length) { if (length == 0) doneReading(); } @@ -1447,30 +1545,45 @@ public: } Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { + return tryReadInternal(buffer, minBytes, maxBytes, 0); + } + +private: + size_t length; + + Promise tryReadInternal(void* buffer, size_t minBytes, size_t maxBytes, + size_t alreadyRead) { if (length == 0) return size_t(0); - return inner.tryRead(buffer, kj::min(minBytes, length), kj::min(maxBytes, length)) - .then([=](size_t amount) { + // We have to set minBytes to 1 here so that if we read any data at all, we update our + // counter immediately, so that we still know where we are in case of cancellation. + return inner.tryRead(buffer, 1, kj::min(maxBytes, length)) + .then([=](size_t amount) -> kj::Promise { length -= amount; - if (length > 0 && amount < minBytes) { - kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, - "premature EOF in HTTP entity body; did not reach Content-Length")); + if (length > 0) { + // We haven't reached the end of the entity body yet. + if (amount == 0) { + kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, + "premature EOF in HTTP entity body; did not reach Content-Length")); + } else if (amount < minBytes) { + // We requested a minimum 1 byte above, but our own caller actually set a larger minimum + // which has not yet been reached. Keep trying until we reach it. + return tryReadInternal(reinterpret_cast(buffer) + amount, + minBytes - amount, maxBytes - amount, alreadyRead + amount); + } } else if (length == 0) { doneReading(); } - return amount; + return amount + alreadyRead; }); } - -private: - size_t length; }; class HttpChunkedEntityReader final: public HttpEntityBodyReader { // Stream which reads a Transfer-Encoding: Chunked stream. public: - HttpChunkedEntityReader(HttpInputStream& inner) + HttpChunkedEntityReader(HttpInputStreamImpl& inner) : HttpEntityBodyReader(inner) {} Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { @@ -1494,23 +1607,21 @@ private: chunkSize = nextChunkSize; return tryReadInternal(buffer, minBytes, maxBytes, alreadyRead); }); - } else if (chunkSize < minBytes) { - // Read entire current chunk and continue to next chunk. - return inner.tryRead(buffer, chunkSize, chunkSize) + } else { + // Read current chunk. + // We have to set minBytes to 1 here so that if we read any data at all, we update our + // counter immediately, so that we still know where we are in case of cancellation. 
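The rewritten HttpFixedLengthEntityReader above always requests at least one byte from the inner stream, so its remaining-length counter is updated after every partial read (which matters if the caller cancels mid-read), and then loops until the caller's own minimum is satisfied. A generic sketch of that pattern with a hypothetical helper name; the caller must keep `inner` and `buffer` alive until the promise resolves.

```c++
#include <kj/async-io.h>

kj::Promise<size_t> readAtLeast(kj::AsyncInputStream& inner, void* buffer,
                                size_t minBytes, size_t maxBytes, size_t alreadyRead = 0) {
  // Ask for at least 1 byte so every partial read is observed immediately.
  return inner.tryRead(buffer, 1, maxBytes)
      .then([&inner, buffer, minBytes, maxBytes, alreadyRead](size_t n) -> kj::Promise<size_t> {
    if (n == 0 || n >= minBytes) {
      // EOF, or the caller's minimum has been satisfied.
      return alreadyRead + n;
    }
    // Partial progress: account for it, then keep reading toward the caller's minimum.
    return readAtLeast(inner, reinterpret_cast<kj::byte*>(buffer) + n,
                       minBytes - n, maxBytes - n, alreadyRead + n);
  });
}
```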
+ return inner.tryRead(buffer, 1, kj::min(maxBytes, chunkSize)) .then([=](size_t amount) -> kj::Promise { chunkSize -= amount; - if (chunkSize > 0) { - return KJ_EXCEPTION(DISCONNECTED, "premature EOF in HTTP chunk"); + if (amount == 0) { + kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, "premature EOF in HTTP chunk")); + } else if (amount < minBytes) { + // We requested a minimum 1 byte above, but our own caller actually set a larger minimum + // which has not yet been reached. Keep trying until we reach it. + return tryReadInternal(reinterpret_cast(buffer) + amount, + minBytes - amount, maxBytes - amount, alreadyRead + amount); } - - return tryReadInternal(reinterpret_cast(buffer) + amount, - minBytes - amount, maxBytes - amount, alreadyRead + amount); - }); - } else { - // Read only part of the current chunk. - return inner.tryRead(buffer, minBytes, kj::min(maxBytes, chunkSize)) - .then([=](size_t amount) -> size_t { - chunkSize -= amount; return alreadyRead + amount; }); } @@ -1524,10 +1635,8 @@ template struct FastCaseCmp { static constexpr bool apply(const char* actual) { return - 'a' <= first && first <= 'z' - ? (*actual | 0x20) == first && FastCaseCmp::apply(actual + 1) - : 'A' <= first && first <= 'Z' - ? (*actual & ~0x20) == first && FastCaseCmp::apply(actual + 1) + ('a' <= first && first <= 'z') || ('A' <= first && first <= 'Z') + ? (*actual | 0x20) == (first | 0x20) && FastCaseCmp::apply(actual + 1) : *actual == first && FastCaseCmp::apply(actual + 1); } }; @@ -1551,60 +1660,114 @@ static_assert(!fastCaseCmp<'n','O','o','B','1'>("FooB1"), ""); static_assert(!fastCaseCmp<'f','O','o','B'>("FooB1"), ""); static_assert(!fastCaseCmp<'f','O','o','B','1','a'>("FooB1"), ""); -kj::Own HttpInputStream::getEntityBody( +kj::Own HttpInputStreamImpl::getEntityBody( RequestOrResponse type, HttpMethod method, uint statusCode, const kj::HttpHeaders& headers) { + // Rules to determine how HTTP entity-body is delimited: + // https://tools.ietf.org/html/rfc7230#section-3.3.3 + + // #1 if (type == RESPONSE) { if (method == HttpMethod::HEAD) { // Body elided. kj::Maybe length; KJ_IF_MAYBE(cl, headers.get(HttpHeaderId::CONTENT_LENGTH)) { length = strtoull(cl->cStr(), nullptr, 10); + } else if (headers.get(HttpHeaderId::TRANSFER_ENCODING) == nullptr) { + // HACK: Neither Content-Length nor Transfer-Encoding header in response to HEAD request. + // Propagate this fact with a 0 expected body length. + length = uint64_t(0); } return kj::heap(*this, length); - } else if (statusCode == 204 || statusCode == 205 || statusCode == 304) { + } else if (statusCode == 204 || statusCode == 304) { // No body. return kj::heap(*this, uint64_t(0)); } } + // #2 deals with the CONNECT method which is handled separately. + + // #3 KJ_IF_MAYBE(te, headers.get(HttpHeaderId::TRANSFER_ENCODING)) { // TODO(someday): Support plugable transfer encodings? Or at least gzip? // TODO(someday): Support stacked transfer encodings, e.g. "gzip, chunked". + + // NOTE: #3¶3 is ambiguous about what should happen if Transfer-Encoding and Content-Length are + // both present. It says that Transfer-Encoding takes precedence, but also that the request + // "ought to be handled as an error", and that proxies "MUST" drop the Content-Length before + // forwarding. We ignore the vague "ought to" part and implement the other two. (The + // dropping of Content-Length will happen naturally if/when the message is sent back out to + // the network.) 
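The FastCaseCmp change above folds the upper-case and lower-case branches into one: for an ASCII letter, OR-ing both characters with 0x20 compares them case-insensitively. A standalone runtime sketch of the same trick (not the template from the patch):

```c++
constexpr bool asciiCaseEqual(char a, char b) {
  bool aIsLetter = ('a' <= a && a <= 'z') || ('A' <= a && a <= 'Z');
  // For letters, |0x20 maps both cases to lower case; everything else must match exactly.
  return aIsLetter ? (a | 0x20) == (b | 0x20) : a == b;
}

constexpr bool equalsIgnoreCase(const char* a, const char* b) {
  while (*a != '\0' && *b != '\0') {
    if (!asciiCaseEqual(*a++, *b++)) return false;
  }
  return *a == *b;  // both strings must end at the same position
}

static_assert(equalsIgnoreCase("Chunked", "chunked"), "");
static_assert(!equalsIgnoreCase("chunked", "chunk"), "");
```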
if (fastCaseCmp<'c','h','u','n','k','e','d'>(te->cStr())) { + // #3¶1 return kj::heap(*this); + } else if (fastCaseCmp<'i','d','e','n','t','i','t','y'>(te->cStr())) { + // #3¶2 + KJ_REQUIRE(type != REQUEST, "request body cannot have Transfer-Encoding other than chunked"); + return kj::heap(*this); } else { KJ_FAIL_REQUIRE("unknown transfer encoding", *te) { break; } } } + // #4 and #5 KJ_IF_MAYBE(cl, headers.get(HttpHeaderId::CONTENT_LENGTH)) { - return kj::heap(*this, strtoull(cl->cStr(), nullptr, 10)); + // NOTE: By spec, multiple Content-Length values are allowed as long as they are the same, e.g. + // "Content-Length: 5, 5, 5". Hopefully no one actually does that... + char* end; + uint64_t length = strtoull(cl->cStr(), &end, 10); + if (end > cl->begin() && *end == '\0') { + // #5 + return kj::heap(*this, length); + } else { + // #4 (bad content-length) + KJ_FAIL_REQUIRE("invalid Content-Length header value", *cl); + } } + // #6 if (type == REQUEST) { // Lack of a Content-Length or Transfer-Encoding means no body for requests. return kj::heap(*this, uint64_t(0)); } - KJ_IF_MAYBE(c, headers.get(HttpHeaderId::CONNECTION)) { - // TODO(someday): Connection header can actually have multiple tokens... but no one ever uses - // that feature? - if (fastCaseCmp<'c','l','o','s','e'>(c->cStr())) { - return kj::heap(*this); + // RFC 2616 permitted "multipart/byteranges" responses to be self-delimiting, but this was + // mercifully removed in RFC 7230, and new exceptions of this type are disallowed: + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.4 + // https://tools.ietf.org/html/rfc7230#page-81 + // To be extra-safe, we'll reject a multipart/byteranges response that lacks transfer-encoding + // and content-length. + KJ_IF_MAYBE(type, headers.get(HttpHeaderId::CONTENT_TYPE)) { + if (type->startsWith("multipart/byteranges")) { + KJ_FAIL_REQUIRE( + "refusing to handle multipart/byteranges response without transfer-encoding nor " + "content-length due to ambiguity between RFC 2616 vs RFC 7230."); } } - KJ_FAIL_REQUIRE("don't know how HTTP body is delimited", headers); - return kj::heap(*this, uint64_t(0)); + // #7 + return kj::heap(*this); +} + +} // namespace + +kj::Own newHttpInputStream( + kj::AsyncInputStream& input, const HttpHeaderTable& table) { + return kj::heap(input, table); } // ======================================================================================= +namespace { + class HttpOutputStream { public: HttpOutputStream(AsyncOutputStream& inner): inner(inner) {} + bool isInBody() { + return inBody; + } + bool canReuse() { return !inBody && !broken && !writeInProgress; } @@ -1685,6 +1848,16 @@ public: KJ_REQUIRE(inBody) { return; } inBody = false; + + if (writeInProgress) { + // It looks like the last write never completed -- possibly because it was canceled or threw + // an exception. We must treat this equivalent to abortBody(). + broken = true; + + // Cancel any writes that are still queued. + writeQueue = KJ_EXCEPTION(FAILED, + "previous HTTP message body incomplete; can't write more messages"); + } } void abortBody() { @@ -1693,10 +1866,9 @@ public: inBody = false; broken = true; - writeQueue = writeQueue.then([]() -> kj::Promise { - return KJ_EXCEPTION(FAILED, - "previous HTTP message body incomplete; can't write more messages"); - }); + // Cancel any writes that are still queued. 
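The Content-Length branch above now accepts the header only if strtoull consumes the entire value, rejecting things like "5, 5, 5" or trailing junk that the old code silently truncated. Roughly, outside the KJ types (hypothetical helper; overflow handling is omitted, as in the patch):

```c++
#include <cstdlib>
#include <cstdint>

bool tryParseContentLength(const char* value, std::uint64_t& out) {
  char* end;
  out = std::strtoull(value, &end, 10);
  // `end > value` rejects an empty value; `*end == '\0'` rejects anything after the digits.
  return end > value && *end == '\0';
}
```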
+ writeQueue = KJ_EXCEPTION(FAILED, + "previous HTTP message body incomplete; can't write more messages"); } kj::Promise flush() { @@ -1705,6 +1877,12 @@ public: return fork.addBranch(); } + Promise whenWriteDisconnected() { + return inner.whenWriteDisconnected(); + } + + bool isWriteInProgress() { return writeInProgress; } + private: AsyncOutputStream& inner; kj::Promise writeQueue = kj::READY_NOW; @@ -1717,6 +1895,13 @@ private: // underlying stream is in an inconsistent state and cannot be reused. void queueWrite(kj::String content) { + // We only use queueWrite() in cases where we can take ownership of the write buffer, and where + // it is convenient if we can return `void` rather than a promise. In particular, this is used + // to write headers and chunk boundaries. Writes of application data do not go into + // `writeQueue` because this would prevent cancellation. Instead, they wait until `writeQueue` + // is empty, then they make the write directly, using `writeInProgress` to detect and block + // concurrent writes. + writeQueue = writeQueue.then(kj::mvCapture(content, [this](kj::String&& content) { auto promise = inner.write(content.begin(), content.size()); return promise.attach(kj::mv(content)); @@ -1732,6 +1917,9 @@ public: Promise write(ArrayPtr> pieces) override { return KJ_EXCEPTION(FAILED, "HTTP message has no entity-body; can't write()"); } + Promise whenWriteDisconnected() override { + return kj::NEVER_DONE; + } }; class HttpDiscardingEntityWriter final: public kj::AsyncOutputStream { @@ -1742,6 +1930,9 @@ public: Promise write(ArrayPtr> pieces) override { return kj::READY_NOW; } + Promise whenWriteDisconnected() override { + return kj::NEVER_DONE; + } }; class HttpFixedLengthEntityWriter final: public kj::AsyncOutputStream { @@ -1751,7 +1942,9 @@ public: if (length == 0) inner.finishBody(); } ~HttpFixedLengthEntityWriter() noexcept(false) { - if (length > 0) inner.abortBody(); + if (length > 0 || inner.isWriteInProgress()) { + inner.abortBody(); + } } Promise write(const void* buffer, size_t size) override { @@ -1822,6 +2015,10 @@ public: return kj::mv(promise); } + Promise whenWriteDisconnected() override { + return inner.whenWriteDisconnected(); + } + private: HttpOutputStream& inner; uint64_t length; @@ -1905,6 +2102,10 @@ public: } } + Promise whenWriteDisconnected() override { + return inner.whenWriteDisconnected(); + } + private: HttpOutputStream& inner; }; @@ -1948,28 +2149,38 @@ public: } kj::Promise disconnect() override { - if (!sendClosed) { - KJ_REQUIRE(!currentlySending, "another message send is already in progress"); - - KJ_IF_MAYBE(p, sendingPong) { - // We recently sent a pong, make sure it's finished before proceeding. - currentlySending = true; - auto promise = p->then([this]() { - currentlySending = false; - return disconnect(); - }); - sendingPong = nullptr; - return promise; - } + KJ_REQUIRE(!currentlySending, "another message send is already in progress"); - sendClosed = true; + KJ_IF_MAYBE(p, sendingPong) { + // We recently sent a pong, make sure it's finished before proceeding. 
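The comment added to queueWrite() above describes the write-ordering scheme: small metadata writes (headers, chunk boundaries) are chained onto one promise so they stay ordered without being awaited individually, while body writes bypass the queue so they remain cancellable. A stripped-down sketch of such a queue; the class is hypothetical, not the one in the patch:

```c++
#include <kj/async-io.h>

class OrderedWriter {
public:
  explicit OrderedWriter(kj::AsyncOutputStream& inner): inner(inner) {}

  void queueWrite(kj::String content) {
    // Chain onto whatever is already queued; attach() keeps the buffer alive until written.
    queue = queue.then([this, content = kj::mv(content)]() mutable {
      auto promise = inner.write(content.begin(), content.size());
      return promise.attach(kj::mv(content));
    });
  }

  kj::Promise<void> flush() {
    // Fork so the caller can await everything queued so far while the queue keeps going.
    auto fork = queue.fork();
    queue = fork.addBranch();
    return fork.addBranch();
  }

private:
  kj::AsyncOutputStream& inner;
  kj::Promise<void> queue = kj::READY_NOW;
};
```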
+ currentlySending = true; + auto promise = p->then([this]() { + currentlySending = false; + return disconnect(); + }); + sendingPong = nullptr; + return promise; } + disconnected = true; + stream->shutdownWrite(); return kj::READY_NOW; } - kj::Promise receive() override { + void abort() override { + queuedPong = nullptr; + sendingPong = nullptr; + disconnected = true; + stream->abortRead(); + stream->shutdownWrite(); + } + + kj::Promise whenAborted() override { + return stream->whenWriteDisconnected(); + } + + kj::Promise receive(size_t maxSize) override { size_t headerSize = Header::headerSize(recvData.begin(), recvData.size()); if (headerSize > recvData.size()) { @@ -1982,7 +2193,8 @@ public: } return stream->tryRead(recvData.end(), 1, recvBuffer.end() - recvData.end()) - .then([this](size_t actual) -> kj::Promise { + .then([this,maxSize](size_t actual) -> kj::Promise { + receivedBytes += actual; if (actual == 0) { if (recvData.size() > 0) { return KJ_EXCEPTION(DISCONNECTED, "WebSocket EOF in frame header"); @@ -1994,7 +2206,7 @@ public: } recvData = recvBuffer.slice(0, recvData.size() + actual); - return receive(); + return receive(maxSize); }); } @@ -2004,6 +2216,8 @@ public: size_t payloadLen = recvHeader.getPayloadLen(); + KJ_REQUIRE(payloadLen < maxSize, "WebSocket message is too large"); + auto opcode = recvHeader.getOpcode(); bool isData = opcode < OPCODE_FIRST_CONTROL; if (opcode == OPCODE_CONTINUATION) { @@ -2057,7 +2271,7 @@ public: Mask mask = recvHeader.getMask(); auto handleMessage = kj::mvCapture(message, - [this,opcode,payloadTarget,payloadLen,mask,isFin] + [this,opcode,payloadTarget,payloadLen,mask,isFin,maxSize] (kj::Array&& message) -> kj::Promise { if (!mask.isZero()) { mask.apply(kj::arrayPtr(payloadTarget, payloadLen)); @@ -2065,8 +2279,9 @@ public: if (!isFin) { // Add fragment to the list and loop. + auto newMax = maxSize - message.size(); fragments.add(kj::mv(message)); - return receive(); + return receive(newMax); } switch (opcode) { @@ -2091,10 +2306,10 @@ public: case OPCODE_PING: // Send back a pong. queuePong(kj::mv(message)); - return receive(); + return receive(maxSize); case OPCODE_PONG: // Unsolicited pong. Ignore. - return receive(); + return receive(maxSize); default: KJ_FAIL_REQUIRE("unknown WebSocket opcode", opcode); } @@ -2110,7 +2325,8 @@ public: memcpy(payloadTarget, recvData.begin(), recvData.size()); size_t remaining = payloadLen - recvData.size(); auto promise = stream->tryRead(payloadTarget + recvData.size(), remaining, remaining) - .then([remaining](size_t amount) { + .then([this, remaining](size_t amount) { + receivedBytes += amount; if (amount < remaining) { kj::throwRecoverableException(KJ_EXCEPTION(DISCONNECTED, "WebSocket EOF in message")); } @@ -2120,6 +2336,41 @@ public: } } + kj::Maybe> tryPumpFrom(WebSocket& other) override { + KJ_IF_MAYBE(optOther, kj::dynamicDowncastIfAvailable(other)) { + // Both WebSockets are raw WebSockets, so we can pump the streams directly rather than read + // whole messages. + + if ((maskKeyGenerator == nullptr) == (optOther->maskKeyGenerator == nullptr)) { + // Oops, it appears that we either believe we are the client side of both sockets, or we + // are the server side of both sockets. Since clients must "mask" their outgoing frames but + // servers must *not* do so, we can't direct-pump. Sad. + return nullptr; + } + + // Check same error conditions as with sendImpl(). 
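receive() now takes a maximum message size and rejects frames (or accumulated fragments) that would exceed it. A minimal receive loop against the new signature; the 1 MiB cap and the echo behaviour are illustrative only:

```c++
#include <kj/compat/http.h>

kj::Promise<void> echoLoop(kj::WebSocket& ws) {
  return ws.receive(1u << 20).then([&ws](kj::WebSocket::Message message) -> kj::Promise<void> {
    KJ_SWITCH_ONEOF(message) {
      KJ_CASE_ONEOF(text, kj::String) {
        return ws.send(text).attach(kj::mv(text)).then([&ws]() { return echoLoop(ws); });
      }
      KJ_CASE_ONEOF(data, kj::Array<kj::byte>) {
        return ws.send(data).attach(kj::mv(data)).then([&ws]() { return echoLoop(ws); });
      }
      KJ_CASE_ONEOF(close, kj::WebSocket::Close) {
        // Echo the close and stop -- the same point at which a pump now completes.
        return ws.close(close.code, close.reason).attach(kj::mv(close));
      }
    }
    KJ_UNREACHABLE;
  });
}
```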
+ KJ_REQUIRE(!disconnected, "WebSocket can't send after disconnect()"); + KJ_REQUIRE(!currentlySending, "another message send is already in progress"); + currentlySending = true; + + // If the application chooses to pump messages out, but receives incoming messages normally + // with `receive()`, then we will receive pings and attempt to send pongs. But we can't + // safely insert a pong in the middle of a pumped stream. We kind of don't have a choice + // except to drop them on the floor, which is what will happen if we set `hasSentClose` true. + // Hopefully most apps that set up a pump do so in both directions at once, and so pings will + // flow through and pongs will flow back. + hasSentClose = true; + + return optOther->optimizedPumpTo(*this); + } + + return nullptr; + } + + uint64_t sentByteCount() override { return sentBytes; } + + uint64_t receivedByteCount() override { return receivedBytes; } + private: class Mask { public: @@ -2293,7 +2544,8 @@ private: kj::Own stream; kj::Maybe maskKeyGenerator; - bool sendClosed = false; + bool hasSentClose = false; + bool disconnected = false; bool currentlySending = false; Header sendHeader; kj::ArrayPtr sendParts[2]; @@ -2319,8 +2571,11 @@ private: kj::Array recvBuffer; kj::ArrayPtr recvData; + uint64_t sentBytes = 0; + uint64_t receivedBytes = 0; + kj::Promise sendImpl(byte opcode, kj::ArrayPtr message) { - KJ_REQUIRE(!sendClosed, "WebSocket already closed"); + KJ_REQUIRE(!disconnected, "WebSocket can't send after disconnect()"); KJ_REQUIRE(!currentlySending, "another message send is already in progress"); currentlySending = true; @@ -2335,7 +2590,10 @@ private: return promise; } - sendClosed = opcode == OPCODE_CLOSE; + // We don't stop the application from sending further messages after close() -- this is the + // application's error to make. But, we do want to make sure we don't send any PONGs after a + // close, since that would be our error. So we stack whether we closed for that reason. + hasSentClose = hasSentClose || opcode == OPCODE_CLOSE; Mask mask(maskKeyGenerator); @@ -2354,7 +2612,7 @@ private: if (!mask.isZero()) { promise = promise.attach(kj::mv(ownMessage)); } - return promise.then([this]() { + return promise.then([this, size = sendParts[0].size() + sendParts[1].size()]() { currentlySending = false; // Send queued pong if needed. @@ -2363,6 +2621,7 @@ private: queuedPong = nullptr; queuePong(kj::mv(payload)); } + sentBytes += size; }); } @@ -2386,7 +2645,7 @@ private: } kj::Promise sendPong(kj::Array payload) { - if (sendClosed) { + if (hasSentClose || disconnected) { return kj::READY_NOW; } @@ -2394,10 +2653,55 @@ private: sendParts[1] = payload; return stream->write(sendParts).attach(kj::mv(payload)); } + + kj::Promise optimizedPumpTo(WebSocketImpl& other) { + KJ_IF_MAYBE(p, other.sendingPong) { + // We recently sent a pong, make sure it's finished before proceeding. + auto promise = p->then([this, &other]() { + return optimizedPumpTo(other); + }); + other.sendingPong = nullptr; + return promise; + } + + if (recvData.size() > 0) { + // We have some data buffered. Write it first. 
+ return other.stream->write(recvData.begin(), recvData.size()) + .then([this, &other, size = recvData.size()]() { + recvData = nullptr; + other.sentBytes += size; + return optimizedPumpTo(other); + }); + } + + auto cancelPromise = other.stream->whenWriteDisconnected() + .then([this]() -> kj::Promise { + this->abort(); + return KJ_EXCEPTION(DISCONNECTED, + "destination of WebSocket pump disconnected prematurely"); + }); + + // There's no buffered incoming data, so start pumping stream now. + return stream->pumpTo(*other.stream).then([this, &other](size_t s) -> kj::Promise { + // WebSocket pumps are expected to include end-of-stream. + other.disconnected = true; + other.stream->shutdownWrite(); + receivedBytes += s; + other.sentBytes += s; + return kj::READY_NOW; + }, [&other](kj::Exception&& e) -> kj::Promise { + // We don't know if it was a read or a write that threw. If it was a read that threw, we need + // to send a disconnect on the destination. If it was the destination that threw, it + // shouldn't hurt to disconnect() it again, but we'll catch and squelch any exceptions. + other.disconnected = true; + kj::runCatchingExceptions([&other]() { other.stream->shutdownWrite(); }); + return kj::mv(e); + }).exclusiveJoin(kj::mv(cancelPromise)); + } }; kj::Own upgradeToWebSocket( - kj::Own stream, HttpInputStream& httpInput, HttpOutputStream& httpOutput, + kj::Own stream, HttpInputStreamImpl& httpInput, HttpOutputStream& httpOutput, kj::Maybe maskKeyGenerator) { // Create a WebSocket upgraded from an HTTP stream. auto releasedBuffer = httpInput.releaseBuffer(); @@ -2427,9 +2731,9 @@ static kj::Promise pumpWebSocketLoop(WebSocket& from, WebSocket& to) { .then([&from,&to]() { return pumpWebSocketLoop(from, to); }); } KJ_CASE_ONEOF(close, WebSocket::Close) { + // Once a close has passed through, the pump is complete. return to.close(close.code, close.reason) - .attach(kj::mv(close)) - .then([&from,&to]() { return pumpWebSocketLoop(from, to); }); + .attach(kj::mv(close)); } } KJ_UNREACHABLE; @@ -2449,7 +2753,12 @@ kj::Promise WebSocket::pumpTo(WebSocket& other) { } else { // Fall back to default implementation. return kj::evalNow([&]() { - return pumpWebSocketLoop(*this, other); + auto cancelPromise = other.whenAborted().then([this]() -> kj::Promise { + this->abort(); + return KJ_EXCEPTION(DISCONNECTED, + "destination of WebSocket pump disconnected prematurely"); + }); + return pumpWebSocketLoop(*this, other).exclusiveJoin(kj::mv(cancelPromise)); }); } } @@ -2460,12 +2769,7 @@ kj::Maybe> WebSocket::tryPumpFrom(WebSocket& other) { namespace { -class AbortableWebSocket: public WebSocket { -public: - virtual void abort() = 0; -}; - -class WebSocketPipeImpl final: public AbortableWebSocket, public kj::Refcounted { +class WebSocketPipeImpl final: public WebSocket, public kj::Refcounted { // Represents one direction of a WebSocket pipe. 
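With the changes above, pumpWebSocketLoop() completes once a Close message has passed through, and WebSocket::pumpTo() fails with DISCONNECTED if the destination is aborted. A small sketch of the typical bidirectional bridge built on top of that behaviour:

```c++
#include <kj/compat/http.h>

kj::Promise<void> bridge(kj::WebSocket& a, kj::WebSocket& b) {
  // Each pump resolves after it has forwarded a Close; join both directions.
  auto promises = kj::heapArrayBuilder<kj::Promise<void>>(2);
  promises.add(a.pumpTo(b));
  promises.add(b.pumpTo(a));
  return kj::joinPromises(promises.finish());
}
```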
// // This class behaves as a "loopback" WebSocket: a message sent using send() is received using @@ -2491,28 +2795,38 @@ public: } else { ownState = heap(); state = *ownState; + + aborted = true; + KJ_IF_MAYBE(f, abortedFulfiller) { + f->get()->fulfill(); + abortedFulfiller = nullptr; + } } } kj::Promise send(kj::ArrayPtr message) override { KJ_IF_MAYBE(s, state) { - return s->send(message); + return s->send(message).then([&, size = message.size()]() { transferredBytes += size; }); } else { - return newAdaptedPromise(*this, MessagePtr(message)); + return newAdaptedPromise(*this, MessagePtr(message)) + .then([&, size = message.size()]() { transferredBytes += size; }); } } kj::Promise send(kj::ArrayPtr message) override { KJ_IF_MAYBE(s, state) { - return s->send(message); + return s->send(message).then([&, size = message.size()]() { transferredBytes += size; }); } else { - return newAdaptedPromise(*this, MessagePtr(message)); + return newAdaptedPromise(*this, MessagePtr(message)) + .then([&, size = message.size()]() { transferredBytes += size; }); } } kj::Promise close(uint16_t code, kj::StringPtr reason) override { KJ_IF_MAYBE(s, state) { - return s->close(code, reason); + return s->close(code, reason) + .then([&, size = reason.size()]() { transferredBytes += (2 +size); }); } else { - return newAdaptedPromise(*this, MessagePtr(ClosePtr { code, reason })); + return newAdaptedPromise(*this, MessagePtr(ClosePtr { code, reason })) + .then([&, size = reason.size()]() { transferredBytes += (2 +size); }); } } kj::Promise disconnect() override { @@ -2524,6 +2838,20 @@ public: return kj::READY_NOW; } } + kj::Promise whenAborted() override { + if (aborted) { + return kj::READY_NOW; + } else KJ_IF_MAYBE(p, abortedPromise) { + return p->addBranch(); + } else { + auto paf = newPromiseAndFulfiller(); + abortedFulfiller = kj::mv(paf.fulfiller); + auto fork = paf.promise.fork(); + auto result = fork.addBranch(); + abortedPromise = kj::mv(fork); + return result; + } + } kj::Maybe> tryPumpFrom(WebSocket& other) override { KJ_IF_MAYBE(s, state) { return s->tryPumpFrom(other); @@ -2532,28 +2860,44 @@ public: } } - kj::Promise receive() override { + kj::Promise receive(size_t maxSize) override { KJ_IF_MAYBE(s, state) { - return s->receive(); + return s->receive(maxSize); } else { - return newAdaptedPromise(*this); + return newAdaptedPromise(*this, maxSize); } } kj::Promise pumpTo(WebSocket& other) override { KJ_IF_MAYBE(s, state) { - return s->pumpTo(other); + auto before = other.receivedByteCount(); + return s->pumpTo(other).attach(kj::defer([this, &other, before]() { + transferredBytes += other.receivedByteCount() - before; + })); } else { return newAdaptedPromise(*this, other); } } + uint64_t sentByteCount() override { + return transferredBytes; + } + uint64_t receivedByteCount() override { + return transferredBytes; + } + private: - kj::Maybe state; + kj::Maybe state; // Object-oriented state! If any method call is blocked waiting on activity from the other end, // then `state` is non-null and method calls should be forwarded to it. If no calls are // outstanding, `state` is null. 
- kj::Own ownState; + kj::Own ownState; + + uint64_t transferredBytes = 0; + + bool aborted = false; + Maybe>> abortedFulfiller = nullptr; + Maybe> abortedPromise = nullptr; void endState(WebSocket& obj) { KJ_IF_MAYBE(s, state) { @@ -2569,7 +2913,7 @@ private: }; typedef kj::OneOf, kj::ArrayPtr, ClosePtr> MessagePtr; - class BlockedSend final: public AbortableWebSocket { + class BlockedSend final: public WebSocket { public: BlockedSend(kj::PromiseFulfiller& fulfiller, WebSocketPipeImpl& pipe, MessagePtr message) : fulfiller(fulfiller), pipe(pipe), message(kj::mv(message)) { @@ -2586,6 +2930,9 @@ private: pipe.endState(*this); pipe.abort(); } + kj::Promise whenAborted() override { + KJ_FAIL_ASSERT("can't get here -- implemented by WebSocketPipeImpl"); + } kj::Promise send(kj::ArrayPtr message) override { KJ_FAIL_ASSERT("another message send is already in progress"); @@ -2603,7 +2950,7 @@ private: KJ_FAIL_ASSERT("another message send is already in progress"); } - kj::Promise receive() override { + kj::Promise receive(size_t maxSize) override { KJ_REQUIRE(canceler.isEmpty(), "already pumping"); fulfiller.fulfill(); pipe.endState(*this); @@ -2649,6 +2996,13 @@ private: })); } + uint64_t sentByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + uint64_t receivedByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + private: kj::PromiseFulfiller& fulfiller; WebSocketPipeImpl& pipe; @@ -2656,7 +3010,7 @@ private: Canceler canceler; }; - class BlockedPumpFrom final: public AbortableWebSocket { + class BlockedPumpFrom final: public WebSocket { public: BlockedPumpFrom(kj::PromiseFulfiller& fulfiller, WebSocketPipeImpl& pipe, WebSocket& input) @@ -2674,6 +3028,9 @@ private: pipe.endState(*this); pipe.abort(); } + kj::Promise whenAborted() override { + KJ_FAIL_ASSERT("can't get here -- implemented by WebSocketPipeImpl"); + } kj::Promise send(kj::ArrayPtr message) override { KJ_FAIL_ASSERT("another message send is already in progress"); @@ -2691,9 +3048,9 @@ private: KJ_FAIL_ASSERT("another message send is already in progress"); } - kj::Promise receive() override { + kj::Promise receive(size_t maxSize) override { KJ_REQUIRE(canceler.isEmpty(), "another message receive is already in progress"); - return canceler.wrap(input.receive() + return canceler.wrap(input.receive(maxSize) .then([this](Message message) { if (message.is()) { canceler.release(); @@ -2724,6 +3081,13 @@ private: })); } + uint64_t sentByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + uint64_t receivedByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + private: kj::PromiseFulfiller& fulfiller; WebSocketPipeImpl& pipe; @@ -2731,10 +3095,11 @@ private: Canceler canceler; }; - class BlockedReceive final: public AbortableWebSocket { + class BlockedReceive final: public WebSocket { public: - BlockedReceive(kj::PromiseFulfiller& fulfiller, WebSocketPipeImpl& pipe) - : fulfiller(fulfiller), pipe(pipe) { + BlockedReceive(kj::PromiseFulfiller& fulfiller, WebSocketPipeImpl& pipe, + size_t maxSize) + : fulfiller(fulfiller), pipe(pipe), maxSize(maxSize) { KJ_REQUIRE(pipe.state == nullptr); pipe.state = *this; } @@ -2748,6 +3113,9 @@ private: pipe.endState(*this); pipe.abort(); } + kj::Promise whenAborted() override { + KJ_FAIL_ASSERT("can't get here -- implemented by 
WebSocketPipeImpl"); + } kj::Promise send(kj::ArrayPtr message) override { KJ_REQUIRE(canceler.isEmpty(), "already pumping"); @@ -2777,7 +3145,7 @@ private: } kj::Maybe> tryPumpFrom(WebSocket& other) override { KJ_REQUIRE(canceler.isEmpty(), "already pumping"); - return canceler.wrap(other.receive().then([this,&other](Message message) { + return canceler.wrap(other.receive(maxSize).then([this,&other](Message message) { canceler.release(); fulfiller.fulfill(kj::mv(message)); pipe.endState(*this); @@ -2790,20 +3158,28 @@ private: })); } - kj::Promise receive() override { + kj::Promise receive(size_t maxSize) override { KJ_FAIL_ASSERT("another message receive is already in progress"); } kj::Promise pumpTo(WebSocket& other) override { KJ_FAIL_ASSERT("another message receive is already in progress"); } + uint64_t sentByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + uint64_t receivedByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + private: kj::PromiseFulfiller& fulfiller; WebSocketPipeImpl& pipe; + size_t maxSize; Canceler canceler; }; - class BlockedPumpTo final: public AbortableWebSocket { + class BlockedPumpTo final: public WebSocket { public: BlockedPumpTo(kj::PromiseFulfiller& fulfiller, WebSocketPipeImpl& pipe, WebSocket& output) : fulfiller(fulfiller), pipe(pipe), output(output) { @@ -2824,6 +3200,9 @@ private: pipe.endState(*this); pipe.abort(); } + kj::Promise whenAborted() override { + KJ_FAIL_ASSERT("can't get here -- implemented by WebSocketPipeImpl"); + } kj::Promise send(kj::ArrayPtr message) override { KJ_REQUIRE(canceler.isEmpty(), "another message send is already in progress"); @@ -2835,7 +3214,12 @@ private: } kj::Promise close(uint16_t code, kj::StringPtr reason) override { KJ_REQUIRE(canceler.isEmpty(), "another message send is already in progress"); - return canceler.wrap(output.close(code, reason)); + return canceler.wrap(output.close(code, reason).then([this]() { + // A pump is expected to end upon seeing a Close message. 
+ canceler.release(); + pipe.endState(*this); + fulfiller.fulfill(); + })); } kj::Promise disconnect() override { KJ_REQUIRE(canceler.isEmpty(), "another message send is already in progress"); @@ -2855,13 +3239,20 @@ private: })); } - kj::Promise receive() override { + kj::Promise receive(size_t maxSize) override { KJ_FAIL_ASSERT("another message receive is already in progress"); } kj::Promise pumpTo(WebSocket& other) override { KJ_FAIL_ASSERT("another message receive is already in progress"); } + uint64_t sentByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + uint64_t receivedByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + private: kj::PromiseFulfiller& fulfiller; WebSocketPipeImpl& pipe; @@ -2869,11 +3260,14 @@ private: Canceler canceler; }; - class Disconnected final: public AbortableWebSocket { + class Disconnected final: public WebSocket { public: void abort() override { // can ignore } + kj::Promise whenAborted() override { + KJ_FAIL_ASSERT("can't get here -- implemented by WebSocketPipeImpl"); + } kj::Promise send(kj::ArrayPtr message) override { KJ_FAIL_REQUIRE("can't send() after disconnect()"); @@ -2891,19 +3285,30 @@ private: KJ_FAIL_REQUIRE("can't tryPumpFrom() after disconnect()"); } - kj::Promise receive() override { + kj::Promise receive(size_t maxSize) override { return KJ_EXCEPTION(DISCONNECTED, "WebSocket disconnected"); } kj::Promise pumpTo(WebSocket& other) override { return kj::READY_NOW; } + + uint64_t sentByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + uint64_t receivedByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + }; - class Aborted final: public AbortableWebSocket { + class Aborted final: public WebSocket { public: void abort() override { // can ignore } + kj::Promise whenAborted() override { + KJ_FAIL_ASSERT("can't get here -- implemented by WebSocketPipeImpl"); + } kj::Promise send(kj::ArrayPtr message) override { return KJ_EXCEPTION(DISCONNECTED, "other end of WebSocketPipe was destroyed"); @@ -2922,12 +3327,19 @@ private: "other end of WebSocketPipe was destroyed")); } - kj::Promise receive() override { + kj::Promise receive(size_t maxSize) override { return KJ_EXCEPTION(DISCONNECTED, "other end of WebSocketPipe was destroyed"); } kj::Promise pumpTo(WebSocket& other) override { return KJ_EXCEPTION(DISCONNECTED, "other end of WebSocketPipe was destroyed"); } + + uint64_t sentByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } + uint64_t receivedByteCount() override { + KJ_FAIL_ASSERT("Bytes are not counted for the individual states of WebSocketPipeImpl."); + } }; }; @@ -2952,17 +3364,27 @@ public: kj::Promise disconnect() override { return out->disconnect(); } + void abort() override { + in->abort(); + out->abort(); + } + kj::Promise whenAborted() override { + return out->whenAborted(); + } kj::Maybe> tryPumpFrom(WebSocket& other) override { return out->tryPumpFrom(other); } - kj::Promise receive() override { - return in->receive(); + kj::Promise receive(size_t maxSize) override { + return in->receive(maxSize); } kj::Promise pumpTo(WebSocket& other) override { return in->pumpTo(other); } + uint64_t sentByteCount() override { return out->sentByteCount(); } + uint64_t receivedByteCount() override { return 
in->sentByteCount(); } + private: kj::Own in; kj::Own out; @@ -2984,9 +3406,10 @@ WebSocketPipe newWebSocketPipe() { namespace { -class HttpClientImpl final: public HttpClient { +class HttpClientImpl final: public HttpClient, + private HttpClientErrorHandler { public: - HttpClientImpl(HttpHeaderTable& responseHeaderTable, kj::Own rawStream, + HttpClientImpl(const HttpHeaderTable& responseHeaderTable, kj::Own rawStream, HttpClientSettings settings) : httpInput(*rawStream, responseHeaderTable), httpOutput(*rawStream), @@ -3057,31 +3480,38 @@ public: auto id = ++counter; auto responsePromise = httpInput.readResponseHeaders().then( - [this,method,id](kj::Maybe&& response) -> HttpClient::Response { - KJ_IF_MAYBE(r, response) { - auto& headers = httpInput.getHeaders(); - HttpClient::Response result { - r->statusCode, - r->statusText, - &headers, - httpInput.getEntityBody(HttpInputStream::RESPONSE, method, r->statusCode, headers) - }; + [this,method,id](HttpHeaders::ResponseOrProtocolError&& responseOrProtocolError) + -> HttpClient::Response { + KJ_SWITCH_ONEOF(responseOrProtocolError) { + KJ_CASE_ONEOF(response, HttpHeaders::Response) { + auto& responseHeaders = httpInput.getHeaders(); + HttpClient::Response result { + response.statusCode, + response.statusText, + &responseHeaders, + httpInput.getEntityBody( + HttpInputStreamImpl::RESPONSE, method, response.statusCode, responseHeaders) + }; - if (fastCaseCmp<'c', 'l', 'o', 's', 'e'>( - headers.get(HttpHeaderId::CONNECTION).orDefault(nullptr).cStr())) { + if (fastCaseCmp<'c', 'l', 'o', 's', 'e'>( + responseHeaders.get(HttpHeaderId::CONNECTION).orDefault(nullptr).cStr())) { + closed = true; + } else if (counter == id) { + watchForClose(); + } else { + // Another request was already queued after this one, so we don't want to watch for + // stream closure because we're fully expecting another response. + } + return result; + } + KJ_CASE_ONEOF(protocolError, HttpHeaders::ProtocolError) { closed = true; - } else if (counter == id) { - watchForClose(); - } else { - // Anothe request was already queued after this one, so we don't want to watch for - // stream closure because we're fully expecting another response. 
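A usage sketch for the in-process pipe, exercising the byte accounting added above. It assumes a kj::WaitScope from the surrounding event loop, and the expected count of 5 relies on the transferredBytes bookkeeping introduced in this patch.

```c++
#include <kj/compat/http.h>
#include <kj/debug.h>

void pipeExample(kj::WaitScope& waitScope) {
  auto pipe = kj::newWebSocketPipe();

  // send() on one end blocks until the other end receives, so don't wait on it yet.
  auto sendPromise = pipe.ends[0]->send(kj::StringPtr("hello"));
  auto message = pipe.ends[1]->receive(1u << 20).wait(waitScope);
  sendPromise.wait(waitScope);

  KJ_ASSERT(message.get<kj::String>() == "hello");
  KJ_ASSERT(pipe.ends[0]->sentByteCount() == 5);
}
```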
+ return settings.errorHandler.orDefault(*this).handleProtocolError( + kj::mv(protocolError)); } - return result; - } else { - closed = true; - KJ_FAIL_REQUIRE("received invalid HTTP response") { break; } - return HttpClient::Response(); } + + KJ_UNREACHABLE; }); return { kj::mv(bodyStream), kj::mv(responsePromise) }; @@ -3120,65 +3550,87 @@ public: auto id = ++counter; return httpInput.readResponseHeaders() - .then(kj::mvCapture(keyBase64, - [this,id](kj::StringPtr keyBase64, kj::Maybe&& response) + .then([this,id,keyBase64 = kj::mv(keyBase64)]( + HttpHeaders::ResponseOrProtocolError&& responseOrProtocolError) -> HttpClient::WebSocketResponse { - KJ_IF_MAYBE(r, response) { - auto& headers = httpInput.getHeaders(); - if (r->statusCode == 101) { - if (!fastCaseCmp<'w', 'e', 'b', 's', 'o', 'c', 'k', 'e', 't'>( - headers.get(HttpHeaderId::UPGRADE).orDefault(nullptr).cStr())) { - KJ_FAIL_REQUIRE("server returned incorrect Upgrade header; should be 'websocket'", - headers.get(HttpHeaderId::UPGRADE).orDefault("(null)")) { - break; + KJ_SWITCH_ONEOF(responseOrProtocolError) { + KJ_CASE_ONEOF(response, HttpHeaders::Response) { + auto& responseHeaders = httpInput.getHeaders(); + if (response.statusCode == 101) { + if (!fastCaseCmp<'w', 'e', 'b', 's', 'o', 'c', 'k', 'e', 't'>( + responseHeaders.get(HttpHeaderId::UPGRADE).orDefault(nullptr).cStr())) { + kj::String ownMessage; + kj::StringPtr message; + KJ_IF_MAYBE(actual, responseHeaders.get(HttpHeaderId::UPGRADE)) { + ownMessage = kj::str( + "Server failed WebSocket handshake: incorrect Upgrade header: " + "expected 'websocket', got '", *actual, "'."); + message = ownMessage; + } else { + message = "Server failed WebSocket handshake: missing Upgrade header."; + } + return settings.errorHandler.orDefault(*this).handleWebSocketProtocolError({ + 502, "Bad Gateway", message, nullptr + }); } - return HttpClient::WebSocketResponse(); - } - auto expectedAccept = generateWebSocketAccept(keyBase64); - if (headers.get(HttpHeaderId::SEC_WEBSOCKET_ACCEPT).orDefault(nullptr) - != expectedAccept) { - KJ_FAIL_REQUIRE("server returned incorrect Sec-WebSocket-Accept header", - headers.get(HttpHeaderId::SEC_WEBSOCKET_ACCEPT).orDefault("(null)"), - expectedAccept) { break; } - return HttpClient::WebSocketResponse(); - } + auto expectedAccept = generateWebSocketAccept(keyBase64); + if (responseHeaders.get(HttpHeaderId::SEC_WEBSOCKET_ACCEPT).orDefault(nullptr) + != expectedAccept) { + kj::String ownMessage; + kj::StringPtr message; + KJ_IF_MAYBE(actual, responseHeaders.get(HttpHeaderId::SEC_WEBSOCKET_ACCEPT)) { + ownMessage = kj::str( + "Server failed WebSocket handshake: incorrect Sec-WebSocket-Accept header: " + "expected '", expectedAccept, "', got '", *actual, "'."); + message = ownMessage; + } else { + message = "Server failed WebSocket handshake: missing Upgrade header."; + } + return settings.errorHandler.orDefault(*this).handleWebSocketProtocolError({ + 502, "Bad Gateway", message, nullptr + }); + } - return { - r->statusCode, - r->statusText, - &httpInput.getHeaders(), - upgradeToWebSocket(kj::mv(ownStream), httpInput, httpOutput, settings.entropySource), - }; - } else { - upgraded = false; - HttpClient::WebSocketResponse result { - r->statusCode, - r->statusText, - &headers, - httpInput.getEntityBody(HttpInputStream::RESPONSE, HttpMethod::GET, r->statusCode, - headers) - }; - if (fastCaseCmp<'c', 'l', 'o', 's', 'e'>( - headers.get(HttpHeaderId::CONNECTION).orDefault(nullptr).cStr())) { - closed = true; - } else if (counter == id) { - watchForClose(); + return 
{ + response.statusCode, + response.statusText, + &httpInput.getHeaders(), + upgradeToWebSocket(kj::mv(ownStream), httpInput, httpOutput, settings.entropySource), + }; } else { - // Anothe request was already queued after this one, so we don't want to watch for - // stream closure because we're fully expecting another response. + upgraded = false; + HttpClient::WebSocketResponse result { + response.statusCode, + response.statusText, + &responseHeaders, + httpInput.getEntityBody(HttpInputStreamImpl::RESPONSE, HttpMethod::GET, + response.statusCode, responseHeaders) + }; + if (fastCaseCmp<'c', 'l', 'o', 's', 'e'>( + responseHeaders.get(HttpHeaderId::CONNECTION).orDefault(nullptr).cStr())) { + closed = true; + } else if (counter == id) { + watchForClose(); + } else { + // Another request was already queued after this one, so we don't want to watch for + // stream closure because we're fully expecting another response. + } + return result; } - return result; } - } else { - KJ_FAIL_REQUIRE("received invalid HTTP response") { break; } - return HttpClient::WebSocketResponse(); + KJ_CASE_ONEOF(protocolError, HttpHeaders::ProtocolError) { + return settings.errorHandler.orDefault(*this).handleWebSocketProtocolError( + kj::mv(protocolError)); + } } - })); + + KJ_UNREACHABLE; + }); } private: - HttpInputStream httpInput; + HttpInputStreamImpl httpInput; HttpOutputStream httpOutput; kj::Own ownStream; HttpClientSettings settings; @@ -3191,22 +3643,38 @@ private: // point in history. void watchForClose() { - closeWatcherTask = httpInput.awaitNextMessage().then([this](bool hasData) { + closeWatcherTask = httpInput.awaitNextMessage() + .then([this](bool hasData) -> kj::Promise { if (hasData) { // Uhh... The server sent some data before we asked for anything. Perhaps due to properties // of this application, the server somehow already knows what the next request will be, and // it is trying to optimize. Or maybe this is some sort of test and the server is just // replaying a script. In any case, we will humor it -- leave the data in the buffer and // let it become the response to the next request. + return kj::READY_NOW; } else { // EOF -- server disconnected. - - // Proactively free up the socket. - ownStream = nullptr; - closed = true; - } - }).eagerlyEvaluate(nullptr); + if (httpOutput.isInBody()) { + // Huh, the application is still sending a request. We should let it finish. We do not + // need to proactively free the socket in this case because we know that we're not + // sitting in a reusable connection pool, because we know the application is still + // actively using the connection. + return kj::READY_NOW; + } else { + return httpOutput.flush().then([this]() { + // We might be sitting in NetworkAddressHttpClient's `availableClients` pool. We don't + // have a way to notify it to remove this client from the pool; instead, when it tries + // to pull this client from the pool later, it will notice the client is dead and will + // discard it then. But, we would like to avoid holding on to a socket forever. So, + // destroy the socket now. + // TODO(cleanup): Maybe we should arrange to proactively remove ourselves? Seems + // like the code will be awkward. 
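The client now routes malformed responses through the HttpClientErrorHandler configured in HttpClientSettings, falling back to the default further down (which throws). A sketch of a custom handler that only adds logging before deferring to that default; the class name is illustrative and the ProtocolError field names follow this patch:

```c++
#include <kj/compat/http.h>
#include <kj/debug.h>

class LoggingErrorHandler final: public kj::HttpClientErrorHandler {
public:
  kj::HttpClient::Response handleProtocolError(
      kj::HttpHeaders::ProtocolError protocolError) override {
    KJ_LOG(WARNING, "peer sent unparseable HTTP headers",
           protocolError.statusCode, protocolError.description);
    // Defer to the default behaviour rather than trying to synthesize a Response here.
    return kj::HttpClientErrorHandler::handleProtocolError(kj::mv(protocolError));
  }
};

// Wiring it up (the handler must outlive the client):
//   LoggingErrorHandler errorHandler;
//   settings.errorHandler = errorHandler;
```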
+ ownStream = nullptr; + }); + } + } + }).eagerlyEvaluate(nullptr); } }; @@ -3233,180 +3701,34 @@ kj::Promise> HttpClient::connect(kj::StringPtr host) } kj::Own newHttpClient( - HttpHeaderTable& responseHeaderTable, kj::AsyncIoStream& stream, + const HttpHeaderTable& responseHeaderTable, kj::AsyncIoStream& stream, HttpClientSettings settings) { return kj::heap(responseHeaderTable, kj::Own(&stream, kj::NullDisposer::instance), kj::mv(settings)); } -// ======================================================================================= - -namespace { - -class PromiseIoStream final: public kj::AsyncIoStream, private kj::TaskSet::ErrorHandler { - // An AsyncIoStream which waits for a promise to resolve then forwards all calls to the promised - // stream. - // - // TODO(cleanup): Make this more broadly available. - -public: - PromiseIoStream(kj::Promise> promise) - : promise(promise.then([this](kj::Own result) { - stream = kj::mv(result); - }).fork()), - tasks(*this) {} - - kj::Promise read(void* buffer, size_t minBytes, size_t maxBytes) override { - KJ_IF_MAYBE(s, stream) { - return s->get()->read(buffer, minBytes, maxBytes); - } else { - return promise.addBranch().then([this,buffer,minBytes,maxBytes]() { - return KJ_ASSERT_NONNULL(stream)->read(buffer, minBytes, maxBytes); - }); - } - } - kj::Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { - KJ_IF_MAYBE(s, stream) { - return s->get()->tryRead(buffer, minBytes, maxBytes); - } else { - return promise.addBranch().then([this,buffer,minBytes,maxBytes]() { - return KJ_ASSERT_NONNULL(stream)->tryRead(buffer, minBytes, maxBytes); - }); - } - } - - kj::Maybe tryGetLength() override { - KJ_IF_MAYBE(s, stream) { - return s->get()->tryGetLength(); - } else { - return nullptr; - } - } - - kj::Promise pumpTo(kj::AsyncOutputStream& output, uint64_t amount) override { - KJ_IF_MAYBE(s, stream) { - return s->get()->pumpTo(output, amount); - } else { - return promise.addBranch().then([this,&output,amount]() { - return KJ_ASSERT_NONNULL(stream)->pumpTo(output, amount); - }); - } - } - - kj::Promise write(const void* buffer, size_t size) override { - KJ_IF_MAYBE(s, stream) { - return s->get()->write(buffer, size); - } else { - return promise.addBranch().then([this,buffer,size]() { - return KJ_ASSERT_NONNULL(stream)->write(buffer, size); - }); - } - } - kj::Promise write(kj::ArrayPtr> pieces) override { - KJ_IF_MAYBE(s, stream) { - return s->get()->write(pieces); - } else { - return promise.addBranch().then([this,pieces]() { - return KJ_ASSERT_NONNULL(stream)->write(pieces); - }); - } - } - - kj::Maybe> tryPumpFrom( - kj::AsyncInputStream& input, uint64_t amount = kj::maxValue) override { - KJ_IF_MAYBE(s, stream) { - return s->get()->tryPumpFrom(input, amount); - } else { - return promise.addBranch().then([this,&input,amount]() { - // Call input.pumpTo() on the resolved stream instead. 
- return input.pumpTo(*KJ_ASSERT_NONNULL(stream), amount); - }); - } - } - - void shutdownWrite() override { - KJ_IF_MAYBE(s, stream) { - return s->get()->shutdownWrite(); - } else { - tasks.add(promise.addBranch().then([this]() { - return KJ_ASSERT_NONNULL(stream)->shutdownWrite(); - })); - } - } - - void abortRead() override { - KJ_IF_MAYBE(s, stream) { - return s->get()->abortRead(); - } else { - tasks.add(promise.addBranch().then([this]() { - return KJ_ASSERT_NONNULL(stream)->abortRead(); - })); - } - } - -public: - kj::ForkedPromise promise; - kj::Maybe> stream; - kj::TaskSet tasks; - - void taskFailed(kj::Exception&& exception) override { - KJ_LOG(ERROR, exception); - } -}; - -class PromiseOutputStream final: public kj::AsyncOutputStream { - // An AsyncOutputStream which waits for a promise to resolve then forwards all calls to the - // promised stream. - // - // TODO(cleanup): Make this more broadly available. - // TODO(cleanup): Can this share implementation with PromiseIoStream? Seems hard. - -public: - PromiseOutputStream(kj::Promise> promise) - : promise(promise.then([this](kj::Own result) { - stream = kj::mv(result); - }).fork()) {} +HttpClient::Response HttpClientErrorHandler::handleProtocolError( + HttpHeaders::ProtocolError protocolError) { + KJ_FAIL_REQUIRE(protocolError.description) { break; } + return HttpClient::Response(); +} - kj::Promise write(const void* buffer, size_t size) override { - KJ_IF_MAYBE(s, stream) { - return s->get()->write(buffer, size); - } else { - return promise.addBranch().then([this,buffer,size]() { - return KJ_ASSERT_NONNULL(stream)->write(buffer, size); - }); - } - } - kj::Promise write(kj::ArrayPtr> pieces) override { - KJ_IF_MAYBE(s, stream) { - return s->get()->write(pieces); - } else { - return promise.addBranch().then([this,pieces]() { - return KJ_ASSERT_NONNULL(stream)->write(pieces); - }); - } - } +HttpClient::WebSocketResponse HttpClientErrorHandler::handleWebSocketProtocolError( + HttpHeaders::ProtocolError protocolError) { + auto response = handleProtocolError(protocolError); + return HttpClient::WebSocketResponse { + response.statusCode, response.statusText, response.headers, kj::mv(response.body) + }; +} - kj::Maybe> tryPumpFrom( - kj::AsyncInputStream& input, uint64_t amount = kj::maxValue) override { - KJ_IF_MAYBE(s, stream) { - return s->get()->tryPumpFrom(input, amount); - } else { - return promise.addBranch().then([this,&input,amount]() { - // Call input.pumpTo() on the resolved stream instead. 
- return input.pumpTo(*KJ_ASSERT_NONNULL(stream), amount); - }); - } - } +// ======================================================================================= -public: - kj::ForkedPromise promise; - kj::Maybe> stream; -}; +namespace { class NetworkAddressHttpClient final: public HttpClient { public: - NetworkAddressHttpClient(kj::Timer& timer, HttpHeaderTable& responseHeaderTable, + NetworkAddressHttpClient(kj::Timer& timer, const HttpHeaderTable& responseHeaderTable, kj::Own address, HttpClientSettings settings) : timer(timer), responseHeaderTable(responseHeaderTable), @@ -3463,7 +3785,7 @@ public: private: kj::Timer& timer; - HttpHeaderTable& responseHeaderTable; + const HttpHeaderTable& responseHeaderTable; kj::Own address; HttpClientSettings settings; @@ -3501,7 +3823,7 @@ private: kj::Own getClient() { for (;;) { if (availableClients.empty()) { - auto stream = kj::heap(address->connect()); + auto stream = newPromisedStream(address->connect()); return kj::refcounted(*this, kj::heap(responseHeaderTable, kj::mv(stream), settings)); } else { @@ -3516,10 +3838,11 @@ private: } void returnClientToAvailable(kj::Own client) { - // Only return the connection to the pool if it is reusable. - if (client->canReuse()) { + // Only return the connection to the pool if it is reusable and if our settings indicate we + // should reuse connections. + if (client->canReuse() && settings.idleTimeout > 0 * kj::SECONDS) { availableClients.push_back(AvailableClient { - kj::mv(client), timer.now() + settings.idleTimout + kj::mv(client), timer.now() + settings.idleTimeout }); } @@ -3601,7 +3924,7 @@ public: auto split = combined.split(); return { - kj::heap(kj::mv(kj::get<0>(split))), + newPromisedStream(kj::mv(kj::get<0>(split))), kj::mv(kj::get<1>(split)) }; } @@ -3629,7 +3952,7 @@ private: class NetworkHttpClient final: public HttpClient, private kj::TaskSet::ErrorHandler { public: - NetworkHttpClient(kj::Timer& timer, HttpHeaderTable& responseHeaderTable, + NetworkHttpClient(kj::Timer& timer, const HttpHeaderTable& responseHeaderTable, kj::Network& network, kj::Maybe tlsNetwork, HttpClientSettings settings) : timer(timer), @@ -3641,7 +3964,13 @@ public: Request request(HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, kj::Maybe expectedBodySize = nullptr) override { - auto parsed = Url::parse(url, Url::HTTP_PROXY_REQUEST); + // We need to parse the proxy-style URL to convert it to host-style. + // Use URL parsing options that avoid unnecessary rewrites. + Url::Options urlOptions; + urlOptions.allowEmpty = true; + urlOptions.percentDecode = false; + + auto parsed = Url::parse(url, Url::HTTP_PROXY_REQUEST, urlOptions); auto path = parsed.toString(Url::HTTP_REQUEST); auto headersCopy = headers.clone(); headersCopy.set(HttpHeaderId::HOST, parsed.host); @@ -3650,7 +3979,13 @@ public: kj::Promise openWebSocket( kj::StringPtr url, const HttpHeaders& headers) override { - auto parsed = Url::parse(url, Url::HTTP_PROXY_REQUEST); + // We need to parse the proxy-style URL to convert it to host-style. + // Use URL parsing options that avoid unnecessary rewrites. 
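Putting the client pieces together: a hedged sketch of building a pooled client over kj::Network with the corrected idleTimeout setting and issuing a proxy-style request, which the code just below parses (with percent-decoding disabled) to fill in the Host header. The URL and the five-second timeout are illustrative.

```c++
#include <kj/async-io.h>
#include <kj/compat/http.h>
#include <kj/debug.h>

int main() {
  auto io = kj::setupAsyncIo();
  kj::HttpHeaderTable table;

  kj::HttpClientSettings settings;
  settings.idleTimeout = 5 * kj::SECONDS;  // how long idle pooled connections are kept

  auto client = kj::newHttpClient(io.provider->getTimer(), table,
                                  io.provider->getNetwork(), nullptr, settings);

  // Proxy-style URL: the client derives the Host header and a host-style request path.
  kj::HttpHeaders headers(table);
  auto request = client->request(kj::HttpMethod::GET, "http://example.org/", headers);

  auto response = request.response.wait(io.waitScope);
  auto body = response.body->readAllText().wait(io.waitScope);
  KJ_LOG(INFO, response.statusCode, body);
  return 0;
}
```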
+ Url::Options urlOptions; + urlOptions.allowEmpty = true; + urlOptions.percentDecode = false; + + auto parsed = Url::parse(url, Url::HTTP_PROXY_REQUEST, urlOptions); auto path = parsed.toString(Url::HTTP_REQUEST); auto headersCopy = headers.clone(); headersCopy.set(HttpHeaderId::HOST, parsed.host); @@ -3659,7 +3994,7 @@ public: private: kj::Timer& timer; - HttpHeaderTable& responseHeaderTable; + const HttpHeaderTable& responseHeaderTable; kj::Network& network; kj::Maybe tlsNetwork; HttpClientSettings settings; @@ -3749,13 +4084,13 @@ private: } // namespace -kj::Own newHttpClient(kj::Timer& timer, HttpHeaderTable& responseHeaderTable, +kj::Own newHttpClient(kj::Timer& timer, const HttpHeaderTable& responseHeaderTable, kj::NetworkAddress& addr, HttpClientSettings settings) { return kj::heap(timer, responseHeaderTable, kj::Own(&addr, kj::NullDisposer::instance), kj::mv(settings)); } -kj::Own newHttpClient(kj::Timer& timer, HttpHeaderTable& responseHeaderTable, +kj::Own newHttpClient(kj::Timer& timer, const HttpHeaderTable& responseHeaderTable, kj::Network& network, kj::Maybe tlsNetwork, HttpClientSettings settings) { return kj::heap( @@ -3766,6 +4101,171 @@ kj::Own newHttpClient(kj::Timer& timer, HttpHeaderTable& responseHea namespace { +class ConcurrencyLimitingHttpClient final: public HttpClient { +public: + ConcurrencyLimitingHttpClient( + kj::HttpClient& inner, uint maxConcurrentRequests, + kj::Function countChangedCallback) + : inner(inner), + maxConcurrentRequests(maxConcurrentRequests), + countChangedCallback(kj::mv(countChangedCallback)) {} + + Request request(HttpMethod method, kj::StringPtr url, const HttpHeaders& headers, + kj::Maybe expectedBodySize = nullptr) override { + if (concurrentRequests < maxConcurrentRequests) { + auto counter = ConnectionCounter(*this); + auto request = inner.request(method, url, headers, expectedBodySize); + fireCountChanged(); + auto promise = attachCounter(kj::mv(request.response), kj::mv(counter)); + return { kj::mv(request.body), kj::mv(promise) }; + } + + auto paf = kj::newPromiseAndFulfiller(); + auto urlCopy = kj::str(url); + auto headersCopy = headers.clone(); + + auto combined = paf.promise + .then([this, + method, + urlCopy = kj::mv(urlCopy), + headersCopy = kj::mv(headersCopy), + expectedBodySize](ConnectionCounter&& counter) mutable { + auto req = inner.request(method, urlCopy, headersCopy, expectedBodySize); + return kj::tuple(kj::mv(req.body), attachCounter(kj::mv(req.response), kj::mv(counter))); + }); + auto split = combined.split(); + pendingRequests.push(kj::mv(paf.fulfiller)); + fireCountChanged(); + return { newPromisedStream(kj::mv(kj::get<0>(split))), kj::mv(kj::get<1>(split)) }; + } + + kj::Promise openWebSocket( + kj::StringPtr url, const kj::HttpHeaders& headers) override { + if (concurrentRequests < maxConcurrentRequests) { + auto counter = ConnectionCounter(*this); + auto response = inner.openWebSocket(url, headers); + fireCountChanged(); + return attachCounter(kj::mv(response), kj::mv(counter)); + } + + auto paf = kj::newPromiseAndFulfiller(); + auto urlCopy = kj::str(url); + auto headersCopy = headers.clone(); + + auto promise = paf.promise + .then([this, + urlCopy = kj::mv(urlCopy), + headersCopy = kj::mv(headersCopy)](ConnectionCounter&& counter) mutable { + return attachCounter(inner.openWebSocket(urlCopy, headersCopy), kj::mv(counter)); + }); + + pendingRequests.push(kj::mv(paf.fulfiller)); + fireCountChanged(); + return kj::mv(promise); + } + +private: + struct ConnectionCounter; + + kj::HttpClient& inner; + uint 
maxConcurrentRequests; + uint concurrentRequests = 0; + kj::Function countChangedCallback; + + std::queue>> pendingRequests; + // TODO(someday): want maximum cap on queue size? + + struct ConnectionCounter final { + ConnectionCounter(ConcurrencyLimitingHttpClient& client) : parent(&client) { + ++parent->concurrentRequests; + } + KJ_DISALLOW_COPY(ConnectionCounter); + ~ConnectionCounter() noexcept(false) { + if (parent != nullptr) { + --parent->concurrentRequests; + parent->serviceQueue(); + parent->fireCountChanged(); + } + } + ConnectionCounter(ConnectionCounter&& other) : parent(other.parent) { + other.parent = nullptr; + } + ConnectionCounter& operator=(ConnectionCounter&& other) { + if (this != &other) { + this->parent = other.parent; + other.parent = nullptr; + } + return *this; + } + + ConcurrencyLimitingHttpClient* parent; + }; + + void serviceQueue() { + if (concurrentRequests >= maxConcurrentRequests) { return; } + if (pendingRequests.empty()) { return; } + + auto fulfiller = kj::mv(pendingRequests.front()); + pendingRequests.pop(); + fulfiller->fulfill(ConnectionCounter(*this)); + } + + void fireCountChanged() { + countChangedCallback(concurrentRequests, pendingRequests.size()); + } + + using WebSocketOrBody = kj::OneOf, kj::Own>; + static WebSocketOrBody attachCounter(WebSocketOrBody&& webSocketOrBody, + ConnectionCounter&& counter) { + KJ_SWITCH_ONEOF(webSocketOrBody) { + KJ_CASE_ONEOF(ws, kj::Own) { + return ws.attach(kj::mv(counter)); + } + KJ_CASE_ONEOF(body, kj::Own) { + return body.attach(kj::mv(counter)); + } + } + KJ_UNREACHABLE; + } + + static kj::Promise attachCounter(kj::Promise&& promise, + ConnectionCounter&& counter) { + return promise.then([counter = kj::mv(counter)](WebSocketResponse&& response) mutable { + return WebSocketResponse { + response.statusCode, + response.statusText, + response.headers, + attachCounter(kj::mv(response.webSocketOrBody), kj::mv(counter)) + }; + }); + } + + static kj::Promise attachCounter(kj::Promise&& promise, + ConnectionCounter&& counter) { + return promise.then([counter = kj::mv(counter)](Response&& response) mutable { + return Response { + response.statusCode, + response.statusText, + response.headers, + response.body.attach(kj::mv(counter)) + }; + }); + } +}; + +} + +kj::Own newConcurrencyLimitingHttpClient( + HttpClient& inner, uint maxConcurrentRequests, + kj::Function countChangedCallback) { + return kj::heap(inner, maxConcurrentRequests, + kj::mv(countChangedCallback)); +} + +// ======================================================================================= + +namespace { + class NullInputStream final: public kj::AsyncInputStream { public: NullInputStream(kj::Maybe expectedLength = size_t(0)) @@ -3795,6 +4295,9 @@ public: Promise write(ArrayPtr> pieces) override { return kj::READY_NOW; } + Promise whenWriteDisconnected() override { + return kj::NEVER_DONE; + } // We can't really optimize tryPumpFrom() unless AsyncInputStream grows a skip() method. }; @@ -3813,10 +4316,17 @@ public: auto pipe = newOneWayPipe(expectedBodySize); + // TODO(cleanup): The ownership relationships here are a mess. Can we do something better + // involving a PromiseAdapter, maybe? 
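A short usage sketch for the new wrapper; the limit of 8 is arbitrary, and the callback parameter order (running count first, then queued count) follows the fireCountChanged() call above. The wrapper holds only a reference, so `inner` must outlive the returned client.

```c++
#include <kj/compat/http.h>
#include <kj/debug.h>

kj::Own<kj::HttpClient> limitClient(kj::HttpClient& inner) {
  return kj::newConcurrencyLimitingHttpClient(inner, 8,
      [](uint runningCount, uint pendingCount) {
    // Hook for metrics: invoked whenever either count changes.
    KJ_LOG(INFO, runningCount, pendingCount);
  });
}
```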
auto paf = kj::newPromiseAndFulfiller(); auto responder = kj::refcounted(method, kj::mv(paf.fulfiller)); - auto promise = service.request(method, urlCopy, *headersCopy, *pipe.in, *responder); - responder->setPromise(promise.attach(kj::mv(pipe.in), kj::mv(urlCopy), kj::mv(headersCopy))); + + auto requestPaf = kj::newPromiseAndFulfiller>(); + responder->setPromise(kj::mv(requestPaf.promise)); + + auto promise = service.request(method, urlCopy, *headersCopy, *pipe.in, *responder) + .attach(kj::mv(pipe.in), kj::mv(urlCopy), kj::mv(headersCopy)); + requestPaf.fulfiller->fulfill(kj::mv(promise)); return { kj::mv(pipe.out), @@ -3837,9 +4347,14 @@ public: auto paf = kj::newPromiseAndFulfiller(); auto responder = kj::refcounted(kj::mv(paf.fulfiller)); + + auto requestPaf = kj::newPromiseAndFulfiller>(); + responder->setPromise(kj::mv(requestPaf.promise)); + auto in = kj::heap(); - auto promise = service.request(HttpMethod::GET, urlCopy, *headersCopy, *in, *responder); - responder->setPromise(promise.attach(kj::mv(in), kj::mv(urlCopy), kj::mv(headersCopy))); + auto promise = service.request(HttpMethod::GET, urlCopy, *headersCopy, *in, *responder) + .attach(kj::mv(in), kj::mv(urlCopy), kj::mv(headersCopy)); + requestPaf.fulfiller->fulfill(kj::mv(promise)); return paf.promise.attach(kj::mv(responder)); } @@ -3851,6 +4366,68 @@ public: private: HttpService& service; + class DelayedEofInputStream final: public kj::AsyncInputStream { + // An AsyncInputStream wrapper that, when it reaches EOF, delays the final read until some + // promise completes. + + public: + DelayedEofInputStream(kj::Own inner, kj::Promise completionTask) + : inner(kj::mv(inner)), completionTask(kj::mv(completionTask)) {} + + kj::Promise tryRead(void* buffer, size_t minBytes, size_t maxBytes) override { + return wrap(minBytes, inner->tryRead(buffer, minBytes, maxBytes)); + } + + kj::Maybe tryGetLength() override { + return inner->tryGetLength(); + } + + kj::Promise pumpTo(kj::AsyncOutputStream& output, uint64_t amount) override { + return wrap(amount, inner->pumpTo(output, amount)); + } + + private: + kj::Own inner; + kj::Maybe> completionTask; + + template + kj::Promise wrap(T requested, kj::Promise innerPromise) { + return innerPromise.then([this,requested](T actual) -> kj::Promise { + if (actual < requested) { + // Must have reached EOF. + KJ_IF_MAYBE(t, completionTask) { + // Delay until completion. + auto result = t->then([actual]() { return actual; }); + completionTask = nullptr; + return result; + } else { + // Must have called tryRead() again after we already signaled EOF. Fine. + return actual; + } + } else { + return actual; + } + }, [this](kj::Exception&& e) -> kj::Promise { + // The stream threw an exception, but this exception is almost certainly just complaining + // that the other end of the stream was dropped. In all likelihood, the HttpService + // request() call itself will throw a much more interesting error -- we'd rather propagate + // that one, if so. + KJ_IF_MAYBE(t, completionTask) { + auto result = t->then([e = kj::mv(e)]() mutable -> kj::Promise { + // Looks like the service didn't throw. I guess we should propagate the stream error + // after all. + return kj::mv(e); + }); + completionTask = nullptr; + return result; + } else { + // Must have called tryRead() again after we already signaled EOF or threw. Fine. 
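// A hedged sketch of what the delayed-EOF wrapper above means for callers: when an HttpClient
// is obtained from kj::newHttpClient(HttpService&), the read that observes EOF on a response
// body resolves only after the service's request() promise has settled. `client` and `table`
// are assumed inputs; the URL is a placeholder.
kj::Promise<kj::String> fetchBody(kj::HttpClient& client, const kj::HttpHeaderTable& table) {
  kj::HttpHeaders headers(table);
  auto req = client.request(kj::HttpMethod::GET, "http://example.com/", headers);
  return req.response.then([](kj::HttpClient::Response&& response) {
    // readAllText() performs the final (EOF) read, so it completes once the service returned.
    auto text = response.body->readAllText();
    return text.attach(kj::mv(response.body));
  });
}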
+ return kj::mv(e); + } + }); + } + }; + class ResponseImpl final: public HttpService::Response, public kj::Refcounted { public: ResponseImpl(kj::HttpMethod method, @@ -3862,8 +4439,8 @@ private: if (fulfiller->isWaiting()) { fulfiller->reject(kj::mv(exception)); } else { - KJ_LOG(ERROR, "HttpService threw an exception after having already started responding", - exception); + // We need to cause the response stream's read() to throw this, so we should propagate it. + kj::throwRecoverableException(kj::mv(exception)); } }); } @@ -3877,18 +4454,31 @@ private: auto statusTextCopy = kj::str(statusText); auto headersCopy = kj::heap(headers.clone()); - if (method == kj::HttpMethod::HEAD) { - fulfiller->fulfill({ - statusCode, statusTextCopy, headersCopy.get(), - kj::heap(expectedBodySize) - .attach(kj::addRef(*this), kj::mv(statusTextCopy), kj::mv(headersCopy)) - }); + if (method == kj::HttpMethod::HEAD || expectedBodySize.orDefault(1) == 0) { + // We're not expecting any body. We need to delay reporting completion to the client until + // the server side has actually returned from the service method, otherwise we may + // prematurely cancel it. + + task = task.then([this,statusCode,statusTextCopy=kj::mv(statusTextCopy), + headersCopy=kj::mv(headersCopy),expectedBodySize]() mutable { + fulfiller->fulfill({ + statusCode, statusTextCopy, headersCopy.get(), + kj::heap(expectedBodySize) + .attach(kj::mv(statusTextCopy), kj::mv(headersCopy)) + }); + }).eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); return kj::heap(); } else { auto pipe = newOneWayPipe(expectedBodySize); + + // Wrap the stream in a wrapper that delays the last read (the one that signals EOF) until + // the service's request promise has finished. + auto wrapper = kj::heap( + kj::mv(pipe.in), task.attach(kj::addRef(*this))); + fulfiller->fulfill({ statusCode, statusTextCopy, headersCopy.get(), - pipe.in.attach(kj::addRef(*this), kj::mv(statusTextCopy), kj::mv(headersCopy)) + wrapper.attach(kj::mv(statusTextCopy), kj::mv(headersCopy)) }); return kj::mv(pipe.out); } @@ -3904,6 +4494,91 @@ private: kj::Promise task = nullptr; }; + class DelayedCloseWebSocket final: public WebSocket { + // A WebSocket wrapper that, when it reaches Close (in both directions), delays the final close + // operation until some promise completes. + + public: + DelayedCloseWebSocket(kj::Own inner, kj::Promise completionTask) + : inner(kj::mv(inner)), completionTask(kj::mv(completionTask)) {} + + kj::Promise send(kj::ArrayPtr message) override { + return inner->send(message); + } + kj::Promise send(kj::ArrayPtr message) override { + return inner->send(message); + } + kj::Promise close(uint16_t code, kj::StringPtr reason) override { + return inner->close(code, reason) + .then([this]() { + return afterSendClosed(); + }); + } + kj::Promise disconnect() override { + return inner->disconnect(); + } + void abort() override { + // Don't need to worry about completion task in this case -- cancelling it is reasonable. 
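// A hedged sketch of the abort()/whenAborted() pair this wrapper forwards (both are new
// WebSocket APIs in this patch): a sender can race its work against a peer abort. `ws` and
// `timer` are assumed to outlive the returned promise; the interval and payload are arbitrary.
kj::Promise<void> sendHeartbeats(kj::WebSocket& ws, kj::Timer& timer) {
  auto loop = timer.afterDelay(30 * kj::SECONDS).then([&ws]() {
    // The string literal has static storage, so it stays valid while send() is pending.
    return ws.send(kj::StringPtr("ping").asArray());
  }).then([&ws, &timer]() {
    return sendHeartbeats(ws, timer);
  });
  // whenAborted() resolves once further sends would fail with DISCONNECTED.
  return loop.exclusiveJoin(ws.whenAborted());
}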
+ inner->abort(); + } + kj::Promise whenAborted() override { + return inner->whenAborted(); + } + kj::Promise receive(size_t maxSize) override { + return inner->receive(maxSize).then([this](Message&& message) -> kj::Promise { + if (message.is()) { + return afterReceiveClosed() + .then([message = kj::mv(message)]() mutable { return kj::mv(message); }); + } + return kj::mv(message); + }); + } + kj::Promise pumpTo(WebSocket& other) override { + return inner->pumpTo(other).then([this]() { + return afterReceiveClosed(); + }); + } + kj::Maybe> tryPumpFrom(WebSocket& other) override { + return other.pumpTo(*inner).then([this]() { + return afterSendClosed(); + }); + } + + uint64_t sentByteCount() override { return inner->sentByteCount(); } + uint64_t receivedByteCount() override { return inner->receivedByteCount(); } + + private: + kj::Own inner; + kj::Maybe> completionTask; + + bool sentClose = false; + bool receivedClose = false; + + kj::Promise afterSendClosed() { + sentClose = true; + if (receivedClose) { + KJ_IF_MAYBE(t, completionTask) { + auto result = kj::mv(*t); + completionTask = nullptr; + return result; + } + } + return kj::READY_NOW; + } + + kj::Promise afterReceiveClosed() { + receivedClose = true; + if (sentClose) { + KJ_IF_MAYBE(t, completionTask) { + auto result = kj::mv(*t); + completionTask = nullptr; + return result; + } + } + return kj::READY_NOW; + } + }; + class WebSocketResponseImpl final: public HttpService::Response, public kj::Refcounted { public: WebSocketResponseImpl(kj::Own> fulfiller) @@ -3914,8 +4589,9 @@ private: if (fulfiller->isWaiting()) { fulfiller->reject(kj::mv(exception)); } else { - KJ_LOG(ERROR, "HttpService threw an exception after having already started responding", - exception); + // We need to cause the client-side WebSocket to throw on close, so propagate the + // exception. + kj::throwRecoverableException(kj::mv(exception)); } }); } @@ -3929,12 +4605,34 @@ private: auto statusTextCopy = kj::str(statusText); auto headersCopy = kj::heap(headers.clone()); - auto pipe = newOneWayPipe(expectedBodySize); - fulfiller->fulfill({ - statusCode, statusTextCopy, headersCopy.get(), - pipe.in.attach(kj::addRef(*this), kj::mv(statusTextCopy), kj::mv(headersCopy)) - }); - return kj::mv(pipe.out); + if (expectedBodySize.orDefault(1) == 0) { + // We're not expecting any body. We need to delay reporting completion to the client until + // the server side has actually returned from the service method, otherwise we may + // prematurely cancel it. + + task = task.then([this,statusCode,statusTextCopy=kj::mv(statusTextCopy), + headersCopy=kj::mv(headersCopy),expectedBodySize]() mutable { + fulfiller->fulfill({ + statusCode, statusTextCopy, headersCopy.get(), + kj::Own(kj::heap(expectedBodySize) + .attach(kj::mv(statusTextCopy), kj::mv(headersCopy))) + }); + }).eagerlyEvaluate([](kj::Exception&& e) { KJ_LOG(ERROR, e); }); + return kj::heap(); + } else { + auto pipe = newOneWayPipe(expectedBodySize); + + // Wrap the stream in a wrapper that delays the last read (the one that signals EOF) until + // the service's request promise has finished. 
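// A hedged sketch of the receive() side that DelayedCloseWebSocket above intercepts, using the
// new maxSize parameter and byte-count accessors. The message variants are assumed to be
// kj::String (text), kj::Array<kj::byte> (binary), and WebSocket::Close, as in upstream KJ;
// the angle brackets are stripped in this diff's typedef.
kj::Promise<void> logMessages(kj::WebSocket& ws) {
  return ws.receive(kj::WebSocket::SUGGESTED_MAX_MESSAGE_SIZE)
      .then([&ws](kj::WebSocket::Message&& message) -> kj::Promise<void> {
    KJ_SWITCH_ONEOF(message) {
      KJ_CASE_ONEOF(text, kj::String) {
        KJ_LOG(INFO, "text frame", text.size(), ws.receivedByteCount());
        return logMessages(ws);
      }
      KJ_CASE_ONEOF(data, kj::Array<kj::byte>) {
        KJ_LOG(INFO, "binary frame", data.size(), ws.receivedByteCount());
        return logMessages(ws);
      }
      KJ_CASE_ONEOF(close, kj::WebSocket::Close) {
        KJ_LOG(INFO, "peer sent Close", close.code, close.reason);
        return kj::READY_NOW;
      }
    }
    KJ_UNREACHABLE;
  });
}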
+ kj::Own wrapper = + kj::heap(kj::mv(pipe.in), task.attach(kj::addRef(*this))); + + fulfiller->fulfill({ + statusCode, statusTextCopy, headersCopy.get(), + wrapper.attach(kj::mv(statusTextCopy), kj::mv(headersCopy)) + }); + return kj::mv(pipe.out); + } } kj::Own acceptWebSocket(const HttpHeaders& headers) override { @@ -3944,9 +4642,14 @@ private: auto headersCopy = kj::heap(headers.clone()); auto pipe = newWebSocketPipe(); + + // Wrap the client-side WebSocket in a wrapper that delays clean close of the WebSocket until + // the service's request promise has finished. + kj::Own wrapper = + kj::heap(kj::mv(pipe.ends[0]), task.attach(kj::addRef(*this))); fulfiller->fulfill({ 101, "Switching Protocols", headersCopy.get(), - pipe.ends[0].attach(kj::addRef(*this), kj::mv(headersCopy)) + wrapper.attach(kj::mv(headersCopy)) }); return kj::mv(pipe.ends[1]); } @@ -4047,7 +4750,8 @@ kj::Promise> HttpService::connect(kj::StringPtr host) KJ_UNIMPLEMENTED("CONNECT is not implemented by this HttpService"); } -class HttpServer::Connection final: private HttpService::Response { +class HttpServer::Connection final: private HttpService::Response, + private HttpServerErrorHandler { public: Connection(HttpServer& server, kj::AsyncIoStream& stream, HttpService& service) @@ -4066,6 +4770,37 @@ public: } } +public: + kj::Promise startLoop(bool firstRequest) { + return loop(firstRequest).catch_([this](kj::Exception&& e) -> kj::Promise { + // Exception; report 5xx. + + KJ_IF_MAYBE(p, webSocketError) { + // sendWebSocketError() was called. Finish sending and close the connection. Don't log + // the exception because it's probably a side-effect of this. + auto promise = kj::mv(*p); + webSocketError = nullptr; + return kj::mv(promise); + } + + return sendError(kj::mv(e)); + }); + } + +private: + HttpServer& server; + kj::AsyncIoStream& stream; + HttpService& service; + HttpInputStreamImpl httpInput; + HttpOutputStream httpOutput; + kj::Maybe currentMethod; + bool timedOut = false; + bool closed = false; + bool upgraded = false; + bool webSocketClosed = false; + bool closeAfterSend = false; // True if send() should set Connection: close. + kj::Maybe> webSocketError; + kj::Promise loop(bool firstRequest) { if (!firstRequest && server.draining && httpInput.isCleanDrain()) { // Don't call awaitNextMessage() in this case because that will initiate a read() which will @@ -4093,7 +4828,8 @@ public: } auto receivedHeaders = firstByte - .then([this,firstRequest](bool hasData)-> kj::Promise> { + .then([this,firstRequest](bool hasData) + -> kj::Promise { if (hasData) { auto readHeaders = httpInput.readRequestHeaders(); if (!firstRequest) { @@ -4101,9 +4837,12 @@ public: // the first byte of a pipeline response. readHeaders = readHeaders.exclusiveJoin( server.timer.afterDelay(server.settings.headerTimeout) - .then([this]() -> kj::Maybe { + .then([this]() -> HttpHeaders::RequestOrProtocolError { timedOut = true; - return nullptr; + return HttpHeaders::ProtocolError { + 408, "Request Timeout", + "Timed out waiting for next request headers.", nullptr + }; })); } return kj::mv(readHeaders); @@ -4111,7 +4850,11 @@ public: // Client closed connection or pipeline timed out with no bytes received. This is not an // error, so don't report one. 
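// A hedged sketch of where the timeouts raced above come from: they are plain fields on
// HttpServerSettings, set when the server is constructed. `timer`, `headerTable`, `service`,
// and `listener` are assumed; the values shown are arbitrary.
kj::Promise<void> serve(kj::Timer& timer, const kj::HttpHeaderTable& headerTable,
                        kj::HttpService& service, kj::ConnectionReceiver& listener) {
  kj::HttpServerSettings settings;
  settings.headerTimeout = 5 * kj::SECONDS;             // drives the 408 timeout paths above
  settings.canceledUploadGracePeriod = 2 * kj::SECONDS; // grace for unread request bodies
  auto server = kj::heap<kj::HttpServer>(timer, headerTable, service, settings);
  return server->listenHttp(listener).attach(kj::mv(server));
}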
this->closed = true; - return kj::Maybe(nullptr); + return HttpHeaders::RequestOrProtocolError(HttpHeaders::ProtocolError { + 408, "Request Timeout", + "Client closed connection or connection timeout " + "while waiting for request headers.", nullptr + }); } }); @@ -4119,15 +4862,19 @@ public: // On the first request, the header timeout starts ticking immediately upon request opening. auto timeoutPromise = server.timer.afterDelay(server.settings.headerTimeout) .exclusiveJoin(server.onDrain.addBranch()) - .then([this]() -> kj::Maybe { + .then([this]() -> HttpHeaders::RequestOrProtocolError { timedOut = true; - return nullptr; + return HttpHeaders::ProtocolError { + 408, "Request Timeout", + "Timed out waiting for initial request headers.", nullptr + }; }); receivedHeaders = receivedHeaders.exclusiveJoin(kj::mv(timeoutPromise)); } return receivedHeaders - .then([this](kj::Maybe&& request) -> kj::Promise { + .then([this](HttpHeaders::RequestOrProtocolError&& requestOrProtocolError) + -> kj::Promise { if (timedOut) { // Client took too long to send anything, so we're going to close the connection. In // theory, we should send back an HTTP 408 error -- it is designed exactly for this @@ -4153,173 +4900,125 @@ public: return httpOutput.flush().then([]() { return false; }); } - KJ_IF_MAYBE(req, request) { - auto& headers = httpInput.getHeaders(); - - currentMethod = req->method; - auto body = httpInput.getEntityBody( - HttpInputStream::REQUEST, req->method, 0, headers); - - // TODO(perf): If the client disconnects, should we cancel the response? Probably, to - // prevent permanent deadlock. It's slightly weird in that arguably the client should - // be able to shutdown the upstream but still wait on the downstream, but I believe many - // other HTTP servers do similar things. - - auto promise = service.request( - req->method, req->url, headers, *body, *this); - return promise.then(kj::mvCapture(body, - [this](kj::Own body) -> kj::Promise { - // Response done. Await next request. - - KJ_IF_MAYBE(p, webSocketError) { - // sendWebSocketError() was called. Finish sending and close the connection. - auto promise = kj::mv(*p); - webSocketError = nullptr; - return kj::mv(promise); - } - - if (upgraded) { - // We've upgraded to WebSocket, and by now we should have closed the WebSocket. - if (!webSocketClosed) { - // This is gonna segfault later so abort now instead. - KJ_LOG(FATAL, "Accepted WebSocket object must be destroyed before HttpService " - "request handler completes."); - abort(); + KJ_SWITCH_ONEOF(requestOrProtocolError) { + KJ_CASE_ONEOF(request, HttpHeaders::Request) { + auto& headers = httpInput.getHeaders(); + + currentMethod = request.method; + auto body = httpInput.getEntityBody( + HttpInputStreamImpl::REQUEST, request.method, 0, headers); + + // TODO(perf): If the client disconnects, should we cancel the response? Probably, to + // prevent permanent deadlock. It's slightly weird in that arguably the client should + // be able to shutdown the upstream but still wait on the downstream, but I believe many + // other HTTP servers do similar things. + + auto promise = service.request( + request.method, request.url, headers, *body, *this); + return promise.then([this, body = kj::mv(body)]() mutable -> kj::Promise { + // Response done. Await next request. + + KJ_IF_MAYBE(p, webSocketError) { + // sendWebSocketError() was called. Finish sending and close the connection. 
+ auto promise = kj::mv(*p); + webSocketError = nullptr; + return kj::mv(promise); } - // Once we start a WebSocket there's no going back to HTTP. - return false; - } - - if (currentMethod != nullptr) { - return sendError(500, "Internal Server Error", kj::str( - "ERROR: The HttpService did not generate a response.")); - } - - if (httpOutput.isBroken()) { - // We started a response but didn't finish it. But HttpService returns success? Perhaps - // it decided that it doesn't want to finish this response. We'll have to disconnect - // here. If the response body is not complete (e.g. Content-Length not reached), the - // client should notice. We don't want to log an error because this condition might be - // intentional on the service's part. - return false; - } + if (upgraded) { + // We've upgraded to WebSocket, and by now we should have closed the WebSocket. + if (!webSocketClosed) { + // This is gonna segfault later so abort now instead. + KJ_LOG(FATAL, "Accepted WebSocket object must be destroyed before HttpService " + "request handler completes."); + abort(); + } + + // Once we start a WebSocket there's no going back to HTTP. + return false; + } - return httpOutput.flush().then(kj::mvCapture(body, - [this](kj::Own body) -> kj::Promise { - if (httpInput.canReuse()) { - // Things look clean. Go ahead and accept the next request. + if (currentMethod != nullptr) { + return sendError(); + } - // Note that we don't have to handle server.draining here because we'll take care of - // it the next time around the loop. - return loop(false); - } else { - // Apparently, the application did not read the request body. Maybe this is a bug, - // or maybe not: maybe the client tried to upload too much data and the application - // legitimately wants to cancel the upload without reading all it it. - // - // We have a problem, though: We did send a response, and we didn't send - // `Connection: close`, so the client may expect that it can send another request. - // Perhaps the client has even finished sending the previous request's body, in - // which case the moment it finishes receiving the response, it could be completely - // within its rights to start a new request. If we close the socket now, we might - // interrupt that new request. - // - // There's no way we can get out of this perfectly cleanly. HTTP just isn't good - // enough at connection management. The best we can do is give the client some grace - // period and then abort the connection. - - auto dummy = kj::heap(); - auto lengthGrace = body->pumpTo(*dummy, server.settings.canceledUploadGraceBytes) - .then([this](size_t amount) { - if (httpInput.canReuse()) { - // Success, we can continue. - return true; - } else { - // Still more data. Give up. - return false; - } - }); - lengthGrace = lengthGrace.attach(kj::mv(dummy), kj::mv(body)); - - auto timeGrace = server.timer.afterDelay(server.settings.canceledUploadGacePeriod) - .then([]() { return false; }); - - return lengthGrace.exclusiveJoin(kj::mv(timeGrace)) - .then([this](bool clean) -> kj::Promise { - if (clean) { - // We recovered. Continue loop. - return loop(false); - } else { - // Client still not done. Return broken. - return false; - } - }); + if (httpOutput.isBroken()) { + // We started a response but didn't finish it. But HttpService returns success? + // Perhaps it decided that it doesn't want to finish this response. We'll have to + // disconnect here. If the response body is not complete (e.g. Content-Length not + // reached), the client should notice. 
We don't want to log an error because this + // condition might be intentional on the service's part. + return false; } - })); - })); - } else { - // Bad request. - return sendError(400, "Bad Request", kj::str( - "ERROR: The headers sent by your client were not valid.")); - } - }).catch_([this](kj::Exception&& e) -> kj::Promise { - // Exception; report 500. - - if (currentMethod == nullptr) { - // Dang, already sent a partial response. Can't do anything else. - - KJ_IF_MAYBE(p, webSocketError) { - // sendWebSocketError() was called. Finish sending and close the connection. Don't log - // the exception because it's probably a side-effect of this. - auto promise = kj::mv(*p); - webSocketError = nullptr; - return kj::mv(promise); + return httpOutput.flush().then( + [this, body = kj::mv(body)]() mutable -> kj::Promise { + if (httpInput.canReuse()) { + // Things look clean. Go ahead and accept the next request. + + // Note that we don't have to handle server.draining here because we'll take care of + // it the next time around the loop. + return loop(false); + } else { + // Apparently, the application did not read the request body. Maybe this is a bug, + // or maybe not: maybe the client tried to upload too much data and the application + // legitimately wants to cancel the upload without reading all it it. + // + // We have a problem, though: We did send a response, and we didn't send + // `Connection: close`, so the client may expect that it can send another request. + // Perhaps the client has even finished sending the previous request's body, in + // which case the moment it finishes receiving the response, it could be completely + // within its rights to start a new request. If we close the socket now, we might + // interrupt that new request. + // + // There's no way we can get out of this perfectly cleanly. HTTP just isn't good + // enough at connection management. The best we can do is give the client some grace + // period and then abort the connection. + + auto dummy = kj::heap(); + auto lengthGrace = body->pumpTo(*dummy, server.settings.canceledUploadGraceBytes) + .then([this](size_t amount) { + if (httpInput.canReuse()) { + // Success, we can continue. + return true; + } else { + // Still more data. Give up. + return false; + } + }); + lengthGrace = lengthGrace.attach(kj::mv(dummy), kj::mv(body)); + + auto timeGrace = server.timer.afterDelay(server.settings.canceledUploadGracePeriod) + .then([]() { return false; }); + + return lengthGrace.exclusiveJoin(kj::mv(timeGrace)) + .then([this](bool clean) -> kj::Promise { + if (clean) { + // We recovered. Continue loop. + return loop(false); + } else { + // Client still not done. Return broken. + return false; + } + }); + } + }); + }); } + KJ_CASE_ONEOF(protocolError, HttpHeaders::ProtocolError) { + // Bad request. - // If it's a DISCONNECTED exception, it's probably that the client disconnected, which is - // not really worth logging. - if (e.getType() != kj::Exception::Type::DISCONNECTED) { - KJ_LOG(ERROR, "HttpService threw exception after generating a partial response", - "too late to report error to client", e); + // sendError() uses Response::send(), which requires that we have a currentMethod, but we + // never read one. GET seems like the correct choice here. + currentMethod = HttpMethod::GET; + return sendError(kj::mv(protocolError)); } - return false; } - if (e.getType() == kj::Exception::Type::OVERLOADED) { - return sendError(503, "Service Unavailable", kj::str( - "ERROR: The server is temporarily unable to handle your request. 
Details:\n\n", e)); - } else if (e.getType() == kj::Exception::Type::UNIMPLEMENTED) { - return sendError(501, "Not Implemented", kj::str( - "ERROR: The server does not implement this operation. Details:\n\n", e)); - } else if (e.getType() == kj::Exception::Type::DISCONNECTED) { - // How do we tell an HTTP client that there was a transient network error, and it should - // try again immediately? There's no HTTP status code for this (503 is meant for "try - // again later, not now"). Here's an idea: Don't send any response; just close the - // connection, so that it looks like the connection between the HTTP client and server - // was dropped. A good client should treat this exactly the way we want. - return false; - } else { - return sendError(500, "Internal Server Error", kj::str( - "ERROR: The server threw an exception. Details:\n\n", e)); - } + KJ_UNREACHABLE; }); } -private: - HttpServer& server; - kj::AsyncIoStream& stream; - HttpService& service; - HttpInputStream httpInput; - HttpOutputStream httpOutput; - kj::Maybe currentMethod; - bool timedOut = false; - bool closed = false; - bool upgraded = false; - bool webSocketClosed = false; - kj::Maybe> webSocketError; - kj::Own send( uint statusCode, kj::StringPtr statusText, const HttpHeaders& headers, kj::Maybe expectedBodySize) override { @@ -4329,11 +5028,42 @@ private: kj::StringPtr connectionHeaders[HttpHeaders::CONNECTION_HEADERS_COUNT]; kj::String lengthStr; - if (statusCode == 204 || statusCode == 205 || statusCode == 304) { + if (!closeAfterSend) { + // Check if application wants us to close connections. + KJ_IF_MAYBE(c, server.settings.callbacks) { + if (c->shouldClose()) { + closeAfterSend = true; + } + } + } + + // TODO(0.10): If `server.draining`, we should probably set `closeAfterSend` -- UNLESS the + // connection was created using listenHttpCleanDrain(), in which case the application may + // intend to continue using the connection. + + if (closeAfterSend) { + connectionHeaders[HttpHeaders::BuiltinIndices::CONNECTION] = "close"; + } + + if (statusCode == 204 || statusCode == 304) { // No entity-body. + } else if (statusCode == 205) { + // Status code 205 also has no body, but unlike 204 and 304, it must explicitly encode an + // empty body, e.g. using content-length: 0. I'm guessing this is one of those things, where + // some early clients expected an explicit body while others assumed an empty body, and so + // the standard had to choose the common denominator. + // + // Spec: https://tools.ietf.org/html/rfc7231#section-6.3.6 + connectionHeaders[HttpHeaders::BuiltinIndices::CONTENT_LENGTH] = "0"; } else KJ_IF_MAYBE(s, expectedBodySize) { - lengthStr = kj::str(*s); - connectionHeaders[HttpHeaders::BuiltinIndices::CONTENT_LENGTH] = lengthStr; + // HACK: We interpret a zero-length expected body length on responses to HEAD requests to mean + // "don't set a Content-Length header at all." This provides a way to omit a body header on + // HEAD responses with non-null-body status codes. This is a hack that *only* makes sense + // for HEAD responses. 
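// A hedged sketch of the HEAD convention described above: a service replies to HEAD with an
// expected body size of zero to omit both the body and the Content-Length header. The class
// name and body text are hypothetical; `table` is assumed to be the server's header table.
class HeadAwareService final: public kj::HttpService {
public:
  explicit HeadAwareService(const kj::HttpHeaderTable& table): table(table) {}

  kj::Promise<void> request(
      kj::HttpMethod method, kj::StringPtr url, const kj::HttpHeaders& headers,
      kj::AsyncInputStream& requestBody, Response& response) override {
    kj::HttpHeaders responseHeaders(table);
    if (method == kj::HttpMethod::HEAD) {
      // Zero expected size on a HEAD response => no body and no Content-Length header.
      response.send(200, "OK", responseHeaders, uint64_t(0));
      return kj::READY_NOW;
    }
    kj::StringPtr body = "hello\n";
    auto stream = response.send(200, "OK", responseHeaders, body.size());
    return stream->write(body.begin(), body.size()).attach(kj::mv(stream));
  }

private:
  const kj::HttpHeaderTable& table;
};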
+ if (method != HttpMethod::HEAD || *s > 0) { + lengthStr = kj::str(*s); + connectionHeaders[HttpHeaders::BuiltinIndices::CONTENT_LENGTH] = lengthStr; + } } else { connectionHeaders[HttpHeaders::BuiltinIndices::TRANSFER_ENCODING] = "chunked"; } @@ -4374,24 +5104,23 @@ private: "can't call acceptWebSocket() if the request headers didn't have Upgrade: WebSocket"); auto method = KJ_REQUIRE_NONNULL(currentMethod, "already called send()"); - currentMethod = nullptr; + // Unlike send(), we neither need nor want to null out currentMethod. The error cases below + // depend on it being non-null to allow error responses to be sent, and the happy path expects + // it to be GET. if (method != HttpMethod::GET) { - return sendWebSocketError(400, "Bad Request", kj::str( - "ERROR: WebSocket must be initiated with a GET request.")); + return sendWebSocketError("WebSocket must be initiated with a GET request."); } if (requestHeaders.get(HttpHeaderId::SEC_WEBSOCKET_VERSION).orDefault(nullptr) != "13") { - return sendWebSocketError(400, "Bad Request", kj::str( - "ERROR: The requested WebSocket version is not supported.")); + return sendWebSocketError("The requested WebSocket version is not supported."); } kj::String key; KJ_IF_MAYBE(k, requestHeaders.get(HttpHeaderId::SEC_WEBSOCKET_KEY)) { - currentMethod = HttpMethod::GET; key = kj::str(*k); } else { - return sendWebSocketError(400, "Bad Request", kj::str("ERROR: Missing Sec-WebSocket-Key")); + return sendWebSocketError("Missing Sec-WebSocket-Key"); } auto websocketAccept = generateWebSocketAccept(key); @@ -4415,24 +5144,44 @@ private: httpInput, httpOutput, nullptr); } - kj::Promise sendError(uint statusCode, kj::StringPtr statusText, kj::String body) { - HttpHeaders failed(server.requestHeaderTable); - failed.set(HttpHeaderId::CONNECTION, "close"); - failed.set(HttpHeaderId::CONTENT_LENGTH, kj::str(body.size())); + kj::Promise sendError(HttpHeaders::ProtocolError protocolError) { + closeAfterSend = true; - failed.set(HttpHeaderId::CONTENT_TYPE, "text/plain"); + // Client protocol errors always happen on request headers parsing, before we call into the + // HttpService, meaning no response has been sent and we can provide a Response object. + auto promise = server.settings.errorHandler.orDefault(*this).handleClientProtocolError( + kj::mv(protocolError), *this); - httpOutput.writeHeaders(failed.serializeResponse(statusCode, statusText)); - httpOutput.writeBodyData(kj::mv(body)); - httpOutput.finishBody(); - return httpOutput.flush().then([]() { return false; }); // loop ends after flush + return promise.then([this]() { return httpOutput.flush(); }) + .then([]() { return false; }); // loop ends after flush + } + + kj::Promise sendError(kj::Exception&& exception) { + closeAfterSend = true; + + // We only provide the Response object if we know we haven't already sent a response. + auto promise = server.settings.errorHandler.orDefault(*this).handleApplicationError( + kj::mv(exception), currentMethod.map([this](auto&&) -> Response& { return *this; })); + + return promise.then([this]() { return httpOutput.flush(); }) + .then([]() { return false; }); // loop ends after flush + } + + kj::Promise sendError() { + closeAfterSend = true; + + // We can provide a Response object, since none has already been sent. 
+ auto promise = server.settings.errorHandler.orDefault(*this).handleNoResponse(*this); + + return promise.then([this]() { return httpOutput.flush(); }) + .then([]() { return false; }); // loop ends after flush } - kj::Own sendWebSocketError( - uint statusCode, kj::StringPtr statusText, kj::String errorMessage) { + kj::Own sendWebSocketError(StringPtr errorMessage) { kj::Exception exception = KJ_EXCEPTION(FAILED, "received bad WebSocket handshake", errorMessage); - webSocketError = sendError(statusCode, statusText, kj::mv(errorMessage)); + webSocketError = sendError( + HttpHeaders::ProtocolError { 400, "Bad Request", errorMessage, nullptr }); kj::throwRecoverableException(kj::mv(exception)); // Fallback path when exceptions are disabled. @@ -4452,9 +5201,18 @@ private: kj::Promise disconnect() override { return kj::cp(exception); } - kj::Promise receive() override { + void abort() override { + kj::throwRecoverableException(kj::cp(exception)); + } + kj::Promise whenAborted() override { return kj::cp(exception); } + kj::Promise receive(size_t maxSize) override { + return kj::cp(exception); + } + + uint64_t sentByteCount() override { KJ_FAIL_ASSERT("received bad WebSocket handshake"); } + uint64_t receivedByteCount() override { KJ_FAIL_ASSERT("received bad WebSocket handshake"); } private: kj::Exception exception; @@ -4465,17 +5223,17 @@ private: } }; -HttpServer::HttpServer(kj::Timer& timer, HttpHeaderTable& requestHeaderTable, HttpService& service, - Settings settings) +HttpServer::HttpServer(kj::Timer& timer, const HttpHeaderTable& requestHeaderTable, + HttpService& service, Settings settings) : HttpServer(timer, requestHeaderTable, &service, settings, kj::newPromiseAndFulfiller()) {} -HttpServer::HttpServer(kj::Timer& timer, HttpHeaderTable& requestHeaderTable, +HttpServer::HttpServer(kj::Timer& timer, const HttpHeaderTable& requestHeaderTable, HttpServiceFactory serviceFactory, Settings settings) : HttpServer(timer, requestHeaderTable, kj::mv(serviceFactory), settings, kj::newPromiseAndFulfiller()) {} -HttpServer::HttpServer(kj::Timer& timer, HttpHeaderTable& requestHeaderTable, +HttpServer::HttpServer(kj::Timer& timer, const HttpHeaderTable& requestHeaderTable, kj::OneOf service, Settings settings, kj::PromiseFulfillerPair paf) : timer(timer), requestHeaderTable(requestHeaderTable), service(kj::mv(service)), @@ -4536,7 +5294,10 @@ kj::Promise HttpServer::listenHttpCleanDrain(kj::AsyncIoStream& connection } } - auto promise = obj->loop(true); + // Start reading requests and responding to them, but immediately cancel processing if the client + // disconnects. + auto promise = obj->startLoop(true) + .exclusiveJoin(connection.whenWriteDisconnected().then([]() {return false;})); // Eagerly evaluate so that we drop the connection when the promise resolves, even if the caller // doesn't eagerly evaluate. @@ -4547,4 +5308,77 @@ void HttpServer::taskFailed(kj::Exception&& exception) { KJ_LOG(ERROR, "unhandled exception in HTTP server", exception); } +kj::Promise HttpServerErrorHandler::handleClientProtocolError( + HttpHeaders::ProtocolError protocolError, kj::HttpService::Response& response) { + // Default error handler implementation. 
+ + HttpHeaderTable headerTable {}; + HttpHeaders headers(headerTable); + headers.set(HttpHeaderId::CONTENT_TYPE, "text/plain"); + + auto errorMessage = kj::str("ERROR: ", protocolError.description); + auto body = response.send(protocolError.statusCode, protocolError.statusMessage, + headers, errorMessage.size()); + + return body->write(errorMessage.begin(), errorMessage.size()) + .attach(kj::mv(errorMessage), kj::mv(body)); +} + +kj::Promise HttpServerErrorHandler::handleApplicationError( + kj::Exception exception, kj::Maybe response) { + // Default error handler implementation. + + if (exception.getType() == kj::Exception::Type::DISCONNECTED) { + // How do we tell an HTTP client that there was a transient network error, and it should + // try again immediately? There's no HTTP status code for this (503 is meant for "try + // again later, not now"). Here's an idea: Don't send any response; just close the + // connection, so that it looks like the connection between the HTTP client and server + // was dropped. A good client should treat this exactly the way we want. + // + // We also bail here to avoid logging the disconnection, which isn't very interesting. + return kj::READY_NOW; + } + + KJ_IF_MAYBE(r, response) { + HttpHeaderTable headerTable {}; + HttpHeaders headers(headerTable); + headers.set(HttpHeaderId::CONTENT_TYPE, "text/plain"); + + kj::String errorMessage; + kj::Own body; + + if (exception.getType() == kj::Exception::Type::OVERLOADED) { + errorMessage = kj::str( + "ERROR: The server is temporarily unable to handle your request. Details:\n\n", exception); + body = r->send(503, "Service Unavailable", headers, errorMessage.size()); + } else if (exception.getType() == kj::Exception::Type::UNIMPLEMENTED) { + errorMessage = kj::str( + "ERROR: The server does not implement this operation. Details:\n\n", exception); + body = r->send(501, "Not Implemented", headers, errorMessage.size()); + } else { + errorMessage = kj::str( + "ERROR: The server threw an exception. 
Details:\n\n", exception); + body = r->send(500, "Internal Server Error", headers, errorMessage.size()); + } + + return body->write(errorMessage.begin(), errorMessage.size()) + .attach(kj::mv(errorMessage), kj::mv(body)); + } + + KJ_LOG(ERROR, "HttpService threw exception after generating a partial response", + "too late to report error to client", exception); + return kj::READY_NOW; +} + +kj::Promise HttpServerErrorHandler::handleNoResponse(kj::HttpService::Response& response) { + HttpHeaderTable headerTable {}; + HttpHeaders headers(headerTable); + headers.set(HttpHeaderId::CONTENT_TYPE, "text/plain"); + + constexpr auto errorMessage = "ERROR: The HttpService did not generate a response."_kj; + auto body = response.send(500, "Internal Server Error", headers, errorMessage.size()); + + return body->write(errorMessage.begin(), errorMessage.size()).attach(kj::mv(body)); +} + } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/http.h b/libs/EXTERNAL/capnproto/c++/src/kj/compat/http.h index e173874435d..c65a1bb162b 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/http.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/http.h @@ -54,7 +54,7 @@ namespace kj { MACRO(TRACE) \ /* standard methods */ \ /* */ \ - /* (CONNECT is intentionally omitted since it is handled specially in HttpHandler) */ \ + /* (CONNECT is intentionally omitted since it should be handled specially in HttpServer) */ \ \ MACRO(COPY) \ MACRO(LOCK) \ @@ -117,6 +117,8 @@ class HttpHeaderId { inline bool operator>=(const HttpHeaderId& other) const { return id >= other.id; } inline size_t hashCode() const { return id; } + // Returned value is guaranteed to be small and never collide with other headers on the same + // table. kj::StringPtr toString() const; @@ -247,10 +249,24 @@ class HttpHeaders { public: explicit HttpHeaders(const HttpHeaderTable& table); + static bool isValidHeaderValue(kj::StringPtr value); + // This returns whether the value is a valid parameter to the set call. While the HTTP spec + // suggests that only printable ASCII characters are allowed in header values, in practice that + // turns out to not be the case. We follow the browser's lead in disallowing \r and \n. + // https://github.com/httpwg/http11bis/issues/19 + // Use this if you want to validate the value before supplying it to set() if you want to avoid + // an exception being thrown (e.g. you have custom error reporting). NOTE that set will still + // validate the value. If performance is a problem this API needs to be adjusted to a + // `validateHeaderValue` function that returns a special type that set can be confident has + // already passed through the validation routine. + KJ_DISALLOW_COPY(HttpHeaders); HttpHeaders(HttpHeaders&&) = default; HttpHeaders& operator=(HttpHeaders&&) = default; + size_t size() const; + // Returns the number of headers that forEach() would iterate over. + void clear(); // Clears all contents, as if the object was freshly-allocated. However, calling this rather // than actually re-allocating the object may avoid re-allocation of internal objects. @@ -277,6 +293,12 @@ class HttpHeaders { // Calls `func(name, value)` for each header in the set -- including headers that aren't mapped // to IDs in the header table. Both inputs are of type kj::StringPtr. + template + void forEach(Func1&& func1, Func2&& func2) const; + // Calls `func1(id, value)` for each header in the set that has a registered HttpHeaderId, and + // `func2(name, value)` for each header that does not. 
All calls to func1() precede all calls to + // func2(). + void set(HttpHeaderId id, kj::StringPtr value); void set(HttpHeaderId id, kj::String&& value); // Sets a header value, overwriting the existing value. @@ -321,8 +343,42 @@ class HttpHeaders { kj::StringPtr statusText; }; - kj::Maybe tryParseRequest(kj::ArrayPtr content); - kj::Maybe tryParseResponse(kj::ArrayPtr content); + struct ProtocolError { + // Represents a protocol error, such as a bad request method or invalid headers. Debugging such + // errors is difficult without a copy of the data which we tried to parse, but this data is + // sensitive, so we can't just lump it into the error description directly. ProtocolError + // provides this sensitive data separate from the error description. + // + // TODO(cleanup): Should maybe not live in HttpHeaders? HttpServerErrorHandler::ProtocolError? + // Or HttpProtocolError? Or maybe we need a more general way of attaching sensitive context to + // kj::Exceptions? + + uint statusCode; + // Suggested HTTP status code that should be used when returning an error to the client. + // + // Most errors are 400. An unrecognized method will be 501. + + kj::StringPtr statusMessage; + // HTTP status message to go with `statusCode`, e.g. "Bad Request". + + kj::StringPtr description; + // An error description safe for all the world to see. + + kj::ArrayPtr rawContent; + // Unredacted data which led to the error condition. This may contain anything transported over + // HTTP, to include sensitive PII, so you must take care to sanitize this before using it in any + // error report that may leak to unprivileged eyes. + // + // This ArrayPtr is merely a copy of the `content` parameter passed to `tryParseRequest()` / + // `tryParseResponse()`, thus it remains valid for as long as a successfully-parsed HttpHeaders + // object would remain valid. + }; + + using RequestOrProtocolError = kj::OneOf; + using ResponseOrProtocolError = kj::OneOf; + + RequestOrProtocolError tryParseRequest(kj::ArrayPtr content); + ResponseOrProtocolError tryParseResponse(kj::ArrayPtr content); // Parse an HTTP header blob and add all the headers to this object. // // `content` should be all text from the start of the request to the first occurrance of two @@ -332,6 +388,9 @@ class HttpHeaders { // to split it into a bunch of shorter strings. The caller must keep `content` valid until the // `HttpHeaders` is destroyed, or pass it to `takeOwnership()`. + bool tryParse(kj::ArrayPtr content); + // Like tryParseRequest()/tryParseResponse(), but don't expect any request/response line. + kj::String serializeRequest(HttpMethod method, kj::StringPtr url, kj::ArrayPtr connectionHeaders = nullptr) const; kj::String serializeResponse(uint statusCode, kj::StringPtr statusText, @@ -396,6 +455,58 @@ class HttpHeaders { // also add direct accessors for those headers. }; +class HttpInputStream { + // Low-level interface to receive HTTP-formatted messages (headers followed by body) from an + // input stream, without a paired output stream. + // + // Most applications will not use this. Regular HTTP clients and servers don't need this. This + // is mainly useful for apps implementing various protocols that look like HTTP but aren't + // really. + +public: + struct Request { + HttpMethod method; + kj::StringPtr url; + const HttpHeaders& headers; + kj::Own body; + }; + virtual kj::Promise readRequest() = 0; + // Reads one HTTP request from the input stream. 
+ // + // The returned struct contains pointers directly into a buffer that is invalidated on the next + // message read. + + struct Response { + uint statusCode; + kj::StringPtr statusText; + const HttpHeaders& headers; + kj::Own body; + }; + virtual kj::Promise readResponse(HttpMethod requestMethod) = 0; + // Reads one HTTP response from the input stream. + // + // You must provide the request method because responses to HEAD requests require special + // treatment. + // + // The returned struct contains pointers directly into a buffer that is invalidated on the next + // message read. + + struct Message { + const HttpHeaders& headers; + kj::Own body; + }; + virtual kj::Promise readMessage() = 0; + // Reads an HTTP header set followed by a body, with no request or response line. This is not + // useful for HTTP but may be useful for other protocols that make the unfortunate choice to + // mimic HTTP message format, such as Visual Studio Code's JSON-RPC transport. + // + // The returned struct contains pointers directly into a buffer that is invalidated on the next + // message read. + + virtual kj::Promise awaitNextMessage() = 0; + // Waits until more data is available, but doesn't consume it. Returns false on EOF. +}; + class EntropySource { // Interface for an object that generates entropy. Typically, cryptographically-random entropy // is expected. @@ -412,7 +523,7 @@ class WebSocket { // Each side can send and receive data and "close" messages. // // Ping/Pong and message fragmentation are not exposed through this interface. These features of - // the underlying WebSocket protocol are not exposed by the browser-level Javascript API either, + // the underlying WebSocket protocol are not exposed by the browser-level JavaScript API either, // and thus applications typically need to implement these features at the application protocol // level instead. The implementation is, however, expected to reply to Ping messages it receives. @@ -434,6 +545,17 @@ class WebSocket { // shutdown, but is sometimes useful when you want the other end to trigger whatever behavior // it normally triggers when a connection is dropped. + virtual void abort() = 0; + // Forcefully close this WebSocket, such that the remote end should get a DISCONNECTED error if + // it continues to write. This differs from disconnect(), which only closes the sending + // direction, but still allows receives. + + virtual kj::Promise whenAborted() = 0; + // Resolves when the remote side aborts the connection such that send() would throw DISCONNECTED, + // if this can be detected without actually writing a message. (If not, this promise never + // resolves, but send() or receive() will throw DISCONNECTED when appropriate. See also + // kj::AsyncOutputStream::whenWriteDisconnected().) + struct Close { uint16_t code; kj::String reason; @@ -441,7 +563,9 @@ class WebSocket { typedef kj::OneOf, Close> Message; - virtual kj::Promise receive() = 0; + static constexpr size_t SUGGESTED_MAX_MESSAGE_SIZE = 1u << 20; // 1MB + + virtual kj::Promise receive(size_t maxSize = SUGGESTED_MAX_MESSAGE_SIZE) = 0; // Read one message from the WebSocket and return it. Can only call once at a time. Do not call // again after Close is received. @@ -459,6 +583,9 @@ class WebSocket { // if this WebSocket implementation is able to perform the pump in an optimized way, better than // the default implementation of pumpTo(). 
The default implementation of pumpTo() always tries // calling this first, and the default implementation of tryPumpFrom() always returns null. + + virtual uint64_t sentByteCount() = 0; + virtual uint64_t receivedByteCount() = 0; }; class HttpClient { @@ -574,16 +701,41 @@ class HttpService { // // `url` and `headers` are invalidated on the first read from `requestBody` or when the returned // promise resolves, whichever comes first. + // + // Request processing can be canceled by dropping the returned promise. HttpServer may do so if + // the client disconnects prematurely. virtual kj::Promise> connect(kj::StringPtr host); // Handles CONNECT requests. Only relevant for proxy services. Default implementation throws // UNIMPLEMENTED. }; +class HttpClientErrorHandler { +public: + virtual HttpClient::Response handleProtocolError(HttpHeaders::ProtocolError protocolError); + // Override this function to customize error handling when the client receives an HTTP message + // that fails to parse. The default implementations throws an exception. + // + // There are two main use cases for overriding this: + // 1. `protocolError` contains the actual header content that failed to parse, giving you the + // opportunity to log it for debugging purposes. The default implementation throws away this + // content. + // 2. You could potentially convert protocol errors into HTTP error codes, e.g. 502 Bad Gateway. + // + // Note that `protocolError` may contain pointers into buffers that are no longer valid once + // this method returns; you will have to make copies if you want to keep them. + + virtual HttpClient::WebSocketResponse handleWebSocketProtocolError( + HttpHeaders::ProtocolError protocolError); + // Like handleProtocolError() but for WebSocket requests. The default implementation calls + // handleProtocolError() and converts the Response to WebSocketResponse. There is probably very + // little reason to override this. +}; + struct HttpClientSettings { - kj::Duration idleTimout = 5 * kj::SECONDS; + kj::Duration idleTimeout = 5 * kj::SECONDS; // For clients which automatically create new connections, any connection idle for at least this - // long will be closed. + // long will be closed. Set this to 0 to prevent connection reuse entirely. kj::Maybe entropySource = nullptr; // Must be provided in order to use `openWebSocket`. If you don't need WebSockets, this can be @@ -593,9 +745,13 @@ struct HttpClientSettings { // or vulnerable proxies between you and the server, you can provide a dummy entropy source that // doesn't generate real entropy (e.g. returning the same value every time). Otherwise, you must // provide a cryptographically-random entropy source. + + kj::Maybe errorHandler = nullptr; + // Customize how protocol errors are handled by the HttpClient. If null, HttpClientErrorHandler's + // default implementation will be used. }; -kj::Own newHttpClient(kj::Timer& timer, HttpHeaderTable& responseHeaderTable, +kj::Own newHttpClient(kj::Timer& timer, const HttpHeaderTable& responseHeaderTable, kj::Network& network, kj::Maybe tlsNetwork, HttpClientSettings settings = HttpClientSettings()); // Creates a proxy HttpClient that connects to hosts over the given network. The URL must always @@ -611,7 +767,7 @@ kj::Own newHttpClient(kj::Timer& timer, HttpHeaderTable& responseHea // `tlsNetwork` is required to support HTTPS destination URLs. If null, only HTTP URLs can be // fetched. 
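// A hedged sketch of the HttpInputStream interface declared above, obtained through the
// newHttpInputStream() factory declared a bit further below. This is the intended shape for
// HTTP-like framing such as the JSON-RPC transport mentioned in readMessage()'s comment;
// `input` and `table` are assumed to be supplied by the caller.
kj::Promise<kj::String> readOneMessageBody(kj::AsyncInputStream& input,
                                           const kj::HttpHeaderTable& table) {
  auto stream = kj::newHttpInputStream(input, table);
  auto promise = stream->readMessage()
      .then([](kj::HttpInputStream::Message&& message) {
    // The headers point into the stream's read-ahead buffer; consume the body before the next
    // read invalidates them.
    return message.body->readAllText().attach(kj::mv(message.body));
  });
  return promise.attach(kj::mv(stream));
}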
-kj::Own newHttpClient(kj::Timer& timer, HttpHeaderTable& responseHeaderTable, +kj::Own newHttpClient(kj::Timer& timer, const HttpHeaderTable& responseHeaderTable, kj::NetworkAddress& addr, HttpClientSettings settings = HttpClientSettings()); // Creates an HttpClient that always connects to the given address no matter what URL is requested. @@ -625,7 +781,8 @@ kj::Own newHttpClient(kj::Timer& timer, HttpHeaderTable& responseHea // // `responseHeaderTable` is used when parsing HTTP responses. Requests can use any header table. -kj::Own newHttpClient(HttpHeaderTable& responseHeaderTable, kj::AsyncIoStream& stream, +kj::Own newHttpClient(const HttpHeaderTable& responseHeaderTable, + kj::AsyncIoStream& stream, HttpClientSettings settings = HttpClientSettings()); // Creates an HttpClient that speaks over the given pre-established connection. The client may // be used as a proxy client or a host client depending on whether the peer is operating as @@ -637,10 +794,28 @@ kj::Own newHttpClient(HttpHeaderTable& responseHeaderTable, kj::Asyn // subsequent requests will fail. If a response takes a long time, it blocks subsequent responses. // If a WebSocket is opened successfully, all subsequent requests fail. +kj::Own newConcurrencyLimitingHttpClient( + HttpClient& inner, uint maxConcurrentRequests, + kj::Function countChangedCallback); +// Creates an HttpClient that is limited to a maximum number of concurrent requests. Additional +// requests are queued, to be opened only after an open request completes. `countChangedCallback` +// is called when a new connection is opened or enqueued and when an open connection is closed, +// passing the number of open and pending connections. + kj::Own newHttpClient(HttpService& service); kj::Own newHttpService(HttpClient& client); // Adapts an HttpClient to an HttpService and vice versa. +kj::Own newHttpInputStream( + kj::AsyncInputStream& input, const HttpHeaderTable& headerTable); +// Create an HttpInputStream on top of the given stream. Normally applications would not call this +// directly, but it can be useful for implementing protocols that aren't quite HTTP but use similar +// message delimiting. +// +// The HttpInputStream implementation does read-ahead buffering on `input`. Therefore, when the +// HttpInputStream is destroyed, some data read from `input` may be lost, so it's not possible to +// continue reading from `input` in a reliable way. + kj::Own newWebSocket(kj::Own stream, kj::Maybe maskEntropySource); // Create a new WebSocket on top of the given stream. It is assumed that the HTTP -> WebSocket @@ -664,6 +839,9 @@ WebSocketPipe newWebSocketPipe(); // end. No buffering occurs -- a message send does not complete until a corresponding receive // accepts the message. +class HttpServerErrorHandler; +class HttpServerCallbacks; + struct HttpServerSettings { kj::Duration headerTimeout = 15 * kj::SECONDS; // After initial connection open, or after receiving the first byte of a pipelined request, @@ -673,13 +851,65 @@ struct HttpServerSettings { // After one request/response completes, we'll wait up to this long for a pipelined request to // arrive. - kj::Duration canceledUploadGacePeriod = 1 * kj::SECONDS; + kj::Duration canceledUploadGracePeriod = 1 * kj::SECONDS; size_t canceledUploadGraceBytes = 65536; // If the HttpService sends a response and returns without having read the entire request body, // then we have to decide whether to close the connection or wait for the client to finish the // request so that it can pipeline the next one. 
We'll give them a grace period defined by the // above two values -- if they hit either one, we'll close the socket, but if the request // completes, we'll let the connection stay open to handle more requests. + + kj::Maybe errorHandler = nullptr; + // Customize how client protocol errors and service application exceptions are handled by the + // HttpServer. If null, HttpServerErrorHandler's default implementation will be used. + + kj::Maybe callbacks = nullptr; + // Additional optional callbacks used to control some server behavior. +}; + +class HttpServerErrorHandler { +public: + virtual kj::Promise handleClientProtocolError( + HttpHeaders::ProtocolError protocolError, kj::HttpService::Response& response); + virtual kj::Promise handleApplicationError( + kj::Exception exception, kj::Maybe response); + virtual kj::Promise handleNoResponse(kj::HttpService::Response& response); + // Override these functions to customize error handling during the request/response cycle. + // + // Client protocol errors arise when the server receives an HTTP message that fails to parse. As + // such, HttpService::request() will not have been called yet, and the handler is always + // guaranteed an opportunity to send a response. The default implementation of + // handleClientProtocolError() replies with a 400 Bad Request response. + // + // Application errors arise when HttpService::request() throws an exception. The default + // implementation of handleApplicationError() maps the following exception types to HTTP statuses, + // and generates bodies from the stringified exceptions: + // + // - OVERLOADED: 503 Service Unavailable + // - UNIMPLEMENTED: 501 Not Implemented + // - DISCONNECTED: (no response) + // - FAILED: 500 Internal Server Error + // + // No-response errors occur when HttpService::request() allows its promise to settle before + // sending a response. The default implementation of handleNoResponse() replies with a 500 + // Internal Server Error response. + // + // Unlike `HttpService::request()`, when calling `response.send()` in the context of one of these + // functions, a "Connection: close" header will be added, and the connection will be closed. + // + // Also unlike `HttpService::request()`, it is okay to return kj::READY_NOW without calling + // `response.send()`. In this case, no response will be sent, and the connection will be closed. +}; + +class HttpServerCallbacks { +public: + virtual bool shouldClose() { return false; } + // Whenever the HttpServer begins response headers, it will check `shouldClose()` to decide + // whether to send a `Connection: close` header and close the connection. + // + // This can be useful e.g. if the server has too many connections open and wants to shed some + // of them. Note that to implement graceful shutdown of a server, you should use + // `HttpServer::drain()` instead. }; class HttpServer final: private kj::TaskSet::ErrorHandler { @@ -689,13 +919,13 @@ class HttpServer final: private kj::TaskSet::ErrorHandler { typedef HttpServerSettings Settings; typedef kj::Function(kj::AsyncIoStream&)> HttpServiceFactory; - HttpServer(kj::Timer& timer, HttpHeaderTable& requestHeaderTable, HttpService& service, + HttpServer(kj::Timer& timer, const HttpHeaderTable& requestHeaderTable, HttpService& service, Settings settings = Settings()); // Set up an HttpServer that directs incoming connections to the given service. The service // may be a host service or a proxy service depending on whether you are intending to implement // an HTTP server or an HTTP proxy. 
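// A hedged sketch of plugging into the hooks documented above. Only handleApplicationError()
// is overridden here, to hide exception details from clients; the overridden signature comes
// from the declaration above, while the class name, wording, and wiring are hypothetical.
class TerseErrorHandler final: public kj::HttpServerErrorHandler {
public:
  kj::Promise<void> handleApplicationError(
      kj::Exception exception, kj::Maybe<kj::HttpService::Response&> response) override {
    KJ_LOG(ERROR, exception);
    KJ_IF_MAYBE(r, response) {
      kj::HttpHeaderTable headerTable;
      kj::HttpHeaders headers(headerTable);
      headers.set(kj::HttpHeaderId::CONTENT_TYPE, "text/plain");
      kj::StringPtr body = "internal error\n";
      auto stream = r->send(500, "Internal Server Error", headers, body.size());
      return stream->write(body.begin(), body.size()).attach(kj::mv(stream));
    }
    return kj::READY_NOW;
  }
};

// Wiring sketch: the handler is installed through HttpServerSettings and must outlive the
// server, since the settings store only a reference:
//   TerseErrorHandler errorHandler;
//   kj::HttpServerSettings settings;
//   settings.errorHandler = errorHandler;
//   kj::HttpServer server(timer, headerTable, service, settings);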
- HttpServer(kj::Timer& timer, HttpHeaderTable& requestHeaderTable, + HttpServer(kj::Timer& timer, const HttpHeaderTable& requestHeaderTable, HttpServiceFactory serviceFactory, Settings settings = Settings()); // Like the other constructor, but allows a new HttpService object to be used for each // connection, based on the connection object. This is particularly useful for capturing the @@ -732,7 +962,7 @@ class HttpServer final: private kj::TaskSet::ErrorHandler { class Connection; kj::Timer& timer; - HttpHeaderTable& requestHeaderTable; + const HttpHeaderTable& requestHeaderTable; kj::OneOf service; Settings settings; @@ -745,7 +975,7 @@ class HttpServer final: private kj::TaskSet::ErrorHandler { kj::TaskSet tasks; - HttpServer(kj::Timer& timer, HttpHeaderTable& requestHeaderTable, + HttpServer(kj::Timer& timer, const HttpHeaderTable& requestHeaderTable, kj::OneOf service, Settings settings, kj::PromiseFulfillerPair paf); @@ -796,4 +1026,17 @@ inline void HttpHeaders::forEach(Func&& func) const { } } +template +inline void HttpHeaders::forEach(Func1&& func1, Func2&& func2) const { + for (auto i: kj::indices(indexedHeaders)) { + if (indexedHeaders[i] != nullptr) { + func1(HttpHeaderId(table, i), indexedHeaders[i]); + } + } + + for (auto& header: unindexedHeaders) { + func2(header.name, header.value); + } +} + } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/make-test-certs.sh b/libs/EXTERNAL/capnproto/c++/src/kj/compat/make-test-certs.sh index 2a0957d0d47..33725affb38 100755 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/make-test-certs.sh +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/make-test-certs.sh @@ -105,6 +105,28 @@ y y EOF +# Create alternate host key and CSR +openssl genrsa -out example2.key 4096 +openssl req -new -sha256 -key example2.key -out example2.csr << EOF +US +California +Palo Alto +Sandstorm.io +Testing Department +example.net +garply@sandstorm.io + + +EOF +echo + +# Sign valid host certificate with intermediate CA. +setup_ca_dir int +openssl ca -extensions v3_ca -days 36524 -notext -md sha256 -in example2.csr -out valid2.crt << EOF +y +y +EOF + # Create self-signed host certificate. openssl req -key example.key -new -x509 -days 36524 -sha256 -out self.crt << EOF US @@ -134,5 +156,7 @@ write_constant CA_CERT ca.crt write_constant INTERMEDIATE_CERT int.crt write_constant HOST_KEY example.key write_constant VALID_CERT valid.crt +write_constant HOST_KEY2 example2.key +write_constant VALID_CERT2 valid2.crt write_constant EXPIRED_CERT expired.crt write_constant SELF_SIGNED_CERT self.crt diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io-test.c++ index c5ef67d29ed..4db1d7cb46c 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io-test.c++ @@ -93,6 +93,101 @@ KJ_TEST("readiness IO: write even") { } } +KJ_TEST("readiness IO: write while corked") { + auto io = setupAsyncIo(); + auto pipe = io.provider->newOneWayPipe(); + + char buf[7]; + auto readPromise = pipe.in->read(buf, 3, 7); + + ReadyOutputStreamWrapper out(*pipe.out); + auto cork = out.cork(); + KJ_ASSERT(KJ_ASSERT_NONNULL(out.write(kj::StringPtr("foo").asBytes())) == 3); + + // Data hasn't been written yet. + KJ_ASSERT(!readPromise.poll(io.waitScope)); + + // Write some more, and observe it still isn't flushed out yet. 
+ KJ_ASSERT(KJ_ASSERT_NONNULL(out.write(kj::StringPtr("bar").asBytes())) == 3); + KJ_ASSERT(!readPromise.poll(io.waitScope)); + + // After reenabling pumping, the full read should succeed. + // We start this block with `if (true) {` instead of just `{` to avoid g++-8 compiler warnings + // telling us that this block isn't treated as part of KJ_ASSERT's internal `for` loop. + if (true) { + auto tmp = kj::mv(cork); + } + KJ_ASSERT(readPromise.wait(io.waitScope) == 6); + buf[6] = '\0'; + KJ_ASSERT(kj::StringPtr(buf) == "foobar"); +} + +KJ_TEST("readiness IO: write many odd while corked") { + auto io = setupAsyncIo(); + auto pipe = io.provider->newOneWayPipe(); + + // The even/odd tests should work just as before even with automatic pumping + // corked, since we should still pump when the buffer fills up. + ReadyOutputStreamWrapper out(*pipe.out); + auto cork = out.cork(); + + size_t totalWritten = 0; + for (;;) { + KJ_IF_MAYBE(n, out.write(kj::StringPtr("bar").asBytes())) { + totalWritten += *n; + if (*n < 3) { + break; + } + } else { + KJ_FAIL_ASSERT("pipe buffer is divisible by 3? really?"); + } + } + + auto buf = kj::heapArray(totalWritten + 1); + size_t n = pipe.in->read(buf.begin(), totalWritten, buf.size()).wait(io.waitScope); + KJ_ASSERT(n == totalWritten); + for (size_t i = 0; i < totalWritten; i++) { + KJ_ASSERT(buf[i] == "bar"[i%3]); + } + + // Eager pumping should still be corked. + KJ_ASSERT(KJ_ASSERT_NONNULL(out.write(kj::StringPtr("bar").asBytes())) == 3); + auto readPromise = pipe.in->read(buf.begin(), 3, buf.size()); + KJ_ASSERT(!readPromise.poll(io.waitScope)); +} + +KJ_TEST("readiness IO: write many even while corked") { + auto io = setupAsyncIo(); + auto pipe = io.provider->newOneWayPipe(); + + ReadyOutputStreamWrapper out(*pipe.out); + auto cork = out.cork(); + + size_t totalWritten = 0; + for (;;) { + KJ_IF_MAYBE(n, out.write(kj::StringPtr("ba").asBytes())) { + totalWritten += *n; + if (*n < 2) { + KJ_FAIL_ASSERT("pipe buffer is not divisible by 2? really?"); + } + } else { + break; + } + } + + auto buf = kj::heapArray(totalWritten + 1); + size_t n = pipe.in->read(buf.begin(), totalWritten, buf.size()).wait(io.waitScope); + KJ_ASSERT(n == totalWritten); + for (size_t i = 0; i < totalWritten; i++) { + KJ_ASSERT(buf[i] == "ba"[i%2]); + } + + // Eager pumping should still be corked. 
+ KJ_ASSERT(KJ_ASSERT_NONNULL(out.write(kj::StringPtr("ba").asBytes())) == 2); + auto readPromise = pipe.in->read(buf.begin(), 2, buf.size()); + KJ_ASSERT(!readPromise.poll(io.waitScope)); +} + KJ_TEST("readiness IO: read small") { auto io = setupAsyncIo(); auto pipe = io.provider->newOneWayPipe(); diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io.c++ index 43ae44a070c..ca85feba355 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io.c++ @@ -96,7 +96,7 @@ kj::Maybe ReadyOutputStreamWrapper::write(kj::ArrayPtr data) filled += result; - if (!isPumping) { + if (!isPumping && (!corked || filled == sizeof(buffer))) { isPumping = true; pumpTask = kj::evalNow([&]() { return pump(); @@ -110,6 +110,21 @@ kj::Promise ReadyOutputStreamWrapper::whenReady() { return pumpTask.addBranch(); } +ReadyOutputStreamWrapper::Cork ReadyOutputStreamWrapper::cork() { + corked = true; + return Cork(*this); +} + +void ReadyOutputStreamWrapper::uncork() { + corked = false; + if (!isPumping && filled > 0) { + isPumping = true; + pumpTask = kj::evalNow([&]() { + return pump(); + }).fork(); + } +} + kj::Promise ReadyOutputStreamWrapper::pump() { uint oldFilled = filled; uint end = start + filled; @@ -132,6 +147,9 @@ kj::Promise ReadyOutputStreamWrapper::pump() { return pump(); } else { isPumping = false; + // As a small optimization, reset to the start of the buffer when it's empty so we can provide + // the underlying layer just one contiguous chunk of memory instead of two when possible. + start = 0; return kj::READY_NOW; } }); diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io.h b/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io.h index d283ed252c8..d55d8474b01 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/readiness-io.h @@ -71,19 +71,54 @@ class ReadyOutputStreamWrapper { kj::Promise whenReady(); // Returns a promise that resolves when write() will return non-null. + class Cork; + // An object that, when destructed, will uncork its parent stream. + + Cork cork(); + // After calling, data won't be pumped until either the internal buffer fills up or the returned + // object is destructed. Use this if you know multiple small write() calls will be happening in + // the near future and want to flush them all at once. + // Once the returned object is destructed, behavior goes back to normal. The returned object + // must be destructed before the ReadyOutputStreamWrapper. + // TODO(perf): This is an ugly hack to avoid sending lots of tiny packets when using TLS, which + // has to work around OpenSSL's readiness-based I/O layer. We could certainly do better here. + private: AsyncOutputStream& output; ArrayPtr segments[2]; kj::ForkedPromise pumpTask = nullptr; bool isPumping = false; + bool corked = false; uint start = 0; // index of first byte uint filled = 0; // number of bytes currently in buffer byte buffer[8192]; + void uncork(); + kj::Promise pump(); - // Asyncronously push the buffer out to the underlying stream. + // Asynchronously push the buffer out to the underlying stream. +}; + +class ReadyOutputStreamWrapper::Cork { + // An object that, when destructed, will uncork its parent stream. 
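As a usage sketch (not part of the patch): cork() is intended for exactly the pattern the new readiness-io tests exercise, batching several small writes into one pump. The helper below is hypothetical and assumes the 8 KiB internal buffer has room, so write() never returns null; a null return would mean the caller must wait on whenReady() before writing more.

void writeHeaderBatch(kj::ReadyOutputStreamWrapper& out) {
  // Hypothetical helper: while `cork` is alive, writes accumulate in the wrapper's
  // buffer instead of being pumped to the underlying stream one by one.
  auto cork = out.cork();
  KJ_ASSERT_NONNULL(out.write(kj::StringPtr("HTTP/1.1 200 OK\r\n").asBytes()));
  KJ_ASSERT_NONNULL(out.write(kj::StringPtr("Content-Length: 0\r\n").asBytes()));
  KJ_ASSERT_NONNULL(out.write(kj::StringPtr("\r\n").asBytes()));
}  // ~Cork() runs here, uncorking the stream and triggering a single pump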
+public: + ~Cork() { + KJ_IF_MAYBE(p, parent) { + p->uncork(); + } + } + Cork(Cork&& other) : parent(kj::mv(other.parent)) { + other.parent = nullptr; + } + KJ_DISALLOW_COPY(Cork); + +private: + Cork(ReadyOutputStreamWrapper& parent) : parent(parent) {} + + kj::Maybe parent; + friend class ReadyOutputStreamWrapper; }; } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls-test.c++ index dc89013bb77..7c5cf03800d 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls-test.c++ @@ -21,12 +21,27 @@ #if KJ_HAS_OPENSSL +#if _WIN32 +#include +#endif + #include "tls.h" -#include -#include -#include + #include +#include + +#if _WIN32 +#include + +#include +#else +#include +#endif + +#include +#include + namespace kj { namespace { @@ -34,173 +49,262 @@ namespace { // test data // // made with make-test-certs.sh - static constexpr char CA_CERT[] = "-----BEGIN CERTIFICATE-----\n" - "MIIGJTCCBA2gAwIBAgIJALIY6TRIbD8CMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYD\n" - "VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRv\n" - "MRUwEwYDVQQKDAxTYW5kc3Rvcm0uaW8xGzAZBgNVBAsMElRlc3RpbmcgRGVwYXJ0\n" - "bWVudDEXMBUGA1UEAwwOY2EuZXhhbXBsZS5jb20xIjAgBgkqhkiG9w0BCQEWE2dh\n" - "cnBseUBzYW5kc3Rvcm0uaW8wIBcNMTYwNTMxMDQyODQ0WhgPMjExNjA1MDcwNDI4\n" - "NDRaMIGnMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UE\n" - "BwwJUGFsbyBBbHRvMRUwEwYDVQQKDAxTYW5kc3Rvcm0uaW8xGzAZBgNVBAsMElRl\n" - "c3RpbmcgRGVwYXJ0bWVudDEXMBUGA1UEAwwOY2EuZXhhbXBsZS5jb20xIjAgBgkq\n" - "hkiG9w0BCQEWE2dhcnBseUBzYW5kc3Rvcm0uaW8wggIiMA0GCSqGSIb3DQEBAQUA\n" - "A4ICDwAwggIKAoICAQCzVgWV0irYeGCd0bxf5bMHUTpfgWgXOrnT1kno8N+v8JAN\n" - "n9BdU0GhqFqVC6rZ+XH+C6funzGgVzpXUsFToWOJ2nKqcc2gGufB/WHaoG2qiY+1\n" - "6RekttqJzSvuUMnyPZnA7VDnZ9Pr3YIkIZQQR1FMXFSOVqrdVh4SV7emnZzFgQY/\n" - "iXbKhqaCJI8dSmeIIjTc4PLKoP/yLbmaW+/mRbYv6QEm2poTaS0NtUFqN/B+oaWr\n" - "dAsRdb7Vv5ME+FDx6XOC6vvdmE1UdgQHgAvU4Hhylqln+J0h6wxVAZ3mx3xqiQXS\n" - "rZuI2qEi8wOVNnZDIxaFPqLgCQVCANmK6XmlvQXG9mzaprtwtH4eF586pvDo7RuC\n" - "oqKVTKi6YoWj1ne8FAd0i3QLBiuwivSFl42UbDV75oFSnXIY/t3v9niIzftb17TB\n" - "PfGAJXQ9z2ggWENI3TLojnay8pMISPZnVxAbLfiESp+GGuDmdmZC9EGp1dzeW1Em\n" - "n53iFVYmyOhPy6SdqUm3Bg2gdMB9upUWXjeRczBmI5A8aZRoimTENff1BT0RFJpJ\n" - "4W+6FUpisNZ1MaIA+2MWf5C9fnFQ1zn1lXty3+104ReXE0PfmjRjU1qVjSWW10/Q\n" - "vzz0vnkInxqkw8yVrzx9vX0irHxGVl7Q9RRNkejFcRXpymrooah4mtlrPmTFKwID\n" - "AQABo1AwTjAdBgNVHQ4EFgQU0hyt1HIvkxyWekOFhm4GdnZfsvEwHwYDVR0jBBgw\n" - "FoAU0hyt1HIvkxyWekOFhm4GdnZfsvEwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B\n" - "AQsFAAOCAgEAKfB8JfYko8QjUeGl9goSJp7VgLM7CVqhPOfHkyglruQ9KAF4v2vz\n" - "QUjCUl3bps12oHPKkJhPLD5Nhvw76o2xqJoRLdfzq0h6MXIoKgMFZlAPYZdxZCDy\n" - "CbF4aFerXlkuG8cq8rjdtKYWYe8uka2eVWeRr4gQcK0IW7d6bsQW094xFiLOY8Zx\n" - "Z+nlKDVDDPRfPHoK1wl1CZkv3e6PpwrYpei7rgT5XUmUcdX6LDXOfn94JTk1nw8M\n" - "K0bfr5LXNjNOfIo1cIf12Rn7v3vIJXnC4k9WQYe+Iq++G7B3bOTEP2SKbExTimn6\n" - "0R9EFvbvCQXJQSN4ylZKXohqH6cSEfbrnVil+cVe0WtmQU6tihBZIVy06UZp32cQ\n" - "SN4Gn84sQmDoJAK+0+X3VhtFlAySqOdpt5CY2UMXCx+DDsEtQHZG+kRVdVsJTuWb\n" - "D7QPB2BUylD6NBLp/3JAFkRek1Wd38HrIMXWkENP1oNoHQdO9kKyzTtmmybD+qom\n" - "/ZnXxSjJzh2F40Ph8LJgd1JWVjPaazQbVwUElVSSoLKTmbZcVAqwBAQcX7gfaPve\n" - "8GTHufekQbwVDnWThML8Y/ofIk+Zl3g8MUhi0Q8X60IN24WUflM3S9cHnINVGmuN\n" - "Wz3Z8Z8gQnpBaMv5/6Wt9KQ7Low8iJmMQ5mspVP6Y/BbhXUg9dZjZt4=\n" + "MIIGMzCCBBugAwIBAgIUDxGXACZeJ0byrswV8gyWskZF2Q8wDQYJKoZIhvcNAQEL\n" + "BQAwgacxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQH\n" + 
"DAlQYWxvIEFsdG8xFTATBgNVBAoMDFNhbmRzdG9ybS5pbzEbMBkGA1UECwwSVGVz\n" + "dGluZyBEZXBhcnRtZW50MRcwFQYDVQQDDA5jYS5leGFtcGxlLmNvbTEiMCAGCSqG\n" + "SIb3DQEJARYTZ2FycGx5QHNhbmRzdG9ybS5pbzAgFw0yMDA2MjcwMDQyNTJaGA8y\n" + "MTIwMDYwMzAwNDI1MlowgacxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9y\n" + "bmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8xFTATBgNVBAoMDFNhbmRzdG9ybS5pbzEb\n" + "MBkGA1UECwwSVGVzdGluZyBEZXBhcnRtZW50MRcwFQYDVQQDDA5jYS5leGFtcGxl\n" + "LmNvbTEiMCAGCSqGSIb3DQEJARYTZ2FycGx5QHNhbmRzdG9ybS5pbzCCAiIwDQYJ\n" + "KoZIhvcNAQEBBQADggIPADCCAgoCggIBAKpp8VF/WDPw1V1aD36/uDWI4XRk9OaJ\n" + "i8tkAbTPutJ7NU4AWv9OzreKIR1PPhj0DtxVOj5KYRTwL1r4DsFWh6D0rBV7oz7o\n" + "zP8hWznVQBSa2BJ2E4uDD8p5oNz1+O+o4UgSBbOr83Gp5SZGw9KO7cgNql9Id/Ii\n" + "sHYxXdrYdAl6InuR6q52CJgcGqQgpFYG+KYqDiByfX52slyz5FA/dfZxsmoEVFLB\n" + "rgbeuhsJGIoasTkGIdCYJhYI7k2uWtvYNurnhgvlpfPHHuSnJ+aWVdKaQUthgbsy\n" + "T2XHuLYpWx7+B7kCF5B4eKtn3e7yzE7A8jn8Teq6yRNUh1PnM7CRMmz3g4UAxmJT\n" + "F5NyQd14IqveFuOk40Ba8wLoypll5We5tV7StUyvaOlAhi+gGPHfWKk4MGiBoaOV\n" + "52D1+/dkh/abokdKZtE59gJX0MrH6mihfc9KQs7N51IhSs4kG5zGIBdvgXtmP17H\n" + "hixUFi0Y85pqGidW4LLQ1pmK9k0U4gYlwHtqHh8an35/vp/mFhl2BDHcpuYKZ34U\n" + "ZDo9GglfCTVEsUvAnwfhuYN0e0kveSTuRCMltjGg0Fs1h9ljNNuc46W4qIx/d5ls\n" + "aMOTKc3PTtwtqgXOXRFn2U7AUXOgtEqyqpuj5ZcjH2YQ3BL24qAiYEHaBOHM+8qF\n" + "9JLZE64j5dnZAgMBAAGjUzBRMB0GA1UdDgQWBBTmqsbDUpi5hgPbcPESYR9t8jsD\n" + "7jAfBgNVHSMEGDAWgBTmqsbDUpi5hgPbcPESYR9t8jsD7jAPBgNVHRMBAf8EBTAD\n" + "AQH/MA0GCSqGSIb3DQEBCwUAA4ICAQADdVBYClYWqNk1s2gamjGsyQ2r88TWTD6X\n" + "RySVnyQPATWuEctrr6+8qTrbqBP4bTPKE+uTzwk+o5SirdJJAkrcwEsSCFw7J/qf\n" + "5U/mXN+EUuqyiMHOS/vLe5X1enj0I6fqJY2mCGFD7Jr/+el1XXjzRZsLZHmqSxww\n" + "T+UjJP+ffvtq3pq4nMQowxXm+Wub0gFHj5wkKMTIDyqnbjzB9bdVd0crtA+EpYIi\n" + "f8y5WB46g1CngRnMzRQvg5FCmxg57i+mVgiUjUe54VenwK9aeeHIuOdLCZ0RmiNH\n" + "KHPUBct+S/AXx8DCoAdm51EahwMBnlUyISpwJ+LVMWA2R9DOxdhEF0tv5iBsD9rn\n" + "oKIWoa0t/Vwnd2n8wyLhuA7N4yzm0rdBjO/rU6n0atIab5+CEDyLeyWQBVwfCUF5\n" + "XYNxOBJgGfSgJa23KUtn15pS/nSTa6sOtS/Mryc4UuNzxn+3ebNOG4UPlH6miSMK\n" + "yA+5SCyKgrn3idifzrq+XafA2WUnxdBLgJMM4OIPAGNjCCW2P1cP/NVllUTjTy2y\n" + "AIKQ/D9V/DzlbIIT6F3CNnqa9xnrBWTKF1YH/zSB7Gh2xlr0WnOWJVQbNUYet982\n" + "JL5ibRhsiqBgltgQPhKhN/rGuh7Cb28679fQqLXKgOWvV2fC4b2y0v9dG78jGCEE\n" + "LBzBUUmunw==\n" "-----END CERTIFICATE-----\n"; static constexpr char INTERMEDIATE_CERT[] = "-----BEGIN CERTIFICATE-----\n" - "MIIGDjCCA/agAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgacxCzAJBgNVBAYTAlVT\n" + "MIIGETCCA/mgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgacxCzAJBgNVBAYTAlVT\n" "MRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8xFTATBgNV\n" "BAoMDFNhbmRzdG9ybS5pbzEbMBkGA1UECwwSVGVzdGluZyBEZXBhcnRtZW50MRcw\n" "FQYDVQQDDA5jYS5leGFtcGxlLmNvbTEiMCAGCSqGSIb3DQEJARYTZ2FycGx5QHNh\n" - "bmRzdG9ybS5pbzAgFw0xNjA1MzEwNDI4NDZaGA8yMTE2MDUwNzA0Mjg0NlowgZcx\n" + "bmRzdG9ybS5pbzAgFw0yMDA2MjcwMDQyNTNaGA8yMTIwMDYwMzAwNDI1M1owgZcx\n" "CzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQKDAxTYW5k\n" "c3Rvcm0uaW8xGzAZBgNVBAsMElRlc3RpbmcgRGVwYXJ0bWVudDEbMBkGA1UEAwwS\n" "aW50LWNhLmV4YW1wbGUuY29tMSIwIAYJKoZIhvcNAQkBFhNnYXJwbHlAc2FuZHN0\n" - "b3JtLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtqWYNn5PO7cG\n" - "gyFzFX3q4QUQuKX+mzHjf560nOiF0Hon4SnZHnJqfGTSPBiEtqsaYLLf4dmO2SkR\n" - "w65MRKy8A6/441NhmCv8OamepOJWB71jlGVuLIoUwYgvT0R529r+Kzdq2xcnSgOo\n" - "O54SB4d8sSALAfOMKky+EOqcQq7VC+JZtel8LEJUpfvicHZtwS64D4IetsaGuAPz\n" - "KCZ3QTWMZr+FPzvmTJnxmrVwHU/whT8ma8rFljZ3oQiawIzeb/hQ4OjGdjt/H3TZ\n" - "A5rWL5s3t6RbR/MhLkajH4Fpw1qQUt5aHm54czLnHBQ9yz4dP/oWxZ8GBWT9PV63\n" - "CW34Irb6Zb5bmfYpaDtd0XsGACYCmLwDxBH2G5UZ6Z5nazjOHAQcYb6f4kjYVuoG\n" - 
"PvD5pjsZ7pcZr//gb4+SsVgMQDC1jRNoJPS9G3Wz42msR9IAqFaSUuay6viTPQqf\n" - "mQGOMPzJFbxnje0U483YotL+DhqtV5zFxx1l1nNJL7/5AdSPBRCvriLzIxblemBq\n" - "5JdlkKcqWq/QJtvrzHnlixVCKA1mHaWUB7zv7Np0KzeHWZ6sIcBlR7No0boW4Xmm\n" - "S4F9sTIfh+4f6qhXnjq3G4yIXhggHIWOutFAgupMQv5oNZYDjqzBhDIfWXK/8V9P\n" - "/3ubm34tOsNcWwpxxiEc5dkQAvLmFycCAwEAAaNQME4wHQYDVR0OBBYEFMvpWUv7\n" - "zweivFl4A6ZykYOe9hogMB8GA1UdIwQYMBaAFNIcrdRyL5MclnpDhYZuBnZ2X7Lx\n" - "MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAFyOD1Zti+4awHLMYDwn\n" - "s6YMSKDNnG1GjpNVjkOPZ2BOAbg18fcsNmy69R4S4rfRQMIEZPNSXh092v4BKUmc\n" - "Afa4ahEZmDET4plRWWpWUC8j8/ESIv2372QG2Q00TNWmhPiUKd8VMwV4HFcHml/+\n" - "99BrTNn68s8zMB5BNiJgWTxFIRPLy12FWjr0OriVp1EaPOstkYNxfl4RdBgTXCJ8\n" - "A9XRBNc25JHPNGPVqiRmCpewsSzEmP2sdz1MGspX7xiaE0UWnCyMPOmX90+dZlSI\n" - "GHoBI6iNL894pzKiGvl4dZ2cwKhMncXqvbx9v+IDGSXrC6bsZNl42wee8D7axsKE\n" - "xGNQKcGpU8EQqt6YC2wFV+wAHIlYuH90yk1LAZrehKy0E7N/ToKBz7MgGtoQLbn6\n" - "UuHZto/LthCY6A8ppgqrq1JEbuLoqskRuOCIF6qXjPTsNhsBXCU/xnkae74QO+2r\n" - "adrOubDvIjNtaHBuDgAeVl6PIP4mcJUMF3KhdVg/EDXWVq34YSEF+G/hR/Cj8GKl\n" - "DIGGtcQ2re+uPAoPnQ+II9iJcDeY+1wpo9HK4VL54ERzXd53Vl357mNuA/69Fjh3\n" - "HRzI5ELdyzWOEFCteCWyPtlOCZJ3Uuj9umS9K/B7KM1XmRzTOYvvj7aTaT4DhoO6\n" - "VfvEtl+RmGSm/YmfTJ8551Zq\n" + "b3JtLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvDRmV4e7F/1K\n" + "TaySC2xq00aYZfKJmwcvBvLM60xMrwDf5KlHWoYzog72q6qNb0TpvaeOlymU1IfR\n" + "DkRIMFz6qd4QpK3Ah0vDtqEsoYE6F7gr2QAAaB7HQhczClh6FFkMCygrHDRQlJcF\n" + "VseTKxnZBUAUhnG7OI8bHMZsprg31SNG1GCXq/CO/rkKIP1Pdwevr8DFHL3LFF03\n" + "vdeo6+f/3LjlsCzVCNsCcAYIScRSl1scj2QYcwP7tTmDRAmU9EZFv9MWSqRIMT4L\n" + "/4tP9AL/pWGdx8RRvbXoLJQf+hQW6YnSorRmIH/xYsvqMaan4P0hkomfIHP4nMYa\n" + "LgI8VsNhTeDZ7IvSF2F73baluTOHhUD5eE/WDffNeslaCoMbH/B3H6ks0zYt/mHG\n" + "mDaw3OxgMYep7TIE+SABOSJV+pbtQWNyM7u2+TYHm2DaxD3quf+BoYUZT01uDtN4\n" + "BSsR7XEzF25w/4lDxqBxGAZ0DzItK0kzqMykSWvDIjpSg/UjRj05sc+5zcgE4pX1\n" + "nOLD+FuB9jVqo6zCiIkHsSI0XHnm4D6awB1UyDwSh8mivfUDT53OpOIwOI3EB/4U\n" + "iZstUKgyXNrXsE6wS/3JfdDZ9xkw4dWV+0FWKJ4W6Y8UgKvQJRChpCUtcuxfaLjX\n" + "/ZIcMRYEFNjFppka/7frNT1VNnRvJ9cCAwEAAaNTMFEwHQYDVR0OBBYEFNnHDzWZ\n" + "NC5pP6njUnf1BMXuQKnnMB8GA1UdIwQYMBaAFOaqxsNSmLmGA9tw8RJhH23yOwPu\n" + "MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAG0ShYBIbwGMDUBj\n" + "9G2vxI4Nx7lip9/wO5NCC2biVxtcjNxwTuCcqCxeSGgdimo82vJTm9Wa1AkoawSv\n" + "+spoXJAeOL5dGmZX3IRD0KuDoabH7F6xbNOG3HphvwAJcKQOitPAgTt2eGXCaBqw\n" + "a1ivxPzchHVd3+LyjLMK5G3zzgW0tZhKp7IYE0eGoSGwrw10ox6ibyswe8evul1N\n" + "r7Z3zFSWqjy6mcyp7PvlImDMYzWHYYdncEaeBHd13TnTQ++AtY3Da2lu7WG5VlcV\n" + "vcRaNx8ZLIUpW7u76F9fH25GFnQdF87dgt/ufntts4NifwRRRdIDAgunQ0Nf/TDy\n" + "ow/P7dobloGl9c5xGyXk9EbYYiK/Iuga1yUKJP3UQhfOTNBsDkyUD/jLg+0bI8T+\n" + "Vv4hKNhzUObXjzA5P7+RoEqnPe8R5wjvea6OJc2MDm5GFrzZOdIEnW/iuaQTE3ol\n" + "PYZVDvgPB+l7IC0brwTvdnvXaGLFu8ICQtNoOuSkbEAysyBWCEGwloYLL3r/0ozb\n" + "k3z5mUZVBwhpQS/GEChfUKiLxk9rbDmnhKASa6FVtycSVfoW/Oh5M65ARf9CFIUv\n" + "cvDYLpQl7Ao8QiDqlQBvitNi665uucsZ/zOdFlTKKAellLASpB/q2Y6J4wkogFNa\n" + "dGRV0WpQkcuMjXK7bzSSLMo0nMCm\n" "-----END CERTIFICATE-----\n"; static constexpr char HOST_KEY[] = "-----BEGIN RSA PRIVATE KEY-----\n" - "MIIJKQIBAAKCAgEA2wdK2q0OK/6HPd66cO5Otxj/bTuyAIRt05tykA28tXONJzrs\n" - "H1p6Yypr/JPwGYeSBOhyTUNKQQ46YSey+zOTO3VH21/rpdeEAmCVgy/cjtJaoyYA\n" - "VuSf6+5NcHph/oPX9wRqVAb3BjpjQByLr4EvKdhK2S+r0AJcN1bePWJSIpQum0Eg\n" - "mxz1z2OWQRX0+3yV8tZQtk1iJKvmPI1JNFIqQsgQMxQgBQSysRgfFqwyKgDgEHoU\n" - "YqEvU2zZeOQ1onzRNLxf3mseZz9JGsaTaqYjs4vsOSKYQT5Uca+t7q07Z2bFf97k\n" - "KTJdVV4NtXyybhWXxvm1p+MSoSSpTg47oN4AVGH75PusmjnagjWlO7rTAZh17QiA\n" - 
"ORXGgGlYRqVCYKPMdahehEj7cupNn55mxH4YaomFrwjPayXa/1TsXzP17b5aTSMN\n" - "ye0q6NwOMIUGOAyn8wTEUSSDDErC4nihRHxGb3kFToq1dqCI9Sc9L1HY57Sp8tpK\n" - "5CQD1G3ppzhwZ2/Xn84ipQriogsV5ALbxukC7WGZjTgBahsMZBqHWtWIuOUjAKr/\n" - "X/jpoNgBpkT6VLqpx9L7Tj1zjvFwLsE0YAQq37lx2Sam/+CSzA3BHDsx6GNSptRK\n" - "KYvPSIYDFxJUvf/KHg8mI/gnqa0EgHJvTPBOS7go1G4OhP9IUgjWooXMhPMCAwEA\n" - "AQKCAgEArGxpOQzTAz80KDiWfSCdRvae3dcIoe+epd7RqSWnURDOJfv0thn8DuTu\n" - "bb/oW7Cl+scidEBszBnvS1x9QdOwLDZ/gutYDw5CFb0C9mtPLf/a6mSYD8+bNZg7\n" - "zjgJvNr9wK/xJIT3IigEyguuy1LfVgm3opIsp2u0PLxd5+Tm0+HjbsUube22dLTp\n" - "LAOk//Vr9edRUrJIeKX6ceCnqFCmhDwKxKsrKcgxA8kBcE/OjdJykYYJVjudjgc6\n" - "jDjbIDcyWlmQ/v9Ex/LCEhoRIvv3Tvjv1WqugW4X/AdY3XPyN8xn3eoRo3zKjNGl\n" - "6SFpNdA506Hwp2HS4JiDz7bUqicaCd63/+h6/Lxf1R9xpM6T6A+C7Ausy0yTpQQb\n" - "YzbVgNL7HWelf93zkNOXKAASzJmYq01lBF+LnKL+Yy3q85tr5YXtoNBj21oR+fRa\n" - "gBr1gaeqtRF5wj0uiPSg8M4UuDacHkm8sU2V+citkx/vLh+ofl/xVLPxDfIbIT5V\n" - "cwQvyoI743zR4/6ZzM9x5a89a2Rvb74YP8dx5nkf7x+Tk9UnGIPXgMuPN7PRMyWY\n" - "1wxnCNEdIoZ90qOhrAt8IzJuWOIVY6UdFut9sl8TbKRuzUmX2TQjYBVXULyyjsDQ\n" - "pVlOwwncBuHe+BHDn2pqK9BIgI7oK3iB8Fa4YrgGcYYAyuIs6IECggEBAPRIYoTs\n" - "KHpb8ugPyYpjIrOYvyca05jP8ctCSi1B4OmUnYuv2KxNap7HIrgysBna5nOBY6LG\n" - "0B1Kut7yKd5XtE2vNKVQRd/gR6wBBgm3xIYv9wZHcMFcRZl9xofGSmCk3iOh//Bv\n" - "IS+Mpw6TcKlUHlWLaGQBTf8I3/9nrSJ4zvhoJeyojj21+QZ4xI+W+g2tA4WEbc/p\n" - "jckVZwGNXwSVvfqnmaSC34cFgDMh1mKmD8g60Mje9ynMgG0cHZ+RxuICfQKgFEI2\n" - "iqaagO4kTl+sAwj8XRlcAm+P4/EKjYYOH6DRDQLwybDz+o/RXKkx6lWi3kc7pXmn\n" - "3aVI6UREW5UjuqkCggEBAOWIza+AtMxA+9XyJ6222qEq2MduQPYAbvshqQQ+n/93\n" - "3z5er2fd854X4HX3/w2Ov+C+OOCnpaOkH+EgWUR4yUolw0YGBaFU2evzV2P6HSkf\n" - "Hvw7HxKasquBLppUiOr7YCkm1bd+oDxFuVPq+Zza24KwPfszd2dtGqbXywmv6TLE\n" - "6dH4AEBsUNjLs1oqWX2jic60eSk0zzSY/LP57VLR9u6H7GOS1Z4bqt8rA1gJnWOd\n" - "/J5fsuL2unRoy3jM0X/tr7ZMukA7wIvBN29YR7j2Qvnkf4PLoHD7ftgPaU4XZf/d\n" - "ogc3pTh++0zyPti9UkrCALXP4i3gl8nlKZbepeDcgDsCggEBAKHmU307MzydMilB\n" - "RVa1i2syYgYdzn1p3BvVbGoATnsgpyXMPrM7f92Jp2YjGfmYzcFh0NIyJ/4x6BYY\n" - "s00MHZCa/S5PPHA7KeVCrGjGZbZ1laeQs5dDe1FWPb0A24yf2CYPmRwV2w2zj4im\n" - "iTWAbbZOdbpJ7xKHJEYWxXWiUbHq/K+TquoVb90tL0DnVAS6VSopccopRXIvABzU\n" - "QFQ+ljHI4JhasKDBMY0x8O9ilfUjnfpzY6ZNRhSKXMvEBucFtSqHQ8X6dfwjTC4I\n" - "2/SmgUB0WZOUGn0sBWtcjh15wNaJlrELOvFPUhH9NQdh8KgfEGhvjKVLbye7YfZ/\n" - "w57dljkCggEAacV4wv8UUWtAoX5NOoegh9QuwPfVh4b7nU4NjJ8vK5IZlawcOEjX\n" - "Emr+TF5TcfPuB6qgmyWl9pqS9jLp79uZJknwijwMLCPlqA0ioDeJaIGmzaSQ1Qnk\n" - "e5Oz3fpGfcIIte3nXf9D54JZvInzLIzNypNcfH1i8I4eUfPu5C/jzjlfZhpaQ1Wm\n" - "i8CSjWImivbpcg9IJezn7tzw1h69dgS7PX/1No1bUth9DQnNKKyFknojBvgifuQj\n" - "V7FS0f/QKptk9SS2TxM5zyziVrTfmCQjCPR6rkkPTgEWmom/hPTTU+zV1W2W/UnG\n" - "k9atj0LuwPRVT3LUTz/HsomfeJ5w4gW6MQKCAQA09gsSFV3yonIXN8lb6v1NbJ0+\n" - "6UVyQkP3IWrYtxhmGjOvVTDNgc7CFjKNoWjzA2eeiQOh4BEgAPKS7FEhsPlCd+4U\n" - "cI/U/msRtwyYsGkWzlcoGnI6BzuqVQlzWUJnNMv8vK4bZLQud3C7SPge+FZRoZCj\n" - "YKUJmBUV/0flipX4g+hAjbE0H3zweLbzkB4TStgxrTAHSLvk4o8UxBn0Dm/eerg3\n" - "rzgAlo553rmN82uJdckIaKqJGwGAJwZCWpjtnj4mcYrXIJ6R70NViE9D7c64LLZ7\n" - "jvyFqcN8xfd2RYujuYFD9p0UPtTY2C0sUp5nvDQFkB9kq91yTKHrLg4CWa+R\n" + "MIIJJwIBAAKCAgEAzO6/HGJ46PIogZJPxfSE6eHuqE1JvC8eaSCOKpqbfGsIHPfh\n" + "8d+TFatDe4GF4B6YYYmxpJUN85wZC+6GTA7xeM9UubssiwfACE/se5lJA0SSbXo9\n" + "SXkE5sI/LM7NA6+h3PTmoLlwduWHh6twHbcfcJUtSOSJemCLUqZOiSQ+CGW2RzEF\n" + "01SM/eOmpN9gtmVejD7wOxItbjX/DDE/IKv6xQ2G1RYxEpggCnNQjZj4R0uroN6L\n" + "tM7hiQebueXs3KfVIEcEV9oXfTf2tI4G3NTn9MJUvZQNIMUVOtjl8UmLHfk3T9Hc\n" + "eL/RM0svWIPWNoUJxA6m4u4g9D0eED2W0chKe86tF65RRYHvPNQXZf+tG7wyvRKP\n" + "ePWIlEzIAgXW2WwXEvzFvvRnfX1mYeWA7KhyRaVcCp9H0i5ZP0L8E2RsvYd+2QMm\n" + 
"zrxkeM01KMJSTAtUhqydNaH5dzlp8UXWvoTFLAh4F744OjdOyjHqoTZi1oh+iqsc\n" + "3o4tWQF3YzpjharP3mngGj+YDp/ZN6F8QdFQU+iNFKx6aDRgnKm+ri0/yTDMQ3G1\n" + "4bQ9FNmLRwB+W5UI9Oy931JxxcW7LxncbHgA326++bD1CdLXyxOvpHK40B5f9zsg\n" + "m980xdjBcLr2o0nGAsZ9V79a0BNqjvDA3DjmXBGTYxJSpMGfipP5KbP2hl0CAwEA\n" + "AQKCAgBSqV63EVVaCQujsCOzYn0WZgbBJmO+n3bxyqrtrm1XU0jzfl1KFfebPvi6\n" + "YbVhgJXQihz4mRMGl4lW0cCj/0cRhvfS7xf5gIfKEor+FAdqZQd3V15PO5xphCK9\n" + "bTEu8nIk0TgRzpr5qn3vkIxpwArTe6jHhT+a+ERaczCsiszm0DglITYLV0iDxIbc\n" + "bCnziJIJmf2Gpj9i/C7DeT3QbO56+4jOfOQQbwJFlNwCMZi8EV7KRdoudWBtyH7d\n" + "DkxreNsz6NFsqlDdNmyxybQk8VAa3yQVUBm3hSeaFBE0MYkG7xaLgMgggKbevM39\n" + "Mzh9x034IjzYvlrWiayNunoSZmr8KhYQUsgAE5F+khTLfheiGvXLkjTdBLIGG5q7\n" + "nb3G1la/jx0/0YpQvawNriovwii76cgHjmnEiNBJH685fvVP1y3bM/dJ62xdzFpw\n" + "7/1xrii1D9rSvrns037WOrv6VtdtNpJoLbUlNXU8CX9Oc7umbBLCLEp00+0DDctk\n" + "3KVX+sQc37w9KRURu8LqyFFx4VvyIQ+sJQKwVqFpaIkR7MzN6apkBCXwF+WtgtCz\n" + "7RGcu8V00yA0Rqqm1RVhPzbBU5UII0sRTYDacZFqzku8dVqEH0dUGlvtG2oIu0ed\n" + "OOv93q2EIyxmA88xcA2YFLI7P/qjYuhcUCd4QZb8S6/c71V+lQKCAQEA+fhQHE8j\n" + "Pp45zNZ9ITBTWqf0slC9iafpZ1qbnI60Sm1UigOD6Qzc5p6jdKggNr7VcQK+QyOo\n" + "pnM3qePHXgDzJZfdN7cXB9O7jZSTWUyTDc1C59b3H4ney/tIIj4FxCXBolkPrUOW\n" + "PE96i2PRQRGI1Pp5TFcFcaT5ZhX9WTPo/0fYBFEpof62mxGYRbQ8cEmRQYAXmlES\n" + "mqE9RwhwqTKy3VET0qDV5eq1EihMkWlDleyIUHF7Ra1ECzh8cbTx8NpS2Iu3BL5v\n" + "Sk6fGv8aegmf4DZcDU4eoZXBA9veAigT0X5cQH4HsAuppIOfRD8pFaJEe2K5EXXu\n" + "lDSlJk5BjSrARwKCAQEA0eBORzH+g2kB/I8JIMYqy4urI7mGU9xNSwF34HhlPURQ\n" + "NxPIByjUKrKZnCVaN2rEk8vRzY8EdsxRpKeGBYW1DZ/NL8nM7RHMKjGopf/n2JRk\n" + "7m1Mn6f4mLRIVzOID/eR2iGPvWNcKrJxgkGXNXlWEi0wasPwbVksV05uGd/5K/sT\n" + "cVVKkYLPhcGkYd7VvCfgF5x+5kPRLTrG8EKbePclUAVhs9huSRR9FAjSThpLyJZo\n" + "0/3WxPNm9FoPYGyAevvdPiF5f6yp1IdPDNZHx46WSVfkgqA5XICjanl5WCNHezDp\n" + "93r/Ix7zJ0Iyu9BxI2wJH3wGGqmsGveKOfZl90MaOwKCAQBCSGfluc5cslQdTtrL\n" + "TCcuKM8n4WUA9XdcopgUwXppKeh62EfIKlMBDBvHuTUhjyTF3LZa0z/LM04VTIL3\n" + "GEVhOI2+UlxXBPv8pOMVkMqFpGITW9sXj9V2PWF5Qv0AcAqSZA9WIE/cGi8iewtn\n" + "t6CS6P/1EDYvVlGTkk0ltDAaURCkxGjHveTp5ZZ9FTfZhohv1+lqUAkg25SGG2TU\n" + "WM85BGC/P0q4tq3g7LKw9DqprJjQy+amKTWbzBSjihmFhj7lkNas+VpFV+e0nuSE\n" + "a7zrFT7/gDF7I1yVC14pMDthF6Kar1CWi+El8Ijw7daVF/wUw67TRHRI9FS+fY3A\n" + "Qw/NAoIBAFbE7LgElFwSEu8u17BEHbdPhC7d6gpLv2zuK3iTbg+5aYyL0hwbpjQM\n" + "6PMkgjr9Gk6cap4YrdjLuklftUodMHB0i+lg/idZP1aGd1pCBcGGAICOkapEUMQZ\n" + "bPsYY/1t9k//piS/qoBAjCs1IOXLx2j2Y9kQLxuWTX2/AEgUUDj9sdkeURj9wvxi\n" + "xaps7WK//ablXZWnnhib/1mfwBVv4G5H+0/WgCoYnWmmCASgXIqOnMJgZOXCV+NY\n" + "RJkx4qB19s9UGZ5ObVxfoLAG+2AmtD2YZ/IVegGjcWx40lE9LLVi0KgvosILbq3h\n" + "cYYytEPXy6HHreJiGbSAeRZjp15l0LcCggEAWjeKaf61ogHq0uCwwipVdAGY4lNG\n" + "dAh2IjAWX0IvjBuRVuUNZyB8GApH3qilDsuzPLYtIhHtbuPrFjPyxMBoppk7hLJ+\n" + "ubzoHxMKeyKwlIi4jY+CXLaHtIV/hZFVYcdvr5A2zUrMgzEnDEEFn96+Dz0IdGrF\n" + "a37oDKDYpfK8EFk/jb2aAhIgYSHSUg4KKlQRuPfu0Vt/O8aasjmAQvfEmbx6c2C5\n" + "4q16Ky/ZM7mCqQNJAXgyPfOeJnX1PwCKntdEs2xtzXb7dEP9Yy9uAgmmVe7EzPCj\n" + "ml57PWxIJKtok73AHEat6qboncHvW1RPDAiQYmXdbe40v4wlPDrnWjUUiA==\n" "-----END RSA PRIVATE KEY-----\n"; static constexpr char VALID_CERT[] = "-----BEGIN CERTIFICATE-----\n" - "MIIF9zCCA9+gAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgZcxCzAJBgNVBAYTAlVT\n" + "MIIF+jCCA+KgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgZcxCzAJBgNVBAYTAlVT\n" "MRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQKDAxTYW5kc3Rvcm0uaW8xGzAZ\n" "BgNVBAsMElRlc3RpbmcgRGVwYXJ0bWVudDEbMBkGA1UEAwwSaW50LWNhLmV4YW1w\n" - "bGUuY29tMSIwIAYJKoZIhvcNAQkBFhNnYXJwbHlAc2FuZHN0b3JtLmlvMCAXDTE2\n" - "MDUzMTA0Mjg0N1oYDzIxMTYwNTMxMDQyODQ3WjCBkDELMAkGA1UEBhMCVVMxEzAR\n" + 
"bGUuY29tMSIwIAYJKoZIhvcNAQkBFhNnYXJwbHlAc2FuZHN0b3JtLmlvMCAXDTIw\n" + "MDYyNzAwNDI1M1oYDzIxMjAwNjI3MDA0MjUzWjCBkDELMAkGA1UEBhMCVVMxEzAR\n" "BgNVBAgMCkNhbGlmb3JuaWExFTATBgNVBAoMDFNhbmRzdG9ybS5pbzEbMBkGA1UE\n" "CwwSVGVzdGluZyBEZXBhcnRtZW50MRQwEgYDVQQDDAtleGFtcGxlLmNvbTEiMCAG\n" "CSqGSIb3DQEJARYTZ2FycGx5QHNhbmRzdG9ybS5pbzCCAiIwDQYJKoZIhvcNAQEB\n" - "BQADggIPADCCAgoCggIBANsHStqtDiv+hz3eunDuTrcY/207sgCEbdObcpANvLVz\n" - "jSc67B9aemMqa/yT8BmHkgTock1DSkEOOmEnsvszkzt1R9tf66XXhAJglYMv3I7S\n" - "WqMmAFbkn+vuTXB6Yf6D1/cEalQG9wY6Y0Aci6+BLynYStkvq9ACXDdW3j1iUiKU\n" - "LptBIJsc9c9jlkEV9Pt8lfLWULZNYiSr5jyNSTRSKkLIEDMUIAUEsrEYHxasMioA\n" - "4BB6FGKhL1Ns2XjkNaJ80TS8X95rHmc/SRrGk2qmI7OL7DkimEE+VHGvre6tO2dm\n" - "xX/e5CkyXVVeDbV8sm4Vl8b5tafjEqEkqU4OO6DeAFRh++T7rJo52oI1pTu60wGY\n" - "de0IgDkVxoBpWEalQmCjzHWoXoRI+3LqTZ+eZsR+GGqJha8Iz2sl2v9U7F8z9e2+\n" - "Wk0jDcntKujcDjCFBjgMp/MExFEkgwxKwuJ4oUR8Rm95BU6KtXagiPUnPS9R2Oe0\n" - "qfLaSuQkA9Rt6ac4cGdv15/OIqUK4qILFeQC28bpAu1hmY04AWobDGQah1rViLjl\n" - "IwCq/1/46aDYAaZE+lS6qcfS+049c47xcC7BNGAEKt+5cdkmpv/gkswNwRw7Mehj\n" - "UqbUSimLz0iGAxcSVL3/yh4PJiP4J6mtBIByb0zwTku4KNRuDoT/SFII1qKFzITz\n" - "AgMBAAGjUDBOMB0GA1UdDgQWBBT2qSSBGCUEYfohpsiHlq9yECjAqDAfBgNVHSME\n" - "GDAWgBTL6VlL+88HorxZeAOmcpGDnvYaIDAMBgNVHRMEBTADAQH/MA0GCSqGSIb3\n" - "DQEBCwUAA4ICAQCb34A5Hz6iI80U+mSnkvOnVtaqyxnsVcBbfMcsRyGG/GSVBNJD\n" - "zcCxnxrPc0NXcsK3wIR7KU1oQmusNCtI2XNd1lceBytQD6TDzcuuNCpjF7Uh+pdi\n" - "AL2HzDoy9q4Mxxk1wTGDuyy+4opZQG12fe9pr4wo93/BXbE4kDrSzp/2iTQp9/zh\n" - "JRrRISwyFH6HKX/MoVbpJfAqiMHXHeHylH6h4lUVVfYFSSB8PWL3lxAwCM0ECGmd\n" - "ZMGyh089ViW0mBoF5lacwHkAnw17S0JrUM9+66oPRLIo2rsgnNj4sMo/9dzSJ9/T\n" - "OneewTK4O8t/mZNp1auYFC1+m8wWRh0G5Y5CwZ1CJqy8mHMd/33FbMO+MyyEeFkG\n" - "DHNzCYEupp1ymqvFZK8TyIX1m/QOy0W6NT6INFY1dy3CoJWnMAJRKvxeFQGJ28Up\n" - "wYPZPj7xxGb6TdgVC0c7kMCorIu3tsLLRtwtAbN/D8ogS78QwgyLqJ41friFMD+9\n" - "AS1sjfqiiC4hpr11z+xdCJLb4vkBsigHuofjx3uyiueyKXQTFQCXjF2w1FbJTKSZ\n" - "kQ4CP2eG8UN5BN46kih89NjBUCduXq/x4SJCEjtG57QFBogmcG/OTX1pCOwjnK4L\n" - "z6Kz4+2VTNYHvIXSIoicO+jZeISYpllg6ISNBeqoOp2zp6Rf6rwR6/YZog==\n" + "BQADggIPADCCAgoCggIBAMzuvxxieOjyKIGST8X0hOnh7qhNSbwvHmkgjiqam3xr\n" + "CBz34fHfkxWrQ3uBheAemGGJsaSVDfOcGQvuhkwO8XjPVLm7LIsHwAhP7HuZSQNE\n" + "km16PUl5BObCPyzOzQOvodz05qC5cHblh4ercB23H3CVLUjkiXpgi1KmTokkPghl\n" + "tkcxBdNUjP3jpqTfYLZlXow+8DsSLW41/wwxPyCr+sUNhtUWMRKYIApzUI2Y+EdL\n" + "q6Dei7TO4YkHm7nl7Nyn1SBHBFfaF3039rSOBtzU5/TCVL2UDSDFFTrY5fFJix35\n" + "N0/R3Hi/0TNLL1iD1jaFCcQOpuLuIPQ9HhA9ltHISnvOrReuUUWB7zzUF2X/rRu8\n" + "Mr0Sj3j1iJRMyAIF1tlsFxL8xb70Z319ZmHlgOyockWlXAqfR9IuWT9C/BNkbL2H\n" + "ftkDJs68ZHjNNSjCUkwLVIasnTWh+Xc5afFF1r6ExSwIeBe+ODo3Tsox6qE2YtaI\n" + "foqrHN6OLVkBd2M6Y4Wqz95p4Bo/mA6f2TehfEHRUFPojRSsemg0YJypvq4tP8kw\n" + "zENxteG0PRTZi0cAfluVCPTsvd9SccXFuy8Z3Gx4AN9uvvmw9QnS18sTr6RyuNAe\n" + "X/c7IJvfNMXYwXC69qNJxgLGfVe/WtATao7wwNw45lwRk2MSUqTBn4qT+Smz9oZd\n" + "AgMBAAGjUzBRMB0GA1UdDgQWBBRdpPVLjMnJFs7lCtMW6x39Wnwt3TAfBgNVHSME\n" + "GDAWgBTZxw81mTQuaT+p41J39QTF7kCp5zAPBgNVHRMBAf8EBTADAQH/MA0GCSqG\n" + "SIb3DQEBCwUAA4ICAQA2kid2fGqjeDRVuclfDRr0LhbFYfJJXxW7SPgcUpJYXeAz\n" + "LXotBm/Cc+K01nNtl0JYfJy4IkaQUYgfVsA5/FqTGnbRmpEd5XidiGE6PfkXZSNj\n" + "02v6Uv2bAs8NnJirS5F0JhWZ45xAOMbl04QPUdkISF1JzioCcCqWOggbIV7kzwrB\n" + "MTcsx8vuh04S9vB318pKli4uIjNdwu7HnoqbrqhSgTUK1aXS6sDQNN/nvR6F5TRL\n" + "MC0cCtIA6n04c09WRfHxl/YPQwayxGD23eQ9UC7Noe/R8B3K/+6XUYIEmXx6HpnP\n" + "yt/79iBPwLnaVjGDwKfI8EPuSo7AkDSO3uxMjf7eCL2sCzWlgsne9yOfYxGn+q9K\n" + "h3KTOR1b7EVU/G4h7JxlSHqf3Ii9qFba/HsUo1yMjVEraMNpxXCijGsN30fqUpLg\n" + "2g9lNKmIdyHuYdlZET082b1dvb7cfYChlqHvrPv5awbsc1Ka2pOFOMwnUi2w3cHc\n" + 
"TLq3SyipI+sgJg3HHSJ3zVHrgKUeDoQi52i5WedvIBPfHC4Ik4zEzlkCMDd3hoLe\n" + "THAsBGBBEICK+lLSo0Mst2kxhTHHK+PxmhorXXnGOpla2wbwzVZgxur45axCWVqH\n" + "cbdcVhzR2w4XhjaS74WGiHHn5mHb/uYZJiZGpFCNefU2WOBlRCX7hgAzlPa4ZQ==\n" + "-----END CERTIFICATE-----\n"; + +static constexpr char HOST_KEY2[] = + "-----BEGIN RSA PRIVATE KEY-----\n" + "MIIJKgIBAAKCAgEAtpULbBVceP5UTP8Aq/qZ4zuws0mgfRazlFFDzn0SpxKRgfUR\n" + "OrDB8EMcffL+IxWYdzszYnm7R4p8udQHtqdX1m+JpWPIcEyOGuKEjEGGVBbfteiG\n" + "vCZaHmmhSGFuBuRQnsmOMN2sX4ATPgISeUpKz3YcEw5zbGV9XveQBCiCZYJOEY2R\n" + "qzuzfwmO76Nf/0pQtFaN6vjHOGOp5e6xEWUNMruliw83/BYmtOE0CH9QmLSi5d/s\n" + "OMsppXVduwUshHv2gwocXFik4FUhDMKjfzp71uvRLqAnpsf6u5uShXwqgamohQct\n" + "h9D0x5KMoTwlf7LnV52dJ4Fp0np4FYkNhJJxqMXjJuzW4HqHyt/zzelhXavKyPyf\n" + "XGmkRHzQLAaOzDN+3qXBvArubYapuy/CF1n/Dh9OvcGJ8vK2wtahGig2Wrwh5k0e\n" + "ZEiQfkFtXKhVmxNgcEEr6coIPAPe882F0HrWgM5h/huS50MC5OWyjnAySZ7L/Qj0\n" + "7jDfNcij1yajmv4ahsL8FI4hal8k3DSXe3MDnAwmBxKg+b/KUWNiucdInZUZ17c5\n" + "765aQoeIPZFVBoAQrgFFLPE31wC7SwrMuKMhy2UKbgXjcZ5MMkbS2WBSaqBFLSys\n" + "zHY0cFPCRh6K7d7vDvSG7lZT16lNFfagbvcO1uusQBD22gGvNOF2zZe377ECAwEA\n" + "AQKCAgA7Dm6JYUdt42XFGd5PwlkwRMhc1X3RuBwR5081ZQM5gyoJjQkroKy6WBrJ\n" + "KmXFV2DfgAiY26MV+tdpDAoKrIoe1CkDlAjrOffk/ku9Shx26oclwbaC+SzBFY2T\n" + "aeA63nKtSahyaeEtarHOpsDu9nbIL/3YtB3le9ZXd1/f2HKE/ubdipsJdeATQTY4\n" + "kPGmE5WTH0P8MsfNl38G3nPrmnHwbP2Ywy1qnoeajhVUgknBevwNuqYfoKcx24qb\n" + "yYqit63+qLCPtiRuY1qzU+mqZ3JTDCe3Gxp4OcsCD8oO3yConAXkMXQqsA3c16wh\n" + "IuFGMsndbx+7/YILEI3y+UekD/IvBmLAtX0X5fQEhHm/8SowHAe5XIOTWGnTjXrF\n" + "JhGwsRuQtSXJPhTVAeR07IrPAASVp0BeppBdHv0pUkOte/usYH3PhYGLuQJIbOHi\n" + "AvDZDE/6CVszXFU7Ry3QpIZ4aQQMGGOJg1LVhNnt7br7ZZJ2mU5BOOvgdcNACcR8\n" + "+sCD2DE3dD6Nxy5rGESHSqYDyedd3KJ7wPlBO6p6KUttgwWtZDXh940kGxcnEQtQ\n" + "HAnHOxCJU+cxj9ekxoNHcTjqCEGkL7jy013soG4yrw62vMsVKHxEUqo/uaBSI7Nd\n" + "h+JiHb/mQ/sGvQcaAlJTnX7kUfy+oPi0nL3Hz97CxwcWSFFmIQKCAQEA8AiKO13n\n" + "0FjMGZJHXeu73Rg/OvQBSSE8Fco/watnEKKzRPIXUdYgjxL8fH7jFwV7Ix941ddG\n" + "2RcY7zchbMAP95xfeYUlrHyxbgNjWWIFezaattrCn8Sw52SXNcsT2bGuaNmyVmHs\n" + "gwyIDCl+1cPArhcuCun8MszsSy5W0EPwsaCqDnPinTE0lSsj0MWpR4K7+m4OOmA1\n" + "zwqQ9j/pgvP8is7YeTEb1a7JtVAD+4nCd7XnzUuun2Qw5jaCGFJKZGEwqeSUQZS6\n" + "NAuQ0OTaw8m3qb7incS9tZahWLACLfT4Jrrfh9Is2pFVgQstzin9dpfLrWKeBRGP\n" + "D3ItA73haE/xjQKCAQEAwrova9Epg5ylKReKCTMZREDO1PTbHXRS7GrgNN1Sy1Ne\n" + "Ke3UjEowJMMINekYEJimuGLixl50K0a6T5lQoUQY6dYHeZEQ4YSN3lPWtgbuZEtI\n" + "OlSrsw9/duT55gVPiRsZTiHjM1mYVEtAeUxHH/PVoSjPY4V1OKWA3HJ/TtgP1JEN\n" + "scdIdIXP1HZnjxxN4juVHyr1+iC2KXbb/OajXFMUCPkp7YrlknDyYcgj2uXRqC3k\n" + "ju3oBplcEnNrWO6RfqQ+QYv87huPXV5sHXzjNBj9ssHwn+EYxwpne+LvMfg3E5l1\n" + "o7Yl40IfHKK85ts8qwjG6tJ3TUxAUrMPSKBLbbODtQKCAQEAxJWZ8Kkl8+LltYOx\n" + "41/vilITZwr0CpqnhQkRUmI4lM1LmQnUw3dlTwgztRqOjgo1ITzjT+9x3NYn27MB\n" + "MvnRme992h6MDkpJXlp0AX5gEttTtrJPd141rC0cEjhx13bH6qNwhYLJm0KmIZ/S\n" + "euxJX8soMFQV8t0WITSgcQ1TkYaOACw0ypzD/e9I8/EOhLyzi5SbHoAxUZHLy4Ho\n" + "kxGUIXLqo8bujwEJve78dAQNOtHGOMLlDzGVQtYdkiHDP5bBrkLAkT1nirx2LD9i\n" + "U7tfKixlmOTKom/tUJ9GCbF5ku61p50gkxk4N+mZ6CFHrtr/Os9rr6cDzZiq+UeH\n" + "1lCy+QKCAQEAhjYxTQyConWq2CGjQCf5+DL624hQJYLxTIV1Nrp8wCsbsaZ8Yp0X\n" + "hZ7u38lijr3H2zo8tyCOzO0YqJgxHJWE3lZoHH/BtM3Zwizixd8NHA9PHvUQyn+a\n" + "COZU3xc19He6/0EYCWJtPVwIehH6y6kRytwH5L4tRve7UzWPTVZZwtafK7MA218H\n" + "GZbqVZbaj10lsK+5jcZSB04m3a5RVebk3jJtlY2wITi7tm1tWQghctr+twx+aV32\n" + "OblXeZokqbamOiM0FyDjtSTJO6HCLzwyT6ygHnHU1Ar1vEtzNWuw+k9A5685eeMu\n" + "8luv+yWMMQ4BnAOnup0dkGJd3F6u3lNmKQKCAQEAkZKTi4g+AKGikU9JuKOTF2UH\n" + "DdolZK/pXfWRIzpyC5cxiwnHhpqtl4jRrNSVSXWo/ChdtxhGv+8FNO+G8wQr0I0K\n" + 
"iWF4qWU/q7vbjWuE8mDrWfaCWM3IwEoMQ7Ub+gTf2JzQG0t0oSgJ70uaYxxm2x7U\n" + "eBnblzZ6ODG7jgq2WX9S8/JzTvqrtlVDmdPUJlsIymRiDHt+zDAh4kv0aRUJQVWy\n" + "cCd1rWSl2BnCgrLi0Ez5k5EuOW7v3TIFWI6AXAa5kshG3Q5du/TtPLSyqJZu2UCx\n" + "3LprECmh9aJmE8KZL3R5ClSqkzVjuOUj56y63vSiV1B7kTbTnT7G+sJwwgxvyA==\n" + "-----END RSA PRIVATE KEY-----\n"; + +static constexpr char VALID_CERT2[] = + "-----BEGIN CERTIFICATE-----\n" + "MIIF+jCCA+KgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgZcxCzAJBgNVBAYTAlVT\n" + "MRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQKDAxTYW5kc3Rvcm0uaW8xGzAZ\n" + "BgNVBAsMElRlc3RpbmcgRGVwYXJ0bWVudDEbMBkGA1UEAwwSaW50LWNhLmV4YW1w\n" + "bGUuY29tMSIwIAYJKoZIhvcNAQkBFhNnYXJwbHlAc2FuZHN0b3JtLmlvMCAXDTIw\n" + "MDYyNzAwNDI1M1oYDzIxMjAwNjI3MDA0MjUzWjCBkDELMAkGA1UEBhMCVVMxEzAR\n" + "BgNVBAgMCkNhbGlmb3JuaWExFTATBgNVBAoMDFNhbmRzdG9ybS5pbzEbMBkGA1UE\n" + "CwwSVGVzdGluZyBEZXBhcnRtZW50MRQwEgYDVQQDDAtleGFtcGxlLm5ldDEiMCAG\n" + "CSqGSIb3DQEJARYTZ2FycGx5QHNhbmRzdG9ybS5pbzCCAiIwDQYJKoZIhvcNAQEB\n" + "BQADggIPADCCAgoCggIBALaVC2wVXHj+VEz/AKv6meM7sLNJoH0Ws5RRQ859EqcS\n" + "kYH1ETqwwfBDHH3y/iMVmHc7M2J5u0eKfLnUB7anV9ZviaVjyHBMjhrihIxBhlQW\n" + "37XohrwmWh5poUhhbgbkUJ7JjjDdrF+AEz4CEnlKSs92HBMOc2xlfV73kAQogmWC\n" + "ThGNkas7s38Jju+jX/9KULRWjer4xzhjqeXusRFlDTK7pYsPN/wWJrThNAh/UJi0\n" + "ouXf7DjLKaV1XbsFLIR79oMKHFxYpOBVIQzCo386e9br0S6gJ6bH+rubkoV8KoGp\n" + "qIUHLYfQ9MeSjKE8JX+y51ednSeBadJ6eBWJDYSScajF4ybs1uB6h8rf883pYV2r\n" + "ysj8n1xppER80CwGjswzft6lwbwK7m2GqbsvwhdZ/w4fTr3BifLytsLWoRooNlq8\n" + "IeZNHmRIkH5BbVyoVZsTYHBBK+nKCDwD3vPNhdB61oDOYf4bkudDAuTlso5wMkme\n" + "y/0I9O4w3zXIo9cmo5r+GobC/BSOIWpfJNw0l3tzA5wMJgcSoPm/ylFjYrnHSJ2V\n" + "Gde3Oe+uWkKHiD2RVQaAEK4BRSzxN9cAu0sKzLijIctlCm4F43GeTDJG0tlgUmqg\n" + "RS0srMx2NHBTwkYeiu3e7w70hu5WU9epTRX2oG73DtbrrEAQ9toBrzThds2Xt++x\n" + "AgMBAAGjUzBRMB0GA1UdDgQWBBSCVMitc7axjQ0JObyQ7SoZ15v41jAfBgNVHSME\n" + "GDAWgBTZxw81mTQuaT+p41J39QTF7kCp5zAPBgNVHRMBAf8EBTADAQH/MA0GCSqG\n" + "SIb3DQEBCwUAA4ICAQAGqI+GGbSHkV9C16OLKgujS17zAJDuMeUZVoUvsh0oj7hK\n" + "QwuJ6M6VIWZXk0Ccs/TbtQgyUtt98HY/M5LYjvuB3jb348TvYvBg1un6DC1LNFnw\n" + "x19eUvwxhoI0I9A/heD6251plaXl0rk+wmTn+gqHNswb0LZw7l8XclOQ8s13/Ei3\n" + "fD4P5N3LiXaPfcXzFtEvWJE1ONC/PvLfwWWE2T+/LabJ4I4iumX8oAJZyx9BCE09\n" + "54/0cV1V6xjp31/CS7vkYtDMeREnydwC3PsjjzO18nM0GVw6R2eok/yvD2Rg/pqJ\n" + "CiscKswcy0OR42pCzJyAwHaXV0KZEG9E97ukiqh3ByBUfR0ZwkKv7tDaL7UQiXdF\n" + "sheJ3l8TyQNcuWljjm1MWJt9ZzZt5zE4+yes4YVDNNe9l2jIoT8641pcy2MmPOdJ\n" + "8pEE3xJ2SAdeJKVXuHoi7glzmlK1O5nSNK3GIfKRwJ2hmIXSAoMPfpwtJWdJDNGZ\n" + "N2HThXDMleMrJqwsdToRCp0nBm40cKSDk/o7SfiE7z4e1EVDAFBlWl5SAq9Pqwh5\n" + "lBlsQXd5SbzWGyVk7BjtT3ttbXru9NEINo1l9Cw74GQuW40FsQf4drZVDVtaNWPd\n" + "IvZM211bcU/zZV44rkz3nc08jSGo2qP8bEcuYAlTneDLyrAmpisUXKYm1Q1SxA==\n" "-----END CERTIFICATE-----\n"; static constexpr char EXPIRED_CERT[] = "-----BEGIN CERTIFICATE-----\n" - "MIIF9TCCA92gAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgZcxCzAJBgNVBAYTAlVT\n" + "MIIF+DCCA+CgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgZcxCzAJBgNVBAYTAlVT\n" "MRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQKDAxTYW5kc3Rvcm0uaW8xGzAZ\n" "BgNVBAsMElRlc3RpbmcgRGVwYXJ0bWVudDEbMBkGA1UEAwwSaW50LWNhLmV4YW1w\n" "bGUuY29tMSIwIAYJKoZIhvcNAQkBFhNnYXJwbHlAc2FuZHN0b3JtLmlvMB4XDTE2\n" @@ -208,67 +312,68 @@ static constexpr char EXPIRED_CERT[] = "VQQIDApDYWxpZm9ybmlhMRUwEwYDVQQKDAxTYW5kc3Rvcm0uaW8xGzAZBgNVBAsM\n" "ElRlc3RpbmcgRGVwYXJ0bWVudDEUMBIGA1UEAwwLZXhhbXBsZS5jb20xIjAgBgkq\n" "hkiG9w0BCQEWE2dhcnBseUBzYW5kc3Rvcm0uaW8wggIiMA0GCSqGSIb3DQEBAQUA\n" - "A4ICDwAwggIKAoICAQDbB0rarQ4r/oc93rpw7k63GP9tO7IAhG3Tm3KQDby1c40n\n" - "OuwfWnpjKmv8k/AZh5IE6HJNQ0pBDjphJ7L7M5M7dUfbX+ul14QCYJWDL9yO0lqj\n" - 
"JgBW5J/r7k1wemH+g9f3BGpUBvcGOmNAHIuvgS8p2ErZL6vQAlw3Vt49YlIilC6b\n" - "QSCbHPXPY5ZBFfT7fJXy1lC2TWIkq+Y8jUk0UipCyBAzFCAFBLKxGB8WrDIqAOAQ\n" - "ehRioS9TbNl45DWifNE0vF/eax5nP0kaxpNqpiOzi+w5IphBPlRxr63urTtnZsV/\n" - "3uQpMl1VXg21fLJuFZfG+bWn4xKhJKlODjug3gBUYfvk+6yaOdqCNaU7utMBmHXt\n" - "CIA5FcaAaVhGpUJgo8x1qF6ESPty6k2fnmbEfhhqiYWvCM9rJdr/VOxfM/XtvlpN\n" - "Iw3J7Sro3A4whQY4DKfzBMRRJIMMSsLieKFEfEZveQVOirV2oIj1Jz0vUdjntKny\n" - "2krkJAPUbemnOHBnb9efziKlCuKiCxXkAtvG6QLtYZmNOAFqGwxkGoda1Yi45SMA\n" - "qv9f+Omg2AGmRPpUuqnH0vtOPXOO8XAuwTRgBCrfuXHZJqb/4JLMDcEcOzHoY1Km\n" - "1Eopi89IhgMXElS9/8oeDyYj+CeprQSAcm9M8E5LuCjUbg6E/0hSCNaihcyE8wID\n" - "AQABo1AwTjAdBgNVHQ4EFgQU9qkkgRglBGH6IabIh5avchAowKgwHwYDVR0jBBgw\n" - "FoAUy+lZS/vPB6K8WXgDpnKRg572GiAwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B\n" - "AQsFAAOCAgEAtIjwUBNVrc62XK36bHHFLawYJaJUNj4zwdBUmVc5BL0tPHfDIwhP\n" - "GBbxZCrAbPlHUkWig6l52S2lG0Aq2rtBwZPrjSJMio4ln/3y9qZjdqCjKtiQy7lc\n" - "jQu9wXzlcTomARt2NcgTMedOrC4+eXMExUZ+n7xHKsj9u6sYeH0LXtMfg0Xzloc7\n" - "ojF+ISM3bZbZQzGNkG+Fz1OCGgJ6W/wHxAadIHMmOMl1YN9ZRbO7T93AlMSGpehi\n" - "LYh/n8B769yyxlfMWIM/aGECZdLyE5NUeN2r0jfjSkgp9l9dXr5fJL7bA54YcQC4\n" - "dWjuMf8tpPpI1580fU5xD0ta6NGeLkQ5lEUexIaH0vSmUDs0N9HpTooGFcONl75R\n" - "sEaB7/xH/ZGjtRXTc/QAPyfzFvbQNgRSoiif7DHRt7Wv13fKt/xL9uZlTGZUfojt\n" - "eIFe82dNIffpoXkkHGFlpxxgnpwMb62N7BFqi2uJrSasrNEYZuEKTQMb1p76qojp\n" - "6zAUPaut+FHgM5zVaKkvdXvJEReHG7un2a/DfZelIa8VIWO4BGQJBhSaKbRiGXiu\n" - "2BbV3/qp9R0msirfyesBa/NV1syw2PYoouYukSdMfROK4r2FGPN7cw0AYrY3NbGG\n" - "5jFKu2Vr1krHysnmFyXhkoyVSy4dYjrrqa1ItZW9SF0f83IN54C/P7E=\n" + "A4ICDwAwggIKAoICAQDM7r8cYnjo8iiBkk/F9ITp4e6oTUm8Lx5pII4qmpt8awgc\n" + "9+Hx35MVq0N7gYXgHphhibGklQ3znBkL7oZMDvF4z1S5uyyLB8AIT+x7mUkDRJJt\n" + "ej1JeQTmwj8szs0Dr6Hc9OaguXB25YeHq3Adtx9wlS1I5Il6YItSpk6JJD4IZbZH\n" + "MQXTVIz946ak32C2ZV6MPvA7Ei1uNf8MMT8gq/rFDYbVFjESmCAKc1CNmPhHS6ug\n" + "3ou0zuGJB5u55ezcp9UgRwRX2hd9N/a0jgbc1Of0wlS9lA0gxRU62OXxSYsd+TdP\n" + "0dx4v9EzSy9Yg9Y2hQnEDqbi7iD0PR4QPZbRyEp7zq0XrlFFge881Bdl/60bvDK9\n" + "Eo949YiUTMgCBdbZbBcS/MW+9Gd9fWZh5YDsqHJFpVwKn0fSLlk/QvwTZGy9h37Z\n" + "AybOvGR4zTUowlJMC1SGrJ01ofl3OWnxRda+hMUsCHgXvjg6N07KMeqhNmLWiH6K\n" + "qxzeji1ZAXdjOmOFqs/eaeAaP5gOn9k3oXxB0VBT6I0UrHpoNGCcqb6uLT/JMMxD\n" + "cbXhtD0U2YtHAH5blQj07L3fUnHFxbsvGdxseADfbr75sPUJ0tfLE6+kcrjQHl/3\n" + "OyCb3zTF2MFwuvajScYCxn1Xv1rQE2qO8MDcOOZcEZNjElKkwZ+Kk/kps/aGXQID\n" + "AQABo1MwUTAdBgNVHQ4EFgQUXaT1S4zJyRbO5QrTFusd/Vp8Ld0wHwYDVR0jBBgw\n" + "FoAU2ccPNZk0Lmk/qeNSd/UExe5AqecwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG\n" + "9w0BAQsFAAOCAgEAYRQazCV707BpbBo3j2PXfg4rmrm1GIA0JXFsY27CII0aSgTw\n" + "roMBnwp3sJ+UIxqddkwf/4Bn/kq8yHu8WMc1cb4bzsqgU4K2zGJcVF3i9Y4R6oE1\n" + "9Y6QM1db5HYiCdXSNW5uZUQGButyIXsUfPns0jMZfmEhsW4WrN3m2qE357FeBfCF\n" + "nP4Ij3sbUq01OoPBL6sWUbltfL5PgqKitE6UFu1/WFpBatP+8ITOOLhkGmJ70zb1\n" + "rY3jnwlEaRJVw8DmIkkabrgKu2gUZ3qKX9aeW9iJW524A2ZjEg1xoBRq9MVcfiRN\n" + "qAERaEd7MgjIFmOYR5O2juzR3eMrv+U+JsY6K5ItTwwE/MHKjYRM22CgmQahPfvo\n" + "o40qLUn/zJNJAN+1hrmOqFXpC5vmfKV9pcG7BOsuZ9V9gkssnJRosCuU8iIW3gyo\n" + "C6TFLIneStvBzokoCTv0Fxxh/vqfIWmHYr7nsFM8S/X2iDLuCoiB6qsRw5NaOIg6\n" + "QB7cEi3sgBZU+eJDmynR3waIU0HGmj9DK8Tc2TMd5wvkBpJBkqqQkICVQ/u/g7up\n" + "swvT5Iap509sI4nmKqe9meN6m3xBSJrCNPTbjUyu7PuC/rBe7lVwnP5/PN/aj6ZU\n" + "XGyLwArQ/5GgT2sy3aEQQTtb+kthnZo7NL8nmkpoTbrm84DJ4dwkD4qc+hs=\n" "-----END CERTIFICATE-----\n"; static constexpr char SELF_SIGNED_CERT[] = "-----BEGIN CERTIFICATE-----\n" - "MIIGHzCCBAegAwIBAgIJAN7Q46+wlXJHMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD\n" - "VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRv\n" - 
"MRUwEwYDVQQKDAxTYW5kc3Rvcm0uaW8xGzAZBgNVBAsMElRlc3RpbmcgRGVwYXJ0\n" - "bWVudDEUMBIGA1UEAwwLZXhhbXBsZS5jb20xIjAgBgkqhkiG9w0BCQEWE2dhcnBs\n" - "eUBzYW5kc3Rvcm0uaW8wIBcNMTYwNTMxMDQyODQ3WhgPMjExNjA1MzEwNDI4NDda\n" - "MIGkMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJ\n" - "UGFsbyBBbHRvMRUwEwYDVQQKDAxTYW5kc3Rvcm0uaW8xGzAZBgNVBAsMElRlc3Rp\n" - "bmcgRGVwYXJ0bWVudDEUMBIGA1UEAwwLZXhhbXBsZS5jb20xIjAgBgkqhkiG9w0B\n" - "CQEWE2dhcnBseUBzYW5kc3Rvcm0uaW8wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw\n" - "ggIKAoICAQDbB0rarQ4r/oc93rpw7k63GP9tO7IAhG3Tm3KQDby1c40nOuwfWnpj\n" - "Kmv8k/AZh5IE6HJNQ0pBDjphJ7L7M5M7dUfbX+ul14QCYJWDL9yO0lqjJgBW5J/r\n" - "7k1wemH+g9f3BGpUBvcGOmNAHIuvgS8p2ErZL6vQAlw3Vt49YlIilC6bQSCbHPXP\n" - "Y5ZBFfT7fJXy1lC2TWIkq+Y8jUk0UipCyBAzFCAFBLKxGB8WrDIqAOAQehRioS9T\n" - "bNl45DWifNE0vF/eax5nP0kaxpNqpiOzi+w5IphBPlRxr63urTtnZsV/3uQpMl1V\n" - "Xg21fLJuFZfG+bWn4xKhJKlODjug3gBUYfvk+6yaOdqCNaU7utMBmHXtCIA5FcaA\n" - "aVhGpUJgo8x1qF6ESPty6k2fnmbEfhhqiYWvCM9rJdr/VOxfM/XtvlpNIw3J7Sro\n" - "3A4whQY4DKfzBMRRJIMMSsLieKFEfEZveQVOirV2oIj1Jz0vUdjntKny2krkJAPU\n" - "bemnOHBnb9efziKlCuKiCxXkAtvG6QLtYZmNOAFqGwxkGoda1Yi45SMAqv9f+Omg\n" - "2AGmRPpUuqnH0vtOPXOO8XAuwTRgBCrfuXHZJqb/4JLMDcEcOzHoY1Km1Eopi89I\n" - "hgMXElS9/8oeDyYj+CeprQSAcm9M8E5LuCjUbg6E/0hSCNaihcyE8wIDAQABo1Aw\n" - "TjAdBgNVHQ4EFgQU9qkkgRglBGH6IabIh5avchAowKgwHwYDVR0jBBgwFoAU9qkk\n" - "gRglBGH6IabIh5avchAowKgwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC\n" - "AgEA0dYsDTtt75fqQ35+ZHZGFCzJ4eUM4m/IdXfMlVw5Eb+2XA7pAVQgHhUD2qU0\n" - "6Xk5EqINr7X8TvvR3OGVzlAw7BD4ZVKXkNy82BwRoydSUN9GM+Q1yqHES8+dBH5D\n" - "jdrBq5XdtG3cQEjxxHPb5iWc5MLvVM0UtuuBtk3rp9IoY3+ReczGbm8CXep7jyIv\n" - "W0wTi8gjMPWqn/f4ikkprMh0hXCwRTojfWi+Gl1sKyzviG+FF3hJj/fNf7IKYG8d\n" - "lO/Ay9wqY+j7oMIz6RUUOht31tLnTaSSDDknuHF+r0GJoNWjsncf/NseGDLBiTkZ\n" - "bBGMJhB3Pd6FNcLuREtE1WT5EwJr9WZeSh1mQPssQvwgYjdCI558sw1WEBdON73M\n" - "0/aZzc/gDx9G1zxUxzn85/pxpBeQgb+J8iaAz1Iy9Gn64A6rVbQWAb5/xZ4y1LSe\n" - "xTFVcK9spSf3k2FX24DPOov3oLu7vGmgye76bD4I0WtcVFFK5vXsfXGHq+P8EXv5\n" - "F2KmlitfAh6N+uSeWzROrwU7roYsg81epvXVTYR/MyXNEB46iRMNdOqWThpdteEk\n" - "qts6SreMmr0+aX7oqJ52Ohtw437JvxMqd5PNHuU3qQQS2o7cJzlZ+dDQMoZdxNaV\n" - "bWPnPRk8plkBJLuQZA7KcTGA3b6qHl0hMTDJUE8bscNmMs4=\n" + "MIIGLTCCBBWgAwIBAgIUIGB2OqfFvs22f6uTwFJwWKaLH+kwDQYJKoZIhvcNAQEL\n" + "BQAwgaQxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQH\n" + "DAlQYWxvIEFsdG8xFTATBgNVBAoMDFNhbmRzdG9ybS5pbzEbMBkGA1UECwwSVGVz\n" + "dGluZyBEZXBhcnRtZW50MRQwEgYDVQQDDAtleGFtcGxlLmNvbTEiMCAGCSqGSIb3\n" + "DQEJARYTZ2FycGx5QHNhbmRzdG9ybS5pbzAgFw0yMDA2MjcwMDQyNTNaGA8yMTIw\n" + "MDYyNzAwNDI1M1owgaQxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh\n" + "MRIwEAYDVQQHDAlQYWxvIEFsdG8xFTATBgNVBAoMDFNhbmRzdG9ybS5pbzEbMBkG\n" + "A1UECwwSVGVzdGluZyBEZXBhcnRtZW50MRQwEgYDVQQDDAtleGFtcGxlLmNvbTEi\n" + "MCAGCSqGSIb3DQEJARYTZ2FycGx5QHNhbmRzdG9ybS5pbzCCAiIwDQYJKoZIhvcN\n" + "AQEBBQADggIPADCCAgoCggIBAMzuvxxieOjyKIGST8X0hOnh7qhNSbwvHmkgjiqa\n" + "m3xrCBz34fHfkxWrQ3uBheAemGGJsaSVDfOcGQvuhkwO8XjPVLm7LIsHwAhP7HuZ\n" + "SQNEkm16PUl5BObCPyzOzQOvodz05qC5cHblh4ercB23H3CVLUjkiXpgi1KmTokk\n" + "PghltkcxBdNUjP3jpqTfYLZlXow+8DsSLW41/wwxPyCr+sUNhtUWMRKYIApzUI2Y\n" + "+EdLq6Dei7TO4YkHm7nl7Nyn1SBHBFfaF3039rSOBtzU5/TCVL2UDSDFFTrY5fFJ\n" + "ix35N0/R3Hi/0TNLL1iD1jaFCcQOpuLuIPQ9HhA9ltHISnvOrReuUUWB7zzUF2X/\n" + "rRu8Mr0Sj3j1iJRMyAIF1tlsFxL8xb70Z319ZmHlgOyockWlXAqfR9IuWT9C/BNk\n" + "bL2HftkDJs68ZHjNNSjCUkwLVIasnTWh+Xc5afFF1r6ExSwIeBe+ODo3Tsox6qE2\n" + "YtaIfoqrHN6OLVkBd2M6Y4Wqz95p4Bo/mA6f2TehfEHRUFPojRSsemg0YJypvq4t\n" + "P8kwzENxteG0PRTZi0cAfluVCPTsvd9SccXFuy8Z3Gx4AN9uvvmw9QnS18sTr6Ry\n" + 
"uNAeX/c7IJvfNMXYwXC69qNJxgLGfVe/WtATao7wwNw45lwRk2MSUqTBn4qT+Smz\n" + "9oZdAgMBAAGjUzBRMB0GA1UdDgQWBBRdpPVLjMnJFs7lCtMW6x39Wnwt3TAfBgNV\n" + "HSMEGDAWgBRdpPVLjMnJFs7lCtMW6x39Wnwt3TAPBgNVHRMBAf8EBTADAQH/MA0G\n" + "CSqGSIb3DQEBCwUAA4ICAQDEEnq3/Yg0UL3d8z0EWXGnj5hbc8Cf9hj8J918aift\n" + "XhqQ+EU6BV1V7IsrUZ07Wkws7hgd7HpBgmSNQu9cIoSyjiS9je8KL9TqwmBaPNg3\n" + "jE31maqLHdLQfR0USYo7wp8cE7w/tojLwyhuwVEJR4IpVlfAgmD5HMhCX4vwZTUB\n" + "bkzsRtY56JRhNDO2ExY7QPFF4FhXLf8eZGqqk09FTpQemJFwZ2+MYlSOILrP4RL3\n" + "T9LW0EgymAjDUHT047xr5xPAjRUEplqT90bEsAp5D199m/c143tq3Cke/eQDWAR7\n" + "HVYmRmuOhwyqhkKfssZZvuq7Shm0u2vuOvfGhcW7JmukalrCixjitQmbOv1EJT0F\n" + "tQN61nRTUpnC37DEgtYpV8n+GgT1hWXDzC/0UNVOFIR26eX0kxZmqU6v+Rqz/qYe\n" + "NA2TXZ4YvL081QvPFOWVpodM6LLYw2cSGBCdfAdRE1ECoqzk5EgRBH5SrZnuebMG\n" + "V8aJsIRMI011QDEz69YJFzefI9WaawcHqfTWZoCeCBDNm0pEVqRbBAQ8E7IXfQUu\n" + "WjPbENMyTTp6+uRmQkmNJjv9HcvFUu+wyhFODBrZ4LEwFP+oWBGWqru28Se7b66H\n" + "ZvzKfQVXzYpEmhHHK2n5X1hNwSr0kb3QffVpbz3/TBIgQcOyBp/RnOY38pngfRw1\n" + "SQ==\n" "-----END CERTIFICATE-----\n"; // ======================================================================================= @@ -323,6 +428,30 @@ struct TlsTest { options.trustedCertificates = kj::arrayPtr(&caCert, 1); return options; } + + Promise writeToServer(AsyncIoStream& client) { + return client.write("foo", 4); + } + + Promise readFromClient(AsyncIoStream& server) { + auto buf = heapArray(4); + + auto readPromise = server.read(buf.begin(), buf.size()); + + auto checkBuffer = [buf = kj::mv(buf)]() { + KJ_ASSERT(kj::StringPtr(buf.begin(), buf.end()-1) == kj::StringPtr("foo")); + }; + + return readPromise.then(kj::mv(checkBuffer)); + } + + void testConnection(AsyncIoStream& client, AsyncIoStream& server) { + auto writePromise = writeToServer(client); + auto readPromise = readFromClient(server); + + writePromise.wait(io.waitScope); + readPromise.wait(io.waitScope); + }; }; KJ_TEST("TLS basics") { @@ -337,12 +466,47 @@ KJ_TEST("TLS basics") { auto client = clientPromise.wait(test.io.waitScope); auto server = serverPromise.wait(test.io.waitScope); - auto writePromise = client->write("foo", 3); - char buf[4]; - server->read(&buf, 3).wait(test.io.waitScope); - buf[3] = '\0'; + test.testConnection(*client, *server); +} - KJ_ASSERT(kj::StringPtr(buf) == "foo"); +KJ_TEST("TLS peer identity") { + TlsTest test; + ErrorNexus e; + + auto pipe = test.io.provider->newTwoWayPipe(); + + auto innerClientId = kj::LocalPeerIdentity::newInstance({}); + auto& innerClientIdRef = *innerClientId; + auto clientPromise = e.wrap(test.tlsClient.wrapClient( + kj::AuthenticatedStream { kj::mv(pipe.ends[0]), kj::mv(innerClientId) }, + "example.com")); + + auto innerServerId = kj::LocalPeerIdentity::newInstance({}); + auto& innerServerIdRef = *innerServerId; + auto serverPromise = e.wrap(test.tlsServer.wrapServer( + kj::AuthenticatedStream { kj::mv(pipe.ends[1]), kj::mv(innerServerId) })); + + auto client = clientPromise.wait(test.io.waitScope); + auto server = serverPromise.wait(test.io.waitScope); + + { + auto id = client.peerIdentity.downcast(); + KJ_ASSERT(id->hasCertificate()); + KJ_EXPECT(id->getCommonName() == "example.com"); + KJ_EXPECT(&id->getNetworkIdentity() == &innerClientIdRef); + KJ_EXPECT(id->toString() == "example.com"); + } + + { + auto id = server.peerIdentity.downcast(); + KJ_EXPECT(!id->hasCertificate()); + KJ_EXPECT_THROW_RECOVERABLE_MESSAGE( + "client did not provide a certificate", id->getCommonName()); + KJ_EXPECT(&id->getNetworkIdentity() == &innerServerIdRef); + KJ_EXPECT(id->toString() == "(anonymous 
client)"); + } + + test.testConnection(*client.stream, *server.stream); } KJ_TEST("TLS multiple messages") { @@ -385,6 +549,32 @@ KJ_TEST("TLS multiple messages") { KJ_ASSERT(kj::StringPtr(buf) == "qux"); } +KJ_TEST("TLS zero-sized write") { + TlsTest test; + ErrorNexus e; + + auto pipe = test.io.provider->newTwoWayPipe(); + + auto clientPromise = e.wrap(test.tlsClient.wrapClient(kj::mv(pipe.ends[0]), "example.com")); + auto serverPromise = e.wrap(test.tlsServer.wrapServer(kj::mv(pipe.ends[1]))); + + auto client = clientPromise.wait(test.io.waitScope); + auto server = serverPromise.wait(test.io.waitScope); + + char buf[7]; + auto readPromise = server->read(&buf, 6); + + client->write("", 0).wait(test.io.waitScope); + client->write("foo", 3).wait(test.io.waitScope); + client->write("", 0).wait(test.io.waitScope); + client->write("bar", 3).wait(test.io.waitScope); + + readPromise.wait(test.io.waitScope); + buf[6] = '\0'; + + KJ_ASSERT(kj::StringPtr(buf) == "foobar"); +} + kj::Promise writeN(kj::AsyncIoStream& stream, kj::StringPtr text, size_t count) { if (count == 0) return kj::READY_NOW; --count; @@ -411,6 +601,16 @@ KJ_TEST("TLS full duplex") { auto pipe = test.io.provider->newTwoWayPipe(); +#if _WIN32 + // On Windows we observe that `writeUp`, below, completes before the other end has started + // reading, failing the `!writeUp.poll()` expectation. I guess Windows has big buffers. We can + // fix this by requesting small buffers here. (Worth keeping in mind that Windows doesn't have + // socketpairs, so `newTwoWayPipe()` is implemented in terms of loopback TCP, ugh.) + uint small = 256; + pipe.ends[0]->setsockopt(SOL_SOCKET, SO_SNDBUF, &small, sizeof(small)); + pipe.ends[0]->setsockopt(SOL_SOCKET, SO_RCVBUF, &small, sizeof(small)); +#endif + auto clientPromise = e.wrap(test.tlsClient.wrapClient(kj::mv(pipe.ends[0]), "example.com")); auto serverPromise = e.wrap(test.tlsServer.wrapServer(kj::mv(pipe.ends[1]))); @@ -462,12 +662,7 @@ KJ_TEST("TLS SNI") { auto client = clientPromise.wait(test.io.waitScope); auto server = serverPromise.wait(test.io.waitScope); - auto writePromise = client->write("foo", 3); - char buf[4]; - server->read(&buf, 3).wait(test.io.waitScope); - buf[3] = '\0'; - - KJ_ASSERT(kj::StringPtr(buf) == "foo"); + test.testConnection(*client, *server); KJ_ASSERT(callback.callCount == 1); } @@ -529,17 +724,19 @@ KJ_TEST("TLS client certificate verification") { SSL_MESSAGE("peer did not return a certificate", "PEER_DID_NOT_RETURN_A_CERTIFICATE"), serverPromise.wait(test.io.waitScope)); +#if !KJ_NO_EXCEPTIONS // if exceptions are disabled, we're now in a bad state because + // KJ_EXPECT_THROW_MESSAGE() runs in a forked child process. 
KJ_EXPECT_THROW_MESSAGE( SSL_MESSAGE("alert", // "alert handshake failure" or "alert certificate required" - "SSLV3_ALERT_HANDSHAKE_FAILURE"), + "CERTIFICATE_REQUIRED"), clientPromise.wait(test.io.waitScope)); +#endif } // Self-signed certificate loaded in the client: fail + TlsKeypair selfSignedKeypair = { TlsPrivateKey(HOST_KEY), TlsCertificate(SELF_SIGNED_CERT) }; + clientOptions.defaultKeypair = selfSignedKeypair; { - TlsKeypair selfSignedKeypair = { TlsPrivateKey(HOST_KEY), TlsCertificate(SELF_SIGNED_CERT) }; - clientOptions.defaultKeypair = selfSignedKeypair; - TlsTest test(clientOptions, serverOptions); auto pipe = test.io.provider->newTwoWayPipe(); @@ -555,34 +752,348 @@ KJ_TEST("TLS client certificate verification") { SSL_MESSAGE("certificate verify failed", "CERTIFICATE_VERIFY_FAILED"), serverPromise.wait(test.io.waitScope)); +#if !KJ_NO_EXCEPTIONS // if exceptions are disabled, we're now in a bad state because + // KJ_EXPECT_THROW_MESSAGE() runs in a forked child process. KJ_EXPECT_THROW_MESSAGE( SSL_MESSAGE("alert unknown ca", "TLSV1_ALERT_UNKNOWN_CA"), clientPromise.wait(test.io.waitScope)); +#endif } // Trusted certificate loaded in the client: success. + TlsKeypair altKeypair = { + TlsPrivateKey(HOST_KEY2), + TlsCertificate(kj::str(VALID_CERT2, INTERMEDIATE_CERT)) + }; + clientOptions.defaultKeypair = altKeypair; { - clientOptions.defaultKeypair = serverOptions.defaultKeypair; + TlsTest test(clientOptions, serverOptions); + ErrorNexus e; + + auto pipe = test.io.provider->newTwoWayPipe(); + + auto clientPromise = e.wrap(test.tlsClient.wrapClient(kj::mv(pipe.ends[0]), "example.com")); + auto serverPromise = e.wrap(test.tlsServer.wrapServer( + kj::AuthenticatedStream { kj::mv(pipe.ends[1]), kj::UnknownPeerIdentity::newInstance() })); + + auto client = clientPromise.wait(test.io.waitScope); + auto server = serverPromise.wait(test.io.waitScope); + + auto id = server.peerIdentity.downcast(); + KJ_ASSERT(id->hasCertificate()); + KJ_EXPECT(id->getCommonName() == "example.net"); + + test.testConnection(*client, *server.stream); + } + // If verifyClients is off, client certificate is ignored, even if trusted. + serverOptions.verifyClients = false; + { + TlsTest test(clientOptions, serverOptions); + ErrorNexus e; + + auto pipe = test.io.provider->newTwoWayPipe(); + + auto clientPromise = e.wrap(test.tlsClient.wrapClient(kj::mv(pipe.ends[0]), "example.com")); + auto serverPromise = e.wrap(test.tlsServer.wrapServer( + kj::AuthenticatedStream { kj::mv(pipe.ends[1]), kj::UnknownPeerIdentity::newInstance() })); + + auto client = clientPromise.wait(test.io.waitScope); + auto server = serverPromise.wait(test.io.waitScope); + + auto id = server.peerIdentity.downcast(); + KJ_EXPECT(!id->hasCertificate()); + } + + // Non-trusted keys are ignored too (not errors). 
+ clientOptions.defaultKeypair = selfSignedKeypair; + { TlsTest test(clientOptions, serverOptions); ErrorNexus e; auto pipe = test.io.provider->newTwoWayPipe(); auto clientPromise = e.wrap(test.tlsClient.wrapClient(kj::mv(pipe.ends[0]), "example.com")); - auto serverPromise = e.wrap(test.tlsServer.wrapServer(kj::mv(pipe.ends[1]))); + auto serverPromise = e.wrap(test.tlsServer.wrapServer( + kj::AuthenticatedStream { kj::mv(pipe.ends[1]), kj::UnknownPeerIdentity::newInstance() })); auto client = clientPromise.wait(test.io.waitScope); auto server = serverPromise.wait(test.io.waitScope); - auto writePromise = client->write("foo", 3); - char buf[4]; - server->read(&buf, 3).wait(test.io.waitScope); - buf[3] = '\0'; + auto id = server.peerIdentity.downcast(); + KJ_EXPECT(!id->hasCertificate()); + } +} + +class MockConnectionReceiver final: public ConnectionReceiver { + // This connection receiver allows mocked async connection establishment without the network. + + struct ClientRequest { + Maybe maybeException; + Own>> clientFulfiller; + }; + +public: + MockConnectionReceiver(AsyncIoProvider& provider): provider(provider) {} + + Promise> accept() override { + return acceptImpl(); + } + + Promise acceptAuthenticated() override { + return acceptImpl().then([](auto stream) -> AuthenticatedStream { + return { kj::mv(stream), LocalPeerIdentity::newInstance({}) }; + }); + } + + uint getPort() override { + return 0; + } + void getsockopt(int, int, void*, uint*) override {} + void setsockopt(int, int, const void*, uint) override {} + + Promise> connect() { + // Mock a new successful connection to our receiver. + return connectImpl(); + } + + Promise badConnect() { + // Mock a new failed connection to our receiver. + return connectImpl(KJ_EXCEPTION(DISCONNECTED, "Pipes are leaky")).ignoreResult(); + } + +private: + Promise> acceptImpl() { + if (clientRequests.empty()) { + KJ_ASSERT(!serverFulfiller); + auto paf = newPromiseAndFulfiller(); + serverFulfiller = kj::mv(paf.fulfiller); + return paf.promise.then([this] { + return acceptImpl(); + }); + } + + // This is accepting in FILO order, it shouldn't matter in practice. + auto request = kj::mv(clientRequests.back()); + clientRequests.removeLast(); + + KJ_IF_MAYBE(exception, kj::mv(request.maybeException)) { + request.clientFulfiller = nullptr; // The other end had an issue, break the promise. + return kj::mv(*exception); + } else { + auto pipe = provider.newTwoWayPipe(); + request.clientFulfiller->fulfill(kj::mv(pipe.ends[0])); + return kj::mv(pipe.ends[1]); + } + } + + Promise> connectImpl(Maybe maybeException = nullptr) { + auto paf = newPromiseAndFulfiller>(); + clientRequests.add(ClientRequest{ kj::mv(maybeException), kj::mv(paf.fulfiller) }); + + if (auto fulfiller = kj::mv(serverFulfiller)) { + fulfiller->fulfill(); + } + + return kj::mv(paf.promise); + } + + AsyncIoProvider& provider; + + Own> serverFulfiller; + Vector clientRequests; +}; + +class TlsReceiverTest final: public TlsTest { + // TlsReceiverTest augments TlsTest to test TlsConnectionReceiver. 
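For reference, a sketch (not taken from the patch) of the server-side option set these client-certificate scenarios revolve around; the fields match the ones the tests set, while the helper function itself is hypothetical and HOST_KEY, VALID_CERT, INTERMEDIATE_CERT are the test constants.

kj::TlsContext::Options makeVerifyingServerOptions(const kj::TlsCertificate& caCert) {
  // Hypothetical helper: require clients to present a certificate chaining to caCert.
  kj::TlsContext::Options options;
  options.useSystemTrustStore = false;
  options.trustedCertificates = kj::arrayPtr(&caCert, 1);  // caCert must outlive the TlsContext
  options.verifyClients = true;  // without this, client certificates are ignored (see test above)
  options.defaultKeypair = kj::TlsKeypair {
      kj::TlsPrivateKey(HOST_KEY),
      kj::TlsCertificate(kj::str(VALID_CERT, INTERMEDIATE_CERT)) };
  return options;
}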
+public: + TlsReceiverTest(): TlsTest() { + auto baseReceiverPtr = kj::heap(*io.provider); + baseReceiver = baseReceiverPtr.get(); + receiver = tlsServer.wrapPort(kj::mv(baseReceiverPtr)); + } + + TlsReceiverTest(TlsReceiverTest&&) = delete; + TlsReceiverTest(const TlsReceiverTest&) = delete; + TlsReceiverTest& operator=(TlsReceiverTest&&) = delete; + TlsReceiverTest& operator=(const TlsReceiverTest&) = delete; + + Own receiver; + MockConnectionReceiver* baseReceiver; +}; + +KJ_TEST("TLS receiver basics") { + TlsReceiverTest test; - KJ_ASSERT(kj::StringPtr(buf) == "foo"); + auto clientPromise = test.baseReceiver->connect().then([&](auto stream) { + return test.tlsClient.wrapClient(kj::mv(stream), "example.com"); + }); + auto serverPromise = test.receiver->accept(); + + auto client = clientPromise.wait(test.io.waitScope); + auto server = serverPromise.wait(test.io.waitScope); + + test.testConnection(*client, *server); +} + +KJ_TEST("TLS receiver experiences pre-TLS error") { + TlsReceiverTest test; + + KJ_LOG(INFO, "Accepting before a bad connect"); + auto promise = test.receiver->accept(); + + KJ_LOG(INFO, "Disappointing our server"); + test.baseReceiver->badConnect(); + + // Can't use KJ_EXPECT_THROW_RECOVERABLE_MESSAGE because wait() that returns a value can't throw + // recoverable exceptions. Can't use KJ_EXPECT_THROW_MESSAGE because non-recoverable exceptions + // will fork() in -fno-exception which screws up our state. + promise.then([](auto) { + KJ_FAIL_EXPECT("expected exception"); + }, [](kj::Exception&& e) { + KJ_EXPECT(e.getDescription() == "Pipes are leaky"); + }).wait(test.io.waitScope); + + KJ_LOG(INFO, "Trying to load a promise after failure"); + test.receiver->accept().then([](auto) { + KJ_FAIL_EXPECT("expected exception"); + }, [](kj::Exception&& e) { + KJ_EXPECT(e.getDescription() == "Pipes are leaky"); + }).wait(test.io.waitScope); +} + +KJ_TEST("TLS receiver accepts multiple clients") { + TlsReceiverTest test; + + auto wrapClient = [&](auto stream) { + return test.tlsClient.wrapClient(kj::mv(stream), "example.com"); + }; + + auto writeToServer = [&](auto client) { + return test.writeToServer(*client).attach(kj::mv(client)); + }; + + auto readFromClient = [&](auto server) { + return test.readFromClient(*server).attach(kj::mv(server)); + }; + + KJ_LOG(INFO, "Requesting a bunch of client connects"); + constexpr auto kClientCount = 20; + auto clientPromises = Vector>(); + for (auto i = 0; i < kClientCount; ++i) { + auto clientPromise = test.baseReceiver->connect().then(wrapClient).then(writeToServer); + clientPromises.add(kj::mv(clientPromise)); + } + + KJ_LOG(INFO, "Requesting and resolving a bunch of server accepts in sequence"); + for (auto i = 0; i < kClientCount; ++i) { + // Resolve each receive in sequence like the Supervisor/Network. 
+ test.receiver->accept().then(readFromClient).wait(test.io.waitScope); + } + + KJ_LOG(INFO, "Resolving all of our client connects in parallel"); + joinPromises(clientPromises.releaseAsArray()).wait(test.io.waitScope); + + KJ_LOG(INFO, "Requesting one last server accept that we'll never resolve"); + auto extraAcceptPromise = test.receiver->accept().then(readFromClient); + KJ_EXPECT(!extraAcceptPromise.poll(test.io.waitScope)); +} + +KJ_TEST("TLS receiver does not stall on client that disconnects before ssl handshake") { + TlsReceiverTest test; + + auto wrapClient = [&](auto stream) { + return test.tlsClient.wrapClient(kj::mv(stream), "example.com"); + }; + + auto writeToServer = [&](auto client) { + return test.writeToServer(*client).attach(kj::mv(client)); + }; + + auto readFromClient = [&](auto server) { + return test.readFromClient(*server).attach(kj::mv(server)); + }; + + constexpr auto kClientCount = 20; + auto clientPromises = Vector>(); + + KJ_LOG(INFO, "Requesting the first batch of client connects in parallel"); + for (auto i = 0; i < kClientCount / 2; ++i) { + auto clientPromise = test.baseReceiver->connect().then(wrapClient).then(writeToServer); + clientPromises.add(kj::mv(clientPromise)); + } + + KJ_LOG(INFO, "Requesting and resolving a client connect that hangs up before ssl connect"); + KJ_ASSERT(test.baseReceiver->connect().wait(test.io.waitScope)); + + KJ_LOG(INFO, "Requesting the second batch of client connects in parallel"); + for (auto i = 0; i < kClientCount / 2; ++i) { + auto clientPromise = test.baseReceiver->connect().then(wrapClient).then(writeToServer); + clientPromises.add(kj::mv(clientPromise)); + } + + KJ_LOG(INFO, "Requesting and resolving a bunch of server accepts in sequence"); + for (auto i = 0; i < kClientCount; ++i) { + test.receiver->accept().then(readFromClient).wait(test.io.waitScope); + } + + KJ_LOG(INFO, "Resolving all of our client connects in parallel"); + joinPromises(clientPromises.releaseAsArray()).wait(test.io.waitScope); + + KJ_LOG(INFO, "Requesting one last server accept that we'll never resolve"); + auto extraAcceptPromise = test.receiver->accept().then(readFromClient); + KJ_EXPECT(!extraAcceptPromise.poll(test.io.waitScope)); +} + +KJ_TEST("TLS receiver does not stall on hung client") { + TlsReceiverTest test; + + auto wrapClient = [&](auto stream) { + return test.tlsClient.wrapClient(kj::mv(stream), "example.com"); + }; + + auto writeToServer = [&](auto client) { + return test.writeToServer(*client).attach(kj::mv(client)); + }; + + auto readFromClient = [&](auto server) { + return test.readFromClient(*server).attach(kj::mv(server)); + }; + + constexpr auto kClientCount = 20; + auto clientPromises = Vector>(); + + KJ_LOG(INFO, "Requesting the first batch of client connects in parallel"); + for (auto i = 0; i < kClientCount / 2; ++i) { + auto clientPromise = test.baseReceiver->connect().then(wrapClient).then(writeToServer); + clientPromises.add(kj::mv(clientPromise)); + } + + KJ_LOG(INFO, "Requesting and resolving a client connect that never does ssl connect"); + auto hungClient = test.baseReceiver->connect().wait(test.io.waitScope); + KJ_ASSERT(hungClient); + + KJ_LOG(INFO, "Requesting the second batch of client connects in parallel"); + for (auto i = 0; i < kClientCount / 2; ++i) { + auto clientPromise = test.baseReceiver->connect().then(wrapClient).then(writeToServer); + clientPromises.add(kj::mv(clientPromise)); + } + + KJ_LOG(INFO, "Requesting and resolving a bunch of server accepts in sequence"); + for (auto i = 0; i < kClientCount; 
++i) { + test.receiver->accept().then(readFromClient).wait(test.io.waitScope); } + + KJ_LOG(INFO, "Resolving all of our client connects in parallel"); + joinPromises(clientPromises.releaseAsArray()).wait(test.io.waitScope); + + KJ_LOG(INFO, "Releasing the hung client"); + hungClient = {}; + + KJ_LOG(INFO, "Requesting one last server accept that we'll never resolve"); + auto extraAcceptPromise = test.receiver->accept().then(readFromClient); + KJ_EXPECT(!extraAcceptPromise.poll(test.io.waitScope)); } #ifdef KJ_EXTERNAL_TESTS diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls.c++ index 4c7ca8848be..05bf5d53928 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls.c++ @@ -22,16 +22,19 @@ #if KJ_HAS_OPENSSL #include "tls.h" + #include "readiness-io.h" + #include -#include +#include #include -#include -#include #include -#include #include #include +#include +#include + +#include #include #include @@ -42,11 +45,12 @@ #endif namespace kj { -namespace { // ======================================================================================= // misc helpers +namespace { + KJ_NORETURN(void throwOpensslError()); void throwOpensslError() { // Call when an OpenSSL function returns an error code to convert that into an exception and @@ -96,6 +100,8 @@ inline void ensureOpenSslInitialized() { } #endif +} // namespace + // ======================================================================================= // Implementation of kj::AsyncIoStream that applies TLS on top of some other AsyncIoStream. // @@ -157,11 +163,25 @@ public: } }); } + kj::Promise accept() { // We are the server. Set SSL options to prefer server's cipher choice. SSL_set_options(ssl, SSL_OP_CIPHER_SERVER_PREFERENCE); - return sslCall([this]() { return SSL_accept(ssl); }).ignoreResult(); + auto acceptPromise = sslCall([this]() { + return SSL_accept(ssl); + }); + return acceptPromise.then([](size_t ret) { + if (ret == 0) { + kj::throwRecoverableException( + KJ_EXCEPTION(DISCONNECTED, "Client disconnected during SSL_accept()")); + } + }); + } + + kj::Own getIdentity(kj::Own inner) { + return kj::heap(SSL_get_peer_certificate(ssl), kj::mv(inner), + kj::Badge()); } ~TlsConnection() noexcept(false) { @@ -177,13 +197,18 @@ public: } Promise write(ArrayPtr> pieces) override { - return writeInternal(pieces[0], pieces.slice(1, pieces.size())); + auto cork = writeBuffer.cork(); + return writeInternal(pieces[0], pieces.slice(1, pieces.size())).attach(kj::mv(cork)); + } + + Promise whenWriteDisconnected() override { + return inner.whenWriteDisconnected(); } void shutdownWrite() override { KJ_REQUIRE(shutdownTask == nullptr, "already called shutdownWrite()"); - // TODO(0.8): shutdownWrite() is problematic because it doesn't return a promise. It was + // TODO(0.10): shutdownWrite() is problematic because it doesn't return a promise. It was // designed to assume that it would only be called after all writes are finished and that // there was no reason to block at that point, but SSL sessions don't fit this since they // actually have to send a shutdown message. 
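A note on the SSL_accept() handling above: OpenSSL returns 1 from SSL_accept() when the handshake completes, 0 when the handshake ended in a controlled shutdown (typically the client hung up before TLS was established), and a negative value on a fatal error. Previously the result was dropped via ignoreResult(), so a client that disappeared mid-handshake could surface as a successful accept; the patch now maps the 0 case to a recoverable DISCONNECTED exception. A minimal sketch of the same three-way split in plain OpenSSL, outside of KJ's async wrapper and assuming a blocking socket already attached with SSL_set_fd():

#include <openssl/ssl.h>
#include <cstdio>

enum class HandshakeResult { Ok, Disconnected, Error };

// Distinguish the three SSL_accept() outcomes that the wrapper above maps onto
// success, DISCONNECTED, and failure.
HandshakeResult doServerHandshake(SSL* ssl) {
  int ret = SSL_accept(ssl);
  if (ret == 1) {
    return HandshakeResult::Ok;            // handshake completed
  } else if (ret == 0) {
    // Controlled shutdown during the handshake: the peer went away before TLS came up.
    return HandshakeResult::Disconnected;
  } else {
    // On non-blocking I/O, SSL_ERROR_WANT_READ / SSL_ERROR_WANT_WRITE would just mean
    // "retry once the socket is ready" (the async wrapper handles that by waiting for
    // readiness); anything else is a genuine failure.
    std::fprintf(stderr, "SSL_accept failed: error=%d\n", SSL_get_error(ssl, ret));
    return HandshakeResult::Error;
  }
}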
@@ -214,6 +239,10 @@ public: inner.getpeername(addr, length); } + kj::Maybe getFd() const override { + return inner.getFd(); + } + private: SSL* ssl; kj::AsyncIoStream& inner; @@ -244,6 +273,16 @@ private: kj::ArrayPtr> rest) { KJ_REQUIRE(shutdownTask == nullptr, "already called shutdownWrite()"); + // SSL_write() with a zero-sized input returns 0, but a 0 return is documented as indicating + // an error. So, we need to avoid zero-sized writes entirely. + while (first.size() == 0) { + if (rest.size() == 0) { + return kj::READY_NOW; + } + first = rest.front(); + rest = rest.slice(1, rest.size()); + } + return sslCall([this,first]() { return SSL_write(ssl, first.begin(), first.size()); }) .then([this,first,rest](size_t n) -> kj::Promise { if (n == 0) { @@ -262,7 +301,7 @@ private: kj::Promise sslCall(Func&& func) { if (disconnected) return size_t(0); - ssize_t result = func(); + auto result = func(); if (result > 0) { return result; @@ -288,7 +327,7 @@ private: // According to documentation we shouldn't get here, because our BIO never returns an // "error". But in practice we do get here sometimes when the peer disconnects // prematurely. - KJ_FAIL_ASSERT("TLS protocol error"); + return KJ_EXCEPTION(DISCONNECTED, "SSL unable to continue I/O"); } default: KJ_FAIL_ASSERT("unexpected SSL error code", error); @@ -378,17 +417,39 @@ private: // ======================================================================================= // Implementations of ConnectionReceiver, NetworkAddress, and Network as wrappers adding TLS. -class TlsConnectionReceiver final: public kj::ConnectionReceiver { +class TlsConnectionReceiver final: public ConnectionReceiver, public TaskSet::ErrorHandler { public: - TlsConnectionReceiver(TlsContext& tls, kj::Own inner) - : tls(tls), inner(kj::mv(inner)) {} + TlsConnectionReceiver(TlsContext &tls, Own inner) + : tls(tls), inner(kj::mv(inner)), + acceptLoopTask(acceptLoop().eagerlyEvaluate([this](Exception &&e) { + onAcceptFailure(kj::mv(e)); + })), + tasks(*this) {} + + void taskFailed(Exception&& e) override { + // TODO(someday): SSL connection failures may be a fact of normal operation but they may also + // be important diagnostic information. We should allow for an error handler to be passed in so + // that network issues that affect TLS can be more discoverable from the server side. + if (e.getType() != Exception::Type::DISCONNECTED) { + KJ_LOG(ERROR, "error accepting tls connection", kj::mv(e)); + } + }; Promise> accept() override { - return inner->accept().then([this](kj::Own stream) { - return tls.wrapServer(kj::mv(stream)); + return acceptAuthenticated().then([](AuthenticatedStream&& stream) { + return kj::mv(stream.stream); }); } + Promise acceptAuthenticated() override { + KJ_IF_MAYBE(e, maybeInnerException) { + // We've experienced an exception from the inner receiver, we consider this unrecoverable. + return Exception(*e); + } + + return queue.pop(); + } + uint getPort() override { return inner->getPort(); } @@ -402,8 +463,50 @@ public: } private: + void onAcceptSuccess(AuthenticatedStream&& stream) { + // Queue this stream to go through SSL_accept. + + auto acceptPromise = kj::evalNow([&] { + // Do the SSL acceptance procedure. + return tls.wrapServer(kj::mv(stream)); + }); + + auto sslPromise = acceptPromise.then([this](auto&& stream) -> Promise { + // This is only attached to the success path, thus the error handler will catch if our + // promise fails. 
+ queue.push(kj::mv(stream)); + return kj::READY_NOW; + }); + tasks.add(kj::mv(sslPromise)); + } + + void onAcceptFailure(Exception&& e) { + // Store this exception to reject all future calls to accept() and reject any unfulfilled + // promises from the queue. + maybeInnerException = kj::mv(e); + queue.rejectAll(Exception(KJ_REQUIRE_NONNULL(maybeInnerException))); + } + + Promise acceptLoop() { + // Accept one connection and queue up the next accept on our TaskSet. + + return inner->acceptAuthenticated().then( + [this](AuthenticatedStream&& stream) { + onAcceptSuccess(kj::mv(stream)); + + // Queue up the next accept loop immediately without waiting for SSL_accept()/wrapServer(). + return acceptLoop(); + }); + } + TlsContext& tls; - kj::Own inner; + Own inner; + + Promise acceptLoopTask; + ProducerConsumerQueue queue; + TaskSet tasks; + + Maybe maybeInnerException; }; class TlsNetworkAddress final: public kj::NetworkAddress { @@ -423,6 +526,18 @@ public: })); } + Promise connectAuthenticated() override { + // Note: It's unfortunately pretty common for people to assume they can drop the NetworkAddress + // as soon as connect() returns, and this works with the native network implementation. + // So, we make some copies here. + auto& tlsRef = tls; + auto hostnameCopy = kj::str(hostname); + return inner->connectAuthenticated().then( + [&tlsRef, hostname = kj::mv(hostnameCopy)](kj::AuthenticatedStream stream) { + return tlsRef.wrapClient(kj::mv(stream), hostname); + }); + } + Own listen() override { return tls.wrapPort(inner->listen()); } @@ -481,16 +596,14 @@ private: kj::Own ownInner; }; -} // namespace - // ======================================================================================= // class TlsContext TlsContext::Options::Options() : useSystemTrustStore(true), verifyClients(false), - minVersion(TlsVersion::TLS_1_0), - cipherList("ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS") {} + minVersion(TlsVersion::TLS_1_2), + cipherList("ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305") {} // Cipher list is Mozilla's "intermediate" list, except with classic DH removed since we don't // currently support setting dhparams. 
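These tightened defaults (a TLS 1.2 floor and an AEAD-only cipher list) are what most deployments want, but both knobs remain overridable through TlsContext::Options, as does the new handshake timeout declared in tls.h later in this patch. A minimal configuration sketch, assuming the Options field names shown in this change rather than a verified drop-in setup:

#include <kj/async-io.h>
#include <kj/compat/tls.h>
#include <kj/time.h>

void configureTls() {
  auto io = kj::setupAsyncIo();

  kj::TlsContext::Options options;
  options.minVersion = kj::TlsVersion::TLS_1_0;  // relax the new TLS 1.2 floor only if legacy peers require it
  options.timer = io.provider->getTimer();       // a timer is required whenever acceptTimeout is set
  options.acceptTimeout = 10 * kj::SECONDS;      // drop sockets that stall in the TLS handshake

  kj::TlsContext tls(kj::mv(options));
  // Ports wrapped via tls.wrapPort() (and streams via tls.wrapServer()) now apply the
  // handshake timeout automatically.
}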
See: // https://mozilla.github.io/server-side-tls/ssl-config-generator/ @@ -593,6 +706,12 @@ TlsContext::TlsContext(Options options) { SSL_CTX_set_tlsext_servername_arg(ctx, sni); } + KJ_IF_MAYBE(timeout, options.acceptTimeout) { + this->timer = KJ_REQUIRE_NONNULL(options.timer, + "acceptTimeout option requires that a timer is also provided"); + this->acceptTimeout = *timeout; + } + this->ctx = ctx; } @@ -656,12 +775,37 @@ kj::Promise> TlsContext::wrapClient( kj::Promise> TlsContext::wrapServer(kj::Own stream) { auto conn = kj::heap(kj::mv(stream), reinterpret_cast(ctx)); auto promise = conn->accept(); + KJ_IF_MAYBE(timeout, acceptTimeout) { + promise = KJ_REQUIRE_NONNULL(timer).timeoutAfter(*timeout, kj::mv(promise)); + } return promise.then(kj::mvCapture(conn, [](kj::Own conn) -> kj::Own { return kj::mv(conn); })); } +kj::Promise TlsContext::wrapClient( + kj::AuthenticatedStream stream, kj::StringPtr expectedServerHostname) { + auto conn = kj::heap(kj::mv(stream.stream), reinterpret_cast(ctx)); + auto promise = conn->connect(expectedServerHostname); + return promise.then([conn=kj::mv(conn),innerId=kj::mv(stream.peerIdentity)]() mutable { + auto id = conn->getIdentity(kj::mv(innerId)); + return kj::AuthenticatedStream { kj::mv(conn), kj::mv(id) }; + }); +} + +kj::Promise TlsContext::wrapServer(kj::AuthenticatedStream stream) { + auto conn = kj::heap(kj::mv(stream.stream), reinterpret_cast(ctx)); + auto promise = conn->accept(); + KJ_IF_MAYBE(timeout, acceptTimeout) { + promise = KJ_REQUIRE_NONNULL(timer).timeoutAfter(*timeout, kj::mv(promise)); + } + return promise.then([conn=kj::mv(conn),innerId=kj::mv(stream.peerIdentity)]() mutable { + auto id = conn->getIdentity(kj::mv(innerId)); + return kj::AuthenticatedStream { kj::mv(conn), kj::mv(id) }; + }); +} + kj::Own TlsContext::wrapPort(kj::Own port) { return kj::heap(*this, kj::mv(port)); } @@ -831,6 +975,45 @@ TlsCertificate::~TlsCertificate() noexcept(false) { } } +// ======================================================================================= +// class TlsPeerIdentity + +TlsPeerIdentity::~TlsPeerIdentity() noexcept(false) { + if (cert != nullptr) { + X509_free(reinterpret_cast(cert)); + } +} + +kj::String TlsPeerIdentity::toString() { + if (hasCertificate()) { + return getCommonName(); + } else { + return kj::str("(anonymous client)"); + } +} + +kj::String TlsPeerIdentity::getCommonName() { + if (cert == nullptr) { + KJ_FAIL_REQUIRE("client did not provide a certificate") { return nullptr; } + } + + X509_NAME* subj = X509_get_subject_name(reinterpret_cast(cert)); + + int index = X509_NAME_get_index_by_NID(subj, NID_commonName, -1); + KJ_ASSERT(index != -1, "certificate has no common name?"); + X509_NAME_ENTRY* entry = X509_NAME_get_entry(subj, index); + KJ_ASSERT(entry != nullptr); + ASN1_STRING* data = X509_NAME_ENTRY_get_data(entry); + KJ_ASSERT(data != nullptr); + + unsigned char* out = nullptr; + int len = ASN1_STRING_to_UTF8(&out, data); + KJ_ASSERT(len >= 0); + KJ_DEFER(OPENSSL_free(out)); + + return kj::heapString(reinterpret_cast(out), len); +} + } // namespace kj #endif // KJ_HAS_OPENSSL diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls.h b/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls.h index d3ce505b6d8..b901e1fa781 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/tls.h @@ -36,6 +36,7 @@ class TlsPrivateKey; class TlsCertificate; struct TlsKeypair; class TlsSniCallback; +class TlsConnection; enum class TlsVersion { SSL_3, // avoid; 
cryptographically broken @@ -90,6 +91,12 @@ class TlsContext { kj::Maybe sniCallback; // Callback that can be used to choose a different key/certificate based on the specific // hostname requested by the client. + + kj::Maybe timer; + // The timer used for `acceptTimeout` below. + + kj::Maybe acceptTimeout; + // Timeout applied to accepting a new TLS connection. `timer` is required if this is set. }; TlsContext(Options options = Options()); @@ -112,6 +119,12 @@ class TlsContext { // 2. The server's certificate is validated against this hostname. If validation fails, the // promise returned by wrapClient() will be broken; you'll never get a stream. + kj::Promise wrapServer(kj::AuthenticatedStream stream); + kj::Promise wrapClient( + kj::AuthenticatedStream stream, kj::StringPtr expectedServerHostname); + // Like wrapServer() and wrapClient(), but also produces information about the peer's + // certificate (if any). The returned `peerIdentity` will be a `TlsPeerIdentity`. + kj::Own wrapPort(kj::Own port); // Upgrade a ConnectionReceiver to one that automatically upgrades all accepted connections to // TLS (acting as the server). @@ -123,6 +136,8 @@ class TlsContext { private: void* ctx; // actually type SSL_CTX, but we don't want to #include the OpenSSL headers here + kj::Maybe timer; + kj::Maybe acceptTimeout; struct SniCallback; }; @@ -229,4 +244,42 @@ class TlsSniCallback { // TlsContext::Options::defaultKeypair. }; +class TlsPeerIdentity final: public kj::PeerIdentity { +public: + KJ_DISALLOW_COPY(TlsPeerIdentity); + ~TlsPeerIdentity() noexcept(false); + + kj::String toString() override; + + kj::PeerIdentity& getNetworkIdentity() { return *inner; } + // Gets the PeerIdentity of the underlying network connection. + + bool hasCertificate() { return cert != nullptr; } + // Did the peer even present a (trusted) certificate? Servers must always present certificates. + // Clients need only present certificates when the `verifyClients` option is enabled. + // + // Methods of this class that read details of the certificate will throw exceptions when no + // certificate was presented. We don't have them return `Maybe`s because most applications know + // in advance whether or not a certificate should be present, so it would lead to lots of + // `KJ_ASSERT_NONNULL`... + + kj::String getCommonName(); + // Get the authenticated common name from the certificate. + + bool matchesHostname(kj::StringPtr hostname); + // Check if the certificate authenticates the given hostname, considering wildcards and SAN + // extensions. If no certificate was provided, always returns false. + + // TODO(someday): Methods for other things. Match hostnames (i.e. evaluate wildcards and SAN)? + // Key fingerprint? Other certificate fields? + +private: + void* cert; // actually type X509*, but we don't want to #include the OpenSSL headers here. 
+ kj::Own inner; + +public: // (not really public, only TlsConnection can call this) + TlsPeerIdentity(void* cert, kj::Own inner, kj::Badge) + : cert(cert), inner(kj::mv(inner)) {} +}; + } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/url-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/url-test.c++ index d5a2d05627b..d5a437a9c48 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/url-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/url-test.c++ @@ -39,6 +39,12 @@ Url parseAndCheck(kj::StringPtr originalText, kj::StringPtr expectedRestringifie static constexpr Url::Options NO_DECODE { false, // percentDecode + false, // allowEmpty +}; + +static constexpr Url::Options ALLOW_EMPTY { + true, // percentDecode + true, // allowEmpty }; KJ_TEST("parse / stringify URL") { @@ -281,6 +287,25 @@ KJ_TEST("parse / stringify URL") { parseAndCheck("https://capnproto.org/foo%25bar"); parseAndCheck("https://capnproto.org/?foo%25bar=baz%25qux"); parseAndCheck("https://capnproto.org/#foo%25bar"); + + // Make sure redundant /'s and &'s aren't collapsed when options.removeEmpty is false. + parseAndCheck("https://capnproto.org/foo//bar///test//?foo=bar&&baz=qux&", nullptr, ALLOW_EMPTY); + + // "." and ".." are still processed, though. + parseAndCheck("https://capnproto.org/foo//../bar/.", + "https://capnproto.org/foo/bar/", ALLOW_EMPTY); + + { + auto url = parseAndCheck("https://foo/", nullptr, ALLOW_EMPTY); + KJ_EXPECT(url.path.size() == 0); + KJ_EXPECT(url.hasTrailingSlash); + } + + { + auto url = parseAndCheck("https://foo/bar/", nullptr, ALLOW_EMPTY); + KJ_EXPECT(url.path.size() == 1); + KJ_EXPECT(url.hasTrailingSlash); + } } KJ_TEST("URL percent encoding") { diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/url.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/url.c++ index 72f7a9f8608..e7626761fcb 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/url.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/url.c++ @@ -233,7 +233,8 @@ Maybe Url::tryParse(StringPtr text, Context context, Options options) { result.path.removeLast(); } result.hasTrailingSlash = true; - } else if (part.size() == 0 || (part.size() == 1 && part[0] == '.')) { + } else if ((part.size() == 0 && (!options.allowEmpty || text.size() == 0)) || + (part.size() == 1 && part[0] == '.')) { // Collapse consecutive slashes and "/./". result.hasTrailingSlash = true; } else { @@ -247,7 +248,7 @@ Maybe Url::tryParse(StringPtr text, Context context, Options options) { text = text.slice(1); auto part = split(text, END_QUERY_PART); - if (part.size() > 0) { + if (part.size() > 0 || options.allowEmpty) { KJ_IF_MAYBE(key, trySplit(part, '=')) { result.query.add(QueryParam { percentDecodeQuery(*key, err, options), percentDecodeQuery(part, err, options) }); @@ -458,8 +459,8 @@ String Url::toString(Context context) const { for (auto& pathPart: path) { // Protect against path injection. - KJ_REQUIRE(pathPart != "" && pathPart != "." && pathPart != "..", - "invalid name in URL path", *this) { + KJ_REQUIRE((pathPart != "" || options.allowEmpty) && pathPart != "." 
&& pathPart != "..", + "invalid name in URL path", path) { continue; } chars.add('/'); diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/compat/url.h b/libs/EXTERNAL/capnproto/c++/src/kj/compat/url.h index 5350fa84e63..2001adf4331 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/compat/url.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/compat/url.h @@ -38,11 +38,11 @@ struct UrlOptions { // True if URL components should be automatically percent-decoded during parsing, and // percent-encoded during serialization. -#if __cplusplus < 201402L - inline constexpr UrlOptions(bool percentDecode = true): percentDecode(percentDecode) {} - // TODO(cleanup): This constructor is only here to support brace initialization in C++11. It - // should be removed once we upgrade to C++14. -#endif + bool allowEmpty = false; + // Whether or not to allow empty path and query components when parsing; otherwise, they are + // silently removed. In other words, setting this false causes consecutive slashes in the path or + // consecutive ampersands in the query to be collapsed into one, whereas if true then they + // produce empty components. }; struct Url { @@ -103,16 +103,14 @@ struct Url { ~Url() noexcept(false); Url& operator=(Url&&) = default; -#if __cplusplus < 201402L inline Url(String&& scheme, Maybe&& userInfo, String&& host, Vector&& path, bool hasTrailingSlash, Vector&& query, Maybe&& fragment, UrlOptions options) : scheme(kj::mv(scheme)), userInfo(kj::mv(userInfo)), host(kj::mv(host)), path(kj::mv(path)), hasTrailingSlash(hasTrailingSlash), query(kj::mv(query)), fragment(kj::mv(fragment)), options(options) {} - // TODO(cleanup): This constructor is only here to support brace initialization in C++11. It - // should be removed once we upgrade to C++14. -#endif + // This constructor makes brace initialization work in C++11 and C++20 -- but is technically not + // needed in C++14 nor C++17. Go figure. Url clone() const; diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/debug-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/debug-test.c++ index 2639e69aea5..505ae3e785f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/debug-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/debug-test.c++ @@ -40,7 +40,7 @@ #include #endif -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #pragma warning(disable: 4996) // Warns that sprintf() is buffer-overrunny. Yeah, I know, it's cool. #endif @@ -75,6 +75,7 @@ public: // This is the child! 
close(pipeFds[0]); outputPipe = pipeFds[1]; + text.clear(); return true; } else { close(pipeFds[1]); @@ -174,9 +175,20 @@ public: this->text += "log message: "; text = str(file, ":", line, ":+", contextDepth, ": ", severity, ": ", mv(text)); this->text.append(text.begin(), text.end()); + this->text.append("\n"); } }; +#define EXPECT_LOG_EQ(f, expText) do { \ + std::string text; \ + { \ + MockExceptionCallback mockCallback; \ + f(); \ + text = kj::mv(mockCallback.text); \ + } \ + EXPECT_EQ(expText, text); \ +} while(0) + #if KJ_NO_EXCEPTIONS #define EXPECT_FATAL(code) if (mockCallback.forkForDeathTest()) { code; abort(); } #else @@ -197,47 +209,49 @@ std::string fileLine(std::string file, int line) { } TEST(Debug, Log) { - MockExceptionCallback mockCallback; int line; - KJ_LOG(WARNING, "Hello world!"); line = __LINE__; - EXPECT_EQ("log message: " + fileLine(__FILE__, line) + ":+0: warning: Hello world!\n", - mockCallback.text); - mockCallback.text.clear(); + EXPECT_LOG_EQ([&](){ + KJ_LOG(WARNING, "Hello world!"); line = __LINE__; + }, "log message: " + fileLine(__FILE__, line) + ":+0: warning: Hello world!\n"); int i = 123; const char* str = "foo"; - KJ_LOG(ERROR, i, str); line = __LINE__; - EXPECT_EQ("log message: " + fileLine(__FILE__, line) + ":+0: error: i = 123; str = foo\n", - mockCallback.text); - mockCallback.text.clear(); + EXPECT_LOG_EQ([&](){ + KJ_LOG(ERROR, i, str); line = __LINE__; + }, "log message: " + fileLine(__FILE__, line) + ":+0: error: i = 123; str = foo\n"); + + // kj::str() expressions are included literally. + EXPECT_LOG_EQ([&](){ + KJ_LOG(ERROR, kj::str(i, str), "x"); line = __LINE__; + }, "log message: " + fileLine(__FILE__, line) + ":+0: error: 123foo; x\n"); - KJ_DBG("Some debug text."); line = __LINE__; - EXPECT_EQ("log message: " + fileLine(__FILE__, line) + ":+0: debug: Some debug text.\n", - mockCallback.text); - mockCallback.text.clear(); + EXPECT_LOG_EQ([&](){ + KJ_DBG("Some debug text."); line = __LINE__; + }, "log message: " + fileLine(__FILE__, line) + ":+0: debug: Some debug text.\n"); // INFO logging is disabled by default. - KJ_LOG(INFO, "Info."); line = __LINE__; - EXPECT_EQ("", mockCallback.text); - mockCallback.text.clear(); + EXPECT_LOG_EQ([&](){ + KJ_LOG(INFO, "Info."); line = __LINE__; + }, ""); // Enable it. Debug::setLogLevel(Debug::Severity::INFO); - KJ_LOG(INFO, "Some text."); line = __LINE__; - EXPECT_EQ("log message: " + fileLine(__FILE__, line) + ":+0: info: Some text.\n", - mockCallback.text); - mockCallback.text.clear(); + EXPECT_LOG_EQ([&](){ + KJ_LOG(INFO, "Some text."); line = __LINE__; + }, "log message: " + fileLine(__FILE__, line) + ":+0: info: Some text.\n"); // Back to default. 
Debug::setLogLevel(Debug::Severity::WARNING); - KJ_ASSERT(1 == 1); - EXPECT_FATAL(KJ_ASSERT(1 == 2)); line = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + ": failed: expected " - "1 == 2\n", mockCallback.text); - mockCallback.text.clear(); + EXPECT_LOG_EQ([&](){ + KJ_ASSERT(1 == 1); + }, ""); + + EXPECT_LOG_EQ([&](){ + EXPECT_FATAL(KJ_ASSERT(1 == 2)); line = __LINE__; + }, "fatal exception: " + fileLine(__FILE__, line) + ": failed: expected 1 == 2 [1 == 2]\n"); KJ_ASSERT(1 == 1) { ADD_FAILURE() << "Shouldn't call recovery code when check passes."; @@ -245,26 +259,31 @@ TEST(Debug, Log) { }; bool recovered = false; - KJ_ASSERT(1 == 2, "1 is not 2") { recovered = true; break; } line = __LINE__; - EXPECT_EQ("recoverable exception: " + fileLine(__FILE__, line) + ": failed: expected " - "1 == 2; 1 is not 2\n", mockCallback.text); + EXPECT_LOG_EQ([&](){ + KJ_ASSERT(1 == 2, "1 is not 2") { recovered = true; break; } line = __LINE__; + }, ( + "recoverable exception: " + fileLine(__FILE__, line) + ": " + "failed: expected 1 == 2 [1 == 2]; 1 is not 2\n" + )); EXPECT_TRUE(recovered); - mockCallback.text.clear(); - - EXPECT_FATAL(KJ_ASSERT(1 == 2, i, "hi", str)); line = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + ": failed: expected " - "1 == 2; i = 123; hi; str = foo\n", mockCallback.text); - mockCallback.text.clear(); - - EXPECT_FATAL(KJ_REQUIRE(1 == 2, i, "hi", str)); line = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + ": failed: expected " - "1 == 2; i = 123; hi; str = foo\n", mockCallback.text); - mockCallback.text.clear(); - - EXPECT_FATAL(KJ_FAIL_ASSERT("foo")); line = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + ": failed: foo\n", - mockCallback.text); - mockCallback.text.clear(); + + EXPECT_LOG_EQ([&](){ + EXPECT_FATAL(KJ_ASSERT(1 == 2, i, "hi", str)); line = __LINE__; + }, ( + "fatal exception: " + fileLine(__FILE__, line) + ": " + "failed: expected 1 == 2 [1 == 2]; i = 123; hi; str = foo\n" + )); + + EXPECT_LOG_EQ([&](){ + EXPECT_FATAL(KJ_REQUIRE(1 == 2, i, "hi", str)); line = __LINE__; + }, ( + "fatal exception: " + fileLine(__FILE__, line) + ": " + "failed: expected 1 == 2 [1 == 2]; i = 123; hi; str = foo\n" + )); + + EXPECT_LOG_EQ([&](){ + EXPECT_FATAL(KJ_FAIL_ASSERT("foo")); line = __LINE__; + }, "fatal exception: " + fileLine(__FILE__, line) + ": failed: foo\n"); } TEST(Debug, Exception) { @@ -322,7 +341,7 @@ TEST(Debug, Catch) { // Catch as std::exception. 
try { line = __LINE__; KJ_FAIL_ASSERT("foo"); - ADD_FAILURE() << "Expected exception."; + KJ_KNOWN_UNREACHABLE(ADD_FAILURE() << "Expected exception."); } catch (const std::exception& e) { kj::StringPtr what = e.what(); std::string text; @@ -343,90 +362,174 @@ int mockSyscall(int i, int error = 0) { } TEST(Debug, Syscall) { - MockExceptionCallback mockCallback; int line; int i = 123; const char* str = "foo"; - KJ_SYSCALL(mockSyscall(0)); - KJ_SYSCALL(mockSyscall(1)); - - EXPECT_FATAL(KJ_SYSCALL(mockSyscall(-1, EBADF), i, "bar", str)); line = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + - ": failed: mockSyscall(-1, EBADF): " + strerror(EBADF) + - "; i = 123; bar; str = foo\n", mockCallback.text); - mockCallback.text.clear(); - - EXPECT_FATAL(KJ_SYSCALL(mockSyscall(-1, ECONNRESET), i, "bar", str)); line = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + - ": disconnected: mockSyscall(-1, ECONNRESET): " + strerror(ECONNRESET) + - "; i = 123; bar; str = foo\n", mockCallback.text); - mockCallback.text.clear(); - - EXPECT_FATAL(KJ_SYSCALL(mockSyscall(-1, ENOMEM), i, "bar", str)); line = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + - ": overloaded: mockSyscall(-1, ENOMEM): " + strerror(ENOMEM) + - "; i = 123; bar; str = foo\n", mockCallback.text); - mockCallback.text.clear(); - - EXPECT_FATAL(KJ_SYSCALL(mockSyscall(-1, ENOSYS), i, "bar", str)); line = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + - ": unimplemented: mockSyscall(-1, ENOSYS): " + strerror(ENOSYS) + - "; i = 123; bar; str = foo\n", mockCallback.text); - mockCallback.text.clear(); + EXPECT_LOG_EQ([&](){ + KJ_SYSCALL(mockSyscall(0)); + KJ_SYSCALL(mockSyscall(1)); + }, ""); + + EXPECT_LOG_EQ([&](){ + EXPECT_FATAL(KJ_SYSCALL(mockSyscall(-1, EBADF), i, "bar", str)); line = __LINE__; + }, ( + "fatal exception: " + fileLine(__FILE__, line) + + ": failed: mockSyscall(-1, EBADF): " + strerror(EBADF) + + "; i = 123; bar; str = foo\n" + )); + + EXPECT_LOG_EQ([&](){ + EXPECT_FATAL(KJ_SYSCALL(mockSyscall(-1, ECONNRESET), i, "bar", str)); line = __LINE__; + }, ( + "fatal exception: " + fileLine(__FILE__, line) + + ": disconnected: mockSyscall(-1, ECONNRESET): " + strerror(ECONNRESET) + + "; i = 123; bar; str = foo\n" + )); + + EXPECT_LOG_EQ([&](){ + EXPECT_FATAL(KJ_SYSCALL(mockSyscall(-1, ENOMEM), i, "bar", str)); line = __LINE__; + }, ( + "fatal exception: " + fileLine(__FILE__, line) + + ": overloaded: mockSyscall(-1, ENOMEM): " + strerror(ENOMEM) + + "; i = 123; bar; str = foo\n" + )); + + EXPECT_LOG_EQ([&](){ + EXPECT_FATAL(KJ_SYSCALL(mockSyscall(-1, ENOSYS), i, "bar", str)); line = __LINE__; + }, ( + "fatal exception: " + fileLine(__FILE__, line) + + ": unimplemented: mockSyscall(-1, ENOSYS): " + strerror(ENOSYS) + + "; i = 123; bar; str = foo\n" + )); int result = 0; bool recovered = false; - KJ_SYSCALL(result = mockSyscall(-2, EBADF), i, "bar", str) { recovered = true; break; } line = __LINE__; - EXPECT_EQ("recoverable exception: " + fileLine(__FILE__, line) + - ": failed: mockSyscall(-2, EBADF): " + strerror(EBADF) + - "; i = 123; bar; str = foo\n", mockCallback.text); + EXPECT_LOG_EQ([&](){ + KJ_SYSCALL(result = mockSyscall(-2, EBADF), i, "bar", str) { recovered = true; break; } line = __LINE__; + }, ( + "recoverable exception: " + fileLine(__FILE__, line) + + ": failed: mockSyscall(-2, EBADF): " + strerror(EBADF) + + "; i = 123; bar; str = foo\n" + )); EXPECT_EQ(-2, result); EXPECT_TRUE(recovered); } TEST(Debug, Context) { - 
MockExceptionCallback mockCallback; - - { - KJ_CONTEXT("foo"); int cline = __LINE__; - - KJ_LOG(WARNING, "blah"); int line = __LINE__; - EXPECT_EQ("log message: " + fileLine(__FILE__, cline) + ":+0: context: foo\n" - "log message: " + fileLine(__FILE__, line) + ":+1: warning: blah\n", - mockCallback.text); - mockCallback.text.clear(); - - EXPECT_FATAL(KJ_FAIL_ASSERT("bar")); line = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, cline) + ": context: foo\n" - + fileLine(__FILE__, line) + ": failed: bar\n", - mockCallback.text); - mockCallback.text.clear(); - + int line; + int line2; + int cline; + int cline2; + + EXPECT_LOG_EQ([&](){ + KJ_CONTEXT("foo"); cline = __LINE__; + + KJ_LOG(WARNING, "blah"); line = __LINE__; + EXPECT_FATAL(KJ_FAIL_ASSERT("bar")); line2 = __LINE__; + }, ( + "log message: " + fileLine(__FILE__, cline) + ":+0: info: context: foo\n\n" + "log message: " + fileLine(__FILE__, line) + ":+1: warning: blah\n" + "fatal exception: " + fileLine(__FILE__, cline) + ": context: foo\n" + + fileLine(__FILE__, line2) + ": failed: bar\n" + )); + + EXPECT_LOG_EQ([&](){ + KJ_CONTEXT("foo"); cline = __LINE__; { int i = 123; const char* str = "qux"; - KJ_CONTEXT("baz", i, "corge", str); int cline2 = __LINE__; - EXPECT_FATAL(KJ_FAIL_ASSERT("bar")); line = __LINE__; + KJ_CONTEXT("baz", i, "corge", str); cline2 = __LINE__; - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, cline) + ": context: foo\n" - + fileLine(__FILE__, cline2) + ": context: baz; i = 123; corge; str = qux\n" - + fileLine(__FILE__, line) + ": failed: bar\n", - mockCallback.text); - mockCallback.text.clear(); + EXPECT_FATAL(KJ_FAIL_ASSERT("bar")); line = __LINE__; } - + }, ( + "fatal exception: " + fileLine(__FILE__, cline) + ": context: foo\n" + + fileLine(__FILE__, cline2) + ": context: baz; i = 123; corge; str = qux\n" + + fileLine(__FILE__, line) + ": failed: bar\n" + )); + + EXPECT_LOG_EQ([&](){ + KJ_CONTEXT("foo"); cline = __LINE__; { - KJ_CONTEXT("grault"); int cline2 = __LINE__; + int i = 123; + const char* str = "qux"; + KJ_CONTEXT("baz", i, "corge", str); cline2 = __LINE__; + } + { + KJ_CONTEXT("grault"); cline2 = __LINE__; EXPECT_FATAL(KJ_FAIL_ASSERT("bar")); line = __LINE__; - - EXPECT_EQ("fatal exception: " + fileLine(__FILE__, cline) + ": context: foo\n" - + fileLine(__FILE__, cline2) + ": context: grault\n" - + fileLine(__FILE__, line) + ": failed: bar\n", - mockCallback.text); - mockCallback.text.clear(); } + }, ( + "fatal exception: " + fileLine(__FILE__, cline) + ": context: foo\n" + + fileLine(__FILE__, cline2) + ": context: grault\n" + + fileLine(__FILE__, line) + ": failed: bar\n" + )); +} + +KJ_TEST("magic assert stringification") { + { + auto exception = KJ_ASSERT_NONNULL(kj::runCatchingExceptions([&]() { + int foo = 123; + int bar = 456; + KJ_ASSERT(foo == bar) { break; } + })); + + KJ_EXPECT(exception.getDescription() == "expected foo == bar [123 == 456]"); + } + + { + auto exception = KJ_ASSERT_NONNULL(kj::runCatchingExceptions([&]() { + auto foo = kj::str("hello"); + auto bar = kj::str("world!"); + KJ_ASSERT(foo == bar, foo.size(), bar.size()) { break; } + })); + + KJ_EXPECT(exception.getDescription() == + "expected foo == bar [hello == world!]; foo.size() = 5; bar.size() = 6"); + } + + { + auto exception = KJ_ASSERT_NONNULL(kj::runCatchingExceptions([&]() { + KJ_ASSERT(kj::str("hello") == kj::str("world!")) { break; } + })); + + KJ_EXPECT(exception.getDescription() == + "expected kj::str(\"hello\") == kj::str(\"world!\") [hello == world!]"); + } + + { + auto exception = 
KJ_ASSERT_NONNULL(kj::runCatchingExceptions([&]() { + int foo = 123; + int bar = 456; + KJ_ASSERT((foo == bar)) { break; } + })); + + KJ_EXPECT(exception.getDescription() == "expected (foo == bar)"); + } + + // Test use of << on left side, which could create confusion. + { + auto exception = KJ_ASSERT_NONNULL(kj::runCatchingExceptions([&]() { + int foo = 123; + int bar = 456; + KJ_ASSERT(foo << 2 == bar) { break; } + })); + + KJ_EXPECT(exception.getDescription() == "expected foo << 2 == bar [492 == 456]"); + } + + // Test use of & on left side. + { + int foo = 4; + KJ_ASSERT(foo & 4); + + auto exception = KJ_ASSERT_NONNULL(kj::runCatchingExceptions([&]() { + KJ_ASSERT(foo & 2) { break; } + })); + + KJ_EXPECT(exception.getDescription() == "expected foo & 2"); } } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/debug.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/debug.c++ index d415ad398f0..f685e3162ff 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/debug.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/debug.c++ @@ -19,22 +19,24 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +#if _WIN32 || __CYGWIN__ +#include "win32-api-version.h" +#endif + #include "debug.h" #include #include #include #include -#if _WIN32 +#if _WIN32 || __CYGWIN__ +#if !__CYGWIN__ #define strerror_r(errno,buf,len) strerror_s(buf,len,errno) -#define NOMINMAX 1 -#define WIN32_LEAN_AND_MEAN 1 -#define NOSERVICE 1 -#define NOMCX 1 -#define NOIME 1 +#endif #include #include "windows-sanity.h" #include "encoding.h" +#include #endif namespace kj { @@ -133,7 +135,7 @@ Exception::Type typeOfErrno(int error) { } } -#if _WIN32 +#if _WIN32 || __CYGWIN__ Exception::Type typeOfWin32Error(DWORD error) { switch (error) { @@ -200,18 +202,20 @@ static String makeDescriptionImpl(DescriptionStyle style, const char* code, int quoted = true; } else if (c == ',' && depth == 0) { if (index < argValues.size()) { - argNames[index] = arrayPtr(start, pos - 1); + argNames[index++] = arrayPtr(start, pos - 1); } - ++index; while (isspace(*pos)) ++pos; start = pos; + if (*pos == '\0') { + // ignore trailing comma + break; + } } } } if (index < argValues.size()) { - argNames[index] = arrayPtr(start, pos - 1); + argNames[index++] = arrayPtr(start, pos - 1); } - ++index; if (index != argValues.size()) { getExceptionCallback().logMessage(LogSeverity::ERROR, __FILE__, __LINE__, 0, @@ -242,6 +246,8 @@ static String makeDescriptionImpl(DescriptionStyle style, const char* code, int StringPtr sep = " = "; StringPtr delim = "; "; StringPtr colon = ": "; + StringPtr openBracket = " ["; + StringPtr closeBracket = "]"; StringPtr sysErrorArray; // On android before marshmallow only the posix version of stderror_r was @@ -279,11 +285,26 @@ static String makeDescriptionImpl(DescriptionStyle style, const char* code, int break; } + auto needsLabel = [](ArrayPtr &argName) -> bool { + return (argName.size() > 0 && argName[0] != '\"' && + !(argName.size() >= 8 && memcmp(argName.begin(), "kj::str(", 8) == 0)); + }; + for (size_t i = 0; i < argValues.size(); i++) { + if (argNames[i] == "_kjCondition"_kj) { + // Special handling: don't output delimiter, we want to append this to the previous item, + // in brackets. Also, if it's just "[false]" (meaning we didn't manage to extract a + // comparison), don't add it at all. 
+ if (argValues[i] != "false") { + totalSize += openBracket.size() + argValues[i].size() + closeBracket.size(); + } + continue; + } + if (i > 0 || style != LOG) { totalSize += delim.size(); } - if (argNames[i].size() > 0 && argNames[i][0] != '\"') { + if (needsLabel(argNames[i])) { totalSize += argNames[i].size() + sep.size(); } totalSize += argValues[i].size(); @@ -304,10 +325,20 @@ static String makeDescriptionImpl(DescriptionStyle style, const char* code, int } for (size_t i = 0; i < argValues.size(); i++) { + if (argNames[i] == "_kjCondition"_kj) { + // Special handling: don't output delimiter, we want to append this to the previous item, + // in brackets. Also, if it's just "[false]" (meaning we didn't manage to extract a + // comparison), don't add it at all. + if (argValues[i] != "false") { + pos = _::fill(pos, openBracket, argValues[i], closeBracket); + } + continue; + } + if (i > 0 || style != LOG) { pos = _::fill(pos, delim); } - if (argNames[i].size() > 0 && argNames[i][0] != '\"') { + if (needsLabel(argNames[i])) { pos = _::fill(pos, argNames[i], sep); } pos = _::fill(pos, argValues[i]); @@ -329,7 +360,7 @@ Debug::Fault::~Fault() noexcept(false) { if (exception != nullptr) { Exception copy = mv(*exception); delete exception; - throwRecoverableException(mv(copy), 2); + throwRecoverableException(mv(copy), 1); } } @@ -337,8 +368,8 @@ void Debug::Fault::fatal() { Exception copy = mv(*exception); delete exception; exception = nullptr; - throwFatalException(mv(copy), 2); - abort(); + throwFatalException(mv(copy), 1); + KJ_KNOWN_UNREACHABLE(abort()); } void Debug::Fault::init( @@ -355,7 +386,7 @@ void Debug::Fault::init( makeDescriptionImpl(SYSCALL, condition, osErrorNumber, nullptr, macroArgs, argValues)); } -#if _WIN32 +#if _WIN32 || __CYGWIN__ void Debug::Fault::init( const char* file, int line, Win32Result osErrorNumber, const char* condition, const char* macroArgs, ArrayPtr argValues) { @@ -401,7 +432,7 @@ int Debug::getOsErrorNumber(bool nonblocking) { : result; } -#if _WIN32 +#if _WIN32 || __CYGWIN__ uint Debug::getWin32ErrorCode() { return ::GetLastError(); } @@ -434,7 +465,7 @@ void Debug::Context::logMessage(LogSeverity severity, const char* file, int line String&& text) { if (!logged) { Value v = ensureInitialized(); - next.logMessage(LogSeverity::INFO, v.file, v.line, 0, + next.logMessage(LogSeverity::INFO, trimSourceFilename(v.file).cStr(), v.line, 0, str("context: ", mv(v.description), '\n')); logged = true; } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/debug.h b/libs/EXTERNAL/capnproto/c++/src/kj/debug.h index a1e3dcbf2cd..25659c932fb 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/debug.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/debug.h @@ -106,23 +106,21 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "string.h" #include "exception.h" #include "windows-sanity.h" // work-around macro conflict with `ERROR` +KJ_BEGIN_HEADER + namespace kj { -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) // MSVC does __VA_ARGS__ differently from GCC: // - A trailing comma before an empty __VA_ARGS__ is removed automatically, whereas GCC wants // you to request this behavior with "##__VA_ARGS__". // - If __VA_ARGS__ is passed directly as an argument to another macro, it will be treated as a // *single* argument rather than an argument list. 
This can be worked around by wrapping the -// outer macro call in KJ_EXPAND(), which appraently forces __VA_ARGS__ to be expanded before +// outer macro call in KJ_EXPAND(), which apparently forces __VA_ARGS__ to be expanded before // the macro is evaluated. I don't understand the C preprocessor. // - Using "#__VA_ARGS__" to stringify __VA_ARGS__ expands to zero tokens when __VA_ARGS__ is // empty, rather than expanding to an empty string literal. We can work around by concatenating @@ -131,16 +129,17 @@ namespace kj { #define KJ_EXPAND(X) X #define KJ_LOG(severity, ...) \ - if (!::kj::_::Debug::shouldLog(::kj::LogSeverity::severity)) {} else \ + for (bool _kj_shouldLog = ::kj::_::Debug::shouldLog(::kj::LogSeverity::severity); \ + _kj_shouldLog; _kj_shouldLog = false) \ ::kj::_::Debug::log(__FILE__, __LINE__, ::kj::LogSeverity::severity, \ "" #__VA_ARGS__, __VA_ARGS__) #define KJ_DBG(...) KJ_EXPAND(KJ_LOG(DBG, __VA_ARGS__)) #define KJ_REQUIRE(cond, ...) \ - if (KJ_LIKELY(cond)) {} else \ + if (auto _kjCondition = ::kj::_::MAGIC_ASSERT << cond) {} else \ for (::kj::_::Debug::Fault f(__FILE__, __LINE__, ::kj::Exception::Type::FAILED, \ - #cond, "" #__VA_ARGS__, __VA_ARGS__);; f.fatal()) + #cond, "_kjCondition," #__VA_ARGS__, _kjCondition, __VA_ARGS__);; f.fatal()) #define KJ_FAIL_REQUIRE(...) \ for (::kj::_::Debug::Fault f(__FILE__, __LINE__, ::kj::Exception::Type::FAILED, \ @@ -160,7 +159,7 @@ namespace kj { for (::kj::_::Debug::Fault f(__FILE__, __LINE__, \ errorNumber, code, "" #__VA_ARGS__, __VA_ARGS__);; f.fatal()) -#if _WIN32 +#if _WIN32 || __CYGWIN__ #define KJ_WIN32(call, ...) \ if (auto _kjWin32Result = ::kj::_::Debug::win32Call(call)) {} else \ @@ -210,16 +209,17 @@ namespace kj { #else #define KJ_LOG(severity, ...) \ - if (!::kj::_::Debug::shouldLog(::kj::LogSeverity::severity)) {} else \ + for (bool _kj_shouldLog = ::kj::_::Debug::shouldLog(::kj::LogSeverity::severity); \ + _kj_shouldLog; _kj_shouldLog = false) \ ::kj::_::Debug::log(__FILE__, __LINE__, ::kj::LogSeverity::severity, \ #__VA_ARGS__, ##__VA_ARGS__) #define KJ_DBG(...) KJ_LOG(DBG, ##__VA_ARGS__) #define KJ_REQUIRE(cond, ...) \ - if (KJ_LIKELY(cond)) {} else \ + if (auto _kjCondition = ::kj::_::MAGIC_ASSERT << cond) {} else \ for (::kj::_::Debug::Fault f(__FILE__, __LINE__, ::kj::Exception::Type::FAILED, \ - #cond, #__VA_ARGS__, ##__VA_ARGS__);; f.fatal()) + #cond, "_kjCondition," #__VA_ARGS__, _kjCondition, ##__VA_ARGS__);; f.fatal()) #define KJ_FAIL_REQUIRE(...) \ for (::kj::_::Debug::Fault f(__FILE__, __LINE__, ::kj::Exception::Type::FAILED, \ @@ -239,7 +239,7 @@ namespace kj { for (::kj::_::Debug::Fault f(__FILE__, __LINE__, \ errorNumber, code, #__VA_ARGS__, ##__VA_ARGS__);; f.fatal()) -#if _WIN32 +#if _WIN32 || __CYGWIN__ #define KJ_WIN32(call, ...) 
\ if (auto _kjWin32Result = ::kj::_::Debug::win32Call(call)) {} else \ @@ -309,7 +309,7 @@ namespace kj { // handleSuccessCase(); // } -#if _WIN32 +#if _WIN32 || __CYGWIN__ #define KJ_WIN32_HANDLE_ERRORS(call) \ if (uint _kjWin32Error = ::kj::_::Debug::win32Call(call).number) \ @@ -356,7 +356,7 @@ class Debug { typedef LogSeverity Severity; // backwards-compatibility -#if _WIN32 +#if _WIN32 || __CYGWIN__ struct Win32Result { uint number; inline explicit Win32Result(uint number): number(number) {} @@ -385,7 +385,7 @@ class Debug { const char* condition, const char* macroArgs); Fault(const char* file, int line, int osErrorNumber, const char* condition, const char* macroArgs); -#if _WIN32 +#if _WIN32 || __CYGWIN__ Fault(const char* file, int line, Win32Result osErrorNumber, const char* condition, const char* macroArgs); #endif @@ -399,7 +399,7 @@ class Debug { const char* condition, const char* macroArgs, ArrayPtr argValues); void init(const char* file, int line, int osErrorNumber, const char* condition, const char* macroArgs, ArrayPtr argValues); -#if _WIN32 +#if _WIN32 || __CYGWIN__ void init(const char* file, int line, Win32Result osErrorNumber, const char* condition, const char* macroArgs, ArrayPtr argValues); #endif @@ -422,7 +422,7 @@ class Debug { template static int syscallError(Call&& call, bool nonblocking); -#if _WIN32 +#if _WIN32 || __CYGWIN__ static Win32Result win32Call(int boolean); static Win32Result win32Call(void* handle); static Win32Result winsockCall(int result); @@ -518,7 +518,7 @@ inline Debug::Fault::Fault(const char* file, int line, kj::Exception::Type type, init(file, line, type, condition, macroArgs, nullptr); } -#if _WIN32 +#if _WIN32 || __CYGWIN__ inline Debug::Fault::Fault(const char* file, int line, Win32Result osErrorNumber, const char* condition, const char* macroArgs) : exception(nullptr) { @@ -577,5 +577,126 @@ inline String Debug::makeDescription<>(const char* macroArgs) { return makeDescriptionInternal(macroArgs, nullptr); } +// ======================================================================================= +// Magic Asserts! +// +// When KJ_ASSERT(foo == bar) fails, `foo` and `bar`'s actual values will be stringified in the +// error message. How does it work? We use template magic and operator precedence. The assertion +// actually evaluates something like this: +// +// if (auto _kjCondition = kj::_::MAGIC_ASSERT << foo == bar) +// +// `<<` has operator precedence slightly above `==`, so `kj::_::MAGIC_ASSERT << foo` gets evaluated +// first. This wraps `foo` in a little wrapper that captures the comparison operators and keeps +// enough information around to be able to stringify the left and right sides of the comparison +// independently. As always, the stringification only actually occurs if the assert fails. +// +// You might ask why we use operator `<<` and not e.g. operator `<=`, since operators of the same +// precedence are evaluated left-to-right. The answer is that some compilers trigger all sorts of +// warnings when you seem to be using a comparison as the input to another comparison. The +// particular warning GCC produces is its general "-Wparentheses" warning which is broadly useful, +// so we don't want to disable it. `<<` also produces some warnings, but only on Clang and the +// specific warning is one we're comfortable disabling (see below). This does mean that we have to +// explicitly overload `operator<<` ourselves to make sure using it in an assert still works. 
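To make the precedence trick concrete, here is a stripped-down, standalone illustration of the capture mechanism described above. It deliberately uses std::ostringstream and handles only ==; it is an analogue of the idea, not the DebugExpression machinery defined below in this header:

#include <iostream>
#include <sstream>
#include <string>

template <typename L, typename R>
struct CapturedComparison {
  L left; R right; bool result;
  explicit operator bool() const { return result; }
  std::string describe() const {
    std::ostringstream os;
    os << left << " == " << right;
    return os.str();
  }
};

template <typename T>
struct CapturedLhs {
  T value;
  template <typename U>
  CapturedComparison<T, U> operator==(U&& other) const {
    return { value, other, value == other };
  }
  explicit operator bool() const { return static_cast<bool>(value); }  // plain truthiness asserts
};

struct CaptureStart {
  template <typename T>
  CapturedLhs<T> operator<<(T&& value) const { return { value }; }
};
constexpr CaptureStart MAGIC;

int main() {
  int foo = 123, bar = 456;
  // `<<` binds tighter than `==`, so MAGIC grabs `foo` first and then sees the comparison.
  // (Clang emits -Woverloaded-shift-op-parentheses here, which is exactly the warning the
  // header disables below for the real implementation.)
  if (auto captured = MAGIC << foo == bar) {
    // assertion passed
  } else {
    // The real macro gets the "foo == bar" text from the preprocessor via #cond.
    std::cout << "expected foo == bar [" << captured.describe() << "]\n";  // prints [123 == 456]
  }
  return 0;
}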
+// +// You might also ask, if we're using operator `<<` anyway, why not start it from the right, in +// which case it would bind after computing any `<<` operators that were actually in the user's +// code? I tried this, but it resulted in a somewhat broader warning from clang that I felt worse +// about disabling (a warning about `<<` precedence not applying specifically to overloads) and +// also created ambiguous overload errors in the KJ units code. + +#if __clang__ +// We intentionally overload operator << for the specific purpose of evaluating it before +// evaluating comparison expressions, so stop Clang from warning about it. Unfortunately this means +// eliminating a warning that would otherwise be useful for people using iostreams... sorry. +#pragma GCC diagnostic ignored "-Woverloaded-shift-op-parentheses" +#endif + +template +struct DebugExpression; + +template ()))> +inline auto tryToCharSequence(T* value) { return kj::toCharSequence(*value); } +inline StringPtr tryToCharSequence(...) { return "(can't stringify)"_kj; } +// SFINAE to stringify a value if and only if it can be stringified. + +template +struct DebugComparison { + Left left; + Right right; + StringPtr op; + bool result; + + inline operator bool() const { return KJ_LIKELY(result); } + + template inline void operator&(T&& other) = delete; + template inline void operator^(T&& other) = delete; + template inline void operator|(T&& other) = delete; +}; + +template +String KJ_STRINGIFY(DebugComparison& cmp) { + return _::concat(tryToCharSequence(&cmp.left), cmp.op, tryToCharSequence(&cmp.right)); +} + +template +struct DebugExpression { + DebugExpression(T&& value): value(kj::fwd(value)) {} + T value; + + // Handle comparison operations by constructing a DebugComparison value. +#define DEFINE_OPERATOR(OP) \ + template \ + DebugComparison operator OP(U&& other) { \ + bool result = value OP other; \ + return { kj::fwd(value), kj::fwd(other), " " #OP " "_kj, result }; \ + } + DEFINE_OPERATOR(==); + DEFINE_OPERATOR(!=); + DEFINE_OPERATOR(<=); + DEFINE_OPERATOR(>=); + DEFINE_OPERATOR(< ); + DEFINE_OPERATOR(> ); +#undef DEFINE_OPERATOR + + // Handle binary operators that have equal or lower precedence than comparisons by performing + // the operation and wrapping the result. +#define DEFINE_OPERATOR(OP) \ + template inline auto operator OP(U&& other) { \ + return DebugExpression(value) OP kj::fwd(other))>(\ + kj::fwd(value) OP kj::fwd(other)); \ + } + DEFINE_OPERATOR(<<); + DEFINE_OPERATOR(>>); + DEFINE_OPERATOR(&); + DEFINE_OPERATOR(^); + DEFINE_OPERATOR(|); +#undef DEFINE_OPERATOR + + inline operator bool() { + // No comparison performed, we're just asserting the expression is truthy. This also covers + // the case of the logic operators && and || -- we cannot overload those because doing so would + // break short-circuiting behavior. + return value; + } +}; + +template +StringPtr KJ_STRINGIFY(const DebugExpression& exp) { + // Hack: This will only ever be called in cases where the expression's truthiness was asserted + // directly, and was determined to be falsy. 
+ return "false"_kj; +} + +struct DebugExpressionStart { + template + DebugExpression operator<<(T&& value) const { + return DebugExpression(kj::fwd(value)); + } +}; +static constexpr DebugExpressionStart MAGIC_ASSERT; + } // namespace _ (private) } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/encoding-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/encoding-test.c++ index 55346be5417..50b1223dda5 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/encoding-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/encoding-test.c++ @@ -58,6 +58,15 @@ void expectRes(EncodingResult result, expectResImpl(kj::mv(result), arrayPtr(expected, s - 1), errors); } +#if __cplusplus >= 202000L +template +void expectRes(EncodingResult result, + const char8_t (&expected)[s], + bool errors = false) { + expectResImpl(kj::mv(result), arrayPtr(reinterpret_cast(expected), s - 1), errors); +} +#endif + template void expectRes(EncodingResult result, byte (&expected)[s], @@ -362,10 +371,12 @@ KJ_TEST("application/x-www-form-urlencoded encoding/decoding") { } KJ_TEST("C escape encoding/decoding") { - KJ_EXPECT(encodeCEscape("fooo\a\b\f\n\r\t\v\'\"\\bar") == - "fooo\\a\\b\\f\\n\\r\\t\\v\\\'\\\"\\\\bar"); + KJ_EXPECT(encodeCEscape("fooo\a\b\f\n\r\t\v\'\"\\barПривет, Мир! Ж=О") == + "fooo\\a\\b\\f\\n\\r\\t\\v\\\'\\\"\\\\bar\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82\x2c\x20\xd0\x9c\xd0\xb8\xd1\x80\x21\x20\xd0\x96\x3d\xd0\x9e"); KJ_EXPECT(encodeCEscape("foo\x01\x7fxxx") == "foo\\001\\177xxx"); + byte bytes[] = {'f', 'o', 'o', 0, '\x01', '\x7f', 'x', 'x', 'x', 128, 254, 255}; + KJ_EXPECT(encodeCEscape(bytes) == "foo\\000\\001\\177xxx\\200\\376\\377"); expectRes(decodeCEscape("fooo\\a\\b\\f\\n\\r\\t\\v\\\'\\\"\\\\bar"), "fooo\a\b\f\n\r\t\v\'\"\\bar"); @@ -475,5 +486,50 @@ KJ_TEST("base64 encoding/decoding") { } } +KJ_TEST("base64 url encoding") { + { + // Handles empty. + auto encoded = encodeBase64Url(StringPtr("").asBytes()); + KJ_EXPECT(encoded == "", encoded, encoded.size()); + } + + { + // Handles paddingless encoding. + auto encoded = encodeBase64Url(StringPtr("foo").asBytes()); + KJ_EXPECT(encoded == "Zm9v", encoded, encoded.size()); + } + + { + // Handles padded encoding. + auto encoded1 = encodeBase64Url(StringPtr("quux").asBytes()); + KJ_EXPECT(encoded1 == "cXV1eA", encoded1, encoded1.size()); + auto encoded2 = encodeBase64Url(StringPtr("corge").asBytes()); + KJ_EXPECT(encoded2 == "Y29yZ2U", encoded2, encoded2.size()); + } + + { + // No line breaks. + StringPtr fullLine = "012345678901234567890123456789012345678901234567890123"; + auto encoded = encodeBase64Url(StringPtr(fullLine).asBytes()); + KJ_EXPECT( + encoded == "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIz", + encoded); + } + + { + // Replaces plusses. + const byte data[] = { 0b11111011, 0b11101111, 0b10111110 }; + auto encoded = encodeBase64Url(data); + KJ_EXPECT(encoded == "----", encoded, encoded.size(), data); + } + + { + // Replaces slashes. 
+ const byte data[] = { 0b11111111, 0b11111111, 0b11111111 }; + auto encoded = encodeBase64Url(data); + KJ_EXPECT(encoded == "____", encoded, encoded.size(), data); + } +} + } // namespace } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/encoding.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/encoding.c++ index 68e1eb94b4b..06ef3ab78e6 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/encoding.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/encoding.c++ @@ -536,7 +536,9 @@ EncodingResult> decodeBinaryUriComponent( // ======================================================================================= -String encodeCEscape(ArrayPtr bytes) { +namespace _ { // private + +String encodeCEscapeImpl(ArrayPtr bytes, bool isBinary) { Vector escaped(bytes.size()); for (byte b: bytes) { @@ -552,7 +554,7 @@ String encodeCEscape(ArrayPtr bytes) { case '\"': escaped.addAll(StringPtr("\\\"")); break; case '\\': escaped.addAll(StringPtr("\\\\")); break; default: - if (b < 0x20 || b == 0x7f) { + if (b < 0x20 || b == 0x7f || (isBinary && b > 0x7f)) { // Use octal escape, not hex, because hex escapes technically have no length limit and // so can create ambiguity with subsequent characters. escaped.add('\\'); @@ -570,6 +572,8 @@ String encodeCEscape(ArrayPtr bytes) { return String(escaped.releaseAsArray()); } +} // namespace + EncodingResult> decodeBinaryCEscape(ArrayPtr text, bool nulTerminate) { Vector result(text.size() + nulTerminate); bool hadErrors = false; @@ -729,6 +733,7 @@ int base64_encode_block(const char* plaintext_in, int length_in, switch (state_in->step) { while (1) { + KJ_FALLTHROUGH; case step_A: if (plainchar == plaintextend) { state_in->result = result; @@ -739,7 +744,7 @@ int base64_encode_block(const char* plaintext_in, int length_in, result = (fragment & 0x0fc) >> 2; *codechar++ = base64_encode_value(result); result = (fragment & 0x003) << 4; - // fallthrough + KJ_FALLTHROUGH; case step_B: if (plainchar == plaintextend) { state_in->result = result; @@ -750,7 +755,7 @@ int base64_encode_block(const char* plaintext_in, int length_in, result |= (fragment & 0x0f0) >> 4; *codechar++ = base64_encode_value(result); result = (fragment & 0x00f) << 2; - // fallthrough + KJ_FALLTHROUGH; case step_C: if (plainchar == plaintextend) { state_in->result = result; @@ -852,7 +857,7 @@ typedef enum { step_a, step_b, step_c, step_d } base64_decodestep; -typedef struct { +struct base64_decodestate { bool hadErrors = false; size_t nPaddingBytesSeen = 0; // Output state. `nPaddingBytesSeen` is not guaranteed to be correct if `hadErrors` is true. It is @@ -861,7 +866,7 @@ typedef struct { base64_decodestep step = step_a; char plainchar = 0; -} base64_decodestate; +}; int base64_decode_value(char value_in) { // Returns either the fragment value or: -1 on whitespace, -2 on padding, -3 on invalid input. 
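The encoder added at the end of this file's diff produces URL-safe output per RFC 4648 section 5 by post-processing kj::encodeBase64(): '+' becomes '-', '/' becomes '_', and trailing '=' padding is stripped, which is exactly what the tests above exercise. The decode direction is left as a TODO in the patch; a minimal sketch of how it could be layered on the existing kj::decodeBase64(), purely illustrative and not part of this change:

#include <kj/common.h>
#include <kj/encoding.h>
#include <kj/vector.h>

// Hypothetical helper (not in this patch): undo the URL-safe alphabet substitution and the
// padding removal, then reuse the standard base64 decoder.
kj::EncodingResult<kj::Array<kj::byte>> decodeBase64UrlSketch(kj::ArrayPtr<const char> input) {
  kj::Vector<char> padded(input.size() + 3);
  for (char c: input) {
    if (c == '-') c = '+';
    else if (c == '_') c = '/';
    padded.add(c);
  }
  while (padded.size() % 4 != 0) {
    // Restore the padding encodeBase64Url() removed. (An input length of 4n+1 is never
    // valid base64url; the underlying decoder flags such input.)
    padded.add('=');
  }
  return kj::decodeBase64(kj::arrayPtr(padded.begin(), padded.size()));
}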
@@ -908,6 +913,7 @@ int base64_decode_block(const char* code_in, const int length_in, { while (1) { + KJ_FALLTHROUGH; case step_a: do { if (codechar == code_in+length_in) { @@ -920,7 +926,7 @@ int base64_decode_block(const char* code_in, const int length_in, ERROR_IF(fragment < -1); } while (fragment < 0); *plainchar = (fragment & 0x03f) << 2; - // fallthrough + KJ_FALLTHROUGH; case step_b: do { if (codechar == code_in+length_in) { @@ -938,7 +944,7 @@ int base64_decode_block(const char* code_in, const int length_in, } while (fragment < 0); *plainchar++ |= (fragment & 0x030) >> 4; *plainchar = (fragment & 0x00f) << 4; - // fallthrough + KJ_FALLTHROUGH; case step_c: do { if (codechar == code_in+length_in) { @@ -958,7 +964,7 @@ int base64_decode_block(const char* code_in, const int length_in, ERROR_IF(state_in->nPaddingBytesSeen > 0); *plainchar++ |= (fragment & 0x03c) >> 2; *plainchar = (fragment & 0x003) << 6; - // fallthrough + KJ_FALLTHROUGH; case step_d: do { if (codechar == code_in+length_in) { @@ -1001,4 +1007,24 @@ EncodingResult> decodeBase64(ArrayPtr input) { return EncodingResult>(kj::mv(output), state.hadErrors); } +String encodeBase64Url(ArrayPtr bytes) { + // TODO(perf): Rewrite as single pass? + // TODO(someday): Write decoder? + + auto base64 = kj::encodeBase64(bytes); + + for (char& c: base64) { + if (c == '+') c = '-'; + if (c == '/') c = '_'; + } + + // Remove trailing '='s. + kj::ArrayPtr slice = base64; + while (slice.size() > 0 && slice.back() == '=') { + slice = slice.slice(0, slice.size() - 1); + } + + return kj::str(slice); +} + } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/encoding.h b/libs/EXTERNAL/capnproto/c++/src/kj/encoding.h index 2604d35e6e3..d61ee473b52 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/encoding.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/encoding.h @@ -26,12 +26,10 @@ // - URI encoding // - Base64 -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "string.h" +KJ_BEGIN_HEADER + namespace kj { template @@ -213,6 +211,9 @@ EncodingResult> decodeBase64(ArrayPtr text); // Decode base64 text. This function reports errors required by the WHATWG HTML/Infra specs: see // https://html.spec.whatwg.org/multipage/webappapis.html#atob for details. +String encodeBase64Url(ArrayPtr bytes); +// Encode the given bytes as URL-safe base64 text. 
(RFC 4648, section 5) + // ======================================================================================= // inline implementation details @@ -245,6 +246,8 @@ const T* readMaybe(const EncodingResult& value) { } } +String encodeCEscapeImpl(ArrayPtr bytes, bool isBinary); + } // namespace _ (private) inline String encodeUriComponent(ArrayPtr text) { @@ -275,8 +278,13 @@ inline EncodingResult decodeWwwForm(ArrayPtr text) { } inline String encodeCEscape(ArrayPtr text) { - return encodeCEscape(text.asBytes()); + return _::encodeCEscapeImpl(text.asBytes(), false); } + +inline String encodeCEscape(ArrayPtr bytes) { + return _::encodeCEscapeImpl(bytes, true); +} + inline EncodingResult decodeCEscape(ArrayPtr text) { auto result = decodeBinaryCEscape(text, true); return { String(result.releaseAsChars()), result.hadErrors }; @@ -364,4 +372,74 @@ EncodingResult> decodeBase64(const char (&text)[s]) { return decodeBase64(arrayPtr(text, s - 1)); } +#if __cplusplus >= 202000L +template +inline EncodingResult> encodeUtf16(const char8_t (&text)[s], bool nulTerminate=false) { + return encodeUtf16(arrayPtr(reinterpret_cast(text), s - 1), nulTerminate); +} +template +inline EncodingResult> encodeUtf32(const char8_t (&text)[s], bool nulTerminate=false) { + return encodeUtf32(arrayPtr(reinterpret_cast(text), s - 1), nulTerminate); +} +template +inline EncodingResult> encodeWideString( + const char8_t (&text)[s], bool nulTerminate=false) { + return encodeWideString(arrayPtr(reinterpret_cast(text), s - 1), nulTerminate); +} +template +inline EncodingResult> decodeHex(const char8_t (&text)[s]) { + return decodeHex(arrayPtr(reinterpret_cast(text), s - 1)); +} +template +inline String encodeUriComponent(const char8_t (&text)[s]) { + return encodeUriComponent(arrayPtr(reinterpret_cast(text), s - 1)); +} +template +inline Array decodeBinaryUriComponent(const char8_t (&text)[s]) { + return decodeBinaryUriComponent(arrayPtr(reinterpret_cast(text), s - 1)); +} +template +inline EncodingResult decodeUriComponent(const char8_t (&text)[s]) { + return decodeUriComponent(arrayPtr(reinterpret_cast(text), s-1)); +} +template +inline String encodeUriFragment(const char8_t (&text)[s]) { + return encodeUriFragment(arrayPtr(reinterpret_cast(text), s - 1)); +} +template +inline String encodeUriPath(const char8_t (&text)[s]) { + return encodeUriPath(arrayPtr(reinterpret_cast(text), s - 1)); +} +template +inline String encodeUriUserInfo(const char8_t (&text)[s]) { + return encodeUriUserInfo(arrayPtr(reinterpret_cast(text), s - 1)); +} +template +inline String encodeWwwForm(const char8_t (&text)[s]) { + return encodeWwwForm(arrayPtr(reinterpret_cast(text), s - 1)); +} +template +inline EncodingResult decodeWwwForm(const char8_t (&text)[s]) { + return decodeWwwForm(arrayPtr(reinterpret_cast(text), s-1)); +} +template +inline String encodeCEscape(const char8_t (&text)[s]) { + return encodeCEscape(arrayPtr(reinterpret_cast(text), s - 1)); +} +template +inline EncodingResult> decodeBinaryCEscape(const char8_t (&text)[s]) { + return decodeBinaryCEscape(arrayPtr(reinterpret_cast(text), s - 1)); +} +template +inline EncodingResult decodeCEscape(const char8_t (&text)[s]) { + return decodeCEscape(arrayPtr(reinterpret_cast(text), s-1)); +} +template +EncodingResult> decodeBase64(const char8_t (&text)[s]) { + return decodeBase64(arrayPtr(reinterpret_cast(text), s - 1)); +} +#endif + } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/exception-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/exception-test.c++ 
index 2b5172a724b..2cc37d60bd3 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/exception-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/exception-test.c++ @@ -23,6 +23,7 @@ #include "debug.h" #include #include +#include namespace kj { namespace _ { // private @@ -30,9 +31,11 @@ namespace { TEST(Exception, TrimSourceFilename) { #if _WIN32 - if (trimSourceFilename(__FILE__) != "kj\\exception-test.c++") -#endif + EXPECT_TRUE(trimSourceFilename(__FILE__) == "kj/exception-test.c++" || + trimSourceFilename(__FILE__) == "kj\\exception-test.c++"); +#else EXPECT_EQ(trimSourceFilename(__FILE__), "kj/exception-test.c++"); +#endif } TEST(Exception, RunCatchingExceptions) { @@ -169,7 +172,7 @@ TEST(Exception, ScopeSuccessFail) { } #endif -#if __GNUG__ +#if __GNUG__ || defined(__clang__) kj::String testStackTrace() __attribute__((noinline)); #elif _MSC_VER __declspec(noinline) kj::String testStackTrace(); @@ -205,6 +208,86 @@ KJ_TEST("getStackTrace() returns correct line number, not line + 1") { KJ_ASSERT(strstr(trace.cStr(), wrong.cStr()) == nullptr, trace, wrong); } +#if !KJ_NO_EXCEPTIONS +KJ_TEST("InFlightExceptionIterator works") { + bool caught = false; + try { + KJ_DEFER({ + try { + KJ_FAIL_ASSERT("bar"); + } catch (const kj::Exception& e) { + InFlightExceptionIterator iter; + KJ_IF_MAYBE(e2, iter.next()) { + KJ_EXPECT(e2 == &e, e2->getDescription()); + } else { + KJ_FAIL_EXPECT("missing first exception"); + } + + KJ_IF_MAYBE(e2, iter.next()) { + KJ_EXPECT(e2->getDescription() == "foo", e2->getDescription()); + } else { + KJ_FAIL_EXPECT("missing second exception"); + } + + KJ_EXPECT(iter.next() == nullptr, "more than two exceptions"); + + caught = true; + } + }); + KJ_FAIL_ASSERT("foo"); + } catch (const kj::Exception& e) { + // expected + } + + KJ_EXPECT(caught); +} +#endif + +KJ_TEST("computeRelativeTrace") { + auto testCase = [](uint expectedPrefix, + ArrayPtr trace, ArrayPtr relativeTo) { + auto tracePtr = KJ_MAP(x, trace) { return (void*)x; }; + auto relativeToPtr = KJ_MAP(x, relativeTo) { return (void*)x; }; + + auto result = computeRelativeTrace(tracePtr, relativeToPtr); + KJ_EXPECT(result.begin() == tracePtr.begin()); + + KJ_EXPECT(result.size() == expectedPrefix, trace, relativeTo, result); + }; + + testCase(8, + {1, 2, 3, 4, 5, 6, 7, 8}, + {8, 7, 6, 5, 4, 3, 2, 1}); + + testCase(5, + {1, 2, 3, 4, 5, 6, 7, 8}, + {8, 7, 6, 5, 5, 6, 7, 8}); + + testCase(5, + {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + {8, 7, 6, 5, 5, 6, 7, 8}); + + testCase(5, + {1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8}, + {8, 7, 6, 5, 5, 6, 7, 8}); + + testCase(9, + {1, 2, 3, 4, 5, 6, 7, 8, 5, 5, 6, 7, 8}, + {8, 7, 6, 5, 5, 6, 7, 8}); + + testCase(5, + {1, 2, 3, 4, 5, 5, 6, 7, 8, 5, 6, 7, 8}, + {8, 7, 6, 5, 5, 6, 7, 8}); + + testCase(5, + {1, 2, 3, 4, 5, 6, 7, 8}, + {8, 7, 6, 5, 5, 6, 7, 8, 7, 8}); + + testCase(5, + {1, 2, 3, 4, 5, 6, 7, 8}, + {8, 7, 6, 5, 6, 7, 8, 7, 8}); +} + } // namespace } // namespace _ (private) } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/exception.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/exception.c++ index 76576bb5308..c2dda506ccf 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/exception.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/exception.c++ @@ -23,12 +23,24 @@ #define _GNU_SOURCE #endif +#if _WIN32 || __CYGWIN__ +#include "win32-api-version.h" +#endif + +#if (_WIN32 && _M_X64) || (__CYGWIN__ && __x86_64__) +// Currently the Win32 stack-trace code only supports x86_64. 
We could easily extend it to support +// i386 as well but it requires some code changes around how we read the context to start the +// trace. +#define KJ_USE_WIN32_DBGHELP 1 +#endif + #include "exception.h" #include "string.h" #include "debug.h" #include "threadlocal.h" #include "miniposix.h" #include "function.h" +#include "main.h" #include #include #include @@ -51,22 +63,49 @@ #include #endif -#if _WIN32 -#define WIN32_LEAN_AND_MEAN +#if _WIN32 || __CYGWIN__ #include #include "windows-sanity.h" #include #endif -#if (__linux__ || __APPLE__) +#if (__linux__ || __APPLE__ || __CYGWIN__) #include #include #endif +#if __CYGWIN__ +#include +#include +#endif + #if KJ_HAS_LIBDL #include "dlfcn.h" #endif +#if _MSC_VER +#include +#endif + +#if KJ_HAS_COMPILER_FEATURE(address_sanitizer) || defined(__SANITIZE_ADDRESS__) +#include +#else +static void __lsan_ignore_object(const void* p) {} +#endif +// TODO(cleanup): Remove the LSAN stuff per https://github.com/capnproto/capnproto/pull/1255 +// feedback. + +namespace { +template +inline T* lsanIgnoreObjectAndReturn(T* ptr) { + // Defensively lsan_ignore_object since the documentation doesn't explicitly specify what happens + // if you call this multiple times on the same object. + // TODO(cleanup): Remove this per https://github.com/capnproto/capnproto/pull/1255. + __lsan_ignore_object(ptr); + return ptr; +} +} + namespace kj { StringPtr KJ_STRINGIFY(LogSeverity severity) { @@ -81,10 +120,7 @@ StringPtr KJ_STRINGIFY(LogSeverity severity) { return SEVERITY_STRINGS[static_cast(severity)]; } -#if _WIN32 && _M_X64 -// Currently the Win32 stack-trace code only supports x86_64. We could easily extend it to support -// i386 as well but it requires some code changes around how we read the context to start the -// trace. +#if KJ_USE_WIN32_DBGHELP namespace { @@ -106,6 +142,13 @@ struct Dbghelp { BOOL (WINAPI *symGetLineFromAddr64)( HANDLE hProcess,DWORD64 qwAddr,PDWORD pdwDisplacement,PIMAGEHLP_LINE64 Line64); +#if __GNUC__ && !__clang__ && __GNUC__ >= 8 +// GCC 8 warns that our reinterpret_casts of function pointers below are casting between +// incompatible types. Yes, GCC, we know that. This is the nature of GetProcAddress(); it returns +// everything as `long long int (*)()` and we have to cast to the actual type. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-function-type" +#endif Dbghelp() : lib(LoadLibraryA("dbghelp.dll")), symInitialize(lib == nullptr ? nullptr : @@ -127,6 +170,9 @@ struct Dbghelp { symInitialize(GetCurrentProcess(), NULL, TRUE); } } +#if __GNUC__ && !__clang__ && __GNUC__ >= 9 +#pragma GCC diagnostic pop +#endif }; const Dbghelp& getDbghelp() { @@ -136,6 +182,13 @@ const Dbghelp& getDbghelp() { ArrayPtr getStackTrace(ArrayPtr space, uint ignoreCount, HANDLE thread, CONTEXT& context) { + // NOTE: Apparently there is a function CaptureStackBackTrace() that is equivalent to glibc's + // backtrace(). Somehow I missed that when I originally wrote this. However, + // CaptureStackBackTrace() does not accept a CONTEXT parameter; it can only trace the caller. + // That's more problematic on Windows where breakHandler(), sehHandler(), and Cygwin signal + // handlers all depend on the ability to pass a CONTEXT. So we'll keep this code, which works + // after all. 
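For orientation, the public entry point that feeds all of these platform branches is kj::printStackTraceOnCrash(). A minimal usage sketch follows, with the caveat that exactly which hooks get installed (POSIX signals, Win32 SEH and ctrl+C, and now std::terminate()) depends on the #if branches in this file:

    #include <kj/exception.h>
    #include <kj/debug.h>

    int main() {
      // Install the handlers early; afterwards a crash (e.g. SIGSEGV) or an
      // exception that reaches std::terminate() prints a symbolized stack
      // trace to stderr before the process exits.
      kj::printStackTraceOnCrash();

      KJ_LOG(INFO, "crash handlers installed");
      return 0;
    }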
+ const Dbghelp& dbghelp = getDbghelp(); if (dbghelp.stackWalk64 == nullptr || dbghelp.symFunctionTableAccess64 == nullptr || @@ -179,7 +232,7 @@ ArrayPtr getStackTrace(ArrayPtr space, uint ignoreCount) { return nullptr; } -#if _WIN32 && _M_X64 +#if KJ_USE_WIN32_DBGHELP CONTEXT context; RtlCaptureContext(&context); return getStackTrace(space, ignoreCount, GetCurrentThread(), context); @@ -207,7 +260,7 @@ String stringifyStackTrace(ArrayPtr trace) { return nullptr; } -#if _WIN32 && _M_X64 && _MSC_VER +#if KJ_USE_WIN32_DBGHELP && _MSC_VER // Try to get file/line using SymGetLineFromAddr64(). We don't bother if we aren't on MSVC since // this requires MSVC debug info. @@ -232,7 +285,7 @@ String stringifyStackTrace(ArrayPtr trace) { return strArray(lines, ""); -#elif (__linux__ || __APPLE__) && !__ANDROID__ +#elif (__linux__ || __APPLE__ || __CYGWIN__) && !__ANDROID__ // We want to generate a human-readable stack trace. // TODO(someday): It would be really great if we could avoid farming out to another process @@ -274,6 +327,16 @@ String stringifyStackTrace(ArrayPtr trace) { // The Mac OS X equivalent of addr2line is atos. // (Internally, it uses the private CoreSymbolication.framework library.) p = popen(str("xcrun atos -p ", getpid(), ' ', strTrace).cStr(), "r"); +#elif __CYGWIN__ + wchar_t exeWinPath[MAX_PATH]; + if (GetModuleFileNameW(nullptr, exeWinPath, sizeof(exeWinPath)) == 0) { + return nullptr; + } + char exePosixPath[MAX_PATH * 2]; + if (cygwin_conv_path(CCP_WIN_W_TO_POSIX, exeWinPath, exePosixPath, sizeof(exePosixPath)) < 0) { + return nullptr; + } + p = popen(str("addr2line -e '", exePosixPath, "' ", strTrace).cStr(), "r"); #endif if (p == nullptr) { @@ -378,7 +441,48 @@ String getStackTrace() { return kj::str(stringifyStackTraceAddresses(trace), stringifyStackTrace(trace)); } -#if _WIN32 && _M_X64 +namespace { + +#if !KJ_NO_EXCEPTIONS + +[[noreturn]] void terminateHandler() { + void* traceSpace[32]; + + // ignoreCount = 3 to ignore std::terminate entry. + auto trace = kj::getStackTrace(traceSpace, 3); + + kj::String message; + + auto eptr = std::current_exception(); + if (eptr != nullptr) { + try { + std::rethrow_exception(eptr); + } catch (const kj::Exception& exception) { + message = kj::str("*** Fatal uncaught kj::Exception: ", exception, '\n'); + } catch (const std::exception& exception) { + message = kj::str("*** Fatal uncaught std::exception: ", exception.what(), + "\nstack: ", stringifyStackTraceAddresses(trace), + stringifyStackTrace(trace), '\n'); + } catch (...) { + message = kj::str("*** Fatal uncaught exception of type: ", kj::getCaughtExceptionType(), + "\nstack: ", stringifyStackTraceAddresses(trace), + stringifyStackTrace(trace), '\n'); + } + } else { + message = kj::str("*** std::terminate() called with no exception" + "\nstack: ", stringifyStackTraceAddresses(trace), + stringifyStackTrace(trace), '\n'); + } + + kj::FdOutputStream(STDERR_FILENO).write(message.begin(), message.size()); + _exit(1); +} + +#endif + +} // namespace + +#if KJ_USE_WIN32_DBGHELP && !__CYGWIN__ namespace { DWORD mainThreadId = 0; @@ -461,16 +565,45 @@ void printStackTraceOnCrash() { mainThreadId = GetCurrentThreadId(); KJ_WIN32(SetConsoleCtrlHandler(breakHandler, TRUE)); SetUnhandledExceptionFilter(&sehHandler); + +#if !KJ_NO_EXCEPTIONS + // Also override std::terminate() handler with something nicer for KJ. + std::set_terminate(&terminateHandler); +#endif } -#elif KJ_HAS_BACKTRACE +#elif _WIN32 +// Windows, but KJ_USE_WIN32_DBGHELP is not enabled. 
We can't print useful stack traces, so don't +// try to catch SEH nor ctrl+C. + +void printStackTraceOnCrash() { +#if !KJ_NO_EXCEPTIONS + std::set_terminate(&terminateHandler); +#endif +} + +#else namespace { -void crashHandler(int signo, siginfo_t* info, void* context) { +[[noreturn]] void crashHandler(int signo, siginfo_t* info, void* context) { void* traceSpace[32]; +#if KJ_USE_WIN32_DBGHELP + // Win32 backtracing can't trace its way out of a Cygwin signal handler. However, Cygwin gives + // us direct access to the CONTEXT, which we can pass to the Win32 tracing functions. + ucontext_t* ucontext = reinterpret_cast(context); + // Cygwin's mcontext_t has the same layout as CONTEXT. + // TODO(someday): Figure out why this produces garbage for SIGINT from ctrl+C. It seems to work + // correctly for SIGSEGV. + CONTEXT win32Context; + static_assert(sizeof(ucontext->uc_mcontext) >= sizeof(win32Context), + "mcontext_t should be an extension of CONTEXT"); + memcpy(&win32Context, &ucontext->uc_mcontext, sizeof(win32Context)); + auto trace = getStackTrace(traceSpace, 0, GetCurrentThread(), win32Context); +#else // ignoreCount = 2 to ignore crashHandler() and signal trampoline. auto trace = getStackTrace(traceSpace, 2); +#endif auto message = kj::str("*** Received signal #", signo, ": ", strsignal(signo), "\nstack: ", stringifyStackTraceAddresses(trace), @@ -523,9 +656,11 @@ void printStackTraceOnCrash() { // because stack traces on ctrl+c can be obnoxious for, say, command-line tools. KJ_SYSCALL(sigaction(SIGINT, &action, nullptr)); #endif -} -#else -void printStackTraceOnCrash() { + +#if !KJ_NO_EXCEPTIONS + // Also override std::terminate() handler with something nicer for KJ. + std::set_terminate(&terminateHandler); +#endif } #endif @@ -610,16 +745,20 @@ String KJ_STRINGIFY(const Exception& e) { for (;;) { KJ_IF_MAYBE(c, contextPtr) { contextText[contextDepth++] = - str(c->file, ":", c->line, ": context: ", c->description, "\n"); + str(trimSourceFilename(c->file), ":", c->line, ": context: ", c->description, "\n"); contextPtr = c->next; } else { break; } } + // Note that we put "remote" before "stack" because trace frames are ordered callee before + // caller, so this is the most natural presentation ordering. return str(strArray(contextText, ""), e.getFile(), ":", e.getLine(), ": ", e.getType(), e.getDescription() == nullptr ? "" : ": ", e.getDescription(), + e.getRemoteTrace().size() > 0 ? "\nremote: " : "", + e.getRemoteTrace(), e.getStackTrace().size() > 0 ? 
"\nstack: " : "", stringifyStackTraceAddresses(e.getStackTrace()), stringifyStackTrace(e.getStackTrace())); @@ -641,6 +780,10 @@ Exception::Exception(const Exception& other) noexcept file = ownFile.cStr(); } + if (other.remoteTrace != nullptr) { + remoteTrace = kj::str(other.remoteTrace); + } + memcpy(trace, other.trace, sizeof(trace[0]) * traceCount); KJ_IF_MAYBE(c, other.context) { @@ -661,8 +804,8 @@ void Exception::wrapContext(const char* file, int line, String&& description) { context = heap(file, line, mv(description), mv(context)); } -void Exception::extendTrace(uint ignoreCount) { - KJ_STACK_ARRAY(void*, newTraceSpace, kj::size(trace) + ignoreCount + 1, +void Exception::extendTrace(uint ignoreCount, uint limit) { + KJ_STACK_ARRAY(void*, newTraceSpace, kj::min(kj::size(trace), limit) + ignoreCount + 1, sizeof(trace)/sizeof(trace[0]) + 8, 128); auto newTrace = kj::getStackTrace(newTraceSpace, ignoreCount + 1); @@ -719,17 +862,59 @@ void Exception::addTrace(void* ptr) { } } +void Exception::addTraceHere() { +#if __GNUC__ + addTrace(__builtin_return_address(0)); +#elif _MSC_VER + addTrace(_ReturnAddress()); +#else + #error "please implement for your compiler" +#endif +} + +#if !KJ_NO_EXCEPTIONS + +namespace { + +KJ_THREADLOCAL_PTR(ExceptionImpl) currentException = nullptr; + +} // namespace + class ExceptionImpl: public Exception, public std::exception { public: - inline ExceptionImpl(Exception&& other): Exception(mv(other)) {} + inline ExceptionImpl(Exception&& other): Exception(mv(other)) { + insertIntoCurrentExceptions(); + } ExceptionImpl(const ExceptionImpl& other): Exception(other) { // No need to copy whatBuffer since it's just to hold the return value of what(). + insertIntoCurrentExceptions(); + } + ~ExceptionImpl() { + // Look for ourselves in the list. + for (auto* ptr = ¤tException; *ptr != nullptr; ptr = &(*ptr)->nextCurrentException) { + if (*ptr == this) { + *ptr = nextCurrentException; + return; + } + } + + // Possibly the ExceptionImpl was destroyed on a different thread than created it? That's + // pretty bad, we'd better abort. + abort(); } const char* what() const noexcept override; private: mutable String whatBuffer; + ExceptionImpl* nextCurrentException = nullptr; + + void insertIntoCurrentExceptions() { + nextCurrentException = currentException; + currentException = this; + } + + friend class InFlightExceptionIterator; }; const char* ExceptionImpl::what() const noexcept { @@ -737,6 +922,45 @@ const char* ExceptionImpl::what() const noexcept { return whatBuffer.begin(); } +InFlightExceptionIterator::InFlightExceptionIterator() + : ptr(currentException) {} + +Maybe InFlightExceptionIterator::next() { + if (ptr == nullptr) return nullptr; + + const ExceptionImpl& result = *static_cast(ptr); + ptr = result.nextCurrentException; + return result; +} + +#endif // !KJ_NO_EXCEPTIONS + +kj::Exception getDestructionReason(void* traceSeparator, kj::Exception::Type defaultType, + const char* defaultFile, int defaultLine, kj::StringPtr defaultDescription) { +#if !KJ_NO_EXCEPTIONS + InFlightExceptionIterator iter; + KJ_IF_MAYBE(e, iter.next()) { + auto copy = kj::cp(*e); + copy.truncateCommonTrace(); + return copy; + } else { +#endif + // Darn, use a generic exception. + kj::Exception exception(defaultType, defaultFile, defaultLine, + kj::heapString(defaultDescription)); + + // Let's give some context on where the PromiseFulfiller was destroyed. + exception.extendTrace(2, 16); + + // Add a separator that hopefully makes this understandable... 
+ exception.addTrace(traceSeparator); + + return exception; +#if !KJ_NO_EXCEPTIONS + } +#endif +} + // ======================================================================================= namespace { @@ -747,9 +971,11 @@ KJ_THREADLOCAL_PTR(ExceptionCallback) threadLocalCallback = nullptr; ExceptionCallback::ExceptionCallback(): next(getExceptionCallback()) { char stackVar; +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION ptrdiff_t offset = reinterpret_cast(this) - &stackVar; KJ_ASSERT(offset < 65536 && offset > -65536, "ExceptionCallback must be allocated on the stack."); +#endif threadLocalCallback = this; } @@ -783,6 +1009,10 @@ Function)> ExceptionCallback::getThreadInitializer() { return next.getThreadInitializer(); } +namespace _ { // private + uint uncaughtExceptionCount(); // defined later in this file +} + class ExceptionCallback::RootExceptionCallback: public ExceptionCallback { public: RootExceptionCallback(): ExceptionCallback(*this) {} @@ -791,7 +1021,7 @@ public: #if KJ_NO_EXCEPTIONS logException(LogSeverity::ERROR, mv(exception)); #else - if (std::uncaught_exception()) { + if (_::uncaughtExceptionCount() > 0) { // Bad time to throw an exception. Just log instead. // // TODO(someday): We should really compare uncaughtExceptionCount() against the count at @@ -854,6 +1084,8 @@ private: // anyway. getExceptionCallback().logMessage(severity, e.getFile(), e.getLine(), 0, str( e.getType(), e.getDescription() == nullptr ? "" : ": ", e.getDescription(), + e.getRemoteTrace().size() > 0 ? "\nremote: " : "", + e.getRemoteTrace(), e.getStackTrace().size() > 0 ? "\nstack: " : "", stringifyStackTraceAddresses(e.getStackTrace()), stringifyStackTrace(e.getStackTrace()), "\n")); @@ -861,9 +1093,34 @@ private: }; ExceptionCallback& getExceptionCallback() { - static ExceptionCallback::RootExceptionCallback defaultCallback; + static auto defaultCallback = lsanIgnoreObjectAndReturn( + new ExceptionCallback::RootExceptionCallback()); + // We allocate on the heap because some objects may throw in their destructors. If those objects + // had static storage, they might get fully constructed before the root callback. If they however + // then throw an exception during destruction, there would be a lifetime issue because their + // destructor would end up getting registered after the root callback's destructor. One solution + // is to just leak this pointer & allocate on first-use. The cost is that the initialization is + // mildly more expensive (+ we need to annotate sanitizers to ignore the problem). A great + // compiler annotation that would simplify things would be one that allowed static variables to have + // their destruction omitted wholesale. That would allow us to avoid the heap but still have the + // same robust safety semantics leaking would give us. A practical alternative that could be + // implemented without new compilers would be to define another static root callback in + // RootExceptionCallback's destructor (+ a separate pointer to share its value with this + // function). Since this would end up getting constructed during exit unwind, it would have the + // nice property of effectively being guaranteed to be evicted last. + // + // All this being said, I came back to the conclusion that leaking the object is the easiest tweak here: + // * Can't go wrong + // * Easy to maintain + // * Throwing exceptions is bound to be expensive and malloc-happy anyway, so the incremental + // cost of 1 heap allocation is minimal.
+ // + // TODO(cleanup): Harris has an excellent suggestion in + // https://github.com/capnproto/capnproto/pull/1255 that should ensure we initialize the root + // callback once on first use as a global & never destroy it. + ExceptionCallback* scoped = threadLocalCallback; - return scoped != nullptr ? *scoped : defaultCallback; + return scoped != nullptr ? *scoped : *defaultCallback; } void throwFatalException(kj::Exception&& exception, uint ignoreCount) { @@ -990,6 +1247,53 @@ kj::String getCaughtExceptionType() { } #endif +namespace { + +size_t sharedSuffixLength(kj::ArrayPtr a, kj::ArrayPtr b) { + size_t result = 0; + while (a.size() > 0 && b.size() > 0 && a.back() == b.back()) { + ++result; + a = a.slice(0, a.size() - 1); + b = b.slice(0, b.size() - 1); + } + return result; +} + +} // namespace + +kj::ArrayPtr computeRelativeTrace( + kj::ArrayPtr trace, kj::ArrayPtr relativeTo) { + using miniposix::ssize_t; + + static constexpr size_t MIN_MATCH_LEN = 4; + if (trace.size() < MIN_MATCH_LEN || relativeTo.size() < MIN_MATCH_LEN) { + return trace; + } + + kj::ArrayPtr bestMatch = trace; + uint bestMatchLen = MIN_MATCH_LEN - 1; // must beat this to choose something else + + // `trace` and `relativeTrace` may have been truncated at different points. We iterate through + // truncating various suffixes from one of the two and then seeing if the remaining suffixes + // match. + for (ssize_t i = -(ssize_t)(trace.size() - MIN_MATCH_LEN); + i <= (ssize_t)(relativeTo.size() - MIN_MATCH_LEN); + i++) { + // Negative values truncate `trace`, positive values truncate `relativeTo`. + kj::ArrayPtr subtrace = trace.slice(0, trace.size() - kj::max(0, -i)); + kj::ArrayPtr subrt = relativeTo + .slice(0, relativeTo.size() - kj::max(0, i)); + + uint matchLen = sharedSuffixLength(subtrace, subrt); + if (matchLen > bestMatchLen) { + bestMatchLen = matchLen; + bestMatch = subtrace.slice(0, subtrace.size() - matchLen + 1); + } + } + + return bestMatch; +} + namespace _ { // private class RecoverableExceptionCatcher: public ExceptionCallback { @@ -1010,7 +1314,7 @@ public: Maybe caught; }; -Maybe runCatchingExceptions(Runnable& runnable) noexcept { +Maybe runCatchingExceptions(Runnable& runnable) { #if KJ_NO_EXCEPTIONS RecoverableExceptionCatcher catcher; runnable.run(); @@ -1025,12 +1329,16 @@ Maybe runCatchingExceptions(Runnable& runnable) noexcept { } catch (Exception& e) { e.truncateCommonTrace(); return kj::mv(e); + } catch (CanceledException) { + throw; } catch (std::bad_alloc& e) { return Exception(Exception::Type::OVERLOADED, "(unknown)", -1, str("std::bad_alloc: ", e.what())); } catch (std::exception& e) { return Exception(Exception::Type::FAILED, "(unknown)", -1, str("std::exception: ", e.what())); + } catch (TopLevelProcessContext::CleanShutdownException) { + throw; } catch (...) 
{ #if __GNUC__ && !KJ_NO_RTTI return Exception(Exception::Type::FAILED, "(unknown)", -1, str( diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/exception.h b/libs/EXTERNAL/capnproto/c++/src/kj/exception.h index 1020d011308..8c20b1b39ea 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/exception.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/exception.h @@ -21,15 +21,13 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "memory.h" #include "array.h" #include "string.h" #include "windows-sanity.h" // work-around macro conflict with `ERROR` +KJ_BEGIN_HEADER + namespace kj { class ExceptionImpl; @@ -82,6 +80,13 @@ class Exception { StringPtr getDescription() const { return description; } ArrayPtr getStackTrace() const { return arrayPtr(trace, traceCount); } + StringPtr getRemoteTrace() const { return remoteTrace; } + void setRemoteTrace(kj::String&& value) { remoteTrace = kj::mv(value); } + // Additional stack trace data originating from a remote server. If present, then + // `getStackTrace()` only traces up until entry into the RPC system, and the remote trace + // contains any trace information returned over the wire. This string is human-readable but the + // format is otherwise unspecified. + struct Context { // Describes a bit about what was going on when the exception was thrown. @@ -108,9 +113,11 @@ class Exception { // is expected that contexts will be added in reverse order as the exception passes up the // callback stack. - KJ_NOINLINE void extendTrace(uint ignoreCount); + KJ_NOINLINE void extendTrace(uint ignoreCount, uint limit = kj::maxValue); // Append the current stack trace to the exception's trace, ignoring the first `ignoreCount` // frames (see `getStackTrace()` for discussion of `ignoreCount`). + // + // If `limit` is set, limit the number of frames added to the given number. KJ_NOINLINE void truncateCommonTrace(); // Remove the part of the stack trace which the exception shares with the caller of this method. @@ -121,6 +128,9 @@ class Exception { // Append the given pointer to the backtrace, if it is not already full. This is used by the // async library to trace through the promise chain that led to the exception. + KJ_NOINLINE void addTraceHere(); + // Adds the location that called this method to the stack trace. + private: String ownFile; const char* file; @@ -128,12 +138,19 @@ class Exception { Type type; String description; Maybe> context; + String remoteTrace; void* trace[32]; uint traceCount; friend class ExceptionImpl; }; +struct CanceledException { }; +// This exception is thrown to force-unwind a stack in order to immediately cancel whatever that +// stack was doing. It is used in the implementation of fibers in particular. Application code +// should almost never catch this exception, unless you need to modify stack unwinding for some +// reason. kj::runCatchingExceptions() does not catch it. + StringPtr KJ_STRINGIFY(Exception::Type type); String KJ_STRINGIFY(const Exception& e); @@ -250,7 +267,7 @@ KJ_NOINLINE void throwRecoverableException(kj::Exception&& exception, uint ignor namespace _ { class Runnable; } template -Maybe runCatchingExceptions(Func&& func) noexcept; +Maybe runCatchingExceptions(Func&& func); // Executes the given function (usually, a lambda returning nothing) catching any exceptions that // are thrown. Returns the Exception if there was one, or null if the operation completed normally. // Non-KJ exceptions will be wrapped. 
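Because kj::runCatchingExceptions() drops its noexcept here and (per the exception.c++ hunk above) deliberately rethrows CanceledException and TopLevelProcessContext::CleanShutdownException instead of swallowing them, a minimal usage sketch; the lambda body is a made-up example:

    kj::Maybe<kj::Exception> maybeError = kj::runCatchingExceptions([]() {
      // Hypothetical work. kj::Exceptions are returned to the caller, other
      // std::exceptions are wrapped, and CanceledException keeps unwinding so
      // that fiber cancellation is never swallowed.
      KJ_FAIL_REQUIRE("example failure");
    });

    KJ_IF_MAYBE(e, maybeError) {
      KJ_LOG(ERROR, "operation failed", *e);
    }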
@@ -297,7 +314,7 @@ class Runnable { template class RunnableImpl: public Runnable { public: - RunnableImpl(Func&& func): func(kj::mv(func)) {} + RunnableImpl(Func&& func): func(kj::fwd(func)) {} void run() override { func(); } @@ -305,13 +322,13 @@ class RunnableImpl: public Runnable { Func func; }; -Maybe runCatchingExceptions(Runnable& runnable) noexcept; +Maybe runCatchingExceptions(Runnable& runnable); } // namespace _ (private) template -Maybe runCatchingExceptions(Func&& func) noexcept { - _::RunnableImpl> runnable(kj::fwd(func)); +Maybe runCatchingExceptions(Func&& func) { + _::RunnableImpl runnable(kj::fwd(func)); return _::runCatchingExceptions(runnable); } @@ -381,4 +398,48 @@ kj::String getCaughtExceptionType(); // for the purpose of error logging. This function is best-effort; on some platforms it may simply // return "(unknown)". +#if !KJ_NO_EXCEPTIONS + +class InFlightExceptionIterator { + // A class that can be used to iterate over exceptions that are in-flight in the current thread, + // meaning they are either uncaught, or caught by a catch block that is currently executing. + // + // This is meant for debugging purposes, and the results are best-effort. The C++ standard + // library does not provide any way to inspect uncaught exceptions, so this class can only + // discover KJ exceptions thrown using throwFatalException() or throwRecoverableException(). + // All KJ code uses those two functions to throw exceptions, but if your own code uses a bare + // `throw`, or if the standard library throws an exception, these cannot be inspected. + // + // This class is safe to use in a signal handler. + +public: + InFlightExceptionIterator(); + + Maybe next(); + +private: + const Exception* ptr; +}; + +#endif // !KJ_NO_EXCEPTIONS + +kj::Exception getDestructionReason(void* traceSeparator, + kj::Exception::Type defaultType, const char* defaultFile, int defaultLine, + kj::StringPtr defaultDescription); +// Returns an exception that attempts to capture why a destructor has been invoked. If a KJ +// exception is currently in-flight (see InFlightExceptionIterator), then that exception is +// returned. Otherwise, an exception is constructed using the current stack trace and the type, +// file, line, and description provided. In the latter case, `traceSeparator` is appended to the +// stack trace; this should be a pointer to some dummy symbol which acts as a separator between the +// original stack trace and any new trace frames added later. + +kj::ArrayPtr computeRelativeTrace( + kj::ArrayPtr trace, kj::ArrayPtr relativeTo); +// Given two traces expected to have started from the same root, try to find the part of `trace` +// that is different from `relativeTo`, considering that either or both traces might be truncated. +// +// This is useful for debugging, when reporting several related traces at once.
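To make the computeRelativeTrace() contract concrete, a small sketch reusing values from the new exception-test.c++ cases above (the pointer values are obviously fake):

    void* trace[] = {
        (void*)1, (void*)2, (void*)3, (void*)4, (void*)5, (void*)6, (void*)7, (void*)8};
    void* relativeTo[] = {
        (void*)8, (void*)7, (void*)6, (void*)5, (void*)5, (void*)6, (void*)7, (void*)8};

    // Both traces end with the frames 5, 6, 7, 8. That shared suffix is treated
    // as common history, so the result is the differing prefix of `trace` plus
    // one overlapping frame: per the test expectations it starts at trace[0]
    // and has size 5, i.e. {1, 2, 3, 4, 5}.
    auto rel = kj::computeRelativeTrace(kj::arrayPtr(trace, 8), kj::arrayPtr(relativeTo, 8));
    KJ_ASSERT(rel.size() == 5);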
+ } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-test.c++ index 2f1ebb5ded5..d1d9fa2c98b 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-test.c++ @@ -185,7 +185,7 @@ private: Sleep(10); goto retry; } - // fallthrough + KJ_FALLTHROUGH; default: KJ_FAIL_WIN32("RemoveDirectory", error) { break; } } @@ -276,6 +276,11 @@ KJ_TEST("DiskFile") { KJ_EXPECT(file->readAllText() == ""); + // mmaping empty file should work + KJ_EXPECT(file->mmap(0, 0).size() == 0); + KJ_EXPECT(file->mmapPrivate(0, 0).size() == 0); + KJ_EXPECT(file->mmapWritable(0, 0)->get().size() == 0); + file->writeAll("foo"); KJ_EXPECT(file->readAllText() == "foo"); @@ -294,6 +299,14 @@ KJ_TEST("DiskFile") { file->truncate(18); KJ_EXPECT(file->readAllText() == kj::StringPtr("foobaz\0\0\0\0\0\0\0\0\0\0\0\0", 18)); + // empty mappings work, even if useless + KJ_EXPECT(file->mmap(0, 0).size() == 0); + KJ_EXPECT(file->mmapPrivate(0, 0).size() == 0); + KJ_EXPECT(file->mmapWritable(0, 0)->get().size() == 0); + KJ_EXPECT(file->mmap(2, 0).size() == 0); + KJ_EXPECT(file->mmapPrivate(2, 0).size() == 0); + KJ_EXPECT(file->mmapWritable(2, 0)->get().size() == 0); + { auto mapping = file->mmap(0, 18); auto privateMapping = file->mmapPrivate(0, 18); @@ -829,7 +842,15 @@ KJ_TEST("DiskDirectory replace file with directory") { KJ_EXPECT(dir->openFile(Path({"foo", "bar"}))->readAllText() == "bazqux"); } -#ifndef HOLES_NOT_SUPPORTED +#if !defined(HOLES_NOT_SUPPORTED) && (CAPNP_DEBUG_TYPES || CAPNP_EXPENSIVE_TESTS) +// Not all filesystems support sparse files, and if they do, they don't necessarily support +// copying them in a way that preserves holes. We don't want the capnp test suite to fail just +// because it was run on the wrong filesystem. We could design the test to check first if the +// filesystem supports holes, but the code to do that would be almost the same as the code being +// tested... Instead, we've marked this test so it only runs when building this library using +// defines that only the Cap'n Proto maintainers use. So, we run the test ourselves but we don't +// make other people run it. + KJ_TEST("DiskFile holes") { if (isWine()) { // WINE doesn't support sparse files. diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-unix.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-unix.c++ index 431b6657ea6..8c9336238d4 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-unix.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-unix.c++ @@ -346,6 +346,7 @@ public: } Array mmap(uint64_t offset, uint64_t size) const { + if (size == 0) return nullptr; // zero-length mmap() returns EINVAL, so avoid it auto range = getMmapRange(offset, size); const void* mapping = ::mmap(NULL, range.size, PROT_READ, MAP_SHARED, fd, range.offset); if (mapping == MAP_FAILED) { @@ -356,6 +357,7 @@ public: } Array mmapPrivate(uint64_t offset, uint64_t size) const { + if (size == 0) return nullptr; // zero-length mmap() returns EINVAL, so avoid it auto range = getMmapRange(offset, size); void* mapping = ::mmap(NULL, range.size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, range.offset); if (mapping == MAP_FAILED) { @@ -381,7 +383,12 @@ public: } void zero(uint64_t offset, uint64_t size) const { -#ifdef FALLOC_FL_PUNCH_HOLE + // If FALLOC_FL_PUNCH_HOLE is defined, use it to efficiently zero the area. 
+ // + // A fallocate() wrapper was only added to Android's Bionic C library as of API level 21, + // but FALLOC_FL_PUNCH_HOLE is apparently defined in the headers before that, so we'll + // have to explicitly test for that case. +#if defined(FALLOC_FL_PUNCH_HOLE) && !(__ANDROID__ && __BIONIC__ && __ANDROID_API__ < 21) KJ_SYSCALL_HANDLE_ERRORS( fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, size)) { case EOPNOTSUPP: @@ -396,8 +403,8 @@ public: static const byte ZEROS[4096] = { 0 }; -#if __APPLE__ || __CYGWIN__ - // Mac & Cygwin doesn't have pwritev(). +#if __APPLE__ || __CYGWIN__ || (defined(__ANDROID__) && __ANDROID_API__ < 24) + // Mac & Cygwin & Android API levels 23 and lower doesn't have pwritev(). while (size > sizeof(ZEROS)) { write(offset, ZEROS); size -= sizeof(ZEROS); @@ -407,7 +414,7 @@ public: #else // Use a 4k buffer of zeros amplified by iov to write zeros with as few syscalls as possible. size_t count = (size + sizeof(ZEROS) - 1) / sizeof(ZEROS); - const size_t iovmax = miniposix::iovMax(count); + const size_t iovmax = miniposix::iovMax(); KJ_STACK_ARRAY(struct iovec, iov, kj::min(iovmax, count), 16, 256); for (auto& item: iov) { @@ -454,6 +461,7 @@ public: void changed(ArrayPtr slice) const override { KJ_REQUIRE(slice.begin() >= bytes.begin() && slice.end() <= bytes.end(), "byte range is not part of this mapping"); + if (slice.size() == 0) return; // msync() requires page-alignment, apparently, so use getMmapRange() to accomplish that. auto range = getMmapRange(reinterpret_cast(slice.begin()), slice.size()); @@ -463,6 +471,7 @@ public: void sync(ArrayPtr slice) const override { KJ_REQUIRE(slice.begin() >= bytes.begin() && slice.end() <= bytes.end(), "byte range is not part of this mapping"); + if (slice.size() == 0) return; // msync() requires page-alignment, apparently, so use getMmapRange() to accomplish that. auto range = getMmapRange(reinterpret_cast(slice.begin()), slice.size()); @@ -474,6 +483,10 @@ public: }; Own mmapWritable(uint64_t offset, uint64_t size) const { + if (size == 0) { + // zero-length mmap() returns EINVAL, so avoid it + return heap(nullptr); + } auto range = getMmapRange(offset, size); void* mapping = ::mmap(NULL, range.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, range.offset); if (mapping == MAP_FAILED) { @@ -502,6 +515,7 @@ public: default: KJ_FAIL_SYSCALL("sendfile", error) { return fromPos - fromOffset; } } + if (n == 0) break; } return fromPos - fromOffset; } @@ -765,7 +779,7 @@ public: if (!exists(path)) { return nullptr; } - // fallthrough + KJ_FALLTHROUGH; default: KJ_FAIL_SYSCALL("openat(fd, path, O_DIRECTORY)", error, path) { return nullptr; } } @@ -900,7 +914,7 @@ public: mode = mode - WriteMode::CREATE_PARENT; return createNamedTemporary(finalName, mode, kj::mv(tryCreate)); } - // fallthrough + KJ_FALLTHROUGH; default: KJ_FAIL_SYSCALL("create(path)", error, path) { break; } return nullptr; @@ -943,7 +957,7 @@ public: // Retry, but make sure we don't try to create the parent again. return tryReplaceNode(path, mode - WriteMode::CREATE_PARENT, kj::mv(tryCreate)); } - // fallthrough + KJ_FALLTHROUGH; default: KJ_FAIL_SYSCALL("create(path)", error, path) { return false; } } else { @@ -1088,7 +1102,10 @@ public: KJ_SYSCALL_HANDLE_ERRORS(syscall(SYS_renameat2, fromDirFd, fromPath.cStr(), fd.get(), toPath.cStr(), RENAME_EXCHANGE)) { - case ENOSYS: + case ENOSYS: // Syscall not supported by kernel. + case EINVAL: // Maybe we screwed up, or maybe the syscall is not supported by the + // filesystem. 
Unfortunately, there's no way to tell, so assume the latter. + // ZFS in particular apparently produces EINVAL. break; // fall back to traditional means case ENOENT: // Presumably because the target path doesn't exist. @@ -1117,7 +1134,10 @@ public: } else if (has(mode, WriteMode::CREATE)) { KJ_SYSCALL_HANDLE_ERRORS(syscall(SYS_renameat2, fromDirFd, fromPath.cStr(), fd.get(), toPath.cStr(), RENAME_NOREPLACE)) { - case ENOSYS: + case ENOSYS: // Syscall not supported by kernel. + case EINVAL: // Maybe we screwed up, or maybe the syscall is not supported by the + // filesystem. Unfortunately, there's no way to tell, so assume the latter. + // ZFS in particular apparently produces EINVAL. break; // fall back to traditional means case EEXIST: return false; @@ -1156,8 +1176,10 @@ public: if (S_ISDIR(stats.st_mode)) { return mkdirat(fd, candidatePath.cStr(), 0700); } else { -#if __APPLE__ - // No mknodat() on OSX, gotta open() a file, ugh. +#if __APPLE__ || __FreeBSD__ + // - No mknodat() on OSX, gotta open() a file, ugh. + // - On a modern FreeBSD, mknodat() is reserved strictly for device nodes, + // you cannot create a regular file using it (EINVAL). int newFd = openat(fd, candidatePath.cStr(), O_RDWR | O_CREAT | O_EXCL | MAYBE_O_CLOEXEC, 0700); if (newFd >= 0) close(newFd); diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-win32.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-win32.c++ index 48ce0c143f0..7f3442beaed 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-win32.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-disk-win32.c++ @@ -22,6 +22,9 @@ #if _WIN32 // For Unix implementation, see filesystem-disk-unix.c++. +// Request Vista-level APIs. +#include "win32-api-version.h" + #include "filesystem.h" #include "debug.h" #include "encoding.h" @@ -29,12 +32,6 @@ #include #include -// Request Vista-level APIs. -#define WINVER 0x0600 -#define _WIN32_WINNT 0x0600 - -#define WIN32_LEAN_AND_MEAN // ::eyeroll:: - #include #include #include "windows-sanity.h" @@ -200,7 +197,7 @@ static void rmrfChildren(ArrayPtr path) { Sleep(10); goto retry; } - // fallthrough + KJ_FALLTHROUGH; default: KJ_FAIL_WIN32("RemoveDirectory", error, dbgStr(child)) { break; } } @@ -298,7 +295,7 @@ protected: } }; -#if _MSC_VER && _MSC_VER < 1910 +#if _MSC_VER && _MSC_VER < 1910 && !defined(__clang__) // TODO(msvc): MSVC 2015 can't initialize a constexpr's vtable correctly. const MmapDisposer mmapDisposer = MmapDisposer(); #else @@ -307,16 +304,7 @@ constexpr MmapDisposer mmapDisposer = MmapDisposer(); void* win32Mmap(HANDLE handle, MmapRange range, DWORD pageProtect, DWORD access) { HANDLE mappingHandle; - mappingHandle = CreateFileMappingW(handle, NULL, pageProtect, 0, 0, NULL); - if (mappingHandle == INVALID_HANDLE_VALUE) { - auto error = GetLastError(); - if (error == ERROR_FILE_INVALID && range.size == 0) { - // The documentation says that CreateFileMapping will fail with ERROR_FILE_INVALID if the - // file size is zero. Ugh. 
- return nullptr; - } - KJ_FAIL_WIN32("CreateFileMapping", error); - } + KJ_WIN32(mappingHandle = CreateFileMappingW(handle, NULL, pageProtect, 0, 0, NULL)); KJ_DEFER(KJ_WIN32(CloseHandle(mappingHandle)) { break; }); void* mapping = MapViewOfFile(mappingHandle, access, @@ -428,6 +416,7 @@ public: } Array mmap(uint64_t offset, uint64_t size) const { + if (size == 0) return nullptr; // Windows won't allow zero-length mappings auto range = getMmapRange(offset, size); const void* mapping = win32Mmap(handle, range, PAGE_READONLY, FILE_MAP_READ); return Array(reinterpret_cast(mapping) + (offset - range.offset), @@ -435,6 +424,7 @@ public: } Array mmapPrivate(uint64_t offset, uint64_t size) const { + if (size == 0) return nullptr; // Windows won't allow zero-length mappings auto range = getMmapRange(offset, size); void* mapping = win32Mmap(handle, range, PAGE_READONLY, FILE_MAP_COPY); return Array(reinterpret_cast(mapping) + (offset - range.offset), @@ -552,7 +542,8 @@ public: KJ_REQUIRE(slice.begin() >= bytes.begin() && slice.end() <= bytes.end(), "byte range is not part of this mapping"); - // Zero is treated specially by FlushViewOfFile(), so check for it. + // Zero is treated specially by FlushViewOfFile(), so check for it. (This also handles the + // case where `bytes` is actually empty and not a real mapping.) if (slice.size() > 0) { KJ_WIN32(FlushViewOfFile(slice.begin(), slice.size())); } @@ -563,6 +554,10 @@ public: }; Own mmapWritable(uint64_t offset, uint64_t size) const { + if (size == 0) { + // Windows won't allow zero-length mappings + return heap(nullptr); + } auto range = getMmapRange(offset, size); void* mapping = win32Mmap(handle, range, PAGE_READWRITE, FILE_MAP_ALL_ACCESS); auto array = Array(reinterpret_cast(mapping) + (offset - range.offset), @@ -818,7 +813,7 @@ public: mode = mode - WriteMode::CREATE_PARENT; return createNamedTemporary(finalName, mode, kj::mv(tryCreate)); } - // fallthrough + KJ_FALLTHROUGH; default: KJ_FAIL_WIN32("create(path)", error, path) { break; } return nullptr; @@ -869,6 +864,7 @@ public: // Retry, but make sure we don't try to create the parent again. return tryReplaceNode(path, mode - WriteMode::CREATE_PARENT, kj::mv(tryCreate)); } + KJ_FALLTHROUGH; default: KJ_FAIL_WIN32("create(path)", error, path) { return false; } } else { @@ -1029,7 +1025,7 @@ public: } } - // Succeded, delete temporary. + // Succeeded, delete temporary. rmrf(*tempName); return true; } else { @@ -1296,7 +1292,7 @@ public: default: KJ_FAIL_WIN32("CopyFile", error, fromPath, toPath) { return false; } } else { - // Copy succeded. + // Copy succeeded. 
return true; } } @@ -1514,7 +1510,7 @@ public: Vector results; for (uint i = 0; i < 26; i++) { if (drives & (1 << i)) { - char name[2] = { 'A' + i, ':' }; + char name[2] = { static_cast('A' + i), ':' }; results.add(Entry { FsNode::Type::DIRECTORY, kj::heapString(name, 2) }); } } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-test.c++ index 8ace56164cb..f3eae2fe79a 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem-test.c++ @@ -155,10 +155,6 @@ KJ_TEST("Path exceptions") { KJ_EXPECT_THROW_MESSAGE("root path has no parent", Path(nullptr).parent()); } -static inline bool operator==(const Array& arr, const wchar_t* expected) { - return wcscmp(arr.begin(), expected) == 0; -} - constexpr kj::ArrayPtr operator "" _a(const wchar_t* str, size_t n) { return { str, n }; } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem.c++ index 01bab169d86..62b944cf86e 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem.c++ @@ -622,7 +622,7 @@ Own Directory::openSubdir(PathPtr path, WriteMode mode) const { void Directory::symlink(PathPtr linkpath, StringPtr content, WriteMode mode) const { if (!trySymlink(linkpath, content, mode)) { if (has(mode, WriteMode::CREATE)) { - KJ_FAIL_REQUIRE("path already exsits", linkpath) { break; } + KJ_FAIL_REQUIRE("path already exists", linkpath) { break; } } else { // Shouldn't happen. KJ_FAIL_ASSERT("symlink() returned null despite no preconditions", linkpath) { break; } @@ -912,7 +912,9 @@ private: "InMemoryFile cannot resize the file backing store while memory mappings exist."); auto newBytes = heapArray(kj::max(capacity, bytes.size() * 2)); - memcpy(newBytes.begin(), bytes.begin(), size); + if (size > 0) { // placate ubsan; bytes.begin() might be null + memcpy(newBytes.begin(), bytes.begin(), size); + } memset(newBytes.begin() + size, 0, newBytes.size() - size); bytes = kj::mv(newBytes); } @@ -1511,7 +1513,7 @@ private: entry.set(kj::mv(copy)); } else { if (mode == TransferMode::MOVE) { - KJ_ASSERT(fromDirectory.tryRemove(fromPath), "could't move node", fromPath) { + KJ_ASSERT(fromDirectory.tryRemove(fromPath), "couldn't move node", fromPath) { return false; } } @@ -1543,7 +1545,7 @@ private: entry.set(kj::mv(copy)); } else { if (mode == TransferMode::MOVE) { - KJ_ASSERT(fromDirectory.tryRemove(fromPath), "could't move node", fromPath) { + KJ_ASSERT(fromDirectory.tryRemove(fromPath), "couldn't move node", fromPath) { return false; } } @@ -1560,7 +1562,7 @@ private: // Since symlinks are immutable, we can implement LINK the same as COPY. entry.init(SymlinkNode { lastModified.orDefault(clock.now()), kj::mv(*content) }); if (mode == TransferMode::MOVE) { - KJ_ASSERT(fromDirectory.tryRemove(fromPath), "could't move node", fromPath) { + KJ_ASSERT(fromDirectory.tryRemove(fromPath), "couldn't move node", fromPath) { return false; } } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem.h b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem.h index 3085186e020..de309c4114f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/filesystem.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/filesystem.h @@ -157,6 +157,13 @@ class Path { bool operator>=(PathPtr other) const; // Compare path components lexically. 
+ bool operator==(const Path& other) const; + bool operator!=(const Path& other) const; + bool operator< (const Path& other) const; + bool operator> (const Path& other) const; + bool operator<=(const Path& other) const; + bool operator>=(const Path& other) const; + uint hashCode() const; // Can use in HashMap. @@ -792,7 +799,8 @@ class Directory: public ReadableDirectory { // Open a file for writing. // // `tryOpenFile()` returns null if the path is required to exist but doesn't (MODIFY or REPLACE) - // or if the path is required not to exist but does (CREATE or RACE). + // or if the path is required not to exist but does (CREATE or RACE). These are the only cases + // where it returns null -- all other types of errors (like "access denied") throw exceptions. virtual Own> replaceFile(PathPtr path, WriteMode mode) const = 0; // Construct a file which, when ready, will be atomically moved to `path`, replacing whatever @@ -869,7 +877,9 @@ class Directory: public ReadableDirectory { virtual bool tryRemove(PathPtr path) const = 0; // Deletes/unlinks the given path. If the path names a directory, it is recursively deleted. // - // tryRemove() returns false if the path doesn't exist; remove() throws in this case. + // tryRemove() returns false in the specific case that the path doesn't exist. remove() would + // throw in this case. In all other error cases (like "access denied"), tryRemove() still throws; + // it is only "does not exist" that produces a false return. // TODO(someday): // - Support sockets? There's no openat()-like interface for sockets, so it's hard to support @@ -996,6 +1006,12 @@ inline bool Path::operator< (PathPtr other) const { return PathPtr(*this) < oth inline bool Path::operator> (PathPtr other) const { return PathPtr(*this) > other; } inline bool Path::operator<=(PathPtr other) const { return PathPtr(*this) <= other; } inline bool Path::operator>=(PathPtr other) const { return PathPtr(*this) >= other; } +inline bool Path::operator==(const Path& other) const { return PathPtr(*this) == PathPtr(other); } +inline bool Path::operator!=(const Path& other) const { return PathPtr(*this) != PathPtr(other); } +inline bool Path::operator< (const Path& other) const { return PathPtr(*this) < PathPtr(other); } +inline bool Path::operator> (const Path& other) const { return PathPtr(*this) > PathPtr(other); } +inline bool Path::operator<=(const Path& other) const { return PathPtr(*this) <= PathPtr(other); } +inline bool Path::operator>=(const Path& other) const { return PathPtr(*this) >= PathPtr(other); } inline uint Path::hashCode() const { return kj::hashCode(parts); } inline bool Path::startsWith(PathPtr prefix) const { return PathPtr(*this).startsWith(prefix); } inline bool Path::endsWith (PathPtr suffix) const { return PathPtr(*this).endsWith (suffix); } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/function.h b/libs/EXTERNAL/capnproto/c++/src/kj/function.h index b1a24308de4..59ba5f35ba1 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/function.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/function.h @@ -21,12 +21,10 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "memory.h" +KJ_BEGIN_HEADER + namespace kj { template @@ -231,7 +229,7 @@ class FunctionParam { } private: - void* space[2]; + alignas(void*) char space[2 * sizeof(void*)]; class WrapperBase { public: @@ -291,3 +289,5 @@ BoundMethod boundMethod(T&& t, Func&& func, ConstFunc&& cons // contain a copy (by move) of it. The method is allowed to be overloaded. 
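Looping back to the filesystem.h hunk above: the comparison operators declared so far took only PathPtr, while the new overloads accept another Path directly and, as the inline definitions show, simply forward through PathPtr. A tiny sketch (the paths are made up):

    kj::Path a = kj::Path({"foo", "bar"});
    kj::Path b = kj::Path({"foo", "baz"});

    KJ_ASSERT(a != b);
    KJ_ASSERT(a < b);                          // component-wise lexical comparison
    KJ_ASSERT(a == kj::Path({"foo", "bar"}));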
} // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/hash.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/hash.c++ index ec57bf9279b..bf80a55565f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/hash.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/hash.c++ @@ -47,10 +47,10 @@ uint HashCoder::operator*(ArrayPtr s) const { switch (len) { case 3: h ^= data[2] << 16; - // fallthrough + KJ_FALLTHROUGH; case 2: h ^= data[1] << 8; - // fallthrough + KJ_FALLTHROUGH; case 1: h ^= data[0]; h *= m; diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/hash.h b/libs/EXTERNAL/capnproto/c++/src/kj/hash.h index 086ac031788..750a14b9580 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/hash.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/hash.h @@ -21,12 +21,10 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "string.h" +KJ_BEGIN_HEADER + namespace kj { namespace _ { // private @@ -105,6 +103,8 @@ struct HashCoder { uint operator*(ArrayPtr arr) const; template () * instance())> uint operator*(const Array& arr) const; + template > + inline uint operator*(T e) const; template ().hashCode())> inline Result operator*(T&& value) const { return kj::fwd(value).hashCode(); } @@ -174,5 +174,12 @@ inline uint HashCoder::operator*(const Array& arr) const { return operator*(arr.asPtr()); } +template +inline uint HashCoder::operator*(T e) const { + return operator*(static_cast<__underlying_type(T)>(e)); +} + } // namespace _ (private) } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/io-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/io-test.c++ index 3d0b20fd7bf..1ec162e2ad3 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/io-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/io-test.c++ @@ -104,7 +104,8 @@ KJ_TEST("VectorOutputStream") { KJ_ASSERT(kj::str(output.getArray().asChars()) == "abcdefghijklmnopABCD"); output.write(junk + 4, 20); - KJ_ASSERT(output.getArray().begin() != buf.begin()); + // (We can't assert output.getArray().begin() != buf.begin() because the memory allocator could + // legitimately have allocated a new array in the same space.) KJ_ASSERT(output.getArray().end() != buf3.begin() + 24); KJ_ASSERT(kj::str(output.getArray().asChars()) == "abcdefghijklmnopABCDEFGHIJKLMNOPQRSTUVWX"); @@ -175,5 +176,27 @@ KJ_TEST("InputStream::readAllText() / readAllBytes()") { } } +KJ_TEST("ArrayOutputStream::write() does not assume adjacent write buffer is its own") { + // Previously, if ArrayOutputStream::write(src, size) saw that `src` equaled its fill position, it + // would assume that the write was already in its buffer. This assumption was buggy if the write + // buffer was directly adjacent in memory to the ArrayOutputStream's buffer, and the + // ArrayOutputStream was full (i.e., its fill position was one-past-the-end). + // + // VectorOutputStream also suffered a similar bug, but it is much harder to test, since it + // performs its own allocation. + + kj::byte buffer[10] = { 0 }; + + ArrayOutputStream output(arrayPtr(buffer, buffer + 5)); + + // Succeeds and fills the ArrayOutputStream. + output.write(buffer + 5, 5); + + // Previously this threw an inscrutable "size <= array.end() - fillPos" requirement failure. 
+ KJ_EXPECT_THROW_MESSAGE( + "backing array was not large enough for the data written", + output.write(buffer + 5, 5)); +} + } // namespace } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/io.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/io.c++ index cf276d7a623..59d12e58639 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/io.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/io.c++ @@ -23,6 +23,10 @@ #define _GNU_SOURCE #endif +#if _WIN32 +#include "win32-api-version.h" +#endif + #include "io.h" #include "debug.h" #include "miniposix.h" @@ -31,10 +35,6 @@ #include "vector.h" #if _WIN32 -#ifndef NOMINMAX -#define NOMINMAX 1 -#endif -#define WIN32_LEAN_AND_MEAN #include #include "windows-sanity.h" #else @@ -271,9 +271,9 @@ ArrayPtr ArrayOutputStream::getWriteBuffer() { } void ArrayOutputStream::write(const void* src, size_t size) { - if (src == fillPos) { + if (src == fillPos && fillPos != array.end()) { // Oh goody, the caller wrote directly into our buffer. - KJ_REQUIRE(size <= array.end() - fillPos); + KJ_REQUIRE(size <= array.end() - fillPos, size, fillPos, array.end() - fillPos); fillPos += size; } else { KJ_REQUIRE(size <= (size_t)(array.end() - fillPos), @@ -299,9 +299,9 @@ ArrayPtr VectorOutputStream::getWriteBuffer() { } void VectorOutputStream::write(const void* src, size_t size) { - if (src == fillPos) { + if (src == fillPos && fillPos != vector.end()) { // Oh goody, the caller wrote directly into our buffer. - KJ_REQUIRE(size <= vector.end() - fillPos); + KJ_REQUIRE(size <= vector.end() - fillPos, size, fillPos, vector.end() - fillPos); fillPos += size; } else { if (vector.end() - fillPos < size) { @@ -326,14 +326,13 @@ void VectorOutputStream::grow(size_t minSize) { AutoCloseFd::~AutoCloseFd() noexcept(false) { if (fd >= 0) { - unwindDetector.catchExceptionsIfUnwinding([&]() { - // Don't use SYSCALL() here because close() should not be repeated on EINTR. - if (miniposix::close(fd) < 0) { - KJ_FAIL_SYSCALL("close", errno, fd) { - break; - } + // Don't use SYSCALL() here because close() should not be repeated on EINTR. + if (miniposix::close(fd) < 0) { + KJ_FAIL_SYSCALL("close", errno, fd) { + // This ensures we don't throw an exception if unwinding. 
+ break; } - }); + } } } @@ -378,7 +377,7 @@ void FdOutputStream::write(ArrayPtr> pieces) { OutputStream::write(pieces); #else - const size_t iovmax = miniposix::iovMax(pieces.size()); + const size_t iovmax = miniposix::iovMax(); while (pieces.size() > iovmax) { write(pieces.slice(0, iovmax)); pieces = pieces.slice(iovmax, pieces.size()); diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/io.h b/libs/EXTERNAL/capnproto/c++/src/kj/io.h index acc6fbdc8a8..a09094983f6 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/io.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/io.h @@ -21,16 +21,14 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include #include "common.h" #include "array.h" #include "exception.h" #include +KJ_BEGIN_HEADER + namespace kj { // ======================================================================================= @@ -300,7 +298,6 @@ class AutoCloseFd { private: int fd; - UnwindDetector unwindDetector; }; inline auto KJ_STRINGIFY(const AutoCloseFd& fd) @@ -437,3 +434,5 @@ class HandleOutputStream: public OutputStream { #endif // _WIN32 } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/list-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/list-test.c++ new file mode 100644 index 00000000000..0c7172de8bd --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/list-test.c++ @@ -0,0 +1,175 @@ +// Copyright (c) 2021 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
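To make the ArrayOutputStream change earlier in this diff concrete: write() still has a zero-copy fast path for callers that fill the buffer returned by getWriteBuffer() and then hand the same pointer back, but it now only takes that path when the stream is not already full. A minimal sketch of that pattern, assuming an out-of-tree consumer (the <kj/io.h> include path, buffer size, and payload are illustrative, not from the patch):

    #include <string.h>
    #include <kj/io.h>

    void zeroCopyWrite() {
      kj::byte storage[64];
      kj::ArrayOutputStream out(kj::arrayPtr(storage, sizeof(storage)));

      // Ask the stream for its remaining space and fill it in place.
      kj::ArrayPtr<kj::byte> buf = out.getWriteBuffer();
      memcpy(buf.begin(), "hello", 5);

      // Handing the same pointer back in hits the fast path: write() just
      // advances fillPos instead of copying. With the added
      // `fillPos != array.end()` check, an unrelated-but-adjacent source buffer
      // can no longer be mistaken for this case once the stream is full.
      out.write(buf.begin(), 5);
    }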
+ +#include "list.h" +#include + +namespace kj { +namespace { + +struct TestElement { + int i; + ListLink link; + + TestElement(int i): i(i) {} +}; + +KJ_TEST("List") { + List list; + KJ_EXPECT(list.empty()); + KJ_EXPECT(list.size() == 0); + + TestElement foo(123); + TestElement bar(456); + + { + list.add(foo); + KJ_DEFER(list.remove(foo)); + KJ_EXPECT(!list.empty()); + KJ_EXPECT(list.size() == 1); + KJ_EXPECT(list.front().i == 123); + + { + list.add(bar); + KJ_EXPECT(list.size() == 2); + KJ_DEFER(list.remove(bar)); + + { + auto iter = list.begin(); + KJ_ASSERT(iter != list.end()); + KJ_EXPECT(iter->i == 123); + ++iter; + KJ_ASSERT(iter != list.end()); + KJ_EXPECT(iter->i == 456); + iter->i = 321; + KJ_EXPECT(bar.i == 321); + ++iter; + KJ_ASSERT(iter == list.end()); + } + + const List& clist = list; + + { + auto iter = clist.begin(); + KJ_ASSERT(iter != clist.end()); + KJ_EXPECT(iter->i == 123); + ++iter; + KJ_ASSERT(iter != clist.end()); + KJ_EXPECT(iter->i == 321); + ++iter; + KJ_ASSERT(iter == clist.end()); + } + } + + KJ_EXPECT(list.size() == 1); + + KJ_EXPECT(!list.empty()); + KJ_EXPECT(list.front().i == 123); + + { + auto iter = list.begin(); + KJ_ASSERT(iter != list.end()); + KJ_EXPECT(iter->i == 123); + ++iter; + KJ_ASSERT(iter == list.end()); + } + } + + KJ_EXPECT(list.empty()); + KJ_EXPECT(list.size() == 0); + + { + list.add(bar); + KJ_DEFER(list.remove(bar)); + KJ_EXPECT(!list.empty()); + KJ_EXPECT(list.size() == 1); + KJ_EXPECT(list.front().i == 321); + + { + auto iter = list.begin(); + KJ_ASSERT(iter != list.end()); + KJ_EXPECT(iter->i == 321); + ++iter; + KJ_ASSERT(iter == list.end()); + } + } + + KJ_EXPECT(list.empty()); + KJ_EXPECT(list.size() == 0); +} + +KJ_TEST("List remove while iterating") { + List list; + KJ_EXPECT(list.empty()); + + TestElement foo(123); + list.add(foo); + KJ_DEFER(list.remove(foo)); + + TestElement bar(456); + list.add(bar); + + TestElement baz(789); + list.add(baz); + KJ_DEFER(list.remove(baz)); + + KJ_EXPECT(foo.link.isLinked()); + KJ_EXPECT(bar.link.isLinked()); + KJ_EXPECT(baz.link.isLinked()); + + { + auto iter = list.begin(); + KJ_ASSERT(iter != list.end()); + KJ_EXPECT(iter->i == 123); + ++iter; + + KJ_ASSERT(iter != list.end()); + KJ_EXPECT(iter->i == 456); + list.remove(*iter); + ++iter; + + KJ_ASSERT(iter != list.end()); + KJ_EXPECT(iter->i == 789); + ++iter; + + KJ_EXPECT(iter == list.end()); + } + + KJ_EXPECT(foo.link.isLinked()); + KJ_EXPECT(!bar.link.isLinked()); + KJ_EXPECT(baz.link.isLinked()); + + { + auto iter = list.begin(); + KJ_ASSERT(iter != list.end()); + KJ_EXPECT(iter->i == 123); + ++iter; + + KJ_ASSERT(iter != list.end()); + KJ_EXPECT(iter->i == 789); + ++iter; + + KJ_EXPECT(iter == list.end()); + } +} + +} // namespace +} // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/list.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/list.c++ new file mode 100644 index 00000000000..a7aa006c55f --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/list.c++ @@ -0,0 +1,46 @@ +// Copyright (c) 2021 Cloudflare, Inc. 
and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "list.h" +#include "debug.h" + +namespace kj { +namespace _ { + +void throwDoubleAdd() { + kj::throwFatalException(KJ_EXCEPTION(FAILED, + "tried to add element to kj::List but the element is already in a list")); +} +void throwRemovedNotPresent() { + kj::throwFatalException(KJ_EXCEPTION(FAILED, + "tried to remove element from kj::List but the element is not in a list")); +} +void throwRemovedWrongList() { + kj::throwFatalException(KJ_EXCEPTION(FAILED, + "tried to remove element from kj::List but the element is in a different list")); +} +void throwDestroyedWhileInList() { + kj::throwFatalException(KJ_EXCEPTION(FAILED, + "destroyed object that is still in a kj::List")); +} + +} // namespace _ +} // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/list.h b/libs/EXTERNAL/capnproto/c++/src/kj/list.h new file mode 100644 index 00000000000..02b8cdb39e9 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/list.h @@ -0,0 +1,214 @@ +// Copyright (c) 2021 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +#pragma once + +#include "common.h" + +KJ_BEGIN_HEADER + +namespace kj { + +template +class ListLink; + +template T::*link> +class ListIterator; + +namespace _ { // (private) + +KJ_NORETURN(void throwDoubleAdd()); +KJ_NORETURN(void throwRemovedNotPresent()); +KJ_NORETURN(void throwRemovedWrongList()); +KJ_NORETURN(void throwDestroyedWhileInList()); + +} // namespace _ (private) + +template T::*link> +class List { + // A linked list that does no memory allocation. + // + // The list contains elements of type T that are allocated elsewhere. An existing object of type + // T can be added to the list and removed again without doing any heap allocation. This is + // achieved by requiring that T contains a field of type ListLink. A pointer-to-member to + // this field is the second parameter to the `List` template. + // + // kj::List is ideally suited to situations where an object wants to be able to "add itself" to + // a list of objects waiting for a notification, with the ability to remove itself early if it + // wants to stop waiting. With traditional STL containers, these operations would require memory + // allocation. + // + // Example: + // + // struct Item { + // ListLink link; + // // ... other members ... + // }; + // + // kj::List itemList; + // + // Item foo; + // itemList.add(foo); + // itemList.remove(foo); + // + // Note that you MUST manually remove an element from the list before destroying it. ListLinks + // do not automatically unlink themselves because this could lead to subtle thread-safety bugs + // if the List is guarded by a mutex, and that mutex is not currenty locked. Normally, you should + // have T's destructor remove it from any lists. You can use `link.isLinked()` to check if the + // item is currently in a list. + // + // kj::List is a doubly-linked list in order to allow O(1) removal of any element given only a + // reference to the element. However, it only supports forward iteration. + // + // When iterating over a kj::List, you can safely remove current element which the iterator + // points to without breaking the iteration. However, removing any *other* element could + // invalidate the iterator. 
+ +public: + List() = default; + KJ_DISALLOW_COPY(List); + + bool empty() const { + return head == nullptr; + } + + size_t size() const { + return listSize; + } + + void add(T& element) { + if ((element.*link).prev != nullptr) _::throwDoubleAdd(); + *tail = element; + (element.*link).prev = tail; + tail = &((element.*link).next); + ++listSize; + } + + void remove(T& element) { + if ((element.*link).prev == nullptr) _::throwRemovedNotPresent(); + *((element.*link).prev) = (element.*link).next; + KJ_IF_MAYBE(n, (element.*link).next) { + (n->*link).prev = (element.*link).prev; + } else { + if (tail != &((element.*link).next)) _::throwRemovedWrongList(); + tail = (element.*link).prev; + } + (element.*link).next = nullptr; + (element.*link).prev = nullptr; + --listSize; + } + + typedef ListIterator Iterator; + typedef ListIterator ConstIterator; + + Iterator begin() { return Iterator(head); } + Iterator end() { return Iterator(nullptr); } + ConstIterator begin() const { return ConstIterator(head); } + ConstIterator end() const { return ConstIterator(nullptr); } + + T& front() { return *begin(); } + const T& front() const { return *begin(); } + +private: + Maybe head; + Maybe* tail = &head; + size_t listSize = 0; +}; + +template +class ListLink { +public: + ListLink(): next(nullptr), prev(nullptr) {} + ~ListLink() noexcept { + // Intentionally `noexcept` because we want to crash if a dangling pointer was left in a list. + if (prev != nullptr) _::throwDestroyedWhileInList(); + } + KJ_DISALLOW_COPY(ListLink); + + bool isLinked() const { return prev != nullptr; } + +private: + Maybe next; + Maybe* prev; + + template U::*link> + friend class List; + template U::*link> + friend class ListIterator; +}; + +template T::*link> +class ListIterator { +public: + ListIterator() = default; + + MaybeConstT& operator*() { + KJ_IREQUIRE(current != nullptr, "tried to dereference end of list"); + return *_::readMaybe(current); + } + const T& operator*() const { + KJ_IREQUIRE(current != nullptr, "tried to dereference end of list"); + return *_::readMaybe(current); + } + MaybeConstT* operator->() { + KJ_IREQUIRE(current != nullptr, "tried to dereference end of list"); + return _::readMaybe(current); + } + const T* operator->() const { + KJ_IREQUIRE(current != nullptr, "tried to dereference end of list"); + return _::readMaybe(current); + } + + inline ListIterator& operator++() { + current = next; + next = current.map([](MaybeConstT& obj) -> kj::Maybe { return (obj.*link).next; }) + .orDefault(nullptr); + return *this; + } + inline ListIterator operator++(int) { + ListIterator result = *this; + ++*this; + return result; + } + + inline bool operator==(const ListIterator& other) const { + return _::readMaybe(current) == _::readMaybe(other.current); + } + inline bool operator!=(const ListIterator& other) const { + return _::readMaybe(current) != _::readMaybe(other.current); + } + +private: + Maybe current; + + Maybe next; + // so that the current item can be removed from the list without invalidating the iterator + + explicit ListIterator(Maybe start) + : current(start), + next(start.map([](MaybeConstT& obj) -> kj::Maybe { return (obj.*link).next; }) + .orDefault(nullptr)) {} + friend class List; +}; + +} // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/main.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/main.c++ index 47254d1e61e..6b980de94e6 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/main.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/main.c++ @@ -23,6 +23,10 @@ #define _GNU_SOURCE #endif 
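The kj::List added above is intrusive, so a short usage sketch may help; the Connection type and its members are invented for illustration, and the pattern follows the header comment and list-test.c++ (unlink before destruction, removing the current element while iterating is allowed):

    #include <kj/list.h>
    #include <kj/debug.h>

    struct Connection {
      kj::ListLink<Connection> link;
      int id;
      explicit Connection(int id): id(id) {}
    };

    void pruneExample() {
      kj::List<Connection, &Connection::link> connections;

      Connection a(1), b(2);
      connections.add(a);
      connections.add(b);

      // Removing the element the iterator currently points at is safe; the
      // iterator caches `next` for exactly this reason.
      for (auto& c: connections) {
        if (c.id == 2) connections.remove(c);
      }

      // Elements must be unlinked before they are destroyed, otherwise
      // ListLink's destructor crashes (it is deliberately noexcept).
      connections.remove(a);
      KJ_ASSERT(!a.link.isLinked() && !b.link.isLinked());
    }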
+#if _WIN32 +#include "win32-api-version.h" +#endif + #include "main.h" #include "debug.h" #include "arena.h" @@ -34,10 +38,6 @@ #include #if _WIN32 -#define WIN32_LEAN_AND_MEAN -#ifndef NOMINMAX -#define NOMINMAX 1 -#endif #include #include "windows-sanity.h" #else diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/main.h b/libs/EXTERNAL/capnproto/c++/src/kj/main.h index cda970ac9fe..2533000649e 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/main.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/main.h @@ -21,15 +21,13 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "array.h" #include "string.h" #include "vector.h" #include "function.h" +KJ_BEGIN_HEADER + namespace kj { class ProcessContext { @@ -402,3 +400,5 @@ class MainBuilder { }; } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/map-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/map-test.c++ index de873487da9..ac2b2410e3c 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/map-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/map-test.c++ @@ -148,6 +148,58 @@ KJ_TEST("TreeMap range") { } } +#if !KJ_NO_EXCEPTIONS +KJ_TEST("HashMap findOrCreate throws") { + HashMap m; + try { + m.findOrCreate(1, []() -> HashMap::Entry { + throw "foo"; + }); + KJ_FAIL_ASSERT("shouldn't get here"); + } catch (const char*) { + // expected + } + + KJ_EXPECT(m.find(1) == nullptr); + m.findOrCreate(1, []() { + return HashMap::Entry { 1, kj::str("ok") }; + }); + + KJ_EXPECT(KJ_ASSERT_NONNULL(m.find(1)) == "ok"); +} +#endif + +template +void testEraseAll(MapType& m) { + m.insert(12, "foo"); + m.insert(83, "bar"); + m.insert(99, "baz"); + m.insert(6, "qux"); + m.insert(55, "corge"); + + auto count = m.eraseAll([](int i, StringPtr s) { + return i == 99 || s == "foo"; + }); + + KJ_EXPECT(count == 2); + KJ_EXPECT(m.size() == 3); + KJ_EXPECT(m.find(12) == nullptr); + KJ_EXPECT(m.find(99) == nullptr); + KJ_EXPECT(KJ_ASSERT_NONNULL(m.find(83)) == "bar"); + KJ_EXPECT(KJ_ASSERT_NONNULL(m.find(6)) == "qux"); + KJ_EXPECT(KJ_ASSERT_NONNULL(m.find(55)) == "corge"); +} + +KJ_TEST("HashMap eraseAll") { + HashMap m; + testEraseAll(m); +} + +KJ_TEST("TreeMap eraseAll") { + TreeMap m; + testEraseAll(m); +} + } // namespace } // namespace _ } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/map.h b/libs/EXTERNAL/capnproto/c++/src/kj/map.h index 7880a746760..bbd2058a01d 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/map.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/map.h @@ -21,13 +21,11 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "table.h" #include "hash.h" +KJ_BEGIN_HEADER + namespace kj { template @@ -87,6 +85,14 @@ class HashMap { // Like find() but if the key isn't present then call createEntry() to create the corresponding // entry and insert it. createEntry() must return type `Entry`. + template + kj::Maybe findEntry(KeyLike&& key); + template + kj::Maybe findEntry(KeyLike&& key) const; + template + Entry& findOrCreateEntry(KeyLike&& key, Func&& createEntry); + // Sometimes you need to see the whole matching Entry, not just the Value. + template bool erase(KeyLike&& key); // Erase the entry with the matching key. 
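Since the map.h hunk above only shows the declarations of the new Entry accessors, here is a hedged sketch of how they are meant to be used; the test name is invented and the shape mirrors the existing findOrCreate test:

    KJ_TEST("HashMap entry accessors (illustrative)") {
      kj::HashMap<int, kj::String> m;

      // findOrCreateEntry() exposes the whole Entry (key and value), not just the value.
      auto& entry = m.findOrCreateEntry(5, []() {
        return kj::HashMap<int, kj::String>::Entry { 5, kj::str("five") };
      });
      KJ_EXPECT(entry.key == 5);
      KJ_EXPECT(entry.value == "five");

      // findEntry() is the Maybe-returning lookup counterpart.
      KJ_IF_MAYBE(e, m.findEntry(5)) {
        e->value = kj::str("FIVE");
      }
      KJ_EXPECT(KJ_ASSERT_NONNULL(m.find(5)) == "FIVE");
    }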
@@ -117,7 +123,7 @@ class HashMap { return e.key == key; } template - inline bool hashCode(KeyLike&& key) const { + inline auto hashCode(KeyLike&& key) const { return kj::hashCode(key); } }; @@ -176,6 +182,14 @@ class TreeMap { // Like find() but if the key isn't present then call createEntry() to create the corresponding // entry and insert it. createEntry() must return type `Entry`. + template + kj::Maybe findEntry(KeyLike&& key); + template + kj::Maybe findEntry(KeyLike&& key) const; + template + Entry& findOrCreateEntry(KeyLike&& key, Func&& createEntry); + // Sometimes you need to see the whole matching Entry, not just the Value. + template auto range(K1&& k1, K2&& k2); template @@ -238,7 +252,7 @@ class HashSetCallbacks { template inline bool matches(T& a, U& b) const { return a == b; } template - inline bool hashCode(KeyLike&& key) const { + inline auto hashCode(KeyLike&& key) const { return kj::hashCode(key); } }; @@ -353,6 +367,25 @@ Value& HashMap::findOrCreate(KeyLike&& key, Func&& createEntry) { return table.findOrCreate(key, kj::fwd(createEntry)).value; } +template +template +kj::Maybe::Entry&> +HashMap::findEntry(KeyLike&& key) { + return table.find(kj::fwd(key)); +} +template +template +kj::Maybe::Entry&> +HashMap::findEntry(KeyLike&& key) const { + return table.find(kj::fwd(key)); +} +template +template +typename HashMap::Entry& +HashMap::findOrCreateEntry(KeyLike&& key, Func&& createEntry) { + return table.findOrCreate(kj::fwd(key), kj::fwd(createEntry)); +} + template template bool HashMap::erase(KeyLike&& key) { @@ -367,7 +400,9 @@ void HashMap::erase(Entry& entry) { template template size_t HashMap::eraseAll(Predicate&& predicate) { - return table.eraseAll(kj::fwd(predicate)); + return table.eraseAll([&](Entry& entry) { + return predicate(entry.key, entry.value); + }); } // ----------------------------------------------------------------------------- @@ -445,6 +480,25 @@ Value& TreeMap::findOrCreate(KeyLike&& key, Func&& createEntry) { return table.findOrCreate(key, kj::fwd(createEntry)).value; } +template +template +kj::Maybe::Entry&> +TreeMap::findEntry(KeyLike&& key) { + return table.find(kj::fwd(key)); +} +template +template +kj::Maybe::Entry&> +TreeMap::findEntry(KeyLike&& key) const { + return table.find(kj::fwd(key)); +} +template +template +typename TreeMap::Entry& +TreeMap::findOrCreateEntry(KeyLike&& key, Func&& createEntry) { + return table.findOrCreate(kj::fwd(key), kj::fwd(createEntry)); +} + template template auto TreeMap::range(K1&& k1, K2&& k2) { @@ -470,7 +524,9 @@ void TreeMap::erase(Entry& entry) { template template size_t TreeMap::eraseAll(Predicate&& predicate) { - return table.eraseAll(kj::fwd(predicate)); + return table.eraseAll([&](Entry& entry) { + return predicate(entry.key, entry.value); + }); } template @@ -480,3 +536,5 @@ size_t TreeMap::eraseRange(K1&& k1, K2&& k2) { } } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/memory-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/memory-test.c++ index 2b2435b9bd0..6e1e343232b 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/memory-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/memory-test.c++ @@ -136,6 +136,68 @@ TEST(Memory, AttachNested) { KJ_EXPECT(destroyed3 == 3, destroyed3); } +KJ_TEST("attachRef") { + uint counter = 0; + uint destroyed1 = 0; + uint destroyed2 = 0; + uint destroyed3 = 0; + + auto obj1 = kj::heap(counter, destroyed1); + auto obj2 = kj::heap(counter, destroyed2); + auto obj3 = kj::heap(counter, destroyed3); + + int i = 123; + + Own combined = 
attachRef(i, kj::mv(obj1), kj::mv(obj2), kj::mv(obj3)); + + KJ_EXPECT(combined.get() == &i); + + KJ_EXPECT(obj1.get() == nullptr); + KJ_EXPECT(obj2.get() == nullptr); + KJ_EXPECT(obj3.get() == nullptr); + KJ_EXPECT(destroyed1 == 0); + KJ_EXPECT(destroyed2 == 0); + KJ_EXPECT(destroyed3 == 0); + + combined = nullptr; + + KJ_EXPECT(destroyed1 == 1, destroyed1); + KJ_EXPECT(destroyed2 == 2, destroyed2); + KJ_EXPECT(destroyed3 == 3, destroyed3); +} + +KJ_TEST("attachVal") { + uint counter = 0; + uint destroyed1 = 0; + uint destroyed2 = 0; + uint destroyed3 = 0; + + auto obj1 = kj::heap(counter, destroyed1); + auto obj2 = kj::heap(counter, destroyed2); + auto obj3 = kj::heap(counter, destroyed3); + + int i = 123; + + Own combined = attachVal(i, kj::mv(obj1), kj::mv(obj2), kj::mv(obj3)); + + int* ptr = combined.get(); + KJ_EXPECT(ptr != &i); + KJ_EXPECT(*ptr == i); + + KJ_EXPECT(obj1.get() == nullptr); + KJ_EXPECT(obj2.get() == nullptr); + KJ_EXPECT(obj3.get() == nullptr); + KJ_EXPECT(destroyed1 == 0); + KJ_EXPECT(destroyed2 == 0); + KJ_EXPECT(destroyed3 == 0); + + combined = nullptr; + + KJ_EXPECT(destroyed1 == 1, destroyed1); + KJ_EXPECT(destroyed2 == 2, destroyed2); + KJ_EXPECT(destroyed3 == 3, destroyed3); +} + struct StaticType { int i; }; @@ -289,6 +351,50 @@ TEST(Memory, OwnConstVoid) { } } +struct IncompleteType; +KJ_DECLARE_NON_POLYMORPHIC(IncompleteType) + +template +struct IncompleteTemplate; +template +KJ_DECLARE_NON_POLYMORPHIC(IncompleteTemplate) + +struct IncompleteDisposer: public Disposer { + mutable void* sawPtr = nullptr; + + virtual void disposeImpl(void* pointer) const { + sawPtr = pointer; + } +}; + +KJ_TEST("Own") { + static int i; + void* ptr = &i; + + { + IncompleteDisposer disposer; + + { + kj::Own foo(reinterpret_cast(ptr), disposer); + kj::Own bar = kj::mv(foo); + } + + KJ_EXPECT(disposer.sawPtr == ptr); + } + + { + IncompleteDisposer disposer; + + { + kj::Own> foo( + reinterpret_cast*>(ptr), disposer); + kj::Own> bar = kj::mv(foo); + } + + KJ_EXPECT(disposer.sawPtr == ptr); + } +} + // TODO(test): More tests. } // namespace diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/memory.h b/libs/EXTERNAL/capnproto/c++/src/kj/memory.h index 36cf9d6a619..1229b5c3ecf 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/memory.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/memory.h @@ -21,14 +21,42 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "common.h" +KJ_BEGIN_HEADER + namespace kj { +template +inline constexpr bool _kj_internal_isPolymorphic(T*) { + // If you get a compiler error here complaining that T is incomplete, it's because you are trying + // to use kj::Own with a type that has only been forward-declared. Since KJ doesn't know if + // the type might be involved in inheritance (especially multiple inheritance), it doesn't know + // how to correctly call the disposer to destroy the type, since the object's true memory address + // may differ from the address used to point to a superclass. + // + // However, if you know for sure that T is NOT polymorphic (i.e. it doesn't have a vtable and + // isn't involved in inheritance), then you can use KJ_DECLARE_NON_POLYMORPHIC(T) to declare this + // to KJ without actually completing the type. Place this macro invocation either in the global + // scope, or in the same namespace as T is defined. + return __is_polymorphic(T); +} + +#define KJ_DECLARE_NON_POLYMORPHIC(...) 
\ + inline constexpr bool _kj_internal_isPolymorphic(__VA_ARGS__*) { \ + return false; \ + } +// If you want to use kj::Own for an incomplete type T that you know is not polymorphic, then +// write `KJ_DECLARE_NON_POLYMORPHIC(T)` either at the global scope or in the same namespace as +// T is declared. +// +// This also works for templates, e.g.: +// +// template +// struct MyType; +// template +// KJ_DECLARE_NON_POLYMORPHIC(MyType) + namespace _ { // private template struct RefOrVoid_ { typedef T& Type; }; @@ -41,7 +69,7 @@ using RefOrVoid = typename RefOrVoid_::Type; // // This is a hack needed to avoid defining Own as a totally separate class. -template +template struct CastToVoid_; template @@ -112,7 +140,7 @@ class Disposer { // an exception. private: - template + template struct Dispose_; }; @@ -176,7 +204,7 @@ class Own { ~Own() noexcept(false) { dispose(); } inline Own& operator=(Own&& other) { - // Move-assingnment operator. + // Move-assignnment operator. // Careful, this might own `other`. Therefore we have to transfer the pointers first, then // dispose. @@ -253,7 +281,7 @@ class Own { template static inline T* cast(U* ptr) { - static_assert(__is_polymorphic(T), + static_assert(_kj_internal_isPolymorphic((T*)nullptr), "Casting owned pointers requires that the target type is polymorphic."); return ptr; } @@ -318,6 +346,13 @@ class Maybe> { inline Maybe(decltype(nullptr)) noexcept: ptr(nullptr) {} + inline Own& emplace(Own value) { + // Assign the Maybe to the given value and return the content. This avoids the need to do a + // KJ_ASSERT_NONNULL() immediately after setting the Maybe just to read it back again. + ptr = kj::mv(value); + return ptr; + } + inline operator Maybe() { return ptr.get(); } inline operator Maybe() const { return ptr.get(); } @@ -400,8 +435,17 @@ class HeapDisposer final: public Disposer { static const HeapDisposer instance; }; +#if _MSC_VER && _MSC_VER < 1920 && !defined(__clang__) +template +__declspec(selectany) const HeapDisposer HeapDisposer::instance = HeapDisposer(); +// On MSVC 2017 we suddenly started seeing a linker error on one specific specialization of +// `HeapDisposer::instance` when seemingly-unrelated code was modified. Explicitly specifying +// `__declspec(selectany)` seems to fix it. But why? Shouldn't template members have `selectany` +// behavior by default? We don't know. It works and we're moving on. +#else template const HeapDisposer HeapDisposer::instance = HeapDisposer(); +#endif } // namespace _ (private) @@ -426,6 +470,21 @@ Own> heap(T&& orig) { return Own(new T2(kj::fwd(orig)), _::HeapDisposer::instance); } +template +Own> attachVal(T&& value, Attachments&&... attachments); +// Returns an Own that takes ownership of `value` and `attachments`, and points to `value`. +// +// This is equivalent to heap(value).attach(attachments), but only does one allocation rather than +// two. + +template +Own attachRef(T& value, Attachments&&... attachments); +// Like attach() but `value` is not moved; the resulting Own points to its existing location. +// This is preferred if `value` is already owned by one of `attachments`. +// +// This is equivalent to Own(&value, kj::NullDisposer::instance).attach(attachments), but +// is easier to write and allocates slightly less memory. + // ======================================================================================= // SpaceFor -- assists in manual allocation @@ -519,4 +578,19 @@ Own Own::attach(Attachments&&... 
attachments) { return Own(ptrCopy, *bundle); } +template +Own attachRef(T& value, Attachments&&... attachments) { + auto bundle = new _::DisposableOwnedBundle(kj::fwd(attachments)...); + return Own(&value, *bundle); +} + +template +Own> attachVal(T&& value, Attachments&&... attachments) { + auto bundle = new _::DisposableOwnedBundle( + kj::fwd(value), kj::fwd(attachments)...); + return Own>(&bundle->first, *bundle); +} + } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/miniposix.h b/libs/EXTERNAL/capnproto/c++/src/kj/miniposix.h index d7285ef7acf..e9ae848d386 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/miniposix.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/miniposix.h @@ -24,10 +24,6 @@ // This header provides a small subset of the POSIX API which also happens to be available on // Windows under slightly-different names. -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #if _WIN32 #include #include @@ -47,6 +43,11 @@ #include #endif +// To get KJ_BEGIN_HEADER/KJ_END_HEADER +#include "common.h" + +KJ_BEGIN_HEADER + namespace kj { namespace miniposix { @@ -112,38 +113,32 @@ inline int mkdir(const char* path, int mode) { using ::pipe; using ::mkdir; -inline size_t iovMax(size_t count) { - // Apparently, there is a maximum number of iovecs allowed per call. I don't understand why. - // Most platforms define IOV_MAX but Linux defines only UIO_MAXIOV and others, like Hurd, - // define neither. - // - // On platforms where both IOV_MAX and UIO_MAXIOV are undefined, we poke sysconf(_SC_IOV_MAX), - // then try to fall back to the POSIX-mandated minimum of _XOPEN_IOV_MAX if that fails. - // - // http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/limits.h.html#tag_13_23_03_01 +// Apparently, there is a maximum number of iovecs allowed per call. I don't understand why. +// Most platforms define IOV_MAX but Linux defines only UIO_MAXIOV and others, like Hurd, +// define neither. +// +// On platforms where both IOV_MAX and UIO_MAXIOV are undefined, we poke sysconf(_SC_IOV_MAX), +// then try to fall back to the POSIX-mandated minimum of _XOPEN_IOV_MAX if that fails. +// +// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/limits.h.html#tag_13_23_03_01 #if defined(IOV_MAX) - // Solaris (and others?) +// Solaris, MacOS (& all other BSD-variants?) (and others?) +static constexpr inline size_t iovMax() { return IOV_MAX; -#elif defined(UIO_MAXIOV) - // Linux - return UIO_MAXIOV; +} +#elif defined(UIO_MAX_IOV) +// Linux +static constexpr inline size_t iovMax() { + return UIO_MAX_IOV; +} #else - // POSIX mystery meat - - long iovmax; - - errno = 0; - if ((iovmax = sysconf(_SC_IOV_MAX)) == -1) { - // assume iovmax == -1 && errno == 0 means "unbounded" - return errno ? _XOPEN_IOV_MAX : count; - } else { - return (size_t) iovmax; - } +#error "Please determine the appropriate constant for IOV_MAX on your system." #endif -} #endif } // namespace miniposix } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/mutex-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/mutex-test.c++ index f241f7bd0c3..32c0a5cf615 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/mutex-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/mutex-test.c++ @@ -19,6 +19,15 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
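One more note on the attachRef()/attachVal() helpers defined above, since their doc comments are terse: both bundle the attachments and the returned pointer into a single allocation. A sketch of the two intended uses (variable names invented):

    // attachRef(): the returned Own points at storage that one of the
    // attachments already owns, here the first element of the array.
    kj::Array<int> numbers = kj::heapArray<int>(16);
    int& first = numbers[0];
    kj::Own<int> firstOwned = kj::attachRef(first, kj::mv(numbers));
    // `numbers` now lives exactly as long as `firstOwned`.

    // attachVal(): the value itself is moved into the same allocation as the
    // attachments, and the Own points at that copy.
    kj::Own<int> answer = kj::attachVal(42, kj::heapString("metadata"));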
+#if _WIN32 +#include "win32-api-version.h" +#define NOGDI // NOGDI is needed to make EXPECT_EQ(123u, *lock) compile for some reason +#endif + +#include "time.h" + +#define KJ_MUTEX_TEST 1 + #include "mutex.h" #include "debug.h" #include "thread.h" @@ -26,7 +35,6 @@ #include #if _WIN32 -#define NOGDI // NOGDI is needed to make EXPECT_EQ(123u, *lock) compile for some reason #include #undef NOGDI #else @@ -34,6 +42,17 @@ #include #endif +#ifdef KJ_CONTENTION_WARNING_THRESHOLD +#include +#endif + +#if KJ_TRACK_LOCK_BLOCKING +#include +#include +#include +#include +#endif + namespace kj { namespace { @@ -51,6 +70,29 @@ TEST(Mutex, MutexGuarded) { EXPECT_EQ(123u, *lock); EXPECT_EQ(123u, value.getAlreadyLockedExclusive()); +#if KJ_USE_FUTEX + auto timeout = MILLISECONDS * 50; + + auto startTime = systemPreciseMonotonicClock().now(); + EXPECT_TRUE(value.lockExclusiveWithTimeout(timeout) == nullptr); + auto duration = startTime - systemPreciseMonotonicClock().now(); + EXPECT_TRUE(duration < timeout); + + startTime = systemPreciseMonotonicClock().now(); + EXPECT_TRUE(value.lockSharedWithTimeout(timeout) == nullptr); + duration = startTime - systemPreciseMonotonicClock().now(); + EXPECT_TRUE(duration < timeout); + + // originally, upon timing out, the exclusive requested flag would be removed + // from the futex state. if we did remove the exclusive request flag this test + // would hang. + Thread lockTimeoutThread([&]() { + // try to timeout during 10 ms delay + Maybe> maybeLock = value.lockExclusiveWithTimeout(MILLISECONDS * 8); + EXPECT_TRUE(maybeLock == nullptr); + }); +#endif + Thread thread([&]() { Locked threadLock = value.lockExclusive(); EXPECT_EQ(456u, *threadLock); @@ -63,6 +105,11 @@ TEST(Mutex, MutexGuarded) { auto earlyRelease = kj::mv(lock); } +#if KJ_USE_FUTEX + EXPECT_EQ(789u, *KJ_ASSERT_NONNULL(value.lockExclusiveWithTimeout(MILLISECONDS * 50))); + EXPECT_EQ(789u, *KJ_ASSERT_NONNULL(value.lockSharedWithTimeout(MILLISECONDS * 50))); +#endif + EXPECT_EQ(789u, *value.lockExclusive()); { @@ -112,14 +159,13 @@ TEST(Mutex, MutexGuarded) { EXPECT_EQ(321u, *value.lockExclusive()); -#if !_WIN32 // Not checked on win32. +#if !_WIN32 && !__CYGWIN__ // Not checked on win32. EXPECT_DEBUG_ANY_THROW(value.getAlreadyLockedExclusive()); EXPECT_DEBUG_ANY_THROW(value.getAlreadyLockedShared()); #endif EXPECT_EQ(321u, value.getWithoutLock()); } -#if KJ_USE_FUTEX // TODO(someday): Implement on pthread & win32 TEST(Mutex, When) { MutexGuarded value(123); @@ -169,8 +215,265 @@ TEST(Mutex, When) { KJ_EXPECT(*value.lockShared() == 101); } + +#if !KJ_NO_EXCEPTIONS + { + // Throw from predicate. + KJ_EXPECT_THROW_MESSAGE("oops threw", value.when([](uint n) -> bool { + KJ_FAIL_ASSERT("oops threw"); + }, [](uint& n) { + KJ_FAIL_EXPECT("shouldn't get here"); + })); + + // Throw from predicate later on. + kj::Thread thread([&]() { + delay(); + *value.lockExclusive() = 321; + }); + + KJ_EXPECT_THROW_MESSAGE("oops threw", value.when([](uint n) -> bool { + KJ_ASSERT(n != 321, "oops threw"); + return false; + }, [](uint& n) { + KJ_FAIL_EXPECT("shouldn't get here"); + })); + } + + { + // Verify the exceptions didn't break the mutex. 
+ uint m = value.when([](uint n) { return n > 0; }, [](uint& n) { + return n; + }); + KJ_EXPECT(m == 321); + + kj::Thread thread([&]() { + delay(); + *value.lockExclusive() = 654; + }); + + m = value.when([](uint n) { return n > 500; }, [](uint& n) { + return n; + }); + KJ_EXPECT(m == 654); + } +#endif } + +TEST(Mutex, WhenWithTimeout) { + auto& clock = systemPreciseMonotonicClock(); + MutexGuarded value(123); + + // A timeout that won't expire. + static constexpr Duration LONG_TIMEOUT = 10 * kj::SECONDS; + + { + uint m = value.when([](uint n) { return n < 200; }, [](uint& n) { + ++n; + return n + 2; + }, LONG_TIMEOUT); + KJ_EXPECT(m == 126); + + KJ_EXPECT(*value.lockShared() == 124); + } + + { + kj::Thread thread([&]() { + delay(); + *value.lockExclusive() = 321; + }); + + uint m = value.when([](uint n) { return n > 200; }, [](uint& n) { + ++n; + return n + 2; + }, LONG_TIMEOUT); + KJ_EXPECT(m == 324); + + KJ_EXPECT(*value.lockShared() == 322); + } + + { + // Stress test. 100 threads each wait for a value and then set the next value. + *value.lockExclusive() = 0; + + auto threads = kj::heapArrayBuilder>(100); + for (auto i: kj::zeroTo(100)) { + threads.add(kj::heap([i,&value]() { + if (i % 2 == 0) delay(); + uint m = value.when([i](const uint& n) { return n == i; }, + [](uint& n) { return n++; }, LONG_TIMEOUT); + KJ_ASSERT(m == i); + })); + } + + uint m = value.when([](uint n) { return n == 100; }, [](uint& n) { + return n++; + }, LONG_TIMEOUT); + KJ_EXPECT(m == 100); + + KJ_EXPECT(*value.lockShared() == 101); + } + + { + auto start = clock.now(); + uint m = value.when([](uint n) { return n == 0; }, [&](uint& n) { + KJ_ASSERT(n == 101); + auto t = clock.now() - start; + KJ_EXPECT(t >= 10 * kj::MILLISECONDS, t); + return 12; + }, 10 * kj::MILLISECONDS); + KJ_EXPECT(m == 12); + + m = value.when([](uint n) { return n == 0; }, [&](uint& n) { + KJ_ASSERT(n == 101); + auto t = clock.now() - start; + KJ_EXPECT(t >= 20 * kj::MILLISECONDS, t); + return 34; + }, 10 * kj::MILLISECONDS); + KJ_EXPECT(m == 34); + + m = value.when([](uint n) { return n > 0; }, [&](uint& n) { + KJ_ASSERT(n == 101); + return 56; + }, LONG_TIMEOUT); + KJ_EXPECT(m == 56); + } + +#if !KJ_NO_EXCEPTIONS + { + // Throw from predicate. + KJ_EXPECT_THROW_MESSAGE("oops threw", value.when([](uint n) -> bool { + KJ_FAIL_ASSERT("oops threw"); + }, [](uint& n) { + KJ_FAIL_EXPECT("shouldn't get here"); + }, LONG_TIMEOUT)); + + // Throw from predicate later on. + kj::Thread thread([&]() { + delay(); + *value.lockExclusive() = 321; + }); + + KJ_EXPECT_THROW_MESSAGE("oops threw", value.when([](uint n) -> bool { + KJ_ASSERT(n != 321, "oops threw"); + return false; + }, [](uint& n) { + KJ_FAIL_EXPECT("shouldn't get here"); + }, LONG_TIMEOUT)); + } + + { + // Verify the exceptions didn't break the mutex. + uint m = value.when([](uint n) { return n > 0; }, [](uint& n) { + return n; + }, LONG_TIMEOUT); + KJ_EXPECT(m == 321); + + auto start = clock.now(); + m = value.when([](uint n) { return n == 0; }, [&](uint& n) { + KJ_EXPECT(clock.now() - start >= 10 * kj::MILLISECONDS); + return n + 1; + }, 10 * kj::MILLISECONDS); + KJ_EXPECT(m == 322); + + kj::Thread thread([&]() { + delay(); + *value.lockExclusive() = 654; + }); + + m = value.when([](uint n) { return n > 500; }, [](uint& n) { + return n; + }, LONG_TIMEOUT); + KJ_EXPECT(m == 654); + } #endif +} + +TEST(Mutex, WhenWithTimeoutPreciseTiming) { + // Test that MutexGuarded::when() with a timeout sleeps for precisely the right amount of time. 
+ + auto& clock = systemPreciseMonotonicClock(); + + for (uint retryCount = 0; retryCount < 20; retryCount++) { + MutexGuarded value(123); + + auto start = clock.now(); + uint m = value.when([&value](uint n) { + // HACK: Reset the value as a way of testing what happens when the waiting thread is woken + // up but then finds it's not ready yet. + value.getWithoutLock() = 123; + return n == 321; + }, [](uint& n) { + return 456; + }, 100 * kj::MILLISECONDS); + + KJ_EXPECT(m == 456); + + auto t = clock.now() - start; + KJ_EXPECT(t >= 100 * kj::MILLISECONDS); + // Provide a large margin of error here because some operating systems (e.g. Windows) can have + // long timeslices (13ms) and won't schedule more precisely than a timeslice. + if (t <= 120 * kj::MILLISECONDS) { + return; + } + } + KJ_FAIL_ASSERT("time not within expected bounds even after retries"); +} + +TEST(Mutex, WhenWithTimeoutPreciseTimingAfterInterrupt) { + // Test that MutexGuarded::when() with a timeout sleeps for precisely the right amount of time, + // even if the thread is spuriously woken in the middle. + + auto& clock = systemPreciseMonotonicClock(); + + for (uint retryCount = 0; retryCount < 20; retryCount++) { + MutexGuarded value(123); + + kj::Thread thread([&]() { + delay(); + value.lockExclusive().induceSpuriousWakeupForTest(); + }); + + auto start = clock.now(); + uint m = value.when([](uint n) { + return n == 321; + }, [](uint& n) { + return 456; + }, 100 * kj::MILLISECONDS); + + KJ_EXPECT(m == 456); + + auto t = clock.now() - start; + KJ_EXPECT(t >= 100 * kj::MILLISECONDS, t / kj::MILLISECONDS); + // Provide a large margin of error here because some operating systems (e.g. Windows) can have + // long timeslices (13ms) and won't schedule more precisely than a timeslice. + if (t <= 120 * kj::MILLISECONDS) { + return; + } + } + KJ_FAIL_ASSERT("time not within expected bounds even after retries"); +} + +KJ_TEST("wait()s wake each other") { + MutexGuarded value(0); + + { + kj::Thread thread([&]() { + auto lock = value.lockExclusive(); + ++*lock; + lock.wait([](uint value) { return value == 2; }); + ++*lock; + lock.wait([](uint value) { return value == 4; }); + }); + + { + auto lock = value.lockExclusive(); + lock.wait([](uint value) { return value == 1; }); + ++*lock; + lock.wait([](uint value) { return value == 3; }); + ++*lock; + } + } +} TEST(Mutex, Lazy) { Lazy lazy; @@ -221,5 +524,404 @@ TEST(Mutex, LazyException) { #endif } +class OnlyTouchUnderLock { +public: + OnlyTouchUnderLock(): ptr(nullptr) {} + OnlyTouchUnderLock(MutexGuarded& ref): ptr(&ref) { + ptr->getAlreadyLockedExclusive()++; + } + OnlyTouchUnderLock(OnlyTouchUnderLock&& other): ptr(other.ptr) { + other.ptr = nullptr; + if (ptr) { + // Just verify it's locked. Don't increment because different compilers may or may not + // elide moves. + ptr->getAlreadyLockedExclusive(); + } + } + OnlyTouchUnderLock& operator=(OnlyTouchUnderLock&& other) { + if (ptr) { + ptr->getAlreadyLockedExclusive()++; + } + ptr = other.ptr; + other.ptr = nullptr; + if (ptr) { + // Just verify it's locked. Don't increment because different compilers may or may not + // elide moves. 
+ ptr->getAlreadyLockedExclusive(); + } + return *this; + } + ~OnlyTouchUnderLock() noexcept(false) { + if (ptr != nullptr) { + ptr->getAlreadyLockedExclusive()++; + } + } + + void frob() { + ptr->getAlreadyLockedExclusive()++; + } + +private: + MutexGuarded* ptr; +}; + +KJ_TEST("ExternalMutexGuarded destroy after release") { + MutexGuarded guarded(0); + + { + ExternalMutexGuarded ext; + + { + auto lock = guarded.lockExclusive(); + ext.set(lock, guarded); + KJ_EXPECT(*lock == 1, *lock); + ext.get(lock).frob(); + KJ_EXPECT(*lock == 2, *lock); + } + + { + auto lock = guarded.lockExclusive(); + auto released = ext.release(lock); + KJ_EXPECT(*lock == 2, *lock); + released.frob(); + KJ_EXPECT(*lock == 3, *lock); + } + } + + { + auto lock = guarded.lockExclusive(); + KJ_EXPECT(*lock == 4, *lock); + } +} + +KJ_TEST("ExternalMutexGuarded destroy without release") { + MutexGuarded guarded(0); + + { + ExternalMutexGuarded ext; + + { + auto lock = guarded.lockExclusive(); + ext.set(lock, guarded); + KJ_EXPECT(*lock == 1); + ext.get(lock).frob(); + KJ_EXPECT(*lock == 2); + } + } + + { + auto lock = guarded.lockExclusive(); + KJ_EXPECT(*lock == 3); + } +} + +KJ_TEST("condvar wait with flapping predicate") { + // This used to deadlock under some implementations due to a wait() checking its own predicate + // as part of unlock()ing the mutex. Adding `waiterToSkip` fixed this (and also eliminated a + // redundant call to the predicate). + + MutexGuarded guarded(0); + + Thread thread([&]() { + delay(); + *guarded.lockExclusive() = 1; + }); + + { + auto lock = guarded.lockExclusive(); + bool flap = true; + lock.wait([&](uint i) { + flap = !flap; + return i == 1 || flap; + }); + } +} + +#if KJ_TRACK_LOCK_BLOCKING +#if !__GLIBC_PREREQ(2, 30) +#ifndef SYS_gettid +#error SYS_gettid is unavailable on this system +#endif + +#define gettid() ((pid_t)syscall(SYS_gettid)) +#endif + +KJ_TEST("tracking blocking on mutex acquisition") { + // SIGEV_THREAD is supposed to be "private" to the pthreads implementation, but, as + // usual, the higher-level POSIX API that we're supposed to use sucks: the "handler" runs on + // some other thread, which means the stack trace it prints won't be useful. + // + // So, we cheat and work around libc. + MutexGuarded foo(5); + auto lock = foo.lockExclusive(); + + struct BlockDetected { + volatile bool blockedOnMutexAcquisition; + SourceLocation blockLocation; + } blockingInfo = {}; + + struct sigaction handler; + memset(&handler, 0, sizeof(handler)); + handler.sa_sigaction = [](int, siginfo_t* info, void*) { + auto& blockage = *reinterpret_cast(info->si_value.sival_ptr); + KJ_IF_MAYBE(r, blockedReason()) { + KJ_SWITCH_ONEOF(*r) { + KJ_CASE_ONEOF(b, BlockedOnMutexAcquisition) { + blockage.blockedOnMutexAcquisition = true; + blockage.blockLocation = b.origin; + } + KJ_CASE_ONEOF_DEFAULT {} + } + } + }; + handler.sa_flags = SA_SIGINFO | SA_RESTART; + + sigaction(SIGINT, &handler, nullptr); + + timer_t timer; + struct sigevent event; + memset(&event, 0, sizeof(event)); + event.sigev_notify = SIGEV_THREAD_ID; + event.sigev_signo = SIGINT; + event.sigev_value.sival_ptr = &blockingInfo; + KJ_SYSCALL(event._sigev_un._tid = gettid()); + KJ_SYSCALL(timer_create(CLOCK_MONOTONIC, &event, &timer)); + + kj::Duration timeout = 50 * MILLISECONDS; + struct itimerspec spec; + memset(&spec, 0, sizeof(spec)); + spec.it_value.tv_sec = timeout / kj::SECONDS; + spec.it_value.tv_nsec = timeout % kj::SECONDS / kj::NANOSECONDS; + // We can't use KJ_SYSCALL() because it is not async-signal-safe. 
+ KJ_REQUIRE(-1 != timer_settime(timer, 0, &spec, nullptr)); + + kj::SourceLocation expectedBlockLocation; + KJ_REQUIRE(foo.lockSharedWithTimeout(100 * MILLISECONDS, expectedBlockLocation) == nullptr); + + KJ_EXPECT(blockingInfo.blockedOnMutexAcquisition); + KJ_EXPECT(blockingInfo.blockLocation == expectedBlockLocation); +} + +KJ_TEST("tracking blocked on CondVar::wait") { + // SIGEV_THREAD is supposed to be "private" to the pthreads implementation, but, as + // usual, the higher-level POSIX API that we're supposed to use sucks: the "handler" runs on + // some other thread, which means the stack trace it prints won't be useful. + // + // So, we cheat and work around libc. + MutexGuarded foo(5); + auto lock = foo.lockExclusive(); + + struct BlockDetected { + volatile bool blockedOnCondVar; + SourceLocation blockLocation; + } blockingInfo = {}; + + struct sigaction handler; + memset(&handler, 0, sizeof(handler)); + handler.sa_sigaction = [](int, siginfo_t* info, void*) { + auto& blockage = *reinterpret_cast(info->si_value.sival_ptr); + KJ_IF_MAYBE(r, blockedReason()) { + KJ_SWITCH_ONEOF(*r) { + KJ_CASE_ONEOF(b, BlockedOnCondVarWait) { + blockage.blockedOnCondVar = true; + blockage.blockLocation = b.origin; + } + KJ_CASE_ONEOF_DEFAULT {} + } + } + }; + handler.sa_flags = SA_SIGINFO | SA_RESTART; + + sigaction(SIGINT, &handler, nullptr); + + timer_t timer; + struct sigevent event; + memset(&event, 0, sizeof(event)); + event.sigev_notify = SIGEV_THREAD_ID; + event.sigev_signo = SIGINT; + event.sigev_value.sival_ptr = &blockingInfo; + KJ_SYSCALL(event._sigev_un._tid = gettid()); + KJ_SYSCALL(timer_create(CLOCK_MONOTONIC, &event, &timer)); + + kj::Duration timeout = 50 * MILLISECONDS; + struct itimerspec spec; + memset(&spec, 0, sizeof(spec)); + spec.it_value.tv_sec = timeout / kj::SECONDS; + spec.it_value.tv_nsec = timeout % kj::SECONDS / kj::NANOSECONDS; + // We can't use KJ_SYSCALL() because it is not async-signal-safe. + KJ_REQUIRE(-1 != timer_settime(timer, 0, &spec, nullptr)); + + SourceLocation waitLocation; + + lock.wait([](const int& value) { + return false; + }, 100 * MILLISECONDS, waitLocation); + + KJ_EXPECT(blockingInfo.blockedOnCondVar); + KJ_EXPECT(blockingInfo.blockLocation == waitLocation); +} + +KJ_TEST("tracking blocked on Once::init") { + // SIGEV_THREAD is supposed to be "private" to the pthreads implementation, but, as + // usual, the higher-level POSIX API that we're supposed to use sucks: the "handler" runs on + // some other thread, which means the stack trace it prints won't be useful. + // + // So, we cheat and work around libc. 
+ struct BlockDetected { + volatile bool blockedOnOnceInit; + SourceLocation blockLocation; + } blockingInfo = {}; + + struct sigaction handler; + memset(&handler, 0, sizeof(handler)); + handler.sa_sigaction = [](int, siginfo_t* info, void*) { + auto& blockage = *reinterpret_cast(info->si_value.sival_ptr); + KJ_IF_MAYBE(r, blockedReason()) { + KJ_SWITCH_ONEOF(*r) { + KJ_CASE_ONEOF(b, BlockedOnOnceInit) { + blockage.blockedOnOnceInit = true; + blockage.blockLocation = b.origin; + } + KJ_CASE_ONEOF_DEFAULT {} + } + } + }; + handler.sa_flags = SA_SIGINFO | SA_RESTART; + + sigaction(SIGINT, &handler, nullptr); + + timer_t timer; + struct sigevent event; + memset(&event, 0, sizeof(event)); + event.sigev_notify = SIGEV_THREAD_ID; + event.sigev_signo = SIGINT; + event.sigev_value.sival_ptr = &blockingInfo; + KJ_SYSCALL(event._sigev_un._tid = gettid()); + KJ_SYSCALL(timer_create(CLOCK_MONOTONIC, &event, &timer)); + + Lazy once; + MutexGuarded onceInitializing(false); + + Thread backgroundInit([&] { + once.get([&](SpaceFor& x) { + *onceInitializing.lockExclusive() = true; + usleep(100 * 1000); // 100 ms + return x.construct(5); + }); + }); + + kj::Duration timeout = 50 * MILLISECONDS; + struct itimerspec spec; + memset(&spec, 0, sizeof(spec)); + spec.it_value.tv_sec = timeout / kj::SECONDS; + spec.it_value.tv_nsec = timeout % kj::SECONDS / kj::NANOSECONDS; + // We can't use KJ_SYSCALL() because it is not async-signal-safe. + KJ_REQUIRE(-1 != timer_settime(timer, 0, &spec, nullptr)); + + kj::SourceLocation onceInitializingBlocked; + + onceInitializing.lockExclusive().wait([](const bool& initializing) { + return initializing; + }); + + once.get([](SpaceFor& x) { + return x.construct(5); + }, onceInitializingBlocked); + + KJ_EXPECT(blockingInfo.blockedOnOnceInit); + KJ_EXPECT(blockingInfo.blockLocation == onceInitializingBlocked); +} + +#if KJ_SAVE_ACQUIRED_LOCK_INFO +KJ_TEST("get location of exclusive mutex") { + _::Mutex mutex; + kj::SourceLocation lockAcquisition; + mutex.lock(_::Mutex::EXCLUSIVE, nullptr, lockAcquisition); + KJ_DEFER(mutex.unlock(_::Mutex::EXCLUSIVE)); + + const auto& lockedInfo = mutex.lockedInfo(); + const auto& lockInfo = lockedInfo.get<_::HoldingExclusively>(); + EXPECT_EQ(gettid(), lockInfo.threadHoldingLock()); + KJ_EXPECT(lockInfo.lockAcquiredAt() == lockAcquisition); +} + +KJ_TEST("get location of shared mutex") { + _::Mutex mutex; + kj::SourceLocation lockLocation; + mutex.lock(_::Mutex::SHARED, nullptr, lockLocation); + KJ_DEFER(mutex.unlock(_::Mutex::SHARED)); + + const auto& lockedInfo = mutex.lockedInfo(); + const auto& lockInfo = lockedInfo.get<_::HoldingShared>(); + KJ_EXPECT(lockInfo.lockAcquiredAt() == lockLocation); +} +#endif + +#endif + +#ifdef KJ_CONTENTION_WARNING_THRESHOLD +KJ_TEST("make sure contended mutex warns") { + class Expectation final: public ExceptionCallback { + public: + Expectation(LogSeverity severity, StringPtr substring) : + severity(severity), substring(substring), seen(false) {} + + void logMessage(LogSeverity severity, const char* file, int line, int contextDepth, + String&& text) override { + if (!seen && severity == this->severity) { + if (_::hasSubstring(text, substring)) { + // Match. Ignore it. + seen = true; + return; + } + } + + // Pass up the chain. 
+ ExceptionCallback::logMessage(severity, file, line, contextDepth, kj::mv(text)); + } + + bool hasSeen() const { + return seen; + } + + private: + LogSeverity severity; + StringPtr substring; + bool seen; + UnwindDetector unwindDetector; + }; + + _::Mutex mutex; + LockSourceLocation exclusiveLockLocation; + mutex.lock(_::Mutex::EXCLUSIVE, nullptr, exclusiveLockLocation); + + bool seenContendedLockLog = false; + + auto threads = kj::heapArrayBuilder>(KJ_CONTENTION_WARNING_THRESHOLD); + for (auto i: kj::zeroTo(KJ_CONTENTION_WARNING_THRESHOLD)) { + (void)i; + threads.add(kj::heap([&mutex, &seenContendedLockLog]() { + Expectation expectation(LogSeverity::WARNING, "Acquired contended lock"); + LockSourceLocation sharedLockLocation; + mutex.lock(_::Mutex::SHARED, nullptr, sharedLockLocation); + seenContendedLockLog = seenContendedLockLog || expectation.hasSeen(); + mutex.unlock(_::Mutex::SHARED); + })); + } + + while (mutex.numReadersWaitingForTest() < KJ_CONTENTION_WARNING_THRESHOLD) { + usleep(5 * kj::MILLISECONDS / kj::MICROSECONDS); + } + + { + KJ_EXPECT_LOG(WARNING, "excessively many readers were waiting on this lock"); + mutex.unlock(_::Mutex::EXCLUSIVE); + } + + threads.clear(); + + KJ_ASSERT(seenContendedLockLog); +} +#endif } // namespace } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/mutex.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/mutex.c++ index 66546798de5..63e4f86793a 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/mutex.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/mutex.c++ @@ -19,15 +19,18 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -#if _WIN32 -#define WIN32_LEAN_AND_MEAN 1 // lolz -#define WINVER 0x0600 -#define _WIN32_WINNT 0x0600 +#if _WIN32 || __CYGWIN__ +#include "win32-api-version.h" #endif #include "mutex.h" #include "debug.h" +#if !_WIN32 && !__CYGWIN__ +#include +#include +#endif + #if KJ_USE_FUTEX #include #include @@ -36,7 +39,13 @@ #ifndef SYS_futex // Missing on Android/Bionic. +#ifdef __NR_futex #define SYS_futex __NR_futex +#elif defined(SYS_futex_time64) +#define SYS_futex SYS_futex_time64 +#else +#error "Need working SYS_futex" +#endif #endif #ifndef FUTEX_WAIT_PRIVATE @@ -45,30 +54,182 @@ #define FUTEX_WAKE_PRIVATE FUTEX_WAKE #endif -#elif _WIN32 +#elif _WIN32 || __CYGWIN__ #include #endif namespace kj { +#if KJ_TRACK_LOCK_BLOCKING +static thread_local const BlockedOnReason* tlsBlockReason __attribute((tls_model("initial-exec"))); +// The initial-exec model ensures that even if this code is part of a shared library built PIC, then +// we still place this variable in the appropriate ELF section so that __tls_get_addr is avoided. +// It's unclear if __tls_get_addr is still not async signal safe in glibc. The only negative +// downside of this approach is that a shared library built with kj & lock tracking will fail if +// dlopen'ed which isn't an intended use-case for the initial implementation. 
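For readers unfamiliar with the lock-blocking instrumentation exercised above: when KJ_TRACK_LOCK_BLOCKING is enabled, each thread records what it is currently blocked on, and blockedReason() reads that record for the calling thread. A sketch of the consuming side, closely following the signal-handler tests; the helper and struct names are invented, and a real handler should only record data (as here), since logging is not async-signal-safe:

    #if KJ_TRACK_LOCK_BLOCKING
    struct BlockReport {
      bool blocked = false;
      kj::SourceLocation where;
    };

    // Ask the *current* thread why it is blocked, if it is. In the tests this
    // runs inside a timer signal handler delivered to the blocked thread.
    void captureBlockReason(BlockReport& out) {
      KJ_IF_MAYBE(reason, kj::blockedReason()) {
        KJ_SWITCH_ONEOF(*reason) {
          KJ_CASE_ONEOF(m, kj::BlockedOnMutexAcquisition) {
            out.blocked = true;
            out.where = m.origin;
          }
          KJ_CASE_ONEOF(c, kj::BlockedOnCondVarWait) {
            out.blocked = true;
            out.where = c.origin;
          }
          KJ_CASE_ONEOF(o, kj::BlockedOnOnceInit) {
            out.blocked = true;
            out.where = o.origin;
          }
          KJ_CASE_ONEOF_DEFAULT {}
        }
      }
    }
    #endif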
+ +Maybe blockedReason() noexcept { + if (tlsBlockReason == nullptr) { + return nullptr; + } + return *tlsBlockReason; +} + +static void setCurrentThreadIsWaitingFor(const BlockedOnReason* meta) { + tlsBlockReason = meta; +} + +static void setCurrentThreadIsNoLongerWaiting() { + tlsBlockReason = nullptr; +} +#elif KJ_USE_FUTEX +struct BlockedOnMutexAcquisition { + constexpr BlockedOnMutexAcquisition(const _::Mutex& mutex, LockSourceLocationArg) {} +}; + +struct BlockedOnCondVarWait { + constexpr BlockedOnCondVarWait(const _::Mutex& mutex, const void *waiter, + LockSourceLocationArg) {} +}; + +struct BlockedOnOnceInit { + constexpr BlockedOnOnceInit(const _::Once& once, LockSourceLocationArg) {} +}; + +struct BlockedOnReason { + constexpr BlockedOnReason(const BlockedOnMutexAcquisition&) {} + constexpr BlockedOnReason(const BlockedOnCondVarWait&) {} + constexpr BlockedOnReason(const BlockedOnOnceInit&) {} +}; + +static void setCurrentThreadIsWaitingFor(const BlockedOnReason* meta) {} +static void setCurrentThreadIsNoLongerWaiting() {} +#endif + namespace _ { // private +#if KJ_USE_FUTEX +constexpr uint Mutex::EXCLUSIVE_HELD; +constexpr uint Mutex::EXCLUSIVE_REQUESTED; +constexpr uint Mutex::SHARED_COUNT_MASK; +#endif + +inline void Mutex::addWaiter(Waiter& waiter) { +#ifdef KJ_DEBUG + assertLockedByCaller(EXCLUSIVE); +#endif + *waitersTail = waiter; + waitersTail = &waiter.next; +} +inline void Mutex::removeWaiter(Waiter& waiter) { +#ifdef KJ_DEBUG + assertLockedByCaller(EXCLUSIVE); +#endif + *waiter.prev = waiter.next; + KJ_IF_MAYBE(next, waiter.next) { + next->prev = waiter.prev; + } else { + KJ_DASSERT(waitersTail == &waiter.next); + waitersTail = waiter.prev; + } +} + +bool Mutex::checkPredicate(Waiter& waiter) { + // Run the predicate from a thread other than the waiting thread, returning true if it's time to + // signal the waiting thread. This is not only when the predicate passes, but also when it + // throws, in which case we want to propagate the exception to the waiting thread. + + if (waiter.exception != nullptr) return true; // don't run again after an exception + + bool result = false; + KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { + result = waiter.predicate.check(); + })) { + // Exception thown. 
+ result = true; + waiter.exception = kj::heap(kj::mv(*exception)); + }; + return result; +} + +#if !_WIN32 && !__CYGWIN__ +namespace { + +TimePoint toTimePoint(struct timespec ts) { + return kj::origin() + ts.tv_sec * kj::SECONDS + ts.tv_nsec * kj::NANOSECONDS; +} +TimePoint now() { + struct timespec now; + KJ_SYSCALL(clock_gettime(CLOCK_MONOTONIC, &now)); + return toTimePoint(now); +} +struct timespec toRelativeTimespec(Duration timeout) { + struct timespec ts; + ts.tv_sec = timeout / kj::SECONDS; + ts.tv_nsec = timeout % kj::SECONDS / kj::NANOSECONDS; + return ts; +} +struct timespec toAbsoluteTimespec(TimePoint time) { + return toRelativeTimespec(time - kj::origin()); +} + +} // namespace +#endif + #if KJ_USE_FUTEX // ======================================================================================= // Futex-based implementation (Linux-only) +#if KJ_SAVE_ACQUIRED_LOCK_INFO +#if !__GLIBC_PREREQ(2, 30) +#ifndef SYS_gettid +#error SYS_gettid is unavailable on this system +#endif + +#define gettid() ((pid_t)syscall(SYS_gettid)) +#endif + +static thread_local pid_t tlsTid = gettid(); +#define TRACK_ACQUIRED_TID() tlsTid + +Mutex::AcquiredMetadata Mutex::lockedInfo() const { + auto state = __atomic_load_n(&futex, __ATOMIC_RELAXED); + auto tid = lockedExclusivelyByThread; + auto location = lockAcquiredLocation; + + if (state & EXCLUSIVE_HELD) { + return HoldingExclusively{tid, location}; + } else { + return HoldingShared{location}; + } +} + +#else +#define TRACK_ACQUIRED_TID() 0 +#endif + Mutex::Mutex(): futex(0) {} Mutex::~Mutex() { // This will crash anyway, might as well crash with a nice error message. KJ_ASSERT(futex == 0, "Mutex destroyed while locked.") { break; } } -void Mutex::lock(Exclusivity exclusivity) { +bool Mutex::lock(Exclusivity exclusivity, Maybe timeout, LockSourceLocationArg location) { + BlockedOnReason blockReason = BlockedOnMutexAcquisition{*this, location}; + KJ_DEFER(setCurrentThreadIsNoLongerWaiting()); + + auto spec = timeout.map([](Duration d) { return toRelativeTimespec(d); }); + struct timespec* specp = nullptr; + KJ_IF_MAYBE(s, spec) { + specp = s; + } + switch (exclusivity) { case EXCLUSIVE: for (;;) { uint state = 0; if (KJ_LIKELY(__atomic_compare_exchange_n(&futex, &state, EXCLUSIVE_HELD, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))) { + // Acquired. break; } @@ -84,39 +245,112 @@ void Mutex::lock(Exclusivity exclusivity) { state |= EXCLUSIVE_REQUESTED; } - syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, state, NULL, NULL, 0); + setCurrentThreadIsWaitingFor(&blockReason); + + auto result = syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, state, specp, nullptr, 0); + if (result < 0) { + if (errno == ETIMEDOUT) { + setCurrentThreadIsNoLongerWaiting(); + // We timed out, we can't remove the exclusive request flag (since others might be waiting) + // so we just return false. + return false; + } + } } + acquiredExclusive(TRACK_ACQUIRED_TID(), location); +#if KJ_CONTENTION_WARNING_THRESHOLD + printContendedReader = false; +#endif break; case SHARED: { +#if KJ_CONTENTION_WARNING_THRESHOLD + kj::Maybe contentionWaitStart; +#endif + uint state = __atomic_add_fetch(&futex, 1, __ATOMIC_ACQUIRE); + for (;;) { if (KJ_LIKELY((state & EXCLUSIVE_HELD) == 0)) { // Acquired. break; } +#if KJ_CONTENTION_WARNING_THRESHOLD + if (contentionWaitStart == nullptr) { + // We could have the exclusive mutex tell us how long it was holding the lock. That would + // be the nicest. However, I'm hesitant to bloat the structure. 
I suspect having a reader + // tell us how long it was waiting for is probably a good proxy. + contentionWaitStart = kj::systemPreciseMonotonicClock().now(); + } +#endif + + setCurrentThreadIsWaitingFor(&blockReason); + // The mutex is exclusively locked by another thread. Since we incremented the counter // already, we just have to wait for it to be unlocked. - syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, state, NULL, NULL, 0); + auto result = syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, state, specp, nullptr, 0); + if (result < 0) { + // If we timeout though, we need to signal that we're not waiting anymore. + if (errno == ETIMEDOUT) { + setCurrentThreadIsNoLongerWaiting(); + state = __atomic_sub_fetch(&futex, 1, __ATOMIC_RELAXED); + + // We may have unlocked since we timed out. So act like we just unlocked the mutex + // and maybe send a wait signal if needed. See Mutex::unlock SHARED case. + if (KJ_UNLIKELY(state == EXCLUSIVE_REQUESTED)) { + if (__atomic_compare_exchange_n( + &futex, &state, 0, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { + // Wake all exclusive waiters. We have to wake all of them because one of them will + // grab the lock while the others will re-establish the exclusive-requested bit. + syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); + } + } + return false; + } + } state = __atomic_load_n(&futex, __ATOMIC_ACQUIRE); } + +#ifdef KJ_CONTENTION_WARNING_THRESHOLD + KJ_IF_MAYBE(start, contentionWaitStart) { + if (__atomic_load_n(&printContendedReader, __ATOMIC_RELAXED)) { + // Double-checked lock avoids the CPU needing to acquire the lock in most cases. + if (__atomic_exchange_n(&printContendedReader, false, __ATOMIC_RELAXED)) { + auto contentionDuration = kj::systemPreciseMonotonicClock().now() - *start; + KJ_LOG(WARNING, "Acquired contended lock", location, contentionDuration, + kj::getStackTrace()); + } + } + } +#endif + + // We just want to record the lock being acquired somewhere but the specific location doesn't + // matter. This does mean that race conditions could occur where a thread might read this + // inconsistently (e.g. filename from 1 lock & function from another). This currently is just + // meant to be a debugging aid for manual analysis so it's OK for that purpose. If it's ever + // required for this to be used for anything else, then this should probably be changed to + // use an additional atomic variable that can ensure only one writer updates this. Or use the + // futex variable to ensure that this is only done for the first one to acquire the lock, + // although there may be thundering herd problems with that whereby there's a long wallclock + // time between when the lock is acquired and when the location is updated (since the first + // locker isn't really guaranteed to be the first one unlocked). + acquiredShared(location); + break; } } + return true; } -struct Mutex::Waiter { - kj::Maybe next; - kj::Maybe* prev; - Predicate& predicate; - uint futex; -}; - -void Mutex::unlock(Exclusivity exclusivity) { +void Mutex::unlock(Exclusivity exclusivity, Waiter* waiterToSkip) { switch (exclusivity) { case EXCLUSIVE: { KJ_DASSERT(futex & EXCLUSIVE_HELD, "Unlocked a mutex that wasn't locked."); +#ifdef KJ_CONTENTION_WARNING_THRESHOLD + auto acquiredLocation = releasingExclusive(); +#endif + // First check if there are any conditional waiters. Note we only do this when unlocking an // exclusive lock since under a shared lock the state couldn't have changed. 
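For readers unfamiliar with the underlying primitive, here is a minimal, Linux-only sketch (not part of the patch; the helper names are ours) of the futex wait/wake protocol that the lock and unlock loops above are built on. FUTEX_WAIT re-checks the word inside the kernel and returns immediately (EAGAIN) if it has already changed, which is what keeps the "read state, then sleep on exactly that state" loops free of lost wakeups.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <climits>

// Sleep until `word` no longer holds `expected`, a wake arrives, or a spurious wakeup occurs.
// Callers must re-check their condition in a loop, exactly as Mutex::lock() does above.
static void futexWait(unsigned* word, unsigned expected) {
  syscall(SYS_futex, word, FUTEX_WAIT_PRIVATE, expected, nullptr, nullptr, 0);
}

// Wake every thread currently sleeping on `word`.
static void futexWakeAll(unsigned* word) {
  syscall(SYS_futex, word, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0);
}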
auto nextWaiter = waitersHead; @@ -124,10 +358,31 @@ void Mutex::unlock(Exclusivity exclusivity) { KJ_IF_MAYBE(waiter, nextWaiter) { nextWaiter = waiter->next; - if (waiter->predicate.check()) { + if (waiter != waiterToSkip && checkPredicate(*waiter)) { // This waiter's predicate now evaluates true, so wake it up. - __atomic_store_n(&waiter->futex, 1, __ATOMIC_RELEASE); - syscall(SYS_futex, &waiter->futex, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0); + if (waiter->hasTimeout) { + // In this case we need to be careful to make sure the target thread isn't already + // processing a timeout, so we need to do an atomic CAS rather than just a store. + uint expected = 0; + if (__atomic_compare_exchange_n(&waiter->futex, &expected, 1, false, + __ATOMIC_RELEASE, __ATOMIC_RELAXED)) { + // Good, we set it to 1, transferring ownership of the mutex. Continue on below. + } else { + // Looks like the thread already timed out and set its own futex to 1. In that + // case it is going to try to lock the mutex itself, so we should NOT attempt an + // ownership transfer as this will deadlock. + // + // We have two options here: We can continue along the waiter list looking for + // another waiter that's ready to be signaled, or we could drop out of the list + // immediately since we know that another thread is already waiting for the lock + // and will re-evaluate the waiter queue itself when it is done. It feels cleaner + // to me to continue. + continue; + } + } else { + __atomic_store_n(&waiter->futex, 1, __ATOMIC_RELEASE); + } + syscall(SYS_futex, &waiter->futex, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); // We transferred ownership of the lock to this waiter, so we're done now. return; @@ -138,6 +393,18 @@ void Mutex::unlock(Exclusivity exclusivity) { } } +#ifdef KJ_CONTENTION_WARNING_THRESHOLD + uint readerCount; + { + uint oldState = __atomic_load_n(&futex, __ATOMIC_RELAXED); + readerCount = oldState & SHARED_COUNT_MASK; + if (readerCount >= KJ_CONTENTION_WARNING_THRESHOLD) { + // Atomic not needed because we're still holding the exclusive lock. + printContendedReader = true; + } + } +#endif + // Didn't wake any waiters, so wake normally. uint oldState = __atomic_fetch_and( &futex, ~(EXCLUSIVE_HELD | EXCLUSIVE_REQUESTED), __ATOMIC_RELEASE); @@ -147,7 +414,14 @@ void Mutex::unlock(Exclusivity exclusivity) { // the lock, and we must wake them up. If there are any exclusive waiters, we must wake // them up even if readers are waiting so that at the very least they may re-establish the // EXCLUSIVE_REQUESTED bit that we just removed. - syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0); + syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); + +#ifdef KJ_CONTENTION_WARNING_THRESHOLD + if (readerCount >= KJ_CONTENTION_WARNING_THRESHOLD) { + KJ_LOG(WARNING, "excessively many readers were waiting on this lock", readerCount, + acquiredLocation, kj::getStackTrace()); + } +#endif } break; } @@ -163,7 +437,7 @@ void Mutex::unlock(Exclusivity exclusivity) { &futex, &state, 0, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { // Wake all exclusive waiters. We have to wake all of them because one of them will // grab the lock while the others will re-establish the exclusive-requested bit. 
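The timed-waiter handshake above reduces to a single compare-and-swap: the unlocking thread and a timed-out waiter both try to move the waiter's futex word from 0 to 1, and whichever CAS succeeds owns that transition, so lock ownership is handed over (or reclaimed) exactly once. A minimal sketch of the idiom, with an illustrative helper name of our own:

// Returns true if this caller claimed the 0 -> 1 transition of `waiterFutex`.
// A plain store would not work here: the signaller and the timed-out waiter could each
// believe they "won", leading to either a lost handoff or a deadlock.
// The memory order mirrors the unlock path above (release on the signalling side).
static bool tryClaimWakeup(unsigned* waiterFutex) {
  unsigned expected = 0;
  return __atomic_compare_exchange_n(waiterFutex, &expected, 1, false,
                                     __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}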
- syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0); + syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); } } break; @@ -171,7 +445,7 @@ void Mutex::unlock(Exclusivity exclusivity) { } } -void Mutex::assertLockedByCaller(Exclusivity exclusivity) { +void Mutex::assertLockedByCaller(Exclusivity exclusivity) const { switch (exclusivity) { case EXCLUSIVE: KJ_ASSERT(futex & EXCLUSIVE_HELD, @@ -184,41 +458,120 @@ void Mutex::assertLockedByCaller(Exclusivity exclusivity) { } } -void Mutex::lockWhen(Predicate& predicate) { - lock(EXCLUSIVE); - +void Mutex::wait(Predicate& predicate, Maybe timeout, LockSourceLocationArg location) { // Add waiter to list. - Waiter waiter { nullptr, waitersTail, predicate, 0 }; - *waitersTail = waiter; - waitersTail = &waiter.next; + Waiter waiter { nullptr, waitersTail, predicate, nullptr, 0, timeout != nullptr }; + addWaiter(waiter); + BlockedOnReason blockReason = BlockedOnCondVarWait{*this, &waiter, location}; + KJ_DEFER(setCurrentThreadIsNoLongerWaiting()); + + // To guarantee that we've re-locked the mutex before scope exit, keep track of whether it is + // currently. + bool currentlyLocked = true; KJ_DEFER({ - // Remove from list. - *waiter.prev = waiter.next; - KJ_IF_MAYBE(next, waiter.next) { - next->prev = waiter.prev; - } else { - KJ_DASSERT(waitersTail == &waiter.next); - waitersTail = waiter.prev; - } + // Infinite timeout for re-obtaining the lock is on purpose because the post-condition for this + // function has to be that the lock state hasn't changed (& we have to be locked when we enter + // since that's how condvars work). + if (!currentlyLocked) lock(EXCLUSIVE, nullptr, location); + removeWaiter(waiter); }); if (!predicate.check()) { - unlock(EXCLUSIVE); + unlock(EXCLUSIVE, &waiter); + currentlyLocked = false; - // Wait for someone to set out futex to 1. - while (__atomic_load_n(&waiter.futex, __ATOMIC_ACQUIRE) == 0) { - syscall(SYS_futex, &waiter.futex, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0); + struct timespec ts; + struct timespec* tsp = nullptr; + KJ_IF_MAYBE(t, timeout) { + ts = toAbsoluteTimespec(now() + *t); + tsp = &ts; } - // Ownership of an exclusive lock was transferred to us. We can continue. + setCurrentThreadIsWaitingFor(&blockReason); + + // Wait for someone to set our futex to 1. + for (;;) { + // Note we use FUTEX_WAIT_BITSET_PRIVATE + FUTEX_BITSET_MATCH_ANY to get the same effect as + // FUTEX_WAIT_PRIVATE except that the timeout is specified as an absolute time based on + // CLOCK_MONOTONIC. Otherwise, FUTEX_WAIT_PRIVATE interprets it as a relative time, forcing + // us to recompute the time after every iteration. + KJ_SYSCALL_HANDLE_ERRORS(syscall(SYS_futex, + &waiter.futex, FUTEX_WAIT_BITSET_PRIVATE, 0, tsp, nullptr, FUTEX_BITSET_MATCH_ANY)) { + case EAGAIN: + // Indicates that the futex was already non-zero by the time the kernal looked at it. + // Not an error. + break; + case ETIMEDOUT: { + // Wait timed out. This leaves us in a bit of a pickle: Ownership of the mutex was not + // transferred to us from another thread. So, we need to lock it ourselves. But, another + // thread might be in the process of signaling us and transferring ownership. So, we + // first must atomically take control of our destiny. + KJ_ASSERT(timeout != nullptr); + uint expected = 0; + if (__atomic_compare_exchange_n(&waiter.futex, &expected, 1, false, + __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) { + // OK, we set our own futex to 1. 
That means no other thread will, and so we won't be + // receiving a mutex ownership transfer. We have to lock the mutex ourselves. + setCurrentThreadIsNoLongerWaiting(); + lock(EXCLUSIVE, nullptr, location); + currentlyLocked = true; + return; + } else { + // Oh, someone else actually did signal us, apparently. Let's move on as if the futex + // call told us so. + break; + } + } + default: + KJ_FAIL_SYSCALL("futex(FUTEX_WAIT_PRIVATE)", error); + } + + setCurrentThreadIsNoLongerWaiting(); + + if (__atomic_load_n(&waiter.futex, __ATOMIC_ACQUIRE)) { + // We received a lock ownership transfer from another thread. + currentlyLocked = true; + + // The other thread checked the predicate before the transfer. #ifdef KJ_DEBUG - assertLockedByCaller(EXCLUSIVE); + assertLockedByCaller(EXCLUSIVE); #endif + + KJ_IF_MAYBE(exception, waiter.exception) { + // The predicate threw an exception, apparently. Propagate it. + // TODO(someday): Could we somehow have this be a recoverable exception? Presumably we'd + // then want MutexGuarded::when() to skip calling the callback, but then what should it + // return, since it normally returns the callback's result? Or maybe people who disable + // exceptions just really should not write predicates that can throw. + kj::throwFatalException(kj::mv(**exception)); + } + + return; + } + } } } -void Once::runOnce(Initializer& init) { +void Mutex::induceSpuriousWakeupForTest() { + auto nextWaiter = waitersHead; + for (;;) { + KJ_IF_MAYBE(waiter, nextWaiter) { + nextWaiter = waiter->next; + syscall(SYS_futex, &waiter->futex, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); + } else { + // No more waiters. + break; + } + } +} + +uint Mutex::numReadersWaitingForTest() const { + assertLockedByCaller(EXCLUSIVE); + return futex & SHARED_COUNT_MASK; +} + +void Once::runOnce(Initializer& init, LockSourceLocationArg location) { startOver: uint state = UNINITIALIZED; if (__atomic_compare_exchange_n(&futex, &state, INITIALIZING, false, @@ -230,7 +583,7 @@ startOver: if (__atomic_exchange_n(&futex, UNINITIALIZED, __ATOMIC_RELEASE) == INITIALIZING_WITH_WAITERS) { // Someone was waiting for us to finish. - syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0); + syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); } }); @@ -239,9 +592,12 @@ startOver: if (__atomic_exchange_n(&futex, INITIALIZED, __ATOMIC_RELEASE) == INITIALIZING_WITH_WAITERS) { // Someone was waiting for us to finish. - syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0); + syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); } } else { + BlockedOnReason blockReason = BlockedOnOnceInit{*this, location}; + KJ_DEFER(setCurrentThreadIsNoLongerWaiting()); + for (;;) { if (state == INITIALIZED) { break; @@ -257,7 +613,9 @@ startOver: } // Wait for initialization. 
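Once::runOnce() is rarely called directly; kj::Lazy (declared later in mutex.h) is its usual consumer. A hedged usage sketch, assuming the standard kj::Lazy init signature of a callable taking SpaceFor<T>& and returning Own<T>; the names cachedConfig/getConfig are ours, purely illustrative:

#include <kj/mutex.h>
#include <kj/string.h>

kj::Lazy<kj::String> cachedConfig;

const kj::String& getConfig() {
  // The first caller runs the lambda under Once::runOnce(); concurrent callers block on the
  // futex (or pthread mutex / INIT_ONCE, depending on platform) until initialization
  // completes, then every caller shares the same constructed value.
  return cachedConfig.get([](kj::SpaceFor<kj::String>& space) {
    return space.construct(kj::str("expensive setup result"));
  });
}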
- syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, INITIALIZING_WITH_WAITERS, NULL, NULL, 0); + setCurrentThreadIsWaitingFor(&blockReason); + syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, INITIALIZING_WITH_WAITERS, + nullptr, nullptr, 0); state = __atomic_load_n(&futex, __ATOMIC_ACQUIRE); if (state == UNINITIALIZED) { @@ -277,12 +635,13 @@ void Once::reset() { } } -#elif _WIN32 +#elif _WIN32 || __CYGWIN__ // ======================================================================================= // Win32 implementation #define coercedSrwLock (*reinterpret_cast(&srwLock)) #define coercedInitOnce (*reinterpret_cast(&initOnce)) +#define coercedCondvar(var) (*reinterpret_cast(&var)) Mutex::Mutex() { static_assert(sizeof(SRWLOCK) == sizeof(srwLock), "SRWLOCK is not a pointer?"); @@ -290,7 +649,10 @@ Mutex::Mutex() { } Mutex::~Mutex() {} -void Mutex::lock(Exclusivity exclusivity) { +bool Mutex::lock(Exclusivity exclusivity, Maybe timeout, NoopSourceLocation) { + if (timeout != nullptr) { + KJ_UNIMPLEMENTED("Locking a mutex with a timeout is only supported on Linux."); + } switch (exclusivity) { case EXCLUSIVE: AcquireSRWLockExclusive(&coercedSrwLock); @@ -299,26 +661,157 @@ void Mutex::lock(Exclusivity exclusivity) { AcquireSRWLockShared(&coercedSrwLock); break; } + return true; } -void Mutex::unlock(Exclusivity exclusivity) { +void Mutex::wakeReadyWaiter(Waiter* waiterToSkip) { + // Look for a waiter whose predicate is now evaluating true, and wake it. We wake no more than + // one waiter because only one waiter could get the lock anyway, and once it releases that lock + // it will awake the next waiter if necessary. + + auto nextWaiter = waitersHead; + for (;;) { + KJ_IF_MAYBE(waiter, nextWaiter) { + nextWaiter = waiter->next; + + if (waiter != waiterToSkip && checkPredicate(*waiter)) { + // This waiter's predicate now evaluates true, so wake it up. It doesn't matter if we + // use Wake vs. WakeAll here since there's always only one thread waiting. + WakeConditionVariable(&coercedCondvar(waiter->condvar)); + + // We only need to wake one waiter. Note that unlike the futex-based implementation, we + // cannot "transfer ownership" of the lock to the waiter, therefore we cannot guarantee + // that the condition is still true when that waiter finally awakes. However, if the + // condition is no longer true at that point, the waiter will re-check all other + // waiters' conditions and possibly wake up any other waiter who is now ready, hence we + // still only need to wake one waiter here. + return; + } + } else { + // No more waiters. + break; + } + } +} + +void Mutex::unlock(Exclusivity exclusivity, Waiter* waiterToSkip) { switch (exclusivity) { - case EXCLUSIVE: - ReleaseSRWLockExclusive(&coercedSrwLock); + case EXCLUSIVE: { + KJ_DEFER(ReleaseSRWLockExclusive(&coercedSrwLock)); + + // Check if there are any conditional waiters. Note we only do this when unlocking an + // exclusive lock since under a shared lock the state couldn't have changed. + wakeReadyWaiter(waiterToSkip); break; + } + case SHARED: ReleaseSRWLockShared(&coercedSrwLock); break; } } -void Mutex::assertLockedByCaller(Exclusivity exclusivity) { +void Mutex::assertLockedByCaller(Exclusivity exclusivity) const { // We could use TryAcquireSRWLock*() here like we do with the pthread version. However, as of // this writing, my version of Wine (1.6.2) doesn't implement these functions and will abort if // they are called. 
Since we were only going to use them as a hacky way to check if the lock is // held for debug purposes anyway, we just don't bother. } +void Mutex::wait(Predicate& predicate, Maybe timeout, NoopSourceLocation) { + // Add waiter to list. + Waiter waiter { nullptr, waitersTail, predicate, nullptr, 0 }; + static_assert(sizeof(waiter.condvar) == sizeof(CONDITION_VARIABLE), + "CONDITION_VARIABLE is not a pointer?"); + InitializeConditionVariable(&coercedCondvar(waiter.condvar)); + + addWaiter(waiter); + KJ_DEFER(removeWaiter(waiter)); + + DWORD sleepMs; + + // Only initialized if `timeout` is non-null. + const MonotonicClock* clock = nullptr; + kj::Maybe endTime; + + KJ_IF_MAYBE(t, timeout) { + // Windows sleeps are inaccurate -- they can be longer *or shorter* than the requested amount. + // For many use cases of our API, a too-short sleep would be unacceptable. Experimentally, it + // seems like sleeps can be up to half a millisecond short, so we'll add half a millisecond + // (and then we round up, below). + *t += 500 * kj::MICROSECONDS; + + // Compute initial sleep time. + sleepMs = *t / kj::MILLISECONDS; + if (*t % kj::MILLISECONDS > 0 * kj::SECONDS) { + // We guarantee we won't wake up too early. + ++sleepMs; + } + + clock = &systemPreciseMonotonicClock(); + endTime = clock->now() + *t; + } else { + sleepMs = INFINITE; + } + + while (!predicate.check()) { + // SleepConditionVariableSRW() will temporarily release the lock, so we need to signal other + // waiters that are now ready. + wakeReadyWaiter(&waiter); + + if (SleepConditionVariableSRW(&coercedCondvar(waiter.condvar), &coercedSrwLock, sleepMs, 0)) { + // Normal result. Continue loop to check predicate. + } else { + DWORD error = GetLastError(); + if (error == ERROR_TIMEOUT) { + // Windows may have woken us up too early, so don't return yet. Instead, proceed through the + // loop and rely on our sleep time recalculation to detect if we timed out. + } else { + KJ_FAIL_WIN32("SleepConditionVariableSRW()", error); + } + } + + KJ_IF_MAYBE(exception, waiter.exception) { + // The predicate threw an exception, apparently. Propagate it. + // TODO(someday): Could we somehow have this be a recoverable exception? Presumably we'd + // then want MutexGuarded::when() to skip calling the callback, but then what should it + // return, since it normally returns the callback's result? Or maybe people who disable + // exceptions just really should not write predicates that can throw. + kj::throwFatalException(kj::mv(**exception)); + } + + // Recompute sleep time. + KJ_IF_MAYBE(e, endTime) { + auto now = clock->now(); + + if (*e > now) { + auto sleepTime = *e - now; + sleepMs = sleepTime / kj::MILLISECONDS; + if (sleepTime % kj::MILLISECONDS > 0 * kj::SECONDS) { + // We guarantee we won't wake up too early. + ++sleepMs; + } + } else { + // Oops, already timed out. + return; + } + } + } +} + +void Mutex::induceSpuriousWakeupForTest() { + auto nextWaiter = waitersHead; + for (;;) { + KJ_IF_MAYBE(waiter, nextWaiter) { + nextWaiter = waiter->next; + WakeConditionVariable(&coercedCondvar(waiter->condvar)); + } else { + // No more waiters. 
+ break; + } + } +} + static BOOL WINAPI nullInitializer(PINIT_ONCE initOnce, PVOID parameter, PVOID* context) { return true; } @@ -332,7 +825,7 @@ Once::Once(bool startInitialized) { } Once::~Once() {} -void Once::runOnce(Initializer& init) { +void Once::runOnce(Initializer& init, NoopSourceLocation) { BOOL needInit; while (!InitOnceBeginInitialize(&coercedInitOnce, 0, &needInit, nullptr)) { // Init was occurring in another thread, but then failed with an exception. Retry. @@ -377,14 +870,15 @@ void Once::reset() { } \ } -Mutex::Mutex() { - KJ_PTHREAD_CALL(pthread_rwlock_init(&mutex, nullptr)); -} +Mutex::Mutex(): mutex(PTHREAD_RWLOCK_INITIALIZER) {} Mutex::~Mutex() { KJ_PTHREAD_CLEANUP(pthread_rwlock_destroy(&mutex)); } -void Mutex::lock(Exclusivity exclusivity) { +bool Mutex::lock(Exclusivity exclusivity, Maybe timeout, NoopSourceLocation) { + if (timeout != nullptr) { + KJ_UNIMPLEMENTED("Locking a mutex with a timeout is only supported on Linux."); + } switch (exclusivity) { case EXCLUSIVE: KJ_PTHREAD_CALL(pthread_rwlock_wrlock(&mutex)); @@ -393,13 +887,44 @@ void Mutex::lock(Exclusivity exclusivity) { KJ_PTHREAD_CALL(pthread_rwlock_rdlock(&mutex)); break; } + return true; } -void Mutex::unlock(Exclusivity exclusivity) { - KJ_PTHREAD_CALL(pthread_rwlock_unlock(&mutex)); +void Mutex::unlock(Exclusivity exclusivity, Waiter* waiterToSkip) { + KJ_DEFER(KJ_PTHREAD_CALL(pthread_rwlock_unlock(&mutex))); + + if (exclusivity == EXCLUSIVE) { + // Check if there are any conditional waiters. Note we only do this when unlocking an + // exclusive lock since under a shared lock the state couldn't have changed. + auto nextWaiter = waitersHead; + for (;;) { + KJ_IF_MAYBE(waiter, nextWaiter) { + nextWaiter = waiter->next; + + if (waiter != waiterToSkip && checkPredicate(*waiter)) { + // This waiter's predicate now evaluates true, so wake it up. It doesn't matter if we + // use _signal() vs. _broadcast() here since there's always only one thread waiting. + KJ_PTHREAD_CALL(pthread_mutex_lock(&waiter->stupidMutex)); + KJ_PTHREAD_CALL(pthread_cond_signal(&waiter->condvar)); + KJ_PTHREAD_CALL(pthread_mutex_unlock(&waiter->stupidMutex)); + + // We only need to wake one waiter. Note that unlike the futex-based implementation, we + // cannot "transfer ownership" of the lock to the waiter, therefore we cannot guarantee + // that the condition is still true when that waiter finally awakes. However, if the + // condition is no longer true at that point, the waiter will re-check all other waiters' + // conditions and possibly wake up any other waiter who is now ready, hence we still only + // need to wake one waiter here. + break; + } + } else { + // No more waiters. + break; + } + } + } } -void Mutex::assertLockedByCaller(Exclusivity exclusivity) { +void Mutex::assertLockedByCaller(Exclusivity exclusivity) const { switch (exclusivity) { case EXCLUSIVE: // A read lock should fail if the mutex is already held for writing. @@ -419,14 +944,127 @@ void Mutex::assertLockedByCaller(Exclusivity exclusivity) { } } -Once::Once(bool startInitialized): state(startInitialized ? INITIALIZED : UNINITIALIZED) { - KJ_PTHREAD_CALL(pthread_mutex_init(&mutex, nullptr)); +void Mutex::wait(Predicate& predicate, Maybe timeout, NoopSourceLocation) { + // Add waiter to list. + Waiter waiter { + nullptr, waitersTail, predicate, nullptr, + PTHREAD_COND_INITIALIZER, PTHREAD_MUTEX_INITIALIZER + }; + addWaiter(waiter); + + // To guarantee that we've re-locked the mutex before scope exit, keep track of whether it is + // currently. 
+ bool currentlyLocked = true; + KJ_DEFER({ + if (!currentlyLocked) lock(EXCLUSIVE, nullptr, NoopSourceLocation{}); + removeWaiter(waiter); + + // Destroy pthread objects. + KJ_PTHREAD_CLEANUP(pthread_mutex_destroy(&waiter.stupidMutex)); + KJ_PTHREAD_CLEANUP(pthread_cond_destroy(&waiter.condvar)); + }); + +#if !__APPLE__ + if (timeout != nullptr) { + // Oops, the default condvar uses the wall clock, which is dumb... fix it to use the monotonic + // clock. (Except not on macOS, where pthread_condattr_setclock() is unimplemented, but there's + // a bizarre pthread_cond_timedwait_relative_np() method we can use instead...) + pthread_condattr_t attr; + KJ_PTHREAD_CALL(pthread_condattr_init(&attr)); + KJ_PTHREAD_CALL(pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)); + pthread_cond_init(&waiter.condvar, &attr); + KJ_PTHREAD_CALL(pthread_condattr_destroy(&attr)); + } +#endif + + Maybe endTime = timeout.map([](Duration d) { + return toAbsoluteTimespec(now() + d); + }); + + while (!predicate.check()) { + // pthread condvars only work with basic mutexes, not rwlocks. So, we need to lock a basic + // mutex before we unlock the real mutex, and the signaling thread also needs to lock this + // mutex, in order to ensure that this thread is actually waiting on the condvar before it is + // signaled. + KJ_PTHREAD_CALL(pthread_mutex_lock(&waiter.stupidMutex)); + + // OK, now we can unlock the main mutex. + unlock(EXCLUSIVE, &waiter); + currentlyLocked = false; + + bool timedOut = false; + + // Wait for someone to signal the condvar. + KJ_IF_MAYBE(t, endTime) { +#if __APPLE__ + // On macOS, the absolute timeout can only be specified in wall time, not monotonic time, + // which means modifying the system clock will break the wait. However, macOS happens to + // provide an alternative relative-time wait function, so I guess we'll use that. It does + // require recomputing the time every iteration... + struct timespec ts = toRelativeTimespec(kj::max(toTimePoint(*t) - now(), 0 * kj::SECONDS)); + int error = pthread_cond_timedwait_relative_np(&waiter.condvar, &waiter.stupidMutex, &ts); +#else + int error = pthread_cond_timedwait(&waiter.condvar, &waiter.stupidMutex, t); +#endif + if (error != 0) { + if (error == ETIMEDOUT) { + timedOut = true; + } else { + KJ_FAIL_SYSCALL("pthread_cond_timedwait", error); + } + } + } else { + KJ_PTHREAD_CALL(pthread_cond_wait(&waiter.condvar, &waiter.stupidMutex)); + } + + // We have to be very careful about lock ordering here. We need to unlock stupidMutex before + // re-locking the main mutex, because another thread may have a lock on the main mutex already + // and be waiting for a lock on stupidMutex. Note that other thread may signal the condvar + // right after we unlock stupidMutex but before we re-lock the main mutex. That is fine, + // because we've already been signaled. + KJ_PTHREAD_CALL(pthread_mutex_unlock(&waiter.stupidMutex)); + + lock(EXCLUSIVE, nullptr, NoopSourceLocation{}); + currentlyLocked = true; + + KJ_IF_MAYBE(exception, waiter.exception) { + // The predicate threw an exception, apparently. Propagate it. + // TODO(someday): Could we somehow have this be a recoverable exception? Presumably we'd + // then want MutexGuarded::when() to skip calling the callback, but then what should it + // return, since it normally returns the callback's result? Or maybe people who disable + // exceptions just really should not write predicates that can throw. 
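The condattr dance above is worth spelling out on its own: by default pthread_cond_timedwait() measures its absolute deadline against CLOCK_REALTIME, so stepping the wall clock would lengthen or shorten waits. Binding the condvar to CLOCK_MONOTONIC avoids that; macOS lacks pthread_condattr_setclock(), hence the __APPLE__ branch that falls back to pthread_cond_timedwait_relative_np(). A standalone sketch of the same setup (error handling trimmed; not part of the patch):

#include <pthread.h>
#include <time.h>

// Initialize `cond` so that absolute timeouts are measured on the monotonic clock.
static void initMonotonicCondvar(pthread_cond_t* cond) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);
  pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  pthread_cond_init(cond, &attr);
  pthread_condattr_destroy(&attr);
}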
+ kj::throwFatalException(kj::mv(**exception)); + } + + if (timedOut) { + return; + } + } } + +void Mutex::induceSpuriousWakeupForTest() { + auto nextWaiter = waitersHead; + for (;;) { + KJ_IF_MAYBE(waiter, nextWaiter) { + nextWaiter = waiter->next; + KJ_PTHREAD_CALL(pthread_mutex_lock(&waiter->stupidMutex)); + KJ_PTHREAD_CALL(pthread_cond_signal(&waiter->condvar)); + KJ_PTHREAD_CALL(pthread_mutex_unlock(&waiter->stupidMutex)); + } else { + // No more waiters. + break; + } + } +} + +Once::Once(bool startInitialized) + : state(startInitialized ? INITIALIZED : UNINITIALIZED), + mutex(PTHREAD_MUTEX_INITIALIZER) {} Once::~Once() { KJ_PTHREAD_CLEANUP(pthread_mutex_destroy(&mutex)); } -void Once::runOnce(Initializer& init) { +void Once::runOnce(Initializer& init, NoopSourceLocation) { KJ_PTHREAD_CALL(pthread_mutex_lock(&mutex)); KJ_DEFER(KJ_PTHREAD_CALL(pthread_mutex_unlock(&mutex))); diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/mutex.h b/libs/EXTERNAL/capnproto/c++/src/kj/mutex.h index 711e0056d0d..e7b299e8116 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/mutex.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/mutex.h @@ -21,33 +21,111 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - +#include "debug.h" #include "memory.h" #include +#include "time.h" +#include "source-location.h" +#include "one-of.h" + +KJ_BEGIN_HEADER #if __linux__ && !defined(KJ_USE_FUTEX) #define KJ_USE_FUTEX 1 #endif -#if !KJ_USE_FUTEX && !_WIN32 -// On Linux we use futex. On other platforms we wrap pthreads. +#if !KJ_USE_FUTEX && !_WIN32 && !__CYGWIN__ +// We fall back to pthreads when we don't have a better platform-specific primitive. pthreads +// mutexes are bloated, though, so we like to avoid them. Hence on Linux we use futex(), and on +// Windows we use SRW locks and friends. On Cygwin we prefer the Win32 primitives both because they +// are more efficient and because I ran into problems with Cygwin's implementation of RW locks +// seeming to allow multiple threads to lock the same mutex (but I didn't investigate very +// closely). +// // TODO(someday): Write efficient low-level locking primitives for other platforms. #include #endif +// There are 3 macros controlling lock tracking: +// KJ_TRACK_LOCK_BLOCKING will set up async signal safe TLS variables that can be used to identify +// the KJ primitive blocking the current thread. +// KJ_SAVE_ACQUIRED_LOCK_INFO will allow introspection of a Mutex to get information about what is +// currently holding the lock. +// KJ_TRACK_LOCK_ACQUISITION is automatically enabled by either one of them. + +#if KJ_TRACK_LOCK_BLOCKING +// Lock tracking is required to keep track of what blocked. +#define KJ_TRACK_LOCK_ACQUISITION 1 +#endif + +#if KJ_SAVE_ACQUIRED_LOCK_INFO +#define KJ_TRACK_LOCK_ACQUISITION 1 +#include +#endif + namespace kj { +#if KJ_TRACK_LOCK_ACQUISITION +#if !KJ_USE_FUTEX +#error Lock tracking is only currently supported for futex-based mutexes. +#endif + +#if !KJ_COMPILER_SUPPORTS_SOURCE_LOCATION +#error C++20 or newer is required (or the use of clang/gcc). +#endif + +using LockSourceLocation = SourceLocation; +using LockSourceLocationArg = const SourceLocation&; +// On x86-64 the codegen is optimal if the argument has type const& for the location. However, +// since this conflicts with the optimal call signature for NoopSourceLocation, +// LockSourceLocationArg is used to conditionally select the right type without polluting the usage +// themselves. Interestingly this makes no difference on ARM. 
+// https://godbolt.org/z/q6G8ee5a3 +#else +using LockSourceLocation = NoopSourceLocation; +using LockSourceLocationArg = NoopSourceLocation; +#endif + + +class Exception; // ======================================================================================= // Private details -- public interfaces follow below. namespace _ { // private +#if KJ_SAVE_ACQUIRED_LOCK_INFO +class HoldingExclusively { + // The lock is being held in exclusive mode. +public: + constexpr HoldingExclusively(pid_t tid, const SourceLocation& location) + : heldBy(tid), acquiredAt(location) {} + + pid_t threadHoldingLock() const { return heldBy; } + const SourceLocation& lockAcquiredAt() const { return acquiredAt; } + +private: + pid_t heldBy; + SourceLocation acquiredAt; +}; + +class HoldingShared { + // The lock is being held in shared mode currently. Which threads are holding this lock open + // is unknown. +public: + constexpr HoldingShared(const SourceLocation& location) : acquiredAt(location) {} + + const SourceLocation& lockAcquiredAt() const { return acquiredAt; } + +private: + SourceLocation acquiredAt; +}; +#endif + class Mutex { // Internal implementation details. See `MutexGuarded`. + struct Waiter; + public: Mutex(); ~Mutex(); @@ -58,22 +136,45 @@ class Mutex { SHARED }; - void lock(Exclusivity exclusivity); - void unlock(Exclusivity exclusivity); + bool lock(Exclusivity exclusivity, Maybe timeout, LockSourceLocationArg location); + void unlock(Exclusivity exclusivity, Waiter* waiterToSkip = nullptr); - void assertLockedByCaller(Exclusivity exclusivity); + void assertLockedByCaller(Exclusivity exclusivity) const; // In debug mode, assert that the mutex is locked by the calling thread, or if that is // non-trivial, assert that the mutex is locked (which should be good enough to catch problems // in unit tests). In non-debug builds, do nothing. -#if KJ_USE_FUTEX // TODO(someday): Implement on pthread & win32 class Predicate { public: virtual bool check() = 0; }; - void lockWhen(Predicate& predicate); - // Lock (exclusively) when predicate.check() returns true. + void wait(Predicate& predicate, Maybe timeout, LockSourceLocationArg location); + // If predicate.check() returns false, unlock the mutex until predicate.check() returns true, or + // when the timeout (if any) expires. The mutex is always re-locked when this returns regardless + // of whether the timeout expired, and including if it throws. + // + // Requires that the mutex is already exclusively locked before calling. + + void induceSpuriousWakeupForTest(); + // Utility method for mutex-test.c++ which causes a spurious thread wakeup on all threads that + // are waiting for a wait() condition. Assuming correct implementation, all those threads + // should immediately go back to sleep. + +#if KJ_USE_FUTEX + uint numReadersWaitingForTest() const; + // The number of reader locks that are currently blocked on this lock (must be called while + // holding the writer lock). This is really only a utility method for mutex-test.c++ so it can + // validate certain invariants. +#endif + +#if KJ_SAVE_ACQUIRED_LOCK_INFO + using AcquiredMetadata = kj::OneOf; + KJ_DISABLE_TSAN AcquiredMetadata lockedInfo() const; + // Returns metadata about this lock when its held. This method is async signal safe. It must also + // be called in a state where it's guaranteed that the lock state won't be released by another + // thread. In other words this has to be called from the signal handler within the thread that's + // holding the lock. 
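Since lockedInfo() returns a OneOf, a watchdog or crash handler would typically consume it with KJ_SWITCH_ONEOF. A hedged sketch, operating on the kj-internal _::Mutex type and only meaningful when built with KJ_SAVE_ACQUIRED_LOCK_INFO; the helper name is ours, and per the comment above it must run in a context where the lock cannot be released concurrently (e.g. a signal handler on the holding thread):

#include <kj/mutex.h>
#include <sys/types.h>

// Returns the TID of the exclusive holder, or 0 if the lock is held in shared mode.
static pid_t exclusiveHolderOrZero(const kj::_::Mutex& mutex) {
  auto info = mutex.lockedInfo();
  KJ_SWITCH_ONEOF(info) {
    KJ_CASE_ONEOF(e, kj::_::HoldingExclusively) {
      return e.threadHoldingLock();   // TID recorded when the lock was acquired.
    }
    KJ_CASE_ONEOF(s, kj::_::HoldingShared) {
      return 0;                       // Shared holders are not individually tracked.
    }
  }
  KJ_UNREACHABLE;
}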
#endif private: @@ -85,21 +186,75 @@ class Mutex { // waiting for a read lock, otherwise it is the count of threads that currently hold a read // lock. +#ifdef KJ_CONTENTION_WARNING_THRESHOLD + bool printContendedReader = false; +#endif + static constexpr uint EXCLUSIVE_HELD = 1u << 31; static constexpr uint EXCLUSIVE_REQUESTED = 1u << 30; static constexpr uint SHARED_COUNT_MASK = EXCLUSIVE_REQUESTED - 1; - struct Waiter; - kj::Maybe waitersHead = nullptr; - kj::Maybe* waitersTail = &waitersHead; - // linked list of waitUntil()s; can only modify under lock - -#elif _WIN32 +#elif _WIN32 || __CYGWIN__ uintptr_t srwLock; // Actually an SRWLOCK, but don't want to #include in header. #else mutable pthread_rwlock_t mutex; #endif + +#if KJ_SAVE_ACQUIRED_LOCK_INFO + pid_t lockedExclusivelyByThread = 0; + SourceLocation lockAcquiredLocation; + + KJ_DISABLE_TSAN void acquiredExclusive(pid_t tid, const SourceLocation& location) noexcept { + lockAcquiredLocation = location; + __atomic_store_n(&lockedExclusivelyByThread, tid, __ATOMIC_RELAXED); + } + + KJ_DISABLE_TSAN void acquiredShared(const SourceLocation& location) noexcept { + lockAcquiredLocation = location; + } + + KJ_DISABLE_TSAN SourceLocation releasingExclusive() noexcept { + auto tmp = lockAcquiredLocation; + lockAcquiredLocation = SourceLocation{}; + lockedExclusivelyByThread = 0; + return tmp; + } +#else + static constexpr void acquiredExclusive(uint, LockSourceLocationArg) {} + static constexpr void acquiredShared(LockSourceLocationArg) {} + static constexpr NoopSourceLocation releasingExclusive() { return NoopSourceLocation{}; } +#endif + struct Waiter { + kj::Maybe next; + kj::Maybe* prev; + Predicate& predicate; + Maybe> exception; +#if KJ_USE_FUTEX + uint futex; + bool hasTimeout; +#elif _WIN32 || __CYGWIN__ + uintptr_t condvar; + // Actually CONDITION_VARIABLE, but don't want to #include in header. +#else + pthread_cond_t condvar; + + pthread_mutex_t stupidMutex; + // pthread condvars are only compatible with basic pthread mutexes, not rwlocks, for no + // particularly good reason. To work around this, we need an extra mutex per condvar. +#endif + }; + + kj::Maybe waitersHead = nullptr; + kj::Maybe* waitersTail = &waitersHead; + // linked list of waitUntil()s; can only modify under lock + + inline void addWaiter(Waiter& waiter); + inline void removeWaiter(Waiter& waiter); + bool checkPredicate(Waiter& waiter); +#if _WIN32 || __CYGWIN__ + void wakeReadyWaiter(Waiter* waiterToSkip); +#endif }; class Once { @@ -120,9 +275,9 @@ class Once { virtual void run() = 0; }; - void runOnce(Initializer& init); + void runOnce(Initializer& init, LockSourceLocationArg location); -#if _WIN32 // TODO(perf): Can we make this inline on win32 somehow? +#if _WIN32 || __CYGWIN__ // TODO(perf): Can we make this inline on win32 somehow? bool isInitialized() noexcept; #else @@ -152,7 +307,7 @@ class Once { INITIALIZED }; -#elif _WIN32 +#elif _WIN32 || __CYGWIN__ uintptr_t initOnce; // Actually an INIT_ONCE, but don't want to #include in header. #else @@ -210,6 +365,32 @@ class Locked { inline operator T*() { return ptr; } inline operator const T*() const { return ptr; } + template + void wait(Cond&& condition, Maybe timeout = nullptr, + LockSourceLocationArg location = {}) { + // Unlocks the lock until `condition(state)` evaluates true (where `state` is type `const T&` + // referencing the object protected by the lock). + + // We can't wait on a shared lock because the internal bookkeeping needed for a wait requires + // the protection of an exclusive lock. 
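A hedged usage sketch of the wait()/timeout surface added above; the Queue type, the field, and the five-second timeout are ours, purely illustrative:

#include <kj/mutex.h>

struct Queue {
  int pending = 0;
};

kj::MutexGuarded<Queue> queue;

void consumeOne() {
  auto lock = queue.lockExclusive();
  // wait() releases the lock while sleeping and re-acquires it before returning,
  // even when the timeout expires or the predicate throws.
  lock.wait([](const Queue& q) { return q.pending > 0; }, 5 * kj::SECONDS);
  if (lock->pending > 0) {   // May still be zero if the five-second timeout expired.
    --lock->pending;
  }
}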
+ static_assert(!isConst(), "cannot wait() on shared lock"); + + struct PredicateImpl final: public _::Mutex::Predicate { + bool check() override { + return condition(value); + } + + Cond&& condition; + const T& value; + + PredicateImpl(Cond&& condition, const T& value) + : condition(kj::fwd(condition)), value(value) {} + }; + + PredicateImpl impl(kj::fwd(condition), *ptr); + mutex->wait(impl, timeout, location); + } + private: _::Mutex* mutex; T* ptr; @@ -218,6 +399,16 @@ class Locked { template friend class MutexGuarded; + template + friend class ExternalMutexGuarded; + +#if KJ_MUTEX_TEST +public: +#endif + void induceSpuriousWakeupForTest() { mutex->induceSpuriousWakeupForTest(); } + // Utility method for mutex-test.c++ which causes a spurious thread wakeup on all threads that + // are waiting for a when() condition. Assuming correct implementation, all those threads should + // immediately go back to sleep. }; template @@ -239,7 +430,7 @@ class MutexGuarded { explicit MutexGuarded(Params&&... params); // Initialize the mutex-bounded object by passing the given parameters to its constructor. - Locked lockExclusive() const; + Locked lockExclusive(LockSourceLocationArg location = {}) const; // Exclusively locks the object and returns it. The returned `Locked` can be passed by // move, similar to `Own`. // @@ -249,10 +440,20 @@ class MutexGuarded { // be shared between threads, its methods should be const, even though locking it produces a // non-const pointer to the contained object. - Locked lockShared() const; + Locked lockShared(LockSourceLocationArg location = {}) const; // Lock the value for shared access. Multiple shared locks can be taken concurrently, but cannot // be held at the same time as a non-shared lock. + Maybe> lockExclusiveWithTimeout(Duration timeout, + LockSourceLocationArg location = {}) const; + // Attempts to exclusively lock the object. If the timeout elapses before the lock is aquired, + // this returns null. + + Maybe> lockSharedWithTimeout(Duration timeout, + LockSourceLocationArg location = {}) const; + // Attempts to lock the value for shared access. If the timeout elapses before the lock is aquired, + // this returns null. + inline const T& getWithoutLock() const { return value; } inline T& getWithoutLock() { return value; } // Escape hatch for cases where some external factor guarantees that it's safe to get the @@ -263,38 +464,31 @@ class MutexGuarded { inline T& getAlreadyLockedExclusive() const; // Like `getWithoutLock()`, but asserts that the lock is already held by the calling thread. -#if KJ_USE_FUTEX // TODO(someday): Implement on pthread & win32 template - auto when(Cond&& condition, Func&& callback) const -> decltype(callback(instance())) { + auto when(Cond&& condition, Func&& callback, Maybe timeout = nullptr, + LockSourceLocationArg location = {}) const + -> decltype(callback(instance())) { // Waits until condition(state) returns true, then calls callback(state) under lock. // // `condition`, when called, receives as its parameter a const reference to the state, which is - // locked (either shared or exclusive). `callback` returns a mutable reference, which is + // locked (either shared or exclusive). `callback` receives a mutable reference, which is // exclusively locked. // // `condition()` may be called multiple times, from multiple threads, while waiting for the // condition to become true. It may even return true once, but then be called more times. 
// It is guaranteed, though, that at the time `callback()` is finally called, `condition()` // would currently return true (assuming it is a pure function of the guarded data). + // + // If `timeout` is specified, then after the given amount of time, the callback will be called + // regardless of whether the condition is true. In this case, when `callback()` is called, + // `condition()` may in fact evaluate false, but *only* if the timeout was reached. + // + // TODO(cleanup): lock->wait() is a better interface. Can we deprecate this one? - struct PredicateImpl final: public _::Mutex::Predicate { - bool check() override { - return condition(value); - } - - Cond&& condition; - const T& value; - - PredicateImpl(Cond&& condition, const T& value) - : condition(kj::fwd(condition)), value(value) {} - }; - - PredicateImpl impl(kj::fwd(condition), value); - mutex.lockWhen(impl); - KJ_DEFER(mutex.unlock(_::Mutex::EXCLUSIVE)); + auto lock = lockExclusive(); + lock.wait(kj::fwd(condition), timeout, location); return callback(value); } -#endif private: mutable _::Mutex mutex; @@ -308,15 +502,115 @@ class MutexGuarded { static_assert(sizeof(T) < 0, "MutexGuarded's type cannot be const."); }; +template +class ExternalMutexGuarded { + // Holds a value that can only be manipulated while some other mutex is locked. + // + // The ExternalMutexGuarded lives *outside* the scope of any lock on the mutex, but ensures + // that the value it holds can only be accessed under lock by forcing the caller to present a + // lock before accessing the value. + // + // Additionally, ExternalMutexGuarded's destructor will take an exclusive lock on the mutex + // while destroying the held value, unless the value has been release()ed before hand. + // + // The type T must have the following properties (which probably all movable types satisfy): + // - T is movable. + // - Immediately after any of the following has happened, T's destructor is effectively a no-op + // (hence certainly not requiring locks): + // - The value has been default-constructed. + // - The value has been initialized by-move from a default-constructed T. + // - The value has been moved away. + // - If ExternalMutexGuarded is ever moved, then T must have a move constructor and move + // assignment operator that do not follow any pointers, therefore do not need to take a lock. + // + // Inherits from LockSourceLocation to perform an empty base class optimization when lock tracking + // is compiled out. Once the minimum C++ standard for the KJ library is C++20, this optimization + // could be replaced by a member variable with a [[no_unique_address]] annotation. +public: + ExternalMutexGuarded(LockSourceLocationArg location = {}) + : location(location) {} + + template + ExternalMutexGuarded(Locked lock, Params&&... params, LockSourceLocationArg location = {}) + : mutex(lock.mutex), + value(kj::fwd(params)...), + location(location) {} + // Construct the value in-place. This constructor requires passing ownership of the lock into + // the constructor. Normally this should be a lock that you take on the line calling the + // constructor, like: + // + // ExternalMutexGuarded foo(someMutexGuarded.lockExclusive()); + // + // The reason this constructor does not accept an lvalue reference to an existing lock is because + // this would be deadlock-prone: If an exception were thrown immediately after the constructor + // completed, then the destructor would deadlock, because the lock would still be held. 
An + // ExternalMutexGuarded must live outside the scope of any locks to avoid such a deadlock. + + ~ExternalMutexGuarded() noexcept(false) { + if (mutex != nullptr) { + mutex->lock(_::Mutex::EXCLUSIVE, nullptr, location); + KJ_DEFER(mutex->unlock(_::Mutex::EXCLUSIVE)); + value = T(); + } + } + + ExternalMutexGuarded(ExternalMutexGuarded&& other) + : mutex(other.mutex), value(kj::mv(other.value)), location(other.location) { + other.mutex = nullptr; + } + ExternalMutexGuarded& operator=(ExternalMutexGuarded&& other) { + mutex = other.mutex; + value = kj::mv(other.value); + location = other.location; + other.mutex = nullptr; + return *this; + } + + template + void set(Locked& lock, T&& newValue) { + KJ_IREQUIRE(mutex == nullptr); + mutex = lock.mutex; + value = kj::mv(newValue); + } + + template + T& get(Locked& lock) { + KJ_IREQUIRE(lock.mutex == mutex); + return value; + } + + template + const T& get(Locked& lock) const { + KJ_IREQUIRE(lock.mutex == mutex); + return value; + } + + template + T release(Locked& lock) { + // Release (move away) the value. This allows the destructor to skip locking the mutex. + KJ_IREQUIRE(lock.mutex == mutex); + T result = kj::mv(value); + mutex = nullptr; + return result; + } + +private: + _::Mutex* mutex = nullptr; + T value; + KJ_NO_UNIQUE_ADDRESS LockSourceLocation location; + // When built against C++20 (or clang >= 9.0), the overhead of this is elided. Otherwise this + // struct will be 1 byte larger than it would otherwise be. +}; + template class Lazy { // A lazily-initialized value. public: template - T& get(Func&& init); + T& get(Func&& init, LockSourceLocationArg location = {}); template - const T& get(Func&& init) const; + const T& get(Func&& init, LockSourceLocationArg location = {}) const; // The first thread to call get() will invoke the given init function to construct the value. // Other threads will block until construction completes, then return the same value. // @@ -343,17 +637,38 @@ inline MutexGuarded::MutexGuarded(Params&&... params) : value(kj::fwd(params)...) 
{} template -inline Locked MutexGuarded::lockExclusive() const { - mutex.lock(_::Mutex::EXCLUSIVE); +inline Locked MutexGuarded::lockExclusive(LockSourceLocationArg location) + const { + mutex.lock(_::Mutex::EXCLUSIVE, nullptr, location); return Locked(mutex, value); } template -inline Locked MutexGuarded::lockShared() const { - mutex.lock(_::Mutex::SHARED); +inline Locked MutexGuarded::lockShared(LockSourceLocationArg location) const { + mutex.lock(_::Mutex::SHARED, nullptr, location); return Locked(mutex, value); } +template +inline Maybe> MutexGuarded::lockExclusiveWithTimeout(Duration timeout, + LockSourceLocationArg location) const { + if (mutex.lock(_::Mutex::EXCLUSIVE, timeout, location)) { + return Locked(mutex, value); + } else { + return nullptr; + } +} + +template +inline Maybe> MutexGuarded::lockSharedWithTimeout(Duration timeout, + LockSourceLocationArg location) const { + if (mutex.lock(_::Mutex::SHARED, timeout, location)) { + return Locked(mutex, value); + } else { + return nullptr; + } +} + template inline const T& MutexGuarded::getAlreadyLockedShared() const { #ifdef KJ_DEBUG @@ -393,22 +708,71 @@ class Lazy::InitImpl: public _::Once::Initializer { template template -inline T& Lazy::get(Func&& init) { +inline T& Lazy::get(Func&& init, LockSourceLocationArg location) { if (!once.isInitialized()) { InitImpl initImpl(*this, kj::fwd(init)); - once.runOnce(initImpl); + once.runOnce(initImpl, location); } return *value; } template template -inline const T& Lazy::get(Func&& init) const { +inline const T& Lazy::get(Func&& init, LockSourceLocationArg location) const { if (!once.isInitialized()) { InitImpl initImpl(*this, kj::fwd(init)); - once.runOnce(initImpl); + once.runOnce(initImpl, location); } return *value; } +#if KJ_TRACK_LOCK_BLOCKING +struct BlockedOnMutexAcquisition { + const _::Mutex& mutex; + // The mutex we are blocked on. + + const SourceLocation& origin; + // Where did the blocking operation originate from. +}; + +struct BlockedOnCondVarWait { + const _::Mutex& mutex; + // The mutex the condition variable is using (may or may not be locked). + + const void* waiter; + // Pointer to the waiter that's being waited on. + + const SourceLocation& origin; + // Where did the blocking operation originate from. +}; + +struct BlockedOnOnceInit { + const _::Once& once; + + const SourceLocation& origin; + // Where did the blocking operation originate from. +}; + +using BlockedOnReason = OneOf; + +Maybe blockedReason() noexcept; +// Returns the information about the reason the current thread is blocked synchronously on KJ +// lock primitives. Returns nullptr if the current thread is not currently blocked on such +// primitves. This is intended to be called from a signal handler to check whether the current +// thread is blocked. Outside of a signal handler there is little value to this function. In those +// cases by definition the thread is not blocked. This includes the callable used as part of a +// condition variable since that happens after the lock is acquired & the current thread is no +// longer blocked). The utility could be made useful for non-signal handler use-cases by being able +// to fetch the pointer to the TLS variable directly (i.e. const BlockedOnReason&*). However, there +// would have to be additional changes/complexity to try make that work since you'd need +// synchronization to ensure that the memory you'd try to reference is still valid. 
The likely +// solution would be to make these mutually exclusive options where you can use either the fast +// async-safe option, or a mutex-guarded TLS variable you can get a reference to that isn't +// async-safe. That being said, maybe someone can come up with a way to make something that works +// in both use-cases which would of course be more preferable. +#endif + + } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/one-of-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/one-of-test.c++ index f49cef059c3..7c74ca7e858 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/one-of-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/one-of-test.c++ @@ -71,10 +71,10 @@ TEST(OneOf, Basic) { EXPECT_EQ("foo", var.get()); EXPECT_EQ("", var2.get()); - if (false) { + auto canCompile KJ_UNUSED = [&]() { var.allHandled<3>(); // var.allHandled<2>(); // doesn't compile - } + }; } TEST(OneOf, Copy) { @@ -170,4 +170,44 @@ TEST(OneOf, Maybe) { } } +KJ_TEST("OneOf copy/move from alternative variants") { + { + // Test const copy. + const OneOf src = 23.5f; + OneOf dst = src; + KJ_ASSERT(dst.is()); + KJ_EXPECT(dst.get() == 23.5); + } + + { + // Test case that requires non-const copy. + int arr[3] = {1, 2, 3}; + OneOf> src = ArrayPtr(arr); + OneOf> dst = src; + KJ_ASSERT(dst.is>()); + KJ_EXPECT(dst.get>().begin() == arr); + KJ_EXPECT(dst.get>().size() == kj::size(arr)); + } + + { + // Test move. + OneOf src = kj::str("foo"); + OneOf dst = kj::mv(src); + KJ_ASSERT(dst.is()); + KJ_EXPECT(dst.get() == "foo"); + + String s = kj::mv(dst).get(); + KJ_EXPECT(s == "foo"); + } + + { + // We can still have nested OneOfs. + OneOf src = 23.5f; + OneOf> dst = src; + KJ_ASSERT((dst.is>())); + KJ_ASSERT((dst.get>().is())); + KJ_EXPECT((dst.get>().get() == 23.5)); + } +} + } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/one-of.h b/libs/EXTERNAL/capnproto/c++/src/kj/one-of.h index 1a90208838c..cbed3916b87 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/one-of.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/one-of.h @@ -21,20 +21,40 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "common.h" +KJ_BEGIN_HEADER + namespace kj { namespace _ { // private -template -struct TypeIndex_ { static constexpr uint value = TypeIndex_::value; }; -template -struct TypeIndex_ { static constexpr uint value = i; }; +template class Fail, typename Key, typename... Variants> +struct TypeIndex_; +template class Fail, typename Key, typename First, typename... Rest> +struct TypeIndex_ { + static constexpr uint value = TypeIndex_::value; +}; +template class Fail, typename Key, typename... 
Rest> +struct TypeIndex_ { static constexpr uint value = i; }; +template class Fail, typename Key> +struct TypeIndex_: public Fail {}; + +template +struct OneOfFailError_ { + static_assert(i == -1, "type does not match any in OneOf"); +}; +template +struct OneOfFailZero_ { + static constexpr int value = 0; +}; + +template +struct SuccessIfNotZero { + typedef int Success; +}; +template <> +struct SuccessIfNotZero<0> {}; enum class Variants0 {}; enum class Variants1 { _variant0 }; @@ -46,6 +66,38 @@ enum class Variants6 { _variant0, _variant1, _variant2, _variant3, _variant4, _v enum class Variants7 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6 }; enum class Variants8 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, _variant7 }; +enum class Variants9 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8 }; +enum class Variants10 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9 }; +enum class Variants11 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10 }; +enum class Variants12 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10, _variant11 }; +enum class Variants13 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10, _variant11, _variant12 }; +enum class Variants14 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10, _variant11, _variant12, + _variant13 }; +enum class Variants15 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10, _variant11, _variant12, + _variant13, _variant14 }; +enum class Variants16 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10, _variant11, _variant12, + _variant13, _variant14, _variant15 }; +enum class Variants17 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10, _variant11, _variant12, + _variant13, _variant14, _variant15, _variant16 }; +enum class Variants18 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10, _variant11, _variant12, + _variant13, _variant14, _variant15, _variant16, _variant17 }; +enum class Variants19 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10, _variant11, _variant12, + _variant13, _variant14, _variant15, _variant16, _variant17, _variant18 }; +enum class Variants20 { _variant0, _variant1, _variant2, _variant3, _variant4, _variant5, _variant6, + _variant7, _variant8, _variant9, _variant10, _variant11, _variant12, + _variant13, _variant14, _variant15, _variant16, _variant17, _variant18, + _variant19 }; template struct Variants_; template <> struct Variants_<0> { typedef Variants0 Type; }; @@ -57,6 +109,18 @@ template <> struct Variants_<5> { typedef Variants5 Type; }; template <> struct Variants_<6> { typedef Variants6 Type; }; template <> struct Variants_<7> { typedef Variants7 Type; }; template <> struct Variants_<8> { typedef Variants8 Type; }; +template <> struct Variants_<9> { typedef Variants9 Type; }; +template 
<> struct Variants_<10> { typedef Variants10 Type; }; +template <> struct Variants_<11> { typedef Variants11 Type; }; +template <> struct Variants_<12> { typedef Variants12 Type; }; +template <> struct Variants_<13> { typedef Variants13 Type; }; +template <> struct Variants_<14> { typedef Variants14 Type; }; +template <> struct Variants_<15> { typedef Variants15 Type; }; +template <> struct Variants_<16> { typedef Variants16 Type; }; +template <> struct Variants_<17> { typedef Variants17 Type; }; +template <> struct Variants_<18> { typedef Variants18 Type; }; +template <> struct Variants_<19> { typedef Variants19 Type; }; +template <> struct Variants_<20> { typedef Variants20 Type; }; template using Variants = typename Variants_::Type; @@ -66,18 +130,43 @@ using Variants = typename Variants_::Type; template class OneOf { template - static inline constexpr uint typeIndex() { return _::TypeIndex_<1, Key, Variants...>::value; } - // Get the 1-based index of Key within the type list Types. + static inline constexpr uint typeIndex() { + return _::TypeIndex_<1, _::OneOfFailError_, Key, Variants...>::value; + } + // Get the 1-based index of Key within the type list Types, or static_assert with a nice error. + + template + static inline constexpr uint typeIndexOrZero() { + return _::TypeIndex_<1, _::OneOfFailZero_, Key, Variants...>::value; + } + + template + struct HasAll; + // Has a member type called "Success" if and only if all of `OtherVariants` are types that + // appear in `Variants`. Used with SFINAE to enable subset constructors. public: inline OneOf(): tag(0) {} + OneOf(const OneOf& other) { copyFrom(other); } OneOf(OneOf& other) { copyFrom(other); } OneOf(OneOf&& other) { moveFrom(other); } - template + // Copy/move from same OneOf type. + + template ::Success> + OneOf(const OneOf& other) { copyFromSubset(other); } + template ::Success> + OneOf(OneOf& other) { copyFromSubset(other); } + template ::Success> + OneOf(OneOf&& other) { moveFromSubset(other); } + // Copy/move from OneOf that contains a subset of the types we do. + + template >::Success> OneOf(T&& other): tag(typeIndex>()) { ctor(*reinterpret_cast*>(space), kj::fwd(other)); } + // Copy/move from a value that matches one of the individual types in the OneOf. + ~OneOf() { destroy(); } OneOf& operator=(const OneOf& other) { if (tag != 0) destroy(); copyFrom(other); return *this; } @@ -92,15 +181,25 @@ class OneOf { } template - T& get() { + T& get() & { KJ_IREQUIRE(is(), "Must check OneOf::is() before calling get()."); return *reinterpret_cast(space); } template - const T& get() const { + T&& get() && { + KJ_IREQUIRE(is(), "Must check OneOf::is() before calling get()."); + return kj::mv(*reinterpret_cast(space)); + } + template + const T& get() const& { KJ_IREQUIRE(is(), "Must check OneOf::is() before calling get()."); return *reinterpret_cast(space); } + template + const T&& get() const&& { + KJ_IREQUIRE(is(), "Must check OneOf::is() before calling get()."); + return kj::mv(*reinterpret_cast(space)); + } template T& init(Params&&... 
params) { @@ -118,6 +217,14 @@ class OneOf { return nullptr; } } + template + Maybe tryGet() const { + if (is()) { + return *reinterpret_cast(space); + } else { + return nullptr; + } + } template KJ_NORETURN(void allHandled()); @@ -222,8 +329,55 @@ class OneOf { tag = other.tag; doAll(moveVariantFrom(other)...); } + + template + inline bool copySubsetVariantFrom(const OneOf& other) { + if (other.template is()) { + tag = typeIndex>(); + ctor(*reinterpret_cast(space), other.template get()); + } + return false; + } + template + void copyFromSubset(const OneOf& other) { + doAll(copySubsetVariantFrom(other)...); + } + + template + inline bool copySubsetVariantFrom(OneOf& other) { + if (other.template is()) { + tag = typeIndex>(); + ctor(*reinterpret_cast(space), other.template get()); + } + return false; + } + template + void copyFromSubset(OneOf& other) { + doAll(copySubsetVariantFrom(other)...); + } + + template + inline bool moveSubsetVariantFrom(OneOf& other) { + if (other.template is()) { + tag = typeIndex>(); + ctor(*reinterpret_cast(space), kj::mv(other.template get())); + } + return false; + } + template + void moveFromSubset(OneOf& other) { + doAll(moveSubsetVariantFrom(other)...); + } }; +template +template +struct OneOf::HasAll + : public HasAll(), Rest...> {}; +template +template +struct OneOf::HasAll: public _::SuccessIfNotZero {}; + template template void OneOf::allHandled() { @@ -244,11 +398,23 @@ void OneOf::allHandled() { auto _kj_switch_subject = (value)._switchSubject(); \ switch (_kj_switch_subject->which()) #endif +#if !_MSC_VER || defined(__clang__) #define KJ_CASE_ONEOF(name, ...) \ break; \ - case ::kj::Decay::tagFor<__VA_ARGS__>(): \ - for (auto& name = _kj_switch_subject->get<__VA_ARGS__>(), *_kj_switch_done = &name; \ + case ::kj::Decay::template tagFor<__VA_ARGS__>(): \ + for (auto& name = _kj_switch_subject->template get<__VA_ARGS__>(), *_kj_switch_done = &name; \ _kj_switch_done; _kj_switch_done = nullptr) +#else +// TODO(msvc): The latest MSVC which ships with VS2019 now ICEs on the implementation above. It +// appears we can hack around the problem by moving the `->template get<>()` syntax to an outer +// `if`. (This unfortunately allows wonky syntax like `KJ_CASE_ONEOF(a, B) { } else { }`.) +// https://developercommunity.visualstudio.com/content/problem/1143733/internal-compiler-error-on-v1670.html +#define KJ_CASE_ONEOF(name, ...) \ + break; \ + case ::kj::Decay::template tagFor<__VA_ARGS__>(): \ + if (auto* _kj_switch_done = &_kj_switch_subject->template get<__VA_ARGS__>()) \ + for (auto& name = *_kj_switch_done; _kj_switch_done; _kj_switch_done = nullptr) +#endif #define KJ_CASE_ONEOF_DEFAULT break; default: // Allows switching over a OneOf. // @@ -283,3 +449,5 @@ void OneOf::allHandled() { // looping, but it's defined as a pointer since that's all we can define in this context. } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/parse/char.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/parse/char.c++ index 1c6c77d5e31..4db99142fa5 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/parse/char.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/parse/char.c++ @@ -61,7 +61,8 @@ double ParseFloat::operator()(const Array& digits, *pos++ = '\0'; KJ_DASSERT(pos == buf.end()); - return strtod(buf.begin(), nullptr); + // The above construction should always produce a valid double, so this should never throw... 
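As an aside on the kj::OneOf additions above (subset copy/move construction, rvalue get(), tryGet(), and the KJ_SWITCH_ONEOF dispatch macros), a minimal usage sketch follows. It is illustrative only: it assumes the kj headers from this subtree are on the include path, and the variant set (int, kj::StringPtr, double) and the function name are invented for the example.

    #include <kj/one-of.h>
    #include <kj/string.h>
    #include <kj/debug.h>

    void oneOfSketch() {
      kj::OneOf<int, kj::StringPtr> small;
      small.init<int>(123);

      // Subset copy construction: {int, StringPtr} is a subset of {int, StringPtr, double}.
      kj::OneOf<int, kj::StringPtr, double> big = small;

      KJ_SWITCH_ONEOF(big) {
        KJ_CASE_ONEOF(i, int) { KJ_ASSERT(i == 123); }
        KJ_CASE_ONEOF(s, kj::StringPtr) { KJ_FAIL_ASSERT("unexpected StringPtr", s); }
        KJ_CASE_ONEOF(d, double) { KJ_FAIL_ASSERT("unexpected double", d); }
      }

      // tryGet() yields a kj::Maybe instead of asserting when the held type does not match.
      KJ_IF_MAYBE(i, big.tryGet<int>()) { KJ_ASSERT(*i == 123); }
    }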
+ return StringPtr(buf.begin(), bufSize).parseAs(); } } // namespace _ (private) diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/parse/char.h b/libs/EXTERNAL/capnproto/c++/src/kj/parse/char.h index 959357dde5c..74e4e6c82b8 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/parse/char.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/parse/char.h @@ -24,14 +24,12 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "common.h" #include "../string.h" #include +KJ_BEGIN_HEADER + namespace kj { namespace parse { @@ -158,7 +156,7 @@ constexpr inline CharGroup_ charRange(char first, char last) { return CharGroup_().orRange(first, last); } -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #define anyOfChars(chars) CharGroup_().orAny(chars) // TODO(msvc): MSVC ICEs on the proper definition of `anyOfChars()`, which in turn prevents us from // building the compiler or schema parser. We don't know why this happens, but Harris found that @@ -364,3 +362,5 @@ constexpr auto doubleQuotedHexBinary = sequence( } // namespace parse } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/parse/common.h b/libs/EXTERNAL/capnproto/c++/src/kj/parse/common.h index 2388449d1a5..6d2653f907d 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/parse/common.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/parse/common.h @@ -35,19 +35,22 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "../common.h" #include "../memory.h" #include "../array.h" #include "../tuple.h" #include "../vector.h" -#if _MSC_VER && !__clang__ + +#if _MSC_VER && _MSC_VER < 1920 && !__clang__ +#define KJ_MSVC_BROKEN_DECLTYPE 1 +#endif + +#if KJ_MSVC_BROKEN_DECLTYPE #include // result_of_t #endif +KJ_BEGIN_HEADER + namespace kj { namespace parse { @@ -103,10 +106,9 @@ template struct OutputType_; template struct OutputType_> { typedef T Type; }; template using OutputType = typename OutputType_< -#if _MSC_VER && !__clang__ +#if KJ_MSVC_BROKEN_DECLTYPE std::result_of_t - // The instance() based version below results in: - // C2064: term does not evaluate to a function taking 1 arguments + // The instance() based version below results in many compiler errors on MSVC2017. #else decltype(instance()(instance())) #endif @@ -819,3 +821,5 @@ constexpr EndOfInput_ endOfInput = EndOfInput_(); } // namespace parse } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/refcount.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/refcount.c++ index 794a595465b..33de86fb72f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/refcount.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/refcount.c++ @@ -22,7 +22,7 @@ #include "refcount.h" #include "debug.h" -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) // Annoyingly, MSVC only implements the C++ atomic libs, not the C libs, so the only useful // thing we can get from seems to be atomic_thread_fence... but that one function is // indeed not implemented by the intrinsics, so... 
@@ -52,7 +52,7 @@ AtomicRefcounted::~AtomicRefcounted() noexcept(false) { } void AtomicRefcounted::disposeImpl(void* pointer) const { -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) if (KJ_MSVC_INTERLOCKED(Decrement, rel)(&refcount) == 0) { std::atomic_thread_fence(std::memory_order_acquire); delete this; @@ -66,7 +66,7 @@ void AtomicRefcounted::disposeImpl(void* pointer) const { } bool AtomicRefcounted::addRefWeakInternal() const { -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) long orig = refcount; for (;;) { diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/refcount.h b/libs/EXTERNAL/capnproto/c++/src/kj/refcount.h index 9ac94a5dc46..51fd6dc79b8 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/refcount.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/refcount.h @@ -23,10 +23,6 @@ #include "memory.h" -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #if _MSC_VER #if _MSC_VER < 1910 #include @@ -35,6 +31,8 @@ #endif #endif +KJ_BEGIN_HEADER + namespace kj { // ======================================================================================= @@ -119,7 +117,7 @@ Own Refcounted::addRefInternal(T* object) { // // Warning: Atomic ops are SLOW. -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #if _M_ARM #define KJ_MSVC_INTERLOCKED(OP, MEM) _Interlocked##OP##_##MEM #else @@ -134,7 +132,7 @@ class AtomicRefcounted: private kj::Disposer { KJ_DISALLOW_COPY(AtomicRefcounted); inline bool isShared() const { -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) return KJ_MSVC_INTERLOCKED(Or, acq)(&refcount, 0) > 1; #else return __atomic_load_n(&refcount, __ATOMIC_ACQUIRE) > 1; @@ -142,7 +140,7 @@ class AtomicRefcounted: private kj::Disposer { } private: -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) mutable volatile long refcount = 0; #else mutable volatile uint refcount = 0; @@ -173,13 +171,15 @@ inline kj::Own atomicRefcounted(Params&&... 
params) { template kj::Own atomicAddRef(T& object) { - KJ_IREQUIRE(object.AtomicRefcounted::refcount > 0, "Object not allocated with kj::refcounted()."); + KJ_IREQUIRE(object.AtomicRefcounted::refcount > 0, + "Object not allocated with kj::atomicRefcounted()."); return AtomicRefcounted::addRefInternal(&object); } template kj::Own atomicAddRef(const T& object) { - KJ_IREQUIRE(object.AtomicRefcounted::refcount > 0, "Object not allocated with kj::refcounted()."); + KJ_IREQUIRE(object.AtomicRefcounted::refcount > 0, + "Object not allocated with kj::atomicRefcounted()."); return AtomicRefcounted::addRefInternal(&object); } @@ -203,7 +203,7 @@ kj::Maybe> atomicAddRefWeak(const T& object) { template kj::Own AtomicRefcounted::addRefInternal(T* object) { AtomicRefcounted* refcounted = object; -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) KJ_MSVC_INTERLOCKED(Increment, nf)(&refcounted->refcount); #else __atomic_add_fetch(&refcounted->refcount, 1, __ATOMIC_RELAXED); @@ -214,7 +214,7 @@ kj::Own AtomicRefcounted::addRefInternal(T* object) { template kj::Own AtomicRefcounted::addRefInternal(const T* object) { const AtomicRefcounted* refcounted = object; -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) KJ_MSVC_INTERLOCKED(Increment, nf)(&refcounted->refcount); #else __atomic_add_fetch(&refcounted->refcount, 1, __ATOMIC_RELAXED); @@ -223,3 +223,5 @@ kj::Own AtomicRefcounted::addRefInternal(const T* object) { } } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/source-location.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/source-location.c++ new file mode 100644 index 00000000000..fda19323e35 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/source-location.c++ @@ -0,0 +1,28 @@ +// Copyright (c) 2021 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#include "source-location.h" + +namespace kj { +kj::String KJ_STRINGIFY(const SourceLocation& l) { + return kj::str(l.fileName, ":", l.lineNumber, ":", l.columnNumber, " in ", l.function); +} +} // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/source-location.h b/libs/EXTERNAL/capnproto/c++/src/kj/source-location.h new file mode 100644 index 00000000000..ebcd4d3f120 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/source-location.h @@ -0,0 +1,107 @@ +// Copyright (c) 2021 Cloudflare, Inc. 
and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once + +#include "string.h" + +// GCC does not implement __builtin_COLUMN() as that's non-standard but MSVC & clang do. +// MSVC does as of version https://github.com/microsoft/STL/issues/54) but there's currently not any +// pressing need for this for MSVC & writing the write compiler version check is annoying. +// Checking for clang version is problematic due to the way that XCode lies about __clang_major__. +// Instead we use __has_builtin as the feature check to check clang. +// Context: https://github.com/capnproto/capnproto/issues/1305 +#ifdef __has_builtin +#if __has_builtin(__builtin_COLUMN) +#define KJ_CALLER_COLUMN() __builtin_COLUMN() +#else +#define KJ_CALLER_COLUMN() 0 +#endif +#else +#define KJ_CALLER_COLUMN() 0 +#endif + +#if __cplusplus > 201703L +#define KJ_COMPILER_SUPPORTS_SOURCE_LOCATION 1 +#elif defined(__has_builtin) +// Clang 9 added these builtins: https://releases.llvm.org/9.0.0/tools/clang/docs/LanguageExtensions.html +// Use __has_builtin as the way to detect this because __clang_major__ is unreliable (see above +// about issue with Xcode-provided clang). +#define KJ_COMPILER_SUPPORTS_SOURCE_LOCATION ( \ + __has_builtin(__builtin_FILE) && \ + __has_builtin(__builtin_LINE) && \ + __has_builtin(__builtin_FUNCTION) \ + ) +#elif __GNUC__ >= 5 +// GCC 5 supports the required builtins: https://gcc.gnu.org/onlinedocs/gcc-5.1.0/gcc/Other-Builtins.html +#define KJ_COMPILER_SUPPORTS_SOURCE_LOCATION 1 +#endif + +namespace kj { +class SourceLocation { + // libc++ doesn't seem to implement (or even ), so + // this is a non-STL wrapper over the compiler primitives (these are the same across MSVC/clang/ + // gcc). Additionally this uses kj::StringPtr for holding the strings instead of const char* which + // makes it integrate a little more nicely into KJ. + + struct Badge { explicit constexpr Badge() = default; }; + // Neat little trick to make sure we can never call SourceLocation with explicit arguments. +public: +#if !KJ_COMPILER_SUPPORTS_SOURCE_LOCATION + constexpr SourceLocation() : fileName("??"), function("??"), lineNumber(0), columnNumber(0) {} + // Constructs a dummy source location that's not pointing at anything. 
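Stepping outside the header for a moment: the intended use of this class is as a defaulted argument, so that a callee records where it was called from without the caller spelling out __FILE__ and __LINE__. A minimal sketch of that pattern; the logEvent() helper and its message are invented, and only the constructors above plus the KJ_STRINGIFY overload declared later in this file are assumed.

    #include <kj/source-location.h>
    #include <kj/debug.h>

    // The defaulted parameter is evaluated at each call site, so `location` identifies the caller
    // (or the "??:0:0 in ??" dummy when the compiler lacks the builtins).
    void logEvent(kj::StringPtr message, kj::SourceLocation location = {}) {
      KJ_LOG(INFO, message, location);  // stringified via KJ_STRINGIFY(const SourceLocation&)
    }

    void caller() {
      logEvent("cache miss");  // logs something like "my-file.c++:12:3 in caller"
    }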
+#else + constexpr SourceLocation(Badge = Badge{}, const char* file = __builtin_FILE(), + const char* func = __builtin_FUNCTION(), uint line = __builtin_LINE(), + uint column = KJ_CALLER_COLUMN()) + : fileName(file), function(func), lineNumber(line), columnNumber(column) + {} +#endif + +#if KJ_COMPILER_SUPPORTS_SOURCE_LOCATION + // This can only be exposed if we actually generate valid SourceLocation objects as otherwise all + // SourceLocation objects would confusingly (and likely problematically) be equated equal. + constexpr bool operator==(const SourceLocation& o) const { + // Pointer equality is fine here based on how SourceLocation operates & how compilers will + // intern all duplicate string constants. + return fileName == o.fileName && function == o.function && lineNumber == o.lineNumber && + columnNumber == o.columnNumber; + } +#endif + + const char* fileName; + const char* function; + uint lineNumber; + uint columnNumber; +}; + +kj::String KJ_STRINGIFY(const SourceLocation& l); + +class NoopSourceLocation { + // This is used in places where we want to conditionally compile out tracking the source location. + // As such it intentionally lacks all the features but the default constructor so that the API + // isn't accidentally used in the wrong compilation context. +}; + +KJ_UNUSED static kj::String KJ_STRINGIFY(const NoopSourceLocation& l) { + return kj::String(); +} +} // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/std/iostream.h b/libs/EXTERNAL/capnproto/c++/src/kj/std/iostream.h index 5ac0549b08f..f909caff889 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/std/iostream.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/std/iostream.h @@ -25,13 +25,11 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "../io.h" #include +KJ_BEGIN_HEADER + namespace kj { namespace std { @@ -83,3 +81,5 @@ class StdInputStream: public kj::InputStream { } // namespace std } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/string-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/string-test.c++ index d11f88b9b4f..b461a4bb3fc 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/string-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/string-test.c++ @@ -23,6 +23,7 @@ #include #include #include "vector.h" +#include namespace kj { namespace _ { // private @@ -231,6 +232,66 @@ KJ_TEST("parsing 'nan' returns canonical NaN value") { } } +KJ_TEST("stringify array-of-array") { + int arr1[] = {1, 23}; + int arr2[] = {456, 7890}; + ArrayPtr arr3[] = {arr1, arr2}; + ArrayPtr> array = arr3; + + KJ_EXPECT(str(array) == "1, 23, 456, 7890"); +} + +KJ_TEST("ArrayPtr == StringPtr") { + StringPtr s = "foo"_kj; + ArrayPtr a = s; + + KJ_EXPECT(a == s); +#if __cplusplus >= 202000L + KJ_EXPECT(s == a); +#endif +} + +KJ_TEST("String == String") { + String a = kj::str("foo"); + String b = kj::str("foo"); + String c = kj::str("bar"); + + // We're trying to trigger the -Wambiguous-reversed-operator warning in Clang, but it seems + // magic asserts inadvertently squelch it. So, we use an alternate macro with no magic. +#define KJ_EXPECT_NOMAGIC(cond) \ + if (cond) {} else { KJ_FAIL_ASSERT("expected " #cond); } + + KJ_EXPECT_NOMAGIC(a == a); + KJ_EXPECT_NOMAGIC(a == b); + KJ_EXPECT_NOMAGIC(a != c); +} + +KJ_TEST("float stringification and parsing is not locale-dependent") { + // Remember the old locale, set it back when we're done. 
+ char* oldLocaleCstr = setlocale(LC_NUMERIC, nullptr); + KJ_ASSERT(oldLocaleCstr != nullptr); + auto oldLocale = kj::str(oldLocaleCstr); + KJ_DEFER(setlocale(LC_NUMERIC, oldLocale.cStr())); + + // Set the locale to "C". + KJ_ASSERT(setlocale(LC_NUMERIC, "C") != nullptr); + + KJ_ASSERT(kj::str(1.5) == "1.5"); + KJ_ASSERT(kj::str(1.5f) == "1.5"); + KJ_EXPECT("1.5"_kj.parseAs() == 1.5); + KJ_EXPECT("1.5"_kj.parseAs() == 1.5); + + if (setlocale(LC_NUMERIC, "es_ES") == nullptr && + setlocale(LC_NUMERIC, "es_ES.utf8") == nullptr) { + // Some systems may not have the desired locale available. + KJ_LOG(WARNING, "Couldn't set locale to es_ES. Skipping this test."); + } else { + KJ_EXPECT(kj::str(1.5) == "1.5"); + KJ_EXPECT(kj::str(1.5f) == "1.5"); + KJ_EXPECT("1.5"_kj.parseAs() == 1.5); + KJ_EXPECT("1.5"_kj.parseAs() == 1.5); + } +} } // namespace } // namespace _ (private) } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/string-tree.h b/libs/EXTERNAL/capnproto/c++/src/kj/string-tree.h index a0e0d6530e8..19281bdc188 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/string-tree.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/string-tree.h @@ -21,12 +21,10 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "string.h" +KJ_BEGIN_HEADER + namespace kj { class StringTree { @@ -217,3 +215,5 @@ StringTree strTree(Params&&... params) { } } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/string.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/string.c++ index 827bb888c6f..7dc5fc67ab4 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/string.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/string.c++ @@ -29,7 +29,7 @@ namespace kj { -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #pragma warning(disable: 4996) // Warns that sprintf() is buffer-overrunny. We know that, it's cool. #endif @@ -76,32 +76,6 @@ T parseInteger(const StringPtr& s) { } } -double parseDouble(const StringPtr& s) { - KJ_REQUIRE(s != nullptr, "String does not contain valid number", s) { return 0; } - char *endPtr; - errno = 0; - auto value = strtod(s.begin(), &endPtr); - KJ_REQUIRE(endPtr == s.end(), "String does not contain valid floating number", s) { return 0; } -#if _WIN32 || __CYGWIN__ || __BIONIC__ - // When Windows' strtod() parses "nan", it returns a value with the sign bit set. But, our - // preferred canonical value for NaN does not have the sign bit set, and all other platforms - // return one without the sign bit set. So, on Windows, detect NaN and return our preferred - // version. - // - // Cygwin seemingly does not try to emulate Linux behavior here, but rather allows Windows' - // behavior to leak through. (Conversely, WINE actually produces the Linux behavior despite - // trying to behave like Win32...) - // - // Bionic (Android) failed the unit test and so I added it to the list without investigating - // further. 
- if (isNaN(value)) { - // NaN - return kj::nan(); - } -#endif - return value; -} - } // namespace #define PARSE_AS_INTEGER(T) \ @@ -118,8 +92,6 @@ PARSE_AS_INTEGER(unsigned long); PARSE_AS_INTEGER(long long); PARSE_AS_INTEGER(unsigned long long); #undef PARSE_AS_INTEGER -template <> double StringPtr::parseAs() const { return parseDouble(*this); } -template <> float StringPtr::parseAs() const { return parseDouble(*this); } String heapString(size_t size) { char* buffer = _::HeapArrayDisposer::allocate(size + 1); @@ -187,7 +159,11 @@ static CappedArray stringifyImpl(T i) { // We don't use sprintf() because it's not async-signal-safe (for strPreallocated()). CappedArray result; bool negative = i < 0; - Unsigned u = negative ? -i : i; + // Note that if `i` is the most-negative value, negating it produces the same bit value. But + // since it's a signed integer, this is considered an overflow. We therefore must make it + // unsigned first, then negate it, to avoid ubsan complaining. + Unsigned u = i; + if (negative) u = -u; uint8_t reverse[sizeof(T) * 3 + 1]; uint8_t* p = reverse; if (u == 0) { @@ -485,6 +461,76 @@ char* FloatToBuffer(float value, char* buffer) { return buffer; } +// ---------------------------------------------------------------------- +// NoLocaleStrtod() +// This code will make you cry. +// ---------------------------------------------------------------------- + +namespace { + +// Returns a string identical to *input except that the character pointed to +// by radix_pos (which should be '.') is replaced with the locale-specific +// radix character. +kj::String LocalizeRadix(const char* input, const char* radix_pos) { + // Determine the locale-specific radix character by calling sprintf() to + // print the number 1.5, then stripping off the digits. As far as I can + // tell, this is the only portable, thread-safe way to get the C library + // to divuldge the locale's radix character. No, localeconv() is NOT + // thread-safe. + char temp[16]; + int size = sprintf(temp, "%.1f", 1.5); + KJ_ASSERT(temp[0] == '1'); + KJ_ASSERT(temp[size-1] == '5'); + KJ_ASSERT(size <= 6); + + // Now replace the '.' in the input with it. + return kj::str( + kj::arrayPtr(input, radix_pos), + kj::arrayPtr(temp + 1, size - 2), + kj::StringPtr(radix_pos + 1)); +} + +} // namespace + +double NoLocaleStrtod(const char* text, char** original_endptr) { + // We cannot simply set the locale to "C" temporarily with setlocale() + // as this is not thread-safe. Instead, we try to parse in the current + // locale first. If parsing stops at a '.' character, then this is a + // pretty good hint that we're actually in some other locale in which + // '.' is not the radix character. + + char* temp_endptr; + double result = strtod(text, &temp_endptr); + if (original_endptr != NULL) *original_endptr = temp_endptr; + if (*temp_endptr != '.') return result; + + // Parsing halted on a '.'. Perhaps we're in a different locale? Let's + // try to replace the '.' with a locale-specific radix character and + // try again. + kj::String localized = LocalizeRadix(text, temp_endptr); + const char* localized_cstr = localized.cStr(); + char* localized_endptr; + result = strtod(localized_cstr, &localized_endptr); + if ((localized_endptr - localized_cstr) > + (temp_endptr - text)) { + // This attempt got further, so replacing the decimal must have helped. + // Update original_endptr to point at the right location. + if (original_endptr != NULL) { + // size_diff is non-zero if the localized radix has multiple bytes. 
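To make the motivation for NoLocaleStrtod() concrete: in a locale whose radix character is a comma, plain strtod() stops at the '.' and silently returns the wrong value, while StringPtr::parseAs<double>() (which now routes through this helper) keeps working. A small sketch; the de_DE.UTF-8 locale name is only an example and must be installed for the first branch to run.

    #include <clocale>
    #include <cstdlib>
    #include <kj/string.h>
    #include <kj/debug.h>

    void radixSketch() {
      if (setlocale(LC_NUMERIC, "de_DE.UTF-8") != nullptr) {
        char* end;
        double viaStrtod = strtod("1.5", &end);
        KJ_ASSERT(viaStrtod == 1.0 && *end == '.');    // locale-dependent: parsing stops at '.'
        KJ_ASSERT("1.5"_kj.parseAs<double>() == 1.5);  // locale-independent
        setlocale(LC_NUMERIC, "C");                    // restore a known state
      }
    }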
+ int size_diff = localized.size() - strlen(text); + // const_cast is necessary to match the strtod() interface. + *original_endptr = const_cast( + text + (localized_endptr - localized_cstr - size_diff)); + } + } + + return result; +} + +// ---------------------------------------------------------------------- +// End of code copied from Protobuf +// ---------------------------------------------------------------------- + } // namespace CappedArray Stringifier::operator*(float f) const { @@ -499,5 +545,35 @@ CappedArray Stringifier::operator*(double f) const { return result; } +double parseDouble(const StringPtr& s) { + KJ_REQUIRE(s != nullptr, "String does not contain valid number", s) { return 0; } + char *endPtr; + errno = 0; + auto value = _::NoLocaleStrtod(s.begin(), &endPtr); + KJ_REQUIRE(endPtr == s.end(), "String does not contain valid floating number", s) { return 0; } +#if _WIN32 || __CYGWIN__ || __BIONIC__ + // When Windows' strtod() parses "nan", it returns a value with the sign bit set. But, our + // preferred canonical value for NaN does not have the sign bit set, and all other platforms + // return one without the sign bit set. So, on Windows, detect NaN and return our preferred + // version. + // + // Cygwin seemingly does not try to emulate Linux behavior here, but rather allows Windows' + // behavior to leak through. (Conversely, WINE actually produces the Linux behavior despite + // trying to behave like Win32...) + // + // Bionic (Android) failed the unit test and so I added it to the list without investigating + // further. + if (isNaN(value)) { + // NaN + return kj::nan(); + } +#endif + return value; +} + } // namespace _ (private) + +template <> double StringPtr::parseAs() const { return _::parseDouble(*this); } +template <> float StringPtr::parseAs() const { return _::parseDouble(*this); } + } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/string.h b/libs/EXTERNAL/capnproto/c++/src/kj/string.h index 6cde1dbf479..193442aad06 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/string.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/string.h @@ -21,14 +21,12 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include #include "array.h" #include +KJ_BEGIN_HEADER + namespace kj { class StringPtr; class String; @@ -71,16 +69,31 @@ class StringPtr { public: inline StringPtr(): content("", 1) {} inline StringPtr(decltype(nullptr)): content("", 1) {} - inline StringPtr(const char* value): content(value, strlen(value) + 1) {} - inline StringPtr(const char* value, size_t size): content(value, size + 1) { + inline StringPtr(const char* value KJ_LIFETIMEBOUND): content(value, strlen(value) + 1) {} + inline StringPtr(const char* value KJ_LIFETIMEBOUND, size_t size): content(value, size + 1) { KJ_IREQUIRE(value[size] == '\0', "StringPtr must be NUL-terminated."); } - inline StringPtr(const char* begin, const char* end): StringPtr(begin, end - begin) {} - inline StringPtr(const String& value); + inline StringPtr(const char* begin KJ_LIFETIMEBOUND, const char* end KJ_LIFETIMEBOUND): StringPtr(begin, end - begin) {} + inline StringPtr(String&& value KJ_LIFETIMEBOUND) : StringPtr(value) {} + inline StringPtr(const String& value KJ_LIFETIMEBOUND); + StringPtr& operator=(String&& value) = delete; + inline StringPtr& operator=(decltype(nullptr)) { + content = ArrayPtr("", 1); + return *this; + } + +#if __cpp_char8_t + inline StringPtr(const char8_t* value KJ_LIFETIMEBOUND): StringPtr(reinterpret_cast(value)) {} + inline 
StringPtr(const char8_t* value KJ_LIFETIMEBOUND, size_t size) + : StringPtr(reinterpret_cast(value), size) {} + inline StringPtr(const char8_t* begin KJ_LIFETIMEBOUND, const char8_t* end KJ_LIFETIMEBOUND) + : StringPtr(reinterpret_cast(begin), reinterpret_cast(end)) {} + // KJ strings are and always have been UTF-8, so screw this C++20 char8_t stuff. +#endif #if KJ_COMPILER_SUPPORTS_STL_STRING_INTEROP template ().c_str())> - inline StringPtr(const T& t): StringPtr(t.c_str()) {} + inline StringPtr(const T& t KJ_LIFETIMEBOUND): StringPtr(t.c_str()) {} // Allow implicit conversion from any class that has a c_str() method (namely, std::string). // We use a template trick to detect std::string in order to avoid including the header for // those who don't want it. @@ -105,11 +118,11 @@ class StringPtr { inline char operator[](size_t index) const { return content[index]; } - inline const char* begin() const { return content.begin(); } - inline const char* end() const { return content.end() - 1; } + inline constexpr const char* begin() const { return content.begin(); } + inline constexpr const char* end() const { return content.end() - 1; } - inline bool operator==(decltype(nullptr)) const { return content.size() <= 1; } - inline bool operator!=(decltype(nullptr)) const { return content.size() > 1; } + inline constexpr bool operator==(decltype(nullptr)) const { return content.size() <= 1; } + inline constexpr bool operator!=(decltype(nullptr)) const { return content.size() > 1; } inline bool operator==(const StringPtr& other) const; inline bool operator!=(const StringPtr& other) const { return !(*this == other); } @@ -138,15 +151,18 @@ class StringPtr { // Overflowed floating numbers return inf. private: - inline constexpr StringPtr(ArrayPtr content): content(content) {} + inline explicit constexpr StringPtr(ArrayPtr content): content(content) {} ArrayPtr content; friend constexpr kj::StringPtr (::operator "" _kj)(const char* str, size_t n); + friend class SourceLocation; }; +#if !__cpp_impl_three_way_comparison inline bool operator==(const char* a, const StringPtr& b) { return b == a; } inline bool operator!=(const char* a, const StringPtr& b) { return b != a; } +#endif template <> char StringPtr::parseAs() const; template <> signed char StringPtr::parseAs() const; @@ -181,30 +197,35 @@ class String { inline explicit String(Array buffer); // Does not copy. Requires `buffer` ends with `\0`. - inline operator ArrayPtr(); - inline operator ArrayPtr() const; - inline ArrayPtr asArray(); - inline ArrayPtr asArray() const; - inline ArrayPtr asBytes() { return asArray().asBytes(); } - inline ArrayPtr asBytes() const { return asArray().asBytes(); } + inline operator ArrayPtr() KJ_LIFETIMEBOUND; + inline operator ArrayPtr() const KJ_LIFETIMEBOUND; + inline ArrayPtr asArray() KJ_LIFETIMEBOUND; + inline ArrayPtr asArray() const KJ_LIFETIMEBOUND; + inline ArrayPtr asBytes() KJ_LIFETIMEBOUND { return asArray().asBytes(); } + inline ArrayPtr asBytes() const KJ_LIFETIMEBOUND { return asArray().asBytes(); } // Result does not include NUL terminator. + inline StringPtr asPtr() const KJ_LIFETIMEBOUND { + // Convenience operator to return a StringPtr. + return StringPtr{*this}; + } + inline Array releaseArray() { return kj::mv(content); } // Disowns the backing array (which includes the NUL terminator) and returns it. The String value // is clobbered (as if moved away). 
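A brief sketch of the two members just added (asPtr() and releaseArray()), plus the kind of misuse the KJ_LIFETIMEBOUND annotations are meant to catch; the function name is invented and only String/StringPtr APIs visible in this header are used.

    #include <kj/string.h>
    #include <kj/debug.h>

    void ownershipSketch() {
      kj::String owned = kj::str("hello ", 42);

      kj::StringPtr view = owned.asPtr();          // non-owning view, valid only while `owned` lives
      KJ_ASSERT(view == "hello 42");

      kj::Array<char> raw = owned.releaseArray();  // take the buffer, including the NUL terminator
      KJ_ASSERT(raw.size() == 9);                  // strlen("hello 42") + 1
      KJ_ASSERT(owned == nullptr);                 // `owned` is left clobbered, as if moved away

      // kj::StringPtr dangling = kj::str("tmp").asPtr();
      // ^ would dangle; compilers that honor KJ_LIFETIMEBOUND (e.g. recent clang) can warn here.
    }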
- inline const char* cStr() const; + inline const char* cStr() const KJ_LIFETIMEBOUND; inline size_t size() const; // Result does not include NUL terminator. inline char operator[](size_t index) const; - inline char& operator[](size_t index); + inline char& operator[](size_t index) KJ_LIFETIMEBOUND; - inline char* begin(); - inline char* end(); - inline const char* begin() const; - inline const char* end() const; + inline char* begin() KJ_LIFETIMEBOUND; + inline char* end() KJ_LIFETIMEBOUND; + inline const char* begin() const KJ_LIFETIMEBOUND; + inline const char* end() const KJ_LIFETIMEBOUND; inline bool operator==(decltype(nullptr)) const { return content.size() <= 1; } inline bool operator!=(decltype(nullptr)) const { return content.size() > 1; } @@ -216,11 +237,23 @@ class String { inline bool operator<=(const StringPtr& other) const { return StringPtr(*this) <= other; } inline bool operator>=(const StringPtr& other) const { return StringPtr(*this) >= other; } + inline bool operator==(const String& other) const { return StringPtr(*this) == StringPtr(other); } + inline bool operator!=(const String& other) const { return StringPtr(*this) != StringPtr(other); } + inline bool operator< (const String& other) const { return StringPtr(*this) < StringPtr(other); } + inline bool operator> (const String& other) const { return StringPtr(*this) > StringPtr(other); } + inline bool operator<=(const String& other) const { return StringPtr(*this) <= StringPtr(other); } + inline bool operator>=(const String& other) const { return StringPtr(*this) >= StringPtr(other); } + // Note that if we don't overload for `const String&` specifically, then C++20 will decide that + // comparisons between two strings are ambiguous. (Clang turns this into a warning, + // -Wambiguous-reversed-operator, due to the stupidity...) + inline bool startsWith(const StringPtr& other) const { return StringPtr(*this).startsWith(other);} inline bool endsWith(const StringPtr& other) const { return StringPtr(*this).endsWith(other); } - inline StringPtr slice(size_t start) const { return StringPtr(*this).slice(start); } - inline ArrayPtr slice(size_t start, size_t end) const { + inline StringPtr slice(size_t start) const KJ_LIFETIMEBOUND { + return StringPtr(*this).slice(start); + } + inline ArrayPtr slice(size_t start, size_t end) const KJ_LIFETIMEBOUND { return StringPtr(*this).slice(start, end); } @@ -235,8 +268,10 @@ class String { Array content; }; +#if !__cpp_impl_three_way_comparison inline bool operator==(const char* a, const String& b) { return b == a; } inline bool operator!=(const char* a, const String& b) { return b != a; } +#endif String heapString(size_t size); // Allocate a String of the given size on the heap, not including NUL terminator. The NUL @@ -314,9 +349,13 @@ class Delimited; // Delimits a sequence of type T with a string delimiter. Implements kj::delimited(). template -char* fill(char* __restrict__ target, Delimited first, Rest&&... rest); +char* fill(char* __restrict__ target, Delimited&& first, Rest&&... rest); +template +char* fillLimited(char* __restrict__ target, char* limit, Delimited&& first,Rest&&... rest); template -char* fillLimited(char* __restrict__ target, char* limit, Delimited first,Rest&&... rest); +char* fill(char* __restrict__ target, Delimited& first, Rest&&... rest); +template +char* fillLimited(char* __restrict__ target, char* limit, Delimited& first,Rest&&... rest); // As with StringTree, we special-case Delimited. 
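Since the Delimited special-casing mentioned just above is what lets kj::str() format containers, a short illustration may help. It relies only on the ArrayPtr stringification and kj::delimited() declared in this header; the values and separator are arbitrary.

    #include <kj/string.h>
    #include <kj/debug.h>

    void delimitedSketch() {
      int values[] = {1, 23, 456};
      kj::ArrayPtr<int> arr = values;

      KJ_ASSERT(kj::str(arr) == "1, 23, 456");                         // default ", " separator
      KJ_ASSERT(kj::str(kj::delimited(arr, " | ")) == "1 | 23 | 456"); // caller-chosen separator
    }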
struct Stringifier { @@ -334,14 +373,29 @@ struct Stringifier { inline ArrayPtr operator*(ArrayPtr s) const { return s; } inline ArrayPtr operator*(ArrayPtr s) const { return s; } - inline ArrayPtr operator*(const Array& s) const { return s; } - inline ArrayPtr operator*(const Array& s) const { return s; } + inline ArrayPtr operator*(const Array& s) const KJ_LIFETIMEBOUND { + return s; + } + inline ArrayPtr operator*(const Array& s) const KJ_LIFETIMEBOUND { return s; } template - inline ArrayPtr operator*(const CappedArray& s) const { return s; } + inline ArrayPtr operator*(const CappedArray& s) const KJ_LIFETIMEBOUND { + return s; + } template - inline ArrayPtr operator*(const FixedArray& s) const { return s; } - inline ArrayPtr operator*(const char* s) const { return arrayPtr(s, strlen(s)); } - inline ArrayPtr operator*(const String& s) const { return s.asArray(); } + inline ArrayPtr operator*(const FixedArray& s) const KJ_LIFETIMEBOUND { + return s; + } + inline ArrayPtr operator*(const char* s) const KJ_LIFETIMEBOUND { + return arrayPtr(s, strlen(s)); + } +#if __cpp_char8_t + inline ArrayPtr operator*(const char8_t* s) const KJ_LIFETIMEBOUND { + return operator*(reinterpret_cast(s)); + } +#endif + inline ArrayPtr operator*(const String& s) const KJ_LIFETIMEBOUND { + return s.asArray(); + } inline ArrayPtr operator*(const StringPtr& s) const { return s.asArray(); } inline Range operator*(const Range& r) const { return r; } @@ -370,11 +424,6 @@ struct Stringifier { CappedArray operator*(double f) const; CappedArray operator*(const void* s) const; - template - _::Delimited> operator*(ArrayPtr arr) const; - template - _::Delimited> operator*(const Array& arr) const; - #if KJ_COMPILER_SUPPORTS_STL_STRING_INTEROP // supports expression SFINAE? template ().toString())> inline Result operator*(T&& value) const { return kj::fwd(value).toString(); } @@ -468,25 +517,23 @@ StringPtr strPreallocated(ArrayPtr buffer, Params&&... params) { return StringPtr(buffer.begin(), end); } -namespace _ { // private - -template -inline _::Delimited> Stringifier::operator*(ArrayPtr arr) const { +template ()))> +inline _::Delimited> operator*(const _::Stringifier&, ArrayPtr arr) { return _::Delimited>(arr, ", "); } -template -inline _::Delimited> Stringifier::operator*(const Array& arr) const { +template ()))> +inline _::Delimited> operator*(const _::Stringifier&, const Array& arr) { return _::Delimited>(arr, ", "); } -} // namespace _ (private) - #define KJ_STRINGIFY(...) operator*(::kj::_::Stringifier, __VA_ARGS__) // Defines a stringifier for a custom type. Example: // // class Foo {...}; // inline StringPtr KJ_STRINGIFY(const Foo& foo) { return foo.name(); } +// // or perhaps +// inline String KJ_STRINGIFY(const Foo& foo) { return kj::str(foo.fld1(), ",", foo.fld2()); } // // This allows Foo to be passed to str(). // @@ -673,12 +720,22 @@ class Delimited { }; template -char* fill(char* __restrict__ target, Delimited first, Rest&&... rest) { +char* fill(char* __restrict__ target, Delimited&& first, Rest&&... rest) { + target = first.flattenTo(target); + return fill(target, kj::fwd(rest)...); +} +template +char* fillLimited(char* __restrict__ target, char* limit, Delimited&& first, Rest&&... rest) { + target = first.flattenTo(target, limit); + return fillLimited(target, limit, kj::fwd(rest)...); +} +template +char* fill(char* __restrict__ target, Delimited& first, Rest&&... 
rest) { target = first.flattenTo(target); return fill(target, kj::fwd(rest)...); } template -char* fillLimited(char* __restrict__ target, char* limit, Delimited first, Rest&&... rest) { +char* fillLimited(char* __restrict__ target, char* limit, Delimited& first, Rest&&... rest) { target = first.flattenTo(target, limit); return fillLimited(target, limit, kj::fwd(rest)...); } @@ -700,3 +757,5 @@ _::Delimited delimited(T&& arr, kj::StringPtr delim) { constexpr kj::StringPtr operator "" _kj(const char* str, size_t n) { return kj::StringPtr(kj::ArrayPtr(str, n + 1)); }; + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/table-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/table-test.c++ index 3a96e1135af..202ab9b6798 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/table-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/table-test.c++ @@ -267,6 +267,32 @@ KJ_TEST("hash tables when hash is always same") { KJ_EXPECT_THROW_MESSAGE("inserted row already exists in table", table.insert("bar")); } +class IntHasher { + // Dumb integer hasher that just returns the integer itself. +public: + uint keyForRow(uint i) const { return i; } + + bool matches(uint a, uint b) const { + return a == b; + } + uint hashCode(uint i) const { + return i; + } +}; + +KJ_TEST("HashIndex with many erasures doesn't keep growing") { + HashIndex index; + + kj::ArrayPtr rows = nullptr; + + for (uint i: kj::zeroTo(1000000)) { + KJ_ASSERT(index.insert(rows, 0, i) == nullptr); + index.erase(rows, 0, i); + } + + KJ_ASSERT(index.capacity() < 10); +} + struct SiPair { kj::StringPtr str; uint i; @@ -739,6 +765,21 @@ KJ_TEST("simple tree table") { KJ_EXPECT(iter == range.end()); } + { + auto iter = table.seek("garply"); + KJ_EXPECT(*iter++ == "garply"); + KJ_EXPECT(*iter++ == "grault"); + KJ_EXPECT(*iter++ == "qux"); + KJ_EXPECT(iter == table.ordered().end()); + } + + { + auto iter = table.seek("gorply"); + KJ_EXPECT(*iter++ == "grault"); + KJ_EXPECT(*iter++ == "qux"); + KJ_EXPECT(iter == table.ordered().end()); + } + auto& graultRow = table.begin()[1]; kj::StringPtr origGrault = graultRow; @@ -769,6 +810,35 @@ KJ_TEST("simple tree table") { KJ_EXPECT(*iter++ == "waldo"); KJ_EXPECT(iter == table.end()); } + + // Verify that move constructor/assignment work. 
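Before the move-semantics checks that follow, a stand-alone sketch of the seek()/range() calling convention exercised by the tests above may be useful. It assumes a comparator with the keyForRow/isBefore/matches shape used elsewhere in this test file; the row values are arbitrary.

    #include <kj/table.h>
    #include <kj/string.h>
    #include <kj/debug.h>

    struct StrCompare {
      kj::StringPtr keyForRow(kj::StringPtr row) const { return row; }
      bool isBefore(kj::StringPtr a, kj::StringPtr b) const { return a < b; }
      bool matches(kj::StringPtr a, kj::StringPtr b) const { return a == b; }
    };

    void seekSketch() {
      kj::Table<kj::StringPtr, kj::TreeIndex<StrCompare>> table;
      table.insert("bar");
      table.insert("foo");
      table.insert("qux");

      auto iter = table.seek("baz");   // points at the first row that sorts >= "baz"
      KJ_ASSERT(*iter == "foo");

      unsigned int count = 0;
      for (kj::StringPtr row: table.range("a", "p")) {  // half-open: rows in ["a", "p")
        KJ_ASSERT(row < "p");
        ++count;
      }
      KJ_ASSERT(count == 2);           // "bar" and "foo"
    }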
+ Table> other(kj::mv(table)); + KJ_EXPECT(other.size() == 5); + KJ_EXPECT(table.size() == 0); + KJ_EXPECT(table.begin() == table.end()); + { + auto iter = other.begin(); + KJ_EXPECT(*iter++ == "garply"); + KJ_EXPECT(*iter++ == "grault"); + KJ_EXPECT(*iter++ == "qux"); + KJ_EXPECT(*iter++ == "corge"); + KJ_EXPECT(*iter++ == "waldo"); + KJ_EXPECT(iter == other.end()); + } + + table = kj::mv(other); + KJ_EXPECT(other.size() == 0); + KJ_EXPECT(table.size() == 5); + { + auto iter = table.begin(); + KJ_EXPECT(*iter++ == "garply"); + KJ_EXPECT(*iter++ == "grault"); + KJ_EXPECT(*iter++ == "qux"); + KJ_EXPECT(*iter++ == "corge"); + KJ_EXPECT(*iter++ == "waldo"); + KJ_EXPECT(iter == table.end()); + } + KJ_EXPECT(other.begin() == other.end()); } class UintCompare { @@ -1101,6 +1171,66 @@ KJ_TEST("insertion order index") { } } +KJ_TEST("insertion order index is movable") { + using UintTable = Table; + + kj::Maybe myTable; + + { + UintTable yourTable; + + yourTable.insert(12); + yourTable.insert(34); + yourTable.insert(56); + yourTable.insert(78); + yourTable.insert(111); + yourTable.insert(222); + yourTable.insert(333); + yourTable.insert(444); + yourTable.insert(555); + yourTable.insert(666); + yourTable.insert(777); + yourTable.insert(888); + yourTable.insert(999); + + myTable = kj::mv(yourTable); + } + + auto& table = KJ_ASSERT_NONNULL(myTable); + + // At one time the following induced a segfault/double-free, due to incorrect memory management in + // InsertionOrderIndex's move ctor and dtor. + auto range = table.ordered(); + auto iter = range.begin(); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 12); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 34); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 56); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 78); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 111); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 222); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 333); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 444); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 555); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 666); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 777); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 888); + KJ_ASSERT(iter != range.end()); + KJ_EXPECT(*iter++ == 999); + KJ_EXPECT(iter == range.end()); +} + } // namespace } // namespace _ } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/table.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/table.c++ index 5f363e0a88b..62cfa6e4e34 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/table.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/table.c++ @@ -30,7 +30,7 @@ static inline uint lg(uint value) { // Compute floor(log2(value)). // // Undefined for value = 0. 
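For reference, the helper documented here is just floor(log2(value)) computed with a bit-scan intrinsic; a stand-alone sketch using the GCC/Clang builtin (the MSVC path would use _BitScanReverse, as the hunk below does):

    // floor(log2(value)); undefined for value == 0, matching the contract stated above.
    inline unsigned floorLog2(unsigned value) {
      return 31u - static_cast<unsigned>(__builtin_clz(value));  // e.g. 1 -> 0, 8 -> 3, 1000 -> 9
    }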
-#if _MSC_VER +#if _MSC_VER && !defined(__clang__) unsigned long i; auto found = _BitScanReverse(&i, value); KJ_DASSERT(found); // !found means value = 0 @@ -161,18 +161,32 @@ kj::Array rehash(kj::ArrayPtr oldBuckets, size_t t auto newBuckets = kj::heapArray(size); memset(newBuckets.begin(), 0, sizeof(HashBucket) * size); + uint entryCount = 0; + uint collisionCount = 0; + for (auto& oldBucket: oldBuckets) { if (oldBucket.isOccupied()) { + ++entryCount; for (uint i = oldBucket.hash % newBuckets.size();; i = probeHash(newBuckets, i)) { auto& newBucket = newBuckets[i]; if (newBucket.isEmpty()) { newBucket = oldBucket; break; } + ++collisionCount; } } } + if (collisionCount > 16 + entryCount * 4) { + static bool warned = false; + if (!warned) { + KJ_LOG(WARNING, "detected excessive collisions in hash table; is your hash function OK?", + entryCount, collisionCount, kj::getStackTrace()); + warned = true; + } + } + return newBuckets; } @@ -193,12 +207,43 @@ BTreeImpl::BTreeImpl() freelistSize(0), beginLeaf(0), endLeaf(0) {} + BTreeImpl::~BTreeImpl() noexcept(false) { if (tree != &EMPTY_NODE) { aligned_free(tree); } } +BTreeImpl::BTreeImpl(BTreeImpl&& other) + : BTreeImpl() { + *this = kj::mv(other); +} + +BTreeImpl& BTreeImpl::operator=(BTreeImpl&& other) { + KJ_DASSERT(&other != this); + + if (tree != &EMPTY_NODE) { + aligned_free(tree); + } + tree = other.tree; + treeCapacity = other.treeCapacity; + height = other.height; + freelistHead = other.freelistHead; + freelistSize = other.freelistSize; + beginLeaf = other.beginLeaf; + endLeaf = other.endLeaf; + + other.tree = const_cast(&EMPTY_NODE); + other.treeCapacity = 1; + other.height = 0; + other.freelistHead = 1; + other.freelistSize = 0; + other.beginLeaf = 0; + other.endLeaf = 0; + + return *this; +} + const BTreeImpl::NodeUnion BTreeImpl::EMPTY_NODE = {{{0, {0}}}}; void BTreeImpl::verify(size_t size, FunctionParam f) { @@ -288,16 +333,7 @@ void BTreeImpl::growTree(uint minCapacity) { // aligned_alloc() function. Unfortunately, many platforms don't implement it. Luckily, there // are usually alternatives. -#if __APPLE__ || __BIONIC__ - // OSX and Android lack aligned_alloc(), but have posix_memalign(). Fine. - void* allocPtr; - int error = posix_memalign(&allocPtr, - sizeof(BTreeImpl::NodeUnion), newCapacity * sizeof(BTreeImpl::NodeUnion)); - if (error != 0) { - KJ_FAIL_SYSCALL("posix_memalign", error); - } - NodeUnion* newTree = reinterpret_cast(allocPtr); -#elif _WIN32 +#if _WIN32 // Windows lacks aligned_alloc() but has its own _aligned_malloc() (which requires freeing using // _aligned_free()). // WATCH OUT: The argument order for _aligned_malloc() is opposite of aligned_alloc()! @@ -305,12 +341,22 @@ void BTreeImpl::growTree(uint minCapacity) { _aligned_malloc(newCapacity * sizeof(BTreeImpl::NodeUnion), sizeof(BTreeImpl::NodeUnion))); KJ_ASSERT(newTree != nullptr, "memory allocation failed", newCapacity); #else - // Let's use the C11 standard. - NodeUnion* newTree = reinterpret_cast( - aligned_alloc(sizeof(BTreeImpl::NodeUnion), newCapacity * sizeof(BTreeImpl::NodeUnion))); - KJ_ASSERT(newTree != nullptr, "memory allocation failed", newCapacity); + // macOS, OpenBSD, and Android lack aligned_alloc(), but have posix_memalign(). Fine. 
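The platform notes in this hunk generalize to a small helper. A sketch of the same pattern, with invented names; alignment must be a power of two and, for posix_memalign(), a multiple of sizeof(void*).

    #include <cstdlib>
    #if _WIN32
    #include <malloc.h>
    #endif

    inline void* alignedAlloc(std::size_t alignment, std::size_t bytes) {
    #if _WIN32
      return _aligned_malloc(bytes, alignment);  // note: argument order is reversed vs. aligned_alloc()
    #else
      void* ptr = nullptr;
      return posix_memalign(&ptr, alignment, bytes) == 0 ? ptr : nullptr;
    #endif
    }

    inline void alignedFree(void* ptr) {
    #if _WIN32
      _aligned_free(ptr);                        // memory from _aligned_malloc() needs _aligned_free()
    #else
      free(ptr);
    #endif
    }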
+ void* allocPtr; + int error = posix_memalign(&allocPtr, + sizeof(BTreeImpl::NodeUnion), newCapacity * sizeof(BTreeImpl::NodeUnion)); + if (error != 0) { + KJ_FAIL_SYSCALL("posix_memalign", error); + } + NodeUnion* newTree = reinterpret_cast(allocPtr); #endif + // Note: C11 introduces aligned_alloc() as a standard, but it's still missing on many platforms, + // so we don't use it. But if you wanted to use it, you'd do this: +// NodeUnion* newTree = reinterpret_cast( +// aligned_alloc(sizeof(BTreeImpl::NodeUnion), newCapacity * sizeof(BTreeImpl::NodeUnion))); +// KJ_ASSERT(newTree != nullptr, "memory allocation failed", newCapacity); + acopy(newTree, tree, treeCapacity); azero(newTree + treeCapacity, newCapacity - treeCapacity); if (tree != &EMPTY_NODE) aligned_free(tree); @@ -808,6 +854,19 @@ void BTreeImpl::Parent::eraseAfter(uint i) { const InsertionOrderIndex::Link InsertionOrderIndex::EMPTY_LINK = { 0, 0 }; InsertionOrderIndex::InsertionOrderIndex(): capacity(0), links(const_cast(&EMPTY_LINK)) {} +InsertionOrderIndex::InsertionOrderIndex(InsertionOrderIndex&& other) + : capacity(other.capacity), links(other.links) { + other.capacity = 0; + other.links = const_cast(&EMPTY_LINK); +} +InsertionOrderIndex& InsertionOrderIndex::operator=(InsertionOrderIndex&& other) { + KJ_DASSERT(&other != this); + capacity = other.capacity; + links = other.links; + other.capacity = 0; + other.links = const_cast(&EMPTY_LINK); + return *this; +} InsertionOrderIndex::~InsertionOrderIndex() noexcept(false) { if (links != &EMPTY_LINK) delete[] links; } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/table.h b/libs/EXTERNAL/capnproto/c++/src/kj/table.h index c0351609b1a..f3e19ddcb0f 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/table.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/table.h @@ -21,10 +21,6 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "common.h" #include "tuple.h" #include "vector.h" @@ -39,16 +35,18 @@ #endif #endif +KJ_BEGIN_HEADER + namespace kj { namespace _ { // private -template -class MappedIterable; template class TableMapping; template using TableIterable = MappedIterable>; +template +using TableIterator = MappedIterator>; } // namespace _ (private) @@ -122,8 +120,8 @@ class Table { // // Optional. Implements Table::find(...). // // template - // Iterable range(kj::ArrayPtr table, SearchParams&&...) const; - // // Optional. Implements Table::range(...). + // Iterator seek(kj::ArrayPtr table, SearchParams&&...) const; + // // Optional. Implements Table::seek() and Table::range(...). // // Iterator begin() const; // Iterator end() const; @@ -187,12 +185,12 @@ class Table { // beginning of an argument list, but we define a hack to support it below. Don't worry about // it. - template - auto range(Params&&... params); - template - auto range(Params&&... params) const; + template + auto range(BeginKey&& begin, EndKey&& end); + template + auto range(BeginKey&& begin, EndKey&& end) const; // Using the given index, look up a range of values, returning an iterable. What parameters are - // accepted depends on the index. Not all indexes support this method (in particular, unique + // accepted depends on the index. Not all indexes support this method (in particular, unordered // indexes normally don't). template @@ -202,13 +200,25 @@ class Table { // Returns an iterable over the whole table ordered using the given index. Not all indexes // support this method. + template + auto seek(Params&&... params); + template + auto seek(Params&&... 
params) const; + // Takes same parameters as find(), but returns an iterator at the position where the search + // key should go. That is, this returns an iterator that points to the matching entry or, if + // there is no matching entry, points at the next entry after the key, in order. Or, if there + // is no such entry, the returned iterator is the same as ordered().end(). + // + // seek() is only supported by indexes that support ordered(). It returns the same kind of + // iterator that ordered() uses. + template bool eraseMatch(Params&&... params); // Erase the row that would be matched by `find(params)`. Returns true if there was a // match. - template - size_t eraseRange(Params&&... params); + template + size_t eraseRange(BeginKey&& begin, EndKey&& end); // Erase the row that would be matched by `range(params)`. Returns the number of // elements erased. @@ -239,18 +249,22 @@ class Table { kj::Maybe find(Params&&... params) const; template Row& findOrCreate(Params&&... params, Func&& createFunc); - template - auto range(Params&&... params); - template - auto range(Params&&... params) const; + template + auto range(BeginKey&& begin, EndKey&& end); + template + auto range(BeginKey&& begin, EndKey&& end) const; template _::TableIterable>&> ordered(); template _::TableIterable>&> ordered() const; template - bool eraseMatch(Params&&... params); + auto seek(Params&&... params); + template + auto seek(Params&&... params) const; template - size_t eraseRange(Params&&... params); + bool eraseMatch(Params&&... params); + template + size_t eraseRange(BeginKey&& begin, EndKey&& end); // Methods which take an index type as a template parameter can also take an index number. This // is useful particularly when you have multiple indexes of the same type but different runtime // properties. Additionally, you can omit the template parameter altogether to use the first @@ -362,72 +376,6 @@ inline void tryReserveSize(Params&&...) {} // If `src` has a `.size()` method, call dst.reserve(dst.size() + src.size()). // Otherwise, do nothing. -template -class MappedIterator: private Mapping { - // An iterator that wraps some other iterator and maps the values through a mapping function. - // The type `Mapping` must define a method `map()` which performs this mapping. - // - // TODO(cleanup): This seems generally useful. Should we put it somewhere resuable? - -public: - template - MappedIterator(Inner inner, Params&&... 
params) - : Mapping(kj::fwd(params)...), inner(inner) {} - - inline auto operator->() const { return &Mapping::map(*inner); } - inline decltype(auto) operator* () const { return Mapping::map(*inner); } - inline decltype(auto) operator[](size_t index) const { return Mapping::map(inner[index]); } - inline MappedIterator& operator++() { ++inner; return *this; } - inline MappedIterator operator++(int) { return MappedIterator(inner++, *this); } - inline MappedIterator& operator--() { --inner; return *this; } - inline MappedIterator operator--(int) { return MappedIterator(inner--, *this); } - inline MappedIterator& operator+=(ptrdiff_t amount) { inner += amount; return *this; } - inline MappedIterator& operator-=(ptrdiff_t amount) { inner -= amount; return *this; } - inline MappedIterator operator+ (ptrdiff_t amount) const { - return MappedIterator(inner + amount, *this); - } - inline MappedIterator operator- (ptrdiff_t amount) const { - return MappedIterator(inner - amount, *this); - } - inline ptrdiff_t operator- (const MappedIterator& other) const { return inner - other.inner; } - - inline bool operator==(const MappedIterator& other) const { return inner == other.inner; } - inline bool operator!=(const MappedIterator& other) const { return inner != other.inner; } - inline bool operator<=(const MappedIterator& other) const { return inner <= other.inner; } - inline bool operator>=(const MappedIterator& other) const { return inner >= other.inner; } - inline bool operator< (const MappedIterator& other) const { return inner < other.inner; } - inline bool operator> (const MappedIterator& other) const { return inner > other.inner; } - -private: - Inner inner; -}; - -template -class MappedIterable: private Mapping { - // An iterable that wraps some other iterable and maps the values through a mapping function. - // The type `Mapping` must define a method `map()` which performs this mapping. - // - // TODO(cleanup): This seems generally useful. Should we put it somewhere resuable? - -public: - template - MappedIterable(Inner inner, Params&&... 
params) - : Mapping(kj::fwd(params)...), inner(inner) {} - - typedef Decay().begin())> InnerIterator; - typedef MappedIterator Iterator; - typedef Decay().begin())> InnerConstIterator; - typedef MappedIterator ConstIterator; - - inline Iterator begin() { return { inner.begin(), (Mapping&)*this }; } - inline Iterator end() { return { inner.end(), (Mapping&)*this }; } - inline ConstIterator begin() const { return { inner.begin(), (const Mapping&)*this }; } - inline ConstIterator end() const { return { inner.end(), (const Mapping&)*this }; } - -private: - Inner inner; -}; - template class TableMapping { public: @@ -449,6 +397,23 @@ class TableUnmapping { Row* table; }; +template +class IterRange { +public: + inline IterRange(Iterator b, Iterator e): b(b), e(e) {} + + inline Iterator begin() const { return b; } + inline Iterator end() const { return e; } +private: + Iterator b; + Iterator e; +}; + +template +inline IterRange> iterRange(Iterator b, Iterator e) { + return { b, e }; +} + } // namespace _ (private) template @@ -640,11 +605,15 @@ class Table::FindOrCreateImpl { return table.rows[*existing]; } else { bool success = false; + KJ_DEFER({ + if (!success) { + get(table.indexes).erase(table.rows.asPtr(), pos, params...); + } + }); auto& newRow = table.rows.add(createFunc()); KJ_DEFER({ if (!success) { table.rows.removeLast(); - get(table.indexes).erase(table.rows.asPtr(), pos, params...); } }); if (Table::template Impl<>::insert(table, pos, newRow, index) == nullptr) { @@ -683,25 +652,29 @@ Row& Table::findOrCreate(First&& first, Rest&&... rest) { } template -template -auto Table::range(Params&&... params) { - return range>()>(kj::fwd(params)...); +template +auto Table::range(BeginKey&& begin, EndKey&& end) { + return range>()>( + kj::fwd(begin), kj::fwd(end)); } template -template -auto Table::range(Params&&... params) { - auto inner = get(indexes).range(rows.asPtr(), kj::fwd(params)...); +template +auto Table::range(BeginKey&& begin, EndKey&& end) { + auto inner = _::iterRange(get(indexes).seek(rows.asPtr(), kj::fwd(begin)), + get(indexes).seek(rows.asPtr(), kj::fwd(end))); return _::TableIterable(kj::mv(inner), rows.begin()); } template -template -auto Table::range(Params&&... params) const { - return range>()>(kj::fwd(params)...); +template +auto Table::range(BeginKey&& begin, EndKey&& end) const { + return range>()>( + kj::fwd(begin), kj::fwd(end)); } template -template -auto Table::range(Params&&... params) const { - auto inner = get(indexes).range(rows.asPtr(), kj::fwd(params)...); +template +auto Table::range(BeginKey&& begin, EndKey&& end) const { + auto inner = _::iterRange(get(indexes).seek(rows.asPtr(), kj::fwd(begin)), + get(indexes).seek(rows.asPtr(), kj::fwd(end))); return _::TableIterable(kj::mv(inner), rows.begin()); } @@ -727,6 +700,29 @@ Table::ordered() const { return { get(indexes), rows.begin() }; } +template +template +auto Table::seek(Params&&... params) { + return seek>()>(kj::fwd(params)...); +} +template +template +auto Table::seek(Params&&... params) { + auto inner = get(indexes).seek(rows.asPtr(), kj::fwd(params)...); + return _::TableIterator(kj::mv(inner), rows.begin()); +} +template +template +auto Table::seek(Params&&... params) const { + return seek>()>(kj::fwd(params)...); +} +template +template +auto Table::seek(Params&&... params) const { + auto inner = get(indexes).seek(rows.asPtr(), kj::fwd(params)...); + return _::TableIterator(kj::mv(inner), rows.begin()); +} + template template bool Table::eraseMatch(Params&&... 
params) { @@ -744,14 +740,17 @@ bool Table::eraseMatch(Params&&... params) { } template -template -size_t Table::eraseRange(Params&&... params) { - return eraseRange>()>(kj::fwd(params)...); +template +size_t Table::eraseRange(BeginKey&& begin, EndKey&& end) { + return eraseRange>()>( + kj::fwd(begin), kj::fwd(end)); } template -template -size_t Table::eraseRange(Params&&... params) { - return eraseAllImpl(get(indexes).range(rows.asPtr(), kj::fwd(params)...)); +template +size_t Table::eraseRange(BeginKey&& begin, EndKey&& end) { + auto inner = _::iterRange(get(indexes).seek(rows.asPtr(), kj::fwd(begin)), + get(indexes).seek(rows.asPtr(), kj::fwd(end))); + return eraseAllImpl(inner); } template @@ -811,7 +810,7 @@ size_t Table::eraseAll(Predicate&& predicate) { template template size_t Table::eraseAll(Collection&& collection) { - return eraseAllImpl(_::MappedIterable>( + return eraseAllImpl(MappedIterable>( collection, rows.begin())); } @@ -890,10 +889,15 @@ uint chooseBucket(uint hash, uint count); template class HashIndex { public: - HashIndex() KJ_DEFAULT_CONSTRUCTOR_VS2015_BUGGY + HashIndex() = default; template HashIndex(Params&&... params): cb(kj::fwd(params)...) {} + size_t capacity() { + // This method is for testing. + return buckets.size(); + } + void reserve(size_t size) { if (buckets.size() < size * 2) { rehash(size); @@ -913,8 +917,12 @@ class HashIndex { template kj::Maybe insert(kj::ArrayPtr table, size_t pos, Params&&... params) { if (buckets.size() * 2 < (table.size() + 1 + erasedCount) * 3) { - // Load factor is more than 2/3, let's rehash. - rehash(kj::max(buckets.size() * 2, (table.size() + 1) * 2)); + // Load factor is more than 2/3, let's rehash so that it's 1/3, i.e. double the buckets. + // Note that rehashing also cleans up erased entries, so we may not actually be doubling if + // there are a lot of erasures. Nevertheless, this gives us amortized constant time -- it + // would take at least O(table.size()) more insertions (whether or not erasures occur) + // before another rehash is needed. + rehash((table.size() + 1) * 3); } uint hashCode = cb.hashCode(params...); @@ -1008,6 +1016,7 @@ class HashIndex { void rehash(size_t targetSize) { buckets = _::rehash(buckets, targetSize); + erasedCount = 0; } }; @@ -1067,6 +1076,10 @@ class BTreeImpl { BTreeImpl(); ~BTreeImpl() noexcept(false); + KJ_DISALLOW_COPY(BTreeImpl); + BTreeImpl(BTreeImpl&& other); + BTreeImpl& operator=(BTreeImpl&& other); + void logInconsistency() const; void reserve(size_t size); @@ -1404,23 +1417,6 @@ class BTreeImpl::Iterator { uint row; }; -template -class IterRange { -public: - inline IterRange(Iterator b, Iterator e): b(b), e(e) {} - - inline Iterator begin() const { return b; } - inline Iterator end() const { return e; } -private: - Iterator b; - Iterator e; -}; - -template -inline IterRange> iterRange(Iterator b, Iterator e) { - return { b, e }; -} - inline BTreeImpl::Iterator BTreeImpl::begin() const { return { tree, &tree[beginLeaf].leaf, 0 }; } @@ -1434,7 +1430,7 @@ inline BTreeImpl::Iterator BTreeImpl::end() const { template class TreeIndex { public: - TreeIndex() KJ_DEFAULT_CONSTRUCTOR_VS2015_BUGGY + TreeIndex() = default; template TreeIndex(Params&&... params): cb(kj::fwd(params)...) 
{} @@ -1488,13 +1484,9 @@ class TreeIndex { } } - template - _::IterRange<_::BTreeImpl::Iterator> range( - kj::ArrayPtr table, Begin&& begin, End&& end) const { - return { - impl.search(searchKey(table, begin)), - impl.search(searchKey(table, end )) - }; + template + _::BTreeImpl::Iterator seek(kj::ArrayPtr table, Params&&... params) const { + return impl.search(searchKey(table, params...)); } private: @@ -1538,6 +1530,10 @@ class InsertionOrderIndex { struct Link; public: InsertionOrderIndex(); + InsertionOrderIndex(const InsertionOrderIndex&) = delete; + InsertionOrderIndex& operator=(const InsertionOrderIndex&) = delete; + InsertionOrderIndex(InsertionOrderIndex&& other); + InsertionOrderIndex& operator=(InsertionOrderIndex&& other); ~InsertionOrderIndex() noexcept(false); class Iterator { @@ -1623,3 +1619,5 @@ class InsertionOrderIndex { }; } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/test-helpers.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/test-helpers.c++ index 1cc56500178..c4fcbc19fd5 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/test-helpers.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/test-helpers.c++ @@ -19,7 +19,13 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + #include "test.h" + +#include #ifndef _WIN32 #include #include @@ -28,17 +34,35 @@ #include #endif +#include + namespace kj { namespace _ { // private bool hasSubstring(StringPtr haystack, StringPtr needle) { - // TODO(perf): This is not the best algorithm for substring matching. if (needle.size() <= haystack.size()) { + // Boyer-Moore-Horspool wins https://quick-bench.com/q/RiKdKduhdLb6x_DfS1fHaksqwdQ + // https://quick-bench.com/q/KV8irwXrkvsNMbNpP8ENR_tBEPY but libc++ only has default_searcher + // which performs *drastically worse* than the naive algorithm (seriously - why even bother?). + // Hell, doing a query for an embedded null & dispatching to strstr is still cheaper & only + // marginally slower than the purely naive implementation. + +#if !defined(_WIN32) + return memmem(haystack.begin(), haystack.size(), needle.begin(), needle.size()) != nullptr; +#elif defined(__cpp_lib_boyer_moore_searcher) + std::boyer_moore_horspool_searcher searcher{needle.begin(), needle.end()}; + return std::search(haystack.begin(), haystack.end(), searcher) != haystack.end(); +#else + // TODO(perf): This is not the best algorithm for substring matching. strstr can't be used + // because this is supposed to be safe to call on strings with embedded nulls. + // Amusingly this naive algorithm sometimes outperforms std::default_searcher, even if we need + // to double-check first if the needle has an embedded null (indicating std::search ).
for (size_t i = 0; i <= haystack.size() - needle.size(); i++) { if (haystack.slice(i).startsWith(needle)) { return true; } } +#endif } return false; } diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/test.c++ index fddbe9faf1d..e988320f894 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/test.c++ @@ -30,7 +30,6 @@ #include #include #include -#include #include "time.h" #ifndef _WIN32 #include @@ -185,8 +184,7 @@ private: }; TimePoint readClock() { - return origin() + std::chrono::duration_cast( - std::chrono::steady_clock::now().time_since_epoch()).count() * NANOSECONDS; + return systemPreciseMonotonicClock().now(); } } // namespace diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/test.h b/libs/EXTERNAL/capnproto/c++/src/kj/test.h index 20fc0ecfa15..fbc34492ecf 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/test.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/test.h @@ -21,15 +21,13 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "debug.h" #include "vector.h" #include "function.h" #include "windows-sanity.h" // work-around macro conflict with `ERROR` +KJ_BEGIN_HEADER + namespace kj { class TestRunner; @@ -62,24 +60,26 @@ class TestCase { } KJ_UNIQUE_NAME(testCase); \ void KJ_UNIQUE_NAME(TestCase)::run() -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) #define KJ_INDIRECT_EXPAND(m, vargs) m vargs #define KJ_FAIL_EXPECT(...) \ KJ_INDIRECT_EXPAND(KJ_LOG, (ERROR , __VA_ARGS__)); #define KJ_EXPECT(cond, ...) \ - if (cond); else KJ_INDIRECT_EXPAND(KJ_FAIL_EXPECT, ("failed: expected " #cond , __VA_ARGS__)) + if (auto _kjCondition = ::kj::_::MAGIC_ASSERT << cond); \ + else KJ_INDIRECT_EXPAND(KJ_FAIL_EXPECT, ("failed: expected " #cond , _kjCondition, __VA_ARGS__)) #else #define KJ_FAIL_EXPECT(...) \ KJ_LOG(ERROR, ##__VA_ARGS__); #define KJ_EXPECT(cond, ...) 
\ - if (cond); else KJ_FAIL_EXPECT("failed: expected " #cond, ##__VA_ARGS__) + if (auto _kjCondition = ::kj::_::MAGIC_ASSERT << cond); \ + else KJ_FAIL_EXPECT("failed: expected " #cond, _kjCondition, ##__VA_ARGS__) #endif #define KJ_EXPECT_THROW_RECOVERABLE(type, code) \ do { \ KJ_IF_MAYBE(e, ::kj::runCatchingExceptions([&]() { code; })) { \ KJ_EXPECT(e->getType() == ::kj::Exception::Type::type, \ - "code threw wrong exception type: " #code, e->getType()); \ + "code threw wrong exception type: " #code, *e); \ } else { \ KJ_FAIL_EXPECT("code did not throw: " #code); \ } \ @@ -89,7 +89,7 @@ class TestCase { do { \ KJ_IF_MAYBE(e, ::kj::runCatchingExceptions([&]() { code; })) { \ KJ_EXPECT(::kj::_::hasSubstring(e->getDescription(), message), \ - "exception description didn't contain expected substring", e->getDescription()); \ + "exception description didn't contain expected substring", *e); \ } else { \ KJ_FAIL_EXPECT("code did not throw: " #code); \ } \ @@ -163,3 +163,5 @@ class GlobFilter { } // namespace _ (private) } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/thread.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/thread.c++ index c3ec31b359f..e013c07c5eb 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/thread.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/thread.c++ @@ -117,7 +117,7 @@ Thread::ThreadState::ThreadState(Function func) refcount(2) {} void Thread::ThreadState::unref() { -#if _MSC_VER +#if _MSC_VER && !defined(__clang__) if (_InterlockedDecrement(&refcount) == 0) { #else if (__atomic_sub_fetch(&refcount, 1, __ATOMIC_RELEASE) == 0) { @@ -125,7 +125,14 @@ void Thread::ThreadState::unref() { #endif KJ_IF_MAYBE(e, exception) { - KJ_LOG(ERROR, "uncaught exception thrown by detached thread", *e); + // If the exception is still present in ThreadState, this must be a detached thread, so + // the exception will never be rethrown. We should at least log it. + // + // We need to run the thread initializer again before we log anything because the main + // purpose of the thread initializer is to set up a logging callback. + initializer([&]() { + KJ_LOG(ERROR, "uncaught exception thrown by detached thread", *e); + }); } delete this; diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/thread.h b/libs/EXTERNAL/capnproto/c++/src/kj/thread.h index 27c9d052f31..46fd39bb9c2 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/thread.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/thread.h @@ -21,14 +21,12 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "common.h" #include "function.h" #include "exception.h" +KJ_BEGIN_HEADER + namespace kj { class Thread { @@ -80,3 +78,5 @@ class Thread { }; } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/threadlocal.h b/libs/EXTERNAL/capnproto/c++/src/kj/threadlocal.h index d21edf70e8c..613b96e788c 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/threadlocal.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/threadlocal.h @@ -22,9 +22,6 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif // This file declares a macro `KJ_THREADLOCAL_PTR` for declaring thread-local pointer-typed // variables. Use like: // KJ_THREADLOCAL_PTR(MyType) foo = nullptr; @@ -46,83 +43,17 @@ #include "common.h" -#if !defined(KJ_USE_PTHREAD_THREADLOCAL) && defined(__APPLE__) -#include "TargetConditionals.h" -#if TARGET_OS_IPHONE -// iOS apparently does not support __thread (nor C++11 thread_local). 
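As a hedged illustration of the reworked KJ_EXPECT in test.h above (the arithmetic and the exact log wording are only stand-ins):

    KJ_TEST("magic assert example") {
      int expected = 5;
      int actual = 2 + 3;
      KJ_EXPECT(actual == expected);   // passes
      // Had it failed, the new macro decomposes the comparison via MAGIC_ASSERT, so the log
      // would include the actual operand values alongside "failed: expected actual == expected",
      // rather than only the stringified expression text.
    }
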
-#define KJ_USE_PTHREAD_TLS 1 -#endif -#endif - -#if KJ_USE_PTHREAD_TLS -#include -#endif +KJ_BEGIN_HEADER namespace kj { -#if KJ_USE_PTHREAD_TLS -// If __thread is unavailable, we'll fall back to pthreads. - -#define KJ_THREADLOCAL_PTR(type) \ - namespace { struct KJ_UNIQUE_NAME(_kj_TlpTag); } \ - static ::kj::_::ThreadLocalPtr< type, KJ_UNIQUE_NAME(_kj_TlpTag)> -// Hack: In order to ensure each thread-local results in a unique template instance, we declare -// a one-off dummy type to use as the second type parameter. - -namespace _ { // private - -template -class ThreadLocalPtr { - // Hacky type to emulate __thread T*. We need a separate instance of the ThreadLocalPtr template - // for every thread-local variable, because we don't want to require a global constructor, and in - // order to initialize the TLS on first use we need to use a local static variable (in getKey()). - // Each template instance will get a separate such local static variable, fulfilling our need. - -public: - ThreadLocalPtr() = default; - constexpr ThreadLocalPtr(decltype(nullptr)) {} - // Allow initialization to nullptr without a global constructor. - - inline ThreadLocalPtr& operator=(T* val) { - pthread_setspecific(getKey(), val); - return *this; - } - - inline operator T*() const { - return get(); - } - - inline T& operator*() const { - return *get(); - } - - inline T* operator->() const { - return get(); - } - -private: - inline T* get() const { - return reinterpret_cast(pthread_getspecific(getKey())); - } - - inline static pthread_key_t getKey() { - static pthread_key_t key = createKey(); - return key; - } - - static pthread_key_t createKey() { - pthread_key_t key; - pthread_key_create(&key, 0); - return key; - } -}; - -} // namespace _ (private) - -#elif __GNUC__ +#if __GNUC__ #define KJ_THREADLOCAL_PTR(type) static __thread type* // GCC's __thread is lighter-weight than thread_local and is good enough for our purposes. +// +// TODO(cleanup): The above comment was written many years ago. Is it still true? Shouldn't the +// compiler be smart enough to optimize a thread_local of POD type? #else @@ -131,3 +62,5 @@ class ThreadLocalPtr { #endif // KJ_USE_PTHREAD_TLS } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/time-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/time-test.c++ new file mode 100644 index 00000000000..0dd5d64b853 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/time-test.c++ @@ -0,0 +1,146 @@ +// Copyright (c) 2019 Cloudflare, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#if _WIN32 +#include "win32-api-version.h" +#endif + +#include "time.h" +#include "debug.h" +#include +#include + +#if _WIN32 +#include +#include "windows-sanity.h" +#else +#include +#endif + +namespace kj { +namespace { + +KJ_TEST("stringify times") { + KJ_EXPECT(kj::str(50 * kj::SECONDS) == "50s"); + KJ_EXPECT(kj::str(5 * kj::SECONDS + 2 * kj::MILLISECONDS) == "5.002s"); + KJ_EXPECT(kj::str(256 * kj::MILLISECONDS) == "256ms"); + KJ_EXPECT(kj::str(5 * kj::MILLISECONDS + 2 * kj::NANOSECONDS) == "5.000002ms"); + KJ_EXPECT(kj::str(50 * kj::MICROSECONDS) == "50μs"); + KJ_EXPECT(kj::str(5 * kj::MICROSECONDS + 300 * kj::NANOSECONDS) == "5.3μs"); + KJ_EXPECT(kj::str(50 * kj::NANOSECONDS) == "50ns"); +} + +#if _WIN32 +void delay(kj::Duration d) { + Sleep(d / kj::MILLISECONDS); +} +#else +void delay(kj::Duration d) { + usleep(d / kj::MICROSECONDS); +} +#endif + +KJ_TEST("calendar clocks matches unix time") { + // Check that the times returned by the calendar clock are within 1s of what time() returns. + + auto& coarse = systemCoarseCalendarClock(); + auto& precise = systemPreciseCalendarClock(); + + Date p = precise.now(); + Date c = coarse.now(); + time_t t = time(nullptr); + + int64_t pi = (p - UNIX_EPOCH) / kj::SECONDS; + int64_t ci = (c - UNIX_EPOCH) / kj::SECONDS; + + KJ_EXPECT(pi >= t - 1); + KJ_EXPECT(pi <= t + 1); + KJ_EXPECT(ci >= t - 1); + KJ_EXPECT(ci <= t + 1); +} + +KJ_TEST("monotonic clocks match each other") { + // Check that the monotonic clocks return comparable times. + + auto& coarse = systemCoarseMonotonicClock(); + auto& precise = systemPreciseMonotonicClock(); + + TimePoint p = precise.now(); + TimePoint c = coarse.now(); + + // 40ms tolerance due to Windows timeslices being quite long, especially on GitHub Actions where + // Windows is drunk and has completely lost track of time. + KJ_EXPECT(p < c + 40 * kj::MILLISECONDS, p - c); + KJ_EXPECT(p > c - 40 * kj::MILLISECONDS, c - p); +} + +KJ_TEST("all clocks advance in real time") { + Duration coarseCalDiff; + Duration preciseCalDiff; + Duration coarseMonoDiff; + Duration preciseMonoDiff; + + for (uint retryCount KJ_UNUSED: kj::zeroTo(20)) { + auto& coarseCal = systemCoarseCalendarClock(); + auto& preciseCal = systemPreciseCalendarClock(); + auto& coarseMono = systemCoarseMonotonicClock(); + auto& preciseMono = systemPreciseMonotonicClock(); + + Date coarseCalBefore = coarseCal.now(); + Date preciseCalBefore = preciseCal.now(); + TimePoint coarseMonoBefore = coarseMono.now(); + TimePoint preciseMonoBefore = preciseMono.now(); + + Duration delayTime = 150 * kj::MILLISECONDS; + delay(delayTime); + + Date coarseCalAfter = coarseCal.now(); + Date preciseCalAfter = preciseCal.now(); + TimePoint coarseMonoAfter = coarseMono.now(); + TimePoint preciseMonoAfter = preciseMono.now(); + + coarseCalDiff = coarseCalAfter - coarseCalBefore; + preciseCalDiff = preciseCalAfter - preciseCalBefore; + coarseMonoDiff = coarseMonoAfter - coarseMonoBefore; + preciseMonoDiff = preciseMonoAfter - preciseMonoBefore; + + // 20ms tolerance due to Windows timeslices being quite long (and Windows sleeps being only + // accurate to the timeslice). 
+ if (coarseCalDiff > delayTime - 20 * kj::MILLISECONDS && + coarseCalDiff < delayTime + 20 * kj::MILLISECONDS && + preciseCalDiff > delayTime - 20 * kj::MILLISECONDS && + preciseCalDiff < delayTime + 20 * kj::MILLISECONDS && + coarseMonoDiff > delayTime - 20 * kj::MILLISECONDS && + coarseMonoDiff < delayTime + 20 * kj::MILLISECONDS && + preciseMonoDiff > delayTime - 20 * kj::MILLISECONDS && + preciseMonoDiff < delayTime + 20 * kj::MILLISECONDS) { + // success + return; + } + } + + KJ_FAIL_EXPECT("clocks seem inaccurate even after 20 tries", + coarseCalDiff / kj::MICROSECONDS, preciseCalDiff / kj::MICROSECONDS, + coarseMonoDiff / kj::MICROSECONDS, preciseMonoDiff / kj::MICROSECONDS); +} + +} // namespace +} // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/time.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/time.c++ index 19f6b86d7a3..d0846588415 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/time.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/time.c++ @@ -20,10 +20,20 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +#if _WIN32 +#include "win32-api-version.h" +#endif + #include "time.h" #include "debug.h" #include +#if _WIN32 +#include +#else +#include +#endif + namespace kj { const Clock& nullClock() { @@ -35,4 +45,262 @@ const Clock& nullClock() { return NULL_CLOCK; } +#if _WIN32 + +namespace { + +static constexpr int64_t WIN32_EPOCH_OFFSET = 116444736000000000ull; +// Number of 100ns intervals from Jan 1, 1601 to Jan 1, 1970. + +static Date toKjDate(FILETIME t) { + int64_t value = (static_cast(t.dwHighDateTime) << 32) | t.dwLowDateTime; + return (value - WIN32_EPOCH_OFFSET) * (100 * kj::NANOSECONDS) + UNIX_EPOCH; +} + +class Win32CoarseClock: public Clock { +public: + Date now() const override { + FILETIME ft; + GetSystemTimeAsFileTime(&ft); + return toKjDate(ft); + } +}; + +class Win32PreciseClock: public Clock { + typedef VOID WINAPI GetSystemTimePreciseAsFileTimeFunc(LPFILETIME); +public: + Date now() const override { + static GetSystemTimePreciseAsFileTimeFunc* const getSystemTimePreciseAsFileTimePtr = + getGetSystemTimePreciseAsFileTime(); + FILETIME ft; + if (getSystemTimePreciseAsFileTimePtr == nullptr) { + // We can't use QueryPerformanceCounter() to get any more precision because we have no way + // of knowing when the calendar clock jumps. So I guess we're stuck. + GetSystemTimeAsFileTime(&ft); + } else { + getSystemTimePreciseAsFileTimePtr(&ft); + } + return toKjDate(ft); + } + +private: + static GetSystemTimePreciseAsFileTimeFunc* getGetSystemTimePreciseAsFileTime() { + // Dynamically look up the function GetSystemTimePreciseAsFileTimeFunc(). This was only + // introduced as of Windows 8, so it might be missing. +#if __GNUC__ && !__clang__ && __GNUC__ >= 8 +// GCC 8 warns that our reinterpret_cast of a function pointer below is casting between +// incompatible types. Yes, GCC, we know that. This is the nature of GetProcAddress(); it returns +// everything as `long long int (*)()` and we have to cast to the actual type. 
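A small sanity check of the WIN32_EPOCH_OFFSET constant used by toKjDate() above (illustrative only, not part of the patch):

    // 1601-01-01 to 1970-01-01 spans 369 years, of which 89 are leap years
    // (92 multiples of four, minus the non-leap centuries 1700, 1800 and 1900).
    constexpr uint64_t DAYS_1601_TO_1970 = 369ull * 365 + 89;          // 134'774 days
    constexpr uint64_t SECS_1601_TO_1970 = DAYS_1601_TO_1970 * 86400;  // 11'644'473'600 s
    static_assert(SECS_1601_TO_1970 * 10'000'000 == 116444736000000000ull,
                  "FILETIME counts 100ns intervals, i.e. 10^7 per second");
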
+#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-function-type" +#endif + return reinterpret_cast(GetProcAddress( + GetModuleHandleA("kernel32.dll"), + "GetSystemTimePreciseAsFileTime")); + } +}; + +class Win32CoarseMonotonicClock: public MonotonicClock { +public: + TimePoint now() const override { + return kj::origin() + GetTickCount64() * kj::MILLISECONDS; + } +}; + +class Win32PreciseMonotonicClock: public MonotonicClock { + // Precise clock implemented using QueryPerformanceCounter(). + // + // TODO(someday): Windows 10 has QueryUnbiasedInterruptTime() and + // QueryUnbiasedInterruptTimePrecise(), a new API for monotonic timing that isn't as difficult. + // Is there any benefit to dynamically checking for these and using them if available? + +public: + TimePoint now() const override { + static const QpcProperties props; + + LARGE_INTEGER now; + QueryPerformanceCounter(&now); + uint64_t adjusted = now.QuadPart - props.origin; + uint64_t ns = mulDiv64(adjusted, 1'000'000'000, props.frequency); + return kj::origin() + ns * kj::NANOSECONDS; + } + +private: + struct QpcProperties { + uint64_t origin; + // What QueryPerformanceCounter() would have returned at the time when GetTickCount64() returned + // zero. Used to ensure that the coarse and precise timers return similar values. + + uint64_t frequency; + // From QueryPerformanceFrequency(). + + QpcProperties() { + LARGE_INTEGER now, freqLi; + uint64_t ticks = GetTickCount64(); + QueryPerformanceCounter(&now); + + QueryPerformanceFrequency(&freqLi); + frequency = freqLi.QuadPart; + + // Convert the millisecond tick count into performance counter ticks. + uint64_t ticksAsQpc = mulDiv64(ticks, freqLi.QuadPart, 1000); + + origin = now.QuadPart - ticksAsQpc; + } + }; + + static inline uint64_t mulDiv64(uint64_t value, uint64_t numer, uint64_t denom) { + // Inspired by: + // https://github.com/rust-lang/rust/pull/22788/files#diff-24f054cd23f65af3b574c6ce8aa5a837R54 + // Computes (value*numer)/denom without overflow, as long as both + // (numer*denom) and the overall result fit into 64 bits. + uint64_t q = value / denom; + uint64_t r = value % denom; + return q * numer + r * numer / denom; + } +}; + +} // namespace + +const Clock& systemCoarseCalendarClock() { + static constexpr Win32CoarseClock clock; + return clock; +} +const Clock& systemPreciseCalendarClock() { + static constexpr Win32PreciseClock clock; + return clock; +} + +const MonotonicClock& systemCoarseMonotonicClock() { + static constexpr Win32CoarseMonotonicClock clock; + return clock; +} +const MonotonicClock& systemPreciseMonotonicClock() { + static constexpr Win32PreciseMonotonicClock clock; + return clock; +} + +#else + +namespace { + +class PosixClock: public Clock { +public: + constexpr PosixClock(clockid_t clockId): clockId(clockId) {} + + Date now() const override { + struct timespec ts; + KJ_SYSCALL(clock_gettime(clockId, &ts)); + return UNIX_EPOCH + ts.tv_sec * kj::SECONDS + ts.tv_nsec * kj::NANOSECONDS; + } + +private: + clockid_t clockId; +}; + +class PosixMonotonicClock: public MonotonicClock { +public: + constexpr PosixMonotonicClock(clockid_t clockId): clockId(clockId) {} + + TimePoint now() const override { + struct timespec ts; + KJ_SYSCALL(clock_gettime(clockId, &ts)); + return kj::origin() + ts.tv_sec * kj::SECONDS + ts.tv_nsec * kj::NANOSECONDS; + } + +private: + clockid_t clockId; +}; + +} // namespace + +// FreeBSD has "_PRECISE", but Linux just defaults to precise. 
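The overflow-avoiding identity behind mulDiv64() above, worked through with hypothetical numbers (a 3 MHz performance counter after roughly 10,000 seconds of uptime):

    // value = q*denom + r, so value*numer/denom == q*numer + r*numer/denom exactly
    // (integer division), and only r*numer with r < denom has to fit in 64 bits.
    uint64_t value = 30'000'000'001ull;            // QPC ticks (hypothetical)
    uint64_t freq  = 3'000'000ull;                 // ticks per second (hypothetical)
    uint64_t q = value / freq, r = value % freq;   // q = 10'000, r = 1
    uint64_t ns = q * 1'000'000'000ull + r * 1'000'000'000ull / freq;
    // ns == 10'000'000'000'333, whereas value * 1'000'000'000 (3e19) would overflow uint64_t.
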
+#ifndef CLOCK_REALTIME_PRECISE +#define CLOCK_REALTIME_PRECISE CLOCK_REALTIME +#endif + +#ifndef CLOCK_MONOTONIC_PRECISE +#define CLOCK_MONOTONIC_PRECISE CLOCK_MONOTONIC +#endif + +// FreeBSD has "_FAST", Linux has "_COARSE". +// MacOS has an "_APPROX" but only for CLOCK_MONOTONIC_RAW, which isn't helpful. +#ifndef CLOCK_REALTIME_COARSE +#ifdef CLOCK_REALTIME_FAST +#define CLOCK_REALTIME_COARSE CLOCK_REALTIME_FAST +#else +#define CLOCK_REALTIME_COARSE CLOCK_REALTIME +#endif +#endif + +#ifndef CLOCK_MONOTONIC_COARSE +#ifdef CLOCK_MONOTONIC_FAST +#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST +#else +#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC +#endif +#endif + +const Clock& systemCoarseCalendarClock() { + static constexpr PosixClock clock(CLOCK_REALTIME_COARSE); + return clock; +} +const Clock& systemPreciseCalendarClock() { + static constexpr PosixClock clock(CLOCK_REALTIME_PRECISE); + return clock; +} + +const MonotonicClock& systemCoarseMonotonicClock() { + static constexpr PosixMonotonicClock clock(CLOCK_MONOTONIC_COARSE); + return clock; +} +const MonotonicClock& systemPreciseMonotonicClock() { + static constexpr PosixMonotonicClock clock(CLOCK_MONOTONIC_PRECISE); + return clock; +} + +#endif + +kj::String KJ_STRINGIFY(TimePoint t) { + return kj::toCharSequence(t - kj::origin()); +} +kj::String KJ_STRINGIFY(Date d) { + return kj::toCharSequence(d - UNIX_EPOCH); +} +kj::String KJ_STRINGIFY(Duration d) { + auto digits = kj::toCharSequence(d / kj::NANOSECONDS); + ArrayPtr arr = digits; + + size_t point; + kj::StringPtr suffix; + kj::Duration unit; + if (digits.size() > 9) { + point = arr.size() - 9; + suffix = "s"; + unit = kj::SECONDS; + } else if (digits.size() > 6) { + point = arr.size() - 6; + suffix = "ms"; + unit = kj::MILLISECONDS; + } else if (digits.size() > 3) { + point = arr.size() - 3; + suffix = "μs"; + unit = kj::MICROSECONDS; + } else { + point = arr.size(); + suffix = "ns"; + unit = kj::NANOSECONDS; + } + + if (d % unit == 0 * kj::NANOSECONDS) { + return kj::str(arr.slice(0, point), suffix); + } else { + while (arr.back() == '0') { + arr = arr.slice(0, arr.size() - 1); + } + KJ_ASSERT(arr.size() > point); + return kj::str(arr.slice(0, point), ".", arr.slice(point, arr.size()), suffix); + } +} + } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/time.h b/libs/EXTERNAL/capnproto/c++/src/kj/time.h index 72cdf42a8d6..0c2e47af718 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/time.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/time.h @@ -22,12 +22,11 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "units.h" #include +#include "string.h" + +KJ_BEGIN_HEADER namespace kj { namespace _ { // private @@ -50,12 +49,17 @@ constexpr Duration HOURS = 60 * MINUTES; constexpr Duration DAYS = 24 * HOURS; using TimePoint = Absolute; -// An absolute time measured by some particular instance of `Timer`. `Time`s from two different -// `Timer`s may be measured from different origins and so are not necessarily compatible. +// An absolute time measured by some particular instance of `Timer` or `MonotonicClock`. `Time`s +// from two different `Timer`s or `MonotonicClock`s may be measured from different origins and so +// are not necessarily compatible. using Date = Absolute; // A point in real-world time, measured relative to the Unix epoch (Jan 1, 1970 00:00:00 UTC). 
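A minimal usage sketch tying together the new system clock accessors and the Duration stringification implemented above (doSomeWork() is a placeholder, not a real KJ function):

    #include <kj/time.h>
    #include <kj/debug.h>

    void doSomeWork();   // placeholder for the code being measured

    void timeSomething() {
      const kj::MonotonicClock& clock = kj::systemPreciseMonotonicClock();
      kj::TimePoint start = clock.now();
      doSomeWork();
      kj::Duration elapsed = clock.now() - start;
      KJ_LOG(INFO, elapsed);   // Durations now stringify, e.g. "1.5ms" or "250μs"
    }
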
+kj::String KJ_STRINGIFY(TimePoint); +kj::String KJ_STRINGIFY(Date); +kj::String KJ_STRINGIFY(Duration); + constexpr Date UNIX_EPOCH = origin(); // The `Date` representing Jan 1, 1970 00:00:00 UTC. @@ -65,8 +69,48 @@ class Clock { virtual Date now() const = 0; }; +class MonotonicClock { + // Interface to read time in a way that increases as real-world time increases, independent of + // any manual changes to the calendar date/time. Such a clock never "goes backwards" even if the + // system administrator changes the calendar time or suspends the system. However, this clock's + // time points are only meaningful in comparison to other time points from the same clock, and + // cannot be used to determine the current calendar date. + +public: + virtual TimePoint now() const = 0; +}; + const Clock& nullClock(); // A clock which always returns UNIX_EPOCH as the current time. Useful when you don't care about // time. +const Clock& systemCoarseCalendarClock(); +const Clock& systemPreciseCalendarClock(); +// A clock that reads the real system time. +// +// In well-designed code, this should only be called by the top-level dependency injector. All +// other modules should request that the caller provide a Clock so that alternate clock +// implementations can be injected for testing, simulation, reproducibility, and other purposes. +// +// The "coarse" version has precision around 1-10ms, while the "precise" version has precision +// better than 1us. The "precise" version may be slightly slower, though on modern hardware and +// a reasonable operating system the difference is usually negligible. +// +// Note: On Windows prior to Windows 8, there is no precise calendar clock; the "precise" clock +// will be no more precise than the "coarse" clock in this case. + +const MonotonicClock& systemCoarseMonotonicClock(); +const MonotonicClock& systemPreciseMonotonicClock(); +// A MonotonicClock that reads the real system time. +// +// In well-designed code, this should only be called by the top-level dependency injector. All +// other modules should request that the caller provide a Clock so that alternate clock +// implementations can be injected for testing, simulation, reproducibility, and other purposes. +// +// The "coarse" version has precision around 1-10ms, while the "precise" version has precision +// better than 1us. The "precise" version may be slightly slower, though on modern hardware and +// a reasonable operating system the difference is usually negligible. + } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/timer.h b/libs/EXTERNAL/capnproto/c++/src/kj/timer.h index 882501c82ac..862f97b9c79 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/timer.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/timer.h @@ -22,25 +22,35 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "time.h" #include "async.h" +KJ_BEGIN_HEADER + namespace kj { -class Timer { +class Timer: public MonotonicClock { // Interface to time and timer functionality. // // Each `Timer` may have a different origin, and some `Timer`s may in fact tick at a different // rate than real time (e.g. a `Timer` could represent CPU time consumed by a thread). However, // all `Timer`s are monotonic: time will never appear to move backwards, even if the calendar // date as tracked by the system is manually modified. 
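To illustrate the Timer changes above, a hedged sketch (fetchPage and its result type are hypothetical):

    kj::Promise<kj::String> fetchWithTimeout(kj::Timer& timer, kj::Promise<kj::String> fetchPage) {
      // Wrap an existing promise so that it fails if it does not resolve within 30 seconds.
      return timer.timeoutAfter(30 * kj::SECONDS, kj::mv(fetchPage));
    }

    void takesAnyMonotonicClock(const kj::MonotonicClock& clock);
    // Because Timer now derives from MonotonicClock (above), a Timer can also be passed
    // anywhere a MonotonicClock is expected.
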
+ // + // That said, the `Timer` returned by `kj::setupAsyncIo().provider->getTimer()` in particular is + // guaranteed to be synchronized with the `MonotonicClock` returned by + // `systemPreciseMonotonicClock()` (or, more precisely, is updated to match that clock whenever + // the loop waits). + // + // Note that the value returned by `Timer::now()` only changes each time the + // event loop waits for I/O from the system. While the event loop is actively + // running, the time stays constant. This is intended to make behavior more + // deterministic and reproducible. However, if you need up-to-the-cycle + // accurate time, then `Timer::now()` is not appropriate. Instead, use + // `systemPreciseMonotonicClock()` directly in this case. public: - virtual TimePoint now() = 0; + virtual TimePoint now() const = 0; // Returns the current value of a clock that moves steadily forward, independent of any // changes in the wall clock. The value is updated every time the event loop waits, // and is constant in-between waits. @@ -99,7 +109,7 @@ class TimerImpl final: public Timer { // Set the time to `time` and fire any at() events that have been passed. // implements Timer ---------------------------------------------------------- - TimePoint now() override; + TimePoint now() const override; Promise atTime(TimePoint time) override; Promise afterDelay(Duration delay) override; @@ -127,6 +137,8 @@ Promise Timer::timeoutAfter(Duration delay, Promise&& promise) { })); } -inline TimePoint TimerImpl::now() { return time; } +inline TimePoint TimerImpl::now() const { return time; } } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/tuple.h b/libs/EXTERNAL/capnproto/c++/src/kj/tuple.h index e4c07b47ee0..2a526c0c329 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/tuple.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/tuple.h @@ -19,7 +19,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -// This file defines a notion of tuples that is simpler that `std::tuple`. It works as follows: +// This file defines a notion of tuples that is simpler than `std::tuple`. It works as follows: // - `kj::Tuple is the type of a tuple of an A, a B, and a C. // - `kj::tuple(a, b, c)` returns a tuple containing a, b, and c. If any of these are themselves // tuples, they are flattened, so `tuple(a, tuple(b, c), d)` is equivalent to `tuple(a, b, c, d)`. @@ -37,12 +37,10 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "common.h" +KJ_BEGIN_HEADER + namespace kj { namespace _ { // private @@ -93,7 +91,7 @@ struct TupleElement { // from a TupleElement for each element, which is more efficient than a recursive definition. T value; - TupleElement() KJ_DEFAULT_CONSTRUCTOR_VS2015_BUGGY + TupleElement() = default; constexpr inline TupleElement(const T& value): value(value) {} constexpr inline TupleElement(T&& value): value(kj::mv(value)) {} }; @@ -123,7 +121,7 @@ struct TupleImpl, Types...> static_assert(sizeof...(indexes) == sizeof...(Types), "Incorrect use of TupleImpl."); - TupleImpl() KJ_DEFAULT_CONSTRUCTOR_VS2015_BUGGY + TupleImpl() = default; template inline TupleImpl(Params&&... params) @@ -153,7 +151,7 @@ class Tuple { // The actual Tuple class (used for tuples of size other than 1). 
public: - Tuple() KJ_DEFAULT_CONSTRUCTOR_VS2015_BUGGY + Tuple() = default; template constexpr inline Tuple(Tuple&& other): impl(kj::mv(other)) {} @@ -441,3 +439,5 @@ template using TypeOfIndex = typename TypeOfIndex_::Type; } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/units-test.c++ b/libs/EXTERNAL/capnproto/c++/src/kj/units-test.c++ index 89643e7a267..892c1d39862 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/units-test.c++ +++ b/libs/EXTERNAL/capnproto/c++/src/kj/units-test.c++ @@ -349,5 +349,35 @@ TEST(UnitMeasure, BoundedMinMax) { assertTypeAndValue(boundedValue<4,t2>(3), kj::min(bounded<5>(), boundedValue<4,t2>(3))); } +#if !_MSC_VER // MSVC barfs on this test and I just don't have time to care. +KJ_TEST("compare bounded quantities of different bounds") { + auto foo = boundedValue<12345, uint>(123) * unit, byte>>(); + auto bar = boundedValue<54321, uint>(123) * unit, byte>>(); + auto baz = bounded<123>() * unit, byte>>(); + + KJ_EXPECT(foo == foo); + KJ_EXPECT(foo == bar); + KJ_EXPECT(foo == baz); + KJ_EXPECT(bar == foo); + KJ_EXPECT(bar == bar); + KJ_EXPECT(bar == baz); + KJ_EXPECT(baz == foo); + KJ_EXPECT(baz == bar); + KJ_EXPECT(baz == baz); + + auto denom = unit, int>>(); + + KJ_EXPECT(foo / denom == foo / denom); + KJ_EXPECT(foo / denom == bar / denom); + KJ_EXPECT(foo / denom == baz / denom); + KJ_EXPECT(bar / denom == foo / denom); + KJ_EXPECT(bar / denom == bar / denom); + KJ_EXPECT(bar / denom == baz / denom); + KJ_EXPECT(baz / denom == foo / denom); + KJ_EXPECT(baz / denom == bar / denom); + KJ_EXPECT(baz / denom == baz / denom); +} +#endif + } // namespace } // namespace kj diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/units.h b/libs/EXTERNAL/capnproto/c++/src/kj/units.h index d35474a7278..e843b12433d 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/units.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/units.h @@ -25,13 +25,11 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "common.h" #include +KJ_BEGIN_HEADER + namespace kj { // ======================================================================================= @@ -170,8 +168,14 @@ class UnitRatio { return unit1PerUnit2 / other.unit1PerUnit2; } - inline bool operator==(UnitRatio other) const { return unit1PerUnit2 == other.unit1PerUnit2; } - inline bool operator!=(UnitRatio other) const { return unit1PerUnit2 != other.unit1PerUnit2; } + template + inline constexpr bool operator==(const UnitRatio& other) const { + return unit1PerUnit2 == other.unit1PerUnit2; + } + template + inline constexpr bool operator!=(const UnitRatio& other) const { + return unit1PerUnit2 != other.unit1PerUnit2; + } private: Number unit1PerUnit2; @@ -372,7 +376,7 @@ class Quantity { template friend class Quantity; - template + template friend inline constexpr auto operator*(Number1 a, Quantity b) -> Quantity; }; @@ -392,7 +396,8 @@ inline constexpr auto unit() -> decltype(Unit_::get()) { return Unit_::get // unit>() returns a Quantity of value 1. It also, intentionally, works on basic // numeric types. -template +template ()>> inline constexpr auto operator*(Number1 a, Quantity b) -> Quantity { return Quantity(a * b.value, unsafe); @@ -423,6 +428,13 @@ class Absolute { // units, which is actually totally logical and kind of neat. public: + inline constexpr Absolute(MaxValue_): value(maxValue) {} + inline constexpr Absolute(MinValue_): value(minValue) {} + // Allow initialization from maxValue and minValue. 
+ // TODO(msvc): decltype(maxValue) and decltype(minValue) deduce unknown-type for these function + // parameters, causing the compiler to complain of a duplicate constructor definition, so we + // specify MaxValue_ and MinValue_ types explicitly. + inline constexpr Absolute operator+(const T& other) const { return Absolute(value + other); } inline constexpr Absolute operator-(const T& other) const { return Absolute(value - other); } inline constexpr T operator-(const Absolute& other) const { return value - other.value; } @@ -1038,14 +1050,14 @@ inline constexpr T unboundAs(U value) { template inline constexpr T unboundMax(Bounded value) { - // Explicitly ungaurd expecting a value that is at most `maxN`. + // Explicitly unguard expecting a value that is at most `maxN`. static_assert(maxN <= requestedMax, "possible overflow detected"); return value.unwrap(); } template inline constexpr uint unboundMax(BoundedConst) { - // Explicitly ungaurd expecting a value that is at most `maxN`. + // Explicitly unguard expecting a value that is at most `maxN`. static_assert(value <= requestedMax, "overflow detected"); return value; } @@ -1053,7 +1065,7 @@ inline constexpr uint unboundMax(BoundedConst) { template inline constexpr auto unboundMaxBits(T value) -> decltype(unboundMax()>(value)) { - // Explicitly ungaurd expecting a value that fits into `bits` bits. + // Explicitly unguard expecting a value that fits into `bits` bits. return unboundMax()>(value); } @@ -1167,3 +1179,5 @@ inline constexpr Range, Unit>> } } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/vector.h b/libs/EXTERNAL/capnproto/c++/src/kj/vector.h index fa45778ce60..60a370a0f26 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/vector.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/vector.h @@ -21,12 +21,10 @@ #pragma once -#if defined(__GNUC__) && !KJ_HEADER_WARNINGS -#pragma GCC system_header -#endif - #include "array.h" +KJ_BEGIN_HEADER + namespace kj { template @@ -44,24 +42,25 @@ class Vector { inline explicit Vector(size_t capacity): builder(heapArrayBuilder(capacity)) {} inline Vector(Array&& array): builder(kj::mv(array)) {} - inline operator ArrayPtr() { return builder; } - inline operator ArrayPtr() const { return builder; } - inline ArrayPtr asPtr() { return builder.asPtr(); } - inline ArrayPtr asPtr() const { return builder.asPtr(); } + inline operator ArrayPtr() KJ_LIFETIMEBOUND { return builder; } + inline operator ArrayPtr() const KJ_LIFETIMEBOUND { return builder; } + inline ArrayPtr asPtr() KJ_LIFETIMEBOUND { return builder.asPtr(); } + inline ArrayPtr asPtr() const KJ_LIFETIMEBOUND { return builder.asPtr(); } inline size_t size() const { return builder.size(); } inline bool empty() const { return size() == 0; } inline size_t capacity() const { return builder.capacity(); } - inline T& operator[](size_t index) const { return builder[index]; } - - inline const T* begin() const { return builder.begin(); } - inline const T* end() const { return builder.end(); } - inline const T& front() const { return builder.front(); } - inline const T& back() const { return builder.back(); } - inline T* begin() { return builder.begin(); } - inline T* end() { return builder.end(); } - inline T& front() { return builder.front(); } - inline T& back() { return builder.back(); } + inline T& operator[](size_t index) KJ_LIFETIMEBOUND { return builder[index]; } + inline const T& operator[](size_t index) const KJ_LIFETIMEBOUND { return builder[index]; } + + inline const T* begin() const KJ_LIFETIMEBOUND { return 
builder.begin(); } + inline const T* end() const KJ_LIFETIMEBOUND { return builder.end(); } + inline const T& front() const KJ_LIFETIMEBOUND { return builder.front(); } + inline const T& back() const KJ_LIFETIMEBOUND { return builder.back(); } + inline T* begin() KJ_LIFETIMEBOUND { return builder.begin(); } + inline T* end() KJ_LIFETIMEBOUND { return builder.end(); } + inline T& front() KJ_LIFETIMEBOUND { return builder.front(); } + inline T& back() KJ_LIFETIMEBOUND { return builder.back(); } inline Array releaseAsArray() { // TODO(perf): Avoid a copy/move by allowing Array to point to incomplete space? @@ -76,15 +75,15 @@ class Vector { template inline bool operator!=(const U& other) const { return asPtr() != other; } - inline ArrayPtr slice(size_t start, size_t end) { + inline ArrayPtr slice(size_t start, size_t end) KJ_LIFETIMEBOUND { return asPtr().slice(start, end); } - inline ArrayPtr slice(size_t start, size_t end) const { + inline ArrayPtr slice(size_t start, size_t end) const KJ_LIFETIMEBOUND { return asPtr().slice(start, end); } template - inline T& add(Params&&... params) { + inline T& add(Params&&... params) KJ_LIFETIMEBOUND { if (builder.isFull()) grow(); return builder.add(kj::fwd(params)...); } @@ -150,3 +149,5 @@ inline auto KJ_STRINGIFY(const Vector& v) -> decltype(toCharSequence(v.asPtr( } } // namespace kj + +KJ_END_HEADER diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/win32-api-version.h b/libs/EXTERNAL/capnproto/c++/src/kj/win32-api-version.h new file mode 100644 index 00000000000..31d34198f90 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/src/kj/win32-api-version.h @@ -0,0 +1,44 @@ +// Copyright (c) 2013-2017 Sandstorm Development Group, Inc. and contributors +// Licensed under the MIT License: +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#pragma once + +// Request Vista-level APIs. 
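The KJ_LIFETIMEBOUND annotations added to kj::Vector above target bugs like the following sketch, which clang can now diagnose at compile time (for example via its return-stack-address warnings):

    kj::ArrayPtr<int> broken() {
      kj::Vector<int> v;
      v.add(123);
      return v.asPtr();   // Dangles: the ArrayPtr points into `v`, which is destroyed on return.
                          // With KJ_LIFETIMEBOUND on asPtr(), clang can flag this call site.
    }
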
+#ifndef WINVER +#define WINVER 0x0600 +#elif WINVER < 0x0600 +#error "WINVER defined but older than Vista" +#endif + +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0600 +#elif _WIN32_WINNT < 0x0600 +#error "_WIN32_WINNT defined but older than Vista" +#endif + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN // ::eyeroll:: +#endif + +#define NOSERVICE 1 +#define NOMCX 1 +#define NOIME 1 +#define NOMINMAX 1 diff --git a/libs/EXTERNAL/capnproto/c++/src/kj/windows-sanity.h b/libs/EXTERNAL/capnproto/c++/src/kj/windows-sanity.h index dc863a8f195..64475dc41dd 100644 --- a/libs/EXTERNAL/capnproto/c++/src/kj/windows-sanity.h +++ b/libs/EXTERNAL/capnproto/c++/src/kj/windows-sanity.h @@ -36,7 +36,7 @@ // windows-sanity.h, we can be sure that no conflicts will occur regardless of in what order the // application chooses to include these headers vs. windows.h. -#if !_WIN32 +#if !_WIN32 && !__CYGWIN__ // Not on Windows. Tell the compiler never to try to include this again. #pragma once @@ -50,8 +50,11 @@ namespace win32 { const auto ERROR_ = ERROR; + +#ifdef ERROR // This could be absent if e.g. NOGDI was used. #undef ERROR const auto ERROR = ERROR_; +#endif typedef VOID VOID_; #undef VOID diff --git a/libs/EXTERNAL/capnproto/c++/valgrind.supp b/libs/EXTERNAL/capnproto/c++/valgrind.supp new file mode 100644 index 00000000000..e4a0e140e09 --- /dev/null +++ b/libs/EXTERNAL/capnproto/c++/valgrind.supp @@ -0,0 +1,11 @@ +{ + + Memcheck:Addr8 + fun:check_free + fun:free_key_mem + fun:__dlerror_main_freeres + fun:__libc_freeres + fun:_vgnU_freeres + fun:_ZN2kj22TopLevelProcessContext4exitEv +} + diff --git a/libs/EXTERNAL/capnproto/doc/README.md b/libs/EXTERNAL/capnproto/doc/README.md index ad3eea43b3b..96608b04acc 100644 --- a/libs/EXTERNAL/capnproto/doc/README.md +++ b/libs/EXTERNAL/capnproto/doc/README.md @@ -7,9 +7,10 @@ Start by installing ruby1.9.1-dev. On Debian-based operating systems: sudo apt-get install ruby-dev -Then install Jekyll: +Then install Jekyll 3.8.1 (Jekyll 4.x will NOT work due as they removed Pygments support): - sudo gem install jekyll pygments.rb + sudo gem install jekyll -v 3.8.1 + sudo gem install pygments.rb Now install Pygments and SetupTools to be able to install the CapnProto lexer. On Debian based operating systems: @@ -24,7 +25,7 @@ Next, install the custom Pygments syntax highlighter: Now you can launch a local server: - jekyll serve --watch + jekyll _3.8.1_ serve --watch Edit, test, commit. diff --git a/libs/EXTERNAL/capnproto/doc/_posts/2013-12-12-capnproto-0.4-time-travel.md b/libs/EXTERNAL/capnproto/doc/_posts/2013-12-12-capnproto-0.4-time-travel.md index a895a9c5422..42f4bfef21c 100644 --- a/libs/EXTERNAL/capnproto/doc/_posts/2013-12-12-capnproto-0.4-time-travel.md +++ b/libs/EXTERNAL/capnproto/doc/_posts/2013-12-12-capnproto-0.4-time-travel.md @@ -36,14 +36,14 @@ is just talking about implementing a promise API in C++. Pipelining is another that. Please [see the RPC page]({{ site.baseurl }}rpc.html) if you want to know more about pipelining._ -If you do a lot of serious Javascript programming, you've probably heard of +If you do a lot of serious JavaScript programming, you've probably heard of [Promises/A+](http://promisesaplus.com/) and similar proposals. Cap'n Proto RPC introduces a similar construct in C++. In fact, the API is nearly identical, and its semantics are nearly identical. 
Compare with -[Domenic Denicola's Javascript example](http://domenic.me/2012/10/14/youre-missing-the-point-of-promises/): +[Domenic Denicola's JavaScript example](http://domenic.me/2012/10/14/youre-missing-the-point-of-promises/): {% highlight c++ %} -// C++ version of Domenic's Javascript promises example. +// C++ version of Domenic's JavaScript promises example. getTweetsFor("domenic") // returns a promise .then([](vector tweets) { auto shortUrls = parseTweetsForUrls(tweets); @@ -63,10 +63,10 @@ getTweetsFor("domenic") // returns a promise {% endhighlight %} This is C++, but it is no more lines -- nor otherwise more complex -- than the equivalent -Javascript. We're doing several I/O operations, we're doing them asynchronously, and we don't +JavaScript. We're doing several I/O operations, we're doing them asynchronously, and we don't have a huge unreadable mess of callback functions. Promises are based on event loop concurrency, which means you can perform concurrent operations with shared state without worrying about mutex -locking -- i.e., the Javascript model. (Of course, if you really want threads, you can run +locking -- i.e., the JavaScript model. (Of course, if you really want threads, you can run multiple event loops in multiple threads and make inter-thread RPC calls between them.) [More on C++ promises.]({{ site.baseurl }}cxxrpc.html#kj_concurrency_framework) diff --git a/libs/EXTERNAL/capnproto/doc/_posts/2014-03-11-capnproto-0.4.1-bugfixes.md b/libs/EXTERNAL/capnproto/doc/_posts/2014-03-11-capnproto-0.4.1-bugfixes.md index 31e73013ce6..777bf224553 100644 --- a/libs/EXTERNAL/capnproto/doc/_posts/2014-03-11-capnproto-0.4.1-bugfixes.md +++ b/libs/EXTERNAL/capnproto/doc/_posts/2014-03-11-capnproto-0.4.1-bugfixes.md @@ -25,6 +25,6 @@ In the meantime, though, there have been some major updates from the community: C++ and Python), and the second language to implement it from the ground up (Python just wraps the C++ implementation). Check out author [David Renshaw](https://github.com/dwrensha)'s [talk at Mozilla](https://air.mozilla.org/rust-meetup-february-2014/). - * A [Javascript port](https://github.com/jscheid/capnproto-js) has appeared, but it needs help + * A [JavaScript port](https://github.com/jscheid/capnproto-js) has appeared, but it needs help to keep going! diff --git a/libs/EXTERNAL/capnproto/doc/_posts/2014-06-17-capnproto-flatbuffers-sbe.md b/libs/EXTERNAL/capnproto/doc/_posts/2014-06-17-capnproto-flatbuffers-sbe.md index 746ec217171..68ce6fb13cb 100644 --- a/libs/EXTERNAL/capnproto/doc/_posts/2014-06-17-capnproto-flatbuffers-sbe.md +++ b/libs/EXTERNAL/capnproto/doc/_posts/2014-06-17-capnproto-flatbuffers-sbe.md @@ -98,7 +98,7 @@ The down side of reflection is that it is generally very slow (compared to gener When building a message, depending on how your code is organized, it may be convenient to have flexibility in the order in which you fill in the data. If that flexibility is missing, you may find you have to do extra bookkeeping to store data off to the side until its time comes to be added to the message. -Protocol Buffers is natually completely flexible in terms of initialization order because the mesasge is being built on the heap. There is no reason to impose restrictions. (Although, the C++ Protobuf library heavily encourages top-down building.) +Protocol Buffers is naturally completely flexible in terms of initialization order because the message is being built on the heap. There is no reason to impose restrictions. 
(Although, the C++ Protobuf library heavily encourages top-down building.) All the zero-copy systems, though, have to use some form of arena allocation to make sure that the message is built in a contiguous block of memory that can be written out all at once. So, things get more complicated. diff --git a/libs/EXTERNAL/capnproto/doc/_posts/2015-03-02-security-advisory-and-integer-overflow-protection.md b/libs/EXTERNAL/capnproto/doc/_posts/2015-03-02-security-advisory-and-integer-overflow-protection.md index 3b7a3c6470c..1834524d17b 100644 --- a/libs/EXTERNAL/capnproto/doc/_posts/2015-03-02-security-advisory-and-integer-overflow-protection.md +++ b/libs/EXTERNAL/capnproto/doc/_posts/2015-03-02-security-advisory-and-integer-overflow-protection.md @@ -50,7 +50,7 @@ In the past, C and C++ code has been plagued by buffer overrun bugs, but these d But developing a similar sense for integer overflow is hard. We do arithmetic in code all the time, and the vast majority of it isn't an issue. The few places where overflow can happen all too easily go unnoticed. -And by the way, integer overflow affects many memory-safe languages too! Java and C# don't protect against overflow. Python does, using slow arbitrary-precision integers. Javascript doesn't use integers, and is instead succeptible to loss-of-precision bugs, which can have similar (but more subtle) consequences. +And by the way, integer overflow affects many memory-safe languages too! Java and C# don't protect against overflow. Python does, using slow arbitrary-precision integers. JavaScript doesn't use integers, and is instead succeptible to loss-of-precision bugs, which can have similar (but more subtle) consequences. While writing Cap'n Proto, I made sure to think carefully about overflow and managed to correct for it most of the time. On learning that I missed a case, I immediately feared that I might have missed many more, and wondered how I might go about systematically finding them. diff --git a/libs/EXTERNAL/capnproto/doc/_posts/2020-04-23-capnproto-0.8.md b/libs/EXTERNAL/capnproto/doc/_posts/2020-04-23-capnproto-0.8.md new file mode 100644 index 00000000000..3d1bd2e7907 --- /dev/null +++ b/libs/EXTERNAL/capnproto/doc/_posts/2020-04-23-capnproto-0.8.md @@ -0,0 +1,115 @@ +--- +layout: post +title: "Cap'n Proto 0.8: Streaming flow control, HTTP-over-RPC, fibers, etc." +author: kentonv +--- + + + +Today I'm releasing Cap'n Proto 0.8. + +### What's new? + +* [Multi-stream Flow Control](#multi-stream-flow-control) +* [HTTP-over-Cap'n-Proto](#http-over-capn-proto) +* [KJ improvements](#kj-improvements) +* Lots and lots of minor tweaks and fixes. + +#### Multi-stream Flow Control + +It is commonly believed, wrongly, that Cap'n Proto doesn't support "streaming", in the way that gRPC does. In fact, Cap'n Proto's object-capability model and promise pipelining make it much more expressive than gRPC. In Cap'n Proto, "streaming" is just a pattern, not a built-in feature. + +Streaming is accomplished by introducing a temporary RPC object as part of a call. Each streamed message becomes a call to the temporary object. Think of this like providing a callback function in an object-oriented language. + +For instance, server -> client streaming ("returning multiple responses") can look like this: + +{% highlight capnp %} +# NOT NEW: Server -> client streaming example. 
+interface MyInterface { + streamingCall @0 (callback :Callback) -> (); + + interface Callback { + sendChunk @0 (chunk :Data) -> (); + } +} +{% endhighlight %} + +Or for client -> server streaming, the server returns a callback: + +{% highlight capnp %} +# NOT NEW: Client -> Server streaming example. +interface MyInterface { + streamingCall @0 () -> (callback :Callback); + + interface Callback { + sendChunk @0 (chunk :Data) -> (); + } +} +{% endhighlight %} + +Note that the client -> server example relies on [promise pipelining](https://capnproto.org/rpc.html#time-travel-promise-pipelining): When the client invokes `streamingCall()`, it does NOT have to wait for the server to respond before it starts making calls to the `callback`. Using promise pipelining (which has been a built-in feature of Cap'n Proto RPC since its first release in 2013), the client sends messages to the server that say: "Once my call to `streamingCall()` call is finished, take the returned callback and call this on it." + +Obviously, you can also combine the two examples to create bidirectional streams. You can also introduce "callback" objects that have multiple methods, methods that themselves return values (maybe even further streaming callbacks!), etc. You can send and receive multiple new RPC objects in a single call. Etc. + +But there has been one problem that arises in the context of streaming specifically: flow control. Historically, if an app wanted to stream faster than the underlying network connection would allow, then it could end up queuing messages in memory. Worse, if other RPC calls were happening on the same connection concurrently, they could end up blocked behind these queued streaming calls. + +In order to avoid such problems, apps needed to implement some sort of flow control strategy. An easy strategy was to wait for each `sendChunk()` call to return before starting the next call, but this would incur an unnecessary network round trip for each chunk. A better strategy was for apps to allow multiple concurrent calls, but only up to some limit before waiting for in-flight calls to return. For example, an app could limit itself to four in-flight stream calls at a time, or to 64kB worth of chunks. + +This sort of worked, but there were two problems. First, this logic could get pretty complicated, distracting from the app's business logic. Second, the "N-bytes-in-flight-at-a-time" strategy only works well if the value of N is close to the [bandwidth-delay product (BDP)](https://en.wikipedia.org/wiki/Bandwidth-delay_product) of the connection. If N was chosen too low, the connection would be under-utilized. If too high, it would increase queuing latency for all users of the connection. + +Cap'n Proto 0.8 introduces a built-in feature to manage flow control. Now, you can declare your streaming calls like this: + +{% highlight capnp %} +interface MyInterface { + streamingCall @0 (callback :Callback) -> (); + + interface Callback { + # NEW: This streaming call features flow control! + sendChunk @0 (chunk :Data) -> stream; + done @1 (); + } +} +{% endhighlight %} + +Methods declared with `-> stream` behave like methods with empty return types (`-> ()`), but with special behavior when the call is sent over a network connection. Instead of waiting for the remote site to respond to the call, the Cap'n Proto client library will act as if the call has "returned" as soon as it thinks the app should send the next call. 
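As a rough, hedged sketch (not from the original post) of what such a streaming loop can look like in C++, assuming the schema above; `ChunkSource` is a hypothetical data source and real code would add error handling:

{% highlight c++ %}
kj::Promise<void> pumpChunks(MyInterface::Callback::Client callback, ChunkSource& source) {
  if (source.atEnd()) {
    // done() propagates any error raised by an earlier streaming call.
    return callback.doneRequest().send().ignoreResult();
  }
  auto req = callback.sendChunkRequest();
  req.setChunk(source.next());
  // For a method declared "-> stream", send() resolves as soon as the library wants the
  // next chunk, so this loop naturally picks up the built-in flow control.
  return req.send().then([callback, &source]() mutable {
    return pumpChunks(kj::mv(callback), source);
  });
}
{% endhighlight %}
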
So, now the app can use a simple loop that calls `sendChunk()`, waits for it to "complete", then sends the next chunk. Each call will appear to "return immediately" until such a time as Cap'n Proto thinks the connection is fully utilized, and then each call will block until space frees up. + +When using streaming, it is important that apps be aware that error handling works differently. Since the client side may indicate completion of the call before the call has actually executed on the server, any exceptions thrown on the server side obviously cannot propagate to the client. Instead, we introduce a new rule: If a streaming call ends up throwing an exception, then all later method invocations on the same object (streaming or not) will also throw the same exception. You'll notice that we added a `done()` method to the callback interface above. After completing all streaming calls, the caller _must_ call `done()` to check for errors. If any previous streaming call failed, then `done()` will fail too. + +Under the hood, Cap'n Proto currently implements flow control using a simple hack: it queries the send buffer size of the underlying network socket, and sets that as the "window size" for each stream. The operating system will typically increase the socket buffer as needed to match the TCP congestion window, and Cap'n Proto's streaming window size will increase to match. This is not a very good implementation for a number of reasons. The biggest problem is that it doesn't account for proxying: with Cap'n Proto it is common to pass objects through multiple nodes, which automatically arranges for calls to the object to be proxied through the middlemen. But, the TCP socket buffer size only approximates the BDP of the first hop. A better solution would measure the end-to-end BDP using an algorithm like [BBR](https://queue.acm.org/detail.cfm?id=3022184). Expect future versions of Cap'n Proto to improve on this. + +Note that this new feature does not come with any change to the underlying RPC protocol! The flow control behavior is implemented entirely on the client side. The `-> stream` declaration in the schema is merely a hint to the client that it should use this behavior. Methods declared with `-> stream` are wire-compatible with methods declared with `-> ()`. Currently, flow control is only implemented in the C++ library. RPC implementations in other languages will treat `-> stream` the same as `-> ()` until they add explicit support for it. Apps in those languages will need to continue doing their own flow control in the meantime, as they did before this feature was added. + +#### HTTP-over-Cap'n-Proto + +Cap'n Proto 0.8 defines [a protocol for tunnelling HTTP calls over Cap'n Proto RPC](https://github.com/capnproto/capnproto/blob/master/c++/src/capnp/compat/http-over-capnp.capnp), along with an [adapter library](https://github.com/capnproto/capnproto/blob/master/c++/src/capnp/compat/http-over-capnp.h) adapting it to the [KJ HTTP API](https://github.com/capnproto/capnproto/blob/master/c++/src/kj/compat/http.h). Thus, programs written to send or receive HTTP requests using KJ HTTP can easily be adapted to communicate over Cap'n Proto RPC instead. It's also easy to build a proxy that converts regular HTTP protocol into Cap'n Proto RPC and vice versa. + +In principle, http-over-capnp can achieve similar advantages to HTTP/2: Multiple calls can multiplex over the same connection with arbitrary ordering.
But, unlike HTTP/2, calls can be initiated in either direction, can be addressed to multiple virtual endpoints (without relying on URL-based routing), and of course can be multiplexed with non-HTTP Cap'n Proto traffic. + +In practice, however, http-over-capnp is new, and should not be expected to perform as well as mature HTTP/2 implementations today. More work is needed. + +We use http-over-capnp in [Cloudflare Workers](https://workers.cloudflare.com/) to communicate HTTP requests between components of the system, especially into and out of sandboxes. Using this protocol, instead of plain HTTP or HTTP/2, allows us to communicate routing and metadata out-of-band (rather than e.g. stuffing it into private headers). It also allows us to design component APIs using an [object-capability model](http://erights.org/elib/capability/ode/ode-capabilities.html), which turns out to be an excellent choice when code needs to be securely sandboxed. + +Today, our use of this protocol is fairly experimental, but we plan to use it more heavily as the code matures. + +#### KJ improvements + +KJ is the C++ toolkit library developed together with Cap'n Proto's C++ implementation. Ironically, most of the development in the Cap'n Proto repo these days is actually improvements to KJ, in part because it is used heavily in the implementation of [Cloudflare Workers](https://workers.cloudflare.com/). + +* The KJ Promise API now supports fibers. Fibers allow you to execute code in a synchronous style within a thread driven by an asynchronous event loop. The synchronous code runs on an alternate call stack. The code can synchronously wait on a promise, at which point the thread switches back to the main stack and runs the event loop. We generally recommend that new code be written in asynchronous style rather than using fibers, but fibers can be useful in cases where you want to call a synchronous library, and then perform asynchronous tasks in callbacks from said library. [See the pull request for more details.](https://github.com/capnproto/capnproto/pull/913) +* New API `kj::Executor` can be used to communicate directly between event loops on different threads. You can use it to execute an arbitrary lambda on a different thread's event loop. Previously, it was necessary to use some OS construct like a pipe, signal, or eventfd to wake up the receiving thread. +* KJ's mutex API now supports conditional waits, meaning you can unlock a mutex and sleep until such a time as a given lambda function, applied to the mutex's protected state, evaluates to true. +* The KJ HTTP library has continued to be developed actively for its use in [Cloudflare Workers](https://workers.cloudflare.com/). This library now handles millions of requests per second worldwide, both as a client and as a server (since most Workers are proxies), for a wide variety of web sites big and small. + +### Towards 1.0 + +Cap'n Proto has now been around for seven years, with many huge production users (such as Cloudflare). But, we're still on an 0.x release? What gives? + +Well, to be honest, there are still a lot of missing features that I feel like are critical to Cap'n Proto's vision, the most obvious one being three-party handoff. But, so far I just haven't had a real production need to implement those features. Clearly, I should stop waiting for perfection. + +Still, there are a couple smaller things I want to do for an upcoming 1.0 release: + +1. Properly document KJ, independent of Cap'n Proto. 
KJ has evolved into an extremely useful general-purpose C++ toolkit library. +2. Fix a mistake in the design of KJ's `AsyncOutputStream` interface. The interface currently does not have a method to write EOF; instead, EOF is implied by the destructor. This has proven to be the wrong design. Since fixing it will be a breaking API change for anyone using this interface, I want to do it before declaring 1.0. + +I aim to get these done sometime this summer... diff --git a/libs/EXTERNAL/capnproto/doc/_posts/2021-08-14-capnproto-0.9.md b/libs/EXTERNAL/capnproto/doc/_posts/2021-08-14-capnproto-0.9.md new file mode 100644 index 00000000000..edfb8d2024e --- /dev/null +++ b/libs/EXTERNAL/capnproto/doc/_posts/2021-08-14-capnproto-0.9.md @@ -0,0 +1,14 @@ +--- +layout: post +title: "Cap'n Proto 0.9" +author: kentonv +--- + + + +Today I'm releasing Cap'n Proto 0.9. + +There are no huge new features in this release, but there are many minor improvements and bug fixes. You can [read the PR history](https://github.com/capnproto/capnproto/pulls?q=is%3Apr+is%3Aclosed) to find out what has changed. + +Cap'n Proto development has continued to be primarily driven by the [Cloudflare Workers](https://workers.cloudflare.com/) project (of which I'm the lead engineer). As of the previous release, Cloudflare Workers primarily used the [KJ C++ toolkit](https://github.com/capnproto/capnproto/blob/master/kjdoc/tour.md) that is developed with Cap'n Proto, but made only light use of Cap'n Proto serialization and RPC itself. That has now changed: the implementation of [Durable Objects](https://blog.cloudflare.com/introducing-workers-durable-objects/) makes heavy use of Cap'n Proto RPC for essentially all communication within the system. diff --git a/libs/EXTERNAL/capnproto/doc/capnp-tool.md b/libs/EXTERNAL/capnproto/doc/capnp-tool.md index 88a78ae5658..228dc3d6600 100644 --- a/libs/EXTERNAL/capnproto/doc/capnp-tool.md +++ b/libs/EXTERNAL/capnproto/doc/capnp-tool.md @@ -65,7 +65,7 @@ This prints the value of `myConstant`, a [const](language.html#constants) declar applying variable substitution. It can also output the value in binary format (`--binary` or `--packed`). -At first glance, this may seem no more interesting that `capnp encode`: the syntax used to define +At first glance, this may seem no more interesting than `capnp encode`: the syntax used to define constants in schema files is the same as the format accepted by `capnp encode`, right? There is, however, a big difference: constants in schema files may be defined in terms of other constants, which may even be imported from other files. diff --git a/libs/EXTERNAL/capnproto/doc/cxxrpc.md b/libs/EXTERNAL/capnproto/doc/cxxrpc.md index ccb2f73a6bd..eb25acb8b50 100644 --- a/libs/EXTERNAL/capnproto/doc/cxxrpc.md +++ b/libs/EXTERNAL/capnproto/doc/cxxrpc.md @@ -22,7 +22,7 @@ a fully-functional Cap'n Proto client and server. ## KJ Concurrency Framework RPC naturally requires a notion of concurrency. Unfortunately, -[all concurrency models suck](https://plus.google.com/u/0/+KentonVarda/posts/D95XKtB5DhK). +[all concurrency models suck](https://web.archive.org/web/20170718202612/https://plus.google.com/+KentonVarda/posts/D95XKtB5DhK). Cap'n Proto's RPC is based on the [KJ library](cxx.html#kj-library)'s event-driven concurrency framework. The core of the KJ asynchronous framework (events, promises, callbacks) is defined in @@ -35,8 +35,8 @@ must have its own event loop.
KJ discourages fine-grained interaction between t synchronization is expensive and error-prone. Instead, threads are encouraged to communicate through Cap'n Proto RPC. -KJ's event loop model bears a lot of similarity to the Javascript concurrency model. Experienced -Javascript hackers -- especially node.js hackers -- will feel right at home. +KJ's event loop model bears a lot of similarity to the JavaScript concurrency model. Experienced +JavaScript hackers -- especially node.js hackers -- will feel right at home. _As of version 0.4, the only supported way to communicate between threads is over pipes or socketpairs. This will be improved in future versions. For now, just set up an RPC connection @@ -64,7 +64,7 @@ kj::Promise sendEmail(kj::StringPtr address, // the message has been successfully sent. {% endhighlight %} -As you will see, KJ promises are very similar to the evolving Javascript promise standard, and +As you will see, KJ promises are very similar to the evolving JavaScript promise standard, and much of the [wisdom around it](https://www.google.com/search?q=javascript+promises) can be directly applied to KJ promises. diff --git a/libs/EXTERNAL/capnproto/doc/encoding.md b/libs/EXTERNAL/capnproto/doc/encoding.md index 796de7b98ad..5eeba8fa31b 100644 --- a/libs/EXTERNAL/capnproto/doc/encoding.md +++ b/libs/EXTERNAL/capnproto/doc/encoding.md @@ -178,7 +178,9 @@ A list value is encoded as a pointer to a flat array of values. 5 = 8 bytes (non-pointer) 6 = 8 bytes (pointer) 7 = composite (see below) - D (29 bits) = Number of elements in the list, except when C is 7 + D (29 bits) = Size of the list: + when C <> 7: Number of elements in the list. + when C = 7: Number of words in the list, not counting the tag word (see below). The pointed-to values are tightly-packed. In particular, `Bool`s are packed bit-by-bit in @@ -372,7 +374,10 @@ A canonical Cap'n Proto message must adhere to the following rules: * Similarly, for a struct list, if a trailing word in a section of all structs in the list is zero, then it must be truncated from all structs in the list. (All structs in a struct list must have equal sizes, hence a trailing zero can only be removed if it is zero in all elements.) -* Any struct pointer pointing to a zero-sized struct should have an offset of -1. +* Any struct pointer pointing to a zero-sized struct should have an + offset of -1. + * Note that this applies _only_ to structs; other zero-sized values should have offsets + allocated in preorder, as normal. * Canonical messages are not packed. However, packing can still be applied for transmission purposes; the message must simply be unpacked before checking signatures. 
diff --git a/libs/EXTERNAL/capnproto/doc/feed.xml b/libs/EXTERNAL/capnproto/doc/feed.xml index 3020b0417ec..d13f3edbba9 100644 --- a/libs/EXTERNAL/capnproto/doc/feed.xml +++ b/libs/EXTERNAL/capnproto/doc/feed.xml @@ -14,8 +14,8 @@ layout: none {{ post.title | xml_escape }} {{ post.content | xml_escape }} {{ post.date | date: "%a, %d %b %Y %H:%M:%S %z" }} - {{ site.baseurl }}{{ post.url }} - {{ site.baseurl }}{{ post.url }} + {{ post.url }} + {{ post.url }} {% endfor %} diff --git a/libs/EXTERNAL/capnproto/doc/go/capnp/index.html b/libs/EXTERNAL/capnproto/doc/go/capnp/index.html new file mode 100644 index 00000000000..ba220702449 --- /dev/null +++ b/libs/EXTERNAL/capnproto/doc/go/capnp/index.html @@ -0,0 +1,19 @@ +--- +layout: none +--- + + + + + + + + + + + diff --git a/libs/EXTERNAL/capnproto/doc/index.md b/libs/EXTERNAL/capnproto/doc/index.md index 893d62b6630..6545ebafa83 100644 --- a/libs/EXTERNAL/capnproto/doc/index.md +++ b/libs/EXTERNAL/capnproto/doc/index.md @@ -8,7 +8,7 @@ title: Introduction Cap'n Proto is an insanely fast data interchange format and capability-based RPC system. Think -JSON, except binary. Or think [Protocol Buffers](http://protobuf.googlecode.com), except faster. +JSON, except binary. Or think [Protocol Buffers](https://github.com/protocolbuffers/protobuf), except faster. In fact, in benchmarks, Cap'n Proto is INFINITY TIMES faster than Protocol Buffers. This benchmark is, of course, unfair. It is only measuring the time to encode and decode a message diff --git a/libs/EXTERNAL/capnproto/doc/install.md b/libs/EXTERNAL/capnproto/doc/install.md index 51468fe4746..ca94e4219ca 100644 --- a/libs/EXTERNAL/capnproto/doc/install.md +++ b/libs/EXTERNAL/capnproto/doc/install.md @@ -18,16 +18,16 @@ This package is licensed under the [MIT License](http://opensource.org/licenses/ ### Supported Compilers -Cap'n Proto makes extensive use of C++11 language features. As a result, it requires a relatively +Cap'n Proto makes extensive use of C++14 language features. As a result, it requires a relatively new version of a well-supported compiler. The minimum versions are: -* GCC 4.8 -* Clang 3.5 -* Visual C++ 2015 +* GCC 5.0 +* Clang 5.0 +* Visual C++ 2017 If your system's default compiler is older that the above, you will need to install a newer compiler and set the `CXX` environment variable before trying to build Cap'n Proto. For example, -after installing GCC 4.8, you could set `CXX=g++-4.8` to use this compiler. +after installing GCC 5, you could set `CXX=g++-5` to use this compiler. ### Supported Operating Systems @@ -41,7 +41,7 @@ as well as on Windows. We test every Cap'n Proto release on the following platfo * Windows - MinGW-w64 * Windows - Visual C++ -**Windows users:** Cap'n Proto requires Visual Studio 2015 Update 3 or newer. All features +**Windows users:** Cap'n Proto requires Visual Studio 2017 or newer. All features of Cap'n Proto -- including serialization, dynamic API, RPC, and schema parser -- are now supported. **Mac OS X users:** You should use the latest Xcode with the Xcode command-line @@ -56,9 +56,9 @@ package from [Apple](https://developer.apple.com/downloads/) or compiler builds You may download and install the release version of Cap'n Proto like so: -
curl -O https://capnproto.org/capnproto-c++-0.7.0.tar.gz
-tar zxf capnproto-c++-0.7.0.tar.gz
-cd capnproto-c++-0.7.0
+
curl -O https://capnproto.org/capnproto-c++-0.9.1.tar.gz
+tar zxf capnproto-c++-0.9.1.tar.gz
+cd capnproto-c++-0.9.1
 ./configure
 make -j6 check
 sudo make install
@@ -74,6 +74,7 @@ Some package managers include Cap'n Proto packages. Note: These packages are not maintained by us and are sometimes not up to date with the latest Cap'n Proto release. * Debian / Ubuntu: `apt-get install capnproto` +* Arch Linux: `sudo pacman -S capnproto` * Homebrew (OSX): `brew install capnp` **From Git** @@ -96,28 +97,32 @@ If you download directly from Git, you will need to have the GNU autotools -- 1. Download Cap'n Proto Win32 build: -
https://capnproto.org/capnproto-c++-win32-0.7.0.zip
+
https://capnproto.org/capnproto-c++-win32-0.9.1.zip
-2. Find `capnp.exe`, `capnpc-c++.exe`, and `capnpc-capnp.exe` under `capnproto-tools-win32-0.7.0` in +2. Find `capnp.exe`, `capnpc-c++.exe`, and `capnpc-capnp.exe` under `capnproto-tools-win32-0.9.1` in the zip and copy them somewhere. +3. If your `.capnp` files will import any of the `.capnp` files provided by the core project, or + if you use the `stream` keyword (which implicitly imports `capnp/stream.capnp`), then you need + to put those files somewhere where the capnp compiler can find them. To do this, copy the + directory `capnproto-c++-0.9.1/src` to the location of your choice, then make sure to pass the + flag `-I ` to `capnp` when you run it. + If you don't care about C++ support, you can stop here. The compiler exe can be used with plugins provided by projects implementing Cap'n Proto in other languages. If you want to use Cap'n Proto in C++ with Visual Studio, do the following: -1. Make sure that you are using Visual Studio 2015 or newer, with all updates installed. Cap'n - Proto uses C++11 language features that did not work in previous versions of Visual Studio, +1. Make sure that you are using Visual Studio 2017 or newer, with all updates installed. Cap'n + Proto uses C++14 language features that did not work in previous versions of Visual Studio, and the updates include many bug fixes that Cap'n Proto requires. 2. Install [CMake](http://www.cmake.org/) version 3.1 or later. -3. Use CMake to generate Visual Studio project files under `capnproto-c++-0.7.0` in the zip file. +3. Use CMake to generate Visual Studio project files under `capnproto-c++-0.9.1` in the zip file. You can use the CMake UI for this or run this shell command: - cmake -G "Visual Studio 14 2015" - - (For VS2017, you can use "Visual Studio 15 2017" as the generator name.) + cmake -G "Visual Studio 15 2017" 3. Open the "Cap'n Proto" solution in Visual Studio. diff --git a/libs/EXTERNAL/capnproto/doc/language.md b/libs/EXTERNAL/capnproto/doc/language.md index 82fef91db5c..034e854c460 100644 --- a/libs/EXTERNAL/capnproto/doc/language.md +++ b/libs/EXTERNAL/capnproto/doc/language.md @@ -393,7 +393,7 @@ Cap'n Proto generics work very similarly to Java generics or C++ templates. Some a type is wire-compatible with any specific parameterization, so long as you interpret the `AnyPointer`s as the correct type at runtime. -* Relatedly, it is safe to cast an generic interface of a specific parameterization to a generic +* Relatedly, it is safe to cast a generic interface of a specific parameterization to a generic interface where all parameters are `AnyPointer` and vice versa, as long as the `AnyPointer`s are treated as the correct type at runtime. This means that e.g. you can implement a server in a generic way that is correct for all parameterizations but call it from clients using a specific @@ -403,17 +403,27 @@ Cap'n Proto generics work very similarly to Java generics or C++ templates. Some substituting the type parameters manually. For example, `Map(Text, Person)` is encoded exactly the same as: -
{% highlight capnp %} - struct PersonMap { - # Encoded the same as Map(Text, Person). - entries @0 :List(Entry); - struct Entry { - key @0 :Text; - value @1 :Person; +
struct PersonMap {
+    # Encoded the same as Map(Text, Person).
+    entries @0 :List(Entry);
+    struct Entry {
+      key @0 :Text;
+      value @1 :Person;
     }
-  }
-  {% endhighlight %}
-  
+ }
+ + {% comment %} + Highlighter manually invoked because of: https://github.com/jekyll/jekyll/issues/588 + Original code was: + struct PersonMap { + # Encoded the same as Map(Text, Person). + entries @0 :List(Entry); + struct Entry { + key @0 :Text; + value @1 :Person; + } + } + {% endcomment %} Therefore, it is possible to upgrade non-generic types to generic types while retaining backwards-compatibility. @@ -595,9 +605,9 @@ struct MyType $foo("bar") { } {% endhighlight %} -The possible targets for an annotation are: `file`, `struct`, `field`, `union`, `enum`, `enumerant`, -`interface`, `method`, `parameter`, `annotation`, `const`. You may also specify `*` to cover them -all. +The possible targets for an annotation are: `file`, `struct`, `field`, `union`, `group`, `enum`, +`enumerant`, `interface`, `method`, `parameter`, `annotation`, `const`. +You may also specify `*` to cover them all. {% highlight capnp %} annotation baz(*) :Int32; @@ -733,29 +743,47 @@ without changing the [canonical](encoding.html#canonicalization) encoding of a m be replaced with the new generic parameter so long as all existing users of the type are updated to bind that generic parameter to the type it replaced. For example: -
{% highlight capnp %} - struct Map { - entries @0 :List(Entry); - struct Entry { - key @0 :Text; - value @1 :Text; +
struct Map {
+    entries @0 :List(Entry);
+    struct Entry {
+      key @0 :Text;
+      value @1 :Text;
     }
-  }
-  {% endhighlight %}
-  
+ } + + {% comment %} + Highlighter manually invoked because of: https://github.com/jekyll/jekyll/issues/588 + Original code was: + struct Map { + entries @0 :List(Entry); + struct Entry { + key @0 :Text; + value @1 :Text; + } + } + {% endcomment %} Can change to: -
{% highlight capnp %} - struct Map(Key, Value) { - entries @0 :List(Entry); - struct Entry { - key @0 :Key; - value @1 :Value; +
struct Map(Key, Value) {
+    entries @0 :List(Entry);
+    struct Entry {
+      key @0 :Key;
+      value @1 :Value;
     }
-  }
-  {% endhighlight %}
-  
+ } + + {% comment %} + Highlighter manually invoked because of: https://github.com/jekyll/jekyll/issues/588 + Original code was: + struct Map(Key, Value) { + entries @0 :List(Entry); + struct Entry { + key @0 :Key; + value @1 :Value; + } + } + {% endcomment %} As long as all existing uses of `Map` are replaced with `Map(Text, Text)` (and any uses of `Map.Entry` are replaced with `Map(Text, Text).Entry`). diff --git a/libs/EXTERNAL/capnproto/doc/otherlang.md b/libs/EXTERNAL/capnproto/doc/otherlang.md index c4b45568b7b..e3064927679 100644 --- a/libs/EXTERNAL/capnproto/doc/otherlang.md +++ b/libs/EXTERNAL/capnproto/doc/otherlang.md @@ -14,9 +14,11 @@ project's documentation for details. ##### Serialization + RPC * [C++](cxx.html) by [@kentonv](https://github.com/kentonv) +* [C# (.NET Core)](https://github.com/c80k/capnproto-dotnetcore) by [@c80k](https://github.com/c80k) * [Erlang](http://ecapnp.astekk.se/) by [@kaos](https://github.com/kaos) * [Go](https://github.com/zombiezen/go-capnproto2) by [@zombiezen](https://github.com/zombiezen) (forked from [@glycerine](https://github.com/glycerine)'s serialization-only version, below) -* [Javascript (Node.js only)](https://github.com/capnproto/node-capnp) by [@kentonv](https://github.com/kentonv) +* [Haskell](https://github.com/zenhack/haskell-capnp) by [@zenhack](https://github.com/zenhack) +* [JavaScript (Node.js only)](https://github.com/capnproto/node-capnp) by [@kentonv](https://github.com/kentonv) * [OCaml](https://github.com/capnproto/capnp-ocaml) by [@pelzlpj](https://github.com/pelzlpj) with [RPC](https://github.com/mirage/capnp-rpc) by [@talex5](https://github.com/talex5) * [Python](http://capnproto.github.io/pycapnp/) by [@jparyani](https://github.com/jparyani) * [Rust](https://github.com/dwrensha/capnproto-rust) by [@dwrensha](https://github.com/dwrensha) @@ -26,10 +28,9 @@ project's documentation for details. 
* [C](https://github.com/opensourcerouting/c-capnproto) by [OpenSourceRouting](https://www.opensourcerouting.org/) / [@eqvinox](https://github.com/eqvinox) (originally by [@jmckaskill](https://github.com/jmckaskill)) * [D](https://github.com/capnproto/capnproto-dlang) by [@ThomasBrixLarsen](https://github.com/ThomasBrixLarsen) * [Go](https://github.com/glycerine/go-capnproto) by [@glycerine](https://github.com/glycerine) (originally by [@jmckaskill](https://github.com/jmckaskill)) -* [Haskell](https://github.com/zenhack/haskell-capnp) by [@zenhack](https://github.com/zenhack) * [Java](https://github.com/capnproto/capnproto-java/) by [@dwrensha](https://github.com/dwrensha) -* [Javascript](https://github.com/capnp-js/capnp-js-base) by [@popham](https://github.com/popham) -* [Javascript](https://github.com/jscheid/capnproto-js) (older, abandoned) by [@jscheid](https://github.com/jscheid) +* [JavaScript](https://github.com/capnp-js/plugin/) by [@popham](https://github.com/popham) +* [JavaScript](https://github.com/jscheid/capnproto-js) (older, abandoned) by [@jscheid](https://github.com/jscheid) * [Lua](https://github.com/cloudflare/lua-capnproto) by [CloudFlare](http://www.cloudflare.com/) / [@calio](https://github.com/calio) * [Nim](https://github.com/zielmicha/capnp.nim) by [@zielmicha](https://github.com/zielmicha) * [Ruby](https://github.com/cstrahan/capnp-ruby) by [@cstrahan](https://github.com/cstrahan) @@ -43,8 +44,7 @@ new languages. * [Common Test Framework](https://github.com/kaos/capnp_test) by [@kaos](https://github.com/kaos) * [Sublime Syntax Highlighting](https://github.com/joshuawarner32/capnproto-sublime) by [@joshuawarner32](https://github.com/joshuawarner32) -* [Vim Syntax Highlighting](https://github.com/peter-edge/vim-capnp) by [@peter-edge](https://github.com/peter-edge) - (originally by [@cstrahan](https://github.com/cstrahan)) +* [Vim Syntax Highlighting](https://github.com/cstrahan/vim-capnp) by [@cstrahan](https://github.com/cstrahan) * [Wireshark Dissector Plugin](https://github.com/kaos/wireshark-plugins) by [@kaos](https://github.com/kaos) * [VS Code Syntax Highlighter](https://marketplace.visualstudio.com/items?itemName=xmonader.vscode-capnp) by [@xmonader](https://github.com/xmonader) * [IntelliJ Syntax Highlighter](https://github.com/xmonader/sercapnp) by [@xmonader](https://github.com/xmonader) diff --git a/libs/EXTERNAL/capnproto/doc/push-site.sh b/libs/EXTERNAL/capnproto/doc/push-site.sh index 83cfad2bd7e..958c1013dd8 100755 --- a/libs/EXTERNAL/capnproto/doc/push-site.sh +++ b/libs/EXTERNAL/capnproto/doc/push-site.sh @@ -44,7 +44,7 @@ echo "Regenerating site..." rm -rf _site _site.tar.gz -jekyll build --safe $FUTURE --config $CONFIG +jekyll _3.8.1_ build --safe $FUTURE --config $CONFIG echo -n "Push now? (y/N)" read -n 1 YESNO @@ -52,7 +52,7 @@ echo if [ "x$YESNO" == "xy" ]; then echo "Pushing..." 
- tar cz --xform='s,_site/,,' _site/* | gce-ss ssh fe --command "cd /var/www/capnproto.org$PREFIX && tar xz" + tar cz --xform='s,_site/,,' _site/* | gce-ss ssh alpha2 --command "cd /var/www/capnproto.org$PREFIX && tar xz" else echo "Push CANCELED" fi diff --git a/libs/EXTERNAL/capnproto/doc/rpc.md b/libs/EXTERNAL/capnproto/doc/rpc.md index bb8e2d881e3..1c6dcd81fb4 100644 --- a/libs/EXTERNAL/capnproto/doc/rpc.md +++ b/libs/EXTERNAL/capnproto/doc/rpc.md @@ -30,7 +30,7 @@ bar() on the result of the first call". These messages can be sent together -- to wait for the first call to actually return. To make programming to this model easy, in your code, each call returns a "promise". Promises -work much like Javascript promises or promises/futures in other languages: the promise is returned +work much like JavaScript promises or promises/futures in other languages: the promise is returned immediately, but you must later call `wait()` on it, or call `then()` to register an asynchronous callback. diff --git a/libs/EXTERNAL/capnproto/highlighting/emacs/README.md b/libs/EXTERNAL/capnproto/highlighting/emacs/README.md index ddfc29ff4fe..1eeab9eaf17 100644 --- a/libs/EXTERNAL/capnproto/highlighting/emacs/README.md +++ b/libs/EXTERNAL/capnproto/highlighting/emacs/README.md @@ -9,5 +9,4 @@ capnproto directory lives): ```elisp (add-to-list 'load-path "~/src/capnproto/highlighting/emacs") (require 'capnp-mode) -(add-to-list 'auto-mode-alist '("\\.capnp\\'" . capnp-mode)) ``` diff --git a/libs/EXTERNAL/capnproto/highlighting/emacs/capnp-mode.el b/libs/EXTERNAL/capnproto/highlighting/emacs/capnp-mode.el index 688bf3850d0..2681ca80a85 100644 --- a/libs/EXTERNAL/capnproto/highlighting/emacs/capnp-mode.el +++ b/libs/EXTERNAL/capnproto/highlighting/emacs/capnp-mode.el @@ -1,9 +1,10 @@ -;;; capnp-mode.el --- major mode for editing Capn' Proto Files +;;; capnp-mode.el --- Major mode for editing Capn' Proto Files ;; This is free and unencumbered software released into the public domain. ;; Author: Brian Taylor ;; Version: 1.0.0 +;; URL: https://github.com/capnproto/capnproto ;;; Commentary: @@ -15,7 +16,6 @@ ;; ;; (add-to-list 'load-path "~/src/capnproto/highlighting/emacs") ;; (require 'capnp-mode) -;; (add-to-list 'auto-mode-alist '("\\.capnp\\'" . capnp-mode)) ;; ;;; Code: @@ -23,12 +23,11 @@ ;; command to comment/uncomment text (defun capnp-comment-dwim (arg) "Comment or uncomment current line or region in a smart way. -For detail, see `comment-dwim'." +For detail, see `comment-dwim' for ARG explanation." (interactive "*P") (require 'newcomment) (let ( - (comment-start "#") (comment-end "") - ) + (comment-start "#") (comment-end "")) (comment-dwim arg))) (defvar capnp--syntax-table @@ -72,4 +71,9 @@ For detail, see `comment-dwim'." (setq mode-name "capnp") (define-key capnp-mode-map [remap comment-dwim] 'capnp-comment-dwim)) +;;;###autoload +(add-to-list 'auto-mode-alist '("\\.capnp\\'" . capnp-mode)) + (provide 'capnp-mode) +;;; capnp-mode.el ends here + diff --git a/libs/EXTERNAL/capnproto/kjdoc/index.md b/libs/EXTERNAL/capnproto/kjdoc/index.md new file mode 100644 index 00000000000..96f0ac4f2b7 --- /dev/null +++ b/libs/EXTERNAL/capnproto/kjdoc/index.md @@ -0,0 +1,67 @@ +# Introducing KJ + +KJ is Modern C++'s missing base library. + +## What's wrong with `std`? + +The C++ language has advanced rapidly over the last decade. However, its standard library (`std`) remains a weak point. Most modern languages ship with libraries that have built-in support for common needs, such as making HTTP requests. 
`std`, meanwhile, not only lacks HTTP, but doesn't even support basic networking. Developers are forced either to depend on low-level, non-portable OS APIs, or pull in a bunch of third-party dependencies with inconsistent styles and quality. + +Worse, `std` was largely designed before C++ best practices were established. Much of it predates C++11, which changed almost everything about how C++ is written. Some critical parts of `std` -- such as the `iostreams` component -- were designed before anyone really knew how to write quality object-oriented code, and are atrociously bad by modern standards. + +Finally, `std` is designed by committee, which has advantages and disadvantages. On one hand, committees are less likely to make major errors in design. However, they also struggle to make bold decisions, and they move slowly. Committees can also lose touch with real-world concerns, over-engineering features that aren't needed while missing essential basics. + +## How is KJ different? + +KJ was designed and implemented primarily by one developer, Kenton Varda. Every feature was designed to solve a real-world need in a project Kenton was working on -- first [Cap'n Proto](https://capnproto.org), then [Sandstorm](https://sandstorm.io), and more recently, [Cloudflare Workers](https://workers.dev). KJ was designed from the beginning to target exclusively Modern C++ (C++11 and later). + +Since its humble beginnings in 2013, KJ has developed a huge range of practical functionality, including: + +* RAII utilities, especially for memory management +* Basic types and data structures: `Array`, `Maybe`, `OneOf`, `Tuple`, `Function`, `Quantity` (unit analysis), `String`, `Vector`, `HashMap`, `HashSet`, `TreeMap`, `TreeSet`, etc. +* Convenient stringification +* Exception/assertion framework with friggin' stack traces +* Event loop framework with `Promise` API inspired by E (which also inspired JavaScript's `Promise`). +* Threads, fibers, mutexes, lazy initialization +* I/O: Clocks, filesystem, networking +* Protocols: HTTP (client and server), TLS (via OpenSSL/BoringSSL), gzip (via libz) +* Parsers: URL, JSON (using Cap'n Proto), parser combinator framework +* Encodings: UTF-8/16/32, base64, hex, URL encoding, C escapes +* Command-line argument parsing +* Unit testing framework +* And more! + +KJ is not always perfectly organized, and admittedly has some quirks. But, it has proven pragmatic and powerful in real-world applications. + +# Getting KJ + +KJ is bundled with Cap'n Proto -- see [installing Cap'n Proto](https://capnproto.org/install.html). KJ is built as a separate set of libraries, so that you can link against it without Cap'n Proto if desired. + +KJ is officially tested on Linux (GCC and Clang), Windows (Visual Studio, MinGW, and Cygwin), MacOS, and Android. It should additionally be easy to get working on any POSIX platform targeted by GCC or Clang. + +# FAQ + +## What does KJ stand for? + +Nothing. + +The name "KJ" was chosen to be a relatively unusual combination of two letters that is easy to type (on both Qwerty and Dvorak layouts). This is important, because users of KJ will find themselves typing `kj::` very frequently. + +## Why reinvent modern `std` features that are well-designed? + +Some features of KJ appear to replace `std` features that were introduced recently with decent, modern designs. Examples include `kj::Own` vs `std::unique_ptr`, `kj::Maybe` vs `std::optional`, and `kj::Promise` vs `std::task`. 
+ +First, in many cases, the KJ feature actually predates the corresponding `std` feature. `kj::Maybe` was one of the first KJ types, introduced in 2013; `std::optional` arrived in C++17. `kj::Promise` was also introduced in 2013; `std::task` is coming in C++20 (with coroutines). + +Second, consistency. KJ uses somewhat different idioms from `std`, resulting in some friction when trying to use KJ and `std` types together. The most obvious friction is aesthetic (e.g. naming conventions), but some deeper issues exist. For example, KJ tries to treat `const` as transitive, especially so that it can be used to help enforce thread-safety. This can lead to subtle problems (e.g. unexpected compiler errors) with `std` containers not designed with transitive constness in mind. KJ also uses a very different [philosophy around exceptions](../style-guide.md#exceptions) compared to `std`; KJ believes exception-free code is a myth, but `std` sometimes requires it. + +Third, even some modern `std` APIs have design flaws. For example, `std::optional`s can be dereferenced without an explicit null check, resulting in a crash if the value is null -- exactly what this type should have existed to prevent! `kj::Maybe`, in contrast, forces you to write an if/else block or an explicit assertion. For another example, `kj::Own` uses dynamic dispatch for deleters, which allows for lots of useful patterns that `std::unique_ptr`'s static dispatch cannot do. + +## Shouldn't modern software be moving away from memory-unsafe languages? + +Probably! + +Similarly, modern software should also move away from type-unsafe languages. Type-unsafety and memory-unsafety are both responsible for a huge number of security bugs. (Think SQL injection for an example of a security bug resulting from type-unsafety.) + +Hence, all other things being equal, I would suggest Rust for new projects. + +But it's rare that all other things are really equal, and you may have your reasons for using C++. KJ is here to help, not to judge. diff --git a/libs/EXTERNAL/capnproto/kjdoc/style-guide.md b/libs/EXTERNAL/capnproto/kjdoc/style-guide.md new file mode 120000 index 00000000000..4b6893e8c0f --- /dev/null +++ b/libs/EXTERNAL/capnproto/kjdoc/style-guide.md @@ -0,0 +1 @@ +../style-guide.md \ No newline at end of file diff --git a/libs/EXTERNAL/capnproto/kjdoc/tour.md b/libs/EXTERNAL/capnproto/kjdoc/tour.md new file mode 100644 index 00000000000..06d9be08621 --- /dev/null +++ b/libs/EXTERNAL/capnproto/kjdoc/tour.md @@ -0,0 +1,1022 @@ +--- +title: A tour of KJ +--- + +This page is a tour through the functionality provided by KJ. It is intended for developers new to KJ who want to learn the ropes. + +**This page is not an API reference.** KJ's reference documentation is provided by comments in the headers themselves. Keeping reference docs in the headers makes it easy to find them using your editor's "jump to declaration" hotkey. It also ensures that the documentation is never out-of-sync with the version of KJ you are using. + +Core Programming +====================================================================== + +This section covers core KJ features used throughout nearly all KJ-based code. + +Every KJ developer should familiarize themselves at least with this section. + +## Core Utility Functions + +### Argument-passing: move, copy, forward + +`kj::mv` has exactly the same semantics as `std::move`, but takes fewer keystrokes to type. Since this is used extraordinarily often, saving a few keystrokes really makes a legitimate difference. 
If you aren't familiar with `std::move`, I recommend reading up on [C++11 move semantics](https://stackoverflow.com/questions/3106110/what-is-move-semantics). + +`kj::cp` is invoked in a similar way to `kj::mv`, but explicitly invokes the copy constructor of its argument, returning said copy. This is occasionally useful when invoking a function that wants an rvalue reference as a parameter, which normally requires pass-by-move, but you really want to pass it a copy. + +`kj::fwd`, is equivalent to `std::forward`. It is used to implement [perfect forwarding](https://en.cppreference.com/w/cpp/utility/forward), that is, forwarding arbitrary arguments from a template function into some other function without understanding their types. + +### Deferring code to scope exit + +This macro declares some code which must execute when exiting the current scope (whether normally or by exception). It is essentially a shortcut for declaring a class with a destructor containing said code, and instantiating that destructor. Example: + +```c++ +void processFile() { + int fd = open("file.txt", O_RDONLY); + KJ_ASSERT(fd >= 0); + + // Make sure file is closed on return. + KJ_DEFER(close(fd)); + + // ... do something with the file ... +} +``` + +You can also pass a multi-line block (in curly braces) as the argument to `KJ_DEFER`. + +There is also a non-macro version, `kj::defer`, which takes a lambda as its argument, and returns an object that invokes that lambda on destruction. The returned object has move semantics. This is convenient when the scope of the deferral isn't necessarily exactly function scope, such as when capturing context in a callback. Example: + +```c++ +kj::Function processFile() { + int fd = open("file.txt", O_RDONLY); + KJ_ASSERT(fd >= 0); + + // Make sure file is closed when the returned function + // is eventually destroyed. + auto deferredClose = kj::defer([fd]() { close(fd); }); + + return [fd, deferredClose = kj::mv(deferredClose)] + (int arg) { + // ... do something with fd and arg ... + } +} +``` + +Sometimes, you want a deferred action to occur only when the scope exits normally via `return`, or only when it exits due to an exception. For those purposes, `KJ_ON_SCOPE_SUCCESS` and `KJ_ON_SCOPE_FAILURE` may be used, with the same syntax as `KJ_DEFER`. + +### Size and range helpers + +`kj::size()` accepts a built-in array or a container as an argument, and returns the number of elements. In the case of a container, the container must implement a `.size()` method. The idea is that you can use this to find out how many iterations a range-based `for` loop on that container would execute. That said, in practice `kj::size` is most commonly used with arrays, as a shortcut for something like `sizeof(array) / sizeof(array[0])`. + +```c++ +int arr[15]; +KJ_ASSERT(kj::size(arr) == 15); +``` + +`kj::range(i, j)` returns an iterable that contains all integers from `i` to `j` (including `i`, but not including `j`). This is typically used in `for` loops: + +```c++ +for (auto i: kj::range(5, 10)) { + KJ_ASSERT(i >= 5 && i < 10); +} +``` + +In the very-common case of iterating from zero, `kj::zeroTo(i)` should be used instead of `kj::range(0, i)`, in order to avoid ambiguity about what type of integer should be generated. + +`kj::indices(container)` is equivalent to `kj::zeroTo(kj::size(container))`. This is extremely convenient when iterating over parallel arrays. 
+ +```c++ +KJ_ASSERT(foo.size() == bar.size()); +for (auto i: kj::indices(foo)) { + foo[i] = bar[i]; +} +``` + +`kj::repeat(value, n)` returns an iterable that acts like an array of size `n` where every element is `value`. This is not often used, but can be convenient for string formatting as well as generating test data. + +### Casting helpers + +`kj::implicitCast(value)` is equivalent to `static_cast(value)`, but will generate a compiler error if `value` cannot be implicitly cast to `T`. For example, `static_cast` can be used for both upcasts (derived type to base type) and downcasts (base type to derived type), but `implicitCast` can only be used for the former. + +`kj::downcast(value)` is equivalent to `static_cast(value)`, except that when compiled in debug mode with RTTI available, a runtime check (`dynamic_cast`) will be performed to verify that `value` really has type `T`. Use this in cases where you are casting a base type to a derived type, and you are confident that the object is actually an instance of the derived type. The debug-mode check will help you catch bugs. + +`kj::dynamicDowncastIfAvailable(value)` is like `dynamic_cast(value)` with two differences. First, it returns `kj::Maybe` instead of `T*`. Second, if the program is compiled without RTTI enabled, the function always returns null. This function is intended to be used to implement optimizations, where the code can do something smarter if `value` happens to be of some specific type -- but if RTTI is not available, it is safe to skip the optimization. See [KJ idiomatic use of dynamic_cast](style-guide.md#dynamic_cast) for more background. + +### Min/max, numeric limits, and special floats + +`kj::min()` and `kj::max()` return the minimum and maximum of the input arguments, automatically choosing the appropriate return type even if the inputs are of different types. + +`kj::minValue` and `kj::maxValue` are special constants that, when cast to an integer type, become the minimum or maximum value of the respective type. For example: + +```c++ +int16_t i = kj::maxValue; +KJ_ASSERT(i == 32767); +``` + +`kj::inf()` evaluates to floating-point infinity, while `kj::nan()` evaluates to floating-point NaN. `kj::isNaN()` returns true if its argument is NaN. + +### Explicit construction and destruction + +`kj::ctor()` and `kj::dtor()` explicitly invoke a constructor or destructor in a way that is readable and convenient. The first argument is a reference to memory where the object should live. + +These functions should almost never be used in high-level code. They are intended for use in custom memory management, or occasionally with unions that contain non-trivial types (but consider using `kj::OneOf` instead). You must understand C++ memory aliasing rules to use these correctly. + +## Ownership and memory management + +KJ style makes heavy use of [RAII](style-guide.md#raii-resource-acquisition-is-initialization). KJ-based code should never use `new` and `delete` directly. Instead, use the utilities in this section to manage memory in a RAII way. + +### Owned pointers, heap allocation, and disposers + +`kj::Own` is a pointer to a value of type `T` which is "owned" by the holder. When a `kj::Own` goes out-of-scope, the value it points to will (typically) be destroyed and freed. + +`kj::Own` has move semantics. Thus, when used as a function parameter or return type, `kj::Own` indicates that ownership of the object is being transferred. 
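+
+For illustration, here is a minimal sketch of what ownership transfer looks like in practice. The `Widget` type and the helper functions are hypothetical, and `kj::heap` is introduced just below:
+
+```c++
+#include <kj/memory.h>
+
+struct Widget {
+  explicit Widget(int id): id(id) {}
+  int id;
+};
+
+kj::Own<Widget> makeWidget() {
+  // Heap-allocate a Widget; the caller receives ownership.
+  return kj::heap<Widget>(42);
+}
+
+void consume(kj::Own<Widget> widget) {
+  // When `widget` goes out of scope here, the Widget is destroyed and freed.
+}
+
+void example() {
+  auto widget = makeWidget();    // ownership received from makeWidget()
+  consume(kj::mv(widget));       // ownership explicitly moved into consume()
+}
+```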
+ +`kj::heap(args...)` allocates an object of type `T` on the heap, passing `args...` to its constructor, and returns a `kj::Own`. This is the most common way to create owned objects. + +However, a `kj::Own` does not necessarily refer to a heap object. A `kj::Own` is actually implemented as a pair of a pointer to the object, and a pointer to a `kj::Disposer` object that knows how to destroy it; `kj::Own`'s destructor invokes the disposer. `kj::Disposer` is an abstract interface with many implementations. `kj::heap` uses an implementation that invokes the object's destructor then frees its underlying space from the heap (like `delete` does), but other implementations exist. Alternative disposers allow an application to control memory allocation more precisely when desired. + +Some example uses of disposers include: + +* `kj::fakeOwn(ref)` returns a `kj::Own` that points to `ref` but doesn't actually destroy it. This is useful when you know for sure that `ref` will outlive the scope of the `kj::Own`, and therefore heap allocation is unnecessary. This is common in cases where, for example, the `kj::Own` is being passed into an object which itself will be destroyed before `ref` becomes invalid. It also makes sense when `ref` is actually a static value or global that lives forever. +* `kj::refcounted(args...)` allocates a `T` which uses reference counting. It returns a `kj::Own` that represents one reference to the object. Additional references can be created by calling `kj::addRef(*ptr)`. The object is destroyed when no more `kj::Own`s exist pointing at it. Note that `T` must be a subclass of `kj::Refcounted`. If references may be shared across threads, then atomic refcounting must be used; use `kj::atomicRefcounted(args...)` and inherit `kj::AtomicRefcounted`. Reference counting should be using sparingly; see [KJ idioms around reference counting](style-guide.md#reference-counting) for a discussion of when it should be used and why it is designed the way it is. +* `kj::attachRef(ref, args...)` returns a `kj::Own` pointing to `ref` that actually owns `args...`, so that when the `kj::Own` goes out-of-scope, the other arguments are destroyed. Typically these arguments are themselves `kj::Own`s or other pass-by-move values that themselves own the object referenced by `ref`. `kj::attachVal(value, args...)` is similar, where `value` is a pass-by-move value rather than a reference; a copy of it will be allocated on the heap. Finally, `ownPtr.attach(args...)` returns a new `kj::Own` pointing to the same value that `ownPtr` pointed to, but such that `args...` are owned as well and will be destroyed together. Attachments are always destroyed after the thing they are attached to. +* `kj::SpaceFor` contains enough space for a value of type `T`, but does not construct the value until its `construct(args...)` method is called. That method returns an `kj::Own`, whose disposer destroys the value. `kj::SpaceFor` is thus a safer way to perform manual construction compared to invoking `kj::ctor()` and `kj::dtor()`. + +These disposers cover most use cases, but you can also implement your own if desired. `kj::Own` features a constructor overload that lets you pass an arbitrary disposer. + +### Arrays + +`kj::Array` is similar to `kj::Own`, but points to (and owns) an array of `T`s. + +A `kj::Array` can be allocated with `kj::heapArray(size)`, if `T` can be default-constructed. Otherwise, you will need to use a `kj::ArrayBuilder` to build the array. 
First call `kj::heapArrayBuilder(size)`, then invoke the builder's `add(value)` method to add each element, then finally call its `finish()` method to obtain the completed `kj::Array`. `ArrayBuilder` requires that you know the final size before you start; if you don't, you may want to use `kj::Vector` instead. + +Passing a `kj::Array` implies an ownership transfer. If you merely want to pass a pointer to an array, without transferring ownership, use `kj::ArrayPtr`. This type essentially encapsulates a pointer to the beginning of the array, plus its size. Note that a `kj::ArrayPtr` points to _the underlying memory_ backing a `kj::Array`, not to the `kj::Array` itself; thus, moving a `kj::Array` does NOT invalidate any `kj::ArrayPtr`s already pointing at it. You can also construct a `kj::ArrayPtr` pointing to any C-style array (doesn't have to be a `kj::Array`) using `kj::arrayPtr(ptr, size)` or `kj::arrayPtr(beginPtr, endPtr)`. + +Both `kj::Array` and `kj::ArrayPtr` contain a number of useful methods, like `slice()`. Be sure to check out the class definitions for more details. + +## Strings + +A `kj::String` is a segment of text. By convention, this text is expected to be Unicode encoded in UTF-8. But, `kj::String` itself is not Unicode-aware; it is merely an array of `char`s. + +NUL characters (`'\0'`) are allowed to appear anywhere in a string and do not terminate the string. However, as a convenience, the buffer backing a `kj::String` always has an additional NUL character appended to the end (but not counted in the size). This allows the text in a `kj::String` to be passed to legacy C APIs that use NUL-terminated strings without an extra copy; use the `.cStr()` method to get a `const char*` for such cases. (Of course, keep in mind that if the string contains NUL characters other than at the end, legacy C APIs will interpret the string as truncated at that point.) + +`kj::StringPtr` represents a pointer to a `kj::String`. Similar to `kj::ArrayPtr`, `kj::StringPtr` does not point at the `kj::String` object itself, but at its backing array. Thus, moving a `kj::String` does not invalidate any `kj::StringPtr`s. This is a major difference from `std::string`! Moving an `std::string` invalidates all pointers into its backing buffer (including `std::string_view`s), because `std::string` inlines small strings as an optimization. This optimization may seem clever, but means that `std::string` cannot safely be used as a way to hold and transfer ownership of a text buffer. Doing so can lead to subtle, data-dependent bugs; a program might work fine until someone gives it an unusually small input, at which point it segfaults. `kj::String` foregoes this optimization for simplicity. + +Also similar to `kj::ArrayPtr`, a `kj::StringPtr` does not have to point at a `kj::String`. It can be initialized from a string literal or any C-style NUL-terminated `const char*` without making a copy. Also, KJ defines the special literal suffix `_kj` to write a string literal whose type is implicitly `kj::StringPtr`. + +```c++ +// It's OK to initialize a StringPtr from a classic literal. +// No copy is performed; the StringPtr points directly at +// constant memory. +kj::StringPtr foo = "foo"; + +// But if you add the _kj suffix, then you don't even need +// to declare the type. `bar` will implicitly have type +// kj::StringPtr. Also, this version can be declared +// `constexpr`. +constexpr auto bar = "bar"_kj; +``` + +### Stringification + +To allocate and construct a `kj::String`, use `kj::str(args...)`. 
Each argument is stringified and the results are concatenated to form the final string. (You can also allocate an uninitialized string buffer with `kj::heapString(size)`.) + +```c++ +kj::String makeGreeting(kj::StringPtr name) { + return kj::str("Hello, ", name, "!"); +} +``` + +KJ knows how to stringify most primitive types as well as many KJ types automatically. Note that integers will be stringified in base 10; if you want hexadecimal, use `kj::hex(i)` as the parameter to `kj::str()`. + +You can additionally extend `kj::str()` to work with your own types by declaring a stringification method using `KJ_STRINGIFY`, like so: + +```c++ +enum MyType { A, B, C }; +kj::StringPtr KJ_STRINGIFY(MyType value) { + switch (value) { + case A: return "A"_kj; + case B: return "B"_kj; + case C: return "C"_kj; + } + KJ_UNREACHABLE; +} +``` + +The `KJ_STRINGIFY` declaration should appear either in the same namespace where the type is defined, or in the global scope. The function can return any random-access iterable sequence of `char`, such as a `kj::String`, `kj::StringPtr`, `kj::ArrayPtr`, etc. As an alternative to `KJ_STRINGIFY`, you can also declare a `toString()` method on your type, with the same return type semantics. + +When constructing very large, complex strings -- for example, when writing a code generator -- consider using `kj::StringTree`, which maintains a tree of strings and only concatenates them at the very end. For example, `kj::strTree(foo, kj::strTree(bar, baz)).flatten()` only performs one concatenation, whereas `kj::str(foo, kj::str(bar, baz))` would perform a redundant intermediate concatenation. + +## Core Utility Types + +### Maybes + +`kj::Maybe` is either `nullptr`, or contains a `T`. In KJ-based code, nullable values should always be expressed using `kj::Maybe`. Primitive pointers should never be null. Use `kj::Maybe` instead of `T*` to express that the pointer/reference can be null. + +In order to dereference a `kj::Maybe`, you must use the `KJ_IF_MAYBE` macro, which behaves like an `if` statement. + +```c++ +kj::Maybe maybeI = 123; +kj::Maybe maybeJ = nullptr; + +KJ_IF_MAYBE(i, maybeI) { + // This block will execute, with `i` being a + // pointer into `maybeI`'s value. In a better world, + // `i` would be a reference rather than a pointer, + // but we couldn't find a way to trick the compiler + // into that. + KJ_ASSERT(*i == 123); +} else { + KJ_FAIL_ASSERT("can't get here"); +} + +KJ_IF_MAYBE(j, maybeJ) { + KJ_FAIL_ASSERT("can't get here"); +} else { + // This block will execute. +} +``` + +Note that `KJ_IF_MAYBE` forces you to think about the null case. This differs from `std::optional`, which can be dereferenced using `*`, resulting in undefined behavior if the value is null. + +Performance nuts will be interested to know that `kj::Maybe` and `kj::Maybe>` are both optimized such that they take no more space than their underlying pointer type, using a literal null pointer to indicate nullness. For other types of `T`, `kj::Maybe` must maintain an extra boolean and so is somewhat larger than `T`. + +### Variant types + +`kj::OneOf` is a variant type that can be assigned to exactly one of the input types. To unpack the variant, use `KJ_SWITCH_ONEOF`: + +```c++ +void handle(kj::OneOf value) { + KJ_SWITCH_ONEOF(value) { + KJ_CASE_ONEOF(i, int) { + // Note that `i` is an lvalue reference to the content + // of the OneOf. This differs from `KJ_IF_MAYBE` where + // the variable is a pointer. 
+ handleInt(i); + } + KJ_CASE_ONEOF(s, kj::String) { + handleString(s); + } + } +} +``` + +Often, in real-world usage, the type of each variant in a `kj::OneOf` is not sufficient to understand its meaning; sometimes two different variants end up having the same type used for different purposes. In these cases, it would be useful to assign a name to each variant. A common way to do this is to define a custom `struct` type for each variant, and then declare the `kj::OneOf` using those: + +```c++ +struct NotStarted { + kj::String filename; +}; +struct Running { + kj::Own file; +}; +struct Done { + kj::String result; +}; + +typedef kj::OneOf State; +``` + +### Functions + +`kj::Function` represents a callable function with the given signature. A `kj::Function` can be initialized from any callable object, such as a lambda, function pointer, or anything with `operator()`. `kj::Function` is useful when you want to write an API that accepts a lambda callback, without defining the API itself as a template. `kj::Function` supports move semantics. + +`kj::ConstFunction` is like `kj::Function`, but is used to indicate that the function should be safe to call from multiple threads. (See [KJ idioms around constness and thread-safety](style-guide.md#constness).) + +A special optimization type, `kj::FunctionParam`, is like `kj::Function` but designed to be used specifically as the type of a callback parameter to some other function where that callback is only called synchronously; i.e., the callback won't be called anymore after the outer function returns. Unlike `kj::Function`, a `kj::FunctionParam` can be constructed entirely on the stack, with no heap allocation. + +### Vectors (growable arrays) + +Like `std::vector`, `kj::Vector` is an array that supports appending an element in amortized O(1) time. When the underlying backing array is full, an array of twice the size is allocated and all elements moved. + +### Hash/tree maps/sets... and tables + +`kj::HashMap`, `kj::HashSet`, `kj::TreeMap`, and `kj::TreeSet` do what you'd expect, with modern lambda-oriented interfaces that are less awkward than the corresponding STL types. + +All of these types are actually specific instances of the more-general `kj::Table`. A `kj::Table` can have any number of columns (whereas "sets" have exactly 1 and "maps" have exactly 2), and can maintain indexes on multiple columns at once. Each index can be hash-based, tree-based, or a custom index type that you provide. + +Unlike STL's, KJ's hashtable-based containers iterate in a well-defined deterministic order based on the order of insertion and removals. Deterministic behavior is important for reproducibility, which is important not just for debugging, but also in distributed systems where multiple systems must independently reproduce the same state. KJ's hashtable containers are also faster than `libstdc++`'s in benchmarks. + +KJ's tree-based containers use a b-tree design for better memory locality than the more traditional red-black trees. The implementation is tuned to avoid code bloat by keeping most logic out of templates, though this does make it slightly slower than `libstdc++`'s `map` and `set` in benchmarks. + +`kj::hashCode(params...)` computes a hash across all the inputs, appropriate for use in a hash table. It is extensible in a similar fashion to `kj::str()`, by using `KJ_HASHCODE` or defining a `.hashCode()` method on your custom types. `kj::Table`'s hashtable-based index uses `kj::hashCode` to compute hashes. 
+ +## Debugging and Observability + +KJ believes that there is no such thing as bug-free code. Instead, we must expect that our code will go wrong, and try to extract as much information as possible when it does. To that end, KJ provides powerful assertion macros designed for observability. (Be sure also to read about [KJ's exception philosophy](style-guide.md#exceptions); this section describes the actual APIs involved.) + +### Assertions + +Let's start with the basic assert: + +```c++ +KJ_ASSERT(foo == bar.baz, "the baz is not foo", bar.name, i); +``` + +When `foo == bar.baz` evaluates false, this line will throw an exception with a description like this: + +``` +src/file.c++:52: failed: expected foo == bar.baz [123 == 321]; the baz is not foo; bar.name = "banana"; i = 5 +stack: libqux.so@0x52134 libqux.so@0x16f582 bin/corge@0x12515 bin/corge@0x5552 +``` + +Notice all the information this contains: + +* The file and line number in the source code where the assertion macro was used. +* The condition which failed. +* The stringified values of the operands to the condition, i.e. `foo` and `bar.baz` (shown in `[]` brackets). +* The values of all other parameters passed to the assertion, i.e. `"the baz is not foo"`, `bar.name`, and `i`. For expressions that aren't just string literals, both the expression and the stringified result of evaluating it are shown. +* A numeric stack trace. If possible, the addresses will be given relative to their respective binary, so that ASLR doesn't make traces useless. The trace can be decoded with tools like `addr2line`. If possible, KJ will also shell out to `addr2line` itself to produce a human-readable trace. + +Note that the work of producing an error description happens only in the case that it's needed. If the condition evaluates true, then that is all the work that is done. + +`KJ_ASSERT` should be used in cases where you are checking conditions that, if they fail, represent a bug in the code where the assert appears. On the other hand, when checking for preconditions -- i.e., bugs in the _caller_ of the code -- use `KJ_REQUIRE` instead: + +```c++ +T& operator[](size_t i) { + KJ_REQUIRE(i < size(), "index out-of-bounds"); + // ... +} +``` + +`KJ_REQUIRE` and `KJ_ASSERT` do exactly the same thing; using one or the other is only a matter of self-documentation. + +`KJ_FAIL_ASSERT(...)` should be used instead of `KJ_ASSERT(false, ...)` when you want a branch always to fail. + +Assertions operate exactly the same in debug and release builds. To express a debug-only assertion, you can use `KJ_DASSERT`. However, we highly recommend letting asserts run in production, as they are frequently an invaluable tool for tracking down bugs that weren't covered in testing. + +### Logging + +The `KJ_LOG` macro can be used to log messages meant for the developer or operator without interrupting control flow. + +```c++ +if (foo.isWrong()) { + KJ_LOG(ERROR, "the foo is wrong", foo); +} +``` + +The first parameter is the log level, which can be `INFO`, `WARNING`, `ERROR`, or `FATAL`. By default, `INFO` logs are discarded, while other levels are displayed. For programs whose main function is based on `kj/main.h`, the `-v` flag can be used to enable `INFO` logging. A `FATAL` log should typically be followed by `abort()` or similar. + +Parameters other than the first are stringified in the same manner as with `KJ_ASSERT`. These parameters will not be evaluated at all, though, if the specified log level is not enabled. + +By default, logs go to standard error. 
However, you can implement a `kj::ExceptionCallback` (in `kj/exception.h`) to capture logs and customize how they are handled. + +### Debug printing + +Let's face it: "printf() debugging" is easy and effective. KJ embraces this with the `KJ_DBG()` macro. + +```c++ +KJ_DBG("hi", foo, bar, baz.qux) +``` + +`KJ_DBG(...)` is equivalent to `KJ_LOG(DEBUG, ...)` -- logging at the `DEBUG` level, which is always enabled. The dedicated macro exists for brevity when debugging. `KJ_DBG` is intended to be used strictly for temporary debugging code that should never be committed. We recommend setting up commit hooks to reject code that contains invocations of `KJ_DBG`. + +### System call error checking + +KJ includes special variants of its assertion macros that convert traditional C API error conventions into exceptions. + +```c++ +int fd; +KJ_SYSCALL(fd = open(filename, O_RDONLY), "couldn't open the document", filename); +``` + +This macro evaluates the first parameter, which is expected to be a system call. If it returns a negative value, indicating an error, then an exception is thrown. The exception description incorporates a description of the error code communicated by `errno`, as well as the other parameters passed to the macro (stringified in the same manner as other assertion/logging macros do). + +Additionally, `KJ_SYSCALL()` will automatically retry calls that fail with `EINTR`. Because of this, it is important that the expression is idempotent. + +Sometimes, you need to handle certain error codes without throwing. For those cases, use `KJ_SYSCALL_HANDLE_ERRORS`: + +```c++ +int fd; +KJ_SYSCALL_HANDLE_ERRORS(fd = open(filename, O_RDONLY)) { + case ENOENT: + // File didn't exist, return null. + return nullptr; + default: + // Some other error. The error code (from errno) is in a local variable `error`. + // `KJ_FAIL_SYSCALL` expects its second parameter to be this integer error code. + KJ_FAIL_SYSCALL("open()", error, "couldn't open the document", filename); +} +``` + +On Windows, two similar macros are available based on Windows API calling conventions: `KJ_WIN32` works with API functions that return a `BOOLEAN`, `HANDLE`, or pointer type. `KJ_WINSOCK` works with Winsock APIs that return negative values to indicate errors. Some Win32 APIs follow neither of these conventions, in which case you will have to write your own code to check for an error and use `KJ_FAIL_WIN32` to turn it into an exception. + +### Alternate exception types + +As described in [KJ's exception philosophy](style-guide.md#exceptions), KJ supports a small set of exception types. Regular assertions throw `FAILED` exceptions. `KJ_SYSCALL` usually throws `FAILED`, but identifies certain error codes as `DISCONNECTED` or `OVERLOADED`. For example, `ECONNRESET` is clearly a `DISCONNECTED` exception. + +If you wish to manually construct and throw a different exception type, you may use `KJ_EXCEPTION`: + +```c++ +kj::Exception e = KJ_EXCEPTION(DISCONNECTED, "connection lost", addr); +``` + +### Throwing and catching exceptions + +KJ code usually should not use `throw` or `catch` directly, but rather use KJ's wrappers: + +```c++ +// Throw an exception. +kj::Exception e = ...; +kj::throwFatalException(kj::mv(e)); + +// Run some code catching exceptions. 
+kj::Maybe maybeException = kj::runCatchingExceptions([&]() { + doSomething(); +}); +KJ_IF_MAYBE(e, maybeException) { + // handle exception +} +``` + +These wrappers perform some extra bookkeeping: +* `kj::runCatchingExceptions()` will catch any kind of exception, whether it derives from `kj::Exception` or not, and will do its best to convert it into a `kj::Exception`. +* `kj::throwFatalException()` and `kj::throwRecoverableException()` invoke the thread's current `kj::ExceptionCallback` to throw the exception, allowing apps to customize how exceptions are handled. The default `ExceptionCallback` makes sure to throw the exception in such a way that it can be understood and caught by code looking for `std::exception`, such as the C++ library's standard termination handler. +* These helpers also work, to some extent, even when compiled with `-fno-exceptions` -- see below. (Note that "fatal" vs. "recoverable" exceptions are only different in this case; when exceptions are enabled, they are handled the same.) + +### Supporting `-fno-exceptions` + +KJ strongly recommends using C++ exceptions. However, exceptions are controversial, and many C++ applications are compiled with exceptions disabled. Some KJ-based libraries (especially Cap'n Proto) would like to accommodate such users. To that end, KJ's exception and assertion infrastructure is designed to degrade gracefully when compiled without exception support. In this case, exceptions are split into two types: + +* Fatal exceptions, when compiled with `-fno-exceptions`, will terminate the program when thrown. +* Recoverable exceptions, when compiled with `-fno-exceptions`, will be recorded on the side. Control flow then continues normally, possibly using a dummy value or skipping code which cannot execute. Later, the application can check if an exception has been raised and handle it. + +`KJ_ASSERT`s (and `KJ_REQUIRE`s) are fatal by default. To make them recoverable, add a "recovery block" after the assert: + +```c++ +kj::StringPtr getItem(int i) { + KJ_REQUIRE(i >= 0 && i < items.size()) { + // This is the recovery block. Recover by simply returning an empty string. + return ""; + } + return items[i]; +} +``` + +When the code above is compiled with exceptions enabled, an out-of-bounds index will result in an exception being thrown. But when compiled with `-fno-exceptions`, the function will store the exception off to the side (in KJ), and then return an empty string. + +A recovery block can indicate that control flow should continue normally even in case of error by using a `break` statement. + +```c++ +void incrementBy(int i) { + KJ_REQUIRE(i >= 0, "negative increments not allowed") { + // Pretend the caller passed `0` and continue. + i = 0; + break; + } + + value += i; +} +``` + +**WARNING:** The recovery block is executed even when exceptions are enabled. The exception is thrown upon exit from the block (even if a `return` or `break` statement is present). Therefore, be careful about side effects in the recovery block. Also, note that both GCC and Clang have a longstanding bug where a returned value's destructor is not called if the return is interrupted by an exception being thrown. Therefore, you must not return a value with a non-trivial destructor from a recovery block. + +There are two ways to handle recoverable exceptions: + +* Use `kj::runCatchingExceptions()`. When compiled with `-fno-exceptions`, this function will arrange for any recoverable exception to be stored off to the side. 
Upon completion of the given lambda, `kj::runCatchingExceptions()` will return the exception. +* Write a custom `kj::ExceptionCallback`, which can handle exceptions in any way you choose. + +Note that while most features of KJ work with `-fno-exceptions`, some of them have not been carefully written for this case, and may trigger fatal exceptions too easily. People relying on this mode will have to tread carefully. + +### Exceptions in Destructors + +Bugs can occur anywhere -- including in destructors. KJ encourages applications to detect bugs using assertions, which throw exceptions. As a result, exceptions can be thrown in destructors. There is no way around this. You cannot simply declare that destructors shall not have bugs. + +Because of this, KJ recommends that all destructors be declared with `noexcept(false)`, in order to negate C++11's unfortunate decision that destructors should be `noexcept` by default. + +However, this does not solve C++'s Most Unfortunate Decision, namely that throwing an exception from a destructor that was called during an unwind from another exception always terminates the program. It is very common for exceptions to cause "secondary" exceptions during unwind. For example, the destructor of a buffered stream might check whether the buffer has been flushed, and raise an exception if it has not, reasoning that this is a serious bug that could lead to data loss. But if the program is already unwinding due to some other exception, then it is likely that the failure to flush the buffer is because of that other exception. The "secondary" exception might as well be ignored. Terminating the program is the worst possible response. + +To work around the MUD, KJ offers two tools: + +First, during unwind from one exception, KJ will handle all "recoverable" exceptions as if compiled with `-fno-exceptions`, described in the previous section. So, whenever writing assertions in destructors, it is a good idea to give them a recovery block like `{break;}` or `{return;}`. + +```c++ +BufferedStream::~BufferedStream() noexcept(false) { + KJ_REQUIRE(buffer.size() == 0, "buffer was not flushed; possible data loss") { + // Don't throw if we're unwinding! + break; + } +} +``` + +Second, `kj::UnwindDetector` can be used to squelch exceptions during unwind. This is especially helpful in cases where your destructor needs to call complex external code that wasn't written with destructors in mind. Use it like so: + +```c++ +class Transaction { +public: + // ... + +private: + kj::UnwindDetector unwindDetector; + // ... +}; + +Transaction::~Transaction() noexcept(false) { + unwindDetector.catchExceptionsIfUnwinding([&]() { + if (!committed) { + rollback(); + } + }); +} +``` + +Core Systems +====================================================================== + +This section describes KJ APIs that control process execution and low-level interactions with the operating system. Most users of KJ will need to be familiar with most of this section. + +## Threads and Synchronization + +`kj::Thread` creates a thread in which the lambda passed to `kj::Thread`'s constructor will be executed. `kj::Thread`'s destructor waits for the thread to exit before continuing, and rethrows any exception that had been thrown from the thread's main function -- unless the thread's `.detach()` method has been called, in which case `kj::Thread`'s destructor does nothing. + +`kj::MutexGuarded` holds an instance of `T` that is protected by a mutex. 
In order to access the protected value, you must first create a lock. `.lockExclusive()` returns `kj::Locked` which can be used to access the underlying value. `.lockShared()` returns `kj::Locked`, [using constness to enforce thread-safe read-only access](style-guide.md#constness) so that multiple threads can take the lock concurrently. In this way, KJ mutexes make it difficult to forget to take a lock before accessing the protected object. + +`kj::Locked` has a method `.wait(cond)` which temporarily releases the lock and waits, taking the lock back as soon as `cond(value)` evaluates true. This provides a much cleaner and more readable interface than traditional conditional variables. + +`kj::Lazy` is an instance of `T` that is constructed on first access in a thread-safe way. + +Macros `KJ_TRACK_LOCK_BLOCKING` and `KJ_SAVE_ACQUIRED_LOCK_INFO` can be used to enable support utilities to implement deadlock detection & analysis. +* `KJ_TRACK_LOCK_BLOCKING`: When the current thread is doing a blocking synchronous KJ operation, that operation is available via `kj::blockedReason()` (intention is for this to be invoked from the signal handler running on the thread that's doing the synchronous operation). +* `KJ_SAVE_ACQUIRED_LOCK_INFO`: When enabled, lock acquisition will save state about the location of the acquired lock. When combined with `KJ_TRACK_LOCK_BLOCKING` this can be particularly helpful because any watchdog can just forward the signal to the thread that's holding the lock. +## Asynchronous Event Loop + +### Promises + +KJ makes asynchronous programming manageable using an API modeled on E-style Promises. E-style Promises were also the inspiration for JavaScript Promises, so modern JavaScript programmers should find KJ Promises familiar, although there are some important differences. + +A `kj::Promise` represents an asynchronous background task that, upon completion, either "resolves" to a value of type `T`, or "rejects" with an exception. + +In the simplest case, a `kj::Promise` can be directly constructed from an instance of `T`: + +```c++ +int i = 123; +kj::Promise promise = i; +``` + +In this case, the promise is immediately resolved to the given value. + +A promise can also immediately reject with an exception: + +```c++ +kj::Exception e = KJ_EXCEPTION(FAILED, "problem"); +kj::Promise promise = kj::mv(e); +``` + +Of course, `Promise`s are much more interesting when they don't complete immediately. + +When a function returns a `Promise`, it means that the function performs some asynchronous operation that will complete in the future. These functions are always non-blocking -- they immediately return a `Promise`. The task completes asynchronously on the event loop. The eventual results of the promise can be obtained using `.then()` to register a callback, or, in certain situations, `.wait()` to synchronously wait. These are described in more detail below. + +### Basic event loop setup + +In order to execute `Promise`-based code, the thread must be running an event loop. Typically, at the top level of the thread, you would do something like: + +```c++ +kj::AsyncIoContext io = kj::setupAsyncIo(); + +kj::AsyncIoProvider& ioProvider = *io.provider; +kj::LowLevelAsyncIoProvider& lowLevelProvider = *io.lowLevelProvider; +kj::WaitScope& waitScope = io.waitScope; +``` + +`kj::setupAsyncIo()` constructs and returns a bunch of objects: + +* A `kj::AsyncIoProvider`, which provides access to a variety of I/O APIs, like timers, pipes, and networking. 
+* A `kj::LowLevelAsyncIoProvider`, which allows you to wrap existing low-level operating system handles (Unix file descriptors, or Windows `HANDLE`s) in KJ asynchronous interfaces. +* A `kj::WaitScope`, which allows you to perform synchronous waits (see next section). +* OS-specific interfaces for even lower-level access -- see the API definition for more details. + +In order to implement all this, KJ will set up the appropriate OS-specific constructs to handle I/O events on the host platform. For example, on Linux, KJ will use `epoll`, whereas on Windows, it will set up an I/O Completion Port. + +Sometimes, you may need KJ promises to cooperate with some existing event loop, rather than set up its own. For example, you might be using libuv, or Boost.Asio. Usually, a thread can only have one event loop, because it can only wait on one OS event queue (e.g. `epoll`) at a time. To accommodate this, it is possible (though not easy) to adapt KJ to run on top of some other event loop, by creating a custom implementation of `kj::EventPort`. The details of how to do this are beyond the scope of this document. + +Sometimes, you may find that you don't really need to perform operating system I/O at all. For example, a unit test might only need to call some asynchronous functions using mock I/O interfaces, or a thread in a multi-threaded program may only need to exchange events with other threads and not the OS. In these cases, you can create a simple event loop instead: + +```c++ +kj::EventLoop eventLoop; +kj::WaitScope waitScope(eventLoop); +``` + +### Synchronous waits + +In the top level of your program (or thread), the program is allowed to synchronously wait on a promise using the `kj::WaitScope` (see above). + +``` +kj::Timer& timer = io.provider->getTimer(); +kj::Promise promise = timer.afterDelay(5 * kj::SECONDS); +promise.wait(waitScope); // returns after 5 seconds' delay +``` + +`promise.wait()` will run the thread's event loop until the promise completes. It will then return the `Promise`'s result (or throw the `Promise`'s exception). `.wait()` consumes the `Promise`, as if the `Promise` has been moved away. + +Synchronous waits cannot be nested -- i.e. a `.then()` callback (see below) that is called by the event loop itself cannot execute another level of synchronous waits. Hence, synchronous waits generally can only be used at the top level of the thread. The API requires passing a `kj::WaitScope` to `.wait()` as a way to demonstrate statically that the caller is allowed to perform synchronous waits. Any function which wishes to perform synchronous waits must take a `kj::WaitScope&` as a parameter to indicate that it does this. + +Synchronous waits often make sense to use in "client" programs that only have one task to complete before they exit. On the other end of the spectrum, server programs that handle many clients generally must do everything asynchronously. At the top level of a server program, you will typically instruct the event loop to run forever, like so: + +```c++ +// Run event loop forever, do everything asynchronously. +kj::NEVER_DONE.wait(waitScope); +``` + +Libraries should always be asynchronous, so that either kind of program can use them. 
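+
+As a small illustration of the convention above -- any function that performs synchronous waits takes a `kj::WaitScope&` -- here is a hypothetical helper (the name `sleepFor()` is invented for this sketch) built on the `kj::Timer` shown earlier:
+
+```c++
+// Blocks the caller (by running the event loop) until the delay has elapsed.
+// Taking a kj::WaitScope& advertises that this function waits synchronously.
+void sleepFor(kj::Timer& timer, kj::Duration duration, kj::WaitScope& waitScope) {
+  timer.afterDelay(duration).wait(waitScope);
+}
+
+// Typical top-level usage, given the objects returned by kj::setupAsyncIo():
+//   sleepFor(io.provider->getTimer(), 5 * kj::SECONDS, io.waitScope);
+```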
+ +### Asynchronous callbacks + +Similar to JavaScript promises, you may register a callback to call upon completion of a KJ promise using `.then()`: + +```c++ +kj::Promise textPromise = stream.readAllText(); +kj::Promise lineCountPromise = textPromise + .then([](kj::String text) { + int lineCount = 0; + for (char c: text) { + if (c == '\n') { + ++lineCount; + } + } + return lineCount; +}); +``` + +`promise.then()` takes, as its argument, a lambda which transforms the result of the `Promise`. It returns a new `Promise` for the transformed result. We call this lambda a "continuation". + +Calling `.then()`, like `.wait()`, consumes the original promise, as if it were "moved away". Ownership of the original promise is transferred into the new, derived promise. If you want to register multiple continuations on the same promise, you must fork it first (see below). + +If the continuation itself returns another `Promise`, then the `Promise`s become chained. That is, the final type is reduced from `Promise>` to just `Promise`. + +```c++ +kj::Promise> connectPromise = + networkAddress.connect(); +kj::Promise textPromise = connectPromise + .then([](kj::Own stream) { + return stream->readAllText().attach(kj::mv(stream)); +}); +``` + +If a promise rejects (throws an exception), then the exception propagates through `.then()` to the new derived promise, without calling the continuation. If you'd like to actually handle the exception, you may pass a second lambda as the second argument to `.then()`. + +```c++ +kj::Promise promise = networkAddress.connect() + .then([](kj::Own stream) { + return stream->readAllText().attach(kj::mv(stream)); +}, [](kj::Exception&& exception) { + return kj::str("connection error: ", exception); +}); +``` + +You can also use `.catch_(errorHandler)`, which is a shortcut for `.then(identityFunction, errorHandler)`. + +### `kj::evalNow()`, `kj::evalLater()`, and `kj::evalLast()` + +These three functions take a lambda as the parameter, and return the result of evaluating the lambda. They differ in when, exactly, the execution happens. + +```c++ +kj::Promise promise = kj::evalLater([]() { + int i = doSomething(); + return i; +}); +``` + +As with `.then()` continuations, the lambda passed to these functions may itself return a `Promise`. + +`kj::evalNow()` executes the lambda immediately -- before `evalNow()` even returns. The purpose of `evalNow()` is to catch any exceptions thrown and turn them into a rejected promise. This is often a good idea when you don't want the caller to have to handle both synchronous and asynchronous exceptions -- wrapping your whole function in `kj::evalNow()` ensures that all exceptions are delivered asynchronously. + +`kj::evalLater()` executes the lambda on a future turn of the event loop. This is equivalent to `kj::Promise().then()`. + +`kj::evalLast()` arranges for the lambda to be called only after all other work queued to the event loop has completed (but before querying the OS for new I/O events). This can often be useful e.g. for batching. For example, if a program tends to make many small write()s to a socket in rapid succession, you might want to add a layer that collects the writes into a batch, then sends the whole batch in a single write from an `evalLast()`. This way, none of the bytes are significantly delayed, but they can still be coalesced. + +If multiple `evalLast()`s exist at the same time, they will execute in last-in-first-out order. 
If the first one out schedules more work on the event loop, that work will be completed before the next `evalLast()` executes, and so on. + +### Attachments + +Often, a task represented by a `Promise` will require that some object remains alive until the `Promise` completes. In particular, under KJ conventions, unless documented otherwise, any class method which returns a `Promise` inherently expects that the caller will ensure that the object it was called on will remain alive until the `Promise` completes (or is canceled). Put another way, member function implementations may assume their `this` pointer is valid as long as their returned `Promise` is alive. + +You may use `promise.attach(kj::mv(object))` to give a `Promise` direct ownership of an object that must be kept alive until the promise completes. `.attach()`, like `.then()`, consumes the promise and returns a new one of the same type. + +```c++ +kj::Promise> connectPromise = + networkAddress.connect(); +kj::Promise textPromise = connectPromise + .then([](kj::Own stream) { + // We must attach the stream so that it remains alive until `readAllText()` + // is done. The stream will then be discarded. + return stream->readAllText().attach(kj::mv(stream)); +}); +``` + +Using `.attach()` is semantically equivalent to using `.then()`, passing an identity function as the continuation, while having that function capture ownership of the attached object, i.e.: + +```c++ +// This... +promise.attach(kj::mv(attachment)); +// ...is equivalent to this... +promise.then([a = kj::mv(attachment)](auto x) { return kj::mv(x); }); +``` + +Note that you can use `.attach()` together with `kj::defer()` to construct a "finally" block -- code which will execute after the promise completes (or is canceled). + +```c++ +promise = promise.attach(kj::defer([]() { + // This code will execute when the promise completes or is canceled. +})); +``` + +### Background tasks + +If you construct a `Promise` and then just leave it be without calling `.then()` or `.wait()` to consume it, the task it represents will nevertheless execute when the event loop runs, "in the background". You can call `.then()` or `.wait()` later on, when you're ready. This makes it possible to run multiple concurrent tasks at once. + +Note that, when possible, KJ evaluates continuations lazily. Continuations which merely transform the result (without returning a new `Promise` that might require more waiting) are only evaluated when the final result is actually needed. This is an optimization which allows a long chain of `.then()`s to be executed all at once, rather than turning the event loop for each one. However, it can lead to some confusion when storing an unconsumed `Promise`. For example: + +```c++ +kj::Promise promise = timer.afterDelay(5 * kj::SECONDS) + .then([]() { + // This log line will never be written, because nothing + // is waiting on the final result of the promise. + KJ_LOG(WARNING, "It has been 5 seconds!!!"); +}); +kj::NEVER_DONE.wait(waitScope); +``` + +To solve this, use `.eagerlyEvaluate()`: + +```c++ +kj::Promise promise = timer.afterDelay(5 * kj::SECONDS) + .then([]() { + // This log will correctly be written after 5 seconds. + KJ_LOG(WARNING, "It has been 5 seconds!!!"); +}).eagerlyEvaluate([](kj::Exception&& exception) { + KJ_LOG(ERROR, exception); +}); +kj::NEVER_DONE.wait(waitScope); +``` + +`.eagerlyEvaluate()` takes an error handler callback as its parameter, with the same semantics as `.catch_()` or the second parameter to `.then()`. 
This is required because otherwise, it is very easy to forget to install an error handler on background tasks, resulting in errors being silently discarded. However, if you are certain that errors will be properly handled elsewhere, you may pass `nullptr` as the parameter to skip error checking -- this is equivalent to passing a callback that merely re-throws the exception. + +If you have lots of background tasks, use `kj::TaskSet` to manage them. Any promise added to a `kj::TaskSet` will be run to completion (with eager evaluation), with any exceptions being reported to a provided error handler callback. + +### Cancellation + +If you destroy a `Promise` before it has completed, any incomplete work will be immediately canceled. + +Upon cancellation, no further continuations are executed at all, not even error handlers. Only destructors are executed. Hence, when there is cleanup that must be performed after a task, it is not sufficient to use `.then()` to perform the cleanup in continuations. You must instead use `.attach()` to attach an object whose destructor performs the cleanup (or perhaps `.attach(kj::defer(...))`, as mentioned earlier). + +Promise cancellation has proven to be an extremely useful feature of KJ promises which is missing in other async frameworks, such as JavaScript's. However, it places new responsibility on the developer. Just as developers who allow exceptions must design their code to be "exception safe", developers using KJ promises must design their code to be "cancellation safe". + +It is especially important to note that once a promise has been canceled, then any references that were received along with the promise may no longer be valid. For example, consider this function: + +``` +kj::Promise write(kj::ArrayPtr data); +``` + +The function receives a pointer to some data owned elsewhere. By KJ convention, the caller must ensure this pointer remains valid until the promise completes _or is canceled_. If the caller decides it needs to free the data early, it may do so as long as it cancels the promise first. This property is important as otherwise it becomes impossible to reason about ownership in complex systems. + +This means that the implementation of `write()` must immediately stop using `data` as soon as cancellation occurs. For example, if `data` has been placed in some sort of queue where some other concurrent task takes items from the queue to write them, then it must be ensured that `data` will be removed from that queue upon cancellation. This "queued writes" pattern has historically been a frequent source of bugs in KJ code, to the point where experienced KJ developers now become immediately suspicious of such queuing. The `kj::AsyncOutputStream` interface explicitly prohibits overlapping calls to `write()` specifically so that the implementation need not worry about maintaining queues. + +### Promise-Fulfiller Pairs and Adapted Promises + +Sometimes, it's difficult to express asynchronous control flow as a simple chain of continuations. For example, imagine a producer-consumer queue, where producers and consumers are executing concurrently on the same event loop. The consumer doesn't directly call the producer, nor vice versa, but the consumer would like to wait for the producer to produce an item for consumption. + +For these situations, you may use a `Promise`-`Fulfiller` pair. + +```c++ +kj::PromiseFulfillerPair paf = kj::newPromiseAndFulfiller(); + +// Consumer waits for the promise. +paf.promise.then([](int i) { ... 
}); + +// Producer calls the fulfiller to fulfill the promise. +paf.fulfiller->fulfill(123); + +// Producer can also reject the promise. +paf.fulfiller->reject(KJ_EXCEPTION(FAILED, "something went wrong")); +``` + +**WARNING! DANGER!** When using promise-fulfiller pairs, it is very easy to forget about both exception propagation and, more importantly, cancellation-safety. + +* **Exception-safety:** If your code stops early due to an exception, it may forget to invoke the fulfiller. Upon destroying the fulfiller, the consumer end will receive a generic, unhelpful exception, merely saying that the fulfiller was destroyed unfulfilled. To aid in debugging, you should make sure to catch exceptions and call `fulfiller->reject()` to propagate them. +* **Cancellation-safety:** Either the producer or the consumer task could be canceled, and you must consider how this affects the other end. + * **Canceled consumer:** If the consumer is canceled, the producer may waste time producing an item that no one is waiting for. Or, worse, if the consumer has provided references to the producer (for example, a buffer into which results should be written), those references may become invalid upon cancellation, but the producer will continue executing, possibly resulting in a use-after-free. To avoid these problems, the producer can call `fulfiller->isWaiting()` to check if the consumer is still waiting -- this method returns false if either the consumer has been canceled, or if the producer has already fulfilled or rejected the promise previously. However, `isWaiting()` requires polling, which is not ideal. For better control, consider using an adapted promise (see below). + * **Canceled producer:** If the producer is canceled, by default it will probably destroy the fulfiller without fulfilling or reject it. As described previously, the consumer will receive a non-descript exception, which is likely unhelpful for debugging. To avoid this scenario, the producer could perhaps use `.attach(kj::defer(...))` with a lambda that checks `fulfiller->isWaiting()` and rejects it if not. + +Because of the complexity of the above issues, it is generally recommended that you **avoid promise-fulfiller pairs** except in cases where these issues very clearly don't matter (such as unit tests). + +Instead, when cancellation concerns matter, consider using "adapted promises", a more sophisticated alternative. `kj::newAdaptedPromise()` constructs an instance of the class `Adapter` (which you define) encapsulated in a returned `Promise`. `Adapter`'s constructor receives a `kj::PromiseFulfiller&` used to fulfill the promise. The constructor should then register the fulfiller with the desired producer. If the promise is canceled, `Adapter`'s destructor will be invoked, and should un-register the fulfiller. One common technique is for `Adapter` implementations to form a linked list with other `Adapter`s waiting for the same producer. Adapted promises make consumer cancellation much more explicit and easy to handle, at the expense of requiring more code. + +### Loops + +Promises, due to their construction, don't lend themselves easily to classic `for()`/`while()` loops. Instead, loops should be expressed recursively, as in a functional language. For example: + +```c++ +kj::Promise boopEvery5Seconds(kj::Timer& timer) { + return timer.afterDelay(5 * kj::SECONDS).then([&timer]() { + boop(); + // Loop by recursing. 
+    return boopEvery5Seconds(timer);
+  });
+}
+```
+
+KJ promises include "tail call optimization" for loops like the one above, so that the promise chain length remains finite no matter how many times the loop iterates.
+
+**WARNING!** It is very easy to accidentally break tail call optimization, creating a memory leak. Consider the following:
+
+```c++
+kj::Promise<void> boopEvery5Seconds(kj::Timer& timer) {
+  // WARNING! MEMORY LEAK!
+  return timer.afterDelay(5 * kj::SECONDS).then([&timer]() {
+    boop();
+    // Loop by recursing.
+    return boopEvery5Seconds(timer);
+  }).catch_([](kj::Exception&& exception) {
+    // Oh no, an error! Log it and end the loop.
+    KJ_LOG(ERROR, exception);
+    kj::throwFatalException(kj::mv(exception));
+  });
+}
+```
+
+The problem in this example is that the recursive call is _not_ a tail call, due to the `.catch_()` appended to the end. Every time around the loop, a new `.catch_()` is added to the promise chain. If an exception were thrown, that exception would end up being logged many times -- once for each time the loop has repeated so far. Or if the loop iterated enough times, and the top promise was then canceled, the chain could be so long that the destructors overflow the stack.
+
+In this case, the best fix is to pull the `.catch_()` out of the loop entirely:
+
+```c++
+kj::Promise<void> boopEvery5Seconds(kj::Timer& timer) {
+  return boopEvery5SecondsLoop(timer)
+      .catch_([](kj::Exception&& exception) {
+    // Oh no, an error! Log it and end the loop.
+    KJ_LOG(ERROR, exception);
+    kj::throwFatalException(kj::mv(exception));
+  });
+}
+
+kj::Promise<void> boopEvery5SecondsLoop(kj::Timer& timer) {
+  // No memory leaks now!
+  return timer.afterDelay(5 * kj::SECONDS).then([&timer]() {
+    boop();
+    // Loop by recursing.
+    return boopEvery5SecondsLoop(timer);
+  });
+}
+```
+
+Another possible fix would be to make sure the recursive continuation and the error handler are passed to the same `.then()` invocation:
+
+```c++
+kj::Promise<void> boopEvery5Seconds(kj::Timer& timer) {
+  // No more memory leaks, but hard to reason about.
+  return timer.afterDelay(5 * kj::SECONDS).then([&timer]() {
+    boop();
+  }).then([&timer]() {
+    // Loop by recursing.
+    return boopEvery5Seconds(timer);
+  }, [](kj::Exception&& exception) {
+    // Oh no, an error! Log it and end the loop.
+    KJ_LOG(ERROR, exception);
+    kj::throwFatalException(kj::mv(exception));
+  });
+}
+```
+
+Notice that in this second case, the error handler is scoped so that it does _not_ catch exceptions thrown by the recursive call; it only catches exceptions from `boop()`. This solves the problem, but it's a bit trickier to understand and to ensure that exceptions can't accidentally slip past the error handler.
+
+### Forking and splitting promises
+
+As mentioned above, `.then()` and similar functions consume the promise on which they are called, so they can only be called once. But what if you want to start multiple tasks using the result of a promise? You could solve this in a convoluted way using adapted promises, but KJ has a built-in solution: `.fork()`.
+
+```c++
+kj::Promise<T> promise = ...;
+kj::ForkedPromise<T> forked = promise.fork();
+kj::Promise<T> branch1 = forked.addBranch();
+kj::Promise<T> branch2 = forked.addBranch();
+kj::Promise<T> branch3 = forked.addBranch();
+```
+
+A forked promise can have any number of "branches" which represent different consumers waiting for the same result.
+
+Forked promises use reference counting. The `ForkedPromise` itself, and each branch created from it, each represent a reference to the original promise. The original promise will only be canceled if all branches are canceled and the `ForkedPromise` itself is destroyed.
+
+Forked promises require that the result type has a copy constructor, so that it can be copied to each branch. (Regular promises only require the result type to be movable, not copyable.) Or, alternatively, if the result type is `kj::Own<T>` -- which is never copyable -- then `T` must have a method `kj::Own<T> T::addRef()`; this method will be invoked to create each branch. Typically, `addRef()` would be implemented using reference counting.
+
+Sometimes, the copyable requirement of `.fork()` can be burdensome and unnecessary. If the result type has multiple components, and each branch really only needs one of the components, then being able to copy (or refcount) is unnecessary. In these cases, you can use `.split()` instead. `.split()` converts a promise for a `kj::Tuple` into a `kj::Tuple` of promises. That is:
+
+```c++
+kj::Promise<kj::Tuple<kj::Own<kj::AsyncIoStream>, kj::String>> promise = ...;
+kj::Tuple<kj::Promise<kj::Own<kj::AsyncIoStream>>, kj::Promise<kj::String>> promises = promise.split();
+```
+
+### Joining promises
+
+The opposite of forking promises is joining promises. There are two types of joins:
+* **Exclusive** joins wait for any one input promise to complete, then cancel the rest, returning the result of the promise that completed.
+* **Inclusive** joins wait for all input promises to complete, and return all of the results.
+
+For an exclusive join, use `promise.exclusiveJoin(kj::mv(otherPromise))`. The two promises must return the same type. The result is a promise that returns whichever result is produced first, and cancels the other promise at that time. (To exclusively join more than two promises, call `.exclusiveJoin()` multiple times in a chain.)
+
+To perform an inclusive join, use `kj::joinPromises()`. This turns a `kj::Array<kj::Promise<T>>` into a `kj::Promise<kj::Array<T>>`. However, note that `kj::joinPromises()` has a couple of common gotchas:
+* Trailing continuations on the promises passed to `kj::joinPromises()` are evaluated lazily after all the promises become ready. Use `.eagerlyEvaluate()` on each one to force trailing continuations to happen eagerly. (See earlier discussion under "Background Tasks".)
+* If any promise in the array rejects, the exception will be held until all other promises have completed (or rejected), and only then will the exception propagate. In practice we've found that most uses of `kj::joinPromises()` would prefer "exclusive" or "fail-fast" behavior in the case of an exception, but as of this writing we have not yet introduced a function that does this.
+
+### Threads
+
+The KJ async framework is designed around single-threaded event loops. However, you can have multiple threads, with each running its own loop.
+
+All KJ async objects, unless specifically documented otherwise, are intrinsically tied to the thread and event loop on which they were created. These objects must not be accessed from any other thread.
+
+To communicate between threads, you may use `kj::Executor`. Each thread (that has an event loop) may call `kj::getCurrentThreadExecutor()` to get a reference to its own `Executor`. That reference may then be shared with other threads. The other threads can use the methods of `Executor` to queue functions to execute on the owning thread's event loop.
+
+The threads which call an `Executor` do not have to have KJ event loops themselves. Thus, you can use an `Executor` to signal a KJ event loop thread from a non-KJ thread.
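+
+As an illustrative sketch of this pattern -- assuming `kj::Executor`'s `executeSync()` method (see `kj/async.h`), and using a placeholder `computeAnswer()` function that stands in for real work:
+
+```c++
+// On the event-loop thread: capture this thread's Executor so other threads can reach it.
+const kj::Executor& executor = kj::getCurrentThreadExecutor();
+
+kj::Thread worker([&executor]() {
+  // This thread has no event loop of its own. executeSync() queues the lambda
+  // onto the owning thread's event loop and blocks until it has run there.
+  int answer = executor.executeSync([]() { return computeAnswer(); });
+  KJ_LOG(INFO, answer);
+});
+
+// Meanwhile, the owning thread must keep running its event loop (e.g. by
+// performing a wait()) so that the queued work actually executes.
+```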
+ +### Fibers + +Fibers allow code to be written in a synchronous / blocking style while running inside the KJ event loop, by executing the code on an alternate call stack. The code running on this alternate stack is given a special `kj::WaitScope&`, which it can pass to `promise.wait()` to perform synchronous waits. When such a `.wait()` is invoked, the thread switches back to the main call stack and continues running the event loop there. When the waited promise resolves, execution switches back to the alternate call stack and `.wait()` returns (or throws). + +```c++ +constexpr size_t STACK_SIZE = 65536; +kj::Promise promise = + kj::startFiber(STACK_SIZE, [](kj::WaitScope& waitScope) { + int i = someAsyncFunc().wait(waitScope); + i += anotherAsyncFunc().wait(waitScope); + return i; +}); +``` + +**CAUTION:** Fibers produce attractive-looking code, but have serious drawbacks. Every fiber must allocate a new call stack, which is typically rather large. The above example allocates a 64kb stack, which is the _minimum_ supported size. Some programs and libraries expect to be able to allocate megabytes of data on the stack. On modern Linux systems, a default stack size of 8MB is typical. Stack space is allocated lazily on page faults, but just setting up the memory mapping is much more expensive than a typical `malloc()`. If you create lots of fibers, you should use `kj::FiberPool` to reduce allocation costs -- but while this reduces allocation overhead, it will increase memory usage. + +Because of this, fibers should not be used just to make code look nice (C++20's `co_await`, which KJ will soon support, is a better way to do that). Instead, the main use case for fibers is to be able to call into existing libraries that are not designed to operate in an asynchronous way. For example, say you find a library that performs stream I/O, and lets you provide your own `read()`/`write()` implementations, but expects those implementations to operate in a blocking fashion. With fibers, you can use such a library within the asynchronous KJ event loop. + +### Unit testing tips + +When unit-testing promise APIs, two tricky challenges frequently arise: + +* Testing that a promise has completed when it is supposed to. You can use `promise.wait()`, but if the promise has not completed as expected, then the test may simply hang. This can be frustrating to debug. +* Testing that a promise has not completed prematurely. You obviously can't use `promise.wait()`, because you _expect_ the promise has not completed, and therefore this would hang. You might try using `.then()` with a continuation that sets a flag, but if the flag is not set, it's hard to tell whether this is because the promise really has not completed, or merely because the event loop hasn't yet called the `.then()` continuation. + +To solve these problems, you can use `promise.poll(waitScope)`. This function runs the event loop until either the promise completes, or there is nothing left to do except to wait. This includes running any continuations in the queue as well as checking for I/O events from the operating system, repeatedly, until nothing is left. The only thing `.poll()` will not do is block. `.poll()` returns true if the promise has completed, false if it hasn't. + +```c++ +// In a unit test... +kj::Promise promise = waitForBoop(); + +// The promise should not be done yet because we haven't booped yet. +KJ_ASSERT(!promise.poll(waitScope)); + +boop(); + +// Assert the promise is done, to make sure wait() won't hang! 
+KJ_ASSERT(promise.poll(waitScope)); + +promise.wait(waitScope); +``` + +## System I/O + +### Async I/O + +On top of KJ's async framework (described earlier), KJ provides asynchronous APIs for byte streams, networking, and timers. + +As mentioned previously, `kj::setupAsyncIo()` allocates an appropriate OS-specific event queue (such as `epoll` on Linux), returning implementations of `kj::AsyncIoProvider` and `kj::LowLevelAsyncIoProvider` implemented in terms of that queue. `kj::AsyncIoProvider` provides an OS-independent API for byte streams, networking, and timers. `kj::LowLevelAsyncIoProvider` allows native OS handles (file descriptors on Unix, `HANDLE`s on Windows) to be wrapped in KJ byte stream APIs, like `kj::AsyncIoStream`. + +Please refer to the API reference (the header files) for details on these APIs. + +### Synchronous I/O + +Although most complex KJ applications use async I/O, sometimes you want something a little simpler. + +`kj/io.h` provides some more basic, synchronous streaming interfaces, like `kj::InputStream` and `kj::OutputStream`. Implementations are provided on top of file descriptors and Windows `HANDLE`s. + +Additionally, the important utility class `kj::AutoCloseFd` (and `kj::AutoCloseHandle` for Windows) can be found here. This is an RAII wrapper around a file descriptor (or `HANDLE`), which you will likely want to use any time you are manipulating raw file descriptors (or `HANDLE`s) in KJ code. + +### Filesystem + +KJ provides an advanced, cross-platform filesystem API in `kj/filesystem.h`. Features include: + +* Paths represented using `kj::Path`. In addition to providing common-sense path parsing and manipulation functions, this class is designed to defend against path injection attacks. +* All interfaces are abstract, allowing multiple implementations. +* An in-memory implementation is provided, useful in particular for mocking the filesystem in unit tests. +* On Unix, disk `kj::Directory` objects are backed by open file descriptors and use the `openat()` family of system calls. +* Makes it easy to use atomic replacement when writing new files -- and even whole directories. +* Symlinks, hard links, listing directories, recursive delete, recursive create parents, recursive copy directory, memory mapping, and unnamed temporary files are all exposed and easy to use. +* Sparse files ("hole punching"), copy-on-write file cloning (`FICLONE`, `FICLONERANGE`), `sendfile()`-based copying, `renameat2()` atomic replacements, and more will automatically be used when available. + +See the API reference (header file) for details. + +### Clocks and time + +KJ provides a time library in `kj/time.h` which uses the type system to enforce unit safety. + +`kj::Duration` represents a length of time, such as a number of seconds. Multiply an integer by `kj::SECONDS`, `kj::MINUTES`, `kj::NANOSECONDS`, etc. to get a `kj::Duration` value. Divide by the appropriate constant to get an integer. + +`kj::Date` represents a point in time in the real world. `kj::UNIX_EPOCH` represents January 1st, 1970, 00:00 UTC. Other dates can be constructed by adding a `kj::Duration` to `kj::UNIX_EPOCH`. Taking the difference between to `kj::Date`s produces a `kj::Duration`. + +`kj::TimePoint` represents a time point measured against an unspecified origin time. This is typically used with monotonic clocks that don't necessarily reflect calendar time. 
Unlike `kj::Date`, there is no implicit guarantee that two `kj::TimePoint`s are measured against the same origin and are therefore comparable; it is up to the application to track which clock any particular `kj::TimePoint` came from. + +`kj::Clock` is a simple interface whose `now()` method returns the current `kj::Date`. `kj::MonotonicClock` is a similar interface returning a `kj::TimePoint`, but with the guarantee that times returned always increase (whereas a `kj::Clock` might go "back in time" if the user manually modifies their system clock). + +`kj::systemCoarseCalendarClock()`, `kj::systemPreciseCalendarClock()`, `kj::systemCoarseMonotonicClock()`, `kj::systemPreciseMonotonicClock()` are global functions that return implementations of `kj::Clock` or `kJ::MonotonicClock` based on sytem time. + +`kj::Timer` provides an async (promise-based) interface to wait for a specified time to pass. A `kj::Timer` is provided via `kj::AsyncIoProvider`, constructed using `kj::setupAsyncIo()` (see earlier discussion on async I/O). + +## Program Harness + +TODO: kj::Main, unit test framework + +Libraries +====================================================================== + +TODO: parser combinator framework, HTTP, TLS, URL, encoding, JSON diff --git a/libs/EXTERNAL/capnproto/release.sh b/libs/EXTERNAL/capnproto/release.sh index 57970bfcb22..f9f104e776d 100755 --- a/libs/EXTERNAL/capnproto/release.sh +++ b/libs/EXTERNAL/capnproto/release.sh @@ -120,7 +120,7 @@ done_banner() { y | Y ) doit git push origin $PUSH doit gce-ss copy-files capnproto-c++-$VERSION.tar.gz capnproto-c++-win32-$VERSION.zip \ - fe:/var/www/capnproto.org + alpha2:/var/www/capnproto.org if [ "$FINAL" = yes ]; then echo "=========================================================================" @@ -177,7 +177,7 @@ case "${1-}:$BRANCH" in declare -a VERSION_ARR=(${RELEASE_VERSION//./ }) NEXT_VERSION=${VERSION_ARR[0]}.$((VERSION_ARR[1] + 1)) - update_version $HEAD_VERSION $NEXT_VERSION-dev "mainlaine" + update_version $HEAD_VERSION $NEXT_VERSION-dev "mainline" done_banner $RELEASE_VERSION-rc1 "master release-$RELEASE_VERSION" no ;; diff --git a/libs/EXTERNAL/capnproto/security-advisories/README.md b/libs/EXTERNAL/capnproto/security-advisories/README.md index b6490a01384..ce6b3222610 100644 --- a/libs/EXTERNAL/capnproto/security-advisories/README.md +++ b/libs/EXTERNAL/capnproto/security-advisories/README.md @@ -4,8 +4,6 @@ This directory contains security advisories issued for Cap'n Proto. Each advisory explains not just the bug that was fixed, but measures we are taking to avoid the class of bugs in the future. -Note that Cap'n Proto has not yet undergone formal security review and therefore should not yet be trusted for reading possibly-malicious input. Even so, Cap'n Proto intends to be secure and we treat security bugs no less seriously than we would had security review already taken place. - ## Reporting Bugs Please report security bugs to [security@sandstorm.io](mailto:security@sandstorm.io). diff --git a/libs/EXTERNAL/capnproto/style-guide.md b/libs/EXTERNAL/capnproto/style-guide.md index 20da9cec81d..d3d47ef0f56 100644 --- a/libs/EXTERNAL/capnproto/style-guide.md +++ b/libs/EXTERNAL/capnproto/style-guide.md @@ -67,7 +67,7 @@ KJ code is RAII-strict. Whenever it is the case that "this block of code cannot Use the macros `KJ_DEFER`, `KJ_ON_SCOPE_SUCCESS`, and `KJ_ON_SCOPE_FAILURE` to easily specify some code that must be executed on exit from the current scope, without the need to define a whole class with a destructor. 
-Be careful when writing complicated destructors. If a destructor performs multiple cleanup actions, you generally need to make sure that the latter actions occur even if the former ones throw an exception. For this reason, a destructor should generally perform no more than one cleanup action. If you need to clean up multiple things, have you class contain multiple members representing the different things that need cleanup, each with its own destructor. This way, if one member's destructor throws, the others still run. +Be careful when writing complicated destructors. If a destructor performs multiple cleanup actions, you generally need to make sure that the latter actions occur even if the former ones throw an exception. For this reason, a destructor should generally perform no more than one cleanup action. If you need to clean up multiple things, have your class contain multiple members representing the different things that need cleanup, each with its own destructor. This way, if one member's destructor throws, the others still run. ### Ownership @@ -109,7 +109,7 @@ Keep in mind that atomic (thread-safe) reference counting can be extremely slow. A "singleton" is any mutable object or value that is globally accessible. "Globally accessible" means that the object is declared as a global variable or static member variable, or that the object can be found by following pointers from such variables. -Never use singletons. Singletons cause invisible and unexpected dependencies between components of your software that appear unrelated. Worse, the assumption that "there should only be one of this object per process" is almost always wrong, but its wrongness only becomes apparent after so much code uses the singleton that it is infeasible to change. Singleton interfaces ofter turn into unusable monstrosities in an attempt to work around the fact that they should never have been a singleton in the first place. +Never use singletons. Singletons cause invisible and unexpected dependencies between components of your software that appear unrelated. Worse, the assumption that "there should only be one of this object per process" is almost always wrong, but its wrongness only becomes apparent after so much code uses the singleton that it is infeasible to change. Singleton interfaces often turn into unusable monstrosities in an attempt to work around the fact that they should never have been a singleton in the first place. See ["Singletons Considered Harmful"](http://www.object-oriented-security.org/lets-argue/singletons) for a complete discussion. @@ -449,7 +449,7 @@ As a code reviewer, when you see a violation of formatting rules, think carefull **Rationale:** There has never been broad agreement on C++ naming style. The closest we have is the C++ standard library. Unfortunately, the C++ standard library made the awful decision of naming types and values in the same style, losing a highly useful visual cue that makes programming more pleasant, and preventing variables from being named after their type (which in many contexts is perfectly appropriate). -Meanwhile, the Java style, which KJ emulates, has been broadly adopted to varying degrees in other languages, from Javascript to Haskell. Using a similar style in KJ code makes it less jarring to switch between C++ and those other languages. Being consistent with Javascript is especially useful because it is the one language that everyone pretty much has to use, due to its use in the web platform. 
+Meanwhile, the Java style, which KJ emulates, has been broadly adopted to varying degrees in other languages, from JavaScript to Haskell. Using a similar style in KJ code makes it less jarring to switch between C++ and those other languages. Being consistent with JavaScript is especially useful because it is the one language that everyone pretty much has to use, due to its use in the web platform. There has also never been any agreement on C++ file extensions, for some reason. The extension `.c++`, though not widely used, is accepted by all reasonable tools and is clearly the most precise choice. diff --git a/libs/EXTERNAL/capnproto/super-test.sh b/libs/EXTERNAL/capnproto/super-test.sh index 2a948cbe3f1..e578105c97c 100755 --- a/libs/EXTERNAL/capnproto/super-test.sh +++ b/libs/EXTERNAL/capnproto/super-test.sh @@ -13,14 +13,20 @@ function test_samples() { ./addressbook dwrite | ./addressbook dread rm -f /tmp/capnp-calculator-example-$$ ./calculator-server unix:/tmp/capnp-calculator-example-$$ & - sleep 0.1 + local SERVER_PID=$! + sleep 1 ./calculator-client unix:/tmp/capnp-calculator-example-$$ - kill %+ - wait %+ || true + # `kill %./calculator-server` doesn't seem to work on recent Cygwins, but we can kill by PID. + kill -9 $SERVER_PID + # This `fg` will fail if bash happens to have already noticed the quit and reaped the process + # before `fg` is invoked, so in that case we just proceed. + fg %./calculator-server || true rm -f /tmp/capnp-calculator-example-$$ } QUICK= +CPP_FEATURES= +EXTRA_LIBS= PARALLEL=$(nproc 2>/dev/null || echo 1) @@ -37,6 +43,30 @@ while [ $# -gt 0 ]; do quick ) QUICK=quick ;; + cpp-features ) + if [ "$#" -lt 2 ] || [ -n "$CPP_FEATURES" ]; then + echo "usage: $0 cpp-features CPP_DEFINES" >&2 + echo "e.g. $0 cpp-features '-DSOME_VAR=5 -DSOME_OTHER_VAR=6'" >&2 + if [ -n "$CPP_FEATURES" ]; then + echo "cpp-features provided multiple times" >&2 + fi + exit 1 + fi + CPP_FEATURES="$2" + shift + ;; + extra-libs ) + if [ "$#" -lt 2 ] || [ -n "$EXTRA_LIBS" ]; then + echo "usage: $0 extra-libs EXTRA_LIBS" >&2 + echo "e.g. $0 extra-libs '-lrt'" >&2 + if [ -n "$EXTRA_LIBS" ]; then + echo "extra-libs provided multiple times" >&2 + fi + exit 1 + fi + EXTRA_LIBS="$2" + shift + ;; caffeinate ) # Re-run preventing sleep. 
diff --git a/libs/EXTERNAL/capnproto/super-test.sh b/libs/EXTERNAL/capnproto/super-test.sh
index 2a948cbe3f1..e578105c97c 100755
--- a/libs/EXTERNAL/capnproto/super-test.sh
+++ b/libs/EXTERNAL/capnproto/super-test.sh
@@ -13,14 +13,20 @@ function test_samples() {
   ./addressbook dwrite | ./addressbook dread
   rm -f /tmp/capnp-calculator-example-$$
   ./calculator-server unix:/tmp/capnp-calculator-example-$$ &
-  sleep 0.1
+  local SERVER_PID=$!
+  sleep 1
   ./calculator-client unix:/tmp/capnp-calculator-example-$$
-  kill %+
-  wait %+ || true
+  # `kill %./calculator-server` doesn't seem to work on recent Cygwins, but we can kill by PID.
+  kill -9 $SERVER_PID
+  # This `fg` will fail if bash happens to have already noticed the quit and reaped the process
+  # before `fg` is invoked, so in that case we just proceed.
+  fg %./calculator-server || true
   rm -f /tmp/capnp-calculator-example-$$
 }
 
 QUICK=
+CPP_FEATURES=
+EXTRA_LIBS=
 
 PARALLEL=$(nproc 2>/dev/null || echo 1)
 
@@ -37,6 +43,30 @@ while [ $# -gt 0 ]; do
     quick )
       QUICK=quick
       ;;
+    cpp-features )
+      if [ "$#" -lt 2 ] || [ -n "$CPP_FEATURES" ]; then
+        echo "usage: $0 cpp-features CPP_DEFINES" >&2
+        echo "e.g. $0 cpp-features '-DSOME_VAR=5 -DSOME_OTHER_VAR=6'" >&2
+        if [ -n "$CPP_FEATURES" ]; then
+          echo "cpp-features provided multiple times" >&2
+        fi
+        exit 1
+      fi
+      CPP_FEATURES="$2"
+      shift
+      ;;
+    extra-libs )
+      if [ "$#" -lt 2 ] || [ -n "$EXTRA_LIBS" ]; then
+        echo "usage: $0 extra-libs EXTRA_LIBS" >&2
+        echo "e.g. $0 extra-libs '-lrt'" >&2
+        if [ -n "$EXTRA_LIBS" ]; then
+          echo "extra-libs provided multiple times" >&2
+        fi
+        exit 1
+      fi
+      EXTRA_LIBS="$2"
+      shift
+      ;;
     caffeinate )
       # Re-run preventing sleep.
       shift
@@ -91,17 +121,17 @@ while [ $# -gt 0 ]; do
       export CXX="$2"
      shift
      ;;
-    clang )
-      export CXX=clang++
-      ;;
-    gcc-4.9 )
-      export CXX=g++-4.9
+    clang* )
+      export CXX=clang++${1#clang}
+      if [ "$1" != "clang-5.0" ]; then
+        export LIB_FUZZING_ENGINE=-fsanitize=fuzzer
+      fi
      ;;
-    gcc-4.8 )
-      export CXX=g++-4.8
+    gcc* )
+      export CXX=g++${1#gcc}
      ;;
-    gcc-4.7 )
-      export CXX=g++-4.7
+    g++* )
+      export CXX=$1
      ;;
     mingw )
       if [ "$#" -ne 2 ]; then
@@ -116,7 +146,7 @@ while [ $# -gt 0 ]; do
 
       export WINEPATH='Z:\usr\'"$CROSS_HOST"'\lib;Z:\usr\lib\gcc\'"$CROSS_HOST"'\6.3-win32;Z:'"$PWD"'\.libs'
 
-      doit ./configure --host="$CROSS_HOST" --disable-shared CXXFLAGS='-static-libgcc -static-libstdc++'
+      doit ./configure --host="$CROSS_HOST" --disable-shared CXXFLAGS="-static-libgcc -static-libstdc++ $CPP_FEATURES" LIBS="$EXTRA_LIBS"
       doit make -j$PARALLEL check
 
       doit make distclean
@@ -126,18 +156,19 @@ while [ $# -gt 0 ]; do
     android )
       # To install Android SDK:
       # - Download command-line tools: https://developer.android.com/studio/index.html#command-tools
-      # - export SDKMANAGER_OPTS="--add-modules java.se.ee"
       # - Run $SDK_HOME/tools/bin/sdkmanager platform-tools 'platforms;android-25' 'system-images;android-25;google_apis;armeabi-v7a' emulator 'build-tools;25.0.2' ndk-bundle
-      # - export AVDMANAGER_OPTS="--add-modules java.se.ee"
       # - Run $SDK_HOME/tools/bin/avdmanager create avd -n capnp -k 'system-images;android-25;google_apis;armeabi-v7a' -b google_apis/armeabi-v7a
-      # - Run $SDK_HOME/ndk-bundle/build/tools/make_standalone_toolchain.py --arch arm --api 24 --install-dir $TOOLCHAIN_HOME
       if [ "$#" -ne 4 ]; then
-        echo "usage: $0 android SDK_HOME TOOLCHAIN_HOME CROSS_HOST" >&2
+        echo "usage: $0 android SDK_HOME CROSS_HOST COMPILER_PREFIX" >&2
+        echo
+        echo "SDK_HOME: Location where android-sdk is installed." >&2
+        echo "CROSS_HOST: E.g. arm-linux-androideabi" >&2
+        echo "COMPILER_PREFIX: E.g. armv7a-linux-androideabi24" >&2
         exit 1
       fi
       SDK_HOME=$2
-      TOOLCHAIN_HOME=$3
-      CROSS_HOST=$4
+      CROSS_HOST=$3
+      COMPILER_PREFIX=$4
 
       cd c++
       test -e configure || doit autoreconf -i
@@ -148,9 +179,9 @@ while [ $# -gt 0 ]; do
       cp capnp capnp-host
       cp capnpc-c++ capnpc-c++-host
 
-      export PATH="$TOOLCHAIN_HOME/bin:$PATH"
+      export PATH="$SDK_HOME/ndk-bundle/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH"
       doit make distclean
-      doit ./configure --host="$CROSS_HOST" CC=clang CXX=clang++ --with-external-capnp --disable-shared CXXFLAGS='-fPIE' LDFLAGS='-pie' LIBS='-static-libstdc++ -static-libgcc -ldl' CAPNP=./capnp-host CAPNPC_CXX=./capnpc-c++-host
+      doit ./configure --host="$CROSS_HOST" CC="$COMPILER_PREFIX-clang" CXX="$COMPILER_PREFIX-clang++" --with-external-capnp --disable-shared CXXFLAGS="-fPIE $CPP_FEATURES" LDFLAGS='-pie' LIBS="-static-libstdc++ -static-libgcc -ldl $EXTRA_LIBS" CAPNP=./capnp-host CAPNPC_CXX=./capnpc-c++-host
       doit make -j$PARALLEL
       doit make -j$PARALLEL capnp-test
 
@@ -317,7 +348,14 @@ done
 # sign-compare warnings than probably all other warnings combined and I've never seen it flag a
 # real problem. Disable unused parameters because it's stupidly noisy and never a real problem.
 # Enable expensive release-gating tests.
-export CXXFLAGS="-O2 -DDEBUG -Wall -Wextra -Werror -Wno-strict-aliasing -Wno-sign-compare -Wno-unused-parameter -DCAPNP_EXPENSIVE_TESTS=1"
+export CXXFLAGS="-O2 -DDEBUG -Wall -Wextra -Werror -Wno-strict-aliasing -Wno-sign-compare -Wno-unused-parameter -DCAPNP_EXPENSIVE_TESTS=1 ${CPP_FEATURES}"
+export LIBS="$EXTRA_LIBS"
+
+if [ "${CXX:-}" != "g++-5" ]; then
+  # This warning flag is missing on g++-5 but available on all other GCC/Clang versions we target
+  # in CI.
+  export CXXFLAGS="$CXXFLAGS -Wimplicit-fallthrough"
+fi
 
 STAGING=$PWD/tmp-staging
 
@@ -338,7 +376,20 @@ echo "Building c++"
 echo "========================================================================="
 
 # Apple now aliases gcc to clang, so probe to find out what compiler we're really using.
-if (${CXX:-g++} -dM -E -x c++ /dev/null 2>&1 | grep -q '__clang__'); then
+#
+# NOTE: You might be tempted to use `grep -q` here instead of sending output to /dev/null. However,
+# we cannot, because `grep -q` exits immediately upon seeing a match. If it exits too soon, the
+# first stage of the pipeline gets killed, and the whole expression is considered to have failed
+# since we are running bash with the `pipefail` option enabled.
+# FUN STORY: We used to use grep -q. One day, we found that Clang 9 when running under GitHub
+# Actions was detected as *not* Clang. But if we ran it twice, it would succeed on the second
+# try. It turns out that under previous versions of Clang, the `__clang__` define was pretty
+# close to the end of the list, so it always managed to write the whole list before `grep -q`
+# exited. But under Clang 9, there's a bunch more defines after this one, giving more time for
+# `grep -q` to exit and break everything. But if the compiler had executed once recently then
+# the second run would go faster due to caching (I guess) and manage to get all the data out
+# to the buffer in time.
+if (${CXX:-g++} -dM -E -x c++ /dev/null 2>&1 | grep '__clang__' > /dev/null); then
   IS_CLANG=yes
   DISABLE_OPTIMIZATION_IF_GCC=
 else
@@ -358,13 +409,13 @@ fi
 
 cd c++
 doit autoreconf -i
-doit ./configure --prefix="$STAGING"
+doit ./configure --prefix="$STAGING" || (cat config.log && exit 1)
 doit make -j$PARALLEL check
 
 if [ $IS_CLANG = no ]; then
   # Verify that generated code compiles with pedantic warnings. Make sure to treat capnp headers
   # as system headers so warnings in them are ignored.
-  doit ${CXX:-g++} -isystem src -std=c++1y -fno-permissive -pedantic -Wall -Wextra -Werror \
+  doit ${CXX:-g++} -isystem src -std=c++14 -fno-permissive -pedantic -Wall -Wextra -Werror \
       -c src/capnp/test.capnp.c++ -o /dev/null
 fi
 
@@ -380,13 +431,13 @@ test "x$(which capnpc-c++)" = "x$STAGING/bin/capnpc-c++"
 
 cd samples
 doit capnp compile -oc++ addressbook.capnp -I"$STAGING"/include --no-standard-import
-doit ${CXX:-g++} -std=c++1y addressbook.c++ addressbook.capnp.c++ -o addressbook \
+doit ${CXX:-g++} -std=c++14 addressbook.c++ addressbook.capnp.c++ -o addressbook \
     $CXXFLAGS $(pkg-config --cflags --libs capnp)
 
 doit capnp compile -oc++ calculator.capnp -I"$STAGING"/include --no-standard-import
-doit ${CXX:-g++} -std=c++1y calculator-client.c++ calculator.capnp.c++ -o calculator-client \
+doit ${CXX:-g++} -std=c++14 calculator-client.c++ calculator.capnp.c++ -o calculator-client \
     $CXXFLAGS $(pkg-config --cflags --libs capnp-rpc)
-doit ${CXX:-g++} -std=c++1y calculator-server.c++ calculator.capnp.c++ -o calculator-server \
+doit ${CXX:-g++} -std=c++14 calculator-server.c++ calculator.capnp.c++ -o calculator-server \
     $CXXFLAGS $(pkg-config --cflags --libs capnp-rpc)
 
 test_samples
 
@@ -412,16 +463,7 @@ if [ "$QUICK" = quick ]; then
 fi
 
 echo "========================================================================="
-echo "Testing --with-external-capnp"
-echo "========================================================================="
-
-doit make distclean
-doit ./configure --prefix="$STAGING" --disable-shared \
-    --with-external-capnp CAPNP=$STAGING/bin/capnp
-doit make -j$PARALLEL check
-
-echo "========================================================================="
-echo "Testing --disable-reflection"
+echo "Testing --with-external-capnp and --disable-reflection"
 echo "========================================================================="
 
 doit make distclean
@@ -431,32 +473,20 @@ doit make -j$PARALLEL check
 doit make distclean
 
 # Test 32-bit build now while we have $STAGING available for cross-compiling.
-if [ "x`uname -m`" = "xx86_64" ]; then
+#
+# Cygwin64 can cross-compile to Cygwin32 but can't actually run the cross-compiled binaries. Let's
+# just skip this test on Cygwin since it's so slow and honestly no one cares.
+#
+# MacOS apparently no longer distributes 32-bit standard libraries. OK fine let's restrict this to
+# Linux.
+if [ "x`uname -m`" = "xx86_64" ] && [ "x`uname`" = xLinux ]; then
   echo "========================================================================="
   echo "Testing 32-bit build"
   echo "========================================================================="
 
-  if [[ "`uname`" =~ CYGWIN ]]; then
-    # It's just not possible to run cygwin32 binaries from within cygwin64.
-
-    # Build as if we are cross-compiling, using the capnp we installed to $STAGING.
-    doit ./configure --prefix="$STAGING" --disable-shared --host=i686-pc-cygwin \
-        --with-external-capnp CAPNP=$STAGING/bin/capnp
-    doit make -j$PARALLEL
-    doit make -j$PARALLEL capnp-test.exe
-
-    # Expect a cygwin32 sshd to be listening at localhost port 2222, and use it
-    # to run the tests.
-    doit scp -P 2222 capnp-test.exe localhost:~/tmp-capnp-test.exe
-    doit ssh -p 2222 localhost './tmp-capnp-test.exe && rm tmp-capnp-test.exe'
-
-    doit make distclean
-
-  elif [ "x${CXX:-g++}" != "xg++-4.8" ]; then
-    doit ./configure CXX="${CXX:-g++} -m32" CXXFLAGS="$CXXFLAGS ${ADDL_M32_FLAGS:-}" --disable-shared
-    doit make -j$PARALLEL check
-    doit make distclean
-  fi
+  doit ./configure CXX="${CXX:-g++} -m32" CXXFLAGS="$CXXFLAGS ${ADDL_M32_FLAGS:-}" --disable-shared
+  doit make -j$PARALLEL check
+  doit make distclean
 fi
 
 echo "========================================================================="
@@ -493,32 +523,23 @@ echo "========================================================================="
 # is inlined in hundreds of other places without issue, so I have no idea how to narrow down the
 # bug. Clang works fine. So, for now, we disable optimizations on GCC for -fno-exceptions tests.
 
-doit ./configure --disable-shared CXXFLAGS="$CXXFLAGS -fno-rtti"
-doit make -j$PARALLEL check
-doit make distclean
-doit ./configure --disable-shared CXXFLAGS="$CXXFLAGS -fno-exceptions $DISABLE_OPTIMIZATION_IF_GCC"
-doit make -j$PARALLEL check
-doit make distclean
 doit ./configure --disable-shared CXXFLAGS="$CXXFLAGS -fno-rtti -fno-exceptions $DISABLE_OPTIMIZATION_IF_GCC"
 doit make -j$PARALLEL check
 
-# Valgrind is currently "experimental and mostly broken" on OSX and fails to run the full test
-# suite, but I have it installed because it did manage to help me track down a bug or two. Anyway,
-# skip it on OSX for now.
-if [ "x`uname`" != xDarwin ] && which valgrind > /dev/null; then
+if [ "x`uname`" = xLinux ]; then
   doit make distclean
 
   echo "========================================================================="
   echo "Testing with valgrind"
   echo "========================================================================="
 
-  doit ./configure --disable-shared CXXFLAGS="-g"
+  doit ./configure --disable-shared CXXFLAGS="-g $CPP_FEATURES"
   doit make -j$PARALLEL
   doit make -j$PARALLEL capnp-test
 
   # Running the fuzz tests under Valgrind is a great thing to do -- but it takes
   # some 40 minutes. So, it needs to be done as a separate step of the release
   # process, perhaps along with the AFL tests.
-  CAPNP_SKIP_FUZZ_TEST=1 doit valgrind --leak-check=full --track-fds=yes --error-exitcode=1 --child-silent-after-fork=yes --sim-hints=lax-ioctls ./capnp-test
+  CAPNP_SKIP_FUZZ_TEST=1 doit valgrind --leak-check=full --track-fds=yes --error-exitcode=1 --child-silent-after-fork=yes --sim-hints=lax-ioctls --suppressions=valgrind.supp ./capnp-test
 fi
 
 doit make maintainer-clean