diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index 4384c701a9e..2a66f7dc760 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -10,7 +10,6 @@ runs: shell: bash run: | conan export --version 1.1.10 external/snappy - conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci conan export --version 2.3.1 external/wamr - name: add Ripple Conan remote @@ -23,7 +22,6 @@ runs: fi conan remote add --index 0 ripple "${CONAN_URL}" echo "Added conan remote ripple at ${CONAN_URL}" - - name: try to authenticate to Ripple Conan remote if: env.CONAN_LOGIN_USERNAME_RIPPLE != '' && env.CONAN_PASSWORD_RIPPLE != '' id: remote @@ -32,7 +30,6 @@ runs: echo "Authenticating to ripple remote..." conan remote auth ripple --force conan remote list-users - - name: list missing binaries id: binaries shell: bash @@ -47,7 +44,7 @@ runs: cd ${build_dir} conan install \ --output-folder . \ - --build missing \ + --build '*' \ --options:host "&:tests=True" \ --options:host "&:xrpld=True" \ --settings:all build_type=${{ inputs.configuration }} \ diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index b21d912f44b..4a414935582 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -24,8 +24,6 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} - core:default_build_profile=libxrpl - core:default_profile=libxrpl tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} tools.build:verbosity=verbose tools.compilation:verbosity=verbose @@ -95,7 +93,6 @@ jobs: shell: bash run: | conan export --version 1.1.10 external/snappy - conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci conan export --version 2.3.1 external/wamr - name: add Ripple Conan remote diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index d404a0fec24..cf70a162400 
100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -25,8 +25,6 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }} - core:default_build_profile=libxrpl - core:default_profile=libxrpl tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} tools.build:verbosity=verbose tools.compilation:verbosity=verbose @@ -101,7 +99,6 @@ jobs: run: tar -czf conan.tar.gz -C ${CONAN_HOME} . - name: build dependencies uses: ./.github/actions/dependencies - with: configuration: ${{ matrix.configuration }} - name: upload archive @@ -359,40 +356,44 @@ jobs: cmake --build . ./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+' - # NOTE we are not using dependencies built above because it lags with - # compiler versions. Instrumentation requires clang version 16 or - # later - instrumentation-build: - if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} - env: - CLANG_RELEASE: 16 + needs: dependencies runs-on: [self-hosted, heavy] container: ghcr.io/xrplf/ci/debian-bookworm:clang-16 - + env: + build_dir: .build steps: - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - name: download cache + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 + with: + name: linux-clang-Debug - - name: prepare environment + - name: extract cache run: | - mkdir ${GITHUB_WORKSPACE}/.build - echo "SOURCE_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV - echo "BUILD_DIR=$GITHUB_WORKSPACE/.build" >> $GITHUB_ENV + mkdir -p ${CONAN_HOME} + tar -xzf conan.tar.gz -C ${CONAN_HOME} - - name: configure Conan + - name: check environment run: | - echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf - conan config install conan/profiles/ -tf $(conan config home)/profiles/ - conan export --version 2.3.1 external/wamr - conan profile show - - name: build dependencies + echo 
${PATH} | tr ':' '\n' + conan --version + cmake --version + env | sort + ls ${CONAN_HOME} + + - name: checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + + - name: dependencies + uses: ./.github/actions/dependencies + with: + configuration: Debug + + - name: prepare environment run: | - cd ${BUILD_DIR} - conan install ${SOURCE_DIR} \ - --output-folder ${BUILD_DIR} \ - --build missing \ - --settings:all build_type=Debug + mkdir -p ${build_dir} + echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV + echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV - name: build with instrumentation run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 369e4869d47..e759b2edc8c 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -27,8 +27,6 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} - core:default_build_profile=libxrpl - core:default_profile=libxrpl tools.build:jobs=24 tools.build:verbosity=verbose tools.compilation:verbosity=verbose @@ -90,7 +88,6 @@ jobs: shell: bash run: | conan export --version 1.1.10 external/snappy - conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci conan export --version 2.3.1 external/wamr - name: add Ripple Conan remote diff --git a/BUILD.md b/BUILD.md index 6eb162a3767..0ea3c12e62a 100644 --- a/BUILD.md +++ b/BUILD.md @@ -172,14 +172,6 @@ which allows you to statically link it with GCC, if you want. conan export --version 1.1.10 external/snappy ``` -Export our [Conan recipe for RocksDB](./external/rocksdb). -It does not override paths to dependencies when building with Visual Studio. - -``` -# Conan 2.x -conan export --version 9.7.3 external/rocksdb -``` - Export our [Conan recipe for SOCI](./external/soci). It patches their CMake to correctly import its dependencies. @@ -390,10 +382,7 @@ and can be helpful for detecting `#include` omissions. 
After any updates or changes to dependencies, you may need to do the following: 1. Remove your build directory. -2. Remove the Conan cache: - ``` - rm -rf ~/.conan/data - ``` +2. Remove the Conan cache: `conan remove "*" -c` 3. Re-run [conan install](#build-and-test). ### 'protobuf/port_def.inc' file not found @@ -412,50 +401,6 @@ For example, if you want to build Debug: 1. For conan install, pass `--settings build_type=Debug` 2. For cmake, pass `-DCMAKE_BUILD_TYPE=Debug` -### no std::result_of - -If your compiler version is recent enough to have removed `std::result_of` as -part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor -definition to your build. - -``` -conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default -conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default -``` - -### call to 'async_teardown' is ambiguous - -If you are compiling with an early version of Clang 16, then you might hit -a [regression][6] when compiling C++20 that manifests as an [error in a Boost -header][7]. 
You can workaround it by adding this preprocessor definition: - -``` -conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default -conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default -``` - -### recompile with -fPIC - -If you get a linker error suggesting that you recompile Boost with -position-independent code, such as: - -``` -/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/.../lib/libboost_container.a(alloc_lib.o): - requires unsupported dynamic reloc 11; recompile with -fPIC -``` - -Conan most likely downloaded a bad binary distribution of the dependency. -This seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled with GCC -for Linux. The solution is to build the dependency locally by passing -`--build boost` when calling `conan install`. - -``` -conan install --build boost ... -``` - ## Add a Dependency If you want to experiment with a new package, follow these steps: diff --git a/cmake/RippledCompiler.cmake b/cmake/RippledCompiler.cmake index 7485605d950..30058fd5036 100644 --- a/cmake/RippledCompiler.cmake +++ b/cmake/RippledCompiler.cmake @@ -90,28 +90,15 @@ if (MSVC) -errorreport:none -machine:X64) else () - # HACK : because these need to come first, before any warning demotion - string (APPEND CMAKE_CXX_FLAGS " -Wall -Wdeprecated") - if (wextra) - string (APPEND CMAKE_CXX_FLAGS " -Wextra -Wno-unused-parameter") - endif () - # not MSVC target_compile_options (common INTERFACE + -Wall + -Wdeprecated + $<$:-Wextra -Wno-unused-parameter> $<$:-Werror> - $<$: - -frtti - -Wnon-virtual-dtor - > - -Wno-sign-compare - -Wno-char-subscripts - -Wno-format - -Wno-unused-local-typedefs -fstack-protector - $<$: - -Wno-unused-but-set-variable - -Wno-deprecated - > + -Wno-sign-compare + -Wno-unused-but-set-variable $<$>:-fno-strict-aliasing> # tweak gcc optimization for debug $<$,$>:-O0> diff --git a/cmake/RippledCore.cmake b/cmake/RippledCore.cmake index b3a6a77bcb5..e5c584d1eb0 
100644 --- a/cmake/RippledCore.cmake +++ b/cmake/RippledCore.cmake @@ -68,6 +68,11 @@ target_link_libraries(xrpl.imports.main wamr::wamr ) +if (WIN32) + target_link_libraries(xrpl.imports.main INTERFACE ntdll) +endif() + + include(add_module) include(target_link_modules) diff --git a/conan/profiles/libxrpl b/conan/profiles/default similarity index 56% rename from conan/profiles/libxrpl rename to conan/profiles/default index b037b8c4a2a..3a7bcda1c65 100644 --- a/conan/profiles/libxrpl +++ b/conan/profiles/default @@ -9,6 +9,7 @@ [settings] os={{ os }} arch={{ arch }} +build_type=Debug compiler={{compiler}} compiler.version={{ compiler_version }} compiler.cppstd=20 @@ -17,3 +18,17 @@ compiler.runtime=static {% else %} compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}} {% endif %} + +[conf] +{% if compiler == "clang" and compiler_version >= 19 %} +tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] +{% endif %} +{% if compiler == "apple-clang" and compiler_version >= 17 %} +tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] +{% endif %} +{% if compiler == "gcc" and compiler_version < 13 %} +tools.build:cxxflags=['-Wno-restrict'] +{% endif %} + +[tool_requires] +!cmake/*: cmake/[>=3 <4] diff --git a/conanfile.py b/conanfile.py index 9a4eccbdce1..898cfe74d70 100644 --- a/conanfile.py +++ b/conanfile.py @@ -106,7 +106,7 @@ def configure(self): def requirements(self): # Conan 2 requires transitive headers to be specified transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {} - self.requires('boost/1.83.0', force=True, **transitive_headers_opt) + self.requires('boost/1.86.0', force=True, **transitive_headers_opt) self.requires('date/3.0.4', **transitive_headers_opt) self.requires('lz4/1.10.0', force=True) self.requires('protobuf/3.21.12', force=True) @@ -114,7 +114,7 @@ def requirements(self): if self.options.jemalloc: self.requires('jemalloc/5.3.0') if 
self.options.rocksdb: - self.requires('rocksdb/9.7.3') + self.requires('rocksdb/10.0.1') self.requires('xxhash/0.8.3', **transitive_headers_opt) exports_sources = ( @@ -146,8 +146,6 @@ def generate(self): tc.variables['static'] = self.options.static tc.variables['unity'] = self.options.unity tc.variables['xrpld'] = self.options.xrpld - if self.settings.compiler == 'clang' and self.settings.compiler.version == 16: - tc.extra_cxxflags = ["-DBOOST_ASIO_DISABLE_CONCEPTS"] tc.generate() def build(self): diff --git a/external/ed25519-donna/CMakeLists.txt b/external/ed25519-donna/CMakeLists.txt index 418dc38326b..f060d530aa4 100644 --- a/external/ed25519-donna/CMakeLists.txt +++ b/external/ed25519-donna/CMakeLists.txt @@ -17,6 +17,9 @@ add_library(ed25519 STATIC ) add_library(ed25519::ed25519 ALIAS ed25519) target_link_libraries(ed25519 PUBLIC OpenSSL::SSL) +if(NOT MSVC) + target_compile_options(ed25519 PRIVATE -Wno-implicit-fallthrough) +endif() include(GNUInstallDirs) diff --git a/external/rocksdb/conandata.yml b/external/rocksdb/conandata.yml deleted file mode 100644 index 7d7a575d980..00000000000 --- a/external/rocksdb/conandata.yml +++ /dev/null @@ -1,12 +0,0 @@ -sources: - "9.7.3": - url: "https://github.com/facebook/rocksdb/archive/refs/tags/v9.7.3.tar.gz" - sha256: "acfabb989cbfb5b5c4d23214819b059638193ec33dad2d88373c46448d16d38b" -patches: - "9.7.3": - - patch_file: "patches/9.x.x-0001-exclude-thirdparty.patch" - patch_description: "Do not include thirdparty.inc" - patch_type: "portability" - - patch_file: "patches/9.7.3-0001-memory-leak.patch" - patch_description: "Fix a leak of obsolete blob files left open until DB::Close()" - patch_type: "portability" diff --git a/external/rocksdb/conanfile.py b/external/rocksdb/conanfile.py deleted file mode 100644 index 8b85ce1540d..00000000000 --- a/external/rocksdb/conanfile.py +++ /dev/null @@ -1,235 +0,0 @@ -import os -import glob -import shutil - -from conan import ConanFile -from conan.errors import 
ConanInvalidConfiguration -from conan.tools.build import check_min_cppstd -from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout -from conan.tools.files import apply_conandata_patches, collect_libs, copy, export_conandata_patches, get, rm, rmdir -from conan.tools.microsoft import check_min_vs, is_msvc, is_msvc_static_runtime -from conan.tools.scm import Version - -required_conan_version = ">=1.53.0" - - -class RocksDBConan(ConanFile): - name = "rocksdb" - description = "A library that provides an embeddable, persistent key-value store for fast storage" - license = ("GPL-2.0-only", "Apache-2.0") - url = "https://github.com/conan-io/conan-center-index" - homepage = "https://github.com/facebook/rocksdb" - topics = ("database", "leveldb", "facebook", "key-value") - package_type = "library" - settings = "os", "arch", "compiler", "build_type" - options = { - "shared": [True, False], - "fPIC": [True, False], - "lite": [True, False], - "with_gflags": [True, False], - "with_snappy": [True, False], - "with_lz4": [True, False], - "with_zlib": [True, False], - "with_zstd": [True, False], - "with_tbb": [True, False], - "with_jemalloc": [True, False], - "enable_sse": [False, "sse42", "avx2"], - "use_rtti": [True, False], - } - default_options = { - "shared": False, - "fPIC": True, - "lite": False, - "with_snappy": False, - "with_lz4": False, - "with_zlib": False, - "with_zstd": False, - "with_gflags": False, - "with_tbb": False, - "with_jemalloc": False, - "enable_sse": False, - "use_rtti": False, - } - - @property - def _min_cppstd(self): - return "11" if Version(self.version) < "8.8.1" else "17" - - @property - def _compilers_minimum_version(self): - return {} if self._min_cppstd == "11" else { - "apple-clang": "10", - "clang": "7", - "gcc": "7", - "msvc": "191", - "Visual Studio": "15", - } - - def export_sources(self): - export_conandata_patches(self) - - def config_options(self): - if self.settings.os == "Windows": - del self.options.fPIC - if 
self.settings.arch != "x86_64": - del self.options.with_tbb - if self.settings.build_type == "Debug": - self.options.use_rtti = True # Rtti are used in asserts for debug mode... - - def configure(self): - if self.options.shared: - self.options.rm_safe("fPIC") - - def layout(self): - cmake_layout(self, src_folder="src") - - def requirements(self): - if self.options.with_gflags: - self.requires("gflags/2.2.2") - if self.options.with_snappy: - self.requires("snappy/1.1.10") - if self.options.with_lz4: - self.requires("lz4/1.10.0") - if self.options.with_zlib: - self.requires("zlib/[>=1.2.11 <2]") - if self.options.with_zstd: - self.requires("zstd/1.5.6") - if self.options.get_safe("with_tbb"): - self.requires("onetbb/2021.12.0") - if self.options.with_jemalloc: - self.requires("jemalloc/5.3.0") - - def validate(self): - if self.settings.compiler.get_safe("cppstd"): - check_min_cppstd(self, self._min_cppstd) - - minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False) - if minimum_version and Version(self.settings.compiler.version) < minimum_version: - raise ConanInvalidConfiguration( - f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support." 
- ) - - if self.settings.arch not in ["x86_64", "ppc64le", "ppc64", "mips64", "armv8"]: - raise ConanInvalidConfiguration("Rocksdb requires 64 bits") - - check_min_vs(self, "191") - - if self.version == "6.20.3" and \ - self.settings.os == "Linux" and \ - self.settings.compiler == "gcc" and \ - Version(self.settings.compiler.version) < "5": - raise ConanInvalidConfiguration("Rocksdb 6.20.3 is not compilable with gcc <5.") # See https://github.com/facebook/rocksdb/issues/3522 - - def source(self): - get(self, **self.conan_data["sources"][self.version], strip_root=True) - - def generate(self): - tc = CMakeToolchain(self) - tc.variables["FAIL_ON_WARNINGS"] = False - tc.variables["WITH_TESTS"] = False - tc.variables["WITH_TOOLS"] = False - tc.variables["WITH_CORE_TOOLS"] = False - tc.variables["WITH_BENCHMARK_TOOLS"] = False - tc.variables["WITH_FOLLY_DISTRIBUTED_MUTEX"] = False - if is_msvc(self): - tc.variables["WITH_MD_LIBRARY"] = not is_msvc_static_runtime(self) - tc.variables["ROCKSDB_INSTALL_ON_WINDOWS"] = self.settings.os == "Windows" - tc.variables["ROCKSDB_LITE"] = self.options.lite - tc.variables["WITH_GFLAGS"] = self.options.with_gflags - tc.variables["WITH_SNAPPY"] = self.options.with_snappy - tc.variables["WITH_LZ4"] = self.options.with_lz4 - tc.variables["WITH_ZLIB"] = self.options.with_zlib - tc.variables["WITH_ZSTD"] = self.options.with_zstd - tc.variables["WITH_TBB"] = self.options.get_safe("with_tbb", False) - tc.variables["WITH_JEMALLOC"] = self.options.with_jemalloc - tc.variables["ROCKSDB_BUILD_SHARED"] = self.options.shared - tc.variables["ROCKSDB_LIBRARY_EXPORTS"] = self.settings.os == "Windows" and self.options.shared - tc.variables["ROCKSDB_DLL" ] = self.settings.os == "Windows" and self.options.shared - tc.variables["USE_RTTI"] = self.options.use_rtti - if not bool(self.options.enable_sse): - tc.variables["PORTABLE"] = True - tc.variables["FORCE_SSE42"] = False - elif self.options.enable_sse == "sse42": - tc.variables["PORTABLE"] = True - 
tc.variables["FORCE_SSE42"] = True - elif self.options.enable_sse == "avx2": - tc.variables["PORTABLE"] = False - tc.variables["FORCE_SSE42"] = False - # not available yet in CCI - tc.variables["WITH_NUMA"] = False - tc.generate() - - deps = CMakeDeps(self) - if self.options.with_jemalloc: - deps.set_property("jemalloc", "cmake_file_name", "JeMalloc") - deps.set_property("jemalloc", "cmake_target_name", "JeMalloc::JeMalloc") - if self.options.with_zstd: - deps.set_property("zstd", "cmake_target_name", "zstd::zstd") - deps.generate() - - def build(self): - apply_conandata_patches(self) - cmake = CMake(self) - cmake.configure() - cmake.build() - - def _remove_static_libraries(self): - rm(self, "rocksdb.lib", os.path.join(self.package_folder, "lib")) - for lib in glob.glob(os.path.join(self.package_folder, "lib", "*.a")): - if not lib.endswith(".dll.a"): - os.remove(lib) - - def _remove_cpp_headers(self): - for path in glob.glob(os.path.join(self.package_folder, "include", "rocksdb", "*")): - if path != os.path.join(self.package_folder, "include", "rocksdb", "c.h"): - if os.path.isfile(path): - os.remove(path) - else: - shutil.rmtree(path) - - def package(self): - copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) - copy(self, "LICENSE*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) - cmake = CMake(self) - cmake.install() - if self.options.shared: - self._remove_static_libraries() - self._remove_cpp_headers() # Force stable ABI for shared libraries - rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) - rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) - - def package_info(self): - cmake_target = "rocksdb-shared" if self.options.shared else "rocksdb" - self.cpp_info.set_property("cmake_file_name", "RocksDB") - self.cpp_info.set_property("cmake_target_name", f"RocksDB::{cmake_target}") - # TODO: back to global scope in conan v2 once cmake_find_package* generators 
removed - self.cpp_info.components["librocksdb"].libs = collect_libs(self) - if self.settings.os == "Windows": - self.cpp_info.components["librocksdb"].system_libs = ["shlwapi", "rpcrt4"] - if self.options.shared: - self.cpp_info.components["librocksdb"].defines = ["ROCKSDB_DLL"] - elif self.settings.os in ["Linux", "FreeBSD"]: - self.cpp_info.components["librocksdb"].system_libs = ["pthread", "m"] - if self.options.lite: - self.cpp_info.components["librocksdb"].defines.append("ROCKSDB_LITE") - - # TODO: to remove in conan v2 once cmake_find_package* generators removed - self.cpp_info.names["cmake_find_package"] = "RocksDB" - self.cpp_info.names["cmake_find_package_multi"] = "RocksDB" - self.cpp_info.components["librocksdb"].names["cmake_find_package"] = cmake_target - self.cpp_info.components["librocksdb"].names["cmake_find_package_multi"] = cmake_target - self.cpp_info.components["librocksdb"].set_property("cmake_target_name", f"RocksDB::{cmake_target}") - if self.options.with_gflags: - self.cpp_info.components["librocksdb"].requires.append("gflags::gflags") - if self.options.with_snappy: - self.cpp_info.components["librocksdb"].requires.append("snappy::snappy") - if self.options.with_lz4: - self.cpp_info.components["librocksdb"].requires.append("lz4::lz4") - if self.options.with_zlib: - self.cpp_info.components["librocksdb"].requires.append("zlib::zlib") - if self.options.with_zstd: - self.cpp_info.components["librocksdb"].requires.append("zstd::zstd") - if self.options.get_safe("with_tbb"): - self.cpp_info.components["librocksdb"].requires.append("onetbb::onetbb") - if self.options.with_jemalloc: - self.cpp_info.components["librocksdb"].requires.append("jemalloc::jemalloc") diff --git a/external/rocksdb/patches/9.7.3-0001-memory-leak.patch b/external/rocksdb/patches/9.7.3-0001-memory-leak.patch deleted file mode 100644 index bb086e6cb21..00000000000 --- a/external/rocksdb/patches/9.7.3-0001-memory-leak.patch +++ /dev/null @@ -1,319 +0,0 @@ -diff --git 
a/HISTORY.md b/HISTORY.md -index 36d472229..05ad1a202 100644 ---- a/HISTORY.md -+++ b/HISTORY.md -@@ -1,6 +1,10 @@ - # Rocksdb Change Log - > NOTE: Entries for next release do not go here. Follow instructions in `unreleased_history/README.txt` - -+## 9.7.4 (10/31/2024) -+### Bug Fixes -+* Fix a leak of obsolete blob files left open until DB::Close(). This bug was introduced in version 9.4.0. -+ - ## 9.7.3 (10/16/2024) - ### Behavior Changes - * OPTIONS file to be loaded by remote worker is now preserved so that it does not get purged by the primary host. A similar technique as how we are preserving new SST files from getting purged is used for this. min_options_file_numbers_ is tracked like pending_outputs_ is tracked. -diff --git a/db/blob/blob_file_cache.cc b/db/blob/blob_file_cache.cc -index 5f340aadf..1b9faa238 100644 ---- a/db/blob/blob_file_cache.cc -+++ b/db/blob/blob_file_cache.cc -@@ -42,6 +42,7 @@ Status BlobFileCache::GetBlobFileReader( - assert(blob_file_reader); - assert(blob_file_reader->IsEmpty()); - -+ // NOTE: sharing same Cache with table_cache - const Slice key = GetSliceForKey(&blob_file_number); - - assert(cache_); -@@ -98,4 +99,13 @@ Status BlobFileCache::GetBlobFileReader( - return Status::OK(); - } - -+void BlobFileCache::Evict(uint64_t blob_file_number) { -+ // NOTE: sharing same Cache with table_cache -+ const Slice key = GetSliceForKey(&blob_file_number); -+ -+ assert(cache_); -+ -+ cache_.get()->Erase(key); -+} -+ - } // namespace ROCKSDB_NAMESPACE -diff --git a/db/blob/blob_file_cache.h b/db/blob/blob_file_cache.h -index 740e67ada..6858d012b 100644 ---- a/db/blob/blob_file_cache.h -+++ b/db/blob/blob_file_cache.h -@@ -36,6 +36,15 @@ class BlobFileCache { - uint64_t blob_file_number, - CacheHandleGuard* blob_file_reader); - -+ // Called when a blob file is obsolete to ensure it is removed from the cache -+ // to avoid effectively leaking the open file and assicated memory -+ void Evict(uint64_t blob_file_number); -+ -+ // Used to 
identify cache entries for blob files (not normally useful) -+ static const Cache::CacheItemHelper* GetHelper() { -+ return CacheInterface::GetBasicHelper(); -+ } -+ - private: - using CacheInterface = - BasicTypedCacheInterface; -diff --git a/db/column_family.h b/db/column_family.h -index e4b7adde8..86637736a 100644 ---- a/db/column_family.h -+++ b/db/column_family.h -@@ -401,6 +401,7 @@ class ColumnFamilyData { - SequenceNumber earliest_seq); - - TableCache* table_cache() const { return table_cache_.get(); } -+ BlobFileCache* blob_file_cache() const { return blob_file_cache_.get(); } - BlobSource* blob_source() const { return blob_source_.get(); } - - // See documentation in compaction_picker.h -diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc -index 261593423..06573ac2e 100644 ---- a/db/db_impl/db_impl.cc -+++ b/db/db_impl/db_impl.cc -@@ -659,8 +659,9 @@ Status DBImpl::CloseHelper() { - // We need to release them before the block cache is destroyed. The block - // cache may be destroyed inside versions_.reset(), when column family data - // list is destroyed, so leaving handles in table cache after -- // versions_.reset() may cause issues. -- // Here we clean all unreferenced handles in table cache. -+ // versions_.reset() may cause issues. Here we clean all unreferenced handles -+ // in table cache, and (for certain builds/conditions) assert that no obsolete -+ // files are hanging around unreferenced (leak) in the table/blob file cache. - // Now we assume all user queries have finished, so only version set itself - // can possibly hold the blocks from block cache. After releasing unreferenced - // handles here, only handles held by version set left and inside -@@ -668,6 +669,9 @@ Status DBImpl::CloseHelper() { - // time a handle is released, we erase it from the cache too. By doing that, - // we can guarantee that after versions_.reset(), table cache is empty - // so the cache can be safely destroyed. 
-+#ifndef NDEBUG -+ TEST_VerifyNoObsoleteFilesCached(/*db_mutex_already_held=*/true); -+#endif // !NDEBUG - table_cache_->EraseUnRefEntries(); - - for (auto& txn_entry : recovered_transactions_) { -@@ -3227,6 +3231,8 @@ Status DBImpl::MultiGetImpl( - s = Status::Aborted(); - break; - } -+ // This could be a long-running operation -+ ROCKSDB_THREAD_YIELD_HOOK(); - } - - // Post processing (decrement reference counts and record statistics) -diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h -index 5e4fa310b..ccc0abfa7 100644 ---- a/db/db_impl/db_impl.h -+++ b/db/db_impl/db_impl.h -@@ -1241,9 +1241,14 @@ class DBImpl : public DB { - static Status TEST_ValidateOptions(const DBOptions& db_options) { - return ValidateOptions(db_options); - } -- - #endif // NDEBUG - -+ // In certain configurations, verify that the table/blob file cache only -+ // contains entries for live files, to check for effective leaks of open -+ // files. This can only be called when purging of obsolete files has -+ // "settled," such as during parts of DB Close(). -+ void TEST_VerifyNoObsoleteFilesCached(bool db_mutex_already_held) const; -+ - // persist stats to column family "_persistent_stats" - void PersistStats(); - -diff --git a/db/db_impl/db_impl_debug.cc b/db/db_impl/db_impl_debug.cc -index 790a50d7a..67f5b4aaf 100644 ---- a/db/db_impl/db_impl_debug.cc -+++ b/db/db_impl/db_impl_debug.cc -@@ -9,6 +9,7 @@ - - #ifndef NDEBUG - -+#include "db/blob/blob_file_cache.h" - #include "db/column_family.h" - #include "db/db_impl/db_impl.h" - #include "db/error_handler.h" -@@ -328,5 +329,49 @@ size_t DBImpl::TEST_EstimateInMemoryStatsHistorySize() const { - InstrumentedMutexLock l(&const_cast(this)->stats_history_mutex_); - return EstimateInMemoryStatsHistorySize(); - } -+ -+void DBImpl::TEST_VerifyNoObsoleteFilesCached( -+ bool db_mutex_already_held) const { -+ // This check is somewhat expensive and obscure to make a part of every -+ // unit test in every build variety. 
Thus, we only enable it for ASAN builds. -+ if (!kMustFreeHeapAllocations) { -+ return; -+ } -+ -+ std::optional l; -+ if (db_mutex_already_held) { -+ mutex_.AssertHeld(); -+ } else { -+ l.emplace(&mutex_); -+ } -+ -+ std::vector live_files; -+ for (auto cfd : *versions_->GetColumnFamilySet()) { -+ if (cfd->IsDropped()) { -+ continue; -+ } -+ // Sneakily add both SST and blob files to the same list -+ cfd->current()->AddLiveFiles(&live_files, &live_files); -+ } -+ std::sort(live_files.begin(), live_files.end()); -+ -+ auto fn = [&live_files](const Slice& key, Cache::ObjectPtr, size_t, -+ const Cache::CacheItemHelper* helper) { -+ if (helper != BlobFileCache::GetHelper()) { -+ // Skip non-blob files for now -+ // FIXME: diagnose and fix the leaks of obsolete SST files revealed in -+ // unit tests. -+ return; -+ } -+ // See TableCache and BlobFileCache -+ assert(key.size() == sizeof(uint64_t)); -+ uint64_t file_number; -+ GetUnaligned(reinterpret_cast(key.data()), &file_number); -+ // Assert file is in sorted live_files -+ assert( -+ std::binary_search(live_files.begin(), live_files.end(), file_number)); -+ }; -+ table_cache_->ApplyToAllEntries(fn, {}); -+} - } // namespace ROCKSDB_NAMESPACE - #endif // NDEBUG -diff --git a/db/db_iter.cc b/db/db_iter.cc -index e02586377..bf4749eb9 100644 ---- a/db/db_iter.cc -+++ b/db/db_iter.cc -@@ -540,6 +540,8 @@ bool DBIter::FindNextUserEntryInternal(bool skipping_saved_key, - } else { - iter_.Next(); - } -+ // This could be a long-running operation due to tombstones, etc. 
-+ ROCKSDB_THREAD_YIELD_HOOK(); - } while (iter_.Valid()); - - valid_ = false; -diff --git a/db/table_cache.cc b/db/table_cache.cc -index 71fc29c32..8a5be75e8 100644 ---- a/db/table_cache.cc -+++ b/db/table_cache.cc -@@ -164,6 +164,7 @@ Status TableCache::GetTableReader( - } - - Cache::Handle* TableCache::Lookup(Cache* cache, uint64_t file_number) { -+ // NOTE: sharing same Cache with BlobFileCache - Slice key = GetSliceForFileNumber(&file_number); - return cache->Lookup(key); - } -@@ -179,6 +180,7 @@ Status TableCache::FindTable( - size_t max_file_size_for_l0_meta_pin, Temperature file_temperature) { - PERF_TIMER_GUARD_WITH_CLOCK(find_table_nanos, ioptions_.clock); - uint64_t number = file_meta.fd.GetNumber(); -+ // NOTE: sharing same Cache with BlobFileCache - Slice key = GetSliceForFileNumber(&number); - *handle = cache_.Lookup(key); - TEST_SYNC_POINT_CALLBACK("TableCache::FindTable:0", -diff --git a/db/version_builder.cc b/db/version_builder.cc -index ed8ab8214..c98f53f42 100644 ---- a/db/version_builder.cc -+++ b/db/version_builder.cc -@@ -24,6 +24,7 @@ - #include - - #include "cache/cache_reservation_manager.h" -+#include "db/blob/blob_file_cache.h" - #include "db/blob/blob_file_meta.h" - #include "db/dbformat.h" - #include "db/internal_stats.h" -@@ -744,12 +745,9 @@ class VersionBuilder::Rep { - return Status::Corruption("VersionBuilder", oss.str()); - } - -- // Note: we use C++11 for now but in C++14, this could be done in a more -- // elegant way using generalized lambda capture. -- VersionSet* const vs = version_set_; -- const ImmutableCFOptions* const ioptions = ioptions_; -- -- auto deleter = [vs, ioptions](SharedBlobFileMetaData* shared_meta) { -+ auto deleter = [vs = version_set_, ioptions = ioptions_, -+ bc = cfd_ ? 
cfd_->blob_file_cache() -+ : nullptr](SharedBlobFileMetaData* shared_meta) { - if (vs) { - assert(ioptions); - assert(!ioptions->cf_paths.empty()); -@@ -758,6 +756,9 @@ class VersionBuilder::Rep { - vs->AddObsoleteBlobFile(shared_meta->GetBlobFileNumber(), - ioptions->cf_paths.front().path); - } -+ if (bc) { -+ bc->Evict(shared_meta->GetBlobFileNumber()); -+ } - - delete shared_meta; - }; -@@ -766,7 +767,7 @@ class VersionBuilder::Rep { - blob_file_number, blob_file_addition.GetTotalBlobCount(), - blob_file_addition.GetTotalBlobBytes(), - blob_file_addition.GetChecksumMethod(), -- blob_file_addition.GetChecksumValue(), deleter); -+ blob_file_addition.GetChecksumValue(), std::move(deleter)); - - mutable_blob_file_metas_.emplace( - blob_file_number, MutableBlobFileMetaData(std::move(shared_meta))); -diff --git a/db/version_set.h b/db/version_set.h -index 9336782b1..024f869e7 100644 ---- a/db/version_set.h -+++ b/db/version_set.h -@@ -1514,7 +1514,6 @@ class VersionSet { - void GetLiveFilesMetaData(std::vector* metadata); - - void AddObsoleteBlobFile(uint64_t blob_file_number, std::string path) { -- // TODO: Erase file from BlobFileCache? - obsolete_blob_files_.emplace_back(blob_file_number, std::move(path)); - } - -diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h -index 2a19796b8..0afa2cab1 100644 ---- a/include/rocksdb/version.h -+++ b/include/rocksdb/version.h -@@ -13,7 +13,7 @@ - // minor or major version number planned for release. - #define ROCKSDB_MAJOR 9 - #define ROCKSDB_MINOR 7 --#define ROCKSDB_PATCH 3 -+#define ROCKSDB_PATCH 4 - - // Do not use these. We made the mistake of declaring macros starting with - // double underscore. Now we have to live with our choice. 
We'll deprecate these -diff --git a/port/port.h b/port/port.h -index 13aa56d47..141716e5b 100644 ---- a/port/port.h -+++ b/port/port.h -@@ -19,3 +19,19 @@ - #elif defined(OS_WIN) - #include "port/win/port_win.h" - #endif -+ -+#ifdef OS_LINUX -+// A temporary hook into long-running RocksDB threads to support modifying their -+// priority etc. This should become a public API hook once the requirements -+// are better understood. -+extern "C" void RocksDbThreadYield() __attribute__((__weak__)); -+#define ROCKSDB_THREAD_YIELD_HOOK() \ -+ { \ -+ if (RocksDbThreadYield) { \ -+ RocksDbThreadYield(); \ -+ } \ -+ } -+#else -+#define ROCKSDB_THREAD_YIELD_HOOK() \ -+ {} -+#endif diff --git a/external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch b/external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch deleted file mode 100644 index 7b5858bc1e9..00000000000 --- a/external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch +++ /dev/null @@ -1,30 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index 93b884d..b715cb6 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -106,14 +106,9 @@ endif() - include(CMakeDependentOption) - - if(MSVC) -- option(WITH_GFLAGS "build with GFlags" OFF) - option(WITH_XPRESS "build with windows built in compression" OFF) -- option(ROCKSDB_SKIP_THIRDPARTY "skip thirdparty.inc" OFF) -- -- if(NOT ROCKSDB_SKIP_THIRDPARTY) -- include(${CMAKE_CURRENT_SOURCE_DIR}/thirdparty.inc) -- endif() --else() -+endif() -+if(TRUE) - if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD") - # FreeBSD has jemalloc as default malloc - # but it does not have all the jemalloc files in include/... 
-@@ -126,7 +121,7 @@ else() - endif() - endif() - -- if(MINGW) -+ if(MSVC OR MINGW) - option(WITH_GFLAGS "build with GFlags" OFF) - else() - option(WITH_GFLAGS "build with GFlags" ON) diff --git a/external/soci/conanfile.py b/external/soci/conanfile.py index 7e611493d70..fe4c54e53e4 100644 --- a/external/soci/conanfile.py +++ b/external/soci/conanfile.py @@ -70,7 +70,7 @@ def requirements(self): if self.options.with_postgresql: self.requires("libpq/15.5") if self.options.with_boost: - self.requires("boost/1.83.0") + self.requires("boost/1.86.0") @property def _minimum_compilers_version(self): @@ -154,7 +154,7 @@ def package_info(self): self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix)) self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)] if self.options.with_boost: - self.cpp_info.components["soci_core"].requires.append("boost::boost") + self.cpp_info.components["soci_core"].requires.append("boost::headers") # soci_empty if self.options.empty: diff --git a/external/wamr/patches/ripp_metering.patch b/external/wamr/patches/ripp_metering.patch index 5e09eec8df8..76a2057cc9a 100644 --- a/external/wamr/patches/ripp_metering.patch +++ b/external/wamr/patches/ripp_metering.patch @@ -1,5 +1,5 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 88a1642b..aeb29912 100644 +index 88a1642b..e9eab7ec 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ @@ -11,6 +11,15 @@ index 88a1642b..aeb29912 100644 option(BUILD_SHARED_LIBS "Build using shared libraries" OFF) +@@ -170,7 +170,7 @@ if (MINGW) + endif () + + if (WIN32) +- target_link_libraries(vmlib PRIVATE ntdll) ++ target_link_libraries(vmlib PUBLIC ntdll) + endif() + + set (WAMR_PUBLIC_HEADERS diff --git a/core/iwasm/aot/aot_runtime.c b/core/iwasm/aot/aot_runtime.c index b2c9ed62..87947a18 100644 --- a/core/iwasm/aot/aot_runtime.c @@ -389,7 +398,7 @@ index ddc0b15b..3a707878 100644 #if WASM_ENABLE_TAGS != 0 
diff --git a/core/iwasm/interpreter/wasm_interp_classic.c b/core/iwasm/interpreter/wasm_interp_classic.c -index 1e98b0fa..db6278c5 100644 +index 1e98b0fa..e77fdfcd 100644 --- a/core/iwasm/interpreter/wasm_interp_classic.c +++ b/core/iwasm/interpreter/wasm_interp_classic.c @@ -1569,13 +1569,14 @@ get_global_addr(uint8 *global_data, WASMGlobalInstance *global) @@ -406,7 +415,7 @@ index 1e98b0fa..db6278c5 100644 +#define CHECK_INSTRUCTION_LIMIT() \ + do { \ + --instructions_left; \ -+ if (instructions_left <= 0) { \ ++ if (instructions_left < 0) { \ + wasm_set_exception(module, "instruction limit exceeded"); \ + goto got_exception; \ + } \ diff --git a/include/xrpl/basics/Log.h b/include/xrpl/basics/Log.h index 2506b8ea8dd..833907eb9c8 100644 --- a/include/xrpl/basics/Log.h +++ b/include/xrpl/basics/Log.h @@ -26,6 +26,7 @@ #include #include +#include #include #include #include diff --git a/include/xrpl/beast/container/detail/aged_unordered_container.h b/include/xrpl/beast/container/detail/aged_unordered_container.h index 3b9c83a0149..23200ae007e 100644 --- a/include/xrpl/beast/container/detail/aged_unordered_container.h +++ b/include/xrpl/beast/container/detail/aged_unordered_container.h @@ -3257,7 +3257,6 @@ operator==(aged_unordered_container< { if (size() != other.size()) return false; - using EqRng = std::pair; for (auto iter(cbegin()), last(cend()); iter != last;) { auto const& k(extract(*iter)); diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 8435f0e0f1f..40e61669500 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -512,6 +512,7 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, Delegation::delegatable, ({ {sfVaultID, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, {sfDestination, soeOPTIONAL}, + {sfDestinationTag, soeOPTIONAL}, })) /** This transaction claws back tokens from a vault. 
*/ diff --git a/src/libxrpl/basics/FileUtilities.cpp b/src/libxrpl/basics/FileUtilities.cpp index 291eb43c7bc..ffb97926146 100644 --- a/src/libxrpl/basics/FileUtilities.cpp +++ b/src/libxrpl/basics/FileUtilities.cpp @@ -28,6 +28,7 @@ #include #include +#include #include #include #include @@ -55,7 +56,7 @@ getFileContents( return {}; } - ifstream fileStream(fullPath, std::ios::in); + std::ifstream fileStream(fullPath.string(), std::ios::in); if (!fileStream) { @@ -85,7 +86,8 @@ writeFileContents( using namespace boost::filesystem; using namespace boost::system::errc; - ofstream fileStream(destPath, std::ios::out | std::ios::trunc); + std::ofstream fileStream( + destPath.string(), std::ios::out | std::ios::trunc); if (!fileStream) { diff --git a/src/libxrpl/protocol/PublicKey.cpp b/src/libxrpl/protocol/PublicKey.cpp index cdf646e0f86..54b50c80ef1 100644 --- a/src/libxrpl/protocol/PublicKey.cpp +++ b/src/libxrpl/protocol/PublicKey.cpp @@ -107,8 +107,9 @@ sliceToHex(Slice const& slice) } for (int i = 0; i < slice.size(); ++i) { - s += "0123456789ABCDEF"[((slice[i] & 0xf0) >> 4)]; - s += "0123456789ABCDEF"[((slice[i] & 0x0f) >> 0)]; + constexpr char hex[] = "0123456789ABCDEF"; + s += hex[((slice[i] & 0xf0) >> 4)]; + s += hex[((slice[i] & 0x0f) >> 0)]; } return s; } diff --git a/src/libxrpl/protocol/STTx.cpp b/src/libxrpl/protocol/STTx.cpp index 615012dba4d..8be8f906a5e 100644 --- a/src/libxrpl/protocol/STTx.cpp +++ b/src/libxrpl/protocol/STTx.cpp @@ -671,12 +671,12 @@ isMemoOkay(STObject const& st, std::string& reason) "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"); - for (char c : symbols) + for (unsigned char c : symbols) a[c] = 1; return a; }(); - for (auto c : *optData) + for (unsigned char c : *optData) { if (!allowedSymbols[c]) { diff --git a/src/libxrpl/protocol/tokens.cpp b/src/libxrpl/protocol/tokens.cpp index a822b1937fb..52cffd7a5cb 100644 --- a/src/libxrpl/protocol/tokens.cpp +++ b/src/libxrpl/protocol/tokens.cpp @@ -544,7 +544,7 @@ 
b58_to_b256_be(std::string_view input, std::span out) XRPL_ASSERT( num_b_58_10_coeffs <= b_58_10_coeff.size(), "ripple::b58_fast::detail::b58_to_b256_be : maximum coeff"); - for (auto c : input.substr(0, partial_coeff_len)) + for (unsigned char c : input.substr(0, partial_coeff_len)) { auto cur_val = ::ripple::alphabetReverse[c]; if (cur_val < 0) @@ -558,7 +558,7 @@ b58_to_b256_be(std::string_view input, std::span out) { for (int j = 0; j < num_full_coeffs; ++j) { - auto c = input[partial_coeff_len + j * 10 + i]; + unsigned char c = input[partial_coeff_len + j * 10 + i]; auto cur_val = ::ripple::alphabetReverse[c]; if (cur_val < 0) { diff --git a/src/test/app/RCLValidations_test.cpp b/src/test/app/RCLValidations_test.cpp index 31c38f23b1c..fce4e94048d 100644 --- a/src/test/app/RCLValidations_test.cpp +++ b/src/test/app/RCLValidations_test.cpp @@ -229,7 +229,6 @@ class RCLValidations_test : public beast::unit_test::suite // support for a ledger hash which is already in the trie. using Seq = RCLValidatedLedger::Seq; - using ID = RCLValidatedLedger::ID; // Max known ancestors for each ledger Seq const maxAncestors = 256; diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index ce97eff24fa..f9036719cdd 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -234,6 +234,28 @@ class Vault_test : public beast::unit_test::suite env(tx, ter{tecNO_PERMISSION}); } + { + testcase(prefix + " fail to withdraw to zero destination"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + tx[sfDestination] = "0"; + env(tx, ter(temMALFORMED)); + } + + { + testcase( + prefix + + " fail to withdraw with tag but without destination"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + tx[sfDestinationTag] = "0"; + env(tx, ter(temMALFORMED)); + } + if (!asset.raw().native()) { testcase( @@ -1335,6 +1357,7 @@ class Vault_test : public 
beast::unit_test::suite struct CaseArgs { bool enableClawback = true; + bool requireAuth = true; }; auto testCase = [this]( @@ -1356,16 +1379,20 @@ class Vault_test : public beast::unit_test::suite Vault vault{env}; MPTTester mptt{env, issuer, mptInitNoFund}; + auto const none = LedgerSpecificFlags(0); mptt.create( {.flags = tfMPTCanTransfer | tfMPTCanLock | - (args.enableClawback ? lsfMPTCanClawback - : LedgerSpecificFlags(0)) | - tfMPTRequireAuth}); + (args.enableClawback ? tfMPTCanClawback : none) | + (args.requireAuth ? tfMPTRequireAuth : none)}); PrettyAsset asset = mptt.issuanceID(); mptt.authorize({.account = owner}); - mptt.authorize({.account = issuer, .holder = owner}); mptt.authorize({.account = depositor}); - mptt.authorize({.account = issuer, .holder = depositor}); + if (args.requireAuth) + { + mptt.authorize({.account = issuer, .holder = owner}); + mptt.authorize({.account = issuer, .holder = depositor}); + } + env(pay(issuer, depositor, asset(1000))); env.close(); @@ -1514,6 +1541,100 @@ class Vault_test : public beast::unit_test::suite } }); + testCase( + [this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase( + "MPT 3rd party without MPToken cannot be withdrawal " + "destination"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + env.close(); + + { + // Set destination to 3rd party without MPToken + Account charlie{"charlie"}; + env.fund(XRP(1000), charlie); + env.close(); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + tx[sfDestination] = charlie.human(); + env(tx, ter(tecNO_AUTH)); + } + }, + {.requireAuth = false}); + + testCase( + [this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& 
depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT depositor without MPToken cannot withdraw"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + env(tx); + env.close(); + + { + // Remove depositor's MPToken and withdraw will fail + mptt.authorize( + {.account = depositor, .flags = tfMPTUnauthorize}); + env.close(); + auto const mptoken = + env.le(keylet::mptoken(mptt.issuanceID(), depositor)); + BEAST_EXPECT(mptoken == nullptr); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter(tecNO_AUTH)); + } + + { + // Restore depositor's MPToken and withdraw will succeed + mptt.authorize({.account = depositor}); + env.close(); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + } + }, + {.requireAuth = false}); + testCase([this]( Env& env, Account const& issuer, @@ -1803,6 +1924,7 @@ class Vault_test : public beast::unit_test::suite PrettyAsset const asset = issuer["IOU"]; env.trust(asset(1000), owner); + env.trust(asset(1000), charlie); env(pay(issuer, owner, asset(200))); env(rate(issuer, 1.25)); env.close(); @@ -2118,6 +2240,79 @@ class Vault_test : public beast::unit_test::suite env.close(); }); + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto&&...) 
{ + testcase("IOU no trust line to 3rd party"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(100)})); + env.close(); + + Account const erin{"erin"}; + env.fund(XRP(1000), erin); + env.close(); + + // Withdraw to 3rd party without trust line + auto const tx1 = [&](ripple::Keylet keylet) { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[sfDestination] = erin.human(); + return tx; + }(keylet); + env(tx1, ter{tecNO_LINE}); + }); + + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto&&...) { + testcase("IOU no trust line to depositor"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + // reset limit, so deposit of all funds will delete the trust line + env.trust(asset(0), owner); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(200)})); + env.close(); + + auto trustline = + env.le(keylet::line(owner, asset.raw().get())); + BEAST_EXPECT(trustline == nullptr); + + // Withdraw without trust line, will succeed + auto const tx1 = [&](ripple::Keylet keylet) { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + return tx; + }(keylet); + env(tx1); + }); + testCase([&, this]( Env& env, Account const& owner, diff --git a/src/test/beast/aged_associative_container_test.cpp b/src/test/beast/aged_associative_container_test.cpp index f88d5acc277..586f4868726 100644 --- a/src/test/beast/aged_associative_container_test.cpp +++ b/src/test/beast/aged_associative_container_test.cpp @@ -703,10 +703,6 @@ aged_associative_container_test_base::checkContentsRefRef( Values const& v) { using Cont = typename std::remove_reference::type; - using Traits = 
TestTraits< - Cont::is_unordered::value, - Cont::is_multi::value, - Cont::is_map::value>; using size_type = typename Cont::size_type; BEAST_EXPECT(c.size() == v.size()); @@ -761,10 +757,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructEmpty() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; using Comp = typename Traits::Comp; using Alloc = typename Traits::Alloc; using MyComp = typename Traits::MyComp; @@ -802,10 +794,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructEmpty() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; using Hash = typename Traits::Hash; using Equal = typename Traits::Equal; using Alloc = typename Traits::Alloc; @@ -870,10 +858,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructRange() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; using Comp = typename Traits::Comp; using Alloc = typename Traits::Alloc; using MyComp = typename Traits::MyComp; @@ -925,10 +909,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructRange() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; using Hash = typename Traits::Hash; using Equal = typename Traits::Equal; using Alloc = typename Traits::Alloc; @@ -996,14 +976,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructInitList() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = 
typename Traits::T; - using Clock = typename Traits::Clock; - using Comp = typename Traits::Comp; - using Alloc = typename Traits::Alloc; - using MyComp = typename Traits::MyComp; - using MyAlloc = typename Traits::MyAlloc; typename Traits::ManualClock clock; // testcase (Traits::name() + " init-list"); @@ -1020,16 +992,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructInitList() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; - using Hash = typename Traits::Hash; - using Equal = typename Traits::Equal; - using Alloc = typename Traits::Alloc; - using MyHash = typename Traits::MyHash; - using MyEqual = typename Traits::MyEqual; - using MyAlloc = typename Traits::MyAlloc; typename Traits::ManualClock clock; // testcase (Traits::name() + " init-list"); @@ -1050,7 +1012,6 @@ void aged_associative_container_test_base::testCopyMove() { using Traits = TestTraits; - using Value = typename Traits::Value; using Alloc = typename Traits::Alloc; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1121,8 +1082,6 @@ void aged_associative_container_test_base::testIterator() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Alloc = typename Traits::Alloc; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1179,8 +1138,6 @@ typename std::enable_if::type aged_associative_container_test_base::testReverseIterator() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Alloc = typename Traits::Alloc; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1190,7 +1147,6 @@ aged_associative_container_test_base::testReverseIterator() typename Traits::template Cont<> c{clock}; using iterator = decltype(c.begin()); - using const_iterator = decltype(c.cbegin()); using reverse_iterator = decltype(c.rbegin()); using 
const_reverse_iterator = decltype(c.crbegin()); @@ -1394,7 +1350,6 @@ void aged_associative_container_test_base::testChronological() { using Traits = TestTraits; - using Value = typename Traits::Value; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1760,7 +1715,6 @@ typename std::enable_if::type aged_associative_container_test_base::testCompare() { using Traits = TestTraits; - using Value = typename Traits::Value; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1832,8 +1786,6 @@ template void aged_associative_container_test_base::testMaybeUnorderedMultiMap() { - using Traits = TestTraits; - testConstructEmpty(); testConstructRange(); testConstructInitList(); diff --git a/src/test/consensus/LedgerTrie_test.cpp b/src/test/consensus/LedgerTrie_test.cpp index f46fea8e6e0..6ed45777f09 100644 --- a/src/test/consensus/LedgerTrie_test.cpp +++ b/src/test/consensus/LedgerTrie_test.cpp @@ -313,7 +313,6 @@ class LedgerTrie_test : public beast::unit_test::suite testSupport() { using namespace csf; - using Seq = Ledger::Seq; LedgerTrie t; LedgerHistoryHelper h; @@ -596,7 +595,6 @@ class LedgerTrie_test : public beast::unit_test::suite testRootRelated() { using namespace csf; - using Seq = Ledger::Seq; // Since the root is a special node that breaks the no-single child // invariant, do some tests that exercise it. 
diff --git a/src/test/consensus/Validations_test.cpp b/src/test/consensus/Validations_test.cpp index 4424d7619d2..a04e62b7235 100644 --- a/src/test/consensus/Validations_test.cpp +++ b/src/test/consensus/Validations_test.cpp @@ -805,7 +805,6 @@ class Validations_test : public beast::unit_test::suite Ledger ledgerACD = h["acd"]; using Seq = Ledger::Seq; - using ID = Ledger::ID; auto pref = [](Ledger ledger) { return std::make_pair(ledger.seq(), ledger.id()); diff --git a/src/test/jtx/TrustedPublisherServer.h b/src/test/jtx/TrustedPublisherServer.h index 61386734841..7bc092cbe31 100644 --- a/src/test/jtx/TrustedPublisherServer.h +++ b/src/test/jtx/TrustedPublisherServer.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index 1c1b804701b..a52ce52c95d 100644 --- a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -131,6 +131,9 @@ class Subscribe_test : public beast::unit_test::suite BEAST_EXPECT(jv.isMember(jss::id) && jv[jss::id] == 5); } BEAST_EXPECT(jv[jss::result][jss::ledger_index] == 2); + BEAST_EXPECT( + jv[jss::result][jss::network_id] == + env.app().config().NETWORK_ID); } { @@ -139,7 +142,8 @@ class Subscribe_test : public beast::unit_test::suite // Check stream update BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { - return jv[jss::ledger_index] == 3; + return jv[jss::ledger_index] == 3 && + jv[jss::network_id] == env.app().config().NETWORK_ID; })); } @@ -149,7 +153,8 @@ class Subscribe_test : public beast::unit_test::suite // Check stream update BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { - return jv[jss::ledger_index] == 4; + return jv[jss::ledger_index] == 4 && + jv[jss::network_id] == env.app().config().NETWORK_ID; })); } @@ -509,6 +514,11 @@ class Subscribe_test : public beast::unit_test::suite if (!jv.isMember(jss::validated_hash)) return false; + uint32_t netID = env.app().config().NETWORK_ID; + if 
(!jv.isMember(jss::network_id) || + jv[jss::network_id] != netID) + return false; + // Certain fields are only added on a flag ledger. bool const isFlagLedger = (env.closed()->info().seq + 1) % 256 == 0; @@ -583,6 +593,7 @@ class Subscribe_test : public beast::unit_test::suite jv[jss::streams][0u] = "ledger"; jr = env.rpc("json", "subscribe", to_string(jv))[jss::result]; BEAST_EXPECT(jr[jss::status] == "success"); + BEAST_EXPECT(jr[jss::network_id] == env.app().config().NETWORK_ID); jr = env.rpc("json", "unsubscribe", to_string(jv))[jss::result]; BEAST_EXPECT(jr[jss::status] == "success"); diff --git a/src/test/server/ServerStatus_test.cpp b/src/test/server/ServerStatus_test.cpp index bcd355e301d..b27dee6e0a0 100644 --- a/src/test/server/ServerStatus_test.cpp +++ b/src/test/server/ServerStatus_test.cpp @@ -681,7 +681,7 @@ class ServerStatus_test : public beast::unit_test::suite, resp["Upgrade"] == "websocket"); BEAST_EXPECT( resp.find("Connection") != resp.end() && - resp["Connection"] == "upgrade"); + resp["Connection"] == "Upgrade"); } void diff --git a/src/test/unit_test/FileDirGuard.h b/src/test/unit_test/FileDirGuard.h index d247ae30157..091bc80d20f 100644 --- a/src/test/unit_test/FileDirGuard.h +++ b/src/test/unit_test/FileDirGuard.h @@ -26,6 +26,8 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
#include +#include + namespace ripple { namespace test { namespace detail { diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index ea0b794116d..c824eccfba6 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -79,7 +79,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index e926a38563e..19c8c9910d5 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -39,7 +39,7 @@ #include #include -#include +#include #include #include diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 76c2b94e5e8..7caf5c8d081 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -2415,6 +2415,7 @@ NetworkOPsImp::pubValidation(std::shared_ptr const& val) jvObj[jss::flags] = val->getFlags(); jvObj[jss::signing_time] = *(*val)[~sfSigningTime]; jvObj[jss::data] = strHex(val->getSerializer().slice()); + jvObj[jss::network_id] = app_.config().NETWORK_ID; if (auto version = (*val)[~sfServerVersion]) jvObj[jss::server_version] = std::to_string(*version); @@ -3148,6 +3149,8 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) jvObj[jss::ledger_time] = Json::Value::UInt( lpAccepted->info().closeTime.time_since_epoch().count()); + jvObj[jss::network_id] = app_.config().NETWORK_ID; + if (!lpAccepted->rules().enabled(featureXRPFees)) jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED; jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped(); @@ -4208,6 +4211,7 @@ NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult) jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash); jvResult[jss::ledger_time] = Json::Value::UInt( lpClosed->info().closeTime.time_since_epoch().count()); + jvResult[jss::network_id] = app_.config().NETWORK_ID; if (!lpClosed->rules().enabled(featureXRPFees)) jvResult[jss::fee_ref] = 
Config::FEE_UNITS_DEPRECATED; jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped(); diff --git a/src/xrpld/app/misc/WamrVM.cpp b/src/xrpld/app/misc/WamrVM.cpp index 64759ff8d07..3f0bd1d95fb 100644 --- a/src/xrpld/app/misc/WamrVM.cpp +++ b/src/xrpld/app/misc/WamrVM.cpp @@ -21,6 +21,7 @@ #include +#include #include #ifdef _DEBUG diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index 048e54805ee..83dcef2fa9b 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -366,14 +366,14 @@ escrowCreatePreclaimHelper( // authorized auto const& mptIssue = amount.get(); if (auto const ter = - requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, account, AuthType::WeakAuth); ter != tesSUCCESS) return ter; // If the issuer has requireAuth set, check if the destination is // authorized if (auto const ter = - requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, dest, AuthType::WeakAuth); ter != tesSUCCESS) return ter; @@ -831,7 +831,7 @@ escrowFinishPreclaimHelper( // authorized auto const& mptIssue = amount.get(); if (auto const ter = - requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, dest, AuthType::WeakAuth); ter != tesSUCCESS) return ter; @@ -1447,7 +1447,7 @@ escrowCancelPreclaimHelper( // authorized auto const& mptIssue = amount.get(); if (auto const ter = - requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, account, AuthType::WeakAuth); ter != tesSUCCESS) return ter; diff --git a/src/xrpld/app/tx/detail/VaultWithdraw.cpp b/src/xrpld/app/tx/detail/VaultWithdraw.cpp index 7a8605cdbdb..09a9fd14e14 100644 --- a/src/xrpld/app/tx/detail/VaultWithdraw.cpp +++ b/src/xrpld/app/tx/detail/VaultWithdraw.cpp @@ -52,9 +52,19 @@ VaultWithdraw::preflight(PreflightContext const& ctx) return temBAD_AMOUNT; if (auto const destination 
= ctx.tx[~sfDestination]; - destination && *destination == beast::zero) + destination.has_value()) { - JLOG(ctx.j.debug()) << "VaultWithdraw: zero/empty destination account."; + if (*destination == beast::zero) + { + JLOG(ctx.j.debug()) + << "VaultWithdraw: zero/empty destination account."; + return temMALFORMED; + } + } + else if (ctx.tx.isFieldPresent(sfDestinationTag)) + { + JLOG(ctx.j.debug()) << "VaultWithdraw: sfDestinationTag is set but " + "sfDestination is not"; return temMALFORMED; } @@ -123,33 +133,39 @@ VaultWithdraw::preclaim(PreclaimContext const& ctx) // Withdrawal to a 3rd party destination account is essentially a transfer, // via shares in the vault. Enforce all the usual asset transfer checks. + AuthType authType = AuthType::Legacy; if (account != dstAcct) { auto const sleDst = ctx.view.read(keylet::account(dstAcct)); if (sleDst == nullptr) return tecNO_DST; - if (sleDst->getFlags() & lsfRequireDestTag) + if (sleDst->isFlag(lsfRequireDestTag) && + !ctx.tx.isFieldPresent(sfDestinationTag)) return tecDST_TAG_NEEDED; // Cannot send without a tag - if (sleDst->getFlags() & lsfDepositAuth) + if (sleDst->isFlag(lsfDepositAuth)) { if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account))) return tecNO_PERMISSION; } + // The destination account must have consented to receive the asset by + // creating a RippleState or MPToken + authType = AuthType::StrongAuth; } - // Destination MPToken must exist (if asset is an MPT) - if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct); + // Destination MPToken (for an MPT) or trust line (for an IOU) must exist + // if not sending to Account. + if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct, authType); !isTesSuccess(ter)) return ter; // Cannot withdraw from a Vault an Asset frozen for the destination account - if (isFrozen(ctx.view, dstAcct, vaultAsset)) - return vaultAsset.holds() ? 
tecFROZEN : tecLOCKED; + if (auto const ret = checkFrozen(ctx.view, dstAcct, vaultAsset)) + return ret; - if (isFrozen(ctx.view, account, vaultShare)) - return tecLOCKED; + if (auto const ret = checkFrozen(ctx.view, account, vaultShare)) + return ret; return tesSUCCESS; } diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index 8c391499b6a..fc9360734d4 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -175,6 +175,29 @@ isFrozen( asset.value()); } +[[nodiscard]] inline TER +checkFrozen(ReadView const& view, AccountID const& account, Issue const& issue) +{ + return isFrozen(view, account, issue) ? (TER)tecFROZEN : (TER)tesSUCCESS; +} + +[[nodiscard]] inline TER +checkFrozen( + ReadView const& view, + AccountID const& account, + MPTIssue const& mptIssue) +{ + return isFrozen(view, account, mptIssue) ? (TER)tecLOCKED : (TER)tesSUCCESS; +} + +[[nodiscard]] inline TER +checkFrozen(ReadView const& view, AccountID const& account, Asset const& asset) +{ + return std::visit( + [&](auto const& issue) { return checkFrozen(view, account, issue); }, + asset.value()); +} + [[nodiscard]] bool isAnyFrozen( ReadView const& view, @@ -725,19 +748,40 @@ transferXRP( STAmount const& amount, beast::Journal j); -/* Check if MPToken exists: - * - StrongAuth - before checking lsfMPTRequireAuth is set - * - WeakAuth - after checking if lsfMPTRequireAuth is set +/* Check if MPToken (for MPT) or trust line (for IOU) exists: + * - StrongAuth - before checking if authorization is required + * - WeakAuth + * for MPT - after checking lsfMPTRequireAuth flag + * for IOU - do not check if trust line exists + * - Legacy + * for MPT - before checking lsfMPTRequireAuth flag i.e. same as StrongAuth + * for IOU - do not check if trust line exists i.e. same as WeakAuth */ -enum class MPTAuthType : bool { StrongAuth = true, WeakAuth = false }; +enum class AuthType { StrongAuth, WeakAuth, Legacy }; /** Check if the account lacks required authorization. 
* - * Return tecNO_AUTH or tecNO_LINE if it does - * and tesSUCCESS otherwise. + * Return tecNO_AUTH or tecNO_LINE if it does + * and tesSUCCESS otherwise. + * + * If StrongAuth then return tecNO_LINE if the RippleState doesn't exist. Return + * tecNO_AUTH if lsfRequireAuth is set on the issuer's AccountRoot, and the + * RippleState does exist, and the RippleState is not authorized. + * + * If WeakAuth then return tecNO_AUTH if lsfRequireAuth is set, and the + * RippleState exists, and is not authorized. Return tecNO_LINE if + * lsfRequireAuth is set and the RippleState doesn't exist. Consequently, if + * WeakAuth and lsfRequireAuth is *not* set, this function will return + * tesSUCCESS even if RippleState does *not* exist. + * + * The default "Legacy" auth type is equivalent to WeakAuth. */ [[nodiscard]] TER -requireAuth(ReadView const& view, Issue const& issue, AccountID const& account); +requireAuth( + ReadView const& view, + Issue const& issue, + AccountID const& account, + AuthType authType = AuthType::Legacy); /** Check if the account lacks required authorization. * @@ -751,32 +795,33 @@ requireAuth(ReadView const& view, Issue const& issue, AccountID const& account); * purely defensive, as we currently do not allow such vaults to be created. * * If StrongAuth then return tecNO_AUTH if MPToken doesn't exist or - * lsfMPTRequireAuth is set and MPToken is not authorized. If WeakAuth then - * return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken doesn't exist or is - * not authorized (explicitly or via credentials, if DomainID is set in - * MPTokenIssuance). Consequently, if WeakAuth and lsfMPTRequireAuth is *not* - * set, this function will return true even if MPToken does *not* exist. + * lsfMPTRequireAuth is set and MPToken is not authorized. + * + * If WeakAuth then return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken + * doesn't exist or is not authorized (explicitly or via credentials, if + * DomainID is set in MPTokenIssuance). 
Consequently, if WeakAuth and + lsfMPTRequireAuth is *not* set, this function will return tesSUCCESS even if + * MPToken does *not* exist. + * + * The default "Legacy" auth type is equivalent to StrongAuth. + */ [[nodiscard]] TER requireAuth( ReadView const& view, MPTIssue const& mptIssue, AccountID const& account, - MPTAuthType authType = MPTAuthType::StrongAuth, + AuthType authType = AuthType::Legacy, int depth = 0); [[nodiscard]] TER inline requireAuth( ReadView const& view, Asset const& asset, AccountID const& account, - MPTAuthType authType = MPTAuthType::StrongAuth) + AuthType authType = AuthType::Legacy) { return std::visit( [&](TIss const& issue_) { - if constexpr (std::is_same_v) - return requireAuth(view, issue_, account); - else - return requireAuth(view, issue_, account, authType); + return requireAuth(view, issue_, account, authType); }, asset.value()); } diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index f0c4d335cd2..828e2020cb2 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -504,8 +504,8 @@ accountHolds( if (zeroIfUnauthorized == ahZERO_IF_UNAUTHORIZED && view.rules().enabled(featureSingleAssetVault)) { - if (auto const err = requireAuth( - view, mptIssue, account, MPTAuthType::StrongAuth); + if (auto const err = + requireAuth(view, mptIssue, account, AuthType::StrongAuth); !isTesSuccess(err)) amount.clear(mptIssue); } @@ -2296,15 +2296,27 @@ transferXRP( } TER -requireAuth(ReadView const& view, Issue const& issue, AccountID const& account) +requireAuth( + ReadView const& view, + Issue const& issue, + AccountID const& account, + AuthType authType) { if (isXRP(issue) || issue.account == account) return tesSUCCESS; + + auto const trustLine = + view.read(keylet::line(account, issue.account, issue.currency)); + // If account has no line, and this is a strong check, fail + if (!trustLine && authType == AuthType::StrongAuth) + return tecNO_LINE; + + // If this is a weak or legacy check, 
or if the account has a line, fail if + // auth is required and not set on the line if (auto const issuerAccount = view.read(keylet::account(issue.account)); issuerAccount && (*issuerAccount)[sfFlags] & lsfRequireAuth) { - if (auto const trustLine = - view.read(keylet::line(account, issue.account, issue.currency))) + if (trustLine) return ((*trustLine)[sfFlags] & ((account > issue.account) ? lsfLowAuth : lsfHighAuth)) ? tesSUCCESS @@ -2320,7 +2332,7 @@ requireAuth( ReadView const& view, MPTIssue const& mptIssue, AccountID const& account, - MPTAuthType authType, + AuthType authType, int depth) { auto const mptID = keylet::mptIssuance(mptIssue.getMptID()); @@ -2355,7 +2367,7 @@ requireAuth( if (auto const err = std::visit( [&](TIss const& issue) { if constexpr (std::is_same_v) - return requireAuth(view, issue, account); + return requireAuth(view, issue, account, authType); else return requireAuth( view, issue, account, authType, depth + 1); @@ -2370,7 +2382,8 @@ requireAuth( auto const sleToken = view.read(mptokenID); // if account has no MPToken, fail - if (!sleToken && authType == MPTAuthType::StrongAuth) + if (!sleToken && + (authType == AuthType::StrongAuth || authType == AuthType::Legacy)) return tecNO_AUTH; // Note, this check is not amendment-gated because DomainID will be always diff --git a/src/xrpld/rpc/handlers/LogLevel.cpp b/src/xrpld/rpc/handlers/LogLevel.cpp index 0fc266569e8..a93d010706e 100644 --- a/src/xrpld/rpc/handlers/LogLevel.cpp +++ b/src/xrpld/rpc/handlers/LogLevel.cpp @@ -44,7 +44,6 @@ doLogLevel(RPC::JsonContext& context) Logs::toString(Logs::fromSeverity(context.app.logs().threshold())); std::vector> logTable( context.app.logs().partition_severities()); - using stringPair = std::map::value_type; for (auto const& [k, v] : logTable) lev[k] = v;