Compare commits

..

No commits in common. "master" and "v126.0.6478.40-1" have entirely different histories.

16926 changed files with 687354 additions and 1333488 deletions

View file

@ -25,7 +25,7 @@ jobs:
path: | path: |
src/third_party/llvm-build/Release+Asserts/ src/third_party/llvm-build/Release+Asserts/
src/gn/ src/gn/
src/qemu-user*.deb src/qemu-user-static*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }} key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (Linux, OpenWrt) - name: Cache PGO (Linux, OpenWrt)
uses: actions/cache@v4 uses: actions/cache@v4
@ -45,11 +45,11 @@ jobs:
- run: ./get-clang.sh - run: ./get-clang.sh
- run: EXTRA_FLAGS='target_os="android"' ./get-clang.sh - run: EXTRA_FLAGS='target_os="android"' ./get-clang.sh
- run: | - run: |
if [ ! -f qemu-user*.deb ]; then if [ ! -f qemu-user-static*.deb ]; then
wget https://snapshot.debian.org/archive/debian/20250405T083429Z/pool/main/q/qemu/qemu-user_9.2.2%2Bds-1%2Bb2_amd64.deb wget https://snapshot.debian.org/archive/debian/20230611T210420Z/pool/main/q/qemu/qemu-user-static_8.0%2Bdfsg-4_amd64.deb
fi fi
cache-toolchains-win: cache-toolchains-win:
runs-on: windows-2022 runs-on: windows-2019
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Cache toolchains - name: Cache toolchains
@ -97,7 +97,7 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
arch: [x64, x86, arm64, arm, mipsel, mips64el, riscv64, loong64] arch: [x64, x86, arm64, arm, mipsel, mips64el, riscv64]
env: env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"' EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"'
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }} BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
@ -109,7 +109,7 @@ jobs:
path: | path: |
src/third_party/llvm-build/Release+Asserts/ src/third_party/llvm-build/Release+Asserts/
src/gn/ src/gn/
src/qemu-user*.deb src/qemu-user-static*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }} key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (Linux, OpenWrt) - name: Cache PGO (Linux, OpenWrt)
uses: actions/cache@v4 uses: actions/cache@v4
@ -138,7 +138,7 @@ jobs:
sudo apt update sudo apt update
sudo apt install ninja-build pkg-config ccache bubblewrap sudo apt install ninja-build pkg-config ccache bubblewrap
sudo apt remove -y qemu-user-binfmt sudo apt remove -y qemu-user-binfmt
sudo dpkg -i qemu-user*.deb sudo dpkg -i qemu-user-static*.deb
# libc6-i386 interferes with x86 build # libc6-i386 interferes with x86 build
sudo apt remove libc6-i386 sudo apt remove libc6-i386
- run: ./get-clang.sh - run: ./get-clang.sh
@ -179,20 +179,16 @@ jobs:
abi: armeabi-v7a abi: armeabi-v7a
env: env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="android"' EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="android"'
BUNDLE: naiveproxy-plugin-${{ github.event.release.tag_name || 'v1.1.1.1-1' }}-${{ matrix.abi }}.apk BUNDLE: naiveproxy-plugin-${{ github.event.release.tag_name || 'v1' }}-${{ matrix.abi }}.apk
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: 17
- name: Cache toolchains (Linux, OpenWrt, Android) - name: Cache toolchains (Linux, OpenWrt, Android)
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: | path: |
src/third_party/llvm-build/Release+Asserts/ src/third_party/llvm-build/Release+Asserts/
src/gn/ src/gn/
src/qemu-user*.deb src/qemu-user-static*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }} key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache AFDO (Android) - name: Cache AFDO (Android)
uses: actions/cache@v4 uses: actions/cache@v4
@ -222,7 +218,7 @@ jobs:
sudo apt update sudo apt update
sudo apt install ninja-build pkg-config ccache bubblewrap sudo apt install ninja-build pkg-config ccache bubblewrap
sudo apt remove -y qemu-user-binfmt sudo apt remove -y qemu-user-binfmt
sudo dpkg -i qemu-user*.deb sudo dpkg -i qemu-user-static*.deb
# libc6-i386 interferes with x86 build # libc6-i386 interferes with x86 build
sudo apt remove libc6-i386 sudo apt remove libc6-i386
- run: ./get-clang.sh - run: ./get-clang.sh
@ -240,7 +236,7 @@ jobs:
working-directory: apk working-directory: apk
env: env:
APK_ABI: ${{ matrix.abi }} APK_ABI: ${{ matrix.abi }}
APK_VERSION_NAME: ${{ github.event.release.tag_name || 'v1.1.1.1-1' }} APK_VERSION_NAME: ${{ github.event.release.tag_name || 'v1' }}
KEYSTORE_PASS: ${{ secrets.KEYSTORE_PASS }} KEYSTORE_PASS: ${{ secrets.KEYSTORE_PASS }}
run: | run: |
mkdir -p app/libs/$APK_ABI mkdir -p app/libs/$APK_ABI
@ -260,7 +256,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
win: win:
needs: cache-toolchains-win needs: cache-toolchains-win
runs-on: windows-2022 runs-on: windows-2019
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
@ -375,6 +371,40 @@ jobs:
run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }}.tar.xz --clobber run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }}.tar.xz --clobber
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ios:
needs: cache-toolchains-mac
runs-on: macos-13
strategy:
fail-fast: false
matrix:
arch: [arm64]
env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="ios" ios_enable_code_signing=false'
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- name: Cache toolchains and PGO
uses: actions/cache@v4
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/chrome/build/pgo_profiles/chrome-mac-*
src/gn/
key: toolchains-pgo-mac-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- id: ccache-timestamp
run: echo "CCACHE_TIMESTAMP=$(date +%s)" >>$GITHUB_OUTPUT
- name: Cache ccache files
uses: actions/cache@v4
with:
path: ~/Library/Caches/ccache
key: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
restore-keys: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- run: brew install ninja ccache
- run: pip install setuptools
- run: ./get-clang.sh
- run: ccache -z
- run: ./build.sh
- run: ccache -s
openwrt: openwrt:
needs: cache-toolchains-posix needs: cache-toolchains-posix
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
@ -386,7 +416,7 @@ jobs:
openwrt: "target=x86 subtarget=64" openwrt: "target=x86 subtarget=64"
target_cpu: x64 target_cpu: x64
- arch: x86 - arch: x86
openwrt: "target=x86 subtarget=geode" openwrt: "target=x86 subtarget=generic"
target_cpu: x86 target_cpu: x86
- arch: aarch64_cortex-a53 - arch: aarch64_cortex-a53
openwrt: "target=sunxi subtarget=cortexa53" openwrt: "target=sunxi subtarget=cortexa53"
@ -395,9 +425,7 @@ jobs:
- arch: aarch64_cortex-a53-static - arch: aarch64_cortex-a53-static
openwrt: "target=sunxi subtarget=cortexa53" openwrt: "target=sunxi subtarget=cortexa53"
target_cpu: arm64 target_cpu: arm64
extra: 'arm_cpu="cortex-a53" build_static=true use_allocator_shim=false use_partition_alloc=false' extra: 'arm_cpu="cortex-a53" build_static=true no_madvise_syscall=true'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: aarch64_cortex-a72 - arch: aarch64_cortex-a72
openwrt: "target=mvebu subtarget=cortexa72" openwrt: "target=mvebu subtarget=cortexa72"
target_cpu: arm64 target_cpu: arm64
@ -405,26 +433,16 @@ jobs:
- arch: aarch64_cortex-a72-static - arch: aarch64_cortex-a72-static
openwrt: "target=mvebu subtarget=cortexa72" openwrt: "target=mvebu subtarget=cortexa72"
target_cpu: arm64 target_cpu: arm64
extra: 'arm_cpu="cortex-a72" build_static=true use_allocator_shim=false use_partition_alloc=false' extra: 'arm_cpu="cortex-a72" build_static=true no_madvise_syscall=true'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: aarch64_cortex-a76
openwrt: "target=bcm27xx subtarget=bcm2712"
target_cpu: arm64
extra: 'arm_cpu="cortex-a76"'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: aarch64_generic - arch: aarch64_generic
openwrt: "target=layerscape subtarget=armv8_64b" openwrt: "target=rockchip subtarget=armv8"
target_cpu: arm64 target_cpu: arm64
- arch: aarch64_generic-static - arch: aarch64_generic-static
openwrt: "target=layerscape subtarget=armv8_64b" openwrt: "target=rockchip subtarget=armv8"
target_cpu: arm64 target_cpu: arm64
extra: "build_static=true use_allocator_shim=false use_partition_alloc=false" extra: "build_static=true no_madvise_syscall=true"
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: arm_arm1176jzf-s_vfp - arch: arm_arm1176jzf-s_vfp
openwrt: "target=brcm2708 subtarget=bcm2708" openwrt: "target=bcm27xx subtarget=bcm2708"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="arm1176jzf-s" arm_fpu="vfp" arm_float_abi="hard" arm_use_neon=false arm_use_thumb=false' extra: 'arm_version=0 arm_cpu="arm1176jzf-s" arm_fpu="vfp" arm_float_abi="hard" arm_use_neon=false arm_use_thumb=false'
- arch: arm_arm926ej-s - arch: arm_arm926ej-s
@ -432,35 +450,29 @@ jobs:
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="arm926ej-s" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false' extra: 'arm_version=0 arm_cpu="arm926ej-s" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false'
- arch: arm_cortex-a15_neon-vfpv4 - arch: arm_cortex-a15_neon-vfpv4
openwrt: "target=ipq806x subtarget=generic" openwrt: "target=armsr subtarget=armv7"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a15" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true' extra: 'arm_version=0 arm_cpu="cortex-a15" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a5_vfpv4 - arch: arm_cortex-a5_vfpv4
openwrt: "target=at91 subtarget=sama5d3" openwrt: "target=at91 subtarget=sama5"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a5" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false' extra: 'arm_version=0 arm_cpu="cortex-a5" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_cortex-a7 - arch: arm_cortex-a7
openwrt: "target=mediatek subtarget=mt7629" openwrt: "target=mediatek subtarget=mt7629"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_float_abi="soft" arm_use_neon=false' extra: 'arm_version=0 arm_cpu="cortex-a7" arm_float_abi="soft" arm_use_neon=false'
openwrt_release: '21.02.0'
openwrt_gcc_ver: '8.4.0'
- arch: arm_cortex-a7_neon-vfpv4 - arch: arm_cortex-a7_neon-vfpv4
openwrt: "target=sunxi subtarget=cortexa7" openwrt: "target=sunxi subtarget=cortexa7"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true' extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a7_neon-vfpv4-static
openwrt: "target=sunxi subtarget=cortexa7"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: arm_cortex-a7_vfpv4 - arch: arm_cortex-a7_vfpv4
openwrt: "target=at91 subtarget=sama7" openwrt: "target=at91 subtarget=sama7"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false' extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
openwrt_release: '22.03.0' - arch: arm_cortex-a7_neon-vfpv4-static
openwrt_gcc_ver: '11.2.0' openwrt: "target=sunxi subtarget=cortexa7"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true build_static=true no_madvise_syscall=true'
- arch: arm_cortex-a8_vfpv3 - arch: arm_cortex-a8_vfpv3
openwrt: "target=sunxi subtarget=cortexa8" openwrt: "target=sunxi subtarget=cortexa8"
target_cpu: arm target_cpu: arm
@ -472,15 +484,13 @@ jobs:
- arch: arm_cortex-a9-static - arch: arm_cortex-a9-static
openwrt: "target=bcm53xx subtarget=generic" openwrt: "target=bcm53xx subtarget=generic"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false build_static=true use_allocator_shim=false use_partition_alloc=false' extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false build_static=true no_madvise_syscall=true'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: arm_cortex-a9_neon - arch: arm_cortex-a9_neon
openwrt: "target=imx6 subtarget=generic" openwrt: "target=zynq subtarget=generic"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true' extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a9_vfpv3-d16 - arch: arm_cortex-a9_vfpv3-d16
openwrt: "target=mvebu subtarget=cortexa9" openwrt: "target=tegra subtarget=generic"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="vfpv3-d16" arm_float_abi="hard" arm_use_neon=false' extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="vfpv3-d16" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_mpcore - arch: arm_mpcore
@ -498,26 +508,17 @@ jobs:
- arch: mipsel_24kc-static - arch: mipsel_24kc-static
openwrt: "target=ramips subtarget=rt305x" openwrt: "target=ramips subtarget=rt305x"
target_cpu: mipsel target_cpu: mipsel
extra: 'mips_arch_variant="r2" mips_float_abi="soft" build_static=true use_allocator_shim=false use_partition_alloc=false' extra: 'mips_arch_variant="r2" mips_float_abi="soft" build_static=true no_madvise_syscall=true'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: mipsel_mips32 - arch: mipsel_mips32
openwrt: "target=brcm47xx subtarget=legacy" openwrt: "target=bcm47xx subtarget=generic"
target_cpu: mipsel target_cpu: mipsel
extra: 'mips_arch_variant="r1" mips_float_abi="soft"' extra: 'mips_arch_variant="r1" mips_float_abi="soft"'
- arch: riscv64 - arch: riscv64
openwrt: "target=sifiveu subtarget=generic" openwrt: "target=sifiveu subtarget=generic"
target_cpu: riscv64 target_cpu: riscv64
openwrt_release: '23.05.0'
openwrt_gcc_ver: '12.3.0'
- arch: loongarch64
openwrt: "target=loongarch64 subtarget=generic"
target_cpu: loong64
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
env: env:
EXTRA_FLAGS: target_cpu="${{ matrix.target_cpu }}" target_os="openwrt" ${{ matrix.extra }} enable_shadow_metadata=false EXTRA_FLAGS: target_cpu="${{ matrix.target_cpu }}" target_os="openwrt" ${{ matrix.extra }}
OPENWRT_FLAGS: arch=${{ matrix.arch }} release=${{ matrix.openwrt_release || '18.06.0' }} gcc_ver=${{ matrix.openwrt_gcc_ver || '7.3.0' }} ${{ matrix.openwrt }} OPENWRT_FLAGS: arch=${{ matrix.arch }} release=23.05.0 gcc_ver=12.3.0 ${{ matrix.openwrt }}
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }} BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
@ -527,7 +528,7 @@ jobs:
path: | path: |
src/third_party/llvm-build/Release+Asserts/ src/third_party/llvm-build/Release+Asserts/
src/gn/ src/gn/
src/qemu-user*.deb src/qemu-user-static*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }} key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (Linux, OpenWrt) - name: Cache PGO (Linux, OpenWrt)
uses: actions/cache@v4 uses: actions/cache@v4
@ -552,7 +553,7 @@ jobs:
sudo apt update sudo apt update
sudo apt install ninja-build pkg-config ccache bubblewrap sudo apt install ninja-build pkg-config ccache bubblewrap
sudo apt remove -y qemu-user-binfmt sudo apt remove -y qemu-user-binfmt
sudo dpkg -i qemu-user*.deb sudo dpkg -i qemu-user-static*.deb
# libc6-i386 interferes with x86 build # libc6-i386 interferes with x86 build
sudo apt remove libc6-i386 sudo apt remove libc6-i386
- run: ./get-clang.sh - run: ./get-clang.sh

View file

@ -1 +1 @@
135.0.7049.38 126.0.6478.40

View file

@ -21,7 +21,7 @@ The Naïve server here works as a forward proxy and a packet length padding laye
## Download NaïveProxy ## Download NaïveProxy
Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [Exclave](https://github.com/dyhkwong/Exclave), [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)). Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Users should always use the latest version to keep signatures identical to Chrome. Users should always use the latest version to keep signatures identical to Chrome.
@ -82,6 +82,7 @@ Or `quic://user:pass@example.com`, if it works better. See also [parameter usage
* [v2rayN](https://github.com/2dust/v2rayN), GUI client, Windows * [v2rayN](https://github.com/2dust/v2rayN), GUI client, Windows
* [NekoBox for Android](https://github.com/MatsuriDayo/NekoBoxForAndroid), Proxy toolchain, Android * [NekoBox for Android](https://github.com/MatsuriDayo/NekoBoxForAndroid), Proxy toolchain, Android
* [NekoRay / NekoBox For PC](https://github.com/MatsuriDayo/nekoray), Qt based GUI, Windows, Linux * [NekoRay / NekoBox For PC](https://github.com/MatsuriDayo/nekoray), Qt based GUI, Windows, Linux
* [Yet Another Shadow Socket](https://github.com/Chilledheart/yass), NaïveProxy-compatible forward proxy, Android, iOS, Windows, macOS, Linux, FreeBSD
## Notes for downstream ## Notes for downstream
@ -113,7 +114,7 @@ Further reads and writes after `kFirstPaddings` are unpadded to avoid performanc
### H2 RST_STREAM frame padding ### H2 RST_STREAM frame padding
In experiments, NaïveProxy tends to send too many RST_STREAM frames per session, an uncommon behavior from regular browsers. To solve this, an END_STREAM DATA frame padded with total length distributed in [48, 72] is prepended to the RST_STREAM frame so it looks like a HEADERS frame. The server often replies to this with a WINDOW_UPDATE because padding is accounted in flow control. Whether this results in a new uncommon behavior is still unclear. In experiments, NaïveProxy tends to send too many RST_STREAM frames per session, an uncommon behavior from regular browsers. To solve this, an END_STREAM DATA frame padded with total length distributed in [48, 72] is prepended to the RST_STREAM frame so it looks like a HEADERS frame. The server often replies to this with a WINDOW_UPDATE because padding is accounted in flow control. Whether this results in a new uncommon behavior is still unclear.
### H2 HEADERS frame padding ### H2 HEADERS frame padding
@ -129,7 +130,7 @@ The first CONNECT request to a server cannot use "Fast Open" to send payload bef
## Changes from Chromium upstream ## Changes from Chromium upstream
- Minimize source code and build size (0.3% of the original) - Minimize source code and build size (1% of the original)
- Disable exceptions and RTTI, except on Mac and Android. - Disable exceptions and RTTI, except on Mac and Android.
- Support OpenWrt builds - Support OpenWrt builds
- (Android, Linux) Use the builtin verifier instead of the system verifier (drop dependency of NSS on Linux) and read the system trust store from (following Go's behavior in crypto/x509/root_unix.go and crypto/x509/root_linux.go): - (Android, Linux) Use the builtin verifier instead of the system verifier (drop dependency of NSS on Linux) and read the system trust store from (following Go's behavior in crypto/x509/root_unix.go and crypto/x509/root_linux.go):

View file

@ -14,8 +14,8 @@ Description:
"proxy": "..." "proxy": "..."
} }
Specifying a flag multiple times on the command line is equivalent to `--listen` can be specified multiple times on the command line,
having an array of multiple strings in the JSON file. and can be either a string or an array of strings in the JSON file.
Uses "config.json" by default if run without arguments. Uses "config.json" by default if run without arguments.
@ -29,16 +29,18 @@ Options:
Prints version. Prints version.
--listen=LISTEN-URI --listen=<proto>://[addr][:port]
--listen=socks://[[user]:[pass]@][addr][:port]
LISTEN-URI = <LISTEN-PROTO>"://"[<USER>":"<PASS>"@"][<ADDR>][":"<PORT>] Listens at addr:port with protocol <proto>.
LISTEN-PROTO = "socks" | "http" | "redir"
Listens at addr:port with protocol <LISTEN-PROTO>.
Can be specified multiple times to listen on multiple ports. Can be specified multiple times to listen on multiple ports.
Available proto: socks, http, redir.
Default proto, addr, port: socks, 0.0.0.0, 1080. Default proto, addr, port: socks, 0.0.0.0, 1080.
Note: redir requires specific iptables rules and uses no authentication. * http: Supports only proxying https:// URLs, no http://.
* redir: Works with certain iptables setup.
(Redirecting locally originated traffic) (Redirecting locally originated traffic)
iptables -t nat -A OUTPUT -d $proxy_server_ip -j RETURN iptables -t nat -A OUTPUT -d $proxy_server_ip -j RETURN
@ -55,21 +57,10 @@ Options:
The artificial results are not saved for privacy, so restarting the The artificial results are not saved for privacy, so restarting the
resolver may cause downstream to cache stale results. resolver may cause downstream to cache stale results.
--proxy=PROXY --proxy=<proto>://<user>:<pass>@<hostname>[:<port>]
PROXY = PROXY-CHAIN | SOCKS-PROXY Routes traffic via the proxy server. Connects directly by default.
PROXY-CHAIN = <PROXY-URI>[","<PROXY-CHAIN>] Available proto: https, quic. Infers port by default.
PROXY-URI = <PROXY-PROTO>"://"[<USER>":"<PASS>"@"]<HOSTNAME>[":"<PORT>]
PROXY-PROTO = "http" | "https" | "quic"
SOCKS-PROXY = "socks://"<HOSTNAME>[":"<PORT>]
Routes traffic via the proxy chain.
The default proxy is directly connection without proxying.
The last PROXY-URI is negotiated automatically for Naive padding.
Limitations:
* QUIC proxies cannot follow TCP-based proxies in a proxy chain.
* The user needs to ensure there is no loop in the proxy chain.
* SOCKS proxies do not support chaining, authentication, or Naive padding.
--insecure-concurrency=<N> --insecure-concurrency=<N>

1
apk/.gitignore vendored
View file

@ -1,3 +1,2 @@
.gradle/ .gradle/
app/build/ app/build/
app/libs/

View file

@ -4,7 +4,7 @@ plugins {
} }
android { android {
namespace = "io.nekohasekai.sagernet.plugin.naive" namespace = "moe.matsuri.exe.naive"
signingConfigs { signingConfigs {
create("release") { create("release") {
@ -17,21 +17,23 @@ android {
buildTypes { buildTypes {
getByName("release") { getByName("release") {
proguardFiles(
getDefaultProguardFile("proguard-android-optimize.txt"),
file("proguard-rules.pro")
)
isMinifyEnabled = true isMinifyEnabled = true
signingConfig = signingConfigs.getByName("release") signingConfig = signingConfigs.getByName("release")
} }
} }
buildToolsVersion = "35.0.0" compileSdk = 33
compileSdk = 35
defaultConfig { defaultConfig {
minSdk = 24 minSdk = 21
targetSdk = 35 targetSdk = 33
applicationId = "io.nekohasekai.sagernet.plugin.naive" applicationId = "moe.matsuri.exe.naive"
versionCode = System.getenv("APK_VERSION_NAME").removePrefix("v").split(".")[0].toInt() * 10 + System.getenv("APK_VERSION_NAME").removePrefix("v").split("-")[1].toInt() versionCode = System.getenv("APK_VERSION_NAME").removePrefix("v").split(".")[0].toInt()
versionName = System.getenv("APK_VERSION_NAME").removePrefix("v") versionName = System.getenv("APK_VERSION_NAME").removePrefix("v")
splits.abi { splits.abi {
isEnable = true isEnable = true
@ -42,8 +44,12 @@ android {
} }
compileOptions { compileOptions {
sourceCompatibility = JavaVersion.VERSION_17 sourceCompatibility = JavaVersion.VERSION_1_8
targetCompatibility = JavaVersion.VERSION_17 targetCompatibility = JavaVersion.VERSION_1_8
}
kotlinOptions {
jvmTarget = "1.8"
} }
lint { lint {
@ -53,10 +59,6 @@ android {
warningsAsErrors = true warningsAsErrors = true
} }
packaging {
jniLibs.useLegacyPackaging = true
}
applicationVariants.all { applicationVariants.all {
outputs.all { outputs.all {
this as com.android.build.gradle.internal.api.BaseVariantOutputImpl this as com.android.build.gradle.internal.api.BaseVariantOutputImpl

View file

@ -13,12 +13,13 @@
<application <application
android:allowBackup="false" android:allowBackup="false"
android:extractNativeLibs="true"
android:icon="@mipmap/ic_launcher" android:icon="@mipmap/ic_launcher"
android:label="Naïve Plugin" android:label="Naïve For NekoBox"
android:roundIcon="@mipmap/ic_launcher_round"> android:roundIcon="@mipmap/ic_launcher_round">
<provider <provider
android:name=".BinaryProvider" android:name=".BinaryProvider"
android:authorities="io.nekohasekai.sagernet.plugin.naive.BinaryProvider" android:authorities="moe.matsuri.exe.naive.BinaryProvider"
android:directBootAware="true" android:directBootAware="true"
android:exported="true" android:exported="true"
tools:ignore="ExportedContentProvider"> tools:ignore="ExportedContentProvider">
@ -28,7 +29,7 @@
<intent-filter> <intent-filter>
<action android:name="io.nekohasekai.sagernet.plugin.ACTION_NATIVE_PLUGIN" /> <action android:name="io.nekohasekai.sagernet.plugin.ACTION_NATIVE_PLUGIN" />
<data <data
android:host="io.nekohasekai.sagernet" android:host="moe.matsuri.lite"
android:path="/naive-plugin" android:path="/naive-plugin"
android:scheme="plugin" /> android:scheme="plugin" />
</intent-filter> </intent-filter>

View file

@ -17,7 +17,7 @@
* * * *
******************************************************************************/ ******************************************************************************/
package io.nekohasekai.sagernet.plugin.naive package moe.matsuri.exe.naive
import android.net.Uri import android.net.Uri
import android.os.ParcelFileDescriptor import android.os.ParcelFileDescriptor

View file

@ -5,8 +5,8 @@ buildscript {
mavenCentral() mavenCentral()
} }
dependencies { dependencies {
classpath 'com.android.tools.build:gradle:8.6.0' classpath 'com.android.tools.build:gradle:7.3.1'
classpath 'org.jetbrains.kotlin:kotlin-gradle-plugin:2.0.20' classpath 'org.jetbrains.kotlin:kotlin-gradle-plugin:1.6.10'
// NOTE: Do not place your application dependencies here; they belong // NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files // in the individual module build.gradle files

Binary file not shown.

View file

@ -1,7 +1,6 @@
#Thu Jan 27 22:42:44 HKT 2022
distributionBase=GRADLE_USER_HOME distributionBase=GRADLE_USER_HOME
distributionUrl=https\://services.gradle.org/distributions/gradle-7.4-bin.zip
distributionPath=wrapper/dists distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists zipStorePath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME

285
apk/gradlew vendored
View file

@ -1,7 +1,7 @@
#!/bin/sh #!/usr/bin/env sh
# #
# Copyright © 2015-2021 the original authors. # Copyright 2015 the original author or authors.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -15,104 +15,69 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# #
# SPDX-License-Identifier: Apache-2.0
#
############################################################################## ##############################################################################
# ##
# Gradle start up script for POSIX generated by Gradle. ## Gradle start up script for UN*X
# ##
# Important for running:
#
# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
# noncompliant, but you have some other compliant shell such as ksh or
# bash, then to run this script, type that shell name before the whole
# command line, like:
#
# ksh Gradle
#
# Busybox and similar reduced shells will NOT work, because this script
# requires all of these POSIX shell features:
# * functions;
# * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
# «${var#prefix}», «${var%suffix}», and «$( cmd )»;
# * compound commands having a testable exit status, especially «case»;
# * various built-in commands including «command», «set», and «ulimit».
#
# Important for patching:
#
# (2) This script targets any POSIX shell, so it avoids extensions provided
# by Bash, Ksh, etc; in particular arrays are avoided.
#
# The "traditional" practice of packing multiple parameters into a
# space-separated string is a well documented source of bugs and security
# problems, so this is (mostly) avoided, by progressively accumulating
# options in "$@", and eventually passing that to Java.
#
# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
# see the in-line comments for details.
#
# There are tweaks for specific operating systems such as AIX, CygWin,
# Darwin, MinGW, and NonStop.
#
# (3) This script is generated from the Groovy template
# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
# within the Gradle project.
#
# You can find Gradle at https://github.com/gradle/gradle/.
#
############################################################################## ##############################################################################
# Attempt to set APP_HOME # Attempt to set APP_HOME
# Resolve links: $0 may be a link # Resolve links: $0 may be a link
app_path=$0 PRG="$0"
# Need this for relative symlinks.
# Need this for daisy-chained symlinks. while [ -h "$PRG" ] ; do
while ls=`ls -ld "$PRG"`
APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path link=`expr "$ls" : '.*-> \(.*\)$'`
[ -h "$app_path" ] if expr "$link" : '/.*' > /dev/null; then
do PRG="$link"
ls=$( ls -ld "$app_path" ) else
link=${ls#*' -> '} PRG=`dirname "$PRG"`"/$link"
case $link in #( fi
/*) app_path=$link ;; #(
*) app_path=$APP_HOME$link ;;
esac
done done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
# This is normally unused APP_NAME="Gradle"
# shellcheck disable=SC2034 APP_BASE_NAME=`basename "$0"`
APP_BASE_NAME=${0##*/}
# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
' "$PWD" ) || exit
# Use the maximum available, or set MAX_FD != -1 to use that value. # Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD=maximum MAX_FD="maximum"
warn () { warn () {
echo "$*" echo "$*"
} >&2 }
die () { die () {
echo echo
echo "$*" echo "$*"
echo echo
exit 1 exit 1
} >&2 }
# OS specific support (must be 'true' or 'false'). # OS specific support (must be 'true' or 'false').
cygwin=false cygwin=false
msys=false msys=false
darwin=false darwin=false
nonstop=false nonstop=false
case "$( uname )" in #( case "`uname`" in
CYGWIN* ) cygwin=true ;; #( CYGWIN* )
Darwin* ) darwin=true ;; #( cygwin=true
MSYS* | MINGW* ) msys=true ;; #( ;;
NONSTOP* ) nonstop=true ;; Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
@ -122,9 +87,9 @@ CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
if [ -n "$JAVA_HOME" ] ; then if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables # IBM's JDK on AIX uses strange locations for the executables
JAVACMD=$JAVA_HOME/jre/sh/java JAVACMD="$JAVA_HOME/jre/sh/java"
else else
JAVACMD=$JAVA_HOME/bin/java JAVACMD="$JAVA_HOME/bin/java"
fi fi
if [ ! -x "$JAVACMD" ] ; then if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
@ -133,120 +98,88 @@ Please set the JAVA_HOME variable in your environment to match the
location of your Java installation." location of your Java installation."
fi fi
else else
JAVACMD=java JAVACMD="java"
if ! command -v java >/dev/null 2>&1 which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
then
die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the Please set the JAVA_HOME variable in your environment to match the
location of your Java installation." location of your Java installation."
fi
fi fi
# Increase the maximum file descriptors if we can. # Increase the maximum file descriptors if we can.
if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
case $MAX_FD in #( MAX_FD_LIMIT=`ulimit -H -n`
max*) if [ $? -eq 0 ] ; then
# In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
# shellcheck disable=SC2039,SC3045 MAX_FD="$MAX_FD_LIMIT"
MAX_FD=$( ulimit -H -n ) || fi
warn "Could not query maximum file descriptor limit" ulimit -n $MAX_FD
esac if [ $? -ne 0 ] ; then
case $MAX_FD in #( warn "Could not set maximum file descriptor limit: $MAX_FD"
'' | soft) :;; #( fi
*) else
# In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
# shellcheck disable=SC2039,SC3045 fi
ulimit -n "$MAX_FD" ||
warn "Could not set maximum file descriptor limit to $MAX_FD"
esac
fi fi
# Collect all arguments for the java command, stacking in reverse order: # For Darwin, add options to specify how the application appears in the dock
# * args from the command line if $darwin; then
# * the main class name GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
# * -classpath fi
# * -D...appname settings
# * --module-path (only if needed)
# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
# For Cygwin or MSYS, switch paths to Windows format before running java # For Cygwin or MSYS, switch paths to Windows format before running java
if "$cygwin" || "$msys" ; then if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=$( cygpath --unix "$JAVACMD" ) JAVACMD=`cygpath --unix "$JAVACMD"`
# Now convert the arguments - kludge to limit ourselves to /bin/sh # We build the pattern for arguments to be converted via cygpath
for arg do ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
if SEP=""
case $arg in #( for dir in $ROOTDIRSRAW ; do
-*) false ;; # don't mess with options #( ROOTDIRS="$ROOTDIRS$SEP$dir"
/?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath SEP="|"
[ -e "$t" ] ;; #(
*) false ;;
esac
then
arg=$( cygpath --path --ignore --mixed "$arg" )
fi
# Roll the args list around exactly as many times as the number of
# args, so each arg winds up back in the position where it started, but
# possibly modified.
#
# NB: a `for` loop captures its iteration list before it begins, so
# changing the positional parameters here affects neither the number of
# iterations, nor the values presented in `arg`.
shift # remove old arg
set -- "$@" "$arg" # push replacement arg
done done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=`expr $i + 1`
done
case $i in
0) set -- ;;
1) set -- "$args0" ;;
2) set -- "$args0" "$args1" ;;
3) set -- "$args0" "$args1" "$args2" ;;
4) set -- "$args0" "$args1" "$args2" "$args3" ;;
5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi fi
# Escape application args
save () {
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
echo " "
}
APP_ARGS=`save "$@"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. # Collect all arguments for the java command, following the shell quoting and substitution rules
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
# Collect all arguments for the java command:
# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
# and any embedded shellness will be escaped.
# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
# treated as '${Hostname}' itself on the command line.
set -- \
"-Dorg.gradle.appname=$APP_BASE_NAME" \
-classpath "$CLASSPATH" \
org.gradle.wrapper.GradleWrapperMain \
"$@"
# Stop when "xargs" is not available.
if ! command -v xargs >/dev/null 2>&1
then
die "xargs is not available"
fi
# Use "xargs" to parse quoted args.
#
# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
#
# In Bash we could simply go:
#
# readarray ARGS < <( xargs -n1 <<<"$var" ) &&
# set -- "${ARGS[@]}" "$@"
#
# but POSIX shell has neither arrays nor command substitution, so instead we
# post-process each arg (as a line of input to sed) to backslash-escape any
# character that might be a shell metacharacter, then use eval to reverse
# that process (while maintaining the separation between arguments), and wrap
# the whole thing up as a single "set" statement.
#
# This will of course break if any of these variables contains a newline or
# an unmatched quote.
#
eval "set -- $(
printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
xargs -n1 |
sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
tr '\n' ' '
)" '"$@"'
exec "$JAVACMD" "$@" exec "$JAVACMD" "$@"

94
apk/gradlew.bat vendored
View file

@ -1,94 +0,0 @@
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@rem SPDX-License-Identifier: Apache-2.0
@rem
@if "%DEBUG%"=="" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%"=="" set DIRNAME=.
@rem This is normally unused
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute
echo. 1>&2
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto execute
echo. 1>&2
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
:end
@rem End local scope for the variables with windows NT shell
if %ERRORLEVEL% equ 0 goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
set EXIT_CODE=%ERRORLEVEL%
if %EXIT_CODE% equ 0 set EXIT_CODE=1
if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
exit /b %EXIT_CODE%
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega

View file

@ -5,6 +5,6 @@ dependencyResolutionManagement {
mavenCentral() mavenCentral()
} }
} }
rootProject.name = "Naive Plugin" rootProject.name = "Matsuri Plugins"
include ':app' include ':app'

View file

@ -28,10 +28,10 @@ IncludeCategories:
# LINT.IfChange(winheader) # LINT.IfChange(winheader)
- Regex: '^<objbase\.h>' # This has to be before initguid.h. - Regex: '^<objbase\.h>' # This has to be before initguid.h.
Priority: 1 Priority: 1
- Regex: '^<(atlbase|initguid|mmdeviceapi|ocidl|ole2|shobjidl|tchar|unknwn|windows|winsock2|winternl|ws2tcpip)\.h>' - Regex: '^<(initguid|mmdeviceapi|windows|winsock2|ws2tcpip|shobjidl|atlbase|ole2|unknwn|tchar|ocidl)\.h>'
Priority: 2 Priority: 2
# LINT.ThenChange(/tools/add_header.py:winheader) # LINT.ThenChange(/tools/add_header.py:winheader)
# UIAutomation*.h needs to be after base/win/atl.h. # UIAutomation*.h need to be after base/win/atl.h.
# Note the low priority number. # Note the low priority number.
- Regex: '^<UIAutomation.*\.h>' - Regex: '^<UIAutomation.*\.h>'
Priority: 6 Priority: 6
@ -39,11 +39,8 @@ IncludeCategories:
- Regex: '^<.*\.h>' - Regex: '^<.*\.h>'
Priority: 3 Priority: 3
# C++ standard library headers. # C++ standard library headers.
- Regex: '^<.*>' - Regex: '^<.*'
Priority: 4 Priority: 4
# windows_h_disallowed.h should appear last. Note the low priority number.
- Regex: '"(.*/)?windows_h_disallowed\.h"'
Priority: 7
# Other libraries. # Other libraries.
- Regex: '.*' - Regex: '.*'
Priority: 5 Priority: 5

19
src/.gn
View file

@ -55,14 +55,11 @@ default_args = {
crashpad_dependencies = "chromium" crashpad_dependencies = "chromium"
# Override ANGLE's Vulkan dependencies. # Override ANGLE's Vulkan dependencies.
angle_vulkan_headers_dir = "//third_party/vulkan-headers/src" angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
angle_vulkan_loader_dir = "//third_party/vulkan-loader/src" angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
angle_vulkan_tools_dir = "//third_party/vulkan-tools/src" angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
angle_vulkan_validation_layers_dir = angle_vulkan_validation_layers_dir =
"//third_party/vulkan-validation-layers/src" "//third_party/vulkan-deps/vulkan-validation-layers/src"
# Override VMA's Vulkan dependencies.
vma_vulkan_headers_dir = "//third_party/vulkan-headers/src"
# Overwrite default args declared in the Fuchsia sdk # Overwrite default args declared in the Fuchsia sdk
fuchsia_sdk_readelf_exec = fuchsia_sdk_readelf_exec =
@ -90,7 +87,7 @@ no_check_targets = [
"//v8:v8_libplatform", # 2 errors "//v8:v8_libplatform", # 2 errors
] ]
# These are the list of GN files that run exec_script. This allowlist exists # These are the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly # to force additional review for new uses of exec_script, which is strongly
# discouraged. # discouraged.
# #
@ -145,11 +142,11 @@ no_check_targets = [
# this situation much easier to create. if the build always lists the # this situation much easier to create. if the build always lists the
# files and passes them to a script, it will always be correct. # files and passes them to a script, it will always be correct.
exec_script_allowlist = exec_script_whitelist =
build_dotfile_settings.exec_script_allowlist + build_dotfile_settings.exec_script_whitelist +
angle_dotfile_settings.exec_script_whitelist + angle_dotfile_settings.exec_script_whitelist +
[ [
# Allowlist entries for //build should go into # Whitelist entries for //build should go into
# //build/dotfile_settings.gni instead, so that they can be shared # //build/dotfile_settings.gni instead, so that they can be shared
# with other repos. The entries in this list should be only for files # with other repos. The entries in this list should be only for files
# in the Chromium repo outside of //build. # in the Chromium repo outside of //build.

View file

@ -17,7 +17,6 @@ Aaron Jacobs <samusaaron3@gmail.com>
Aaron Leventhal <aaronlevbugs@gmail.com> Aaron Leventhal <aaronlevbugs@gmail.com>
Aaron Randolph <aaron.randolph@gmail.com> Aaron Randolph <aaron.randolph@gmail.com>
Aaryaman Vasishta <jem456.vasishta@gmail.com> Aaryaman Vasishta <jem456.vasishta@gmail.com>
AbdAlRahman Gad <abdobngad@gmail.com>
Abdu Ameen <abdu.ameen000@gmail.com> Abdu Ameen <abdu.ameen000@gmail.com>
Abdullah Abu Tasneem <a.tasneem@samsung.com> Abdullah Abu Tasneem <a.tasneem@samsung.com>
Abhijeet Kandalkar <abhijeet.k@samsung.com> Abhijeet Kandalkar <abhijeet.k@samsung.com>
@ -49,7 +48,6 @@ Ahmed Elwasefi <a.m.elwasefi@gmail.com>
Ahmet Emir Ercin <ahmetemiremir@gmail.com> Ahmet Emir Ercin <ahmetemiremir@gmail.com>
Aidarbek Suleimenov <suleimenov.aidarbek@gmail.com> Aidarbek Suleimenov <suleimenov.aidarbek@gmail.com>
Aiden Grossman <aidengrossmanpso@gmail.com> Aiden Grossman <aidengrossmanpso@gmail.com>
Airing Deng <airingdeng@gmail.com>
Ajay Berwal <a.berwal@samsung.com> Ajay Berwal <a.berwal@samsung.com>
Ajay Berwal <ajay.berwal@samsung.com> Ajay Berwal <ajay.berwal@samsung.com>
Ajay Sharma <ajay.sh@samsung.com> Ajay Sharma <ajay.sh@samsung.com>
@ -63,6 +61,7 @@ Aldo Culquicondor <alculquicondor@gmail.com>
Alec Petridis <alecthechop@gmail.com> Alec Petridis <alecthechop@gmail.com>
Aleksandar Stojiljkovic <aleksandar.stojiljkovic@intel.com> Aleksandar Stojiljkovic <aleksandar.stojiljkovic@intel.com>
Aleksei Gurianov <gurianov@gmail.com> Aleksei Gurianov <gurianov@gmail.com>
Aleksey Khoroshilov <akhoroshilov@brave.com>
Alesandro Ortiz <alesandro@alesandroortiz.com> Alesandro Ortiz <alesandro@alesandroortiz.com>
Alessandro Astone <ales.astone@gmail.com> Alessandro Astone <ales.astone@gmail.com>
Alex Chronopoulos <achronop@gmail.com> Alex Chronopoulos <achronop@gmail.com>
@ -87,7 +86,6 @@ Alexey Kuts <kruntuid@gmail.com>
Alexey Kuzmin <alex.s.kuzmin@gmail.com> Alexey Kuzmin <alex.s.kuzmin@gmail.com>
Alexey Kuznetsov <saturas2000@gmail.com> Alexey Kuznetsov <saturas2000@gmail.com>
Alexey Terentiev <alexeyter@gmail.com> Alexey Terentiev <alexeyter@gmail.com>
Alexia Bojian <bojianalexia4@gmail.com>
Alexis Brenon <brenon.alexis@gmail.com> Alexis Brenon <brenon.alexis@gmail.com>
Alexis La Goutte <alexis.lagoutte@gmail.com> Alexis La Goutte <alexis.lagoutte@gmail.com>
Alexis Menard <alexis.menard@intel.com> Alexis Menard <alexis.menard@intel.com>
@ -118,7 +116,6 @@ Andreas Papacharalampous <andreas@apap04.com>
Andrei Borza <andrei.borza@gmail.com> Andrei Borza <andrei.borza@gmail.com>
Andrei Parvu <andrei.prv@gmail.com> Andrei Parvu <andrei.prv@gmail.com>
Andrei Parvu <parvu@adobe.com> Andrei Parvu <parvu@adobe.com>
Andrei Volykhin <andrei.volykhin@gmail.com>
Andres Salomon <dilinger@queued.net> Andres Salomon <dilinger@queued.net>
Andreu Botella <andreu@andreubotella.com> Andreu Botella <andreu@andreubotella.com>
Andrew Boyarshin <andrew.boyarshin@gmail.com> Andrew Boyarshin <andrew.boyarshin@gmail.com>
@ -194,7 +191,6 @@ Ben Noordhuis <ben@strongloop.com>
Benedek Heilig <benecene@gmail.com> Benedek Heilig <benecene@gmail.com>
Benjamin Dupont <bedupont@cisco.com> Benjamin Dupont <bedupont@cisco.com>
Benjamin Jemlich <pcgod99@gmail.com> Benjamin Jemlich <pcgod99@gmail.com>
Beomsik Min <beomsikm@gmail.com>
Bernard Cafarelli <voyageur@gentoo.org> Bernard Cafarelli <voyageur@gentoo.org>
Bernhard M. Wiedemann <bwiedemann@suse.de> Bernhard M. Wiedemann <bwiedemann@suse.de>
Bert Belder <bertbelder@gmail.com> Bert Belder <bertbelder@gmail.com>
@ -212,6 +208,7 @@ Brendan Kirby <brendan.kirby@imgtec.com>
Brendan Long <self@brendanlong.com> Brendan Long <self@brendanlong.com>
Brendon Tiszka <btiszka@gmail.com> Brendon Tiszka <btiszka@gmail.com>
Brett Lewis <brettlewis@brettlewis.us> Brett Lewis <brettlewis@brettlewis.us>
Brian Clifton <clifton@brave.com>
Brian Dunn <brian@theophil.us> Brian Dunn <brian@theophil.us>
Brian G. Merrell <bgmerrell@gmail.com> Brian G. Merrell <bgmerrell@gmail.com>
Brian Konzman, SJ <b.g.konzman@gmail.com> Brian Konzman, SJ <b.g.konzman@gmail.com>
@ -240,7 +237,6 @@ Cameron Gutman <aicommander@gmail.com>
Camille Viot <viot.camille@outlook.com> Camille Viot <viot.camille@outlook.com>
Can Liu <peter.can.liu@gmail.com> Can Liu <peter.can.liu@gmail.com>
Carlos Santa <carlos.santa@intel.com> Carlos Santa <carlos.santa@intel.com>
Casey Primozic <me@ameo.link>
Catalin Badea <badea@adobe.com> Catalin Badea <badea@adobe.com>
Cathie Chen <cathiechen@tencent.com> Cathie Chen <cathiechen@tencent.com>
Cem Kocagil <cem.kocagil@gmail.com> Cem Kocagil <cem.kocagil@gmail.com>
@ -280,7 +276,6 @@ Chris Szurgot <szurgotc@amazon.com>
Chris Tserng <tserng@amazon.com> Chris Tserng <tserng@amazon.com>
Chris Vasselli <clindsay@gmail.com> Chris Vasselli <clindsay@gmail.com>
Chris Ye <hawkoyates@gmail.com> Chris Ye <hawkoyates@gmail.com>
Christian Liebel <christianliebel@gmail.com>
Christoph Staengle <christoph142@gmx.com> Christoph Staengle <christoph142@gmx.com>
Christophe Dumez <ch.dumez@samsung.com> Christophe Dumez <ch.dumez@samsung.com>
Christopher Dale <chrelad@gmail.com> Christopher Dale <chrelad@gmail.com>
@ -301,7 +296,6 @@ Daiwei Li <daiweili@suitabletech.com>
Damien Marié <damien@dam.io> Damien Marié <damien@dam.io>
Dan McCombs <overridex@gmail.com> Dan McCombs <overridex@gmail.com>
Daniel Adams <msub2official@gmail.com> Daniel Adams <msub2official@gmail.com>
Daniel Bertalan <dani@danielbertalan.dev>
Daniel Bevenius <daniel.bevenius@gmail.com> Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel Bomar <dbdaniel42@gmail.com> Daniel Bomar <dbdaniel42@gmail.com>
Daniel Carvalho Liedke <dliedke@gmail.com> Daniel Carvalho Liedke <dliedke@gmail.com>
@ -313,11 +307,9 @@ Daniel Lockyer <thisisdaniellockyer@gmail.com>
Daniel Nishi <dhnishi@gmail.com> Daniel Nishi <dhnishi@gmail.com>
Daniel Platz <daplatz@googlemail.com> Daniel Platz <daplatz@googlemail.com>
Daniel Playfair Cal <daniel.playfair.cal@gmail.com> Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
Daniel Richard G. <iskunk@gmail.com>
Daniel Shaulov <dshaulov@ptc.com> Daniel Shaulov <dshaulov@ptc.com>
Daniel Trebbien <dtrebbien@gmail.com> Daniel Trebbien <dtrebbien@gmail.com>
Daniel Waxweiler <daniel.waxweiler@gmail.com> Daniel Waxweiler <daniel.waxweiler@gmail.com>
Daniel Zhao <zhaodani@amazon.com>
Dániel Bátyai <dbatyai@inf.u-szeged.hu> Dániel Bátyai <dbatyai@inf.u-szeged.hu>
Dániel Vince <vinced@inf.u-szeged.hu> Dániel Vince <vinced@inf.u-szeged.hu>
Daniil Suvorov <severecloud@gmail.com> Daniil Suvorov <severecloud@gmail.com>
@ -325,16 +317,13 @@ Danny Weiss <danny.weiss.fr@gmail.com>
Danylo Boiko <danielboyko02@gmail.com> Danylo Boiko <danielboyko02@gmail.com>
Daoming Qiu <daoming.qiu@intel.com> Daoming Qiu <daoming.qiu@intel.com>
Darik Harter <darik.harter@gmail.com> Darik Harter <darik.harter@gmail.com>
Darryl Pogue <darryl@dpogue.ca>
Darshan Sen <raisinten@gmail.com> Darshan Sen <raisinten@gmail.com>
Darshini KN <kn.darshini@samsung.com> Darshini KN <kn.darshini@samsung.com>
Dave Vandyke <kzar@kzar.co.uk> Dave Vandyke <kzar@kzar.co.uk>
David Benjamin <davidben@mit.edu> David Benjamin <davidben@mit.edu>
David Brown <develop.david.brown@gmail.com> David Brown <develop.david.brown@gmail.com>
David Cernoch <dcernoch@uplandsoftware.com>
David Davidovic <david@davidovic.io> David Davidovic <david@davidovic.io>
David Erceg <erceg.david@gmail.com> David Erceg <erceg.david@gmail.com>
David Faden <dfaden@gmail.com>
David Fox <david@davidjfox.com> David Fox <david@davidjfox.com>
David Futcher <david.mike.futcher@gmail.com> David Futcher <david.mike.futcher@gmail.com>
David Jin <davidjin@amazon.com> David Jin <davidjin@amazon.com>
@ -343,7 +332,6 @@ David Leen <davileen@amazon.com>
David Manouchehri <david@davidmanouchehri.com> David Manouchehri <david@davidmanouchehri.com>
David McAllister <mcdavid@amazon.com> David McAllister <mcdavid@amazon.com>
David Michael Barr <david.barr@samsung.com> David Michael Barr <david.barr@samsung.com>
David Redondo <kde@david-redondo.de>
David Sanders <dsanders11@ucsbalum.com> David Sanders <dsanders11@ucsbalum.com>
David Spellman <dspell@amazon.com> David Spellman <dspell@amazon.com>
David Valachovic <adenflorian@gmail.com> David Valachovic <adenflorian@gmail.com>
@ -351,7 +339,6 @@ Dax Kelson <dkelson@gurulabs.com>
Dean Leitersdorf <dean.leitersdorf@gmail.com> Dean Leitersdorf <dean.leitersdorf@gmail.com>
Debadree Chatterjee <debadree333@gmail.com> Debadree Chatterjee <debadree333@gmail.com>
Debashish Samantaray <d.samantaray@samsung.com> Debashish Samantaray <d.samantaray@samsung.com>
Debin Zhang <debinzhang3@gmail.com>
Debug Wang <debugwang@tencent.com> Debug Wang <debugwang@tencent.com>
Deep Shah <deep.shah@samsung.com> Deep Shah <deep.shah@samsung.com>
Deepak Dilip Borade <deepak.db@samsung.com> Deepak Dilip Borade <deepak.db@samsung.com>
@ -371,7 +358,6 @@ Diana Suvorova <diana.suvorova@gmail.com>
Diego Fernández Santos <agujaydedal@gmail.com> Diego Fernández Santos <agujaydedal@gmail.com>
Diego Ferreiro Val <elfogris@gmail.com> Diego Ferreiro Val <elfogris@gmail.com>
Dillon Sellars <dill.sellars@gmail.com> Dillon Sellars <dill.sellars@gmail.com>
Dingming Liu <liudingming@bytedance.com>
Divya Bansal <divya.bansal@samsung.com> Divya Bansal <divya.bansal@samsung.com>
Dmitry Shachnev <mitya57@gmail.com> Dmitry Shachnev <mitya57@gmail.com>
Dmitry Sokolov <dimanne@gmail.com> Dmitry Sokolov <dimanne@gmail.com>
@ -390,7 +376,6 @@ Dongseong Hwang <dongseong.hwang@intel.com>
Dongwoo Joshua Im <dw.im@samsung.com> Dongwoo Joshua Im <dw.im@samsung.com>
Dongyu Lin <l2d4y3@gmail.com> Dongyu Lin <l2d4y3@gmail.com>
Donna Wu <donna.wu@intel.com> Donna Wu <donna.wu@intel.com>
Douglas Browne <douglas.browne123@gmail.com>
Douglas F. Turner <doug.turner@gmail.com> Douglas F. Turner <doug.turner@gmail.com>
Drew Blaisdell <drew.blaisdell@gmail.com> Drew Blaisdell <drew.blaisdell@gmail.com>
Dushyant Kant Sharma <dush.sharma@samsung.com> Dushyant Kant Sharma <dush.sharma@samsung.com>
@ -415,14 +400,12 @@ Emil Suleymanov <emil@esnx.xyz>
Ergun Erdogmus <erdogmusergun@gmail.com> Ergun Erdogmus <erdogmusergun@gmail.com>
Eric Ahn <byungwook.ahn@gmail.com> Eric Ahn <byungwook.ahn@gmail.com>
Eric Huang <ele828@gmail.com> Eric Huang <ele828@gmail.com>
Eric Long <i@hack3r.moe>
Eric Rescorla <ekr@rtfm.com> Eric Rescorla <ekr@rtfm.com>
Erik Hill <erikghill@gmail.com> Erik Hill <erikghill@gmail.com>
Erik Kurzinger <ekurzinger@gmail.com> Erik Kurzinger <ekurzinger@gmail.com>
Erik Sjölund <erik.sjolund@gmail.com> Erik Sjölund <erik.sjolund@gmail.com>
Eriq Augustine <eriq.augustine@gmail.com> Eriq Augustine <eriq.augustine@gmail.com>
Ernesto Mudu <ernesto.mudu@gmail.com> Ernesto Mudu <ernesto.mudu@gmail.com>
Ethan Chen <randomgamingdev@gmail.com>
Ethan Wong <bunnnywong@gmail.com> Ethan Wong <bunnnywong@gmail.com>
Etienne Laurin <etienne@atnnn.com> Etienne Laurin <etienne@atnnn.com>
Eugene Kim <eugene70kim@gmail.com> Eugene Kim <eugene70kim@gmail.com>
@ -449,6 +432,7 @@ Finbar Crago <finbar.crago@gmail.com>
François Beaufort <beaufort.francois@gmail.com> François Beaufort <beaufort.francois@gmail.com>
François Devatine <devatine@verizonmedia.com> François Devatine <devatine@verizonmedia.com>
Francois Kritzinger <francoisk777@gmail.com> Francois Kritzinger <francoisk777@gmail.com>
Francois Marier <francois@brave.com>
Francois Rauch <leopardb@gmail.com> Francois Rauch <leopardb@gmail.com>
Frankie Dintino <fdintino@theatlantic.com> Frankie Dintino <fdintino@theatlantic.com>
Franklin Ta <fta2012@gmail.com> Franklin Ta <fta2012@gmail.com>
@ -491,7 +475,6 @@ Greg Visser <gregvis@gmail.com>
Gregory Davis <gpdavis.chromium@gmail.com> Gregory Davis <gpdavis.chromium@gmail.com>
Grzegorz Czajkowski <g.czajkowski@samsung.com> Grzegorz Czajkowski <g.czajkowski@samsung.com>
Guangzhen Li <guangzhen.li@intel.com> Guangzhen Li <guangzhen.li@intel.com>
Guobin Wu <wuguobin.1229@bytedance.com>
Gurpreet Kaur <k.gurpreet@samsung.com> Gurpreet Kaur <k.gurpreet@samsung.com>
Gustav Tiger <gustav.tiger@sonymobile.com> Gustav Tiger <gustav.tiger@sonymobile.com>
Gyuyoung Kim <gyuyoung.kim@navercorp.com> Gyuyoung Kim <gyuyoung.kim@navercorp.com>
@ -508,7 +491,6 @@ Hansel Lee <mr.hansel.lee@gmail.com>
Hanwen Zheng <eserinc.z@gmail.com> Hanwen Zheng <eserinc.z@gmail.com>
Hao Li <hao.x.li@intel.com> Hao Li <hao.x.li@intel.com>
Haojian Wu <hokein.wu@gmail.com> Haojian Wu <hokein.wu@gmail.com>
Haoran Tang <haoran.tang.personal@gmail.com>
Haoxuan Zhang <zhanghaoxuan.59@bytedance.com> Haoxuan Zhang <zhanghaoxuan.59@bytedance.com>
Hari Singh <hari.singh1@samsung.com> Hari Singh <hari.singh1@samsung.com>
Harpreet Singh Khurana <harpreet.sk@samsung.com> Harpreet Singh Khurana <harpreet.sk@samsung.com>
@ -582,7 +564,6 @@ Ivan Naydonov <samogot@gmail.com>
Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com> Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com>
Ivan Sham <ivansham@amazon.com> Ivan Sham <ivansham@amazon.com>
Ivan Sidorov <ivansid@gmail.com> Ivan Sidorov <ivansid@gmail.com>
Jacek Fedoryński <jfedor@gmail.com>
Jack Bates <jack@nottheoilrig.com> Jack Bates <jack@nottheoilrig.com>
Jack Shi <flystone2020@gmail.com> Jack Shi <flystone2020@gmail.com>
Jackson Loeffler <j@jloeffler.com> Jackson Loeffler <j@jloeffler.com>
@ -590,13 +571,11 @@ Jacky Hu <flameddd@gmail.com>
Jacob Clark <jacob.jh.clark@googlemail.com> Jacob Clark <jacob.jh.clark@googlemail.com>
Jacob Mandelson <jacob@mandelson.org> Jacob Mandelson <jacob@mandelson.org>
Jaehun Lim <ljaehun.lim@samsung.com> Jaehun Lim <ljaehun.lim@samsung.com>
Jaehyun Chung <jaehyun.chung@amd.com>
Jaehyun Ko <jaehyun.dev@gmail.com> Jaehyun Ko <jaehyun.dev@gmail.com>
Jaehyun Lee <j-hyun.lee@samsung.com> Jaehyun Lee <j-hyun.lee@samsung.com>
Jaekyeom Kim <btapiz@gmail.com> Jaekyeom Kim <btapiz@gmail.com>
Jaemin Seo <jaemin86.seo@samsung.com> Jaemin Seo <jaemin86.seo@samsung.com>
Jaemo Koo <jaemok@amazon.com> Jaemo Koo <jaemok@amazon.com>
Jaemo Koo <koo2434@gmail.com>
Jaeseok Yoon <yjaeseok@gmail.com> Jaeseok Yoon <yjaeseok@gmail.com>
Jaewon Choi <jaewon.james.choi@gmail.com> Jaewon Choi <jaewon.james.choi@gmail.com>
Jaewon Jung <jw.jung@navercorp.com> Jaewon Jung <jw.jung@navercorp.com>
@ -611,7 +590,6 @@ Jakob Weigert <jakob.j.w@googlemail.com>
Jakub Machacek <xtreit@gmail.com> Jakub Machacek <xtreit@gmail.com>
James Burton <jb@0.me.uk> James Burton <jb@0.me.uk>
James Choi <jchoi42@pha.jhu.edu> James Choi <jchoi42@pha.jhu.edu>
James Crosby <crosby.james@gmail.com>
James Raphael Tiovalen <jamestiotio@gmail.com> James Raphael Tiovalen <jamestiotio@gmail.com>
James Stanley <james@apphaus.co.uk> James Stanley <james@apphaus.co.uk>
James Vega <vega.james@gmail.com> James Vega <vega.james@gmail.com>
@ -630,10 +608,8 @@ Jared Wein <weinjared@gmail.com>
Jari Karppanen <jkarp@amazon.com> Jari Karppanen <jkarp@amazon.com>
Jason Gronn <jasontopia03@gmail.com> Jason Gronn <jasontopia03@gmail.com>
Javayhu <javayhu@gmail.com> Javayhu <javayhu@gmail.com>
Jay Kapadia <jaykapadia389@gmail.com>
Jay Oster <jay@kodewerx.org> Jay Oster <jay@kodewerx.org>
Jay Soffian <jaysoffian@gmail.com> Jay Soffian <jaysoffian@gmail.com>
Jay Yang <sjyang1126@gmail.com>
Jeado Ko <haibane84@gmail.com> Jeado Ko <haibane84@gmail.com>
Jeffrey C <jeffreyca16@gmail.com> Jeffrey C <jeffreyca16@gmail.com>
Jeffrey Yeung <jeffrey.yeung@poly.com> Jeffrey Yeung <jeffrey.yeung@poly.com>
@ -652,7 +628,6 @@ Jesper Storm Bache <jsbache@gmail.com>
Jesper van den Ende <jespertheend@gmail.com> Jesper van den Ende <jespertheend@gmail.com>
Jesse Miller <jesse@jmiller.biz> Jesse Miller <jesse@jmiller.biz>
Jesus Sanchez-Palencia <jesus.sanchez-palencia.fernandez.fil@intel.com> Jesus Sanchez-Palencia <jesus.sanchez-palencia.fernandez.fil@intel.com>
Jia Yu <yujia.1019@bytedance.com>
Jiadong Chen <chenjiadong@huawei.com> Jiadong Chen <chenjiadong@huawei.com>
Jiadong Zhu <jiadong.zhu@linaro.org> Jiadong Zhu <jiadong.zhu@linaro.org>
Jiahao Lu <lujjjh@gmail.com> Jiahao Lu <lujjjh@gmail.com>
@ -681,8 +656,8 @@ Jincheol Jo <jincheol.jo@navercorp.com>
Jinfeng Ma <majinfeng1@xiaomi.com> Jinfeng Ma <majinfeng1@xiaomi.com>
Jing Zhao <zhaojing7@xiaomi.com> Jing Zhao <zhaojing7@xiaomi.com>
Jinglong Zuo <zuojinglong@xiaomi.com> Jinglong Zuo <zuojinglong@xiaomi.com>
Jingqi Sun <jingqi.sun@hotmail.com>
Jingqi Sun <sunjingqi47@gmail.com> Jingqi Sun <sunjingqi47@gmail.com>
Jingqi Sun <jingqi.sun@hotmail.com>
Jingwei Liu <kingweiliu@gmail.com> Jingwei Liu <kingweiliu@gmail.com>
Jingyi Wei <wjywbs@gmail.com> Jingyi Wei <wjywbs@gmail.com>
Jinho Bang <jinho.bang@samsung.com> Jinho Bang <jinho.bang@samsung.com>
@ -708,7 +683,6 @@ John Kleinschmidt <kleinschmidtorama@gmail.com>
John Yani <vanuan@gmail.com> John Yani <vanuan@gmail.com>
John Yoo <nearbyh13@gmail.com> John Yoo <nearbyh13@gmail.com>
Johnson Lin <johnson.lin@intel.com> Johnson Lin <johnson.lin@intel.com>
Jojo R <rjiejie@gmail.com>
Jon Jensen <jonj@netflix.com> Jon Jensen <jonj@netflix.com>
Jonathan Frazer <listedegarde@gmail.com> Jonathan Frazer <listedegarde@gmail.com>
Jonathan Garbee <jonathan@garbee.me> Jonathan Garbee <jonathan@garbee.me>
@ -722,7 +696,6 @@ JongKwon Lee <jongkwon.lee@navercorp.com>
Jongmok Kim <jongmok.kim@navercorp.com> Jongmok Kim <jongmok.kim@navercorp.com>
Jongmok Kim <johny.kimc@gmail.com> Jongmok Kim <johny.kimc@gmail.com>
Jongsoo Lee <leejongsoo@gmail.com> Jongsoo Lee <leejongsoo@gmail.com>
Joonas Halinen <joonashalinen@outlook.com>
Joone Hur <joone.hur@intel.com> Joone Hur <joone.hur@intel.com>
Joonghun Park <pjh0718@gmail.com> Joonghun Park <pjh0718@gmail.com>
Jorge Villatoro <jorge@tomatocannon.com> Jorge Villatoro <jorge@tomatocannon.com>
@ -732,7 +705,6 @@ Joseph Lolak <joseph.lolak@samsung.com>
Josh Triplett <josh.triplett@intel.com> Josh Triplett <josh.triplett@intel.com>
Josh Triplett <josh@joshtriplett.org> Josh Triplett <josh@joshtriplett.org>
Joshua Lock <joshua.lock@intel.com> Joshua Lock <joshua.lock@intel.com>
Joshua Olaoye <joshuaolaoye46@gmail.com>
Joshua Roesslein <jroesslein@gmail.com> Joshua Roesslein <jroesslein@gmail.com>
Josué Ratelle <jorat1346@gmail.com> Josué Ratelle <jorat1346@gmail.com>
Josyula Venkat Narasimham <venkat.nj@samsung.com> Josyula Venkat Narasimham <venkat.nj@samsung.com>
@ -759,7 +731,6 @@ Junmin Zhu <junmin.zhu@intel.com>
Junsang Mo <mojunsang26@gmail.com> Junsang Mo <mojunsang26@gmail.com>
Junsong Li <ljs.darkfish@gmail.com> Junsong Li <ljs.darkfish@gmail.com>
Jun Wang <wangjuna@uniontech.com> Jun Wang <wangjuna@uniontech.com>
Jun Xu <jun1.xu@intel.com>
Jun Zeng <hjunzeng6@gmail.com> Jun Zeng <hjunzeng6@gmail.com>
Justin Okamoto <justmoto@amazon.com> Justin Okamoto <justmoto@amazon.com>
Justin Ribeiro <justin@justinribeiro.com> Justin Ribeiro <justin@justinribeiro.com>
@ -767,7 +738,7 @@ Jüri Valdmann <juri.valdmann@qt.io>
Juyoung Kim <chattank05@gmail.com> Juyoung Kim <chattank05@gmail.com>
Jingge Yu <jinggeyu423@gmail.com> Jingge Yu <jinggeyu423@gmail.com>
Jing Peiyang <jingpeiyang@eswincomputing.com> Jing Peiyang <jingpeiyang@eswincomputing.com>
Jinli Wu <wujinli@bytedance.com> Jinli Wu <wujinli.cn@gmail.com>
K. M. Merajul Arefin <m.arefin@samsung.com> K. M. Merajul Arefin <m.arefin@samsung.com>
Kai Jiang <jiangkai@gmail.com> Kai Jiang <jiangkai@gmail.com>
Kai Köhne <kai.koehne@qt.io> Kai Köhne <kai.koehne@qt.io>
@ -782,7 +753,6 @@ Kangyuan Shu <kangyuan.shu@intel.com>
Karan Thakkar <karanjthakkar@gmail.com> Karan Thakkar <karanjthakkar@gmail.com>
Karel Král <kralkareliv@gmail.com> Karel Král <kralkareliv@gmail.com>
Karl <karlpolicechromium@gmail.com> Karl <karlpolicechromium@gmail.com>
Karl Piper <karl4piper@gmail.com>
Kartikey Bhatt <kartikey@amazon.com> Kartikey Bhatt <kartikey@amazon.com>
Kaspar Brand <googlecontrib@velox.ch> Kaspar Brand <googlecontrib@velox.ch>
Kaushalendra Mishra <k.mishra@samsung.com> Kaushalendra Mishra <k.mishra@samsung.com>
@ -798,7 +768,6 @@ Keita Suzuki <keitasuzuki.park@gmail.com>
Keita Yoshimoto <y073k3@gmail.com> Keita Yoshimoto <y073k3@gmail.com>
Keith Chen <keitchen@amazon.com> Keith Chen <keitchen@amazon.com>
Keith Cirkel <chromium@keithcirkel.co.uk> Keith Cirkel <chromium@keithcirkel.co.uk>
Kelsen Liu <kelsenliu21@gmail.com>
Kenneth Rohde Christiansen <kenneth.r.christiansen@intel.com> Kenneth Rohde Christiansen <kenneth.r.christiansen@intel.com>
Kenneth Strickland <ken.strickland@gmail.com> Kenneth Strickland <ken.strickland@gmail.com>
Kenneth Zhou <knthzh@gmail.com> Kenneth Zhou <knthzh@gmail.com>
@ -809,8 +778,6 @@ Ketan Goyal <ketan.goyal@samsung.com>
Kevin Gibbons <bakkot@gmail.com> Kevin Gibbons <bakkot@gmail.com>
Kevin Lee Helpingstine <sig11@reprehensible.net> Kevin Lee Helpingstine <sig11@reprehensible.net>
Kevin M. McCormick <mckev@amazon.com> Kevin M. McCormick <mckev@amazon.com>
Kexy Biscuit <kexybiscuit@aosc.io>
Kexy Biscuit <kexybiscuit@gmail.com>
Keyou <qqkillyou@gmail.com> Keyou <qqkillyou@gmail.com>
Khasim Syed Mohammed <khasim.mohammed@linaro.org> Khasim Syed Mohammed <khasim.mohammed@linaro.org>
Khem Raj <raj.khem@gmail.com> Khem Raj <raj.khem@gmail.com>
@ -846,11 +813,9 @@ Kyungtae Kim <ktf.kim@samsung.com>
Kyungyoung Heo <bbvch13531@gmail.com> Kyungyoung Heo <bbvch13531@gmail.com>
Kyutae Lee <gorisanson@gmail.com> Kyutae Lee <gorisanson@gmail.com>
Lalit Chandivade <lalit.chandivade@einfochips.com> Lalit Chandivade <lalit.chandivade@einfochips.com>
Lalit Rana <lalitrn44@gmail.com>
Lam Lu <lamlu@amazon.com> Lam Lu <lamlu@amazon.com>
Laszlo Gombos <l.gombos@samsung.com> Laszlo Gombos <l.gombos@samsung.com>
Laszlo Radanyi <bekkra@gmail.com> Laszlo Radanyi <bekkra@gmail.com>
lauren n. liberda <lauren@selfisekai.rocks>
Lauren Yeun Kim <lauren.yeun.kim@gmail.com> Lauren Yeun Kim <lauren.yeun.kim@gmail.com>
Lauri Oherd <lauri.oherd@gmail.com> Lauri Oherd <lauri.oherd@gmail.com>
Lavar Askew <open.hyperion@gmail.com> Lavar Askew <open.hyperion@gmail.com>
@ -876,7 +841,6 @@ Lin Peng <penglin220@gmail.com>
Lin Peng <penglin22@huawei.com> Lin Peng <penglin22@huawei.com>
Lingqi Chi <someway.bit@gmail.com> Lingqi Chi <someway.bit@gmail.com>
Lingyun Cai <lingyun.cai@intel.com> Lingyun Cai <lingyun.cai@intel.com>
Linnan Li <lilinnan0903@gmail.com>
Lionel Landwerlin <lionel.g.landwerlin@intel.com> Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Lisha Guo <lisha.guo@intel.com> Lisha Guo <lisha.guo@intel.com>
Lizhi Fan <lizhi.fan@samsung.com> Lizhi Fan <lizhi.fan@samsung.com>
@ -912,7 +876,6 @@ Malcolm Wang <malcolm.2.wang@gmail.com>
Mallikarjuna Rao V <vm.arjun@samsung.com> Mallikarjuna Rao V <vm.arjun@samsung.com>
Manish Chhajer <chhajer.m@samsung.com> Manish Chhajer <chhajer.m@samsung.com>
Manish Jethani <m.jethani@eyeo.com> Manish Jethani <m.jethani@eyeo.com>
Manjunath Babu <10manju@gmail.com>
Manojkumar Bhosale <manojkumar.bhosale@imgtec.com> Manojkumar Bhosale <manojkumar.bhosale@imgtec.com>
Manuel Braun <thembrown@gmail.com> Manuel Braun <thembrown@gmail.com>
Manuel Lagana <manuel.lagana.dev@gmail.com> Manuel Lagana <manuel.lagana.dev@gmail.com>
@ -942,7 +905,6 @@ Martin Persson <mnpn03@gmail.com>
Martin Rogalla <martin@martinrogalla.com> Martin Rogalla <martin@martinrogalla.com>
Martina Kollarova <martina.kollarova@intel.com> Martina Kollarova <martina.kollarova@intel.com>
Martino Fontana <tinozzo123@gmail.com> Martino Fontana <tinozzo123@gmail.com>
Marvin Giessing <marvin.giessing@gmail.com>
Masahiro Yado <yado.masa@gmail.com> Masahiro Yado <yado.masa@gmail.com>
Masaru Nishida <msr.i386@gmail.com> Masaru Nishida <msr.i386@gmail.com>
Masayuki Wakizaka <mwakizaka0108@gmail.com> Masayuki Wakizaka <mwakizaka0108@gmail.com>
@ -952,8 +914,6 @@ Mathias Bynens <mathias@qiwi.be>
Mathieu Meisser <mmeisser@logitech.com> Mathieu Meisser <mmeisser@logitech.com>
Matt Arpidone <mma.public@gmail.com> Matt Arpidone <mma.public@gmail.com>
Matt Fysh <mattfysh@gmail.com> Matt Fysh <mattfysh@gmail.com>
Matt Harding <majaharding@gmail.com>
Matt Jolly <kangie@gentoo.org>
Matt Strum <mstrum@amazon.com> Matt Strum <mstrum@amazon.com>
Matt Zeunert <matt@mostlystatic.com> Matt Zeunert <matt@mostlystatic.com>
Matthew "strager" Glazar <strager.nds@gmail.com> Matthew "strager" Glazar <strager.nds@gmail.com>
@ -968,6 +928,7 @@ Matthieu Rigolot <matthieu.rigolot@gmail.com>
Matthieu Vlad Hauglustaine <matt.hauglustaine@gmail.com> Matthieu Vlad Hauglustaine <matt.hauglustaine@gmail.com>
Mattias Buelens <mattias.buelens@gmail.com> Mattias Buelens <mattias.buelens@gmail.com>
Max Coplan <mchcopl@gmail.com> Max Coplan <mchcopl@gmail.com>
Max Karolinskiy <max@brave.com>
Max Perepelitsyn <pph34r@gmail.com> Max Perepelitsyn <pph34r@gmail.com>
Max Schmitt <max@schmitt.mx> Max Schmitt <max@schmitt.mx>
Max Vujovic <mvujovic@adobe.com> Max Vujovic <mvujovic@adobe.com>
@ -977,19 +938,16 @@ Mc Zeng <zengmcong@gmail.com>
Md Abdullah Al Alamin <a.alamin.cse@gmail.com> Md Abdullah Al Alamin <a.alamin.cse@gmail.com>
Md. Hasanur Rashid <hasanur.r@samsung.com> Md. Hasanur Rashid <hasanur.r@samsung.com>
Md Hasibul Hasan <hasibulhasan873@gmail.com> Md Hasibul Hasan <hasibulhasan873@gmail.com>
Md Hasibul Hasan <hasibul.h@samsung.com>
Md Jobed Hossain <jobed.h@samsung.com> Md Jobed Hossain <jobed.h@samsung.com>
Md Raiyan bin Sayeed <mrbsayee@uwaterloo.ca> Md Raiyan bin Sayeed <mrbsayee@uwaterloo.ca>
Md. Sadiqul Amin <sadiqul.amin@samsung.com> Md. Sadiqul Amin <sadiqul.amin@samsung.com>
Md Sami Uddin <md.sami@samsung.com> Md Sami Uddin <md.sami@samsung.com>
Mego Tan <tannal2409@gmail.com>
Merajul Arefin <merajularefin@gmail.com> Merajul Arefin <merajularefin@gmail.com>
Micha Hanselmann <micha.hanselmann@gmail.com> Micha Hanselmann <micha.hanselmann@gmail.com>
Michael Cirone <mikecirone@gmail.com> Michael Cirone <mikecirone@gmail.com>
Michael Constant <mconst@gmail.com> Michael Constant <mconst@gmail.com>
Michael Forney <mforney@mforney.org> Michael Forney <mforney@mforney.org>
Michael Gilbert <floppymaster@gmail.com> Michael Gilbert <floppymaster@gmail.com>
Michael Herrmann <michael@herrmann.io>
Michael Kolomeytsev <michael.kolomeytsev@gmail.com> Michael Kolomeytsev <michael.kolomeytsev@gmail.com>
Michael Lopez <lopes92290@gmail.com> Michael Lopez <lopes92290@gmail.com>
Michael Morrison <codebythepound@gmail.com> Michael Morrison <codebythepound@gmail.com>
@ -1006,11 +964,11 @@ Mihai Tica <mitica@adobe.com>
Mike Pennisi <mike@mikepennisi.com> Mike Pennisi <mike@mikepennisi.com>
Mike Tilburg <mtilburg@adobe.com> Mike Tilburg <mtilburg@adobe.com>
Mikhail Pozdnyakov <mikhail.pozdnyakov@intel.com> Mikhail Pozdnyakov <mikhail.pozdnyakov@intel.com>
Mikhail Atuchin <matuchin@brave.com>
Milko Leporis <milko.leporis@imgtec.com> Milko Leporis <milko.leporis@imgtec.com>
Milton Chiang <milton.chiang@mediatek.com> Milton Chiang <milton.chiang@mediatek.com>
Milutin Smiljanic <msmiljanic.gm@gmail.com> Milutin Smiljanic <msmiljanic.gm@gmail.com>
Minchul Kang <tegongkang@gmail.com> Minchul Kang <tegongkang@gmail.com>
Ming Lei <minggeorgelei@gmail.com>
Mingeun Park <mindal99546@gmail.com> Mingeun Park <mindal99546@gmail.com>
Minggang Wang <minggang.wang@intel.com> Minggang Wang <minggang.wang@intel.com>
Mingmin Xie <melvinxie@gmail.com> Mingmin Xie <melvinxie@gmail.com>
@ -1027,14 +985,10 @@ Mitchell Cohen <mitchell@agilebits.com>
Miyoung Shin <myid.shin@navercorp.com> Miyoung Shin <myid.shin@navercorp.com>
Mohamed I. Hammad <ibraaaa@gmail.com> Mohamed I. Hammad <ibraaaa@gmail.com>
Mohamed Mansour <m0.interactive@gmail.com> Mohamed Mansour <m0.interactive@gmail.com>
Mohamed Hany Youns <mohamedhyouns@gmail.com>
Mohammad Azam <m.azam@samsung.com> Mohammad Azam <m.azam@samsung.com>
MohammadSabri <mohammad.kh.sabri@exalt.ps>
Mohammed Ashraf <mohammedashraf4599@gmail.com>
Mohammed Wajahat Ali Siddiqui <wajahat.s@samsung.com> Mohammed Wajahat Ali Siddiqui <wajahat.s@samsung.com>
Mohan Reddy <mohan.reddy@samsung.com> Mohan Reddy <mohan.reddy@samsung.com>
Mohit Bhalla <bhallam@amazon.com> Mohit Bhalla <bhallam@amazon.com>
Mohraiel Matta <mohraielmatta@gmail.com>
Moiseanu Rares-Marian <moiseanurares@gmail.com> Moiseanu Rares-Marian <moiseanurares@gmail.com>
Momoka Yamamoto <momoka.my6@gmail.com> Momoka Yamamoto <momoka.my6@gmail.com>
Momoko Hattori <momohatt10@gmail.com> Momoko Hattori <momohatt10@gmail.com>
@ -1070,9 +1024,7 @@ Nedeljko Babic <nedeljko.babic@imgtec.com>
Neehit Goyal <neehit.goyal@samsung.com> Neehit Goyal <neehit.goyal@samsung.com>
Nidhi Jaju <nidhijaju127@gmail.com> Nidhi Jaju <nidhijaju127@gmail.com>
Niek van der Maas <mail@niekvandermaas.nl> Niek van der Maas <mail@niekvandermaas.nl>
Nik Pavlov <nikita.pavlov.dev@gmail.com>
Nikhil Bansal <n.bansal@samsung.com> Nikhil Bansal <n.bansal@samsung.com>
Nikhil Meena <iakhilmeena@gmail.com>
Nikhil Sahni <nikhil.sahni@samsung.com> Nikhil Sahni <nikhil.sahni@samsung.com>
Nikita Ofitserov <himikof@gmail.com> Nikita Ofitserov <himikof@gmail.com>
Niklas Hambüchen <mail@nh2.me> Niklas Hambüchen <mail@nh2.me>
@ -1086,7 +1038,6 @@ Nivedan Sharma <ni.sharma@samsung.com>
Noam Rosenthal <noam.j.rosenthal@gmail.com> Noam Rosenthal <noam.j.rosenthal@gmail.com>
Noj Vek <nojvek@gmail.com> Noj Vek <nojvek@gmail.com>
Nolan Cao <nolan.robin.cao@gmail.com> Nolan Cao <nolan.robin.cao@gmail.com>
Nourhan Hasan <nourhan.m.hasan@gmail.com>
Oleksii Kadurin <ovkadurin@gmail.com> Oleksii Kadurin <ovkadurin@gmail.com>
Oliver Dunk <oliver@oliverdunk.com> Oliver Dunk <oliver@oliverdunk.com>
Olivier Tilloy <olivier+chromium@tilloy.net> Olivier Tilloy <olivier+chromium@tilloy.net>
@ -1120,7 +1071,6 @@ Paul Wicks <pwicks86@gmail.com>
Pavan Kumar Emani <pavan.e@samsung.com> Pavan Kumar Emani <pavan.e@samsung.com>
Pavel Golikov <paullo612@ya.ru> Pavel Golikov <paullo612@ya.ru>
Pavel Ivanov <paivanof@gmail.com> Pavel Ivanov <paivanof@gmail.com>
Pawan Udassi <pawanudassi@hotmail.com>
Pawel Forysiuk <p.forysiuk@samsung.com> Pawel Forysiuk <p.forysiuk@samsung.com>
Paweł Hajdan jr <phajdan.jr@gmail.com> Paweł Hajdan jr <phajdan.jr@gmail.com>
Paweł Stanek <pawel@gener8ads.com> Paweł Stanek <pawel@gener8ads.com>
@ -1155,6 +1105,7 @@ Po-Chun Chang <pochang0403@gmail.com>
Prakhar Shrivastav <p.shri@samsung.com> Prakhar Shrivastav <p.shri@samsung.com>
Pramod Begur Srinath <pramod.bs@samsung.com> Pramod Begur Srinath <pramod.bs@samsung.com>
Pranay Kumar <pranay.kumar@samsung.com> Pranay Kumar <pranay.kumar@samsung.com>
Pranjal Jumde <pranjal@brave.com>
Prashant Hiremath <prashhir@cisco.com> Prashant Hiremath <prashhir@cisco.com>
Prashant Nevase <prashant.n@samsung.com> Prashant Nevase <prashant.n@samsung.com>
Prashant Patil <prashant.patil@imgtec.com> Prashant Patil <prashant.patil@imgtec.com>
@ -1181,6 +1132,7 @@ Rahul Gupta <rahul.g@samsung.com>
Rahul Yadav <rahul.yadav@samsung.com> Rahul Yadav <rahul.yadav@samsung.com>
Rajesh Mahindra <rmahindra@uber.com> Rajesh Mahindra <rmahindra@uber.com>
Rajneesh Rana <rajneesh.r@samsung.com> Rajneesh Rana <rajneesh.r@samsung.com>
Ralph Giles <rgiles@brave.com>
Raman Tenneti <raman.tenneti@gmail.com> Raman Tenneti <raman.tenneti@gmail.com>
Ramkumar Gokarnesan <ramkumar.gokarnesan@gmail.com> Ramkumar Gokarnesan <ramkumar.gokarnesan@gmail.com>
Ramkumar Ramachandra <artagnon@gmail.com> Ramkumar Ramachandra <artagnon@gmail.com>
@ -1246,7 +1198,6 @@ Ryan Manuel <rfmanuel@gmail.com>
Ryan Norton <rnorton10@gmail.com> Ryan Norton <rnorton10@gmail.com>
Ryan Sleevi <ryan-chromium-dev@sleevi.com> Ryan Sleevi <ryan-chromium-dev@sleevi.com>
Ryan Yoakum <ryoakum@skobalt.com> Ryan Yoakum <ryoakum@skobalt.com>
Ryan Huen <ryanhuenprivate@gmail.com>
Rye Zhang <ryezhang@tencent.com> Rye Zhang <ryezhang@tencent.com>
Ryo Ogawa <negibokken@gmail.com> Ryo Ogawa <negibokken@gmail.com>
Ryuan Choi <ryuan.choi@samsung.com> Ryuan Choi <ryuan.choi@samsung.com>
@ -1291,7 +1242,6 @@ Sean Bryant <sean@cyberwang.net>
Sean DuBois <seaduboi@amazon.com> Sean DuBois <seaduboi@amazon.com>
Sebastian Amend <sebastian.amend@googlemail.com> Sebastian Amend <sebastian.amend@googlemail.com>
Sebastian Krzyszkowiak <dos@dosowisko.net> Sebastian Krzyszkowiak <dos@dosowisko.net>
Sebastian Markbåge <sebastian@calyptus.eu>
Sebastjan Raspor <sebastjan.raspor1@gmail.com> Sebastjan Raspor <sebastjan.raspor1@gmail.com>
Seo Sanghyeon <sanxiyn@gmail.com> Seo Sanghyeon <sanxiyn@gmail.com>
Seokju Kwon <seokju.kwon@gmail.com> Seokju Kwon <seokju.kwon@gmail.com>
@ -1301,7 +1251,6 @@ Sergei Poletaev <spylogsster@gmail.com>
Sergei Romanov <rsv.981@gmail.com> Sergei Romanov <rsv.981@gmail.com>
Sergey Romanov <svromanov@sberdevices.ru> Sergey Romanov <svromanov@sberdevices.ru>
Sergey Kipet <sergey.kipet@gmail.com> Sergey Kipet <sergey.kipet@gmail.com>
Sergey Markelov <sergionso@gmail.com>
Sergey Putilin <p.sergey@samsung.com> Sergey Putilin <p.sergey@samsung.com>
Sergey Shekyan <shekyan@gmail.com> Sergey Shekyan <shekyan@gmail.com>
Sergey Talantov <sergey.talantov@gmail.com> Sergey Talantov <sergey.talantov@gmail.com>
@ -1312,7 +1261,6 @@ Serhii Matrunchyk <sergiy.matrunchyk@gmail.com>
Seshadri Mahalingam <seshadri.mahalingam@gmail.com> Seshadri Mahalingam <seshadri.mahalingam@gmail.com>
Seungkyu Lee <zx6658@gmail.com> Seungkyu Lee <zx6658@gmail.com>
Sevan Janiyan <venture37@geeklan.co.uk> Sevan Janiyan <venture37@geeklan.co.uk>
Shaheen Fazim <fazim.pentester@gmail.com>
Shahriar Rostami <shahriar.rostami@gmail.com> Shahriar Rostami <shahriar.rostami@gmail.com>
Shail Singhal <shail.s@samsung.com> Shail Singhal <shail.s@samsung.com>
Shane Hansen <shanemhansen@gmail.com> Shane Hansen <shanemhansen@gmail.com>
@ -1370,7 +1318,6 @@ Sooho Park <sooho1000@gmail.com>
Soojung Choi <crystal2840@gmail.com> Soojung Choi <crystal2840@gmail.com>
Soorya R <soorya.r@samsung.com> Soorya R <soorya.r@samsung.com>
Soren Dreijer <dreijerbit@gmail.com> Soren Dreijer <dreijerbit@gmail.com>
Spencer Wilson <spencer@spencerwilson.org>
Sreerenj Balachandran <sreerenj.balachandran@intel.com> Sreerenj Balachandran <sreerenj.balachandran@intel.com>
Srirama Chandra Sekhar Mogali <srirama.m@samsung.com> Srirama Chandra Sekhar Mogali <srirama.m@samsung.com>
Stacy Kim <stacy.kim@ucla.edu> Stacy Kim <stacy.kim@ucla.edu>
@ -1394,7 +1341,6 @@ Sunchang Li <johnstonli@tencent.com>
Sundoo Kim <nerdooit@gmail.com> Sundoo Kim <nerdooit@gmail.com>
Sundoo Kim <0xd00d00b@gmail.com> Sundoo Kim <0xd00d00b@gmail.com>
Suneel Kota <suneel.kota@samsung.com> Suneel Kota <suneel.kota@samsung.com>
Sung Lee <sung.lee@amd.com>
Sungguk Lim <limasdf@gmail.com> Sungguk Lim <limasdf@gmail.com>
Sunghyeok Kang <sh0528.kang@samsung.com> Sunghyeok Kang <sh0528.kang@samsung.com>
Sungmann Cho <sungmann.cho@gmail.com> Sungmann Cho <sungmann.cho@gmail.com>
@ -1434,7 +1380,6 @@ Takuya Kurimoto <takuya004869@gmail.com>
Tanay Chowdhury <tanay.c@samsung.com> Tanay Chowdhury <tanay.c@samsung.com>
Tanvir Rizvi <tanvir.rizvi@samsung.com> Tanvir Rizvi <tanvir.rizvi@samsung.com>
Tao Wang <tao.wang.2261@gmail.com> Tao Wang <tao.wang.2261@gmail.com>
Tao Xiong <taox4@illinois.edu>
Tapu Kumar Ghose <ghose.tapu@gmail.com> Tapu Kumar Ghose <ghose.tapu@gmail.com>
Taylor Price <trprice@gmail.com> Taylor Price <trprice@gmail.com>
Ted Kim <neot0000@gmail.com> Ted Kim <neot0000@gmail.com>
@ -1449,12 +1394,10 @@ Thomas Nguyen <haitung.nguyen@avast.com>
Thomas Phillips <tphillips@snapchat.com> Thomas Phillips <tphillips@snapchat.com>
Thomas White <im.toms.inbox@gmail.com> Thomas White <im.toms.inbox@gmail.com>
Tiago Vignatti <tiago.vignatti@intel.com> Tiago Vignatti <tiago.vignatti@intel.com>
Tianyi Zhang <me@1stprinciple.org>
Tibor Dusnoki <tibor.dusnoki.91@gmail.com> Tibor Dusnoki <tibor.dusnoki.91@gmail.com>
Tibor Dusnoki <tdusnoki@inf.u-szeged.hu> Tibor Dusnoki <tdusnoki@inf.u-szeged.hu>
Tien Hock Loh <tienhock.loh@starfivetech.com> Tien Hock Loh <tienhock.loh@starfivetech.com>
Tim Ansell <mithro@mithis.com> Tim Ansell <mithro@mithis.com>
Tim Barry <oregongraperoot@gmail.com>
Tim Niederhausen <tim@rnc-ag.de> Tim Niederhausen <tim@rnc-ag.de>
Tim Steiner <twsteiner@gmail.com> Tim Steiner <twsteiner@gmail.com>
Timo Gurr <timo.gurr@gmail.com> Timo Gurr <timo.gurr@gmail.com>
@ -1470,11 +1413,9 @@ Tom Harwood <tfh@skip.org>
Tomas Popela <tomas.popela@gmail.com> Tomas Popela <tomas.popela@gmail.com>
Tomasz Edward Posłuszny <tom@devpeer.net> Tomasz Edward Posłuszny <tom@devpeer.net>
Tony Shen <legendmastertony@gmail.com> Tony Shen <legendmastertony@gmail.com>
Topi Lassila <tolassila@gmail.com>
Torsten Kurbad <google@tk-webart.de> Torsten Kurbad <google@tk-webart.de>
Toshihito Kikuchi <leamovret@gmail.com> Toshihito Kikuchi <leamovret@gmail.com>
Toshiaki Tanaka <zokutyou2@gmail.com> Toshiaki Tanaka <zokutyou2@gmail.com>
Travis Leithead <travis.leithead@gmail.com>
Trent Willis <trentmwillis@gmail.com> Trent Willis <trentmwillis@gmail.com>
Trevor Perrin <unsafe@trevp.net> Trevor Perrin <unsafe@trevp.net>
Tripta Gupta <triptagupta19@gmail.com> Tripta Gupta <triptagupta19@gmail.com>
@ -1514,7 +1455,6 @@ Vishal Bhatnagar <vishal.b@samsung.com>
Vishal Lingam <vishal.reddy@samsung.com> Vishal Lingam <vishal.reddy@samsung.com>
Vitaliy Kharin <kvserr@gmail.com> Vitaliy Kharin <kvserr@gmail.com>
Vivek Galatage <vivek.vg@samsung.com> Vivek Galatage <vivek.vg@samsung.com>
Vlad Zahorodnii <vlad.zahorodnii@kde.org>
Volker Sorge <volker.sorge@gmail.com> Volker Sorge <volker.sorge@gmail.com>
Waihung Fu <fufranci@amazon.com> Waihung Fu <fufranci@amazon.com>
wafuwafu13 <mariobaske@i.softbank.jp> wafuwafu13 <mariobaske@i.softbank.jp>
@ -1522,11 +1462,9 @@ Wojciech Bielawski <wojciech.bielawski@gmail.com>
Wang Chen <wangchen20@iscas.ac.cn> Wang Chen <wangchen20@iscas.ac.cn>
Wang Chen <unicornxw@gmail.com> Wang Chen <unicornxw@gmail.com>
Wang Weiwei <wangww@dingdao.com> Wang Weiwei <wangww@dingdao.com>
Wang Zirui <kingzirvi@gmail.com>
Wangyang Dai <jludwy@gmail.com> Wangyang Dai <jludwy@gmail.com>
Wanming Lin <wanming.lin@intel.com> Wanming Lin <wanming.lin@intel.com>
Wei Li <wei.c.li@intel.com> Wei Li <wei.c.li@intel.com>
Weicong Yu <yuweicong666@gmail.com>
Wen Fan <fanwen1@huawei.com> Wen Fan <fanwen1@huawei.com>
Wenxiang Qian <leonwxqian@gmail.com> Wenxiang Qian <leonwxqian@gmail.com>
WenSheng He <wensheng.he@samsung.com> WenSheng He <wensheng.he@samsung.com>
@ -1592,7 +1530,6 @@ Yong Shin <sy3620@gmail.com>
Yong Wang <ccyongwang@tencent.com> Yong Wang <ccyongwang@tencent.com>
Yonggang Luo <luoyonggang@gmail.com> Yonggang Luo <luoyonggang@gmail.com>
Yongha Lee <yongha78.lee@samsung.com> Yongha Lee <yongha78.lee@samsung.com>
Yongsang Park <yongsangpark980813@gmail.com>
Yongseok Choi <yongseok.choi@navercorp.com> Yongseok Choi <yongseok.choi@navercorp.com>
Yongsheng Zhu <yongsheng.zhu@intel.com> Yongsheng Zhu <yongsheng.zhu@intel.com>
Yoonjae Cho <yoonjae.cho92@gmail.com> Yoonjae Cho <yoonjae.cho92@gmail.com>
@ -1625,13 +1562,11 @@ Yuta Kasai <kasai.yuta0810@gmail.com>
Yuvanesh Natarajan <yuvanesh.n1@samsung.com> Yuvanesh Natarajan <yuvanesh.n1@samsung.com>
Zach Bjornson <zbbjornson@gmail.com> Zach Bjornson <zbbjornson@gmail.com>
Zachary Capalbo <zach.geek@gmail.com> Zachary Capalbo <zach.geek@gmail.com>
Zehan Li <synclzhhans@gmail.com>
Zeno Albisser <zeno.albisser@digia.com> Zeno Albisser <zeno.albisser@digia.com>
Zeqin Chen <talonchen@tencent.com> Zeqin Chen <talonchen@tencent.com>
Zhanbang He <hezhanbang@gmail.com> Zhanbang He <hezhanbang@gmail.com>
Zhang Hao <zhanghao.m@bytedance.com> Zhang Hao <zhanghao.m@bytedance.com>
Zhang Hao <15686357310a@gmail.com> Zhang Hao <15686357310a@gmail.com>
Zhao Qin <qzmiss@gmail.com>
Zhaoming Jiang <zhaoming.jiang@intel.com> Zhaoming Jiang <zhaoming.jiang@intel.com>
Zhaoze Zhou <zhaoze.zhou@partner.samsung.com> Zhaoze Zhou <zhaoze.zhou@partner.samsung.com>
Zheda Chen <zheda.chen@intel.com> Zheda Chen <zheda.chen@intel.com>
@ -1657,7 +1592,6 @@ Zsolt Borbely <zsborbely.u-szeged@partner.samsung.com>
迷渡 <justjavac@gmail.com> 迷渡 <justjavac@gmail.com>
郑苏波 (Super Zheng) <superzheng@tencent.com> 郑苏波 (Super Zheng) <superzheng@tencent.com>
一丝 (Yisi) <yiorsi@gmail.com> 一丝 (Yisi) <yiorsi@gmail.com>
林训杰 (XunJie Lin) <wick.linxunjie@gmail.com>
# Please DO NOT APPEND here. See comments at the top of the file. # Please DO NOT APPEND here. See comments at the top of the file.
# END individuals section. # END individuals section.
@ -1669,7 +1603,6 @@ Akamai Inc. <*@akamai.com>
ARM Holdings <*@arm.com> ARM Holdings <*@arm.com>
BlackBerry Limited <*@blackberry.com> BlackBerry Limited <*@blackberry.com>
Bocoup <*@bocoup.com> Bocoup <*@bocoup.com>
Brave Software Inc. <*@brave.com>
Canonical Limited <*@canonical.com> Canonical Limited <*@canonical.com>
Cloudflare, Inc. <*@cloudflare.com> Cloudflare, Inc. <*@cloudflare.com>
CloudMosa, Inc. <*@cloudmosa.com> CloudMosa, Inc. <*@cloudmosa.com>
@ -1687,7 +1620,6 @@ EngFlow, Inc. <*@engflow.com>
Estimote, Inc. <*@estimote.com> Estimote, Inc. <*@estimote.com>
Google Inc. <*@google.com> Google Inc. <*@google.com>
Grammarly, Inc. <*@grammarly.com> Grammarly, Inc. <*@grammarly.com>
Here Inc. <*@here.io>
Hewlett-Packard Development Company, L.P. <*@hp.com> Hewlett-Packard Development Company, L.P. <*@hp.com>
HyperConnect Inc. <*@hpcnt.com> HyperConnect Inc. <*@hpcnt.com>
IBM Inc. <*@*.ibm.com> IBM Inc. <*@*.ibm.com>
@ -1717,7 +1649,6 @@ NVIDIA Corporation <*@nvidia.com>
OpenFin Inc. <*@openfin.co> OpenFin Inc. <*@openfin.co>
Opera Software ASA <*@opera.com> Opera Software ASA <*@opera.com>
Optical Tone Ltd <*@opticaltone.com> Optical Tone Ltd <*@opticaltone.com>
Palo Alto Networks, Inc. <*@paloaltonetworks.com>
Pengutronix e.K. <*@pengutronix.de> Pengutronix e.K. <*@pengutronix.de>
Quality First Software GmbH <*@qf-software.com> Quality First Software GmbH <*@qf-software.com>
Rakuten Kobo Inc. <*@kobo.com> Rakuten Kobo Inc. <*@kobo.com>

View file

@ -12,6 +12,7 @@ import("//build/config/compiler/compiler.gni")
import("//build/config/cronet/config.gni") import("//build/config/cronet/config.gni")
import("//build/config/dcheck_always_on.gni") import("//build/config/dcheck_always_on.gni")
import("//build/config/features.gni") import("//build/config/features.gni")
import("//build/config/ios/config.gni")
import("//build/config/rust.gni") import("//build/config/rust.gni")
import("//build/config/sanitizers/sanitizers.gni") import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/ui.gni") import("//build/config/ui.gni")

3164
src/DEPS

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,12 +1,17 @@
include_rules = [ include_rules = [
# `#include "partition_alloc/..."` is prefered to
# `#include "base/allocator/partition_allocator/src/partition_alloc/..."`.
"+partition_alloc",
"-base/allocator/partition_allocator",
"+third_party/ashmem", "+third_party/ashmem",
"+third_party/apple_apsl", "+third_party/apple_apsl",
"+third_party/boringssl/src/include", "+third_party/boringssl/src/include",
"+third_party/ced", "+third_party/ced",
"+third_party/fuzztest",
# We are moving the old jni_generator to jni_zero, some references will remain # We are moving the old jni_generator to jni_zero, some references will remain
# in //base. # in //base.
"+third_party/jni_zero", "+third_party/jni_zero",
"+third_party/libevent",
"+third_party/libunwindstack/src/libunwindstack/include", "+third_party/libunwindstack/src/libunwindstack/include",
"+third_party/lss", "+third_party/lss",
"+third_party/modp_b64", "+third_party/modp_b64",
@ -20,7 +25,6 @@ include_rules = [
"+third_party/test_fonts", "+third_party/test_fonts",
# JSON Deserialization. # JSON Deserialization.
"+third_party/rust/serde_json_lenient/v0_2/wrapper", "+third_party/rust/serde_json_lenient/v0_2/wrapper",
"+third_party/zlib",
# These are implicitly brought in from the root, and we don't want them. # These are implicitly brought in from the root, and we don't want them.
"-ipc", "-ipc",
@ -39,7 +43,7 @@ specific_include_rules = {
"+third_party/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h", "+third_party/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h",
], ],
# To evaluate the performance effects of using absl's flat_hash_map. # To evaluate the performance effects of using absl's flat_hash_map.
"supports_user_data\.cc": [ "supports_user_data\.h": [
"+third_party/abseil-cpp/absl/container/flat_hash_map.h", "+third_party/abseil-cpp/absl/container/flat_hash_map.h",
] ]
} }

View file

@ -4,6 +4,7 @@ set noparent
# NOTE: keep this in sync with global-owners-override@chromium.org owners # NOTE: keep this in sync with global-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes. # by emailing lsc-policy@chromium.org when this list changes.
altimin@chromium.org altimin@chromium.org
danakj@chromium.org
dcheng@chromium.org dcheng@chromium.org
fdoray@chromium.org fdoray@chromium.org
gab@chromium.org gab@chromium.org

View file

@ -9,4 +9,5 @@
# yourself, don't hesitate to seek help from another security team member! # yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience. # Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org dcheng@chromium.org
rsesek@chromium.org
tsepez@chromium.org tsepez@chromium.org

View file

@ -1,4 +1,5 @@
lizeb@chromium.org lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org wfh@chromium.org
per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS

View file

@ -5,7 +5,7 @@
#include "base/allocator/allocator_check.h" #include "base/allocator/allocator_check.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
#include "partition_alloc/shim/winheap_stubs_win.h" #include "partition_alloc/shim/winheap_stubs_win.h"

View file

@ -21,4 +21,4 @@ constexpr size_t kMaximumNumberOfObservers = 4;
} // namespace base::allocator::dispatcher::configuration } // namespace base::allocator::dispatcher::configuration
#endif // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_ #endif // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_

View file

@ -8,7 +8,7 @@
#include "base/check.h" #include "base/check.h"
#include "base/dcheck_is_on.h" #include "base/dcheck_is_on.h"
#include "base/no_destructor.h" #include "base/no_destructor.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/shim/allocator_shim.h" #include "partition_alloc/shim/allocator_shim.h"
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
@ -16,7 +16,7 @@
#endif #endif
#if PA_BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_hooks.h" // nogncheck #include "partition_alloc/partition_alloc_hooks.h"
#endif #endif
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
@ -34,7 +34,7 @@ struct Dispatcher::Impl {
void Reset() { void Reset() {
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
DCHECK([&] { DCHECK([&]() {
auto const was_set = is_initialized_check_flag_.test_and_set(); auto const was_set = is_initialized_check_flag_.test_and_set();
is_initialized_check_flag_.clear(); is_initialized_check_flag_.clear();
return was_set; return was_set;

View file

@ -5,11 +5,11 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_ #ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_ #define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#include <memory>
#include "base/allocator/dispatcher/internal/dispatcher_internal.h" #include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h" #include "base/base_export.h"
#include <memory>
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
namespace internal { namespace internal {

View file

@ -5,13 +5,13 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_ #ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_ #define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#include <tuple>
#include <utility>
#include "base/allocator/dispatcher/configuration.h" #include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/dispatcher.h" #include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/internal/tools.h" #include "base/allocator/dispatcher/internal/tools.h"
#include <tuple>
#include <utility>
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
namespace internal { namespace internal {

View file

@ -3,8 +3,7 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "base/allocator/dispatcher/internal/dispatch_data.h" #include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
namespace base::allocator::dispatcher::internal { namespace base::allocator::dispatcher::internal {

View file

@ -7,14 +7,14 @@
#include "base/base_export.h" #include "base/base_export.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_hooks.h" // nogncheck #include "partition_alloc/partition_alloc_hooks.h"
#endif #endif
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "partition_alloc/shim/allocator_shim.h" // nogncheck #include "partition_alloc/shim/allocator_shim.h"
#endif #endif
namespace base::allocator::dispatcher::internal { namespace base::allocator::dispatcher::internal {

View file

@ -13,10 +13,10 @@
#include "base/allocator/dispatcher/subsystem.h" #include "base/allocator/dispatcher/subsystem.h"
#include "base/check.h" #include "base/check.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_allocation_data.h" // nogncheck #include "partition_alloc/partition_alloc_allocation_data.h"
#endif #endif
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
@ -125,165 +125,149 @@ struct DispatcherImpl {
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC) #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC)
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
static void* AllocFn(size_t size, void* context) { static void* AllocFn(const AllocatorDispatch* self,
void* const address = size_t size,
allocator_dispatch_.next->alloc_function(size, context); void* context) {
void* const address = self->next->alloc_function(self->next, size, context);
DoNotifyAllocationForShim(address, size); DoNotifyAllocationForShim(address, size);
return address; return address;
} }
static void* AllocUncheckedFn(size_t size, void* context) { static void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
void* const address = void* const address =
allocator_dispatch_.next->alloc_unchecked_function(size, context); self->next->alloc_unchecked_function(self->next, size, context);
DoNotifyAllocationForShim(address, size); DoNotifyAllocationForShim(address, size);
return address; return address;
} }
static void* AllocZeroInitializedFn(size_t n, size_t size, void* context) { static void* AllocZeroInitializedFn(const AllocatorDispatch* self,
void* const address = size_t n,
allocator_dispatch_.next->alloc_zero_initialized_function(n, size, size_t size,
context); void* context) {
void* const address = self->next->alloc_zero_initialized_function(
self->next, n, size, context);
DoNotifyAllocationForShim(address, n * size); DoNotifyAllocationForShim(address, n * size);
return address; return address;
} }
static void* AllocAlignedFn(size_t alignment, size_t size, void* context) { static void* AllocAlignedFn(const AllocatorDispatch* self,
void* const address = allocator_dispatch_.next->alloc_aligned_function( size_t alignment,
alignment, size, context); size_t size,
void* context) {
void* const address = self->next->alloc_aligned_function(
self->next, alignment, size, context);
DoNotifyAllocationForShim(address, size); DoNotifyAllocationForShim(address, size);
return address; return address;
} }
static void* ReallocFn(void* address, size_t size, void* context) { static void* ReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
// Note: size == 0 actually performs free. // Note: size == 0 actually performs free.
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
void* const reallocated_address = void* const reallocated_address =
allocator_dispatch_.next->realloc_function(address, size, context); self->next->realloc_function(self->next, address, size, context);
DoNotifyAllocationForShim(reallocated_address, size); DoNotifyAllocationForShim(reallocated_address, size);
return reallocated_address; return reallocated_address;
} }
static void* ReallocUncheckedFn(void* address, size_t size, void* context) { static void FreeFn(const AllocatorDispatch* self,
// Note: size == 0 actually performs free. void* address,
DoNotifyFreeForShim(address); void* context) {
void* const reallocated_address =
allocator_dispatch_.next->realloc_unchecked_function(address, size,
context);
DoNotifyAllocationForShim(reallocated_address, size);
return reallocated_address;
}
static void FreeFn(void* address, void* context) {
// Note: DoNotifyFree should be called before free_function (here and in // Note: DoNotifyFree should be called before free_function (here and in
// other places). That is because observers need to handle the allocation // other places). That is because observers need to handle the allocation
// being freed before calling free_function, as once the latter is executed // being freed before calling free_function, as once the latter is executed
// the address becomes available and can be allocated by another thread. // the address becomes available and can be allocated by another thread.
// That would be racy otherwise. // That would be racy otherwise.
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
MUSTTAIL return allocator_dispatch_.next->free_function(address, context); self->next->free_function(self->next, address, context);
} }
static unsigned BatchMallocFn(size_t size, static unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
void** results, void** results,
unsigned num_requested, unsigned num_requested,
void* context) { void* context) {
unsigned const num_allocated = unsigned const num_allocated = self->next->batch_malloc_function(
allocator_dispatch_.next->batch_malloc_function(size, results, self->next, size, results, num_requested, context);
num_requested, context);
for (unsigned i = 0; i < num_allocated; ++i) { for (unsigned i = 0; i < num_allocated; ++i) {
DoNotifyAllocationForShim(results[i], size); DoNotifyAllocationForShim(results[i], size);
} }
return num_allocated; return num_allocated;
} }
static void BatchFreeFn(void** to_be_freed, static void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed, unsigned num_to_be_freed,
void* context) { void* context) {
for (unsigned i = 0; i < num_to_be_freed; ++i) { for (unsigned i = 0; i < num_to_be_freed; ++i) {
DoNotifyFreeForShim(to_be_freed[i]); DoNotifyFreeForShim(to_be_freed[i]);
} }
MUSTTAIL return allocator_dispatch_.next->batch_free_function( self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
to_be_freed, num_to_be_freed, context); context);
} }
static void FreeDefiniteSizeFn(void* address, size_t size, void* context) { static void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
MUSTTAIL return allocator_dispatch_.next->free_definite_size_function( self->next->free_definite_size_function(self->next, address, size, context);
address, size, context);
} }
static void TryFreeDefaultFn(void* address, void* context) { static void TryFreeDefaultFn(const AllocatorDispatch* self,
void* address,
void* context) {
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
MUSTTAIL return allocator_dispatch_.next->try_free_default_function( self->next->try_free_default_function(self->next, address, context);
address, context);
} }
static void* AlignedMallocFn(size_t size, size_t alignment, void* context) { static void* AlignedMallocFn(const AllocatorDispatch* self,
void* const address = allocator_dispatch_.next->aligned_malloc_function( size_t size,
size, alignment, context); size_t alignment,
void* context) {
void* const address = self->next->aligned_malloc_function(
self->next, size, alignment, context);
DoNotifyAllocationForShim(address, size); DoNotifyAllocationForShim(address, size);
return address; return address;
} }
static void* AlignedMallocUncheckedFn(size_t size, static void* AlignedReallocFn(const AllocatorDispatch* self,
size_t alignment, void* address,
void* context) {
void* const address =
allocator_dispatch_.next->aligned_malloc_unchecked_function(
size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AlignedReallocFn(void* address,
size_t size, size_t size,
size_t alignment, size_t alignment,
void* context) { void* context) {
// Note: size == 0 actually performs free. // Note: size == 0 actually performs free.
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
address = allocator_dispatch_.next->aligned_realloc_function( address = self->next->aligned_realloc_function(self->next, address, size,
address, size, alignment, context); alignment, context);
DoNotifyAllocationForShim(address, size); DoNotifyAllocationForShim(address, size);
return address; return address;
} }
static void* AlignedReallocUncheckedFn(void* address, static void AlignedFreeFn(const AllocatorDispatch* self,
size_t size, void* address,
size_t alignment, void* context) {
void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
address = allocator_dispatch_.next->aligned_realloc_unchecked_function( self->next->aligned_free_function(self->next, address, context);
address, size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void AlignedFreeFn(void* address, void* context) {
DoNotifyFreeForShim(address);
MUSTTAIL return allocator_dispatch_.next->aligned_free_function(address,
context);
} }
ALWAYS_INLINE static void DoNotifyAllocationForShim(void* address, ALWAYS_INLINE static void DoNotifyAllocationForShim(void* address,
@ -324,26 +308,23 @@ std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
template <typename... ObserverTypes> template <typename... ObserverTypes>
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = { AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
AllocFn, // alloc_function AllocFn, // alloc_function
AllocUncheckedFn, // alloc_unchecked_function AllocUncheckedFn, // alloc_unchecked_function
AllocZeroInitializedFn, // alloc_zero_initialized_function AllocZeroInitializedFn, // alloc_zero_initialized_function
AllocAlignedFn, // alloc_aligned_function AllocAlignedFn, // alloc_aligned_function
ReallocFn, // realloc_function ReallocFn, // realloc_function
ReallocUncheckedFn, // realloc_unchecked_function FreeFn, // free_function
FreeFn, // free_function nullptr, // get_size_estimate_function
nullptr, // get_size_estimate_function nullptr, // good_size_function
nullptr, // good_size_function nullptr, // claimed_address_function
nullptr, // claimed_address_function BatchMallocFn, // batch_malloc_function
BatchMallocFn, // batch_malloc_function BatchFreeFn, // batch_free_function
BatchFreeFn, // batch_free_function FreeDefiniteSizeFn, // free_definite_size_function
FreeDefiniteSizeFn, // free_definite_size_function TryFreeDefaultFn, // try_free_default_function
TryFreeDefaultFn, // try_free_default_function AlignedMallocFn, // aligned_malloc_function
AlignedMallocFn, // aligned_malloc_function AlignedReallocFn, // aligned_realloc_function
AlignedMallocUncheckedFn, // aligned_malloc_unchecked_function AlignedFreeFn, // aligned_free_function
AlignedReallocFn, // aligned_realloc_function nullptr // next
AlignedReallocUncheckedFn, // aligned_realloc_unchecked_function
AlignedFreeFn, // aligned_free_function
nullptr // next
}; };
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM) #endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)

View file

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_ #ifndef BASE_ALLOCATOR_DISPATCHER_TAGGING_H_
#define BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_ #define BASE_ALLOCATOR_DISPATCHER_TAGGING_H_
#include "partition_alloc/tagging.h" #include "partition_alloc/tagging.h"
@ -39,4 +39,4 @@ constexpr MTEMode ConvertToMTEMode(
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_ #endif // BASE_ALLOCATOR_DISPATCHER_TAGGING_H_

View file

@ -10,7 +10,7 @@
#include "base/allocator/dispatcher/memory_tagging.h" #include "base/allocator/dispatcher/memory_tagging.h"
#include "base/allocator/dispatcher/subsystem.h" #include "base/allocator/dispatcher/subsystem.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {

View file

@ -33,9 +33,8 @@ struct BASE_EXPORT ReentryGuard {
} }
ALWAYS_INLINE ~ReentryGuard() { ALWAYS_INLINE ~ReentryGuard() {
if (allowed_) [[likely]] { if (LIKELY(allowed_))
pthread_setspecific(entered_key_, nullptr); pthread_setspecific(entered_key_, nullptr);
}
} }
explicit operator bool() const noexcept { return allowed_; } explicit operator bool() const noexcept { return allowed_; }

View file

@ -24,4 +24,4 @@ enum class AllocationSubsystem {
}; };
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_ #endif // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_

View file

@ -24,4 +24,4 @@ struct DispatcherTest : public ::testing::Test {
} // namespace base::allocator::dispatcher::testing } // namespace base::allocator::dispatcher::testing
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_ #endif // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_

View file

@ -30,4 +30,4 @@ struct ObserverMock {
} // namespace testing } // namespace testing
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_ #endif // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_

View file

@ -8,14 +8,14 @@
#if USE_LOCAL_TLS_EMULATION() #if USE_LOCAL_TLS_EMULATION()
#include <sys/mman.h>
#include "base/check.h" #include "base/check.h"
#include "base/dcheck_is_on.h" #include "base/dcheck_is_on.h"
#include "base/debug/crash_logging.h" #include "base/debug/crash_logging.h"
#include "base/immediate_crash.h" #include "base/immediate_crash.h"
#include "build/build_config.h" #include "build/build_config.h"
#include <sys/mman.h>
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#include <sys/prctl.h> #include <sys/prctl.h>
#endif #endif
@ -96,7 +96,7 @@ PThreadTLSSystem& PThreadTLSSystem::operator=(PThreadTLSSystem&& other) {
bool PThreadTLSSystem::Setup( bool PThreadTLSSystem::Setup(
OnThreadTerminationFunction thread_termination_function, OnThreadTerminationFunction thread_termination_function,
std::string_view instance_id) { const std::string_view instance_id) {
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
// Initialize must happen outside of the allocation path. Therefore, it is // Initialize must happen outside of the allocation path. Therefore, it is
// secure to verify with DCHECK. // secure to verify with DCHECK.

View file

@ -17,21 +17,17 @@
#endif #endif
#if USE_LOCAL_TLS_EMULATION() #if USE_LOCAL_TLS_EMULATION()
#include <pthread.h>
#include <algorithm> #include <algorithm>
#include <atomic> #include <atomic>
#include <functional>
#include <memory> #include <memory>
#include <mutex> #include <mutex>
#include "base/base_export.h" #include "base/base_export.h"
#include "base/check.h" #include "base/check.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "partition_alloc/partition_alloc_constants.h"
#if PA_BUILDFLAG(USE_PARTITION_ALLOC) #include <pthread.h>
#include "partition_alloc/partition_alloc_constants.h" // nogncheck
#endif
#if HAS_FEATURE(thread_sanitizer) #if HAS_FEATURE(thread_sanitizer)
#define DISABLE_TSAN_INSTRUMENTATION __attribute__((no_sanitize("thread"))) #define DISABLE_TSAN_INSTRUMENTATION __attribute__((no_sanitize("thread")))
@ -115,7 +111,7 @@ class BASE_EXPORT PThreadTLSSystem {
// @param thread_termination_function An optional function which will be // @param thread_termination_function An optional function which will be
// invoked upon termination of a thread. // invoked upon termination of a thread.
bool Setup(OnThreadTerminationFunction thread_termination_function, bool Setup(OnThreadTerminationFunction thread_termination_function,
std::string_view instance_id); const std::string_view instance_id);
// Tear down the TLS system. After completing tear down, the thread // Tear down the TLS system. After completing tear down, the thread
// termination function passed to Setup will not be invoked anymore. // termination function passed to Setup will not be invoked anymore.
bool TearDownForTesting(); bool TearDownForTesting();
@ -203,7 +199,7 @@ template <typename PayloadType,
size_t AllocationChunkSize, size_t AllocationChunkSize,
bool IsDestructibleForTesting> bool IsDestructibleForTesting>
struct ThreadLocalStorage { struct ThreadLocalStorage {
explicit ThreadLocalStorage(std::string_view instance_id) explicit ThreadLocalStorage(const std::string_view instance_id)
: root_(AllocateAndInitializeChunk()) { : root_(AllocateAndInitializeChunk()) {
Initialize(instance_id); Initialize(instance_id);
} }
@ -211,7 +207,7 @@ struct ThreadLocalStorage {
// Create a new instance of |ThreadLocalStorage| using the passed allocator // Create a new instance of |ThreadLocalStorage| using the passed allocator
// and TLS system. This initializes the underlying TLS system and creates the // and TLS system. This initializes the underlying TLS system and creates the
// first chunk of data. // first chunk of data.
ThreadLocalStorage(std::string_view instance_id, ThreadLocalStorage(const std::string_view instance_id,
AllocatorType allocator, AllocatorType allocator,
TLSSystemType tls_system) TLSSystemType tls_system)
: allocator_(std::move(allocator)), : allocator_(std::move(allocator)),
@ -248,7 +244,7 @@ struct ThreadLocalStorage {
auto* slot = static_cast<SingleSlot*>(tls_system.GetThreadSpecificData()); auto* slot = static_cast<SingleSlot*>(tls_system.GetThreadSpecificData());
if (slot == nullptr) [[unlikely]] { if (UNLIKELY(slot == nullptr)) {
slot = FindAndAllocateFreeSlot(root_.load(std::memory_order_relaxed)); slot = FindAndAllocateFreeSlot(root_.load(std::memory_order_relaxed));
// We might be called in the course of handling a memory allocation. We do // We might be called in the course of handling a memory allocation. We do
@ -364,7 +360,7 @@ struct ThreadLocalStorage {
} }
// Perform common initialization during construction of an instance. // Perform common initialization during construction of an instance.
void Initialize(std::string_view instance_id) { void Initialize(const std::string_view instance_id) {
// The constructor must be called outside of the allocation path. Therefore, // The constructor must be called outside of the allocation path. Therefore,
// it is secure to verify with CHECK. // it is secure to verify with CHECK.

View file

@ -7,7 +7,7 @@
#include <mach/mach.h> #include <mach/mach.h>
#include <malloc/malloc.h> #include <malloc/malloc.h>
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/shim/early_zone_registration_constants.h" #include "partition_alloc/shim/early_zone_registration_constants.h"
// BASE_EXPORT tends to be defined as soon as anything from //base is included. // BASE_EXPORT tends to be defined as soon as anything from //base is included.

View file

@ -8,7 +8,22 @@
#include "base/strings/strcat.h" #include "base/strings/strcat.h"
#include "base/system/sys_info.h" #include "base/system/sys_info.h"
namespace base::miracle_parameter { namespace base {
namespace miracle_parameter {
namespace {
std::string GetFieldTrialParamByFeatureAsString(
const base::Feature& feature,
const std::string& param_name,
const std::string& default_value) {
const std::string value =
base::GetFieldTrialParamValueByFeature(feature, param_name);
return value.empty() ? default_value : value;
}
} // namespace
std::string GetParamNameWithSuffix(const std::string& param_name) { std::string GetParamNameWithSuffix(const std::string& param_name) {
// `base::SysInfo::AmountOfPhysicalMemoryMB()` refers to CommandLine // `base::SysInfo::AmountOfPhysicalMemoryMB()` refers to CommandLine
@ -73,4 +88,6 @@ base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
default_value)); default_value));
} }
} // namespace base::miracle_parameter } // namespace miracle_parameter
} // namespace base

View file

@ -166,7 +166,7 @@ Enum GetMiracleParameterAsEnum(
default_value, type, options) \ default_value, type, options) \
type function_name() { \ type function_name() { \
static const type value = miracle_parameter::GetMiracleParameterAsEnum( \ static const type value = miracle_parameter::GetMiracleParameterAsEnum( \
feature, param_name, default_value, base::span(options)); \ feature, param_name, default_value, base::make_span(options)); \
return value; \ return value; \
} }

View file

@ -12,29 +12,15 @@
#include "base/time/time.h" #include "base/time/time.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "build/chromecast_buildflags.h" #include "build/chromecast_buildflags.h"
#include "partition_alloc/buildflags.h" #include "build/chromeos_buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h" #include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_root.h" #include "partition_alloc/partition_root.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h" #include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/thread_cache.h" #include "partition_alloc/thread_cache.h"
namespace base::features { namespace base {
namespace features {
namespace {
static constexpr char kPAFeatureEnabledProcessesStr[] = "enabled-processes";
static constexpr char kBrowserOnlyStr[] = "browser-only";
static constexpr char kBrowserAndRendererStr[] = "browser-and-renderer";
static constexpr char kNonRendererStr[] = "non-renderer";
static constexpr char kAllProcessesStr[] = "all-processes";
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
static constexpr char kRendererOnlyStr[] = "renderer-only";
static constexpr char kAllChildProcessesStr[] = "all-child-processes";
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace
BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr, BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr,
"PartitionAllocUnretainedDanglingPtr", "PartitionAllocUnretainedDanglingPtr",
@ -46,8 +32,7 @@ constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
{UnretainedDanglingPtrMode::kDumpWithoutCrashing, {UnretainedDanglingPtrMode::kDumpWithoutCrashing,
"dump_without_crashing"}, "dump_without_crashing"},
}; };
// Note: Do not use the prepared macro as of no need for a local cache. const base::FeatureParam<UnretainedDanglingPtrMode>
constinit const FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam = { kUnretainedDanglingPtrModeParam = {
&kPartitionAllocUnretainedDanglingPtr, &kPartitionAllocUnretainedDanglingPtr,
"mode", "mode",
@ -55,10 +40,6 @@ constinit const FeatureParam<UnretainedDanglingPtrMode>
&kUnretainedDanglingPtrModeOption, &kUnretainedDanglingPtrModeOption,
}; };
// Note: DPD conflicts with no-op `free()` (see
// `base::allocator::MakeFreeNoOp()`). No-op `free()` stands down in the
// presence of DPD, but hypothetically fully launching DPD should prompt
// a rethink of no-op `free()`.
BASE_FEATURE(kPartitionAllocDanglingPtr, BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr", "PartitionAllocDanglingPtr",
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG) #if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG)
@ -72,8 +53,7 @@ constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"}, {DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogOnly, "log_only"}, {DanglingPtrMode::kLogOnly, "log_only"},
}; };
// Note: Do not use the prepared macro as of no need for a local cache. const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
constinit const FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr, &kPartitionAllocDanglingPtr,
"mode", "mode",
DanglingPtrMode::kCrash, DanglingPtrMode::kCrash,
@ -83,91 +63,73 @@ constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
{DanglingPtrType::kAll, "all"}, {DanglingPtrType::kAll, "all"},
{DanglingPtrType::kCrossTask, "cross_task"}, {DanglingPtrType::kCrossTask, "cross_task"},
}; };
// Note: Do not use the prepared macro as of no need for a local cache. const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
constinit const FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
&kPartitionAllocDanglingPtr, &kPartitionAllocDanglingPtr,
"type", "type",
DanglingPtrType::kAll, DanglingPtrType::kAll,
&kDanglingPtrTypeOption, &kDanglingPtrTypeOption,
}; };
#if PA_BUILDFLAG(USE_STARSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan,
"PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanBrowserOnly,
"PartitionAllocPCScanBrowserOnly",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan is turned on only for the renderer's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanRendererOnly,
"PartitionAllocPCScanRendererOnly",
FEATURE_DISABLED_BY_DEFAULT);
// Use a larger maximum thread cache cacheable bucket size. // Use a larger maximum thread cache cacheable bucket size.
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize, BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSize", "PartitionAllocLargeThreadCacheSize",
FEATURE_ENABLED_BY_DEFAULT); FEATURE_ENABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_INT(GetPartitionAllocLargeThreadCacheSizeValue, MIRACLE_PARAMETER_FOR_INT(
kPartitionAllocLargeThreadCacheSize, GetPartitionAllocLargeThreadCacheSizeValue,
"PartitionAllocLargeThreadCacheSizeValue", kPartitionAllocLargeThreadCacheSize,
::partition_alloc::kThreadCacheLargeSizeThreshold) "PartitionAllocLargeThreadCacheSizeValue",
::partition_alloc::ThreadCacheLimits::kLargeSizeThreshold)
MIRACLE_PARAMETER_FOR_INT( MIRACLE_PARAMETER_FOR_INT(
GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid, GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid,
kPartitionAllocLargeThreadCacheSize, kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid", "PartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid",
::partition_alloc::kThreadCacheDefaultSizeThreshold) ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold)
BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing, BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
"PartitionAllocLargeEmptySlotSpanRing", "PartitionAllocLargeEmptySlotSpanRing",
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
FEATURE_ENABLED_BY_DEFAULT);
#else
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
#endif
BASE_FEATURE(kPartitionAllocWithAdvancedChecks,
"PartitionAllocWithAdvancedChecks",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>::Option
kPartitionAllocWithAdvancedChecksEnabledProcessesOptions[] = {
{PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
kBrowserOnlyStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserAndRenderer,
kBrowserAndRendererStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kNonRenderer,
kNonRendererStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kAllProcesses,
kAllProcessesStr}};
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
kPartitionAllocWithAdvancedChecksEnabledProcessesParam{
&kPartitionAllocWithAdvancedChecks, kPAFeatureEnabledProcessesStr,
PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
&kPartitionAllocWithAdvancedChecksEnabledProcessesOptions};
BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine, BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantine", "PartitionAllocSchedulerLoopQuarantine",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
// Scheduler Loop Quarantine's per-branch capacity in bytes. // Scheduler Loop Quarantine's per-branch capacity in bytes.
// Note: Do not use the prepared macro as of no need for a local cache. const base::FeatureParam<int>
constinit const FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBranchCapacity{ kPartitionAllocSchedulerLoopQuarantineBranchCapacity{
&kPartitionAllocSchedulerLoopQuarantine, &kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBranchCapacity", 0}; "PartitionAllocSchedulerLoopQuarantineBranchCapacity", 0};
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
BASE_FEATURE_PARAM(int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity,
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBrowserUICapacity",
0);
BASE_FEATURE(kPartitionAllocZappingByFreeFlags, BASE_FEATURE(kPartitionAllocZappingByFreeFlags,
"PartitionAllocZappingByFreeFlags", "PartitionAllocZappingByFreeFlags",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory,
"PartitionAllocEventuallyZeroFreedMemory",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocFewerMemoryRegions,
"PartitionAllocFewerMemoryRegions",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocBackupRefPtr, BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr", "PartitionAllocBackupRefPtr",
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS) || \
(BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CASTOS)) || \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG)
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
@ -176,41 +138,30 @@ BASE_FEATURE(kPartitionAllocBackupRefPtr,
constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
kBackupRefPtrEnabledProcessesOptions[] = { kBackupRefPtrEnabledProcessesOptions[] = {
{BackupRefPtrEnabledProcesses::kBrowserOnly, kBrowserOnlyStr}, {BackupRefPtrEnabledProcesses::kBrowserOnly, "browser-only"},
{BackupRefPtrEnabledProcesses::kBrowserAndRenderer, {BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
kBrowserAndRendererStr}, "browser-and-renderer"},
{BackupRefPtrEnabledProcesses::kNonRenderer, kNonRendererStr}, {BackupRefPtrEnabledProcesses::kNonRenderer, "non-renderer"},
{BackupRefPtrEnabledProcesses::kAllProcesses, kAllProcessesStr}}; {BackupRefPtrEnabledProcesses::kAllProcesses, "all-processes"}};
BASE_FEATURE_ENUM_PARAM(BackupRefPtrEnabledProcesses, const base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam, kBackupRefPtrEnabledProcessesParam{
&kPartitionAllocBackupRefPtr, &kPartitionAllocBackupRefPtr, "enabled-processes",
kPAFeatureEnabledProcessesStr, BackupRefPtrEnabledProcesses::kNonRenderer,
#if PA_BUILDFLAG(IS_MAC) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64) &kBackupRefPtrEnabledProcessesOptions};
BackupRefPtrEnabledProcesses::kNonRenderer,
#else
BackupRefPtrEnabledProcesses::kAllProcesses,
#endif
&kBackupRefPtrEnabledProcessesOptions);
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = { constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"}, {BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"}, {BackupRefPtrMode::kEnabled, "enabled"},
}; };
BASE_FEATURE_ENUM_PARAM(BackupRefPtrMode, const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
kBackupRefPtrModeParam, &kPartitionAllocBackupRefPtr, "brp-mode", BackupRefPtrMode::kEnabled,
&kPartitionAllocBackupRefPtr, &kBackupRefPtrModeOptions};
"brp-mode",
BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions);
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<int> kBackupRefPtrExtraExtrasSizeParam{
&kPartitionAllocBackupRefPtr, "brp-extra-extras-size", 0};
BASE_FEATURE(kPartitionAllocMemoryTagging, BASE_FEATURE(kPartitionAllocMemoryTagging,
"PartitionAllocMemoryTagging", "PartitionAllocMemoryTagging",
#if PA_BUILDFLAG(USE_FULL_MTE) || BUILDFLAG(IS_ANDROID) #if PA_BUILDFLAG(USE_FULL_MTE)
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
@ -221,8 +172,7 @@ constexpr FeatureParam<MemtagMode>::Option kMemtagModeOptions[] = {
{MemtagMode::kSync, "sync"}, {MemtagMode::kSync, "sync"},
{MemtagMode::kAsync, "async"}}; {MemtagMode::kAsync, "async"}};
// Note: Do not use the prepared macro as of no need for a local cache. const base::FeatureParam<MemtagMode> kMemtagModeParam{
constinit const FeatureParam<MemtagMode> kMemtagModeParam{
&kPartitionAllocMemoryTagging, "memtag-mode", &kPartitionAllocMemoryTagging, "memtag-mode",
#if PA_BUILDFLAG(USE_FULL_MTE) #if PA_BUILDFLAG(USE_FULL_MTE)
MemtagMode::kSync, MemtagMode::kSync,
@ -231,30 +181,19 @@ constinit const FeatureParam<MemtagMode> kMemtagModeParam{
#endif #endif
&kMemtagModeOptions}; &kMemtagModeOptions};
constexpr FeatureParam<RetagMode>::Option kRetagModeOptions[] = {
{RetagMode::kIncrement, "increment"},
{RetagMode::kRandom, "random"},
};
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<RetagMode> kRetagModeParam{
&kPartitionAllocMemoryTagging, "retag-mode", RetagMode::kIncrement,
&kRetagModeOptions};
constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option
kMemoryTaggingEnabledProcessesOptions[] = { kMemoryTaggingEnabledProcessesOptions[] = {
{MemoryTaggingEnabledProcesses::kBrowserOnly, kBrowserOnlyStr}, {MemoryTaggingEnabledProcesses::kBrowserOnly, "browser-only"},
{MemoryTaggingEnabledProcesses::kNonRenderer, kNonRendererStr}, {MemoryTaggingEnabledProcesses::kNonRenderer, "non-renderer"},
{MemoryTaggingEnabledProcesses::kAllProcesses, kAllProcessesStr}}; {MemoryTaggingEnabledProcesses::kAllProcesses, "all-processes"}};
// Note: Do not use the prepared macro as of no need for a local cache. const base::FeatureParam<MemoryTaggingEnabledProcesses>
constinit const FeatureParam<MemoryTaggingEnabledProcesses>
kMemoryTaggingEnabledProcessesParam{ kMemoryTaggingEnabledProcessesParam{
&kPartitionAllocMemoryTagging, kPAFeatureEnabledProcessesStr, &kPartitionAllocMemoryTagging, "enabled-processes",
#if PA_BUILDFLAG(USE_FULL_MTE) #if PA_BUILDFLAG(USE_FULL_MTE)
MemoryTaggingEnabledProcesses::kAllProcesses, MemoryTaggingEnabledProcesses::kAllProcesses,
#else #else
MemoryTaggingEnabledProcesses::kNonRenderer, MemoryTaggingEnabledProcesses::kBrowserOnly,
#endif #endif
&kMemoryTaggingEnabledProcessesOptions}; &kMemoryTaggingEnabledProcessesOptions};
@ -273,15 +212,13 @@ BASE_FEATURE(kPartitionAllocPermissiveMte,
#endif #endif
); );
BASE_FEATURE(kAsanBrpDereferenceCheck, const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
"AsanBrpDereferenceCheck", &kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true};
FEATURE_ENABLED_BY_DEFAULT); const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
BASE_FEATURE(kAsanBrpExtractionCheck, &kPartitionAllocBackupRefPtr, "asan-enable-extraction-check",
"AsanBrpExtractionCheck", // Not much noise at the moment to false}; // Not much noise at the moment to enable by default.
FEATURE_DISABLED_BY_DEFAULT); // enable by default. const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
BASE_FEATURE(kAsanBrpInstantiationCheck, &kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true};
"AsanBrpInstantiationCheck",
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, switches the bucket distribution to a denser one. // If enabled, switches the bucket distribution to a denser one.
// //
@ -295,31 +232,29 @@ BASE_FEATURE(kPartitionAllocUseDenserDistribution,
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS) #endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
); );
const FeatureParam<BucketDistributionMode>::Option const base::FeatureParam<BucketDistributionMode>::Option
kPartitionAllocBucketDistributionOption[] = { kPartitionAllocBucketDistributionOption[] = {
{BucketDistributionMode::kDefault, "default"}, {BucketDistributionMode::kDefault, "default"},
{BucketDistributionMode::kDenser, "denser"}, {BucketDistributionMode::kDenser, "denser"},
}; };
// Note: Do not use the prepared macro as of no need for a local cache. const base::FeatureParam<BucketDistributionMode>
constinit const FeatureParam<BucketDistributionMode> kPartitionAllocBucketDistributionParam {
kPartitionAllocBucketDistributionParam{ &kPartitionAllocUseDenserDistribution, "mode",
&kPartitionAllocUseDenserDistribution, "mode",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS) #if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
BucketDistributionMode::kDefault, BucketDistributionMode::kDefault,
#else #else
BucketDistributionMode::kDenser, BucketDistributionMode::kDenser,
#endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS) #endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
&kPartitionAllocBucketDistributionOption}; &kPartitionAllocBucketDistributionOption
};
BASE_FEATURE(kPartitionAllocMemoryReclaimer, BASE_FEATURE(kPartitionAllocMemoryReclaimer,
"PartitionAllocMemoryReclaimer", "PartitionAllocMemoryReclaimer",
FEATURE_ENABLED_BY_DEFAULT); FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE_PARAM(TimeDelta, const base::FeatureParam<TimeDelta> kPartitionAllocMemoryReclaimerInterval = {
kPartitionAllocMemoryReclaimerInterval, &kPartitionAllocMemoryReclaimer, "interval",
&kPartitionAllocMemoryReclaimer, TimeDelta(), // Defaults to zero.
"interval", };
TimeDelta() // Defaults to zero.
);
// Configures whether we set a lower limit for renderers that do not have a main // Configures whether we set a lower limit for renderers that do not have a main
// frame, similar to the limit that is already done for backgrounded renderers. // frame, similar to the limit that is already done for backgrounded renderers.
@ -327,22 +262,52 @@ BASE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers,
"LowerPAMemoryLimitForNonMainRenderers", "LowerPAMemoryLimitForNonMainRenderers",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself.
BASE_FEATURE(kPartitionAllocPCScanMUAwareScheduler,
"PartitionAllocPCScanMUAwareScheduler",
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, PCScan frees unconditionally all quarantined objects.
// This is a performance testing feature.
BASE_FEATURE(kPartitionAllocPCScanImmediateFreeing,
"PartitionAllocPCScanImmediateFreeing",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan clears eagerly (synchronously) on free().
BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
"PartitionAllocPCScanEagerClearing",
FEATURE_DISABLED_BY_DEFAULT);
// In addition to heap, scan also the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning,
"PartitionAllocPCScanStackScanning",
#if PA_BUILDFLAG(STACK_SCAN_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif // PA_BUILDFLAG(STACK_SCAN_SUPPORTED)
);
BASE_FEATURE(kPartitionAllocDCScan,
"PartitionAllocDCScan",
FEATURE_DISABLED_BY_DEFAULT);
// Whether to straighten free lists for larger slot spans in PurgeMemory() -> // Whether to straighten free lists for larger slot spans in PurgeMemory() ->
// ... -> PartitionPurgeSlotSpan(). // ... -> PartitionPurgeSlotSpan().
BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists, BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
"PartitionAllocStraightenLargerSlotSpanFreeLists", "PartitionAllocStraightenLargerSlotSpanFreeLists",
FEATURE_ENABLED_BY_DEFAULT); FEATURE_ENABLED_BY_DEFAULT);
const FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>:: const base::FeatureParam<
Option kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = { partition_alloc::StraightenLargerSlotSpanFreeListsMode>::Option
kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
{partition_alloc::StraightenLargerSlotSpanFreeListsMode:: {partition_alloc::StraightenLargerSlotSpanFreeListsMode::
kOnlyWhenUnprovisioning, kOnlyWhenUnprovisioning,
"only-when-unprovisioning"}, "only-when-unprovisioning"},
{partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways, {partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways,
"always"}, "always"},
}; };
// Note: Do not use the prepared macro as of no need for a local cache. const base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
constinit const FeatureParam<
partition_alloc::StraightenLargerSlotSpanFreeListsMode>
kPartitionAllocStraightenLargerSlotSpanFreeListsMode = { kPartitionAllocStraightenLargerSlotSpanFreeListsMode = {
&kPartitionAllocStraightenLargerSlotSpanFreeLists, &kPartitionAllocStraightenLargerSlotSpanFreeLists,
"mode", "mode",
@ -375,11 +340,9 @@ BASE_FEATURE(kPageAllocatorRetryOnCommitFailure,
// The feature: kPartialLowEndModeOnMidRangeDevices is defined in // The feature: kPartialLowEndModeOnMidRangeDevices is defined in
// //base/features.cc. Since the following feature param is related to // //base/features.cc. Since the following feature param is related to
// PartitionAlloc, define the param here. // PartitionAlloc, define the param here.
BASE_FEATURE_PARAM(bool, const FeatureParam<bool> kPartialLowEndModeExcludePartitionAllocSupport{
kPartialLowEndModeExcludePartitionAllocSupport, &kPartialLowEndModeOnMidRangeDevices, "exclude-partition-alloc-support",
&kPartialLowEndModeOnMidRangeDevices, false};
"exclude-partition-alloc-support",
false);
#endif #endif
BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier, BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier,
@ -397,19 +360,19 @@ MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplierForAndroid,
1.) 1.)
constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta( constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
TimeDelta time_delta) { base::TimeDelta time_delta) {
return partition_alloc::internal::base::Microseconds( return partition_alloc::internal::base::Microseconds(
time_delta.InMicroseconds()); time_delta.InMicroseconds());
} }
constexpr TimeDelta FromPartitionAllocTimeDelta( constexpr base::TimeDelta FromPartitionAllocTimeDelta(
partition_alloc::internal::base::TimeDelta time_delta) { partition_alloc::internal::base::TimeDelta time_delta) {
return Microseconds(time_delta.InMicroseconds()); return base::Microseconds(time_delta.InMicroseconds());
} }
BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval, BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
"EnableConfigurableThreadCachePurgeInterval", "EnableConfigurableThreadCachePurgeInterval",
FEATURE_DISABLED_BY_DEFAULT); base::FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_TIME_DELTA( MIRACLE_PARAMETER_FOR_TIME_DELTA(
GetThreadCacheMinPurgeIntervalValue, GetThreadCacheMinPurgeIntervalValue,
@ -446,7 +409,7 @@ GetThreadCacheDefaultPurgeInterval() {
BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging, BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
"EnableConfigurableThreadCacheMinCachedMemoryForPurging", "EnableConfigurableThreadCacheMinCachedMemoryForPurging",
FEATURE_DISABLED_BY_DEFAULT); base::FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_INT( MIRACLE_PARAMETER_FOR_INT(
GetThreadCacheMinCachedMemoryForPurgingBytes, GetThreadCacheMinCachedMemoryForPurgingBytes,
@ -466,35 +429,64 @@ BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
"PartitionAllocDisableBRPInBufferPartition", "PartitionAllocDisableBRPInBufferPartition",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground, #if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
"PartitionAllocAdjustSizeWhenInForeground", BASE_FEATURE(kUsePoolOffsetFreelists,
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) "PartitionAllocUsePoolOffsetFreelists",
FEATURE_ENABLED_BY_DEFAULT); base::FEATURE_DISABLED_BY_DEFAULT);
#else
FEATURE_DISABLED_BY_DEFAULT);
#endif #endif
BASE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans, BASE_FEATURE(kPartitionAllocMakeFreeNoOpOnShutdown,
"PartitionAllocUseSmallSingleSlotSpans", "PartitionAllocMakeFreeNoOpOnShutdown",
FEATURE_ENABLED_BY_DEFAULT);
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
BASE_FEATURE(kPartitionAllocShadowMetadata,
"PartitionAllocShadowMetadata",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<ShadowMetadataEnabledProcesses>::Option constexpr FeatureParam<WhenFreeBecomesNoOp>::Option
kShadowMetadataEnabledProcessesOptions[] = { kPartitionAllocMakeFreeNoOpOnShutdownOptions[] = {
{ShadowMetadataEnabledProcesses::kRendererOnly, kRendererOnlyStr}, {WhenFreeBecomesNoOp::kBeforePreShutdown, "before-preshutdown"},
{ShadowMetadataEnabledProcesses::kAllChildProcesses, {WhenFreeBecomesNoOp::kBeforeHaltingStartupTracingController,
kAllChildProcessesStr}}; "before-halting-startup-tracing-controller"},
{
WhenFreeBecomesNoOp::kBeforeShutDownThreads,
"before-shutdown-threads",
},
{
WhenFreeBecomesNoOp::kInShutDownThreads,
"in-shutdown-threads",
},
{
WhenFreeBecomesNoOp::kAfterShutDownThreads,
"after-shutdown-threads",
},
};
// Note: Do not use the prepared macro as of no need for a local cache. const base::FeatureParam<WhenFreeBecomesNoOp>
constinit const FeatureParam<ShadowMetadataEnabledProcesses> kPartitionAllocMakeFreeNoOpOnShutdownParam{
kShadowMetadataEnabledProcessesParam{ &kPartitionAllocMakeFreeNoOpOnShutdown, "callsite",
&kPartitionAllocShadowMetadata, kPAFeatureEnabledProcessesStr, WhenFreeBecomesNoOp::kBeforeShutDownThreads,
ShadowMetadataEnabledProcesses::kRendererOnly, &kPartitionAllocMakeFreeNoOpOnShutdownOptions};
&kShadowMetadataEnabledProcessesOptions};
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace base::features void MakeFreeNoOp(WhenFreeBecomesNoOp callsite) {
CHECK(base::FeatureList::GetInstance());
// Ignoring `free()` during Shutdown would allow developers to introduce new
// dangling pointers. So we want to avoid ignoring free when it is enabled.
// Note: For now, the DanglingPointerDetector is only enabled on 5 bots, and
// on linux non-official configuration.
// TODO(b/40802063): Reconsider this decision after the experiment.
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
if (base::FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
return;
}
#endif // PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
if (base::FeatureList::IsEnabled(kPartitionAllocMakeFreeNoOpOnShutdown) &&
kPartitionAllocMakeFreeNoOpOnShutdownParam.Get() == callsite) {
allocator_shim::InsertNoOpOnFreeAllocatorShimOnShutDown();
}
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
}
BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
"PartitionAllocAdjustSizeWhenInForeground",
base::FEATURE_DISABLED_BY_DEFAULT);
} // namespace features
} // namespace base

View file

@ -9,40 +9,23 @@
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/feature_list.h" #include "base/feature_list.h"
#include "base/metrics/field_trial_params.h" #include "base/metrics/field_trial_params.h"
#include "base/strings/string_piece.h"
#include "base/time/time.h" #include "base/time/time.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h" #include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_root.h" #include "partition_alloc/partition_root.h"
namespace base::features { namespace base {
namespace features {
namespace internal { extern const BASE_EXPORT Feature kPartitionAllocUnretainedDanglingPtr;
enum class PAFeatureEnabledProcesses {
// Enabled only in the browser process.
kBrowserOnly,
// Enabled only in the browser and renderer processes.
kBrowserAndRenderer,
// Enabled in all processes, except renderer.
kNonRenderer,
// Enabled only in renderer processes.
kRendererOnly,
// Enabled in all child processes, except zygote.
kAllChildProcesses,
// Enabled in all processes.
kAllProcesses,
};
} // namespace internal
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUnretainedDanglingPtr);
enum class UnretainedDanglingPtrMode { enum class UnretainedDanglingPtrMode {
kCrash, kCrash,
kDumpWithoutCrashing, kDumpWithoutCrashing,
}; };
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(UnretainedDanglingPtrMode, extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam); kUnretainedDanglingPtrModeParam;
// See /docs/dangling_ptr.md // See /docs/dangling_ptr.md
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
@ -61,7 +44,8 @@ enum class DanglingPtrMode {
// Note: This will be extended with a single shot DumpWithoutCrashing. // Note: This will be extended with a single shot DumpWithoutCrashing.
}; };
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrMode, kDanglingPtrModeParam); extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam;
enum class DanglingPtrType { enum class DanglingPtrType {
// Act on any dangling raw_ptr released after being freed. // Act on any dangling raw_ptr released after being freed.
kAll, // (default) kAll, // (default)
@ -72,47 +56,39 @@ enum class DanglingPtrType {
// Note: This will be extended with LongLived // Note: This will be extended with LongLived
}; };
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrType, kDanglingPtrTypeParam); extern const BASE_EXPORT base::FeatureParam<DanglingPtrType>
kDanglingPtrTypeParam;
using PartitionAllocWithAdvancedChecksEnabledProcesses =
internal::PAFeatureEnabledProcesses;
#if PA_BUILDFLAG(USE_STARSCAN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan);
#endif
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize);
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValue(); BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValue();
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid(); BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocWithAdvancedChecks);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
PartitionAllocWithAdvancedChecksEnabledProcesses,
kPartitionAllocWithAdvancedChecksEnabledProcessesParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSchedulerLoopQuarantine); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSchedulerLoopQuarantine);
// Scheduler Loop Quarantine's per-thread capacity in bytes. // Scheduler Loop Quarantine's per-thread capacity in bytes.
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM( extern const BASE_EXPORT base::FeatureParam<int>
int, kPartitionAllocSchedulerLoopQuarantineBranchCapacity;
kPartitionAllocSchedulerLoopQuarantineBranchCapacity);
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
// TODO(https://crbug.com/387470567): Support more thread types.
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags);
// Eventually zero out most PartitionAlloc memory. This is not meant as a
// security guarantee, but to increase the compression ratio of PartitionAlloc's
// fragmented super pages.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory);
// Whether to make PartitionAlloc use fewer memory regions. This matters on
// Linux-based systems, where there is a per-process limit that we hit in some
// cases. See the comment in PartitionBucket::SlotSpanCOmmitedSize() for detail.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocFewerMemoryRegions);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
using BackupRefPtrEnabledProcesses = internal::PAFeatureEnabledProcesses; enum class BackupRefPtrEnabledProcesses {
// BRP enabled only in the browser process.
kBrowserOnly,
// BRP enabled only in the browser and renderer processes.
kBrowserAndRenderer,
// BRP enabled in all processes, except renderer.
kNonRenderer,
// BRP enabled in all processes.
kAllProcesses,
};
enum class BackupRefPtrMode { enum class BackupRefPtrMode {
// BRP is disabled across all partitions. Equivalent to the Finch flag being // BRP is disabled across all partitions. Equivalent to the Finch flag being
@ -131,54 +107,76 @@ enum class MemtagMode {
kAsync, kAsync,
}; };
enum class RetagMode { enum class MemoryTaggingEnabledProcesses {
// Allocations are retagged by incrementing the current tag. // Memory tagging enabled only in the browser process.
kIncrement, kBrowserOnly,
// Memory tagging enabled in all processes, except renderer.
// Allocations are retagged with a random tag. kNonRenderer,
kRandom, // Memory tagging enabled in all processes.
kAllProcesses,
}; };
using MemoryTaggingEnabledProcesses = internal::PAFeatureEnabledProcesses;
enum class BucketDistributionMode : uint8_t { enum class BucketDistributionMode : uint8_t {
kDefault, kDefault,
kDenser, kDenser,
}; };
// Parameter for 'kPartitionAllocMakeFreeNoOpOnShutdown' feature which
// controls when free() becomes a no-op during Shutdown()
enum class WhenFreeBecomesNoOp {
kBeforePreShutdown,
kBeforeHaltingStartupTracingController,
kBeforeShutDownThreads,
kInShutDownThreads,
kAfterShutDownThreads,
};
// Inserts a no-op on 'free()' allocator shim at the front of the
// dispatch chain if called from the appropriate callsite.
BASE_EXPORT void MakeFreeNoOp(WhenFreeBecomesNoOp callsite);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMakeFreeNoOpOnShutdown);
extern const BASE_EXPORT base::FeatureParam<WhenFreeBecomesNoOp>
kPartitionAllocMakeFreeNoOpOnShutdownParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrEnabledProcesses, extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam); kBackupRefPtrEnabledProcessesParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrMode, extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
kBackupRefPtrModeParam); kBackupRefPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(int,
kBackupRefPtrExtraExtrasSizeParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryTagging); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryTagging);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemtagMode, kMemtagModeParam); extern const BASE_EXPORT base::FeatureParam<MemtagMode> kMemtagModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(RetagMode, kRetagModeParam); extern const BASE_EXPORT base::FeatureParam<MemoryTaggingEnabledProcesses>
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemoryTaggingEnabledProcesses, kMemoryTaggingEnabledProcessesParam;
kMemoryTaggingEnabledProcessesParam);
// Kill switch for memory tagging. Skips any code related to memory tagging when // Kill switch for memory tagging. Skips any code related to memory tagging when
// enabled. // enabled.
BASE_EXPORT BASE_DECLARE_FEATURE(kKillPartitionAllocMemoryTagging); BASE_EXPORT BASE_DECLARE_FEATURE(kKillPartitionAllocMemoryTagging);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpDereferenceCheck); extern const BASE_EXPORT base::FeatureParam<bool>
BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpExtractionCheck); kBackupRefPtrAsanEnableDereferenceCheckParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpInstantiationCheck); extern const BASE_EXPORT base::FeatureParam<bool>
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BucketDistributionMode, kBackupRefPtrAsanEnableExtractionCheckParam;
kPartitionAllocBucketDistributionParam); extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableInstantiationCheckParam;
extern const BASE_EXPORT base::FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers); BASE_EXPORT BASE_DECLARE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanMUAwareScheduler);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanStackScanning);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDCScan);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanImmediateFreeing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanEagerClearing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(TimeDelta, extern const BASE_EXPORT base::FeatureParam<TimeDelta>
kPartitionAllocMemoryReclaimerInterval); kPartitionAllocMemoryReclaimerInterval;
BASE_EXPORT BASE_DECLARE_FEATURE( BASE_EXPORT BASE_DECLARE_FEATURE(
kPartitionAllocStraightenLargerSlotSpanFreeLists); kPartitionAllocStraightenLargerSlotSpanFreeLists);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM( extern const BASE_EXPORT
partition_alloc::StraightenLargerSlotSpanFreeListsMode, base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
kPartitionAllocStraightenLargerSlotSpanFreeListsMode); kPartitionAllocStraightenLargerSlotSpanFreeListsMode;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);
@ -187,9 +185,8 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPageAllocatorRetryOnCommitFailure);
#endif #endif
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM( extern const base::FeatureParam<bool>
bool, kPartialLowEndModeExcludePartitionAllocSupport;
kPartialLowEndModeExcludePartitionAllocSupport);
#endif #endif
BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier); BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier);
@ -210,24 +207,18 @@ BASE_EXPORT int GetThreadCacheMinCachedMemoryForPurgingBytes();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition);
// This feature is additionally gated behind a buildflag because
// pool offset freelists cannot be represented when PartitionAlloc uses
// 32-bit pointers.
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
BASE_EXPORT BASE_DECLARE_FEATURE(kUsePoolOffsetFreelists);
#endif
// When set, partitions use a larger ring buffer and free memory less // When set, partitions use a larger ring buffer and free memory less
// aggressively when in the foreground. // aggressively when in the foreground.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground);
// When enabled, uses a more nuanced heuristic to determine if slot } // namespace features
// spans can be treated as "single-slot." } // namespace base
//
// See also: https://crbug.com/333443437
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans);
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
using ShadowMetadataEnabledProcesses = internal::PAFeatureEnabledProcesses;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocShadowMetadata);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(ShadowMetadataEnabledProcesses,
kShadowMetadataEnabledProcessesParam);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace base::features
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

File diff suppressed because it is too large Load diff

View file

@ -14,12 +14,16 @@
#include "base/synchronization/lock.h" #include "base/synchronization/lock.h"
#include "base/task/sequenced_task_runner.h" #include "base/task/sequenced_task_runner.h"
#include "base/thread_annotations.h" #include "base/thread_annotations.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/thread_cache.h" #include "partition_alloc/thread_cache.h"
namespace base::allocator { namespace base::allocator {
#if PA_BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter();
#endif
// Starts a periodic timer on the current thread to purge all thread caches. // Starts a periodic timer on the current thread to purge all thread caches.
BASE_EXPORT void StartThreadCachePeriodicPurge(); BASE_EXPORT void StartThreadCachePeriodicPurge();
@ -37,21 +41,12 @@ BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();
BASE_EXPORT void InstallDanglingRawPtrChecks(); BASE_EXPORT void InstallDanglingRawPtrChecks();
BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks(); BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();
// Once called, makes `free()` do nothing. This is done to reduce
// shutdown hangs on CrOS.
// Does nothing if Dangling Pointer Detector (`docs/dangling_ptr.md`)
// is not active.
// Does nothing if allocator shim support is not built.
BASE_EXPORT void MakeFreeNoOp();
// Allows to re-configure PartitionAlloc at run-time. // Allows to re-configure PartitionAlloc at run-time.
class BASE_EXPORT PartitionAllocSupport { class BASE_EXPORT PartitionAllocSupport {
public: public:
struct BrpConfiguration { struct BrpConfiguration {
bool enable_brp = false; bool enable_brp = false;
bool process_affected_by_brp_flag = false;
// TODO(https://crbug.com/371135823): Remove after the investigation.
size_t extra_extras_size = 0;
}; };
// Reconfigure* functions re-configure PartitionAlloc. It is impossible to // Reconfigure* functions re-configure PartitionAlloc. It is impossible to
@ -86,9 +81,7 @@ class BASE_EXPORT PartitionAllocSupport {
void ReconfigureAfterTaskRunnerInit(const std::string& process_type); void ReconfigureAfterTaskRunnerInit(const std::string& process_type);
// |has_main_frame| tells us if the renderer contains a main frame. // |has_main_frame| tells us if the renderer contains a main frame.
// The default value is intended for other process types, where the parameter void OnForegrounded(bool has_main_frame);
// does not make sense.
void OnForegrounded(bool has_main_frame = false);
void OnBackgrounded(); void OnBackgrounded();
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) #if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
@ -107,11 +100,6 @@ class BASE_EXPORT PartitionAllocSupport {
// For calling from within third_party/blink/. // For calling from within third_party/blink/.
static bool ShouldEnableMemoryTaggingInRendererProcess(); static bool ShouldEnableMemoryTaggingInRendererProcess();
// Returns true if PA advanced checks should be enabled if available for the
// given process type. May be called multiple times per process.
static bool ShouldEnablePartitionAllocWithAdvancedChecks(
const std::string& process_type);
private: private:
PartitionAllocSupport(); PartitionAllocSupport();
@ -126,7 +114,7 @@ class BASE_EXPORT PartitionAllocSupport {
#if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \ #if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
size_t largest_cached_size_ = size_t largest_cached_size_ =
::partition_alloc::kThreadCacheDefaultSizeThreshold; ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold;
#endif #endif
}; };

View file

@ -1,7 +0,0 @@
---
Checks: 'google-build-namespaces,
readability-redundant-smartptr-get,
readability-static-accessed-through-instance'
InheritParentConfig: true
HeaderFilterRegex: 'partition_alloc/*'
...

View file

@ -1,8 +0,0 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is partition_alloc root GN configuration. It is used when built as a
# standalone project. This is not used in production.
buildconfig = "//gn/BUILDCONFIG.gn"

View file

@ -11,14 +11,8 @@ group("buildflags") {
public_deps = [ "src/partition_alloc:buildflags" ] public_deps = [ "src/partition_alloc:buildflags" ]
} }
if (use_partition_alloc && is_clang_or_gcc) { if (is_clang_or_gcc) {
group("partition_alloc") { group("partition_alloc") {
public_deps = [ "src/partition_alloc:partition_alloc" ] public_deps = [ "src/partition_alloc:partition_alloc" ]
} }
} }
if (use_allocator_shim) {
group("allocator_shim") {
public_deps = [ "src/partition_alloc:allocator_shim" ]
}
}

View file

@ -9,49 +9,36 @@ noparent = True
# `partition_alloc` can depend only on itself, via its `include_dirs`. # `partition_alloc` can depend only on itself, via its `include_dirs`.
include_rules = [ "+partition_alloc" ] include_rules = [ "+partition_alloc" ]
# TODO(crbug.com/40158212): Depending on what is tested, split the tests in
# between chromium and partition_alloc. Remove those exceptions:
specific_include_rules = { specific_include_rules = {
# Dependencies on //testing: ".*_(perf|unit)test\.cc$": [
".*_(perf|unit)?test.*\.(h|cc)": [ "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
"+base/allocator/dispatcher/dispatcher.h",
"+base/debug/allocation_trace.h",
"+base/debug/debugging_buildflags.h",
"+base/debug/proc_maps_linux.h",
"+base/system/sys_info.h",
"+base/test/gtest_util.h",
"+base/timer/lap_timer.h",
"+base/win/windows_version.h",
"+testing/gmock/include/gmock/gmock.h", "+testing/gmock/include/gmock/gmock.h",
"+testing/gtest/include/gtest/gtest.h", "+testing/gtest/include/gtest/gtest.h",
"+testing/perf/perf_result_reporter.h", "+testing/perf/perf_result_reporter.h",
], ],
"gtest_util.h": [ "extended_api\.cc$": [
"+testing/gtest/include/gtest/gtest.h",
],
# Dependencies on //base:
"extended_api\.cc": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h", "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
], ],
"partition_alloc_perftest\.cc": [ "raw_(ptr|ref)_unittest\.cc$": [
"+base/allocator/dispatcher/dispatcher.h", "+base",
"+base/debug/allocation_trace.h", "+third_party/abseil-cpp/absl/types/optional.h",
"+base/debug/debugging_buildflags.h", "+third_party/abseil-cpp/absl/types/variant.h",
"+base/timer/lap_timer.h",
], ],
"partition_lock_perftest\.cc": [ "raw_ptr_test_support\.h$": [
"+base/timer/lap_timer.h", "+testing/gmock/include/gmock/gmock.h",
], "+third_party/abseil-cpp/absl/types/optional.h",
"raw_ptr_unittest\.cc": [
"+base/allocator/partition_alloc_features.h",
"+base/allocator/partition_alloc_support.h",
"+base/cpu.h",
"+base/debug/asan_service.h",
"+base/metrics/histogram_base.h",
"+base/test/bind.h",
"+base/test/gtest_util.h",
"+base/test/memory/dangling_ptr_instrumentation.h",
"+base/test/scoped_feature_list.h",
"+base/types/to_address.h",
],
"raw_ref_unittest\.cc": [
"+base/debug/asan_service.h",
"+base/memory/raw_ptr_asan_service.h",
"+base/test/gtest_util.h",
], ],
# TODO(https://crbug.com/1508847): Remove //build dependency.
"build_config.h$": [ "+build/build_config.h" ],
} }
# In the context of a module-level DEPS, the `deps` variable must be defined. # In the context of a module-level DEPS, the `deps` variable must be defined.

View file

@ -1,3 +1,4 @@
bartekn@chromium.org
haraken@chromium.org haraken@chromium.org
keishi@chromium.org keishi@chromium.org
lizeb@chromium.org lizeb@chromium.org

View file

@ -9,15 +9,8 @@ for more details on the presubmit API built into depot_tools.
PRESUBMIT_VERSION = '2.0.0' PRESUBMIT_VERSION = '2.0.0'
# This is the base path of the partition_alloc directory when stored inside the
# chromium repository. PRESUBMIT.py is executed from chromium.
_PARTITION_ALLOC_BASE_PATH = 'base/allocator/partition_allocator/src/' _PARTITION_ALLOC_BASE_PATH = 'base/allocator/partition_allocator/src/'
# Pattern matching C/C++ source files, for use in allowlist args.
_SOURCE_FILE_PATTERN = r'.*\.(h|hpp|c|cc|cpp)$'
# Similar pattern, matching GN files.
_BUILD_FILE_PATTERN = r'.*\.(gn|gni)$'
# This is adapted from Chromium's PRESUBMIT.py. The differences are: # This is adapted from Chromium's PRESUBMIT.py. The differences are:
# - Base path: It is relative to the partition_alloc's source directory instead # - Base path: It is relative to the partition_alloc's source directory instead
@ -95,155 +88,56 @@ def CheckForIncludeGuards(input_api, output_api):
return errors return errors
# In .gn and .gni files, check there are no unexpected dependencies on files def CheckBuildConfigMacrosWithoutInclude(input_api, output_api):
# located outside of the partition_alloc repository. # Excludes OS_CHROMEOS, which is not defined in build_config.h.
# macro_re = input_api.re.compile(
# This is important, because partition_alloc has no CQ bots on its own, but only r'^\s*#(el)?if.*\bdefined\(((COMPILER_|ARCH_CPU_|WCHAR_T_IS_)[^)]*)')
# through the chromium's CQ. include_re = input_api.re.compile(
# r'^#include\s+"partition_alloc/build_config.h"',
# Only //build_overrides/ is allowed, as it provides embedders, a way to input_api.re.MULTILINE)
# overrides the default build settings and forward the dependencies to extension_re = input_api.re.compile(r'\.[a-z]+$')
# partition_alloc.
def CheckNoExternalImportInGn(input_api, output_api):
# Match and capture <path> from import("<path>").
import_re = input_api.re.compile(r'^ *import\("([^"]+)"\)')
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
files_to_skip=[],
files_to_check=[_BUILD_FILE_PATTERN])
errors = [] errors = []
for f in input_api.AffectedSourceFiles(sources): config_h_file = input_api.os_path.join('build', 'build_config.h')
for line_number, line in f.ChangedContents(): for f in input_api.AffectedFiles(include_deletes=False):
match = import_re.search(line) # The build-config macros are allowed to be used in build_config.h
if not match: # without including itself.
continue if f.LocalPath() == config_h_file:
import_path = match.group(1) continue
if import_path.startswith('//build_overrides/'): if not f.LocalPath().endswith(
continue ('.h', '.c', '.cc', '.cpp', '.m', '.mm')):
if not import_path.startswith('//'): continue
continue;
errors.append(output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallow external import: %s' %
(f.LocalPath(), line_number + 1, import_path)))
return errors;
# partition_alloc still supports C++17, because Skia still uses C++17. found_line_number = None
def CheckCpp17CompatibleHeaders(input_api, output_api): found_macro = None
CPP_20_HEADERS = [ all_lines = input_api.ReadFile(f, 'r').splitlines()
"barrier", for line_num, line in enumerate(all_lines):
"bit", match = macro_re.search(line)
#"compare", Three-way comparison may be used under appropriate guards. if match:
"format", found_line_number = line_num
"numbers", found_macro = match.group(2)
"ranges", break
"semaphore", if not found_line_number:
"source_location", continue
"span",
"stop_token",
"syncstream",
"version",
]
CPP_23_HEADERS = [ found_include_line = -1
"expected", for line_num, line in enumerate(all_lines):
"flat_map", if include_re.search(line):
"flat_set", found_include_line = line_num
"generator", break
"mdspan", if found_include_line >= 0 and found_include_line < found_line_number:
"print", continue
"spanstream",
"stacktrace",
"stdatomic.h",
"stdfloat",
]
sources = lambda affected_file: input_api.FilterSourceFile( if not f.LocalPath().endswith('.h'):
affected_file, primary_header_path = extension_re.sub('.h', f.AbsoluteLocalPath())
# compiler_specific.h may use these headers in guarded ways. try:
files_to_skip=[ content = input_api.ReadFile(primary_header_path, 'r')
r'.*partition_alloc_base/augmentations/compiler_specific\.h' if include_re.search(content):
],
files_to_check=[_SOURCE_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
# for line_number, line in f.ChangedContents():
for line_number, line in enumerate(f.NewContents()):
for header in CPP_20_HEADERS:
if not "#include <%s>" % header in line:
continue continue
errors.append( except IOError:
output_api.PresubmitError( pass
'%s:%d\nPartitionAlloc disallows C++20 headers: <%s>' errors.append('%s:%d %s macro is used without first including '
% (f.LocalPath(), line_number + 1, header))) 'partition_alloc/build_config.h.' %
for header in CPP_23_HEADERS: (f.LocalPath(), found_line_number, found_macro))
if not "#include <%s>" % header in line: if errors:
continue return [output_api.PresubmitPromptWarning('\n'.join(errors))]
errors.append( return []
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++23 headers: <%s>'
% (f.LocalPath(), line_number + 1, header)))
return errors
def CheckCpp17CompatibleKeywords(input_api, output_api):
CPP_20_KEYWORDS = [
"concept",
"consteval",
"constinit",
"co_await",
"co_return",
"co_yield",
"requires",
"std::hardware_",
"std::is_constant_evaluated",
"std::bit_cast",
"std::midpoint",
"std::to_array",
]
# Note: C++23 doesn't introduce new keywords.
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
# compiler_specific.h may use these keywords in guarded macros.
files_to_skip=[r'.*partition_alloc_base/compiler_specific\.h'],
files_to_check=[_SOURCE_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
for line_number, line in f.ChangedContents():
for keyword in CPP_20_KEYWORDS:
if not keyword in line:
continue
# Skip if part of a comment
if '//' in line and line.index('//') < line.index(keyword):
continue
# Make sure there are word separators around the keyword:
regex = r'\b%s\b' % keyword
if not input_api.re.search(regex, line):
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++20 keywords: %s'
% (f.LocalPath(), line_number + 1, keyword)))
return errors
# Check `NDEBUG` is not used inside partition_alloc. We prefer to use the
# buildflags `#if PA_BUILDFLAG(IS_DEBUG)` instead.
def CheckNoNDebug(input_api, output_api):
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
files_to_skip=[],
files_to_check=[_SOURCE_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
for line_number, line in f.ChangedContents():
if 'NDEBUG' in line:
errors.append(output_api.PresubmitError('%s:%d\nPartitionAlloc'
% (f.LocalPath(), line_number + 1)
+ 'disallows NDEBUG, use PA_BUILDFLAG(IS_DEBUG) instead'))
return errors

View file

@ -119,7 +119,7 @@ partition page that holds metadata (32B struct per partition page).
of each super page). of each super page).
* In some configurations, PartitionAlloc stores more metadata than can * In some configurations, PartitionAlloc stores more metadata than can
fit in the one system page at the front. These are the bitmaps for fit in the one system page at the front. These are the bitmaps for
`MTECheckedPtr<T>`, and they are relegated to the head of StarScan and `MTECheckedPtr<T>`, and they are relegated to the head of
what would otherwise be usable space for slot spans. One, both, or what would otherwise be usable space for slot spans. One, both, or
none of these bitmaps may be present, depending on build none of these bitmaps may be present, depending on build
configuration, runtime configuration, and type of allocation. configuration, runtime configuration, and type of allocation.

View file

@ -0,0 +1,9 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file will be used to check out PartitionAlloc and to build it as
# standalone library. In this case, PartitionAlloc needs to define
# build_with_chromium. If building PartitionAlloc as a part of chromium,
# chromium will provide build_with_chromium=true.
build_with_chromium = false

View file

@ -2,8 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
# By definition, PartitionAlloc standalone builds outside of chromium. import("//build_overrides/build.gni")
build_with_chromium = false
# This is the default build configuration when building PartitionAlloc # This is the default build configuration when building PartitionAlloc
# as a standalone library. # as a standalone library.
@ -15,13 +14,9 @@ use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false enable_backup_ref_ptr_support_default = false
enable_backup_ref_ptr_slow_checks_default = false enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false enable_dangling_raw_ptr_checks_default = false
enable_ios_corruption_hardening_default = false assert_cpp20_default = true
# This is the default build configuration for pointers/raw_ptr*. # This is the default build configuration for pointers/raw_ptr*.
raw_ptr_zero_on_construct_default = true raw_ptr_zero_on_construct_default = true
raw_ptr_zero_on_move_default = true raw_ptr_zero_on_move_default = true
raw_ptr_zero_on_destruct_default = false raw_ptr_zero_on_destruct_default = false
# PartitionAlloc needs to support cpp17 for standalone builds, as long as Skia
# supports it.
assert_cpp20_default = false

View file

@ -169,7 +169,7 @@ tracking a non-contiguous set of allocations using a bitmap.
The usable area of a super page in which slot spans The usable area of a super page in which slot spans
reside. While generally this means "everything between the first reside. While generally this means "everything between the first
and last guard partition pages in a super page," the presence of and last guard partition pages in a super page," the presence of
other metadata can bump the starting offset other metadata (e.g. StarScan bitmaps) can bump the starting offset
forward. While this term is entrenched in the code, the team forward. While this term is entrenched in the code, the team
considers it suboptimal and is actively looking for a replacement. considers it suboptimal and is actively looking for a replacement.

View file

@ -1,106 +0,0 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is the BUILDCONFIG for building partition_alloc as a standalone project.
#
# The config is based on:
# - skia: //gn/BUILDCONFIG.gn
# - chromium: //build/config/BUILDCONFIG.gn
build_with_chromium = false
is_asan = false
# It's best to keep the names and defaults of is_foo flags consistent with:
# - Chrome
# - Skia.
declare_args() {
is_official_build = false
is_component_build = false
dcheck_always_on = true
}
declare_args() {
is_debug = !is_official_build
}
# Platform detection defaults:
if (target_os == "") {
target_os = host_os
}
if (current_os == "") {
current_os = target_os
}
if (target_cpu == "") {
target_cpu = host_cpu
}
if (target_cpu == "x86_64") {
target_cpu = "x64"
}
if (current_cpu == "") {
current_cpu = target_cpu
}
is_android = current_os == "android"
is_chromeos = false
is_fuchsia = current_os == "fuchsia"
is_ios = current_os == "ios"
is_linux = current_os == "linux"
is_mac = current_os == "mac"
is_nacl = false
is_win = current_os == "win" || current_os == "winuwp"
is_cast_android = false
is_castos = false
is_cronet_build = false
enable_expensive_dchecks = false
dcheck_is_configurable = false
can_unwind_with_frame_pointers = false
is_posix = !is_win && !is_fuchsia
is_apple = is_mac || is_ios
# TODO(crbug.com/41481467): Consider expanding the standalone configuration for
# additional OSes.
assert(is_linux, "PartitionAlloc standalone only support Linux for now")
is_clang = true
# A component is either:
# - A static library (is_component_build=false)
# - A shared library (is_component_build=true)
template("component") {
if (is_component_build) {
_component_mode = "shared_library"
} else {
_component_mode = "static_library"
}
target(_component_mode, target_name) {
forward_variables_from(invoker, "*")
}
}
# Default configs
default_configs = [
"//gn/partition_alloc:default",
"//gn/partition_alloc:no_exceptions",
"//gn/partition_alloc:no_rtti",
]
if (!is_debug) {
default_configs += [
"//gn/partition_alloc:optimize",
"//gn/partition_alloc:NDEBUG",
]
}
# GCC-like toolchains, including Clang.
set_default_toolchain("//gn/toolchain:clang")
default_toolchain_name = "clang"
set_defaults("source_set") {
configs = default_configs
}
set_defaults("component") {
configs = default_configs
}

View file

@ -1,2 +0,0 @@
arthursonzogni@chromium.org
tasak@google.com

View file

@ -1,33 +0,0 @@
# PartitionAlloc standalone GN config
This directory contains a GN configuration to build partition_alloc as a
standalone library.
This is not an official product that is supported by the Chromium project. There
are no guarantees that this will work in the future, or that it will work in
all configurations. There are no commit queue or trybots using it.
This is useful for verifying that partition_alloc can be built as a library, and
discover the formal dependencies that partition_alloc has on the rest of the
Chromium project. This is not intended to be used in production code, and is not
This is also provided as a convenience for chromium developers working on
partition_alloc who want to iterate on partition_alloc without having to build
the entire Chromium project.
/!\ This is under construction. /!\
## Building
```sh
gn gen out/Default
autoninja -C out/Default
```
## Supported configurations:
### Platforms
- Linux
### Toolchains
- Clang

View file

@ -1,25 +0,0 @@
#!/usr/bin/env python3
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copied from Skia's //gn/cp.py
import os
import shutil
import sys
src, dst = sys.argv[1:]
if os.path.exists(dst):
if os.path.isdir(dst):
shutil.rmtree(dst)
else:
os.remove(dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy2(src, dst)
#work around https://github.com/ninja-build/ninja/issues/1554
os.utime(dst, None)

View file

@ -1,45 +0,0 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
config("default") {
asmflags = []
cflags = []
cflags = [
"-Wno-return-type", # TODO(crbug.com/41481467): Fix this warning.
"-Wno-invalid-offsetof", # TODO(crbug.com/41481467): Fix this warning.
"-fstrict-aliasing",
"-fPIC",
"-fvisibility=hidden",
]
cflags_cc = [
"-std=c++17",
"-fvisibility-inlines-hidden",
]
cflags_objcc = cflags_cc
defines = []
ldflags = []
libs = [ "pthread" ]
# TODO(crbug.com/41481467): Consider creating a bot running partition_alloc
# with extra flags enforced only in the standalone configuration. Then we can
# remove the extra warnings when embedded.
}
config("no_exceptions") {
cflags_cc = [ "-fno-exceptions" ]
cflags_objcc = cflags_cc
}
config("no_rtti") {
cflags_cc = [ "-fno-rtti" ]
cflags_objcc = cflags_cc
}
config("optimize") {
cflags = [ "-O3" ]
}
config("NDEBUG") {
defines = [ "NDEBUG" ]
}

View file

@ -1,18 +0,0 @@
#!/usr/bin/env python3
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copied from Skia's //gn/rm.py
import os
import shutil
import sys
dst, = sys.argv[1:]
if os.path.exists(dst):
if os.path.isdir(dst):
shutil.rmtree(dst)
else:
os.remove(dst)

View file

@ -1,61 +0,0 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
toolchain("clang") {
ar = "llvm-ar"
cc = "clang"
cxx = "clang++"
link = "clang++"
tool("cc") {
depfile = "{{output}}.d"
command = "$cc -MD -MF $depfile {{defines}} {{include_dirs}} {{cflags}} {{cflags_c}} -c {{source}} -o {{output}}"
depsformat = "gcc"
outputs =
[ "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o" ]
description = "CC {{source}}"
}
tool("cxx") {
depfile = "{{output}}.d"
command = "$cxx -MD -MF $depfile {{defines}} {{include_dirs}} {{cflags}} {{cflags_cc}} -c {{source}} -o {{output}}"
depsformat = "gcc"
outputs =
[ "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o" ]
description = "CXX {{source}}"
}
tool("alink") {
rspfile = "{{output}}.rsp"
rspfile_content = "{{inputs}}"
rm_py = rebase_path("../rm.py")
command =
"python3 \"$rm_py\" \"{{output}}\" && $ar rcs {{output}} @$rspfile"
outputs = [ "{{root_out_dir}}/{{target_output_name}}{{output_extension}}" ]
default_output_extension = ".a"
output_prefix = "lib"
description = "LINK (static) {{output}}"
}
tool("solink") {
soname = "{{target_output_name}}{{output_extension}}"
rpath = "-Wl,-soname,$soname"
rspfile = "{{output}}.rsp"
rspfile_content = "{{inputs}}"
command = "$link -shared {{ldflags}} @$rspfile {{frameworks}} {{solibs}} {{libs}} $rpath -o {{output}}"
outputs = [ "{{root_out_dir}}/$soname" ]
output_prefix = "lib"
default_output_extension = ".so"
description = "LINK (shared) {{output}}"
}
tool("stamp") {
command = "touch {{output}}"
description = "STAMP {{output}}"
}
}

View file

@ -2,58 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
import("//build/config/cronet/config.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/partition_alloc.gni") import("//build_overrides/partition_alloc.gni")
# -----------------------------------------------------------------------------
# Note on the use of `xxx_default` variable in partition_alloc.
#
# GN provides default_args() instruction. It is meant to be used by embedders,
# to override the default args declared by the embeddees (e.g. partition_alloc).
# This is the intended way to use GN. It properly interacts with the args.gn
# user's file.
#
# Unfortunately, Chrome and others embedders aren't using it. Instead, they
# expect embeddees to import global '.gni' file from the embedder, e.g.
# `//build_overrides/partition_alloc.gni`. This file sets some `xxx_default`
# variable that will be transferred to the declared args. For instance
# a library would use:
# ```
# import("//build_overrides/library.gni")
# declare_args() {
# xxx = xxx_default
# }
# ```
#
# We don't really want to break embedders when introducing new args. Ideally,
# We would have liked to have defaults for default variables. That would be
# a recursive problem. To resolve it, we sometimes use the `defined(...)`
# instruction to check if the embedder has defined the `xxx_default` variable or
# not.
#
# In general, we should aim to support the embedders that are using GN normally,
# and avoid requiring them to define `xxx_default` in the `//build_overrides`
# -----------------------------------------------------------------------------
# Some embedders uses `is_debug`, it can be used to set the default value of
# `partition_alloc_is_debug_default`.
if (!defined(partition_alloc_is_debug_default)) {
if (defined(is_debug)) {
partition_alloc_is_debug_default = is_debug
} else {
partition_alloc_is_debug_default = false
}
}
# Some embedders uses `dcheck_always_on`, it can be used to set the default
# value of `partition_alloc_dcheck_always_on_default`.
if (!defined(partition_alloc_dcheck_always_on_default)) {
if (defined(dcheck_always_on)) {
partition_alloc_dcheck_always_on_default = dcheck_always_on
} else {
partition_alloc_dcheck_always_on_default = false
}
}
# PartitionAlloc have limited support for MSVC's cl.exe compiler. It can only # PartitionAlloc have limited support for MSVC's cl.exe compiler. It can only
# access the generate "buildflags" and the "raw_ptr" definitions implemented # access the generate "buildflags" and the "raw_ptr" definitions implemented
# with RawPtrNoOpImpl. Everything else is considered not supported. # with RawPtrNoOpImpl. Everything else is considered not supported.
@ -78,16 +30,8 @@ if (is_nacl) {
assert(false, "Unknown CPU: $current_cpu") assert(false, "Unknown CPU: $current_cpu")
} }
# Makes the number of empty slot spans that can remain committed larger in # Increases the size of the empty slot span ring.
# foreground mode compared to background mode use_large_empty_slot_span_ring = is_mac
# (see `PartitionRoot::AdjustFor(Background|Foreground)`).
#
# Foreground/background modes are used by default on macOS and Windows so this
# must be true on these platforms. It's also true on other platforms to allow
# experiments.
#
# TODO(crbug.com/329199197): Clean this up when experiments are complete.
use_large_empty_slot_span_ring = true
# Disables for Android ARM64 because it actually requires API 31+. # Disables for Android ARM64 because it actually requires API 31+.
# See partition_alloc/tagging.cc: # See partition_alloc/tagging.cc:
@ -98,12 +42,6 @@ has_memory_tagging =
current_cpu == "arm64" && is_clang && !is_asan && is_linux && current_os != "openwrt" current_cpu == "arm64" && is_clang && !is_asan && is_linux && current_os != "openwrt"
declare_args() { declare_args() {
# Debug configuration.
partition_alloc_is_debug = partition_alloc_is_debug_default
# Enable PA_DCHECKs in PartitionAlloc in release mode.
partition_alloc_dcheck_always_on = partition_alloc_dcheck_always_on_default
# Causes all the allocations to be routed via allocator_shim.cc. Usually, # Causes all the allocations to be routed via allocator_shim.cc. Usually,
# the allocator shim will, in turn, route them to PartitionAlloc, but # the allocator shim will, in turn, route them to PartitionAlloc, but
# other allocators are also supported by the allocator shim. # other allocators are also supported by the allocator shim.
@ -141,63 +79,42 @@ if (is_nacl) {
} }
declare_args() { declare_args() {
# Turns on compiler optimizations in PartitionAlloc in Debug build.
# If enabling PartitionAlloc-Everywhere in Debug build for tests in Debug
# build, since all memory allocations and deallocations are executed by
# non-optimized PartitionAlloc, chrome (including tests) will be much
# slower. This will cause debug trybots' timeouts. If we want to debug
# PartitionAlloc itself, use partition_alloc_optimized_debug=false.
# Otherwise, use partition_alloc_optimized_debug=true to enable optimized
# PartitionAlloc.
partition_alloc_optimized_debug = true
# PartitionAlloc-Everywhere (PA-E). Causes allocator_shim.cc to route # PartitionAlloc-Everywhere (PA-E). Causes allocator_shim.cc to route
# calls to PartitionAlloc, rather than some other platform allocator. # calls to PartitionAlloc, rather than some other platform allocator.
use_partition_alloc_as_malloc = use_partition_alloc && use_allocator_shim && use_partition_alloc_as_malloc = use_partition_alloc && use_allocator_shim &&
use_partition_alloc_as_malloc_default use_partition_alloc_as_malloc_default
} }
declare_args() { assert(!use_allocator_shim || (is_android || is_apple || is_chromeos ||
# Whether PartitionAlloc dispatch can be replaced with another dispatch with is_fuchsia || is_linux || is_win),
# some more safety checks at runtime or not. When true, the allocator shim "The allocator shim does not (yet) support the platform.")
# provides an extended API to swap PartitionAlloc.
enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support =
use_partition_alloc_as_malloc
}
declare_args() {
# This is a flag for binary experiment on iOS. When BRP for iOS is enabled,
# we see some un-actionable `DoubleFreeOrCorruptionDetected` crashes.
# This flag enables some extra `CHECK`s to get actionable crash reports.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
enable_ios_corruption_hardening = use_partition_alloc_as_malloc && is_ios &&
enable_ios_corruption_hardening_default
}
assert(
!enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support || use_partition_alloc_as_malloc,
"PartitionAlloc with advanced checks requires PartitionAlloc itself.")
assert(!use_allocator_shim || !is_nacl,
"The allocator shim supports every platform, except nacl")
if (use_allocator_shim && is_win) { if (use_allocator_shim && is_win) {
# It's hard to override CRT's malloc family in every case in the component # It's hard to override CRT's malloc family in every case in the component
# build, and it's very easy to override it partially and to be inconsistent # build, and it's very easy to override it partially and to be inconsistent
# among allocations and deallocations. Then, we'll crash when PA deallocates # among allocations and deallocations. Then, we'll crash when PA deallocates
# a memory region allocated by the CRT's malloc or vice versa. # a memory region allocated by the CRT's malloc or vice versa.
# Since PartitionAlloc depends on libc++, it is difficult to link libc++.dll assert(!is_component_build,
# with PartitionAlloc to replace its allocator with PartitionAlloc. "The allocator shim doesn't work for the component build on Windows.")
# If using libcxx_is_shared=true,
# a. since inline methods or inline functions defined in some libc++ headers,
# e.g. vector, use new, malloc(), and so on, the memory allocation will
# be done inside a client code.
# b. on the other hand, libc++.dll deallocates the memory allocated by the
# inline methods or inline functions. It will not be run inside the client
# code.
# So a.'s allocation is done by PartitionAlloc, but b.'s deallocation is
# done by system allocator. This will cause heap check failure (WinHeap
# doesn't know PartitionAlloc) and crash.
# If libcxx_is_shared=false, libc++ is a static library. All libc++ code
# will be run inside the client. The above issue will disappear.
assert(
!is_component_build || (!libcxx_is_shared && !partition_alloc_is_debug),
"The allocator shim for the Windows component build needs !libcxx_is_shared && !partition_alloc_is_debug.")
} }
declare_args() { declare_args() {
use_freeslot_bitmap = false use_freeslot_bitmap = false
# Puts the regular and BRP pools right next to each other, so that we can
# check "belongs to one of the two pools" with a single bitmask operation.
glue_core_pools = false
# Introduces pointer compression support in PA. These are 4-byte # Introduces pointer compression support in PA. These are 4-byte
# pointers that can point within the core pools (regular and BRP). # pointers that can point within the core pools (regular and BRP).
# #
@ -219,23 +136,6 @@ declare_args() {
# through malloc. Useful for using with tools that intercept malloc, e.g. # through malloc. Useful for using with tools that intercept malloc, e.g.
# heaptrack. # heaptrack.
forward_through_malloc = false forward_through_malloc = false
# Enable reentrancy checks at `partition_alloc::internal::Lock`.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
enable_partition_lock_reentrancy_check = enable_ios_corruption_hardening
# This will write a fixed cookie pattern at the end of each allocation, and
# later verify the pattern remain unchanged to ensure there is no OOB write.
# It comes with performance and memory cost, hence enabled only in debug.
use_partition_cookie =
partition_alloc_is_debug || partition_alloc_dcheck_always_on ||
enable_ios_corruption_hardening
# This will change partition cookie size to 4B or 8B, whichever equivalent to
# size of InSlotMetadata. This option is useful for InSlotMetadata corruption
# investigation.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
smaller_partition_cookie = enable_ios_corruption_hardening
} }
declare_args() { declare_args() {
@ -305,21 +205,12 @@ declare_args() {
# Enable the feature flag required to activate backup ref pointers. That is to # Enable the feature flag required to activate backup ref pointers. That is to
# say `PartitionAllocBackupRefPtr`. # say `PartitionAllocBackupRefPtr`.
# #
# This is meant to be modified primarily on bots. It is much easier to # This is meant to be used primarily on bots. It is much easier to override
# override the feature flags using a binary flag instead of updating multiple # the feature flags using a binary flag instead of updating multiple bots's
# bots's scripts to pass command line arguments. # scripts to pass command line arguments.
# #
# TODO(328104161): Remove this flag. # TODO(328104161): Remove this flag.
enable_backup_ref_ptr_feature_flag = enable_backup_ref_ptr_feature_flag = false
enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl &&
# Platforms where BackupRefPtr hasn't shipped yet:
!is_castos && !is_ios
# While keeping BRP support, override a feature flag to make it disabled
# state. This will overwrite `enable_backup_ref_ptr_feature_flag`.
# TODO(https://crbug.com/372183586): Fix the bug and remove this arg.
force_disable_backup_ref_ptr_feature =
enable_backup_ref_ptr_support && enable_ios_corruption_hardening
# Build support for Dangling Ptr Detection (DPD) via BackupRefPtr (BRP), # Build support for Dangling Ptr Detection (DPD) via BackupRefPtr (BRP),
# making the raw_ptr<T> implementation to RawPtrBackupRefImpl if active. # making the raw_ptr<T> implementation to RawPtrBackupRefImpl if active.
@ -330,16 +221,16 @@ declare_args() {
enable_backup_ref_ptr_instance_tracer = false enable_backup_ref_ptr_instance_tracer = false
backup_ref_ptr_extra_oob_checks = backup_ref_ptr_extra_oob_checks =
enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl false && enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl
} }
declare_args() { declare_args() {
# Enable the feature flag required to check for dangling pointers. That is to # Enable the feature flag required to check for dangling pointers. That is to
# say `PartitionAllocDanglingPtr`. # say `PartitionAllocDanglingPtr`.
# #
# This is meant to be modified primarily on bots. It is much easier to # This is meant to be used primarily on bots. It is much easier to override
# override the feature flags using a binary flag instead of updating multiple # the feature flags using a binary flag instead of updating multiple bots's
# bots's scripts to pass command line arguments. # scripts to pass command line arguments.
# #
# TODO(328104161): Remove this flag. # TODO(328104161): Remove this flag.
enable_dangling_raw_ptr_feature_flag = enable_dangling_raw_ptr_checks enable_dangling_raw_ptr_feature_flag = enable_dangling_raw_ptr_checks
@ -353,7 +244,7 @@ declare_args() {
declare_args() { declare_args() {
# Shadow metadata is still under development and only supports Linux # Shadow metadata is still under development and only supports Linux
# for now. # for now.
enable_shadow_metadata = is_linux && has_64_bit_pointers enable_shadow_metadata = false
} }
declare_args() { declare_args() {
@ -366,32 +257,23 @@ declare_args() {
use_full_mte = false use_full_mte = false
} }
# *Scan is currently only used by Chromium, and supports only 64-bit.
use_starscan = build_with_chromium && has_64_bit_pointers
stack_scan_supported = stack_scan_supported =
current_cpu == "x64" || current_cpu == "x86" || current_cpu == "arm" || current_cpu == "x64" || current_cpu == "x86" || current_cpu == "arm" ||
current_cpu == "arm64" || current_cpu == "riscv64" || current_cpu == "loong64" current_cpu == "arm64" || current_cpu == "riscv64"
# We want to provide assertions that guard against inconsistent build # We want to provide assertions that guard against inconsistent build
# args, but there is no point in having them fire if we're not building # args, but there is no point in having them fire if we're not building
# PartitionAlloc at all. If `use_partition_alloc` is false, we jam all # PartitionAlloc at all. If `use_partition_alloc` is false, we jam all
# related args to `false`. # related args to `false`.
# #
# We also disable PA-Everywhere and PA-based features in two types of
# toolchains:
# - Toolchains that disable PA-Everywhere explicitly.
# - The rust host build tools toochain, which builds DLLs to dlopen into the
# compiler for proc macros. We would want any allocations to use the same
# paths as the compiler.
#
# Do not clear the following, as they can function outside of PartitionAlloc # Do not clear the following, as they can function outside of PartitionAlloc
# - has_64_bit_pointers # - has_64_bit_pointers
# - has_memory_tagging # - has_memory_tagging
if (!use_partition_alloc || if (!use_partition_alloc) {
(defined(toolchain_allows_use_partition_alloc_as_malloc) &&
!toolchain_allows_use_partition_alloc_as_malloc) ||
(defined(toolchain_for_rust_host_build_tools) &&
toolchain_for_rust_host_build_tools)) {
use_partition_alloc_as_malloc = false use_partition_alloc_as_malloc = false
glue_core_pools = false
enable_backup_ref_ptr_support = false enable_backup_ref_ptr_support = false
use_raw_ptr_backup_ref_impl = false use_raw_ptr_backup_ref_impl = false
use_asan_backup_ref_ptr = false use_asan_backup_ref_ptr = false
@ -402,8 +284,8 @@ if (!use_partition_alloc ||
enable_dangling_raw_ptr_feature_flag = false enable_dangling_raw_ptr_feature_flag = false
enable_pointer_subtraction_check = false enable_pointer_subtraction_check = false
backup_ref_ptr_poison_oob_ptr = false backup_ref_ptr_poison_oob_ptr = false
backup_ref_ptr_extra_oob_checks = false
enable_backup_ref_ptr_instance_tracer = false enable_backup_ref_ptr_instance_tracer = false
use_starscan = false
use_full_mte = false use_full_mte = false
} }
@ -492,15 +374,14 @@ assert(build_with_chromium || !use_asan_backup_ref_ptr,
assert(!use_asan_backup_ref_ptr || use_raw_ptr_hookable_impl, assert(!use_asan_backup_ref_ptr || use_raw_ptr_hookable_impl,
"AsanBackupRefPtr requires RawPtrHookableImpl") "AsanBackupRefPtr requires RawPtrHookableImpl")
# pkeys support is explicitly disabled in all Cronet builds, as some test
# dependencies that use partition_allocator are compiled in AOSP against a
# version of glibc that does not include pkeys syscall numbers.
is_pkeys_available =
(is_linux || is_chromeos) && current_cpu == "x64" && !is_cronet_build
declare_args() { declare_args() {
enable_pkeys = is_pkeys_available # pkeys support is explicitly disabled in all Cronet builds, as some test
# dependencies that use partition_allocator are compiled in AOSP against a
# version of glibc that does not include pkeys syscall numbers.
enable_pkeys =
(is_linux || is_chromeos) && target_cpu == "x64" && !is_cronet_build
} }
assert(!enable_pkeys || is_pkeys_available, assert(!enable_pkeys || ((is_linux || is_chromeos) && target_cpu == "x64"),
"Pkeys are only supported on x64 linux and ChromeOS") "Pkeys are only supported on x64 linux and ChromeOS")
# Some implementations of raw_ptr<>, like BackupRefPtr, require zeroing when # Some implementations of raw_ptr<>, like BackupRefPtr, require zeroing when

View file

@ -2,47 +2,17 @@
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
import("//build/config/android/config.gni")
import("//build/config/cast.gni")
import("//build/config/chromeos/ui_mode.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/logging.gni")
import("../../partition_alloc.gni") import("../../partition_alloc.gni")
import("buildflag_header.gni") import("buildflag_header.gni")
# //build_overrides/partition_alloc.gni should define partition_alloc_{ # Add partition_alloc.gni and import it for partition_alloc configs.
# add,remove}_configs. But if not defined (e.g. the embedder misses the config),
# define them here.
if (!defined(partition_alloc_add_configs)) {
partition_alloc_add_configs = []
}
if (!defined(partition_alloc_remove_configs)) {
partition_alloc_remove_configs = []
}
# Enables compilation of the freelist dispatcher, which we'll use to
# carry out runtime evaluation of PartitionAlloc's two freelist
# implementations: the existing encoded-next freelist and the new
# pool offset freelist. When false, the latter is not built.
use_freelist_dispatcher = has_64_bit_pointers
assert(has_64_bit_pointers || !use_freelist_dispatcher,
"freelist dispatcher can't be used without 64-bit pointers")
record_alloc_info = false
# GWP-ASan is tied to BRP's enablement.
enable_gwp_asan_support = enable_backup_ref_ptr_support
# Pointer compression requires 64-bit pointers.
enable_pointer_compression =
enable_pointer_compression_support && has_64_bit_pointers
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`, but avails it
# as a buildflag.
partition_alloc_dchecks_are_on =
partition_alloc_is_debug || partition_alloc_dcheck_always_on
# Building PartitionAlloc for Windows component build.
# Currently use build_with_chromium not to affect any third_party code,
# but if any third_party code wants to use, remove build_with_chromium.
use_partition_alloc_as_malloc_on_win_component_build =
build_with_chromium && is_win && is_component_build
# TODO(crbug.com/40276913): Split PartitionAlloc into a public and # TODO(crbug.com/40276913): Split PartitionAlloc into a public and
# private parts. The public config would include add the "./include" dir and # private parts. The public config would include add the "./include" dir and
@ -81,7 +51,6 @@ config("dependants_extra_warnings") {
"-Wduplicate-enum", "-Wduplicate-enum",
"-Wextra-semi", "-Wextra-semi",
"-Wextra-semi-stmt", "-Wextra-semi-stmt",
"-Widiomatic-parentheses",
"-Wimplicit-fallthrough", "-Wimplicit-fallthrough",
"-Winconsistent-missing-destructor-override", "-Winconsistent-missing-destructor-override",
"-Winvalid-offsetof", "-Winvalid-offsetof",
@ -118,105 +87,24 @@ config("wexit_time_destructors") {
} }
} }
source_set("buildflag_macro") { _remove_configs = []
sources = [ "buildflag.h" ] _add_configs = []
public_configs = [ ":public_includes" ] if (!is_debug || partition_alloc_optimized_debug) {
_remove_configs += [ "//build/config/compiler:default_optimization" ]
# PartitionAlloc is relatively hot (>1% of cycles for users of CrOS).
# Use speed-focused optimizations for it.
_add_configs += [ "//build/config/compiler:optimize_speed" ]
} else {
_remove_configs += [ "//build/config/compiler:default_optimization" ]
_add_configs += [ "//build/config/compiler:no_optimize" ]
} }
# When developers are repeatedly growing a buffer with `realloc`, they are
# expected to request a new size that is larger than the current size by
# some growth factor. This growth factor allows to amortize the cost of
# memcpy. Unfortunately, some nVidia drivers have a bug where they repeatedly
# increase the buffer by 4144 byte only.
#
# In particular, most Skia Linux bots are using the affected nVidia driver. So
# this flag is used as a workaround for Skia standalone, not in production.
#
# External link:
# https://forums.developer.nvidia.com/t/550-54-14-very-bad-performance-due-to-bunch-of-reallocations-during-glcore-initialization/287027
#
# Internal discussion at @chrome-memory-safety:
# https://groups.google.com/a/google.com/d/msgid/chrome-memory-safety/CAAzos5HrexY2njz2YzWrffTq1xEfkx15GVpSvHUyQED6wBSXvA%40mail.gmail.com?utm_medium=email&utm_source=footer
declare_args() {
partition_alloc_realloc_growth_factor_mitigation = false
}
pa_buildflag_header("buildflags") {
header = "buildflags.h"
flags = [
"ASSERT_CPP_20=$assert_cpp20",
"BACKUP_REF_PTR_EXTRA_OOB_CHECKS=$backup_ref_ptr_extra_oob_checks",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
"DCHECKS_ARE_ON=$partition_alloc_dchecks_are_on",
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT=$enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_GWP_ASAN_SUPPORT=$enable_gwp_asan_support",
"ENABLE_PARTITION_LOCK_REENTRANCY_CHECK=$enable_partition_lock_reentrancy_check",
"ENABLE_PKEYS=$enable_pkeys",
"ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
"ENABLE_POINTER_COMPRESSION=$enable_pointer_compression",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"FORCE_DISABLE_BACKUP_REF_PTR_FEATURE=$force_disable_backup_ref_ptr_feature",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
"FORWARD_THROUGH_MALLOC=$forward_through_malloc",
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"HAS_MEMORY_TAGGING=$has_memory_tagging",
"IS_ANDROID=$is_android",
"IS_CASTOS=$is_castos",
"IS_CAST_ANDROID=$is_cast_android",
"IS_CHROMEOS=$is_chromeos",
"IS_DEBUG=$partition_alloc_is_debug",
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"REALLOC_GROWTH_FACTOR_MITIGATION=$partition_alloc_realloc_growth_factor_mitigation",
"RECORD_ALLOC_INFO=$record_alloc_info",
"SMALLER_PARTITION_COOKIE=$smaller_partition_cookie",
"STACK_SCAN_SUPPORTED=$stack_scan_supported",
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_FREELIST_DISPATCHER=$use_freelist_dispatcher",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"USE_FULL_MTE=$use_full_mte",
"USE_LARGE_EMPTY_SLOT_SPAN_RING=$use_large_empty_slot_span_ring",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"USE_PARTITION_COOKIE=$use_partition_cookie",
"USE_RAW_PTR_ASAN_UNOWNED_IMPL=$use_raw_ptr_asan_unowned_impl",
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
]
}
# TODO(crbug.com/41481467): Remove this alias.
# Temporary alias, the time to update partition_alloc dependants.
# Currently needed by pdfium and dawn.
source_set("partition_alloc_buildflags") {
public = [ "partition_alloc_buildflags.h" ]
public_deps = [ ":buildflags" ]
}
# Provides platform and architecture detections from the compiler defines.
source_set("build_config") { source_set("build_config") {
sources = [ sources = [
"build_config.h", "build_config.h",
"buildflag.h", "buildflag.h",
] ]
public_deps = [
":buildflag_macro", # Provides 'PA_BUILDFLAG()' macro.
":buildflags", # Provides `IS_CHROMEOS` definition.
]
public_configs = [ ":public_includes" ]
} }
component("raw_ptr") { component("raw_ptr") {
@ -254,22 +142,157 @@ component("raw_ptr") {
sources += [ "pointers/raw_ptr_noop_impl.h" ] sources += [ "pointers/raw_ptr_noop_impl.h" ]
sources += [ "pointers/empty.cc" ] sources += [ "pointers/empty.cc" ]
} }
public_deps = [ public_deps = [ ":build_config" ]
":build_config",
":buildflags",
]
if (use_partition_alloc) { if (use_partition_alloc) {
public_deps += [ ":partition_alloc" ] public_deps += [ ":partition_alloc" ]
} }
deps = [ ":buildflags" ]
# See also: `partition_alloc_base/component_export.h` # See also: `partition_alloc_base/component_export.h`
defines = [ "IS_RAW_PTR_IMPL" ] defines = [ "IS_RAW_PTR_IMPL" ]
configs -= partition_alloc_remove_configs configs -= _remove_configs
configs += partition_alloc_add_configs configs += _add_configs
configs += [ ":dependants_extra_warnings" ] configs += [ ":dependants_extra_warnings" ]
} }
# Enables compilation of the freelist dispatcher, which we'll use to
# carry out runtime evaluation of PartitionAlloc's two freelist
# implementations: the existing encoded-next freelist and the new
# pool offset freelist. When false, the latter is not built.
use_freelist_dispatcher = has_64_bit_pointers && false
pa_buildflag_header("partition_alloc_buildflags") {
header = "partition_alloc_buildflags.h"
_record_alloc_info = false
# GWP-ASan is tied to BRP's enablement.
_enable_gwp_asan_support = enable_backup_ref_ptr_support
# Pools are a logical concept when address space is 32-bit.
_glue_core_pools = glue_core_pools && has_64_bit_pointers
# Pointer compression requires 64-bit pointers.
_enable_pointer_compression =
enable_pointer_compression_support && has_64_bit_pointers
# TODO(crbug.com/40158212): Need to refactor the following buildflags.
# The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
# PartitionAlloc. For PartitionAlloc,
# gen/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h
# defines and PartitionAlloc includes the header file. For chrome,
# gen/base/allocator/buildflags.h defines and chrome includes.
flags = [
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"HAS_MEMORY_TAGGING=$has_memory_tagging",
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_LARGE_EMPTY_SLOT_SPAN_RING=$use_large_empty_slot_span_ring",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
"BACKUP_REF_PTR_EXTRA_OOB_CHECKS=$backup_ref_ptr_extra_oob_checks",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_RAW_PTR_ASAN_UNOWNED_IMPL=$use_raw_ptr_asan_unowned_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
"ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support",
# TODO(bartekn): Remove once PDFium switches to
# USE_RAW_PTR_ASAN_UNOWNED_IMPL.
"USE_ASAN_UNOWNED_PTR=$use_raw_ptr_asan_unowned_impl",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
"USE_FULL_MTE=$use_full_mte",
"RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"GLUE_CORE_POOLS=$_glue_core_pools",
"ENABLE_POINTER_COMPRESSION=$_enable_pointer_compression",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"USE_FREELIST_DISPATCHER=$use_freelist_dispatcher",
"USE_STARSCAN=$use_starscan",
"STACK_SCAN_SUPPORTED=$stack_scan_supported",
"ENABLE_PKEYS=$enable_pkeys",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
"FORWARD_THROUGH_MALLOC=$forward_through_malloc",
"ASSERT_CPP_20=$assert_cpp20",
]
}
pa_buildflag_header("raw_ptr_buildflags") {
header = "raw_ptr_buildflags.h"
flags = [
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
]
}
pa_buildflag_header("chromecast_buildflags") {
header = "chromecast_buildflags.h"
# TODO(https://crbug.com/41481467): Remove the "PA_" prefix, because it
# will already be part of the define and the `PA_BUILDFLAG` macro.
flags = [
"PA_IS_CAST_ANDROID=$is_cast_android",
"PA_IS_CASTOS=$is_castos",
]
}
pa_buildflag_header("chromeos_buildflags") {
header = "chromeos_buildflags.h"
# TODO(https://crbug.com/41481467): Remove the "PA_" prefix, because it
# will already be part of the define and the `PA_BUILDFLAG` macro.
flags = [ "PA_IS_CHROMEOS_ASH=$is_chromeos_ash" ]
}
pa_buildflag_header("debugging_buildflags") {
header = "debugging_buildflags.h"
header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
# but avails it as a buildflag.
_dcheck_is_on = is_debug || dcheck_always_on
# TODO(https://crbug.com/41481467): Remove the "PA_" prefix, because it
# will already be part of the define and the `PA_BUILDFLAG` macro.
flags = [
"PA_DCHECK_IS_ON=$_dcheck_is_on",
"PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"PA_CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
]
}
group("buildflags") {
public_deps = [
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
":raw_ptr_buildflags",
]
public_configs = [ ":public_includes" ]
}
if (is_clang_or_gcc) { if (is_clang_or_gcc) {
config("partition_alloc_implementation") { config("partition_alloc_implementation") {
# See also: `partition_alloc_base/component_export.h` # See also: `partition_alloc_base/component_export.h`
@ -350,7 +373,7 @@ if (is_clang_or_gcc) {
} }
} }
if (enable_pkeys && partition_alloc_is_debug) { if (enable_pkeys && is_debug) {
config("no_stack_protector") { config("no_stack_protector") {
cflags = [ "-fno-stack-protector" ] cflags = [ "-fno-stack-protector" ]
} }
@ -361,40 +384,12 @@ if (is_clang_or_gcc) {
":allocator_base", ":allocator_base",
":allocator_core", ":allocator_core",
":allocator_shim", ":allocator_shim",
":buildflags",
] ]
} }
if (is_win && is_component_build) {
group("win_component_build_adapter") {
# Currently guard this target by using build_with_chromium to avoid
# any issues on third_party build. But if any third_party code wants to
# use allocator_shim for its own component build, we will remove this
# guard.
if (build_with_chromium) {
if (use_allocator_shim) {
public_deps = [
":allocator_base",
":allocator_shim",
]
}
}
# If not with chromium, currently do nothing.
}
}
component("allocator_core") { component("allocator_core") {
visibility = [ ":*" ] visibility = [ ":*" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# Since common_deps defined in //build/config/BUILD.gn depends on
# PartitionAlloc for PartitionAlloc-Everywhere, we need no_default_deps
# here, because default deps includes common_deps dependency.
# Without no_defaults_deps=true, we will see cyclic deps:
# common_deps=>PartitionAlloc=>common_deps
no_default_deps = true
}
sources = [ sources = [
"aarch64_support.h", "aarch64_support.h",
"address_pool_manager.cc", "address_pool_manager.cc",
@ -447,7 +442,6 @@ if (is_clang_or_gcc) {
"partition_bucket.cc", "partition_bucket.cc",
"partition_bucket.h", "partition_bucket.h",
"partition_bucket_lookup.h", "partition_bucket_lookup.h",
"partition_cookie.cc",
"partition_cookie.h", "partition_cookie.h",
"partition_dcheck_helper.cc", "partition_dcheck_helper.cc",
"partition_dcheck_helper.h", "partition_dcheck_helper.h",
@ -462,7 +456,6 @@ if (is_clang_or_gcc) {
"partition_page_constants.h", "partition_page_constants.h",
"partition_root.cc", "partition_root.cc",
"partition_root.h", "partition_root.h",
"partition_shared_mutex.h",
"partition_stats.cc", "partition_stats.cc",
"partition_stats.h", "partition_stats.h",
"partition_superpage_extent_entry.h", "partition_superpage_extent_entry.h",
@ -488,6 +481,29 @@ if (is_clang_or_gcc) {
"yield_processor.h", "yield_processor.h",
] ]
if (use_starscan) {
sources += [
"starscan/logging.h",
"starscan/pcscan.cc",
"starscan/pcscan.h",
"starscan/pcscan_internal.cc",
"starscan/pcscan_internal.h",
"starscan/pcscan_scheduling.cc",
"starscan/pcscan_scheduling.h",
"starscan/raceful_worklist.h",
"starscan/scan_loop.h",
"starscan/snapshot.cc",
"starscan/snapshot.h",
"starscan/starscan_fwd.h",
"starscan/state_bitmap.h",
"starscan/stats_collector.cc",
"starscan/stats_collector.h",
"starscan/stats_reporter.h",
"starscan/write_protector.cc",
"starscan/write_protector.h",
]
}
defines = [] defines = []
if (is_win) { if (is_win) {
sources += [ sources += [
@ -521,9 +537,6 @@ if (is_clang_or_gcc) {
} else if (current_cpu == "riscv64") { } else if (current_cpu == "riscv64") {
assert(stack_scan_supported) assert(stack_scan_supported)
sources += [ "stack/asm/riscv64/push_registers_asm.cc" ] sources += [ "stack/asm/riscv64/push_registers_asm.cc" ]
} else if (current_cpu == "loong64") {
assert(stack_scan_supported)
sources += [ "stack/asm/loong64/push_registers_asm.cc" ]
} else { } else {
# To support a trampoline for another arch, please refer to v8/src/heap/base. # To support a trampoline for another arch, please refer to v8/src/heap/base.
assert(!stack_scan_supported) assert(!stack_scan_supported)
@ -539,7 +552,10 @@ if (is_clang_or_gcc) {
public_deps = [ public_deps = [
":build_config", ":build_config",
":buildflags", ":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
] ]
configs += [ configs += [
@ -548,13 +564,11 @@ if (is_clang_or_gcc) {
":wexit_time_destructors", ":wexit_time_destructors",
] ]
deps = [ ":allocator_base" ] deps = [ ":allocator_base" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# We need to add explicit libc++ dependency here because of
# no_default_deps=true.
deps += [ "//buildtools/third_party/libc++:libc++" ]
}
public_configs = [] public_configs = []
if (is_android) {
# tagging.cc requires __arm_mte_set_* functions.
deps += [ "//third_party/cpu_features:ndk_compat" ]
}
if (is_fuchsia) { if (is_fuchsia) {
deps += [ deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp",
@ -584,27 +598,19 @@ if (is_clang_or_gcc) {
] ]
} }
configs -= partition_alloc_remove_configs configs -= _remove_configs
configs += partition_alloc_add_configs configs += _add_configs
configs += [ ":dependants_extra_warnings" ] configs += [ ":dependants_extra_warnings" ]
# We want to be able to test pkey mode without access to the default pkey. # We want to be able to test pkey mode without access to the default pkey.
# This is incompatible with stack protectors since the TLS won't be pkey-tagged. # This is incompatible with stack protectors since the TLS won't be pkey-tagged.
if (enable_pkeys && partition_alloc_is_debug) { if (enable_pkeys && is_debug) {
configs += [ ":no_stack_protector" ] configs += [ ":no_stack_protector" ]
} }
} }
component("allocator_base") { component("allocator_base") {
visibility = [ ":*" ] visibility = [ ":*" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# Since common_deps defined in //build/config/BUILD.gn depends on
# PartitionAlloc for PartitionAlloc-Everywhere, we need no_default_deps
# here, because default deps includes common_deps dependency.
# Without no_defaults_deps=true, we will see cyclic deps:
# common_deps=>PartitionAlloc=>common_deps
no_default_deps = true
}
sources = [ sources = [
"partition_alloc_base/atomic_ref_count.h", "partition_alloc_base/atomic_ref_count.h",
@ -623,7 +629,6 @@ if (is_clang_or_gcc) {
"partition_alloc_base/debug/stack_trace.cc", "partition_alloc_base/debug/stack_trace.cc",
"partition_alloc_base/debug/stack_trace.h", "partition_alloc_base/debug/stack_trace.h",
"partition_alloc_base/export_template.h", "partition_alloc_base/export_template.h",
"partition_alloc_base/files/platform_file.h",
"partition_alloc_base/immediate_crash.h", "partition_alloc_base/immediate_crash.h",
"partition_alloc_base/log_message.cc", "partition_alloc_base/log_message.cc",
"partition_alloc_base/log_message.h", "partition_alloc_base/log_message.h",
@ -669,13 +674,11 @@ if (is_clang_or_gcc) {
"partition_alloc_base/time/time.h", "partition_alloc_base/time/time.h",
"partition_alloc_base/time/time_override.cc", "partition_alloc_base/time/time_override.cc",
"partition_alloc_base/time/time_override.h", "partition_alloc_base/time/time_override.h",
"partition_alloc_base/types/same_as_any.h",
"partition_alloc_base/types/strong_alias.h", "partition_alloc_base/types/strong_alias.h",
"partition_alloc_base/win/win_handle_types.h", "partition_alloc_base/win/win_handle_types.h",
"partition_alloc_base/win/win_handle_types_list.inc", "partition_alloc_base/win/win_handle_types_list.inc",
"partition_alloc_base/win/windows_types.h", "partition_alloc_base/win/windows_types.h",
] ]
libs = []
if (is_win) { if (is_win) {
sources += [ sources += [
@ -687,12 +690,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/threading/platform_thread_win.cc", "partition_alloc_base/threading/platform_thread_win.cc",
"partition_alloc_base/time/time_win.cc", "partition_alloc_base/time/time_win.cc",
] ]
libs += [ } else if (is_posix) {
"winmm.lib", # For timeGetTime.
]
}
if (is_posix) {
sources += [ sources += [
"partition_alloc_base/debug/stack_trace_posix.cc", "partition_alloc_base/debug/stack_trace_posix.cc",
"partition_alloc_base/files/file_util.h", "partition_alloc_base/files/file_util.h",
@ -711,6 +709,9 @@ if (is_clang_or_gcc) {
sources += [ "partition_alloc_base/debug/stack_trace_linux.cc" ] sources += [ "partition_alloc_base/debug/stack_trace_linux.cc" ]
} }
if (is_android || is_chromeos_ash) {
sources += [ "partition_alloc_base/time/time_android.cc" ]
}
if (is_apple) { if (is_apple) {
# Request <dlfcn.h> to provide the `dladdr()` function. This is used to # Request <dlfcn.h> to provide the `dladdr()` function. This is used to
# translate address to symbolic information. # translate address to symbolic information.
@ -723,9 +724,7 @@ if (is_clang_or_gcc) {
} else { } else {
sources += [ "partition_alloc_base/time/time_now_posix.cc" ] sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
} }
} } else if (is_fuchsia) {
if (is_fuchsia) {
sources += [ sources += [
"partition_alloc_base/fuchsia/fuchsia_logging.cc", "partition_alloc_base/fuchsia/fuchsia_logging.cc",
"partition_alloc_base/fuchsia/fuchsia_logging.h", "partition_alloc_base/fuchsia/fuchsia_logging.h",
@ -739,7 +738,6 @@ if (is_clang_or_gcc) {
"partition_alloc_base/time/time_fuchsia.cc", "partition_alloc_base/time/time_fuchsia.cc",
] ]
} }
if (is_android) { if (is_android) {
# Only android build requires native_library, and native_library depends # Only android build requires native_library, and native_library depends
# on file_path. So file_path is added if is_android = true. # on file_path. So file_path is added if is_android = true.
@ -752,7 +750,6 @@ if (is_clang_or_gcc) {
"partition_alloc_base/native_library_posix.cc", "partition_alloc_base/native_library_posix.cc",
] ]
} }
if (is_apple) { if (is_apple) {
# Apple-specific utilities # Apple-specific utilities
sources += [ sources += [
@ -781,7 +778,10 @@ if (is_clang_or_gcc) {
public_deps = [ public_deps = [
":build_config", ":build_config",
":buildflags", ":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
] ]
public_configs = [ ":public_includes" ] public_configs = [ ":public_includes" ]
configs += [ configs += [
@ -790,11 +790,6 @@ if (is_clang_or_gcc) {
] ]
deps = [] deps = []
if (use_partition_alloc_as_malloc_on_win_component_build) {
# We need to add explicit libc++ dependency here because of
# no_default_deps=true.
deps += [ "//buildtools/third_party/libc++:libc++" ]
}
if (is_fuchsia) { if (is_fuchsia) {
public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/fit" ] public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/fit" ]
} }
@ -807,12 +802,14 @@ if (is_clang_or_gcc) {
] ]
} }
configs -= partition_alloc_remove_configs configs -= _remove_configs
configs += partition_alloc_add_configs configs += _add_configs
configs += [ ":dependants_extra_warnings" ] configs += [ ":dependants_extra_warnings" ]
} }
component("allocator_shim") { component("allocator_shim") {
visibility = [ ":*" ]
sources = [] sources = []
deps = [] deps = []
all_dependent_configs = [] all_dependent_configs = []
@ -823,8 +820,8 @@ if (is_clang_or_gcc) {
] ]
frameworks = [] frameworks = []
configs -= partition_alloc_remove_configs configs -= _remove_configs
configs += partition_alloc_add_configs configs += _add_configs
configs += [ ":dependants_extra_warnings" ] configs += [ ":dependants_extra_warnings" ]
shim_headers = [] shim_headers = []
@ -843,14 +840,14 @@ if (is_clang_or_gcc) {
"shim/allocator_shim_dispatch_to_noop_on_free.h", "shim/allocator_shim_dispatch_to_noop_on_free.h",
] ]
if (use_partition_alloc) { if (use_partition_alloc) {
shim_sources += shim_sources += [
[ "shim/allocator_shim_default_dispatch_to_partition_alloc.cc" ] "shim/allocator_shim_default_dispatch_to_partition_alloc.cc",
shim_headers += "shim/nonscannable_allocator.cc",
[ "shim/allocator_shim_default_dispatch_to_partition_alloc.h" ] ]
} shim_headers += [
if (enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support) { "shim/allocator_shim_default_dispatch_to_partition_alloc.h",
shim_sources += [ "shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.cc" ] "shim/nonscannable_allocator.h",
shim_headers += [ "shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.h" ] ]
} }
if (is_android) { if (is_android) {
shim_headers += [ shim_headers += [
@ -884,7 +881,7 @@ if (is_clang_or_gcc) {
# Do not compile with ARC because this target has to interface with # Do not compile with ARC because this target has to interface with
# low-level Objective-C and having ARC would interfere. # low-level Objective-C and having ARC would interfere.
configs -= [ partition_alloc_enable_arc_config ] configs -= [ "//build/config/compiler:enable_arc" ]
} }
} }
if (is_chromeos || is_linux) { if (is_chromeos || is_linux) {
@ -953,199 +950,6 @@ if (is_clang_or_gcc) {
] ]
} }
} # if (is_clang_or_gcc) } # if (is_clang_or_gcc)
# TODO(crbug.com/40158212): After making partition_alloc a standalone library,
# TODO(crbug.com/40158212): Consider supporting building tests outside of # move test code here. i.e. test("partition_alloc_tests") { ... } and
# chromium and having a dedicated 'partition_alloc_unittests' target. # test("partition_alloc_perftests").
if (build_with_chromium) {
source_set("unittests") {
testonly = true
sources = [ "partition_alloc_base/test/gtest_util.h" ]
if (is_linux || is_chromeos || is_android) {
sources += [
"partition_alloc_base/debug/proc_maps_linux.cc",
"partition_alloc_base/debug/proc_maps_linux.h",
]
}
if (is_android) {
sources += [
"partition_alloc_base/files/file_path_pa_unittest.cc",
"partition_alloc_base/native_library_pa_unittest.cc",
]
}
if (use_partition_alloc) {
sources += [
"address_pool_manager_unittest.cc",
"address_space_randomization_unittest.cc",
"compressed_pointer_unittest.cc",
"freeslot_bitmap_unittest.cc",
"hardening_unittest.cc",
"lightweight_quarantine_unittest.cc",
"memory_reclaimer_unittest.cc",
"page_allocator_unittest.cc",
"partition_alloc_base/bits_pa_unittest.cc",
"partition_alloc_base/component_export_pa_unittest.cc",
"partition_alloc_base/cpu_pa_unittest.cc",
"partition_alloc_base/logging_pa_unittest.cc",
"partition_alloc_base/no_destructor_pa_unittest.cc",
"partition_alloc_base/rand_util_pa_unittest.cc",
"partition_alloc_base/scoped_clear_last_error_pa_unittest.cc",
"partition_alloc_base/strings/cstring_builder_pa_unittest.cc",
"partition_alloc_base/strings/safe_sprintf_pa_unittest.cc",
"partition_alloc_base/strings/string_util_pa_unittest.cc",
"partition_alloc_base/strings/stringprintf_pa_unittest.cc",
"partition_alloc_base/thread_annotations_pa_unittest.cc",
"partition_alloc_unittest.cc",
"partition_lock_unittest.cc",
"reverse_bytes_unittest.cc",
"slot_start_unittest.cc",
"thread_cache_unittest.cc",
"use_death_tests.h",
]
}
if (is_fuchsia) {
sources +=
[ "partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc" ]
}
if (use_allocator_shim) {
sources += [
"shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc",
]
if (is_win) {
sources += [ "shim/winheap_stubs_win_unittest.cc" ]
}
if (is_ios) {
sources += [
"shim/allocator_interception_apple_unittest.mm",
"shim/malloc_zone_functions_apple_unittest.cc",
]
}
}
if ((is_android || is_linux) && target_cpu == "arm64") {
cflags = [
"-Xclang",
"-target-feature",
"-Xclang",
"+mte",
]
}
if (enable_pkeys && partition_alloc_is_debug && !is_component_build) {
# This test requires RELRO, which is not enabled in component builds.
# Also, require a debug build, since we only disable stack protectors in
# debug builds in PartitionAlloc (see below why it's needed).
sources += [ "thread_isolation/pkey_unittest.cc" ]
# We want to test the pkey code without access to memory that is not
# pkey-tagged. This will allow us to catch unintended memory accesses
# that could break our security assumptions. The stack protector reads a
# value from the TLS which won't be pkey-tagged, hence disabling it for
# the test.
configs += [ ":no_stack_protector" ]
}
frameworks = []
if (is_mac) {
frameworks += [
"Foundation.framework",
"OpenCL.framework",
]
}
deps = [
":partition_alloc",
":test_support",
"//testing/gmock",
"//testing/gtest",
]
configs -= partition_alloc_remove_configs
configs += partition_alloc_add_configs
}
}
source_set("test_support") {
testonly = true
sources = [
"extended_api.cc",
"extended_api.h",
"partition_alloc_base/threading/platform_thread_for_testing.h",
"partition_alloc_for_testing.h",
"pointers/raw_ptr_counting_impl_for_test.h",
]
if (is_posix) {
sources += [
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_fuchsia) {
sources += [
"partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc",
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_win) {
sources +=
[ "partition_alloc_base/threading/platform_thread_win_for_testing.cc" ]
}
if (is_apple) {
sources += [
"partition_alloc_base/threading/platform_thread_apple_for_testing.mm",
]
}
if (is_linux || is_chromeos) {
sources += [
"partition_alloc_base/threading/platform_thread_linux_for_testing.cc",
]
}
if (is_android) {
sources += [
"partition_alloc_base/threading/platform_thread_android_for_testing.cc",
]
}
public_deps = [
":arm_bti_testfunctions",
":buildflags",
":partition_alloc",
":raw_ptr",
]
public_configs = []
if (is_fuchsia) {
public_deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_hlcpp",
"//third_party/fuchsia-sdk/sdk/pkg/fit",
"//third_party/fuchsia-sdk/sdk/pkg/sync",
"//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/zx",
]
# Needed for users of spinning_mutex.h, which for performance reasons,
# contains inlined calls to `libsync` inside the header file.
# It appends an entry to the "libs" section of the dependent target.
public_configs += [ ":fuchsia_sync_lib" ]
}
configs -= partition_alloc_remove_configs
configs += partition_alloc_add_configs
}
source_set("arm_bti_testfunctions") {
testonly = true
sources = []
if (target_cpu == "arm64" && (is_linux || is_android)) {
sources = [
"arm_bti_test_functions.S",
"arm_bti_test_functions.h",
]
}
}

View file

@ -8,15 +8,13 @@
#include <stdint.h> #include <stdint.h>
#include "partition_alloc/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#if defined(__MUSL__) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) && !defined(__MUSL__)
// Musl does not support ifunc.
#elif PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX)
#define HAS_HW_CAPS #define HAS_HW_CAPS
#endif #endif
#if PA_BUILDFLAG(PA_ARCH_CPU_ARM64) && defined(HAS_HW_CAPS) #if defined(ARCH_CPU_ARM64) && defined(HAS_HW_CAPS)
#include <asm/hwcap.h> #include <asm/hwcap.h>
#include <sys/ifunc.h> #include <sys/ifunc.h>
#else #else
@ -27,7 +25,7 @@ namespace partition_alloc::internal {
constexpr bool IsBtiEnabled(uint64_t ifunc_hwcap, constexpr bool IsBtiEnabled(uint64_t ifunc_hwcap,
struct __ifunc_arg_t* ifunc_hw) { struct __ifunc_arg_t* ifunc_hw) {
#if PA_BUILDFLAG(PA_ARCH_CPU_ARM64) && defined(HAS_HW_CAPS) #if defined(ARCH_CPU_ARM64) && defined(HAS_HW_CAPS)
return (ifunc_hwcap & _IFUNC_ARG_HWCAP) && (ifunc_hw->_hwcap2 & HWCAP2_BTI); return (ifunc_hwcap & _IFUNC_ARG_HWCAP) && (ifunc_hw->_hwcap2 & HWCAP2_BTI);
#else #else
return false; return false;
@ -36,7 +34,7 @@ constexpr bool IsBtiEnabled(uint64_t ifunc_hwcap,
constexpr bool IsMteEnabled(uint64_t ifunc_hwcap, constexpr bool IsMteEnabled(uint64_t ifunc_hwcap,
struct __ifunc_arg_t* ifunc_hw) { struct __ifunc_arg_t* ifunc_hw) {
#if PA_BUILDFLAG(PA_ARCH_CPU_ARM64) && defined(HAS_HW_CAPS) && \ #if defined(ARCH_CPU_ARM64) && defined(HAS_HW_CAPS) && \
PA_BUILDFLAG(HAS_MEMORY_TAGGING) PA_BUILDFLAG(HAS_MEMORY_TAGGING)
return (ifunc_hwcap & _IFUNC_ARG_HWCAP) && (ifunc_hw->_hwcap2 & HWCAP2_MTE); return (ifunc_hwcap & _IFUNC_ARG_HWCAP) && (ifunc_hw->_hwcap2 & HWCAP2_MTE);
#else #else

View file

@ -11,22 +11,23 @@
#include "partition_alloc/address_space_stats.h" #include "partition_alloc/address_space_stats.h"
#include "partition_alloc/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator.h" #include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h" #include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/notreached.h" #include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/reservation_offset_table.h" #include "partition_alloc/reservation_offset_table.h"
#include "partition_alloc/thread_isolation/alignment.h" #include "partition_alloc/thread_isolation/alignment.h"
#if PA_BUILDFLAG(IS_APPLE) || PA_BUILDFLAG(ENABLE_THREAD_ISOLATION) #if BUILDFLAG(IS_APPLE) || PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
#include <sys/mman.h> #include <sys/mman.h>
#endif #endif
namespace partition_alloc::internal { namespace partition_alloc::internal {
PA_CONSTINIT AddressPoolManager AddressPoolManager::singleton_; AddressPoolManager AddressPoolManager::singleton_;
// static // static
AddressPoolManager& AddressPoolManager::GetInstance() { AddressPoolManager& AddressPoolManager::GetInstance() {
@ -124,7 +125,7 @@ void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
PA_CHECK(!(ptr & kSuperPageOffsetMask)); PA_CHECK(!(ptr & kSuperPageOffsetMask));
PA_CHECK(!(length & kSuperPageOffsetMask)); PA_CHECK(!(length & kSuperPageOffsetMask));
address_begin_ = ptr; address_begin_ = ptr;
#if PA_BUILDFLAG(DCHECKS_ARE_ON) #if PA_BUILDFLAG(PA_DCHECK_IS_ON)
address_end_ = ptr + length; address_end_ = ptr + length;
PA_DCHECK(address_begin_ < address_end_); PA_DCHECK(address_begin_ < address_end_);
#endif #endif
@ -203,7 +204,7 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
bit_hint_ = end_bit; bit_hint_ = end_bit;
} }
uintptr_t address = address_begin_ + beg_bit * kSuperPageSize; uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if PA_BUILDFLAG(DCHECKS_ARE_ON) #if PA_BUILDFLAG(PA_DCHECK_IS_ON)
PA_DCHECK(address + requested_size <= address_end_); PA_DCHECK(address + requested_size <= address_end_);
#endif #endif
return address; return address;
@ -245,7 +246,7 @@ void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
PA_DCHECK(!(free_size & kSuperPageOffsetMask)); PA_DCHECK(!(free_size & kSuperPageOffsetMask));
PA_DCHECK(address_begin_ <= address); PA_DCHECK(address_begin_ <= address);
#if PA_BUILDFLAG(DCHECKS_ARE_ON) #if PA_BUILDFLAG(PA_DCHECK_IS_ON)
PA_DCHECK(address + free_size <= address_end_); PA_DCHECK(address + free_size <= address_end_);
#endif #endif
@ -556,7 +557,7 @@ void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION) #if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields // This function just exists to static_assert the layout of the private fields
// in Pool. It is never called. // in Pool.
void AddressPoolManager::AssertThreadIsolatedLayout() { void AddressPoolManager::AssertThreadIsolatedLayout() {
constexpr size_t last_pool_offset = constexpr size_t last_pool_offset =
offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1); offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1);

View file

@ -10,11 +10,12 @@
#include "partition_alloc/address_pool_manager_types.h" #include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_address_space.h" #include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h" #include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h" #include "partition_alloc/partition_lock.h"
@ -113,8 +114,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool GetStats(AddressSpaceStats* stats); bool GetStats(AddressSpaceStats* stats);
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION) #if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields
// in Pool. It is never called.
static void AssertThreadIsolatedLayout(); static void AssertThreadIsolatedLayout();
#endif // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION) #endif // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
@ -163,7 +162,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
size_t total_bits_ = 0; size_t total_bits_ = 0;
uintptr_t address_begin_ = 0; uintptr_t address_begin_ = 0;
#if PA_BUILDFLAG(DCHECKS_ARE_ON) #if PA_BUILDFLAG(PA_DCHECK_IS_ON)
uintptr_t address_end_ = 0; uintptr_t address_end_ = 0;
#endif #endif
@ -202,7 +201,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
#endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
PA_CONSTINIT static AddressPoolManager singleton_; static PA_CONSTINIT AddressPoolManager singleton_;
}; };
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View file

@ -4,7 +4,7 @@
#include "partition_alloc/address_pool_manager_bitmap.h" #include "partition_alloc/address_pool_manager_bitmap.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#if !PA_BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_BUILDFLAG(HAS_64_BIT_POINTERS)

View file

@ -11,9 +11,9 @@
#include <limits> #include <limits>
#include "partition_alloc/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h" #include "partition_alloc/partition_lock.h"

View file

@ -5,11 +5,11 @@
#include "partition_alloc/address_space_randomization.h" #include "partition_alloc/address_space_randomization.h"
#include "partition_alloc/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/random.h" #include "partition_alloc/random.h"
#if PA_BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
#include <windows.h> #include <windows.h>
#endif #endif
@ -27,7 +27,7 @@ uintptr_t GetRandomPageBase() {
random &= internal::ASLRMask(); random &= internal::ASLRMask();
random += internal::ASLROffset(); random += internal::ASLROffset();
#else // PA_BUILDFLAG(HAS_64_BIT_POINTERS) #else // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
#if PA_BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
// On win32 host systems the randomization plus huge alignment causes // On win32 host systems the randomization plus huge alignment causes
// excessive fragmentation. Plus most of these systems lack ASLR, so the // excessive fragmentation. Plus most of these systems lack ASLR, so the
// randomization isn't buying anything. In that case we just skip it. // randomization isn't buying anything. In that case we just skip it.
@ -39,7 +39,7 @@ uintptr_t GetRandomPageBase() {
if (!is_wow64) { if (!is_wow64) {
return 0; return 0;
} }
#endif // PA_BUILDFLAG(IS_WIN) #endif // BUILDFLAG(IS_WIN)
random &= internal::ASLRMask(); random &= internal::ASLRMask();
random += internal::ASLROffset(); random += internal::ASLROffset();
#endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS)

View file

@ -36,7 +36,7 @@ AslrMask(uintptr_t bits) {
// //
// clang-format off // clang-format off
#if PA_BUILDFLAG(PA_ARCH_CPU_64_BITS) #if defined(ARCH_CPU_64_BITS)
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
@ -54,7 +54,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x7e8000000000ULL); return AslrAddress(0x7e8000000000ULL);
} }
#elif PA_BUILDFLAG(IS_WIN) #elif BUILDFLAG(IS_WIN)
// Windows 8.10 and newer support the full 48 bit address range. Since // Windows 8.10 and newer support the full 48 bit address range. Since
// ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See // ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See
@ -67,7 +67,7 @@ AslrMask(uintptr_t bits) {
return 0x80000000ULL; return 0x80000000ULL;
} }
#elif PA_BUILDFLAG(IS_APPLE) #elif BUILDFLAG(IS_APPLE)
// macOS as of 10.12.5 does not clean up entries in page map levels 3/4 // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
// [PDP/PML4] created from mmap or mach_vm_allocate, even after the region // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
@ -98,9 +98,9 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x10000000000ULL); return AslrAddress(0x10000000000ULL);
} }
#elif PA_BUILDFLAG(IS_POSIX) || PA_BUILDFLAG(IS_FUCHSIA) #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#if PA_BUILDFLAG(PA_ARCH_CPU_X86_64) #if defined(ARCH_CPU_X86_64)
// Linux (and macOS) support the full 47-bit user space of x64 processors. // Linux (and macOS) support the full 47-bit user space of x64 processors.
// Use only 46 to allow the kernel a chance to fulfill the request. // Use only 46 to allow the kernel a chance to fulfill the request.
@ -113,7 +113,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0); return AslrAddress(0);
} }
#elif PA_BUILDFLAG(IS_ANDROID) && (PA_BUILDFLAG(PA_ARCH_CPU_ARM64) || PA_BUILDFLAG(PA_ARCH_CPU_RISCV64)) #elif BUILDFLAG(IS_ANDROID) && (defined(ARCH_CPU_ARM64) || defined(ARCH_CPU_RISCV64))
// Restrict the address range on Android to avoid a large performance // Restrict the address range on Android to avoid a large performance
// regression in single-process WebViews. See https://crbug.com/837640. // regression in single-process WebViews. See https://crbug.com/837640.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
@ -124,8 +124,8 @@ AslrMask(uintptr_t bits) {
ASLROffset() { ASLROffset() {
return AslrAddress(0x20000000ULL); return AslrAddress(0x20000000ULL);
} }
#elif PA_BUILDFLAG(PA_ARCH_CPU_ARM64) #elif defined(ARCH_CPU_ARM64)
#if PA_BUILDFLAG(IS_LINUX) #if BUILDFLAG(IS_LINUX)
// Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on // Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on
// page size and number of levels of translation pages used. We use // page size and number of levels of translation pages used. We use
@ -153,9 +153,9 @@ AslrMask(uintptr_t bits) {
#endif #endif
#elif PA_BUILDFLAG(PA_ARCH_CPU_PPC64) #elif defined(ARCH_CPU_PPC64)
#if PA_BUILDFLAG(IS_AIX) #if BUILDFLAG(IS_AIX)
// AIX has 64 bits of virtual addressing, but we limit the address range // AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
@ -167,7 +167,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x400000000000ULL); return AslrAddress(0x400000000000ULL);
} }
#elif PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN) #elif defined(ARCH_CPU_BIG_ENDIAN)
// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42. // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
@ -177,19 +177,9 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0); return AslrAddress(0);
} }
#else // !PA_BUILDFLAG(IS_AIX) && !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN) #else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
#if PA_BUILDFLAG(IS_LINUX)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46. // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t ASLRMask() {
return AslrMask(46);
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t ASLROffset() {
return AslrAddress(0);
}
#else
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(46); return AslrMask(46);
} }
@ -197,11 +187,9 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0); return AslrAddress(0);
} }
#endif #endif // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
#endif // !PA_BUILDFLAG(IS_AIX) && !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN) #elif defined(ARCH_CPU_S390X)
#elif PA_BUILDFLAG(PA_ARCH_CPU_S390X)
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
@ -213,7 +201,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0); return AslrAddress(0);
} }
#elif PA_BUILDFLAG(PA_ARCH_CPU_S390) #elif defined(ARCH_CPU_S390)
// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request. // a chance to fulfill the request.
@ -224,8 +212,8 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0); return AslrAddress(0);
} }
#else // !PA_BUILDFLAG(PA_ARCH_CPU_X86_64) && !PA_BUILDFLAG(PA_ARCH_CPU_PPC64) && #else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
// !PA_BUILDFLAG(PA_ARCH_CPU_S390X) && !PA_BUILDFLAG(PA_ARCH_CPU_S390) // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
// For all other POSIX variants, use 30 bits. // For all other POSIX variants, use 30 bits.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
@ -233,7 +221,7 @@ AslrMask(uintptr_t bits) {
return AslrMask(30); return AslrMask(30);
} }
#if PA_BUILDFLAG(IS_SOLARIS) #if BUILDFLAG(IS_SOLARIS)
// For our Solaris/illumos mmap hint, we pick a random address in the // For our Solaris/illumos mmap hint, we pick a random address in the
// bottom half of the top half of the address space (that is, the third // bottom half of the top half of the address space (that is, the third
@ -249,7 +237,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x80000000ULL); return AslrAddress(0x80000000ULL);
} }
#elif PA_BUILDFLAG(IS_AIX) #elif BUILDFLAG(IS_AIX)
// The range 0x30000000 - 0xD0000000 is available on AIX; choose the // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range. // upper range.
@ -257,7 +245,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x90000000ULL); return AslrAddress(0x90000000ULL);
} }
#else // !PA_BUILDFLAG(IS_SOLARIS) && !PA_BUILDFLAG(IS_AIX) #else // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
@ -267,14 +255,14 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x20000000ULL); return AslrAddress(0x20000000ULL);
} }
#endif // !PA_BUILDFLAG(IS_SOLARIS) && !PA_BUILDFLAG(IS_AIX) #endif // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
#endif // !PA_BUILDFLAG(PA_ARCH_CPU_X86_64) && !PA_BUILDFLAG(PA_ARCH_CPU_PPC64) && #endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
// !PA_BUILDFLAG(PA_ARCH_CPU_S390X) && !PA_BUILDFLAG(PA_ARCH_CPU_S390) // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
#endif // PA_BUILDFLAG(IS_POSIX) #endif // BUILDFLAG(IS_POSIX)
#elif PA_BUILDFLAG(PA_ARCH_CPU_32_BITS) #elif defined(ARCH_CPU_32_BITS)
// This is a good range on 32-bit Windows and Android (the only platforms on // This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
@ -290,7 +278,7 @@ AslrMask(uintptr_t bits) {
#error Please tell us about your exotic hardware! Sounds interesting. #error Please tell us about your exotic hardware! Sounds interesting.
#endif // PA_BUILDFLAG(PA_ARCH_CPU_32_BITS) #endif // defined(ARCH_CPU_32_BITS)
// clang-format on // clang-format on

View file

@ -7,8 +7,8 @@
#include <cstddef> #include <cstddef>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
namespace partition_alloc { namespace partition_alloc {

View file

@ -3,7 +3,6 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "partition_alloc/allocation_guard.h" #include "partition_alloc/allocation_guard.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h" #include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"

View file

@ -7,7 +7,7 @@
#include "partition_alloc/build_config.h" #include "partition_alloc/build_config.h"
#if PA_BUILDFLAG(PA_ARCH_CPU_ARM64) #if defined(ARCH_CPU_ARM64)
extern "C" { extern "C" {
/** /**
* A valid BTI function. Jumping to this funtion should not cause any problem in * A valid BTI function. Jumping to this funtion should not cause any problem in
@ -26,6 +26,6 @@ int64_t arm_bti_test_function_invalid_offset(int64_t);
**/ **/
void arm_bti_test_function_end(void); void arm_bti_test_function_end(void);
} }
#endif // PA_BUILDFLAG(PA_ARCH_CPU_ARM64) #endif // defined(ARCH_CPU_ARM64)
#endif // PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_ #endif // PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_

View file

@ -4,505 +4,7 @@
#ifndef PARTITION_ALLOC_BUILD_CONFIG_H_ #ifndef PARTITION_ALLOC_BUILD_CONFIG_H_
#define PARTITION_ALLOC_BUILD_CONFIG_H_ #define PARTITION_ALLOC_BUILD_CONFIG_H_
// This file is derived from chromium's //build/build_config.h. // TODO(https://crbug.com/41481467): Remove //build dependency.
// #include "build/build_config.h"
// Differences:
// - Only the definition used by partition_alloc are included.
// - The definition can only be consumed PA_BUILDFLAG(...) macro. This avoids
// silent failure when developers forget to include this file. This avoids the
// need of a PRESUBMIT.py to enforce the inclusion of this file.
//
//
// This files contains the following definition:
//
// Operating system:
// IS_IOS / IS_AIX / IS_ASMJS / IS_FREEBSD / IS_FUCHSIA / IS_LINUX / IS_MAC /
// IS_NACL / IS_NETBSD / IS_OPENBSD / IS_QNX / IS_SOLARIS / IS_WIN
//
// Operating system family:
// IS_APPLE / IS_BSD / IS_POSIX
//
// Compiler:
// PA_COMPILER_GCC / PA_COMPILER_MSVC
//
// Processor:
// PA_ARCH_CPU_ARM64 / PA_ARCH_CPU_ARMEL / PA_ARCH_CPU_BIG_ENDIAN /
// PA_ARCH_CPU_LITTLE_ENDIAN / PA_ARCH_CPU_MIPS / PA_ARCH_CPU_MIPS64 /
// PA_ARCH_CPU_MIPS64EL / PA_ARCH_CPU_MIPSEL / PA_ARCH_CPU_PPC64 /
// PA_ARCH_CPU_RISCV64 / PA_ARCH_CPU_S390 / PA_ARCH_CPU_S390X /
// PA_ARCH_CPU_X86 / PA_ARCH_CPU_X86_64
//
// Processor Family:
// PA_ARCH_CPU_32_BITS / PA_ARCH_CPU_64_BITS / PA_ARCH_CPU_ARM_FAMILY /
// PA_ARCH_CPU_LOONGPA_ARCH64 / PA_ARCH_CPU_PPC64_FAMILY /
// PA_ARCH_CPU_S390_FAMILY / PA_ARCH_CPU_X86_FAMILY
//
// Compiler:
// PA_COMPILER_GCC / PA_COMPILER_MSVC
//
// Standard library:
// PA_LIBC_GLIBC
// Definition of PA_BUILDFLAG(...) macro.
#include "partition_alloc/buildflag.h" // IWYU pragma: export
// Definition of PA_BUILDFLAG(IS_CHROMEOS).
#include "partition_alloc/buildflags.h" // IWYU pragma: export
// Clangd does not detect PA_BUILDFLAG_INTERNAL_* indirect usage, so mark the
// header as "always_keep" to avoid "unused include" warning.
//
// IWYU pragma: always_keep
// A set of macros to use for platform detection.
//
// Each target gets at most one PA_IS_* operating-system macro from the chain
// below. Android intentionally defines none (it is known only through
// PA_BUILDFLAG(IS_ANDROID)), and on Chrome OS the Linux branch deliberately
// skips PA_IS_LINUX (IS_CHROMEOS comes from buildflags.h).
#if defined(__native_client__)
// __native_client__ must be first, so that other IS_ defines are not set.
#define PA_IS_NACL
#elif PA_BUILDFLAG(IS_ANDROID)
// The IS_ANDROID PA_BUILDFLAG macro is defined in buildflags.h.
//
// PartitionAlloc's embedders (Chromium, Dawn, Pdfium, Skia) define different
// macros for Android builds: "ANDROID" or "SK_BUILD_FOR_ANDROID".
//
// To avoid relying on these external definitions, PartitionAlloc uses its own
// dedicated build flag.
#elif defined(__APPLE__)
// Only include TargetConditionals after testing ANDROID as some Android builds
// on the Mac have this header available and it's not needed unless the target
// is really an Apple platform.
#include <TargetConditionals.h>
#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
#define PA_IS_IOS
#else
#define PA_IS_MAC
#endif // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
#elif defined(__linux__)
#if !PA_BUILDFLAG(IS_CHROMEOS)
// Do not define PA_IS_LINUX on Chrome OS build.
// The IS_CHROMEOS PA_BUILDFLAG macro is defined in buildflags.h.
#define PA_IS_LINUX
#endif // !PA_BUILDFLAG(IS_CHROMEOS)
// Include a system header to pull in features.h for glibc/uclibc macros.
#include <assert.h>
#if defined(__GLIBC__) && !defined(__UCLIBC__)
// We really are using glibc, not uClibc pretending to be glibc.
#define PA_LIBC_GLIBC
#endif
#elif defined(_WIN32)
#define PA_IS_WIN
#elif defined(__Fuchsia__)
#define PA_IS_FUCHSIA
#elif defined(__FreeBSD__)
#define PA_IS_FREEBSD
#elif defined(__NetBSD__)
#define PA_IS_NETBSD
#elif defined(__OpenBSD__)
#define PA_IS_OPENBSD
#elif defined(__sun)
// Solaris and illumos toolchains define __sun.
#define PA_IS_SOLARIS
#elif defined(__QNXNTO__)
// QNX Neutrino.
#define PA_IS_QNX
#elif defined(_AIX)
#define PA_IS_AIX
#elif defined(__asmjs__) || defined(__wasm__)
#define PA_IS_ASMJS
#endif
// NOTE: Adding a new port? Please follow
// https://chromium.googlesource.com/chromium/src/+/main/docs/new_port_policy.md

// Operating-system family flags, derived from the per-OS flags above.
#if defined(PA_IS_MAC) || defined(PA_IS_IOS)
#define PA_IS_APPLE
#endif

#if defined(PA_IS_FREEBSD) || defined(PA_IS_NETBSD) || defined(PA_IS_OPENBSD)
#define PA_IS_BSD
#endif

// POSIX-like platforms. Android and Chrome OS are known only through
// PA_BUILDFLAG(...) (see buildflags.h), so they are tested with the macro
// rather than a PA_IS_* define. A previous `defined(PA_IS_CHROMEOS)` term was
// removed: PA_IS_CHROMEOS is never defined by this header, so that test was
// always false.
#if defined(PA_IS_AIX) || defined(PA_IS_ASMJS) || defined(PA_IS_FREEBSD) ||   \
    defined(PA_IS_IOS) || defined(PA_IS_LINUX) || defined(PA_IS_MAC) ||       \
    defined(PA_IS_NACL) || defined(PA_IS_NETBSD) || defined(PA_IS_OPENBSD) || \
    defined(PA_IS_QNX) || defined(PA_IS_SOLARIS) ||                           \
    PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_CHROMEOS)
#define PA_IS_POSIX
#endif
// Compiler detection. Note: clang masquerades as GCC on POSIX and as MSVC on
// Windows.
// Consequence: PA_COMPILER_GCC is also set for clang on POSIX targets, and
// PA_COMPILER_MSVC for clang-cl on Windows. The __GNUC__ test comes first, so
// a toolchain defining both macros is classified as GCC-compatible.
#if defined(__GNUC__)
#define PA_COMPILER_GCC
#elif defined(_MSC_VER)
#define PA_COMPILER_MSVC
#endif
// ------
// Processor architecture detection. For more info on what's defined, see:
// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
// http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
// x86-64 (MSVC: _M_X64, GCC/clang: __x86_64__).
#define PA_ARCH_CPU_X86_FAMILY
#define PA_ARCH_CPU_X86_64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(_M_IX86) || defined(__i386__)
// 32-bit x86.
#define PA_ARCH_CPU_X86_FAMILY
#define PA_ARCH_CPU_X86
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__s390x__)
// 64-bit IBM z/Architecture.
#define PA_ARCH_CPU_S390_FAMILY
#define PA_ARCH_CPU_S390X
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_BIG_ENDIAN
#elif defined(__s390__)
// 31/32-bit IBM S/390. Note this branch sets neither PA_ARCH_CPU_32_BITS nor
// PA_ARCH_CPU_64_BITS.
#define PA_ARCH_CPU_S390_FAMILY
#define PA_ARCH_CPU_S390
#define PA_ARCH_CPU_BIG_ENDIAN
#elif (defined(__PPC64__) || defined(__PPC__)) && defined(__BIG_ENDIAN__)
// Big-endian PowerPC. NOTE(review): 32-bit big-endian PPC (__PPC__ without
// __PPC64__) also takes this branch and is labeled PPC64/64-bit — confirm
// this is intended if 32-bit PPC targets matter.
#define PA_ARCH_CPU_PPC64_FAMILY
#define PA_ARCH_CPU_PPC64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_BIG_ENDIAN
#elif defined(__PPC64__)
// Little-endian 64-bit PowerPC (ppc64le).
#define PA_ARCH_CPU_PPC64_FAMILY
#define PA_ARCH_CPU_PPC64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__ARMEL__)
// 32-bit little-endian ARM.
#define PA_ARCH_CPU_ARM_FAMILY
#define PA_ARCH_CPU_ARMEL
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__aarch64__) || defined(_M_ARM64)
// 64-bit ARM (GCC/clang: __aarch64__, MSVC: _M_ARM64).
#define PA_ARCH_CPU_ARM_FAMILY
#define PA_ARCH_CPU_ARM64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__pnacl__) || defined(__asmjs__) || defined(__wasm__)
// Portable NaCl / asm.js / WebAssembly: treated as 32-bit little-endian.
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__MIPSEL__)
// Little-endian MIPS; __LP64__ distinguishes mips64el from mipsel.
#if defined(__LP64__)
#define PA_ARCH_CPU_MIPS64EL
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#else
#define PA_ARCH_CPU_MIPSEL
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#endif
#elif defined(__MIPSEB__)
// Big-endian MIPS; __LP64__ distinguishes mips64 from mips.
#if defined(__LP64__)
#define PA_ARCH_CPU_MIPS64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_BIG_ENDIAN
#else
#define PA_ARCH_CPU_MIPS
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_BIG_ENDIAN
#endif
#elif defined(__loongarch__)
// LoongArch; __loongarch_grlen is the general-purpose register width.
#define PA_ARCH_CPU_LITTLE_ENDIAN
#if __loongarch_grlen == 64
#define PA_ARCH_CPU_LOONGARCH64
#define PA_ARCH_CPU_64_BITS
#else
#define PA_ARCH_CPU_32_BITS
#endif
#elif defined(__riscv) && (__riscv_xlen == 64)
// 64-bit RISC-V.
#define PA_ARCH_CPU_RISCV64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#endif
// Unrecognized architectures (e.g. riscv32) fall through with no
// PA_ARCH_CPU_* macro defined; all corresponding PA_BUILDFLAG() values
// then evaluate to 0.
// The part below can be generated with the following script:
// https://paste.googleplex.com/6324671838683136
//
// It transforms the defines above into PA_BUILDFLAG_INTERNAL_* defines, then
// undefs the original defines.
//
// Usage of PA_BUILDFLAG(...) macro is better than raw define, because it avoids
// silent failure when developers forget to include this file.
// --- Processor family and architecture flags -------------------------------
// Each unit below freezes the detection result into a PA_BUILDFLAG_INTERNAL_*
// function-like macro ((1) or (0)) and undefs the raw define so it cannot be
// used directly.
#if defined(PA_ARCH_CPU_32_BITS)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_32_BITS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_32_BITS() (0)
#endif
#undef PA_ARCH_CPU_32_BITS
#if defined(PA_ARCH_CPU_64_BITS)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_64_BITS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_64_BITS() (0)
#endif
#undef PA_ARCH_CPU_64_BITS
#if defined(PA_ARCH_CPU_ARM64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARM64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARM64() (0)
#endif
#undef PA_ARCH_CPU_ARM64
#if defined(PA_ARCH_CPU_ARMEL)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARMEL() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARMEL() (0)
#endif
#undef PA_ARCH_CPU_ARMEL
#if defined(PA_ARCH_CPU_ARM_FAMILY)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARM_FAMILY() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARM_FAMILY() (0)
#endif
#undef PA_ARCH_CPU_ARM_FAMILY
#if defined(PA_ARCH_CPU_BIG_ENDIAN)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_BIG_ENDIAN() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_BIG_ENDIAN() (0)
#endif
#undef PA_ARCH_CPU_BIG_ENDIAN
#if defined(PA_ARCH_CPU_LITTLE_ENDIAN)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_LITTLE_ENDIAN() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_LITTLE_ENDIAN() (0)
#endif
#undef PA_ARCH_CPU_LITTLE_ENDIAN
#if defined(PA_ARCH_CPU_LOONGARCH64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_LOONGARCH64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_LOONGARCH64() (0)
#endif
#undef PA_ARCH_CPU_LOONGARCH64
#if defined(PA_ARCH_CPU_MIPS)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS() (0)
#endif
#undef PA_ARCH_CPU_MIPS
#if defined(PA_ARCH_CPU_MIPS64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS64() (0)
#endif
#undef PA_ARCH_CPU_MIPS64
#if defined(PA_ARCH_CPU_MIPS64EL)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS64EL() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS64EL() (0)
#endif
#undef PA_ARCH_CPU_MIPS64EL
#if defined(PA_ARCH_CPU_MIPSEL)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPSEL() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPSEL() (0)
#endif
#undef PA_ARCH_CPU_MIPSEL
#if defined(PA_ARCH_CPU_PPC64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_PPC64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_PPC64() (0)
#endif
#undef PA_ARCH_CPU_PPC64
#if defined(PA_ARCH_CPU_PPC64_FAMILY)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_PPC64_FAMILY() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_PPC64_FAMILY() (0)
#endif
#undef PA_ARCH_CPU_PPC64_FAMILY
#if defined(PA_ARCH_CPU_RISCV64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_RISCV64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_RISCV64() (0)
#endif
#undef PA_ARCH_CPU_RISCV64
#if defined(PA_ARCH_CPU_S390)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390() (0)
#endif
#undef PA_ARCH_CPU_S390
#if defined(PA_ARCH_CPU_S390_FAMILY)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390_FAMILY() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390_FAMILY() (0)
#endif
#undef PA_ARCH_CPU_S390_FAMILY
#if defined(PA_ARCH_CPU_S390X)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390X() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390X() (0)
#endif
#undef PA_ARCH_CPU_S390X
#if defined(PA_ARCH_CPU_X86)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86() (0)
#endif
#undef PA_ARCH_CPU_X86
#if defined(PA_ARCH_CPU_X86_64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86_64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86_64() (0)
#endif
#undef PA_ARCH_CPU_X86_64
#if defined(PA_ARCH_CPU_X86_FAMILY)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86_FAMILY() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86_FAMILY() (0)
#endif
#undef PA_ARCH_CPU_X86_FAMILY
// --- Compiler flags --------------------------------------------------------
#if defined(PA_COMPILER_GCC)
#define PA_BUILDFLAG_INTERNAL_PA_COMPILER_GCC() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_COMPILER_GCC() (0)
#endif
#undef PA_COMPILER_GCC
#if defined(PA_COMPILER_MSVC)
#define PA_BUILDFLAG_INTERNAL_PA_COMPILER_MSVC() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_COMPILER_MSVC() (0)
#endif
#undef PA_COMPILER_MSVC
// --- Operating system and OS-family flags ----------------------------------
// Note the generated names drop the "PA_" prefix here (PA_IS_AIX becomes
// PA_BUILDFLAG_INTERNAL_IS_AIX), matching PA_BUILDFLAG(IS_AIX) usage.
#if defined(PA_IS_AIX)
#define PA_BUILDFLAG_INTERNAL_IS_AIX() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_AIX() (0)
#endif
#undef PA_IS_AIX
#if defined(PA_IS_APPLE)
#define PA_BUILDFLAG_INTERNAL_IS_APPLE() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_APPLE() (0)
#endif
#undef PA_IS_APPLE
#if defined(PA_IS_ASMJS)
#define PA_BUILDFLAG_INTERNAL_IS_ASMJS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_ASMJS() (0)
#endif
#undef PA_IS_ASMJS
#if defined(PA_IS_BSD)
#define PA_BUILDFLAG_INTERNAL_IS_BSD() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_BSD() (0)
#endif
#undef PA_IS_BSD
#if defined(PA_IS_FREEBSD)
#define PA_BUILDFLAG_INTERNAL_IS_FREEBSD() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_FREEBSD() (0)
#endif
#undef PA_IS_FREEBSD
#if defined(PA_IS_FUCHSIA)
#define PA_BUILDFLAG_INTERNAL_IS_FUCHSIA() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_FUCHSIA() (0)
#endif
#undef PA_IS_FUCHSIA
#if defined(PA_IS_IOS)
#define PA_BUILDFLAG_INTERNAL_IS_IOS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_IOS() (0)
#endif
#undef PA_IS_IOS
#if defined(PA_IS_LINUX)
#define PA_BUILDFLAG_INTERNAL_IS_LINUX() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_LINUX() (0)
#endif
#undef PA_IS_LINUX
#if defined(PA_IS_MAC)
#define PA_BUILDFLAG_INTERNAL_IS_MAC() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_MAC() (0)
#endif
#undef PA_IS_MAC
#if defined(PA_IS_NACL)
#define PA_BUILDFLAG_INTERNAL_IS_NACL() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_NACL() (0)
#endif
#undef PA_IS_NACL
#if defined(PA_IS_NETBSD)
#define PA_BUILDFLAG_INTERNAL_IS_NETBSD() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_NETBSD() (0)
#endif
#undef PA_IS_NETBSD
#if defined(PA_IS_OPENBSD)
#define PA_BUILDFLAG_INTERNAL_IS_OPENBSD() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_OPENBSD() (0)
#endif
#undef PA_IS_OPENBSD
#if defined(PA_IS_POSIX)
#define PA_BUILDFLAG_INTERNAL_IS_POSIX() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_POSIX() (0)
#endif
#undef PA_IS_POSIX
#if defined(PA_IS_QNX)
#define PA_BUILDFLAG_INTERNAL_IS_QNX() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_QNX() (0)
#endif
#undef PA_IS_QNX
#if defined(PA_IS_SOLARIS)
#define PA_BUILDFLAG_INTERNAL_IS_SOLARIS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_SOLARIS() (0)
#endif
#undef PA_IS_SOLARIS
#if defined(PA_IS_WIN)
#define PA_BUILDFLAG_INTERNAL_IS_WIN() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_WIN() (0)
#endif
#undef PA_IS_WIN
// --- Standard-library flag -------------------------------------------------
#if defined(PA_LIBC_GLIBC)
#define PA_BUILDFLAG_INTERNAL_PA_LIBC_GLIBC() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_LIBC_GLIBC() (0)
#endif
#undef PA_LIBC_GLIBC
#endif // PARTITION_ALLOC_BUILD_CONFIG_H_ #endif // PARTITION_ALLOC_BUILD_CONFIG_H_

View file

@ -116,6 +116,6 @@ template("pa_buildflag_header") {
"visibility", "visibility",
]) ])
public_deps = [ "${_current_dir}:buildflag_macro" ] public_deps = [ "${_current_dir}:build_config" ]
} }
} }

View file

@ -3,8 +3,7 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "partition_alloc/compressed_pointer.h" #include "partition_alloc/compressed_pointer.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION) #if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)

View file

@ -5,14 +5,14 @@
#ifndef PARTITION_ALLOC_COMPRESSED_POINTER_H_ #ifndef PARTITION_ALLOC_COMPRESSED_POINTER_H_
#define PARTITION_ALLOC_COMPRESSED_POINTER_H_ #define PARTITION_ALLOC_COMPRESSED_POINTER_H_
#include <bit>
#include <climits> #include <climits>
#include <type_traits> #include <type_traits>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_address_space.h" #include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION) #if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
@ -78,7 +78,7 @@ constexpr bool IsDecayedSame =
class CompressedPointerBaseGlobal final { class CompressedPointerBaseGlobal final {
public: public:
static constexpr size_t kUsefulBits = static constexpr size_t kUsefulBits =
base::bits::CountrZero(PartitionAddressSpace::CorePoolsSize()); std::countr_zero(PartitionAddressSpace::CorePoolsSize());
static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT); static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT);
static constexpr size_t kBitsToShift = static constexpr size_t kBitsToShift =
kUsefulBits - sizeof(uint32_t) * CHAR_BIT; kUsefulBits - sizeof(uint32_t) * CHAR_BIT;
@ -102,11 +102,11 @@ class CompressedPointerBaseGlobal final {
static constexpr uintptr_t kUsefulBitsMask = static constexpr uintptr_t kUsefulBitsMask =
PartitionAddressSpace::CorePoolsSize() - 1; PartitionAddressSpace::CorePoolsSize() - 1;
PA_CONSTINIT static union alignas(kPartitionCachelineSize) static union alignas(kPartitionCachelineSize)
PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base { PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base {
uintptr_t base; uintptr_t base;
char cache_line[kPartitionCachelineSize]; char cache_line[kPartitionCachelineSize];
} g_base_; } g_base_ PA_CONSTINIT;
PA_ALWAYS_INLINE static bool IsBaseConsistent() { PA_ALWAYS_INLINE static bool IsBaseConsistent() {
return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask); return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask);
@ -232,7 +232,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
static constexpr size_t kMinimalRequiredAlignment = 8; static constexpr size_t kMinimalRequiredAlignment = 8;
static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment); static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
#if PA_BUILDFLAG(DCHECKS_ARE_ON) #if PA_BUILDFLAG(PA_DCHECK_IS_ON)
PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment == PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment ==
0); 0);
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet()); PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
@ -243,7 +243,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
PA_DCHECK(!ptr || PA_DCHECK(!ptr ||
(base & kCorePoolsBaseMask) == (base & kCorePoolsBaseMask) ==
(reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask)); (reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask));
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON) #endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
const auto uptr = reinterpret_cast<uintptr_t>(ptr); const auto uptr = reinterpret_cast<uintptr_t>(ptr);
// Shift the pointer and truncate. // Shift the pointer and truncate.

View file

@ -1,17 +0,0 @@
digraph {
layout = "circo"
dpi = 156
node[shape=box]
crt[shape=circle, label="(not yet fully\ninitialized)\nWindows\nCRT"]
malloc[label="malloc()"]
crt->malloc[label="calls"]
malloc->PartitionAlloc[label="intercepted\nby"]
static_local[label="nontrivial\nfunction-local\nstatic"]
PartitionAlloc->static_local[label="initializes"]
lock[label="critical section\n(implicit lock)"]
static_local->lock[label="enters"]
lock->crt[label="attempts\nre-entry\ninto", style=dotted]
}

View file

@ -9,14 +9,15 @@
#include <cstdint> #include <cstdint>
#include "partition_alloc/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap.h" #include "partition_alloc/freeslot_bitmap.h"
#include "partition_alloc/partition_alloc-inl.h" #include "partition_alloc/partition_alloc-inl.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#if !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN) #if !defined(ARCH_CPU_BIG_ENDIAN)
#include "partition_alloc/reverse_bytes.h" #include "partition_alloc/reverse_bytes.h"
#endif #endif
@ -56,7 +57,7 @@ class EncodedFreelistPtr {
// corrupt a freelist pointer, partial pointer overwrite attacks are // corrupt a freelist pointer, partial pointer overwrite attacks are
// thwarted. // thwarted.
// For big endian, similar guarantees are arrived at with a negation. // For big endian, similar guarantees are arrived at with a negation.
#if PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN) #if defined(ARCH_CPU_BIG_ENDIAN)
uintptr_t transformed = ~address; uintptr_t transformed = ~address;
#else #else
uintptr_t transformed = ReverseBytes(address); uintptr_t transformed = ReverseBytes(address);
@ -176,16 +177,16 @@ class EncodedNextFreelistEntry {
// SetNext() is either called on the freelist head, when provisioning new // SetNext() is either called on the freelist head, when provisioning new
// slots, or when GetNext() has been called before, no need to pass the // slots, or when GetNext() has been called before, no need to pass the
// size. // size.
#if PA_BUILDFLAG(DCHECKS_ARE_ON) #if PA_BUILDFLAG(PA_DCHECK_IS_ON)
// Regular freelists always point to an entry within the same super page. // Regular freelists always point to an entry within the same super page.
// //
// This is most likely a PartitionAlloc bug if this triggers. // This is most likely a PartitionAlloc bug if this triggers.
if (entry && (SlotStartPtr2Addr(this) & kSuperPageBaseMask) != if (PA_UNLIKELY(entry &&
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask)) (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
[[unlikely]] { (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
FreelistCorruptionDetected(0); FreelistCorruptionDetected(0);
} }
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON) #endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
encoded_next_ = EncodedFreelistPtr(entry); encoded_next_ = EncodedFreelistPtr(entry);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) #if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
@ -220,7 +221,7 @@ class EncodedNextFreelistEntry {
} }
auto* ret = encoded_next_.Decode(); auto* ret = encoded_next_.Decode();
if (!IsWellFormed<for_thread_cache>(this, ret)) [[unlikely]] { if (PA_UNLIKELY(!IsWellFormed<for_thread_cache>(this, ret))) {
if constexpr (crash_on_corruption) { if constexpr (crash_on_corruption) {
// Put the corrupted data on the stack, it may give us more information // Put the corrupted data on the stack, it may give us more information
// about what kind of corruption that was. // about what kind of corruption that was.

View file

@ -4,7 +4,7 @@
#include "partition_alloc/extended_api.h" #include "partition_alloc/extended_api.h"
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h" #include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "partition_alloc/thread_cache.h" #include "partition_alloc/thread_cache.h"
@ -77,17 +77,7 @@ ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
// Replace ThreadCache's PartitionRoot. // Replace ThreadCache's PartitionRoot.
ThreadCache::SwapForTesting(root_); ThreadCache::SwapForTesting(root_);
} else { } else {
bool regular_was_disabled = !regular_was_enabled_; if (!regular_was_enabled_) {
#if PA_BUILDFLAG(IS_WIN)
// ThreadCache may be tombstone because of the previous test. In the
// case, we have to remove tombstone and re-create ThreadCache for
// a new test.
if (ThreadCache::IsTombstone(ThreadCache::Get())) {
ThreadCache::RemoveTombstoneForTesting();
regular_was_disabled = true;
}
#endif
if (regular_was_disabled) {
EnablePartitionAllocThreadCacheForRootIfDisabled(root_); EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_); ThreadCache::SwapForTesting(root_);
} }
@ -99,7 +89,6 @@ ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PA_CHECK(ThreadCache::Get()); PA_CHECK(ThreadCache::Get());
PA_CHECK(!ThreadCache::IsTombstone(ThreadCache::Get()));
} }
ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() { ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {

View file

@ -5,7 +5,7 @@
#ifndef PARTITION_ALLOC_EXTENDED_API_H_ #ifndef PARTITION_ALLOC_EXTENDED_API_H_
#define PARTITION_ALLOC_EXTENDED_API_H_ #define PARTITION_ALLOC_EXTENDED_API_H_
#include "partition_alloc/buildflags.h" #include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_root.h" #include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h" #include "partition_alloc/partition_stats.h"
#include "partition_alloc/thread_cache.h" #include "partition_alloc/thread_cache.h"

View file

@ -46,6 +46,12 @@ constexpr inline IfEnum<EnumType, bool> ContainsFlags(EnumType superset,
return (superset & subset) == subset; return (superset & subset) == subset;
} }
// Removes flags `target` from `from`.
template <typename EnumType>
constexpr inline IfEnum<EnumType> RemoveFlags(EnumType from, EnumType target) {
return from & ~target;
}
// A macro to define binary arithmetic over `EnumType`. // A macro to define binary arithmetic over `EnumType`.
// Use inside `namespace partition_alloc::internal`. // Use inside `namespace partition_alloc::internal`.
#define PA_DEFINE_OPERATORS_FOR_FLAGS(EnumType) \ #define PA_DEFINE_OPERATORS_FOR_FLAGS(EnumType) \

View file

@ -9,10 +9,10 @@
#include <cstdint> #include <cstdint>
#include <utility> #include <utility>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap_constants.h" #include "partition_alloc/freeslot_bitmap_constants.h"
#include "partition_alloc/partition_alloc_base/bits.h" #include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#if PA_BUILDFLAG(USE_FREESLOT_BITMAP) #if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
@ -92,7 +92,7 @@ PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
*cell &= ~CellWithAOne(bit_index); *cell &= ~CellWithAOne(bit_index);
} }
#if PA_BUILDFLAG(DCHECKS_ARE_ON) #if PA_BUILDFLAG(PA_DCHECK_IS_ON)
// Checks if the cells that are meant to contain only unset bits are really 0. // Checks if the cells that are meant to contain only unset bits are really 0.
auto [begin_cell, begin_bit_index] = auto [begin_cell, begin_bit_index] =
GetFreeSlotBitmapCellPtrAndBitIndex(begin_addr); GetFreeSlotBitmapCellPtrAndBitIndex(begin_addr);
@ -131,7 +131,7 @@ PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
for (FreeSlotBitmapCellType* cell = begin_cell; cell < end_cell; ++cell) { for (FreeSlotBitmapCellType* cell = begin_cell; cell < end_cell; ++cell) {
PA_DCHECK(*cell == 0u); PA_DCHECK(*cell == 0u);
} }
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON) #endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
} }
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View file

@ -7,9 +7,9 @@
#include <cstdint> #include <cstdint>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/bits.h" #include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h" #include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/reservation_offset_table.h" #include "partition_alloc/reservation_offset_table.h"

View file

@ -19,21 +19,17 @@
namespace partition_alloc { namespace partition_alloc {
namespace {
PartitionOptions GwpAsanPartitionOptions() {
PartitionOptions options;
options.backup_ref_ptr = PartitionOptions::kEnabled;
return options;
}
} // namespace
// static // static
void* GwpAsanSupport::MapRegion(size_t slot_count, void* GwpAsanSupport::MapRegion(size_t slot_count,
std::vector<uint16_t>& free_list) { std::vector<uint16_t>& free_list) {
PA_CHECK(slot_count > 0); PA_CHECK(slot_count > 0);
static internal::base::NoDestructor<PartitionRoot> root( constexpr PartitionOptions kConfig = []() {
GwpAsanPartitionOptions()); PartitionOptions opts;
opts.backup_ref_ptr = PartitionOptions::kEnabled;
return opts;
}();
static internal::base::NoDestructor<PartitionRoot> root(kConfig);
const size_t kSlotSize = 2 * internal::SystemPageSize(); const size_t kSlotSize = 2 * internal::SystemPageSize();
uint16_t bucket_index = PartitionRoot::SizeToBucketIndex( uint16_t bucket_index = PartitionRoot::SizeToBucketIndex(
@ -42,7 +38,8 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
const size_t kSuperPagePayloadStartOffset = const size_t kSuperPagePayloadStartOffset =
internal::SuperPagePayloadStartOffset( internal::SuperPagePayloadStartOffset(
/* is_managed_by_normal_buckets = */ true); /* is_managed_by_normal_buckets = */ true,
/* with_quarantine = */ false);
PA_CHECK(kSuperPagePayloadStartOffset % kSlotSize == 0); PA_CHECK(kSuperPagePayloadStartOffset % kSlotSize == 0);
const size_t kSuperPageGwpAsanSlotAreaBeginOffset = const size_t kSuperPageGwpAsanSlotAreaBeginOffset =
kSuperPagePayloadStartOffset; kSuperPagePayloadStartOffset;
@ -67,14 +64,14 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
return nullptr; return nullptr;
} }
#if PA_BUILDFLAG(PA_ARCH_CPU_64_BITS) #if defined(ARCH_CPU_64_BITS)
// Mapping the GWP-ASan region in to the lower 32-bits of address space // Mapping the GWP-ASan region in to the lower 32-bits of address space
// makes it much more likely that a bad pointer dereference points into // makes it much more likely that a bad pointer dereference points into
// our region and triggers a false positive report. We rely on the fact // our region and triggers a false positive report. We rely on the fact
// that PA address pools are never allocated in the first 4GB due to // that PA address pools are never allocated in the first 4GB due to
// their alignment requirements. // their alignment requirements.
PA_CHECK(super_page_span_start >= (1ULL << 32)); PA_CHECK(super_page_span_start >= (1ULL << 32));
#endif // PA_BUILDFLAG(PA_ARCH_CPU_64_BITS) #endif // defined(ARCH_CPU_64_BITS)
uintptr_t super_page_span_end = uintptr_t super_page_span_end =
super_page_span_start + super_page_count * kSuperPageSize; super_page_span_start + super_page_count * kSuperPageSize;
@ -93,16 +90,15 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
partition_page_idx += bucket->get_pages_per_slot_span()) { partition_page_idx += bucket->get_pages_per_slot_span()) {
auto* slot_span_metadata = auto* slot_span_metadata =
&page_metadata[partition_page_idx].slot_span_metadata; &page_metadata[partition_page_idx].slot_span_metadata;
bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata, root.get()); bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata);
auto slot_span_start = auto slot_span_start =
internal::SlotSpanMetadata<internal::MetadataKind::kReadOnly>:: internal::SlotSpanMetadata::ToSlotSpanStart(slot_span_metadata);
ToSlotSpanStart(slot_span_metadata);
for (uintptr_t slot_idx = 0; slot_idx < kSlotsPerSlotSpan; ++slot_idx) { for (uintptr_t slot_idx = 0; slot_idx < kSlotsPerSlotSpan; ++slot_idx) {
auto slot_start = slot_span_start + slot_idx * kSlotSize; auto slot_start = slot_span_start + slot_idx * kSlotSize;
PartitionRoot::InSlotMetadataPointerFromSlotStartAndSize(slot_start, PartitionRoot::InSlotMetadataPointerFromSlotStartAndSize(slot_start,
kSlotSize) kSlotSize)
->InitializeForGwpAsan(); ->InitalizeForGwpAsan();
size_t global_slot_idx = (slot_start - super_page_span_start - size_t global_slot_idx = (slot_start - super_page_span_start -
kSuperPageGwpAsanSlotAreaBeginOffset) / kSuperPageGwpAsanSlotAreaBeginOffset) /
kSlotSize; kSlotSize;

View file

@ -5,8 +5,8 @@
#ifndef PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_ #ifndef PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
#define PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_ #define PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT) #if PA_BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)

View file

@ -6,23 +6,28 @@
#define PARTITION_ALLOC_IN_SLOT_METADATA_H_ #define PARTITION_ALLOC_IN_SLOT_METADATA_H_
#include <atomic> #include <atomic>
#include <bit>
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
#include <limits> #include <limits>
#include "partition_alloc/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/dangling_raw_ptr_checks.h" #include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h" #include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h" #include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/tagging.h" #include "partition_alloc/tagging.h"
#if BUILDFLAG(IS_APPLE)
#include "partition_alloc/partition_alloc_base/bits.h"
#endif // BUILDFLAG(IS_APPLE)
namespace partition_alloc::internal { namespace partition_alloc::internal {
// Aligns up (on 8B boundary) `in_slot_metadata_size` on Mac as a workaround for // Aligns up (on 8B boundary) `in_slot_metadata_size` on Mac as a workaround for
@ -35,17 +40,18 @@ namespace partition_alloc::internal {
// Placed outside `PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)` // Placed outside `PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)`
// intentionally to accommodate usage in contexts also outside // intentionally to accommodate usage in contexts also outside
// this gating. // this gating.
PA_ALWAYS_INLINE constexpr size_t AlignUpInSlotMetadataSizeForApple( PA_ALWAYS_INLINE size_t
size_t in_slot_metadata_size) { AlignUpInSlotMetadataSizeForApple(size_t in_slot_metadata_size) {
#if PA_BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
return base::bits::AlignUp<size_t>(in_slot_metadata_size, 8); return internal::base::bits::AlignUp<size_t>(in_slot_metadata_size, 8);
#else #else
return in_slot_metadata_size; return in_slot_metadata_size;
#endif // PA_BUILDFLAG(IS_APPLE) #endif // BUILDFLAG(IS_APPLE)
} }
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
namespace {
// Utility functions to define a bit field. // Utility functions to define a bit field.
template <typename CountType> template <typename CountType>
static constexpr CountType SafeShift(CountType lhs, int rhs) { static constexpr CountType SafeShift(CountType lhs, int rhs) {
@ -63,6 +69,7 @@ struct BitField {
~(SafeShift<CountType>(1, lo) - 1); ~(SafeShift<CountType>(1, lo) - 1);
} }
}; };
} // namespace
// Special-purpose atomic bit field class mainly used by RawPtrBackupRefImpl. // Special-purpose atomic bit field class mainly used by RawPtrBackupRefImpl.
// Formerly known as `PartitionRefCount`, but renamed to support usage that is // Formerly known as `PartitionRefCount`, but renamed to support usage that is
@ -164,9 +171,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
std::numeric_limits<CountType>::max()); std::numeric_limits<CountType>::max());
static constexpr auto kPtrInc = static constexpr auto kPtrInc =
SafeShift<CountType>(1, base::bits::CountrZero(kPtrCountMask)); SafeShift<CountType>(1, std::countr_zero(kPtrCountMask));
static constexpr auto kUnprotectedPtrInc = static constexpr auto kUnprotectedPtrInc =
SafeShift<CountType>(1, base::bits::CountrZero(kUnprotectedPtrCountMask)); SafeShift<CountType>(1, std::countr_zero(kUnprotectedPtrCountMask));
PA_ALWAYS_INLINE explicit InSlotMetadata(bool needs_mac11_malloc_size_hack); PA_ALWAYS_INLINE explicit InSlotMetadata(bool needs_mac11_malloc_size_hack);
@ -213,8 +220,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) #if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// If a dangling raw_ptr<> was detected, report it. // If a dangling raw_ptr<> was detected, report it.
if ((old_count & kDanglingRawPtrDetectedBit) == kDanglingRawPtrDetectedBit) if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
[[unlikely]] { kDanglingRawPtrDetectedBit)) {
partition_alloc::internal::DanglingRawPtrReleased( partition_alloc::internal::DanglingRawPtrReleased(
reinterpret_cast<uintptr_t>(this)); reinterpret_cast<uintptr_t>(this));
} }
@ -254,21 +261,18 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
PA_ALWAYS_INLINE bool ReleaseFromAllocator() { PA_ALWAYS_INLINE bool ReleaseFromAllocator() {
CheckCookieIfSupported(); CheckCookieIfSupported();
// TODO(bartekn): Make the double-free check more effective. Once freed, the
// in-slot metadata is overwritten by an encoded freelist-next pointer.
CountType old_count = CountType old_count =
count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release); count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
// If kMemoryHeldByAllocatorBit was already unset, it indicates a double if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
// free, but it could also be caused by a memory corruption. Note, this
// detection mechanism isn't perfect, because in-slot-metadata can be
// overwritten by the freelist pointer (or its shadow) for very small slots,
// thus masking the error away.
if (!(old_count & kMemoryHeldByAllocatorBit)) [[unlikely]] {
DoubleFreeOrCorruptionDetected(old_count); DoubleFreeOrCorruptionDetected(old_count);
} }
// Release memory when no raw_ptr<> exists anymore: // Release memory when no raw_ptr<> exists anymore:
static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask; static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
if ((old_count & mask) == 0) [[likely]] { if (PA_LIKELY((old_count & mask) == 0)) {
std::atomic_thread_fence(std::memory_order_acquire); std::atomic_thread_fence(std::memory_order_acquire);
// The allocation is about to get freed, so clear the cookie. // The allocation is about to get freed, so clear the cookie.
ClearCookieIfSupported(); ClearCookieIfSupported();
@ -315,8 +319,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// Request to quarantine this allocation. The request might be ignored if // Request to quarantine this allocation. The request might be ignored if
// the allocation is already freed. // the allocation is already freed.
// TODO(crbug.com/329027914) This is an unused function. Start using it in
// tests and/or in production code.
PA_ALWAYS_INLINE void SetQuarantineRequest() { PA_ALWAYS_INLINE void SetQuarantineRequest() {
CountType old_count = CountType old_count =
count_.fetch_or(kRequestQuarantineBit, std::memory_order_relaxed); count_.fetch_or(kRequestQuarantineBit, std::memory_order_relaxed);
@ -325,8 +327,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
} }
// Get and clear out quarantine request. // Get and clear out quarantine request.
// TODO(crbug.com/329027914) This is an unused function. Start using it in
// tests and/or in production code.
PA_ALWAYS_INLINE bool PopQuarantineRequest() { PA_ALWAYS_INLINE bool PopQuarantineRequest() {
CountType old_count = CountType old_count =
count_.fetch_and(~kRequestQuarantineBit, std::memory_order_acq_rel); count_.fetch_and(~kRequestQuarantineBit, std::memory_order_acq_rel);
@ -339,7 +339,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// make sure the `raw_ptr<T>` release operation will never attempt to call the // make sure the `raw_ptr<T>` release operation will never attempt to call the
// PA `free` on such a slot. GWP-ASan takes the extra reference into account // PA `free` on such a slot. GWP-ASan takes the extra reference into account
// when determining whether the slot can be reused. // when determining whether the slot can be reused.
PA_ALWAYS_INLINE void InitializeForGwpAsan() { PA_ALWAYS_INLINE void InitalizeForGwpAsan() {
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE) #if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
brp_cookie_ = CalculateCookie(); brp_cookie_ = CalculateCookie();
#endif #endif
@ -371,7 +371,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) #if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// The `kPtrCountMask` counts the number of raw_ptr<T>. It is expected to be // The `kPtrCountMask` counts the number of raw_ptr<T>. It is expected to be
// zero when there are no unexpected dangling pointers. // zero when there are no unexpected dangling pointers.
if ((count & kPtrCountMask) == 0) [[likely]] { if (PA_LIKELY((count & kPtrCountMask) == 0)) {
return; return;
} }
@ -406,9 +406,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// - A raw_ptr<T, DisableDanglingPtrDetection> // - A raw_ptr<T, DisableDanglingPtrDetection>
// //
// Assuming this raw_ptr is not dangling, the memory must still be held at // Assuming this raw_ptr is not dangling, the memory must still be held at
// least by the allocator, so this is `[[likely]]`. // least by the allocator, so this is PA_LIKELY true.
if ((count & (kMemoryHeldByAllocatorBit | kPtrCountMask | if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
kUnprotectedPtrCountMask))) [[likely]] { kUnprotectedPtrCountMask)))) {
return false; // Do not release the memory. return false; // Do not release the memory.
} }
@ -544,10 +544,10 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
// the InSlotMetadata object out-of-line in this case, specifically in a // the InSlotMetadata object out-of-line in this case, specifically in a
// special table after the super page metadata (see InSlotMetadataTable in // special table after the super page metadata (see InSlotMetadataTable in
// partition_alloc_constants.h). // partition_alloc_constants.h).
if (slot_start & SystemPageOffsetMask()) [[likely]] { if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
uintptr_t refcount_address = uintptr_t refcount_address =
slot_start + slot_size - sizeof(InSlotMetadata); slot_start + slot_size - sizeof(InSlotMetadata);
#if PA_BUILDFLAG(DCHECKS_ARE_ON) || \ #if PA_BUILDFLAG(PA_DCHECK_IS_ON) || \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS) PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_CHECK(refcount_address % alignof(InSlotMetadata) == 0); PA_CHECK(refcount_address % alignof(InSlotMetadata) == 0);
#endif #endif
@ -560,7 +560,7 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
(slot_start & kSuperPageBaseMask) + SystemPageSize() * 2); (slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift()) size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
<< GetInSlotMetadataIndexMultiplierShift(); << GetInSlotMetadataIndexMultiplierShift();
#if PA_BUILDFLAG(DCHECKS_ARE_ON) || \ #if PA_BUILDFLAG(PA_DCHECK_IS_ON) || \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS) PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_CHECK(sizeof(InSlotMetadata) * index <= SystemPageSize()); PA_CHECK(sizeof(InSlotMetadata) * index <= SystemPageSize());
#endif #endif
@ -572,7 +572,7 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
static inline constexpr size_t kInSlotMetadataSizeAdjustment = static inline constexpr size_t kInSlotMetadataSizeAdjustment =
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
AlignUpInSlotMetadataSizeForApple(sizeof(InSlotMetadata)); sizeof(InSlotMetadata);
#else #else
0ul; 0ul;
#endif #endif

View file

@ -7,7 +7,7 @@
namespace partition_alloc::internal { namespace partition_alloc::internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_COMPONENT_EXPORT(PARTITION_ALLOC)
PartitionRoot& InternalAllocatorRoot() { PartitionRoot& InternalAllocatorRoot() {
static internal::base::NoDestructor<PartitionRoot> allocator([] { static internal::base::NoDestructor<PartitionRoot> allocator([]() {
// Disable features using the internal root to avoid reentrancy issue. // Disable features using the internal root to avoid reentrancy issue.
PartitionOptions opts; PartitionOptions opts;
opts.thread_cache = PartitionOptions::kDisabled; opts.thread_cache = PartitionOptions::kDisabled;
@ -37,4 +37,8 @@ void InternalPartitionAllocated::operator delete(void* ptr, std::align_val_t) {
InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr); InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
} }
// A deleter for `std::unique_ptr<T>`.
void InternalPartitionDeleter::operator()(void* ptr) const {
InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View file

@ -26,7 +26,7 @@ PartitionRoot& InternalAllocatorRoot();
// A class that meets C++ named requirements, Allocator. // A class that meets C++ named requirements, Allocator.
template <typename T> template <typename T>
typename InternalAllocator<T>::value_type* InternalAllocator<T>::allocate( InternalAllocator<T>::value_type* InternalAllocator<T>::allocate(
std::size_t count) { std::size_t count) {
PA_CHECK(count <= PA_CHECK(count <=
std::numeric_limits<std::size_t>::max() / sizeof(value_type)); std::numeric_limits<std::size_t>::max() / sizeof(value_type));

View file

@ -71,9 +71,8 @@ template <typename T>
void DestroyAtInternalPartition(T* ptr); void DestroyAtInternalPartition(T* ptr);
// A deleter for `std::unique_ptr<T>`. // A deleter for `std::unique_ptr<T>`.
template <typename T> struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) InternalPartitionDeleter final {
struct InternalPartitionDeleter final { void operator()(void* ptr) const;
void operator()(T* ptr) const { DestroyAtInternalPartition(ptr); }
}; };
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View file

@ -9,51 +9,19 @@
#include "partition_alloc/partition_root.h" #include "partition_alloc/partition_root.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
namespace {
// Utility classes to lock only if a condition is met. // An utility to lock only if a condition is met.
class PA_SCOPED_LOCKABLE ConditionalScopedGuard {
template <>
class PA_SCOPED_LOCKABLE
LightweightQuarantineBranch::CompileTimeConditionalScopedGuard<
LightweightQuarantineBranch::LockRequired::kNotRequired> {
public: public:
PA_ALWAYS_INLINE explicit CompileTimeConditionalScopedGuard(Lock& lock) PA_ALWAYS_INLINE ConditionalScopedGuard(bool condition, Lock& lock)
PA_EXCLUSIVE_LOCK_FUNCTION(lock) {}
// For some reason, defaulting this causes a thread safety annotation failure.
PA_ALWAYS_INLINE
~CompileTimeConditionalScopedGuard() // NOLINT(modernize-use-equals-default)
PA_UNLOCK_FUNCTION() {}
};
template <>
class PA_SCOPED_LOCKABLE
LightweightQuarantineBranch::CompileTimeConditionalScopedGuard<
LightweightQuarantineBranch::LockRequired::kRequired> {
public:
PA_ALWAYS_INLINE explicit CompileTimeConditionalScopedGuard(Lock& lock)
PA_EXCLUSIVE_LOCK_FUNCTION(lock)
: lock_(lock) {
lock_.Acquire();
}
PA_ALWAYS_INLINE ~CompileTimeConditionalScopedGuard() PA_UNLOCK_FUNCTION() {
lock_.Release();
}
private:
Lock& lock_;
};
class PA_SCOPED_LOCKABLE
LightweightQuarantineBranch::RuntimeConditionalScopedGuard {
public:
PA_ALWAYS_INLINE RuntimeConditionalScopedGuard(bool condition, Lock& lock)
PA_EXCLUSIVE_LOCK_FUNCTION(lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock)
: condition_(condition), lock_(lock) { : condition_(condition), lock_(lock) {
if (condition_) { if (condition_) {
lock_.Acquire(); lock_.Acquire();
} }
} }
PA_ALWAYS_INLINE ~RuntimeConditionalScopedGuard() PA_UNLOCK_FUNCTION() { PA_ALWAYS_INLINE ~ConditionalScopedGuard() PA_UNLOCK_FUNCTION() {
if (condition_) { if (condition_) {
lock_.Release(); lock_.Release();
} }
@ -64,6 +32,8 @@ class PA_SCOPED_LOCKABLE
Lock& lock_; Lock& lock_;
}; };
} // namespace
LightweightQuarantineBranch LightweightQuarantineRoot::CreateBranch( LightweightQuarantineBranch LightweightQuarantineRoot::CreateBranch(
const LightweightQuarantineBranchConfig& config) { const LightweightQuarantineBranchConfig& config) {
return LightweightQuarantineBranch(*this, config); return LightweightQuarantineBranch(*this, config);
@ -74,12 +44,7 @@ LightweightQuarantineBranch::LightweightQuarantineBranch(
const LightweightQuarantineBranchConfig& config) const LightweightQuarantineBranchConfig& config)
: root_(root), : root_(root),
lock_required_(config.lock_required), lock_required_(config.lock_required),
branch_capacity_in_bytes_(config.branch_capacity_in_bytes) { branch_capacity_in_bytes_(config.branch_capacity_in_bytes) {}
if (lock_required_) {
to_be_freed_working_memory_ =
ConstructAtInternalPartition<ToBeFreedArray>();
}
}
LightweightQuarantineBranch::LightweightQuarantineBranch( LightweightQuarantineBranch::LightweightQuarantineBranch(
LightweightQuarantineBranch&& b) LightweightQuarantineBranch&& b)
@ -90,25 +55,57 @@ LightweightQuarantineBranch::LightweightQuarantineBranch(
branch_capacity_in_bytes_( branch_capacity_in_bytes_(
b.branch_capacity_in_bytes_.load(std::memory_order_relaxed)) { b.branch_capacity_in_bytes_.load(std::memory_order_relaxed)) {
b.branch_size_in_bytes_ = 0; b.branch_size_in_bytes_ = 0;
if (lock_required_) {
to_be_freed_working_memory_.store(b.to_be_freed_working_memory_.exchange(
nullptr, std::memory_order_relaxed),
std::memory_order_relaxed);
}
} }
LightweightQuarantineBranch::~LightweightQuarantineBranch() { LightweightQuarantineBranch::~LightweightQuarantineBranch() {
Purge(); Purge();
if (lock_required_) { slots_.clear();
DestroyAtInternalPartition(to_be_freed_working_memory_.exchange( }
nullptr, std::memory_order_relaxed));
bool LightweightQuarantineBranch::Quarantine(void* object,
SlotSpanMetadata* slot_span,
uintptr_t slot_start) {
const auto usable_size = root_.allocator_root_.GetSlotUsableSize(slot_span);
const size_t capacity_in_bytes =
branch_capacity_in_bytes_.load(std::memory_order_relaxed);
{
ConditionalScopedGuard guard(lock_required_, lock_);
if (capacity_in_bytes < usable_size) {
// Even if this branch dequarantines all entries held by it, this entry
// cannot fit within the capacity.
root_.allocator_root_.FreeNoHooksImmediate(object, slot_span, slot_start);
root_.quarantine_miss_count_.fetch_add(1u, std::memory_order_relaxed);
return false;
}
// Dequarantine some entries as required.
PurgeInternal(capacity_in_bytes - usable_size);
// Put the entry onto the list.
branch_size_in_bytes_ += usable_size;
slots_.emplace_back(slot_start, usable_size);
// Swap randomly so that the quarantine list remain shuffled.
// This is not uniformly random, but sufficiently random.
const size_t random_index = random_.RandUint32() % slots_.size();
std::swap(slots_[random_index], slots_.back());
} }
// Update stats (not locked).
root_.count_.fetch_add(1, std::memory_order_relaxed);
root_.size_in_bytes_.fetch_add(usable_size, std::memory_order_relaxed);
root_.cumulative_count_.fetch_add(1, std::memory_order_relaxed);
root_.cumulative_size_in_bytes_.fetch_add(usable_size,
std::memory_order_relaxed);
return true;
} }
bool LightweightQuarantineBranch::IsQuarantinedForTesting(void* object) { bool LightweightQuarantineBranch::IsQuarantinedForTesting(void* object) {
RuntimeConditionalScopedGuard guard(lock_required_, lock_); ConditionalScopedGuard guard(lock_required_, lock_);
uintptr_t slot_start = uintptr_t slot_start = root_.allocator_root_.ObjectToSlotStart(object);
root_.allocator_root_.ObjectToSlotStartUnchecked(object);
for (const auto& slot : slots_) { for (const auto& slot : slots_) {
if (slot.slot_start == slot_start) { if (slot.slot_start == slot_start) {
return true; return true;
@ -122,139 +119,26 @@ void LightweightQuarantineBranch::SetCapacityInBytes(size_t capacity_in_bytes) {
} }
void LightweightQuarantineBranch::Purge() { void LightweightQuarantineBranch::Purge() {
RuntimeConditionalScopedGuard guard(lock_required_, lock_); ConditionalScopedGuard guard(lock_required_, lock_);
PurgeInternal(0); PurgeInternal(0);
slots_.shrink_to_fit(); slots_.shrink_to_fit();
} }
template <LightweightQuarantineBranch::LockRequired lock_required>
bool LightweightQuarantineBranch::QuarantineInternal(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size) {
PA_DCHECK(lock_required_ ? lock_required == LockRequired::kRequired
: lock_required == LockRequired::kNotRequired);
PA_DCHECK(usable_size == root_.allocator_root_.GetSlotUsableSize(slot_span));
const size_t capacity_in_bytes =
branch_capacity_in_bytes_.load(std::memory_order_relaxed);
if (capacity_in_bytes < usable_size) [[unlikely]] {
// Even if this branch dequarantines all entries held by it, this entry
// cannot fit within the capacity.
root_.allocator_root_.FreeNoHooksImmediate(object, slot_span, slot_start);
root_.quarantine_miss_count_.fetch_add(1u, std::memory_order_relaxed);
return false;
}
if constexpr (lock_required == LockRequired::kNotRequired) {
// Although there is no need to actually acquire the lock as
// LockRequired::kNotRequired is specified,
// a CompileTimeConditionalScopedGuard is necessary in order to touch
// `slots_` as `slots_` is annotated with `PA_GUARDED_BY(lock_)`.
// CompileTimeConditionalScopedGuard's ctor and dtor behave as
// PA_EXCLUSIVE_LOCK_FUNCTION and PA_UNLOCK_FUNCTION.
CompileTimeConditionalScopedGuard<lock_required> guard(lock_);
// Dequarantine some entries as required.
PurgeInternal(capacity_in_bytes - usable_size);
// Put the entry onto the list.
branch_size_in_bytes_ += usable_size;
slots_.push_back({slot_start, usable_size});
// Swap randomly so that the quarantine list remain shuffled.
// This is not uniformly random, but sufficiently random.
const size_t random_index = random_.RandUint32() % slots_.size();
std::swap(slots_[random_index], slots_.back());
} else {
std::unique_ptr<ToBeFreedArray, InternalPartitionDeleter<ToBeFreedArray>>
to_be_freed;
size_t num_of_slots = 0;
{
CompileTimeConditionalScopedGuard<lock_required> guard(lock_);
// Borrow the reserved working memory from to_be_freed_working_memory_,
// and set nullptr to it indicating that it's in use.
to_be_freed.reset(to_be_freed_working_memory_.exchange(nullptr));
if (!to_be_freed) {
// When the reserved working memory has already been in use by another
// thread, fall back to allocate another chunk of working memory.
to_be_freed.reset(ConstructAtInternalPartition<ToBeFreedArray>());
}
// Dequarantine some entries as required. Save the objects to be
// deallocated into `to_be_freed`.
PurgeInternalWithDefferedFree(capacity_in_bytes - usable_size,
*to_be_freed, num_of_slots);
// Put the entry onto the list.
branch_size_in_bytes_ += usable_size;
slots_.push_back({slot_start, usable_size});
// Swap randomly so that the quarantine list remain shuffled.
// This is not uniformly random, but sufficiently random.
const size_t random_index = random_.RandUint32() % slots_.size();
std::swap(slots_[random_index], slots_.back());
}
// Actually deallocate the dequarantined objects.
BatchFree(*to_be_freed, num_of_slots);
// Return the possibly-borrowed working memory to
// to_be_freed_working_memory_. It doesn't matter much if it's really
// borrowed or locally-allocated. The important facts are 1) to_be_freed is
// non-null, and 2) to_be_freed_working_memory_ may likely be null (because
// this or another thread has already borrowed it). It's simply good to make
// to_be_freed_working_memory_ non-null whenever possible. Maybe yet another
// thread would be about to borrow the working memory.
to_be_freed.reset(
to_be_freed_working_memory_.exchange(to_be_freed.release()));
}
// Update stats (not locked).
root_.count_.fetch_add(1, std::memory_order_relaxed);
root_.size_in_bytes_.fetch_add(usable_size, std::memory_order_relaxed);
root_.cumulative_count_.fetch_add(1, std::memory_order_relaxed);
root_.cumulative_size_in_bytes_.fetch_add(usable_size,
std::memory_order_relaxed);
return true;
}
template bool LightweightQuarantineBranch::QuarantineInternal<
LightweightQuarantineBranch::LockRequired::kNotRequired>(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
template bool LightweightQuarantineBranch::QuarantineInternal<
LightweightQuarantineBranch::LockRequired::kRequired>(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
PA_ALWAYS_INLINE void LightweightQuarantineBranch::PurgeInternal( PA_ALWAYS_INLINE void LightweightQuarantineBranch::PurgeInternal(
size_t target_size_in_bytes) { size_t target_size_in_bytes) {
int64_t freed_count = 0; int64_t freed_count = 0;
int64_t freed_size_in_bytes = 0; int64_t freed_size_in_bytes = 0;
// Dequarantine some entries as required. // Dequarantine some entries as required.
while (target_size_in_bytes < branch_size_in_bytes_) { while (!slots_.empty() && target_size_in_bytes < branch_size_in_bytes_) {
PA_DCHECK(!slots_.empty());
// As quarantined entries are shuffled, picking last entry is equivalent // As quarantined entries are shuffled, picking last entry is equivalent
// to picking random entry. // to picking random entry.
const auto& to_free = slots_.back(); const auto& to_free = slots_.back();
size_t to_free_size = to_free.usable_size; size_t to_free_size = to_free.usable_size;
auto* slot_span = SlotSpanMetadata<MetadataKind::kReadOnly>::FromSlotStart( auto* slot_span = SlotSpanMetadata::FromSlotStart(to_free.slot_start);
to_free.slot_start);
void* object = root_.allocator_root_.SlotStartToObject(to_free.slot_start); void* object = root_.allocator_root_.SlotStartToObject(to_free.slot_start);
PA_DCHECK(slot_span == PA_DCHECK(slot_span == SlotSpanMetadata::FromObject(object));
SlotSpanMetadata<MetadataKind::kReadOnly>::FromObject(object));
PA_DCHECK(to_free.slot_start); PA_DCHECK(to_free.slot_start);
root_.allocator_root_.FreeNoHooksImmediate(object, slot_span, root_.allocator_root_.FreeNoHooksImmediate(object, slot_span,
@ -272,53 +156,4 @@ PA_ALWAYS_INLINE void LightweightQuarantineBranch::PurgeInternal(
root_.count_.fetch_sub(freed_count, std::memory_order_relaxed); root_.count_.fetch_sub(freed_count, std::memory_order_relaxed);
} }
PA_ALWAYS_INLINE void
LightweightQuarantineBranch::PurgeInternalWithDefferedFree(
size_t target_size_in_bytes,
ToBeFreedArray& to_be_freed,
size_t& num_of_slots) {
num_of_slots = 0;
int64_t freed_size_in_bytes = 0;
// Dequarantine some entries as required.
while (target_size_in_bytes < branch_size_in_bytes_) {
PA_DCHECK(!slots_.empty());
// As quarantined entries are shuffled, picking last entry is equivalent to
// picking random entry.
const QuarantineSlot& to_free = slots_.back();
const size_t to_free_size = to_free.usable_size;
to_be_freed[num_of_slots++] = to_free.slot_start;
slots_.pop_back();
freed_size_in_bytes += to_free_size;
branch_size_in_bytes_ -= to_free_size;
if (num_of_slots >= kMaxFreeTimesPerPurge) {
break;
}
}
root_.size_in_bytes_.fetch_sub(freed_size_in_bytes,
std::memory_order_relaxed);
root_.count_.fetch_sub(num_of_slots, std::memory_order_relaxed);
}
PA_ALWAYS_INLINE void LightweightQuarantineBranch::BatchFree(
const ToBeFreedArray& to_be_freed,
size_t num_of_slots) {
for (size_t i = 0; i < num_of_slots; ++i) {
const uintptr_t slot_start = to_be_freed[i];
PA_DCHECK(slot_start);
auto* slot_span =
SlotSpanMetadata<MetadataKind::kReadOnly>::FromSlotStart(slot_start);
void* object = root_.allocator_root_.SlotStartToObject(slot_start);
PA_DCHECK(slot_span ==
SlotSpanMetadata<MetadataKind::kReadOnly>::FromObject(object));
root_.allocator_root_.FreeNoHooksImmediate(object, slot_span, slot_start);
}
}
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View file

@ -108,35 +108,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
// as much as possible. If the object is too large, this may return // as much as possible. If the object is too large, this may return
// `false`, meaning that quarantine request has failed (and freed // `false`, meaning that quarantine request has failed (and freed
// immediately). Otherwise, returns `true`. // immediately). Otherwise, returns `true`.
PA_ALWAYS_INLINE bool Quarantine( bool Quarantine(void* object,
void* object, SlotSpanMetadata* slot_span,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span, uintptr_t slot_start);
uintptr_t slot_start,
size_t usable_size) {
return lock_required_ ? QuarantineWithAcquiringLock(object, slot_span,
slot_start, usable_size)
: QuarantineWithoutAcquiringLock(
object, slot_span, slot_start, usable_size);
}
// Despite that LightweightQuarantineBranchConfig::lock_required_ is already
// specified, we provide two versions `With/WithoutAcquiringLock` so that we
// can avoid the overhead of runtime conditional branches.
PA_ALWAYS_INLINE bool QuarantineWithAcquiringLock(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size) {
PA_MUSTTAIL return QuarantineInternal<LockRequired::kRequired>(
object, slot_span, slot_start, usable_size);
}
PA_ALWAYS_INLINE bool QuarantineWithoutAcquiringLock(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size) {
PA_MUSTTAIL return QuarantineInternal<LockRequired::kNotRequired>(
object, slot_span, slot_start, usable_size);
}
// Dequarantine all entries **held by this branch**. // Dequarantine all entries **held by this branch**.
// It is possible that another branch with entries and it remains untouched. // It is possible that another branch with entries and it remains untouched.
@ -155,27 +129,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
void SetCapacityInBytes(size_t capacity_in_bytes); void SetCapacityInBytes(size_t capacity_in_bytes);
private: private:
enum class LockRequired { kNotRequired, kRequired };
template <LockRequired lock_required>
class PA_SCOPED_LOCKABLE CompileTimeConditionalScopedGuard;
class PA_SCOPED_LOCKABLE RuntimeConditionalScopedGuard;
// `ToBeFreedArray` is used in `PurgeInternalInTwoPhases1of2` and
// `PurgeInternalInTwoPhases2of2`. See the function comment about the purpose.
// In order to avoid reentrancy issues, we must not deallocate any object in
// `Quarantine`. So, std::vector is not an option. std::array doesn't
// deallocate, plus, std::array has perf advantages.
static constexpr size_t kMaxFreeTimesPerPurge = 1024;
using ToBeFreedArray = std::array<uintptr_t, kMaxFreeTimesPerPurge>;
LightweightQuarantineBranch(Root& root, LightweightQuarantineBranch(Root& root,
const LightweightQuarantineBranchConfig& config); const LightweightQuarantineBranchConfig& config);
template <LockRequired lock_required>
bool QuarantineInternal(void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
// Try to dequarantine entries to satisfy below: // Try to dequarantine entries to satisfy below:
// root_.size_in_bytes_ <= target_size_in_bytes // root_.size_in_bytes_ <= target_size_in_bytes
// It is possible that this branch cannot satisfy the // It is possible that this branch cannot satisfy the
@ -183,19 +139,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
// constraint, call `Purge()` for each branch in sequence, synchronously. // constraint, call `Purge()` for each branch in sequence, synchronously.
PA_ALWAYS_INLINE void PurgeInternal(size_t target_size_in_bytes) PA_ALWAYS_INLINE void PurgeInternal(size_t target_size_in_bytes)
PA_EXCLUSIVE_LOCKS_REQUIRED(lock_); PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
// In order to reduce thread contention, dequarantines entries in two phases:
// Phase 1) With the lock acquired, saves `slot_start`s of the quarantined
// objects in an array, and shrinks `slots_`. Then, releases the lock so
// that another thread can quarantine an object.
// Phase 2) Without the lock acquired, deallocates objects saved in the
// array in Phase 1. This may take some time, but doesn't block other
// threads.
PA_ALWAYS_INLINE void PurgeInternalWithDefferedFree(
size_t target_size_in_bytes,
ToBeFreedArray& to_be_freed,
size_t& num_of_slots) PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
PA_ALWAYS_INLINE void BatchFree(const ToBeFreedArray& to_be_freed,
size_t num_of_slots);
Root& root_; Root& root_;
@ -217,35 +160,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
// Using `std::atomic` here so that other threads can update this value. // Using `std::atomic` here so that other threads can update this value.
std::atomic_size_t branch_capacity_in_bytes_; std::atomic_size_t branch_capacity_in_bytes_;
// This working memory is temporarily needed only while dequarantining
// objects in slots_ when lock_required_ is true. However, allocating this
// working memory on stack may cause stack overflow [1]. Plus, it's non-
// negligible perf penalty to allocate and deallocate this working memory on
// heap only while dequarantining. So, we reserve one chunk of working memory
// on heap during the entire lifetime of this branch object and try to reuse
// this working memory among threads. Only when thread contention occurs, we
// allocate and deallocate another chunk of working memory.
// [1] https://issues.chromium.org/issues/387508217
std::atomic<ToBeFreedArray*> to_be_freed_working_memory_ = nullptr;
friend class LightweightQuarantineRoot; friend class LightweightQuarantineRoot;
}; };
extern template PA_COMPONENT_EXPORT(
PARTITION_ALLOC) bool LightweightQuarantineBranch::
QuarantineInternal<LightweightQuarantineBranch::LockRequired::kNotRequired>(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
extern template PA_COMPONENT_EXPORT(
PARTITION_ALLOC) bool LightweightQuarantineBranch::
QuarantineInternal<LightweightQuarantineBranch::LockRequired::kRequired>(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
} // namespace internal } // namespace internal
} // namespace partition_alloc } // namespace partition_alloc

View file

@ -4,12 +4,16 @@
#include "partition_alloc/memory_reclaimer.h" #include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc.h" #include "partition_alloc/partition_alloc.h"
#include "partition_alloc/partition_alloc_base/no_destructor.h" #include "partition_alloc/partition_alloc_base/no_destructor.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"
#if PA_BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/pcscan.h"
#endif
namespace partition_alloc { namespace partition_alloc {
// static // static
@ -48,17 +52,30 @@ void MemoryReclaimer::ReclaimNormal() {
Reclaim(kFlags); Reclaim(kFlags);
} }
void MemoryReclaimer::ReclaimFast() {
constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages |
PurgeFlags::kLimitDuration;
Reclaim(kFlags);
}
void MemoryReclaimer::Reclaim(int flags) { void MemoryReclaimer::Reclaim(int flags) {
internal::ScopedGuard lock( internal::ScopedGuard lock(
lock_); // Has to protect from concurrent (Un)Register calls. lock_); // Has to protect from concurrent (Un)Register calls.
// PCScan quarantines freed slots. Trigger the scan first to let it call
// FreeNoHooksImmediate on slots that pass the quarantine.
//
// In turn, FreeNoHooksImmediate may add slots to thread cache. Purge it next
// so that the slots are actually freed. (This is done synchronously only for
// the current thread.)
//
// Lastly decommit empty slot spans and lastly try to discard unused pages at
// the end of the remaining active slots.
#if PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) && PA_BUILDFLAG(USE_STARSCAN)
{
using PCScan = internal::PCScan;
const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
? PCScan::InvocationMode::kForcedBlocking
: PCScan::InvocationMode::kBlocking;
PCScan::PerformScanIfNeeded(invocation_mode);
}
#endif // PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) &&
// PA_BUILDFLAG(USE_STARSCAN)
#if PA_CONFIG(THREAD_CACHE_SUPPORTED) #if PA_CONFIG(THREAD_CACHE_SUPPORTED)
// Don't completely empty the thread cache outside of low memory situations, // Don't completely empty the thread cache outside of low memory situations,
// as there is periodic purge which makes sure that it doesn't take too much // as there is periodic purge which makes sure that it doesn't take too much

View file

@ -51,8 +51,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MemoryReclaimer {
// Triggers an explicit reclaim now reclaiming all free memory // Triggers an explicit reclaim now reclaiming all free memory
void ReclaimAll(); void ReclaimAll();
// Same as ReclaimNormal(), but return early if reclaim takes too long.
void ReclaimFast();
private: private:
MemoryReclaimer(); MemoryReclaimer();

View file

@ -10,13 +10,12 @@
#include "partition_alloc/partition_alloc_base/debug/alias.h" #include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h" #include "partition_alloc/partition_alloc_base/immediate_crash.h"
#if PA_BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
#include <windows.h> #include <windows.h>
#include <array> #include <array>
#include <cstdlib> #include <cstdlib>
#include <limits> #endif // BUILDFLAG(IS_WIN)
#endif // PA_BUILDFLAG(IS_WIN)
namespace partition_alloc { namespace partition_alloc {
@ -27,50 +26,23 @@ namespace internal {
// Crash server classifies base::internal::OnNoMemoryInternal as OOM. // Crash server classifies base::internal::OnNoMemoryInternal as OOM.
// TODO(crbug.com/40158212): Update to // TODO(crbug.com/40158212): Update to
// partition_alloc::internal::base::internal::OnNoMemoryInternal // partition_alloc::internal::base::internal::OnNoMemoryInternal
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void OnNoMemoryInternal( PA_NOINLINE void OnNoMemoryInternal(size_t size) {
size_t size) {
g_oom_size = size; g_oom_size = size;
size_t tmp_size = size; #if BUILDFLAG(IS_WIN)
internal::base::debug::Alias(&tmp_size);
#if PA_BUILDFLAG(IS_WIN)
// Create an exception vector with:
// [0] the size of the allocation, in bytes
// [1] "current committed memory limit for the system or the current process,
// whichever is smaller, in bytes"
// [2] "maximum amount of memory the current process can commit, in bytes"
//
// Citations from
// https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex
//
// System commit constraints (which may be different from the process commit
// constraints) are in the stability_report.SystemMemoryState.WindowsMemory
// proto attached to crash reports.
//
// Note: Both the process commit constraints in the exception vector and the
// system commit constraints in the proto are collected *after* the OOM and
// may therefore not reflect the state at the time of the OOM (e.g. another
// process may have exited or the page file may have been resized).
constexpr size_t kInvalid = std::numeric_limits<ULONG_PTR>::max();
ULONG_PTR exception_args[] = {size, kInvalid, kInvalid};
MEMORYSTATUSEX memory_status = {};
memory_status.dwLength = sizeof(memory_status);
if (::GlobalMemoryStatusEx(&memory_status) != 0) {
exception_args[1] = memory_status.ullTotalPageFile;
exception_args[2] = memory_status.ullAvailPageFile;
}
internal::base::debug::Alias(&memory_status);
// Kill the process. This is important for security since most of code // Kill the process. This is important for security since most of code
// does not check the result of memory allocation. // does not check the result of memory allocation.
// Documentation: https://msdn.microsoft.com/en-us/library/het71c37.aspx // https://msdn.microsoft.com/en-us/library/het71c37.aspx
// Pass the size of the failed request in an exception argument.
ULONG_PTR exception_args[] = {size};
::RaiseException(win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE, ::RaiseException(win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE,
std::size(exception_args), exception_args); std::size(exception_args), exception_args);
// Safety check, make sure process exits here. // Safety check, make sure process exits here.
_exit(win::kOomExceptionCode); _exit(win::kOomExceptionCode);
#else #else
size_t tmp_size = size;
internal::base::debug::Alias(&tmp_size);
// Note: Don't add anything that may allocate here. Depending on the // Note: Don't add anything that may allocate here. Depending on the
// allocator, this may be called from within the allocator (e.g. with // allocator, this may be called from within the allocator (e.g. with
// PartitionAlloc), and would deadlock as our locks are not recursive. // PartitionAlloc), and would deadlock as our locks are not recursive.
@ -83,7 +55,7 @@ namespace internal {
// to be able to successfully unwind through libc to get to the correct // to be able to successfully unwind through libc to get to the correct
// address, which is particularly an issue on Android. // address, which is particularly an issue on Android.
PA_IMMEDIATE_CRASH(); PA_IMMEDIATE_CRASH();
#endif // PA_BUILDFLAG(IS_WIN) #endif // BUILDFLAG(IS_WIN)
} }
} // namespace internal } // namespace internal

View file

@ -12,7 +12,7 @@
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#if PA_BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
#include "partition_alloc/partition_alloc_base/win/windows_types.h" #include "partition_alloc/partition_alloc_base/win/windows_types.h"
#endif #endif
@ -22,15 +22,15 @@ namespace partition_alloc {
// |size| is the size of the failed allocation, or 0 if not known. // |size| is the size of the failed allocation, or 0 if not known.
// Crash reporting classifies such crashes as OOM. // Crash reporting classifies such crashes as OOM.
// Must be allocation-safe. // Must be allocation-safe.
[[noreturn]] PA_NOT_TAIL_CALLED PA_COMPONENT_EXPORT( PA_COMPONENT_EXPORT(PARTITION_ALLOC)
PARTITION_ALLOC) void TerminateBecauseOutOfMemory(size_t size); void TerminateBecauseOutOfMemory(size_t size);
// Records the size of the allocation that caused the current OOM crash, for // Records the size of the allocation that caused the current OOM crash, for
// consumption by Breakpad. // consumption by Breakpad.
// TODO: this can be removed when Breakpad is no longer supported. // TODO: this can be removed when Breakpad is no longer supported.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern size_t g_oom_size; PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern size_t g_oom_size;
#if PA_BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
namespace win { namespace win {
// Custom Windows exception code chosen to indicate an out of memory error. // Custom Windows exception code chosen to indicate an out of memory error.

Some files were not shown because too many files have changed in this diff Show more