Compare commits


No commits in common. "v123.0.6312.40-1" and "master" have entirely different histories.

26,636 changed files with 1,426,597 additions and 1,678,073 deletions


@@ -25,7 +25,7 @@ jobs:
path: | path: |
src/third_party/llvm-build/Release+Asserts/ src/third_party/llvm-build/Release+Asserts/
src/gn/ src/gn/
src/qemu-user-static*.deb src/qemu-user*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }} key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (Linux, OpenWrt) - name: Cache PGO (Linux, OpenWrt)
uses: actions/cache@v4 uses: actions/cache@v4
@@ -45,11 +45,11 @@ jobs:
- run: ./get-clang.sh - run: ./get-clang.sh
- run: EXTRA_FLAGS='target_os="android"' ./get-clang.sh - run: EXTRA_FLAGS='target_os="android"' ./get-clang.sh
- run: | - run: |
if [ ! -f qemu-user-static*.deb ]; then if [ ! -f qemu-user*.deb ]; then
wget https://snapshot.debian.org/archive/debian/20230611T210420Z/pool/main/q/qemu/qemu-user-static_8.0%2Bdfsg-4_amd64.deb wget https://snapshot.debian.org/archive/debian/20250405T083429Z/pool/main/q/qemu/qemu-user_9.2.2%2Bds-1%2Bb2_amd64.deb
fi fi
cache-toolchains-win: cache-toolchains-win:
runs-on: windows-2019 runs-on: windows-2022
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Cache toolchains - name: Cache toolchains
@@ -79,7 +79,7 @@ jobs:
unzip ninja-win.zip -d ~/bin unzip ninja-win.zip -d ~/bin
fi fi
cache-toolchains-mac: cache-toolchains-mac:
runs-on: macos-11 runs-on: macos-13
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: actions/cache@v4 - uses: actions/cache@v4
@@ -97,7 +97,7 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
arch: [x64, x86, arm64, arm, mipsel, mips64el, riscv64] arch: [x64, x86, arm64, arm, mipsel, mips64el, riscv64, loong64]
env: env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"' EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"'
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }} BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
@@ -109,7 +109,7 @@ jobs:
path: | path: |
src/third_party/llvm-build/Release+Asserts/ src/third_party/llvm-build/Release+Asserts/
src/gn/ src/gn/
src/qemu-user-static*.deb src/qemu-user*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }} key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (Linux, OpenWrt) - name: Cache PGO (Linux, OpenWrt)
uses: actions/cache@v4 uses: actions/cache@v4
@@ -138,7 +138,7 @@ jobs:
sudo apt update sudo apt update
sudo apt install ninja-build pkg-config ccache bubblewrap sudo apt install ninja-build pkg-config ccache bubblewrap
sudo apt remove -y qemu-user-binfmt sudo apt remove -y qemu-user-binfmt
sudo dpkg -i qemu-user-static*.deb sudo dpkg -i qemu-user*.deb
# libc6-i386 interferes with x86 build # libc6-i386 interferes with x86 build
sudo apt remove libc6-i386 sudo apt remove libc6-i386
- run: ./get-clang.sh - run: ./get-clang.sh
@@ -179,16 +179,20 @@ jobs:
abi: armeabi-v7a abi: armeabi-v7a
env: env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="android"' EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="android"'
BUNDLE: naiveproxy-plugin-${{ github.event.release.tag_name || 'v1' }}-${{ matrix.abi }}.apk BUNDLE: naiveproxy-plugin-${{ github.event.release.tag_name || 'v1.1.1.1-1' }}-${{ matrix.abi }}.apk
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: 17
- name: Cache toolchains (Linux, OpenWrt, Android) - name: Cache toolchains (Linux, OpenWrt, Android)
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: | path: |
src/third_party/llvm-build/Release+Asserts/ src/third_party/llvm-build/Release+Asserts/
src/gn/ src/gn/
src/qemu-user-static*.deb src/qemu-user*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }} key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache AFDO (Android) - name: Cache AFDO (Android)
uses: actions/cache@v4 uses: actions/cache@v4
@@ -218,7 +222,7 @@ jobs:
sudo apt update sudo apt update
sudo apt install ninja-build pkg-config ccache bubblewrap sudo apt install ninja-build pkg-config ccache bubblewrap
sudo apt remove -y qemu-user-binfmt sudo apt remove -y qemu-user-binfmt
sudo dpkg -i qemu-user-static*.deb sudo dpkg -i qemu-user*.deb
# libc6-i386 interferes with x86 build # libc6-i386 interferes with x86 build
sudo apt remove libc6-i386 sudo apt remove libc6-i386
- run: ./get-clang.sh - run: ./get-clang.sh
@@ -236,7 +240,7 @@ jobs:
working-directory: apk working-directory: apk
env: env:
APK_ABI: ${{ matrix.abi }} APK_ABI: ${{ matrix.abi }}
APK_VERSION_NAME: ${{ github.event.release.tag_name || 'v1' }} APK_VERSION_NAME: ${{ github.event.release.tag_name || 'v1.1.1.1-1' }}
KEYSTORE_PASS: ${{ secrets.KEYSTORE_PASS }} KEYSTORE_PASS: ${{ secrets.KEYSTORE_PASS }}
run: | run: |
mkdir -p app/libs/$APK_ABI mkdir -p app/libs/$APK_ABI
@@ -256,7 +260,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
win: win:
needs: cache-toolchains-win needs: cache-toolchains-win
runs-on: windows-2019 runs-on: windows-2022
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
@@ -320,7 +324,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
mac: mac:
needs: cache-toolchains-mac needs: cache-toolchains-mac
runs-on: macos-11 runs-on: macos-13
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
@@ -371,40 +375,6 @@ jobs:
run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }}.tar.xz --clobber run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }}.tar.xz --clobber
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ios:
needs: cache-toolchains-mac
runs-on: macos-11
strategy:
fail-fast: false
matrix:
arch: [arm64]
env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="ios" ios_enable_code_signing=false'
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- name: Cache toolchains and PGO
uses: actions/cache@v4
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/chrome/build/pgo_profiles/chrome-mac-*
src/gn/
key: toolchains-pgo-mac-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- id: ccache-timestamp
run: echo "CCACHE_TIMESTAMP=$(date +%s)" >>$GITHUB_OUTPUT
- name: Cache ccache files
uses: actions/cache@v4
with:
path: ~/Library/Caches/ccache
key: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
restore-keys: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- run: brew install ninja ccache
- run: pip install setuptools
- run: ./get-clang.sh
- run: ccache -z
- run: ./build.sh
- run: ccache -s
openwrt: openwrt:
needs: cache-toolchains-posix needs: cache-toolchains-posix
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
@@ -416,7 +386,7 @@ jobs:
openwrt: "target=x86 subtarget=64" openwrt: "target=x86 subtarget=64"
target_cpu: x64 target_cpu: x64
- arch: x86 - arch: x86
openwrt: "target=x86 subtarget=generic" openwrt: "target=x86 subtarget=geode"
target_cpu: x86 target_cpu: x86
- arch: aarch64_cortex-a53 - arch: aarch64_cortex-a53
openwrt: "target=sunxi subtarget=cortexa53" openwrt: "target=sunxi subtarget=cortexa53"
@@ -425,7 +395,9 @@ jobs:
- arch: aarch64_cortex-a53-static - arch: aarch64_cortex-a53-static
openwrt: "target=sunxi subtarget=cortexa53" openwrt: "target=sunxi subtarget=cortexa53"
target_cpu: arm64 target_cpu: arm64
extra: 'arm_cpu="cortex-a53" build_static=true no_madvise_syscall=true' extra: 'arm_cpu="cortex-a53" build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: aarch64_cortex-a72 - arch: aarch64_cortex-a72
openwrt: "target=mvebu subtarget=cortexa72" openwrt: "target=mvebu subtarget=cortexa72"
target_cpu: arm64 target_cpu: arm64
@@ -433,16 +405,26 @@ jobs:
- arch: aarch64_cortex-a72-static - arch: aarch64_cortex-a72-static
openwrt: "target=mvebu subtarget=cortexa72" openwrt: "target=mvebu subtarget=cortexa72"
target_cpu: arm64 target_cpu: arm64
extra: 'arm_cpu="cortex-a72" build_static=true no_madvise_syscall=true' extra: 'arm_cpu="cortex-a72" build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: aarch64_cortex-a76
openwrt: "target=bcm27xx subtarget=bcm2712"
target_cpu: arm64
extra: 'arm_cpu="cortex-a76"'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: aarch64_generic - arch: aarch64_generic
openwrt: "target=rockchip subtarget=armv8" openwrt: "target=layerscape subtarget=armv8_64b"
target_cpu: arm64 target_cpu: arm64
- arch: aarch64_generic-static - arch: aarch64_generic-static
openwrt: "target=rockchip subtarget=armv8" openwrt: "target=layerscape subtarget=armv8_64b"
target_cpu: arm64 target_cpu: arm64
extra: "build_static=true no_madvise_syscall=true" extra: "build_static=true use_allocator_shim=false use_partition_alloc=false"
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: arm_arm1176jzf-s_vfp - arch: arm_arm1176jzf-s_vfp
openwrt: "target=bcm27xx subtarget=bcm2708" openwrt: "target=brcm2708 subtarget=bcm2708"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="arm1176jzf-s" arm_fpu="vfp" arm_float_abi="hard" arm_use_neon=false arm_use_thumb=false' extra: 'arm_version=0 arm_cpu="arm1176jzf-s" arm_fpu="vfp" arm_float_abi="hard" arm_use_neon=false arm_use_thumb=false'
- arch: arm_arm926ej-s - arch: arm_arm926ej-s
@@ -450,29 +432,35 @@ jobs:
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="arm926ej-s" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false' extra: 'arm_version=0 arm_cpu="arm926ej-s" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false'
- arch: arm_cortex-a15_neon-vfpv4 - arch: arm_cortex-a15_neon-vfpv4
openwrt: "target=armsr subtarget=armv7" openwrt: "target=ipq806x subtarget=generic"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a15" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true' extra: 'arm_version=0 arm_cpu="cortex-a15" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a5_vfpv4 - arch: arm_cortex-a5_vfpv4
openwrt: "target=at91 subtarget=sama5" openwrt: "target=at91 subtarget=sama5d3"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a5" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false' extra: 'arm_version=0 arm_cpu="cortex-a5" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_cortex-a7 - arch: arm_cortex-a7
openwrt: "target=mediatek subtarget=mt7629" openwrt: "target=mediatek subtarget=mt7629"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_float_abi="soft" arm_use_neon=false' extra: 'arm_version=0 arm_cpu="cortex-a7" arm_float_abi="soft" arm_use_neon=false'
openwrt_release: '21.02.0'
openwrt_gcc_ver: '8.4.0'
- arch: arm_cortex-a7_neon-vfpv4 - arch: arm_cortex-a7_neon-vfpv4
openwrt: "target=sunxi subtarget=cortexa7" openwrt: "target=sunxi subtarget=cortexa7"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true' extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a7_neon-vfpv4-static
openwrt: "target=sunxi subtarget=cortexa7"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: arm_cortex-a7_vfpv4 - arch: arm_cortex-a7_vfpv4
openwrt: "target=at91 subtarget=sama7" openwrt: "target=at91 subtarget=sama7"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false' extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_cortex-a7_neon-vfpv4-static openwrt_release: '22.03.0'
openwrt: "target=sunxi subtarget=cortexa7" openwrt_gcc_ver: '11.2.0'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true build_static=true no_madvise_syscall=true'
- arch: arm_cortex-a8_vfpv3 - arch: arm_cortex-a8_vfpv3
openwrt: "target=sunxi subtarget=cortexa8" openwrt: "target=sunxi subtarget=cortexa8"
target_cpu: arm target_cpu: arm
@@ -484,13 +472,15 @@ jobs:
- arch: arm_cortex-a9-static - arch: arm_cortex-a9-static
openwrt: "target=bcm53xx subtarget=generic" openwrt: "target=bcm53xx subtarget=generic"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false build_static=true no_madvise_syscall=true' extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: arm_cortex-a9_neon - arch: arm_cortex-a9_neon
openwrt: "target=zynq subtarget=generic" openwrt: "target=imx6 subtarget=generic"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true' extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a9_vfpv3-d16 - arch: arm_cortex-a9_vfpv3-d16
openwrt: "target=tegra subtarget=generic" openwrt: "target=mvebu subtarget=cortexa9"
target_cpu: arm target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="vfpv3-d16" arm_float_abi="hard" arm_use_neon=false' extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="vfpv3-d16" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_mpcore - arch: arm_mpcore
@@ -508,17 +498,26 @@ jobs:
- arch: mipsel_24kc-static - arch: mipsel_24kc-static
openwrt: "target=ramips subtarget=rt305x" openwrt: "target=ramips subtarget=rt305x"
target_cpu: mipsel target_cpu: mipsel
extra: 'mips_arch_variant="r2" mips_float_abi="soft" build_static=true no_madvise_syscall=true' extra: 'mips_arch_variant="r2" mips_float_abi="soft" build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: mipsel_mips32 - arch: mipsel_mips32
openwrt: "target=bcm47xx subtarget=generic" openwrt: "target=brcm47xx subtarget=legacy"
target_cpu: mipsel target_cpu: mipsel
extra: 'mips_arch_variant="r1" mips_float_abi="soft"' extra: 'mips_arch_variant="r1" mips_float_abi="soft"'
- arch: riscv64 - arch: riscv64
openwrt: "target=sifiveu subtarget=generic" openwrt: "target=sifiveu subtarget=generic"
target_cpu: riscv64 target_cpu: riscv64
openwrt_release: '23.05.0'
openwrt_gcc_ver: '12.3.0'
- arch: loongarch64
openwrt: "target=loongarch64 subtarget=generic"
target_cpu: loong64
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
env: env:
EXTRA_FLAGS: target_cpu="${{ matrix.target_cpu }}" target_os="openwrt" ${{ matrix.extra }} EXTRA_FLAGS: target_cpu="${{ matrix.target_cpu }}" target_os="openwrt" ${{ matrix.extra }} enable_shadow_metadata=false
OPENWRT_FLAGS: arch=${{ matrix.arch }} release=23.05.0 gcc_ver=12.3.0 ${{ matrix.openwrt }} OPENWRT_FLAGS: arch=${{ matrix.arch }} release=${{ matrix.openwrt_release || '18.06.0' }} gcc_ver=${{ matrix.openwrt_gcc_ver || '7.3.0' }} ${{ matrix.openwrt }}
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }} BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
@@ -528,7 +527,7 @@ jobs:
path: | path: |
src/third_party/llvm-build/Release+Asserts/ src/third_party/llvm-build/Release+Asserts/
src/gn/ src/gn/
src/qemu-user-static*.deb src/qemu-user*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }} key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (Linux, OpenWrt) - name: Cache PGO (Linux, OpenWrt)
uses: actions/cache@v4 uses: actions/cache@v4
@@ -553,7 +552,7 @@ jobs:
sudo apt update sudo apt update
sudo apt install ninja-build pkg-config ccache bubblewrap sudo apt install ninja-build pkg-config ccache bubblewrap
sudo apt remove -y qemu-user-binfmt sudo apt remove -y qemu-user-binfmt
sudo dpkg -i qemu-user-static*.deb sudo dpkg -i qemu-user*.deb
# libc6-i386 interferes with x86 build # libc6-i386 interferes with x86 build
sudo apt remove libc6-i386 sudo apt remove libc6-i386
- run: ./get-clang.sh - run: ./get-clang.sh


@@ -1 +1 @@
123.0.6312.40 135.0.7049.38


@@ -21,7 +21,7 @@ The Naïve server here works as a forward proxy and a packet length padding laye
## Download NaïveProxy ## Download NaïveProxy
Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)). Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [Exclave](https://github.com/dyhkwong/Exclave), [husi](https://github.com/xchacha20-poly1305/husi), [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Users should always use the latest version to keep signatures identical to Chrome. Users should always use the latest version to keep signatures identical to Chrome.
@@ -34,7 +34,7 @@ The following describes the naïve fork of Caddy forwardproxy setup.
Download [here](https://github.com/klzgrad/forwardproxy/releases/latest) or build from source: Download [here](https://github.com/klzgrad/forwardproxy/releases/latest) or build from source:
```sh ```sh
go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
~/go/bin/xcaddy build --with github.com/caddyserver/forwardproxy@caddy2=github.com/klzgrad/forwardproxy@naive ~/go/bin/xcaddy build --with github.com/caddyserver/forwardproxy=github.com/klzgrad/forwardproxy@naive
``` ```
Example Caddyfile (replace `user` and `pass` accordingly): Example Caddyfile (replace `user` and `pass` accordingly):
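The Caddyfile body itself is unchanged context, so this hunk does not show it. As a rough sketch only (site address, email, and credentials are placeholders; the directive names are those documented for the klzgrad/forwardproxy fork), such a configuration usually has this shape:

```caddyfile
{
  order forward_proxy before file_server
}
:443, example.com {
  tls me@example.com
  forward_proxy {
    basic_auth user pass
    hide_ip
    hide_via
    probe_resistance
  }
  file_server {
    root /var/www/html
  }
}
```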
@@ -79,10 +79,7 @@ Or `quic://user:pass@example.com`, if it works better. See also [parameter usage
## Third-party integration ## Third-party integration
* [v2rayN](https://github.com/2dust/v2rayN), GUI client, Windows * [v2rayN](https://github.com/2dust/v2rayN), GUI client
* [NekoBox for Android](https://github.com/MatsuriDayo/NekoBoxForAndroid), Proxy toolchain, Android
* [NekoRay / NekoBox For PC](https://github.com/MatsuriDayo/nekoray), Qt based GUI, Windows, Linux
* [Yet Another Shadow Socket](https://github.com/Chilledheart/yass), NaïveProxy-compatible forward proxy, Android, iOS, Windows, macOS, Linux, FreeBSD
## Notes for downstream ## Notes for downstream
@@ -114,7 +111,7 @@ Further reads and writes after `kFirstPaddings` are unpadded to avoid performanc
### H2 RST_STREAM frame padding ### H2 RST_STREAM frame padding
In experiments, NaïveProxy tends to send too many RST_STREAM frames per session, an uncommon behavior from regular browsers. To solve this, an END_STREAM DATA frame padded with total length distributed in [48, 72] is prepended to the RST_STREAM frame so it looks like a HEADERS frame. The server often replies to this with a WINDOW_UPDATE because padding is accounted in flow control. Whether this results in a new uncommon behavior is still unclear. In experiments, NaïveProxy tends to send too many RST_STREAM frames per session, an uncommon behavior from regular browsers. To solve this, an END_STREAM DATA frame padded with total length distributed in [48, 72] is prepended to the RST_STREAM frame so it looks like a HEADERS frame. The server often replies to this with a WINDOW_UPDATE because padding is accounted in flow control. Whether this results in a new uncommon behavior is still unclear.
### H2 HEADERS frame padding ### H2 HEADERS frame padding
@@ -130,7 +127,7 @@ The first CONNECT request to a server cannot use "Fast Open" to send payload bef
## Changes from Chromium upstream ## Changes from Chromium upstream
- Minimize source code and build size (1% of the original) - Minimize source code and build size (0.3% of the original)
- Disable exceptions and RTTI, except on Mac and Android. - Disable exceptions and RTTI, except on Mac and Android.
- Support OpenWrt builds - Support OpenWrt builds
- (Android, Linux) Use the builtin verifier instead of the system verifier (drop dependency of NSS on Linux) and read the system trust store from (following Go's behavior in crypto/x509/root_unix.go and crypto/x509/root_linux.go): - (Android, Linux) Use the builtin verifier instead of the system verifier (drop dependency of NSS on Linux) and read the system trust store from (following Go's behavior in crypto/x509/root_unix.go and crypto/x509/root_linux.go):


@@ -14,8 +14,8 @@ Description:
"proxy": "..." "proxy": "..."
} }
`--listen` can be specified multiple times on the command line, Specifying a flag multiple times on the command line is equivalent to
and can be either a string or an array of strings in the JSON file. having an array of multiple strings in the JSON file.
Uses "config.json" by default if run without arguments. Uses "config.json" by default if run without arguments.
@@ -29,18 +29,16 @@ Options:
Prints version. Prints version.
--listen=<proto>://[addr][:port] --listen=LISTEN-URI
--listen=socks://[[user]:[pass]@][addr][:port]
Listens at addr:port with protocol <proto>. LISTEN-URI = <LISTEN-PROTO>"://"[<USER>":"<PASS>"@"][<ADDR>][":"<PORT>]
LISTEN-PROTO = "socks" | "http" | "redir"
Listens at addr:port with protocol <LISTEN-PROTO>.
Can be specified multiple times to listen on multiple ports. Can be specified multiple times to listen on multiple ports.
Available proto: socks, http, redir.
Default proto, addr, port: socks, 0.0.0.0, 1080. Default proto, addr, port: socks, 0.0.0.0, 1080.
* http: Supports only proxying https:// URLs, no http://. Note: redir requires specific iptables rules and uses no authentication.
* redir: Works with certain iptables setup.
(Redirecting locally originated traffic) (Redirecting locally originated traffic)
iptables -t nat -A OUTPUT -d $proxy_server_ip -j RETURN iptables -t nat -A OUTPUT -d $proxy_server_ip -j RETURN
@@ -57,10 +55,21 @@ Options:
The artificial results are not saved for privacy, so restarting the The artificial results are not saved for privacy, so restarting the
resolver may cause downstream to cache stale results. resolver may cause downstream to cache stale results.
--proxy=<proto>://<user>:<pass>@<hostname>[:<port>] --proxy=PROXY
Routes traffic via the proxy server. Connects directly by default. PROXY = PROXY-CHAIN | SOCKS-PROXY
Available proto: https, quic. Infers port by default. PROXY-CHAIN = <PROXY-URI>[","<PROXY-CHAIN>]
PROXY-URI = <PROXY-PROTO>"://"[<USER>":"<PASS>"@"]<HOSTNAME>[":"<PORT>]
PROXY-PROTO = "http" | "https" | "quic"
SOCKS-PROXY = "socks://"<HOSTNAME>[":"<PORT>]
Routes traffic via the proxy chain.
The default proxy is direct connection without proxying.
The last PROXY-URI is negotiated automatically for Naive padding.
Limitations:
* QUIC proxies cannot follow TCP-based proxies in a proxy chain.
* The user needs to ensure there is no loop in the proxy chain.
* SOCKS proxies do not support chaining, authentication, or Naive padding.
--insecure-concurrency=<N> --insecure-concurrency=<N>
@@ -97,3 +106,7 @@ Options:
--ssl-key-log-file=<path> --ssl-key-log-file=<path>
Saves SSL keys for Wireshark inspection. Saves SSL keys for Wireshark inspection.
--no-post-quantum
Overrides the default and disables post-quantum key agreement.
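Putting the LISTEN-URI and PROXY grammars together, a minimal config.json of the shape described above could look like the following sketch (addresses, ports, and credentials are placeholders):

```json
{
  "listen": ["socks://127.0.0.1:1080", "http://127.0.0.1:8080"],
  "proxy": "https://user:pass@example.com"
}
```

The array form of "listen" mirrors passing --listen more than once on the command line.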

apk/.gitignore

@@ -1,2 +1,3 @@
.gradle/ .gradle/
app/build/ app/build/
app/libs/


@@ -4,7 +4,7 @@ plugins {
} }
android { android {
namespace = "moe.matsuri.exe.naive" namespace = "io.nekohasekai.sagernet.plugin.naive"
signingConfigs { signingConfigs {
create("release") { create("release") {
@@ -17,23 +17,21 @@ android {
buildTypes { buildTypes {
getByName("release") { getByName("release") {
proguardFiles(
getDefaultProguardFile("proguard-android-optimize.txt"),
file("proguard-rules.pro")
)
isMinifyEnabled = true isMinifyEnabled = true
signingConfig = signingConfigs.getByName("release") signingConfig = signingConfigs.getByName("release")
} }
} }
compileSdk = 33 buildToolsVersion = "35.0.0"
compileSdk = 35
defaultConfig { defaultConfig {
minSdk = 21 minSdk = 24
targetSdk = 33 targetSdk = 35
applicationId = "moe.matsuri.exe.naive" applicationId = "io.nekohasekai.sagernet.plugin.naive"
versionCode = System.getenv("APK_VERSION_NAME").removePrefix("v").split(".")[0].toInt() versionCode = System.getenv("APK_VERSION_NAME").removePrefix("v").split(".")[0].toInt() * 10 + System.getenv("APK_VERSION_NAME").removePrefix("v").split("-")[1].toInt()
versionName = System.getenv("APK_VERSION_NAME").removePrefix("v") versionName = System.getenv("APK_VERSION_NAME").removePrefix("v")
splits.abi { splits.abi {
isEnable = true isEnable = true
@@ -44,12 +42,8 @@ android {
} }
compileOptions { compileOptions {
sourceCompatibility = JavaVersion.VERSION_1_8 sourceCompatibility = JavaVersion.VERSION_17
targetCompatibility = JavaVersion.VERSION_1_8 targetCompatibility = JavaVersion.VERSION_17
}
kotlinOptions {
jvmTarget = "1.8"
} }
lint { lint {
@@ -59,6 +53,10 @@ android {
warningsAsErrors = true warningsAsErrors = true
} }
packaging {
jniLibs.useLegacyPackaging = true
}
applicationVariants.all { applicationVariants.all {
outputs.all { outputs.all {
this as com.android.build.gradle.internal.api.BaseVariantOutputImpl this as com.android.build.gradle.internal.api.BaseVariantOutputImpl
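The new versionCode expression above packs the Chromium major version and the naiveproxy release suffix into a single integer. A standalone Kotlin sketch of the same arithmetic (the tag strings are examples, not pinned versions):

```kotlin
fun versionCodeOf(tag: String): Int {
    val v = tag.removePrefix("v")         // e.g. "135.0.7049.38-1"
    val major = v.split(".")[0].toInt()   // Chromium major version, e.g. 135
    val release = v.split("-")[1].toInt() // naiveproxy release suffix, e.g. 1
    return major * 10 + release           // 1351
}

fun main() {
    println(versionCodeOf("v135.0.7049.38-1")) // 1351
    println(versionCodeOf("v1.1.1.1-1"))       // 11, the workflow's fallback tag
}
```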


@@ -13,13 +13,12 @@
<application <application
android:allowBackup="false" android:allowBackup="false"
android:extractNativeLibs="true"
android:icon="@mipmap/ic_launcher" android:icon="@mipmap/ic_launcher"
android:label="Naïve For NekoBox" android:label="Naïve Plugin"
android:roundIcon="@mipmap/ic_launcher_round"> android:roundIcon="@mipmap/ic_launcher_round">
<provider <provider
android:name=".BinaryProvider" android:name=".BinaryProvider"
android:authorities="moe.matsuri.exe.naive.BinaryProvider" android:authorities="io.nekohasekai.sagernet.plugin.naive.BinaryProvider"
android:directBootAware="true" android:directBootAware="true"
android:exported="true" android:exported="true"
tools:ignore="ExportedContentProvider"> tools:ignore="ExportedContentProvider">
@@ -29,7 +28,7 @@
<intent-filter> <intent-filter>
<action android:name="io.nekohasekai.sagernet.plugin.ACTION_NATIVE_PLUGIN" /> <action android:name="io.nekohasekai.sagernet.plugin.ACTION_NATIVE_PLUGIN" />
<data <data
android:host="moe.matsuri.lite" android:host="io.nekohasekai.sagernet"
android:path="/naive-plugin" android:path="/naive-plugin"
android:scheme="plugin" /> android:scheme="plugin" />
</intent-filter> </intent-filter>


@@ -17,7 +17,7 @@
* * * *
******************************************************************************/ ******************************************************************************/
package moe.matsuri.exe.naive package io.nekohasekai.sagernet.plugin.naive
import android.net.Uri import android.net.Uri
import android.os.ParcelFileDescriptor import android.os.ParcelFileDescriptor


@@ -5,8 +5,8 @@ buildscript {
mavenCentral() mavenCentral()
} }
dependencies { dependencies {
classpath 'com.android.tools.build:gradle:7.3.1' classpath 'com.android.tools.build:gradle:8.6.0'
classpath 'org.jetbrains.kotlin:kotlin-gradle-plugin:1.6.10' classpath 'org.jetbrains.kotlin:kotlin-gradle-plugin:2.0.20'
// NOTE: Do not place your application dependencies here; they belong // NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files // in the individual module build.gradle files

Binary file not shown.


@@ -1,6 +1,7 @@
#Thu Jan 27 22:42:44 HKT 2022
distributionBase=GRADLE_USER_HOME distributionBase=GRADLE_USER_HOME
distributionUrl=https\://services.gradle.org/distributions/gradle-7.4-bin.zip
distributionPath=wrapper/dists distributionPath=wrapper/dists
zipStorePath=wrapper/dists distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

apk/gradlew

@@ -1,7 +1,7 @@
#!/usr/bin/env sh #!/bin/sh
# #
# Copyright 2015 the original author or authors. # Copyright © 2015-2021 the original authors.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@@ -15,69 +15,104 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# #
# SPDX-License-Identifier: Apache-2.0
#
############################################################################## ##############################################################################
## #
## Gradle start up script for UN*X # Gradle start up script for POSIX generated by Gradle.
## #
# Important for running:
#
# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
# noncompliant, but you have some other compliant shell such as ksh or
# bash, then to run this script, type that shell name before the whole
# command line, like:
#
# ksh Gradle
#
# Busybox and similar reduced shells will NOT work, because this script
# requires all of these POSIX shell features:
# * functions;
# * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
# «${var#prefix}», «${var%suffix}», and «$( cmd )»;
# * compound commands having a testable exit status, especially «case»;
# * various built-in commands including «command», «set», and «ulimit».
#
# Important for patching:
#
# (2) This script targets any POSIX shell, so it avoids extensions provided
# by Bash, Ksh, etc; in particular arrays are avoided.
#
# The "traditional" practice of packing multiple parameters into a
# space-separated string is a well documented source of bugs and security
# problems, so this is (mostly) avoided, by progressively accumulating
# options in "$@", and eventually passing that to Java.
#
# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
# see the in-line comments for details.
#
# There are tweaks for specific operating systems such as AIX, CygWin,
# Darwin, MinGW, and NonStop.
#
# (3) This script is generated from the Groovy template
# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
# within the Gradle project.
#
# You can find Gradle at https://github.com/gradle/gradle/.
#
############################################################################## ##############################################################################
# Attempt to set APP_HOME # Attempt to set APP_HOME
# Resolve links: $0 may be a link # Resolve links: $0 may be a link
PRG="$0" app_path=$0
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do # Need this for daisy-chained symlinks.
ls=`ls -ld "$PRG"` while
link=`expr "$ls" : '.*-> \(.*\)$'` APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path
if expr "$link" : '/.*' > /dev/null; then [ -h "$app_path" ]
PRG="$link" do
else ls=$( ls -ld "$app_path" )
PRG=`dirname "$PRG"`"/$link" link=${ls#*' -> '}
fi case $link in #(
/*) app_path=$link ;; #(
*) app_path=$APP_HOME$link ;;
esac
done done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle" # This is normally unused
APP_BASE_NAME=`basename "$0"` # shellcheck disable=SC2034
APP_BASE_NAME=${0##*/}
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s
' "$PWD" ) || exit
# Use the maximum available, or set MAX_FD != -1 to use that value. # Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum" MAX_FD=maximum
warn () { warn () {
echo "$*" echo "$*"
} } >&2
die () { die () {
echo echo
echo "$*" echo "$*"
echo echo
exit 1 exit 1
} } >&2
# OS specific support (must be 'true' or 'false'). # OS specific support (must be 'true' or 'false').
cygwin=false cygwin=false
msys=false msys=false
darwin=false darwin=false
nonstop=false nonstop=false
case "`uname`" in case "$( uname )" in #(
CYGWIN* ) CYGWIN* ) cygwin=true ;; #(
cygwin=true Darwin* ) darwin=true ;; #(
;; MSYS* | MINGW* ) msys=true ;; #(
Darwin* ) NONSTOP* ) nonstop=true ;;
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
@@ -87,9 +122,9 @@ CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
if [ -n "$JAVA_HOME" ] ; then if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables # IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java" JAVACMD=$JAVA_HOME/jre/sh/java
else else
JAVACMD="$JAVA_HOME/bin/java" JAVACMD=$JAVA_HOME/bin/java
fi fi
if [ ! -x "$JAVACMD" ] ; then if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
@@ -98,88 +133,120 @@ Please set the JAVA_HOME variable in your environment to match the
location of your Java installation." location of your Java installation."
fi fi
else else
JAVACMD="java" JAVACMD=java
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. if ! command -v java >/dev/null 2>&1
then
die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the Please set the JAVA_HOME variable in your environment to match the
location of your Java installation." location of your Java installation."
fi
fi fi
# Increase the maximum file descriptors if we can. # Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
MAX_FD_LIMIT=`ulimit -H -n` case $MAX_FD in #(
if [ $? -eq 0 ] ; then max*)
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked.
MAX_FD="$MAX_FD_LIMIT" # shellcheck disable=SC2039,SC3045
fi MAX_FD=$( ulimit -H -n ) ||
ulimit -n $MAX_FD warn "Could not query maximum file descriptor limit"
if [ $? -ne 0 ] ; then esac
warn "Could not set maximum file descriptor limit: $MAX_FD" case $MAX_FD in #(
fi '' | soft) :;; #(
else *)
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked.
fi # shellcheck disable=SC2039,SC3045
fi ulimit -n "$MAX_FD" ||
warn "Could not set maximum file descriptor limit to $MAX_FD"
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin or MSYS, switch paths to Windows format before running java
if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=`expr $i + 1`
done
case $i in
0) set -- ;;
1) set -- "$args0" ;;
2) set -- "$args0" "$args1" ;;
3) set -- "$args0" "$args1" "$args2" ;;
4) set -- "$args0" "$args1" "$args2" "$args3" ;;
5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac esac
fi fi
# Escape application args # Collect all arguments for the java command, stacking in reverse order:
save () { # * args from the command line
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done # * the main class name
echo " " # * -classpath
} # * -D...appname settings
APP_ARGS=`save "$@"` # * --module-path (only if needed)
# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
# Collect all arguments for the java command, following the shell quoting and substitution rules # For Cygwin or MSYS, switch paths to Windows format before running java
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" if "$cygwin" || "$msys" ; then
APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )
JAVACMD=$( cygpath --unix "$JAVACMD" )
# Now convert the arguments - kludge to limit ourselves to /bin/sh
for arg do
if
case $arg in #(
-*) false ;; # don't mess with options #(
/?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath
[ -e "$t" ] ;; #(
*) false ;;
esac
then
arg=$( cygpath --path --ignore --mixed "$arg" )
fi
# Roll the args list around exactly as many times as the number of
# args, so each arg winds up back in the position where it started, but
# possibly modified.
#
# NB: a `for` loop captures its iteration list before it begins, so
# changing the positional parameters here affects neither the number of
# iterations, nor the values presented in `arg`.
shift # remove old arg
set -- "$@" "$arg" # push replacement arg
done
fi
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# Collect all arguments for the java command:
# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
# and any embedded shellness will be escaped.
# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
# treated as '${Hostname}' itself on the command line.
set -- \
"-Dorg.gradle.appname=$APP_BASE_NAME" \
-classpath "$CLASSPATH" \
org.gradle.wrapper.GradleWrapperMain \
"$@"
# Stop when "xargs" is not available.
if ! command -v xargs >/dev/null 2>&1
then
die "xargs is not available"
fi
# Use "xargs" to parse quoted args.
#
# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
#
# In Bash we could simply go:
#
# readarray ARGS < <( xargs -n1 <<<"$var" ) &&
# set -- "${ARGS[@]}" "$@"
#
# but POSIX shell has neither arrays nor command substitution, so instead we
# post-process each arg (as a line of input to sed) to backslash-escape any
# character that might be a shell metacharacter, then use eval to reverse
# that process (while maintaining the separation between arguments), and wrap
# the whole thing up as a single "set" statement.
#
# This will of course break if any of these variables contains a newline or
# an unmatched quote.
#
eval "set -- $(
printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
xargs -n1 |
sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
tr '\n' ' '
)" '"$@"'
exec "$JAVACMD" "$@" exec "$JAVACMD" "$@"

apk/gradlew.bat (new file)

@@ -0,0 +1,94 @@
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@rem SPDX-License-Identifier: Apache-2.0
@rem
@if "%DEBUG%"=="" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%"=="" set DIRNAME=.
@rem This is normally unused
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute
echo. 1>&2
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto execute
echo. 1>&2
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
:end
@rem End local scope for the variables with windows NT shell
if %ERRORLEVEL% equ 0 goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
set EXIT_CODE=%ERRORLEVEL%
if %EXIT_CODE% equ 0 set EXIT_CODE=1
if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
exit /b %EXIT_CODE%
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega


@@ -5,6 +5,6 @@ dependencyResolutionManagement {
mavenCentral() mavenCentral()
} }
} }
rootProject.name = "Matsuri Plugins" rootProject.name = "Naive Plugin"
include ':app' include ':app'


@@ -12,6 +12,46 @@ Standard: Cpp11
InsertBraces: true InsertBraces: true
InsertNewlineAtEOF: true InsertNewlineAtEOF: true
# Sort #includes by following
# https://google.github.io/styleguide/cppguide.html#Names_and_Order_of_Includes
#
# ref: https://clang.llvm.org/docs/ClangFormatStyleOptions.html#includeblocks
IncludeBlocks: Regroup
# ref: https://clang.llvm.org/docs/ClangFormatStyleOptions.html#includecategories
IncludeCategories:
# The win32 api has all sorts of implicit include order dependencies :-/
# Give a few headers special priorities that make sure they appear before
# all other headers.
# Sync this with SerializeIncludes in tools/add_header.py.
# TODO(crbug.com/329138753): remove include sorting from tools/add_header.py
# after confirming clang-format sort works well.
# LINT.IfChange(winheader)
- Regex: '^<objbase\.h>' # This has to be before initguid.h.
Priority: 1
- Regex: '^<(atlbase|initguid|mmdeviceapi|ocidl|ole2|shobjidl|tchar|unknwn|windows|winsock2|winternl|ws2tcpip)\.h>'
Priority: 2
# LINT.ThenChange(/tools/add_header.py:winheader)
# UIAutomation*.h needs to be after base/win/atl.h.
# Note the low priority number.
- Regex: '^<UIAutomation.*\.h>'
Priority: 6
# Other C system headers.
- Regex: '^<.*\.h>'
Priority: 3
# C++ standard library headers.
- Regex: '^<.*>'
Priority: 4
# windows_h_disallowed.h should appear last. Note the low priority number.
- Regex: '"(.*/)?windows_h_disallowed\.h"'
Priority: 7
# Other libraries.
- Regex: '.*'
Priority: 5
# ref: https://clang.llvm.org/docs/ClangFormatStyleOptions.html#includeismainregex
IncludeIsMainRegex: "\
(_(32|64|android|apple|chromeos|freebsd|fuchsia|fuzzer|ios|linux|mac|nacl|openbsd|posix|stubs?|win))?\
(_(unit|browser|perf)?tests?)?$"
# Make sure code like: # Make sure code like:
# IPC_BEGIN_MESSAGE_MAP() # IPC_BEGIN_MESSAGE_MAP()
# IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate) # IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)

src/.gitattributes

@@ -30,6 +30,7 @@
*.proto text eol=lf *.proto text eol=lf
*.rs text eol=lf *.rs text eol=lf
*.sh text eol=lf *.sh text eol=lf
*.spec text eol=lf
*.sql text eol=lf *.sql text eol=lf
*.toml text eol=lf *.toml text eol=lf
*.txt text eol=lf *.txt text eol=lf

src/.gn

@@ -55,11 +55,14 @@ default_args = {
crashpad_dependencies = "chromium" crashpad_dependencies = "chromium"
# Override ANGLE's Vulkan dependencies. # Override ANGLE's Vulkan dependencies.
angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src" angle_vulkan_headers_dir = "//third_party/vulkan-headers/src"
angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src" angle_vulkan_loader_dir = "//third_party/vulkan-loader/src"
angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src" angle_vulkan_tools_dir = "//third_party/vulkan-tools/src"
angle_vulkan_validation_layers_dir = angle_vulkan_validation_layers_dir =
"//third_party/vulkan-deps/vulkan-validation-layers/src" "//third_party/vulkan-validation-layers/src"
# Override VMA's Vulkan dependencies.
vma_vulkan_headers_dir = "//third_party/vulkan-headers/src"
# Overwrite default args declared in the Fuchsia sdk # Overwrite default args declared in the Fuchsia sdk
fuchsia_sdk_readelf_exec = fuchsia_sdk_readelf_exec =
@@ -69,6 +72,8 @@ default_args = {
pdf_partition_alloc_dir = "//base/allocator/partition_allocator" pdf_partition_alloc_dir = "//base/allocator/partition_allocator"
devtools_visibility = [ "*" ] devtools_visibility = [ "*" ]
clang_unsafe_buffers_paths = "//build/config/unsafe_buffers_paths.txt"
} }
# These are the targets to skip header checking by default. The files in targets # These are the targets to skip header checking by default. The files in targets
@@ -85,7 +90,7 @@ no_check_targets = [
"//v8:v8_libplatform", # 2 errors "//v8:v8_libplatform", # 2 errors
] ]
# These are the list of GN files that run exec_script. This whitelist exists # These are the list of GN files that run exec_script. This allowlist exists
# to force additional review for new uses of exec_script, which is strongly # to force additional review for new uses of exec_script, which is strongly
# discouraged. # discouraged.
# #
@@ -140,11 +145,11 @@ no_check_targets = [
# this situation much easier to create. if the build always lists the # this situation much easier to create. if the build always lists the
# files and passes them to a script, it will always be correct. # files and passes them to a script, it will always be correct.
exec_script_whitelist = exec_script_allowlist =
build_dotfile_settings.exec_script_whitelist + build_dotfile_settings.exec_script_allowlist +
angle_dotfile_settings.exec_script_whitelist + angle_dotfile_settings.exec_script_whitelist +
[ [
# Whitelist entries for //build should go into # Allowlist entries for //build should go into
# //build/dotfile_settings.gni instead, so that they can be shared # //build/dotfile_settings.gni instead, so that they can be shared
# with other repos. The entries in this list should be only for files # with other repos. The entries in this list should be only for files
# in the Chromium repo outside of //build. # in the Chromium repo outside of //build.


@@ -17,6 +17,7 @@ Aaron Jacobs <samusaaron3@gmail.com>
Aaron Leventhal <aaronlevbugs@gmail.com> Aaron Leventhal <aaronlevbugs@gmail.com>
Aaron Randolph <aaron.randolph@gmail.com> Aaron Randolph <aaron.randolph@gmail.com>
Aaryaman Vasishta <jem456.vasishta@gmail.com> Aaryaman Vasishta <jem456.vasishta@gmail.com>
AbdAlRahman Gad <abdobngad@gmail.com>
Abdu Ameen <abdu.ameen000@gmail.com> Abdu Ameen <abdu.ameen000@gmail.com>
Abdullah Abu Tasneem <a.tasneem@samsung.com> Abdullah Abu Tasneem <a.tasneem@samsung.com>
Abhijeet Kandalkar <abhijeet.k@samsung.com> Abhijeet Kandalkar <abhijeet.k@samsung.com>
@@ -48,18 +49,20 @@ Ahmed Elwasefi <a.m.elwasefi@gmail.com>
Ahmet Emir Ercin <ahmetemiremir@gmail.com> Ahmet Emir Ercin <ahmetemiremir@gmail.com>
Aidarbek Suleimenov <suleimenov.aidarbek@gmail.com> Aidarbek Suleimenov <suleimenov.aidarbek@gmail.com>
Aiden Grossman <aidengrossmanpso@gmail.com> Aiden Grossman <aidengrossmanpso@gmail.com>
Airing Deng <airingdeng@gmail.com>
Ajay Berwal <a.berwal@samsung.com> Ajay Berwal <a.berwal@samsung.com>
Ajay Berwal <ajay.berwal@samsung.com> Ajay Berwal <ajay.berwal@samsung.com>
Ajay Sharma <ajay.sh@samsung.com> Ajay Sharma <ajay.sh@samsung.com>
Ajith Kumar V <ajith.v@samsung.com> Ajith Kumar V <ajith.v@samsung.com>
Akash Yadav <akash1.yadav@samsung.com> Akash Yadav <akash1.yadav@samsung.com>
Akihiko Odaki <akihiko.odaki@gmail.com>
Akos Kiss <akiss@inf.u-szeged.hu> Akos Kiss <akiss@inf.u-szeged.hu>
Akpokwaye Mudiaga <mudiaga.akpokwaye@gitstart.dev>
Aku Kotkavuo <a.kotkavuo@partner.samsung.com> Aku Kotkavuo <a.kotkavuo@partner.samsung.com>
Aldo Culquicondor <alculquicondor@gmail.com> Aldo Culquicondor <alculquicondor@gmail.com>
Alec Petridis <alecthechop@gmail.com> Alec Petridis <alecthechop@gmail.com>
Aleksandar Stojiljkovic <aleksandar.stojiljkovic@intel.com> Aleksandar Stojiljkovic <aleksandar.stojiljkovic@intel.com>
Aleksei Gurianov <gurianov@gmail.com> Aleksei Gurianov <gurianov@gmail.com>
Aleksey Khoroshilov <akhoroshilov@brave.com>
Alesandro Ortiz <alesandro@alesandroortiz.com> Alesandro Ortiz <alesandro@alesandroortiz.com>
Alessandro Astone <ales.astone@gmail.com> Alessandro Astone <ales.astone@gmail.com>
Alex Chronopoulos <achronop@gmail.com> Alex Chronopoulos <achronop@gmail.com>
@@ -84,6 +87,7 @@ Alexey Kuts <kruntuid@gmail.com>
Alexey Kuzmin <alex.s.kuzmin@gmail.com> Alexey Kuzmin <alex.s.kuzmin@gmail.com>
Alexey Kuznetsov <saturas2000@gmail.com> Alexey Kuznetsov <saturas2000@gmail.com>
Alexey Terentiev <alexeyter@gmail.com> Alexey Terentiev <alexeyter@gmail.com>
Alexia Bojian <bojianalexia4@gmail.com>
Alexis Brenon <brenon.alexis@gmail.com> Alexis Brenon <brenon.alexis@gmail.com>
Alexis La Goutte <alexis.lagoutte@gmail.com> Alexis La Goutte <alexis.lagoutte@gmail.com>
Alexis Menard <alexis.menard@intel.com> Alexis Menard <alexis.menard@intel.com>
@@ -114,6 +118,7 @@ Andreas Papacharalampous <andreas@apap04.com>
Andrei Borza <andrei.borza@gmail.com> Andrei Borza <andrei.borza@gmail.com>
Andrei Parvu <andrei.prv@gmail.com> Andrei Parvu <andrei.prv@gmail.com>
Andrei Parvu <parvu@adobe.com> Andrei Parvu <parvu@adobe.com>
Andrei Volykhin <andrei.volykhin@gmail.com>
Andres Salomon <dilinger@queued.net> Andres Salomon <dilinger@queued.net>
Andreu Botella <andreu@andreubotella.com> Andreu Botella <andreu@andreubotella.com>
Andrew Boyarshin <andrew.boyarshin@gmail.com> Andrew Boyarshin <andrew.boyarshin@gmail.com>
@@ -152,6 +157,7 @@ Armin Burgmeier <aburgmeier@bloomberg.net>
Arnaud Coomans <hello@acoomans.com> Arnaud Coomans <hello@acoomans.com>
Arnaud Mandy <arnaud.mandy@intel.com> Arnaud Mandy <arnaud.mandy@intel.com>
Arnaud Renevier <a.renevier@samsung.com> Arnaud Renevier <a.renevier@samsung.com>
Arnaud Renevier <arnaud@switchboard.app>
Arpita Bahuguna <a.bah@samsung.com> Arpita Bahuguna <a.bah@samsung.com>
Arthur Lussos <developer0420@gmail.com> Arthur Lussos <developer0420@gmail.com>
Artin Lindqvist <artin.lindqvist.chromium@gmail.com> Artin Lindqvist <artin.lindqvist.chromium@gmail.com>
@@ -188,6 +194,7 @@ Ben Noordhuis <ben@strongloop.com>
Benedek Heilig <benecene@gmail.com> Benedek Heilig <benecene@gmail.com>
Benjamin Dupont <bedupont@cisco.com> Benjamin Dupont <bedupont@cisco.com>
Benjamin Jemlich <pcgod99@gmail.com> Benjamin Jemlich <pcgod99@gmail.com>
Beomsik Min <beomsikm@gmail.com>
Bernard Cafarelli <voyageur@gentoo.org> Bernard Cafarelli <voyageur@gentoo.org>
Bernhard M. Wiedemann <bwiedemann@suse.de> Bernhard M. Wiedemann <bwiedemann@suse.de>
Bert Belder <bertbelder@gmail.com> Bert Belder <bertbelder@gmail.com>
@@ -205,7 +212,6 @@ Brendan Kirby <brendan.kirby@imgtec.com>
Brendan Long <self@brendanlong.com> Brendan Long <self@brendanlong.com>
Brendon Tiszka <btiszka@gmail.com> Brendon Tiszka <btiszka@gmail.com>
Brett Lewis <brettlewis@brettlewis.us> Brett Lewis <brettlewis@brettlewis.us>
Brian Clifton <clifton@brave.com>
Brian Dunn <brian@theophil.us> Brian Dunn <brian@theophil.us>
Brian G. Merrell <bgmerrell@gmail.com> Brian G. Merrell <bgmerrell@gmail.com>
Brian Konzman, SJ <b.g.konzman@gmail.com> Brian Konzman, SJ <b.g.konzman@gmail.com>
@@ -214,6 +220,7 @@ Brian Merrell, Novell Inc. <bgmerrell@gmail.com>
Brian Salomon <briansalomon@gmail.com> Brian Salomon <briansalomon@gmail.com>
Brian Yip <itsbriany@gmail.com> Brian Yip <itsbriany@gmail.com>
Brook Hong <hzgmaxwell@gmail.com> Brook Hong <hzgmaxwell@gmail.com>
Bruce Dai <feng.dai@intel.com>
Bruno Calvignac <bruno@flock.com> Bruno Calvignac <bruno@flock.com>
Bruno de Oliveira Abinader <brunoabinader@gmail.com> Bruno de Oliveira Abinader <brunoabinader@gmail.com>
Bruno Pitrus <brunopitrus@hotmail.com> Bruno Pitrus <brunopitrus@hotmail.com>
@@ -233,6 +240,7 @@ Cameron Gutman <aicommander@gmail.com>
Camille Viot <viot.camille@outlook.com> Camille Viot <viot.camille@outlook.com>
Can Liu <peter.can.liu@gmail.com> Can Liu <peter.can.liu@gmail.com>
Carlos Santa <carlos.santa@intel.com> Carlos Santa <carlos.santa@intel.com>
Casey Primozic <me@ameo.link>
Catalin Badea <badea@adobe.com> Catalin Badea <badea@adobe.com>
Cathie Chen <cathiechen@tencent.com> Cathie Chen <cathiechen@tencent.com>
Cem Kocagil <cem.kocagil@gmail.com> Cem Kocagil <cem.kocagil@gmail.com>
@@ -272,6 +280,7 @@ Chris Szurgot <szurgotc@amazon.com>
Chris Tserng <tserng@amazon.com> Chris Tserng <tserng@amazon.com>
Chris Vasselli <clindsay@gmail.com> Chris Vasselli <clindsay@gmail.com>
Chris Ye <hawkoyates@gmail.com> Chris Ye <hawkoyates@gmail.com>
Christian Liebel <christianliebel@gmail.com>
Christoph Staengle <christoph142@gmx.com> Christoph Staengle <christoph142@gmx.com>
Christophe Dumez <ch.dumez@samsung.com> Christophe Dumez <ch.dumez@samsung.com>
Christopher Dale <chrelad@gmail.com> Christopher Dale <chrelad@gmail.com>
@@ -292,6 +301,7 @@ Daiwei Li <daiweili@suitabletech.com>
Damien Marié <damien@dam.io> Damien Marié <damien@dam.io>
Dan McCombs <overridex@gmail.com> Dan McCombs <overridex@gmail.com>
Daniel Adams <msub2official@gmail.com> Daniel Adams <msub2official@gmail.com>
Daniel Bertalan <dani@danielbertalan.dev>
Daniel Bevenius <daniel.bevenius@gmail.com> Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel Bomar <dbdaniel42@gmail.com> Daniel Bomar <dbdaniel42@gmail.com>
Daniel Carvalho Liedke <dliedke@gmail.com> Daniel Carvalho Liedke <dliedke@gmail.com>
@ -303,9 +313,11 @@ Daniel Lockyer <thisisdaniellockyer@gmail.com>
Daniel Nishi <dhnishi@gmail.com> Daniel Nishi <dhnishi@gmail.com>
Daniel Platz <daplatz@googlemail.com> Daniel Platz <daplatz@googlemail.com>
Daniel Playfair Cal <daniel.playfair.cal@gmail.com> Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
Daniel Richard G. <iskunk@gmail.com>
Daniel Shaulov <dshaulov@ptc.com> Daniel Shaulov <dshaulov@ptc.com>
Daniel Trebbien <dtrebbien@gmail.com> Daniel Trebbien <dtrebbien@gmail.com>
Daniel Waxweiler <daniel.waxweiler@gmail.com> Daniel Waxweiler <daniel.waxweiler@gmail.com>
Daniel Zhao <zhaodani@amazon.com>
Dániel Bátyai <dbatyai@inf.u-szeged.hu> Dániel Bátyai <dbatyai@inf.u-szeged.hu>
Dániel Vince <vinced@inf.u-szeged.hu> Dániel Vince <vinced@inf.u-szeged.hu>
Daniil Suvorov <severecloud@gmail.com> Daniil Suvorov <severecloud@gmail.com>
@ -313,13 +325,16 @@ Danny Weiss <danny.weiss.fr@gmail.com>
Danylo Boiko <danielboyko02@gmail.com> Danylo Boiko <danielboyko02@gmail.com>
Daoming Qiu <daoming.qiu@intel.com> Daoming Qiu <daoming.qiu@intel.com>
Darik Harter <darik.harter@gmail.com> Darik Harter <darik.harter@gmail.com>
Darryl Pogue <darryl@dpogue.ca>
Darshan Sen <raisinten@gmail.com> Darshan Sen <raisinten@gmail.com>
Darshini KN <kn.darshini@samsung.com> Darshini KN <kn.darshini@samsung.com>
Dave Vandyke <kzar@kzar.co.uk> Dave Vandyke <kzar@kzar.co.uk>
David Benjamin <davidben@mit.edu> David Benjamin <davidben@mit.edu>
David Brown <develop.david.brown@gmail.com> David Brown <develop.david.brown@gmail.com>
David Cernoch <dcernoch@uplandsoftware.com>
David Davidovic <david@davidovic.io> David Davidovic <david@davidovic.io>
David Erceg <erceg.david@gmail.com> David Erceg <erceg.david@gmail.com>
David Faden <dfaden@gmail.com>
David Fox <david@davidjfox.com> David Fox <david@davidjfox.com>
David Futcher <david.mike.futcher@gmail.com> David Futcher <david.mike.futcher@gmail.com>
David Jin <davidjin@amazon.com> David Jin <davidjin@amazon.com>
@ -328,6 +343,7 @@ David Leen <davileen@amazon.com>
David Manouchehri <david@davidmanouchehri.com> David Manouchehri <david@davidmanouchehri.com>
David McAllister <mcdavid@amazon.com> David McAllister <mcdavid@amazon.com>
David Michael Barr <david.barr@samsung.com> David Michael Barr <david.barr@samsung.com>
David Redondo <kde@david-redondo.de>
David Sanders <dsanders11@ucsbalum.com> David Sanders <dsanders11@ucsbalum.com>
David Spellman <dspell@amazon.com> David Spellman <dspell@amazon.com>
David Valachovic <adenflorian@gmail.com> David Valachovic <adenflorian@gmail.com>
@ -335,6 +351,7 @@ Dax Kelson <dkelson@gurulabs.com>
Dean Leitersdorf <dean.leitersdorf@gmail.com> Dean Leitersdorf <dean.leitersdorf@gmail.com>
Debadree Chatterjee <debadree333@gmail.com> Debadree Chatterjee <debadree333@gmail.com>
Debashish Samantaray <d.samantaray@samsung.com> Debashish Samantaray <d.samantaray@samsung.com>
Debin Zhang <debinzhang3@gmail.com>
Debug Wang <debugwang@tencent.com> Debug Wang <debugwang@tencent.com>
Deep Shah <deep.shah@samsung.com> Deep Shah <deep.shah@samsung.com>
Deepak Dilip Borade <deepak.db@samsung.com> Deepak Dilip Borade <deepak.db@samsung.com>
@ -354,9 +371,11 @@ Diana Suvorova <diana.suvorova@gmail.com>
Diego Fernández Santos <agujaydedal@gmail.com> Diego Fernández Santos <agujaydedal@gmail.com>
Diego Ferreiro Val <elfogris@gmail.com> Diego Ferreiro Val <elfogris@gmail.com>
Dillon Sellars <dill.sellars@gmail.com> Dillon Sellars <dill.sellars@gmail.com>
Dingming Liu <liudingming@bytedance.com>
Divya Bansal <divya.bansal@samsung.com> Divya Bansal <divya.bansal@samsung.com>
Dmitry Shachnev <mitya57@gmail.com> Dmitry Shachnev <mitya57@gmail.com>
Dmitry Sokolov <dimanne@gmail.com> Dmitry Sokolov <dimanne@gmail.com>
Dominic Elm <elmdominic@gmx.net>
Dominic Farolino <domfarolino@gmail.com> Dominic Farolino <domfarolino@gmail.com>
Dominic Jodoin <dominic.jodoin@gmail.com> Dominic Jodoin <dominic.jodoin@gmail.com>
Dominik Röttsches <dominik.rottsches@intel.com> Dominik Röttsches <dominik.rottsches@intel.com>
@ -371,6 +390,7 @@ Dongseong Hwang <dongseong.hwang@intel.com>
Dongwoo Joshua Im <dw.im@samsung.com> Dongwoo Joshua Im <dw.im@samsung.com>
Dongyu Lin <l2d4y3@gmail.com> Dongyu Lin <l2d4y3@gmail.com>
Donna Wu <donna.wu@intel.com> Donna Wu <donna.wu@intel.com>
Douglas Browne <douglas.browne123@gmail.com>
Douglas F. Turner <doug.turner@gmail.com> Douglas F. Turner <doug.turner@gmail.com>
Drew Blaisdell <drew.blaisdell@gmail.com> Drew Blaisdell <drew.blaisdell@gmail.com>
Dushyant Kant Sharma <dush.sharma@samsung.com> Dushyant Kant Sharma <dush.sharma@samsung.com>
@ -389,17 +409,20 @@ Egor Starkov <egor.starkov@samsung.com>
Ehsan Akhgari <ehsan.akhgari@gmail.com> Ehsan Akhgari <ehsan.akhgari@gmail.com>
Ehsan Akhgari <ehsan@mightyapp.com> Ehsan Akhgari <ehsan@mightyapp.com>
Elan Ruusamäe <elan.ruusamae@gmail.com> Elan Ruusamäe <elan.ruusamae@gmail.com>
Eldar Rello <eldar.rello@gmail.com>
Ely Ronnen <elyronnen@gmail.com> Ely Ronnen <elyronnen@gmail.com>
Emil Suleymanov <emil@esnx.xyz> Emil Suleymanov <emil@esnx.xyz>
Ergun Erdogmus <erdogmusergun@gmail.com> Ergun Erdogmus <erdogmusergun@gmail.com>
Eric Ahn <byungwook.ahn@gmail.com> Eric Ahn <byungwook.ahn@gmail.com>
Eric Huang <ele828@gmail.com> Eric Huang <ele828@gmail.com>
Eric Long <i@hack3r.moe>
Eric Rescorla <ekr@rtfm.com> Eric Rescorla <ekr@rtfm.com>
Erik Hill <erikghill@gmail.com> Erik Hill <erikghill@gmail.com>
Erik Kurzinger <ekurzinger@gmail.com> Erik Kurzinger <ekurzinger@gmail.com>
Erik Sjölund <erik.sjolund@gmail.com> Erik Sjölund <erik.sjolund@gmail.com>
Eriq Augustine <eriq.augustine@gmail.com> Eriq Augustine <eriq.augustine@gmail.com>
Ernesto Mudu <ernesto.mudu@gmail.com> Ernesto Mudu <ernesto.mudu@gmail.com>
Ethan Chen <randomgamingdev@gmail.com>
Ethan Wong <bunnnywong@gmail.com> Ethan Wong <bunnnywong@gmail.com>
Etienne Laurin <etienne@atnnn.com> Etienne Laurin <etienne@atnnn.com>
Eugene Kim <eugene70kim@gmail.com> Eugene Kim <eugene70kim@gmail.com>
@ -426,7 +449,6 @@ Finbar Crago <finbar.crago@gmail.com>
François Beaufort <beaufort.francois@gmail.com> François Beaufort <beaufort.francois@gmail.com>
François Devatine <devatine@verizonmedia.com> François Devatine <devatine@verizonmedia.com>
Francois Kritzinger <francoisk777@gmail.com> Francois Kritzinger <francoisk777@gmail.com>
Francois Marier <francois@brave.com>
Francois Rauch <leopardb@gmail.com> Francois Rauch <leopardb@gmail.com>
Frankie Dintino <fdintino@theatlantic.com> Frankie Dintino <fdintino@theatlantic.com>
Franklin Ta <fta2012@gmail.com> Franklin Ta <fta2012@gmail.com>
@ -469,6 +491,7 @@ Greg Visser <gregvis@gmail.com>
Gregory Davis <gpdavis.chromium@gmail.com> Gregory Davis <gpdavis.chromium@gmail.com>
Grzegorz Czajkowski <g.czajkowski@samsung.com> Grzegorz Czajkowski <g.czajkowski@samsung.com>
Guangzhen Li <guangzhen.li@intel.com> Guangzhen Li <guangzhen.li@intel.com>
Guobin Wu <wuguobin.1229@bytedance.com>
Gurpreet Kaur <k.gurpreet@samsung.com> Gurpreet Kaur <k.gurpreet@samsung.com>
Gustav Tiger <gustav.tiger@sonymobile.com> Gustav Tiger <gustav.tiger@sonymobile.com>
Gyuyoung Kim <gyuyoung.kim@navercorp.com> Gyuyoung Kim <gyuyoung.kim@navercorp.com>
@ -481,11 +504,15 @@ Halley Zhao <halley.zhao@intel.com>
Halton Huo <halton.huo@gmail.com> Halton Huo <halton.huo@gmail.com>
Halton Huo <halton.huo@intel.com> Halton Huo <halton.huo@intel.com>
Hans Hillen <hans.hillen@gmail.com> Hans Hillen <hans.hillen@gmail.com>
Hansel Lee <mr.hansel.lee@gmail.com>
Hanwen Zheng <eserinc.z@gmail.com>
Hao Li <hao.x.li@intel.com> Hao Li <hao.x.li@intel.com>
Haojian Wu <hokein.wu@gmail.com> Haojian Wu <hokein.wu@gmail.com>
Haoran Tang <haoran.tang.personal@gmail.com>
Haoxuan Zhang <zhanghaoxuan.59@bytedance.com> Haoxuan Zhang <zhanghaoxuan.59@bytedance.com>
Hari Singh <hari.singh1@samsung.com> Hari Singh <hari.singh1@samsung.com>
Harpreet Singh Khurana <harpreet.sk@samsung.com> Harpreet Singh Khurana <harpreet.sk@samsung.com>
Harshal Gupta <gupta.h@samsung.com>
Harshikesh Kumar <harshikeshnobug@gmail.com> Harshikesh Kumar <harshikeshnobug@gmail.com>
Harshit Pal <harshitp12345@gmail.com> Harshit Pal <harshitp12345@gmail.com>
Hassan Salehe Matar <hassansalehe@gmail.com> Hassan Salehe Matar <hassansalehe@gmail.com>
@ -503,7 +530,6 @@ Himanshu Joshi <h.joshi@samsung.com>
Himanshu Nayak <himanshu.nayak@amd.corp-partner.google.com> Himanshu Nayak <himanshu.nayak@amd.corp-partner.google.com>
Hiroki Oshima <hiroki.oshima@gmail.com> Hiroki Oshima <hiroki.oshima@gmail.com>
Hiroyuki Matsuda <gsittyz@gmail.com> Hiroyuki Matsuda <gsittyz@gmail.com>
Ho Cheung <uioptt24@gmail.com>
Hodol Han <bab6ting@gmail.com> Hodol Han <bab6ting@gmail.com>
Holger Kraus <kraush@amazon.com> Holger Kraus <kraush@amazon.com>
Hong Zheng <hong.zheng@intel.com> Hong Zheng <hong.zheng@intel.com>
@ -556,23 +582,28 @@ Ivan Naydonov <samogot@gmail.com>
Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com> Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com>
Ivan Sham <ivansham@amazon.com> Ivan Sham <ivansham@amazon.com>
Ivan Sidorov <ivansid@gmail.com> Ivan Sidorov <ivansid@gmail.com>
Jacek Fedoryński <jfedor@gmail.com>
Jack Bates <jack@nottheoilrig.com> Jack Bates <jack@nottheoilrig.com>
Jack Shi <flystone2020@gmail.com>
Jackson Loeffler <j@jloeffler.com> Jackson Loeffler <j@jloeffler.com>
Jacky Hu <flameddd@gmail.com> Jacky Hu <flameddd@gmail.com>
Jacob Clark <jacob.jh.clark@googlemail.com> Jacob Clark <jacob.jh.clark@googlemail.com>
Jacob Mandelson <jacob@mandelson.org> Jacob Mandelson <jacob@mandelson.org>
Jaehun Lim <ljaehun.lim@samsung.com> Jaehun Lim <ljaehun.lim@samsung.com>
Jaehyun Chung <jaehyun.chung@amd.com>
Jaehyun Ko <jaehyun.dev@gmail.com> Jaehyun Ko <jaehyun.dev@gmail.com>
Jaehyun Lee <j-hyun.lee@samsung.com> Jaehyun Lee <j-hyun.lee@samsung.com>
Jaekyeom Kim <btapiz@gmail.com> Jaekyeom Kim <btapiz@gmail.com>
Jaemin Seo <jaemin86.seo@samsung.com> Jaemin Seo <jaemin86.seo@samsung.com>
Jaemo Koo <jaemok@amazon.com> Jaemo Koo <jaemok@amazon.com>
Jaemo Koo <koo2434@gmail.com>
Jaeseok Yoon <yjaeseok@gmail.com> Jaeseok Yoon <yjaeseok@gmail.com>
Jaewon Choi <jaewon.james.choi@gmail.com> Jaewon Choi <jaewon.james.choi@gmail.com>
Jaewon Jung <jw.jung@navercorp.com> Jaewon Jung <jw.jung@navercorp.com>
Jaeyong Bae <jdragon.bae@gmail.com> Jaeyong Bae <jdragon.bae@gmail.com>
Jagadesh P <jagadeshjai1999@gmail.com> Jagadesh P <jagadeshjai1999@gmail.com>
Jagdish Chourasia <jagdish.c@samsung.com> Jagdish Chourasia <jagdish.c@samsung.com>
Jagdish Chourasia <jagdish.jnu08@gmail.com>
Jaime Soriano Pastor <jsorianopastor@gmail.com> Jaime Soriano Pastor <jsorianopastor@gmail.com>
Jake Helfert <jake@helfert.us> Jake Helfert <jake@helfert.us>
Jake Hendy <me@jakehendy.com> Jake Hendy <me@jakehendy.com>
@ -580,6 +611,7 @@ Jakob Weigert <jakob.j.w@googlemail.com>
Jakub Machacek <xtreit@gmail.com> Jakub Machacek <xtreit@gmail.com>
James Burton <jb@0.me.uk> James Burton <jb@0.me.uk>
James Choi <jchoi42@pha.jhu.edu> James Choi <jchoi42@pha.jhu.edu>
James Crosby <crosby.james@gmail.com>
James Raphael Tiovalen <jamestiotio@gmail.com> James Raphael Tiovalen <jamestiotio@gmail.com>
James Stanley <james@apphaus.co.uk> James Stanley <james@apphaus.co.uk>
James Vega <vega.james@gmail.com> James Vega <vega.james@gmail.com>
@ -598,8 +630,10 @@ Jared Wein <weinjared@gmail.com>
Jari Karppanen <jkarp@amazon.com> Jari Karppanen <jkarp@amazon.com>
Jason Gronn <jasontopia03@gmail.com> Jason Gronn <jasontopia03@gmail.com>
Javayhu <javayhu@gmail.com> Javayhu <javayhu@gmail.com>
Jay Kapadia <jaykapadia389@gmail.com>
Jay Oster <jay@kodewerx.org> Jay Oster <jay@kodewerx.org>
Jay Soffian <jaysoffian@gmail.com> Jay Soffian <jaysoffian@gmail.com>
Jay Yang <sjyang1126@gmail.com>
Jeado Ko <haibane84@gmail.com> Jeado Ko <haibane84@gmail.com>
Jeffrey C <jeffreyca16@gmail.com> Jeffrey C <jeffreyca16@gmail.com>
Jeffrey Yeung <jeffrey.yeung@poly.com> Jeffrey Yeung <jeffrey.yeung@poly.com>
@ -618,6 +652,7 @@ Jesper Storm Bache <jsbache@gmail.com>
Jesper van den Ende <jespertheend@gmail.com> Jesper van den Ende <jespertheend@gmail.com>
Jesse Miller <jesse@jmiller.biz> Jesse Miller <jesse@jmiller.biz>
Jesus Sanchez-Palencia <jesus.sanchez-palencia.fernandez.fil@intel.com> Jesus Sanchez-Palencia <jesus.sanchez-palencia.fernandez.fil@intel.com>
Jia Yu <yujia.1019@bytedance.com>
Jiadong Chen <chenjiadong@huawei.com> Jiadong Chen <chenjiadong@huawei.com>
Jiadong Zhu <jiadong.zhu@linaro.org> Jiadong Zhu <jiadong.zhu@linaro.org>
Jiahao Lu <lujjjh@gmail.com> Jiahao Lu <lujjjh@gmail.com>
@ -646,6 +681,8 @@ Jincheol Jo <jincheol.jo@navercorp.com>
Jinfeng Ma <majinfeng1@xiaomi.com> Jinfeng Ma <majinfeng1@xiaomi.com>
Jing Zhao <zhaojing7@xiaomi.com> Jing Zhao <zhaojing7@xiaomi.com>
Jinglong Zuo <zuojinglong@xiaomi.com> Jinglong Zuo <zuojinglong@xiaomi.com>
Jingqi Sun <jingqi.sun@hotmail.com>
Jingqi Sun <sunjingqi47@gmail.com>
Jingwei Liu <kingweiliu@gmail.com> Jingwei Liu <kingweiliu@gmail.com>
Jingyi Wei <wjywbs@gmail.com> Jingyi Wei <wjywbs@gmail.com>
Jinho Bang <jinho.bang@samsung.com> Jinho Bang <jinho.bang@samsung.com>
@ -671,6 +708,7 @@ John Kleinschmidt <kleinschmidtorama@gmail.com>
John Yani <vanuan@gmail.com> John Yani <vanuan@gmail.com>
John Yoo <nearbyh13@gmail.com> John Yoo <nearbyh13@gmail.com>
Johnson Lin <johnson.lin@intel.com> Johnson Lin <johnson.lin@intel.com>
Jojo R <rjiejie@gmail.com>
Jon Jensen <jonj@netflix.com> Jon Jensen <jonj@netflix.com>
Jonathan Frazer <listedegarde@gmail.com> Jonathan Frazer <listedegarde@gmail.com>
Jonathan Garbee <jonathan@garbee.me> Jonathan Garbee <jonathan@garbee.me>
@ -684,6 +722,7 @@ JongKwon Lee <jongkwon.lee@navercorp.com>
Jongmok Kim <jongmok.kim@navercorp.com> Jongmok Kim <jongmok.kim@navercorp.com>
Jongmok Kim <johny.kimc@gmail.com> Jongmok Kim <johny.kimc@gmail.com>
Jongsoo Lee <leejongsoo@gmail.com> Jongsoo Lee <leejongsoo@gmail.com>
Joonas Halinen <joonashalinen@outlook.com>
Joone Hur <joone.hur@intel.com> Joone Hur <joone.hur@intel.com>
Joonghun Park <pjh0718@gmail.com> Joonghun Park <pjh0718@gmail.com>
Jorge Villatoro <jorge@tomatocannon.com> Jorge Villatoro <jorge@tomatocannon.com>
@ -693,9 +732,11 @@ Joseph Lolak <joseph.lolak@samsung.com>
Josh Triplett <josh.triplett@intel.com> Josh Triplett <josh.triplett@intel.com>
Josh Triplett <josh@joshtriplett.org> Josh Triplett <josh@joshtriplett.org>
Joshua Lock <joshua.lock@intel.com> Joshua Lock <joshua.lock@intel.com>
Joshua Olaoye <joshuaolaoye46@gmail.com>
Joshua Roesslein <jroesslein@gmail.com> Joshua Roesslein <jroesslein@gmail.com>
Josué Ratelle <jorat1346@gmail.com> Josué Ratelle <jorat1346@gmail.com>
Josyula Venkat Narasimham <venkat.nj@samsung.com> Josyula Venkat Narasimham <venkat.nj@samsung.com>
Joy Roy <joy.roy.nil76@gmail.com>
Joyer Huang <collger@gmail.com> Joyer Huang <collger@gmail.com>
Juan Cruz Viotti <jv@jviotti.com> Juan Cruz Viotti <jv@jviotti.com>
Juan Jose Lopez Jaimez <jj.lopezjaimez@gmail.com> Juan Jose Lopez Jaimez <jj.lopezjaimez@gmail.com>
@ -718,6 +759,7 @@ Junmin Zhu <junmin.zhu@intel.com>
Junsang Mo <mojunsang26@gmail.com> Junsang Mo <mojunsang26@gmail.com>
Junsong Li <ljs.darkfish@gmail.com> Junsong Li <ljs.darkfish@gmail.com>
Jun Wang <wangjuna@uniontech.com> Jun Wang <wangjuna@uniontech.com>
Jun Xu <jun1.xu@intel.com>
Jun Zeng <hjunzeng6@gmail.com> Jun Zeng <hjunzeng6@gmail.com>
Justin Okamoto <justmoto@amazon.com> Justin Okamoto <justmoto@amazon.com>
Justin Ribeiro <justin@justinribeiro.com> Justin Ribeiro <justin@justinribeiro.com>
@ -725,7 +767,7 @@ Jüri Valdmann <juri.valdmann@qt.io>
Juyoung Kim <chattank05@gmail.com> Juyoung Kim <chattank05@gmail.com>
Jingge Yu <jinggeyu423@gmail.com> Jingge Yu <jinggeyu423@gmail.com>
Jing Peiyang <jingpeiyang@eswincomputing.com> Jing Peiyang <jingpeiyang@eswincomputing.com>
Jinli Wu <wujinli.cn@gmail.com> Jinli Wu <wujinli@bytedance.com>
K. M. Merajul Arefin <m.arefin@samsung.com> K. M. Merajul Arefin <m.arefin@samsung.com>
Kai Jiang <jiangkai@gmail.com> Kai Jiang <jiangkai@gmail.com>
Kai Köhne <kai.koehne@qt.io> Kai Köhne <kai.koehne@qt.io>
@ -740,6 +782,7 @@ Kangyuan Shu <kangyuan.shu@intel.com>
Karan Thakkar <karanjthakkar@gmail.com> Karan Thakkar <karanjthakkar@gmail.com>
Karel Král <kralkareliv@gmail.com> Karel Král <kralkareliv@gmail.com>
Karl <karlpolicechromium@gmail.com> Karl <karlpolicechromium@gmail.com>
Karl Piper <karl4piper@gmail.com>
Kartikey Bhatt <kartikey@amazon.com> Kartikey Bhatt <kartikey@amazon.com>
Kaspar Brand <googlecontrib@velox.ch> Kaspar Brand <googlecontrib@velox.ch>
Kaushalendra Mishra <k.mishra@samsung.com> Kaushalendra Mishra <k.mishra@samsung.com>
@ -755,6 +798,7 @@ Keita Suzuki <keitasuzuki.park@gmail.com>
Keita Yoshimoto <y073k3@gmail.com> Keita Yoshimoto <y073k3@gmail.com>
Keith Chen <keitchen@amazon.com> Keith Chen <keitchen@amazon.com>
Keith Cirkel <chromium@keithcirkel.co.uk> Keith Cirkel <chromium@keithcirkel.co.uk>
Kelsen Liu <kelsenliu21@gmail.com>
Kenneth Rohde Christiansen <kenneth.r.christiansen@intel.com> Kenneth Rohde Christiansen <kenneth.r.christiansen@intel.com>
Kenneth Strickland <ken.strickland@gmail.com> Kenneth Strickland <ken.strickland@gmail.com>
Kenneth Zhou <knthzh@gmail.com> Kenneth Zhou <knthzh@gmail.com>
@ -765,6 +809,8 @@ Ketan Goyal <ketan.goyal@samsung.com>
Kevin Gibbons <bakkot@gmail.com> Kevin Gibbons <bakkot@gmail.com>
Kevin Lee Helpingstine <sig11@reprehensible.net> Kevin Lee Helpingstine <sig11@reprehensible.net>
Kevin M. McCormick <mckev@amazon.com> Kevin M. McCormick <mckev@amazon.com>
Kexy Biscuit <kexybiscuit@aosc.io>
Kexy Biscuit <kexybiscuit@gmail.com>
Keyou <qqkillyou@gmail.com> Keyou <qqkillyou@gmail.com>
Khasim Syed Mohammed <khasim.mohammed@linaro.org> Khasim Syed Mohammed <khasim.mohammed@linaro.org>
Khem Raj <raj.khem@gmail.com> Khem Raj <raj.khem@gmail.com>
@ -779,6 +825,8 @@ Kirill Ovchinnikov <kirill.ovchinn@gmail.com>
Klemen Forstnerič <klemen.forstneric@gmail.com> Klemen Forstnerič <klemen.forstneric@gmail.com>
Kodam Nagaraju <k2.nagaraju@samsung.com> Kodam Nagaraju <k2.nagaraju@samsung.com>
Konrad Dzwinel <kdzwinel@gmail.com> Konrad Dzwinel <kdzwinel@gmail.com>
Kousuke Takaki <yoseio@brainoid.dev>
Kovacs Zeteny <brightbulbapp@gmail.com>
Krishna Chaitanya <krish.botta@samsung.com> Krishna Chaitanya <krish.botta@samsung.com>
Kristof Kosztyo <kkosztyo.u-szeged@partner.samsung.com> Kristof Kosztyo <kkosztyo.u-szeged@partner.samsung.com>
Krzysztof Czech <k.czech@samsung.com> Krzysztof Czech <k.czech@samsung.com>
@ -798,9 +846,11 @@ Kyungtae Kim <ktf.kim@samsung.com>
Kyungyoung Heo <bbvch13531@gmail.com> Kyungyoung Heo <bbvch13531@gmail.com>
Kyutae Lee <gorisanson@gmail.com> Kyutae Lee <gorisanson@gmail.com>
Lalit Chandivade <lalit.chandivade@einfochips.com> Lalit Chandivade <lalit.chandivade@einfochips.com>
Lalit Rana <lalitrn44@gmail.com>
Lam Lu <lamlu@amazon.com> Lam Lu <lamlu@amazon.com>
Laszlo Gombos <l.gombos@samsung.com> Laszlo Gombos <l.gombos@samsung.com>
Laszlo Radanyi <bekkra@gmail.com> Laszlo Radanyi <bekkra@gmail.com>
lauren n. liberda <lauren@selfisekai.rocks>
Lauren Yeun Kim <lauren.yeun.kim@gmail.com> Lauren Yeun Kim <lauren.yeun.kim@gmail.com>
Lauri Oherd <lauri.oherd@gmail.com> Lauri Oherd <lauri.oherd@gmail.com>
Lavar Askew <open.hyperion@gmail.com> Lavar Askew <open.hyperion@gmail.com>
@ -826,6 +876,7 @@ Lin Peng <penglin220@gmail.com>
Lin Peng <penglin22@huawei.com> Lin Peng <penglin22@huawei.com>
Lingqi Chi <someway.bit@gmail.com> Lingqi Chi <someway.bit@gmail.com>
Lingyun Cai <lingyun.cai@intel.com> Lingyun Cai <lingyun.cai@intel.com>
Linnan Li <lilinnan0903@gmail.com>
Lionel Landwerlin <lionel.g.landwerlin@intel.com> Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Lisha Guo <lisha.guo@intel.com> Lisha Guo <lisha.guo@intel.com>
Lizhi Fan <lizhi.fan@samsung.com> Lizhi Fan <lizhi.fan@samsung.com>
@ -861,6 +912,7 @@ Malcolm Wang <malcolm.2.wang@gmail.com>
Mallikarjuna Rao V <vm.arjun@samsung.com> Mallikarjuna Rao V <vm.arjun@samsung.com>
Manish Chhajer <chhajer.m@samsung.com> Manish Chhajer <chhajer.m@samsung.com>
Manish Jethani <m.jethani@eyeo.com> Manish Jethani <m.jethani@eyeo.com>
Manjunath Babu <10manju@gmail.com>
Manojkumar Bhosale <manojkumar.bhosale@imgtec.com> Manojkumar Bhosale <manojkumar.bhosale@imgtec.com>
Manuel Braun <thembrown@gmail.com> Manuel Braun <thembrown@gmail.com>
Manuel Lagana <manuel.lagana.dev@gmail.com> Manuel Lagana <manuel.lagana.dev@gmail.com>
@ -890,6 +942,7 @@ Martin Persson <mnpn03@gmail.com>
Martin Rogalla <martin@martinrogalla.com> Martin Rogalla <martin@martinrogalla.com>
Martina Kollarova <martina.kollarova@intel.com> Martina Kollarova <martina.kollarova@intel.com>
Martino Fontana <tinozzo123@gmail.com> Martino Fontana <tinozzo123@gmail.com>
Marvin Giessing <marvin.giessing@gmail.com>
Masahiro Yado <yado.masa@gmail.com> Masahiro Yado <yado.masa@gmail.com>
Masaru Nishida <msr.i386@gmail.com> Masaru Nishida <msr.i386@gmail.com>
Masayuki Wakizaka <mwakizaka0108@gmail.com> Masayuki Wakizaka <mwakizaka0108@gmail.com>
@ -899,6 +952,8 @@ Mathias Bynens <mathias@qiwi.be>
Mathieu Meisser <mmeisser@logitech.com> Mathieu Meisser <mmeisser@logitech.com>
Matt Arpidone <mma.public@gmail.com> Matt Arpidone <mma.public@gmail.com>
Matt Fysh <mattfysh@gmail.com> Matt Fysh <mattfysh@gmail.com>
Matt Harding <majaharding@gmail.com>
Matt Jolly <kangie@gentoo.org>
Matt Strum <mstrum@amazon.com> Matt Strum <mstrum@amazon.com>
Matt Zeunert <matt@mostlystatic.com> Matt Zeunert <matt@mostlystatic.com>
Matthew "strager" Glazar <strager.nds@gmail.com> Matthew "strager" Glazar <strager.nds@gmail.com>
@ -911,8 +966,8 @@ Matthew Willis <appamatto@gmail.com>
Matthias Reitinger <reimarvin@gmail.com> Matthias Reitinger <reimarvin@gmail.com>
Matthieu Rigolot <matthieu.rigolot@gmail.com> Matthieu Rigolot <matthieu.rigolot@gmail.com>
Matthieu Vlad Hauglustaine <matt.hauglustaine@gmail.com> Matthieu Vlad Hauglustaine <matt.hauglustaine@gmail.com>
Mattias Buelens <mattias.buelens@gmail.com>
Max Coplan <mchcopl@gmail.com> Max Coplan <mchcopl@gmail.com>
Max Karolinskiy <max@brave.com>
Max Perepelitsyn <pph34r@gmail.com> Max Perepelitsyn <pph34r@gmail.com>
Max Schmitt <max@schmitt.mx> Max Schmitt <max@schmitt.mx>
Max Vujovic <mvujovic@adobe.com> Max Vujovic <mvujovic@adobe.com>
@ -921,16 +976,20 @@ Mayur Kankanwadi <mayurk.vk@samsung.com>
Mc Zeng <zengmcong@gmail.com> Mc Zeng <zengmcong@gmail.com>
Md Abdullah Al Alamin <a.alamin.cse@gmail.com> Md Abdullah Al Alamin <a.alamin.cse@gmail.com>
Md. Hasanur Rashid <hasanur.r@samsung.com> Md. Hasanur Rashid <hasanur.r@samsung.com>
Md Hasibul Hasan <hasibulhasan873@gmail.com>
Md Hasibul Hasan <hasibul.h@samsung.com>
Md Jobed Hossain <jobed.h@samsung.com> Md Jobed Hossain <jobed.h@samsung.com>
Md Raiyan bin Sayeed <mrbsayee@uwaterloo.ca> Md Raiyan bin Sayeed <mrbsayee@uwaterloo.ca>
Md. Sadiqul Amin <sadiqul.amin@samsung.com> Md. Sadiqul Amin <sadiqul.amin@samsung.com>
Md Sami Uddin <md.sami@samsung.com> Md Sami Uddin <md.sami@samsung.com>
Mego Tan <tannal2409@gmail.com>
Merajul Arefin <merajularefin@gmail.com> Merajul Arefin <merajularefin@gmail.com>
Micha Hanselmann <micha.hanselmann@gmail.com> Micha Hanselmann <micha.hanselmann@gmail.com>
Michael Cirone <mikecirone@gmail.com> Michael Cirone <mikecirone@gmail.com>
Michael Constant <mconst@gmail.com> Michael Constant <mconst@gmail.com>
Michael Forney <mforney@mforney.org> Michael Forney <mforney@mforney.org>
Michael Gilbert <floppymaster@gmail.com> Michael Gilbert <floppymaster@gmail.com>
Michael Herrmann <michael@herrmann.io>
Michael Kolomeytsev <michael.kolomeytsev@gmail.com> Michael Kolomeytsev <michael.kolomeytsev@gmail.com>
Michael Lopez <lopes92290@gmail.com> Michael Lopez <lopes92290@gmail.com>
Michael Morrison <codebythepound@gmail.com> Michael Morrison <codebythepound@gmail.com>
@ -951,6 +1010,7 @@ Milko Leporis <milko.leporis@imgtec.com>
Milton Chiang <milton.chiang@mediatek.com> Milton Chiang <milton.chiang@mediatek.com>
Milutin Smiljanic <msmiljanic.gm@gmail.com> Milutin Smiljanic <msmiljanic.gm@gmail.com>
Minchul Kang <tegongkang@gmail.com> Minchul Kang <tegongkang@gmail.com>
Ming Lei <minggeorgelei@gmail.com>
Mingeun Park <mindal99546@gmail.com> Mingeun Park <mindal99546@gmail.com>
Minggang Wang <minggang.wang@intel.com> Minggang Wang <minggang.wang@intel.com>
Mingmin Xie <melvinxie@gmail.com> Mingmin Xie <melvinxie@gmail.com>
@ -967,15 +1027,20 @@ Mitchell Cohen <mitchell@agilebits.com>
Miyoung Shin <myid.shin@navercorp.com> Miyoung Shin <myid.shin@navercorp.com>
Mohamed I. Hammad <ibraaaa@gmail.com> Mohamed I. Hammad <ibraaaa@gmail.com>
Mohamed Mansour <m0.interactive@gmail.com> Mohamed Mansour <m0.interactive@gmail.com>
Mohamed Hany Youns <mohamedhyouns@gmail.com>
Mohammad Azam <m.azam@samsung.com> Mohammad Azam <m.azam@samsung.com>
MohammadSabri <mohammad.kh.sabri@exalt.ps>
Mohammed Ashraf <mohammedashraf4599@gmail.com>
Mohammed Wajahat Ali Siddiqui <wajahat.s@samsung.com> Mohammed Wajahat Ali Siddiqui <wajahat.s@samsung.com>
Mohan Reddy <mohan.reddy@samsung.com> Mohan Reddy <mohan.reddy@samsung.com>
Mohit Bhalla <bhallam@amazon.com> Mohit Bhalla <bhallam@amazon.com>
Mohraiel Matta <mohraielmatta@gmail.com>
Moiseanu Rares-Marian <moiseanurares@gmail.com> Moiseanu Rares-Marian <moiseanurares@gmail.com>
Momoka Yamamoto <momoka.my6@gmail.com> Momoka Yamamoto <momoka.my6@gmail.com>
Momoko Hattori <momohatt10@gmail.com> Momoko Hattori <momohatt10@gmail.com>
Mostafa Sedaghat joo <mostafa.sedaghat@gmail.com> Mostafa Sedaghat joo <mostafa.sedaghat@gmail.com>
Mrunal Kapade <mrunal.kapade@intel.com> Mrunal Kapade <mrunal.kapade@intel.com>
Muhammad Mahad <mahadtxt@gmail.com>
Munira Tursunova <moonira@google.com> Munira Tursunova <moonira@google.com>
Myeongjin Cho <myeongjin.cho@navercorp.com> Myeongjin Cho <myeongjin.cho@navercorp.com>
Myles C. Maxfield <mymax@amazon.com> Myles C. Maxfield <mymax@amazon.com>
@ -984,6 +1049,7 @@ Myunghoon Kim <asdvfrqwe@gmail.com>
Nagarajan Narayanan <nagarajan.n@samsung.com> Nagarajan Narayanan <nagarajan.n@samsung.com>
Nagarjuna Atluri <nagarjuna.a@samsung.com> Nagarjuna Atluri <nagarjuna.a@samsung.com>
Naiem Shaik <naiem.shaik@gmail.com> Naiem Shaik <naiem.shaik@gmail.com>
Nakuru Wubni <nakuru.wubni@gitstart.dev>
Naman Kumar Narula <namankumarnarula@gmail.com> Naman Kumar Narula <namankumarnarula@gmail.com>
Naman Yadav <naman.yadav@samsung.com> Naman Yadav <naman.yadav@samsung.com>
Nancy Tillery <hedonistsmith@gmail.com> Nancy Tillery <hedonistsmith@gmail.com>
@ -1004,7 +1070,9 @@ Nedeljko Babic <nedeljko.babic@imgtec.com>
Neehit Goyal <neehit.goyal@samsung.com> Neehit Goyal <neehit.goyal@samsung.com>
Nidhi Jaju <nidhijaju127@gmail.com> Nidhi Jaju <nidhijaju127@gmail.com>
Niek van der Maas <mail@niekvandermaas.nl> Niek van der Maas <mail@niekvandermaas.nl>
Nik Pavlov <nikita.pavlov.dev@gmail.com>
Nikhil Bansal <n.bansal@samsung.com> Nikhil Bansal <n.bansal@samsung.com>
Nikhil Meena <iakhilmeena@gmail.com>
Nikhil Sahni <nikhil.sahni@samsung.com> Nikhil Sahni <nikhil.sahni@samsung.com>
Nikita Ofitserov <himikof@gmail.com> Nikita Ofitserov <himikof@gmail.com>
Niklas Hambüchen <mail@nh2.me> Niklas Hambüchen <mail@nh2.me>
@ -1018,18 +1086,22 @@ Nivedan Sharma <ni.sharma@samsung.com>
Noam Rosenthal <noam.j.rosenthal@gmail.com> Noam Rosenthal <noam.j.rosenthal@gmail.com>
Noj Vek <nojvek@gmail.com> Noj Vek <nojvek@gmail.com>
Nolan Cao <nolan.robin.cao@gmail.com> Nolan Cao <nolan.robin.cao@gmail.com>
Nourhan Hasan <nourhan.m.hasan@gmail.com>
Oleksii Kadurin <ovkadurin@gmail.com> Oleksii Kadurin <ovkadurin@gmail.com>
Oliver Dunk <oliver@oliverdunk.com> Oliver Dunk <oliver@oliverdunk.com>
Olivier Tilloy <olivier+chromium@tilloy.net> Olivier Tilloy <olivier+chromium@tilloy.net>
Olli Raula (Old name Olli Syrjälä) <olli.raula@intel.com> Olli Raula (Old name Olli Syrjälä) <olli.raula@intel.com>
Omar Sandoval <osandov@osandov.com> Omar Sandoval <osandov@osandov.com>
Omar Shawky <omarmshawky11@gmail.com>
Orko Garai <orko.garai@gmail.com> Orko Garai <orko.garai@gmail.com>
Owen Shaw <owenpshaw@gmail.com> Owen Shaw <owenpshaw@gmail.com>
Owen Yuwono <owenyuwono@gmail.com> Owen Yuwono <owenyuwono@gmail.com>
Palash Verma <palashverma47@gmail.com> Palash Verma <palashverma47@gmail.com>
Pan Deng <pan.deng@intel.com> Pan Deng <pan.deng@intel.com>
Parag Radke <nrqv63@motorola.com> Parag Radke <nrqv63@motorola.com>
Paras Awasthi <awasthiparas6@gmail.com>
Paritosh Kumar <paritosh.in@samsung.com> Paritosh Kumar <paritosh.in@samsung.com>
Pasquale Riello <pas.riello@gmail.com>
Patrasciuc Sorin Cristian <cristian.patrasciuc@gmail.com> Patrasciuc Sorin Cristian <cristian.patrasciuc@gmail.com>
Patricija Cerkaite <cer.patricija@gmail.com> Patricija Cerkaite <cer.patricija@gmail.com>
Patrick Chan <chanpatorikku@gmail.com> Patrick Chan <chanpatorikku@gmail.com>
@ -1048,6 +1120,7 @@ Paul Wicks <pwicks86@gmail.com>
Pavan Kumar Emani <pavan.e@samsung.com> Pavan Kumar Emani <pavan.e@samsung.com>
Pavel Golikov <paullo612@ya.ru> Pavel Golikov <paullo612@ya.ru>
Pavel Ivanov <paivanof@gmail.com> Pavel Ivanov <paivanof@gmail.com>
Pawan Udassi <pawanudassi@hotmail.com>
Pawel Forysiuk <p.forysiuk@samsung.com> Pawel Forysiuk <p.forysiuk@samsung.com>
Paweł Hajdan jr <phajdan.jr@gmail.com> Paweł Hajdan jr <phajdan.jr@gmail.com>
Paweł Stanek <pawel@gener8ads.com> Paweł Stanek <pawel@gener8ads.com>
@ -1082,7 +1155,6 @@ Po-Chun Chang <pochang0403@gmail.com>
Prakhar Shrivastav <p.shri@samsung.com> Prakhar Shrivastav <p.shri@samsung.com>
Pramod Begur Srinath <pramod.bs@samsung.com> Pramod Begur Srinath <pramod.bs@samsung.com>
Pranay Kumar <pranay.kumar@samsung.com> Pranay Kumar <pranay.kumar@samsung.com>
Pranjal Jumde <pranjal@brave.com>
Prashant Hiremath <prashhir@cisco.com> Prashant Hiremath <prashhir@cisco.com>
Prashant Nevase <prashant.n@samsung.com> Prashant Nevase <prashant.n@samsung.com>
Prashant Patil <prashant.patil@imgtec.com> Prashant Patil <prashant.patil@imgtec.com>
@ -1090,6 +1162,7 @@ Pratham <prathamIN@proton.me>
Praveen Akkiraju <praveen.anp@samsung.com> Praveen Akkiraju <praveen.anp@samsung.com>
Preeti Nayak <preeti.nayak@samsung.com> Preeti Nayak <preeti.nayak@samsung.com>
Pritam Nikam <pritam.nikam@samsung.com> Pritam Nikam <pritam.nikam@samsung.com>
Psychpsyo <psychpsyo@gmail.com>
Puttaraju R <puttaraju.r@samsung.com> Puttaraju R <puttaraju.r@samsung.com>
Punith Nayak <npunith125@gmail.com> Punith Nayak <npunith125@gmail.com>
Qi Tiezheng <qitiezheng@360.cn> Qi Tiezheng <qitiezheng@360.cn>
@ -1173,6 +1246,7 @@ Ryan Manuel <rfmanuel@gmail.com>
Ryan Norton <rnorton10@gmail.com> Ryan Norton <rnorton10@gmail.com>
Ryan Sleevi <ryan-chromium-dev@sleevi.com> Ryan Sleevi <ryan-chromium-dev@sleevi.com>
Ryan Yoakum <ryoakum@skobalt.com> Ryan Yoakum <ryoakum@skobalt.com>
Ryan Huen <ryanhuenprivate@gmail.com>
Rye Zhang <ryezhang@tencent.com> Rye Zhang <ryezhang@tencent.com>
Ryo Ogawa <negibokken@gmail.com> Ryo Ogawa <negibokken@gmail.com>
Ryuan Choi <ryuan.choi@samsung.com> Ryuan Choi <ryuan.choi@samsung.com>
@ -1186,6 +1260,7 @@ Sam James <sam@gentoo.org>
Sam Larison <qufighter@gmail.com> Sam Larison <qufighter@gmail.com>
Sam McDonald <sam@sammcd.com> Sam McDonald <sam@sammcd.com>
Samuel Attard <samuel.r.attard@gmail.com> Samuel Attard <samuel.r.attard@gmail.com>
Samuel Maddock <samuelmaddock@electronjs.org>
Sanfeng Liao <sanfengliao@gmail.com> Sanfeng Liao <sanfengliao@gmail.com>
Sanggi Hong <sanggi.hong11@gmail.com> Sanggi Hong <sanggi.hong11@gmail.com>
Sanghee Lee <sanghee.lee1992@gmail.com> Sanghee Lee <sanghee.lee1992@gmail.com>
@ -1216,6 +1291,7 @@ Sean Bryant <sean@cyberwang.net>
Sean DuBois <seaduboi@amazon.com> Sean DuBois <seaduboi@amazon.com>
Sebastian Amend <sebastian.amend@googlemail.com> Sebastian Amend <sebastian.amend@googlemail.com>
Sebastian Krzyszkowiak <dos@dosowisko.net> Sebastian Krzyszkowiak <dos@dosowisko.net>
Sebastian Markbåge <sebastian@calyptus.eu>
Sebastjan Raspor <sebastjan.raspor1@gmail.com> Sebastjan Raspor <sebastjan.raspor1@gmail.com>
Seo Sanghyeon <sanxiyn@gmail.com> Seo Sanghyeon <sanxiyn@gmail.com>
Seokju Kwon <seokju.kwon@gmail.com> Seokju Kwon <seokju.kwon@gmail.com>
@ -1225,6 +1301,7 @@ Sergei Poletaev <spylogsster@gmail.com>
Sergei Romanov <rsv.981@gmail.com> Sergei Romanov <rsv.981@gmail.com>
Sergey Romanov <svromanov@sberdevices.ru> Sergey Romanov <svromanov@sberdevices.ru>
Sergey Kipet <sergey.kipet@gmail.com> Sergey Kipet <sergey.kipet@gmail.com>
Sergey Markelov <sergionso@gmail.com>
Sergey Putilin <p.sergey@samsung.com> Sergey Putilin <p.sergey@samsung.com>
Sergey Shekyan <shekyan@gmail.com> Sergey Shekyan <shekyan@gmail.com>
Sergey Talantov <sergey.talantov@gmail.com> Sergey Talantov <sergey.talantov@gmail.com>
@ -1235,11 +1312,13 @@ Serhii Matrunchyk <sergiy.matrunchyk@gmail.com>
Seshadri Mahalingam <seshadri.mahalingam@gmail.com> Seshadri Mahalingam <seshadri.mahalingam@gmail.com>
Seungkyu Lee <zx6658@gmail.com> Seungkyu Lee <zx6658@gmail.com>
Sevan Janiyan <venture37@geeklan.co.uk> Sevan Janiyan <venture37@geeklan.co.uk>
Shaheen Fazim <fazim.pentester@gmail.com>
Shahriar Rostami <shahriar.rostami@gmail.com> Shahriar Rostami <shahriar.rostami@gmail.com>
Shail Singhal <shail.s@samsung.com> Shail Singhal <shail.s@samsung.com>
Shane Hansen <shanemhansen@gmail.com> Shane Hansen <shanemhansen@gmail.com>
ShankarGanesh K <blr.bmlab@gmail.com> ShankarGanesh K <blr.bmlab@gmail.com>
Shanmuga Pandi M <shanmuga.m@samsung.com> Shanmuga Pandi M <shanmuga.m@samsung.com>
Shanxing Mei <shanxing.mei@intel.com>
Shaobo Yan <shaobo.yan@intel.com> Shaobo Yan <shaobo.yan@intel.com>
Shaotang Zhu <zhushaotang@uniontech.com> Shaotang Zhu <zhushaotang@uniontech.com>
Shashi Kumar <sk.kumar@samsung.com> Shashi Kumar <sk.kumar@samsung.com>
@ -1291,6 +1370,7 @@ Sooho Park <sooho1000@gmail.com>
Soojung Choi <crystal2840@gmail.com> Soojung Choi <crystal2840@gmail.com>
Soorya R <soorya.r@samsung.com> Soorya R <soorya.r@samsung.com>
Soren Dreijer <dreijerbit@gmail.com> Soren Dreijer <dreijerbit@gmail.com>
Spencer Wilson <spencer@spencerwilson.org>
Sreerenj Balachandran <sreerenj.balachandran@intel.com> Sreerenj Balachandran <sreerenj.balachandran@intel.com>
Srirama Chandra Sekhar Mogali <srirama.m@samsung.com> Srirama Chandra Sekhar Mogali <srirama.m@samsung.com>
Stacy Kim <stacy.kim@ucla.edu> Stacy Kim <stacy.kim@ucla.edu>
@ -1314,6 +1394,7 @@ Sunchang Li <johnstonli@tencent.com>
Sundoo Kim <nerdooit@gmail.com> Sundoo Kim <nerdooit@gmail.com>
Sundoo Kim <0xd00d00b@gmail.com> Sundoo Kim <0xd00d00b@gmail.com>
Suneel Kota <suneel.kota@samsung.com> Suneel Kota <suneel.kota@samsung.com>
Sung Lee <sung.lee@amd.com>
Sungguk Lim <limasdf@gmail.com> Sungguk Lim <limasdf@gmail.com>
Sunghyeok Kang <sh0528.kang@samsung.com> Sunghyeok Kang <sh0528.kang@samsung.com>
Sungmann Cho <sungmann.cho@gmail.com> Sungmann Cho <sungmann.cho@gmail.com>
@ -1353,6 +1434,7 @@ Takuya Kurimoto <takuya004869@gmail.com>
Tanay Chowdhury <tanay.c@samsung.com> Tanay Chowdhury <tanay.c@samsung.com>
Tanvir Rizvi <tanvir.rizvi@samsung.com> Tanvir Rizvi <tanvir.rizvi@samsung.com>
Tao Wang <tao.wang.2261@gmail.com> Tao Wang <tao.wang.2261@gmail.com>
Tao Xiong <taox4@illinois.edu>
Tapu Kumar Ghose <ghose.tapu@gmail.com> Tapu Kumar Ghose <ghose.tapu@gmail.com>
Taylor Price <trprice@gmail.com> Taylor Price <trprice@gmail.com>
Ted Kim <neot0000@gmail.com> Ted Kim <neot0000@gmail.com>
@ -1367,10 +1449,12 @@ Thomas Nguyen <haitung.nguyen@avast.com>
Thomas Phillips <tphillips@snapchat.com> Thomas Phillips <tphillips@snapchat.com>
Thomas White <im.toms.inbox@gmail.com> Thomas White <im.toms.inbox@gmail.com>
Tiago Vignatti <tiago.vignatti@intel.com> Tiago Vignatti <tiago.vignatti@intel.com>
Tianyi Zhang <me@1stprinciple.org>
Tibor Dusnoki <tibor.dusnoki.91@gmail.com> Tibor Dusnoki <tibor.dusnoki.91@gmail.com>
Tibor Dusnoki <tdusnoki@inf.u-szeged.hu> Tibor Dusnoki <tdusnoki@inf.u-szeged.hu>
Tien Hock Loh <tienhock.loh@starfivetech.com> Tien Hock Loh <tienhock.loh@starfivetech.com>
Tim Ansell <mithro@mithis.com> Tim Ansell <mithro@mithis.com>
Tim Barry <oregongraperoot@gmail.com>
Tim Niederhausen <tim@rnc-ag.de> Tim Niederhausen <tim@rnc-ag.de>
Tim Steiner <twsteiner@gmail.com> Tim Steiner <twsteiner@gmail.com>
Timo Gurr <timo.gurr@gmail.com> Timo Gurr <timo.gurr@gmail.com>
@ -1386,11 +1470,14 @@ Tom Harwood <tfh@skip.org>
Tomas Popela <tomas.popela@gmail.com> Tomas Popela <tomas.popela@gmail.com>
Tomasz Edward Posłuszny <tom@devpeer.net> Tomasz Edward Posłuszny <tom@devpeer.net>
Tony Shen <legendmastertony@gmail.com> Tony Shen <legendmastertony@gmail.com>
Topi Lassila <tolassila@gmail.com>
Torsten Kurbad <google@tk-webart.de> Torsten Kurbad <google@tk-webart.de>
Toshihito Kikuchi <leamovret@gmail.com> Toshihito Kikuchi <leamovret@gmail.com>
Toshiaki Tanaka <zokutyou2@gmail.com> Toshiaki Tanaka <zokutyou2@gmail.com>
Travis Leithead <travis.leithead@gmail.com>
Trent Willis <trentmwillis@gmail.com> Trent Willis <trentmwillis@gmail.com>
Trevor Perrin <unsafe@trevp.net> Trevor Perrin <unsafe@trevp.net>
Tripta Gupta <triptagupta19@gmail.com>
Tripta Gupta <tripta.g@samsung.com> Tripta Gupta <tripta.g@samsung.com>
Tristan Fraipont <tristan.fraipont@gmail.com> Tristan Fraipont <tristan.fraipont@gmail.com>
Tudor Brindus <me@tbrindus.ca> Tudor Brindus <me@tbrindus.ca>
@ -1427,6 +1514,7 @@ Vishal Bhatnagar <vishal.b@samsung.com>
Vishal Lingam <vishal.reddy@samsung.com> Vishal Lingam <vishal.reddy@samsung.com>
Vitaliy Kharin <kvserr@gmail.com> Vitaliy Kharin <kvserr@gmail.com>
Vivek Galatage <vivek.vg@samsung.com> Vivek Galatage <vivek.vg@samsung.com>
Vlad Zahorodnii <vlad.zahorodnii@kde.org>
Volker Sorge <volker.sorge@gmail.com> Volker Sorge <volker.sorge@gmail.com>
Waihung Fu <fufranci@amazon.com> Waihung Fu <fufranci@amazon.com>
wafuwafu13 <mariobaske@i.softbank.jp> wafuwafu13 <mariobaske@i.softbank.jp>
@ -1434,9 +1522,11 @@ Wojciech Bielawski <wojciech.bielawski@gmail.com>
Wang Chen <wangchen20@iscas.ac.cn> Wang Chen <wangchen20@iscas.ac.cn>
Wang Chen <unicornxw@gmail.com> Wang Chen <unicornxw@gmail.com>
Wang Weiwei <wangww@dingdao.com> Wang Weiwei <wangww@dingdao.com>
Wang Zirui <kingzirvi@gmail.com>
Wangyang Dai <jludwy@gmail.com> Wangyang Dai <jludwy@gmail.com>
Wanming Lin <wanming.lin@intel.com> Wanming Lin <wanming.lin@intel.com>
Wei Li <wei.c.li@intel.com> Wei Li <wei.c.li@intel.com>
Weicong Yu <yuweicong666@gmail.com>
Wen Fan <fanwen1@huawei.com> Wen Fan <fanwen1@huawei.com>
Wenxiang Qian <leonwxqian@gmail.com> Wenxiang Qian <leonwxqian@gmail.com>
WenSheng He <wensheng.he@samsung.com> WenSheng He <wensheng.he@samsung.com>
@ -1502,6 +1592,7 @@ Yong Shin <sy3620@gmail.com>
Yong Wang <ccyongwang@tencent.com> Yong Wang <ccyongwang@tencent.com>
Yonggang Luo <luoyonggang@gmail.com> Yonggang Luo <luoyonggang@gmail.com>
Yongha Lee <yongha78.lee@samsung.com> Yongha Lee <yongha78.lee@samsung.com>
Yongsang Park <yongsangpark980813@gmail.com>
Yongseok Choi <yongseok.choi@navercorp.com> Yongseok Choi <yongseok.choi@navercorp.com>
Yongsheng Zhu <yongsheng.zhu@intel.com> Yongsheng Zhu <yongsheng.zhu@intel.com>
Yoonjae Cho <yoonjae.cho92@gmail.com> Yoonjae Cho <yoonjae.cho92@gmail.com>
@ -1512,10 +1603,12 @@ Youngho Seo <hazivoo@gmail.com>
Youngjin Choi <cyjin9.yc@gmail.com> Youngjin Choi <cyjin9.yc@gmail.com>
YoungKi Hong <simon.hong81@gmail.com> YoungKi Hong <simon.hong81@gmail.com>
Youngmin Yoo <youngmin.yoo@samsung.com> Youngmin Yoo <youngmin.yoo@samsung.com>
Youngmin Hong <mjdal0523@gmail.com>
Youngsoo Choi <kenshin.choi@samsung.com> Youngsoo Choi <kenshin.choi@samsung.com>
Youngsun Suh <zard17@gmail.com> Youngsun Suh <zard17@gmail.com>
Yuan-Pin Yu <yjames@uber.com> Yuan-Pin Yu <yjames@uber.com>
Yuhong Sha <yuhong.sha@samsung.com> Yuhong Sha <yuhong.sha@samsung.com>
YuJiang Zhou <zhouyujiang.zyj@alibaba-inc.com>
Yuki Osaki <yuki.osaki7@gmail.com> Yuki Osaki <yuki.osaki7@gmail.com>
Yuki Tsuchiya <Yuki.Tsuchiya@sony.com> Yuki Tsuchiya <Yuki.Tsuchiya@sony.com>
Yuma Takai <tara20070827@gmail.com> Yuma Takai <tara20070827@gmail.com>
@ -1532,11 +1625,13 @@ Yuta Kasai <kasai.yuta0810@gmail.com>
Yuvanesh Natarajan <yuvanesh.n1@samsung.com> Yuvanesh Natarajan <yuvanesh.n1@samsung.com>
Zach Bjornson <zbbjornson@gmail.com> Zach Bjornson <zbbjornson@gmail.com>
Zachary Capalbo <zach.geek@gmail.com> Zachary Capalbo <zach.geek@gmail.com>
Zehan Li <synclzhhans@gmail.com>
Zeno Albisser <zeno.albisser@digia.com> Zeno Albisser <zeno.albisser@digia.com>
Zeqin Chen <talonchen@tencent.com> Zeqin Chen <talonchen@tencent.com>
Zhanbang He <hezhanbang@gmail.com> Zhanbang He <hezhanbang@gmail.com>
Zhang Hao <zhanghao.m@bytedance.com> Zhang Hao <zhanghao.m@bytedance.com>
Zhang Hao <15686357310a@gmail.com> Zhang Hao <15686357310a@gmail.com>
Zhao Qin <qzmiss@gmail.com>
Zhaoming Jiang <zhaoming.jiang@intel.com> Zhaoming Jiang <zhaoming.jiang@intel.com>
Zhaoze Zhou <zhaoze.zhou@partner.samsung.com> Zhaoze Zhou <zhaoze.zhou@partner.samsung.com>
Zheda Chen <zheda.chen@intel.com> Zheda Chen <zheda.chen@intel.com>
@ -1562,6 +1657,7 @@ Zsolt Borbely <zsborbely.u-szeged@partner.samsung.com>
迷渡 <justjavac@gmail.com> 迷渡 <justjavac@gmail.com>
郑苏波 (Super Zheng) <superzheng@tencent.com> 郑苏波 (Super Zheng) <superzheng@tencent.com>
一丝 (Yisi) <yiorsi@gmail.com> 一丝 (Yisi) <yiorsi@gmail.com>
林训杰 (XunJie Lin) <wick.linxunjie@gmail.com>
# Please DO NOT APPEND here. See comments at the top of the file. # Please DO NOT APPEND here. See comments at the top of the file.
# END individuals section. # END individuals section.
@ -1573,6 +1669,7 @@ Akamai Inc. <*@akamai.com>
ARM Holdings <*@arm.com> ARM Holdings <*@arm.com>
BlackBerry Limited <*@blackberry.com> BlackBerry Limited <*@blackberry.com>
Bocoup <*@bocoup.com> Bocoup <*@bocoup.com>
Brave Software Inc. <*@brave.com>
Canonical Limited <*@canonical.com> Canonical Limited <*@canonical.com>
Cloudflare, Inc. <*@cloudflare.com> Cloudflare, Inc. <*@cloudflare.com>
CloudMosa, Inc. <*@cloudmosa.com> CloudMosa, Inc. <*@cloudmosa.com>
@ -1590,6 +1687,7 @@ EngFlow, Inc. <*@engflow.com>
Estimote, Inc. <*@estimote.com> Estimote, Inc. <*@estimote.com>
Google Inc. <*@google.com> Google Inc. <*@google.com>
Grammarly, Inc. <*@grammarly.com> Grammarly, Inc. <*@grammarly.com>
Here Inc. <*@here.io>
Hewlett-Packard Development Company, L.P. <*@hp.com> Hewlett-Packard Development Company, L.P. <*@hp.com>
HyperConnect Inc. <*@hpcnt.com> HyperConnect Inc. <*@hpcnt.com>
IBM Inc. <*@*.ibm.com> IBM Inc. <*@*.ibm.com>
@ -1619,12 +1717,14 @@ NVIDIA Corporation <*@nvidia.com>
OpenFin Inc. <*@openfin.co> OpenFin Inc. <*@openfin.co>
Opera Software ASA <*@opera.com> Opera Software ASA <*@opera.com>
Optical Tone Ltd <*@opticaltone.com> Optical Tone Ltd <*@opticaltone.com>
Palo Alto Networks, Inc. <*@paloaltonetworks.com>
Pengutronix e.K. <*@pengutronix.de> Pengutronix e.K. <*@pengutronix.de>
Quality First Software GmbH <*@qf-software.com> Quality First Software GmbH <*@qf-software.com>
Rakuten Kobo Inc. <*@kobo.com> Rakuten Kobo Inc. <*@kobo.com>
Rakuten Kobo Inc. <*@rakuten.com> Rakuten Kobo Inc. <*@rakuten.com>
Red Hat Inc. <*@redhat.com> Red Hat Inc. <*@redhat.com>
Semihalf <*@semihalf.com> Semihalf <*@semihalf.com>
S57 ApS <*@s57.io>
Seznam.cz, a.s. <*@firma.seznam.cz> Seznam.cz, a.s. <*@firma.seznam.cz>
Slack Technologies Inc. <*@slack-corp.com> Slack Technologies Inc. <*@slack-corp.com>
Spotify AB <*@spotify.com> Spotify AB <*@spotify.com>

3752
src/DEPS

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -3,10 +3,10 @@ include_rules = [
"+third_party/apple_apsl", "+third_party/apple_apsl",
"+third_party/boringssl/src/include", "+third_party/boringssl/src/include",
"+third_party/ced", "+third_party/ced",
"+third_party/fuzztest",
# We are moving the old jni_generator to jni_zero, some references will remain # We are moving the old jni_generator to jni_zero, some references will remain
# in //base. # in //base.
"+third_party/jni_zero", "+third_party/jni_zero",
"+third_party/libevent",
"+third_party/libunwindstack/src/libunwindstack/include", "+third_party/libunwindstack/src/libunwindstack/include",
"+third_party/lss", "+third_party/lss",
"+third_party/modp_b64", "+third_party/modp_b64",
@ -19,7 +19,8 @@ include_rules = [
"+third_party/rust/cxx", "+third_party/rust/cxx",
"+third_party/test_fonts", "+third_party/test_fonts",
# JSON Deserialization. # JSON Deserialization.
"+third_party/rust/serde_json_lenient/v0_1/wrapper", "+third_party/rust/serde_json_lenient/v0_2/wrapper",
"+third_party/zlib",
# These are implicitly brought in from the root, and we don't want them. # These are implicitly brought in from the root, and we don't want them.
"-ipc", "-ipc",
@ -38,7 +39,7 @@ specific_include_rules = {
"+third_party/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h", "+third_party/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h",
], ],
# To evaluate the performance effects of using absl's flat_hash_map. # To evaluate the performance effects of using absl's flat_hash_map.
"supports_user_data\.h": [ "supports_user_data\.cc": [
"+third_party/abseil-cpp/absl/container/flat_hash_map.h", "+third_party/abseil-cpp/absl/container/flat_hash_map.h",
] ]
} }


@ -4,12 +4,12 @@ set noparent
# NOTE: keep this in sync with global-owners-override@chromium.org owners # NOTE: keep this in sync with global-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes. # by emailing lsc-policy@chromium.org when this list changes.
altimin@chromium.org altimin@chromium.org
danakj@chromium.org
dcheng@chromium.org dcheng@chromium.org
fdoray@chromium.org fdoray@chromium.org
gab@chromium.org gab@chromium.org
kylechar@chromium.org kylechar@chromium.org
mark@chromium.org mark@chromium.org
pkasting@chromium.org
thakis@chromium.org thakis@chromium.org
thestig@chromium.org thestig@chromium.org
wez@chromium.org wez@chromium.org
@ -30,7 +30,6 @@ per-file ..._fuchsia*=file://build/fuchsia/OWNERS
# For Windows-specific changes: # For Windows-specific changes:
per-file ..._win*=file://base/win/OWNERS per-file ..._win*=file://base/win/OWNERS
per-file callback_list*=pkasting@chromium.org
per-file feature_list*=asvitkine@chromium.org per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org per-file feature_list*=isherman@chromium.org


@ -9,5 +9,4 @@
# yourself, don't hesitate to seek help from another security team member! # yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience. # Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org dcheng@chromium.org
rsesek@chromium.org
tsepez@chromium.org tsepez@chromium.org


@ -11,8 +11,10 @@ import("//build/config/dcheck_always_on.gni")
buildflag_header("buildflags") { buildflag_header("buildflags") {
header = "buildflags.h" header = "buildflags.h"
flags = use_partition_alloc_as_gwp_asan_store =
[ "USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$enable_backup_ref_ptr_support" ] enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl
flags = [ "USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$use_partition_alloc_as_gwp_asan_store" ]
} }
if (is_apple) { if (is_apple) {


@ -1,5 +1,4 @@
lizeb@chromium.org lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org wfh@chromium.org
per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS


@ -35,8 +35,7 @@ indirectly, on `base` within the scope of a linker unit.
More importantly, **no other place outside of `/base` should depend on the More importantly, **no other place outside of `/base` should depend on the
specific allocator**. specific allocator**.
If such a functional dependency is required that should be achieved using If such a functional dependency is required that should be achieved using
abstractions in `base` (see `/base/allocator/allocator_extension.h` and abstractions in `base` (see `/base/memory/`)
`/base/memory/`)
**Why `base` depends on `allocator`?** **Why `base` depends on `allocator`?**
Because it needs to provide services that depend on the actual allocator Because it needs to provide services that depend on the actual allocator


@ -4,11 +4,11 @@
#include "base/allocator/allocator_check.h" #include "base/allocator/allocator_check.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "partition_alloc/buildflags.h"
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
#include "base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.h" #include "partition_alloc/shim/winheap_stubs_win.h"
#endif #endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
@ -16,18 +16,19 @@
#endif #endif
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h" #include "partition_alloc/shim/allocator_interception_apple.h"
#endif #endif
namespace base::allocator { namespace base::allocator {
bool IsAllocatorInitialized() { bool IsAllocatorInitialized() {
#if BUILDFLAG(IS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM) #if BUILDFLAG(IS_WIN) && PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
// Set by allocator_shim_override_ucrt_symbols_win.h when the // Set by allocator_shim_override_ucrt_symbols_win.h when the
// shimmed _set_new_mode() is called. // shimmed _set_new_mode() is called.
return allocator_shim::g_is_win_shim_layer_initialized; return allocator_shim::g_is_win_shim_layer_initialized;
#elif BUILDFLAG(IS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \ #elif BUILDFLAG(IS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
!BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_ALLOCATOR_SHIM) !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
// From allocator_interception_mac.mm. // From allocator_interception_mac.mm.
return allocator_shim::g_replaced_default_zone; return allocator_shim::g_replaced_default_zone;
#else #else


@ -1,15 +0,0 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_extension.h"
#include "base/allocator/buildflags.h"
#include "base/check.h"
namespace base {
namespace allocator {
void ReleaseFreeMemory() {}
} // namespace allocator
} // namespace base


@ -1,23 +0,0 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#include <stddef.h> // for size_t
#include "base/base_export.h"
#include "build/build_config.h"
namespace base {
namespace allocator {
// Request that the allocator release any free memory it knows about to the
// system.
BASE_EXPORT void ReleaseFreeMemory();
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_


@ -21,4 +21,4 @@ constexpr size_t kMaximumNumberOfObservers = 4;
} // namespace base::allocator::dispatcher::configuration } // namespace base::allocator::dispatcher::configuration
#endif // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_ #endif // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_


@ -5,18 +5,18 @@
#include "base/allocator/dispatcher/dispatcher.h" #include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/internal/dispatch_data.h" #include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
#include "base/check.h" #include "base/check.h"
#include "base/dcheck_is_on.h" #include "base/dcheck_is_on.h"
#include "base/no_destructor.h" #include "base/no_destructor.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/shim/allocator_shim.h"
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
#include <atomic> #include <atomic>
#endif #endif
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h" #include "partition_alloc/partition_alloc_hooks.h" // nogncheck
#endif #endif
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
@ -34,7 +34,7 @@ struct Dispatcher::Impl {
void Reset() { void Reset() {
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
DCHECK([&]() { DCHECK([&] {
auto const was_set = is_initialized_check_flag_.test_and_set(); auto const was_set = is_initialized_check_flag_.test_and_set();
is_initialized_check_flag_.clear(); is_initialized_check_flag_.clear();
return was_set; return was_set;
@ -51,13 +51,13 @@ struct Dispatcher::Impl {
// connected. This way we prevent notifications although no observers are // connected. This way we prevent notifications although no observers are
// present. // present.
static void ConnectToEmitters(const internal::DispatchData& dispatch_data) { static void ConnectToEmitters(const internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) { if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
allocator_shim::InsertAllocatorDispatch(allocator_dispatch); allocator_shim::InsertAllocatorDispatch(allocator_dispatch);
} }
#endif #endif
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
{ {
auto* const allocation_hook = dispatch_data.GetAllocationObserverHook(); auto* const allocation_hook = dispatch_data.GetAllocationObserverHook();
auto* const free_hook = dispatch_data.GetFreeObserverHook(); auto* const free_hook = dispatch_data.GetFreeObserverHook();
@ -70,14 +70,14 @@ struct Dispatcher::Impl {
} }
static void DisconnectFromEmitters(internal::DispatchData& dispatch_data) { static void DisconnectFromEmitters(internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) { if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
allocator_shim::RemoveAllocatorDispatchForTesting( allocator_shim::RemoveAllocatorDispatchForTesting(
allocator_dispatch); // IN-TEST allocator_dispatch); // IN-TEST
} }
#endif #endif
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr); partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif #endif
} }


@ -5,11 +5,11 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_ #ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_ #define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#include <memory>
#include "base/allocator/dispatcher/internal/dispatcher_internal.h" #include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h" #include "base/base_export.h"
#include <memory>
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
namespace internal { namespace internal {


@ -5,13 +5,13 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_ #ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_ #define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#include <tuple>
#include <utility>
#include "base/allocator/dispatcher/configuration.h" #include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/dispatcher.h" #include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/internal/tools.h" #include "base/allocator/dispatcher/internal/tools.h"
#include <tuple>
#include <utility>
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
namespace internal { namespace internal {


@ -3,11 +3,12 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "base/allocator/dispatcher/internal/dispatch_data.h" #include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
namespace base::allocator::dispatcher::internal { namespace base::allocator::dispatcher::internal {
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
DispatchData& DispatchData::SetAllocationObserverHooks( DispatchData& DispatchData::SetAllocationObserverHooks(
AllocationObserverHook* allocation_observer_hook, AllocationObserverHook* allocation_observer_hook,
@ -28,7 +29,7 @@ DispatchData::FreeObserverHook* DispatchData::GetFreeObserverHook() const {
} }
#endif #endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
DispatchData& DispatchData::SetAllocatorDispatch( DispatchData& DispatchData::SetAllocatorDispatch(
AllocatorDispatch* allocator_dispatch) { AllocatorDispatch* allocator_dispatch) {
allocator_dispatch_ = allocator_dispatch; allocator_dispatch_ = allocator_dispatch;


@ -5,28 +5,28 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_ #ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_ #define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "partition_alloc/buildflags.h"
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h" #include "partition_alloc/partition_alloc_hooks.h" // nogncheck
#endif #endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h" #include "partition_alloc/shim/allocator_shim.h" // nogncheck
#endif #endif
namespace base::allocator::dispatcher::internal { namespace base::allocator::dispatcher::internal {
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
using allocator_shim::AllocatorDispatch; using allocator_shim::AllocatorDispatch;
#endif #endif
// A simple utility class to pass all the information required to properly hook // A simple utility class to pass all the information required to properly hook
// into the memory allocation subsystems from DispatcherImpl to the Dispatcher. // into the memory allocation subsystems from DispatcherImpl to the Dispatcher.
struct BASE_EXPORT DispatchData { struct BASE_EXPORT DispatchData {
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
using AllocationObserverHook = using AllocationObserverHook =
partition_alloc::PartitionAllocHooks::AllocationObserverHook; partition_alloc::PartitionAllocHooks::AllocationObserverHook;
using FreeObserverHook = using FreeObserverHook =
@ -44,7 +44,7 @@ struct BASE_EXPORT DispatchData {
public: public:
#endif #endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
DispatchData& SetAllocatorDispatch(AllocatorDispatch* allocator_dispatch); DispatchData& SetAllocatorDispatch(AllocatorDispatch* allocator_dispatch);
AllocatorDispatch* GetAllocatorDispatch() const; AllocatorDispatch* GetAllocatorDispatch() const;


@ -11,23 +11,23 @@
#include "base/allocator/dispatcher/memory_tagging.h" #include "base/allocator/dispatcher/memory_tagging.h"
#include "base/allocator/dispatcher/notification_data.h" #include "base/allocator/dispatcher/notification_data.h"
#include "base/allocator/dispatcher/subsystem.h" #include "base/allocator/dispatcher/subsystem.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
#include "base/check.h" #include "base/check.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "partition_alloc/buildflags.h"
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_allocation_data.h" #include "partition_alloc/partition_alloc_allocation_data.h" // nogncheck
#endif #endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h" #include "partition_alloc/shim/allocator_shim.h"
#endif #endif
#include <tuple> #include <tuple>
namespace base::allocator::dispatcher::internal { namespace base::allocator::dispatcher::internal {
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
using allocator_shim::AllocatorDispatch; using allocator_shim::AllocatorDispatch;
#endif #endif
@ -83,17 +83,17 @@ struct DispatcherImpl {
private: private:
static DispatchData CreateDispatchData() { static DispatchData CreateDispatchData() {
return DispatchData() return DispatchData()
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
.SetAllocationObserverHooks(&PartitionAllocatorAllocationHook, .SetAllocationObserverHooks(&PartitionAllocatorAllocationHook,
&PartitionAllocatorFreeHook) &PartitionAllocatorFreeHook)
#endif #endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
.SetAllocatorDispatch(&allocator_dispatch_) .SetAllocatorDispatch(&allocator_dispatch_)
#endif #endif
; ;
} }
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
static void PartitionAllocatorAllocationHook( static void PartitionAllocatorAllocationHook(
const partition_alloc::AllocationNotificationData& pa_notification_data) { const partition_alloc::AllocationNotificationData& pa_notification_data) {
AllocationNotificationData dispatcher_notification_data( AllocationNotificationData dispatcher_notification_data(
@ -101,7 +101,7 @@ struct DispatcherImpl {
pa_notification_data.type_name(), pa_notification_data.type_name(),
AllocationSubsystem::kPartitionAllocator); AllocationSubsystem::kPartitionAllocator);
#if BUILDFLAG(HAS_MEMORY_TAGGING) #if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
dispatcher_notification_data.SetMteReportingMode( dispatcher_notification_data.SetMteReportingMode(
ConvertToMTEMode(pa_notification_data.mte_reporting_mode())); ConvertToMTEMode(pa_notification_data.mte_reporting_mode()));
#endif #endif
@ -115,177 +115,175 @@ struct DispatcherImpl {
pa_notification_data.address(), pa_notification_data.address(),
AllocationSubsystem::kPartitionAllocator); AllocationSubsystem::kPartitionAllocator);
#if BUILDFLAG(HAS_MEMORY_TAGGING) #if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
dispatcher_notification_data.SetMteReportingMode( dispatcher_notification_data.SetMteReportingMode(
ConvertToMTEMode(pa_notification_data.mte_reporting_mode())); ConvertToMTEMode(pa_notification_data.mte_reporting_mode()));
#endif #endif
DoNotifyFree(dispatcher_notification_data); DoNotifyFree(dispatcher_notification_data);
} }
#endif // BUILDFLAG(USE_PARTITION_ALLOC) #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC)
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
static void* AllocFn(const AllocatorDispatch* self, static void* AllocFn(size_t size, void* context) {
size_t size,
void* context) {
void* const address = self->next->alloc_function(self->next, size, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
void* const address = void* const address =
self->next->alloc_unchecked_function(self->next, size, context); allocator_dispatch_.next->alloc_function(size, context);
DoNotifyAllocationForShim(address, size); DoNotifyAllocationForShim(address, size);
return address; return address;
} }
static void* AllocZeroInitializedFn(const AllocatorDispatch* self, static void* AllocUncheckedFn(size_t size, void* context) {
size_t n, void* const address =
size_t size, allocator_dispatch_.next->alloc_unchecked_function(size, context);
void* context) {
void* const address = self->next->alloc_zero_initialized_function( DoNotifyAllocationForShim(address, size);
self->next, n, size, context);
return address;
}
static void* AllocZeroInitializedFn(size_t n, size_t size, void* context) {
void* const address =
allocator_dispatch_.next->alloc_zero_initialized_function(n, size,
context);
DoNotifyAllocationForShim(address, n * size); DoNotifyAllocationForShim(address, n * size);
return address; return address;
} }
static void* AllocAlignedFn(const AllocatorDispatch* self, static void* AllocAlignedFn(size_t alignment, size_t size, void* context) {
size_t alignment, void* const address = allocator_dispatch_.next->alloc_aligned_function(
size_t size, alignment, size, context);
void* context) {
void* const address = self->next->alloc_aligned_function(
self->next, alignment, size, context);
DoNotifyAllocationForShim(address, size); DoNotifyAllocationForShim(address, size);
return address; return address;
} }
static void* ReallocFn(const AllocatorDispatch* self, static void* ReallocFn(void* address, size_t size, void* context) {
void* address,
size_t size,
void* context) {
// Note: size == 0 actually performs free. // Note: size == 0 actually performs free.
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
void* const reallocated_address = void* const reallocated_address =
self->next->realloc_function(self->next, address, size, context); allocator_dispatch_.next->realloc_function(address, size, context);
DoNotifyAllocationForShim(reallocated_address, size); DoNotifyAllocationForShim(reallocated_address, size);
return reallocated_address; return reallocated_address;
} }
static void FreeFn(const AllocatorDispatch* self, static void* ReallocUncheckedFn(void* address, size_t size, void* context) {
void* address, // Note: size == 0 actually performs free.
void* context) { DoNotifyFreeForShim(address);
void* const reallocated_address =
allocator_dispatch_.next->realloc_unchecked_function(address, size,
context);
DoNotifyAllocationForShim(reallocated_address, size);
return reallocated_address;
}
static void FreeFn(void* address, void* context) {
// Note: DoNotifyFree should be called before free_function (here and in // Note: DoNotifyFree should be called before free_function (here and in
// other places). That is because observers need to handle the allocation // other places). That is because observers need to handle the allocation
// being freed before calling free_function, as once the latter is executed // being freed before calling free_function, as once the latter is executed
// the address becomes available and can be allocated by another thread. // the address becomes available and can be allocated by another thread.
// That would be racy otherwise. // That would be racy otherwise.
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
self->next->free_function(self->next, address, context); MUSTTAIL return allocator_dispatch_.next->free_function(address, context);
} }
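The note above is the invariant behind every free-path hook in this dispatcher: observers are told about the free before the underlying free_function runs, because once the allocator reclaims the address another thread can hand it out again. A minimal stand-alone sketch of that notify-then-forward pattern (the names below are illustrative, not the Chromium shim API):

#include <cstdlib>

// Hypothetical "next" dispatcher that simply calls the system allocator.
struct NextAllocatorDispatch {
  void (*free_function)(void* address, void* context);
};
inline NextAllocatorDispatch g_next{[](void* address, void*) { std::free(address); }};

inline void NotifyObserversOfFree(void* address) {
  // Observers record the free here, while the address is still unavailable
  // to other threads.
}

void HookedFree(void* address, void* context) {
  NotifyObserversOfFree(address);          // 1. notify first (avoids the race)
  g_next.free_function(address, context);  // 2. then forward to the next layer
}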
static size_t GetSizeEstimateFn(const AllocatorDispatch* self, static unsigned BatchMallocFn(size_t size,
void* address,
void* context) {
return self->next->get_size_estimate_function(self->next, address, context);
}
static size_t GoodSizeFn(const AllocatorDispatch* self,
size_t size,
void* context) {
return self->next->good_size_function(self->next, size, context);
}
static bool ClaimedAddressFn(const AllocatorDispatch* self,
void* address,
void* context) {
return self->next->claimed_address_function(self->next, address, context);
}
static unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
void** results, void** results,
unsigned num_requested, unsigned num_requested,
void* context) { void* context) {
unsigned const num_allocated = self->next->batch_malloc_function( unsigned const num_allocated =
self->next, size, results, num_requested, context); allocator_dispatch_.next->batch_malloc_function(size, results,
num_requested, context);
for (unsigned i = 0; i < num_allocated; ++i) { for (unsigned i = 0; i < num_allocated; ++i) {
DoNotifyAllocationForShim(results[i], size); DoNotifyAllocationForShim(results[i], size);
} }
return num_allocated; return num_allocated;
} }
static void BatchFreeFn(const AllocatorDispatch* self, static void BatchFreeFn(void** to_be_freed,
void** to_be_freed,
unsigned num_to_be_freed, unsigned num_to_be_freed,
void* context) { void* context) {
for (unsigned i = 0; i < num_to_be_freed; ++i) { for (unsigned i = 0; i < num_to_be_freed; ++i) {
DoNotifyFreeForShim(to_be_freed[i]); DoNotifyFreeForShim(to_be_freed[i]);
} }
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed, MUSTTAIL return allocator_dispatch_.next->batch_free_function(
context); to_be_freed, num_to_be_freed, context);
} }
static void FreeDefiniteSizeFn(const AllocatorDispatch* self, static void FreeDefiniteSizeFn(void* address, size_t size, void* context) {
void* address,
size_t size,
void* context) {
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
self->next->free_definite_size_function(self->next, address, size, context); MUSTTAIL return allocator_dispatch_.next->free_definite_size_function(
address, size, context);
} }
static void TryFreeDefaultFn(const AllocatorDispatch* self, static void TryFreeDefaultFn(void* address, void* context) {
void* address,
void* context) {
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
self->next->try_free_default_function(self->next, address, context); MUSTTAIL return allocator_dispatch_.next->try_free_default_function(
address, context);
} }
static void* AlignedMallocFn(const AllocatorDispatch* self, static void* AlignedMallocFn(size_t size, size_t alignment, void* context) {
size_t size, void* const address = allocator_dispatch_.next->aligned_malloc_function(
size_t alignment, size, alignment, context);
void* context) {
void* const address = self->next->aligned_malloc_function(
self->next, size, alignment, context);
DoNotifyAllocationForShim(address, size); DoNotifyAllocationForShim(address, size);
return address; return address;
} }
static void* AlignedReallocFn(const AllocatorDispatch* self, static void* AlignedMallocUncheckedFn(size_t size,
void* address, size_t alignment,
void* context) {
void* const address =
allocator_dispatch_.next->aligned_malloc_unchecked_function(
size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AlignedReallocFn(void* address,
size_t size, size_t size,
size_t alignment, size_t alignment,
void* context) { void* context) {
// Note: size == 0 actually performs free. // Note: size == 0 actually performs free.
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
address = self->next->aligned_realloc_function(self->next, address, size, address = allocator_dispatch_.next->aligned_realloc_function(
alignment, context); address, size, alignment, context);
DoNotifyAllocationForShim(address, size); DoNotifyAllocationForShim(address, size);
return address; return address;
} }
static void AlignedFreeFn(const AllocatorDispatch* self, static void* AlignedReallocUncheckedFn(void* address,
void* address, size_t size,
void* context) { size_t alignment,
void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address); DoNotifyFreeForShim(address);
self->next->aligned_free_function(self->next, address, context); address = allocator_dispatch_.next->aligned_realloc_unchecked_function(
address, size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void AlignedFreeFn(void* address, void* context) {
DoNotifyFreeForShim(address);
MUSTTAIL return allocator_dispatch_.next->aligned_free_function(address,
context);
} }
ALWAYS_INLINE static void DoNotifyAllocationForShim(void* address, ALWAYS_INLINE static void DoNotifyAllocationForShim(void* address,
@ -304,7 +302,7 @@ struct DispatcherImpl {
} }
static AllocatorDispatch allocator_dispatch_; static AllocatorDispatch allocator_dispatch_;
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) #endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
ALWAYS_INLINE static void DoNotifyAllocation( ALWAYS_INLINE static void DoNotifyAllocation(
const AllocationNotificationData& notification_data) { const AllocationNotificationData& notification_data) {
@ -323,27 +321,31 @@ struct DispatcherImpl {
template <typename... ObserverTypes> template <typename... ObserverTypes>
std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers; std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
template <typename... ObserverTypes> template <typename... ObserverTypes>
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = { AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
&AllocFn, AllocFn, // alloc_function
&AllocUncheckedFn, AllocUncheckedFn, // alloc_unchecked_function
&AllocZeroInitializedFn, AllocZeroInitializedFn, // alloc_zero_initialized_function
&AllocAlignedFn, AllocAlignedFn, // alloc_aligned_function
&ReallocFn, ReallocFn, // realloc_function
&FreeFn, ReallocUncheckedFn, // realloc_unchecked_function
&GetSizeEstimateFn, FreeFn, // free_function
&GoodSizeFn, nullptr, // get_size_estimate_function
&ClaimedAddressFn, nullptr, // good_size_function
&BatchMallocFn, nullptr, // claimed_address_function
&BatchFreeFn, BatchMallocFn, // batch_malloc_function
&FreeDefiniteSizeFn, BatchFreeFn, // batch_free_function
&TryFreeDefaultFn, FreeDefiniteSizeFn, // free_definite_size_function
&AlignedMallocFn, TryFreeDefaultFn, // try_free_default_function
&AlignedReallocFn, AlignedMallocFn, // aligned_malloc_function
&AlignedFreeFn, AlignedMallocUncheckedFn, // aligned_malloc_unchecked_function
nullptr}; AlignedReallocFn, // aligned_realloc_function
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) AlignedReallocUncheckedFn, // aligned_realloc_unchecked_function
AlignedFreeFn, // aligned_free_function
nullptr // next
};
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
// Specialization of DispatcherImpl in case we have no observers to notify. In // Specialization of DispatcherImpl in case we have no observers to notify. In
// this special case we return a set of null pointers as the Dispatcher must not // this special case we return a set of null pointers as the Dispatcher must not
@ -352,10 +354,10 @@ template <>
struct DispatcherImpl<> { struct DispatcherImpl<> {
static DispatchData GetNotificationHooks(std::tuple<> /*observers*/) { static DispatchData GetNotificationHooks(std::tuple<> /*observers*/) {
return DispatchData() return DispatchData()
#if BUILDFLAG(USE_PARTITION_ALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
.SetAllocationObserverHooks(nullptr, nullptr) .SetAllocationObserverHooks(nullptr, nullptr)
#endif #endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
.SetAllocatorDispatch(nullptr) .SetAllocatorDispatch(nullptr)
#endif #endif
; ;


@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_TAGGING_H_ #ifndef BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_
#define BASE_ALLOCATOR_DISPATCHER_TAGGING_H_ #define BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_
#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h" #include "partition_alloc/tagging.h"
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
// The various modes of Arm's MTE extension. The enum values should match their // The various modes of Arm's MTE extension. The enum values should match their
@ -39,4 +39,4 @@ constexpr MTEMode ConvertToMTEMode(
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_TAGGING_H_ #endif // BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_


@ -9,8 +9,8 @@
#include "base/allocator/dispatcher/memory_tagging.h" #include "base/allocator/dispatcher/memory_tagging.h"
#include "base/allocator/dispatcher/subsystem.h" #include "base/allocator/dispatcher/subsystem.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "partition_alloc/buildflags.h"
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
@ -42,28 +42,28 @@ class BASE_EXPORT AllocationNotificationData {
// In the allocation observer path, it's interesting which reporting mode is // In the allocation observer path, it's interesting which reporting mode is
// enabled. // enabled.
#if BUILDFLAG(HAS_MEMORY_TAGGING) #if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
constexpr AllocationNotificationData& SetMteReportingMode(MTEMode mode) { constexpr AllocationNotificationData& SetMteReportingMode(MTEMode mode) {
mte_reporting_mode_ = mode; mte_reporting_mode_ = mode;
return *this; return *this;
} }
#endif // BUILDFLAG(HAS_MEMORY_TAGGING) #endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
constexpr MTEMode mte_reporting_mode() const { constexpr MTEMode mte_reporting_mode() const {
#if BUILDFLAG(HAS_MEMORY_TAGGING) #if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
return mte_reporting_mode_; return mte_reporting_mode_;
#else #else
return MTEMode::kUndefined; return MTEMode::kUndefined;
#endif // BUILDFLAG(HAS_MEMORY_TAGGING) #endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
} }
private: private:
void* address_ = nullptr; void* address_ = nullptr;
size_t size_ = 0; size_t size_ = 0;
const char* type_name_ = nullptr; const char* type_name_ = nullptr;
#if BUILDFLAG(HAS_MEMORY_TAGGING) #if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
MTEMode mte_reporting_mode_ = MTEMode::kUndefined; MTEMode mte_reporting_mode_ = MTEMode::kUndefined;
#endif // BUILDFLAG(HAS_MEMORY_TAGGING) #endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
AllocationSubsystem allocation_subsystem_; AllocationSubsystem allocation_subsystem_;
}; };
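A hypothetical observer-side check built only on the accessors visible above; when MTE support is compiled out, mte_reporting_mode() always returns MTEMode::kUndefined, so the branch simply never fires:

// Illustrative observer, not an existing Chromium class.
void OnAllocationObserved(const AllocationNotificationData& notification) {
  if (notification.mte_reporting_mode() != MTEMode::kUndefined) {
    // The allocation came from a partition with MTE reporting enabled;
    // record the mode alongside the sample.
  }
}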
@ -83,26 +83,26 @@ class BASE_EXPORT FreeNotificationData {
// In the free observer path, it's interesting which reporting mode is // In the free observer path, it's interesting which reporting mode is
// enabled. // enabled.
#if BUILDFLAG(HAS_MEMORY_TAGGING) #if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
constexpr FreeNotificationData& SetMteReportingMode(MTEMode mode) { constexpr FreeNotificationData& SetMteReportingMode(MTEMode mode) {
mte_reporting_mode_ = mode; mte_reporting_mode_ = mode;
return *this; return *this;
} }
#endif // BUILDFLAG(HAS_MEMORY_TAGGING) #endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
constexpr MTEMode mte_reporting_mode() const { constexpr MTEMode mte_reporting_mode() const {
#if BUILDFLAG(HAS_MEMORY_TAGGING) #if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
return mte_reporting_mode_; return mte_reporting_mode_;
#else #else
return MTEMode::kUndefined; return MTEMode::kUndefined;
#endif // BUILDFLAG(HAS_MEMORY_TAGGING) #endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
} }
private: private:
void* address_ = nullptr; void* address_ = nullptr;
#if BUILDFLAG(HAS_MEMORY_TAGGING) #if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
MTEMode mte_reporting_mode_ = MTEMode::kUndefined; MTEMode mte_reporting_mode_ = MTEMode::kUndefined;
#endif // BUILDFLAG(HAS_MEMORY_TAGGING) #endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
AllocationSubsystem allocation_subsystem_; AllocationSubsystem allocation_subsystem_;
}; };


@ -29,7 +29,7 @@ void ReentryGuard::InitTLSSlot() {
int error = pthread_key_create(&entered_key_, nullptr); int error = pthread_key_create(&entered_key_, nullptr);
CHECK(!error); CHECK(!error);
// Touch the TLS slot immediately to force any allocations. // Touch the TLS slot immediately to force any allocations.
// TODO(https://crbug.com/1411454): Use this technique to avoid allocations // TODO(crbug.com/40062835): Use this technique to avoid allocations
// in PoissonAllocationSampler::ScopedMuteThreadSamples, which will make // in PoissonAllocationSampler::ScopedMuteThreadSamples, which will make
// ReentryGuard redundant. // ReentryGuard redundant.
pthread_setspecific(entered_key_, nullptr); pthread_setspecific(entered_key_, nullptr);
@ -47,7 +47,7 @@ void ReentryGuard::InitTLSSlot() {}
void ReentryGuard::RecordTLSSlotToCrashKey() { void ReentryGuard::RecordTLSSlotToCrashKey() {
// Record the key in crash dumps to detect when it's higher than 32 // Record the key in crash dumps to detect when it's higher than 32
// (PTHREAD_KEY_2NDLEVEL_SIZE). // (PTHREAD_KEY_2NDLEVEL_SIZE).
// TODO(crbug.com/1411454): Remove this after diagnosing reentry crashes. // TODO(crbug.com/40062835): Remove this after diagnosing reentry crashes.
static auto* const crash_key = base::debug::AllocateCrashKeyString( static auto* const crash_key = base::debug::AllocateCrashKeyString(
"reentry_guard_tls_slot", base::debug::CrashKeySize::Size32); "reentry_guard_tls_slot", base::debug::CrashKeySize::Size32);


@ -33,8 +33,9 @@ struct BASE_EXPORT ReentryGuard {
} }
ALWAYS_INLINE ~ReentryGuard() { ALWAYS_INLINE ~ReentryGuard() {
if (LIKELY(allowed_)) if (allowed_) [[likely]] {
pthread_setspecific(entered_key_, nullptr); pthread_setspecific(entered_key_, nullptr);
}
} }
explicit operator bool() const noexcept { return allowed_; } explicit operator bool() const noexcept { return allowed_; }
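A short usage sketch of the guard, assuming the usual pattern of constructing it on hook entry and bailing out when operator bool() reports re-entry (the destructor above then clears the TLS flag only for the thread that was allowed in):

// Assumed usage pattern; not a quote from the sampler code.
void OnAllocationHook() {
  ReentryGuard guard;
  if (!guard) {
    return;  // Already inside the hook on this thread; avoid recursing.
  }
  // Work done here may itself allocate: nested hook invocations on this
  // thread will observe !guard and return early.
}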


@ -24,4 +24,4 @@ enum class AllocationSubsystem {
}; };
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_ #endif // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_


@ -24,4 +24,4 @@ struct DispatcherTest : public ::testing::Test {
} // namespace base::allocator::dispatcher::testing } // namespace base::allocator::dispatcher::testing
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_ #endif // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_


@ -30,4 +30,4 @@ struct ObserverMock {
} // namespace testing } // namespace testing
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_ #endif // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_


@ -4,16 +4,18 @@
#include "base/allocator/dispatcher/tls.h" #include "base/allocator/dispatcher/tls.h"
#include <string_view>
#if USE_LOCAL_TLS_EMULATION() #if USE_LOCAL_TLS_EMULATION()
#include <sys/mman.h>
#include "base/check.h" #include "base/check.h"
#include "base/dcheck_is_on.h" #include "base/dcheck_is_on.h"
#include "base/debug/crash_logging.h" #include "base/debug/crash_logging.h"
#include "base/immediate_crash.h" #include "base/immediate_crash.h"
#include "build/build_config.h" #include "build/build_config.h"
#include <sys/mman.h>
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#include <sys/prctl.h> #include <sys/prctl.h>
#endif #endif
@ -94,7 +96,7 @@ PThreadTLSSystem& PThreadTLSSystem::operator=(PThreadTLSSystem&& other) {
bool PThreadTLSSystem::Setup( bool PThreadTLSSystem::Setup(
OnThreadTerminationFunction thread_termination_function, OnThreadTerminationFunction thread_termination_function,
const base::StringPiece instance_id) { std::string_view instance_id) {
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
// Initialize must happen outside of the allocation path. Therefore, it is // Initialize must happen outside of the allocation path. Therefore, it is
// secure to verify with DCHECK. // secure to verify with DCHECK.


@ -5,6 +5,8 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_TLS_H_ #ifndef BASE_ALLOCATOR_DISPATCHER_TLS_H_
#define BASE_ALLOCATOR_DISPATCHER_TLS_H_ #define BASE_ALLOCATOR_DISPATCHER_TLS_H_
#include <string_view>
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_POSIX) // the current allocation mechanism (mmap) and TLS #if BUILDFLAG(IS_POSIX) // the current allocation mechanism (mmap) and TLS
@ -15,18 +17,21 @@
#endif #endif
#if USE_LOCAL_TLS_EMULATION() #if USE_LOCAL_TLS_EMULATION()
#include <pthread.h>
#include <algorithm> #include <algorithm>
#include <atomic> #include <atomic>
#include <functional>
#include <memory> #include <memory>
#include <mutex> #include <mutex>
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/check.h" #include "base/check.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/strings/string_piece.h"
#include <pthread.h> #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_constants.h" // nogncheck
#endif
#if HAS_FEATURE(thread_sanitizer) #if HAS_FEATURE(thread_sanitizer)
#define DISABLE_TSAN_INSTRUMENTATION __attribute__((no_sanitize("thread"))) #define DISABLE_TSAN_INSTRUMENTATION __attribute__((no_sanitize("thread")))
@ -74,6 +79,8 @@ struct BASE_EXPORT MMapAllocator {
partition_alloc::PartitionPageSize(); partition_alloc::PartitionPageSize();
#elif BUILDFLAG(IS_APPLE) #elif BUILDFLAG(IS_APPLE)
constexpr static size_t AllocationChunkSize = 16384; constexpr static size_t AllocationChunkSize = 16384;
#elif BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_64_BITS)
constexpr static size_t AllocationChunkSize = 16384;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64) #elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
constexpr static size_t AllocationChunkSize = 16384; constexpr static size_t AllocationChunkSize = 16384;
#else #else
@ -108,7 +115,7 @@ class BASE_EXPORT PThreadTLSSystem {
// @param thread_termination_function An optional function which will be // @param thread_termination_function An optional function which will be
// invoked upon termination of a thread. // invoked upon termination of a thread.
bool Setup(OnThreadTerminationFunction thread_termination_function, bool Setup(OnThreadTerminationFunction thread_termination_function,
const base::StringPiece instance_id); std::string_view instance_id);
// Tear down the TLS system. After completing tear down, the thread // Tear down the TLS system. After completing tear down, the thread
// termination function passed to Setup will not be invoked anymore. // termination function passed to Setup will not be invoked anymore.
bool TearDownForTesting(); bool TearDownForTesting();
@ -196,7 +203,7 @@ template <typename PayloadType,
size_t AllocationChunkSize, size_t AllocationChunkSize,
bool IsDestructibleForTesting> bool IsDestructibleForTesting>
struct ThreadLocalStorage { struct ThreadLocalStorage {
explicit ThreadLocalStorage(const base::StringPiece instance_id) explicit ThreadLocalStorage(std::string_view instance_id)
: root_(AllocateAndInitializeChunk()) { : root_(AllocateAndInitializeChunk()) {
Initialize(instance_id); Initialize(instance_id);
} }
@ -204,7 +211,7 @@ struct ThreadLocalStorage {
// Create a new instance of |ThreadLocalStorage| using the passed allocator // Create a new instance of |ThreadLocalStorage| using the passed allocator
// and TLS system. This initializes the underlying TLS system and creates the // and TLS system. This initializes the underlying TLS system and creates the
// first chunk of data. // first chunk of data.
ThreadLocalStorage(const base::StringPiece instance_id, ThreadLocalStorage(std::string_view instance_id,
AllocatorType allocator, AllocatorType allocator,
TLSSystemType tls_system) TLSSystemType tls_system)
: allocator_(std::move(allocator)), : allocator_(std::move(allocator)),
@ -241,7 +248,7 @@ struct ThreadLocalStorage {
auto* slot = static_cast<SingleSlot*>(tls_system.GetThreadSpecificData()); auto* slot = static_cast<SingleSlot*>(tls_system.GetThreadSpecificData());
if (UNLIKELY(slot == nullptr)) { if (slot == nullptr) [[unlikely]] {
slot = FindAndAllocateFreeSlot(root_.load(std::memory_order_relaxed)); slot = FindAndAllocateFreeSlot(root_.load(std::memory_order_relaxed));
// We might be called in the course of handling a memory allocation. We do // We might be called in the course of handling a memory allocation. We do
@ -357,7 +364,7 @@ struct ThreadLocalStorage {
} }
// Perform common initialization during construction of an instance. // Perform common initialization during construction of an instance.
void Initialize(const base::StringPiece instance_id) { void Initialize(std::string_view instance_id) {
// The constructor must be called outside of the allocation path. Therefore, // The constructor must be called outside of the allocation path. Therefore,
// it is secure to verify with CHECK. // it is secure to verify with CHECK.


@ -7,8 +7,8 @@
#include <mach/mach.h> #include <mach/mach.h>
#include <malloc/malloc.h> #include <malloc/malloc.h>
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h" #include "partition_alloc/buildflags.h"
#include "base/allocator/partition_allocator/src/partition_alloc/shim/early_zone_registration_constants.h" #include "partition_alloc/shim/early_zone_registration_constants.h"
// BASE_EXPORT tends to be defined as soon as anything from //base is included. // BASE_EXPORT tends to be defined as soon as anything from //base is included.
#if defined(BASE_EXPORT) #if defined(BASE_EXPORT)
@ -17,7 +17,7 @@
namespace partition_alloc { namespace partition_alloc {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void EarlyMallocZoneRegistration() {} void EarlyMallocZoneRegistration() {}
void AllowDoublePartitionAllocZoneRegistration() {} void AllowDoublePartitionAllocZoneRegistration() {}
@ -262,5 +262,5 @@ void AllowDoublePartitionAllocZoneRegistration() {
} }
} }
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} // namespace partition_alloc } // namespace partition_alloc


@ -8,22 +8,7 @@
#include "base/strings/strcat.h" #include "base/strings/strcat.h"
#include "base/system/sys_info.h" #include "base/system/sys_info.h"
namespace base { namespace base::miracle_parameter {
namespace miracle_parameter {
namespace {
std::string GetFieldTrialParamByFeatureAsString(
const base::Feature& feature,
const std::string& param_name,
const std::string& default_value) {
const std::string value =
base::GetFieldTrialParamValueByFeature(feature, param_name);
return value.empty() ? default_value : value;
}
} // namespace
std::string GetParamNameWithSuffix(const std::string& param_name) { std::string GetParamNameWithSuffix(const std::string& param_name) {
// `base::SysInfo::AmountOfPhysicalMemoryMB()` refers to CommandLine // `base::SysInfo::AmountOfPhysicalMemoryMB()` refers to CommandLine
@ -88,6 +73,4 @@ base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
default_value)); default_value));
} }
} // namespace miracle_parameter } // namespace base::miracle_parameter
} // namespace base


@ -14,7 +14,7 @@
// dependency cycle of (base->miracle_parameter->base). // dependency cycle of (base->miracle_parameter->base).
// Eventually the miracle_parameter component will have a public interface in // Eventually the miracle_parameter component will have a public interface in
// //base/ and this could be removed. // //base/ and this could be removed.
// TODO(crbug.com/1475915): remove miracle_parameter from // TODO(crbug.com/40279826): remove miracle_parameter from
// //base/allocator/. // //base/allocator/.
namespace base { namespace base {
@ -166,7 +166,7 @@ Enum GetMiracleParameterAsEnum(
default_value, type, options) \ default_value, type, options) \
type function_name() { \ type function_name() { \
static const type value = miracle_parameter::GetMiracleParameterAsEnum( \ static const type value = miracle_parameter::GetMiracleParameterAsEnum( \
feature, param_name, default_value, base::make_span(options)); \ feature, param_name, default_value, base::span(options)); \
return value; \ return value; \
} }


@ -5,11 +5,6 @@
#include "base/allocator/partition_alloc_features.h" #include "base/allocator/partition_alloc_features.h"
#include "base/allocator/miracle_parameter.h" #include "base/allocator/miracle_parameter.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/feature_list.h" #include "base/feature_list.h"
#include "base/features.h" #include "base/features.h"
@ -17,10 +12,29 @@
#include "base/time/time.h" #include "base/time/time.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "build/chromecast_buildflags.h" #include "build/chromecast_buildflags.h"
#include "build/chromeos_buildflags.h" #include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/thread_cache.h"
namespace base { namespace base::features {
namespace features {
namespace {
static constexpr char kPAFeatureEnabledProcessesStr[] = "enabled-processes";
static constexpr char kBrowserOnlyStr[] = "browser-only";
static constexpr char kBrowserAndRendererStr[] = "browser-and-renderer";
static constexpr char kNonRendererStr[] = "non-renderer";
static constexpr char kAllProcessesStr[] = "all-processes";
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
static constexpr char kRendererOnlyStr[] = "renderer-only";
static constexpr char kAllChildProcessesStr[] = "all-child-processes";
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace
BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr, BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr,
"PartitionAllocUnretainedDanglingPtr", "PartitionAllocUnretainedDanglingPtr",
@ -32,19 +46,22 @@ constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
{UnretainedDanglingPtrMode::kDumpWithoutCrashing, {UnretainedDanglingPtrMode::kDumpWithoutCrashing,
"dump_without_crashing"}, "dump_without_crashing"},
}; };
const base::FeatureParam<UnretainedDanglingPtrMode> // Note: Do not use the prepared macro, since no local cache is needed.
constinit const FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam = { kUnretainedDanglingPtrModeParam = {
&kPartitionAllocUnretainedDanglingPtr, &kPartitionAllocUnretainedDanglingPtr,
"mode", "mode",
UnretainedDanglingPtrMode::kDumpWithoutCrashing, UnretainedDanglingPtrMode::kCrash,
&kUnretainedDanglingPtrModeOption, &kUnretainedDanglingPtrModeOption,
}; };
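At runtime the chosen mode is read through the regular base::FeatureParam accessor; a small sketch (the Get() call is the standard API, the branches are illustrative):

// Illustrative consumption of the param; the real handler wiring lives in the
// embedder, not in this file.
void ConfigureUnretainedDanglingPtrChecks() {
  switch (kUnretainedDanglingPtrModeParam.Get()) {
    case UnretainedDanglingPtrMode::kCrash:
      // Install the crashing handler.
      break;
    case UnretainedDanglingPtrMode::kDumpWithoutCrashing:
      // Report via DumpWithoutCrashing and continue.
      break;
  }
}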
// Note: DPD conflicts with no-op `free()` (see
// `base::allocator::MakeFreeNoOp()`). No-op `free()` stands down in the
// presence of DPD, but hypothetically fully launching DPD should prompt
// a rethink of no-op `free()`.
BASE_FEATURE(kPartitionAllocDanglingPtr, BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr", "PartitionAllocDanglingPtr",
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG) || \ #if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG)
(BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && BUILDFLAG(IS_LINUX) && \
!defined(OFFICIAL_BUILD) && (!defined(NDEBUG) || DCHECK_IS_ON()))
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
@ -55,7 +72,8 @@ constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"}, {DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogOnly, "log_only"}, {DanglingPtrMode::kLogOnly, "log_only"},
}; };
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{ // Note: Do not use the prepared macro, since no local cache is needed.
constinit const FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr, &kPartitionAllocDanglingPtr,
"mode", "mode",
DanglingPtrMode::kCrash, DanglingPtrMode::kCrash,
@ -65,72 +83,91 @@ constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
{DanglingPtrType::kAll, "all"}, {DanglingPtrType::kAll, "all"},
{DanglingPtrType::kCrossTask, "cross_task"}, {DanglingPtrType::kCrossTask, "cross_task"},
}; };
const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{ // Note: Do not use the prepared macro, since no local cache is needed.
constinit const FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
&kPartitionAllocDanglingPtr, &kPartitionAllocDanglingPtr,
"type", "type",
DanglingPtrType::kAll, DanglingPtrType::kAll,
&kDanglingPtrTypeOption, &kDanglingPtrTypeOption,
}; };
#if BUILDFLAG(USE_STARSCAN) #if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan,
"PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT);
#endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanBrowserOnly,
"PartitionAllocPCScanBrowserOnly",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan is turned on only for the renderer's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanRendererOnly,
"PartitionAllocPCScanRendererOnly",
FEATURE_DISABLED_BY_DEFAULT);
// Use a larger maximum thread cache cacheable bucket size. // Use a larger maximum thread cache cacheable bucket size.
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize, BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSize", "PartitionAllocLargeThreadCacheSize",
FEATURE_ENABLED_BY_DEFAULT); FEATURE_ENABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_INT( MIRACLE_PARAMETER_FOR_INT(GetPartitionAllocLargeThreadCacheSizeValue,
GetPartitionAllocLargeThreadCacheSizeValue, kPartitionAllocLargeThreadCacheSize,
kPartitionAllocLargeThreadCacheSize, "PartitionAllocLargeThreadCacheSizeValue",
"PartitionAllocLargeThreadCacheSizeValue", ::partition_alloc::kThreadCacheLargeSizeThreshold)
::partition_alloc::ThreadCacheLimits::kLargeSizeThreshold)
MIRACLE_PARAMETER_FOR_INT( MIRACLE_PARAMETER_FOR_INT(
GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid, GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid,
kPartitionAllocLargeThreadCacheSize, kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid", "PartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid",
::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold) ::partition_alloc::kThreadCacheDefaultSizeThreshold)
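Each MIRACLE_PARAMETER_FOR_INT invocation above generates a plain accessor (the enum variant of the macro shown earlier caches the resolved value in a function-local static; the int variant presumably parallels it); call sites would use it roughly like this, illustratively:

// Illustrative call sites for the generated accessors.
const int large_threshold = GetPartitionAllocLargeThreadCacheSizeValue();
const int low_ram_threshold =
    GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();
// Each value is the field-trial parameter (resolved with the device-memory
// suffix logic in GetParamNameWithSuffix) or, failing that, the default above.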
BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing, BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
"PartitionAllocLargeEmptySlotSpanRing", "PartitionAllocLargeEmptySlotSpanRing",
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
FEATURE_ENABLED_BY_DEFAULT);
#else
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
#endif
BASE_FEATURE(kPartitionAllocWithAdvancedChecks,
"PartitionAllocWithAdvancedChecks",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>::Option
kPartitionAllocWithAdvancedChecksEnabledProcessesOptions[] = {
{PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
kBrowserOnlyStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserAndRenderer,
kBrowserAndRendererStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kNonRenderer,
kNonRendererStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kAllProcesses,
kAllProcessesStr}};
// Note: Do not use the prepared macro, since no local cache is needed.
constinit const FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
kPartitionAllocWithAdvancedChecksEnabledProcessesParam{
&kPartitionAllocWithAdvancedChecks, kPAFeatureEnabledProcessesStr,
PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
&kPartitionAllocWithAdvancedChecksEnabledProcessesOptions};
BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine, BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantine", "PartitionAllocSchedulerLoopQuarantine",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
// Scheduler Loop Quarantine's capacity in bytes. // Scheduler Loop Quarantine's per-branch capacity in bytes.
const base::FeatureParam<int> kPartitionAllocSchedulerLoopQuarantineCapacity{ // Note: Do not use the prepared macro, since no local cache is needed.
&kPartitionAllocSchedulerLoopQuarantine, constinit const FeatureParam<int>
"PartitionAllocSchedulerLoopQuarantineCapacity", 0}; kPartitionAllocSchedulerLoopQuarantineBranchCapacity{
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBranchCapacity", 0};
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
BASE_FEATURE_PARAM(int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity,
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBrowserUICapacity",
0);
BASE_FEATURE(kPartitionAllocZappingByFreeFlags, BASE_FEATURE(kPartitionAllocZappingByFreeFlags,
"PartitionAllocZappingByFreeFlags", "PartitionAllocZappingByFreeFlags",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory,
"PartitionAllocEventuallyZeroFreedMemory",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocFewerMemoryRegions,
"PartitionAllocFewerMemoryRegions",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocBackupRefPtr, BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr", "PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \ #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG)
BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS) || \
(BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CASTOS)) || \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG)
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
@ -139,47 +176,41 @@ BASE_FEATURE(kPartitionAllocBackupRefPtr,
constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
kBackupRefPtrEnabledProcessesOptions[] = { kBackupRefPtrEnabledProcessesOptions[] = {
{BackupRefPtrEnabledProcesses::kBrowserOnly, "browser-only"}, {BackupRefPtrEnabledProcesses::kBrowserOnly, kBrowserOnlyStr},
{BackupRefPtrEnabledProcesses::kBrowserAndRenderer, {BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
"browser-and-renderer"}, kBrowserAndRendererStr},
{BackupRefPtrEnabledProcesses::kNonRenderer, "non-renderer"}, {BackupRefPtrEnabledProcesses::kNonRenderer, kNonRendererStr},
{BackupRefPtrEnabledProcesses::kAllProcesses, "all-processes"}}; {BackupRefPtrEnabledProcesses::kAllProcesses, kAllProcessesStr}};
const base::FeatureParam<BackupRefPtrEnabledProcesses> BASE_FEATURE_ENUM_PARAM(BackupRefPtrEnabledProcesses,
kBackupRefPtrEnabledProcessesParam{ kBackupRefPtrEnabledProcessesParam,
&kPartitionAllocBackupRefPtr, "enabled-processes", &kPartitionAllocBackupRefPtr,
BackupRefPtrEnabledProcesses::kNonRenderer, kPAFeatureEnabledProcessesStr,
&kBackupRefPtrEnabledProcessesOptions}; #if PA_BUILDFLAG(IS_MAC) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
BackupRefPtrEnabledProcesses::kNonRenderer,
#else
BackupRefPtrEnabledProcesses::kAllProcesses,
#endif
&kBackupRefPtrEnabledProcessesOptions);
// Map *-with-memory-reclaimer modes onto their counterparts without the suffix.
// They are the same, as memory reclaimer is now controlled independently.
//
// Similarly, map disabled-but-*-way-split onto plain disabled, as we are done
// experimenting with partition split.
//
// We need to keep those option strings, as there is a long tail of clients that
// may have an old field trial config, which used these modes.
//
// DO NOT USE *-with-memory-reclaimer and disabled-but-*-way-split modes in new
// configs!
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = { constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"}, {BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"}, {BackupRefPtrMode::kEnabled, "enabled"},
{BackupRefPtrMode::kEnabled, "enabled-with-memory-reclaimer"},
{BackupRefPtrMode::kEnabledInSameSlotMode, "enabled-in-same-slot-mode"},
{BackupRefPtrMode::kDisabled, "disabled-but-2-way-split"},
{BackupRefPtrMode::kDisabled,
"disabled-but-2-way-split-with-memory-reclaimer"},
{BackupRefPtrMode::kDisabled, "disabled-but-3-way-split"},
}; };
const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{ BASE_FEATURE_ENUM_PARAM(BackupRefPtrMode,
&kPartitionAllocBackupRefPtr, "brp-mode", kBackupRefPtrModeParam,
BackupRefPtrMode::kEnabledInSameSlotMode, &kBackupRefPtrModeOptions}; &kPartitionAllocBackupRefPtr,
"brp-mode",
BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions);
// Note: Do not use the prepared macro, since no local cache is needed.
constinit const FeatureParam<int> kBackupRefPtrExtraExtrasSizeParam{
&kPartitionAllocBackupRefPtr, "brp-extra-extras-size", 0};
BASE_FEATURE(kPartitionAllocMemoryTagging, BASE_FEATURE(kPartitionAllocMemoryTagging,
"PartitionAllocMemoryTagging", "PartitionAllocMemoryTagging",
#if BUILDFLAG(USE_FULL_MTE) #if PA_BUILDFLAG(USE_FULL_MTE) || BUILDFLAG(IS_ANDROID)
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
@ -190,28 +221,40 @@ constexpr FeatureParam<MemtagMode>::Option kMemtagModeOptions[] = {
{MemtagMode::kSync, "sync"}, {MemtagMode::kSync, "sync"},
{MemtagMode::kAsync, "async"}}; {MemtagMode::kAsync, "async"}};
const base::FeatureParam<MemtagMode> kMemtagModeParam{ // Note: Do not use the prepared macro, since no local cache is needed.
constinit const FeatureParam<MemtagMode> kMemtagModeParam{
&kPartitionAllocMemoryTagging, "memtag-mode", &kPartitionAllocMemoryTagging, "memtag-mode",
#if BUILDFLAG(USE_FULL_MTE) #if PA_BUILDFLAG(USE_FULL_MTE)
MemtagMode::kSync, MemtagMode::kSync,
#else #else
MemtagMode::kAsync, MemtagMode::kAsync,
#endif #endif
&kMemtagModeOptions}; &kMemtagModeOptions};
constexpr FeatureParam<RetagMode>::Option kRetagModeOptions[] = {
{RetagMode::kIncrement, "increment"},
{RetagMode::kRandom, "random"},
};
// Note: Do not use the prepared macro, since no local cache is needed.
constinit const FeatureParam<RetagMode> kRetagModeParam{
&kPartitionAllocMemoryTagging, "retag-mode", RetagMode::kIncrement,
&kRetagModeOptions};
constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option
kMemoryTaggingEnabledProcessesOptions[] = { kMemoryTaggingEnabledProcessesOptions[] = {
{MemoryTaggingEnabledProcesses::kBrowserOnly, "browser-only"}, {MemoryTaggingEnabledProcesses::kBrowserOnly, kBrowserOnlyStr},
{MemoryTaggingEnabledProcesses::kNonRenderer, "non-renderer"}, {MemoryTaggingEnabledProcesses::kNonRenderer, kNonRendererStr},
{MemoryTaggingEnabledProcesses::kAllProcesses, "all-processes"}}; {MemoryTaggingEnabledProcesses::kAllProcesses, kAllProcessesStr}};
const base::FeatureParam<MemoryTaggingEnabledProcesses> // Note: Do not use the prepared macro, since no local cache is needed.
constinit const FeatureParam<MemoryTaggingEnabledProcesses>
kMemoryTaggingEnabledProcessesParam{ kMemoryTaggingEnabledProcessesParam{
&kPartitionAllocMemoryTagging, "enabled-processes", &kPartitionAllocMemoryTagging, kPAFeatureEnabledProcessesStr,
#if BUILDFLAG(USE_FULL_MTE) #if PA_BUILDFLAG(USE_FULL_MTE)
MemoryTaggingEnabledProcesses::kAllProcesses, MemoryTaggingEnabledProcesses::kAllProcesses,
#else #else
MemoryTaggingEnabledProcesses::kBrowserOnly, MemoryTaggingEnabledProcesses::kNonRenderer,
#endif #endif
&kMemoryTaggingEnabledProcessesOptions}; &kMemoryTaggingEnabledProcessesOptions};
@ -222,7 +265,7 @@ BASE_FEATURE(kKillPartitionAllocMemoryTagging,
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
BASE_FEATURE(kPartitionAllocPermissiveMte, BASE_FEATURE(kPartitionAllocPermissiveMte,
"PartitionAllocPermissiveMte", "PartitionAllocPermissiveMte",
#if BUILDFLAG(USE_FULL_MTE) #if PA_BUILDFLAG(USE_FULL_MTE)
// We want to actually crash if USE_FULL_MTE is enabled. // We want to actually crash if USE_FULL_MTE is enabled.
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
#else #else
@ -230,13 +273,15 @@ BASE_FEATURE(kPartitionAllocPermissiveMte,
#endif #endif
); );
const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{ BASE_FEATURE(kAsanBrpDereferenceCheck,
&kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true}; "AsanBrpDereferenceCheck",
const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{ FEATURE_ENABLED_BY_DEFAULT);
&kPartitionAllocBackupRefPtr, "asan-enable-extraction-check", BASE_FEATURE(kAsanBrpExtractionCheck,
false}; // Not much noise at the moment to enable by default. "AsanBrpExtractionCheck", // Not much noise at the moment to
const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{ FEATURE_DISABLED_BY_DEFAULT); // enable by default.
&kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true}; BASE_FEATURE(kAsanBrpInstantiationCheck,
"AsanBrpInstantiationCheck",
FEATURE_ENABLED_BY_DEFAULT);
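These replace the former per-feature boolean params with stand-alone features, so callers now query them through FeatureList directly; a minimal sketch:

// Sketch: querying the new stand-alone features via the standard accessor.
const bool check_dereference =
    base::FeatureList::IsEnabled(base::features::kAsanBrpDereferenceCheck);
const bool check_extraction =
    base::FeatureList::IsEnabled(base::features::kAsanBrpExtractionCheck);
const bool check_instantiation =
    base::FeatureList::IsEnabled(base::features::kAsanBrpInstantiationCheck);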
// If enabled, switches the bucket distribution to a denser one. // If enabled, switches the bucket distribution to a denser one.
// //
@ -250,29 +295,31 @@ BASE_FEATURE(kPartitionAllocUseDenserDistribution,
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS) #endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
); );
const base::FeatureParam<BucketDistributionMode>::Option const FeatureParam<BucketDistributionMode>::Option
kPartitionAllocBucketDistributionOption[] = { kPartitionAllocBucketDistributionOption[] = {
{BucketDistributionMode::kDefault, "default"}, {BucketDistributionMode::kDefault, "default"},
{BucketDistributionMode::kDenser, "denser"}, {BucketDistributionMode::kDenser, "denser"},
}; };
const base::FeatureParam<BucketDistributionMode> // Note: Do not use the prepared macro, since no local cache is needed.
kPartitionAllocBucketDistributionParam { constinit const FeatureParam<BucketDistributionMode>
&kPartitionAllocUseDenserDistribution, "mode", kPartitionAllocBucketDistributionParam{
&kPartitionAllocUseDenserDistribution, "mode",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS) #if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
BucketDistributionMode::kDefault, BucketDistributionMode::kDefault,
#else #else
BucketDistributionMode::kDenser, BucketDistributionMode::kDenser,
#endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS) #endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
&kPartitionAllocBucketDistributionOption &kPartitionAllocBucketDistributionOption};
};
BASE_FEATURE(kPartitionAllocMemoryReclaimer, BASE_FEATURE(kPartitionAllocMemoryReclaimer,
"PartitionAllocMemoryReclaimer", "PartitionAllocMemoryReclaimer",
FEATURE_ENABLED_BY_DEFAULT); FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam<TimeDelta> kPartitionAllocMemoryReclaimerInterval = { BASE_FEATURE_PARAM(TimeDelta,
&kPartitionAllocMemoryReclaimer, "interval", kPartitionAllocMemoryReclaimerInterval,
TimeDelta(), // Defaults to zero. &kPartitionAllocMemoryReclaimer,
}; "interval",
TimeDelta() // Defaults to zero.
);
// Configures whether we set a lower limit for renderers that do not have a main // Configures whether we set a lower limit for renderers that do not have a main
// frame, similar to the limit that is already done for backgrounded renderers. // frame, similar to the limit that is already done for backgrounded renderers.
@ -280,52 +327,22 @@ BASE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers,
"LowerPAMemoryLimitForNonMainRenderers", "LowerPAMemoryLimitForNonMainRenderers",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself.
BASE_FEATURE(kPartitionAllocPCScanMUAwareScheduler,
"PartitionAllocPCScanMUAwareScheduler",
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, PCScan frees unconditionally all quarantined objects.
// This is a performance testing feature.
BASE_FEATURE(kPartitionAllocPCScanImmediateFreeing,
"PartitionAllocPCScanImmediateFreeing",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan clears eagerly (synchronously) on free().
BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
"PartitionAllocPCScanEagerClearing",
FEATURE_DISABLED_BY_DEFAULT);
// In addition to heap, scan also the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning,
"PartitionAllocPCScanStackScanning",
#if BUILDFLAG(PCSCAN_STACK_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif // BUILDFLAG(PCSCAN_STACK_SUPPORTED)
);
BASE_FEATURE(kPartitionAllocDCScan,
"PartitionAllocDCScan",
FEATURE_DISABLED_BY_DEFAULT);
// Whether to straighten free lists for larger slot spans in PurgeMemory() -> // Whether to straighten free lists for larger slot spans in PurgeMemory() ->
// ... -> PartitionPurgeSlotSpan(). // ... -> PartitionPurgeSlotSpan().
BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists, BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
"PartitionAllocStraightenLargerSlotSpanFreeLists", "PartitionAllocStraightenLargerSlotSpanFreeLists",
FEATURE_ENABLED_BY_DEFAULT); FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam< const FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>::
partition_alloc::StraightenLargerSlotSpanFreeListsMode>::Option Option kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
{partition_alloc::StraightenLargerSlotSpanFreeListsMode:: {partition_alloc::StraightenLargerSlotSpanFreeListsMode::
kOnlyWhenUnprovisioning, kOnlyWhenUnprovisioning,
"only-when-unprovisioning"}, "only-when-unprovisioning"},
{partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways, {partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways,
"always"}, "always"},
}; };
const base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode> // Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<
partition_alloc::StraightenLargerSlotSpanFreeListsMode>
kPartitionAllocStraightenLargerSlotSpanFreeListsMode = { kPartitionAllocStraightenLargerSlotSpanFreeListsMode = {
&kPartitionAllocStraightenLargerSlotSpanFreeLists, &kPartitionAllocStraightenLargerSlotSpanFreeLists,
"mode", "mode",
@ -358,9 +375,11 @@ BASE_FEATURE(kPageAllocatorRetryOnCommitFailure,
// The feature: kPartialLowEndModeOnMidRangeDevices is defined in // The feature: kPartialLowEndModeOnMidRangeDevices is defined in
// //base/features.cc. Since the following feature param is related to // //base/features.cc. Since the following feature param is related to
// PartitionAlloc, define the param here. // PartitionAlloc, define the param here.
const FeatureParam<bool> kPartialLowEndModeExcludePartitionAllocSupport{ BASE_FEATURE_PARAM(bool,
&kPartialLowEndModeOnMidRangeDevices, "exclude-partition-alloc-support", kPartialLowEndModeExcludePartitionAllocSupport,
false}; &kPartialLowEndModeOnMidRangeDevices,
"exclude-partition-alloc-support",
false);
#endif #endif
BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier, BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier,
@ -378,19 +397,19 @@ MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplierForAndroid,
1.) 1.)
constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta( constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
base::TimeDelta time_delta) { TimeDelta time_delta) {
return partition_alloc::internal::base::Microseconds( return partition_alloc::internal::base::Microseconds(
time_delta.InMicroseconds()); time_delta.InMicroseconds());
} }
constexpr base::TimeDelta FromPartitionAllocTimeDelta( constexpr TimeDelta FromPartitionAllocTimeDelta(
partition_alloc::internal::base::TimeDelta time_delta) { partition_alloc::internal::base::TimeDelta time_delta) {
return base::Microseconds(time_delta.InMicroseconds()); return Microseconds(time_delta.InMicroseconds());
} }
BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval, BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
"EnableConfigurableThreadCachePurgeInterval", "EnableConfigurableThreadCachePurgeInterval",
base::FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_TIME_DELTA( MIRACLE_PARAMETER_FOR_TIME_DELTA(
GetThreadCacheMinPurgeIntervalValue, GetThreadCacheMinPurgeIntervalValue,
@ -427,7 +446,7 @@ GetThreadCacheDefaultPurgeInterval() {
BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging, BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
"EnableConfigurableThreadCacheMinCachedMemoryForPurging", "EnableConfigurableThreadCacheMinCachedMemoryForPurging",
base::FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_INT( MIRACLE_PARAMETER_FOR_INT(
GetThreadCacheMinCachedMemoryForPurgingBytes, GetThreadCacheMinCachedMemoryForPurgingBytes,
@ -441,63 +460,41 @@ MIRACLE_PARAMETER_FOR_INT(
// abundance of caution, we provide this toggle that allows us to // abundance of caution, we provide this toggle that allows us to
// wholly disable MiraclePtr in the buffer partition, if necessary. // wholly disable MiraclePtr in the buffer partition, if necessary.
// //
// TODO(crbug.com/1444624): this is unneeded once // TODO(crbug.com/40064499): this is unneeded once
// MiraclePtr-for-Renderer launches. // MiraclePtr-for-Renderer launches.
BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition, BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
"PartitionAllocDisableBRPInBufferPartition", "PartitionAllocDisableBRPInBufferPartition",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS) BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
BASE_FEATURE(kUsePoolOffsetFreelists, "PartitionAllocAdjustSizeWhenInForeground",
"PartitionAllocUsePoolOffsetFreelists", #if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
base::FEATURE_DISABLED_BY_DEFAULT); FEATURE_ENABLED_BY_DEFAULT);
#else
FEATURE_DISABLED_BY_DEFAULT);
#endif #endif
BASE_FEATURE(kPartitionAllocMakeFreeNoOpOnShutdown, BASE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans,
"PartitionAllocMakeFreeNoOpOnShutdown", "PartitionAllocUseSmallSingleSlotSpans",
FEATURE_ENABLED_BY_DEFAULT);
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
BASE_FEATURE(kPartitionAllocShadowMetadata,
"PartitionAllocShadowMetadata",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<WhenFreeBecomesNoOp>::Option constexpr FeatureParam<ShadowMetadataEnabledProcesses>::Option
kPartitionAllocMakeFreeNoOpOnShutdownOptions[] = { kShadowMetadataEnabledProcessesOptions[] = {
{ {ShadowMetadataEnabledProcesses::kRendererOnly, kRendererOnlyStr},
WhenFreeBecomesNoOp::kBeforeShutDownThreads, {ShadowMetadataEnabledProcesses::kAllChildProcesses,
"before-shutdown-threads", kAllChildProcessesStr}};
},
{
WhenFreeBecomesNoOp::kInShutDownThreads,
"in-shutdown-threads",
},
{
WhenFreeBecomesNoOp::kAfterShutDownThreads,
"after-shutdown-threads",
},
};
const base::FeatureParam<WhenFreeBecomesNoOp> // Note: Do not use the prepared macro as of no need for a local cache.
kPartitionAllocMakeFreeNoOpOnShutdownParam{ constinit const FeatureParam<ShadowMetadataEnabledProcesses>
&kPartitionAllocMakeFreeNoOpOnShutdown, "callsite", kShadowMetadataEnabledProcessesParam{
WhenFreeBecomesNoOp::kBeforeShutDownThreads, &kPartitionAllocShadowMetadata, kPAFeatureEnabledProcessesStr,
&kPartitionAllocMakeFreeNoOpOnShutdownOptions}; ShadowMetadataEnabledProcesses::kRendererOnly,
&kShadowMetadataEnabledProcessesOptions};
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
void MakeFreeNoOp(WhenFreeBecomesNoOp callsite) { } // namespace base::features
CHECK(base::FeatureList::GetInstance());
// Ignoring `free()` during Shutdown would allow developers to introduce new
// dangling pointers. So we want to avoid ignoring free when it is enabled.
// Note: For now, the DanglingPointerDetector is only enabled on 5 bots, and
// on linux non-official configuration.
// TODO(b/40802063): Reconsider this decision after the experiment.
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
if (base::FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
return;
}
#endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
if (base::FeatureList::IsEnabled(kPartitionAllocMakeFreeNoOpOnShutdown) &&
kPartitionAllocMakeFreeNoOpOnShutdownParam.Get() == callsite) {
allocator_shim::InsertNoOpOnFreeAllocatorShimOnShutDown();
}
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
}
} // namespace features
} // namespace base


@ -5,27 +5,44 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_ #ifndef BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_ #define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/feature_list.h" #include "base/feature_list.h"
#include "base/metrics/field_trial_params.h" #include "base/metrics/field_trial_params.h"
#include "base/strings/string_piece.h"
#include "base/time/time.h" #include "base/time/time.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_root.h"
namespace base { namespace base::features {
namespace features {
extern const BASE_EXPORT Feature kPartitionAllocUnretainedDanglingPtr; namespace internal {
enum class PAFeatureEnabledProcesses {
// Enabled only in the browser process.
kBrowserOnly,
// Enabled only in the browser and renderer processes.
kBrowserAndRenderer,
// Enabled in all processes, except renderer.
kNonRenderer,
// Enabled only in renderer processes.
kRendererOnly,
// Enabled in all child processes, except zygote.
kAllChildProcesses,
// Enabled in all processes.
kAllProcesses,
};
} // namespace internal
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUnretainedDanglingPtr);
enum class UnretainedDanglingPtrMode { enum class UnretainedDanglingPtrMode {
kCrash, kCrash,
kDumpWithoutCrashing, kDumpWithoutCrashing,
}; };
extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode> BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(UnretainedDanglingPtrMode,
kUnretainedDanglingPtrModeParam; kUnretainedDanglingPtrModeParam);
// See /docs/dangling_ptr.md // See /docs/dangling_ptr.md
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
@ -44,8 +61,7 @@ enum class DanglingPtrMode {
// Note: This will be extended with a single shot DumpWithoutCrashing. // Note: This will be extended with a single shot DumpWithoutCrashing.
}; };
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode> BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrMode, kDanglingPtrModeParam);
kDanglingPtrModeParam;
enum class DanglingPtrType { enum class DanglingPtrType {
// Act on any dangling raw_ptr released after being freed. // Act on any dangling raw_ptr released after being freed.
kAll, // (default) kAll, // (default)
@ -56,39 +72,47 @@ enum class DanglingPtrType {
// Note: This will be extended with LongLived // Note: This will be extended with LongLived
}; };
extern const BASE_EXPORT base::FeatureParam<DanglingPtrType> BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrType, kDanglingPtrTypeParam);
kDanglingPtrTypeParam;
#if BUILDFLAG(USE_STARSCAN) using PartitionAllocWithAdvancedChecksEnabledProcesses =
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan); internal::PAFeatureEnabledProcesses;
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize);
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValue(); BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValue();
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid(); BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocWithAdvancedChecks);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
PartitionAllocWithAdvancedChecksEnabledProcesses,
kPartitionAllocWithAdvancedChecksEnabledProcessesParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSchedulerLoopQuarantine); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSchedulerLoopQuarantine);
// Scheduler Loop Quarantine's capacity in bytes. // Scheduler Loop Quarantine's per-thread capacity in bytes.
extern const BASE_EXPORT base::FeatureParam<int> BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
kPartitionAllocSchedulerLoopQuarantineCapacity; int,
kPartitionAllocSchedulerLoopQuarantineBranchCapacity);
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
// TODO(https://crbug.com/387470567): Support more thread types.
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
enum class BackupRefPtrEnabledProcesses { // Eventually zero out most PartitionAlloc memory. This is not meant as a
// BRP enabled only in the browser process. // security guarantee, but to increase the compression ratio of PartitionAlloc's
kBrowserOnly, // fragmented super pages.
// BRP enabled only in the browser and renderer processes. BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory);
kBrowserAndRenderer,
// BRP enabled in all processes, except renderer. // Whether to make PartitionAlloc use fewer memory regions. This matters on
kNonRenderer, // Linux-based systems, where there is a per-process limit that we hit in some
// BRP enabled in all processes. // cases. See the comment in PartitionBucket::SlotSpanCOmmitedSize() for detail.
kAllProcesses, BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocFewerMemoryRegions);
}; #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
using BackupRefPtrEnabledProcesses = internal::PAFeatureEnabledProcesses;
enum class BackupRefPtrMode { enum class BackupRefPtrMode {
// BRP is disabled across all partitions. Equivalent to the Finch flag being // BRP is disabled across all partitions. Equivalent to the Finch flag being
@ -98,11 +122,6 @@ enum class BackupRefPtrMode {
// BRP is enabled in the main partition, as well as certain Renderer-only // BRP is enabled in the main partition, as well as certain Renderer-only
// partitions (if enabled in Renderer at all). // partitions (if enabled in Renderer at all).
kEnabled, kEnabled,
// As above, but "same slot" mode is used, as opposed to "previous slot".
// This means that ref-count is placed at the end of the same slot as the
// object it protects, as opposed to the end of the previous slot.
kEnabledInSameSlotMode,
}; };
enum class MemtagMode { enum class MemtagMode {
@ -112,75 +131,54 @@ enum class MemtagMode {
kAsync, kAsync,
}; };
enum class MemoryTaggingEnabledProcesses { enum class RetagMode {
// Memory tagging enabled only in the browser process. // Allocations are retagged by incrementing the current tag.
kBrowserOnly, kIncrement,
// Memory tagging enabled in all processes, except renderer.
kNonRenderer, // Allocations are retagged with a random tag.
// Memory tagging enabled in all processes. kRandom,
kAllProcesses,
}; };
using MemoryTaggingEnabledProcesses = internal::PAFeatureEnabledProcesses;
enum class BucketDistributionMode : uint8_t { enum class BucketDistributionMode : uint8_t {
kDefault, kDefault,
kDenser, kDenser,
}; };
// Parameter for 'kPartitionAllocMakeFreeNoOpOnShutdown' feature which
// controls when free() becomes a no-op during Shutdown()
enum class WhenFreeBecomesNoOp {
// Allocator is inserted either before, in, or after shutdown threads
kBeforeShutDownThreads,
kInShutDownThreads,
kAfterShutDownThreads,
};
// Inserts a no-op on 'free()' allocator shim at the front of the
// dispatch chain if called from the appropriate callsite.
BASE_EXPORT void MakeFreeNoOp(WhenFreeBecomesNoOp callsite);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMakeFreeNoOpOnShutdown);
extern const BASE_EXPORT base::FeatureParam<WhenFreeBecomesNoOp>
kPartitionAllocMakeFreeNoOpOnShutdownParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr);
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses> BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrEnabledProcesses,
kBackupRefPtrEnabledProcessesParam; kBackupRefPtrEnabledProcessesParam);
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode> BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrMode,
kBackupRefPtrModeParam; kBackupRefPtrModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(int,
kBackupRefPtrExtraExtrasSizeParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryTagging); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryTagging);
extern const BASE_EXPORT base::FeatureParam<MemtagMode> kMemtagModeParam; BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemtagMode, kMemtagModeParam);
extern const BASE_EXPORT base::FeatureParam<MemoryTaggingEnabledProcesses> BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(RetagMode, kRetagModeParam);
kMemoryTaggingEnabledProcessesParam; BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemoryTaggingEnabledProcesses,
kMemoryTaggingEnabledProcessesParam);
// Kill switch for memory tagging. Skips any code related to memory tagging when // Kill switch for memory tagging. Skips any code related to memory tagging when
// enabled. // enabled.
BASE_EXPORT BASE_DECLARE_FEATURE(kKillPartitionAllocMemoryTagging); BASE_EXPORT BASE_DECLARE_FEATURE(kKillPartitionAllocMemoryTagging);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
extern const BASE_EXPORT base::FeatureParam<bool> BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpDereferenceCheck);
kBackupRefPtrAsanEnableDereferenceCheckParam; BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpExtractionCheck);
extern const BASE_EXPORT base::FeatureParam<bool> BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpInstantiationCheck);
kBackupRefPtrAsanEnableExtractionCheckParam; BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BucketDistributionMode,
extern const BASE_EXPORT base::FeatureParam<bool> kPartitionAllocBucketDistributionParam);
kBackupRefPtrAsanEnableInstantiationCheckParam;
extern const BASE_EXPORT base::FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers); BASE_EXPORT BASE_DECLARE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanMUAwareScheduler);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanStackScanning);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDCScan);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanImmediateFreeing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanEagerClearing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer);
extern const BASE_EXPORT base::FeatureParam<TimeDelta> BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(TimeDelta,
kPartitionAllocMemoryReclaimerInterval; kPartitionAllocMemoryReclaimerInterval);
BASE_EXPORT BASE_DECLARE_FEATURE( BASE_EXPORT BASE_DECLARE_FEATURE(
kPartitionAllocStraightenLargerSlotSpanFreeLists); kPartitionAllocStraightenLargerSlotSpanFreeLists);
extern const BASE_EXPORT BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode> partition_alloc::StraightenLargerSlotSpanFreeListsMode,
kPartitionAllocStraightenLargerSlotSpanFreeListsMode; kPartitionAllocStraightenLargerSlotSpanFreeListsMode);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);
@ -189,15 +187,11 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPageAllocatorRetryOnCommitFailure);
#endif #endif
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
extern const base::FeatureParam<bool> BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
kPartialLowEndModeExcludePartitionAllocSupport; bool,
kPartialLowEndModeExcludePartitionAllocSupport);
#endif #endif
// Name of the synthetic trial associated with forcibly enabling BRP in
// all processes.
inline constexpr base::StringPiece kRendererLiveBRPSyntheticTrialName =
"BackupRefPtrRendererLive";
BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier); BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier);
BASE_EXPORT double GetThreadCacheMultiplier(); BASE_EXPORT double GetThreadCacheMultiplier();
BASE_EXPORT double GetThreadCacheMultiplierForAndroid(); BASE_EXPORT double GetThreadCacheMultiplierForAndroid();
@ -216,14 +210,24 @@ BASE_EXPORT int GetThreadCacheMinCachedMemoryForPurgingBytes();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition);
// This feature is additionally gated behind a buildflag because // When set, partitions use a larger ring buffer and free memory less
// pool offset freelists cannot be represented when PartitionAlloc uses // aggressively when in the foreground.
// 32-bit pointers. BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground);
#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
BASE_EXPORT BASE_DECLARE_FEATURE(kUsePoolOffsetFreelists);
#endif
} // namespace features // When enabled, uses a more nuanced heuristic to determine if slot
} // namespace base // spans can be treated as "single-slot."
//
// See also: https://crbug.com/333443437
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans);
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
using ShadowMetadataEnabledProcesses = internal::PAFeatureEnabledProcesses;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocShadowMetadata);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(ShadowMetadataEnabledProcesses,
kShadowMetadataEnabledProcessesParam);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace base::features
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

File diff suppressed because it is too large


@ -8,21 +8,18 @@
#include <map> #include <map>
#include <string> #include <string>
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/feature_list.h"
#include "base/memory/scoped_refptr.h" #include "base/memory/scoped_refptr.h"
#include "base/synchronization/lock.h" #include "base/synchronization/lock.h"
#include "base/task/sequenced_task_runner.h" #include "base/task/sequenced_task_runner.h"
#include "base/thread_annotations.h" #include "base/thread_annotations.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/thread_cache.h"
namespace base::allocator { namespace base::allocator {
#if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter();
#endif
// Starts a periodic timer on the current thread to purge all thread caches. // Starts a periodic timer on the current thread to purge all thread caches.
BASE_EXPORT void StartThreadCachePeriodicPurge(); BASE_EXPORT void StartThreadCachePeriodicPurge();
@ -40,13 +37,21 @@ BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();
BASE_EXPORT void InstallDanglingRawPtrChecks(); BASE_EXPORT void InstallDanglingRawPtrChecks();
BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks(); BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();
// Once called, makes `free()` do nothing. This is done to reduce
// shutdown hangs on CrOS.
// Does nothing if Dangling Pointer Detector (`docs/dangling_ptr.md`)
// is not active.
// Does nothing if allocator shim support is not built.
BASE_EXPORT void MakeFreeNoOp();
// Allows to re-configure PartitionAlloc at run-time. // Allows to re-configure PartitionAlloc at run-time.
class BASE_EXPORT PartitionAllocSupport { class BASE_EXPORT PartitionAllocSupport {
public: public:
struct BrpConfiguration { struct BrpConfiguration {
bool enable_brp = false; bool enable_brp = false;
bool ref_count_in_same_slot = false;
bool process_affected_by_brp_flag = false; // TODO(https://crbug.com/371135823): Remove after the investigation.
size_t extra_extras_size = 0;
}; };
// Reconfigure* functions re-configure PartitionAlloc. It is impossible to // Reconfigure* functions re-configure PartitionAlloc. It is impossible to
@ -81,10 +86,12 @@ class BASE_EXPORT PartitionAllocSupport {
void ReconfigureAfterTaskRunnerInit(const std::string& process_type); void ReconfigureAfterTaskRunnerInit(const std::string& process_type);
// |has_main_frame| tells us if the renderer contains a main frame. // |has_main_frame| tells us if the renderer contains a main frame.
void OnForegrounded(bool has_main_frame); // The default value is intended for other process types, where the parameter
// does not make sense.
void OnForegrounded(bool has_main_frame = false);
void OnBackgrounded(); void OnBackgrounded();
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) #if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
static std::string ExtractDanglingPtrSignatureForTests( static std::string ExtractDanglingPtrSignatureForTests(
std::string stacktrace); std::string stacktrace);
#endif #endif
@ -100,6 +107,11 @@ class BASE_EXPORT PartitionAllocSupport {
// For calling from within third_party/blink/. // For calling from within third_party/blink/.
static bool ShouldEnableMemoryTaggingInRendererProcess(); static bool ShouldEnableMemoryTaggingInRendererProcess();
// Returns true if PA advanced checks should be enabled if available for the
// given process type. May be called multiple times per process.
static bool ShouldEnablePartitionAllocWithAdvancedChecks(
const std::string& process_type);
private: private:
PartitionAllocSupport(); PartitionAllocSupport();
@ -112,12 +124,40 @@ class BASE_EXPORT PartitionAllocSupport {
std::string established_process_type_ GUARDED_BY(lock_) = "INVALID"; std::string established_process_type_ GUARDED_BY(lock_) = "INVALID";
#if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \ #if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
size_t largest_cached_size_ = size_t largest_cached_size_ =
::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold; ::partition_alloc::kThreadCacheDefaultSizeThreshold;
#endif #endif
}; };
BASE_EXPORT BASE_DECLARE_FEATURE(kDisableMemoryReclaimerInBackground);
// Visible in header for testing.
class BASE_EXPORT MemoryReclaimerSupport {
public:
static MemoryReclaimerSupport& Instance();
MemoryReclaimerSupport();
~MemoryReclaimerSupport();
void Start(scoped_refptr<TaskRunner> task_runner);
void SetForegrounded(bool in_foreground);
void ResetForTesting();
bool has_pending_task_for_testing() const { return has_pending_task_; }
static TimeDelta GetInterval();
// Visible for testing
static constexpr base::TimeDelta kFirstPAPurgeOrReclaimDelay =
base::Minutes(1);
private:
void Run();
void MaybeScheduleTask(TimeDelta delay = TimeDelta());
scoped_refptr<TaskRunner> task_runner_;
bool in_foreground_ = true;
bool has_pending_task_ = false;
};
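For context, a sketch of how an embedder might drive the class above; the wiring is illustrative (the real call sites are not shown in this hunk) and uses only the members declared in this header:

```cpp
// Illustrative wiring; mirrors the declarations above.
#include <utility>

#include "base/allocator/partition_alloc_support.h"
#include "base/task/sequenced_task_runner.h"

void StartPartitionAllocMemoryReclaim(
    scoped_refptr<base::SequencedTaskRunner> task_runner) {
  auto& reclaimer = base::allocator::MemoryReclaimerSupport::Instance();
  // The first purge/reclaim is scheduled after kFirstPAPurgeOrReclaimDelay.
  reclaimer.Start(std::move(task_runner));
  // Lets the scheduler adapt when the process moves between foreground and
  // background (see kDisableMemoryReclaimerInBackground above).
  reclaimer.SetForegrounded(/*in_foreground=*/true);
}
```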
} // namespace base::allocator } // namespace base::allocator
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_


@ -0,0 +1,7 @@
---
Checks: 'google-build-namespaces,
readability-redundant-smartptr-get,
readability-static-accessed-through-instance'
InheritParentConfig: true
HeaderFilterRegex: 'partition_alloc/*'
...


@ -0,0 +1,8 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is partition_alloc root GN configuration. It is used when built as a
# standalone project. This is not used in production.
buildconfig = "//gn/BUILDCONFIG.gn"


@ -11,8 +11,14 @@ group("buildflags") {
public_deps = [ "src/partition_alloc:buildflags" ] public_deps = [ "src/partition_alloc:buildflags" ]
} }
if (is_clang_or_gcc) { if (use_partition_alloc && is_clang_or_gcc) {
group("partition_alloc") { group("partition_alloc") {
public_deps = [ "src/partition_alloc:partition_alloc" ] public_deps = [ "src/partition_alloc:partition_alloc" ]
} }
} }
if (use_allocator_shim) {
group("allocator_shim") {
public_deps = [ "src/partition_alloc:allocator_shim" ]
}
}


@ -6,42 +6,52 @@
# project in order to be a standalone library. # project in order to be a standalone library.
noparent = True noparent = True
include_rules = [ # `partition_alloc` can depend only on itself, via its `include_dirs`.
# `partition_alloc` can depends on itself, via the `include_dirs` it declares. include_rules = [ "+partition_alloc" ]
"+partition_alloc",
# Build flags to infer the architecture and operating system in use.
"+build/build_config.h",
"+build/buildflag.h",
]
# TODO(crbug.com/40158212): Depending on what is tested, split the tests in
# between chromium and partition_alloc. Remove those exceptions:
specific_include_rules = { specific_include_rules = {
".*_(perf|unit)test\.cc$": [ # Dependencies on //testing:
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h", ".*_(perf|unit)?test.*\.(h|cc)": [
"+base/allocator/dispatcher/dispatcher.h",
"+base/debug/allocation_trace.h",
"+base/debug/debugging_buildflags.h",
"+base/debug/proc_maps_linux.h",
"+base/system/sys_info.h",
"+base/test/gtest_util.h",
"+base/timer/lap_timer.h",
"+base/win/windows_version.h",
"+testing/gmock/include/gmock/gmock.h", "+testing/gmock/include/gmock/gmock.h",
"+testing/gtest/include/gtest/gtest.h", "+testing/gtest/include/gtest/gtest.h",
"+testing/perf/perf_result_reporter.h", "+testing/perf/perf_result_reporter.h",
], ],
"extended_api\.cc$": [ "gtest_util.h": [
"+testing/gtest/include/gtest/gtest.h",
],
# Dependencies on //base:
"extended_api\.cc": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h", "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
], ],
"raw_(ptr|ref)_unittest\.cc$": [ "partition_alloc_perftest\.cc": [
"+base", "+base/allocator/dispatcher/dispatcher.h",
"+third_party/abseil-cpp/absl/types/optional.h", "+base/debug/allocation_trace.h",
"+third_party/abseil-cpp/absl/types/variant.h", "+base/debug/debugging_buildflags.h",
"+base/timer/lap_timer.h",
],
"partition_lock_perftest\.cc": [
"+base/timer/lap_timer.h",
],
"raw_ptr_unittest\.cc": [
"+base/allocator/partition_alloc_features.h",
"+base/allocator/partition_alloc_support.h",
"+base/cpu.h",
"+base/debug/asan_service.h",
"+base/metrics/histogram_base.h",
"+base/test/bind.h",
"+base/test/gtest_util.h",
"+base/test/memory/dangling_ptr_instrumentation.h",
"+base/test/scoped_feature_list.h",
"+base/types/to_address.h",
],
"raw_ref_unittest\.cc": [
"+base/debug/asan_service.h",
"+base/memory/raw_ptr_asan_service.h",
"+base/test/gtest_util.h",
], ],
"raw_ptr_test_support\.h$": [
"+testing/gmock/include/gmock/gmock.h",
"+third_party/abseil-cpp/absl/types/optional.h",
]
} }
# In the context of a module-level DEPS, the `deps` variable must be defined. # In the context of a module-level DEPS, the `deps` variable must be defined.


@ -1,4 +1,3 @@
bartekn@chromium.org
haraken@chromium.org haraken@chromium.org
keishi@chromium.org keishi@chromium.org
lizeb@chromium.org lizeb@chromium.org


@ -0,0 +1,249 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for base/allocator/partition_allocator.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
PRESUBMIT_VERSION = '2.0.0'
# This is the base path of the partition_alloc directory when stored inside the
# chromium repository. PRESUBMIT.py is executed from chromium.
_PARTITION_ALLOC_BASE_PATH = 'base/allocator/partition_allocator/src/'
# Pattern matching C/C++ source files, for use in allowlist args.
_SOURCE_FILE_PATTERN = r'.*\.(h|hpp|c|cc|cpp)$'
# Similar pattern, matching GN files.
_BUILD_FILE_PATTERN = r'.*\.(gn|gni)$'
# This is adapted from Chromium's PRESUBMIT.py. The differences are:
# - Base path: It is relative to the partition_alloc's source directory instead
# of chromium.
# - Stricter: A single format is allowed: `PATH_ELEM_FILE_NAME_H_`.
def CheckForIncludeGuards(input_api, output_api):
"""Check that header files have proper include guards"""
def guard_for_file(file):
local_path = file.LocalPath()
if input_api.is_windows:
local_path = local_path.replace('\\', '/')
assert local_path.startswith(_PARTITION_ALLOC_BASE_PATH)
guard = input_api.os_path.normpath(
local_path[len(_PARTITION_ALLOC_BASE_PATH):])
guard = guard + '_'
guard = guard.upper()
guard = input_api.re.sub(r'[+\\/.-]', '_', guard)
return guard
def is_partition_alloc_header_file(f):
# We only check header files.
return f.LocalPath().endswith('.h')
errors = []
for f in input_api.AffectedSourceFiles(is_partition_alloc_header_file):
expected_guard = guard_for_file(f)
# Unlike the Chromium's top-level PRESUBMIT.py, we enforce a stricter
# rule which accepts only `PATH_ELEM_FILE_NAME_H_` per coding style.
guard_name_pattern = input_api.re.escape(expected_guard)
guard_pattern = input_api.re.compile(r'#ifndef\s+(' +
guard_name_pattern + ')')
guard_name = None
guard_line_number = None
seen_guard_end = False
for line_number, line in enumerate(f.NewContents()):
if guard_name is None:
match = guard_pattern.match(line)
if match:
guard_name = match.group(1)
guard_line_number = line_number
continue
# The line after #ifndef should have a #define of the same name.
if line_number == guard_line_number + 1:
expected_line = '#define %s' % guard_name
if line != expected_line:
errors.append(
output_api.PresubmitPromptWarning(
'Missing "%s" for include guard' % expected_line,
['%s:%d' % (f.LocalPath(), line_number + 1)],
'Expected: %r\nGot: %r' % (expected_line, line)))
if not seen_guard_end and line == '#endif // %s' % guard_name:
seen_guard_end = True
continue
if seen_guard_end:
if line.strip() != '':
errors.append(
output_api.PresubmitPromptWarning(
'Include guard %s not covering the whole file' %
(guard_name), [f.LocalPath()]))
break # Nothing else to check and enough to warn once.
if guard_name is None:
errors.append(
output_api.PresubmitPromptWarning(
'Missing include guard in %s\n'
'Recommended name: %s\n' %
(f.LocalPath(), expected_guard)))
return errors
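Concretely, for a header at `partition_alloc/random.h` under the base path above, the only guard accepted by this check is the one also exercised by the unit test further down:

```cpp
#ifndef PARTITION_ALLOC_RANDOM_H_
#define PARTITION_ALLOC_RANDOM_H_

// ... declarations ...

#endif  // PARTITION_ALLOC_RANDOM_H_
```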
# In .gn and .gni files, check there are no unexpected dependencies on files
# located outside of the partition_alloc repository.
#
# This is important, because partition_alloc has no CQ bots of its own; it is
# covered only through chromium's CQ.
#
# Only //build_overrides/ is allowed, as it provides embedders a way to
# override the default build settings and forward the dependencies to
# partition_alloc.
def CheckNoExternalImportInGn(input_api, output_api):
# Match and capture <path> from import("<path>").
import_re = input_api.re.compile(r'^ *import\("([^"]+)"\)')
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
files_to_skip=[],
files_to_check=[_BUILD_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
for line_number, line in f.ChangedContents():
match = import_re.search(line)
if not match:
continue
import_path = match.group(1)
if import_path.startswith('//build_overrides/'):
continue
if not import_path.startswith('//'):
continue;
errors.append(output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows external import: %s' %
(f.LocalPath(), line_number + 1, import_path)))
return errors;
# partition_alloc still supports C++17, because Skia still uses C++17.
def CheckCpp17CompatibleHeaders(input_api, output_api):
CPP_20_HEADERS = [
"barrier",
"bit",
#"compare", Three-way comparison may be used under appropriate guards.
"format",
"numbers",
"ranges",
"semaphore",
"source_location",
"span",
"stop_token",
"syncstream",
"version",
]
CPP_23_HEADERS = [
"expected",
"flat_map",
"flat_set",
"generator",
"mdspan",
"print",
"spanstream",
"stacktrace",
"stdatomic.h",
"stdfloat",
]
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
# compiler_specific.h may use these headers in guarded ways.
files_to_skip=[
r'.*partition_alloc_base/augmentations/compiler_specific\.h'
],
files_to_check=[_SOURCE_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
# for line_number, line in f.ChangedContents():
for line_number, line in enumerate(f.NewContents()):
for header in CPP_20_HEADERS:
if not "#include <%s>" % header in line:
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++20 headers: <%s>'
% (f.LocalPath(), line_number + 1, header)))
for header in CPP_23_HEADERS:
if not "#include <%s>" % header in line:
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++23 headers: <%s>'
% (f.LocalPath(), line_number + 1, header)))
return errors
def CheckCpp17CompatibleKeywords(input_api, output_api):
CPP_20_KEYWORDS = [
"concept",
"consteval",
"constinit",
"co_await",
"co_return",
"co_yield",
"requires",
"std::hardware_",
"std::is_constant_evaluated",
"std::bit_cast",
"std::midpoint",
"std::to_array",
]
# Note: C++23 doesn't introduce new keywords.
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
# compiler_specific.h may use these keywords in guarded macros.
files_to_skip=[r'.*partition_alloc_base/compiler_specific\.h'],
files_to_check=[_SOURCE_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
for line_number, line in f.ChangedContents():
for keyword in CPP_20_KEYWORDS:
if not keyword in line:
continue
# Skip if part of a comment
if '//' in line and line.index('//') < line.index(keyword):
continue
# Make sure there are word separators around the keyword:
regex = r'\b%s\b' % keyword
if not input_api.re.search(regex, line):
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++20 keywords: %s'
% (f.LocalPath(), line_number + 1, keyword)))
return errors
# Check `NDEBUG` is not used inside partition_alloc. We prefer to use the
# buildflags `#if PA_BUILDFLAG(IS_DEBUG)` instead.
def CheckNoNDebug(input_api, output_api):
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
files_to_skip=[],
files_to_check=[_SOURCE_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
for line_number, line in f.ChangedContents():
if 'NDEBUG' in line:
errors.append(output_api.PresubmitError('%s:%d\nPartitionAlloc '
% (f.LocalPath(), line_number + 1)
+ 'disallows NDEBUG, use PA_BUILDFLAG(IS_DEBUG) instead'))
return errors
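The pattern the check above pushes toward, sketched; the include follows this patch's `partition_alloc/...` convention and `PA_BUILDFLAG(IS_DEBUG)` is the macro named in the comment:

```cpp
#include "partition_alloc/buildflags.h"

// Disallowed inside partition_alloc:
//   #if !defined(NDEBUG)
//   ...debug-only checks...
//   #endif

// Preferred:
#if PA_BUILDFLAG(IS_DEBUG)
// ...debug-only checks...
#endif
```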


@ -0,0 +1,83 @@
#!/usr/bin/env python3
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
import PRESUBMIT
# Append chrome source root to import `PRESUBMIT_test_mocks.py`.
sys.path.append(
os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from PRESUBMIT_test_mocks import MockAffectedFile, MockInputApi, MockOutputApi
_PARTITION_ALLOC_BASE_PATH = 'base/allocator/partition_allocator/src/'
class PartitionAllocIncludeGuardsTest(unittest.TestCase):
def _CheckForIncludeGuardsWithMock(self, filename, lines):
mock_input_api = MockInputApi()
mock_input_api.files = [MockAffectedFile(filename, lines)]
mock_output_api = MockOutputApi()
return PRESUBMIT.CheckForIncludeGuards(mock_input_api, mock_output_api)
def testExpectedGuardNameDoesNotError(self):
lines = [
'#ifndef PARTITION_ALLOC_RANDOM_H_',
'#define PARTITION_ALLOC_RANDOM_H_',
'#endif // PARTITION_ALLOC_RANDOM_H_'
]
errors = self._CheckForIncludeGuardsWithMock(
_PARTITION_ALLOC_BASE_PATH + 'partition_alloc/random.h', lines)
self.assertEqual(0, len(errors))
def testMissingGuardErrors(self):
lines = []
errors = self._CheckForIncludeGuardsWithMock(
_PARTITION_ALLOC_BASE_PATH + 'partition_alloc/random.h', lines)
self.assertEqual(1, len(errors))
self.assertIn('Missing include guard', errors[0].message)
self.assertIn('Recommended name: PARTITION_ALLOC_RANDOM_H_',
errors[0].message)
def testMissingGuardInNonHeaderFileDoesNotError(self):
lines = []
errors = self._CheckForIncludeGuardsWithMock(
_PARTITION_ALLOC_BASE_PATH + 'partition_alloc/random.cc', lines)
self.assertEqual(0, len(errors))
def testGuardNotCoveringWholeFileErrors(self):
lines = [
'#ifndef PARTITION_ALLOC_RANDOM_H_',
'#define PARTITION_ALLOC_RANDOM_H_',
'#endif // PARTITION_ALLOC_RANDOM_H_',
'int oh_i_forgot_to_guard_this;'
]
errors = self._CheckForIncludeGuardsWithMock(
_PARTITION_ALLOC_BASE_PATH + 'partition_alloc/random.h', lines)
self.assertEqual(1, len(errors))
self.assertIn('not covering the whole file', errors[0].message)
def testMissingDefineInGuardErrors(self):
lines = [
'#ifndef PARTITION_ALLOC_RANDOM_H_',
'int somehow_put_here;'
'#define PARTITION_ALLOC_RANDOM_H_',
'#endif // PARTITION_ALLOC_RANDOM_H_',
]
errors = self._CheckForIncludeGuardsWithMock(
_PARTITION_ALLOC_BASE_PATH + 'partition_alloc/random.h', lines)
self.assertEqual(1, len(errors))
self.assertIn(
'Missing "#define PARTITION_ALLOC_RANDOM_H_" for include guard',
errors[0].message)
if __name__ == '__main__':
unittest.main()


@ -111,7 +111,7 @@ partition page that holds metadata (32B struct per partition page).
* Although only five colors are shown, in reality, a super page holds * Although only five colors are shown, in reality, a super page holds
tens of slot spans, some of which belong to the same bucket. tens of slot spans, some of which belong to the same bucket.
* The system page that holds metadata tracks each partition page with one 32B * The system page that holds metadata tracks each partition page with one 32B
[`PartitionPage` struct][PartitionPage], which is either [`PartitionPageMetadata` struct][PartitionPage], which is either
* a [`SlotSpanMetadata`][SlotSpanMetadata] ("v"s in the diagram) or * a [`SlotSpanMetadata`][SlotSpanMetadata] ("v"s in the diagram) or
* a [`SubsequentPageMetadata`][SubsequentPageMetadata] ("+"s in the * a [`SubsequentPageMetadata`][SubsequentPageMetadata] ("+"s in the
diagram). diagram).
@ -119,7 +119,7 @@ partition page that holds metadata (32B struct per partition page).
of each super page). of each super page).
* In some configurations, PartitionAlloc stores more metadata than can * In some configurations, PartitionAlloc stores more metadata than can
fit in the one system page at the front. These are the bitmaps for fit in the one system page at the front. These are the bitmaps for
StarScan and `MTECheckedPtr<T>`, and they are relegated to the head of `MTECheckedPtr<T>`, and they are relegated to the head of
what would otherwise be usable space for slot spans. One, both, or what would otherwise be usable space for slot spans. One, both, or
none of these bitmaps may be present, depending on build none of these bitmaps may be present, depending on build
configuration, runtime configuration, and type of allocation. configuration, runtime configuration, and type of allocation.
@ -197,7 +197,7 @@ the inaccuracy can't happen in the other direction, i.e. an active span can only
be on the active list, and an empty span can only be on the active or empty be on the active list, and an empty span can only be on the active or empty
list. list.
[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4 [PartitionPage]: https://source.chromium.org/search?q=-file:third_party/(angle|dawn)%20class:PartitionPageMetadata%20file:partition_page.h&ss=chromium
[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4 [SlotSpanMetadata]: https://source.chromium.org/search?q=-file:third_party/(angle|dawn)%20class:SlotSpanMetadata%20file:partition_page.h&ss=chromium
[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4 [SubsequentPageMetadata]: https://source.chromium.org/search?q=-file:third_party/(angle|dawn)%20class:SubsequentPageMetadata%20file:partition_page.h&ss=chromium
[payload-start]: https://source.chromium.org/chromium/chromium/src/+/35b2deed603dedd4abb37f204d516ed62aa2b85c:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=454 [payload-start]: https://source.chromium.org/search?q=-file:third_party%2F(angle%7Cdawn)%20content:SuperPagePayloadBegin%20file:partition_page.h&ss=chromium


@ -1,9 +0,0 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file will be used to check out PartitionAlloc and to build it as
# standalone library. In this case, PartitionAlloc needs to define
# build_with_chromium. If building PartitionAlloc as a part of chromium,
# chromium will provide build_with_chromium=true.
build_with_chromium = false


@ -2,7 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
import("//build_overrides/build.gni") # By definition, PartitionAlloc standalone builds outside of chromium.
build_with_chromium = false
# This is the default build configuration when building PartitionAlloc # This is the default build configuration when building PartitionAlloc
# as a standalone library. # as a standalone library.
@ -14,8 +15,13 @@ use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false enable_backup_ref_ptr_support_default = false
enable_backup_ref_ptr_slow_checks_default = false enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false enable_dangling_raw_ptr_checks_default = false
enable_ios_corruption_hardening_default = false
# This is the default build configuration for pointers/raw_ptr*. # This is the default build configuration for pointers/raw_ptr*.
raw_ptr_zero_on_construct_default = true raw_ptr_zero_on_construct_default = true
raw_ptr_zero_on_move_default = true raw_ptr_zero_on_move_default = true
raw_ptr_zero_on_destruct_default = false raw_ptr_zero_on_destruct_default = false
# PartitionAlloc needs to support cpp17 for standalone builds, as long as Skia
# supports it.
assert_cpp20_default = false


@ -36,9 +36,9 @@ implementation of `raw_ptr<T>`.
PartitionAlloc provides APIs to PartitionAlloc provides APIs to
* reclaim memory (see [memory\_reclaimer.h](./memory_reclaimer.h)) and * reclaim memory (see `memory_reclaimer.h`) and
* purge thread caches (see [thread\_cache.h](./thread_cache.h)). * purge thread caches (see `thread_cache.h`).
Both of these must be called by the embedder external to PartitionAlloc. Both of these must be called by the embedder external to PartitionAlloc.
PA provides neither an event loop nor timers of its own, delegating this PA provides neither an event loop nor timers of its own, delegating this
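A sketch of what that embedder-driven maintenance can look like; the entry points (`MemoryReclaimer::Instance()`, `ThreadCacheRegistry::Instance()`) are assumptions based on the headers named above, not code shown in this diff:

```cpp
// Illustrative embedder hook; the exact API names are assumptions.
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/thread_cache.h"

// Called from the embedder's own timer or idle task, since PartitionAlloc
// schedules neither reclaim nor thread-cache purge itself.
void PeriodicPartitionAllocMaintenance() {
  ::partition_alloc::MemoryReclaimer::Instance()->ReclaimNormal();
  ::partition_alloc::ThreadCacheRegistry::Instance().PurgeAll();
}
```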


@ -4,9 +4,11 @@ This page describes some core terminology used in PartitionAlloc.
A weak attempt is made to present terms "in conceptual order" s.t. A weak attempt is made to present terms "in conceptual order" s.t.
each term depends mainly upon previously defined ones. each term depends mainly upon previously defined ones.
* **Partition**: A heap that is separated and protected both from other ### Partition
partitions and from non-PartitionAlloc memory. Each partition holds
multiple buckets. A heap that is separated and protected both from other
partitions and from non-PartitionAlloc memory. Each partition holds
multiple buckets.
*** promo *** promo
**NOTE**: In code (and comments), "partition," "root," and even **NOTE**: In code (and comments), "partition," "root," and even
@ -15,97 +17,142 @@ each term depends mainly upon previously defined ones.
## Pages ## Pages
* **System Page**: A memory page defined by the CPU/OS. Commonly ### System Page
referred to as a "virtual page" in other contexts. This is typically
4KiB, but it can be larger. PartitionAlloc supports up to 64KiB, A memory page defined by the CPU/OS. Commonly
though this constant isn't always known at compile time (depending referred to as a "virtual page" in other contexts. This is typically
on the OS). 4KiB, but it can be larger. PartitionAlloc supports up to 64KiB,
* **Partition Page**: The most common granularity used by though this constant isn't always known at compile time (depending
PartitionAlloc. Consists of exactly 4 system pages. on the OS).
* **Super Page**: A 2MiB region, aligned on a 2MiB boundary. Not to
be confused with OS-level terms like "large page" or "huge page", ### Partition Page
which are also commonly 2MiB. These have to be fully committed /
uncommitted in memory, whereas super pages can be partially committed The most common granularity used by
with system page granularity. PartitionAlloc. Consists of exactly 4 system pages.
* **Extent**: An extent is a run of consecutive super pages (belonging
to a single partition). Extents are to super pages what slot spans are ### Super Page
to slots (see below).
A 2MiB region, aligned on a 2MiB boundary. Not to
be confused with OS-level terms like "large page" or "huge page",
which are also commonly 2MiB. These have to be fully committed /
uncommitted in memory, whereas super pages can be partially committed
with system page granularity.
### Extent
An extent is a run of consecutive super pages (belonging
to a single partition). Extents are to super pages what slot spans are
to slots (see below).
## Slots and Spans ## Slots and Spans
* **Slot**: An indivisible allocation unit. Slot sizes are tied to ### Slot
buckets. For example, each allocation that falls into the bucket
(224,&nbsp;256] would be satisfied with a slot of size 256. This An indivisible allocation unit. Slot sizes are tied to
applies only to normal buckets, not to direct map. buckets. For example, each allocation that falls into the bucket
* **Slot Span**: A run of same-sized slots that are contiguous in (224,&nbsp;256] would be satisfied with a slot of size 256. This
memory. Slot span size is a multiple of partition page size, but it applies only to normal buckets, not to direct map.
isn't always a multiple of slot size, although we try hard for this
to be the case. ### Slot Span
* **Small Bucket**: Allocations up to 4 partition pages. In these
cases, slot spans are always between 1 and 4 partition pages in A run of same-sized slots that are contiguous in
size. For each slot span size, the slot span is chosen to minimize memory. Slot span size is a multiple of partition page size, but it
number of pages used while keeping the rounding waste under a isn't always a multiple of slot size, although we try hard for this
reasonable limit. to be the case.
* For example, for a slot size 96, 64B waste is deemed acceptable
when using a single partition page, but for slot size ### Small Bucket
384, the potential waste of 256B wouldn't be, so 3 partition pages
are used to achieve 0B waste. Allocations up to 4 partition pages. In these
* PartitionAlloc may avoid waste by lowering the number of committed cases, slot spans are always between 1 and 4 partition pages in
system pages compared to the number of reserved pages. For size. For each slot span size, the slot span is chosen to minimize
example, for the slot size of 896B we'd use a slot span of 2 number of pages used while keeping the rounding waste under a
partition pages of 16KiB, i.e. 8 system pages of 4KiB, but commit reasonable limit.
only up to 7, thus resulting in perfect packing.
* **Single-Slot Span**: Allocations above 4 partition pages (but * For example, for a slot size 96, 64B waste is deemed acceptable
&le;`kMaxBucketed`). This is because each slot span is guaranteed to when using a single partition page, but for slot size
hold exactly one slot. 384, the potential waste of 256B wouldn't be, so 3 partition pages
* Fun fact: there are sizes &le;4 partition pages that result in a are used to achieve 0B waste.
slot span having exactly 1 slot, but nonetheless they're still * PartitionAlloc may avoid waste by lowering the number of committed
classified as small buckets. The reason is that single-slot spans system pages compared to the number of reserved pages. For
are often handled by a different code path, and that distinction example, for the slot size of 896B we'd use a slot span of 2
is made purely based on slot size, for simplicity and efficiency. partition pages of 16KiB, i.e. 8 system pages of 4KiB, but commit
only up to 7, thus resulting in perfect packing.
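The waste figures in the small-bucket examples above can be checked directly; a short sketch, assuming 4 KiB system pages and therefore 16 KiB partition pages (4 system pages each), as described earlier:

```cpp
#include <cstddef>

constexpr std::size_t kSystemPageSize = 4096;
constexpr std::size_t kPartitionPageSize = 4 * kSystemPageSize;

// Rounding waste when packing slots of `slot_size` into `partition_pages`.
constexpr std::size_t Waste(std::size_t slot_size,
                            std::size_t partition_pages) {
  return (partition_pages * kPartitionPageSize) % slot_size;
}

static_assert(Waste(96, 1) == 64, "96B slots: 64B waste in one partition page");
static_assert(Waste(384, 1) == 256, "384B slots: 256B waste in one page");
static_assert(Waste(384, 3) == 0, "384B slots: 0B waste with three pages");
// 896B slots: a 2-partition-page span with only 7 of 8 system pages committed
// is 28672B, i.e. exactly 32 slots.
static_assert((7 * kSystemPageSize) % 896 == 0,
              "896B slots pack 7 committed system pages exactly");
```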
### Single-Slot Span
Allocations above 4 partition pages (but
&le;`kMaxBucketed`). This is because each slot span is guaranteed to
hold exactly one slot.
*** promo
Fun fact: there are sizes &le;4 partition pages that result in a
slot span having exactly 1 slot, but nonetheless they're still
classified as small buckets. The reason is that single-slot spans
are often handled by a different code path, and that distinction
is made purely based on slot size, for simplicity and efficiency.
***
## Buckets ## Buckets
* **Bucket**: A collection of regions in a partition that contains ### Bucket
similar-sized objects. For example, one bucket may hold objects of
size (224,&nbsp;256], another (256,&nbsp;320], etc. Bucket size A collection of regions in a partition that contains
brackets are geometrically spaced, similar-sized objects. For example, one bucket may hold objects of
[going up to `kMaxBucketed`][max-bucket-comment]. size (224,&nbsp;256], another (256,&nbsp;320], etc. Bucket size
* Plainly put, all slots (ergo the resulting spans) of a given size brackets are geometrically spaced,
class are logically chained into one bucket. [going up to `kMaxBucketed`][max-bucket-comment].
*** promo
Plainly put, all slots (ergo the resulting spans) of a given size
class are logically chained into one bucket.
***
![A bucket, spanning multiple super pages, collects spans whose ![A bucket, spanning multiple super pages, collects spans whose
slots are of a particular size class.](./src/partition_alloc/dot/bucket.png) slots are of a particular size class.](./src/partition_alloc/dot/bucket.png)
* **Normal Bucket**: Any bucket whose size ceiling does not exceed ### Normal Bucket
`kMaxBucketed`. This is the common case in PartitionAlloc, and
the "normal" modifier is often dropped in casual reference. Any bucket whose size ceiling does not exceed
* **Direct Map (Bucket)**: Any allocation whose size exceeds `kMaxBucketed`. `kMaxBucketed`. This is the common case in PartitionAlloc, and
the "normal" modifier is often dropped in casual reference.
### Direct Map (Bucket)
Any allocation whose size exceeds `kMaxBucketed`.
## Other Terms

### Object

A chunk of memory returned to the allocating invoker
of the size requested. It doesn't have to span the entire slot,
nor does it have to begin at the slot start. This term is commonly
used as a parameter name in PartitionAlloc code, as opposed to
`slot_start`.

### Thread Cache

A [thread-local structure][pa-thread-cache] that
holds some not-too-large memory chunks, ready to be allocated. This
speeds up in-thread allocation by reducing a lock hold to a
thread-local storage lookup, improving cache locality.
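
To make the idea concrete, here is a toy Python sketch of a per-thread,
per-bucket cache sitting in front of a slower, locked allocator. All names are
made up; the real structure is the thread cache linked above.

```python
import threading

_tls = threading.local()

def _cache(bucket):
    # One small list of recycled chunks per bucket, stored in TLS.
    if not hasattr(_tls, "buckets"):
        _tls.buckets = {}
    return _tls.buckets.setdefault(bucket, [])

def cached_alloc(bucket, locked_alloc):
    cache = _cache(bucket)
    if cache:                       # fast: a TLS lookup and a list pop, no lock
        return cache.pop()
    return locked_alloc(bucket)     # slow: fall back to the locked allocator

def cached_free(bucket, chunk, limit=16):
    cache = _cache(bucket)
    if len(cache) < limit:          # keep only a "not-too-large" number cached
        cache.append(chunk)
    # else: a real implementation would return the chunk to its slot span
```
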
### Pool
A large (and contiguous on 64-bit) virtual address region, housing
super pages, etc. from which PartitionAlloc services allocations. The
primary purpose of the pools is to provide a fast answer to the
question, "Did PartitionAlloc allocate the memory for this pointer
from this pool?" with a single bit-masking operation.
* The regular pool is a general purpose pool that contains allocations that
  aren't protected by BackupRefPtr.
* The BRP pool contains all allocations protected by BackupRefPtr.
* [64-bit only] The configurable pool is named generically, because its
  primary user (the [V8 Sandbox][v8-sandbox]) can configure it at runtime,
  providing a pre-existing mapping. Its allocations aren't protected by
  BackupRefPtr.
* [64-bit only] The thread isolated pool returns memory protected with
  per-thread permissions. At the moment, this is implemented for pkeys on x64.
  Its primary user is [V8 CFI][v8-cfi].
@ -117,23 +164,33 @@ Pools are downgraded into a logical concept in 32-bit environments,
tracking a non-contiguous set of allocations using a bitmap.

***
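
A minimal Python sketch of that membership test, assuming a pool whose base
address is aligned to its power-of-two size; the constants are made-up figures,
not PartitionAlloc's.

```python
POOL_SIZE = 1 << 34                 # assumed 16 GiB pool, a power of two
POOL_BASE = 0x40_0000_0000          # assumed base, aligned to POOL_SIZE
POOL_MASK = ~(POOL_SIZE - 1)

def in_pool(address):
    # Clearing the low bits maps every in-pool address back to the base.
    return (address & POOL_MASK) == POOL_BASE

print(in_pool(POOL_BASE + 0x1234))  # True
print(in_pool(POOL_BASE - 1))       # False
```
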
### Payload

The usable area of a super page in which slot spans
reside. While generally this means "everything between the first
and last guard partition pages in a super page," the presence of
other metadata can bump the starting offset
forward. While this term is entrenched in the code, the team
considers it suboptimal and is actively looking for a replacement.

### Allocation Fast Path

A path taken during an allocation that is
considered fast. Usually means that an allocation request can be
immediately satisfied by grabbing a slot from the freelist of the
first active slot span in the bucket.

### Allocation Slow Path
Anything which is not fast (see above).
Can involve
* finding another active slot span in the list,
* provisioning more slots in a slot span,
* bringing back a free (or decommitted) slot span,
* allocating a new slot span, or even
* allocating a new super page.
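
A minimal Python sketch of the fast/slow split described above; every
`bucket.*` helper is a hypothetical stand-in for the real machinery, and each
is assumed to leave a usable span at the front of the active list on success.

```python
def allocate(bucket):
    # Fast path: pop a slot off the freelist of the first active slot span.
    if bucket.active_spans and bucket.active_spans[0].freelist:
        return bucket.active_spans[0].freelist.pop()

    # Slow path: try progressively heavier remedies, roughly in the order
    # listed above. (Hypothetical helper names.)
    for step in (bucket.find_other_active_span,
                 bucket.provision_more_slots,
                 bucket.revive_free_or_decommitted_span,
                 bucket.allocate_new_slot_span):   # may map a new super page
        if step():
            return bucket.active_spans[0].freelist.pop()
    raise MemoryError("bucket exhausted")
```
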
*** aside

By "slow" we may mean something as simple as extra logic (`if`
@ -146,12 +203,14 @@ These terms are (mostly) deprecated and should not be used. They are
surfaced here to provide a ready reference for readers coming from
older design documents or documentation.

### GigaCage

A memory region several gigabytes wide, reserved by
PartitionAlloc upon initialization, from which nearly all allocations
are taken. _Pools_ have overtaken GigaCage in conceptual importance,
and so there is less need today to refer to "GigaCage" or the
"cage." This is especially true given the V8 Sandbox and the
configurable pool (see above).
## PartitionAlloc-Everywhere
@ -174,13 +233,13 @@ minimize fragmentation.
As of 2022, PartitionAlloc-Everywhere is supported on

* Windows 32- and 64-bit
* Linux
* Android 32- and 64-bit
* macOS
* Fuchsia

[max-bucket-comment]: https://source.chromium.org/search?q=-file:third_party%2F(angle%7Cdawn)%20file:partition_alloc_constants.h%20symbol:kMaxBucketed$&ss=chromium
[pa-thread-cache]: https://source.chromium.org/search?q=-file:third_party%2F(angle%7Cdawn)%20file:partition_alloc/thread_cache.h&ss=chromium
[v8-sandbox]: https://docs.google.com/document/d/1FM4fQmIhEqPG8uGp5o9A-mnPB5BOeScZYpkHjo0KKA8/preview#
[v8-cfi]: https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/preview#

View file

@ -0,0 +1,106 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is the BUILDCONFIG for building partition_alloc as a standalone project.
#
# The config is based on:
# - skia: //gn/BUILDCONFIG.gn
# - chromium: //build/config/BUILDCONFIG.gn
build_with_chromium = false
is_asan = false
# It's best to keep the names and defaults of is_foo flags consistent with:
# - Chrome
# - Skia.
declare_args() {
is_official_build = false
is_component_build = false
dcheck_always_on = true
}
declare_args() {
is_debug = !is_official_build
}
# Platform detection defaults:
if (target_os == "") {
target_os = host_os
}
if (current_os == "") {
current_os = target_os
}
if (target_cpu == "") {
target_cpu = host_cpu
}
if (target_cpu == "x86_64") {
target_cpu = "x64"
}
if (current_cpu == "") {
current_cpu = target_cpu
}
is_android = current_os == "android"
is_chromeos = false
is_fuchsia = current_os == "fuchsia"
is_ios = current_os == "ios"
is_linux = current_os == "linux"
is_mac = current_os == "mac"
is_nacl = false
is_win = current_os == "win" || current_os == "winuwp"
is_cast_android = false
is_castos = false
is_cronet_build = false
enable_expensive_dchecks = false
dcheck_is_configurable = false
can_unwind_with_frame_pointers = false
is_posix = !is_win && !is_fuchsia
is_apple = is_mac || is_ios
# TODO(crbug.com/41481467): Consider expanding the standalone configuration for
# additional OSes.
assert(is_linux, "PartitionAlloc standalone only supports Linux for now")
is_clang = true
# A component is either:
# - A static library (is_component_build=false)
# - A shared library (is_component_build=true)
template("component") {
if (is_component_build) {
_component_mode = "shared_library"
} else {
_component_mode = "static_library"
}
target(_component_mode, target_name) {
forward_variables_from(invoker, "*")
}
}
# Default configs
default_configs = [
"//gn/partition_alloc:default",
"//gn/partition_alloc:no_exceptions",
"//gn/partition_alloc:no_rtti",
]
if (!is_debug) {
default_configs += [
"//gn/partition_alloc:optimize",
"//gn/partition_alloc:NDEBUG",
]
}
# GCC-like toolchains, including Clang.
set_default_toolchain("//gn/toolchain:clang")
default_toolchain_name = "clang"
set_defaults("source_set") {
configs = default_configs
}
set_defaults("component") {
configs = default_configs
}

View file

@ -0,0 +1,2 @@
arthursonzogni@chromium.org
tasak@google.com

View file

@ -0,0 +1,33 @@
# PartitionAlloc standalone GN config
This directory contains a GN configuration to build partition_alloc as a
standalone library.
This is not an official product that is supported by the Chromium project. There
are no guarantees that this will work in the future, or that it will work in
all configurations. There is no commit queue and there are no trybots using it.
This is useful for verifying that partition_alloc can be built as a library, and
for discovering the formal dependencies that partition_alloc has on the rest of
the Chromium project. It is not intended to be used in production code, and is
not supported.
This is also provided as a convenience for chromium developers working on
partition_alloc who want to iterate on partition_alloc without having to build
the entire Chromium project.
/!\ This is under construction. /!\
## Building
```sh
gn gen out/Default
autoninja -C out/Default
```
## Supported configurations:
### Platforms
- Linux
### Toolchains
- Clang

View file

@ -0,0 +1,25 @@
#!/usr/bin/env python3
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copied from Skia's //gn/cp.py
import os
import shutil
import sys
src, dst = sys.argv[1:]
if os.path.exists(dst):
if os.path.isdir(dst):
shutil.rmtree(dst)
else:
os.remove(dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy2(src, dst)
# Work around https://github.com/ninja-build/ninja/issues/1554
os.utime(dst, None)

View file

@ -0,0 +1,45 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
config("default") {
asmflags = []
cflags = []
cflags = [
"-Wno-return-type", # TODO(crbug.com/41481467): Fix this warning.
"-Wno-invalid-offsetof", # TODO(crbug.com/41481467): Fix this warning.
"-fstrict-aliasing",
"-fPIC",
"-fvisibility=hidden",
]
cflags_cc = [
"-std=c++17",
"-fvisibility-inlines-hidden",
]
cflags_objcc = cflags_cc
defines = []
ldflags = []
libs = [ "pthread" ]
# TODO(crbug.com/41481467): Consider creating a bot running partition_alloc
# with extra flags enforced only in the standalone configuration. Then we can
# remove the extra warnings when embedded.
}
config("no_exceptions") {
cflags_cc = [ "-fno-exceptions" ]
cflags_objcc = cflags_cc
}
config("no_rtti") {
cflags_cc = [ "-fno-rtti" ]
cflags_objcc = cflags_cc
}
config("optimize") {
cflags = [ "-O3" ]
}
config("NDEBUG") {
defines = [ "NDEBUG" ]
}

View file

@ -0,0 +1,18 @@
#!/usr/bin/env python3
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copied from Skia's //gn/rm.py
import os
import shutil
import sys
dst, = sys.argv[1:]
if os.path.exists(dst):
if os.path.isdir(dst):
shutil.rmtree(dst)
else:
os.remove(dst)

View file

@ -0,0 +1,61 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
toolchain("clang") {
ar = "llvm-ar"
cc = "clang"
cxx = "clang++"
link = "clang++"
tool("cc") {
depfile = "{{output}}.d"
command = "$cc -MD -MF $depfile {{defines}} {{include_dirs}} {{cflags}} {{cflags_c}} -c {{source}} -o {{output}}"
depsformat = "gcc"
outputs =
[ "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o" ]
description = "CC {{source}}"
}
tool("cxx") {
depfile = "{{output}}.d"
command = "$cxx -MD -MF $depfile {{defines}} {{include_dirs}} {{cflags}} {{cflags_cc}} -c {{source}} -o {{output}}"
depsformat = "gcc"
outputs =
[ "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o" ]
description = "CXX {{source}}"
}
tool("alink") {
rspfile = "{{output}}.rsp"
rspfile_content = "{{inputs}}"
rm_py = rebase_path("../rm.py")
command =
"python3 \"$rm_py\" \"{{output}}\" && $ar rcs {{output}} @$rspfile"
outputs = [ "{{root_out_dir}}/{{target_output_name}}{{output_extension}}" ]
default_output_extension = ".a"
output_prefix = "lib"
description = "LINK (static) {{output}}"
}
tool("solink") {
soname = "{{target_output_name}}{{output_extension}}"
rpath = "-Wl,-soname,$soname"
rspfile = "{{output}}.rsp"
rspfile_content = "{{inputs}}"
command = "$link -shared {{ldflags}} @$rspfile {{frameworks}} {{solibs}} {{libs}} $rpath -o {{output}}"
outputs = [ "{{root_out_dir}}/$soname" ]
output_prefix = "lib"
default_output_extension = ".so"
description = "LINK (shared) {{output}}"
}
tool("stamp") {
command = "touch {{output}}"
description = "STAMP {{output}}"
}
}

View file

@ -2,10 +2,58 @@
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
import("//build/config/cronet/config.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/partition_alloc.gni") import("//build_overrides/partition_alloc.gni")
# -----------------------------------------------------------------------------
# Note on the use of `xxx_default` variable in partition_alloc.
#
# GN provides the default_args() instruction. It is meant to be used by embedders,
# to override the default args declared by the embeddees (e.g. partition_alloc).
# This is the intended way to use GN. It properly interacts with the args.gn
# user's file.
#
# Unfortunately, Chrome and other embedders aren't using it. Instead, they
# expect embeddees to import global '.gni' file from the embedder, e.g.
# `//build_overrides/partition_alloc.gni`. This file sets some `xxx_default`
# variable that will be transferred to the declared args. For instance
# a library would use:
# ```
# import("//build_overrides/library.gni")
# declare_args() {
# xxx = xxx_default
# }
# ```
#
# We don't really want to break embedders when introducing new args. Ideally,
# we would have liked to have defaults for the default variables, but that
# would be a recursive problem. To resolve it, we sometimes use the `defined(...)`
# instruction to check if the embedder has defined the `xxx_default` variable or
# not.
#
# In general, we should aim to support the embedders that are using GN normally,
# and avoid requiring them to define `xxx_default` in the `//build_overrides`
# -----------------------------------------------------------------------------
# Some embedders use `is_debug`; it can be used to set the default value of
# `partition_alloc_is_debug_default`.
if (!defined(partition_alloc_is_debug_default)) {
if (defined(is_debug)) {
partition_alloc_is_debug_default = is_debug
} else {
partition_alloc_is_debug_default = false
}
}
# Some embedders use `dcheck_always_on`; it can be used to set the default
# value of `partition_alloc_dcheck_always_on_default`.
if (!defined(partition_alloc_dcheck_always_on_default)) {
if (defined(dcheck_always_on)) {
partition_alloc_dcheck_always_on_default = dcheck_always_on
} else {
partition_alloc_dcheck_always_on_default = false
}
}
# PartitionAlloc have limited support for MSVC's cl.exe compiler. It can only # PartitionAlloc have limited support for MSVC's cl.exe compiler. It can only
# access the generate "buildflags" and the "raw_ptr" definitions implemented # access the generate "buildflags" and the "raw_ptr" definitions implemented
# with RawPtrNoOpImpl. Everything else is considered not supported. # with RawPtrNoOpImpl. Everything else is considered not supported.
@ -30,6 +78,17 @@ if (is_nacl) {
assert(false, "Unknown CPU: $current_cpu") assert(false, "Unknown CPU: $current_cpu")
} }
# Makes the number of empty slot spans that can remain committed larger in
# foreground mode compared to background mode
# (see `PartitionRoot::AdjustFor(Background|Foreground)`).
#
# Foreground/background modes are used by default on macOS and Windows so this
# must be true on these platforms. It's also true on other platforms to allow
# experiments.
#
# TODO(crbug.com/329199197): Clean this up when experiments are complete.
use_large_empty_slot_span_ring = true
# Disables for Android ARM64 because it actually requires API 31+. # Disables for Android ARM64 because it actually requires API 31+.
# See partition_alloc/tagging.cc: # See partition_alloc/tagging.cc:
# mallopt can be loaded after API 26. # mallopt can be loaded after API 26.
@ -39,8 +98,14 @@ has_memory_tagging =
current_cpu == "arm64" && is_clang && !is_asan && is_linux && current_os != "openwrt" current_cpu == "arm64" && is_clang && !is_asan && is_linux && current_os != "openwrt"
declare_args() { declare_args() {
# Debug configuration.
partition_alloc_is_debug = partition_alloc_is_debug_default
# Enable PA_DCHECKs in PartitionAlloc in release mode.
partition_alloc_dcheck_always_on = partition_alloc_dcheck_always_on_default
# Causes all the allocations to be routed via allocator_shim.cc. Usually, # Causes all the allocations to be routed via allocator_shim.cc. Usually,
# the allocator shim will, in turn, route them to Partition Alloc, but # the allocator shim will, in turn, route them to PartitionAlloc, but
# other allocators are also supported by the allocator shim. # other allocators are also supported by the allocator shim.
use_allocator_shim = use_allocator_shim_default && is_clang_or_gcc use_allocator_shim = use_allocator_shim_default && is_clang_or_gcc
@ -76,42 +141,63 @@ if (is_nacl) {
} }
declare_args() { declare_args() {
# Turns on compiler optimizations in PartitionAlloc in Debug build.
# If PartitionAlloc-Everywhere is enabled in a Debug build, all memory
# allocations and deallocations are executed by non-optimized
# PartitionAlloc, so chrome (including tests) will be much slower.
# This can cause debug trybots' timeouts. If we want to debug
# PartitionAlloc itself, use partition_alloc_optimized_debug=false.
# Otherwise, use partition_alloc_optimized_debug=true to enable optimized
# PartitionAlloc.
partition_alloc_optimized_debug = true
# PartitionAlloc-Everywhere (PA-E). Causes allocator_shim.cc to route # PartitionAlloc-Everywhere (PA-E). Causes allocator_shim.cc to route
# calls to PartitionAlloc, rather than some other platform allocator. # calls to PartitionAlloc, rather than some other platform allocator.
use_partition_alloc_as_malloc = use_partition_alloc && use_allocator_shim && use_partition_alloc_as_malloc = use_partition_alloc && use_allocator_shim &&
use_partition_alloc_as_malloc_default use_partition_alloc_as_malloc_default
} }
assert(!use_allocator_shim || (is_android || is_apple || is_chromeos || declare_args() {
is_fuchsia || is_linux || is_win), # Whether PartitionAlloc dispatch can be replaced with another dispatch with
"The allocator shim does not (yet) support the platform.") # some more safety checks at runtime or not. When true, the allocator shim
# provides an extended API to swap PartitionAlloc.
enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support =
use_partition_alloc_as_malloc
}
declare_args() {
# This is a flag for binary experiment on iOS. When BRP for iOS is enabled,
# we see some un-actionable `DoubleFreeOrCorruptionDetected` crashes.
# This flag enables some extra `CHECK`s to get actionable crash reports.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
enable_ios_corruption_hardening = use_partition_alloc_as_malloc && is_ios &&
enable_ios_corruption_hardening_default
}
assert(
!enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support || use_partition_alloc_as_malloc,
"PartitionAlloc with advanced checks requires PartitionAlloc itself.")
assert(!use_allocator_shim || !is_nacl,
"The allocator shim supports every platform, except nacl")
if (use_allocator_shim && is_win) { if (use_allocator_shim && is_win) {
# It's hard to override CRT's malloc family in every case in the component # It's hard to override CRT's malloc family in every case in the component
# build, and it's very easy to override it partially and to be inconsistent # build, and it's very easy to override it partially and to be inconsistent
# among allocations and deallocations. Then, we'll crash when PA deallocates # among allocations and deallocations. Then, we'll crash when PA deallocates
# a memory region allocated by the CRT's malloc or vice versa. # a memory region allocated by the CRT's malloc or vice versa.
assert(!is_component_build, # Since PartitionAlloc depends on libc++, it is difficult to link libc++.dll
"The allocator shim doesn't work for the component build on Windows.") # with PartitionAlloc to replace its allocator with PartitionAlloc.
# If using libcxx_is_shared=true,
# a. since inline methods or inline functions defined in some libc++ headers,
# e.g. vector, use new, malloc(), and so on, the memory allocation will
# be done inside a client code.
# b. on the other hand, libc++.dll deallocates the memory allocated by the
# inline methods or inline functions. It will not be run inside the client
# code.
# So a.'s allocation is done by PartitionAlloc, but b.'s deallocation is
# done by system allocator. This will cause heap check failure (WinHeap
# doesn't know PartitionAlloc) and crash.
# If libcxx_is_shared=false, libc++ is a static library. All libc++ code
# will be run inside the client. The above issue will disappear.
assert(
!is_component_build || (!libcxx_is_shared && !partition_alloc_is_debug),
"The allocator shim for the Windows component build needs !libcxx_is_shared && !partition_alloc_is_debug.")
} }
declare_args() { declare_args() {
use_freeslot_bitmap = false use_freeslot_bitmap = false
# Puts the regular and BRP pools right next to each other, so that we can
# check "belongs to one of the two pools" with a single bitmask operation.
glue_core_pools = false
# Introduces pointer compression support in PA. These are 4-byte # Introduces pointer compression support in PA. These are 4-byte
# pointers that can point within the core pools (regular and BRP). # pointers that can point within the core pools (regular and BRP).
# #
@ -133,6 +219,23 @@ declare_args() {
# through malloc. Useful for using with tools that intercept malloc, e.g. # through malloc. Useful for using with tools that intercept malloc, e.g.
# heaptrack. # heaptrack.
forward_through_malloc = false forward_through_malloc = false
# Enable reentrancy checks at `partition_alloc::internal::Lock`.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
enable_partition_lock_reentrancy_check = enable_ios_corruption_hardening
# This will write a fixed cookie pattern at the end of each allocation, and
# later verify the pattern remains unchanged to ensure there is no OOB write.
# It comes with performance and memory cost, hence enabled only in debug.
use_partition_cookie =
partition_alloc_is_debug || partition_alloc_dcheck_always_on ||
enable_ios_corruption_hardening
# This will change partition cookie size to 4B or 8B, whichever is equivalent to
# size of InSlotMetadata. This option is useful for InSlotMetadata corruption
# investigation.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
smaller_partition_cookie = enable_ios_corruption_hardening
} }
declare_args() { declare_args() {
@ -158,6 +261,14 @@ assert(!enable_pointer_compression_support || glue_core_pools,
"Pointer compression relies on core pools being contiguous.") "Pointer compression relies on core pools being contiguous.")
declare_args() { declare_args() {
# We want to use RawPtrBackupRefImpl as the raw_ptr<> implementation
# iff BRP support is enabled. However, for purpose of performance
# investigations we want to be able to control each separately.
#
# TEST ONLY! Don't touch unless you think you know what you're doing. Play
# with enable_backup_ref_ptr_support instead.
use_raw_ptr_backup_ref_impl = enable_backup_ref_ptr_support
# Make explicit calls to ASAN at runtime, e.g. to mark quarrantined memory # Make explicit calls to ASAN at runtime, e.g. to mark quarrantined memory
# as poisoned. Allows ASAN to tell if a particular memory error is protected # as poisoned. Allows ASAN to tell if a particular memory error is protected
# by BRP in its reports. # by BRP in its reports.
@ -170,12 +281,12 @@ declare_args() {
(is_win || is_android || is_linux || is_mac || is_chromeos) (is_win || is_android || is_linux || is_mac || is_chromeos)
# Use probe-on-destruct unowned ptr detection with ASAN. # Use probe-on-destruct unowned ptr detection with ASAN.
use_asan_unowned_ptr = false use_raw_ptr_asan_unowned_impl = false
} }
# Use the version of raw_ptr<T> that allows the embedder to implement custom # Use the version of raw_ptr<T> that allows the embedder to implement custom
# logic. # logic.
use_hookable_raw_ptr = use_asan_backup_ref_ptr use_raw_ptr_hookable_impl = use_asan_backup_ref_ptr
declare_args() { declare_args() {
# - enable_backup_ref_ptr_slow_checks: enable additional safety checks that # - enable_backup_ref_ptr_slow_checks: enable additional safety checks that
@ -194,44 +305,55 @@ declare_args() {
# Enable the feature flag required to activate backup ref pointers. That is to # Enable the feature flag required to activate backup ref pointers. That is to
# say `PartitionAllocBackupRefPtr`. # say `PartitionAllocBackupRefPtr`.
# #
# This is meant to be used primarily on bots. It is much easier to override # This is meant to be modified primarily on bots. It is much easier to
# the feature flags using a binary flag instead of updating multiple bots's # override the feature flags using a binary flag instead of updating multiple
# scripts to pass command line arguments. # bots's scripts to pass command line arguments.
enable_backup_ref_ptr_feature_flag = false #
# TODO(328104161): Remove this flag.
enable_backup_ref_ptr_feature_flag =
enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl &&
# Platforms where BackupRefPtr hasn't shipped yet:
!is_castos && !is_ios
# While keeping BRP support, override the feature flag so that it is in the
# disabled state. This will overwrite `enable_backup_ref_ptr_feature_flag`.
# TODO(https://crbug.com/372183586): Fix the bug and remove this arg.
force_disable_backup_ref_ptr_feature =
enable_backup_ref_ptr_support && enable_ios_corruption_hardening
# Build support for Dangling Ptr Detection (DPD) via BackupRefPtr (BRP), # Build support for Dangling Ptr Detection (DPD) via BackupRefPtr (BRP),
# making the raw_ptr<T> implementation to RawPtrBackupRefImpl if active. # making the raw_ptr<T> implementation to RawPtrBackupRefImpl if active.
enable_dangling_raw_ptr_checks = enable_dangling_raw_ptr_checks =
enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support &&
use_raw_ptr_backup_ref_impl
enable_backup_ref_ptr_instance_tracer = false
backup_ref_ptr_extra_oob_checks =
enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl
}
declare_args() {
# Enable the feature flag required to check for dangling pointers. That is to # Enable the feature flag required to check for dangling pointers. That is to
# say `PartitionAllocDanglingPtr`. # say `PartitionAllocDanglingPtr`.
# #
# This is meant to be used primarily on bots. It is much easier to override # This is meant to be modified primarily on bots. It is much easier to
# the feature flags using a binary flag instead of updating multiple bots's # override the feature flags using a binary flag instead of updating multiple
# scripts to pass command line arguments. # bots's scripts to pass command line arguments.
enable_dangling_raw_ptr_feature_flag = false
# Enables the dangling raw_ptr checks feature for the performance experiment.
# Not every dangling pointers have been fixed or annotated yet. To avoid
# accounting for the cost of calling the PA's embedder's callbacks when a
# dangling pointer has been detected, this simulates the raw_ptr to be
# allowed to dangle.
# #
# This flag is temporary, and isn't used by PA embedders, so it doesn't need # TODO(328104161): Remove this flag.
# to go through build_overrides enable_dangling_raw_ptr_feature_flag = enable_dangling_raw_ptr_checks
enable_dangling_raw_ptr_perf_experiment = false }
# Set to `enable_backup_ref_ptr_support && has_64_bit_pointers` when enabling. declare_args() {
backup_ref_ptr_poison_oob_ptr = false backup_ref_ptr_poison_oob_ptr =
false && backup_ref_ptr_extra_oob_checks && has_64_bit_pointers
enable_backup_ref_ptr_instance_tracer = false
} }
declare_args() { declare_args() {
# Shadow metadata is still under development and only supports Linux # Shadow metadata is still under development and only supports Linux
# for now. # for now.
enable_shadow_metadata = false enable_shadow_metadata = is_linux && has_64_bit_pointers
} }
declare_args() { declare_args() {
@ -244,74 +366,91 @@ declare_args() {
use_full_mte = false use_full_mte = false
} }
# *Scan is currently only used by Chromium, and supports only 64-bit. stack_scan_supported =
use_starscan = build_with_chromium && has_64_bit_pointers current_cpu == "x64" || current_cpu == "x86" || current_cpu == "arm" ||
current_cpu == "arm64" || current_cpu == "riscv64" || current_cpu == "loong64"
pcscan_stack_supported =
use_starscan &&
(current_cpu == "x64" || current_cpu == "x86" || current_cpu == "arm" ||
current_cpu == "arm64" || current_cpu == "riscv64")
# We want to provide assertions that guard against inconsistent build # We want to provide assertions that guard against inconsistent build
# args, but there is no point in having them fire if we're not building # args, but there is no point in having them fire if we're not building
# PartitionAlloc at all. If `use_partition_alloc` is false, we jam all # PartitionAlloc at all. If `use_partition_alloc` is false, we jam all
# related args to `false`. # related args to `false`.
# #
# We also disable PA-Everywhere and PA-based features in two types of
# toolchains:
# - Toolchains that disable PA-Everywhere explicitly.
# - The rust host build tools toochain, which builds DLLs to dlopen into the
# compiler for proc macros. We would want any allocations to use the same
# paths as the compiler.
#
# Do not clear the following, as they can function outside of PartitionAlloc # Do not clear the following, as they can function outside of PartitionAlloc
# - has_64_bit_pointers # - has_64_bit_pointers
# - has_memory_tagging # - has_memory_tagging
if (!use_partition_alloc) { if (!use_partition_alloc ||
(defined(toolchain_allows_use_partition_alloc_as_malloc) &&
!toolchain_allows_use_partition_alloc_as_malloc) ||
(defined(toolchain_for_rust_host_build_tools) &&
toolchain_for_rust_host_build_tools)) {
use_partition_alloc_as_malloc = false use_partition_alloc_as_malloc = false
glue_core_pools = false
enable_backup_ref_ptr_support = false enable_backup_ref_ptr_support = false
use_raw_ptr_backup_ref_impl = false
use_asan_backup_ref_ptr = false use_asan_backup_ref_ptr = false
use_asan_unowned_ptr = false use_raw_ptr_asan_unowned_impl = false
use_hookable_raw_ptr = false use_raw_ptr_hookable_impl = false
enable_backup_ref_ptr_slow_checks = false enable_backup_ref_ptr_slow_checks = false
enable_dangling_raw_ptr_checks = false enable_dangling_raw_ptr_checks = false
enable_dangling_raw_ptr_feature_flag = false enable_dangling_raw_ptr_feature_flag = false
enable_dangling_raw_ptr_perf_experiment = false
enable_pointer_subtraction_check = false enable_pointer_subtraction_check = false
backup_ref_ptr_poison_oob_ptr = false backup_ref_ptr_poison_oob_ptr = false
backup_ref_ptr_extra_oob_checks = false
enable_backup_ref_ptr_instance_tracer = false enable_backup_ref_ptr_instance_tracer = false
use_starscan = false
use_full_mte = false use_full_mte = false
} }
# Disable |use_full_mte| if memory tagging is not available. This is for targets that run as part the build process.
if (!has_memory_tagging) {
use_full_mte = false
}
# use_raw_ptr_backup_ref_impl can only be used if
# enable_backup_ref_ptr_support is true.
assert(enable_backup_ref_ptr_support || !use_raw_ptr_backup_ref_impl,
"Can't use RawPtrBackupRefImpl if BRP isn't enabled at all")
# enable_backup_ref_ptr_slow_checks can only be used if # enable_backup_ref_ptr_slow_checks can only be used if
# enable_backup_ref_ptr_support is true. # enable_backup_ref_ptr_support is true.
assert(enable_backup_ref_ptr_support || !enable_backup_ref_ptr_slow_checks, assert(enable_backup_ref_ptr_support || !enable_backup_ref_ptr_slow_checks,
"Can't enable additional BackupRefPtr checks if it isn't enabled at all") "Can't enable additional BRP checks if it isn't enabled at all")
# enable_dangling_raw_ptr_checks can only be used if # enable_dangling_raw_ptr_checks can only be used if
# enable_backup_ref_ptr_support is true. # enable_backup_ref_ptr_support & use_raw_ptr_backup_ref_impl are true.
assert( assert((enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl) ||
enable_backup_ref_ptr_support || !enable_dangling_raw_ptr_checks, !enable_dangling_raw_ptr_checks,
"Can't enable dangling raw_ptr checks if BackupRefPtr isn't enabled at all") "Can't enable dangling raw_ptr checks if BRP isn't enabled and used")
# It's meaningless to force on DPD (e.g. on bots) if the support isn't compiled # It's meaningless to force on DPD (e.g. on bots) if the support isn't compiled
# in. # in.
assert(enable_dangling_raw_ptr_checks || !enable_dangling_raw_ptr_feature_flag, assert(enable_dangling_raw_ptr_checks || !enable_dangling_raw_ptr_feature_flag,
"Meaningless to enable DPD without it compiled.") "Meaningless to enable DPD without it compiled.")
# To run the dangling raw_ptr detector experiment, the underlying feature must # To enable extra OOB checks for BRP, the underlying feature must be
# be enabled too.
assert(
enable_dangling_raw_ptr_checks || !enable_dangling_raw_ptr_perf_experiment,
"Missing dangling pointer checks feature for its performance experiment")
# To poison OOB pointers for BackupRefPtr, the underlying feature must be
# enabled, too. # enabled, too.
assert( assert((enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl) ||
enable_backup_ref_ptr_support || !backup_ref_ptr_poison_oob_ptr, !backup_ref_ptr_extra_oob_checks,
"Can't enable poisoning for OOB pointers if BackupRefPtr isn't enabled " + "Can't enable extra OOB checks if BRP isn't enabled and used")
"at all")
# To poison OOB pointers for BRP, the underlying feature must be
# enabled, too.
assert(backup_ref_ptr_extra_oob_checks || !backup_ref_ptr_poison_oob_ptr,
"Can't enable poisoning for OOB pointers if OOB checks aren't enabled " +
"at all")
assert(has_64_bit_pointers || !backup_ref_ptr_poison_oob_ptr, assert(has_64_bit_pointers || !backup_ref_ptr_poison_oob_ptr,
"Can't enable poisoning for OOB pointers if pointers are only 32-bit") "Can't enable poisoning for OOB pointers if pointers are only 32-bit")
# AsanBackupRefPtr and AsanUnownedPtr are mutually exclusive variants of # AsanBackupRefPtr and AsanUnownedPtr are mutually exclusive variants of
# raw_ptr. # raw_ptr.
assert( assert(
!use_asan_unowned_ptr || !use_asan_backup_ref_ptr, !use_raw_ptr_asan_unowned_impl || !use_asan_backup_ref_ptr,
"Both AsanUnownedPtr and AsanBackupRefPtr can't be enabled at the same " + "Both AsanUnownedPtr and AsanBackupRefPtr can't be enabled at the same " +
"time") "time")
@ -321,27 +460,27 @@ assert(
"Both BackupRefPtr and AsanBackupRefPtr can't be enabled at the same time") "Both BackupRefPtr and AsanBackupRefPtr can't be enabled at the same time")
# BackupRefPtr and AsanUnownedPtr are mutually exclusive variants of raw_ptr. # BackupRefPtr and AsanUnownedPtr are mutually exclusive variants of raw_ptr.
assert(!enable_backup_ref_ptr_support || !use_asan_unowned_ptr, assert(!enable_backup_ref_ptr_support || !use_raw_ptr_asan_unowned_impl,
"Both BackupRefPtr and AsanUnownedPtr can't be enabled at the same time") "Both BackupRefPtr and AsanUnownedPtr can't be enabled at the same time")
# RawPtrHookableImpl and BackupRefPtr are mutually exclusive variants of # RawPtrHookableImpl and BackupRefPtr are mutually exclusive variants of
# raw_ptr. # raw_ptr.
assert( assert(
!use_hookable_raw_ptr || !enable_backup_ref_ptr_support, !use_raw_ptr_hookable_impl || !enable_backup_ref_ptr_support,
"Both RawPtrHookableImpl and BackupRefPtr can't be enabled at the same " + "Both RawPtrHookableImpl and BackupRefPtr can't be enabled at the same " +
"time") "time")
# RawPtrHookableImpl and AsanUnownedPtr are mutually exclusive variants of # RawPtrHookableImpl and AsanUnownedPtr are mutually exclusive variants of
# raw_ptr. # raw_ptr.
assert( assert(
!use_hookable_raw_ptr || !use_asan_unowned_ptr, !use_raw_ptr_hookable_impl || !use_raw_ptr_asan_unowned_impl,
"Both RawPtrHookableImpl and AsanUnownedPtr can't be enabled at the same " + "Both RawPtrHookableImpl and AsanUnownedPtr can't be enabled at the same " +
"time") "time")
assert(!use_asan_backup_ref_ptr || is_asan, assert(!use_asan_backup_ref_ptr || is_asan,
"AsanBackupRefPtr requires AddressSanitizer") "AsanBackupRefPtr requires AddressSanitizer")
assert(!use_asan_unowned_ptr || is_asan, assert(!use_raw_ptr_asan_unowned_impl || is_asan,
"AsanUnownedPtr requires AddressSanitizer") "AsanUnownedPtr requires AddressSanitizer")
# AsanBackupRefPtr is not supported outside Chromium. The implementation is # AsanBackupRefPtr is not supported outside Chromium. The implementation is
@ -350,22 +489,18 @@ assert(!use_asan_unowned_ptr || is_asan,
assert(build_with_chromium || !use_asan_backup_ref_ptr, assert(build_with_chromium || !use_asan_backup_ref_ptr,
"AsanBackupRefPtr is not supported outside Chromium") "AsanBackupRefPtr is not supported outside Chromium")
assert(!use_asan_backup_ref_ptr || use_hookable_raw_ptr, assert(!use_asan_backup_ref_ptr || use_raw_ptr_hookable_impl,
"AsanBackupRefPtr requires RawPtrHookableImpl") "AsanBackupRefPtr requires RawPtrHookableImpl")
# use_full_mte can only be used if has_memory_tagging is true. # pkeys support is explicitly disabled in all Cronet builds, as some test
assert( # dependencies that use partition_allocator are compiled in AOSP against a
has_memory_tagging || !use_full_mte, # version of glibc that does not include pkeys syscall numbers.
"Can't use full MTE protection if memory tagging isn't supported at all.") is_pkeys_available =
(is_linux || is_chromeos) && current_cpu == "x64" && !is_cronet_build
declare_args() { declare_args() {
# pkeys support is explicitly disabled in all Cronet builds, as some test enable_pkeys = is_pkeys_available
# dependencies that use partition_allocator are compiled in AOSP against a
# version of glibc that does not include pkeys syscall numbers.
enable_pkeys =
(is_linux || is_chromeos) && target_cpu == "x64" && !is_cronet_build
} }
assert(!enable_pkeys || ((is_linux || is_chromeos) && target_cpu == "x64"), assert(!enable_pkeys || is_pkeys_available,
"Pkeys are only supported on x64 linux and ChromeOS") "Pkeys are only supported on x64 linux and ChromeOS")
# Some implementations of raw_ptr<>, like BackupRefPtr, require zeroing when # Some implementations of raw_ptr<>, like BackupRefPtr, require zeroing when
@ -398,3 +533,9 @@ declare_args() {
raw_ptr_zero_on_move = raw_ptr_zero_on_move_default raw_ptr_zero_on_move = raw_ptr_zero_on_move_default
raw_ptr_zero_on_destruct = raw_ptr_zero_on_destruct_default raw_ptr_zero_on_destruct = raw_ptr_zero_on_destruct_default
} }
declare_args() {
# Assert that PartitionAlloc and MiraclePtr run on C++20 when set to true.
# Embedders may opt-out of using C++ 20 build.
assert_cpp20 = assert_cpp20_default
}

View file

@ -2,21 +2,52 @@
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
import("//build/buildflag_header.gni")
import("//build/config/android/config.gni")
import("//build/config/cast.gni")
import("//build/config/chromeos/ui_mode.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/logging.gni")
import("../../partition_alloc.gni") import("../../partition_alloc.gni")
import("buildflag_header.gni")
# Add partition_alloc.gni and import it for partition_alloc configs. # //build_overrides/partition_alloc.gni should define partition_alloc_{
# add,remove}_configs. But if not defined (e.g. the embedder misses the config),
# define them here.
if (!defined(partition_alloc_add_configs)) {
partition_alloc_add_configs = []
}
if (!defined(partition_alloc_remove_configs)) {
partition_alloc_remove_configs = []
}
# TODO(https://crbug.com/1467773): Split PartitionAlloc into a public and # Enables compilation of the freelist dispatcher, which we'll use to
# carry out runtime evaluation of PartitionAlloc's two freelist
# implementations: the existing encoded-next freelist and the new
# pool offset freelist. When false, the latter is not built.
use_freelist_dispatcher = has_64_bit_pointers
assert(has_64_bit_pointers || !use_freelist_dispatcher,
"freelist dispatcher can't be used without 64-bit pointers")
record_alloc_info = false
# GWP-ASan is tied to BRP's enablement.
enable_gwp_asan_support = enable_backup_ref_ptr_support
# Pointer compression requires 64-bit pointers.
enable_pointer_compression =
enable_pointer_compression_support && has_64_bit_pointers
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`, but avails it
# as a buildflag.
partition_alloc_dchecks_are_on =
partition_alloc_is_debug || partition_alloc_dcheck_always_on
# Building PartitionAlloc for Windows component build.
# Currently this uses build_with_chromium so as not to affect any third_party
# code, but if any third_party code wants to use it, remove build_with_chromium.
use_partition_alloc_as_malloc_on_win_component_build =
build_with_chromium && is_win && is_component_build
# TODO(crbug.com/40276913): Split PartitionAlloc into a public and
# private parts. The public config would include add the "./include" dir and # private parts. The public config would include add the "./include" dir and
# the private config would add the "./src" dir. # the private config would add the "./src" dir.
# TODO(https://crbug.com/1467773): Move this config and several target into # TODO(crbug.com/40276913): Move this config and several target into
# "../..". # "../..".
config("public_includes") { config("public_includes") {
include_dirs = [ include_dirs = [
@ -50,6 +81,7 @@ config("dependants_extra_warnings") {
"-Wduplicate-enum", "-Wduplicate-enum",
"-Wextra-semi", "-Wextra-semi",
"-Wextra-semi-stmt", "-Wextra-semi-stmt",
"-Widiomatic-parentheses",
"-Wimplicit-fallthrough", "-Wimplicit-fallthrough",
"-Winconsistent-missing-destructor-override", "-Winconsistent-missing-destructor-override",
"-Winvalid-offsetof", "-Winvalid-offsetof",
@ -77,17 +109,114 @@ config("dependants_extra_warnings") {
} }
} }
_remove_configs = [] # This will generate warnings when using Clang if code generates exit-time
_add_configs = [] # destructors, which will slow down closing the program.
if (!is_debug || partition_alloc_optimized_debug) { # TODO(thakis): Make this a blocklist instead, http://crbug.com/101600
_remove_configs += [ "//build/config/compiler:default_optimization" ] config("wexit_time_destructors") {
if (is_clang) {
cflags = [ "-Wexit-time-destructors" ]
}
}
# Partition alloc is relatively hot (>1% of cycles for users of CrOS). source_set("buildflag_macro") {
# Use speed-focused optimizations for it. sources = [ "buildflag.h" ]
_add_configs += [ "//build/config/compiler:optimize_speed" ] public_configs = [ ":public_includes" ]
} else { }
_remove_configs += [ "//build/config/compiler:default_optimization" ]
_add_configs += [ "//build/config/compiler:no_optimize" ] # When developers are repeatedly growing a buffer with `realloc`, they are
# expected to request a new size that is larger than the current size by
# some growth factor. This growth factor makes it possible to amortize the
# cost of memcpy. Unfortunately, some nVidia drivers have a bug where they
# repeatedly increase the buffer by only 4144 bytes.
#
# In particular, most Skia Linux bots are using the affected nVidia driver. So
# this flag is used as a workaround for Skia standalone, not in production.
#
# External link:
# https://forums.developer.nvidia.com/t/550-54-14-very-bad-performance-due-to-bunch-of-reallocations-during-glcore-initialization/287027
#
# Internal discussion at @chrome-memory-safety:
# https://groups.google.com/a/google.com/d/msgid/chrome-memory-safety/CAAzos5HrexY2njz2YzWrffTq1xEfkx15GVpSvHUyQED6wBSXvA%40mail.gmail.com?utm_medium=email&utm_source=footer
declare_args() {
partition_alloc_realloc_growth_factor_mitigation = false
}
pa_buildflag_header("buildflags") {
header = "buildflags.h"
flags = [
"ASSERT_CPP_20=$assert_cpp20",
"BACKUP_REF_PTR_EXTRA_OOB_CHECKS=$backup_ref_ptr_extra_oob_checks",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
"DCHECKS_ARE_ON=$partition_alloc_dchecks_are_on",
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT=$enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_GWP_ASAN_SUPPORT=$enable_gwp_asan_support",
"ENABLE_PARTITION_LOCK_REENTRANCY_CHECK=$enable_partition_lock_reentrancy_check",
"ENABLE_PKEYS=$enable_pkeys",
"ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
"ENABLE_POINTER_COMPRESSION=$enable_pointer_compression",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"FORCE_DISABLE_BACKUP_REF_PTR_FEATURE=$force_disable_backup_ref_ptr_feature",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
"FORWARD_THROUGH_MALLOC=$forward_through_malloc",
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"HAS_MEMORY_TAGGING=$has_memory_tagging",
"IS_ANDROID=$is_android",
"IS_CASTOS=$is_castos",
"IS_CAST_ANDROID=$is_cast_android",
"IS_CHROMEOS=$is_chromeos",
"IS_DEBUG=$partition_alloc_is_debug",
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"REALLOC_GROWTH_FACTOR_MITIGATION=$partition_alloc_realloc_growth_factor_mitigation",
"RECORD_ALLOC_INFO=$record_alloc_info",
"SMALLER_PARTITION_COOKIE=$smaller_partition_cookie",
"STACK_SCAN_SUPPORTED=$stack_scan_supported",
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_FREELIST_DISPATCHER=$use_freelist_dispatcher",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"USE_FULL_MTE=$use_full_mte",
"USE_LARGE_EMPTY_SLOT_SPAN_RING=$use_large_empty_slot_span_ring",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"USE_PARTITION_COOKIE=$use_partition_cookie",
"USE_RAW_PTR_ASAN_UNOWNED_IMPL=$use_raw_ptr_asan_unowned_impl",
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
]
}
# TODO(crbug.com/41481467): Remove this alias.
# Temporary alias, the time to update partition_alloc dependants.
# Currently needed by pdfium and dawn.
source_set("partition_alloc_buildflags") {
public = [ "partition_alloc_buildflags.h" ]
public_deps = [ ":buildflags" ]
}
# Provides platform and architecture detections from the compiler defines.
source_set("build_config") {
sources = [
"build_config.h",
"buildflag.h",
]
public_deps = [
":buildflag_macro", # Provides 'PA_BUILDFLAG()' macro.
":buildflags", # Provides `IS_CHROMEOS` definition.
]
public_configs = [ ":public_includes" ]
} }
component("raw_ptr") { component("raw_ptr") {
@ -104,19 +233,19 @@ component("raw_ptr") {
] ]
sources = [ "pointers/instance_tracer.cc" ] sources = [ "pointers/instance_tracer.cc" ]
public_configs = [ ":public_includes" ] public_configs = [ ":public_includes" ]
configs += [ "//build/config/compiler:wexit_time_destructors" ] configs += [ ":wexit_time_destructors" ]
if (enable_backup_ref_ptr_support) { if (use_raw_ptr_backup_ref_impl) {
sources += [ sources += [
"pointers/raw_ptr_backup_ref_impl.cc", "pointers/raw_ptr_backup_ref_impl.cc",
"pointers/raw_ptr_backup_ref_impl.h", "pointers/raw_ptr_backup_ref_impl.h",
] ]
} else if (use_hookable_raw_ptr) { } else if (use_raw_ptr_hookable_impl) {
sources += [ sources += [
"pointers/raw_ptr_hookable_impl.cc", "pointers/raw_ptr_hookable_impl.cc",
"pointers/raw_ptr_hookable_impl.h", "pointers/raw_ptr_hookable_impl.h",
] ]
} else if (use_asan_unowned_ptr) { } else if (use_raw_ptr_asan_unowned_impl) {
sources += [ sources += [
"pointers/raw_ptr_asan_unowned_impl.cc", "pointers/raw_ptr_asan_unowned_impl.cc",
"pointers/raw_ptr_asan_unowned_impl.h", "pointers/raw_ptr_asan_unowned_impl.h",
@ -125,152 +254,22 @@ component("raw_ptr") {
sources += [ "pointers/raw_ptr_noop_impl.h" ] sources += [ "pointers/raw_ptr_noop_impl.h" ]
sources += [ "pointers/empty.cc" ] sources += [ "pointers/empty.cc" ]
} }
public_deps = [
":build_config",
":buildflags",
]
if (use_partition_alloc) { if (use_partition_alloc) {
public_deps = [ ":partition_alloc" ] public_deps += [ ":partition_alloc" ]
} }
deps = [ ":buildflags" ]
# See also: `partition_alloc_base/component_export.h` # See also: `partition_alloc_base/component_export.h`
defines = [ "IS_RAW_PTR_IMPL" ] defines = [ "IS_RAW_PTR_IMPL" ]
configs -= _remove_configs configs -= partition_alloc_remove_configs
configs += _add_configs configs += partition_alloc_add_configs
configs += [ ":dependants_extra_warnings" ] configs += [ ":dependants_extra_warnings" ]
} }
# Changes the freelist implementation to use pointer offsets in lieu
# of full-on pointers. Defaults to false, which implies the use of
# "encoded next" freelist entry.
#
# Only usable when pointers are 64-bit.
use_freelist_pool_offsets = has_64_bit_pointers && false
buildflag_header("partition_alloc_buildflags") {
header = "partition_alloc_buildflags.h"
_record_alloc_info = false
# GWP-ASan is tied to BRP's enablement.
_enable_gwp_asan_support = enable_backup_ref_ptr_support
# Pools are a logical concept when address space is 32-bit.
_glue_core_pools = glue_core_pools && has_64_bit_pointers
# Pointer compression requires 64-bit pointers.
_enable_pointer_compression =
enable_pointer_compression_support && has_64_bit_pointers
# Force-enable live BRP in all processes, ignoring the canonical
# experiment state of `PartitionAllocBackupRefPtr`.
#
# This is not exposed as a GN arg as it is not meant to be used by
# developers - it is simply a compile-time hinge that should be
# set in the experimental build and then reverted immediately.
_force_all_process_brp = false
# TODO(crbug.com/1151236): Need to refactor the following buildflags.
# The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
# partition alloc. For partition alloc,
# gen/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h
# defines and partition alloc includes the header file. For chrome,
# gen/base/allocator/buildflags.h defines and chrome includes.
flags = [
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"HAS_MEMORY_TAGGING=$has_memory_tagging",
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_ASAN_UNOWNED_PTR=$use_asan_unowned_ptr",
"USE_HOOKABLE_RAW_PTR=$use_hookable_raw_ptr",
"ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support",
"FORCIBLY_ENABLE_BACKUP_REF_PTR_IN_ALL_PROCESSES=$_force_all_process_brp",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
"USE_FULL_MTE=use_full_mte",
"RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"GLUE_CORE_POOLS=$_glue_core_pools",
"ENABLE_POINTER_COMPRESSION=$_enable_pointer_compression",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"USE_FREELIST_POOL_OFFSETS=$use_freelist_pool_offsets",
"USE_STARSCAN=$use_starscan",
"PCSCAN_STACK_SUPPORTED=$pcscan_stack_supported",
"ENABLE_PKEYS=$enable_pkeys",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
"FORWARD_THROUGH_MALLOC=$forward_through_malloc",
]
}
buildflag_header("raw_ptr_buildflags") {
header = "raw_ptr_buildflags.h"
flags = [
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
]
}
buildflag_header("chromecast_buildflags") {
header = "chromecast_buildflags.h"
flags = [
"PA_IS_CAST_ANDROID=$is_cast_android",
"PA_IS_CASTOS=$is_castos",
]
}
buildflag_header("chromeos_buildflags") {
header = "chromeos_buildflags.h"
flags = [ "PA_IS_CHROMEOS_ASH=$is_chromeos_ash" ]
}
buildflag_header("debugging_buildflags") {
header = "debugging_buildflags.h"
header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
# but avails it as a buildflag.
_dcheck_is_on = is_debug || dcheck_always_on
flags = [
"PA_DCHECK_IS_ON=$_dcheck_is_on",
"PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"PA_CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
]
}
group("buildflags") {
public_deps = [
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
":raw_ptr_buildflags",
]
public_configs = [ ":public_includes" ]
}
if (is_clang_or_gcc) { if (is_clang_or_gcc) {
config("partition_alloc_implementation") { config("partition_alloc_implementation") {
# See also: `partition_alloc_base/component_export.h` # See also: `partition_alloc_base/component_export.h`
@ -351,7 +350,7 @@ if (is_clang_or_gcc) {
} }
} }
if (enable_pkeys && is_debug) { if (enable_pkeys && partition_alloc_is_debug) {
config("no_stack_protector") { config("no_stack_protector") {
cflags = [ "-fno-stack-protector" ] cflags = [ "-fno-stack-protector" ]
} }
@ -362,13 +361,42 @@ if (is_clang_or_gcc) {
":allocator_base", ":allocator_base",
":allocator_core", ":allocator_core",
":allocator_shim", ":allocator_shim",
":buildflags",
] ]
} }
if (is_win && is_component_build) {
group("win_component_build_adapter") {
# Currently guard this target by using build_with_chromium to avoid
# any issues on third_party build. But if any third_party code wants to
# use allocator_shim for its own component build, we will remove this
# guard.
if (build_with_chromium) {
if (use_allocator_shim) {
public_deps = [
":allocator_base",
":allocator_shim",
]
}
}
# If not with chromium, currently do nothing.
}
}
component("allocator_core") { component("allocator_core") {
visibility = [ ":*" ] visibility = [ ":*" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# Since common_deps defined in //build/config/BUILD.gn depends on
# PartitionAlloc for PartitionAlloc-Everywhere, we need no_default_deps
# here, because the default deps include the common_deps dependency.
# Without no_default_deps=true, we will see cyclic deps:
# common_deps=>PartitionAlloc=>common_deps
no_default_deps = true
}
sources = [ sources = [
"aarch64_support.h",
"address_pool_manager.cc", "address_pool_manager.cc",
"address_pool_manager.h", "address_pool_manager.h",
"address_pool_manager_bitmap.cc", "address_pool_manager_bitmap.cc",
@ -388,6 +416,7 @@ if (is_clang_or_gcc) {
"freeslot_bitmap_constants.h", "freeslot_bitmap_constants.h",
"gwp_asan_support.cc", "gwp_asan_support.cc",
"gwp_asan_support.h", "gwp_asan_support.h",
"in_slot_metadata.h",
"internal_allocator.cc", "internal_allocator.cc",
"internal_allocator.h", "internal_allocator.h",
"internal_allocator_forward.h", "internal_allocator_forward.h",
@ -418,6 +447,7 @@ if (is_clang_or_gcc) {
"partition_bucket.cc", "partition_bucket.cc",
"partition_bucket.h", "partition_bucket.h",
"partition_bucket_lookup.h", "partition_bucket_lookup.h",
"partition_cookie.cc",
"partition_cookie.h", "partition_cookie.h",
"partition_dcheck_helper.cc", "partition_dcheck_helper.cc",
"partition_dcheck_helper.h", "partition_dcheck_helper.h",
@ -430,9 +460,9 @@ if (is_clang_or_gcc) {
"partition_page.cc", "partition_page.cc",
"partition_page.h", "partition_page.h",
"partition_page_constants.h", "partition_page_constants.h",
"partition_ref_count.h",
"partition_root.cc", "partition_root.cc",
"partition_root.h", "partition_root.h",
"partition_shared_mutex.h",
"partition_stats.cc", "partition_stats.cc",
"partition_stats.h", "partition_stats.h",
"partition_superpage_extent_entry.h", "partition_superpage_extent_entry.h",
@ -444,6 +474,8 @@ if (is_clang_or_gcc) {
"reverse_bytes.h", "reverse_bytes.h",
"spinning_mutex.cc", "spinning_mutex.cc",
"spinning_mutex.h", "spinning_mutex.h",
"stack/stack.cc",
"stack/stack.h",
"tagging.cc", "tagging.cc",
"tagging.h", "tagging.h",
"thread_cache.cc", "thread_cache.cc",
@ -456,31 +488,6 @@ if (is_clang_or_gcc) {
"yield_processor.h", "yield_processor.h",
] ]
if (use_starscan) {
sources += [
"starscan/logging.h",
"starscan/pcscan.cc",
"starscan/pcscan.h",
"starscan/pcscan_internal.cc",
"starscan/pcscan_internal.h",
"starscan/pcscan_scheduling.cc",
"starscan/pcscan_scheduling.h",
"starscan/raceful_worklist.h",
"starscan/scan_loop.h",
"starscan/snapshot.cc",
"starscan/snapshot.h",
"starscan/stack/stack.cc",
"starscan/stack/stack.h",
"starscan/starscan_fwd.h",
"starscan/state_bitmap.h",
"starscan/stats_collector.cc",
"starscan/stats_collector.h",
"starscan/stats_reporter.h",
"starscan/write_protector.cc",
"starscan/write_protector.h",
]
}
defines = [] defines = []
if (is_win) { if (is_win) {
sources += [ sources += [
@ -499,51 +506,55 @@ if (is_clang_or_gcc) {
# The Android NDK supports PR_MTE_* macros as of NDK r23. # The Android NDK supports PR_MTE_* macros as of NDK r23.
defines += [ "HAS_PR_MTE_MACROS" ] defines += [ "HAS_PR_MTE_MACROS" ]
} }
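For context on the HAS_PR_MTE_MACROS define above: the PR_MTE_* prctl macros are what a process uses to opt into MTE tag checking. The sketch below is generic Linux/Android usage under stated assumptions (arm64 target, macros available; the tag mask is an illustrative choice), not the code in tagging.cc.

#include <cstdio>
#if defined(__linux__)
#include <sys/prctl.h>
#endif

// Returns true if synchronous MTE tag checking was enabled for this thread.
bool EnableSyncMteIfAvailable() {
#if defined(__aarch64__) && defined(PR_SET_TAGGED_ADDR_CTRL) && \
    defined(PR_MTE_TCF_SYNC)
  // Allowing tags 1..15 (excluding tag 0) is a common choice; the mask here
  // is an assumption for illustration, not a requirement.
  unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                       (0xfffeUL << PR_MTE_TAG_SHIFT);
  return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0) == 0;
#else
  return false;
#endif
}

int main() {
  std::printf("sync MTE enabled: %d\n", EnableSyncMteIfAvailable());
  return 0;
}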
if (use_starscan) { if (current_cpu == "x64") {
if (current_cpu == "x64") { assert(stack_scan_supported)
assert(pcscan_stack_supported) sources += [ "stack/asm/x64/push_registers_asm.cc" ]
sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ] } else if (current_cpu == "x86") {
} else if (current_cpu == "x86") { assert(stack_scan_supported)
assert(pcscan_stack_supported) sources += [ "stack/asm/x86/push_registers_asm.cc" ]
sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ] } else if (current_cpu == "arm") {
} else if (current_cpu == "arm") { assert(stack_scan_supported)
assert(pcscan_stack_supported) sources += [ "stack/asm/arm/push_registers_asm.cc" ]
sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ] } else if (current_cpu == "arm64") {
} else if (current_cpu == "arm64") { assert(stack_scan_supported)
assert(pcscan_stack_supported) sources += [ "stack/asm/arm64/push_registers_asm.cc" ]
sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ] } else if (current_cpu == "riscv64") {
} else if (current_cpu == "riscv64") { assert(stack_scan_supported)
assert(pcscan_stack_supported) sources += [ "stack/asm/riscv64/push_registers_asm.cc" ]
sources += [ "starscan/stack/asm/riscv64/push_registers_asm.cc" ] } else if (current_cpu == "loong64") {
} else { assert(stack_scan_supported)
# To support a trampoline for another arch, please refer to v8/src/heap/base. sources += [ "stack/asm/loong64/push_registers_asm.cc" ]
assert(!pcscan_stack_supported)
}
}
if (use_freelist_pool_offsets) {
sources += [ "pool_offset_freelist.h" ]
} else { } else {
sources += [ "encoded_next_freelist.h" ] # To support a trampoline for another arch, please refer to v8/src/heap/base.
assert(!stack_scan_supported)
} }
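The per-architecture push_registers_asm.cc files selected above implement a trampoline that spills all callee-saved registers onto the stack before the scan runs, so a conservative stack scan also sees pointers that currently live only in registers. Below is a portable approximation of that contract; the names and the setjmp() trick are illustrative only, since the real trampolines are hand-written assembly.

#include <csetjmp>
#include <cstdio>

using ScanCallback = void (*)(const void* stack_top);

// Portable stand-in for the asm trampoline: setjmp() forces the callee-saved
// registers into a buffer that lives on the stack before we take the stack top.
void PushAllRegistersAndScan(ScanCallback scan) {
  std::jmp_buf registers;
  if (setjmp(registers) == 0) {
    int marker = 0;  // Approximates the current top of the stack.
    scan(&marker);
  }
}

int main() {
  PushAllRegistersAndScan([](const void* stack_top) {
    std::printf("conservative scan starts at %p\n", stack_top);
  });
  return 0;
}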
# TODO(crbug.com/40274683): once we evaluate pool offset freelists,
# we should erase the dispatcher and compile (& use) exactly one
# freelist implementation.
if (use_freelist_dispatcher) {
sources += [ "pool_offset_freelist.h" ]
}
sources += [ "encoded_next_freelist.h" ]
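The freelist implementations listed above differ in how the in-slot "next" pointer is represented; the encoded variant stores it in a transformed form so that a stray or attacker-controlled overwrite is unlikely to decode into a usable address. A simplified, self-contained sketch of that hardening idea follows, assuming a byte-swap transform; PartitionAlloc's exact encoding and shadow-entry checks differ.

#include <cstdint>
#include <cstdio>

// Reverses the byte order of a pointer-sized value (cf. reverse_bytes.h).
constexpr uintptr_t ReverseBytes(uintptr_t value) {
  uintptr_t out = 0;
  for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
    out = (out << 8) | ((value >> (8 * i)) & 0xff);
  }
  return out;
}

struct EncodedFreelistEntry {
  uintptr_t encoded_next = 0;

  void SetNext(EncodedFreelistEntry* next) {
    encoded_next = ReverseBytes(reinterpret_cast<uintptr_t>(next));
  }
  EncodedFreelistEntry* GetNext() const {
    return reinterpret_cast<EncodedFreelistEntry*>(ReverseBytes(encoded_next));
  }
};

int main() {
  EncodedFreelistEntry a, b;
  a.SetNext(&b);
  std::printf("stored: 0x%zx, decoded: %p (b is at %p)\n",
              static_cast<size_t>(a.encoded_next),
              static_cast<void*>(a.GetNext()), static_cast<void*>(&b));
  return 0;
}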
public_deps = [ public_deps = [
":chromecast_buildflags", ":build_config",
":chromeos_buildflags", ":buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
] ]
configs += [ configs += [
":partition_alloc_implementation", ":partition_alloc_implementation",
":memory_tagging", ":memory_tagging",
"//build/config/compiler:wexit_time_destructors", ":wexit_time_destructors",
] ]
deps = [ ":allocator_base" ] deps = [ ":allocator_base" ]
public_configs = [] if (use_partition_alloc_as_malloc_on_win_component_build) {
if (is_android) { # We need to add explicit libc++ dependency here because of
# tagging.cc requires __arm_mte_set_* functions. # no_default_deps=true.
deps += [ "//third_party/cpu_features:ndk_compat" ] deps += [ "//buildtools/third_party/libc++:libc++" ]
} }
public_configs = []
if (is_fuchsia) { if (is_fuchsia) {
deps += [ deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp",
@ -573,19 +584,27 @@ if (is_clang_or_gcc) {
] ]
} }
configs -= _remove_configs configs -= partition_alloc_remove_configs
configs += _add_configs configs += partition_alloc_add_configs
configs += [ ":dependants_extra_warnings" ] configs += [ ":dependants_extra_warnings" ]
# We want to be able to test pkey mode without access to the default pkey. # We want to be able to test pkey mode without access to the default pkey.
# This is incompatible with stack protectors since the TLS won't be pkey-tagged. # This is incompatible with stack protectors since the TLS won't be pkey-tagged.
if (enable_pkeys && is_debug) { if (enable_pkeys && partition_alloc_is_debug) {
configs += [ ":no_stack_protector" ] configs += [ ":no_stack_protector" ]
} }
} }
component("allocator_base") { component("allocator_base") {
visibility = [ ":*" ] visibility = [ ":*" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# Since common_deps defined in //build/config/BUILD.gn depends on
# PartitionAlloc for PartitionAlloc-Everywhere, we need no_default_deps
# here, because the default deps include the common_deps dependency.
# Without no_default_deps=true, we will see cyclic deps:
# common_deps=>PartitionAlloc=>common_deps
no_default_deps = true
}
sources = [ sources = [
"partition_alloc_base/atomic_ref_count.h", "partition_alloc_base/atomic_ref_count.h",
@ -604,6 +623,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/debug/stack_trace.cc", "partition_alloc_base/debug/stack_trace.cc",
"partition_alloc_base/debug/stack_trace.h", "partition_alloc_base/debug/stack_trace.h",
"partition_alloc_base/export_template.h", "partition_alloc_base/export_template.h",
"partition_alloc_base/files/platform_file.h",
"partition_alloc_base/immediate_crash.h", "partition_alloc_base/immediate_crash.h",
"partition_alloc_base/log_message.cc", "partition_alloc_base/log_message.cc",
"partition_alloc_base/log_message.h", "partition_alloc_base/log_message.h",
@ -649,11 +669,13 @@ if (is_clang_or_gcc) {
"partition_alloc_base/time/time.h", "partition_alloc_base/time/time.h",
"partition_alloc_base/time/time_override.cc", "partition_alloc_base/time/time_override.cc",
"partition_alloc_base/time/time_override.h", "partition_alloc_base/time/time_override.h",
"partition_alloc_base/types/same_as_any.h",
"partition_alloc_base/types/strong_alias.h", "partition_alloc_base/types/strong_alias.h",
"partition_alloc_base/win/win_handle_types.h", "partition_alloc_base/win/win_handle_types.h",
"partition_alloc_base/win/win_handle_types_list.inc", "partition_alloc_base/win/win_handle_types_list.inc",
"partition_alloc_base/win/windows_types.h", "partition_alloc_base/win/windows_types.h",
] ]
libs = []
if (is_win) { if (is_win) {
sources += [ sources += [
@ -665,7 +687,12 @@ if (is_clang_or_gcc) {
"partition_alloc_base/threading/platform_thread_win.cc", "partition_alloc_base/threading/platform_thread_win.cc",
"partition_alloc_base/time/time_win.cc", "partition_alloc_base/time/time_win.cc",
] ]
} else if (is_posix) { libs += [
"winmm.lib", # For timeGetTime.
]
}
if (is_posix) {
sources += [ sources += [
"partition_alloc_base/debug/stack_trace_posix.cc", "partition_alloc_base/debug/stack_trace_posix.cc",
"partition_alloc_base/files/file_util.h", "partition_alloc_base/files/file_util.h",
@ -684,9 +711,6 @@ if (is_clang_or_gcc) {
sources += [ "partition_alloc_base/debug/stack_trace_linux.cc" ] sources += [ "partition_alloc_base/debug/stack_trace_linux.cc" ]
} }
if (is_android || is_chromeos_ash) {
sources += [ "partition_alloc_base/time/time_android.cc" ]
}
if (is_apple) { if (is_apple) {
# Request <dlfcn.h> to provide the `dladdr()` function. This is used to # Request <dlfcn.h> to provide the `dladdr()` function. This is used to
# translate address to symbolic information. # translate address to symbolic information.
@ -699,7 +723,9 @@ if (is_clang_or_gcc) {
} else { } else {
sources += [ "partition_alloc_base/time/time_now_posix.cc" ] sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
} }
} else if (is_fuchsia) { }
if (is_fuchsia) {
sources += [ sources += [
"partition_alloc_base/fuchsia/fuchsia_logging.cc", "partition_alloc_base/fuchsia/fuchsia_logging.cc",
"partition_alloc_base/fuchsia/fuchsia_logging.h", "partition_alloc_base/fuchsia/fuchsia_logging.h",
@ -713,6 +739,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/time/time_fuchsia.cc", "partition_alloc_base/time/time_fuchsia.cc",
] ]
} }
if (is_android) { if (is_android) {
# Only android build requires native_library, and native_library depends # Only android build requires native_library, and native_library depends
# on file_path. So file_path is added if is_android = true. # on file_path. So file_path is added if is_android = true.
@ -725,6 +752,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/native_library_posix.cc", "partition_alloc_base/native_library_posix.cc",
] ]
} }
if (is_apple) { if (is_apple) {
# Apple-specific utilities # Apple-specific utilities
sources += [ sources += [
@ -752,18 +780,21 @@ if (is_clang_or_gcc) {
} }
public_deps = [ public_deps = [
":chromecast_buildflags", ":build_config",
":chromeos_buildflags", ":buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
] ]
public_configs = [ ":public_includes" ] public_configs = [ ":public_includes" ]
configs += [ configs += [
":partition_alloc_base_implementation", ":partition_alloc_base_implementation",
"//build/config/compiler:wexit_time_destructors", ":wexit_time_destructors",
] ]
deps = [] deps = []
if (use_partition_alloc_as_malloc_on_win_component_build) {
# We need to add explicit libc++ dependency here because of
# no_default_deps=true.
deps += [ "//buildtools/third_party/libc++:libc++" ]
}
if (is_fuchsia) { if (is_fuchsia) {
public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/fit" ] public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/fit" ]
} }
@ -776,26 +807,24 @@ if (is_clang_or_gcc) {
] ]
} }
configs -= _remove_configs configs -= partition_alloc_remove_configs
configs += _add_configs configs += partition_alloc_add_configs
configs += [ ":dependants_extra_warnings" ] configs += [ ":dependants_extra_warnings" ]
} }
component("allocator_shim") { component("allocator_shim") {
visibility = [ ":*" ]
sources = [] sources = []
deps = [] deps = []
all_dependent_configs = [] all_dependent_configs = []
public_configs = [ ":public_includes" ] public_configs = [ ":public_includes" ]
configs += [ configs += [
":allocator_shim_implementation", ":allocator_shim_implementation",
"//build/config/compiler:wexit_time_destructors", ":wexit_time_destructors",
] ]
frameworks = [] frameworks = []
configs -= _remove_configs configs -= partition_alloc_remove_configs
configs += _add_configs configs += partition_alloc_add_configs
configs += [ ":dependants_extra_warnings" ] configs += [ ":dependants_extra_warnings" ]
shim_headers = [] shim_headers = []
@ -814,14 +843,14 @@ if (is_clang_or_gcc) {
"shim/allocator_shim_dispatch_to_noop_on_free.h", "shim/allocator_shim_dispatch_to_noop_on_free.h",
] ]
if (use_partition_alloc) { if (use_partition_alloc) {
shim_sources += [ shim_sources +=
"shim/allocator_shim_default_dispatch_to_partition_alloc.cc", [ "shim/allocator_shim_default_dispatch_to_partition_alloc.cc" ]
"shim/nonscannable_allocator.cc", shim_headers +=
] [ "shim/allocator_shim_default_dispatch_to_partition_alloc.h" ]
shim_headers += [ }
"shim/allocator_shim_default_dispatch_to_partition_alloc.h", if (enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support) {
"shim/nonscannable_allocator.h", shim_sources += [ "shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.cc" ]
] shim_headers += [ "shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.h" ]
} }
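The shim sources above route malloc/new through PartitionAlloc, with an optional dispatch variant that adds extra checks. Conceptually, the shim is a chain of dispatch tables where each layer may handle a call or forward it to the next one. The sketch below shows that chain idea only; the field names and signatures are simplified assumptions, not the real allocator_shim::AllocatorDispatch API.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct Dispatch {
  void* (*alloc)(const Dispatch* self, size_t size);
  void (*free)(const Dispatch* self, void* ptr);
  const Dispatch* next;  // Next layer; the terminal layer actually allocates.
};

// Terminal layer: the "default dispatch" that performs the allocation.
void* DefaultAlloc(const Dispatch*, size_t size) { return std::malloc(size); }
void DefaultFree(const Dispatch*, void* ptr) { std::free(ptr); }
const Dispatch g_default = {&DefaultAlloc, &DefaultFree, nullptr};

// An inserted layer that adds a check (here: logging), then forwards.
void* CountingAlloc(const Dispatch* self, size_t size) {
  std::printf("alloc(%zu)\n", size);
  return self->next->alloc(self->next, size);
}
void CountingFree(const Dispatch* self, void* ptr) {
  self->next->free(self->next, ptr);
}
const Dispatch g_counting = {&CountingAlloc, &CountingFree, &g_default};

int main() {
  void* p = g_counting.alloc(&g_counting, 32);
  g_counting.free(&g_counting, p);
  return 0;
}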
if (is_android) { if (is_android) {
shim_headers += [ shim_headers += [
@ -855,7 +884,7 @@ if (is_clang_or_gcc) {
# Do not compile with ARC because this target has to interface with # Do not compile with ARC because this target has to interface with
# low-level Objective-C and having ARC would interfere. # low-level Objective-C and having ARC would interfere.
configs -= [ "//build/config/compiler:enable_arc" ] configs -= [ partition_alloc_enable_arc_config ]
} }
} }
if (is_chromeos || is_linux) { if (is_chromeos || is_linux) {
@ -924,6 +953,199 @@ if (is_clang_or_gcc) {
] ]
} }
} # if (is_clang_or_gcc) } # if (is_clang_or_gcc)
# TODO(crbug.com/1151236): After making partition_alloc a standalone library,
# move test code here. i.e. test("partition_alloc_tests") { ... } and # TODO(crbug.com/40158212): Consider supporting building tests outside of
# test("partition_alloc_perftests"). # chromium and having a dedicated 'partition_alloc_unittests' target.
if (build_with_chromium) {
source_set("unittests") {
testonly = true
sources = [ "partition_alloc_base/test/gtest_util.h" ]
if (is_linux || is_chromeos || is_android) {
sources += [
"partition_alloc_base/debug/proc_maps_linux.cc",
"partition_alloc_base/debug/proc_maps_linux.h",
]
}
if (is_android) {
sources += [
"partition_alloc_base/files/file_path_pa_unittest.cc",
"partition_alloc_base/native_library_pa_unittest.cc",
]
}
if (use_partition_alloc) {
sources += [
"address_pool_manager_unittest.cc",
"address_space_randomization_unittest.cc",
"compressed_pointer_unittest.cc",
"freeslot_bitmap_unittest.cc",
"hardening_unittest.cc",
"lightweight_quarantine_unittest.cc",
"memory_reclaimer_unittest.cc",
"page_allocator_unittest.cc",
"partition_alloc_base/bits_pa_unittest.cc",
"partition_alloc_base/component_export_pa_unittest.cc",
"partition_alloc_base/cpu_pa_unittest.cc",
"partition_alloc_base/logging_pa_unittest.cc",
"partition_alloc_base/no_destructor_pa_unittest.cc",
"partition_alloc_base/rand_util_pa_unittest.cc",
"partition_alloc_base/scoped_clear_last_error_pa_unittest.cc",
"partition_alloc_base/strings/cstring_builder_pa_unittest.cc",
"partition_alloc_base/strings/safe_sprintf_pa_unittest.cc",
"partition_alloc_base/strings/string_util_pa_unittest.cc",
"partition_alloc_base/strings/stringprintf_pa_unittest.cc",
"partition_alloc_base/thread_annotations_pa_unittest.cc",
"partition_alloc_unittest.cc",
"partition_lock_unittest.cc",
"reverse_bytes_unittest.cc",
"slot_start_unittest.cc",
"thread_cache_unittest.cc",
"use_death_tests.h",
]
}
if (is_fuchsia) {
sources +=
[ "partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc" ]
}
if (use_allocator_shim) {
sources += [
"shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc",
]
if (is_win) {
sources += [ "shim/winheap_stubs_win_unittest.cc" ]
}
if (is_ios) {
sources += [
"shim/allocator_interception_apple_unittest.mm",
"shim/malloc_zone_functions_apple_unittest.cc",
]
}
}
if ((is_android || is_linux) && target_cpu == "arm64") {
cflags = [
"-Xclang",
"-target-feature",
"-Xclang",
"+mte",
]
}
if (enable_pkeys && partition_alloc_is_debug && !is_component_build) {
# This test requires RELRO, which is not enabled in component builds.
# Also, require a debug build, since we only disable stack protectors in
# debug builds in PartitionAlloc (see below why it's needed).
sources += [ "thread_isolation/pkey_unittest.cc" ]
# We want to test the pkey code without access to memory that is not
# pkey-tagged. This will allow us to catch unintended memory accesses
# that could break our security assumptions. The stack protector reads a
# value from the TLS which won't be pkey-tagged, hence disabling it for
# the test.
configs += [ ":no_stack_protector" ]
}
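The pkey test configuration above relies on Linux memory protection keys: memory is tagged with a key, and each thread's access rights for that key can be flipped without changing page tables, so any read through a restricted key faults immediately. That is also why the stack protector is disabled for this test: its TLS cookie read goes to memory that is not tagged with the isolated key and would fault once access to the default key is dropped. The sketch below shows only the generic primitive, assuming a pkey-capable x86-64 Linux kernel and glibc 2.27+; it is not PartitionAlloc's thread_isolation code.

#include <cstdio>
#include <cstring>
#if defined(__linux__)
#include <sys/mman.h>
#endif

int main() {
#if defined(__linux__) && defined(PKEY_DISABLE_ACCESS)
  int pkey = pkey_alloc(0, 0);
  if (pkey < 0) {
    std::perror("pkey_alloc");  // Kernel or CPU without pkey support.
    return 0;
  }
  void* page = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    return 1;
  }
  pkey_mprotect(page, 4096, PROT_READ | PROT_WRITE, pkey);  // Tag the page.
  std::memset(page, 0, 4096);           // Allowed while access rights are on.
  pkey_set(pkey, PKEY_DISABLE_ACCESS);  // Now any touch of memory tagged with
                                        // `pkey` faults on this thread.
  pkey_set(pkey, 0);                    // Restore access before cleanup.
  pkey_free(pkey);
  munmap(page, 4096);
#endif
  return 0;
}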
frameworks = []
if (is_mac) {
frameworks += [
"Foundation.framework",
"OpenCL.framework",
]
}
deps = [
":partition_alloc",
":test_support",
"//testing/gmock",
"//testing/gtest",
]
configs -= partition_alloc_remove_configs
configs += partition_alloc_add_configs
}
}
source_set("test_support") {
testonly = true
sources = [
"extended_api.cc",
"extended_api.h",
"partition_alloc_base/threading/platform_thread_for_testing.h",
"partition_alloc_for_testing.h",
"pointers/raw_ptr_counting_impl_for_test.h",
]
if (is_posix) {
sources += [
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_fuchsia) {
sources += [
"partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc",
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_win) {
sources +=
[ "partition_alloc_base/threading/platform_thread_win_for_testing.cc" ]
}
if (is_apple) {
sources += [
"partition_alloc_base/threading/platform_thread_apple_for_testing.mm",
]
}
if (is_linux || is_chromeos) {
sources += [
"partition_alloc_base/threading/platform_thread_linux_for_testing.cc",
]
}
if (is_android) {
sources += [
"partition_alloc_base/threading/platform_thread_android_for_testing.cc",
]
}
public_deps = [
":arm_bti_testfunctions",
":buildflags",
":partition_alloc",
":raw_ptr",
]
public_configs = []
if (is_fuchsia) {
public_deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_hlcpp",
"//third_party/fuchsia-sdk/sdk/pkg/fit",
"//third_party/fuchsia-sdk/sdk/pkg/sync",
"//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/zx",
]
# Needed for users of spinning_mutex.h, which for performance reasons,
# contains inlined calls to `libsync` inside the header file.
# It appends an entry to the "libs" section of the dependent target.
public_configs += [ ":fuchsia_sync_lib" ]
}
configs -= partition_alloc_remove_configs
configs += partition_alloc_add_configs
}
source_set("arm_bti_testfunctions") {
testonly = true
sources = []
if (target_cpu == "arm64" && (is_linux || is_android)) {
sources = [
"arm_bti_test_functions.S",
"arm_bti_test_functions.h",
]
}
}

View file

@ -0,0 +1,51 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_AARCH64_SUPPORT_H_
#define PARTITION_ALLOC_AARCH64_SUPPORT_H_
#include <stdint.h>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#if defined(__MUSL__)
// Musl does not support ifunc.
#elif PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX)
#define HAS_HW_CAPS
#endif
#if PA_BUILDFLAG(PA_ARCH_CPU_ARM64) && defined(HAS_HW_CAPS)
#include <asm/hwcap.h>
#include <sys/ifunc.h>
#else
struct __ifunc_arg_t;
#endif
namespace partition_alloc::internal {
constexpr bool IsBtiEnabled(uint64_t ifunc_hwcap,
struct __ifunc_arg_t* ifunc_hw) {
#if PA_BUILDFLAG(PA_ARCH_CPU_ARM64) && defined(HAS_HW_CAPS)
return (ifunc_hwcap & _IFUNC_ARG_HWCAP) && (ifunc_hw->_hwcap2 & HWCAP2_BTI);
#else
return false;
#endif
}
constexpr bool IsMteEnabled(uint64_t ifunc_hwcap,
struct __ifunc_arg_t* ifunc_hw) {
#if PA_BUILDFLAG(PA_ARCH_CPU_ARM64) && defined(HAS_HW_CAPS) && \
PA_BUILDFLAG(HAS_MEMORY_TAGGING)
return (ifunc_hwcap & _IFUNC_ARG_HWCAP) && (ifunc_hw->_hwcap2 & HWCAP2_MTE);
#else
return false;
#endif
}
} // namespace partition_alloc::internal
#undef HAS_HW_CAPS
#endif // PARTITION_ALLOC_AARCH64_SUPPORT_H_
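The helpers in this new header take the hardware capability bits as arguments because they are meant to be called from an ifunc resolver, which may run before the process is fully relocated and therefore cannot safely query them itself. Outside of resolvers, the same HWCAP2 bits can be read later with getauxval(); the snippet below is a generic illustration of that, not PartitionAlloc code.

#include <cstdio>
#if defined(__aarch64__) && defined(__linux__)
#include <asm/hwcap.h>
#include <sys/auxv.h>
#endif

bool CpuHasBti() {
#if defined(__aarch64__) && defined(__linux__) && defined(HWCAP2_BTI)
  return (getauxval(AT_HWCAP2) & HWCAP2_BTI) != 0;
#else
  return false;
#endif
}

bool CpuHasMte() {
#if defined(__aarch64__) && defined(__linux__) && defined(HWCAP2_MTE)
  return (getauxval(AT_HWCAP2) & HWCAP2_MTE) != 0;
#else
  return false;
#endif
}

int main() {
  std::printf("BTI: %d  MTE: %d\n", CpuHasBti(), CpuHasMte());
  return 0;
}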

View file

@ -9,25 +9,24 @@
#include <cstdint> #include <cstdint>
#include <limits> #include <limits>
#include "build/build_config.h"
#include "partition_alloc/address_space_stats.h" #include "partition_alloc/address_space_stats.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator.h" #include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h" #include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/notreached.h" #include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/reservation_offset_table.h" #include "partition_alloc/reservation_offset_table.h"
#include "partition_alloc/thread_isolation/alignment.h" #include "partition_alloc/thread_isolation/alignment.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(ENABLE_THREAD_ISOLATION) #if PA_BUILDFLAG(IS_APPLE) || PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
#include <sys/mman.h> #include <sys/mman.h>
#endif #endif
namespace partition_alloc::internal { namespace partition_alloc::internal {
AddressPoolManager AddressPoolManager::singleton_; PA_CONSTINIT AddressPoolManager AddressPoolManager::singleton_;
// static // static
AddressPoolManager& AddressPoolManager::GetInstance() { AddressPoolManager& AddressPoolManager::GetInstance() {
@ -40,7 +39,7 @@ constexpr PageTag kPageTag = PageTag::kPartitionAlloc;
} // namespace } // namespace
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_BUILDFLAG(HAS_64_BIT_POINTERS)
namespace { namespace {
@ -125,7 +124,7 @@ void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
PA_CHECK(!(ptr & kSuperPageOffsetMask)); PA_CHECK(!(ptr & kSuperPageOffsetMask));
PA_CHECK(!(length & kSuperPageOffsetMask)); PA_CHECK(!(length & kSuperPageOffsetMask));
address_begin_ = ptr; address_begin_ = ptr;
#if BUILDFLAG(PA_DCHECK_IS_ON) #if PA_BUILDFLAG(DCHECKS_ARE_ON)
address_end_ = ptr + length; address_end_ = ptr + length;
PA_DCHECK(address_begin_ < address_end_); PA_DCHECK(address_begin_ < address_end_);
#endif #endif
@ -204,7 +203,7 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
bit_hint_ = end_bit; bit_hint_ = end_bit;
} }
uintptr_t address = address_begin_ + beg_bit * kSuperPageSize; uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if BUILDFLAG(PA_DCHECK_IS_ON) #if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(address + requested_size <= address_end_); PA_DCHECK(address + requested_size <= address_end_);
#endif #endif
return address; return address;
@ -246,7 +245,7 @@ void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
PA_DCHECK(!(free_size & kSuperPageOffsetMask)); PA_DCHECK(!(free_size & kSuperPageOffsetMask));
PA_DCHECK(address_begin_ <= address); PA_DCHECK(address_begin_ <= address);
#if BUILDFLAG(PA_DCHECK_IS_ON) #if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(address + free_size <= address_end_); PA_DCHECK(address + free_size <= address_end_);
#endif #endif
@ -301,19 +300,19 @@ void AddressPoolManager::GetPoolStats(const pool_handle handle,
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) { bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
// Get 64-bit pool stats. // Get 64-bit pool stats.
GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats); GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats); GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats);
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (IsConfigurablePoolAvailable()) { if (IsConfigurablePoolAvailable()) {
GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats); GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
} }
#if BUILDFLAG(ENABLE_THREAD_ISOLATION) #if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
GetPoolStats(kThreadIsolatedPoolHandle, &stats->thread_isolated_pool_stats); GetPoolStats(kThreadIsolatedPoolHandle, &stats->thread_isolated_pool_stats);
#endif #endif
return true; return true;
} }
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
static_assert( static_assert(
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap == kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
@ -380,7 +379,7 @@ void AddressPoolManager::MarkUsed(pool_handle handle,
size_t length) { size_t length) {
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock()); ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used. // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (handle == kBRPPoolHandle) { if (handle == kBRPPoolHandle) {
PA_DCHECK( PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0); (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
@ -412,7 +411,7 @@ void AddressPoolManager::MarkUsed(pool_handle handle,
(length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) - (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap); AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
} else } else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{ {
PA_DCHECK(handle == kRegularPoolHandle); PA_DCHECK(handle == kRegularPoolHandle);
PA_DCHECK( PA_DCHECK(
@ -434,7 +433,7 @@ void AddressPoolManager::MarkUnused(pool_handle handle,
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock()); ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used. // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (handle == kBRPPoolHandle) { if (handle == kBRPPoolHandle) {
PA_DCHECK( PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0); (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
@ -449,7 +448,7 @@ void AddressPoolManager::MarkUnused(pool_handle handle,
(length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) - (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap); AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
} else } else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{ {
PA_DCHECK(handle == kRegularPoolHandle); PA_DCHECK(handle == kRegularPoolHandle);
PA_DCHECK( PA_DCHECK(
@ -520,7 +519,7 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
// Get 32-bit pool usage. // Get 32-bit pool usage.
stats->regular_pool_stats.usage = stats->regular_pool_stats.usage =
CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage); CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
static_assert( static_assert(
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap == kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
0, 0,
@ -542,11 +541,11 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
stats->blocklist_hit_count = stats->blocklist_hit_count =
AddressPoolManagerBitmap::blocklist_hit_count_.load( AddressPoolManagerBitmap::blocklist_hit_count_.load(
std::memory_order_relaxed); std::memory_order_relaxed);
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
return true; return true;
} }
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) { void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
AddressSpaceStats stats{}; AddressSpaceStats stats{};
@ -555,9 +554,9 @@ void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
} }
} }
#if BUILDFLAG(ENABLE_THREAD_ISOLATION) #if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields // This function just exists to static_assert the layout of the private fields
// in Pool. // in Pool. It is never called.
void AddressPoolManager::AssertThreadIsolatedLayout() { void AddressPoolManager::AssertThreadIsolatedLayout() {
constexpr size_t last_pool_offset = constexpr size_t last_pool_offset =
offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1); offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1);
@ -566,6 +565,6 @@ void AddressPoolManager::AssertThreadIsolatedLayout() {
static_assert(alloc_bitset_offset % PA_THREAD_ISOLATED_ALIGN_SZ == 0); static_assert(alloc_bitset_offset % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
static_assert(sizeof(AddressPoolManager) % PA_THREAD_ISOLATED_ALIGN_SZ == 0); static_assert(sizeof(AddressPoolManager) % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
} }
#endif // BUILDFLAG(ENABLE_THREAD_ISOLATION) #endif // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
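On 32-bit builds (the !HAS_64_BIT_POINTERS branch above), pool membership is tracked with plain bitmaps: one bit per super page, set when a region is reserved and tested by IsManagedByRegularPool()/IsManagedByBRPPool(). A simplified, self-contained sketch of that bookkeeping follows; the constants are chosen for illustration, and the real code adds guard bits, a lock, and a separate BRP bitmap.

#include <bitset>
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kSuperPageShift = 21;  // 2 MiB super pages.
constexpr uintptr_t kSuperPageSize = uintptr_t{1} << kSuperPageShift;
constexpr uint64_t kAddressSpaceSize = uint64_t{1} << 32;
constexpr size_t kBits = kAddressSpaceSize / kSuperPageSize;

std::bitset<kBits> pool_bits;  // One bit per super page of the address space.

void MarkUsed(uintptr_t address, size_t length) {
  for (uintptr_t a = address; a < address + length; a += kSuperPageSize) {
    pool_bits.set(a >> kSuperPageShift);
  }
}

bool IsManaged(uintptr_t address) {
  return pool_bits.test(address >> kSuperPageShift);
}

int main() {
  MarkUsed(0x40000000u, 4 * kSuperPageSize);
  std::printf("0x40000000 managed: %d\n", IsManaged(0x40000000u));
  std::printf("0x80000000 managed: %d\n", IsManaged(0x80000000u));
  return 0;
}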

View file

@ -2,27 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_ #ifndef PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_ #define PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_
#include <bitset> #include <bitset>
#include <limits> #include <limits>
#include "build/build_config.h"
#include "partition_alloc/address_pool_manager_types.h" #include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_address_space.h" #include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h" #include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h" #include "partition_alloc/partition_lock.h"
#include "partition_alloc/thread_isolation/alignment.h" #include "partition_alloc/thread_isolation/alignment.h"
#include "partition_alloc/thread_isolation/thread_isolation.h" #include "partition_alloc/thread_isolation/thread_isolation.h"
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
#include "partition_alloc/address_pool_manager_bitmap.h" #include "partition_alloc/address_pool_manager_bitmap.h"
#endif #endif
@ -59,7 +58,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
AddressPoolManager(const AddressPoolManager&) = delete; AddressPoolManager(const AddressPoolManager&) = delete;
AddressPoolManager& operator=(const AddressPoolManager&) = delete; AddressPoolManager& operator=(const AddressPoolManager&) = delete;
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_BUILDFLAG(HAS_64_BIT_POINTERS)
void Add(pool_handle handle, uintptr_t address, size_t length); void Add(pool_handle handle, uintptr_t address, size_t length);
void Remove(pool_handle handle); void Remove(pool_handle handle);
@ -69,7 +68,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
// Return the base address of a pool. // Return the base address of a pool.
uintptr_t GetPoolBaseAddress(pool_handle handle); uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
// Reserves address space from the pool. // Reserves address space from the pool.
uintptr_t Reserve(pool_handle handle, uintptr_t Reserve(pool_handle handle,
@ -82,7 +81,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
size_t length); size_t length);
void ResetForTesting(); void ResetForTesting();
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
void MarkUsed(pool_handle handle, uintptr_t address, size_t size); void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
void MarkUnused(pool_handle handle, uintptr_t address, size_t size); void MarkUnused(pool_handle handle, uintptr_t address, size_t size);
@ -93,13 +92,13 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
static bool IsManagedByBRPPool(uintptr_t address) { static bool IsManagedByBRPPool(uintptr_t address) {
return AddressPoolManagerBitmap::IsManagedByBRPPool(address); return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
} }
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) #endif // !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
void DumpStats(AddressSpaceStatsDumper* dumper); void DumpStats(AddressSpaceStatsDumper* dumper);
private: private:
friend class AddressPoolManagerForTesting; friend class AddressPoolManagerForTesting;
#if BUILDFLAG(ENABLE_THREAD_ISOLATION) #if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// If we use a thread isolated pool, we need to write-protect its metadata. // If we use a thread isolated pool, we need to write-protect its metadata.
// Allow the function to get access to the pool pointer. // Allow the function to get access to the pool pointer.
friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption); friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
@ -113,11 +112,13 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
// if PartitionAlloc is wholly unused in this process.) // if PartitionAlloc is wholly unused in this process.)
bool GetStats(AddressSpaceStats* stats); bool GetStats(AddressSpaceStats* stats);
#if BUILDFLAG(ENABLE_THREAD_ISOLATION) #if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields
// in Pool. It is never called.
static void AssertThreadIsolatedLayout(); static void AssertThreadIsolatedLayout();
#endif // BUILDFLAG(ENABLE_THREAD_ISOLATION) #endif // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_BUILDFLAG(HAS_64_BIT_POINTERS)
class Pool { class Pool {
public: public:
@ -162,14 +163,14 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
size_t total_bits_ = 0; size_t total_bits_ = 0;
uintptr_t address_begin_ = 0; uintptr_t address_begin_ = 0;
#if BUILDFLAG(PA_DCHECK_IS_ON) #if PA_BUILDFLAG(DCHECKS_ARE_ON)
uintptr_t address_end_ = 0; uintptr_t address_end_ = 0;
#endif #endif
#if BUILDFLAG(ENABLE_THREAD_ISOLATION) #if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
friend class AddressPoolManager; friend class AddressPoolManager;
friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption); friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
#endif // BUILDFLAG(ENABLE_THREAD_ISOLATION) #endif // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
}; };
PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) { PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
@ -199,11 +200,11 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
#endif #endif
Pool pools_[kNumPools]; Pool pools_[kNumPools];
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
static PA_CONSTINIT AddressPoolManager singleton_; PA_CONSTINIT static AddressPoolManager singleton_;
}; };
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_ #endif // PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_

View file

@ -4,10 +4,10 @@
#include "partition_alloc/address_pool_manager_bitmap.h" #include "partition_alloc/address_pool_manager_bitmap.h"
#include "partition_alloc/partition_alloc_buildflags.h" #include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -25,13 +25,13 @@ std::bitset<AddressPoolManagerBitmap::kRegularPoolBits>
AddressPoolManagerBitmap::regular_pool_bits_; // GUARDED_BY(GetLock()) AddressPoolManagerBitmap::regular_pool_bits_; // GUARDED_BY(GetLock())
std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> std::bitset<AddressPoolManagerBitmap::kBRPPoolBits>
AddressPoolManagerBitmap::brp_pool_bits_; // GUARDED_BY(GetLock()) AddressPoolManagerBitmap::brp_pool_bits_; // GUARDED_BY(GetLock())
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
std::array<std::atomic_bool, std::array<std::atomic_bool,
AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize> AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
AddressPoolManagerBitmap::brp_forbidden_super_page_map_; AddressPoolManagerBitmap::brp_forbidden_super_page_map_;
std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_; std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) #endif // !PA_BUILDFLAG(HAS_64_BIT_POINTERS)

View file

@ -2,23 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_ #ifndef PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_ #define PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_
#include <array> #include <array>
#include <atomic> #include <atomic>
#include <bitset> #include <bitset>
#include <limits> #include <limits>
#include "build/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h" #include "partition_alloc/partition_lock.h"
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
namespace partition_alloc { namespace partition_alloc {
@ -102,7 +102,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap]; brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap];
} }
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
static void BanSuperPageFromBRPPool(uintptr_t address) { static void BanSuperPageFromBRPPool(uintptr_t address) {
brp_forbidden_super_page_map_[address >> kSuperPageShift].store( brp_forbidden_super_page_map_[address >> kSuperPageShift].store(
true, std::memory_order_relaxed); true, std::memory_order_relaxed);
@ -126,7 +126,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
} }
static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; } static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; }
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
private: private:
friend class AddressPoolManager; friend class AddressPoolManager;
@ -136,11 +136,11 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
static std::bitset<kRegularPoolBits> regular_pool_bits_ static std::bitset<kRegularPoolBits> regular_pool_bits_
PA_GUARDED_BY(GetLock()); PA_GUARDED_BY(GetLock());
static std::bitset<kBRPPoolBits> brp_pool_bits_ PA_GUARDED_BY(GetLock()); static std::bitset<kBRPPoolBits> brp_pool_bits_ PA_GUARDED_BY(GetLock());
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize> static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
brp_forbidden_super_page_map_; brp_forbidden_super_page_map_;
static std::atomic_size_t blocklist_hit_count_; static std::atomic_size_t blocklist_hit_count_;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
}; };
} // namespace internal } // namespace internal
@ -150,11 +150,11 @@ PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used. // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
// No need to add IsManagedByConfigurablePool, because Configurable Pool // No need to add IsManagedByConfigurablePool, because Configurable Pool
// doesn't exist on 32-bit. // doesn't exist on 32-bit.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if !PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)); PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
#endif #endif
return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address) return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|| internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address) || internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
#endif #endif
; ;
@ -184,6 +184,6 @@ PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
} // namespace partition_alloc } // namespace partition_alloc
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) #endif // !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_ #endif // PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_

View file

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_ #ifndef PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_ #define PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -11,4 +11,4 @@ enum pool_handle : unsigned;
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_ #endif // PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_

View file

@ -4,12 +4,12 @@
#include "partition_alloc/address_space_randomization.h" #include "partition_alloc/address_space_randomization.h"
#include "build/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h" #include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_check.h" #include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/random.h" #include "partition_alloc/random.h"
#if BUILDFLAG(IS_WIN) #if PA_BUILDFLAG(IS_WIN)
#include <windows.h> #include <windows.h>
#endif #endif
@ -18,7 +18,7 @@ namespace partition_alloc {
uintptr_t GetRandomPageBase() { uintptr_t GetRandomPageBase() {
uintptr_t random = static_cast<uintptr_t>(internal::RandomValue()); uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_BUILDFLAG(HAS_64_BIT_POINTERS)
random <<= 32ULL; random <<= 32ULL;
random |= static_cast<uintptr_t>(internal::RandomValue()); random |= static_cast<uintptr_t>(internal::RandomValue());
@ -26,8 +26,8 @@ uintptr_t GetRandomPageBase() {
// OS and build configuration. // OS and build configuration.
random &= internal::ASLRMask(); random &= internal::ASLRMask();
random += internal::ASLROffset(); random += internal::ASLROffset();
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_WIN) #if PA_BUILDFLAG(IS_WIN)
// On win32 host systems the randomization plus huge alignment causes // On win32 host systems the randomization plus huge alignment causes
// excessive fragmentation. Plus most of these systems lack ASLR, so the // excessive fragmentation. Plus most of these systems lack ASLR, so the
// randomization isn't buying anything. In that case we just skip it. // randomization isn't buying anything. In that case we just skip it.
@ -39,10 +39,10 @@ uintptr_t GetRandomPageBase() {
if (!is_wow64) { if (!is_wow64) {
return 0; return 0;
} }
#endif // BUILDFLAG(IS_WIN) #endif // PA_BUILDFLAG(IS_WIN)
random &= internal::ASLRMask(); random &= internal::ASLRMask();
random += internal::ASLROffset(); random += internal::ASLROffset();
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask())); PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
return random; return random;

View file

@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_ #ifndef PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_ #define PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_
#include <cstdint> #include <cstdint>
#include "build/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/page_allocator_constants.h" #include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
@ -36,7 +36,7 @@ AslrMask(uintptr_t bits) {
// //
// clang-format off // clang-format off
#if defined(ARCH_CPU_64_BITS) #if PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
@ -54,7 +54,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x7e8000000000ULL); return AslrAddress(0x7e8000000000ULL);
} }
#elif BUILDFLAG(IS_WIN) #elif PA_BUILDFLAG(IS_WIN)
// Windows 8.10 and newer support the full 48 bit address range. Since // Windows 8.10 and newer support the full 48 bit address range. Since
// ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See // ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See
@ -67,7 +67,7 @@ AslrMask(uintptr_t bits) {
return 0x80000000ULL; return 0x80000000ULL;
} }
#elif BUILDFLAG(IS_APPLE) #elif PA_BUILDFLAG(IS_APPLE)
// macOS as of 10.12.5 does not clean up entries in page map levels 3/4 // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
// [PDP/PML4] created from mmap or mach_vm_allocate, even after the region // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
@ -80,8 +80,8 @@ AslrMask(uintptr_t bits) {
// https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
// is that here we clamp to 39 bits, not 32. // is that here we clamp to 39 bits, not 32.
// //
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior // TODO(crbug.com/40528509): Remove this limitation if/when the macOS
// changes. // behavior changes.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
ASLRMask() { ASLRMask() {
return AslrMask(38); return AslrMask(38);
@ -98,23 +98,22 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x10000000000ULL); return AslrAddress(0x10000000000ULL);
} }
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA) #elif PA_BUILDFLAG(IS_POSIX) || PA_BUILDFLAG(IS_FUCHSIA)
#if defined(ARCH_CPU_X86_64) #if PA_BUILDFLAG(PA_ARCH_CPU_X86_64)
// Linux (and macOS) support the full 47-bit user space of x64 processors. // Linux (and macOS) support the full 47-bit user space of x64 processors.
// Use only 46 to allow the kernel a chance to fulfill the request. // Use only 46 to allow the kernel a chance to fulfill the request.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
ASLRMask() {
return AslrMask(46); return AslrMask(46);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
ASLROffset() {
return AslrAddress(0); return AslrAddress(0);
} }
#elif defined(ARCH_CPU_ARM64) #elif PA_BUILDFLAG(IS_ANDROID) && (PA_BUILDFLAG(PA_ARCH_CPU_ARM64) || PA_BUILDFLAG(PA_ARCH_CPU_RISCV64))
#if BUILDFLAG(IS_ANDROID)
// Restrict the address range on Android to avoid a large performance // Restrict the address range on Android to avoid a large performance
// regression in single-process WebViews. See https://crbug.com/837640. // regression in single-process WebViews. See https://crbug.com/837640.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
@ -125,8 +124,8 @@ AslrMask(uintptr_t bits) {
ASLROffset() { ASLROffset() {
return AslrAddress(0x20000000ULL); return AslrAddress(0x20000000ULL);
} }
#elif PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
#elif BUILDFLAG(IS_LINUX) #if PA_BUILDFLAG(IS_LINUX)
// Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on // Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on
// page size and number of levels of translation pages used. We use // page size and number of levels of translation pages used. We use
@ -154,9 +153,9 @@ AslrMask(uintptr_t bits) {
#endif #endif
#elif defined(ARCH_CPU_PPC64) #elif PA_BUILDFLAG(PA_ARCH_CPU_PPC64)
#if BUILDFLAG(IS_AIX) #if PA_BUILDFLAG(IS_AIX)
// AIX has 64 bits of virtual addressing, but we limit the address range // AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
@ -168,7 +167,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x400000000000ULL); return AslrAddress(0x400000000000ULL);
} }
#elif defined(ARCH_CPU_BIG_ENDIAN) #elif PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN)
// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42. // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
@ -178,9 +177,19 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0); return AslrAddress(0);
} }
#else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN) #else // !PA_BUILDFLAG(IS_AIX) && !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN)
#if PA_BUILDFLAG(IS_LINUX)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46. // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t ASLRMask() {
return AslrMask(46);
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t ASLROffset() {
return AslrAddress(0);
}
#else
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(46); return AslrMask(46);
} }
@ -188,9 +197,11 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0); return AslrAddress(0);
} }
#endif // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN) #endif
#elif defined(ARCH_CPU_S390X) #endif // !PA_BUILDFLAG(IS_AIX) && !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN)
#elif PA_BUILDFLAG(PA_ARCH_CPU_S390X)
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
@ -202,7 +213,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0); return AslrAddress(0);
} }
#elif defined(ARCH_CPU_S390) #elif PA_BUILDFLAG(PA_ARCH_CPU_S390)
// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request. // a chance to fulfill the request.
@ -213,15 +224,16 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0); return AslrAddress(0);
} }
#else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) && #else // !PA_BUILDFLAG(PA_ARCH_CPU_X86_64) && !PA_BUILDFLAG(PA_ARCH_CPU_PPC64) &&
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390) // !PA_BUILDFLAG(PA_ARCH_CPU_S390X) && !PA_BUILDFLAG(PA_ARCH_CPU_S390)
// For all other POSIX variants, use 30 bits. // For all other POSIX variants, use 30 bits.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
ASLRMask() {
return AslrMask(30); return AslrMask(30);
} }
#if BUILDFLAG(IS_SOLARIS) #if PA_BUILDFLAG(IS_SOLARIS)
// For our Solaris/illumos mmap hint, we pick a random address in the // For our Solaris/illumos mmap hint, we pick a random address in the
// bottom half of the top half of the address space (that is, the third // bottom half of the top half of the address space (that is, the third
@ -237,7 +249,7 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x80000000ULL); return AslrAddress(0x80000000ULL);
} }
#elif BUILDFLAG(IS_AIX) #elif PA_BUILDFLAG(IS_AIX)
// The range 0x30000000 - 0xD0000000 is available on AIX; choose the // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range. // upper range.
@ -245,23 +257,24 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0x90000000ULL); return AslrAddress(0x90000000ULL);
} }
#else // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX) #else // !PA_BUILDFLAG(IS_SOLARIS) && !PA_BUILDFLAG(IS_AIX)
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
// 10.6 and 10.7. // 10.6 and 10.7.
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
ASLROffset() {
return AslrAddress(0x20000000ULL); return AslrAddress(0x20000000ULL);
} }
#endif // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX) #endif // !PA_BUILDFLAG(IS_SOLARIS) && !PA_BUILDFLAG(IS_AIX)
#endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) && #endif // !PA_BUILDFLAG(PA_ARCH_CPU_X86_64) && !PA_BUILDFLAG(PA_ARCH_CPU_PPC64) &&
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390) // !PA_BUILDFLAG(PA_ARCH_CPU_S390X) && !PA_BUILDFLAG(PA_ARCH_CPU_S390)
#endif // BUILDFLAG(IS_POSIX) #endif // PA_BUILDFLAG(IS_POSIX)
#elif defined(ARCH_CPU_32_BITS) #elif PA_BUILDFLAG(PA_ARCH_CPU_32_BITS)
// This is a good range on 32-bit Windows and Android (the only platforms on // This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
@ -277,7 +290,7 @@ AslrMask(uintptr_t bits) {
#error Please tell us about your exotic hardware! Sounds interesting. #error Please tell us about your exotic hardware! Sounds interesting.
#endif // defined(ARCH_CPU_32_BITS) #endif // PA_BUILDFLAG(PA_ARCH_CPU_32_BITS)
// clang-format on // clang-format on
@ -285,4 +298,4 @@ AslrMask(uintptr_t bits) {
} // namespace partition_alloc } // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_ #endif // PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_
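A worked example of how these constants combine (an editorial sketch, not part of the diff): assuming, as the names suggest, that AslrMask(bits) expands to ((1ULL << bits) - 1) and that the randomized mapping hint is computed as (random & ASLRMask()) + ASLROffset(), the generic POSIX case above (30-bit mask, 0x20000000 offset) lands in exactly the range the comments describe.

#include <cstdint>

// Assumed shapes; the real AslrMask()/AslrAddress() definitions are outside
// the hunks shown here.
constexpr uint64_t Mask(int bits) { return (1ULL << bits) - 1; }
constexpr uint64_t Hint(uint64_t random, uint64_t mask, uint64_t offset) {
  return (random & mask) + offset;
}

// 30-bit mask plus 0x20000000 offset: hints span 0x20000000 .. 0x5FFFFFFF,
// i.e. the 0x20000000 - 0x60000000 range called out in the comments.
static_assert(Hint(0, Mask(30), 0x20000000ULL) == 0x20000000ULL, "");
static_assert(Hint(~0ULL, Mask(30), 0x20000000ULL) == 0x5FFFFFFFULL, "");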

View file

@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_ #ifndef PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_ #define PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_
#include <cstddef> #include <cstddef>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
namespace partition_alloc { namespace partition_alloc {
@ -19,25 +19,25 @@ struct PoolStats {
// On 32-bit, pools are mainly logical entities, intermingled with // On 32-bit, pools are mainly logical entities, intermingled with
// allocations not managed by PartitionAlloc. The "largest available // allocations not managed by PartitionAlloc. The "largest available
// reservation" is not possible to measure in that case. // reservation" is not possible to measure in that case.
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_BUILDFLAG(HAS_64_BIT_POINTERS)
size_t largest_available_reservation = 0; size_t largest_available_reservation = 0;
#endif #endif
}; };
struct AddressSpaceStats { struct AddressSpaceStats {
PoolStats regular_pool_stats; PoolStats regular_pool_stats;
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PoolStats brp_pool_stats; PoolStats brp_pool_stats;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_BUILDFLAG(HAS_64_BIT_POINTERS)
PoolStats configurable_pool_stats; PoolStats configurable_pool_stats;
#else #else
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
size_t blocklist_size; // measured in super pages size_t blocklist_size; // measured in super pages
size_t blocklist_hit_count; size_t blocklist_hit_count;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(ENABLE_THREAD_ISOLATION) #if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
PoolStats thread_isolated_pool_stats; PoolStats thread_isolated_pool_stats;
#endif #endif
}; };
@ -52,4 +52,4 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressSpaceStatsDumper {
} // namespace partition_alloc } // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_ #endif // PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_

View file

@ -3,6 +3,7 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "partition_alloc/allocation_guard.h" #include "partition_alloc/allocation_guard.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h" #include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"

View file

@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ALLOCATION_GUARD_H_ #ifndef PARTITION_ALLOC_ALLOCATION_GUARD_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ALLOCATION_GUARD_H_ #define PARTITION_ALLOC_ALLOCATION_GUARD_H_
#include "build/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"
@ -46,4 +46,4 @@ using ::partition_alloc::ScopedDisallowAllocations;
} // namespace base::internal } // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ALLOCATION_GUARD_H_ #endif // PARTITION_ALLOC_ALLOCATION_GUARD_H_

View file

@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_ #ifndef PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_ #define PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_
#include "build/build_config.h" #include "partition_alloc/build_config.h"
#if defined(ARCH_CPU_ARM64) #if PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
extern "C" { extern "C" {
/** /**
* A valid BTI function. Jumping to this function should not cause any problem in * A valid BTI function. Jumping to this function should not cause any problem in
@ -26,6 +26,6 @@ int64_t arm_bti_test_function_invalid_offset(int64_t);
**/ **/
void arm_bti_test_function_end(void); void arm_bti_test_function_end(void);
} }
#endif // defined(ARCH_CPU_ARM64) #endif // PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_ #endif // PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_

View file

@ -0,0 +1,508 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_BUILD_CONFIG_H_
#define PARTITION_ALLOC_BUILD_CONFIG_H_
// This file is derived from chromium's //build/build_config.h.
//
// Differences:
// - Only the definitions used by partition_alloc are included.
// - The definitions can only be consumed via the PA_BUILDFLAG(...) macro. This
//   avoids silent failures when developers forget to include this file, and
//   removes the need for a PRESUBMIT.py check to enforce its inclusion.
//
//
// This file contains the following definitions:
//
// Operating system:
// IS_IOS / IS_AIX / IS_ASMJS / IS_FREEBSD / IS_FUCHSIA / IS_LINUX / IS_MAC /
// IS_NACL / IS_NETBSD / IS_OPENBSD / IS_QNX / IS_SOLARIS / IS_WIN
//
// Operating system family:
// IS_APPLE / IS_BSD / IS_POSIX
//
// Compiler:
// PA_COMPILER_GCC / PA_COMPILER_MSVC
//
// Processor:
// PA_ARCH_CPU_ARM64 / PA_ARCH_CPU_ARMEL / PA_ARCH_CPU_BIG_ENDIAN /
// PA_ARCH_CPU_LITTLE_ENDIAN / PA_ARCH_CPU_MIPS / PA_ARCH_CPU_MIPS64 /
// PA_ARCH_CPU_MIPS64EL / PA_ARCH_CPU_MIPSEL / PA_ARCH_CPU_PPC64 /
// PA_ARCH_CPU_RISCV64 / PA_ARCH_CPU_S390 / PA_ARCH_CPU_S390X /
// PA_ARCH_CPU_X86 / PA_ARCH_CPU_X86_64
//
// Processor Family:
// PA_ARCH_CPU_32_BITS / PA_ARCH_CPU_64_BITS / PA_ARCH_CPU_ARM_FAMILY /
// PA_ARCH_CPU_LOONGARCH64 / PA_ARCH_CPU_PPC64_FAMILY /
// PA_ARCH_CPU_S390_FAMILY / PA_ARCH_CPU_X86_FAMILY
//
// Standard library:
// PA_LIBC_GLIBC
// Definition of PA_BUILDFLAG(...) macro.
#include "partition_alloc/buildflag.h" // IWYU pragma: export
// Definition of PA_BUILDFLAG(IS_CHROMEOS).
#include "partition_alloc/buildflags.h" // IWYU pragma: export
// Clangd does not detect PA_BUILDFLAG_INTERNAL_* indirect usage, so mark the
// header as "always_keep" to avoid an "unused include" warning.
//
// IWYU pragma: always_keep
// A set of macros to use for platform detection.
#if defined(__native_client__)
// __native_client__ must be first, so that other IS_ defines are not set.
#define PA_IS_NACL
#elif PA_BUILDFLAG(IS_ANDROID)
// The IS_ANDROID PA_BUILDFLAG macro is defined in buildflags.h.
//
// PartitionAlloc's embedders (Chromium, Dawn, Pdfium, Skia) define different
// macros for Android builds: "ANDROID" or "SK_BUILD_FOR_ANDROID".
//
// To avoid relying on these external definitions, PartitionAlloc uses its own
// dedicated build flag.
#elif defined(__APPLE__)
// Only include TargetConditionals after testing ANDROID as some Android builds
// on the Mac have this header available and it's not needed unless the target
// is really an Apple platform.
#include <TargetConditionals.h>
#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
#define PA_IS_IOS
#else
#define PA_IS_MAC
#endif // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
#elif defined(__linux__)
#if !PA_BUILDFLAG(IS_CHROMEOS)
// Do not define PA_IS_LINUX on Chrome OS build.
// The IS_CHROMEOS PA_BUILDFLAG macro is defined in buildflags.h.
#define PA_IS_LINUX
#endif // !PA_BUILDFLAG(IS_CHROMEOS)
// Include a system header to pull in features.h for glibc/uclibc macros.
#include <assert.h>
#if defined(__GLIBC__) && !defined(__UCLIBC__)
// We really are using glibc, not uClibc pretending to be glibc.
#define PA_LIBC_GLIBC
#endif
#elif defined(_WIN32)
#define PA_IS_WIN
#elif defined(__Fuchsia__)
#define PA_IS_FUCHSIA
#elif defined(__FreeBSD__)
#define PA_IS_FREEBSD
#elif defined(__NetBSD__)
#define PA_IS_NETBSD
#elif defined(__OpenBSD__)
#define PA_IS_OPENBSD
#elif defined(__sun)
#define PA_IS_SOLARIS
#elif defined(__QNXNTO__)
#define PA_IS_QNX
#elif defined(_AIX)
#define PA_IS_AIX
#elif defined(__asmjs__) || defined(__wasm__)
#define PA_IS_ASMJS
#endif
// NOTE: Adding a new port? Please follow
// https://chromium.googlesource.com/chromium/src/+/main/docs/new_port_policy.md
#if defined(PA_IS_MAC) || defined(PA_IS_IOS)
#define PA_IS_APPLE
#endif
#if defined(PA_IS_FREEBSD) || defined(PA_IS_NETBSD) || defined(PA_IS_OPENBSD)
#define PA_IS_BSD
#endif
#if defined(PA_IS_AIX) || defined(PA_IS_ASMJS) || defined(PA_IS_FREEBSD) || \
defined(PA_IS_IOS) || defined(PA_IS_LINUX) || defined(PA_IS_CHROMEOS) || \
defined(PA_IS_MAC) || defined(PA_IS_NACL) || defined(PA_IS_NETBSD) || \
defined(PA_IS_OPENBSD) || defined(PA_IS_QNX) || defined(PA_IS_SOLARIS) || \
PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_CHROMEOS)
#define PA_IS_POSIX
#endif
// Compiler detection. Note: clang masquerades as GCC on POSIX and as MSVC on
// Windows.
#if defined(__GNUC__)
#define PA_COMPILER_GCC
#elif defined(_MSC_VER)
#define PA_COMPILER_MSVC
#endif
// ------
// Processor architecture detection. For more info on what's defined, see:
// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
// http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
#define PA_ARCH_CPU_X86_FAMILY
#define PA_ARCH_CPU_X86_64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(_M_IX86) || defined(__i386__)
#define PA_ARCH_CPU_X86_FAMILY
#define PA_ARCH_CPU_X86
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__s390x__)
#define PA_ARCH_CPU_S390_FAMILY
#define PA_ARCH_CPU_S390X
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_BIG_ENDIAN
#elif defined(__s390__)
#define PA_ARCH_CPU_S390_FAMILY
#define PA_ARCH_CPU_S390
#define PA_ARCH_CPU_BIG_ENDIAN
#elif (defined(__PPC64__) || defined(__PPC__)) && defined(__BIG_ENDIAN__)
#define PA_ARCH_CPU_PPC64_FAMILY
#define PA_ARCH_CPU_PPC64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_BIG_ENDIAN
#elif defined(__PPC64__)
#define PA_ARCH_CPU_PPC64_FAMILY
#define PA_ARCH_CPU_PPC64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__ARMEL__)
#define PA_ARCH_CPU_ARM_FAMILY
#define PA_ARCH_CPU_ARMEL
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__aarch64__) || defined(_M_ARM64)
#define PA_ARCH_CPU_ARM_FAMILY
#define PA_ARCH_CPU_ARM64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__pnacl__) || defined(__asmjs__) || defined(__wasm__)
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#elif defined(__MIPSEL__)
#if defined(__LP64__)
#define PA_ARCH_CPU_MIPS64EL
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#else
#define PA_ARCH_CPU_MIPSEL
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#endif
#elif defined(__MIPSEB__)
#if defined(__LP64__)
#define PA_ARCH_CPU_MIPS64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_BIG_ENDIAN
#else
#define PA_ARCH_CPU_MIPS
#define PA_ARCH_CPU_32_BITS
#define PA_ARCH_CPU_BIG_ENDIAN
#endif
#elif defined(__loongarch__)
#define PA_ARCH_CPU_LITTLE_ENDIAN
#if __loongarch_grlen == 64
#define PA_ARCH_CPU_LOONGARCH64
#define PA_ARCH_CPU_64_BITS
#else
#define PA_ARCH_CPU_32_BITS
#endif
#elif defined(__riscv) && (__riscv_xlen == 64)
#define PA_ARCH_CPU_RISCV64
#define PA_ARCH_CPU_64_BITS
#define PA_ARCH_CPU_LITTLE_ENDIAN
#endif
// The part below can be generated with the following script:
// https://paste.googleplex.com/6324671838683136
//
// It transforms the defines above into PA_BUILDFLAG_INTERNAL_* defines, then
// undefs the original defines.
//
// Using the PA_BUILDFLAG(...) macro is better than a raw define because it
// avoids silent failures when developers forget to include this file.
#if defined(PA_ARCH_CPU_32_BITS)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_32_BITS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_32_BITS() (0)
#endif
#undef PA_ARCH_CPU_32_BITS
#if defined(PA_ARCH_CPU_64_BITS)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_64_BITS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_64_BITS() (0)
#endif
#undef PA_ARCH_CPU_64_BITS
#if defined(PA_ARCH_CPU_ARM64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARM64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARM64() (0)
#endif
#undef PA_ARCH_CPU_ARM64
#if defined(PA_ARCH_CPU_ARMEL)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARMEL() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARMEL() (0)
#endif
#undef PA_ARCH_CPU_ARMEL
#if defined(PA_ARCH_CPU_ARM_FAMILY)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARM_FAMILY() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_ARM_FAMILY() (0)
#endif
#undef PA_ARCH_CPU_ARM_FAMILY
#if defined(PA_ARCH_CPU_BIG_ENDIAN)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_BIG_ENDIAN() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_BIG_ENDIAN() (0)
#endif
#undef PA_ARCH_CPU_BIG_ENDIAN
#if defined(PA_ARCH_CPU_LITTLE_ENDIAN)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_LITTLE_ENDIAN() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_LITTLE_ENDIAN() (0)
#endif
#undef PA_ARCH_CPU_LITTLE_ENDIAN
#if defined(PA_ARCH_CPU_LOONGARCH64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_LOONGARCH64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_LOONGARCH64() (0)
#endif
#undef PA_ARCH_CPU_LOONGARCH64
#if defined(PA_ARCH_CPU_MIPS)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS() (0)
#endif
#undef PA_ARCH_CPU_MIPS
#if defined(PA_ARCH_CPU_MIPS64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS64() (0)
#endif
#undef PA_ARCH_CPU_MIPS64
#if defined(PA_ARCH_CPU_MIPS64EL)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS64EL() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPS64EL() (0)
#endif
#undef PA_ARCH_CPU_MIPS64EL
#if defined(PA_ARCH_CPU_MIPSEL)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPSEL() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_MIPSEL() (0)
#endif
#undef PA_ARCH_CPU_MIPSEL
#if defined(PA_ARCH_CPU_PPC64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_PPC64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_PPC64() (0)
#endif
#undef PA_ARCH_CPU_PPC64
#if defined(PA_ARCH_CPU_PPC64_FAMILY)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_PPC64_FAMILY() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_PPC64_FAMILY() (0)
#endif
#undef PA_ARCH_CPU_PPC64_FAMILY
#if defined(PA_ARCH_CPU_RISCV64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_RISCV64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_RISCV64() (0)
#endif
#undef PA_ARCH_CPU_RISCV64
#if defined(PA_ARCH_CPU_S390)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390() (0)
#endif
#undef PA_ARCH_CPU_S390
#if defined(PA_ARCH_CPU_S390_FAMILY)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390_FAMILY() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390_FAMILY() (0)
#endif
#undef PA_ARCH_CPU_S390_FAMILY
#if defined(PA_ARCH_CPU_S390X)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390X() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_S390X() (0)
#endif
#undef PA_ARCH_CPU_S390X
#if defined(PA_ARCH_CPU_X86)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86() (0)
#endif
#undef PA_ARCH_CPU_X86
#if defined(PA_ARCH_CPU_X86_64)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86_64() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86_64() (0)
#endif
#undef PA_ARCH_CPU_X86_64
#if defined(PA_ARCH_CPU_X86_FAMILY)
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86_FAMILY() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_X86_FAMILY() (0)
#endif
#undef PA_ARCH_CPU_X86_FAMILY
#if defined(PA_COMPILER_GCC)
#define PA_BUILDFLAG_INTERNAL_PA_COMPILER_GCC() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_COMPILER_GCC() (0)
#endif
#undef PA_COMPILER_GCC
#if defined(PA_COMPILER_MSVC)
#define PA_BUILDFLAG_INTERNAL_PA_COMPILER_MSVC() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_COMPILER_MSVC() (0)
#endif
#undef PA_COMPILER_MSVC
#if defined(PA_IS_AIX)
#define PA_BUILDFLAG_INTERNAL_IS_AIX() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_AIX() (0)
#endif
#undef PA_IS_AIX
#if defined(PA_IS_APPLE)
#define PA_BUILDFLAG_INTERNAL_IS_APPLE() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_APPLE() (0)
#endif
#undef PA_IS_APPLE
#if defined(PA_IS_ASMJS)
#define PA_BUILDFLAG_INTERNAL_IS_ASMJS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_ASMJS() (0)
#endif
#undef PA_IS_ASMJS
#if defined(PA_IS_BSD)
#define PA_BUILDFLAG_INTERNAL_IS_BSD() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_BSD() (0)
#endif
#undef PA_IS_BSD
#if defined(PA_IS_FREEBSD)
#define PA_BUILDFLAG_INTERNAL_IS_FREEBSD() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_FREEBSD() (0)
#endif
#undef PA_IS_FREEBSD
#if defined(PA_IS_FUCHSIA)
#define PA_BUILDFLAG_INTERNAL_IS_FUCHSIA() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_FUCHSIA() (0)
#endif
#undef PA_IS_FUCHSIA
#if defined(PA_IS_IOS)
#define PA_BUILDFLAG_INTERNAL_IS_IOS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_IOS() (0)
#endif
#undef PA_IS_IOS
#if defined(PA_IS_LINUX)
#define PA_BUILDFLAG_INTERNAL_IS_LINUX() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_LINUX() (0)
#endif
#undef PA_IS_LINUX
#if defined(PA_IS_MAC)
#define PA_BUILDFLAG_INTERNAL_IS_MAC() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_MAC() (0)
#endif
#undef PA_IS_MAC
#if defined(PA_IS_NACL)
#define PA_BUILDFLAG_INTERNAL_IS_NACL() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_NACL() (0)
#endif
#undef PA_IS_NACL
#if defined(PA_IS_NETBSD)
#define PA_BUILDFLAG_INTERNAL_IS_NETBSD() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_NETBSD() (0)
#endif
#undef PA_IS_NETBSD
#if defined(PA_IS_OPENBSD)
#define PA_BUILDFLAG_INTERNAL_IS_OPENBSD() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_OPENBSD() (0)
#endif
#undef PA_IS_OPENBSD
#if defined(PA_IS_POSIX)
#define PA_BUILDFLAG_INTERNAL_IS_POSIX() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_POSIX() (0)
#endif
#undef PA_IS_POSIX
#if defined(PA_IS_QNX)
#define PA_BUILDFLAG_INTERNAL_IS_QNX() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_QNX() (0)
#endif
#undef PA_IS_QNX
#if defined(PA_IS_SOLARIS)
#define PA_BUILDFLAG_INTERNAL_IS_SOLARIS() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_SOLARIS() (0)
#endif
#undef PA_IS_SOLARIS
#if defined(PA_IS_WIN)
#define PA_BUILDFLAG_INTERNAL_IS_WIN() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_WIN() (0)
#endif
#undef PA_IS_WIN
#if defined(PA_LIBC_GLIBC)
#define PA_BUILDFLAG_INTERNAL_PA_LIBC_GLIBC() (1)
#else
#define PA_BUILDFLAG_INTERNAL_PA_LIBC_GLIBC() (0)
#endif
#undef PA_LIBC_GLIBC
#endif // PARTITION_ALLOC_BUILD_CONFIG_H_
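To make the consumption pattern concrete (an illustrative sketch, not part of the file): code tests these flags only through PA_BUILDFLAG(...), and a misspelled or never-generated flag fails to preprocess instead of silently evaluating to 0, which is exactly the failure mode the comments above are designed to avoid.

#include "partition_alloc/build_config.h"

#if PA_BUILDFLAG(PA_ARCH_CPU_64_BITS) && PA_BUILDFLAG(IS_LINUX)
// 64-bit Linux only code path.
#endif

// A typo such as PA_BUILDFLAG(PA_ARCH_CPU_64BITS) expands to
// (PA_BUILDFLAG_INTERNAL_PA_ARCH_CPU_64BITS()), and since no such
// function-like macro exists, the #if fails to preprocess rather than
// silently taking the "false" branch.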

View file

@ -0,0 +1,17 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_BUILDFLAG_H_
#define PARTITION_ALLOC_BUILDFLAG_H_
// This was copied from chromium's //build/buildflag.h and adapted to
// partition_alloc. Please refer to the original comments in that file.
//
// Using a different macro and internal define allows partition_alloc and
// chromium to cohabit without affecting each other.
#define PA_BUILDFLAG_CAT_INDIRECT(a, b) a##b
#define PA_BUILDFLAG_CAT(a, b) PA_BUILDFLAG_CAT_INDIRECT(a, b)
#define PA_BUILDFLAG(flag) (PA_BUILDFLAG_CAT(PA_BUILDFLAG_INTERNAL_, flag)())
#endif // PARTITION_ALLOC_BUILDFLAG_H_
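For reference, the expansion chain implied by these three defines (editorial illustration):

// PA_BUILDFLAG(IS_WIN)
//   -> (PA_BUILDFLAG_CAT(PA_BUILDFLAG_INTERNAL_, IS_WIN)())
//   -> (PA_BUILDFLAG_CAT_INDIRECT(PA_BUILDFLAG_INTERNAL_, IS_WIN)())
//   -> (PA_BUILDFLAG_INTERNAL_IS_WIN())
//   -> (1) or (0), depending on what build_config.h or a generated
//      buildflags header defined for PA_BUILDFLAG_INTERNAL_IS_WIN().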

View file

@ -0,0 +1,121 @@
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This was copied from chromium's //build/buildflag_header.gni and adapted to
# partition_alloc; please refer to the original comments there.
# Generates a header with preprocessor defines specified by the build file.
#
# The flags are converted to function-style defines with mangled names and
# code uses an accessor macro to access the values. This is to try to
# minimize bugs where code checks whether something is defined or not, and
# the proper header isn't included, meaning the answer will always be silently
# false or might vary across the code base.
#
# In the GN template, specify build flags in the template as a list
# of strings that encode key/value pairs like this:
#
# flags = [ "ENABLE_FOO=1", "ENABLE_BAR=$enable_bar" ]
#
# The GN values "true" and "false" will be mapped to 1 and 0 respectively, so
# that boolean #if flags can be expressed naturally. This means you can't
# directly make a define that generates a C++ value of true or false for use
# in code. If you REALLY need this, you can also use the strings "(true)" and
# "(false)" to prevent the rewriting.
# The `template` creates a closure. It is important to capture the dependency
# from outside, because the template is executed from the context of the
# invoker, who uses a different current directory.
_current_dir = get_path_info(".", "abspath")
# To check the value of the flag in C code:
#
# #include "path/to/here/header_file.h"
#
# #if PA_BUILDFLAG(ENABLE_FOO)
# ...
# #endif
#
# const char kSpamServerUrl[] = PA_BUILDFLAG(SPAM_SERVER_URL);
#
# There will be no #define called ENABLE_FOO so if you accidentally test for
# that in an ifdef it will always be negative.
#
#
# Template parameters
#
# flags [required, list of strings]
# Flag values as described above.
#
# header [required, string]
# File name for generated header. By default, this will go in the
# generated file directory for this target, and you would include it
# with:
# #include "<path_to_this_BUILD_file>/<header>"
#
# header_dir [optional, string]
# Override the default location of the generated header. The string will
# be treated as a subdirectory of the root_gen_dir. For example:
# header_dir = "foo/bar"
# Then you can include the header as:
# #include "foo/bar/baz.h"
#
# Example
#
# pa_buildflag_header("foo_buildflags") {
# header = "foo_buildflags.h"
#
# flags = [
# # This uses the GN build flag enable_doom_melon as the definition.
# "ENABLE_DOOM_MELON=$enable_doom_melon",
#
# # This force-enables the flag.
# "ENABLE_SPACE_LASER=true",
#
# # This will expand to the quoted C string when used in source code.
# "SPAM_SERVER_URL=\"http://www.example.com/\"",
# ]
# }
template("pa_buildflag_header") {
action(target_name) {
script = "./write_buildflag_header.py"
if (defined(invoker.header_dir)) {
header_file = "${invoker.header_dir}/${invoker.header}"
} else {
# Compute the path from the root to this file.
header_file = rebase_path(".", "//") + "/${invoker.header}"
}
outputs = [ "$root_gen_dir/$header_file" ]
# Always write --flags to the file so it's not empty. Empty will confuse GN
# into thinking the response file isn't used.
response_file_contents = [ "--flags" ]
if (defined(invoker.flags)) {
response_file_contents += invoker.flags
}
args = [
"--output",
header_file, # Not rebased.
"--rulename",
get_label_info(":$target_name", "label_no_toolchain"),
"--gen-dir",
rebase_path(root_gen_dir, root_build_dir),
"--definitions",
"{{response_file_name}}",
]
forward_variables_from(invoker,
[
"deps",
"public_deps",
"testonly",
"visibility",
])
public_deps = [ "${_current_dir}:buildflag_macro" ]
}
}
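As a rough end-to-end sketch (the generator script's exact output is not shown in this diff, so the header below is an assumption about its shape rather than verbatim output): the foo_buildflags example above would produce a header defining the PA_BUILDFLAG_INTERNAL_* hooks that PA_BUILDFLAG(...) consumes.

// gen/<path>/foo_buildflags.h -- hypothetical generated output.
#include "partition_alloc/buildflag.h"
#define PA_BUILDFLAG_INTERNAL_ENABLE_DOOM_MELON() (0)  // from $enable_doom_melon
#define PA_BUILDFLAG_INTERNAL_ENABLE_SPACE_LASER() (1)
#define PA_BUILDFLAG_INTERNAL_SPAM_SERVER_URL() ("http://www.example.com/")

// Consuming code then tests: #if PA_BUILDFLAG(ENABLE_SPACE_LASER)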

View file

@ -3,9 +3,10 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "partition_alloc/compressed_pointer.h" #include "partition_alloc/compressed_pointer.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if BUILDFLAG(ENABLE_POINTER_COMPRESSION) #include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -26,4 +27,4 @@ void CompressedPointerBaseGlobal::ResetBaseForTesting() {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // BUILDFLAG(ENABLE_POINTER_COMPRESSION) #endif // PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)

View file

@ -2,28 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_COMPRESSED_POINTER_H_ #ifndef PARTITION_ALLOC_COMPRESSED_POINTER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_COMPRESSED_POINTER_H_ #define PARTITION_ALLOC_COMPRESSED_POINTER_H_
#include <bit>
#include <climits> #include <climits>
#include <type_traits> #include <type_traits>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_address_space.h" #include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h" #include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if BUILDFLAG(ENABLE_POINTER_COMPRESSION) #if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
#if !BUILDFLAG(GLUE_CORE_POOLS) #if !PA_BUILDFLAG(GLUE_CORE_POOLS)
#error "Pointer compression only works with glued pools" #error "Pointer compression only works with glued pools"
#endif #endif
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#error "Pointer compression currently supports constant pool size" #error "Pointer compression currently supports constant pool size"
#endif #endif
#endif // BUILDFLAG(ENABLE_POINTER_COMPRESSION) #endif // PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
namespace partition_alloc { namespace partition_alloc {
@ -33,7 +33,7 @@ template <typename T1, typename T2>
constexpr bool IsDecayedSame = constexpr bool IsDecayedSame =
std::is_same_v<std::decay_t<T1>, std::decay_t<T2>>; std::is_same_v<std::decay_t<T1>, std::decay_t<T2>>;
#if BUILDFLAG(ENABLE_POINTER_COMPRESSION) #if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
// Pointer compression works by storing only the 'useful' 32-bit part of the // Pointer compression works by storing only the 'useful' 32-bit part of the
// pointer. The other half (the base) is stored in a global variable // pointer. The other half (the base) is stored in a global variable
@ -78,7 +78,7 @@ constexpr bool IsDecayedSame =
class CompressedPointerBaseGlobal final { class CompressedPointerBaseGlobal final {
public: public:
static constexpr size_t kUsefulBits = static constexpr size_t kUsefulBits =
std::countr_zero(PartitionAddressSpace::CorePoolsSize()); base::bits::CountrZero(PartitionAddressSpace::CorePoolsSize());
static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT); static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT);
static constexpr size_t kBitsToShift = static constexpr size_t kBitsToShift =
kUsefulBits - sizeof(uint32_t) * CHAR_BIT; kUsefulBits - sizeof(uint32_t) * CHAR_BIT;
@ -102,11 +102,11 @@ class CompressedPointerBaseGlobal final {
static constexpr uintptr_t kUsefulBitsMask = static constexpr uintptr_t kUsefulBitsMask =
PartitionAddressSpace::CorePoolsSize() - 1; PartitionAddressSpace::CorePoolsSize() - 1;
static union alignas(kPartitionCachelineSize) PA_CONSTINIT static union alignas(kPartitionCachelineSize)
PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base { PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base {
uintptr_t base; uintptr_t base;
char cache_line[kPartitionCachelineSize]; char cache_line[kPartitionCachelineSize];
} g_base_ PA_CONSTINIT; } g_base_;
PA_ALWAYS_INLINE static bool IsBaseConsistent() { PA_ALWAYS_INLINE static bool IsBaseConsistent() {
return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask); return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask);
@ -118,11 +118,11 @@ class CompressedPointerBaseGlobal final {
friend class PartitionAddressSpace; friend class PartitionAddressSpace;
}; };
#endif // BUILDFLAG(ENABLE_POINTER_COMPRESSION) #endif // PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
} // namespace internal } // namespace internal
#if BUILDFLAG(ENABLE_POINTER_COMPRESSION) #if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
template <typename T> template <typename T>
class PA_TRIVIAL_ABI CompressedPointer final { class PA_TRIVIAL_ABI CompressedPointer final {
@ -150,7 +150,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
} else { } else {
// When the types are different, perform the round, because the pointer // When the types are different, perform the round, because the pointer
// may need to be adjusted. // may need to be adjusted.
// TODO(1376980): Avoid the cycle here. // TODO(crbug.com/40243421): Avoid the cycle here.
value_ = Compress(other.get()); value_ = Compress(other.get());
} }
} }
@ -232,7 +232,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
static constexpr size_t kMinimalRequiredAlignment = 8; static constexpr size_t kMinimalRequiredAlignment = 8;
static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment); static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
#if BUILDFLAG(PA_DCHECK_IS_ON) #if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment == PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment ==
0); 0);
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet()); PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
@ -243,7 +243,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
PA_DCHECK(!ptr || PA_DCHECK(!ptr ||
(base & kCorePoolsBaseMask) == (base & kCorePoolsBaseMask) ==
(reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask)); (reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask));
#endif // BUILDFLAG(PA_DCHECK_IS_ON) #endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
const auto uptr = reinterpret_cast<uintptr_t>(ptr); const auto uptr = reinterpret_cast<uintptr_t>(ptr);
// Shift the pointer and truncate. // Shift the pointer and truncate.
@ -252,7 +252,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
// it on decompression. Assuming compression is a significantly less // it on decompression. Assuming compression is a significantly less
// frequent operation, we let more work here in favor of faster // frequent operation, we let more work here in favor of faster
// decompression. // decompression.
// TODO(1376980): Avoid this by overreserving the heap. // TODO(crbug.com/40243421): Avoid this by overreserving the heap.
if (compressed) { if (compressed) {
compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1)); compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1));
} }
@ -292,7 +292,7 @@ PA_ALWAYS_INLINE bool operator==(CompressedPointer<T> a,
} else { } else {
// When the types are different, compare decompressed pointers, because the // When the types are different, compare decompressed pointers, because the
// pointers may need to be adjusted. // pointers may need to be adjusted.
// TODO(1376980): Avoid decompression here. // TODO(crbug.com/40243421): Avoid decompression here.
return a.get() == b.get(); return a.get() == b.get();
} }
} }
@ -361,7 +361,7 @@ PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer<T> a,
} else { } else {
// When the types are different, compare decompressed pointers, because the // When the types are different, compare decompressed pointers, because the
// pointers may need to be adjusted. // pointers may need to be adjusted.
// TODO(1376980): Avoid decompression here. // TODO(crbug.com/40243421): Avoid decompression here.
return a.get() < b.get(); return a.get() < b.get();
} }
} }
@ -389,7 +389,7 @@ PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer<T> a,
} else { } else {
// When the types are different, compare decompressed pointers, because the // When the types are different, compare decompressed pointers, because the
// pointers may need to be adjusted. // pointers may need to be adjusted.
// TODO(1376980): Avoid decompression here. // TODO(crbug.com/40243421): Avoid decompression here.
return a.get() <= b.get(); return a.get() <= b.get();
} }
} }
@ -444,7 +444,7 @@ PA_ALWAYS_INLINE constexpr bool operator>=(T* a, CompressedPointer<U> b) {
return static_cast<CompressedPointer<T>>(a) >= b; return static_cast<CompressedPointer<T>>(a) >= b;
} }
#endif // BUILDFLAG(ENABLE_POINTER_COMPRESSION) #endif // PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
// Simple wrapper over the raw pointer. // Simple wrapper over the raw pointer.
template <typename T> template <typename T>
@ -665,4 +665,4 @@ PA_ALWAYS_INLINE constexpr bool operator>=(T* a, UncompressedPointer<U> b) {
} // namespace partition_alloc } // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_COMPRESSED_POINTER_H_ #endif // PARTITION_ALLOC_COMPRESSED_POINTER_H_
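The compression scheme the comments above describe can be sketched in isolation as follows. This is an editorial illustration with made-up constants, not PartitionAlloc's actual Compress()/Decompress(); the real code shifts and truncates rather than subtracting an explicit base, and keeps that base in the CompressedPointerBaseGlobal shown above.

#include <cstdint>

constexpr uint64_t kPoolBase = 0x0000'4000'0000'0000ULL;  // hypothetical, pool-aligned
constexpr uint32_t kNonNullBit = 1u << 31;                // keeps nullptr distinct

// Store only the 32-bit offset into a (hypothetical, smaller than 2 GiB) pool.
uint32_t Compress(const void* ptr) {
  if (!ptr) {
    return 0;
  }
  uint64_t offset = reinterpret_cast<uint64_t>(ptr) - kPoolBase;
  return static_cast<uint32_t>(offset) | kNonNullBit;
}

void* Decompress(uint32_t compressed) {
  if (!compressed) {
    return nullptr;
  }
  return reinterpret_cast<void*>(kPoolBase + (compressed & ~kNonNullBit));
}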

View file

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_ #ifndef PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_ #define PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_
#include <cstdint> #include <cstdint>
@ -64,4 +64,4 @@ bool IsUnretainedDanglingRawPtrCheckEnabled();
} // namespace internal } // namespace internal
} // namespace partition_alloc } // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_ #endif // PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_

View file

@ -0,0 +1,17 @@
digraph {
layout = "circo"
dpi = 156
node[shape=box]
crt[shape=circle, label="(not yet fully\ninitialized)\nWindows\nCRT"]
malloc[label="malloc()"]
crt->malloc[label="calls"]
malloc->PartitionAlloc[label="intercepted\nby"]
static_local[label="nontrivial\nfunction-local\nstatic"]
PartitionAlloc->static_local[label="initializes"]
lock[label="critical section\n(implicit lock)"]
static_local->lock[label="enters"]
lock->crt[label="attempts\nre-entry\ninto", style=dotted]
}

Binary file not shown (added image, 34 KiB).

View file

@ -2,27 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_ #ifndef PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_ #define PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
#include "build/build_config.h" #include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap.h" #include "partition_alloc/freeslot_bitmap.h"
#include "partition_alloc/partition_alloc-inl.h" #include "partition_alloc/partition_alloc-inl.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h" #include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h" #include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_ref_count.h"
#if !defined(ARCH_CPU_BIG_ENDIAN) #if !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN)
#include "partition_alloc/reverse_bytes.h" #include "partition_alloc/reverse_bytes.h"
#endif // !defined(ARCH_CPU_BIG_ENDIAN) #endif
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -52,7 +48,7 @@ class EncodedFreelistPtr {
// encoding and decoding. // encoding and decoding.
PA_ALWAYS_INLINE static constexpr uintptr_t Transform(uintptr_t address) { PA_ALWAYS_INLINE static constexpr uintptr_t Transform(uintptr_t address) {
// We use bswap on little endian as a fast transformation for two reasons: // We use bswap on little endian as a fast transformation for two reasons:
// 1) On 64 bit architectures, the pointer is very unlikely to be a // 1) On 64 bit architectures, the swapped pointer is very unlikely to be a
// canonical address. Therefore, if an object is freed and its vtable is // canonical address. Therefore, if an object is freed and its vtable is
// used where the attacker doesn't get the chance to run allocations // used where the attacker doesn't get the chance to run allocations
// between the free and use, the vtable dereference is likely to fault. // between the free and use, the vtable dereference is likely to fault.
@ -60,7 +56,7 @@ class EncodedFreelistPtr {
// corrupt a freelist pointer, partial pointer overwrite attacks are // corrupt a freelist pointer, partial pointer overwrite attacks are
// thwarted. // thwarted.
// For big endian, similar guarantees are arrived at with a negation. // For big endian, similar guarantees are arrived at with a negation.
#if defined(ARCH_CPU_BIG_ENDIAN) #if PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN)
uintptr_t transformed = ~address; uintptr_t transformed = ~address;
#else #else
uintptr_t transformed = ReverseBytes(address); uintptr_t transformed = ReverseBytes(address);
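A standalone sketch of this transformation (editorial; ReverseBytes() itself lives in reverse_bytes.h and is assumed here to be a plain 64-bit byte swap):

#include <cstdint>

// Little-endian case: encoding and decoding are the same byte swap.
uint64_t Transform(uint64_t address) { return __builtin_bswap64(address); }

// Transform(Transform(x)) == x, so one function serves both directions.
// Example: the user-space pointer 0x00007f1234567890 encodes to
// 0x90785634127f0000, which is not a canonical x86-64 address, so a
// corrupted freelist entry dereferenced as a pointer is likely to fault.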
@ -155,19 +151,24 @@ class EncodedNextFreelistEntry {
// corruption. Meant to be used to report the failed allocation size. // corruption. Meant to be used to report the failed allocation size.
template <bool crash_on_corruption> template <bool crash_on_corruption>
PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextForThreadCache( PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextForThreadCache(
size_t slot_size) const; size_t slot_size) const {
PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNext(size_t slot_size) const; return GetNextInternal<crash_on_corruption, /*for_thread_cache=*/true>(
slot_size);
}
PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNext(size_t slot_size) const {
return GetNextInternal<true, /*for_thread_cache=*/false>(slot_size);
}
PA_NOINLINE void CheckFreeList(size_t slot_size) const { PA_NOINLINE void CheckFreeList(size_t slot_size) const {
for (auto* entry = this; entry; entry = entry->GetNext(slot_size)) { for (auto* entry = this; entry; entry = entry->GetNext(slot_size)) {
// |GetNext()| checks freelist integrity. // `GetNext()` calls `IsWellFormed()`.
} }
} }
PA_NOINLINE void CheckFreeListForThreadCache(size_t slot_size) const { PA_NOINLINE void CheckFreeListForThreadCache(size_t slot_size) const {
for (auto* entry = this; entry; for (auto* entry = this; entry;
entry = entry->GetNextForThreadCache<true>(slot_size)) { entry = entry->GetNextForThreadCache<true>(slot_size)) {
// |GetNextForThreadCache()| checks freelist integrity. // `GetNextForThreadCache()` calls `IsWellFormed()`.
} }
} }
@ -175,16 +176,16 @@ class EncodedNextFreelistEntry {
// SetNext() is either called on the freelist head, when provisioning new // SetNext() is either called on the freelist head, when provisioning new
// slots, or when GetNext() has been called before, no need to pass the // slots, or when GetNext() has been called before, no need to pass the
// size. // size.
#if BUILDFLAG(PA_DCHECK_IS_ON) #if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Regular freelists always point to an entry within the same super page. // Regular freelists always point to an entry within the same super page.
// //
// This is most likely a PartitionAlloc bug if this triggers. // This is most likely a PartitionAlloc bug if this triggers.
if (PA_UNLIKELY(entry && if (entry && (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
(SlotStartPtr2Addr(this) & kSuperPageBaseMask) != (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) { [[unlikely]] {
FreelistCorruptionDetected(0); FreelistCorruptionDetected(0);
} }
#endif // BUILDFLAG(PA_DCHECK_IS_ON) #endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
encoded_next_ = EncodedFreelistPtr(entry); encoded_next_ = EncodedFreelistPtr(entry);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) #if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
@ -208,52 +209,85 @@ class EncodedNextFreelistEntry {
} }
private: private:
template <bool crash_on_corruption> template <bool crash_on_corruption, bool for_thread_cache>
PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextInternal( PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextInternal(
size_t slot_size, size_t slot_size) const {
bool for_thread_cache) const; // GetNext() can be called on discarded memory, in which case
// |encoded_next_| is 0, and none of the checks apply. Don't prefetch
// nullptr either.
if (IsEncodedNextPtrZero()) {
return nullptr;
}
PA_ALWAYS_INLINE static bool IsSane(const EncodedNextFreelistEntry* here, auto* ret = encoded_next_.Decode();
const EncodedNextFreelistEntry* next, if (!IsWellFormed<for_thread_cache>(this, ret)) [[unlikely]] {
bool for_thread_cache) { if constexpr (crash_on_corruption) {
// Don't allow the freelist to be blindly followed to any location. // Put the corrupted data on the stack, it may give us more information
// Checks two constraints: // about what kind of corruption that was.
// - here and next must belong to the same superpage, unless this is in the PA_DEBUG_DATA_ON_STACK("first",
// thread cache (they even always belong to the same slot span). static_cast<size_t>(encoded_next_.encoded_));
// - next cannot point inside the metadata area. #if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
#endif
FreelistCorruptionDetected(slot_size);
}
return nullptr;
}
// In real-world profiles, the load of |encoded_next_| above is responsible
// for a large fraction of the allocation cost. However, we cannot
// anticipate it enough since it is accessed right after we know its
// address.
// //
// Also, the lightweight UaF detection (pointer shadow) is checked. // In the case of repeated allocations, we can prefetch the access that will
// be done at the *next* allocation, which will touch *ret, prefetch it.
PA_PREFETCH(ret);
return ret;
}
uintptr_t here_address = SlotStartPtr2Addr(here); template <bool for_thread_cache>
uintptr_t next_address = SlotStartPtr2Addr(next); PA_ALWAYS_INLINE static bool IsWellFormed(
const EncodedNextFreelistEntry* here,
const EncodedNextFreelistEntry* next) {
// Don't allow the freelist to be blindly followed to any location.
// Checks following constraints:
// - `here->shadow_` must match an inversion of `here->next_` (if present).
// - `next` mustn't point inside the super page metadata area.
// - Unless this is a thread-cache freelist, `here` and `next` must belong
// to the same super page (as a matter of fact, they must belong to the
// same slot span, but that'd be too expensive to check here).
// - `next` is marked as free in the free slot bitmap (if present).
const uintptr_t here_address = SlotStartPtr2Addr(here);
const uintptr_t next_address = SlotStartPtr2Addr(next);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) #if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_; bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
#else #else
bool shadow_ptr_ok = true; constexpr bool shadow_ptr_ok = true;
#endif
bool same_superpage = (here_address & kSuperPageBaseMask) ==
(next_address & kSuperPageBaseMask);
#if BUILDFLAG(USE_FREESLOT_BITMAP)
bool marked_as_free_in_bitmap =
for_thread_cache ? true : !FreeSlotBitmapSlotIsUsed(next_address);
#else
bool marked_as_free_in_bitmap = true;
#endif #endif
// This is necessary but not sufficient when quarantine is enabled, see // This is necessary but not sufficient when quarantine is enabled, see
// SuperPagePayloadBegin() in partition_page.h. However we don't want to // SuperPagePayloadBegin() in partition_page.h. However we don't want to
// fetch anything from the root in this function. // fetch anything from the root in this function.
bool not_in_metadata = const bool not_in_metadata =
(next_address & kSuperPageOffsetMask) >= PartitionPageSize(); (next_address & kSuperPageOffsetMask) >= PartitionPageSize();
if (for_thread_cache) { if constexpr (for_thread_cache) {
return shadow_ptr_ok & not_in_metadata; return shadow_ptr_ok & not_in_metadata;
} else {
return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap &
not_in_metadata;
} }
const bool same_super_page = (here_address & kSuperPageBaseMask) ==
(next_address & kSuperPageBaseMask);
#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
bool marked_as_free_in_bitmap = !FreeSlotBitmapSlotIsUsed(next_address);
#else
constexpr bool marked_as_free_in_bitmap = true;
#endif
return shadow_ptr_ok & same_super_page & marked_as_free_in_bitmap &
not_in_metadata;
} }
EncodedFreelistPtr encoded_next_; EncodedFreelistPtr encoded_next_;
@ -265,56 +299,6 @@ class EncodedNextFreelistEntry {
#endif #endif
}; };
template <bool crash_on_corruption>
PA_ALWAYS_INLINE EncodedNextFreelistEntry*
EncodedNextFreelistEntry::GetNextInternal(size_t slot_size,
bool for_thread_cache) const {
// GetNext() can be called on discarded memory, in which case |encoded_next_|
// is 0, and none of the checks apply. Don't prefetch nullptr either.
if (IsEncodedNextPtrZero()) {
return nullptr;
}
auto* ret = encoded_next_.Decode();
// We rely on constant propagation to remove the branches coming from
// |for_thread_cache|, since the argument is always a compile-time constant.
if (PA_UNLIKELY(!IsSane(this, ret, for_thread_cache))) {
if constexpr (crash_on_corruption) {
// Put the corrupted data on the stack, it may give us more information
// about what kind of corruption that was.
PA_DEBUG_DATA_ON_STACK("first",
static_cast<size_t>(encoded_next_.encoded_));
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
#endif
FreelistCorruptionDetected(slot_size);
} else {
return nullptr;
}
}
// In real-world profiles, the load of |encoded_next_| above is responsible
// for a large fraction of the allocation cost. However, we cannot anticipate
// it enough since it is accessed right after we know its address.
//
// In the case of repeated allocations, we can prefetch the access that will
// be done at the *next* allocation, which will touch *ret, prefetch it.
PA_PREFETCH(ret);
return ret;
}
template <bool crash_on_corruption>
PA_ALWAYS_INLINE EncodedNextFreelistEntry*
EncodedNextFreelistEntry::GetNextForThreadCache(size_t slot_size) const {
return GetNextInternal<crash_on_corruption>(slot_size, true);
}
PA_ALWAYS_INLINE EncodedNextFreelistEntry* EncodedNextFreelistEntry::GetNext(
size_t slot_size) const {
return GetNextInternal<true>(slot_size, false);
}
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_ #endif // PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_

View file

@ -4,7 +4,7 @@
#include "partition_alloc/extended_api.h" #include "partition_alloc/extended_api.h"
#include "partition_alloc/partition_alloc_buildflags.h" #include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_config.h" #include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h" #include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "partition_alloc/thread_cache.h" #include "partition_alloc/thread_cache.h"
@ -36,7 +36,7 @@ void EnablePartitionAllocThreadCacheForRootIfDisabled(PartitionRoot* root) {
root->settings.with_thread_cache = true; root->settings.with_thread_cache = true;
} }
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void DisablePartitionAllocThreadCacheForProcess() { void DisablePartitionAllocThreadCacheForProcess() {
PA_CHECK(allocator_shim::internal::PartitionAllocMalloc:: PA_CHECK(allocator_shim::internal::PartitionAllocMalloc::
AllocatorConfigurationFinalized()); AllocatorConfigurationFinalized());
@ -45,7 +45,7 @@ void DisablePartitionAllocThreadCacheForProcess() {
DisableThreadCacheForRootIfEnabled( DisableThreadCacheForRootIfEnabled(
allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator()); allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator());
} }
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} // namespace } // namespace
@ -63,7 +63,7 @@ ThreadAllocStats GetAllocStatsForCurrentThread() {
ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting( ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
PartitionRoot* root) PartitionRoot* root)
: root_(root) { : root_(root) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator = auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator(); allocator_shim::internal::PartitionAllocMalloc::Allocator();
regular_was_enabled_ = regular_was_enabled_ =
@ -77,7 +77,17 @@ ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
// Replace ThreadCache's PartitionRoot. // Replace ThreadCache's PartitionRoot.
ThreadCache::SwapForTesting(root_); ThreadCache::SwapForTesting(root_);
} else { } else {
if (!regular_was_enabled_) { bool regular_was_disabled = !regular_was_enabled_;
#if PA_BUILDFLAG(IS_WIN)
// ThreadCache may be a tombstone because of the previous test. In that
// case, we have to remove the tombstone and re-create the ThreadCache for
// a new test.
if (ThreadCache::IsTombstone(ThreadCache::Get())) {
ThreadCache::RemoveTombstoneForTesting();
regular_was_disabled = true;
}
#endif
if (regular_was_disabled) {
EnablePartitionAllocThreadCacheForRootIfDisabled(root_); EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_); ThreadCache::SwapForTesting(root_);
} }
@ -86,13 +96,14 @@ ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get())); PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
EnablePartitionAllocThreadCacheForRootIfDisabled(root_); EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_); ThreadCache::SwapForTesting(root_);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PA_CHECK(ThreadCache::Get()); PA_CHECK(ThreadCache::Get());
PA_CHECK(!ThreadCache::IsTombstone(ThreadCache::Get()));
} }
ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() { ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator = auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator(); allocator_shim::internal::PartitionAllocMalloc::Allocator();
bool regular_enabled = bool regular_enabled =
@ -122,7 +133,7 @@ ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {
DisableThreadCacheForRootIfEnabled(root_); DisableThreadCacheForRootIfEnabled(root_);
ThreadCache::SwapForTesting(nullptr); ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} }
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED) #endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)

View file

@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_EXTENDED_API_H_ #ifndef PARTITION_ALLOC_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_EXTENDED_API_H_ #define PARTITION_ALLOC_EXTENDED_API_H_
#include "partition_alloc/partition_alloc_buildflags.h" #include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_root.h" #include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h" #include "partition_alloc/partition_stats.h"
#include "partition_alloc/thread_cache.h" #include "partition_alloc/thread_cache.h"
@ -32,11 +32,11 @@ class ThreadCacheProcessScopeForTesting {
private: private:
PartitionRoot* root_ = nullptr; PartitionRoot* root_ = nullptr;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
bool regular_was_enabled_ = false; bool regular_was_enabled_ = false;
#endif #endif
}; };
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_EXTENDED_API_H_ #endif // PARTITION_ALLOC_EXTENDED_API_H_

View file

@@ -11,8 +11,8 @@
 // other enum value and passed on to a function that takes an int or unsigned
 // int.
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FLAGS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FLAGS_H_
+#ifndef PARTITION_ALLOC_FLAGS_H_
+#define PARTITION_ALLOC_FLAGS_H_
 #include <type_traits>
@@ -46,12 +46,6 @@ constexpr inline IfEnum<EnumType, bool> ContainsFlags(EnumType superset,
 return (superset & subset) == subset;
 }
-// Removes flags `target` from `from`.
-template <typename EnumType>
-constexpr inline IfEnum<EnumType> RemoveFlags(EnumType from, EnumType target) {
-return from & ~target;
-}
 // A macro to define binary arithmetic over `EnumType`.
 // Use inside `namespace partition_alloc::internal`.
 #define PA_DEFINE_OPERATORS_FOR_FLAGS(EnumType) \
@@ -98,4 +92,4 @@ constexpr inline IfEnum<EnumType> RemoveFlags(EnumType from, EnumType target) {
 } // namespace partition_alloc::internal
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FLAGS_H_
+#endif // PARTITION_ALLOC_FLAGS_H_
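
ContainsFlags() above reduces to `(superset & subset) == subset` over the enum's underlying integer, and the dropped RemoveFlags() helper was `from & ~target`. A minimal standalone sketch of that pattern, using an illustrative enum and hand-written operators rather than PartitionAlloc's PA_DEFINE_OPERATORS_FOR_FLAGS macro:

// flag_demo.cc -- type-safe enum flags, illustrative only (not PartitionAlloc's API).
#include <cstdint>
#include <iostream>
#include <type_traits>

enum class PoolFlags : uint32_t {
  kNone = 0,
  kUseCache = 1 << 0,
  kZeroOnFree = 1 << 1,
};

// Bitwise operators over the underlying type, roughly what the macro expands to.
constexpr PoolFlags operator|(PoolFlags a, PoolFlags b) {
  using T = std::underlying_type_t<PoolFlags>;
  return static_cast<PoolFlags>(static_cast<T>(a) | static_cast<T>(b));
}
constexpr PoolFlags operator&(PoolFlags a, PoolFlags b) {
  using T = std::underlying_type_t<PoolFlags>;
  return static_cast<PoolFlags>(static_cast<T>(a) & static_cast<T>(b));
}
constexpr PoolFlags operator~(PoolFlags a) {
  using T = std::underlying_type_t<PoolFlags>;
  return static_cast<PoolFlags>(~static_cast<T>(a));
}

// Same check as ContainsFlags above: every bit of `subset` is set in `superset`.
constexpr bool ContainsFlags(PoolFlags superset, PoolFlags subset) {
  return (superset & subset) == subset;
}

// Same expression as the removed RemoveFlags helper: clear `target`'s bits.
constexpr PoolFlags RemoveFlags(PoolFlags from, PoolFlags target) {
  return from & ~target;
}

int main() {
  constexpr PoolFlags flags = PoolFlags::kUseCache | PoolFlags::kZeroOnFree;
  static_assert(ContainsFlags(flags, PoolFlags::kUseCache), "bit must be set");
  static_assert(RemoveFlags(flags, PoolFlags::kZeroOnFree) == PoolFlags::kUseCache,
                "removal keeps the other bit");
  std::cout << "flag checks passed\n";
}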

View file

@@ -2,20 +2,20 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_H_
+#ifndef PARTITION_ALLOC_FREESLOT_BITMAP_H_
+#define PARTITION_ALLOC_FREESLOT_BITMAP_H_
 #include <climits>
 #include <cstdint>
 #include <utility>
+#include "partition_alloc/buildflags.h"
 #include "partition_alloc/freeslot_bitmap_constants.h"
 #include "partition_alloc/partition_alloc_base/bits.h"
 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
-#include "partition_alloc/partition_alloc_buildflags.h"
 #include "partition_alloc/partition_alloc_constants.h"
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
+#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
 namespace partition_alloc::internal {
@@ -92,7 +92,7 @@ PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
 *cell &= ~CellWithAOne(bit_index);
 }
-#if BUILDFLAG(PA_DCHECK_IS_ON)
+#if PA_BUILDFLAG(DCHECKS_ARE_ON)
 // Checks if the cells that are meant to contain only unset bits are really 0.
 auto [begin_cell, begin_bit_index] =
 GetFreeSlotBitmapCellPtrAndBitIndex(begin_addr);
@@ -131,11 +131,11 @@ PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
 for (FreeSlotBitmapCellType* cell = begin_cell; cell < end_cell; ++cell) {
 PA_DCHECK(*cell == 0u);
 }
-#endif // BUILDFLAG(PA_DCHECK_IS_ON)
+#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
 }
 } // namespace partition_alloc::internal
-#endif // BUILDFLAG(USE_FREESLOT_BITMAP)
+#endif // PA_BUILDFLAG(USE_FREESLOT_BITMAP)
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_H_
+#endif // PARTITION_ALLOC_FREESLOT_BITMAP_H_
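
The FreeSlotBitmapReset() hunk above clears bits with `*cell &= ~CellWithAOne(bit_index)`, one bit per slot packed into fixed-width cells. A minimal sketch of that cell/bit arithmetic; the cell width, helper names, and mark/clear semantics here are illustrative stand-ins, not the real FreeSlotBitmap layout:

// freeslot_bitmap_demo.cc -- one-bit-per-slot bookkeeping (assumed layout,
// not PartitionAlloc's actual FreeSlotBitmap).
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>

using Cell = uint64_t;
constexpr size_t kBitsPerCell = sizeof(Cell) * CHAR_BIT;

// Maps a slot index to the cell holding its bit and the bit's position in it.
constexpr size_t CellIndex(size_t slot) { return slot / kBitsPerCell; }
constexpr size_t BitIndex(size_t slot) { return slot % kBitsPerCell; }
constexpr Cell CellWithAOne(size_t bit) { return Cell{1} << bit; }

void MarkFree(Cell* cells, size_t slot) {
  cells[CellIndex(slot)] |= CellWithAOne(BitIndex(slot));
}
void MarkUsed(Cell* cells, size_t slot) {
  // Same shape as `*cell &= ~CellWithAOne(bit_index)` in the diff above.
  cells[CellIndex(slot)] &= ~CellWithAOne(BitIndex(slot));
}
bool IsFree(const Cell* cells, size_t slot) {
  return cells[CellIndex(slot)] & CellWithAOne(BitIndex(slot));
}

int main() {
  Cell cells[2] = {0, 0};  // enough for 128 slots
  MarkFree(cells, 70);     // lands in cells[1], bit 6
  assert(IsFree(cells, 70));
  MarkUsed(cells, 70);
  assert(!IsFree(cells, 70));
  return 0;
}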

View file

@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
+#ifndef PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
+#define PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
 #include <cstdint>
+#include "partition_alloc/buildflags.h"
 #include "partition_alloc/partition_alloc_base/bits.h"
 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
-#include "partition_alloc/partition_alloc_buildflags.h"
 #include "partition_alloc/partition_alloc_constants.h"
 #include "partition_alloc/partition_alloc_forward.h"
 #include "partition_alloc/reservation_offset_table.h"
@@ -28,7 +28,7 @@ constexpr size_t kFreeSlotBitmapSize =
 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
 ReservedFreeSlotBitmapSize() {
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
+#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
 return base::bits::AlignUp(kFreeSlotBitmapSize, PartitionPageSize());
 #else
 return 0;
@@ -37,7 +37,7 @@ ReservedFreeSlotBitmapSize() {
 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
 CommittedFreeSlotBitmapSize() {
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
+#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
 return base::bits::AlignUp(kFreeSlotBitmapSize, SystemPageSize());
 #else
 return 0;
@@ -49,7 +49,7 @@ NumPartitionPagesPerFreeSlotBitmap() {
 return ReservedFreeSlotBitmapSize() / PartitionPageSize();
 }
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
+#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
 PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) {
 PA_DCHECK(!(super_page % kSuperPageAlignment));
 return super_page + PartitionPageSize();
@@ -58,4 +58,4 @@ PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) {
 } // namespace partition_alloc::internal
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
+#endif // PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
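
ReservedFreeSlotBitmapSize() and CommittedFreeSlotBitmapSize() above differ only in the boundary they round up to (partition page vs. system page). A small sketch of that round-up, assuming power-of-two alignments as base::bits::AlignUp requires; the sizes below are made-up placeholders, not PartitionAlloc's constants:

// align_demo.cc -- power-of-two round-up, as used for the free-slot bitmap
// sizes above (standalone sketch; constants are illustrative only).
#include <cassert>
#include <cstddef>

// Rounds `size` up to the next multiple of `alignment`; `alignment` must be a
// power of two for the mask trick to be valid.
constexpr size_t AlignUp(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  constexpr size_t kSystemPageSize = 4096;      // assumed; varies by platform
  constexpr size_t kPartitionPageSize = 16384;  // assumed for illustration
  constexpr size_t kFreeSlotBitmapSize = 2048;  // placeholder raw bitmap size

  // Committed size: bitmap rounded up to system pages.
  static_assert(AlignUp(kFreeSlotBitmapSize, kSystemPageSize) == 4096, "");
  // Reserved size: bitmap rounded up to partition pages.
  static_assert(AlignUp(kFreeSlotBitmapSize, kPartitionPageSize) == 16384, "");
  assert(AlignUp(0, kSystemPageSize) == 0);
  return 0;
}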

View file

@@ -4,32 +4,36 @@
 #include "partition_alloc/gwp_asan_support.h"
-#if BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
+#if PA_BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
-#include "build/build_config.h"
+#include "partition_alloc/build_config.h"
 #include "partition_alloc/freeslot_bitmap_constants.h"
+#include "partition_alloc/in_slot_metadata.h"
 #include "partition_alloc/page_allocator_constants.h"
 #include "partition_alloc/partition_alloc_base/no_destructor.h"
 #include "partition_alloc/partition_alloc_check.h"
 #include "partition_alloc/partition_bucket.h"
 #include "partition_alloc/partition_lock.h"
 #include "partition_alloc/partition_page.h"
-#include "partition_alloc/partition_ref_count.h"
 #include "partition_alloc/partition_root.h"
 namespace partition_alloc {
+namespace {
+PartitionOptions GwpAsanPartitionOptions() {
+PartitionOptions options;
+options.backup_ref_ptr = PartitionOptions::kEnabled;
+return options;
+}
+} // namespace
 // static
 void* GwpAsanSupport::MapRegion(size_t slot_count,
 std::vector<uint16_t>& free_list) {
 PA_CHECK(slot_count > 0);
-constexpr PartitionOptions kConfig = []() {
-PartitionOptions opts;
-opts.backup_ref_ptr = PartitionOptions::kEnabled;
-return opts;
-}();
-static internal::base::NoDestructor<PartitionRoot> root(kConfig);
+static internal::base::NoDestructor<PartitionRoot> root(
+GwpAsanPartitionOptions());
 const size_t kSlotSize = 2 * internal::SystemPageSize();
 uint16_t bucket_index = PartitionRoot::SizeToBucketIndex(
@@ -38,8 +42,7 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
 const size_t kSuperPagePayloadStartOffset =
 internal::SuperPagePayloadStartOffset(
-/* is_managed_by_normal_buckets = */ true,
-/* with_quarantine = */ false);
+/* is_managed_by_normal_buckets = */ true);
 PA_CHECK(kSuperPagePayloadStartOffset % kSlotSize == 0);
 const size_t kSuperPageGwpAsanSlotAreaBeginOffset =
 kSuperPagePayloadStartOffset;
@@ -64,14 +67,14 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
 return nullptr;
 }
-#if defined(ARCH_CPU_64_BITS)
+#if PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
 // Mapping the GWP-ASan region in to the lower 32-bits of address space
 // makes it much more likely that a bad pointer dereference points into
 // our region and triggers a false positive report. We rely on the fact
 // that PA address pools are never allocated in the first 4GB due to
 // their alignment requirements.
 PA_CHECK(super_page_span_start >= (1ULL << 32));
-#endif // defined(ARCH_CPU_64_BITS)
+#endif // PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
 uintptr_t super_page_span_end =
 super_page_span_start + super_page_count * kSuperPageSize;
@@ -90,15 +93,16 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
 partition_page_idx += bucket->get_pages_per_slot_span()) {
 auto* slot_span_metadata =
 &page_metadata[partition_page_idx].slot_span_metadata;
-bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata);
+bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata, root.get());
 auto slot_span_start =
-internal::SlotSpanMetadata::ToSlotSpanStart(slot_span_metadata);
+internal::SlotSpanMetadata<internal::MetadataKind::kReadOnly>::
+ToSlotSpanStart(slot_span_metadata);
 for (uintptr_t slot_idx = 0; slot_idx < kSlotsPerSlotSpan; ++slot_idx) {
 auto slot_start = slot_span_start + slot_idx * kSlotSize;
-PartitionRoot::RefCountPointerFromSlotStartAndSize(slot_start,
-kSlotSize)
-->InitalizeForGwpAsan();
+PartitionRoot::InSlotMetadataPointerFromSlotStartAndSize(slot_start,
+kSlotSize)
+->InitializeForGwpAsan();
 size_t global_slot_idx = (slot_start - super_page_span_start -
 kSuperPageGwpAsanSlotAreaBeginOffset) /
 kSlotSize;
@@ -122,11 +126,11 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
 // static
 bool GwpAsanSupport::CanReuse(uintptr_t slot_start) {
 const size_t kSlotSize = 2 * internal::SystemPageSize();
-return PartitionRoot::RefCountPointerFromSlotStartAndSize(slot_start,
-kSlotSize)
+return PartitionRoot::InSlotMetadataPointerFromSlotStartAndSize(slot_start,
+kSlotSize)
 ->CanBeReusedByGwpAsan();
 }
 } // namespace partition_alloc
-#endif // BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
+#endif // PA_BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
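
The MapRegion() hunk above swaps an immediately-invoked lambda for a named GwpAsanPartitionOptions() factory that feeds a NoDestructor-wrapped static root. A simplified sketch of that construction pattern, using a plain function-local static and invented Options/Root types in place of PartitionAlloc's real ones:

// options_factory_demo.cc -- "options factory feeding a lazily constructed
// singleton"; names and fields are illustrative, not PartitionAlloc's types.
#include <iostream>

struct Options {
  bool backup_ref_ptr = false;  // stand-in for PartitionOptions::backup_ref_ptr
};

struct Root {
  explicit Root(const Options& opts) : backup_ref_ptr_enabled(opts.backup_ref_ptr) {}
  bool backup_ref_ptr_enabled;
};

// Named factory: keeps the option wiring in one obvious place instead of an
// immediately-invoked lambda at the call site.
Options GwpAsanOptions() {
  Options options;
  options.backup_ref_ptr = true;
  return options;
}

Root& GetRoot() {
  // A plain function-local static stands in for
  // internal::base::NoDestructor<PartitionRoot>; both construct exactly once,
  // and NoDestructor additionally skips the destructor at process exit.
  static Root root(GwpAsanOptions());
  return root;
}

int main() {
  std::cout << "backup_ref_ptr enabled: " << GetRoot().backup_ref_ptr_enabled << "\n";
}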

Some files were not shown because too many files have changed in this diff.