Compare commits: v103.0.5060.53-1 ... master

No commits in common. "v103.0.5060.53-1" and "master" have entirely different histories.

.github/workflows/build.yml (469 lines changed, vendored)
@@ -16,44 +16,44 @@ env:
   SCCACHE_CACHE_SIZE: 200M
 jobs:
   cache-toolchains-posix:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Cache toolchains (Linux, OpenWrt, Android)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: |
             src/third_party/llvm-build/Release+Asserts/
             src/gn/
-            src/qemu-user-static*.deb
+            src/qemu-user*.deb
           key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache PGO (Linux, OpenWrt)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/chrome/build/pgo_profiles/
           key: pgo-linux-openwrt-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache AFDO (Android)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/chrome/android/profiles/
           key: afdo-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache Android NDK (Android)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
-          path: src/third_party/android_ndk/
+          path: src/third_party/android_toolchain/ndk/
           key: android-ndk-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - run: ./get-clang.sh
      - run: EXTRA_FLAGS='target_os="android"' ./get-clang.sh
      - run: |
-          if [ ! -f qemu-user-static*.deb ]; then
-            wget https://snapshot.debian.org/archive/debian/20220515T152741Z/pool/main/q/qemu/qemu-user-static_7.0%2Bdfsg-6_amd64.deb
+          if [ ! -f qemu-user*.deb ]; then
+            wget https://snapshot.debian.org/archive/debian/20250405T083429Z/pool/main/q/qemu/qemu-user_9.2.2%2Bds-1%2Bb2_amd64.deb
           fi
   cache-toolchains-win:
-    runs-on: windows-2019
+    runs-on: windows-2022
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Cache toolchains
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: |
             src/third_party/llvm-build/Release+Asserts/
@@ -62,12 +62,12 @@ jobs:
             ~/bin/ninja.exe
           key: toolchains-win-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache PGO (win64)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/chrome/build/pgo_profiles/chrome-win64-*
           key: pgo-win64-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache PGO (win32)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/chrome/build/pgo_profiles/chrome-win32-*
           key: pgo-win32-arm64-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
@@ -79,10 +79,10 @@ jobs:
           unzip ninja-win.zip -d ~/bin
         fi
   cache-toolchains-mac:
-    runs-on: macos-11
+    runs-on: macos-13
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/cache@v2
+      - uses: actions/checkout@v4
+      - uses: actions/cache@v4
         with:
           path: |
             src/third_party/llvm-build/Release+Asserts/
@@ -93,60 +93,58 @@ jobs:
       - run: EXTRA_FLAGS='target_cpu="arm64"' ./get-clang.sh
   linux:
     needs: cache-toolchains-posix
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     strategy:
       fail-fast: false
       matrix:
-        arch: [x64, x86, arm64, arm, mipsel, mips64el]
+        arch: [x64, x86, arm64, arm, mipsel, mips64el, riscv64, loong64]
     env:
       EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"'
       BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
-      CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-go@v3
-        with:
-          go-version: '^1.18.1'
+      - uses: actions/checkout@v4
       - name: Cache toolchains (Linux, OpenWrt, Android)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: |
             src/third_party/llvm-build/Release+Asserts/
             src/gn/
-            src/qemu-user-static*.deb
+            src/qemu-user*.deb
           key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache PGO (Linux, OpenWrt)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/chrome/build/pgo_profiles/
           key: pgo-linux-openwrt-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
+      - name: Regenerate Debian keyring
+        run: |
+          rm -f ./build/linux/sysroot_scripts/keyring.gpg
+          GPG_TTY=/dev/null ./build/linux/sysroot_scripts/generate_keyring.sh
       - name: Cache sysroot
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/out/sysroot-build/bullseye/bullseye_*
           key: sysroot-linux-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - id: ccache-timestamp
-        run: echo "::set-output name=date::$(date +%s)"
+        run: echo "CCACHE_TIMESTAMP=$(date +%s)" >>$GITHUB_OUTPUT
       - name: Cache ccache files
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
-          path: ~/.ccache
-          key: ccache-linux-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
+          path: ~/.cache/ccache
+          key: ccache-linux-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
           restore-keys: ccache-linux-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
       - name: Install APT packages
         run: |
           sudo apt update
-          sudo apt install ninja-build pkg-config qemu-user ccache bubblewrap
-          sudo dpkg -i qemu-user-static_7.0+dfsg-6_amd64.deb
+          sudo apt install ninja-build pkg-config ccache bubblewrap
+          sudo apt remove -y qemu-user-binfmt
+          sudo dpkg -i qemu-user*.deb
           # libc6-i386 interferes with x86 build
           sudo apt remove libc6-i386
       - run: ./get-clang.sh
       - run: ccache -z
       - run: ./build.sh
       - run: ccache -s
-      - run: CCACHE_DISABLE=1 ./go-build.sh
-        working-directory: src/out/Release/cronet
       - run: ../tests/basic.sh out/Release/naive
       - name: Pack naiveproxy assets
         run: |
@@ -155,85 +153,76 @@ jobs:
           tar cJf ${{ env.BUNDLE }}.tar.xz ${{ env.BUNDLE }}
           openssl sha256 out/Release/naive >sha256sum.txt
           echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v4
         with:
           name: ${{ env.BUNDLE }}.tar.xz naive executable sha256 ${{ env.SHA256SUM }}
           path: src/sha256sum.txt
       - name: Upload naiveproxy assets
         if: ${{ github.event_name == 'release' }}
-        run: hub release edit -a ${{ env.BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Pack cronet assets
-        if: ${{ github.event_name == 'release' }}
-        run: |
-          mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
-          tar cJf ${{ env.CRONET_BUNDLE }}.tar.xz ${{ env.CRONET_BUNDLE }}
-          openssl sha256 ${{ env.CRONET_BUNDLE }}.tar.xz >sha256sum.txt
-          echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
-        if: ${{ github.event_name == 'release' }}
-        with:
-          name: ${{ env.CRONET_BUNDLE }}.tar.xz sha256 ${{ env.SHA256SUM }}
-          path: src/sha256sum.txt
-      - name: Upload cronet assets
-        if: ${{ github.event_name == 'release' }}
-        run: hub release edit -a ${{ env.CRONET_BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
+        run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }}.tar.xz --clobber
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
   android:
     needs: cache-toolchains-posix
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     strategy:
       fail-fast: false
       matrix:
         arch: [x64, x86, arm64, arm]
+        include:
+          - arch: x64
+            abi: x86_64
+          - arch: x86
+            abi: x86
+          - arch: arm64
+            abi: arm64-v8a
+          - arch: arm
+            abi: armeabi-v7a
     env:
       EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="android"'
-      BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
-      CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
+      BUNDLE: naiveproxy-plugin-${{ github.event.release.tag_name || 'v1.1.1.1-1' }}-${{ matrix.abi }}.apk
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-java@v4
         with:
-          go-version: '^1.18.1'
+          distribution: 'temurin'
+          java-version: 17
       - name: Cache toolchains (Linux, OpenWrt, Android)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: |
             src/third_party/llvm-build/Release+Asserts/
             src/gn/
-            src/qemu-user-static*.deb
+            src/qemu-user*.deb
           key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache AFDO (Android)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/chrome/android/profiles/
           key: afdo-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache Android NDK (Android)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
-          path: src/third_party/android_ndk/
+          path: src/third_party/android_toolchain/ndk/
           key: android-ndk-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache sysroot
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/out/sysroot-build/android/
           key: sysroot-android-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - id: ccache-timestamp
-        run: echo "::set-output name=date::$(date +%s)"
+        run: echo "CCACHE_TIMESTAMP=$(date +%s)" >>$GITHUB_OUTPUT
       - name: Cache ccache files
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
-          path: ~/.ccache
-          key: ccache-android-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
+          path: ~/.cache/ccache
+          key: ccache-android-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
           restore-keys: ccache-android-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
       - name: Install APT packages
         run: |
           sudo apt update
-          sudo apt install ninja-build pkg-config qemu-user ccache bubblewrap
-          sudo dpkg -i qemu-user-static_7.0+dfsg-6_amd64.deb
+          sudo apt install ninja-build pkg-config ccache bubblewrap
+          sudo apt remove -y qemu-user-binfmt
+          sudo dpkg -i qemu-user*.deb
           # libc6-i386 interferes with x86 build
           sudo apt remove libc6-i386
       - run: ./get-clang.sh
@@ -241,45 +230,37 @@ jobs:
       - run: ./build.sh
       - run: ccache -s
       - run: ./get-android-sys.sh
-      - run: CCACHE_DISABLE=1 ./go-build.sh
-        working-directory: src/out/Release/cronet
       - run: ../tests/basic.sh out/Release/naive
-      - name: Pack naiveproxy assets
-        run: |
-          mkdir ${{ env.BUNDLE }}
-          cp out/Release/naive config.json ../LICENSE ../USAGE.txt ${{ env.BUNDLE }}
-          tar cJf ${{ env.BUNDLE }}.tar.xz ${{ env.BUNDLE }}
-          openssl sha256 out/Release/naive >sha256sum.txt
-          echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
-        with:
-          name: ${{ env.BUNDLE }}.tar.xz naive executable sha256 ${{ env.SHA256SUM }}
-          path: src/sha256sum.txt
+      - name: Gradle cache
+        uses: actions/cache@v4
+        with:
+          path: ~/.gradle
+          key: gradle-${{ hashFiles('**/*.gradle.kts') }}
+      - name: Create APK
+        working-directory: apk
+        env:
+          APK_ABI: ${{ matrix.abi }}
+          APK_VERSION_NAME: ${{ github.event.release.tag_name || 'v1.1.1.1-1' }}
+          KEYSTORE_PASS: ${{ secrets.KEYSTORE_PASS }}
+        run: |
+          mkdir -p app/libs/$APK_ABI
+          cp ../src/out/Release/naive app/libs/$APK_ABI/libnaive.so
+          ./gradlew :app:assembleRelease
+          openssl sha256 app/build/outputs/apk/release/${{ env.BUNDLE }} >sha256sum.txt
+          echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
+      - uses: actions/upload-artifact@v4
+        with:
+          name: ${{ env.BUNDLE }} sha256 ${{ env.SHA256SUM }}
+          path: apk/sha256sum.txt
       - name: Upload naiveproxy assets
         if: ${{ github.event_name == 'release' }}
-        run: hub release edit -a ${{ env.BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Pack cronet assets
-        if: ${{ github.event_name == 'release' }}
-        run: |
-          mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
-          tar cJf ${{ env.CRONET_BUNDLE }}.tar.xz ${{ env.CRONET_BUNDLE }}
-          openssl sha256 ${{ env.CRONET_BUNDLE }}.tar.xz >sha256sum.txt
-          echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
-        if: ${{ github.event_name == 'release' }}
-        with:
-          name: ${{ env.CRONET_BUNDLE }}.tar.xz sha256 ${{ env.SHA256SUM }}
-          path: src/sha256sum.txt
-      - name: Upload cronet assets
-        if: ${{ github.event_name == 'release' }}
-        run: hub release edit -a ${{ env.CRONET_BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
+        working-directory: apk/app/build/outputs/apk/release
+        run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }} --clobber
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
   win:
     needs: cache-toolchains-win
-    runs-on: windows-2019
+    runs-on: windows-2022
     strategy:
       fail-fast: false
       matrix:
@@ -287,14 +268,10 @@ jobs:
     env:
       EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"'
       BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
-      CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-go@v3
-        with:
-          go-version: '^1.18.1'
+      - uses: actions/checkout@v4
       - name: Cache toolchains
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: |
             src/third_party/llvm-build/Release+Asserts/
@@ -304,30 +281,28 @@ jobs:
           key: toolchains-win-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache PGO (win64)
         if: ${{ matrix.arch == 'x64' }}
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/chrome/build/pgo_profiles/chrome-win64-*
           key: pgo-win64-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache PGO (win32)
         if: ${{ matrix.arch != 'x64' }}
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/chrome/build/pgo_profiles/chrome-win32-*
           key: pgo-win32-arm64-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - id: ccache-timestamp
-        run: echo "::set-output name=date::$(date +%s)"
+        run: echo "CCACHE_TIMESTAMP=$(date +%s)" >>$GITHUB_OUTPUT
       - name: Cache ccache files
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: ~/AppData/Local/Mozilla/sccache
-          key: ccache-win-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
+          key: ccache-win-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
           restore-keys: ccache-win-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
       - run: ./get-clang.sh
       - run: ~/.cargo/bin/sccache -z
       - run: ./build.sh
       - run: ~/.cargo/bin/sccache -s
-      - run: CCACHE_DISABLE=1 ./go-build.sh
-        working-directory: src/out/Release/cronet
       - run: ../tests/basic.sh out/Release/naive
         # No real or emulated environment is available to test this.
         if: ${{ matrix.arch != 'arm64' }}
@@ -338,35 +313,18 @@ jobs:
           7z a ${{ env.BUNDLE }}.zip ${{ env.BUNDLE }}
           openssl sha256 out/Release/naive.exe >sha256sum.txt
           echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v4
         with:
           name: ${{ env.BUNDLE }}.zip naive executable sha256 ${{ env.SHA256SUM }}
           path: src/sha256sum.txt
       - name: Upload naiveproxy assets
         if: ${{ github.event_name == 'release' }}
-        run: hub release edit -a ${{ env.BUNDLE }}.zip -m "" "${GITHUB_REF##*/}"
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Pack cronet assets
-        if: ${{ github.event_name == 'release' }}
-        run: |
-          mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
-          7z a ${{ env.CRONET_BUNDLE }}.zip ${{ env.CRONET_BUNDLE }}
-          openssl sha256 ${{ env.CRONET_BUNDLE }}.zip >sha256sum.txt
-          echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
-        if: ${{ github.event_name == 'release' }}
-        with:
-          name: ${{ env.CRONET_BUNDLE }}.zip sha256 ${{ env.SHA256SUM }}
-          path: src/sha256sum.txt
-      - name: Upload cronet assets
-        if: ${{ github.event_name == 'release' }}
-        run: hub release edit -a ${{ env.CRONET_BUNDLE }}.zip -m "" "${GITHUB_REF##*/}"
+        run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }}.zip --clobber
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
   mac:
     needs: cache-toolchains-mac
-    runs-on: macos-11
+    runs-on: macos-13
     strategy:
       fail-fast: false
       matrix:
@@ -374,14 +332,10 @@ jobs:
     env:
       EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"'
       BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
-      CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-go@v3
-        with:
-          go-version: '^1.18.1'
+      - uses: actions/checkout@v4
       - name: Cache toolchains and PGO
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: |
             src/third_party/llvm-build/Release+Asserts/
@@ -389,20 +343,19 @@ jobs:
             src/gn/
           key: toolchains-pgo-mac-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - id: ccache-timestamp
-        run: echo "::set-output name=date::$(date +%s)"
+        run: echo "CCACHE_TIMESTAMP=$(date +%s)" >>$GITHUB_OUTPUT
       - name: Cache ccache files
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: ~/Library/Caches/ccache
-          key: ccache-mac-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
+          key: ccache-mac-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
           restore-keys: ccache-mac-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
       - run: brew install ninja ccache
+      - run: pip install setuptools
       - run: ./get-clang.sh
       - run: ccache -z
       - run: ./build.sh
       - run: ccache -s
-      - run: CCACHE_DISABLE=1 ./go-build.sh
-        working-directory: src/out/Release/cronet
       - run: ../tests/basic.sh out/Release/naive
         # No real or emulated environment is available to test this.
         if: ${{ matrix.arch != 'arm64' }}
@@ -413,212 +366,199 @@ jobs:
           tar cJf ${{ env.BUNDLE }}.tar.xz ${{ env.BUNDLE }}
           openssl sha256 out/Release/naive >sha256sum.txt
           echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v4
         with:
           name: ${{ env.BUNDLE }}.tar.xz naive executable sha256 ${{ env.SHA256SUM }}
           path: src/sha256sum.txt
       - name: Upload naiveproxy assets
         if: ${{ github.event_name == 'release' }}
-        run: hub release edit -a ${{ env.BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
+        run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }}.tar.xz --clobber
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Pack cronet assets
-        if: ${{ github.event_name == 'release' }}
-        run: |
-          mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
-          tar cJf ${{ env.CRONET_BUNDLE }}.tar.xz ${{ env.CRONET_BUNDLE }}
-          openssl sha256 ${{ env.CRONET_BUNDLE }}.tar.xz >sha256sum.txt
-          echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
-        if: ${{ github.event_name == 'release' }}
-        with:
-          name: ${{ env.CRONET_BUNDLE }}.tar.xz sha256 ${{ env.SHA256SUM }}
-          path: src/sha256sum.txt
-      - name: Upload cronet assets
-        if: ${{ github.event_name == 'release' }}
-        run: hub release edit -a ${{ env.CRONET_BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-  ios:
-    needs: cache-toolchains-mac
-    runs-on: macos-11
-    strategy:
-      fail-fast: false
-      matrix:
-        arch: [x64, arm64]
-    env:
-      EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="ios" ios_enable_code_signing=false'
-      BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
-    steps:
-      - uses: actions/checkout@v2
-      - name: Cache toolchains and PGO
-        uses: actions/cache@v2
-        with:
-          path: |
-            src/third_party/llvm-build/Release+Asserts/
-            src/chrome/build/pgo_profiles/chrome-mac-*
-            src/gn/
-          key: toolchains-pgo-mac-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
-      - id: ccache-timestamp
-        run: echo "::set-output name=date::$(date +%s)"
-      - name: Cache ccache files
-        uses: actions/cache@v2
-        with:
-          path: ~/Library/Caches/ccache
-          key: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
-          restore-keys: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
-      - run: brew install ninja ccache
-      - run: ./get-clang.sh
-      - run: ccache -z
-      - run: ./build.sh
-      - run: ccache -s
   openwrt:
     needs: cache-toolchains-posix
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     strategy:
       fail-fast: false
       matrix:
         include:
           - arch: x86_64
-            openwrt: 'target=x86 subtarget=64'
+            openwrt: "target=x86 subtarget=64"
             target_cpu: x64
           - arch: x86
-            openwrt: 'target=x86 subtarget=generic'
+            openwrt: "target=x86 subtarget=geode"
             target_cpu: x86
           - arch: aarch64_cortex-a53
-            openwrt: 'target=sunxi subtarget=cortexa53'
+            openwrt: "target=sunxi subtarget=cortexa53"
             target_cpu: arm64
             extra: 'arm_cpu="cortex-a53"'
           - arch: aarch64_cortex-a53-static
-            openwrt: 'target=sunxi subtarget=cortexa53'
+            openwrt: "target=sunxi subtarget=cortexa53"
             target_cpu: arm64
-            extra: 'arm_cpu="cortex-a53" build_static=true'
+            extra: 'arm_cpu="cortex-a53" build_static=true use_allocator_shim=false use_partition_alloc=false'
+            openwrt_release: '24.10.0'
+            openwrt_gcc_ver: '13.3.0'
           - arch: aarch64_cortex-a72
-            openwrt: 'target=mvebu subtarget=cortexa72'
+            openwrt: "target=mvebu subtarget=cortexa72"
             target_cpu: arm64
             extra: 'arm_cpu="cortex-a72"'
-          - arch: aarch64_generic
-            openwrt: 'target=rockchip subtarget=armv8'
-            target_cpu: arm64
+          - arch: aarch64_cortex-a72-static
+            openwrt: "target=mvebu subtarget=cortexa72"
+            target_cpu: arm64
+            extra: 'arm_cpu="cortex-a72" build_static=true use_allocator_shim=false use_partition_alloc=false'
+            openwrt_release: '24.10.0'
+            openwrt_gcc_ver: '13.3.0'
+          - arch: aarch64_cortex-a76
+            openwrt: "target=bcm27xx subtarget=bcm2712"
+            target_cpu: arm64
+            extra: 'arm_cpu="cortex-a76"'
+            openwrt_release: '24.10.0'
+            openwrt_gcc_ver: '13.3.0'
+          - arch: aarch64_generic
+            openwrt: "target=layerscape subtarget=armv8_64b"
+            target_cpu: arm64
+          - arch: aarch64_generic-static
+            openwrt: "target=layerscape subtarget=armv8_64b"
+            target_cpu: arm64
+            extra: "build_static=true use_allocator_shim=false use_partition_alloc=false"
+            openwrt_release: '24.10.0'
+            openwrt_gcc_ver: '13.3.0'
           - arch: arm_arm1176jzf-s_vfp
-            openwrt: 'target=bcm27xx subtarget=bcm2708'
+            openwrt: "target=brcm2708 subtarget=bcm2708"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="arm1176jzf-s" arm_fpu="vfp" arm_float_abi="hard" arm_use_neon=false arm_use_thumb=false'
           - arch: arm_arm926ej-s
-            openwrt: 'target=mxs'
+            openwrt: "target=mxs subtarget=generic"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="arm926ej-s" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false'
           - arch: arm_cortex-a15_neon-vfpv4
-            openwrt: 'target=armvirt subtarget=32'
+            openwrt: "target=ipq806x subtarget=generic"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="cortex-a15" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
           - arch: arm_cortex-a5_vfpv4
-            openwrt: 'target=at91 subtarget=sama5'
+            openwrt: "target=at91 subtarget=sama5d3"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="cortex-a5" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
           - arch: arm_cortex-a7
-            openwrt: 'target=mediatek subtarget=mt7629'
+            openwrt: "target=mediatek subtarget=mt7629"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="cortex-a7" arm_float_abi="soft" arm_use_neon=false'
+            openwrt_release: '21.02.0'
+            openwrt_gcc_ver: '8.4.0'
           - arch: arm_cortex-a7_neon-vfpv4
-            openwrt: 'target=sunxi subtarget=cortexa7'
+            openwrt: "target=sunxi subtarget=cortexa7"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
           - arch: arm_cortex-a7_neon-vfpv4-static
-            openwrt: 'target=sunxi subtarget=cortexa7'
+            openwrt: "target=sunxi subtarget=cortexa7"
             target_cpu: arm
-            extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true build_static=true'
+            extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true build_static=true use_allocator_shim=false use_partition_alloc=false'
+            openwrt_release: '24.10.0'
+            openwrt_gcc_ver: '13.3.0'
+          - arch: arm_cortex-a7_vfpv4
+            openwrt: "target=at91 subtarget=sama7"
+            target_cpu: arm
+            extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
+            openwrt_release: '22.03.0'
+            openwrt_gcc_ver: '11.2.0'
           - arch: arm_cortex-a8_vfpv3
-            openwrt: 'target=sunxi subtarget=cortexa8'
+            openwrt: "target=sunxi subtarget=cortexa8"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="cortex-a8" arm_fpu="vfpv3" arm_float_abi="hard" arm_use_neon=false'
           - arch: arm_cortex-a9
-            openwrt: 'target=bcm53xx subtarget=generic'
+            openwrt: "target=bcm53xx subtarget=generic"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false'
           - arch: arm_cortex-a9-static
-            openwrt: 'target=bcm53xx subtarget=generic'
+            openwrt: "target=bcm53xx subtarget=generic"
             target_cpu: arm
-            extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false build_static=true'
+            extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false build_static=true use_allocator_shim=false use_partition_alloc=false'
+            openwrt_release: '24.10.0'
+            openwrt_gcc_ver: '13.3.0'
           - arch: arm_cortex-a9_neon
-            openwrt: 'target=imx6'
+            openwrt: "target=imx6 subtarget=generic"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true'
           - arch: arm_cortex-a9_vfpv3-d16
-            openwrt: 'target=tegra'
+            openwrt: "target=mvebu subtarget=cortexa9"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="vfpv3-d16" arm_float_abi="hard" arm_use_neon=false'
           - arch: arm_mpcore
-            openwrt: 'target=oxnas subtarget=ox820'
+            openwrt: "target=oxnas subtarget=ox820"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="mpcore" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false'
           - arch: arm_xscale
-            openwrt: 'target=kirkwood'
+            openwrt: "target=kirkwood subtarget=generic"
             target_cpu: arm
             extra: 'arm_version=0 arm_cpu="xscale" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false'
           - arch: mipsel_24kc
-            openwrt: 'target=ramips subtarget=rt305x'
+            openwrt: "target=ramips subtarget=rt305x"
             target_cpu: mipsel
             extra: 'mips_arch_variant="r2" mips_float_abi="soft" mips_tune="24kc"'
-          - arch: mipsel_74kc
-            openwrt: 'target=ramips subtarget=rt3883'
-            target_cpu: mipsel
-            extra: 'mips_arch_variant="r2" mips_float_abi="soft" mips_tune="74kc"'
+          - arch: mipsel_24kc-static
+            openwrt: "target=ramips subtarget=rt305x"
+            target_cpu: mipsel
+            extra: 'mips_arch_variant="r2" mips_float_abi="soft" build_static=true use_allocator_shim=false use_partition_alloc=false'
+            openwrt_release: '24.10.0'
+            openwrt_gcc_ver: '13.3.0'
           - arch: mipsel_mips32
-            openwrt: 'target=bcm47xx subtarget=generic'
+            openwrt: "target=brcm47xx subtarget=legacy"
             target_cpu: mipsel
             extra: 'mips_arch_variant="r1" mips_float_abi="soft"'
+          - arch: riscv64
+            openwrt: "target=sifiveu subtarget=generic"
+            target_cpu: riscv64
+            openwrt_release: '23.05.0'
+            openwrt_gcc_ver: '12.3.0'
+          - arch: loongarch64
+            openwrt: "target=loongarch64 subtarget=generic"
+            target_cpu: loong64
+            openwrt_release: '24.10.0'
+            openwrt_gcc_ver: '13.3.0'
     env:
-      EXTRA_FLAGS: target_cpu="${{ matrix.target_cpu }}" target_os="openwrt" use_allocator="none" use_allocator_shim=false use_partition_alloc=false ${{ matrix.extra }}
-      OPENWRT_FLAGS: arch=${{ matrix.arch }} release=21.02.2 gcc_ver=8.4.0 ${{ matrix.openwrt }}
+      EXTRA_FLAGS: target_cpu="${{ matrix.target_cpu }}" target_os="openwrt" ${{ matrix.extra }} enable_shadow_metadata=false
+      OPENWRT_FLAGS: arch=${{ matrix.arch }} release=${{ matrix.openwrt_release || '18.06.0' }} gcc_ver=${{ matrix.openwrt_gcc_ver || '7.3.0' }} ${{ matrix.openwrt }}
       BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
-      CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-go@v3
-        with:
-          go-version: '^1.18.1'
+      - uses: actions/checkout@v4
       - name: Cache toolchains (Linux, OpenWrt, Android)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: |
             src/third_party/llvm-build/Release+Asserts/
             src/gn/
-            src/qemu-user-static*.deb
+            src/qemu-user*.deb
           key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache PGO (Linux, OpenWrt)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/chrome/build/pgo_profiles/
           key: pgo-linux-openwrt-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - name: Cache sysroot
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: src/out/sysroot-build/openwrt
-          key: sysroot-openwrt-21.02.2-${{ matrix.arch }}-v${{ env.CACHE_EPOCH }}
+          key: sysroot-openwrt-23.05.0-${{ matrix.arch }}-v${{ env.CACHE_EPOCH }}
       - id: ccache-timestamp
-        run: echo "::set-output name=date::$(date +%s)"
+        run: echo "CCACHE_TIMESTAMP=$(date +%s)" >>$GITHUB_OUTPUT
       - name: Cache ccache files
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
-          path: ~/.ccache
-          key: ccache-openwrt-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
+          path: ~/.cache/ccache
+          key: ccache-openwrt-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
          restore-keys: ccache-openwrt-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
       - name: Install APT packages
         run: |
           sudo apt update
-          sudo apt install ninja-build pkg-config qemu-user ccache bubblewrap
-          sudo dpkg -i qemu-user-static_7.0+dfsg-6_amd64.deb
+          sudo apt install ninja-build pkg-config ccache bubblewrap
+          sudo apt remove -y qemu-user-binfmt
+          sudo dpkg -i qemu-user*.deb
           # libc6-i386 interferes with x86 build
           sudo apt remove libc6-i386
       - run: ./get-clang.sh
       - run: ccache -z
       - run: ./build.sh
       - run: ccache -s
-      - run: CCACHE_DISABLE=1 ./go-build.sh
-        working-directory: src/out/Release/cronet
-        if: ${{ ! contains(matrix.extra, 'build_static=true') }}
       - run: ../tests/basic.sh out/Release/naive
       - name: Pack naiveproxy assets
         run: |
@@ -627,29 +567,12 @@ jobs:
           tar cJf ${{ env.BUNDLE }}.tar.xz ${{ env.BUNDLE }}
           openssl sha256 out/Release/naive >sha256sum.txt
           echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v4
         with:
           name: ${{ env.BUNDLE }}.tar.xz naive executable sha256 ${{ env.SHA256SUM }}
           path: src/sha256sum.txt
       - name: Upload naiveproxy assets
         if: ${{ github.event_name == 'release' }}
-        run: hub release edit -a ${{ env.BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Pack cronet assets
-        if: ${{ github.event_name == 'release' && ! contains(matrix.extra, 'build_static=true') }}
-        run: |
-          mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
-          tar cJf ${{ env.CRONET_BUNDLE }}.tar.xz ${{ env.CRONET_BUNDLE }}
-          openssl sha256 ${{ env.CRONET_BUNDLE }}.tar.xz >sha256sum.txt
-          echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
-      - uses: actions/upload-artifact@v2
-        if: ${{ github.event_name == 'release' && ! contains(matrix.extra, 'build_static=true') }}
-        with:
-          name: ${{ env.CRONET_BUNDLE }}.tar.xz sha256 ${{ env.SHA256SUM }}
-          path: src/sha256sum.txt
-      - name: Upload cronet assets
-        if: ${{ github.event_name == 'release' && ! contains(matrix.extra, 'build_static=true') }}
-        run: hub release edit -a ${{ env.CRONET_BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
+        run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }}.tar.xz --clobber
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore (2 lines changed, vendored)

@@ -2,7 +2,7 @@ src/chrome/android/profiles/afdo.prof
 src/chrome/build/pgo_profiles/
 src/gn/
 src/third_party/llvm-build/
-src/third_party/android_ndk/
+src/third_party/android_toolchain/
 src/out/
 .vscode/
 *.pyc

CHROMIUM_VERSION (1 line changed)

@@ -1 +1 @@
-103.0.5060.53
+135.0.7049.38

README.md (57 lines changed)

@@ -1,9 +1,7 @@
-# NaïveProxy and Cronet
+# NaïveProxy

 NaïveProxy uses Chromium's network stack to camouflage traffic with strong censorship resistance and low detectability. Reusing Chrome's stack also ensures best practices in performance and security.

-Cronet is a library similarly derived from Chromium's network stack, but its official releases are limited to Android and iOS. NaïveProxy's fork of Cronet provides binary releases of its native API, support for multiple platforms, and support for creating Go apps with cgo and the [cronet-go](https://github.com/SagerNet/cronet-go) bindings.
-
 The following traffic attacks are mitigated by using Chromium's network stack:

 * Website fingerprinting / traffic classification: [mitigated](https://arxiv.org/abs/1707.00641) by traffic multiplexing in HTTP/2.

@@ -21,9 +19,9 @@ The frontend server can be any well-known reverse proxy that is able to route HT
 The Naïve server here works as a forward proxy and a packet length padding layer. Caddy forwardproxy is also a forward proxy but it lacks a padding layer. A [fork](https://github.com/klzgrad/forwardproxy) adds the NaïveProxy padding layer to forwardproxy, combining both in one.

-## Download NaïveProxy and Cronet binaries
+## Download NaïveProxy

-[Download here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [SagerNet](https://github.com/SagerNet/SagerNet)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
+Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [Exclave](https://github.com/dyhkwong/Exclave), [husi](https://github.com/xchacha20-poly1305/husi), [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).

 Users should always use the latest version to keep signatures identical to Chrome.

@@ -31,36 +29,33 @@ Build from source: Please see [.github/workflows/build.yml](https://github.com/k

 ## Server setup

-The following describes the naïve fork of forwardproxy setup.
+The following describes the naïve fork of Caddy forwardproxy setup.

-Build:
+Download [here](https://github.com/klzgrad/forwardproxy/releases/latest) or build from source:
 ```sh
 go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
-~/go/bin/xcaddy build --with github.com/caddyserver/forwardproxy@caddy2=github.com/klzgrad/forwardproxy@naive
+~/go/bin/xcaddy build --with github.com/caddyserver/forwardproxy=github.com/klzgrad/forwardproxy@naive
 ```

 Example Caddyfile (replace `user` and `pass` accordingly):
 ```
 {
-  servers {
-    protocol {
-      experimental_http3
-    }
-  }
   order forward_proxy before file_server
 }
-:443, example.com
-tls me@example.com
-route {
+:443, example.com {
+  tls me@example.com
   forward_proxy {
     basic_auth user pass
     hide_ip
     hide_via
     probe_resistance
   }
-  file_server { root /var/www/html }
+  file_server {
+    root /var/www/html
+  }
 }
 ```
-`:443` must appear first for this Caddyfile to work. For more advanced usage consider using [JSON for Caddy 2's config](https://caddyserver.com/docs/json/).
+`:443` must appear first for this Caddyfile to work. See Caddyfile [docs](https://caddyserver.com/docs/caddyfile/directives/tls) for customizing TLS certificates. For more advanced usage consider using [JSON for Caddy 2's config](https://caddyserver.com/docs/json/).

 Run with the Caddyfile:
 ```

@@ -82,6 +77,10 @@ Run `./naive` with the following `config.json` to get a SOCKS5 proxy at local po

 Or `quic://user:pass@example.com`, if it works better. See also [parameter usage](https://github.com/klzgrad/naiveproxy/blob/master/USAGE.txt) and [performance tuning](https://github.com/klzgrad/naiveproxy/wiki/Performance-Tuning).
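
The `config.json` referenced above falls outside this hunk. A minimal sketch consistent with the option grammar in USAGE.txt further below would be (hostname and credentials are placeholders):

```json
{
  "listen": "socks://127.0.0.1:1080",
  "proxy": "https://user:pass@example.com"
}
```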

+## Third-party integration
+
+* [v2rayN](https://github.com/2dust/v2rayN), GUI client
+
 ## Notes for downstream

 Do not use the master branch to track updates, as it rebases from a new root commit for every new Chrome release. Use stable releases and the associated tags to track new versions, where short release notes are also provided.

@@ -112,7 +111,7 @@ Further reads and writes after `kFirstPaddings` are unpadded to avoid performanc

 ### H2 RST_STREAM frame padding

 In experiments, NaïveProxy tends to send too many RST_STREAM frames per session, an uncommon behavior from regular browsers. To solve this, an END_STREAM DATA frame padded with total length distributed in [48, 72] is prepended to the RST_STREAM frame so it looks like a HEADERS frame. The server often replies to this with a WINDOW_UPDATE because padding is accounted in flow control. Whether this results in a new uncommon behavior is still unclear.

 ### H2 HEADERS frame padding

@@ -126,9 +125,9 @@ NaïveProxy servers and clients determines whether the counterpart is capable of

 The first CONNECT request to a server cannot use "Fast Open" to send payload before response, because the server's padding capability has not been determined from the first response and it's unknown whether to send padded or unpadded payload for Fast Open.

-## Changes from upstream
+## Changes from Chromium upstream

-- Minimize source code and build size (1% of the original)
+- Minimize source code and build size (0.3% of the original)
 - Disable exceptions and RTTI, except on Mac and Android.
 - Support OpenWrt builds
 - (Android, Linux) Use the builtin verifier instead of the system verifier (drop dependency of NSS on Linux) and read the system trust store from (following Go's behavior in crypto/x509/root_unix.go and crypto/x509/root_linux.go):

@@ -150,9 +149,13 @@ The first CONNECT request to a server cannot use "Fast Open" to send payload bef
 - Force tunneling for all sockets
 - Support HTTP/2 and HTTP/3 CONNECT tunnel Fast Open using the `fastopen` header
 - Pad RST_STREAM frames
-- (Cronet) Allow passing in `-connect-authority` header to override the CONNECT authority field
-- (Cronet) Disable system proxy resolution and use fixed proxy resolution specified by experimental option `proxy_server`
-- (Cronet) Support setting base::FeatureList by experimental option `feature_list`
-- (Cronet) Support setting the network isolation key of a stream with `-network-isolation-key` header
-- (Cronet) Add certificate net fetcher
-- (Cronet) Support setting socket limits by experimental option `socket_limits`
+
+## Known weaknesses
+
+* HTTP CONNECT Fast Open creates back to back h2 packets consistently, which should not appear so often. This could be fixed with a little bit of corking but it would require surgical change deep in Chromium h2 stack, not very easy to do.
+* TLS over TLS requires more handshake round trips than needed by common h2 requests, that is, no h2 requests need these many back and forth handshakes. There is no simple way to avoid this besides doing MITM proxying, breaking E2E encryption.
+* TLS over TLS overhead causes visible packet length enlargement and lack of small packets. Removing this overhead also requires MITM proxying.
+* TLS over TLS overhead also causes packets to consistently exceed MTU limits, which should not happen for an originating user agent. Fixing this requires re-segmentation and it is not easy to do.
+* Packet length obfuscation partly relies on h2 multiplexing, which does not work if there is only one connection, a scenario not uncommon. It is not clear how to create covering co-connections organically (i.e. not hard coded).
+* Multiplexing requires use of a few long-lived tunnel connections. It is not clear how long is appropriate for parroting and how to convincingly rotate the connections if there is an age limit or how to detect and recover stuck tunnel connections convincingly.

USAGE.txt (37 lines changed)

@@ -14,6 +14,9 @@ Description:
     "proxy": "..."
   }

+  Specifying a flag multiple times on the command line is equivalent to
+  having an array of multiple strings in the JSON file.
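
For example, under the equivalence just described, the hypothetical invocation

```sh
./naive --listen=socks://:1080 --listen=http://:8080
```

corresponds to this `config.json`:

```json
{
  "listen": ["socks://:1080", "http://:8080"]
}
```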

   Uses "config.json" by default if run without arguments.

 Options:

@@ -26,17 +29,16 @@ Options:

     Prints version.

-  --listen=<proto>://[addr][:port]
-  --listen=socks://[[user]:[pass]@][addr][:port]
+  --listen=LISTEN-URI

-    Listens at addr:port with protocol <proto>.
+    LISTEN-URI = <LISTEN-PROTO>"://"[<USER>":"<PASS>"@"][<ADDR>][":"<PORT>]
+    LISTEN-PROTO = "socks" | "http" | "redir"

-    Available proto: socks, http, redir.
+    Listens at addr:port with protocol <LISTEN-PROTO>.
     Can be specified multiple times to listen on multiple ports.
     Default proto, addr, port: socks, 0.0.0.0, 1080.

     * http: Supports only proxying https:// URLs, no http://.

-    * redir: Works with certain iptables setup.
+    Note: redir requires specific iptables rules and uses no authentication.

       (Redirecting locally originated traffic)
       iptables -t nat -A OUTPUT -d $proxy_server_ip -j RETURN

@@ -53,10 +55,21 @@ Options:
     The artificial results are not saved for privacy, so restarting the
     resolver may cause downstream to cache stale results.

-  --proxy=<proto>://<user>:<pass>@<hostname>[:<port>]
+  --proxy=PROXY

-    Routes traffic via the proxy server. Connects directly by default.
-    Available proto: https, quic. Infers port by default.
+    PROXY = PROXY-CHAIN | SOCKS-PROXY
+    PROXY-CHAIN = <PROXY-URI>[","<PROXY-CHAIN>]
+    PROXY-URI = <PROXY-PROTO>"://"[<USER>":"<PASS>"@"]<HOSTNAME>[":"<PORT>]
+    PROXY-PROTO = "http" | "https" | "quic"
+    SOCKS-PROXY = "socks://"<HOSTNAME>[":"<PORT>]
+
+    Routes traffic via the proxy chain.
+    The default is direct connection without proxying.
+    The last PROXY-URI is negotiated automatically for Naive padding.
+    Limitations:
+    * QUIC proxies cannot follow TCP-based proxies in a proxy chain.
+    * The user needs to ensure there is no loop in the proxy chain.
+    * SOCKS proxies do not support chaining, authentication, or Naive padding.
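
For example, a two-hop chain following the PROXY-CHAIN grammar above might look like this (hosts and credentials are placeholders):

```sh
./naive --listen=socks://127.0.0.1:1080 \
  --proxy=https://u1:p1@hop1.example.com,https://u2:p2@hop2.example.com
```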

   --insecure-concurrency=<N>

@@ -93,3 +106,7 @@ Options:
   --ssl-key-log-file=<path>

     Saves SSL keys for Wireshark inspection.

+  --no-post-quantum
+
+    Overrides the default and disables post-quantum key agreement.

apk/.gitignore (new file, 3 lines, vendored):

```
.gradle/
app/build/
app/libs/
```

apk/app/build.gradle.kts (new file, 73 lines):

```kotlin
plugins {
    id("com.android.application")
    id("org.jetbrains.kotlin.android")
}

android {
    namespace = "io.nekohasekai.sagernet.plugin.naive"

    signingConfigs {
        create("release") {
            storeFile = rootProject.file("release.keystore")
            storePassword = System.getenv("KEYSTORE_PASS")
            keyAlias = "release"
            keyPassword = System.getenv("KEYSTORE_PASS")
        }
    }

    buildTypes {
        getByName("release") {
            isMinifyEnabled = true
            signingConfig = signingConfigs.getByName("release")
        }
    }

    buildToolsVersion = "35.0.0"

    compileSdk = 35

    defaultConfig {
        minSdk = 24
        targetSdk = 35

        applicationId = "io.nekohasekai.sagernet.plugin.naive"
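        // For example, APK_VERSION_NAME "v135.0.7049.38-1" yields
        // versionCode 135 * 10 + 1 = 1351: the Chromium major version
        // times ten, plus the naiveproxy release number after the dash.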
        versionCode = System.getenv("APK_VERSION_NAME").removePrefix("v").split(".")[0].toInt() * 10 + System.getenv("APK_VERSION_NAME").removePrefix("v").split("-")[1].toInt()
        versionName = System.getenv("APK_VERSION_NAME").removePrefix("v")
        splits.abi {
            isEnable = true
            isUniversalApk = false
            reset()
            include(System.getenv("APK_ABI"))
        }
    }

    compileOptions {
        sourceCompatibility = JavaVersion.VERSION_17
        targetCompatibility = JavaVersion.VERSION_17
    }

    lint {
        showAll = true
        checkAllWarnings = true
        checkReleaseBuilds = false
        warningsAsErrors = true
    }

    packaging {
        jniLibs.useLegacyPackaging = true
    }

    applicationVariants.all {
        outputs.all {
            this as com.android.build.gradle.internal.api.BaseVariantOutputImpl
            outputFileName =
                outputFileName.replace(project.name, "naiveproxy-plugin-v$versionName")
                    .replace("-release", "")
                    .replace("-oss", "")
        }
    }

    sourceSets.getByName("main") {
        jniLibs.srcDir("libs")
    }
}
```

apk/app/src/main/AndroidManifest.xml (new file, 45 lines):

```xml
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:tools="http://schemas.android.com/tools"
    android:installLocation="internalOnly"
    tools:ignore="MissingLeanbackLauncher">

    <uses-feature
        android:name="android.software.leanback"
        android:required="false" />
    <uses-feature
        android:name="android.hardware.touchscreen"
        android:required="false" />

    <application
        android:allowBackup="false"
        android:icon="@mipmap/ic_launcher"
        android:label="Naïve Plugin"
        android:roundIcon="@mipmap/ic_launcher_round">
        <provider
            android:name=".BinaryProvider"
            android:authorities="io.nekohasekai.sagernet.plugin.naive.BinaryProvider"
            android:directBootAware="true"
            android:exported="true"
            tools:ignore="ExportedContentProvider">
            <intent-filter>
                <action android:name="io.nekohasekai.sagernet.plugin.ACTION_NATIVE_PLUGIN" />
            </intent-filter>
            <intent-filter>
                <action android:name="io.nekohasekai.sagernet.plugin.ACTION_NATIVE_PLUGIN" />
                <data
                    android:host="io.nekohasekai.sagernet"
                    android:path="/naive-plugin"
                    android:scheme="plugin" />
            </intent-filter>

            <meta-data
                android:name="io.nekohasekai.sagernet.plugin.id"
                android:value="naive-plugin" />
            <meta-data
                android:name="io.nekohasekai.sagernet.plugin.executable_path"
                android:value="libnaive.so" />
        </provider>
    </application>

</manifest>
```

New file (98 lines; the file path was not captured in this view — the content defines `NativePluginProvider` in package `io.nekohasekai.sagernet.plugin`):

```kotlin
/******************************************************************************
 *                                                                            *
 * Copyright (C) 2021 by nekohasekai <contact-sagernet@sekai.icu>             *
 *                                                                            *
 * This program is free software: you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License as published by      *
 * the Free Software Foundation, either version 3 of the License, or         *
 * (at your option) any later version.                                       *
 *                                                                            *
 * This program is distributed in the hope that it will be useful,           *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of            *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              *
 * GNU General Public License for more details.                              *
 *                                                                            *
 * You should have received a copy of the GNU General Public License         *
 * along with this program. If not, see <http://www.gnu.org/licenses/>.      *
 *                                                                            *
 ******************************************************************************/

package io.nekohasekai.sagernet.plugin

import android.content.ContentProvider
import android.content.ContentValues
import android.database.Cursor
import android.database.MatrixCursor
import android.net.Uri
import android.os.Bundle
import android.os.ParcelFileDescriptor

abstract class NativePluginProvider : ContentProvider() {
    override fun getType(uri: Uri): String? = "application/x-elf"

    override fun onCreate(): Boolean = true

    /**
     * Provide all files needed for native plugin.
     *
     * @param provider A helper object to use to add files.
     */
    protected abstract fun populateFiles(provider: PathProvider)

    override fun query(
        uri: Uri,
        projection: Array<out String>?,
        selection: String?,
        selectionArgs: Array<out String>?,
        sortOrder: String?,
    ): Cursor? {
        check(selection == null && selectionArgs == null && sortOrder == null)
        val result = MatrixCursor(projection)
        populateFiles(PathProvider(uri, result))
        return result
    }

    /**
     * Returns executable entry absolute path.
     * This is used for fast mode initialization where ss-local launches your native binary at the path given directly.
     * In order for this to work, plugin app is encouraged to have the following in its AndroidManifest.xml:
     * - android:installLocation="internalOnly" for <manifest>
     * - android:extractNativeLibs="true" for <application>
     *
     * Default behavior is throwing UnsupportedOperationException. If you don't wish to use this feature, use the
     * default behavior.
     *
     * @return Absolute path for executable entry.
     */
    open fun getExecutable(): String = throw UnsupportedOperationException()

    abstract fun openFile(uri: Uri): ParcelFileDescriptor
    override fun openFile(uri: Uri, mode: String): ParcelFileDescriptor {
        check(mode == "r")
        return openFile(uri)
    }

    override fun call(method: String, arg: String?, extras: Bundle?): Bundle? = when (method) {
        PluginContract.METHOD_GET_EXECUTABLE -> {
            Bundle().apply {
                putString(PluginContract.EXTRA_ENTRY, getExecutable())
            }
        }
        else -> super.call(method, arg, extras)
    }

    // Methods that should not be used
    override fun insert(uri: Uri, values: ContentValues?): Uri? =
        throw UnsupportedOperationException()

    override fun update(
        uri: Uri,
        values: ContentValues?,
        selection: String?,
        selectionArgs: Array<out String>?,
    ): Int =
        throw UnsupportedOperationException()

    override fun delete(uri: Uri, selection: String?, selectionArgs: Array<out String>?): Int =
        throw UnsupportedOperationException()
}
```

@ -0,0 +1,53 @@
/******************************************************************************
 *                                                                            *
 * Copyright (C) 2021 by nekohasekai <contact-sagernet@sekai.icu>             *
 *                                                                            *
 * This program is free software: you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License as published by      *
 * the Free Software Foundation, either version 3 of the License, or         *
 * (at your option) any later version.                                       *
 *                                                                            *
 * This program is distributed in the hope that it will be useful,           *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of            *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              *
 * GNU General Public License for more details.                              *
 *                                                                            *
 * You should have received a copy of the GNU General Public License         *
 * along with this program. If not, see <http://www.gnu.org/licenses/>.      *
 *                                                                            *
 ******************************************************************************/

package io.nekohasekai.sagernet.plugin

import android.database.MatrixCursor
import android.net.Uri
import java.io.File

/**
 * Helper class to provide relative paths of files to copy.
 */
class PathProvider internal constructor(baseUri: Uri, private val cursor: MatrixCursor) {
    private val basePath = baseUri.path?.trim('/') ?: ""

    fun addPath(path: String, mode: Int = 0b110100100): PathProvider {
        val trimmed = path.trim('/')
        if (trimmed.startsWith(basePath)) cursor.newRow()
            .add(PluginContract.COLUMN_PATH, trimmed)
            .add(PluginContract.COLUMN_MODE, mode)
        return this
    }

    fun addTo(file: File, to: String = "", mode: Int = 0b110100100): PathProvider {
        var sub = to + file.name
        if (basePath.startsWith(sub)) if (file.isDirectory) {
            sub += '/'
            file.listFiles()!!.forEach { addTo(it, sub, mode) }
        } else addPath(sub, mode)
        return this
    }

    fun addAt(file: File, at: String = "", mode: Int = 0b110100100): PathProvider {
        if (basePath.startsWith(at)) {
            if (file.isDirectory) file.listFiles()!!.forEach { addTo(it, at, mode) } else addPath(at, mode)
        }
        return this
    }
}
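A note on the mode arguments above: the default `0b110100100` is octal 644 (`rw-r--r--`), and the `0b111101101` used further below is octal 755 (`rwxr-xr-x`). To illustrate how `addTo` recurses over a directory, here is a minimal hypothetical provider sketch; the class name and the `geoip` directory are invented for illustration and do not appear in this repository.

package io.nekohasekai.sagernet.plugin.demo

import android.net.Uri
import android.os.ParcelFileDescriptor
import io.nekohasekai.sagernet.plugin.NativePluginProvider
import io.nekohasekai.sagernet.plugin.PathProvider
import java.io.File

// Hypothetical plugin that ships a directory of data files.
class GeoDataProvider : NativePluginProvider() {
    override fun populateFiles(provider: PathProvider) {
        // addTo() walks the tree and emits one row per file, but only for the
        // subtree the query's base path points into (see basePath.startsWith).
        provider.addTo(File(context!!.filesDir, "geoip"), mode = 0b110100100)
    }

    // Serve the contents of any path announced in populateFiles().
    override fun openFile(uri: Uri): ParcelFileDescriptor = ParcelFileDescriptor.open(
        File(context!!.filesDir, uri.path!!.trim('/')),
        ParcelFileDescriptor.MODE_READ_ONLY
    )
}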
@ -0,0 +1,34 @@
/******************************************************************************
 *                                                                            *
 * Copyright (C) 2021 by nekohasekai <contact-sagernet@sekai.icu>             *
 *                                                                            *
 * This program is free software: you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License as published by      *
 * the Free Software Foundation, either version 3 of the License, or         *
 * (at your option) any later version.                                       *
 *                                                                            *
 * This program is distributed in the hope that it will be useful,           *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of            *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              *
 * GNU General Public License for more details.                              *
 *                                                                            *
 * You should have received a copy of the GNU General Public License         *
 * along with this program. If not, see <http://www.gnu.org/licenses/>.      *
 *                                                                            *
 ******************************************************************************/

package io.nekohasekai.sagernet.plugin

object PluginContract {

    const val ACTION_NATIVE_PLUGIN = "io.nekohasekai.sagernet.plugin.ACTION_NATIVE_PLUGIN"
    const val EXTRA_ENTRY = "io.nekohasekai.sagernet.plugin.EXTRA_ENTRY"
    const val METADATA_KEY_ID = "io.nekohasekai.sagernet.plugin.id"
    const val METADATA_KEY_EXECUTABLE_PATH = "io.nekohasekai.sagernet.plguin.executable_path"
    const val METHOD_GET_EXECUTABLE = "sagernet:getExecutable"

    const val COLUMN_PATH = "path"
    const val COLUMN_MODE = "mode"
    const val SCHEME = "plugin"
    const val AUTHORITY = "io.nekohasekai.sagernet"
}
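For orientation, these constants are consumed from the host side through ContentResolver. The sketch below shows how a host might fetch a plugin's fast-mode executable path; it assumes the host already knows the plugin provider's authority (in practice resolved from ACTION_NATIVE_PLUGIN and METADATA_KEY_ID), and the function name is invented for illustration.

package io.nekohasekai.sagernet.plugin.demo

import android.content.ContentResolver
import android.content.Context
import android.net.Uri
import io.nekohasekai.sagernet.plugin.PluginContract

// Ask a plugin's provider for its executable path via METHOD_GET_EXECUTABLE.
// Returns null if the plugin doesn't support fast mode (the provider throws).
fun queryPluginExecutable(context: Context, pluginAuthority: String): String? {
    val uri = Uri.Builder()
        .scheme(ContentResolver.SCHEME_CONTENT)
        .authority(pluginAuthority)
        .build()
    return runCatching {
        context.contentResolver
            .call(uri, PluginContract.METHOD_GET_EXECUTABLE, null, null)
            ?.getString(PluginContract.EXTRA_ENTRY)
    }.getOrNull()
}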
@ -0,0 +1,42 @@
/******************************************************************************
 *                                                                            *
 * Copyright (C) 2021 by nekohasekai <contact-sagernet@sekai.icu>             *
 *                                                                            *
 * This program is free software: you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License as published by      *
 * the Free Software Foundation, either version 3 of the License, or         *
 * (at your option) any later version.                                       *
 *                                                                            *
 * This program is distributed in the hope that it will be useful,           *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of            *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              *
 * GNU General Public License for more details.                              *
 *                                                                            *
 * You should have received a copy of the GNU General Public License         *
 * along with this program. If not, see <http://www.gnu.org/licenses/>.      *
 *                                                                            *
 ******************************************************************************/

package io.nekohasekai.sagernet.plugin.naive

import android.net.Uri
import android.os.ParcelFileDescriptor
import io.nekohasekai.sagernet.plugin.NativePluginProvider
import io.nekohasekai.sagernet.plugin.PathProvider
import java.io.File
import java.io.FileNotFoundException

class BinaryProvider : NativePluginProvider() {
    override fun populateFiles(provider: PathProvider) {
        provider.addPath("naive-plugin", 0b111101101)
    }

    override fun getExecutable() = context!!.applicationInfo.nativeLibraryDir + "/libnaive.so"
    override fun openFile(uri: Uri): ParcelFileDescriptor = when (uri.path) {
        "/naive-plugin" -> ParcelFileDescriptor.open(
            File(getExecutable()),
            ParcelFileDescriptor.MODE_READ_ONLY
        )
        else -> throw FileNotFoundException()
    }
}
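BinaryProvider announces a single path, naive-plugin, and resolves it to the libnaive.so that the installer extracts. As a sketch of the fast mode this enables, a host that obtained the executable path could run the binary in place; the helper below is illustrative only, and the command-line arguments a real host passes are defined by the naive binary, not by this provider.

import java.io.File

// Launch a plugin binary in place (fast mode). This only works when the APK
// was installed with android:extractNativeLibs="true" so the ELF is on disk.
fun launchPlugin(executablePath: String, args: List<String>): Process {
    val binary = File(executablePath)
    require(binary.canExecute()) { "not executable: $executablePath" }
    return ProcessBuilder(listOf(binary.absolutePath) + args)
        .redirectErrorStream(true) // merge stderr into stdout for simple logging
        .start()
}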
25
apk/app/src/main/res/drawable/ic_launcher_foreground.xml
Normal file
@ -0,0 +1,25 @@
<?xml version="1.0" encoding="utf-8"?>
<vector xmlns:android="http://schemas.android.com/apk/res/android"
    android:width="108dp"
    android:height="108dp"
    android:viewportWidth="108"
    android:viewportHeight="108"
    android:tint="#FFFFFF">
    <group android:scaleX="0.13877095"
        android:scaleY="0.13877095"
        android:translateX="29.16"
        android:translateY="42.010185">
        <group android:translateY="138.67206">
            <path android:pathData="M4.171875,-0L34.546875,-0L34.546875,-5.890625L25.90625,-7.046875L23.3125,-9.796875L23.3125,-82.125L78.1875,-0L87.6875,-0L87.6875,-87.328125L90.28125,-89.9375L98.921875,-91.234375L98.921875,-97L68.671875,-97L68.671875,-91.234375L77.3125,-89.9375L79.90625,-87.328125L79.90625,-19.34375L30.8125,-93.390625L30.8125,-97L3.734375,-97L3.734375,-91.234375L12.953125,-90.359375L15.546875,-88.34375L15.546875,-9.796875L12.953125,-7.046875L4.171875,-5.890625L4.171875,-0Z"
                android:fillColor="#FFFFFF"/>
            <path android:pathData="M151.09375,-8.375L152.95312,0L173.26562,0L173.26562,-5.171875L165.625,-5.90625L163.03125,-8.234375L163.03125,-50.3125C163.03125,-64.796875,156.70312,-71,141.57812,-71C123.4375,-71,112.5,-63.5,112.5,-54.75C112.5,-51.3125,113.640625,-50.3125,116.953125,-50.3125L126.890625,-50.3125L126.890625,-62.359375C130.78125,-63.9375,134.23438,-64.65625,137.84375,-64.65625C148.20312,-64.65625,151.09375,-59.921875,151.09375,-48.578125L151.09375,-44C122.28125,-37.0625,109.046875,-32.6875,109.046875,-17.546875C109.046875,-6.625,116.09375,1,127.46875,1C134.39062,1.015625,142.01562,-2.15625,151.09375,-8.375ZM151.09375,-13.328125C143.89062,-9.09375,137.98438,-6.765625,133.23438,-6.765625C126.03125,-6.765625,121.71875,-11.578125,121.71875,-18.703125C121.71875,-29.34375,130.78125,-33.265625,151.09375,-38.953125L151.09375,-13.328125Z"
                android:fillColor="#FFFFFF"/>
            <path android:pathData="M197.59375,-70L181.03125,-65L181.03125,-60.53125L191.10938,-60.53125L191.10938,-8.203125L188.65625,-5.90625L181.03125,-5.171875L181.03125,0L213.4375,0L213.4375,-5.171875L205.65625,-5.90625L203.20312,-8.203125L203.20312,-70L197.59375,-70ZM182.76562,-99.53125C178.73438,-99.53125,175.42188,-96.375,175.42188,-92.1875C175.42188,-88.15625,178.73438,-85,182.76562,-85C186.79688,-85,189.95312,-88.15625,189.95312,-92.1875C189.95312,-96.375,186.79688,-99.53125,182.76562,-99.53125ZM209.54688,-99.53125C205.51562,-99.53125,202.20312,-96.375,202.20312,-92.1875C202.20312,-88.15625,205.51562,-85,209.54688,-85C213.57812,-85,216.75,-88.15625,216.75,-92.1875C216.75,-96.375,213.57812,-99.53125,209.54688,-99.53125Z"
                android:fillColor="#FFFFFF"/>
            <path android:pathData="M215.26562,-69L215.26562,-63.671875L224.0625,-62.796875L247.23438,0L255.29688,0L280.5,-62.515625L289,-63.671875L289,-69L263.51562,-69L263.51562,-63.671875L271.28125,-63.09375L272.4375,-60.796875L254,-13.90625L237.15625,-60.796875L238.89062,-63.09375L246.51562,-63.671875L246.51562,-69L215.26562,-69Z"
                android:fillColor="#FFFFFF"/>
            <path android:pathData="M351.5,-15.5C343.29688,-10.0625,336.09375,-7.203125,328.89062,-7.203125C314.35938,-7.203125,304.70312,-18.078125,304.70312,-36.421875C304.70312,-36.84375,304.70312,-37.421875,304.70312,-38L351.35938,-38C351.5,-39.578125,351.5,-41.015625,351.5,-42.453125C351.5,-60.296875,341.28125,-71,325.57812,-71C306.28125,-71,292.3125,-56.09375,292.3125,-34.265625C292.3125,-13.21875,305.42188,1,324.4375,1C333.5,1,342.85938,-1.875,351.5,-7.625L351.5,-15.5ZM338.6875,-44.046875L305.42188,-44.046875C306.28125,-56.375,314.5,-64.515625,323.71875,-64.515625C333.35938,-64.515625,338.6875,-58.125,338.6875,-46.359375C338.6875,-45.640625,338.6875,-44.765625,338.6875,-44.046875Z"
                android:fillColor="#FFFFFF"/>
        </group>
    </group>
</vector>
6
apk/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml
Normal file
@ -0,0 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
    <background android:drawable="@color/ic_launcher_background"/>
    <foreground android:drawable="@drawable/ic_launcher_foreground"/>
    <monochrome android:drawable="@drawable/ic_launcher_foreground" />
</adaptive-icon>
@ -0,0 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
    <background android:drawable="@color/ic_launcher_background"/>
    <foreground android:drawable="@drawable/ic_launcher_foreground"/>
    <monochrome android:drawable="@drawable/ic_launcher_foreground" />
</adaptive-icon>
BIN
apk/app/src/main/res/mipmap-hdpi/ic_launcher.png
Normal file
After Width: | Height: | Size: 1.7 KiB |
BIN
apk/app/src/main/res/mipmap-hdpi/ic_launcher_round.png
Normal file
After Width: | Height: | Size: 3.6 KiB |
BIN
apk/app/src/main/res/mipmap-mdpi/ic_launcher.png
Normal file
After Width: | Height: | Size: 1.1 KiB |
BIN
apk/app/src/main/res/mipmap-mdpi/ic_launcher_round.png
Normal file
After Width: | Height: | Size: 2.3 KiB |
BIN
apk/app/src/main/res/mipmap-xhdpi/ic_launcher.png
Normal file
After Width: | Height: | Size: 2.3 KiB |
BIN
apk/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png
Normal file
After Width: | Height: | Size: 5.2 KiB |
BIN
apk/app/src/main/res/mipmap-xxhdpi/ic_launcher.png
Normal file
After Width: | Height: | Size: 3.6 KiB |
BIN
apk/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png
Normal file
After Width: | Height: | Size: 8.2 KiB |
BIN
apk/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png
Normal file
After Width: | Height: | Size: 5.1 KiB |
BIN
apk/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png
Normal file
After Width: | Height: | Size: 12 KiB |
4
apk/app/src/main/res/values/ic_launcher_background.xml
Normal file
@ -0,0 +1,4 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
    <color name="ic_launcher_background">#E91E63</color>
</resources>
18
apk/build.gradle
Normal file
@ -0,0 +1,18 @@
// Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript {
    repositories {
        google()
        mavenCentral()
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:8.6.0'
        classpath 'org.jetbrains.kotlin:kotlin-gradle-plugin:2.0.20'

        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
    }
}

task clean(type: Delete) {
    delete rootProject.buildDir
}
21
apk/gradle.properties
Normal file
@ -0,0 +1,21 @@
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
org.gradle.jvmargs=-Xmx2048m -Dfile.encoding=UTF-8
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
# AndroidX package structure to make it clearer which packages are bundled with the
# Android operating system, and which are packaged with your app's APK
# https://developer.android.com/topic/libraries/support-library/androidx-rn
android.useAndroidX=true
# Automatically convert third-party libraries to use AndroidX
android.enableJetifier=true
# Kotlin code style for this project: "official" or "obsolete":
kotlin.code.style=official
BIN
apk/gradle/wrapper/gradle-wrapper.jar
vendored
Normal file
7
apk/gradle/wrapper/gradle-wrapper.properties
vendored
Normal file
@ -0,0 +1,7 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
252
apk/gradlew
vendored
Executable file
@ -0,0 +1,252 @@
#!/bin/sh

#
# Copyright © 2015-2021 the original authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#

##############################################################################
#
#   Gradle start up script for POSIX generated by Gradle.
#
#   Important for running:
#
#   (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
#       noncompliant, but you have some other compliant shell such as ksh or
#       bash, then to run this script, type that shell name before the whole
#       command line, like:
#
#           ksh Gradle
#
#       Busybox and similar reduced shells will NOT work, because this script
#       requires all of these POSIX shell features:
#         * functions;
#         * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
#           «${var#prefix}», «${var%suffix}», and «$( cmd )»;
#         * compound commands having a testable exit status, especially «case»;
#         * various built-in commands including «command», «set», and «ulimit».
#
#   Important for patching:
#
#   (2) This script targets any POSIX shell, so it avoids extensions provided
#       by Bash, Ksh, etc; in particular arrays are avoided.
#
#       The "traditional" practice of packing multiple parameters into a
#       space-separated string is a well documented source of bugs and security
#       problems, so this is (mostly) avoided, by progressively accumulating
#       options in "$@", and eventually passing that to Java.
#
#       Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
#       and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
#       see the in-line comments for details.
#
#       There are tweaks for specific operating systems such as AIX, CygWin,
#       Darwin, MinGW, and NonStop.
#
#   (3) This script is generated from the Groovy template
#       https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
#       within the Gradle project.
#
#       You can find Gradle at https://github.com/gradle/gradle/.
#
##############################################################################

# Attempt to set APP_HOME

# Resolve links: $0 may be a link
app_path=$0

# Need this for daisy-chained symlinks.
while
    APP_HOME=${app_path%"${app_path##*/}"}  # leaves a trailing /; empty if no leading path
    [ -h "$app_path" ]
do
    ls=$( ls -ld "$app_path" )
    link=${ls#*' -> '}
    case $link in             #(
      /*)   app_path=$link ;; #(
      *)    app_path=$APP_HOME$link ;;
    esac
done

# This is normally unused
# shellcheck disable=SC2034
APP_BASE_NAME=${0##*/}
# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s
' "$PWD" ) || exit

# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD=maximum

warn () {
    echo "$*"
} >&2

die () {
    echo
    echo "$*"
    echo
    exit 1
} >&2

# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "$( uname )" in                #(
  CYGWIN* )         cygwin=true  ;; #(
  Darwin* )         darwin=true  ;; #(
  MSYS* | MINGW* )  msys=true    ;; #(
  NONSTOP* )        nonstop=true ;;
esac

CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar


# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
        # IBM's JDK on AIX uses strange locations for the executables
        JAVACMD=$JAVA_HOME/jre/sh/java
    else
        JAVACMD=$JAVA_HOME/bin/java
    fi
    if [ ! -x "$JAVACMD" ] ; then
        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
    fi
else
    JAVACMD=java
    if ! command -v java >/dev/null 2>&1
    then
        die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
    fi
fi

# Increase the maximum file descriptors if we can.
if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
    case $MAX_FD in #(
      max*)
        # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked.
        # shellcheck disable=SC2039,SC3045
        MAX_FD=$( ulimit -H -n ) ||
            warn "Could not query maximum file descriptor limit"
    esac
    case $MAX_FD in  #(
      '' | soft) :;; #(
      *)
        # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked.
        # shellcheck disable=SC2039,SC3045
        ulimit -n "$MAX_FD" ||
            warn "Could not set maximum file descriptor limit to $MAX_FD"
    esac
fi

# Collect all arguments for the java command, stacking in reverse order:
#   * args from the command line
#   * the main class name
#   * -classpath
#   * -D...appname settings
#   * --module-path (only if needed)
#   * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.

# For Cygwin or MSYS, switch paths to Windows format before running java
if "$cygwin" || "$msys" ; then
    APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
    CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )

    JAVACMD=$( cygpath --unix "$JAVACMD" )

    # Now convert the arguments - kludge to limit ourselves to /bin/sh
    for arg do
        if
            case $arg in                                #(
              -*)   false ;;                            # don't mess with options #(
              /?*)  t=${arg#/} t=/${t%%/*}              # looks like a POSIX filepath
                    [ -e "$t" ] ;;                      #(
              *)    false ;;
            esac
        then
            arg=$( cygpath --path --ignore --mixed "$arg" )
        fi
        # Roll the args list around exactly as many times as the number of
        # args, so each arg winds up back in the position where it started, but
        # possibly modified.
        #
        # NB: a `for` loop captures its iteration list before it begins, so
        # changing the positional parameters here affects neither the number of
        # iterations, nor the values presented in `arg`.
        shift                   # remove old arg
        set -- "$@" "$arg"      # push replacement arg
    done
fi


# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'

# Collect all arguments for the java command:
#   * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
#     and any embedded shellness will be escaped.
#   * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
#     treated as '${Hostname}' itself on the command line.

set -- \
        "-Dorg.gradle.appname=$APP_BASE_NAME" \
        -classpath "$CLASSPATH" \
        org.gradle.wrapper.GradleWrapperMain \
        "$@"

# Stop when "xargs" is not available.
if ! command -v xargs >/dev/null 2>&1
then
    die "xargs is not available"
fi

# Use "xargs" to parse quoted args.
#
# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
#
# In Bash we could simply go:
#
#   readarray ARGS < <( xargs -n1 <<<"$var" ) &&
#   set -- "${ARGS[@]}" "$@"
#
# but POSIX shell has neither arrays nor command substitution, so instead we
# post-process each arg (as a line of input to sed) to backslash-escape any
# character that might be a shell metacharacter, then use eval to reverse
# that process (while maintaining the separation between arguments), and wrap
# the whole thing up as a single "set" statement.
#
# This will of course break if any of these variables contains a newline or
# an unmatched quote.
#

eval "set -- $(
        printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
        xargs -n1 |
        sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
        tr '\n' ' '
    )" '"$@"'

exec "$JAVACMD" "$@"
94
apk/gradlew.bat
vendored
Normal file
@ -0,0 +1,94 @@
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem      https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@rem SPDX-License-Identifier: Apache-2.0
@rem

@if "%DEBUG%"=="" @echo off
@rem ##########################################################################
@rem
@rem  Gradle startup script for Windows
@rem
@rem ##########################################################################

@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal

set DIRNAME=%~dp0
if "%DIRNAME%"=="" set DIRNAME=.
@rem This is normally unused
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%

@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi

@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"

@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome

set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute

echo. 1>&2
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2

goto fail

:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe

if exist "%JAVA_EXE%" goto execute

echo. 1>&2
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2

goto fail

:execute
@rem Setup the command line

set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar


@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*

:end
@rem End local scope for the variables with windows NT shell
if %ERRORLEVEL% equ 0 goto mainEnd

:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
set EXIT_CODE=%ERRORLEVEL%
if %EXIT_CODE% equ 0 set EXIT_CODE=1
if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
exit /b %EXIT_CODE%

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega
BIN
apk/release.keystore
Normal file
10
apk/settings.gradle
Normal file
@ -0,0 +1,10 @@
dependencyResolutionManagement {
    repositoriesMode.set(RepositoriesMode.FAIL_ON_PROJECT_REPOS)
    repositories {
        google()
        mavenCentral()
    }
}
rootProject.name = "Naive Plugin"

include ':app'
@ -7,6 +7,51 @@ BasedOnStyle: Chromium
# 'int>>' if the file already contains at least one such instance.)
Standard: Cpp11

# TODO(crbug.com/1392808): Remove when InsertBraces has been upstreamed into
# the Chromium style (is implied by BasedOnStyle: Chromium).
InsertBraces: true
InsertNewlineAtEOF: true

# Sort #includes by following
# https://google.github.io/styleguide/cppguide.html#Names_and_Order_of_Includes
#
# ref: https://clang.llvm.org/docs/ClangFormatStyleOptions.html#includeblocks
IncludeBlocks: Regroup
# ref: https://clang.llvm.org/docs/ClangFormatStyleOptions.html#includecategories
IncludeCategories:
  # The win32 api has all sorts of implicit include order dependencies :-/
  # Give a few headers special priorities that make sure they appear before
  # all other headers.
  # Sync this with SerializeIncludes in tools/add_header.py.
  # TODO(crbug.com/329138753): remove include sorting from tools/add_header.py
  # after confirming clang-format sort works well.
  # LINT.IfChange(winheader)
  - Regex: '^<objbase\.h>' # This has to be before initguid.h.
    Priority: 1
  - Regex: '^<(atlbase|initguid|mmdeviceapi|ocidl|ole2|shobjidl|tchar|unknwn|windows|winsock2|winternl|ws2tcpip)\.h>'
    Priority: 2
  # LINT.ThenChange(/tools/add_header.py:winheader)
  # UIAutomation*.h needs to be after base/win/atl.h.
  # Note the low priority number.
  - Regex: '^<UIAutomation.*\.h>'
    Priority: 6
  # Other C system headers.
  - Regex: '^<.*\.h>'
    Priority: 3
  # C++ standard library headers.
  - Regex: '^<.*>'
    Priority: 4
  # windows_h_disallowed.h should appear last. Note the low priority number.
  - Regex: '"(.*/)?windows_h_disallowed\.h"'
    Priority: 7
  # Other libraries.
  - Regex: '.*'
    Priority: 5
# ref: https://clang.llvm.org/docs/ClangFormatStyleOptions.html#includeismainregex
IncludeIsMainRegex: "\
(_(32|64|android|apple|chromeos|freebsd|fuchsia|fuzzer|ios|linux|mac|nacl|openbsd|posix|stubs?|win))?\
(_(unit|browser|perf)?tests?)?$"

# Make sure code like:
# IPC_BEGIN_MESSAGE_MAP()
#   IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
@ -34,6 +79,3 @@ IPC_STRUCT_END|\
IPC_STRUCT_TRAITS_END|\
POLPARAMS_END|\
PPAPI_END_MESSAGE_MAP$"

# TODO: Remove this once clang-format r357700 is rolled in.
JavaImportGroups: ['android', 'androidx', 'com', 'dalvik', 'junit', 'org', 'com.google.android.apps.chrome', 'org.chromium', 'java', 'javax']
3
src/.gitattributes
vendored
@ -30,6 +30,7 @@
*.proto text eol=lf
*.rs text eol=lf
*.sh text eol=lf
*.spec text eol=lf
*.sql text eol=lf
*.toml text eol=lf
*.txt text eol=lf

@ -43,7 +44,7 @@
.vpython text eol=lf
codereview.settings text eol=lf
DEPS text eol=lf
ENG_REVIEW_OWNERS text eol=lf
ATL_OWNERS text eol=lf
LICENSE text eol=lf
LICENSE.* text eol=lf
MAJOR_BRANCH_DATE text eol=lf
47
src/.gn
@ -39,6 +39,12 @@ default_args = {
  # Don't include webrtc's builtin task queue implementation.
  rtc_link_task_queue_impl = false

  # When building with Chromium, `webrtc::Location` is replaced by
  # `base::Location`. Since WebRTC doesn't use `public_deps` (webrtc:8603), it
  # would fail to propagate the dependency internally. Instead WebRTC let its
  # embedders to define it globally for all of its targets.
  rtc_common_public_deps = [ "//base" ]

  # Don't include the iLBC audio codec.
  # TODO(bugs.webrtc.org/8396): Once WebRTC gets rid of its internal
  # deps on codecs, we can remove this.

@ -49,18 +55,25 @@ default_args = {
  crashpad_dependencies = "chromium"

  # Override ANGLE's Vulkan dependencies.
  angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
  angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
  angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
  angle_vulkan_headers_dir = "//third_party/vulkan-headers/src"
  angle_vulkan_loader_dir = "//third_party/vulkan-loader/src"
  angle_vulkan_tools_dir = "//third_party/vulkan-tools/src"
  angle_vulkan_validation_layers_dir =
      "//third_party/vulkan-deps/vulkan-validation-layers/src"
      "//third_party/vulkan-validation-layers/src"

  # Override VMA's Vulkan dependencies.
  vma_vulkan_headers_dir = "//third_party/vulkan-headers/src"

  # Overwrite default args declared in the Fuchsia sdk
  fuchsia_sdk_readelf_exec =
      "//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
  fuchsia_target_api_level = 8

  # Overwrite default args declared in the pdfium library
  pdf_partition_alloc_dir = "//base/allocator/partition_allocator"

  devtools_visibility = [ "*" ]

  clang_unsafe_buffers_paths = "//build/config/unsafe_buffers_paths.txt"
}

# These are the targets to skip header checking by default. The files in targets

@ -68,27 +81,16 @@ default_args = {
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
no_check_targets = [
  "//headless:headless_non_renderer",  # 9 errors
  "//headless:headless_renderer",  # 13 errors
  "//headless:headless_shared_sources",  # 4 errors
  "//headless:headless_shell_browser_lib",  # 10 errors
  "//headless:headless_shell_lib",  # 10 errors

  # //v8, https://crbug.com/v8/7330
  "//v8/src/inspector:inspector",  # 20 errors
  "//v8/test/cctest:cctest_sources",  # 2 errors
  "//v8/test/cctest:cctest_sources",  # 15 errors
  "//v8/test/unittests:inspector_unittests_sources",  # 2 errors
  "//v8:cppgc_base",  # 1 error
  "//v8:v8_internal_headers",  # 11 errors
  "//v8:v8_libplatform",  # 2 errors

  # After making partition_alloc a standalone library, remove partition_alloc
  # target from the skip list, because partition_aloc will depend on its own
  # base.
  # partition alloc standalone library bug is https://crbug.com/1151236.
  "//base/allocator/partition_allocator:partition_alloc",  # 292 errors
]

# These are the list of GN files that run exec_script. This whitelist exists
# These are the list of GN files that run exec_script. This allowlist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#

@ -143,17 +145,18 @@ no_check_targets = [
# this situation much easier to create. if the build always lists the
# files and passes them to a script, it will always be correct.

exec_script_whitelist =
    build_dotfile_settings.exec_script_whitelist +
exec_script_allowlist =
    build_dotfile_settings.exec_script_allowlist +
    angle_dotfile_settings.exec_script_whitelist +
    [
      # Whitelist entries for //build should go into
      # Allowlist entries for //build should go into
      # //build/dotfile_settings.gni instead, so that they can be shared
      # with other repos. The entries in this list should be only for files
      # in the Chromium repo outside of //build.
      "//build_overrides/build.gni",

      "//chrome/android/webapk/shell_apk/prepare_upload_dir/BUILD.gn",
      "//chrome/version.gni",

      # TODO(dgn): Layer violation but breaks the build otherwise, see
      # https://crbug.com/474506.
369
src/AUTHORS
39
src/BUILD.gn
@ -1,4 +1,4 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

@ -9,6 +9,7 @@
# file to your new one or GN won't know about it.

import("//build/config/compiler/compiler.gni")
import("//build/config/cronet/config.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/features.gni")
import("//build/config/rust.gni")

@ -33,43 +34,25 @@ if (is_official_build) {
  assert(!is_component_build)
}

# This file defines the following two main targets:
# The `gn_all` target is used to list all of the main targets in the build, so
# that we can figure out which BUILD.gn files to process, following the process
# described at the top of this file.
#
# "gn_all" is used to create explicit dependencies from the root BUILD.gn to
# each top-level component that we wish to include when building everything via
# "all". This is required since the set of targets built by "all" is determined
# automatically based on reachability from the root BUILD.gn (for details, see
# crbug.com/503241). Builders should typically use "all", or list targets
# explicitly, rather than relying on "gn_all".
#
# "gn_visibility": targets that are normally not visible to top-level targets,
# but are built anyway by "all". Since we don't want any such targets, we have
# this placeholder to make sure hidden targets that aren't otherwise depended
# on yet are accounted for.
# Because of the way GN works (again, as described above), there may be targets
# built by `all` that aren't built by `gn_all`. We always want `all` to build,
# so there's really never a reason you'd want to build `gn_all` instead of
# `all`, and no tooling should depend directly on this target. Tools should
# should depend on either an explicit list of targets, or `all`.

group("gn_all") {
  testonly = true

  deps = [
    ":gn_visibility",
    "//net",
    "//components/cronet",
  ]

  if (is_ios) {
    deps -= [ "//components/cronet" ]
  }
}

group("gn_visibility") {
  deps = [
    "//build/config/sanitizers:options_sources",
    # "//third_party/pdfium:pdfium_embeddertests",  # TODO(GYP): visibility?
    # "//third_party/pdfium:pdfium_unittests",  # TODO(GYP): visibility?
  ]
}

if (is_android) {
if (is_android && !is_cronet_build) {
  group("optimize_gn_gen") {
    deps = [
      # These run expensive scripts in non-default toolchains. Generally, host
@ -1,4 +1,4 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Copyright 2015 The Chromium Authors
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are

@ -10,7 +10,7 @@
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
//     * Neither the name of Google LLC nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
2202
src/base/BUILD.gn
@ -3,6 +3,10 @@ include_rules = [
  "+third_party/apple_apsl",
  "+third_party/boringssl/src/include",
  "+third_party/ced",
  "+third_party/fuzztest",
  # We are moving the old jni_generator to jni_zero, some references will remain
  # in //base.
  "+third_party/jni_zero",
  "+third_party/libunwindstack/src/libunwindstack/include",
  "+third_party/lss",
  "+third_party/modp_b64",

@ -14,6 +18,9 @@ include_rules = [
  # //build/rust:cxx_cppdeps.
  "+third_party/rust/cxx",
  "+third_party/test_fonts",
  # JSON Deserialization.
  "+third_party/rust/serde_json_lenient/v0_2/wrapper",
  "+third_party/zlib",

  # These are implicitly brought in from the root, and we don't want them.
  "-ipc",

@ -25,3 +32,14 @@ include_rules = [
  # //base/util can use //base but not vice versa.
  "-util",
]

specific_include_rules = {
  # Special case
  "process/current_process(|_test)\.h": [
    "+third_party/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h",
  ],
  # To evaluate the performance effects of using absl's flat_hash_map.
  "supports_user_data\.cc": [
    "+third_party/abseil-cpp/absl/container/flat_hash_map.h",
  ]
}
@ -1,3 +1,6 @@
monorail {
monorail: {
  component: "Internals>Core"
}
buganizer_public: {
  component_id: 1456128
}
@ -1,18 +1,19 @@
# See //base/README.md to find qualification for being an owner.

set noparent
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
# NOTE: keep this in sync with global-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.
danakj@chromium.org
altimin@chromium.org
dcheng@chromium.org
fdoray@chromium.org
gab@chromium.org
kylechar@chromium.org
mark@chromium.org
pkasting@chromium.org
thakis@chromium.org
thestig@chromium.org
wez@chromium.org
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
# NOTE: keep this in sync with global-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.

# per-file rules:

@ -29,10 +30,21 @@ per-file ..._fuchsia*=file://build/fuchsia/OWNERS
# For Windows-specific changes:
per-file ..._win*=file://base/win/OWNERS

per-file callback_list*=pkasting@chromium.org
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org

# Logging-related changes:
per-file check*=olivierli@chromium.org
per-file check*=pbos@chromium.org
per-file dcheck*=olivierli@chromium.org
per-file dcheck*=pbos@chromium.org
per-file logging*=olivierli@chromium.org
per-file logging*=pbos@chromium.org
per-file notimplemented.h=olivierli@chromium.org
per-file notimplemented.h=pbos@chromium.org
per-file notreached.h=olivierli@chromium.org
per-file notreached.h=pbos@chromium.org

# Restricted since rand_util.h also backs the cryptographically secure RNG.
per-file rand_util*=set noparent
per-file rand_util*=file://ipc/SECURITY_OWNERS
@ -1,4 +1,4 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

@ -8,8 +8,22 @@ See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""


USE_PYTHON3 = True
def CheckChangeLintsClean(input_api, output_api):
  """Makes sure that the code is cpplint clean."""
  # lint_filters=[] stops the OFF_BY_DEFAULT_LINT_FILTERS from being disabled,
  # finding many more issues. verbose_level=1 finds a small number of additional
  # issues.
  # The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch.
  # Only process those extensions which are used in Chromium, in directories
  # that currently lint clean.
  CLEAN_CPP_FILES_ONLY = (r'base/win/.*\.(cc|h)$', )
  source_file_filter = lambda x: input_api.FilterSourceFile(
      x,
      files_to_check=CLEAN_CPP_FILES_ONLY,
      files_to_skip=input_api.DEFAULT_FILES_TO_SKIP)
  return input_api.canned_checks.CheckChangeLintsClean(
      input_api, output_api, source_file_filter=source_file_filter,
      lint_filters=[], verbose_level=1)


def _CheckNoInterfacesInBase(input_api, output_api):

@ -22,7 +36,7 @@ def _CheckNoInterfacesInBase(input_api, output_api):
        not "/test/" in f.LocalPath() and
        not f.LocalPath().endswith('.java') and
        not f.LocalPath().endswith('_unittest.mm') and
        not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
        not f.LocalPath().endswith('_spi.h')):
      contents = input_api.ReadFile(f)
      if pattern.search(contents):
        files.append(f)

@ -72,9 +86,9 @@ def _CheckNoTraceEventInclude(input_api, output_api):
      r".*\.(h|cc|mm)$",
  ]
  files_to_skip = [
      r".*[\\/]test[\\/].*",
      r".*[\\/]trace_event[\\/].*",
      r".*[\\/]tracing[\\/].*",
      r".*/test/.*",
      r".*/trace_event/.*",
      r".*/tracing/.*",
  ]

  locations = _FindLocations(input_api, discouraged_includes, files_to_check,

@ -105,9 +119,9 @@ def _WarnPbzeroIncludes(input_api, output_api):
      r".*\.(h|cc|mm)$",
  ]
  files_to_skip = [
      r".*[\\/]test[\\/].*",
      r".*[\\/]trace_event[\\/].*",
      r".*[\\/]tracing[\\/].*",
      r".*/test/.*",
      r".*/trace_event/.*",
      r".*/tracing/.*",
  ]

  locations = _FindLocations(input_api, warn_includes, files_to_check,

@ -129,6 +143,7 @@ def _CommonChecks(input_api, output_api):
  results.extend(_CheckNoInterfacesInBase(input_api, output_api))
  results.extend(_CheckNoTraceEventInclude(input_api, output_api))
  results.extend(_WarnPbzeroIncludes(input_api, output_api))
  results.extend(CheckChangeLintsClean(input_api, output_api))
  return results
@ -65,14 +65,20 @@ synthetic microbenchmarks that measure performance in various scenarios:
* MessageLoopPerfTest: Measures the speed of task posting in various
  configurations.
* ObserverListPerfTest: Exercises adding, removing and signalling observers.
* PartitionLockPerfTest: Tests the implementation of Lock used in
  PartitionAlloc
* PthreadEventPerfTest: Establishes the baseline thread switching cost using
  pthreads.
* RandUtilPerfTest: Measures the time it takes to generate random numbers.
* ScheduleWorkTest: Measures the overhead of MessagePump::ScheduleWork.
* SequenceManagerPerfTest: Benchmarks SequenceManager scheduling with various
  underlying task runners.
* TaskObserverPerfTest: Measures the incremental cost of adding task
  observers.
* TaskPerfTest: Checks the cost of posting tasks between threads.
* ThreadLocalStoragePerfTest: Exercises different mechanisms for accessing
  data associated with the current thread (C++ `thread_local`, the
  implementation in //base, the POSIX/WinAPI directly)
* WaitableEvent{Thread,}PerfTest: Measures waitable events in single and
  multithreaded scenarios.
@ -9,5 +9,4 @@
# yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org
rsesek@chromium.org
tsepez@chromium.org
@ -1,97 +1,33 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//base/allocator/allocator.gni")
import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//build/buildflag_header.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")

buildflag_header("buildflags") {
  header = "buildflags.h"
  _use_partition_alloc_as_malloc = use_allocator == "partition"
  assert(use_allocator_shim || !_use_partition_alloc_as_malloc,
         "Partition alloc requires the allocator shim")

  # BackupRefPtr(BRP) build flags.
  _use_backup_ref_ptr = use_backup_ref_ptr && use_partition_alloc && !is_nacl
  _put_ref_count_in_previous_slot =
      put_ref_count_in_previous_slot && _use_backup_ref_ptr
  _enable_backup_ref_ptr_slow_checks =
      enable_backup_ref_ptr_slow_checks && _use_backup_ref_ptr
  _enable_dangling_raw_ptr_checks =
      enable_dangling_raw_ptr_checks && _use_backup_ref_ptr
  use_partition_alloc_as_gwp_asan_store =
      enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl

  # MTECheckedPtr is exclusive against BRP (asserted at declaration).
  # MTECheckedPtr requires 64-bit pointers (not available in NaCl).
  _use_mte_checked_ptr = use_mte_checked_ptr && !is_nacl

  _record_alloc_info = false

  flags = [
    "USE_ALLOCATOR_SHIM=$use_allocator_shim",
    "USE_PARTITION_ALLOC=$use_partition_alloc",
    "USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc_as_malloc",

    "USE_BACKUP_REF_PTR=$_use_backup_ref_ptr",
    "USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
    "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$_enable_backup_ref_ptr_slow_checks",
    "ENABLE_DANGLING_RAW_PTR_CHECKS=$_enable_dangling_raw_ptr_checks",
    "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",

    # Not to be used directly - see `partition_alloc_config.h`.
    "USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",

    "USE_FAKE_BINARY_EXPERIMENT=$use_fake_binary_experiment",

    "RECORD_ALLOC_INFO=$_record_alloc_info",
  ]
  flags = [ "USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$use_partition_alloc_as_gwp_asan_store" ]
}

if (is_apple) {
  source_set("early_zone_registration_mac") {
  source_set("early_zone_registration_apple") {
    sources = [
      "early_zone_registration_mac.cc",
      "early_zone_registration_mac.h",
      "early_zone_registration_apple.cc",
      "early_zone_registration_apple.h",
      "partition_allocator/src/partition_alloc/shim/early_zone_registration_constants.h",
    ]

    deps = [ ":buildflags" ]
  }
}

# Used to shim malloc symbols on Android. see //base/allocator/README.md.
config("wrap_malloc_symbols") {
  ldflags = [
    "-Wl,-wrap,calloc",
    "-Wl,-wrap,free",
    "-Wl,-wrap,malloc",
    "-Wl,-wrap,memalign",
    "-Wl,-wrap,posix_memalign",
    "-Wl,-wrap,pvalloc",
    "-Wl,-wrap,realloc",
    "-Wl,-wrap,valloc",

    # <stdlib.h> functions
    "-Wl,-wrap,realpath",

    # <string.h> functions
    "-Wl,-wrap,strdup",
    "-Wl,-wrap,strndup",

    # <unistd.h> functions
    "-Wl,-wrap,getcwd",

    # <stdio.h> functions
    "-Wl,-wrap,asprintf",
    "-Wl,-wrap,vasprintf",
  ]
}

config("mac_no_default_new_delete_symbols") {
  if (!is_component_build) {
    # This is already set when we compile libc++, see
    # buildtools/third_party/libc++/BUILD.gn. But it needs to be set here as well,
    # since the shim defines the symbols, to prevent them being exported.
    cflags = [ "-fvisibility-global-new-delete-hidden" ]
    deps = [
      ":buildflags",
      "//base/allocator/partition_allocator:buildflags",
    ]
  }
}
|
|||
monorail {
|
||||
monorail: {
|
||||
component: "Internals"
|
||||
}
|
||||
buganizer_public: {
|
||||
component_id: 1456292
|
||||
}
|
||||
|
|
|
@ -1,8 +1,6 @@
lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org

per-file allocator.gni=bartekn@chromium.org
per-file allocator_shim_default_dispatch_to_partition_alloc*=bartekn@chromium.org
per-file partition_alloc*=bartekn@chromium.org
per-file BUILD.gn=bartekn@chromium.org
per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS
per-file partition_alloc*=file://base/allocator/partition_allocator/OWNERS
per-file BUILD.gn=file://base/allocator/partition_allocator/OWNERS
|
|||
This document describes how malloc / new calls are routed in the various Chrome
|
||||
platforms.
|
||||
|
||||
Bare in mind that the chromium codebase does not always just use `malloc()`.
|
||||
Bear in mind that the chromium codebase does not always just use `malloc()`.
|
||||
Some examples:
|
||||
- Large parts of the renderer (Blink) use two home-brewed allocators,
|
||||
PartitionAlloc and BlinkGC (Oilpan).
|
||||
|
@ -15,29 +15,13 @@ Background
|
|||
----------
|
||||
The `allocator` target defines at compile-time the platform-specific choice of
|
||||
the allocator and extra-hooks which services calls to malloc/new. The relevant
|
||||
build-time flags involved are `use_allocator` and `use_allocator_shim`.
|
||||
build-time flags involved are `use_allocator_shim` and
|
||||
`use_partition_alloc_as_malloc`.
|
||||
|
||||
The default choices are as follows:
|
||||
|
||||
**Windows**
|
||||
`use_allocator: winheap`, the default Windows heap.
|
||||
Additionally, `static_library` (i.e. non-component) builds have a shim
|
||||
layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
|
||||
The shim layer provides extra security features, such as preventing large
|
||||
allocations that can hit signed vs. unsigned bugs in third_party code.
|
||||
|
||||
**Android**
|
||||
`use_allocator: none`, always use the allocator symbols coming from Android's
|
||||
libc (Bionic). As it is developed as part of the OS, it is considered to be
|
||||
optimized for small devices and more memory-efficient than other choices.
|
||||
The actual implementation backing malloc symbols in Bionic is up to the board
|
||||
config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
|
||||
|
||||
**Mac/iOS**
|
||||
`use_allocator: none`, we always use the system's allocator implementation.
|
||||
|
||||
In addition, when building for `asan` / `msan` both the allocator and the shim
|
||||
layer are disabled.
|
||||
By default, these are true on all platforms except iOS (not yet supported) and
|
||||
NaCl (no plan to support).
|
||||
Furthermore, when building with a sanitizer (e.g. `asan`, `msan`, ...) both the
|
||||
allocator and the shim layer are disabled.
|
||||
|
||||
|
||||
Layering and build deps
|
||||
|
@ -51,8 +35,7 @@ indirectly, on `base` within the scope of a linker unit.
|
|||
More importantly, **no other place outside of `/base` should depend on the
|
||||
specific allocator**.
|
||||
If such a functional dependency is required that should be achieved using
|
||||
abstractions in `base` (see `/base/allocator/allocator_extension.h` and
|
||||
`/base/memory/`)
|
||||
abstractions in `base` (see `/base/memory/`)
|
||||
|
||||
**Why `base` depends on `allocator`?**
|
||||
Because it needs to provide services that depend on the actual allocator
|
||||
|
@ -86,7 +69,7 @@ a central place.
|
|||
- Full documentation: [Allocator shim design doc][url-allocator-shim].
|
||||
- Current state: Available and enabled by default on Android, CrOS, Linux,
|
||||
Mac OS and Windows.
|
||||
- Tracking bug: [https://crbug.com/550886][crbug.com/550886].
|
||||
- Tracking bug: [crbug.com/550886](https://crbug.com/550886).
|
||||
- Build-time flag: `use_allocator_shim`.
|
||||
|
||||
**Overview of the unified allocator shim**
|
||||
|
@ -112,7 +95,7 @@ allocator shim (next point).
|
|||
This is taken care of by the headers in `allocator_shim_override_*`.
|
||||
|
||||
*On Windows*: Windows' UCRT (Universal C Runtime) exports weak symbols, that we
|
||||
can override in `allocator_shim_override_ucr_symbols_win.h`.
|
||||
can override in `allocator_shim_override_ucrt_symbols_win.h`.
|
||||
|
||||
*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
|
||||
in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
|
||||
|
|
|
@ -1,150 +1,27 @@
|
|||
# Copyright 2019 The Chromium Authors. All rights reserved.
|
||||
# Copyright 2019 The Chromium Authors
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
import("//build/config/chromecast_build.gni")
|
||||
import("//build/config/sanitizers/sanitizers.gni")
|
||||
import("//base/allocator/partition_allocator/partition_alloc.gni")
|
||||
|
||||
if (is_ios) {
|
||||
import("//build/config/ios/ios_sdk.gni")
|
||||
# Chromium-specific asserts. External embedders _may_ elect to use these
|
||||
# features even without PA-E.
|
||||
if (!use_partition_alloc_as_malloc) {
|
||||
# In theory, BackupRefPtr will work just fine without
|
||||
# PartitionAlloc-Everywhere, but its scope would be limited to partitions
|
||||
# that are invoked explicitly (not via malloc). These are only Blink
|
||||
# partition, where we currently don't even use raw_ptr<T>.
|
||||
assert(!enable_backup_ref_ptr_support,
|
||||
"Chromium does not use BRP without PA-E")
|
||||
|
||||
# Pointer compression works only if all pointers are guaranteed to be
|
||||
# allocated by PA (in one of its core pools, to be precise). In theory,
|
||||
# this could be useful with partitions that are invoked explicitly. In
|
||||
# practice, the pointers we have in mind for compression (scoped_refptr<>,
|
||||
# unique_ptr<>) require PA-E.
|
||||
assert(!enable_pointer_compression_support,
|
||||
"Pointer compressions likely doesn't make sense without PA-E")
|
||||
}
|
||||
|
||||
# Sanitizers replace the allocator, don't use our own.
|
||||
_is_using_sanitizers = is_asan || is_hwasan || is_lsan || is_tsan || is_msan
|
||||
|
||||
# - Component build support is disabled on all platforms. It is known to cause
|
||||
# issues on some (e.g. Windows with shims, Android with non-universal symbol
|
||||
# wrapping), and has not been validated on others.
|
||||
# - Windows: debug CRT is not compatible, see below.
|
||||
_disable_partition_alloc = is_component_build || (is_win && is_debug)
|
||||
|
||||
# - NaCl: No plans to support it.
|
||||
# - iOS: not done yet.
|
||||
_is_partition_alloc_platform = !is_nacl && !is_ios
|
||||
|
||||
# Under Windows Debug the allocator shim is not compatible with CRT.
|
||||
# NaCl in particular does seem to link some binaries statically
|
||||
# against the debug CRT with "is_nacl=false".
|
||||
# Under Fuchsia the allocator shim is only required for PA-E.
|
||||
# For all other platforms & configurations, the shim is required, to replace
|
||||
# the default system allocators, e.g. with Partition Alloc.
|
||||
if ((is_linux || is_chromeos || is_android || is_apple ||
|
||||
(is_fuchsia && !_disable_partition_alloc) ||
|
||||
(is_win && !is_component_build && !is_debug)) && !_is_using_sanitizers) {
|
||||
_default_use_allocator_shim = true
|
||||
} else {
|
||||
_default_use_allocator_shim = false
|
||||
}
|
||||
|
||||
if (_default_use_allocator_shim && _is_partition_alloc_platform &&
    !_disable_partition_alloc) {
  _default_allocator = "partition"
} else {
  _default_allocator = "none"
}

declare_args() {
  # Memory allocator to use. Set to "none" to use default allocator.
  use_allocator = _default_allocator

  # Causes all the allocations to be routed via allocator_shim.cc.
  use_allocator_shim = _default_use_allocator_shim

  # Whether PartitionAlloc should be available for use or not.
  # true makes PartitionAlloc linked to the executable or shared library and
  # makes it available for use. It doesn't mean that the default allocator
  # is PartitionAlloc, which is governed by |use_allocator|.
  #
  # This flag is currently set to false only on Cronet bots, because Cronet
  # doesn't use PartitionAlloc at all, and doesn't wish to incur the library
  # size increase (crbug.com/674570).
  use_partition_alloc = true
}

if (!use_partition_alloc && use_allocator == "partition") {
  # If there is a conflict, prioritize |use_partition_alloc| over
  # |use_allocator|.
  use_allocator = "none"
}

assert(use_allocator == "none" || use_allocator == "partition")

assert(
    !use_allocator_shim || is_linux || is_chromeos || is_android || is_win ||
        is_fuchsia || is_apple,
    "use_allocator_shim works only on Android, iOS, Linux, macOS, Fuchsia, " +
        "and Windows.")

if (is_win && use_allocator_shim) {
  # TODO(crbug.com/1245317): Add a comment indicating why the shim doesn't work.
  assert(!is_component_build,
         "The allocator shim doesn't work for the component build on Windows.")
}

_is_brp_supported = (is_win || is_android) && use_allocator == "partition"

declare_args() {
  # Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
  # of raw_ptr<T>, and enable PartitionAlloc support for it.
  use_backup_ref_ptr = _is_brp_supported

  use_mte_checked_ptr = false
}

assert(!(use_backup_ref_ptr && use_mte_checked_ptr),
       "MTECheckedPtr conflicts with BRP.")

declare_args() {
  # If BRP is enabled, additional options are available:
  # - put_ref_count_in_previous_slot: place the ref-count at the end of the
  #   previous slot (or in metadata if a slot starts on the page boundary), as
  #   opposed to the beginning of the slot.
  # - enable_backup_ref_ptr_slow_checks: enable additional safety checks that
  #   are too expensive to have on by default.
  # - enable_dangling_raw_ptr_checks: enable checking that raw_ptr does not
  #   become dangling during its lifetime.
  put_ref_count_in_previous_slot = use_backup_ref_ptr
  enable_backup_ref_ptr_slow_checks = false
  enable_dangling_raw_ptr_checks = false

  # Registers the binary for a fake binary A/B experiment. The binaries built
  # with this flag have no behavior difference, except for setting a synthetic
  # Finch.
  use_fake_binary_experiment = false

  use_asan_backup_ref_ptr = false
}

# Prevent using BackupRefPtr when PartitionAlloc-Everywhere isn't used.
# In theory, such a configuration is possible, but its scope would be limited to
# only Blink partitions, which is currently not tested. Better to trigger an
# error, than have BackupRefPtr silently disabled while believing it is enabled.
if (!is_nacl) {
  assert(!use_backup_ref_ptr || use_allocator == "partition",
         "Can't use BackupRefPtr without PartitionAlloc-Everywhere")
}

# put_ref_count_in_previous_slot can only be used if use_backup_ref_ptr
# is true.
assert(
    use_backup_ref_ptr || !put_ref_count_in_previous_slot,
    "Can't put ref count in the previous slot if BackupRefPtr isn't enabled at all")

# enable_backup_ref_ptr_slow_checks can only be used if use_backup_ref_ptr
# is true.
assert(use_backup_ref_ptr || !enable_backup_ref_ptr_slow_checks,
       "Can't enable additional BackupRefPtr checks if it isn't enabled at all")

# enable_dangling_raw_ptr_checks can only be used if use_backup_ref_ptr
# is true.
assert(
    use_backup_ref_ptr || !enable_dangling_raw_ptr_checks,
    "Can't enable dangling raw_ptr checks if BackupRefPtr isn't enabled at all")

# BackupRefPtr and AsanBackupRefPtr are mutually exclusive variants of raw_ptr.
assert(
    !use_backup_ref_ptr || !use_asan_backup_ref_ptr,
    "Both BackupRefPtr and AsanBackupRefPtr can't be enabled at the same time")

assert(!use_asan_backup_ref_ptr || is_asan,
       "AsanBackupRefPtr requires AddressSanitizer")
assert(use_allocator_shim || !use_partition_alloc_as_malloc,
       "PartitionAlloc-Everywhere requires the allocator shim")

@ -1,14 +1,14 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_check.h"

#include "base/allocator/buildflags.h"
#include "build/build_config.h"
#include "partition_alloc/buildflags.h"

#if BUILDFLAG(IS_WIN)
#include "base/allocator/winheap_stubs_win.h"
#include "partition_alloc/shim/winheap_stubs_win.h"
#endif

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)

@ -16,25 +16,24 @@
#endif

#if BUILDFLAG(IS_APPLE)
#include "base/allocator/allocator_interception_mac.h"
#include "partition_alloc/shim/allocator_interception_apple.h"
#endif

namespace base {
namespace allocator {
namespace base::allocator {

bool IsAllocatorInitialized() {
#if BUILDFLAG(IS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(IS_WIN) && PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
  // Set by allocator_shim_override_ucrt_symbols_win.h when the
  // shimmed _set_new_mode() is called.
  return g_is_win_shim_layer_initialized;
  return allocator_shim::g_is_win_shim_layer_initialized;
#elif BUILDFLAG(IS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
    !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
    PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
  // From allocator_interception_mac.mm.
  return base::allocator::g_replaced_default_zone;
  return allocator_shim::g_replaced_default_zone;
#else
  return true;
#endif
}

}  // namespace allocator
}  // namespace base
}  // namespace base::allocator

@ -1,4 +1,4 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

@ -1,15 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_extension.h"
#include "base/allocator/buildflags.h"
#include "base/check.h"

namespace base {
namespace allocator {

void ReleaseFreeMemory() {}

}  // namespace allocator
}  // namespace base
@ -1,23 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_

#include <stddef.h>  // for size_t

#include "base/base_export.h"
#include "build/build_config.h"

namespace base {
namespace allocator {

// Request that the allocator release any free memory it knows about to the
// system.
BASE_EXPORT void ReleaseFreeMemory();

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
@ -1,65 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
#define BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_

#include <stddef.h>

#include "base/base_export.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

struct MallocZoneFunctions;

// This initializes AllocatorDispatch::default_dispatch by saving pointers to
// the functions in the current default malloc zone. This must be called before
// the default malloc zone is changed to have its intended effect.
void InitializeDefaultDispatchToMacAllocator();

// Saves the function pointers currently used by the default zone.
void StoreFunctionsForDefaultZone();

// Same as StoreFunctionsForDefaultZone, but for all malloc zones.
void StoreFunctionsForAllZones();

// For all malloc zones that have been stored, replace their functions with
// |functions|.
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions);

extern bool g_replaced_default_zone;

// Calls the original implementation of malloc/calloc prior to interception.
bool UncheckedMallocMac(size_t size, void** result);
bool UncheckedCallocMac(size_t num_items, size_t size, void** result);

// Intercepts calls to default and purgeable malloc zones. Intercepts Core
// Foundation and Objective-C allocations.
// Has no effect on the default malloc zone if the allocator shim already
// performs that interception.
BASE_EXPORT void InterceptAllocationsMac();

// Updates all malloc zones to use their original functions.
// Also calls ClearAllMallocZonesForTesting.
BASE_EXPORT void UninterceptMallocZonesForTesting();

// Returns true if allocations are successfully being intercepted for all malloc
// zones.
bool AreMallocZonesIntercepted();

// Periodically checks for, and shims new malloc zones. Stops checking after 1
// minute.
BASE_EXPORT void PeriodicallyShimNewMallocZones();

// Exposed for testing.
BASE_EXPORT void ShimNewMallocZones();
BASE_EXPORT void ReplaceZoneFunctions(ChromeMallocZone* zone,
                                      const MallocZoneFunctions* functions);

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
@ -1,613 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains all the logic necessary to intercept allocations on
// macOS. "malloc zones" are an abstraction that allows the process to intercept
// all malloc-related functions. There is no good mechanism [short of
// interposition] to determine when new malloc zones are added, so there's no
// clean mechanism to intercept all malloc zones. This file contains logic to
// intercept the default and purgeable zones, which always exist. A cursory
// review of Chrome seems to imply that non-default zones are almost never used.
//
// This file also contains logic to intercept Core Foundation and Objective-C
// allocations. The implementations forward to the default malloc zone, so the
// only reason to intercept these calls is to re-label OOM crashes with slightly
// more details.

#include "base/allocator/allocator_interception_mac.h"

#include <CoreFoundation/CoreFoundation.h>
#import <Foundation/Foundation.h>
#include <errno.h>
#include <mach/mach.h>
#import <objc/runtime.h>
#include <stddef.h>

#include <new>

#include "base/allocator/buildflags.h"
#include "base/allocator/malloc_zone_functions_mac.h"
#include "base/bind.h"
#include "base/bits.h"
#include "base/logging.h"
#include "base/mac/mach_logging.h"
#include "base/process/memory.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "third_party/apple_apsl/CFBase.h"

#if BUILDFLAG(IS_IOS)
#include "base/ios/ios_util.h"
#else
#include "base/mac/mac_util.h"
#endif

namespace base {
namespace allocator {

bool g_replaced_default_zone = false;

namespace {

bool g_oom_killer_enabled;
bool g_allocator_shims_failed_to_install;

// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if necessary,
// and returns (in the out parameters) a region of memory (if any) to be
// re-protected when modifications are complete. This approach assumes that
// there is no contention for the protection of this memory.
//
// Returns true if the malloc zone was properly de-protected, or false
// otherwise. If this function returns false, the out parameters are invalid and
// the region does not need to be re-protected.
bool DeprotectMallocZone(ChromeMallocZone* default_zone,
                         vm_address_t* reprotection_start,
                         vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  kern_return_t result =
      vm_region_64(mach_task_self(), reprotection_start, reprotection_length,
                   VM_REGION_BASIC_INFO_64,
                   reinterpret_cast<vm_region_info_t>(&info), &count, &unused);
  if (result != KERN_SUCCESS) {
    MACH_LOG(ERROR, result) << "vm_region_64";
    return false;
  }

  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
  // balance it with a deallocate in case this ever changes. See
  // the VM_REGION_BASIC_INFO_64 case in vm_map_region() in 10.15's
  // https://opensource.apple.com/source/xnu/xnu-6153.11.26/osfmk/vm/vm_map.c .
  mach_port_deallocate(mach_task_self(), unused);

  if (!(info.max_protection & VM_PROT_WRITE)) {
    LOG(ERROR) << "Invalid max_protection " << info.max_protection;
    return false;
  }

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 10 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 10.
  DCHECK(*reprotection_start <= reinterpret_cast<vm_address_t>(default_zone));
  vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) -
                          reinterpret_cast<vm_address_t>(*reprotection_start);
  DCHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    *reprotection_value = info.protection;
    result =
        vm_protect(mach_task_self(), *reprotection_start, *reprotection_length,
                   false, info.protection | VM_PROT_WRITE);
    if (result != KERN_SUCCESS) {
      MACH_LOG(ERROR, result) << "vm_protect";
      return false;
    }
  }
  return true;
}

#if !defined(ADDRESS_SANITIZER)

MallocZoneFunctions g_old_zone;
MallocZoneFunctions g_old_purgeable_zone;

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_zone.malloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* result = g_old_zone.calloc(zone, num_items, size);
  if (!result && num_items && size)
    TerminateBecauseOutOfMemory(num_items * size);
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_zone.valloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
  g_old_zone.free(zone, ptr);
}

void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
  void* result = g_old_zone.realloc(zone, ptr, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* result = g_old_zone.memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned. See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  if (!result && size && alignment >= sizeof(void*) &&
      base::bits::IsPowerOfTwo(alignment)) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}

#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_purgeable_zone.malloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* result = g_old_purgeable_zone.calloc(zone, num_items, size);
  if (!result && num_items && size)
    TerminateBecauseOutOfMemory(num_items * size);
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_purgeable_zone.valloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
  g_old_purgeable_zone.free(zone, ptr);
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* result = g_old_purgeable_zone.realloc(zone, ptr, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* result = g_old_purgeable_zone.memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned. See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  if (!result && size && alignment >= sizeof(void*) &&
      base::bits::IsPowerOfTwo(alignment)) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)

#if !defined(ADDRESS_SANITIZER)

// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator() {
#if BUILDFLAG(IS_IOS)
  return !base::ios::IsRunningOnOrLater(16, 0, 0);
#else
  return !base::mac::IsOSLaterThan12_DontCallThis();
#endif
}

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
      reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
  return &our_allocator->_context;
}

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    TerminateBecauseOutOfMemory(0);
  return result;
}

void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
  ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
  if (!IsMallocZoneAlreadyStored(chrome_zone))
    return;
  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
  ReplaceZoneFunctions(chrome_zone, &functions);
}

}  // namespace

bool UncheckedMallocMac(size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = malloc(size);
#else
  if (g_old_zone.malloc) {
    *result = g_old_zone.malloc(malloc_default_zone(), size);
  } else {
    *result = malloc(size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}

bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = calloc(num_items, size);
#else
  if (g_old_zone.calloc) {
    *result = g_old_zone.calloc(malloc_default_zone(), num_items, size);
  } else {
    *result = calloc(num_items, size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}

void InitializeDefaultDispatchToMacAllocator() {
  StoreFunctionsForAllZones();
}

void StoreFunctionsForDefaultZone() {
  ChromeMallocZone* default_zone = reinterpret_cast<ChromeMallocZone*>(
      malloc_default_zone());
  StoreMallocZone(default_zone);
}

void StoreFunctionsForAllZones() {
  // This ensures that the default zone is always at the front of the array,
  // which is important for performance.
  StoreFunctionsForDefaultZone();

  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
  if (kr != KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    StoreMallocZone(zone);
  }
}

void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
  // The default zone does not get returned in malloc_get_all_zones().
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
    ReplaceZoneFunctions(default_zone, functions);
  }

  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  if (kr != KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    if (DoesMallocZoneNeedReplacing(zone, functions)) {
      ReplaceZoneFunctions(zone, functions);
    }
  }
  g_replaced_default_zone = true;
}

void InterceptAllocationsMac() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger than
  // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will still
  // fail with a NULL rather than dying (see malloc_zone_malloc() in
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c for
  // details). Unfortunately, it's the best we can do. Also note that this does
  // not affect allocations from non-default zones.

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // The malloc zone backed by PartitionAlloc crashes by default, so there is
  // no need to install the OOM killer.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (!IsMallocZoneAlreadyStored(default_zone)) {
    StoreZoneFunctions(default_zone, &g_old_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc;
    new_functions.calloc = oom_killer_calloc;
    new_functions.valloc = oom_killer_valloc;
    new_functions.free = oom_killer_free;
    new_functions.realloc = oom_killer_realloc;
    new_functions.memalign = oom_killer_memalign;

    ReplaceZoneFunctions(default_zone, &new_functions);
    g_replaced_default_zone = true;
  }
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
  if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
    StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc_purgeable;
    new_functions.calloc = oom_killer_calloc_purgeable;
    new_functions.valloc = oom_killer_valloc_purgeable;
    new_functions.free = oom_killer_free_purgeable;
    new_functions.realloc = oom_killer_realloc_purgeable;
    new_functions.memalign = oom_killer_memalign_purgeable;
    ReplaceZoneFunctions(purgeable_zone, &new_functions);
  }
#endif

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure is
  // due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free list
  // means that it's likely that a failure will not be due to memory exhaustion.
  // Similarly, these constraints on batch_malloc mean that callers must always
  // be expecting to receive less memory than was requested, even in situations
  // where memory pressure is not a concern. Finally, the only public interface
  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
  // system's malloc implementation. It's unlikely that anyone's even heard of
  // it.

#ifndef ADDRESS_SANITIZER
  // === Core Foundation CFAllocators ===

  // This will not catch allocations done by custom allocators, but will catch
  // all allocations done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
                     "failures via CFAllocator will not result in termination. "
                     "http://crbug.com/45650";
  }
#endif

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method =
      class_getClassMethod(nsobject_class, @selector(allocWithZone:));
  g_old_allocWithZone =
      reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}

void UninterceptMallocZonesForTesting() {
  UninterceptMallocZoneForTesting(malloc_default_zone());
  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
  CHECK(kr == KERN_SUCCESS);
  for (unsigned int i = 0; i < count; ++i) {
    UninterceptMallocZoneForTesting(
        reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
  }

  ClearAllMallocZonesForTesting();
}

bool AreMallocZonesIntercepted() {
  return !g_allocator_shims_failed_to_install;
}

namespace {

void ShimNewMallocZonesAndReschedule(base::Time end_time,
                                     base::TimeDelta delay) {
  ShimNewMallocZones();

  if (base::Time::Now() > end_time)
    return;

  base::TimeDelta next_delay = delay * 2;
  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&ShimNewMallocZonesAndReschedule, end_time, next_delay),
      delay);
}

}  // namespace

void PeriodicallyShimNewMallocZones() {
  base::Time end_time = base::Time::Now() + base::Minutes(1);
  base::TimeDelta initial_delay = base::Seconds(1);
  ShimNewMallocZonesAndReschedule(end_time, initial_delay);
}

void ShimNewMallocZones() {
  StoreFunctionsForAllZones();

  // Use the functions for the default zone as a template to replace those
  // new zones.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  DCHECK(IsMallocZoneAlreadyStored(default_zone));

  MallocZoneFunctions new_functions;
  StoreZoneFunctions(default_zone, &new_functions);
  ReplaceFunctionsForStoredZones(&new_functions);
}

void ReplaceZoneFunctions(ChromeMallocZone* zone,
                          const MallocZoneFunctions* functions) {
  // Remove protection.
  vm_address_t reprotection_start = 0;
  vm_size_t reprotection_length = 0;
  vm_prot_t reprotection_value = VM_PROT_NONE;
  bool success = DeprotectMallocZone(zone, &reprotection_start,
                                     &reprotection_length, &reprotection_value);
  if (!success) {
    g_allocator_shims_failed_to_install = true;
    return;
  }

  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc);
  zone->malloc = functions->malloc;
  zone->calloc = functions->calloc;
  zone->valloc = functions->valloc;
  zone->free = functions->free;
  zone->realloc = functions->realloc;
  if (functions->batch_malloc)
    zone->batch_malloc = functions->batch_malloc;
  if (functions->batch_free)
    zone->batch_free = functions->batch_free;
  if (functions->size)
    zone->size = functions->size;
  if (zone->version >= 5 && functions->memalign) {
    zone->memalign = functions->memalign;
  }
  if (zone->version >= 6 && functions->free_definite_size) {
    zone->free_definite_size = functions->free_definite_size;
  }

  // Restore protection if it was active.
  if (reprotection_start) {
    kern_return_t result =
        vm_protect(mach_task_self(), reprotection_start, reprotection_length,
                   false, reprotection_value);
    MACH_DCHECK(result == KERN_SUCCESS, result) << "vm_protect";
  }
}

}  // namespace allocator
}  // namespace base
@ -1,418 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <errno.h>

#include <atomic>
#include <new>

#include "base/allocator/buildflags.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/memory/page_size.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

#if !BUILDFLAG(IS_WIN)
#include <unistd.h>
#else
#include "base/allocator/winheap_stubs_win.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>

#include "base/allocator/allocator_interception_mac.h"
#include "base/mac/mach_logging.h"
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#endif

// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
// they are safe to use today, in future they might be refactored.

namespace {

std::atomic<const base::allocator::AllocatorDispatch*> g_chain_head{
    &base::allocator::AllocatorDispatch::default_dispatch};

bool g_call_new_handler_on_malloc_failure = false;

ALWAYS_INLINE size_t GetCachedPageSize() {
  static size_t pagesize = 0;
  if (!pagesize)
    pagesize = base::GetPageSize();
  return pagesize;
}

// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if BUILDFLAG(IS_WIN)
  return base::allocator::WinCallNewHandler(size);
#else
  std::new_handler nh = std::get_new_handler();
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled and
  // we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}

ALWAYS_INLINE const base::allocator::AllocatorDispatch* GetChainHead() {
  return g_chain_head.load(std::memory_order_relaxed);
}

}  // namespace

namespace base {
namespace allocator {

void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  base::internal::PartitionAllocSetCallNewHandlerOnMallocFailure(value);
#endif
}

void* UncheckedAlloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
}

void UncheckedFree(void* ptr) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, ptr, nullptr);
}

void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;

    // This function guarantees to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all the threads always
    // see a consistent chain, hence the atomic_thread_fence() below.
    // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
    // we don't really want this to be a release-store with a corresponding
    // acquire-load during malloc().
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // Set the chain head to the new dispatch atomically. If we lose the race,
    // retry.
    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
                                             std::memory_order_relaxed,
                                             std::memory_order_relaxed)) {
      // Success.
      return;
    }
  }

  CHECK(false);  // Too many retries, this shouldn't happen.
}

void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK_EQ(GetChainHead(), dispatch);
  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
}

}  // namespace allocator
}  // namespace base

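The chain manipulation above is easier to follow in miniature. Here is a self-contained toy model of the dispatch chain (simplified, hypothetical types such as `MiniDispatch`, not the real `AllocatorDispatch`, which has many more entries): a counting stage is pushed onto the head, and each stage forwards to its `next`, mirroring how InsertAllocatorDispatch() prepends debugging/profiling hooks.

```cpp
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct MiniDispatch {
  void* (*alloc)(const MiniDispatch* self, std::size_t size);
  const MiniDispatch* next;
};

// Terminal stage: routes to the real allocator, like the "default dispatch".
void* SystemAlloc(const MiniDispatch*, std::size_t size) {
  return std::malloc(size);
}
const MiniDispatch g_default{&SystemAlloc, nullptr};

std::atomic<const MiniDispatch*> g_head{&g_default};

std::atomic<std::size_t> g_count{0};
void* CountingAlloc(const MiniDispatch* self, std::size_t size) {
  g_count.fetch_add(1, std::memory_order_relaxed);
  return self->next->alloc(self->next, size);  // Forward down the chain.
}
MiniDispatch g_counting{&CountingAlloc, nullptr};

void Insert(MiniDispatch* d) {
  // Compare-exchange loop mirrors the race handling in the code above.
  const MiniDispatch* head = g_head.load(std::memory_order_relaxed);
  do {
    d->next = head;
  } while (!g_head.compare_exchange_weak(head, d, std::memory_order_release,
                                         std::memory_order_relaxed));
}

int main() {
  Insert(&g_counting);
  const MiniDispatch* head = g_head.load(std::memory_order_acquire);
  void* p = head->alloc(head, 64);
  std::free(p);
  std::printf("allocations observed: %zu\n", g_count.load());
}
```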
// The Shim* functions below are the entry-points into the shim-layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
extern "C" {

// The general pattern for allocations is:
// - Try to allocate; if it succeeds, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true).
//   - If the std::new_handler is NOT set just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just suicide printing a message).
//     - Assume it did succeed if it returns, in which case reattempt the alloc.

ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

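The retry-until-the-new_handler-gives-up pattern described in the comment above can be demonstrated standalone with just the standard library. A minimal sketch (`AllocateWithRetry` is a hypothetical name, not part of the shim):

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <new>

void* AllocateWithRetry(std::size_t size) {
  for (;;) {
    if (void* ptr = std::malloc(size))
      return ptr;  // Success on the first (or a later) attempt.
    std::new_handler nh = std::get_new_handler();
    if (!nh)
      return nullptr;  // No handler installed: surface the failure.
    nh();  // The handler may free memory and return, or abort the process.
  }
}

int main() {
  std::set_new_handler([] {
    std::fputs("out of memory\n", stderr);
    std::abort();
  });
  void* p = AllocateWithRetry(128);
  std::printf("allocated %p\n", p);
  std::free(p);
}
```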
ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, context);
}

ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimCppDelete(void* address) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
                                                      context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  // realloc(size == 0) means free() and might return a nullptr. We should
  // not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc.
  if (((alignment % sizeof(void*)) != 0) ||
      !base::bits::IsPowerOfTwo(alignment)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size, nullptr);
  *res = ptr;
  return ptr ? 0 : ENOMEM;
}

ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}

ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = GetCachedPageSize();
  } else {
    size = base::bits::AlignUp(size, GetCachedPageSize());
  }
  // The third argument is nullptr because pvalloc is glibc only and does not
  // exist on OSX/BSD systems.
  return ShimMemalign(GetCachedPageSize(), size, nullptr);
}

ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->get_size_estimate_function(
      chain_head, const_cast<void*>(address), context);
}

ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                       void** results,
                                       unsigned num_requested,
                                       void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_malloc_function(chain_head, size, results,
                                           num_requested, context);
}

ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                 unsigned num_to_be_freed,
                                 void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_free_function(chain_head, to_be_freed,
                                         num_to_be_freed, context);
}

ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                 context);
}

ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
                                      size_t alignment,
                                      void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
                                              context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  // _aligned_realloc(size == 0) means _aligned_free() and might return a
  // nullptr. We should not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_realloc_function(chain_head, address, size,
                                               alignment, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->aligned_free_function(chain_head, address, context);
}

}  // extern "C"

#if !BUILDFLAG(IS_WIN) && \
    !(BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
// Cpp symbols (new / delete) should always be routed through the shim layer
// except on Windows and macOS (except for PartitionAlloc-Everywhere) where the
// malloc intercept is deep enough that it also catches the cpp calls.
//
// In case of PartitionAlloc-Everywhere on macOS, malloc backed by
// base::internal::PartitionMalloc crashes on OOM, and we need to avoid crashes
// in case of operator new() noexcept. Thus, operator new() noexcept needs to
// be routed to base::internal::PartitionMallocUnchecked through the shim layer.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
#endif

#if BUILDFLAG(IS_ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
#elif BUILDFLAG(IS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
#elif BUILDFLAG(IS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_default_zone.h"
#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_symbols.h"
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#else
#include "base/allocator/allocator_shim_override_libc_symbols.h"
#endif

// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
// glibc 2.23 for instance), and free() to free it. This causes issues for us,
// as we are then asked to free memory we didn't allocate.
//
// This only happened in glibc to allocate TLS storage metadata, and there are
// no other callers of __libc_memalign() there as of September 2020. To work
// around this issue, intercept this internal libc symbol to make sure that both
// the allocation and the free() are caught by the shim.
//
// This seems fragile, and is, but there is ample precedent for it, making it
// quite likely to keep working in the future. For instance, LLVM for LSAN uses
// this mechanism.

#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif

#if BUILDFLAG(IS_APPLE)
namespace base {
namespace allocator {

void InitializeAllocatorShim() {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim this will route them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();

  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();

  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  base::allocator::ReplaceFunctionsForStoredZones(&functions);
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

}  // namespace allocator
}  // namespace base
#endif

// Cross-checks.

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif

#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
    (defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif
@ -1,200 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_

#include <stddef.h>
#include <stdint.h>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/types/strong_alias.h"
#include "build/build_config.h"

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_ALLOW_PCSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif

namespace base {
namespace allocator {

// Allocator Shim API. It allows one to:
//  - Configure the behavior of the allocator (what to do on OOM failures).
//  - Install new hooks (AllocatorDispatch) in the allocator chain.

// When this shim layer is enabled, the route of an allocation is as follows:
//
// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
//   The override_* headers define the symbols required to intercept calls to
//   malloc() and operator new (if not overridden by specific C++ classes).
//
// [allocator_shim.cc] Routing allocation calls to the shim:
//   The headers above route the calls to the internal ShimMalloc(), ShimFree(),
//   ShimCppNew() etc. methods defined in allocator_shim.cc.
//   These methods will: (1) forward the allocation call to the front of the
//   AllocatorDispatch chain. (2) perform security hardenings (e.g., might
//   call std::new_handler on OOM failure).
//
// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
//   It is a singly linked list where each element is a struct with function
//   pointers (|malloc_function|, |free_function|, etc). Normally the chain
//   consists of a single AllocatorDispatch element, herein called
//   the "default dispatch", which is statically defined at build time and
//   ultimately routes the calls to the actual allocator defined by the build
//   config (glibc, ...).
//
//   It is possible to dynamically insert further AllocatorDispatch stages
//   to the front of the chain, for debugging / profiling purposes.
//
//   All the functions must be thread safe. The shim does not enforce any
//   serialization. This is to route to thread-aware allocators without
//   introducing unnecessary perf hits.

struct AllocatorDispatch {
|
||||
using AllocFn = void*(const AllocatorDispatch* self,
|
||||
size_t size,
|
||||
void* context);
|
||||
using AllocUncheckedFn = void*(const AllocatorDispatch* self,
|
||||
size_t size,
|
||||
void* context);
|
||||
using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
|
||||
size_t n,
|
||||
size_t size,
|
||||
void* context);
|
||||
using AllocAlignedFn = void*(const AllocatorDispatch* self,
|
||||
size_t alignment,
|
||||
size_t size,
|
||||
void* context);
|
||||
using ReallocFn = void*(const AllocatorDispatch* self,
|
||||
void* address,
|
||||
size_t size,
|
||||
void* context);
|
||||
using FreeFn = void(const AllocatorDispatch* self,
|
||||
void* address,
|
||||
void* context);
|
||||
// Returns the allocated size of user data (not including heap overhead).
|
||||
// Can be larger than the requested size.
|
||||
using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
|
||||
void* address,
|
||||
void* context);
|
||||
using BatchMallocFn = unsigned(const AllocatorDispatch* self,
|
||||
size_t size,
|
||||
void** results,
|
||||
unsigned num_requested,
|
||||
void* context);
|
||||
using BatchFreeFn = void(const AllocatorDispatch* self,
|
||||
void** to_be_freed,
|
||||
unsigned num_to_be_freed,
|
||||
void* context);
|
||||
using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
|
||||
void* ptr,
|
||||
size_t size,
|
||||
void* context);
|
||||
using AlignedMallocFn = void*(const AllocatorDispatch* self,
|
||||
size_t size,
|
||||
size_t alignment,
|
||||
void* context);
|
||||
using AlignedReallocFn = void*(const AllocatorDispatch* self,
|
||||
void* address,
|
||||
size_t size,
|
||||
size_t alignment,
|
||||
void* context);
|
||||
using AlignedFreeFn = void(const AllocatorDispatch* self,
|
||||
void* address,
|
||||
void* context);
|
||||
|
||||
AllocFn* const alloc_function;
|
||||
AllocUncheckedFn* const alloc_unchecked_function;
|
||||
AllocZeroInitializedFn* const alloc_zero_initialized_function;
|
||||
AllocAlignedFn* const alloc_aligned_function;
|
||||
ReallocFn* const realloc_function;
|
||||
FreeFn* const free_function;
|
||||
GetSizeEstimateFn* const get_size_estimate_function;
|
||||
// batch_malloc, batch_free, and free_definite_size are specific to the OSX
|
||||
// and iOS allocators.
|
||||
BatchMallocFn* const batch_malloc_function;
|
||||
BatchFreeFn* const batch_free_function;
|
||||
FreeDefiniteSizeFn* const free_definite_size_function;
|
||||
// _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
|
||||
// Windows allocator.
|
||||
AlignedMallocFn* const aligned_malloc_function;
|
||||
AlignedReallocFn* const aligned_realloc_function;
|
||||
AlignedFreeFn* const aligned_free_function;
|
||||
|
||||
const AllocatorDispatch* next;
|
||||
|
||||
// |default_dispatch| is statically defined by one (and only one) of the
|
||||
// allocator_shim_default_dispatch_to_*.cc files, depending on the build
|
||||
// configuration.
|
||||
static const AllocatorDispatch default_dispatch;
|
||||
};
|
||||
|
||||
// When true makes malloc behave like new, w.r.t calling the new_handler if
|
||||
// the allocation fails (see set_new_mode() in Windows).
|
||||
BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
|
||||
|
||||
// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
|
||||
// regardless of SetCallNewHandlerOnMallocFailure().
|
||||
BASE_EXPORT void* UncheckedAlloc(size_t size);
|
||||
|
||||
// Frees memory allocated with UncheckedAlloc().
|
||||
BASE_EXPORT void UncheckedFree(void* ptr);
|
||||
|
||||
// Inserts |dispatch| in front of the allocator chain. This method is
|
||||
// thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
|
||||
// The callers have responsibility for inserting a single dispatch no more
|
||||
// than once.
|
||||
BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
|
||||
|
||||
// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
|
||||
// removal of arbitrary elements from a singly linked list would require a lock
|
||||
// in malloc(), which we really don't want.
|
||||
BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
|
||||
|
||||
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_WIN)
|
||||
// Configures the allocator for the caller's allocation domain. Allocations that
|
||||
// take place prior to this configuration step will succeed, but will not
|
||||
// benefit from its one-time mitigations. As such, this function must be called
|
||||
// as early as possible during startup.
|
||||
BASE_EXPORT void ConfigurePartitionAlloc();
|
||||
#endif // BUILDFLAG(IS_WIN)
|
||||
|
||||
#if BUILDFLAG(IS_APPLE)
|
||||
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||
void InitializeDefaultAllocatorPartitionRoot();
|
||||
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||
// On macOS, the allocator shim needs to be turned on during runtime.
|
||||
BASE_EXPORT void InitializeAllocatorShim();
|
||||
#endif // BUILDFLAG(IS_APPLE)
|
||||
|
||||
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
|
||||
|
||||
using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
|
||||
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
|
||||
using UseDedicatedAlignedPartition =
|
||||
base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
|
||||
using AlternateBucketDistribution =
|
||||
base::StrongAlias<class AlternateBucketDistributionTag, bool>;
|
||||
|
||||
// If |thread_cache_on_non_quarantinable_partition| is specified, the
|
||||
// thread-cache will be enabled on the non-quarantinable partition. The
|
||||
// thread-cache on the main (malloc) partition will be disabled.
|
||||
BASE_EXPORT void ConfigurePartitions(
|
||||
EnableBrp enable_brp,
|
||||
SplitMainPartition split_main_partition,
|
||||
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
|
||||
AlternateBucketDistribution use_alternate_bucket_distribution);
|
||||
|
||||
#if defined(PA_ALLOW_PCSCAN)
|
||||
BASE_EXPORT void EnablePCScan(base::internal::PCScan::InitConfig);
|
||||
#endif
|
||||
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||
|
||||
} // namespace allocator
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
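
// A hypothetical sketch (not part of the original header): the hook mechanism
// described above, chaining a profiling dispatch in front of the default one.
// CountingAlloc/CountingFree are invented names; the AllocatorDispatch fields,
// the |next| pointer, and InsertAllocatorDispatch() are from this header.
//
//   #include <atomic>
//
//   std::atomic<size_t> g_outstanding_allocs{0};
//
//   void* CountingAlloc(const base::allocator::AllocatorDispatch* self,
//                       size_t size,
//                       void* context) {
//     g_outstanding_allocs.fetch_add(1, std::memory_order_relaxed);
//     // Forward to the next element; the default dispatch does the real work.
//     return self->next->alloc_function(self->next, size, context);
//   }
//
//   void CountingFree(const base::allocator::AllocatorDispatch* self,
//                     void* address,
//                     void* context) {
//     g_outstanding_allocs.fetch_sub(1, std::memory_order_relaxed);
//     self->next->free_function(self->next, address, context);
//   }
//
// A real hook would fill in every member (each forwarding to |next|) and leave
// |next| as nullptr; InsertAllocatorDispatch() then links it to the chain head.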
@@ -1,122 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits>

#include "base/allocator/allocator_shim.h"
#include "base/compiler_specific.h"
#include "base/numerics/checked_math.h"
#include "base/process/memory.h"

#include <dlfcn.h>
#include <malloc.h>

// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to libc functions.
// The code here is strongly inspired from tcmalloc's libc_override_glibc.h.

extern "C" {
void* __libc_malloc(size_t size);
void* __libc_calloc(size_t n, size_t size);
void* __libc_realloc(void* address, size_t size);
void* __libc_memalign(size_t alignment, size_t size);
void __libc_free(void* ptr);
}  // extern "C"

namespace {

using base::allocator::AllocatorDispatch;

// Strictly speaking, it would make more sense to not subtract anything, but
// other shims limit to something lower than INT_MAX (which is 0x7FFFFFFF on
// most platforms), and tests expect that.
constexpr size_t kMaxAllowedSize = std::numeric_limits<int>::max() - (1 << 12);

void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
  // glibc's malloc() cannot be forced to crash when a large size is requested,
  // so do it in the shim instead.
  if (UNLIKELY(size >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size);

  return __libc_malloc(size);
}

void* GlibcUncheckedMalloc(const AllocatorDispatch*,
                           size_t size,
                           void* context) {
  if (UNLIKELY(size >= kMaxAllowedSize))
    return nullptr;

  return __libc_malloc(size);
}

void* GlibcCalloc(const AllocatorDispatch*,
                  size_t n,
                  size_t size,
                  void* context) {
  const auto total = base::CheckMul(n, size);
  if (UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size * n);

  return __libc_calloc(n, size);
}

void* GlibcRealloc(const AllocatorDispatch*,
                   void* address,
                   size_t size,
                   void* context) {
  if (UNLIKELY(size >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size);

  return __libc_realloc(address, size);
}

void* GlibcMemalign(const AllocatorDispatch*,
                    size_t alignment,
                    size_t size,
                    void* context) {
  if (UNLIKELY(size >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size);

  return __libc_memalign(alignment, size);
}

void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
  __libc_free(address);
}

NO_SANITIZE("cfi-icall")
size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
                            void* address,
                            void* context) {
  // glibc does not expose an alias to resolve malloc_usable_size. Dynamically
  // resolve it instead. This should be safe because glibc (and hence dlfcn)
  // does not use malloc_size internally and so there should not be a risk of
  // recursion.
  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
  static MallocUsableSizeFunction fn_ptr =
      reinterpret_cast<MallocUsableSizeFunction>(
          dlsym(RTLD_NEXT, "malloc_usable_size"));

  return fn_ptr(address);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &GlibcMalloc,          /* alloc_function */
    &GlibcUncheckedMalloc, /* alloc_unchecked_function */
    &GlibcCalloc,          /* alloc_zero_initialized_function */
    &GlibcMemalign,        /* alloc_aligned_function */
    &GlibcRealloc,         /* realloc_function */
    &GlibcFree,            /* free_function */
    &GlibcGetSizeEstimate, /* get_size_estimate_function */
    nullptr,               /* batch_malloc_function */
    nullptr,               /* batch_free_function */
    nullptr,               /* free_definite_size_function */
    nullptr,               /* aligned_malloc_function */
    nullptr,               /* aligned_realloc_function */
    nullptr,               /* aligned_free_function */
    nullptr,               /* next */
};
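
// A hypothetical sketch (not part of the original file): the size checks above
// in isolation. GlibcCalloc() rejects n * size products that overflow, and all
// paths cap requests at kMaxAllowedSize; with the GCC/Clang builtin, the same
// guard can be written as:
//
//   static inline bool IsAllowedCallocSize(size_t n, size_t size,
//                                          size_t* total) {
//     // __builtin_mul_overflow() returns true when n * size wraps around.
//     return !__builtin_mul_overflow(n, size, total) &&
//            *total < kMaxAllowedSize;
//   }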
@@ -1,77 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <malloc.h>

#include "base/allocator/allocator_shim.h"
#include "build/build_config.h"

// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to the original libc functions when using the link-time
// -Wl,-wrap,malloc approach (see README.md).
// The __real_X functions here are special symbols that the linker will relocate
// against the real "X" undefined symbol, so that __real_malloc becomes the
// equivalent of what an undefined malloc symbol reference would have been.
// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
// which routes the __wrap_X functions into the shim.

extern "C" {
void* __real_malloc(size_t);
void* __real_calloc(size_t, size_t);
void* __real_realloc(void*, size_t);
void* __real_memalign(size_t, size_t);
void __real_free(void*);
}  // extern "C"

namespace {

using base::allocator::AllocatorDispatch;

void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
  return __real_malloc(size);
}

void* RealCalloc(const AllocatorDispatch*,
                 size_t n,
                 size_t size,
                 void* context) {
  return __real_calloc(n, size);
}

void* RealRealloc(const AllocatorDispatch*,
                  void* address,
                  size_t size,
                  void* context) {
  return __real_realloc(address, size);
}

void* RealMemalign(const AllocatorDispatch*,
                   size_t alignment,
                   size_t size,
                   void* context) {
  return __real_memalign(alignment, size);
}

void RealFree(const AllocatorDispatch*, void* address, void* context) {
  __real_free(address);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &RealMalloc,   /* alloc_function */
    &RealMalloc,   /* alloc_unchecked_function */
    &RealCalloc,   /* alloc_zero_initialized_function */
    &RealMemalign, /* alloc_aligned_function */
    &RealRealloc,  /* realloc_function */
    &RealFree,     /* free_function */
    nullptr,       /* get_size_estimate_function */
    nullptr,       /* batch_malloc_function */
    nullptr,       /* batch_free_function */
    nullptr,       /* free_definite_size_function */
    nullptr,       /* aligned_malloc_function */
    nullptr,       /* aligned_realloc_function */
    nullptr,       /* aligned_free_function */
    nullptr,       /* next */
};
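
// A hypothetical sketch (not part of the original file): the other half of the
// -Wl,-wrap,malloc mechanism described above. With that linker flag, every
// undefined reference to malloc is redirected to __wrap_malloc, while
// __real_malloc binds to the original libc definition that RealMalloc() calls:
//
//   ldflags = [ "-Wl,-wrap,malloc", "-Wl,-wrap,calloc", "-Wl,-wrap,realloc",
//               "-Wl,-wrap,memalign", "-Wl,-wrap,free" ]
//
// and the override header (counterpart of this file) defines, roughly:
//
//   extern "C" void* __wrap_malloc(size_t size) {
//     return ShimMalloc(size, nullptr);  // enters the shim dispatch chain
//   }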
@@ -1,107 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <utility>

#include "base/allocator/allocator_interception_mac.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/malloc_zone_functions_mac.h"

namespace base {
namespace allocator {
namespace {

void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.malloc(reinterpret_cast<struct _malloc_zone_t*>(context),
                          size);
}

void* CallocImpl(const AllocatorDispatch*,
                 size_t n,
                 size_t size,
                 void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.calloc(reinterpret_cast<struct _malloc_zone_t*>(context), n,
                          size);
}

void* MemalignImpl(const AllocatorDispatch*,
                   size_t alignment,
                   size_t size,
                   void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.memalign(reinterpret_cast<struct _malloc_zone_t*>(context),
                            alignment, size);
}

void* ReallocImpl(const AllocatorDispatch*,
                  void* ptr,
                  size_t size,
                  void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.realloc(reinterpret_cast<struct _malloc_zone_t*>(context),
                           ptr, size);
}

void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.free(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}

size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}

unsigned BatchMallocImpl(const AllocatorDispatch* self,
                         size_t size,
                         void** results,
                         unsigned num_requested,
                         void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.batch_malloc(
      reinterpret_cast<struct _malloc_zone_t*>(context), size, results,
      num_requested);
}

void BatchFreeImpl(const AllocatorDispatch* self,
                   void** to_be_freed,
                   unsigned num_to_be_freed,
                   void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.batch_free(reinterpret_cast<struct _malloc_zone_t*>(context),
                       to_be_freed, num_to_be_freed);
}

void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
                          void* ptr,
                          size_t size,
                          void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.free_definite_size(
      reinterpret_cast<struct _malloc_zone_t*>(context), ptr, size);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &MallocImpl,           /* alloc_function */
    &MallocImpl,           /* alloc_unchecked_function */
    &CallocImpl,           /* alloc_zero_initialized_function */
    &MemalignImpl,         /* alloc_aligned_function */
    &ReallocImpl,          /* realloc_function */
    &FreeImpl,             /* free_function */
    &GetSizeEstimateImpl,  /* get_size_estimate_function */
    &BatchMallocImpl,      /* batch_malloc_function */
    &BatchFreeImpl,        /* batch_free_function */
    &FreeDefiniteSizeImpl, /* free_definite_size_function */
    nullptr,               /* aligned_malloc_function */
    nullptr,               /* aligned_realloc_function */
    nullptr,               /* aligned_free_function */
    nullptr,               /* next */
};

}  // namespace allocator
}  // namespace base
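
// A hypothetical sketch (not part of the original file): in the dispatch above,
// the opaque |context| is the malloc zone the call arrived on, which is why
// every Impl casts it back before forwarding. The same round-trip, done
// directly against the macOS <malloc/malloc.h> API, looks like:
//
//   malloc_zone_t* zone = malloc_default_zone();
//   void* p = malloc_zone_malloc(zone, 16);  // what functions.malloc wraps
//   malloc_zone_free(zone, p);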
@@ -1,789 +0,0 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"

#include <atomic>
#include <cstddef>
#include <map>
#include <string>
#include <tuple>

#include "base/allocator/allocator_shim_internals.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/memory/nonscannable_memory.h"
#include "base/numerics/checked_math.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif

#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
#include <windows.h>
#endif

using base::allocator::AllocatorDispatch;

namespace {

class SimpleScopedSpinLocker {
 public:
  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
    // Lock. Semantically equivalent to base::Lock::Acquire().
    bool expected = false;
    // Weak CAS since we are in a retry loop, relaxed ordering for failure since
    // in this case we don't imply any ordering.
    //
    // This matches partition_allocator/spinning_mutex.h fast path on Linux.
    while (!lock_.compare_exchange_weak(
        expected, true, std::memory_order_acquire, std::memory_order_relaxed)) {
      expected = false;
    }
  }

  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }

 private:
  std::atomic<bool>& lock_;
};

// We can't use a "static local" or a base::LazyInstance, as:
// - static local variables call into the runtime on Windows, which is not
//   prepared to handle it, as the first allocation happens during CRT init.
// - We don't want to depend on base::LazyInstance, which may be converted to
//   static locals one day.
//
// Nevertheless, this provides essentially the same thing.
template <typename T, typename Constructor>
class LeakySingleton {
 public:
  constexpr LeakySingleton() = default;

  ALWAYS_INLINE T* Get() {
    auto* instance = instance_.load(std::memory_order_acquire);
    if (LIKELY(instance))
      return instance;

    return GetSlowPath();
  }

  // Replaces the instance pointer with a new one.
  void Replace(T* new_instance) {
    SimpleScopedSpinLocker scoped_lock{initialization_lock_};

    // Modify under the lock to avoid race between |if (instance)| and
    // |instance_.store()| in GetSlowPath().
    instance_.store(new_instance, std::memory_order_release);
  }

 private:
  T* GetSlowPath();

  std::atomic<T*> instance_;
  // Before C++20, having an initializer here causes a "variable does not have a
  // constant initializer" error. In C++20, omitting it causes a similar error.
  // Presumably this is due to the C++20 changes to make atomic initialization
  // (of the other members of this class) sane, so guarding under that
  // feature-test.
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
  alignas(T) uint8_t instance_buffer_[sizeof(T)];
#else
  alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
#endif
  std::atomic<bool> initialization_lock_;
};

template <typename T, typename Constructor>
T* LeakySingleton<T, Constructor>::GetSlowPath() {
  // The instance has not been set; the proper way to proceed (correct
  // double-checked locking) is:
  //
  // auto* instance = instance_.load(std::memory_order_acquire);
  // if (!instance) {
  //   ScopedLock initialization_lock;
  //   root = instance_.load(std::memory_order_relaxed);
  //   if (root)
  //     return root;
  //   instance = Create new root;
  //   instance_.store(instance, std::memory_order_release);
  //   return instance;
  // }
  //
  // However, we don't want to use a base::Lock here, so instead we use
  // compare-and-exchange on a lock variable, which provides the same
  // guarantees.
  SimpleScopedSpinLocker scoped_lock{initialization_lock_};

  T* instance = instance_.load(std::memory_order_relaxed);
  // Someone beat us.
  if (instance)
    return instance;

  instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
  instance_.store(instance, std::memory_order_release);

  return instance;
}
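
// A hypothetical sketch (not part of the original file): how LeakySingleton is
// meant to be used; MainPartitionConstructor below follows the same shape.
// Constructor::New() placement-constructs into |instance_buffer_|, so getting
// the instance never allocates. DemoConfig is an invented payload type.
//
//   struct DemoConfig {
//     int verbosity = 0;
//   };
//   struct DemoConfigConstructor {
//     static DemoConfig* New(void* buffer) {
//       return new (buffer) DemoConfig();  // requires <new>
//     }
//   };
//   LeakySingleton<DemoConfig, DemoConfigConstructor> g_demo CONSTINIT = {};
//   DemoConfig* config = g_demo.Get();  // thread-safe, leaked on purpose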

class MainPartitionConstructor {
 public:
  static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
    constexpr base::PartitionOptions::ThreadCache thread_cache =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        // Additional partitions may be created in ConfigurePartitions(). Since
        // only one partition can have thread cache enabled, postpone the
        // decision to turn the thread cache on until after that call.
        // TODO(bartekn): Enable it here by default, once the "split-only" mode
        // is no longer needed.
        base::PartitionOptions::ThreadCache::kDisabled;
#else   // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        // Other tests, such as the ThreadCache tests create a thread cache,
        // and only one is supported at a time.
        base::PartitionOptions::ThreadCache::kDisabled;
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    auto* new_root = new (buffer) partition_alloc::ThreadSafePartitionRoot({
        base::PartitionOptions::AlignedAlloc::kAllowed,
        thread_cache,
        base::PartitionOptions::Quarantine::kAllowed,
        base::PartitionOptions::Cookie::kAllowed,
        base::PartitionOptions::BackupRefPtr::kDisabled,
        base::PartitionOptions::UseConfigurablePool::kNo,
    });

    return new_root;
  }
};

LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
               MainPartitionConstructor>
    g_root CONSTINIT = {};
partition_alloc::ThreadSafePartitionRoot* Allocator() {
  return g_root.Get();
}

// Original g_root_ if it was replaced by ConfigurePartitions().
std::atomic<partition_alloc::ThreadSafePartitionRoot*> g_original_root(nullptr);

class AlignedPartitionConstructor {
 public:
  static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
    return g_root.Get();
  }
};

LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
               AlignedPartitionConstructor>
    g_aligned_root CONSTINIT = {};

partition_alloc::ThreadSafePartitionRoot* OriginalAllocator() {
  return g_original_root.load(std::memory_order_relaxed);
}

partition_alloc::ThreadSafePartitionRoot* AlignedAllocator() {
  return g_aligned_root.Get();
}

#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
bool IsRunning32bitEmulatedOnArm64() {
  using IsWow64Process2Function = decltype(&IsWow64Process2);

  IsWow64Process2Function is_wow64_process2 =
      reinterpret_cast<IsWow64Process2Function>(::GetProcAddress(
          ::GetModuleHandleA("kernel32.dll"), "IsWow64Process2"));
  if (!is_wow64_process2)
    return false;
  USHORT process_machine;
  USHORT native_machine;
  bool retval = is_wow64_process2(::GetCurrentProcess(), &process_machine,
                                  &native_machine);
  if (!retval)
    return false;
  if (native_machine == IMAGE_FILE_MACHINE_ARM64)
    return true;
  return false;
}
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

// The number of bytes to add to every allocation. Ordinarily zero, but set to 8
// when emulating an x86 on ARM64 to avoid a bug in the Windows x86 emulator.
size_t g_extra_bytes;
#endif  // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)

// TODO(brucedawson): Remove this when https://crbug.com/1151455 is fixed.
ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) {
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
  return base::CheckAdd(size, g_extra_bytes).ValueOrDie();
#else   // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
  return size;
#endif  // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
}

void* AllocateAlignedMemory(size_t alignment, size_t size) {
  // Memory returned by the regular allocator *always* respects |kAlignment|,
  // which is a power of two, and any valid alignment is also a power of two. So
  // we can directly fulfill these requests with the main allocator.
  //
  // This has several advantages:
  // - The thread cache is supported on the main partition
  // - Reduced fragmentation
  // - Better coverage for MiraclePtr variants requiring extras
  //
  // There are several call sites in Chromium where base::AlignedAlloc is called
  // with a small alignment. Some may be due to overly-careful code, some are
  // because the client code doesn't know the required alignment at compile
  // time.
  //
  // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
  // instance) directly call PartitionFree(), so there is no risk of
  // mismatch. (see below the default_dispatch definition).
  if (alignment <= partition_alloc::internal::kAlignment) {
    // This is mandated by |posix_memalign()| and friends, so should never fire.
    PA_CHECK(base::bits::IsPowerOfTwo(alignment));
    // TODO(bartekn): See if the compiler optimizes branches down the stack on
    // Mac, where PartitionPageSize() isn't constexpr.
    return Allocator()->AllocWithFlagsNoHooks(
        0, size, partition_alloc::PartitionPageSize());
  }

  return AlignedAllocator()->AlignedAllocWithFlags(
      partition_alloc::AllocFlags::kNoHooks, alignment, size);
}

}  // namespace

namespace base {
namespace internal {

namespace {
#if BUILDFLAG(IS_APPLE)
int g_alloc_flags = 0;
#else
constexpr int g_alloc_flags = 0;
#endif
}  // namespace

void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value) {
#if BUILDFLAG(IS_APPLE)
  // We generally prefer to always crash rather than returning nullptr for
  // OOM. However, on some macOS releases, we have to locally allow it due to
  // weirdness in OS code. See https://crbug.com/654695 for details.
  //
  // Apple only since it's not needed elsewhere, and there is a performance
  // penalty.

  if (value)
    g_alloc_flags = 0;
  else
    g_alloc_flags = partition_alloc::AllocFlags::kReturnNull;
#endif
}

void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
  ScopedDisallowAllocations guard{};
  return Allocator()->AllocWithFlagsNoHooks(
      0 | g_alloc_flags, MaybeAdjustSize(size),
      partition_alloc::PartitionPageSize());
}

void* PartitionMallocUnchecked(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  ScopedDisallowAllocations guard{};
  return Allocator()->AllocWithFlagsNoHooks(
      partition_alloc::AllocFlags::kReturnNull | g_alloc_flags,
      MaybeAdjustSize(size), partition_alloc::PartitionPageSize());
}

void* PartitionCalloc(const AllocatorDispatch*,
                      size_t n,
                      size_t size,
                      void* context) {
  ScopedDisallowAllocations guard{};
  const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie();
  return Allocator()->AllocWithFlagsNoHooks(
      partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
      partition_alloc::PartitionPageSize());
}

void* PartitionMemalign(const AllocatorDispatch*,
                        size_t alignment,
                        size_t size,
                        void* context) {
  ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
                            size_t size,
                            size_t alignment,
                            void* context) {
  ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

// aligned_realloc documentation is
// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
// TODO(tasak): Expand the given memory block to the given size if possible.
// This realloc always frees the original memory block and allocates a new
// memory block.
// TODO(tasak): Implement PartitionRoot<thread_safe>::AlignedReallocWithFlags
// and use it.
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  ScopedDisallowAllocations guard{};
  void* new_ptr = nullptr;
  if (size > 0) {
    size = MaybeAdjustSize(size);
    new_ptr = AllocateAlignedMemory(alignment, size);
  } else {
    // size == 0 and address != null means just "free(address)".
    if (address)
      partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  // The original memory block (specified by address) is unchanged if ENOMEM.
  if (!new_ptr)
    return nullptr;
  // TODO(tasak): Need to compare the new alignment with the address' alignment.
  // If the two alignments are not the same, need to return nullptr with EINVAL.
  if (address) {
    size_t usage =
        partition_alloc::ThreadSafePartitionRoot::GetUsableSize(address);
    size_t copy_size = usage > size ? size : usage;
    memcpy(new_ptr, address, copy_size);

    partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  return new_ptr;
}

void* PartitionRealloc(const AllocatorDispatch*,
                       void* address,
                       size_t size,
                       void* context) {
  ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  if (UNLIKELY(!base::IsManagedByPartitionAlloc(
                   reinterpret_cast<uintptr_t>(address)) &&
               address)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `realloc` which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return realloc(address, size);
  }
#endif  // BUILDFLAG(IS_APPLE)

  return Allocator()->ReallocWithFlags(
      partition_alloc::AllocFlags::kNoHooks | g_alloc_flags, address,
      MaybeAdjustSize(size), "");
}

#if BUILDFLAG(IS_ANDROID) && BUILDFLAG(IS_CHROMECAST)
extern "C" {
void __real_free(void*);
}  // extern "C"
#endif

void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
  ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  // TODO(bartekn): Add MTE unmasking here (and below).
  if (UNLIKELY(!base::IsManagedByPartitionAlloc(
                   reinterpret_cast<uintptr_t>(object)) &&
               object)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `free` which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return free(object);
  }
#endif  // BUILDFLAG(IS_APPLE)

  // On Chromecast, there is at least one case where a system malloc() pointer
  // can be passed to PartitionAlloc's free(). If we don't own the pointer, pass
  // it along. This should not have a runtime cost vs regular Android, since on
  // Android we have a PA_CHECK() rather than the branch here.
#if BUILDFLAG(IS_ANDROID) && BUILDFLAG(IS_CHROMECAST)
  if (UNLIKELY(!base::IsManagedByPartitionAlloc(
                   reinterpret_cast<uintptr_t>(object)) &&
               object)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `free()`, which is `__real_free()`
    // here.
    return __real_free(object);
  }
#endif

  partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(object);
}

#if BUILDFLAG(IS_APPLE)
// Normal free() path on Apple OSes:
// 1. size = GetSizeEstimate(ptr);
// 2. if (size) FreeDefiniteSize(ptr, size)
//
// So we don't need to re-check that the pointer is owned in Free(), and we
// can use the size.
void PartitionFreeDefiniteSize(const AllocatorDispatch*,
                               void* address,
                               size_t size,
                               void* context) {
  ScopedDisallowAllocations guard{};
  // TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
  // still useful though, as we avoid double-checking that the address is owned.
  partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
}
#endif  // BUILDFLAG(IS_APPLE)

size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
                                void* address,
                                void* context) {
  // This is used to implement malloc_usable_size(3). Per its man page, "if ptr
  // is NULL, 0 is returned".
  if (!address)
    return 0;

#if BUILDFLAG(IS_APPLE)
  if (!base::IsManagedByPartitionAlloc(reinterpret_cast<uintptr_t>(address))) {
    // The object pointed to by `address` is not allocated by the
    // PartitionAlloc. The return value `0` means that the pointer does not
    // belong to this malloc zone.
    return 0;
  }
#endif  // BUILDFLAG(IS_APPLE)

  // TODO(lizeb): Returns incorrect values for aligned allocations.
  const size_t size =
      partition_alloc::ThreadSafePartitionRoot::GetUsableSize(address);
#if BUILDFLAG(IS_APPLE)
  // The object pointed to by `address` is allocated by the PartitionAlloc.
  // So, this function must not return zero so that the malloc zone dispatcher
  // finds the appropriate malloc zone.
  PA_DCHECK(size);
#endif  // BUILDFLAG(IS_APPLE)
  return size;
}

unsigned PartitionBatchMalloc(const AllocatorDispatch*,
                              size_t size,
                              void** results,
                              unsigned num_requested,
                              void* context) {
  // No real batching: we could, for instance, acquire the lock only once; keep
  // it simple for now.
  for (unsigned i = 0; i < num_requested; i++) {
    // No need to check the results, we crash if it fails.
    results[i] = PartitionMalloc(nullptr, size, nullptr);
  }

  // Either all succeeded, or we crashed.
  return num_requested;
}

void PartitionBatchFree(const AllocatorDispatch*,
                        void** to_be_freed,
                        unsigned num_to_be_freed,
                        void* context) {
  // No real batching: we could, for instance, acquire the lock only once; keep
  // it simple for now.
  for (unsigned i = 0; i < num_to_be_freed; i++) {
    PartitionFree(nullptr, to_be_freed[i], nullptr);
  }
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
  return ::Allocator();
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
  return ::OriginalAllocator();
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
  return ::AlignedAllocator();
}

}  // namespace internal
}  // namespace base

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

namespace base {
namespace allocator {

void EnablePartitionAllocMemoryReclaimer() {
  // Unlike other partitions, Allocator() and AlignedAllocator() do not register
  // their PartitionRoots to the memory reclaimer, because doing so may allocate
  // memory. Thus, the registration to the memory reclaimer has to be done
  // some time later, when the main root is fully configured.
  // TODO(bartekn): Aligned allocator can use the regular initialization path.
  ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
      Allocator());
  auto* original_root = OriginalAllocator();
  if (original_root)
    ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
        original_root);
  if (AlignedAllocator() != Allocator()) {
    ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
        AlignedAllocator());
  }
}

alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t
    g_allocator_buffer_for_new_main_partition[sizeof(
        partition_alloc::ThreadSafePartitionRoot)];

alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t
    g_allocator_buffer_for_aligned_alloc_partition[sizeof(
        partition_alloc::ThreadSafePartitionRoot)];

void ConfigurePartitions(
    EnableBrp enable_brp,
    SplitMainPartition split_main_partition,
    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
    AlternateBucketDistribution use_alternate_bucket_distribution) {
  // BRP cannot be enabled without splitting the main partition. Furthermore, in
  // the "before allocation" mode, it can't be enabled without further splitting
  // out the aligned partition.
  PA_CHECK(!enable_brp || split_main_partition);
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
  PA_CHECK(!enable_brp || use_dedicated_aligned_partition);
#endif
  // Can't split out the aligned partition, without splitting the main one.
  PA_CHECK(!use_dedicated_aligned_partition || split_main_partition);

  static bool configured = false;
  PA_CHECK(!configured);
  configured = true;

  // Calling Get() is actually important, even if the return values weren't
  // used, because it has a side effect of initializing the variables, if they
  // weren't already.
  auto* current_root = g_root.Get();
  auto* current_aligned_root = g_aligned_root.Get();

  if (!split_main_partition) {
    if (!use_alternate_bucket_distribution) {
      current_root->SwitchToDenserBucketDistribution();
      current_aligned_root->SwitchToDenserBucketDistribution();
    }
    PA_DCHECK(!enable_brp);
    PA_DCHECK(!use_dedicated_aligned_partition);
    PA_DCHECK(!current_root->with_thread_cache);
    return;
  }

  auto* new_root =
      new (g_allocator_buffer_for_new_main_partition) ThreadSafePartitionRoot({
          !use_dedicated_aligned_partition
              ? base::PartitionOptions::AlignedAlloc::kAllowed
              : base::PartitionOptions::AlignedAlloc::kDisallowed,
          base::PartitionOptions::ThreadCache::kDisabled,
          base::PartitionOptions::Quarantine::kAllowed,
          base::PartitionOptions::Cookie::kAllowed,
          enable_brp ? base::PartitionOptions::BackupRefPtr::kEnabled
                     : base::PartitionOptions::BackupRefPtr::kDisabled,
          base::PartitionOptions::UseConfigurablePool::kNo,
      });

  partition_alloc::ThreadSafePartitionRoot* new_aligned_root;
  if (use_dedicated_aligned_partition) {
    // TODO(bartekn): Use the original root instead of creating a new one. It'd
    // result in one less partition, but come at a cost of commingling types.
    new_aligned_root = new (g_allocator_buffer_for_aligned_alloc_partition)
        ThreadSafePartitionRoot({
            base::PartitionOptions::AlignedAlloc::kAllowed,
            base::PartitionOptions::ThreadCache::kDisabled,
            base::PartitionOptions::Quarantine::kAllowed,
            base::PartitionOptions::Cookie::kAllowed,
            base::PartitionOptions::BackupRefPtr::kDisabled,
            base::PartitionOptions::UseConfigurablePool::kNo,
        });
  } else {
    // The new main root can also support AlignedAlloc.
    new_aligned_root = new_root;
  }

  // Now switch traffic to the new partitions.
  g_aligned_root.Replace(new_aligned_root);
  g_root.Replace(new_root);

  // g_original_root has to be set after g_root, because other code doesn't
  // handle well both pointing to the same root.
  // TODO(bartekn): Reorder, once handled well. It isn't ideal for one
  // partition to be invisible temporarily.
  g_original_root = current_root;

  // No need for g_original_aligned_root, because in cases where g_aligned_root
  // is replaced, it must've been g_original_root.
  PA_CHECK(current_aligned_root == g_original_root);

  // Purge memory, now that the traffic to the original partition is cut off.
  current_root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
                            PurgeFlags::kDiscardUnusedSystemPages);

  if (!use_alternate_bucket_distribution) {
    g_root.Get()->SwitchToDenserBucketDistribution();
    g_aligned_root.Get()->SwitchToDenserBucketDistribution();
  }
}

#if defined(PA_ALLOW_PCSCAN)
void EnablePCScan(base::internal::PCScan::InitConfig config) {
  internal::PCScan::Initialize(config);

  internal::PCScan::RegisterScannableRoot(Allocator());
  if (OriginalAllocator() != nullptr)
    internal::PCScan::RegisterScannableRoot(OriginalAllocator());
  if (Allocator() != AlignedAllocator())
    internal::PCScan::RegisterScannableRoot(AlignedAllocator());

  internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
  internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
}
#endif  // defined(PA_ALLOW_PCSCAN)

#if BUILDFLAG(IS_WIN)
// Call this as soon as possible during startup.
void ConfigurePartitionAlloc() {
#if defined(ARCH_CPU_X86)
  if (IsRunning32bitEmulatedOnArm64())
    g_extra_bytes = 8;
#endif  // defined(ARCH_CPU_X86)
}
#endif  // BUILDFLAG(IS_WIN)
}  // namespace allocator
}  // namespace base

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &base::internal::PartitionMalloc,           // alloc_function
    &base::internal::PartitionMallocUnchecked,  // alloc_unchecked_function
    &base::internal::PartitionCalloc,    // alloc_zero_initialized_function
    &base::internal::PartitionMemalign,  // alloc_aligned_function
    &base::internal::PartitionRealloc,   // realloc_function
    &base::internal::PartitionFree,      // free_function
    &base::internal::PartitionGetSizeEstimate,  // get_size_estimate_function
    &base::internal::PartitionBatchMalloc,      // batch_malloc_function
    &base::internal::PartitionBatchFree,        // batch_free_function
#if BUILDFLAG(IS_APPLE)
    // On Apple OSes, free_definite_size() is always called from free(), since
    // get_size_estimate() is used to determine whether an allocation belongs to
    // the current zone. It makes sense to optimize for it.
    &base::internal::PartitionFreeDefiniteSize,
#else
    nullptr,  // free_definite_size_function
#endif
    &base::internal::PartitionAlignedAlloc,    // aligned_malloc_function
    &base::internal::PartitionAlignedRealloc,  // aligned_realloc_function
    &base::internal::PartitionFree,            // aligned_free_function
    nullptr,                                   // next
};

// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
//
// TODO(lizeb): Implement the ones that are doable.

extern "C" {

#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}

SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return 0;
}

#endif  // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  base::SimplePartitionStatsDumper allocator_dumper;
  Allocator()->DumpStats("malloc", true, &allocator_dumper);
  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.

  base::SimplePartitionStatsDumper aligned_allocator_dumper;
  if (AlignedAllocator() != Allocator()) {
    AlignedAllocator()->DumpStats("posix_memalign", true,
                                  &aligned_allocator_dumper);
  }

  // Dump stats for nonscannable and nonquarantinable allocators.
  auto& nonscannable_allocator =
      base::internal::NonScannableAllocator::Instance();
  base::SimplePartitionStatsDumper nonscannable_allocator_dumper;
  if (auto* nonscannable_root = nonscannable_allocator.root())
    nonscannable_root->DumpStats("malloc", true,
                                 &nonscannable_allocator_dumper);
  auto& nonquarantinable_allocator =
      base::internal::NonQuarantinableAllocator::Instance();
  base::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
  if (auto* nonquarantinable_root = nonquarantinable_allocator.root())
    nonquarantinable_root->DumpStats("malloc", true,
                                     &nonquarantinable_allocator_dumper);

  struct mallinfo info = {0};
  info.arena = 0;  // Memory *not* allocated with mmap().

  // Memory allocated with mmap(), aka virtual size.
  info.hblks = allocator_dumper.stats().total_mmapped_bytes +
               aligned_allocator_dumper.stats().total_mmapped_bytes +
               nonscannable_allocator_dumper.stats().total_mmapped_bytes +
               nonquarantinable_allocator_dumper.stats().total_mmapped_bytes;
  // Resident bytes.
  info.hblkhd = allocator_dumper.stats().total_resident_bytes +
                aligned_allocator_dumper.stats().total_resident_bytes +
                nonscannable_allocator_dumper.stats().total_resident_bytes +
                nonquarantinable_allocator_dumper.stats().total_resident_bytes;
  // Allocated bytes.
  info.uordblks = allocator_dumper.stats().total_active_bytes +
                  aligned_allocator_dumper.stats().total_active_bytes +
                  nonscannable_allocator_dumper.stats().total_active_bytes +
                  nonquarantinable_allocator_dumper.stats().total_active_bytes;

  return info;
}
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)

}  // extern "C"
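
// A hypothetical sketch (not part of the original file): consuming the fields
// the mallinfo() override above fills in, via the glibc <malloc.h> interface:
//
//   struct mallinfo info = mallinfo();
//   printf("mmapped: %d resident: %d active: %d\n",
//          info.hblks, info.hblkhd, info.uordblks);
//
// Note that glibc's struct mallinfo uses int fields, so the byte sums computed
// above can overflow on large heaps; glibc 2.33 added mallinfo2() with size_t
// fields for exactly this reason.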

#if BUILDFLAG(IS_APPLE)

namespace base {
namespace allocator {

void InitializeDefaultAllocatorPartitionRoot() {
  // On OS_APPLE, the initialization of PartitionRoot uses memory allocations
  // internally, e.g. __builtin_available, and it's not easy to avoid it.
  // Thus, we initialize the PartitionRoot using the system default allocator
  // before we intercept the system default allocator.
  std::ignore = Allocator();
}

}  // namespace allocator
}  // namespace base

#endif  // BUILDFLAG(IS_APPLE)

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@@ -1,75 +0,0 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_

#include "base/allocator/allocator_shim.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/base_export.h"

namespace base {
namespace internal {

void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value);

class BASE_EXPORT PartitionAllocMalloc {
 public:
  static ThreadSafePartitionRoot* Allocator();
  // May return |nullptr|, will never return the same pointer as |Allocator()|.
  static ThreadSafePartitionRoot* OriginalAllocator();
  // May return the same pointer as |Allocator()|.
  static ThreadSafePartitionRoot* AlignedAllocator();
};

BASE_EXPORT void* PartitionMalloc(const base::allocator::AllocatorDispatch*,
                                  size_t size,
                                  void* context);

BASE_EXPORT void* PartitionMallocUnchecked(
    const base::allocator::AllocatorDispatch*,
    size_t size,
    void* context);

BASE_EXPORT void* PartitionCalloc(const base::allocator::AllocatorDispatch*,
                                  size_t n,
                                  size_t size,
                                  void* context);

BASE_EXPORT void* PartitionMemalign(const base::allocator::AllocatorDispatch*,
                                    size_t alignment,
                                    size_t size,
                                    void* context);

BASE_EXPORT void* PartitionAlignedAlloc(
    const base::allocator::AllocatorDispatch* dispatch,
    size_t size,
    size_t alignment,
    void* context);

BASE_EXPORT void* PartitionAlignedRealloc(
    const base::allocator::AllocatorDispatch* dispatch,
    void* address,
    size_t size,
    size_t alignment,
    void* context);

BASE_EXPORT void* PartitionRealloc(const base::allocator::AllocatorDispatch*,
                                   void* address,
                                   size_t size,
                                   void* context);

BASE_EXPORT void PartitionFree(const base::allocator::AllocatorDispatch*,
                               void* object,
                               void* context);

BASE_EXPORT size_t
PartitionGetSizeEstimate(const base::allocator::AllocatorDispatch*,
                         void* address,
                         void* context);

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
@@ -1,106 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <ostream>

#include "base/allocator/winheap_stubs_win.h"
#include "base/check.h"

namespace {

using base::allocator::AllocatorDispatch;

void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  return base::allocator::WinHeapMalloc(size);
}

void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
                               size_t n,
                               size_t elem_size,
                               void* context) {
  // Overflow check.
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n)
    return nullptr;

  void* result = DefaultWinHeapMallocImpl(self, size, context);
  if (result) {
    memset(result, 0, size);
  }
  return result;
}

void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
                                 size_t alignment,
                                 size_t size,
                                 void* context) {
  CHECK(false) << "The windows heap does not support memalign.";
  return nullptr;
}

void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
                                void* address,
                                size_t size,
                                void* context) {
  return base::allocator::WinHeapRealloc(address, size);
}

void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
                            void* address,
                            void* context) {
  base::allocator::WinHeapFree(address);
}

size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
                                         void* address,
                                         void* context) {
  return base::allocator::WinHeapGetSizeEstimate(address);
}

void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
                                      size_t size,
                                      size_t alignment,
                                      void* context) {
  return base::allocator::WinHeapAlignedMalloc(size, alignment);
}

void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
                                       void* ptr,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  return base::allocator::WinHeapAlignedRealloc(ptr, size, alignment);
}

void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
                                   void* ptr,
                                   void* context) {
  base::allocator::WinHeapAlignedFree(ptr);
}

}  // namespace

// Guarantee that default_dispatch is compile-time initialized to avoid using
// it before initialization (allocations before main in release builds with
// optimizations disabled).
constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &DefaultWinHeapMallocImpl,
    &DefaultWinHeapMallocImpl, /* alloc_unchecked_function */
    &DefaultWinHeapCallocImpl,
    &DefaultWinHeapMemalignImpl,
    &DefaultWinHeapReallocImpl,
    &DefaultWinHeapFreeImpl,
    &DefaultWinHeapGetSizeEstimateImpl,
    nullptr, /* batch_malloc_function */
    nullptr, /* batch_free_function */
    nullptr, /* free_definite_size_function */
    &DefaultWinHeapAlignedMallocImpl,
    &DefaultWinHeapAlignedReallocImpl,
    &DefaultWinHeapAlignedFreeImpl,
    nullptr, /* next */
};
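The comment above the table is the load-bearing detail: the dispatch table must be constant-initialized so that allocations running before main() never observe a half-initialized table. A minimal, self-contained sketch of that idea in plain standard C++ follows; Dispatch and MyAlloc are illustrative stand-ins, not the shim's real types.

#include <cstddef>
#include <cstdio>

struct Dispatch {
  void* (*alloc)(std::size_t size);
};

void* MyAlloc(std::size_t size) {
  std::printf("alloc(%zu)\n", size);
  return nullptr;  // Illustration only; a real table would allocate.
}

// constexpr forces constant initialization: the function pointer is baked
// into the binary, so even a call made from another static initializer
// (i.e. before main()) sees a valid table instead of a zeroed one.
constexpr Dispatch kDefaultDispatch = {&MyAlloc};

int main() {
  kDefaultDispatch.alloc(16);
}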
@@ -1,166 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_

// Preempt the default new/delete C++ symbols so they call the shim entry
// points. This file is strongly inspired by tcmalloc's
// libc_override_redefine.h.

#include <new>

#include "base/allocator/allocator_shim_internals.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"

// std::align_val_t isn't available until C++17, but we want to override aligned
// new/delete anyway to prevent a possible situation where a library gets loaded
// in that uses the aligned operators. We want to avoid a situation where
// separate heaps are used.
// TODO(thomasanderson): Remove this once building with C++17 or later.
#if defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606
#define ALIGN_VAL_T std::align_val_t
#define ALIGN_LINKAGE
#define ALIGN_NEW operator new
#define ALIGN_NEW_NOTHROW operator new
#define ALIGN_DEL operator delete
#define ALIGN_DEL_SIZED operator delete
#define ALIGN_DEL_NOTHROW operator delete
#define ALIGN_NEW_ARR operator new[]
#define ALIGN_NEW_ARR_NOTHROW operator new[]
#define ALIGN_DEL_ARR operator delete[]
#define ALIGN_DEL_ARR_SIZED operator delete[]
#define ALIGN_DEL_ARR_NOTHROW operator delete[]
#else
#define ALIGN_VAL_T size_t
#define ALIGN_LINKAGE extern "C"
#if BUILDFLAG(IS_WIN)
#error "Mangling is different on these platforms."
#else
#define ALIGN_NEW _ZnwmSt11align_val_t
#define ALIGN_NEW_NOTHROW _ZnwmSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL _ZdlPvSt11align_val_t
#define ALIGN_DEL_SIZED _ZdlPvmSt11align_val_t
#define ALIGN_DEL_NOTHROW _ZdlPvSt11align_val_tRKSt9nothrow_t
#define ALIGN_NEW_ARR _ZnamSt11align_val_t
#define ALIGN_NEW_ARR_NOTHROW _ZnamSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL_ARR _ZdaPvSt11align_val_t
#define ALIGN_DEL_ARR_SIZED _ZdaPvmSt11align_val_t
#define ALIGN_DEL_ARR_NOTHROW _ZdaPvSt11align_val_tRKSt9nothrow_t
#endif
#endif

#if !BUILDFLAG(IS_APPLE)
#define SHIM_CPP_SYMBOLS_EXPORT SHIM_ALWAYS_EXPORT
#else
// On Apple OSes, prefer not exporting these symbols (as this reverts to the
// default behavior, they are still exported in e.g. component builds). This is
// partly due to intentional limits on exported symbols in the main library, but
// it is also needless, since no library used on macOS imports these.
//
// TODO(lizeb): It may not be necessary anywhere to export these.
#define SHIM_CPP_SYMBOLS_EXPORT NOINLINE
#endif

SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size) {
  return ShimCppNew(size);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size) {
  return ShimCppNew(size);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size,
                                           const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size,
                                             const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
                                             const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
                                               const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p, size_t) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p, size_t) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW(std::size_t size,
                                                      ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL(void* p,
                                                     ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR(
    std::size_t size,
    ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL_ARR(void* p,
                                                         ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}
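The file relies on a core language rule rather than any Chromium machinery: a program may provide its own definitions of the global allocation operators, and those replace the standard library's versions for every new/delete in the process. A standalone sketch of that mechanism follows; the counter is illustrative and deliberately simple (not thread-safe), not part of the shim.

#include <cstdio>
#include <cstdlib>
#include <new>

static std::size_t g_new_calls = 0;

void* operator new(std::size_t size) {
  ++g_new_calls;
  if (size == 0)
    size = 1;  // operator new must return a unique, non-null pointer.
  if (void* p = std::malloc(size))
    return p;
  throw std::bad_alloc();
}

void operator delete(void* p) noexcept {
  std::free(p);
}

int main() {
  int* x = new int(42);
  delete x;
  std::printf("operator new called %zu times\n", g_new_calls);
}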
@@ -1,147 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_

// This header overrides the __wrap_X symbols when using the link-time
// -Wl,-wrap,malloc shim-layer approach (see README.md).
// All references to malloc, free, etc. within the linker unit that gets the
// -wrap linker flags (e.g., libchrome.so) will be rewritten by the linker as
// references to __wrap_malloc, __wrap_free, which are defined here.

#include <algorithm>
#include <cstring>

#include "base/allocator/allocator_shim_internals.h"

extern "C" {

SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
  return ShimMemalign(align, size, nullptr);
}

SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
                                             size_t align,
                                             size_t size) {
  return ShimPosixMemalign(res, align, size);
}

SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
  return ShimRealloc(address, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
  return ShimValloc(size, nullptr);
}

const size_t kPathMaxSize = 8192;
static_assert(kPathMaxSize >= PATH_MAX, "");

extern char* __wrap_strdup(const char* str);

// Override <stdlib.h>

extern char* __real_realpath(const char* path, char* resolved_path);

SHIM_ALWAYS_EXPORT char* __wrap_realpath(const char* path,
                                         char* resolved_path) {
  if (resolved_path)
    return __real_realpath(path, resolved_path);

  char buffer[kPathMaxSize];
  if (!__real_realpath(path, buffer))
    return nullptr;
  return __wrap_strdup(buffer);
}

// Override <string.h> functions

SHIM_ALWAYS_EXPORT char* __wrap_strdup(const char* str) {
  std::size_t length = std::strlen(str) + 1;
  void* buffer = ShimMalloc(length, nullptr);
  if (!buffer)
    return nullptr;
  return reinterpret_cast<char*>(std::memcpy(buffer, str, length));
}

SHIM_ALWAYS_EXPORT char* __wrap_strndup(const char* str, size_t n) {
  std::size_t length = std::min(std::strlen(str), n);
  char* buffer = reinterpret_cast<char*>(ShimMalloc(length + 1, nullptr));
  if (!buffer)
    return nullptr;
  std::memcpy(buffer, str, length);
  buffer[length] = '\0';
  return buffer;
}

// Override <unistd.h>

extern char* __real_getcwd(char* buffer, size_t size);

SHIM_ALWAYS_EXPORT char* __wrap_getcwd(char* buffer, size_t size) {
  if (buffer)
    return __real_getcwd(buffer, size);

  if (!size)
    size = kPathMaxSize;
  char local_buffer[size];
  if (!__real_getcwd(local_buffer, size))
    return nullptr;
  return __wrap_strdup(local_buffer);
}

// Override stdio.h

// This is non-standard (_GNU_SOURCE only), but implemented by Bionic on
// Android, and used by libc++.
SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
                                        const char* fmt,
                                        va_list va_args) {
  constexpr int kInitialSize = 128;
  *strp = static_cast<char*>(
      malloc(kInitialSize));  // Our malloc() doesn't return nullptr.

  int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args);
  *strp = static_cast<char*>(realloc(*strp, actual_size + 1));

  // Now we know the size. This is not very efficient, but we cannot really do
  // better without accessing internal libc functions, or reimplementing
  // *printf().
  //
  // This is very lightly used in Chromium in practice, see crbug.com/116558 for
  // details.
  if (actual_size >= kInitialSize)
    return vsnprintf(*strp, actual_size + 1, fmt, va_args);

  return actual_size;
}

SHIM_ALWAYS_EXPORT int __wrap_asprintf(char** strp, const char* fmt, ...) {
  va_list va_args;
  va_start(va_args, fmt);
  int retval = vasprintf(strp, fmt, va_args);
  va_end(va_args);
  return retval;
}

}  // extern "C"
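For readers who have not used the linker feature the header comment refers to: with a GNU-compatible linker, -Wl,--wrap=malloc makes every undefined reference to malloc in the wrapped objects resolve to __wrap_malloc, and exposes the original symbol under __real_malloc. A minimal self-contained sketch, built with e.g. g++ demo.cc -Wl,--wrap=malloc:

#include <cstdio>
#include <cstdlib>

extern "C" {
// Provided by the linker: the original malloc, reachable under this name.
void* __real_malloc(size_t size);

// All wrapped references to malloc() land here instead.
void* __wrap_malloc(size_t size) {
  std::fprintf(stderr, "malloc(%zu)\n", size);
  return __real_malloc(size);
}
}

int main() {
  void* p = std::malloc(32);  // Redirected to __wrap_malloc at link time.
  std::free(p);
  return 0;
}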
@@ -1,377 +0,0 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#error This header must be included iff PartitionAlloc-Everywhere is enabled.
#endif

#include <string.h>

#include <tuple>

#include "base/allocator/early_zone_registration_mac.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/bits.h"
#include "base/logging.h"

namespace partition_alloc {

// Defined in base/allocator/partition_allocator/partition_root.cc
void PartitionAllocMallocHookOnBeforeForkInParent();
void PartitionAllocMallocHookOnAfterForkInParent();
void PartitionAllocMallocHookOnAfterForkInChild();

}  // namespace partition_alloc

namespace base::allocator {

namespace {

// malloc_introspection_t's callback functions for our own zone

kern_return_t MallocIntrospectionEnumerator(task_t task,
                                            void*,
                                            unsigned type_mask,
                                            vm_address_t zone_address,
                                            memory_reader_t reader,
                                            vm_range_recorder_t recorder) {
  // Should enumerate all memory regions allocated by this allocator, but not
  // implemented just because of no use case for now.
  return KERN_FAILURE;
}

size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
  return base::bits::AlignUp(size, partition_alloc::internal::kAlignment);
}

boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
  // Should check the consistency of the allocator implementing this malloc
  // zone, but not implemented just because of no use case for now.
  return true;
}

void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
  // Should print the current states of the zone for debugging / investigation
  // purpose, but not implemented just because of no use case for now.
}

void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
  // Should enable logging of the activities on the given `address`, but not
  // implemented just because of no use case for now.
}

void MallocIntrospectionForceLock(malloc_zone_t* zone) {
  // Called before fork(2) to acquire the lock.
  partition_alloc::PartitionAllocMallocHookOnBeforeForkInParent();
}

void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
  // Called in the parent process after fork(2) to release the lock.
  partition_alloc::PartitionAllocMallocHookOnAfterForkInParent();
}

void MallocIntrospectionStatistics(malloc_zone_t* zone,
                                   malloc_statistics_t* stats) {
  // Should report the memory usage correctly, but not implemented just because
  // of no use case for now.
  stats->blocks_in_use = 0;
  stats->size_in_use = 0;
  stats->max_size_in_use = 0;  // High water mark of touched memory
  stats->size_allocated = 0;   // Reserved in memory
}

boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
  // Should return true if the underlying PartitionRoot is locked, but not
  // implemented just because this function seems not used effectively.
  return false;
}

boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
  return false;
}

void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
}

void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
  // 'discharge' is not supported.
}

void MallocIntrospectionEnumerateDischargedPointers(
    malloc_zone_t* zone,
    void (^report_discharged)(void* memory, void* info)) {
  // 'discharge' is not supported.
}

void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
  // Called in a child process after fork(2) to re-initialize the lock.
  partition_alloc::PartitionAllocMallocHookOnAfterForkInChild();
}

void MallocIntrospectionPrintTask(task_t task,
                                  unsigned level,
                                  vm_address_t zone_address,
                                  memory_reader_t reader,
                                  print_task_printer_t printer) {
  // Should print the current states of another process's zone for debugging /
  // investigation purpose, but not implemented just because of no use case
  // for now.
}

void MallocIntrospectionTaskStatistics(task_t task,
                                       vm_address_t zone_address,
                                       memory_reader_t reader,
                                       malloc_statistics_t* stats) {
  // Should report the memory usage in another process's zone, but not
  // implemented just because of no use case for now.
  stats->blocks_in_use = 0;
  stats->size_in_use = 0;
  stats->max_size_in_use = 0;  // High water mark of touched memory
  stats->size_allocated = 0;   // Reserved in memory
}

// malloc_zone_t's callback functions for our own zone

size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
  return ShimGetSizeEstimate(ptr, nullptr);
}

void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
  return ShimMalloc(size, nullptr);
}

void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
  return ShimValloc(size, nullptr);
}

void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
  return ShimFree(ptr, nullptr);
}

void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

void MallocZoneDestroy(malloc_zone_t* zone) {
  // No support to destroy the zone for now.
}

void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
  return ShimMemalign(alignment, size, nullptr);
}

void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
  return ShimFreeDefiniteSize(ptr, size, nullptr);
}

unsigned MallocZoneBatchMalloc(malloc_zone_t* zone,
                               size_t size,
                               void** results,
                               unsigned num_requested) {
  return ShimBatchMalloc(size, results, num_requested, nullptr);
}

void MallocZoneBatchFree(malloc_zone_t* zone,
                         void** to_be_freed,
                         unsigned num) {
  return ShimBatchFree(to_be_freed, num, nullptr);
}

malloc_introspection_t g_mac_malloc_introspection{};
malloc_zone_t g_mac_malloc_zone{};

malloc_zone_t* GetDefaultMallocZone() {
  // malloc_default_zone() does not return... the default zone, but the initial
  // one. The default one is the first element of the default zone array.
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  return reinterpret_cast<malloc_zone_t*>(zones[0]);
}

bool IsAlreadyRegistered() {
  // HACK: This should really only be called once, but it is not.
  //
  // This function is a static constructor of its binary. If it is included in a
  // dynamic library, then the same process may end up executing this code
  // multiple times, once per library. As a consequence, each new library will
  // add its own allocator as the default zone. Aside from splitting the heap
  // further, the main issue arises if/when the last library to be loaded
  // (dlopen()-ed) gets dlclose()-ed.
  //
  // See crbug.com/1271139 for details.
  //
  // In this case, subsequent free() will be routed by libmalloc to the deleted
  // zone (since its code has been unloaded from memory), and crash inside
  // libsystem's free(). This in practice happens as soon as dlclose() is
  // called, inside the dynamic linker (dyld).
  //
  // Since we are talking about different libraries, and issues inside the
  // dynamic linker, we cannot use a global static variable (which would be
  // per-library), or anything from pthread.
  //
  // The solution used here is to check whether the current default zone is
  // already ours, in which case we are not the first dynamic library here, and
  // should do nothing. This is racy, and hacky.
  vm_address_t* zones = nullptr;
  unsigned int zone_count = 0;
  // *Not* using malloc_default_zone(), as it seems to be hardcoded to return
  // something else than the default zone. See the difference between
  // malloc_default_zone() and inline_malloc_default_zone() in Apple's malloc.c
  // (in libmalloc).
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  // Checking all the zones, in case someone registered their own zone on top
  // of our own.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);

    // strcmp() and not a pointer comparison, as the zone was registered from
    // another library, the pointers don't match.
    if (zone->zone_name &&
        (strcmp(zone->zone_name, partition_alloc::kPartitionAllocZoneName) ==
         0)) {
      // This zone is provided by PartitionAlloc, so this function has been
      // called from another library (or the main executable), nothing to do.
      //
      // This should be a crash, ideally, but callers do it, so only warn, for
      // now.
      RAW_LOG(ERROR,
              "Trying to load the allocator multiple times. This is *not* "
              "supported.");
      return true;
    }
  }

  return false;
}

void InitializeZone() {
  g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
  g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
  g_mac_malloc_introspection.check = MallocIntrospectionCheck;
  g_mac_malloc_introspection.print = MallocIntrospectionPrint;
  g_mac_malloc_introspection.log = MallocIntrospectionLog;
  g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
  g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
  g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
  g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
  g_mac_malloc_introspection.enable_discharge_checking =
      MallocIntrospectionEnableDischargeChecking;
  g_mac_malloc_introspection.disable_discharge_checking =
      MallocIntrospectionDisableDischargeChecking;
  g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
  g_mac_malloc_introspection.enumerate_discharged_pointers =
      MallocIntrospectionEnumerateDischargedPointers;
  g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
  g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
  g_mac_malloc_introspection.task_statistics =
      MallocIntrospectionTaskStatistics;
  // `version` member indicates which APIs are supported in this zone.
  // version >= 5: memalign is supported
  // version >= 6: free_definite_size is supported
  // version >= 7: introspect's discharge family is supported
  // version >= 8: pressure_relief is supported
  // version >= 9: introspect.reinit_lock is supported
  // version >= 10: claimed_address is supported
  // version >= 11: introspect.print_task is supported
  // version >= 12: introspect.task_statistics is supported
  g_mac_malloc_zone.version = partition_alloc::kZoneVersion;
  g_mac_malloc_zone.zone_name = partition_alloc::kPartitionAllocZoneName;
  g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
  g_mac_malloc_zone.size = MallocZoneSize;
  g_mac_malloc_zone.malloc = MallocZoneMalloc;
  g_mac_malloc_zone.calloc = MallocZoneCalloc;
  g_mac_malloc_zone.valloc = MallocZoneValloc;
  g_mac_malloc_zone.free = MallocZoneFree;
  g_mac_malloc_zone.realloc = MallocZoneRealloc;
  g_mac_malloc_zone.destroy = MallocZoneDestroy;
  g_mac_malloc_zone.batch_malloc = MallocZoneBatchMalloc;
  g_mac_malloc_zone.batch_free = MallocZoneBatchFree;
  g_mac_malloc_zone.memalign = MallocZoneMemalign;
  g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
  g_mac_malloc_zone.pressure_relief = nullptr;
  g_mac_malloc_zone.claimed_address = nullptr;
}

// Replaces the default malloc zone with our own malloc zone backed by
// PartitionAlloc. Since we'd like to make as much code as possible use our
// own memory allocator (and reduce bugs caused by mixed use of the system
// allocator and our own allocator), run the following function
// `InitializeDefaultAllocatorPartitionRoot` with the highest priority.
//
// Note that, despite the highest priority of the initialization order,
// [NSThread init] runs before InitializeDefaultMallocZoneWithPartitionAlloc
// unfortunately and allocates memory with the system allocator. Plus, the
// allocated memory will be deallocated with the default zone's `free` at that
// moment without using a zone dispatcher. Hence, our own `free` function
// receives an address allocated by the system allocator.
__attribute__((constructor(0))) void
InitializeDefaultMallocZoneWithPartitionAlloc() {
  if (IsAlreadyRegistered())
    return;

  // Instantiate the existing regular and purgeable zones in order to make the
  // existing purgeable zone use the existing regular zone since PartitionAlloc
  // doesn't support a purgeable zone.
  std::ignore = malloc_default_zone();
  std::ignore = malloc_default_purgeable_zone();

  // Initialize the default allocator's PartitionRoot with the existing zone.
  InitializeDefaultAllocatorPartitionRoot();

  // Create our own malloc zone.
  InitializeZone();

  malloc_zone_t* system_default_zone = GetDefaultMallocZone();
  if (strcmp(system_default_zone->zone_name,
             partition_alloc::kDelegatingZoneName) == 0) {
    // The first zone is our zone, we can unregister it, replacing it with the
    // new one. This relies on a precise zone setup, done in
    // |EarlyMallocZoneRegistration()|.
    malloc_zone_register(&g_mac_malloc_zone);
    malloc_zone_unregister(system_default_zone);
    return;
  }

  // Not in the path where the zone was registered early. This is either racy,
  // or fine if the current process is not hosting multiple threads.
  //
  // This path is fine for e.g. most unit tests.
  //
  // Make our own zone the default zone.
  //
  // Put our own zone at the last position, so that it promotes to the default
  // zone. The implementation logic of malloc_zone_unregister is:
  //   zone_table.swap(unregistered_zone, last_zone);
  //   zone_table.shrink_size_by_1();
  malloc_zone_register(&g_mac_malloc_zone);
  malloc_zone_unregister(system_default_zone);
  // Between malloc_zone_unregister(system_default_zone) (above) and
  // malloc_zone_register(system_default_zone) (below), i.e. while
  // system_default_zone is absent, it's possible that another thread calls
  // free(ptr), hits a "no zone found" error, and crashes the process.
  malloc_zone_register(system_default_zone);

  // Confirm that our own zone is now the default zone.
  CHECK_EQ(GetDefaultMallocZone(), &g_mac_malloc_zone);
}

}  // namespace

}  // namespace base::allocator
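As a companion to the GetDefaultMallocZone() logic above, here is a hedged, macOS-only sketch that just lists the registered zones; per the comments in the file, the first element of the array returned by malloc_get_all_zones() is the current default zone:

#include <mach/mach.h>
#include <malloc/malloc.h>

#include <cstdio>

int main() {
  vm_address_t* zones = nullptr;
  unsigned int zone_count = 0;
  if (malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count) !=
      KERN_SUCCESS) {
    return 1;
  }
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    std::printf("zone %u: %s%s\n", i,
                zone->zone_name ? zone->zone_name : "(unnamed)",
                i == 0 ? " (default)" : "");
  }
  return 0;
}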
@@ -1,60 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_

#include "base/allocator/malloc_zone_functions_mac.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
  MallocZoneFunctions new_functions;
  memset(&new_functions, 0, sizeof(MallocZoneFunctions));
  new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
    return ShimGetSizeEstimate(ptr, zone);
  };
  new_functions.malloc = [](malloc_zone_t* zone, size_t size) -> void* {
    return ShimMalloc(size, zone);
  };
  new_functions.calloc = [](malloc_zone_t* zone, size_t n,
                            size_t size) -> void* {
    return ShimCalloc(n, size, zone);
  };
  new_functions.valloc = [](malloc_zone_t* zone, size_t size) -> void* {
    return ShimValloc(size, zone);
  };
  new_functions.free = [](malloc_zone_t* zone, void* ptr) {
    ShimFree(ptr, zone);
  };
  new_functions.realloc = [](malloc_zone_t* zone, void* ptr,
                             size_t size) -> void* {
    return ShimRealloc(ptr, size, zone);
  };
  new_functions.batch_malloc = [](struct _malloc_zone_t* zone, size_t size,
                                  void** results,
                                  unsigned num_requested) -> unsigned {
    return ShimBatchMalloc(size, results, num_requested, zone);
  };
  new_functions.batch_free = [](struct _malloc_zone_t* zone, void** to_be_freed,
                                unsigned num_to_be_freed) -> void {
    ShimBatchFree(to_be_freed, num_to_be_freed, zone);
  };
  new_functions.memalign = [](malloc_zone_t* zone, size_t alignment,
                              size_t size) -> void* {
    return ShimMemalign(alignment, size, zone);
  };
  new_functions.free_definite_size = [](malloc_zone_t* zone, void* ptr,
                                        size_t size) {
    ShimFreeDefiniteSize(ptr, size, zone);
  };
  return new_functions;
}

}  // namespace allocator
}  // namespace base
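One design point worth a gloss: the slots above are filled with capture-less lambdas, which works because such a lambda converts implicitly to an ordinary function pointer, the type the zone function slots expect. The conversion in isolation:

#include <cstdio>

int main() {
  // A capture-less lambda converts to a plain function pointer.
  void (*log_value)(int) = [](int value) { std::printf("%d\n", value); };
  log_value(7);
}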
@@ -1,178 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header defines symbols to override the same functions in the Visual C++
// CRT implementation.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_

#include <malloc.h>

#include <windows.h>

#include "base/allocator/allocator_shim_internals.h"

// Even though most C++ allocation operators can be left alone since the
// interception works at a lower level, these ones should be
// overridden. Otherwise they redirect to malloc(), which is configured to crash
// with an OOM in failure cases, such as allocation requests that are too large.
SHIM_ALWAYS_EXPORT void* operator new(size_t size,
                                      const std::nothrow_t&) noexcept {
  return ShimCppNewNoThrow(size);
}

SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
                                        const std::nothrow_t&) noexcept {
  return ShimCppNewNoThrow(size);
}

extern "C" {

void* (*malloc_unchecked)(size_t) = &base::allocator::UncheckedAlloc;

namespace {

int win_new_mode = 0;

}  // namespace

// This function behaves similarly to MSVC's _set_new_mode.
// If flag is 0 (default), calls to malloc will behave normally.
// If flag is 1, calls to malloc will behave like calls to new,
// and the std_new_handler will be invoked on failure.
// Returns the previous mode.
//
// Replaces _set_new_mode in ucrt\heap\new_mode.cpp
int _set_new_mode(int flag) {
  // The MS CRT calls this function early on in startup, so this serves as a low
  // overhead proof that the allocator shim is in place for this process.
  base::allocator::g_is_win_shim_layer_initialized = true;
  int old_mode = win_new_mode;
  win_new_mode = flag;

  base::allocator::SetCallNewHandlerOnMallocFailure(win_new_mode != 0);

  return old_mode;
}

// Replaces _query_new_mode in ucrt\heap\new_mode.cpp
int _query_new_mode() {
  return win_new_mode;
}

// These symbols override the CRT's implementation of the same functions.
__declspec(restrict) void* malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

void free(void* ptr) {
  ShimFree(ptr, nullptr);
}

__declspec(restrict) void* realloc(void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

__declspec(restrict) void* calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

// _msize() is the Windows equivalent of malloc_size().
size_t _msize(void* memblock) {
  return ShimGetSizeEstimate(memblock, nullptr);
}

__declspec(restrict) void* _aligned_malloc(size_t size, size_t alignment) {
  return ShimAlignedMalloc(size, alignment, nullptr);
}

__declspec(restrict) void* _aligned_realloc(void* address,
                                            size_t size,
                                            size_t alignment) {
  return ShimAlignedRealloc(address, size, alignment, nullptr);
}

void _aligned_free(void* address) {
  ShimAlignedFree(address, nullptr);
}

// _recalloc_base is called by CRT internally.
__declspec(restrict) void* _recalloc_base(void* block,
                                          size_t count,
                                          size_t size) {
  const size_t old_block_size = (block != nullptr) ? _msize(block) : 0;
  base::CheckedNumeric<size_t> new_block_size_checked = count;
  new_block_size_checked *= size;
  const size_t new_block_size = new_block_size_checked.ValueOrDie();

  void* const new_block = realloc(block, new_block_size);

  if (new_block != nullptr && old_block_size < new_block_size) {
    memset(static_cast<char*>(new_block) + old_block_size, 0,
           new_block_size - old_block_size);
  }

  return new_block;
}

__declspec(restrict) void* _malloc_base(size_t size) {
  return malloc(size);
}

__declspec(restrict) void* _calloc_base(size_t n, size_t size) {
  return calloc(n, size);
}

void _free_base(void* block) {
  free(block);
}

__declspec(restrict) void* _recalloc(void* block, size_t count, size_t size) {
  return _recalloc_base(block, count, size);
}

// The following uncommon _aligned_* routines are not used in Chromium and have
// been shimmed to immediately crash to ensure that implementations are added if
// uses are introduced.
__declspec(restrict) void* _aligned_recalloc(void* address,
                                             size_t num,
                                             size_t size,
                                             size_t alignment) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

size_t _aligned_msize(void* address, size_t alignment, size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_malloc(size_t size,
                                                  size_t alignment,
                                                  size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_realloc(void* address,
                                                   size_t size,
                                                   size_t alignment,
                                                   size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_recalloc(void* address,
                                                    size_t num,
                                                    size_t size,
                                                    size_t alignment,
                                                    size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

}  // extern "C"

#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
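A portable restatement of the _recalloc_base contract implemented above: reallocate to count * size bytes and zero-fill any newly added tail. The overflow check is written out by hand instead of base::CheckedNumeric, and the old_size parameter stands in for _msize(), which has no portable equivalent:

#include <cstdint>
#include <cstdlib>
#include <cstring>

void* RecallocSketch(void* block, size_t old_size, size_t count, size_t size) {
  // Reject count * size overflow up front (CheckedNumeric::ValueOrDie would
  // crash here instead of returning nullptr).
  if (size != 0 && count > SIZE_MAX / size)
    return nullptr;
  const size_t new_size = count * size;

  void* new_block = std::realloc(block, new_size);
  if (new_block && old_size < new_size) {
    // Zero-fill only the newly added tail, as _recalloc does.
    std::memset(static_cast<char*>(new_block) + old_size, 0,
                new_size - old_size);
  }
  return new_block;
}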
24
src/base/allocator/dispatcher/configuration.h
Normal file
@@ -0,0 +1,24 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
#define BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_

#include <cstddef>

namespace base::allocator::dispatcher::configuration {

// The maximum number of optional observers that may be present depending on
// command line parameters.
constexpr size_t kMaximumNumberOfOptionalObservers = 4;

// The total number of observers, including mandatory and optional observers.
// The number of observers primarily affects performance at allocation time.
// The current value of 4 is not backed by hard evidence; keep in mind that
// even a single observer can severely impact performance.
constexpr size_t kMaximumNumberOfObservers = 4;

}  // namespace base::allocator::dispatcher::configuration

#endif  // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
114
src/base/allocator/dispatcher/dispatcher.cc
Normal file
@@ -0,0 +1,114 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/dispatcher.h"

#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/no_destructor.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/shim/allocator_shim.h"

#if DCHECK_IS_ON()
#include <atomic>
#endif

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_hooks.h"  // nogncheck
#endif

namespace base::allocator::dispatcher {

// The private implementation of Dispatcher.
struct Dispatcher::Impl {
  void Initialize(const internal::DispatchData& dispatch_data) {
#if DCHECK_IS_ON()
    DCHECK(!is_initialized_check_flag_.test_and_set());
#endif

    dispatch_data_ = dispatch_data;
    ConnectToEmitters(dispatch_data_);
  }

  void Reset() {
#if DCHECK_IS_ON()
    DCHECK([&] {
      auto const was_set = is_initialized_check_flag_.test_and_set();
      is_initialized_check_flag_.clear();
      return was_set;
    }());
#endif

    DisconnectFromEmitters(dispatch_data_);
    dispatch_data_ = {};
  }

 private:
  // Connect the hooks to the memory subsystem. In some cases, most notably when
  // we have no observers at all, the hooks will be invalid and must NOT be
  // connected. This way we prevent notifications when no observers are
  // present.
  static void ConnectToEmitters(const internal::DispatchData& dispatch_data) {
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
    if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
      allocator_shim::InsertAllocatorDispatch(allocator_dispatch);
    }
#endif

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
    {
      auto* const allocation_hook = dispatch_data.GetAllocationObserverHook();
      auto* const free_hook = dispatch_data.GetFreeObserverHook();
      if (allocation_hook && free_hook) {
        partition_alloc::PartitionAllocHooks::SetObserverHooks(allocation_hook,
                                                               free_hook);
      }
    }
#endif
  }

  static void DisconnectFromEmitters(internal::DispatchData& dispatch_data) {
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
    if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
      allocator_shim::RemoveAllocatorDispatchForTesting(
          allocator_dispatch);  // IN-TEST
    }
#endif

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
    partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif
  }

  // Information on the hooks.
  internal::DispatchData dispatch_data_;
#if DCHECK_IS_ON()
  // Indicator if the dispatcher has been initialized before.
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
  std::atomic_flag is_initialized_check_flag_ = ATOMIC_FLAG_INIT;
#else
  std::atomic_flag is_initialized_check_flag_;
#endif
#endif
};

Dispatcher::Dispatcher() : impl_(std::make_unique<Impl>()) {}

Dispatcher::~Dispatcher() = default;

Dispatcher& Dispatcher::GetInstance() {
  static base::NoDestructor<Dispatcher> instance;
  return *instance;
}

void Dispatcher::Initialize(const internal::DispatchData& dispatch_data) {
  impl_->Initialize(dispatch_data);
}

void Dispatcher::ResetForTesting() {
  impl_->Reset();
}
}  // namespace base::allocator::dispatcher
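The #if around is_initialized_check_flag_ above deserves a gloss: before C++20, std::atomic_flag had to be initialized with ATOMIC_FLAG_INIT, while C++20, which advertises the change via the __cpp_lib_atomic_value_initialization feature-test macro, guarantees that the default constructor initializes the flag to the clear state and deprecates the macro. Both spellings side by side:

#include <atomic>

struct PreCpp20 {
  // Required before C++20; without it the flag's initial state was
  // unspecified.
  std::atomic_flag initialized = ATOMIC_FLAG_INIT;
};

struct Cpp20 {
  // Since C++20 the default constructor initializes the flag to clear.
  std::atomic_flag initialized;
};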
75
src/base/allocator/dispatcher/dispatcher.h
Normal file
@@ -0,0 +1,75 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_

#include <memory>

#include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h"

namespace base::allocator::dispatcher {

namespace internal {
struct DispatchData;
}

// Dispatcher serves as the top level instance for managing the dispatch
// mechanism. The class instance manages connections to the various memory
// subsystems such as PartitionAlloc. To keep the public interface as lean as
// possible it uses a pimpl pattern.
class BASE_EXPORT Dispatcher {
 public:
  static Dispatcher& GetInstance();

  Dispatcher();

  // Initialize the dispatch mechanism with the given tuple of observers. The
  // observers must be valid (this is only DCHECKed internally at
  // initialization, but not verified further).
  // If Initialize is called multiple times, the first one wins. All later
  // invocations are silently ignored. Initialization is protected from
  // concurrent invocations. In case of concurrent accesses, the first one to
  // get the lock wins.
  // The dispatcher invokes the following functions on the observers:
  //   void OnAllocation(void* address,
  //                     size_t size,
  //                     AllocationSubsystem sub_system,
  //                     const char* type_name);
  //   void OnFree(void* address);
  //
  // Note: The dispatcher mechanism does NOT bring systematic protection against
  // recursive invocations. That is, observers which allocate memory on the
  // heap, i.e. through dynamically allocated containers or by using the
  // CHECK macro, are responsible for breaking these recursions! A minimal
  // observer sketch follows this file.
  template <typename... ObserverTypes>
  void Initialize(const std::tuple<ObserverTypes...>& observers) {
    // Get the hooks for running these observers and pass them to further
    // initialization.
    Initialize(internal::GetNotificationHooks(observers));
  }

  // The following functions provide an interface to setup and tear down the
  // dispatcher when testing. This must NOT be used from production code since
  // the hooks cannot be removed reliably under all circumstances.
  template <typename ObserverType>
  void InitializeForTesting(ObserverType* observer) {
    Initialize(std::make_tuple(observer));
  }

  void ResetForTesting();

 private:
  // Structure and pointer to the private implementation.
  struct Impl;
  std::unique_ptr<Impl> const impl_;

  ~Dispatcher();

  void Initialize(const internal::DispatchData& dispatch_data);
};
}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
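A minimal observer sketch matching the interface documented in the class comment above. AllocationCounter is hypothetical, and the location of the AllocationSubsystem enum is an assumption here; take the exact types from the real dispatcher headers:

#include <atomic>
#include <cstddef>

// Assumption: AllocationSubsystem is provided by the dispatcher's headers.
using base::allocator::dispatcher::AllocationSubsystem;

class AllocationCounter {
 public:
  // Called by the dispatcher for every allocation. Note: must not allocate
  // itself, per the recursion warning above; plain atomics are safe.
  void OnAllocation(void* address,
                    size_t size,
                    AllocationSubsystem sub_system,
                    const char* type_name) {
    allocated_bytes_.fetch_add(size, std::memory_order_relaxed);
  }

  // Called by the dispatcher for every free.
  void OnFree(void* address) {
    free_calls_.fetch_add(1, std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t> allocated_bytes_{0};
  std::atomic<size_t> free_calls_{0};
};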
205
src/base/allocator/dispatcher/initializer.h
Normal file
@@ -0,0 +1,205 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_

#include <tuple>
#include <utility>

#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/internal/tools.h"

namespace base::allocator::dispatcher {
namespace internal {

// Filter the passed observers and perform initialization of the passed
// dispatcher.
template <size_t CurrentIndex,
          typename DispatcherType,
          typename CheckObserverPredicate,
          typename VerifiedObservers,
          typename UnverifiedObservers,
          size_t... IndicesToSelect>
inline void DoInitialize(DispatcherType& dispatcher,
                         CheckObserverPredicate check_observer,
                         const VerifiedObservers& verified_observers,
                         const UnverifiedObservers& unverified_observers,
                         std::index_sequence<IndicesToSelect...> indices) {
  if constexpr (CurrentIndex < std::tuple_size_v<UnverifiedObservers>) {
    // We still have some items left to handle.
    if (check_observer(std::get<CurrentIndex>(unverified_observers))) {
      // The current observer is valid. Hence, append the index of the current
      // item to the set of indices and head on to the next item.
      DoInitialize<CurrentIndex + 1>(
          dispatcher, check_observer, verified_observers, unverified_observers,
          std::index_sequence<IndicesToSelect..., CurrentIndex>{});
    } else {
      // The current observer is not valid. Hence, head on to the next item with
      // an unaltered list of indices.
      DoInitialize<CurrentIndex + 1>(dispatcher, check_observer,
                                     verified_observers, unverified_observers,
                                     indices);
    }
  } else if constexpr (CurrentIndex == std::tuple_size_v<UnverifiedObservers>) {
    // We have reached the end of the tuple of observers to verify. Hence, we
    // extract the additional valid observers, append them to the tuple of
    // already verified observers and hand over to the dispatcher.
    auto observers = std::tuple_cat(
        verified_observers,
        std::make_tuple(std::get<IndicesToSelect>(unverified_observers)...));

    // Do a final check that neither the maximum total number of observers nor
    // the maximum number of optional observers is exceeded.
    static_assert(std::tuple_size_v<decltype(observers)> <=
                  configuration::kMaximumNumberOfObservers);
    static_assert(sizeof...(IndicesToSelect) <=
                  configuration::kMaximumNumberOfOptionalObservers);

    dispatcher.Initialize(std::move(observers));
  }
}

}  // namespace internal

// The result of concatenating two tuple-types.
template <typename... tuples>
using TupleCat = decltype(std::tuple_cat(std::declval<tuples>()...));

// Initializer collects mandatory and optional observers and initializes the
// passed Dispatcher with only the enabled observers.
//
// In some situations, the presence of observers depends on runtime conditions,
// e.g. command line parameters or CPU features. With 3 optional observers we
// already have 8 different combinations. Initializer takes the job of dealing
// with all combinations away from the user. It allows users to pass all
// observers (including nullptr for disabled optional observers) and
// initializes the Dispatcher with only the enabled observers.
//
// Since this process results in a combinatoric explosion, Initializer
// distinguishes between optional and mandatory observers. Mandatory observers
// are not included in the filtering process and must always be enabled (not
// nullptr).
//
// To allow the Initializer to track the number and exact type of observers, it
// is implemented as a templated class which holds information on the types in
// the std::tuples passed as template parameters. Therefore, whenever any
// observer is set, the Initializer changes its type to reflect this.
template <typename MandatoryObservers = std::tuple<>,
          typename OptionalObservers = std::tuple<>>
struct BASE_EXPORT Initializer {
  Initializer() = default;
  Initializer(MandatoryObservers mandatory_observers,
              OptionalObservers optional_observers)
      : mandatory_observers_(std::move(mandatory_observers)),
        optional_observers_(std::move(optional_observers)) {}

  // Set the mandatory observers. The number of observers that can be set is
  // limited by configuration::kMaximumNumberOfObservers.
  template <typename... NewMandatoryObservers,
            std::enable_if_t<
                internal::LessEqual((sizeof...(NewMandatoryObservers) +
                                     std::tuple_size_v<OptionalObservers>),
                                    configuration::kMaximumNumberOfObservers),
                bool> = true>
  Initializer<std::tuple<NewMandatoryObservers*...>, OptionalObservers>
  SetMandatoryObservers(NewMandatoryObservers*... mandatory_observers) const {
    return {std::make_tuple(mandatory_observers...), GetOptionalObservers()};
  }

  // Add mandatory observers. The number of observers that can be added is
  // limited by the current number of observers, see
  // configuration::kMaximumNumberOfObservers.
  template <typename... AdditionalMandatoryObservers,
            std::enable_if_t<internal::LessEqual(
                                 std::tuple_size_v<MandatoryObservers> +
                                     sizeof...(AdditionalMandatoryObservers) +
                                     std::tuple_size_v<OptionalObservers>,
                                 configuration::kMaximumNumberOfObservers),
                             bool> = true>
  Initializer<TupleCat<MandatoryObservers,
                       std::tuple<AdditionalMandatoryObservers*...>>,
              OptionalObservers>
  AddMandatoryObservers(
      AdditionalMandatoryObservers*... additional_mandatory_observers) const {
    return {std::tuple_cat(GetMandatoryObservers(),
                           std::make_tuple(additional_mandatory_observers...)),
            GetOptionalObservers()};
  }

  // Set the optional observers. The number of observers that can be set is
  // limited by configuration::kMaximumNumberOfOptionalObservers as well as
  // configuration::kMaximumNumberOfObservers.
  template <
      typename... NewOptionalObservers,
      std::enable_if_t<
          internal::LessEqual(
              sizeof...(NewOptionalObservers),
              configuration::kMaximumNumberOfOptionalObservers) &&
              internal::LessEqual((sizeof...(NewOptionalObservers) +
                                   std::tuple_size_v<MandatoryObservers>),
                                  configuration::kMaximumNumberOfObservers),
          bool> = true>
  Initializer<MandatoryObservers, std::tuple<NewOptionalObservers*...>>
  SetOptionalObservers(NewOptionalObservers*... optional_observers) const {
    return {GetMandatoryObservers(), std::make_tuple(optional_observers...)};
  }

  // Add optional observers. The number of observers that can be added is
  // limited by the current number of optional observers,
  // configuration::kMaximumNumberOfOptionalObservers as well as
  // configuration::kMaximumNumberOfObservers.
  template <
      typename... AdditionalOptionalObservers,
      std::enable_if_t<
          internal::LessEqual(
              std::tuple_size_v<OptionalObservers> +
                  sizeof...(AdditionalOptionalObservers),
              configuration::kMaximumNumberOfOptionalObservers) &&
              internal::LessEqual((std::tuple_size_v<OptionalObservers> +
                                   sizeof...(AdditionalOptionalObservers) +
                                   std::tuple_size_v<MandatoryObservers>),
                                  configuration::kMaximumNumberOfObservers),
          bool> = true>
  Initializer<
      MandatoryObservers,
      TupleCat<OptionalObservers, std::tuple<AdditionalOptionalObservers*...>>>
  AddOptionalObservers(
      AdditionalOptionalObservers*... additional_optional_observers) const {
    return {GetMandatoryObservers(),
            std::tuple_cat(GetOptionalObservers(),
                           std::make_tuple(additional_optional_observers...))};
  }

  // Perform the actual initialization on the passed dispatcher.
  // The dispatcher is passed as a template only to provide better testability.
  template <typename DispatcherType>
  void DoInitialize(DispatcherType& dispatcher) const {
    internal::DoInitialize<0>(dispatcher, internal::IsValidObserver{},
                              GetMandatoryObservers(), GetOptionalObservers(),
                              {});
  }

  const MandatoryObservers& GetMandatoryObservers() const {
    return mandatory_observers_;
  }

  const OptionalObservers& GetOptionalObservers() const {
    return optional_observers_;
  }

 private:
  MandatoryObservers mandatory_observers_;
  OptionalObservers optional_observers_;
};

// Convenience function for creating an empty Initializer.
inline Initializer<> CreateInitializer() {
  return {};
}

}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
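Tying Initializer and Dispatcher together, a hedged usage sketch built only from the API shown above; HeapProfiler and Sampler are made-up observer types, and sampler may legitimately be nullptr because disabled optional observers are filtered out before Dispatcher::Initialize runs:

#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/initializer.h"

// Hypothetical observer types implementing OnAllocation()/OnFree().
class HeapProfiler;
class Sampler;

void InitializeAllocationObservers(HeapProfiler* heap_profiler,
                                   Sampler* sampler /* may be nullptr */) {
  base::allocator::dispatcher::CreateInitializer()
      .SetMandatoryObservers(heap_profiler)  // Must be non-null.
      .AddOptionalObservers(sampler)         // Dropped if nullptr.
      .DoInitialize(base::allocator::dispatcher::Dispatcher::GetInstance());
}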
43
src/base/allocator/dispatcher/internal/dispatch_data.cc
Normal file
@@ -0,0 +1,43 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/internal/dispatch_data.h"

#include "partition_alloc/buildflags.h"

namespace base::allocator::dispatcher::internal {

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)

DispatchData& DispatchData::SetAllocationObserverHooks(
    AllocationObserverHook* allocation_observer_hook,
    FreeObserverHook* free_observer_hook) {
  allocation_observer_hook_ = allocation_observer_hook;
  free_observer_hook_ = free_observer_hook;

  return *this;
}

DispatchData::AllocationObserverHook* DispatchData::GetAllocationObserverHook()
    const {
  return allocation_observer_hook_;
}

DispatchData::FreeObserverHook* DispatchData::GetFreeObserverHook() const {
  return free_observer_hook_;
}
#endif

#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
DispatchData& DispatchData::SetAllocatorDispatch(
    AllocatorDispatch* allocator_dispatch) {
  allocator_dispatch_ = allocator_dispatch;
  return *this;
}

AllocatorDispatch* DispatchData::GetAllocatorDispatch() const {
  return allocator_dispatch_;
}
#endif
}  // namespace base::allocator::dispatcher::internal
|
58
src/base/allocator/dispatcher/internal/dispatch_data.h
Normal file

@@ -0,0 +1,58 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_

#include "base/base_export.h"
#include "build/build_config.h"
#include "partition_alloc/buildflags.h"

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_hooks.h"  // nogncheck
#endif

#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "partition_alloc/shim/allocator_shim.h"  // nogncheck
#endif

namespace base::allocator::dispatcher::internal {

#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
using allocator_shim::AllocatorDispatch;
#endif

// A simple utility class to pass all the information required to properly hook
// into the memory allocation subsystems from DispatcherImpl to the Dispatcher.
struct BASE_EXPORT DispatchData {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
  using AllocationObserverHook =
      partition_alloc::PartitionAllocHooks::AllocationObserverHook;
  using FreeObserverHook =
      partition_alloc::PartitionAllocHooks::FreeObserverHook;

  DispatchData& SetAllocationObserverHooks(AllocationObserverHook*,
                                           FreeObserverHook*);
  AllocationObserverHook* GetAllocationObserverHook() const;
  FreeObserverHook* GetFreeObserverHook() const;

 private:
  AllocationObserverHook* allocation_observer_hook_ = nullptr;
  FreeObserverHook* free_observer_hook_ = nullptr;

 public:
#endif

#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
  DispatchData& SetAllocatorDispatch(AllocatorDispatch* allocator_dispatch);
  AllocatorDispatch* GetAllocatorDispatch() const;

 private:
  AllocatorDispatch* allocator_dispatch_ = nullptr;
#endif
};

}  // namespace base::allocator::dispatcher::internal

#endif  // BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
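The setters return *this, so a DispatchData can be filled in one chained
expression. A minimal sketch, assuming both build flags are enabled (the hook
and dispatch pointers are hypothetical placeholders; DispatcherImpl below wires
in the real ones):

  // Sketch only: my_alloc_hook/my_free_hook/my_dispatch are placeholders.
  DispatchData data = DispatchData()
                          .SetAllocationObserverHooks(&my_alloc_hook,
                                                      &my_free_hook)
                          .SetAllocatorDispatch(&my_dispatch);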
378
src/base/allocator/dispatcher/internal/dispatcher_internal.h
Normal file

@@ -0,0 +1,378 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_

#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/allocator/dispatcher/internal/tools.h"
#include "base/allocator/dispatcher/memory_tagging.h"
#include "base/allocator/dispatcher/notification_data.h"
#include "base/allocator/dispatcher/subsystem.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "partition_alloc/buildflags.h"

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_allocation_data.h"  // nogncheck
#endif

#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "partition_alloc/shim/allocator_shim.h"
#endif

#include <tuple>

namespace base::allocator::dispatcher::internal {

#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
using allocator_shim::AllocatorDispatch;
#endif

template <typename CheckObserverPredicate,
          typename... ObserverTypes,
          size_t... Indices>
void inline PerformObserverCheck(const std::tuple<ObserverTypes...>& observers,
                                 std::index_sequence<Indices...>,
                                 CheckObserverPredicate check_observer) {
  ([](bool b) { DCHECK(b); }(check_observer(std::get<Indices>(observers))),
   ...);
}

template <typename... ObserverTypes, size_t... Indices>
ALWAYS_INLINE void PerformAllocationNotification(
    const std::tuple<ObserverTypes...>& observers,
    std::index_sequence<Indices...>,
    const AllocationNotificationData& notification_data) {
  ((std::get<Indices>(observers)->OnAllocation(notification_data)), ...);
}

template <typename... ObserverTypes, size_t... Indices>
ALWAYS_INLINE void PerformFreeNotification(
    const std::tuple<ObserverTypes...>& observers,
    std::index_sequence<Indices...>,
    const FreeNotificationData& notification_data) {
  ((std::get<Indices>(observers)->OnFree(notification_data)), ...);
}

// DispatcherImpl provides hooks into the various memory subsystems. These
// hooks are responsible for dispatching any notification to the observers.
// In order to provide as much information as possible on the exact types of
// the observers and to prevent conditional jumps in the hot allocation path,
// the observers are stored in a std::tuple. DispatcherImpl performs a CHECK
// at initialization time to ensure they are valid.
template <typename... ObserverTypes>
struct DispatcherImpl {
  using AllObservers = std::index_sequence_for<ObserverTypes...>;

  template <std::enable_if_t<
                internal::LessEqual(sizeof...(ObserverTypes),
                                    configuration::kMaximumNumberOfObservers),
                bool> = true>
  static DispatchData GetNotificationHooks(
      std::tuple<ObserverTypes*...> observers) {
    s_observers = std::move(observers);

    PerformObserverCheck(s_observers, AllObservers{}, IsValidObserver{});

    return CreateDispatchData();
  }

 private:
  static DispatchData CreateDispatchData() {
    return DispatchData()
#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
        .SetAllocationObserverHooks(&PartitionAllocatorAllocationHook,
                                    &PartitionAllocatorFreeHook)
#endif
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
        .SetAllocatorDispatch(&allocator_dispatch_)
#endif
        ;
  }

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
  static void PartitionAllocatorAllocationHook(
      const partition_alloc::AllocationNotificationData&
          pa_notification_data) {
    AllocationNotificationData dispatcher_notification_data(
        pa_notification_data.address(), pa_notification_data.size(),
        pa_notification_data.type_name(),
        AllocationSubsystem::kPartitionAllocator);

#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
    dispatcher_notification_data.SetMteReportingMode(
        ConvertToMTEMode(pa_notification_data.mte_reporting_mode()));
#endif

    DoNotifyAllocation(dispatcher_notification_data);
  }

  static void PartitionAllocatorFreeHook(
      const partition_alloc::FreeNotificationData& pa_notification_data) {
    FreeNotificationData dispatcher_notification_data(
        pa_notification_data.address(),
        AllocationSubsystem::kPartitionAllocator);

#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
    dispatcher_notification_data.SetMteReportingMode(
        ConvertToMTEMode(pa_notification_data.mte_reporting_mode()));
#endif

    DoNotifyFree(dispatcher_notification_data);
  }
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC)

#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
  static void* AllocFn(size_t size, void* context) {
    void* const address =
        allocator_dispatch_.next->alloc_function(size, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* AllocUncheckedFn(size_t size, void* context) {
    void* const address =
        allocator_dispatch_.next->alloc_unchecked_function(size, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* AllocZeroInitializedFn(size_t n, size_t size, void* context) {
    void* const address =
        allocator_dispatch_.next->alloc_zero_initialized_function(n, size,
                                                                  context);

    DoNotifyAllocationForShim(address, n * size);

    return address;
  }

  static void* AllocAlignedFn(size_t alignment, size_t size, void* context) {
    void* const address = allocator_dispatch_.next->alloc_aligned_function(
        alignment, size, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* ReallocFn(void* address, size_t size, void* context) {
    // Note: size == 0 actually performs free.
    DoNotifyFreeForShim(address);
    void* const reallocated_address =
        allocator_dispatch_.next->realloc_function(address, size, context);

    DoNotifyAllocationForShim(reallocated_address, size);

    return reallocated_address;
  }

  static void* ReallocUncheckedFn(void* address, size_t size, void* context) {
    // Note: size == 0 actually performs free.
    DoNotifyFreeForShim(address);
    void* const reallocated_address =
        allocator_dispatch_.next->realloc_unchecked_function(address, size,
                                                             context);

    DoNotifyAllocationForShim(reallocated_address, size);

    return reallocated_address;
  }

  static void FreeFn(void* address, void* context) {
    // Note: DoNotifyFree should be called before free_function (here and in
    // other places). That is because observers need to handle the allocation
    // being freed before calling free_function, as once the latter is executed
    // the address becomes available and can be allocated by another thread.
    // That would be racy otherwise.
    DoNotifyFreeForShim(address);
    MUSTTAIL return allocator_dispatch_.next->free_function(address, context);
  }

  static unsigned BatchMallocFn(size_t size,
                                void** results,
                                unsigned num_requested,
                                void* context) {
    unsigned const num_allocated =
        allocator_dispatch_.next->batch_malloc_function(size, results,
                                                        num_requested, context);
    for (unsigned i = 0; i < num_allocated; ++i) {
      DoNotifyAllocationForShim(results[i], size);
    }
    return num_allocated;
  }

  static void BatchFreeFn(void** to_be_freed,
                          unsigned num_to_be_freed,
                          void* context) {
    for (unsigned i = 0; i < num_to_be_freed; ++i) {
      DoNotifyFreeForShim(to_be_freed[i]);
    }

    MUSTTAIL return allocator_dispatch_.next->batch_free_function(
        to_be_freed, num_to_be_freed, context);
  }

  static void FreeDefiniteSizeFn(void* address, size_t size, void* context) {
    DoNotifyFreeForShim(address);
    MUSTTAIL return allocator_dispatch_.next->free_definite_size_function(
        address, size, context);
  }

  static void TryFreeDefaultFn(void* address, void* context) {
    DoNotifyFreeForShim(address);
    MUSTTAIL return allocator_dispatch_.next->try_free_default_function(
        address, context);
  }

  static void* AlignedMallocFn(size_t size, size_t alignment, void* context) {
    void* const address = allocator_dispatch_.next->aligned_malloc_function(
        size, alignment, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* AlignedMallocUncheckedFn(size_t size,
                                        size_t alignment,
                                        void* context) {
    void* const address =
        allocator_dispatch_.next->aligned_malloc_unchecked_function(
            size, alignment, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* AlignedReallocFn(void* address,
                                size_t size,
                                size_t alignment,
                                void* context) {
    // Note: size == 0 actually performs free.
    DoNotifyFreeForShim(address);
    address = allocator_dispatch_.next->aligned_realloc_function(
        address, size, alignment, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* AlignedReallocUncheckedFn(void* address,
                                         size_t size,
                                         size_t alignment,
                                         void* context) {
    // Note: size == 0 actually performs free.
    DoNotifyFreeForShim(address);
    address = allocator_dispatch_.next->aligned_realloc_unchecked_function(
        address, size, alignment, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void AlignedFreeFn(void* address, void* context) {
    DoNotifyFreeForShim(address);
    MUSTTAIL return allocator_dispatch_.next->aligned_free_function(address,
                                                                    context);
  }

  ALWAYS_INLINE static void DoNotifyAllocationForShim(void* address,
                                                      size_t size) {
    AllocationNotificationData notification_data(
        address, size, nullptr, AllocationSubsystem::kAllocatorShim);

    DoNotifyAllocation(notification_data);
  }

  ALWAYS_INLINE static void DoNotifyFreeForShim(void* address) {
    FreeNotificationData notification_data(address,
                                           AllocationSubsystem::kAllocatorShim);

    DoNotifyFree(notification_data);
  }

  static AllocatorDispatch allocator_dispatch_;
#endif  // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)

  ALWAYS_INLINE static void DoNotifyAllocation(
      const AllocationNotificationData& notification_data) {
    PerformAllocationNotification(s_observers, AllObservers{},
                                  notification_data);
  }

  ALWAYS_INLINE static void DoNotifyFree(
      const FreeNotificationData& notification_data) {
    PerformFreeNotification(s_observers, AllObservers{}, notification_data);
  }

  static std::tuple<ObserverTypes*...> s_observers;
};

template <typename... ObserverTypes>
std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;

#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
template <typename... ObserverTypes>
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
    AllocFn,                    // alloc_function
    AllocUncheckedFn,           // alloc_unchecked_function
    AllocZeroInitializedFn,     // alloc_zero_initialized_function
    AllocAlignedFn,             // alloc_aligned_function
    ReallocFn,                  // realloc_function
    ReallocUncheckedFn,         // realloc_unchecked_function
    FreeFn,                     // free_function
    nullptr,                    // get_size_estimate_function
    nullptr,                    // good_size_function
    nullptr,                    // claimed_address_function
    BatchMallocFn,              // batch_malloc_function
    BatchFreeFn,                // batch_free_function
    FreeDefiniteSizeFn,         // free_definite_size_function
    TryFreeDefaultFn,           // try_free_default_function
    AlignedMallocFn,            // aligned_malloc_function
    AlignedMallocUncheckedFn,   // aligned_malloc_unchecked_function
    AlignedReallocFn,           // aligned_realloc_function
    AlignedReallocUncheckedFn,  // aligned_realloc_unchecked_function
    AlignedFreeFn,              // aligned_free_function
    nullptr                     // next
};
#endif  // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)

// Specialization of DispatcherImpl in case we have no observers to notify. In
// this special case we return a set of null pointers as the Dispatcher must
// not install any hooks at all.
template <>
struct DispatcherImpl<> {
  static DispatchData GetNotificationHooks(std::tuple<> /*observers*/) {
    return DispatchData()
#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
        .SetAllocationObserverHooks(nullptr, nullptr)
#endif
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
        .SetAllocatorDispatch(nullptr)
#endif
        ;
  }
};

// A little utility function that helps using DispatcherImpl by providing
// automated type deduction for templates.
template <typename... ObserverTypes>
inline DispatchData GetNotificationHooks(
    std::tuple<ObserverTypes*...> observers) {
  return DispatcherImpl<ObserverTypes...>::GetNotificationHooks(
      std::move(observers));
}

}  // namespace base::allocator::dispatcher::internal

#endif  // BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
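A minimal sketch of how these pieces come together, using an illustrative
observer type (any class with OnAllocation()/OnFree() qualifies; installing
the returned hooks is the job of the Dispatcher, which is outside this diff):

  // Sketch only: CountingObserver is a hypothetical observer.
  struct CountingObserver {
    void OnAllocation(const AllocationNotificationData&) { ++allocations; }
    void OnFree(const FreeNotificationData&) { ++frees; }
    size_t allocations = 0;
    size_t frees = 0;
  };

  CountingObserver observer;
  // Stores &observer in DispatcherImpl's static tuple and returns the hook
  // pointers to install.
  DispatchData dispatch_data =
      internal::GetNotificationHooks(std::make_tuple(&observer));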
29
src/base/allocator/dispatcher/internal/tools.h
Normal file

@@ -0,0 +1,29 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_

#include <cstddef>

namespace base::allocator::dispatcher::internal {

constexpr bool LessEqual(size_t lhs, size_t rhs) {
  return lhs <= rhs;
}

constexpr bool Equal(size_t lhs, size_t rhs) {
  return lhs == rhs;
}

struct IsValidObserver {
  template <typename T>
  constexpr bool operator()(T const* ptr) const noexcept {
    return ptr != nullptr;
  }
};

}  // namespace base::allocator::dispatcher::internal

#endif  // BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
20
src/base/allocator/dispatcher/memory_tagging.cc
Normal file

@@ -0,0 +1,20 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/memory_tagging.h"

namespace base::allocator::dispatcher {
static_assert(
    MTEMode::kUndefined ==
    ConvertToMTEMode(partition_alloc::TagViolationReportingMode::kUndefined));
static_assert(
    MTEMode::kDisabled ==
    ConvertToMTEMode(partition_alloc::TagViolationReportingMode::kDisabled));
static_assert(
    MTEMode::kSynchronous ==
    ConvertToMTEMode(partition_alloc::TagViolationReportingMode::kSynchronous));
static_assert(MTEMode::kAsynchronous ==
              ConvertToMTEMode(
                  partition_alloc::TagViolationReportingMode::kAsynchronous));
}  // namespace base::allocator::dispatcher
42
src/base/allocator/dispatcher/memory_tagging.h
Normal file

@@ -0,0 +1,42 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_
#define BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_

#include "partition_alloc/tagging.h"

namespace base::allocator::dispatcher {
// The various modes of Arm's MTE extension. The enum values should match
// their counterparts in partition_alloc::TagViolationReportingMode; otherwise
// the conversion function below would involve a translation table or
// conditional jumps.
enum class MTEMode {
  // Default settings
  kUndefined,
  // MTE explicitly disabled.
  kDisabled,
  // Precise tag violation reports, higher overhead. Good for unittests
  // and security critical threads.
  kSynchronous,
  // Imprecise tag violation reports (async mode). Lower overhead.
  kAsynchronous,
};

constexpr MTEMode ConvertToMTEMode(
    partition_alloc::TagViolationReportingMode pa_mte_reporting_mode) {
  switch (pa_mte_reporting_mode) {
    case partition_alloc::TagViolationReportingMode::kUndefined:
      return MTEMode::kUndefined;
    case partition_alloc::TagViolationReportingMode::kDisabled:
      return MTEMode::kDisabled;
    case partition_alloc::TagViolationReportingMode::kSynchronous:
      return MTEMode::kSynchronous;
    case partition_alloc::TagViolationReportingMode::kAsynchronous:
      return MTEMode::kAsynchronous;
  }
}

}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_
110
src/base/allocator/dispatcher/notification_data.h
Normal file

@@ -0,0 +1,110 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_NOTIFICATION_DATA_H_
#define BASE_ALLOCATOR_DISPATCHER_NOTIFICATION_DATA_H_

#include <cstdint>

#include "base/allocator/dispatcher/memory_tagging.h"
#include "base/allocator/dispatcher/subsystem.h"
#include "base/base_export.h"
#include "partition_alloc/buildflags.h"

namespace base::allocator::dispatcher {

// Definitions of the parameter structures passed to the observer hooks. They
// are similar to the structures defined by PartitionAllocator but provide
// further information.

// The notification data for the allocation path.
class BASE_EXPORT AllocationNotificationData {
 public:
  constexpr AllocationNotificationData(void* address,
                                       size_t size,
                                       const char* type_name,
                                       AllocationSubsystem allocation_subsystem)
      : address_(address),
        size_(size),
        type_name_(type_name),
        allocation_subsystem_(allocation_subsystem) {}

  constexpr void* address() const { return address_; }

  constexpr size_t size() const { return size_; }

  constexpr const char* type_name() const { return type_name_; }

  constexpr AllocationSubsystem allocation_subsystem() const {
    return allocation_subsystem_;
  }

  // In the allocation observer path, it is useful to know which reporting
  // mode is enabled.
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  constexpr AllocationNotificationData& SetMteReportingMode(MTEMode mode) {
    mte_reporting_mode_ = mode;
    return *this;
  }
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)

  constexpr MTEMode mte_reporting_mode() const {
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
    return mte_reporting_mode_;
#else
    return MTEMode::kUndefined;
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  }

 private:
  void* address_ = nullptr;
  size_t size_ = 0;
  const char* type_name_ = nullptr;
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  MTEMode mte_reporting_mode_ = MTEMode::kUndefined;
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  AllocationSubsystem allocation_subsystem_;
};

// The notification data for the free path.
class BASE_EXPORT FreeNotificationData {
 public:
  constexpr explicit FreeNotificationData(
      void* address,
      AllocationSubsystem allocation_subsystem)
      : address_(address), allocation_subsystem_(allocation_subsystem) {}

  constexpr void* address() const { return address_; }

  constexpr AllocationSubsystem allocation_subsystem() const {
    return allocation_subsystem_;
  }

  // In the free observer path, it is useful to know which reporting mode is
  // enabled.
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  constexpr FreeNotificationData& SetMteReportingMode(MTEMode mode) {
    mte_reporting_mode_ = mode;
    return *this;
  }
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)

  constexpr MTEMode mte_reporting_mode() const {
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
    return mte_reporting_mode_;
#else
    return MTEMode::kUndefined;
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  }

 private:
  void* address_ = nullptr;
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  MTEMode mte_reporting_mode_ = MTEMode::kUndefined;
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
  AllocationSubsystem allocation_subsystem_;
};

}  // namespace base::allocator::dispatcher
#endif  // BASE_ALLOCATOR_DISPATCHER_NOTIFICATION_DATA_H_
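A small sketch of how a notification record is built and queried on the shim
path (the values are illustrative):

  // Sketch only: fabricate a notification for a 64-byte shim allocation.
  int dummy = 0;
  AllocationNotificationData data(&dummy, /*size=*/64, /*type_name=*/nullptr,
                                  AllocationSubsystem::kAllocatorShim);
  // Without PA_BUILDFLAG(HAS_MEMORY_TAGGING) this always yields kUndefined.
  MTEMode mode = data.mte_reporting_mode();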
61
src/base/allocator/dispatcher/reentry_guard.cc
Normal file

@@ -0,0 +1,61 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/reentry_guard.h"

#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/debug/crash_logging.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
#endif

namespace base::allocator::dispatcher {

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
// pthread_key_t has different signedness on Mac and Android. Store the null
// value in a strongly-typed constant to avoid "comparison of integers of
// different signs" warnings when comparing with 0.
constexpr pthread_key_t kNullKey = 0;

pthread_key_t ReentryGuard::entered_key_ = kNullKey;

void ReentryGuard::InitTLSSlot() {
  if (entered_key_ == kNullKey) {
    int error = pthread_key_create(&entered_key_, nullptr);
    CHECK(!error);
    // Touch the TLS slot immediately to force any allocations.
    // TODO(crbug.com/40062835): Use this technique to avoid allocations
    // in PoissonAllocationSampler::ScopedMuteThreadSamples, which will make
    // ReentryGuard redundant.
    pthread_setspecific(entered_key_, nullptr);
  }

  DCHECK_NE(entered_key_, kNullKey);
}

#else

void ReentryGuard::InitTLSSlot() {}

#endif

void ReentryGuard::RecordTLSSlotToCrashKey() {
  // Record the key in crash dumps to detect when it's higher than 32
  // (PTHREAD_KEY_2NDLEVEL_SIZE).
  // TODO(crbug.com/40062835): Remove this after diagnosing reentry crashes.
  static auto* const crash_key = base::debug::AllocateCrashKeyString(
      "reentry_guard_tls_slot", base::debug::CrashKeySize::Size32);

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
  base::debug::SetCrashKeyString(crash_key, base::NumberToString(entered_key_));
#else
  base::debug::SetCrashKeyString(crash_key, "unused");
#endif
}

}  // namespace base::allocator::dispatcher
76
src/base/allocator/dispatcher/reentry_guard.h
Normal file

@@ -0,0 +1,76 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
#define BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_

#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
#endif

namespace base::allocator::dispatcher {

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)

// The macOS implementation of libmalloc sometimes calls malloc recursively,
// delegating allocations between zones. That causes our hooks to be called
// twice. The scoped guard allows us to detect that.
//
// Besides that, the implementations of thread_local on macOS and Android seem
// to allocate memory lazily on the first access to thread_local variables
// (and on Android at least, thread_local is implemented on top of pthread, so
// it is strictly worse for performance). Make use of pthread TLS instead of
// C++ thread_local there.
struct BASE_EXPORT ReentryGuard {
  ALWAYS_INLINE ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
    pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
  }

  ALWAYS_INLINE ~ReentryGuard() {
    if (allowed_) [[likely]] {
      pthread_setspecific(entered_key_, nullptr);
    }
  }

  explicit operator bool() const noexcept { return allowed_; }

  // This function must be called before installing any allocator hooks
  // because some TLS implementations may allocate (e.g. glibc requires a
  // malloc call to allocate storage for a higher slot number,
  // >= PTHREAD_KEY_2NDLEVEL_SIZE == 32). This touches the thread-local
  // storage so that any malloc happens before installing the hooks.
  static void InitTLSSlot();

  // InitTLSSlot() is called before crash keys are available. At some point
  // after SetCrashKeyImplementation() is called, this function should be
  // called to record `entered_key_` to a crash key for debugging. This may
  // allocate so it must not be called from inside an allocator hook.
  static void RecordTLSSlotToCrashKey();

 private:
  static pthread_key_t entered_key_;
  const bool allowed_;
};

#else

// Use [[maybe_unused]] as this lightweight stand-in for the more heavyweight
// ReentryGuard above will otherwise trigger "unused code" warnings.
struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
  constexpr explicit operator bool() const noexcept { return true; }

  static void InitTLSSlot();
  static void RecordTLSSlotToCrashKey();
};

#endif

}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
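The typical pattern inside an allocation hook, sketched under the assumption
that the guard sits at the top of the hook body (the hook signature is
illustrative):

  // Sketch only: skip the notification when re-entered from our own hook.
  void OnAllocationHook(/* ... */) {
    ReentryGuard guard;
    if (!guard) {
      return;  // Recursive call from libmalloc; observers already notified.
    }
    // ... notify observers ...
  }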
27
src/base/allocator/dispatcher/subsystem.h
Normal file

@@ -0,0 +1,27 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
#define BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_

namespace base::allocator::dispatcher {

// Identifiers for the memory subsystem handling the allocation. Some
// observers, e.g. SamplingHeapProfiler, require more detailed information on
// who is performing the allocation.
enum class AllocationSubsystem {
  // Allocation is handled by PartitionAllocator.
  kPartitionAllocator = 1,
  // Allocation is handled by AllocatorShims.
  kAllocatorShim = 2,
  // Represents a simulated allocation event during testing and is used to
  // filter out these allocations from real ones.
  //
  // Included for backward compatibility, this value becomes obsolete once the
  // old allocation hooks are removed from PoissonAllocationSampler.
  kManualForTesting = 3,
};
}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
27
src/base/allocator/dispatcher/testing/dispatcher_test.h
Normal file

@@ -0,0 +1,27 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_

#include "testing/gtest/include/gtest/gtest.h"

namespace base::allocator::dispatcher::testing {

// DispatcherTest provides some common initialization which most of the
// unittests of the dispatcher require. DispatcherTest should not be used
// directly. Instead, derive your test fixture from it.
struct DispatcherTest : public ::testing::Test {
  // Perform some commonly required initialization, at the moment:
  // - Initialize the TLS slot for the ReentryGuard
  DispatcherTest();

 protected:
  // Protected destructor only to prevent direct usage of this class.
  ~DispatcherTest() override;
};

}  // namespace base::allocator::dispatcher::testing

#endif  // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
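A minimal sketch of the intended usage in a unittest file (the fixture name
and test body are illustrative):

  // Sketch only.
  struct MyDispatcherTest
      : public base::allocator::dispatcher::testing::DispatcherTest {};

  TEST_F(MyDispatcherTest, DoesSomething) {
    // ReentryGuard's TLS slot is already initialized here.
  }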
33
src/base/allocator/dispatcher/testing/observer_mock.h
Normal file

@@ -0,0 +1,33 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_

#include "testing/gmock/include/gmock/gmock.h"

namespace base::allocator::dispatcher {
class AllocationNotificationData;
class FreeNotificationData;

namespace testing {

// ObserverMock is a small mock class based on GoogleMock.
// It conforms to the interface enforced by the dispatcher. The template
// parameter serves only to create distinct types of observers if required.
template <typename T = void>
struct ObserverMock {
  MOCK_METHOD(void,
              OnAllocation,
              (const AllocationNotificationData& notification_data),
              ());
  MOCK_METHOD(void,
              OnFree,
              (const FreeNotificationData& notification_data),
              ());
};
}  // namespace testing
}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
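A short sketch of why the template parameter exists: distinct tag types yield
distinct mock types, so several observers can be registered side by side in
one test (the tag names are illustrative):

  // Sketch only.
  struct AllocationTag;
  struct FreeTag;
  using AllocationObserver = ObserverMock<AllocationTag>;
  using FreeObserver = ObserverMock<FreeTag>;

  AllocationObserver allocation_observer;
  EXPECT_CALL(allocation_observer, OnAllocation(::testing::_)).Times(1);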
50
src/base/allocator/dispatcher/testing/tools.h
Normal file

@@ -0,0 +1,50 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_

#include <array>
#include <tuple>
#include <utility>

namespace base::allocator::dispatcher::testing {

namespace internal {
template <size_t Size, typename Type, typename... AppendedTypes>
struct DefineTupleFromSingleType {
  using type = typename DefineTupleFromSingleType<Size - 1,
                                                  Type,
                                                  AppendedTypes...,
                                                  Type>::type;
};

template <typename Type, typename... AppendedTypes>
struct DefineTupleFromSingleType<0, Type, AppendedTypes...> {
  using type = std::tuple<AppendedTypes...>;
};

}  // namespace internal

template <size_t Size, typename Type>
struct DefineTupleFromSingleType {
  using type = typename internal::DefineTupleFromSingleType<Size, Type>::type;
};

template <typename Type, size_t Size, size_t... Indices>
typename internal::DefineTupleFromSingleType<Size, Type*>::type
CreateTupleOfPointers(std::array<Type, Size>& items,
                      std::index_sequence<Indices...>) {
  return std::make_tuple((&items[Indices])...);
}

template <typename Type, size_t Size>
typename internal::DefineTupleFromSingleType<Size, Type*>::type
CreateTupleOfPointers(std::array<Type, Size>& items) {
  return CreateTupleOfPointers(items, std::make_index_sequence<Size>{});
}

}  // namespace base::allocator::dispatcher::testing

#endif  // BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
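In effect, CreateTupleOfPointers turns an array of observers into the tuple of
pointers that GetNotificationHooks expects. A minimal sketch:

  // Sketch only: std::array<int, 3> stands in for an array of observers.
  std::array<int, 3> items{1, 2, 3};
  // Yields std::tuple<int*, int*, int*> pointing at the array elements.
  auto pointers = CreateTupleOfPointers(items);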
167
src/base/allocator/dispatcher/tls.cc
Normal file

@@ -0,0 +1,167 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/tls.h"

#include <string_view>

#if USE_LOCAL_TLS_EMULATION()

#include <sys/mman.h>

#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/debug/crash_logging.h"
#include "base/immediate_crash.h"
#include "base/strings/string_number_conversions.h"  // for base::NumberToString
#include "build/build_config.h"

#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#include <sys/prctl.h>
#endif

namespace base::allocator::dispatcher::internal {
namespace {
base::debug::CrashKeySize GetCrashKeySize(const std::string& crash_key_name) {
  if (std::size(crash_key_name) <= 32ul) {
    return base::debug::CrashKeySize::Size32;
  }
  if (std::size(crash_key_name) <= 64ul) {
    return base::debug::CrashKeySize::Size64;
  }
  if (std::size(crash_key_name) <= 256ul) {
    return base::debug::CrashKeySize::Size256;
  }
  CHECK(std::size(crash_key_name) <= 1024ul);

  return base::debug::CrashKeySize::Size1024;
}

#if DCHECK_IS_ON()
void Swap(std::atomic_bool& lh_op, std::atomic_bool& rh_op) {
  auto lh_op_value = lh_op.load(std::memory_order_relaxed);
  auto rh_op_value = rh_op.load(std::memory_order_relaxed);

  CHECK(lh_op.compare_exchange_strong(lh_op_value, rh_op_value));
  CHECK(rh_op.compare_exchange_strong(rh_op_value, lh_op_value));
}
#endif
}  // namespace

void* MMapAllocator::AllocateMemory(size_t size_in_bytes) {
  void* const mmap_res = mmap(nullptr, size_in_bytes, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
  if (mmap_res != MAP_FAILED) {
    // Allow the anonymous memory region allocated by mmap(MAP_ANONYMOUS) to
    // be identified in /proc/$PID/smaps. This helps improve visibility into
    // Chromium's memory usage on Android.
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, mmap_res, size_in_bytes,
          "tls-mmap-allocator");
  }
#endif
#endif

  return (mmap_res != MAP_FAILED) ? mmap_res : nullptr;
}

bool MMapAllocator::FreeMemoryForTesting(void* pointer_to_allocated,
                                         size_t size_in_bytes) {
  auto const munmap_res = munmap(pointer_to_allocated, size_in_bytes);
  return (munmap_res == 0);
}

PThreadTLSSystem::PThreadTLSSystem() = default;

PThreadTLSSystem::PThreadTLSSystem(PThreadTLSSystem&& other) {
  std::swap(crash_key_, other.crash_key_);
  std::swap(data_access_key_, other.data_access_key_);

#if DCHECK_IS_ON()
  Swap(initialized_, other.initialized_);
#endif
}

PThreadTLSSystem& PThreadTLSSystem::operator=(PThreadTLSSystem&& other) {
  std::swap(crash_key_, other.crash_key_);
  std::swap(data_access_key_, other.data_access_key_);

#if DCHECK_IS_ON()
  Swap(initialized_, other.initialized_);
#endif

  return *this;
}

bool PThreadTLSSystem::Setup(
    OnThreadTerminationFunction thread_termination_function,
    std::string_view instance_id) {
#if DCHECK_IS_ON()
  // Initialize must happen outside of the allocation path. Therefore, it is
  // safe to verify with DCHECK.
  DCHECK(!initialized_.exchange(true, std::memory_order_acq_rel));
#endif

  auto const key_create_res =
      pthread_key_create(&data_access_key_, thread_termination_function);

  // On some platforms creating a new pthread-key requires an allocation when
  // a given number of keys has been created, e.g. in glibc this limit is
  // denoted by PTHREAD_KEY_2NDLEVEL_SIZE. However, this value is neither
  // present on all systems nor accessible from here. Hence, we do not perform
  // any checks here. However, we strongly recommend setting up the TLS system
  // as early as possible to avoid exceeding this limit.

  // Some crashes might be caused by the initialization being performed too
  // late and running into the problems mentioned above. Since there's no way
  // to handle this issue programmatically, we include the key in the crashpad
  // report to allow for later inspection.
  std::string crash_key_name = "tls_system-";
  crash_key_name += instance_id;

  crash_key_ = base::debug::AllocateCrashKeyString(
      crash_key_name.c_str(), GetCrashKeySize(crash_key_name));
  base::debug::SetCrashKeyString(crash_key_,
                                 base::NumberToString(data_access_key_));

  return (0 == key_create_res);
}

bool PThreadTLSSystem::TearDownForTesting() {
#if DCHECK_IS_ON()
  // TearDownForTesting must happen outside of the allocation path. Therefore,
  // it is safe to verify with DCHECK.
  DCHECK(initialized_.exchange(false, std::memory_order_acq_rel));
#endif

  base::debug::ClearCrashKeyString(crash_key_);
  crash_key_ = nullptr;

  auto const key_delete_res = pthread_key_delete(data_access_key_);
  return (0 == key_delete_res);
}

void* PThreadTLSSystem::GetThreadSpecificData() {
#if DCHECK_IS_ON()
  if (!initialized_.load(std::memory_order_acquire)) {
    return nullptr;
  }
#endif

  return pthread_getspecific(data_access_key_);
}

bool PThreadTLSSystem::SetThreadSpecificData(void* data) {
#if DCHECK_IS_ON()
  if (!initialized_.load(std::memory_order_acquire)) {
    return false;
  }
#endif

  return (0 == pthread_setspecific(data_access_key_, data));
}

}  // namespace base::allocator::dispatcher::internal

#endif  // USE_LOCAL_TLS_EMULATION()
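The raw TLS-system contract, sketched in isolation (normally ThreadLocalStorage
in tls.h below drives this; direct use as shown is illustrative only):

  // Sketch only.
  internal::PThreadTLSSystem tls;
  CHECK(tls.Setup(/*thread_termination_function=*/nullptr, "example"));
  int value = 42;
  CHECK(tls.SetThreadSpecificData(&value));
  // Later, on the same thread:
  int* stored = static_cast<int*>(tls.GetThreadSpecificData());
  CHECK(tls.TearDownForTesting());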
487
src/base/allocator/dispatcher/tls.h
Normal file

@@ -0,0 +1,487 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_TLS_H_
#define BASE_ALLOCATOR_DISPATCHER_TLS_H_

#include <string_view>

#include "build/build_config.h"

#if BUILDFLAG(IS_POSIX)  // the current allocation mechanism (mmap) and TLS
                         // support (pthread) are both defined by POSIX
#define USE_LOCAL_TLS_EMULATION() true
#else
#define USE_LOCAL_TLS_EMULATION() false
#endif

#if USE_LOCAL_TLS_EMULATION()
#include <pthread.h>

#include <algorithm>
#include <atomic>
#include <functional>
#include <memory>
#include <mutex>

#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_constants.h"  // nogncheck
#endif

#if HAS_FEATURE(thread_sanitizer)
#define DISABLE_TSAN_INSTRUMENTATION __attribute__((no_sanitize("thread")))
#else
#define DISABLE_TSAN_INSTRUMENTATION
#endif

#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)

// Verify that a condition holds and cancel the process in case it doesn't.
// The functionality is similar to RAW_CHECK but includes more information in
// the logged messages. It is non-allocating to prevent recursions.
#define TLS_RAW_CHECK(error_message, condition) \
  TLS_RAW_CHECK_IMPL(error_message, condition, __FILE__, __LINE__)

#define TLS_RAW_CHECK_IMPL(error_message, condition, file, line)         \
  do {                                                                   \
    if (!(condition)) {                                                  \
      constexpr const char* message =                                    \
          "TLS System: " error_message " Failed condition '" #condition \
          "' in (" file "@" STR(line) ").\n";                            \
      ::logging::RawCheckFailure(message);                               \
    }                                                                    \
  } while (0)

namespace base::debug {
struct CrashKeyString;
}

namespace base::allocator::dispatcher {
namespace internal {

// Allocate memory using POSIX' mmap and munmap functionality. The allocator
// implements the allocator interface required by ThreadLocalStorage.
struct BASE_EXPORT MMapAllocator {
  // The minimum size of a memory chunk when allocating. Even for chunks with
  // fewer bytes, at least AllocationChunkSize bytes are allocated. For mmap,
  // this is usually the page size of the system.
  // For various OS-CPU combinations, partition_alloc::PartitionPageSize() is
  // not constexpr. Hence, we cannot use this value but define it locally.
#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR) && \
    PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
  constexpr static size_t AllocationChunkSize =
      partition_alloc::PartitionPageSize();
#elif BUILDFLAG(IS_APPLE)
  constexpr static size_t AllocationChunkSize = 16384;
#elif BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_64_BITS)
  constexpr static size_t AllocationChunkSize = 16384;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
  constexpr static size_t AllocationChunkSize = 16384;
#else
  constexpr static size_t AllocationChunkSize = 4096;
#endif

  // Allocate size_in_bytes bytes of raw memory. Return nullptr if allocation
  // fails.
  void* AllocateMemory(size_t size_in_bytes);
  // Free the raw memory pointed to by pointer_to_allocated. Returns a boolean
  // value indicating if the free was successful.
  bool FreeMemoryForTesting(void* pointer_to_allocated, size_t size_in_bytes);
};

// The allocator used by default for the thread local storage.
using DefaultAllocator = MMapAllocator;

using OnThreadTerminationFunction = void (*)(void*);

// The TLS system used by default for the thread local storage. It stores and
// retrieves thread specific data pointers.
class BASE_EXPORT PThreadTLSSystem {
 public:
  PThreadTLSSystem();

  PThreadTLSSystem(const PThreadTLSSystem&) = delete;
  PThreadTLSSystem(PThreadTLSSystem&&);
  PThreadTLSSystem& operator=(const PThreadTLSSystem&) = delete;
  PThreadTLSSystem& operator=(PThreadTLSSystem&&);

  // Initialize the TLS system to store a data set for different threads.
  // @param thread_termination_function An optional function which will be
  // invoked upon termination of a thread.
  bool Setup(OnThreadTerminationFunction thread_termination_function,
             std::string_view instance_id);
  // Tear down the TLS system. After completing tear down, the thread
  // termination function passed to Setup will not be invoked anymore.
  bool TearDownForTesting();

  // Get the pointer to the data associated to the current thread. Returns
  // nullptr if the TLS system is not initialized or no data was set before.
  void* GetThreadSpecificData();
  // Set the pointer to the data associated to the current thread. Return true
  // if stored successfully, false otherwise.
  bool SetThreadSpecificData(void* data);

 private:
  base::debug::CrashKeyString* crash_key_ = nullptr;
  pthread_key_t data_access_key_ = 0;
#if DCHECK_IS_ON()
  // From the POSIX standard at
  // https://www.open-std.org/jtc1/sc22/open/n4217.pdf:
  // The effect of calling pthread_getspecific() or pthread_setspecific() with
  // a key value not obtained from pthread_key_create() or after key has been
  // deleted with pthread_key_delete() is undefined.
  //
  // Unfortunately, POSIX doesn't define a special value of pthread_key_t
  // indicating an invalid key which would allow us to detect accesses outside
  // of initialized state. Hence, to prevent us from drifting into the evil
  // realm of undefined behaviour we store whether we're somewhere between
  // Setup and Teardown.
  std::atomic_bool initialized_{false};
#endif
};

using DefaultTLSSystem = PThreadTLSSystem;

// In some scenarios, most notably when testing, the allocator and TLS system
// passed to |ThreadLocalStorage| are not copyable and have to be wrapped, i.e.
// using std::reference_wrapper. |dereference| is a small helper to retrieve
// the underlying value.
template <typename T>
T& dereference(T& ref) {
  return ref;
}

template <typename T>
T& dereference(std::reference_wrapper<T>& ref) {
  // std::reference_wrapper requires a valid reference for construction,
  // therefore, no need in checking here.
  return ref.get();
}

// Store thread local data. The data is organized in chunks, where each chunk
// holds |ItemsPerChunk|. Each item may be free or used.
//
// When a thread requests data, the chunks are searched for a free data item,
// which is registered for this thread and marked as |used|. Further requests
// by this thread will then always return the same item. When a thread
// terminates, the item will be reset and returned to the pool of free items.
//
// Upon construction, the first chunk is created. If a thread requests data
// and there is no free item available, another chunk is created. Upon
// destruction, all memory is freed. Pointers to data items become invalid!
//
// Constructor and destructor are not thread safe.
//
// @tparam PayloadType The item type to be stored.
// @tparam AllocatorType The allocator being used. An allocator must provide
// the following interface:
//  void* AllocateMemory(size_t size_in_bytes); // Allocate size_in_bytes
//  bytes of raw memory.
//  void FreeMemory(void* pointer_to_allocated, size_t size_in_bytes); // Free
//  the raw memory pointed to by pointer_to_allocated.
// Any failure in allocation or free must terminate the process.
// @tparam TLSSystemType The TLS system being used. A TLS system must provide
// the following interface:
//  bool Setup(OnThreadTerminationFunction thread_termination_function);
//  bool Destroy();
//  void* GetThreadSpecificData();
//  bool SetThreadSpecificData(void* data);
// @tparam AllocationChunkSize The minimum size of a memory chunk that the
// allocator can handle. We try to size the chunks so that each chunk uses
// this size to the maximum.
// @tparam IsDestructibleForTesting For testing purposes we allow the
// destructor to perform clean up upon destruction. Otherwise, using the
// destructor will result in a compilation failure.
template <typename PayloadType,
          typename AllocatorType,
          typename TLSSystemType,
          size_t AllocationChunkSize,
          bool IsDestructibleForTesting>
struct ThreadLocalStorage {
  explicit ThreadLocalStorage(std::string_view instance_id)
      : root_(AllocateAndInitializeChunk()) {
    Initialize(instance_id);
  }

  // Create a new instance of |ThreadLocalStorage| using the passed allocator
  // and TLS system. This initializes the underlying TLS system and creates
  // the first chunk of data.
  ThreadLocalStorage(std::string_view instance_id,
                     AllocatorType allocator,
                     TLSSystemType tls_system)
      : allocator_(std::move(allocator)),
        tls_system_(std::move(tls_system)),
        root_(AllocateAndInitializeChunk()) {
    Initialize(instance_id);
  }

  // Deletes an instance of |ThreadLocalStorage| and deletes all the data
  // chunks created.
  ~ThreadLocalStorage() {
    if constexpr (IsDestructibleForTesting) {
      TearDownForTesting();
    } else if constexpr (!IsDestructibleForTesting) {
      static_assert(
          IsDestructibleForTesting,
          "ThreadLocalStorage cannot be destructed outside of test code.");
    }
  }

  // Explicitly prevent all forms of Copy/Move construction/assignment. For an
  // exact copy of ThreadLocalStorage we would need to copy the mapping of
  // thread to item, which we can't do at the moment. On the other side, our
  // atomic members do not support moving out of the box.
  ThreadLocalStorage(const ThreadLocalStorage&) = delete;
  ThreadLocalStorage(ThreadLocalStorage&& other) = delete;
  ThreadLocalStorage& operator=(const ThreadLocalStorage&) = delete;
  ThreadLocalStorage& operator=(ThreadLocalStorage&&) = delete;

  // Get the data item for the current thread. If no data is registered so
  // far, find a free item in the chunks and register it for the current
  // thread.
  PayloadType* GetThreadLocalData() {
    auto& tls_system = dereference(tls_system_);

    auto* slot = static_cast<SingleSlot*>(tls_system.GetThreadSpecificData());

    if (slot == nullptr) [[unlikely]] {
      slot = FindAndAllocateFreeSlot(root_.load(std::memory_order_relaxed));

      // We might be called in the course of handling a memory allocation. We
      // do not use CHECK since it might allocate and cause a recursion.
      TLS_RAW_CHECK("Failed to set thread specific data.",
                    tls_system.SetThreadSpecificData(slot));

      // Reset the content to wipe out any previous data.
      Reset(slot->item);
    }

    return &(slot->item);
  }

 private:
  // Encapsulate the payload item and some administrative data.
  struct SingleSlot {
    PayloadType item;
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
    std::atomic_flag is_used = ATOMIC_FLAG_INIT;
#else
    std::atomic_flag is_used;
#endif
  };

  template <size_t NumberOfItems>
  struct ChunkT {
    SingleSlot slots[NumberOfItems];
    // Pointer to the next chunk.
    std::atomic<ChunkT*> next_chunk = nullptr;
    // Helper flag to ensure we create the next chunk only once in a
    // multi-threaded environment.
    std::once_flag create_next_chunk_flag;
  };

  template <size_t LowerNumberOfItems,
            size_t UpperNumberOfItems,
            size_t NumberOfBytes>
  static constexpr size_t CalculateEffectiveNumberOfItemsBinSearch() {
    if constexpr (LowerNumberOfItems == UpperNumberOfItems) {
      return LowerNumberOfItems;
    }

    constexpr size_t CurrentNumberOfItems =
        (UpperNumberOfItems - LowerNumberOfItems) / 2 + LowerNumberOfItems;

    if constexpr (sizeof(ChunkT<CurrentNumberOfItems>) > NumberOfBytes) {
      return CalculateEffectiveNumberOfItemsBinSearch<
          LowerNumberOfItems, CurrentNumberOfItems, NumberOfBytes>();
    }

    if constexpr (sizeof(ChunkT<CurrentNumberOfItems + 1>) < NumberOfBytes) {
      return CalculateEffectiveNumberOfItemsBinSearch<
          CurrentNumberOfItems + 1, UpperNumberOfItems, NumberOfBytes>();
    }

    return CurrentNumberOfItems;
  }

  // Calculate the maximum number of items we can store in one chunk without
  // the size of the chunk exceeding NumberOfBytes. To avoid things like
  // alignment and packing tampering with the calculation, instead of
  // calculating the correct number of items we use the sizeof-operator
  // against ChunkT to search for the correct size. Unfortunately, the number
  // of recursions is limited by the compiler. Therefore, we use a binary
  // search instead of a simple linear search.
  template <size_t MinimumNumberOfItems, size_t NumberOfBytes>
  static constexpr size_t CalculateEffectiveNumberOfItems() {
    if constexpr (sizeof(ChunkT<MinimumNumberOfItems>) < NumberOfBytes) {
      constexpr size_t LowerNumberOfItems = MinimumNumberOfItems;
      constexpr size_t UpperNumberOfItems =
          NumberOfBytes / sizeof(PayloadType) + 1;
      return CalculateEffectiveNumberOfItemsBinSearch<
          LowerNumberOfItems, UpperNumberOfItems, NumberOfBytes>();
    }

    return MinimumNumberOfItems;
  }

 public:
  // The minimum number of items per chunk. It should be high enough to
  // accommodate most items in the root chunk whilst not wasting too much
  // space on unnecessary items.
  static constexpr size_t MinimumNumberOfItemsPerChunk = 75;
  // The effective number of items per chunk. We use the AllocationChunkSize
  // as a hint to calculate the effective number of items so we occupy one of
  // these memory chunks to the maximum extent possible.
  static constexpr size_t ItemsPerChunk =
      CalculateEffectiveNumberOfItems<MinimumNumberOfItemsPerChunk,
                                      AllocationChunkSize>();

 private:
  using Chunk = ChunkT<ItemsPerChunk>;

  static_assert(ItemsPerChunk >= MinimumNumberOfItemsPerChunk);

  // Mark an item's slot ready for reuse. This function is used as thread
  // termination function in the TLS system. We do not destroy anything at
  // this point but simply mark the slot as unused.
  static void MarkSlotAsFree(void* data) {
    // We always store SingleSlots in the TLS system. Therefore, we cast to
    // SingleSlot and reset the is_used flag.
    auto* const slot = static_cast<SingleSlot*>(data);

    // We might be called in the course of handling a memory allocation.
    // Therefore, do not use CHECK since it might allocate and cause a
    // recursion.
    TLS_RAW_CHECK("Received an invalid slot.",
                  slot && slot->is_used.test_and_set());

    slot->is_used.clear(std::memory_order_relaxed);
  }

  // Perform common initialization during construction of an instance.
  void Initialize(std::string_view instance_id) {
    // The constructor must be called outside of the allocation path.
    // Therefore, it is safe to verify with CHECK.

    // Passing MarkSlotAsFree as thread_termination_function we ensure the
    // slot/item assigned to the finished thread will be returned to the pool
    // of unused items.
    CHECK(dereference(tls_system_).Setup(&MarkSlotAsFree, instance_id));
  }

  Chunk* AllocateAndInitializeChunk() {
    void* const uninitialized_memory =
        dereference(allocator_).AllocateMemory(sizeof(Chunk));

    // We might be called in the course of handling a memory allocation. We do
    // not use CHECK since it might allocate and cause a recursion.
    TLS_RAW_CHECK("Failed to allocate memory for new chunk.",
                  uninitialized_memory != nullptr);

    return new (uninitialized_memory) Chunk{};
  }

  void FreeAndDeallocateChunkForTesting(Chunk* chunk_to_erase) {
    chunk_to_erase->~Chunk();

    // FreeAndDeallocateChunkForTesting must be called outside of the
    // allocation path. Therefore, it is safe to verify with CHECK.
    CHECK(dereference(allocator_)
              .FreeMemoryForTesting(chunk_to_erase, sizeof(Chunk)));
|
||||
}
|
||||
|
||||
// Find a free slot in the passed chunk, reserve it and return it to the
|
||||
// caller. If no free slot can be found, head on to the next chunk. If the
|
||||
// next chunk doesn't exist, create it.
|
||||
SingleSlot* FindAndAllocateFreeSlot(Chunk* const chunk) {
|
||||
SingleSlot* const slot = std::find_if_not(
|
||||
std::begin(chunk->slots), std::end(chunk->slots),
|
||||
[](SingleSlot& candidate_slot) {
|
||||
return candidate_slot.is_used.test_and_set(std::memory_order_relaxed);
|
||||
});
|
||||
|
||||
// So we found a slot. Happily return it to the caller.
|
||||
if (slot != std::end(chunk->slots)) {
|
||||
return slot;
|
||||
}
|
||||
|
||||
// Ok, there are no more free slots in this chunk. First, ensure the next
|
||||
// chunk is valid and create one if necessary.
|
||||
std::call_once(chunk->create_next_chunk_flag, [&] {
|
||||
// From https://eel.is/c++draft/thread.once.callonce#3
|
||||
//
|
||||
// Synchronization: For any given once_flag: all active executions occur
|
||||
// in a total order; completion of an active execution synchronizes with
|
||||
// the start of the next one in this total order; and the returning
|
||||
// execution synchronizes with the return from all passive executions.
|
||||
//
|
||||
// Therefore, we do only a relaxed store here, call_once synchronizes with
|
||||
// other threads.
|
||||
chunk->next_chunk.store(AllocateAndInitializeChunk(),
|
||||
std::memory_order_relaxed);
|
||||
});
|
||||
|
||||
return FindAndAllocateFreeSlot(chunk->next_chunk);
|
||||
}
|
||||
|
||||
template <bool IsDestructibleForTestingP = IsDestructibleForTesting>
|
||||
typename std::enable_if<IsDestructibleForTestingP>::type
|
||||
TearDownForTesting() {
|
||||
// The destructor must be called outside of the allocation path. Therefore,
|
||||
// it is secure to verify with CHECK.
|
||||
|
||||
// All accessing threads must be terminated by now. For additional security
|
||||
// we tear down the TLS system first. This way we ensure that
|
||||
// MarkSlotAsFree is not called anymore and we have no accesses from the
|
||||
// TLS system's side.
|
||||
CHECK(dereference(tls_system_).TearDownForTesting());
|
||||
|
||||
// Delete all data chunks.
|
||||
for (auto* chunk = root_.load(); chunk != nullptr;) {
|
||||
auto* next_chunk = chunk->next_chunk.load();
|
||||
FreeAndDeallocateChunkForTesting(chunk);
|
||||
chunk = next_chunk;
|
||||
}
|
||||
}
|
||||
|
||||
// Reset a single item to its default value.
|
||||
// Since items are re-used, they may be accessed from different threads,
|
||||
// causing TSan to trigger. Therefore, the reset is exempt from TSan
|
||||
// instrumentation.
|
||||
DISABLE_TSAN_INSTRUMENTATION void Reset(PayloadType& item) { item = {}; }
|
||||
|
||||
AllocatorType allocator_;
|
||||
TLSSystemType tls_system_;
|
||||
std::atomic<Chunk*> const root_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
// The ThreadLocalStorage visible to the user. This uses the internal default
|
||||
// allocator and TLS system.
|
||||
template <typename StorageType,
|
||||
typename AllocatorType = internal::DefaultAllocator,
|
||||
typename TLSSystemType = internal::DefaultTLSSystem,
|
||||
size_t AllocationChunkSize = AllocatorType::AllocationChunkSize,
|
||||
bool IsDestructibleForTesting = false>
|
||||
using ThreadLocalStorage =
|
||||
internal::ThreadLocalStorage<StorageType,
|
||||
AllocatorType,
|
||||
TLSSystemType,
|
||||
AllocationChunkSize,
|
||||
IsDestructibleForTesting>;
|
||||
|
||||
} // namespace base::allocator::dispatcher
|
||||
|
||||
#undef TLS_RAW_CHECK_IMPL
|
||||
#undef TLS_RAW_CHECK
|
||||
#undef STR
|
||||
#undef STR_HELPER
|
||||
|
||||
#endif // USE_LOCAL_TLS_EMULATION()
|
||||
#endif // BASE_ALLOCATOR_DISPATCHER_TLS_H_
|
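To illustrate how the template above is meant to be consumed, here is a
minimal sketch (the payload struct, instance name, and instance id are
invented for illustration; it assumes the default allocator/TLS system and a
constructor that forwards the id to Initialize()):

  struct AllocationCounter {
    size_t count = 0;
  };

  // One slot per thread; slots of finished threads are recycled and wiped by
  // Reset() before reuse.
  base::allocator::dispatcher::ThreadLocalStorage<AllocationCounter> counters(
      "allocation_counters");

  void OnAllocation() {
    ++counters.GetThreadLocalData()->count;
  }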
266
src/base/allocator/early_zone_registration_apple.cc
Normal file
@ -0,0 +1,266 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/early_zone_registration_apple.h"

#include <mach/mach.h>
#include <malloc/malloc.h>
#include <string.h>

#include "partition_alloc/buildflags.h"
#include "partition_alloc/shim/early_zone_registration_constants.h"

// BASE_EXPORT tends to be defined as soon as anything from //base is included.
#if defined(BASE_EXPORT)
#error "This file cannot depend on //base"
#endif

namespace partition_alloc {

#if !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void EarlyMallocZoneRegistration() {}
void AllowDoublePartitionAllocZoneRegistration() {}

#else

extern "C" {
// abort_report_np() records the message in a special section that both the
// system CrashReporter and Crashpad collect in crash reports. See also in
// chrome_exe_main_mac.cc.
void abort_report_np(const char* fmt, ...);
}

namespace {

malloc_zone_t* GetDefaultMallocZone() {
  // malloc_default_zone() does not return... the default zone, but the
  // initial one. The default one is the first element of the default zone
  // array.
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result = malloc_get_all_zones(
      mach_task_self(), /*reader=*/nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS) {
    abort_report_np("Cannot enumerate malloc() zones");
  }
  return reinterpret_cast<malloc_zone_t*>(zones[0]);
}

}  // namespace

void EarlyMallocZoneRegistration() {
  // Must have static storage duration, as raw pointers are passed to
  // libsystem_malloc.
  static malloc_zone_t g_delegating_zone;
  static malloc_introspection_t g_delegating_zone_introspect;
  static malloc_zone_t* g_default_zone;

  // Make sure that the default zone is instantiated.
  malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();

  g_default_zone = GetDefaultMallocZone();

  // The delegating zone:
  // - Forwards all allocations to the existing default zone
  // - Does *not* claim to own any memory, meaning that it will always be
  //   skipped in free() in libsystem_malloc.dylib.
  //
  // This is a temporary zone, until it gets replaced by PartitionAlloc, inside
  // the main library. Since the main library depends on many external
  // libraries, we cannot install PartitionAlloc as the default zone without
  // concurrency issues.
  //
  // Instead, what we do here, while the process is single-threaded, is:
  // - Register the delegating zone as the default one.
  // - Set the original (libsystem_malloc's) one as the second zone.
  //
  // Later, when PartitionAlloc initializes, we replace the default
  // (delegating) zone with ours. The end state is:
  // 1. PartitionAlloc zone
  // 2. libsystem_malloc zone

  // Set up of the delegating zone. Note that it doesn't just forward calls to
  // the default zone. This is because the system zone's malloc_zone_t pointer
  // actually points to a larger struct, containing allocator metadata. So if
  // we pass as the first parameter the "simple" delegating zone pointer, then
  // we immediately crash inside the system zone functions. So we need to
  // replace the zone pointer as well.
  //
  // Calls fall into 4 categories:
  // - Allocation calls: forwarded to the real system zone
  // - "Is this pointer yours" calls: always answer no
  // - free(): Should never be called, but is in practice, see comments below.
  // - Diagnostics and debugging: these are typically called for every
  //   zone. They are no-ops for us, as we don't want to double-count, or lock
  //   the data structures of the real zone twice.

  // Allocation: Forward to the real zone.
  g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->malloc(g_default_zone, size);
  };
  g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
                                size_t size) {
    return g_default_zone->calloc(g_default_zone, num_items, size);
  };
  g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->valloc(g_default_zone, size);
  };
  g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
    return g_default_zone->realloc(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
                                      void** results, unsigned num_requested) {
    return g_default_zone->batch_malloc(g_default_zone, size, results,
                                        num_requested);
  };
  g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
                                  size_t size) {
    return g_default_zone->memalign(g_default_zone, alignment, size);
  };

  // Does ptr belong to this zone? Return value is != 0 if so.
  g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
    return 0;
  };

  // Free functions.
  // The normal path for freeing memory is:
  // 1. Try all zones in order, call zone->size(ptr)
  // 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size)
  // 3. If no zone matches, crash.
  //
  // Since this zone always returns 0 in size() (see above), zone->free()
  // should never be called. Unfortunately, this is not the case, as some
  // places in CoreFoundation call malloc_zone_free(zone, ptr) directly. So
  // rather than crashing, forward the call. It's the caller's responsibility
  // to use the same zone for free() as for the allocation (this is in the
  // contract of malloc_zone_free()).
  //
  // However, note that the sequence of calls size() -> free() is not possible
  // for this zone, as size() always returns 0.
  g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
    return g_default_zone->free(g_default_zone, ptr);
  };
  g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
                                            size_t size) {
    return g_default_zone->free_definite_size(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
                                    unsigned num_to_be_freed) {
    return g_default_zone->batch_free(g_default_zone, to_be_freed,
                                      num_to_be_freed);
  };
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
  g_delegating_zone.try_free_default = [](malloc_zone_t* zone, void* ptr) {
    return g_default_zone->try_free_default(g_default_zone, ptr);
  };
#endif

  // Diagnostics and debugging.
  //
  // Do nothing to reduce memory footprint, the real
  // zone will do it.
  g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
                                         size_t goal) -> size_t { return 0; };

  // Introspection calls are not all optional, for instance locking and
  // unlocking before/after fork() is not optional.
  //
  // Nothing to enumerate.
  g_delegating_zone_introspect.enumerator =
      [](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
         memory_reader_t reader,
         vm_range_recorder_t recorder) -> kern_return_t {
    return KERN_SUCCESS;
  };
  // Need to provide a real implementation, it is used for e.g. array sizing.
  g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
                                              size_t size) {
    return g_default_zone->introspect->good_size(g_default_zone, size);
  };
  // Nothing to do.
  g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
    return true;
  };
  g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
                                          boolean_t verbose) {};
  g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
  // Do not forward the lock / unlock calls. Since the default zone is still
  // there, we should not lock here, as it would lock the zone twice (all
  // zones are locked before fork()). Rather, do nothing, since this fake
  // zone does not need any locking.
  g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
  // No stats.
  g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
                                               malloc_statistics_t* stats) {};
  // We are not locked.
  g_delegating_zone_introspect.zone_locked =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  // Don't support discharge checking.
  g_delegating_zone_introspect.enable_discharge_checking =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  g_delegating_zone_introspect.disable_discharge_checking =
      [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
                                              void* memory) {};

  // Could use something lower to support fewer functions, but this is
  // consistent with the real zone installed by PartitionAlloc.
  g_delegating_zone.version = allocator_shim::kZoneVersion;
  g_delegating_zone.introspect = &g_delegating_zone_introspect;
  // This name is used in PartitionAlloc's initialization to determine whether
  // it should replace the delegating zone.
  g_delegating_zone.zone_name = allocator_shim::kDelegatingZoneName;

  // Register puts the new zone at the end, unregister swaps the new zone with
  // the last one.
  // The zone array is, after these lines, in order:
  // 1. |g_default_zone|...|g_delegating_zone|
  // 2. |g_delegating_zone|...| (no more default)
  // 3. |g_delegating_zone|...|g_default_zone|
  malloc_zone_register(&g_delegating_zone);
  malloc_zone_unregister(g_default_zone);
  malloc_zone_register(g_default_zone);

  // Make sure that the purgeable zone is after the default one.
  // Will make g_default_zone take the purgeable zone spot.
  malloc_zone_unregister(purgeable_zone);
  // Add back the purgeable zone as the last one.
  malloc_zone_register(purgeable_zone);

  // Final configuration:
  // |g_delegating_zone|...|g_default_zone|purgeable_zone|

  // Sanity check.
  if (GetDefaultMallocZone() != &g_delegating_zone) {
    abort_report_np("Failed to install the delegating zone as default.");
  }
}

void AllowDoublePartitionAllocZoneRegistration() {
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result = malloc_get_all_zones(
      mach_task_self(), /*reader=*/nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS) {
    abort_report_np("Cannot enumerate malloc() zones");
  }

  // If PartitionAlloc is one of the zones, *change* its name so that
  // registration can happen multiple times. This works because zone
  // registration only keeps a pointer to the struct, it does not copy the
  // data.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    if (zone->zone_name &&
        strcmp(zone->zone_name, allocator_shim::kPartitionAllocZoneName) ==
            0) {
      zone->zone_name = "RenamedPartitionAlloc";
      break;
    }
  }
}

#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}  // namespace partition_alloc
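The three register/unregister calls above are easiest to follow as a worked
trace of the zone array (a sketch of the documented libsystem_malloc
behavior: registering appends a zone, unregistering swaps the removed zone
with the last one):

  start:                             |default|Z1|Z2|
  malloc_zone_register(delegating):  |default|Z1|Z2|delegating|
  malloc_zone_unregister(default):   |delegating|Z1|Z2|
  malloc_zone_register(default):     |delegating|Z1|Z2|default|

The same swap trick is then repeated for the purgeable zone so that it ends
up last again.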
29
src/base/allocator/early_zone_registration_apple.h
Normal file
@ -0,0 +1,29 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_
#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_

// This is an Apple-only file, used to register PartitionAlloc's zone *before*
// the process becomes multi-threaded.

namespace partition_alloc {

// Must be called *once*, *before* the process becomes multi-threaded.
void EarlyMallocZoneRegistration();

// Tricks the registration code into believing that PartitionAlloc was not
// already registered. This allows a future library load to register
// PartitionAlloc's zone as well, rather than bailing out.
//
// This is mutually exclusive with EarlyMallocZoneRegistration(), and should
// ideally be removed. Indeed, by allowing two zones to be registered, we
// still end up with a split heap, and more memory usage.
//
// This is a hack for https://crbug.com/1274236.
void AllowDoublePartitionAllocZoneRegistration();

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_
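A hypothetical call site (the embedder's entry point is not part of this
diff): the registration has to happen at the very top of the executable's
main(), before any library spins up a thread.

  int main(int argc, char** argv) {
    // Single-threaded at this point, so swapping malloc zones is safe.
    partition_alloc::EarlyMallocZoneRegistration();
    // ... load the main library, which later installs PartitionAlloc ...
  }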
src/base/allocator/early_zone_registration_mac.cc
@ -1,256 +0,0 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/early_zone_registration_mac.h"

#include <mach/mach.h>
#include <malloc/malloc.h>
#include <string.h>

#include "base/allocator/buildflags.h"

// BASE_EXPORT tends to be defined as soon as anything from //base is included.
#if defined(BASE_EXPORT)
#error "This file cannot depend on //base"
#endif

namespace partition_alloc {

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void EarlyMallocZoneRegistration() {}
void AllowDoublePartitionAllocZoneRegistration() {}

#else

extern "C" {
// abort_report_np() records the message in a special section that both the
// system CrashReporter and Crashpad collect in crash reports. See also in
// chrome_exe_main_mac.cc.
void abort_report_np(const char* fmt, ...);
}

namespace {
malloc_zone_t* GetDefaultMallocZone() {
  // malloc_default_zone() does not return... the default zone, but the
  // initial one. The default one is the first element of the default zone
  // array.
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS)
    abort_report_np("Cannot enumerate malloc() zones");
  return reinterpret_cast<malloc_zone_t*>(zones[0]);
}

}  // namespace

void EarlyMallocZoneRegistration() {
  // Must have static storage duration, as raw pointers are passed to
  // libsystem_malloc.
  static malloc_zone_t g_delegating_zone;
  static malloc_introspection_t g_delegating_zone_introspect;
  static malloc_zone_t* g_default_zone;

  // Make sure that the default zone is instantiated.
  malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();

  g_default_zone = GetDefaultMallocZone();

  // The delegating zone:
  // - Forwards all allocations to the existing default zone
  // - Does *not* claim to own any memory, meaning that it will always be
  //   skipped in free() in libsystem_malloc.dylib.
  //
  // This is a temporary zone, until it gets replaced by PartitionAlloc, inside
  // the main library. Since the main library depends on many external
  // libraries, we cannot install PartitionAlloc as the default zone without
  // concurrency issues.
  //
  // Instead, what we do here, while the process is single-threaded, is:
  // - Register the delegating zone as the default one.
  // - Set the original (libsystem_malloc's) one as the second zone.
  //
  // Later, when PartitionAlloc initializes, we replace the default
  // (delegating) zone with ours. The end state is:
  // 1. PartitionAlloc zone
  // 2. libsystem_malloc zone

  // Set up of the delegating zone. Note that it doesn't just forward calls to
  // the default zone. This is because the system zone's malloc_zone_t pointer
  // actually points to a larger struct, containing allocator metadata. So if
  // we pass as the first parameter the "simple" delegating zone pointer, then
  // we immediately crash inside the system zone functions. So we need to
  // replace the zone pointer as well.
  //
  // Calls fall into 4 categories:
  // - Allocation calls: forwarded to the real system zone
  // - "Is this pointer yours" calls: always answer no
  // - free(): Should never be called, but is in practice, see comments below.
  // - Diagnostics and debugging: these are typically called for every
  //   zone. They are no-ops for us, as we don't want to double-count, or lock
  //   the data structures of the real zone twice.

  // Allocation: Forward to the real zone.
  g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->malloc(g_default_zone, size);
  };
  g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
                                size_t size) {
    return g_default_zone->calloc(g_default_zone, num_items, size);
  };
  g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->valloc(g_default_zone, size);
  };
  g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
    return g_default_zone->realloc(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
                                      void** results, unsigned num_requested) {
    return g_default_zone->batch_malloc(g_default_zone, size, results,
                                        num_requested);
  };
  g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
                                  size_t size) {
    return g_default_zone->memalign(g_default_zone, alignment, size);
  };

  // Does ptr belong to this zone? Return value is != 0 if so.
  g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
    return 0;
  };

  // Free functions.
  // The normal path for freeing memory is:
  // 1. Try all zones in order, call zone->size(ptr)
  // 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size)
  // 3. If no zone matches, crash.
  //
  // Since this zone always returns 0 in size() (see above), zone->free()
  // should never be called. Unfortunately, this is not the case, as some
  // places in CoreFoundation call malloc_zone_free(zone, ptr) directly. So
  // rather than crashing, forward the call. It's the caller's responsibility
  // to use the same zone for free() as for the allocation (this is in the
  // contract of malloc_zone_free()).
  //
  // However, note that the sequence of calls size() -> free() is not possible
  // for this zone, as size() always returns 0.
  g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
    return g_default_zone->free(g_default_zone, ptr);
  };
  g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
                                            size_t size) {
    return g_default_zone->free_definite_size(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
                                    unsigned num_to_be_freed) {
    return g_default_zone->batch_free(g_default_zone, to_be_freed,
                                      num_to_be_freed);
  };

  // Diagnostics and debugging.
  //
  // Do nothing to reduce memory footprint, the real
  // zone will do it.
  g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
                                         size_t goal) -> size_t { return 0; };

  // Introspection calls are not all optional, for instance locking and
  // unlocking before/after fork() is not optional.
  //
  // Nothing to enumerate.
  g_delegating_zone_introspect.enumerator =
      [](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
         memory_reader_t reader,
         vm_range_recorder_t recorder) -> kern_return_t {
    return KERN_SUCCESS;
  };
  // Need to provide a real implementation, it is used for e.g. array sizing.
  g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
                                              size_t size) {
    return g_default_zone->introspect->good_size(g_default_zone, size);
  };
  // Nothing to do.
  g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
    return true;
  };
  g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
                                          boolean_t verbose) {};
  g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
  // Do not forward the lock / unlock calls. Since the default zone is still
  // there, we should not lock here, as it would lock the zone twice (all
  // zones are locked before fork()). Rather, do nothing, since this fake
  // zone does not need any locking.
  g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
  // No stats.
  g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
                                               malloc_statistics_t* stats) {};
  // We are not locked.
  g_delegating_zone_introspect.zone_locked =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  // Don't support discharge checking.
  g_delegating_zone_introspect.enable_discharge_checking =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  g_delegating_zone_introspect.disable_discharge_checking =
      [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
                                              void* memory) {};

  // Could use something lower to support fewer functions, but this is
  // consistent with the real zone installed by PartitionAlloc.
  g_delegating_zone.version = kZoneVersion;
  g_delegating_zone.introspect = &g_delegating_zone_introspect;
  // This name is used in PartitionAlloc's initialization to determine whether
  // it should replace the delegating zone.
  g_delegating_zone.zone_name = kDelegatingZoneName;

  // Register puts the new zone at the end, unregister swaps the new zone with
  // the last one.
  // The zone array is, after these lines, in order:
  // 1. |g_default_zone|...|g_delegating_zone|
  // 2. |g_delegating_zone|...| (no more default)
  // 3. |g_delegating_zone|...|g_default_zone|
  malloc_zone_register(&g_delegating_zone);
  malloc_zone_unregister(g_default_zone);
  malloc_zone_register(g_default_zone);

  // Make sure that the purgeable zone is after the default one.
  // Will make g_default_zone take the purgeable zone spot.
  malloc_zone_unregister(purgeable_zone);
  // Add back the purgeable zone as the last one.
  malloc_zone_register(purgeable_zone);

  // Final configuration:
  // |g_delegating_zone|...|g_default_zone|purgeable_zone|

  // Sanity check.
  if (GetDefaultMallocZone() != &g_delegating_zone)
    abort_report_np("Failed to install the delegating zone as default.");
}

void AllowDoublePartitionAllocZoneRegistration() {
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS)
    abort_report_np("Cannot enumerate malloc() zones");

  // If PartitionAlloc is one of the zones, *change* its name so that
  // registration can happen multiple times. This works because zone
  // registration only keeps a pointer to the struct, it does not copy the
  // data.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    if (zone->zone_name &&
        strcmp(zone->zone_name, kPartitionAllocZoneName) == 0) {
      zone->zone_name = "RenamedPartitionAlloc";
      break;
    }
  }
}

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}  // namespace partition_alloc
src/base/allocator/early_zone_registration_mac.h
@ -1,37 +0,0 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_

// This is an Apple-only file, used to register PartitionAlloc's zone *before*
// the process becomes multi-threaded.

namespace partition_alloc {

static constexpr char kDelegatingZoneName[] =
    "DelegatingDefaultZoneForPartitionAlloc";
static constexpr char kPartitionAllocZoneName[] = "PartitionAlloc";

// Zone version. Determines which callbacks are set in the various
// malloc_zone_t structs.
constexpr int kZoneVersion = 9;

// Must be called *once*, *before* the process becomes multi-threaded.
void EarlyMallocZoneRegistration();

// Tricks the registration code into believing that PartitionAlloc was not
// already registered. This allows a future library load to register
// PartitionAlloc's zone as well, rather than bailing out.
//
// This is mutually exclusive with EarlyMallocZoneRegistration(), and should
// ideally be removed. Indeed, by allowing two zones to be registered, we
// still end up with a split heap, and more memory usage.
//
// This is a hack for crbug.com/1274236.
void AllowDoublePartitionAllocZoneRegistration();

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
src/base/allocator/malloc_zone_functions_mac.cc
@ -1,119 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/malloc_zone_functions_mac.h"

#include <atomic>

#include "base/synchronization/lock.h"

namespace base {
namespace allocator {

MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
static_assert(std::is_pod<MallocZoneFunctions>::value,
              "MallocZoneFunctions must be POD");

void StoreZoneFunctions(const ChromeMallocZone* zone,
                        MallocZoneFunctions* functions) {
  memset(functions, 0, sizeof(MallocZoneFunctions));
  functions->malloc = zone->malloc;
  functions->calloc = zone->calloc;
  functions->valloc = zone->valloc;
  functions->free = zone->free;
  functions->realloc = zone->realloc;
  functions->size = zone->size;
  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc && functions->size);

  // These functions might be nullptr.
  functions->batch_malloc = zone->batch_malloc;
  functions->batch_free = zone->batch_free;

  if (zone->version >= 5) {
    // Not all custom malloc zones have a memalign.
    functions->memalign = zone->memalign;
  }
  if (zone->version >= 6) {
    // This may be nullptr.
    functions->free_definite_size = zone->free_definite_size;
  }

  // Note that zone version 8 introduced a pressure relief callback, and
  // version 10 introduced a claimed address callback, but neither are
  // allocation or deallocation callbacks and so aren't important to
  // intercept.

  functions->context = zone;
}

namespace {

// All modifications to g_malloc_zones are gated behind this lock.
// Dispatch to a malloc zone does not need to acquire this lock.
base::Lock& GetLock() {
  static base::Lock* g_lock = new base::Lock;
  return *g_lock;
}

void EnsureMallocZonesInitializedLocked() {
  GetLock().AssertAcquired();
}

int g_zone_count = 0;

bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
  EnsureMallocZonesInitializedLocked();
  GetLock().AssertAcquired();
  for (int i = 0; i < g_zone_count; ++i) {
    if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone))
      return true;
  }
  return false;
}

}  // namespace

bool StoreMallocZone(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  if (IsMallocZoneAlreadyStoredLocked(zone))
    return false;

  if (g_zone_count == kMaxZoneCount)
    return false;

  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
  ++g_zone_count;

  // No other thread can possibly see these stores at this point. The code
  // that reads these values is triggered after this function returns, so we
  // want to guarantee that they are committed at this stage.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return true;
}

bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  return IsMallocZoneAlreadyStoredLocked(zone);
}

bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
                                 const MallocZoneFunctions* functions) {
  return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
}

int GetMallocZoneCountForTesting() {
  base::AutoLock l(GetLock());
  return g_zone_count;
}

void ClearAllMallocZonesForTesting() {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
  g_zone_count = 0;
}

}  // namespace allocator
}  // namespace base
src/base/allocator/malloc_zone_functions_mac.h
@ -1,103 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
#define BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_

#include <malloc/malloc.h>
#include <stddef.h>

#include "base/base_export.h"
#include "base/immediate_crash.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);
typedef unsigned (*batch_malloc_type)(struct _malloc_zone_t* zone,
                                      size_t size,
                                      void** results,
                                      unsigned num_requested);
typedef void (*batch_free_type)(struct _malloc_zone_t* zone,
                                void** to_be_freed,
                                unsigned num_to_be_freed);
typedef void (*free_definite_size_type)(struct _malloc_zone_t* zone,
                                        void* ptr,
                                        size_t size);
typedef size_t (*size_fn_type)(struct _malloc_zone_t* zone, const void* ptr);

struct MallocZoneFunctions {
  malloc_type malloc;
  calloc_type calloc;
  valloc_type valloc;
  free_type free;
  realloc_type realloc;
  memalign_type memalign;
  batch_malloc_type batch_malloc;
  batch_free_type batch_free;
  free_definite_size_type free_definite_size;
  size_fn_type size;
  const ChromeMallocZone* context;
};

BASE_EXPORT void StoreZoneFunctions(const ChromeMallocZone* zone,
                                    MallocZoneFunctions* functions);
static constexpr int kMaxZoneCount = 30;
BASE_EXPORT extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];

// The array g_malloc_zones stores all information about malloc zones before
// they are shimmed. This information needs to be accessed during dispatch
// back into the zone, and additional zones may be added later in the
// execution of the program, so the array needs to be both thread-safe and
// high-performance.
//
// We begin by creating an array of MallocZoneFunctions of fixed size. We will
// never modify the container, which provides thread-safety to iterators. When
// we want to add a MallocZoneFunctions to the container, we:
// 1. Fill in all the fields.
// 2. Update the total zone count.
// 3. Insert a memory barrier.
// 4. Insert our shim.
//
// Each MallocZoneFunctions is uniquely identified by |context|, which is a
// pointer to the original malloc zone. When we wish to dispatch back to the
// original malloc zones, we iterate through the array, looking for a matching
// |context|.
//
// Most allocations go through the default allocator. We will ensure that the
// default allocator is stored as the first MallocZoneFunctions.
//
// Returns whether the zone was successfully stored.
BASE_EXPORT bool StoreMallocZone(ChromeMallocZone* zone);
BASE_EXPORT bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
BASE_EXPORT bool DoesMallocZoneNeedReplacing(
    ChromeMallocZone* zone,
    const MallocZoneFunctions* functions);

BASE_EXPORT int GetMallocZoneCountForTesting();
BASE_EXPORT void ClearAllMallocZonesForTesting();

inline MallocZoneFunctions& GetFunctionsForZone(void* zone) {
  for (unsigned int i = 0; i < kMaxZoneCount; ++i) {
    if (g_malloc_zones[i].context == zone)
      return g_malloc_zones[i];
  }
  IMMEDIATE_CRASH();
}

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
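The lookup-by-|context| scheme in the deleted header is easiest to see from
the shim side. A minimal sketch (ShimFree is a hypothetical caller, not part
of the diff):

  void ShimFree(ChromeMallocZone* zone, void* ptr) {
    // |zone| is the original zone the allocation came from; the table entry
    // stored for it holds the unshimmed function pointers.
    base::allocator::MallocZoneFunctions& functions =
        base::allocator::GetFunctionsForZone(zone);
    functions.free(reinterpret_cast<struct _malloc_zone_t*>(zone), ptr);
  }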
76
src/base/allocator/miracle_parameter.cc
Normal file
@ -0,0 +1,76 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/miracle_parameter.h"

#include "base/command_line.h"
#include "base/strings/strcat.h"
#include "base/system/sys_info.h"

namespace base::miracle_parameter {

std::string GetParamNameWithSuffix(const std::string& param_name) {
  // `base::SysInfo::AmountOfPhysicalMemoryMB()` refers to CommandLine
  // internally. If the CommandLine is not initialized, we return early to
  // avoid a crash.
  if (!base::CommandLine::InitializedForCurrentProcess()) {
    return param_name;
  }
  int physical_memory_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
  const char* suffix =
      physical_memory_mb < kMiracleParameterMemory512MB ? "ForLessThan512MB"
      : physical_memory_mb < kMiracleParameterMemory1GB ? "For512MBTo1GB"
      : physical_memory_mb < kMiracleParameterMemory2GB ? "For1GBTo2GB"
      : physical_memory_mb < kMiracleParameterMemory4GB ? "For2GBTo4GB"
      : physical_memory_mb < kMiracleParameterMemory8GB ? "For4GBTo8GB"
      : physical_memory_mb < kMiracleParameterMemory16GB ? "For8GBTo16GB"
                                                         : "For16GBAndAbove";
  return base::StrCat({param_name, suffix});
}

std::string GetMiracleParameterAsString(const base::Feature& feature,
                                        const std::string& param_name,
                                        const std::string& default_value) {
  return GetFieldTrialParamByFeatureAsString(
      feature, GetParamNameWithSuffix(param_name),
      GetFieldTrialParamByFeatureAsString(feature, param_name, default_value));
}

double GetMiracleParameterAsDouble(const base::Feature& feature,
                                   const std::string& param_name,
                                   double default_value) {
  return base::GetFieldTrialParamByFeatureAsDouble(
      feature, GetParamNameWithSuffix(param_name),
      base::GetFieldTrialParamByFeatureAsDouble(feature, param_name,
                                                default_value));
}

int GetMiracleParameterAsInt(const base::Feature& feature,
                             const std::string& param_name,
                             int default_value) {
  return base::GetFieldTrialParamByFeatureAsInt(
      feature, GetParamNameWithSuffix(param_name),
      base::GetFieldTrialParamByFeatureAsInt(feature, param_name,
                                             default_value));
}

bool GetMiracleParameterAsBool(const base::Feature& feature,
                               const std::string& param_name,
                               bool default_value) {
  return base::GetFieldTrialParamByFeatureAsBool(
      feature, GetParamNameWithSuffix(param_name),
      base::GetFieldTrialParamByFeatureAsBool(feature, param_name,
                                              default_value));
}

base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
                                               const std::string& param_name,
                                               base::TimeDelta default_value) {
  return base::GetFieldTrialParamByFeatureAsTimeDelta(
      feature, GetParamNameWithSuffix(param_name),
      base::GetFieldTrialParamByFeatureAsTimeDelta(feature, param_name,
                                                   default_value));
}

}  // namespace base::miracle_parameter
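A worked example of the suffix selection above (feature and parameter names
are invented for illustration):

  // On a device with 3 GB of physical memory:
  //   GetParamNameWithSuffix("MaxCacheSize") == "MaxCacheSizeFor2GBTo4GB"
  int size = base::miracle_parameter::GetMiracleParameterAsInt(
      kMyFeature, "MaxCacheSize", /*default_value=*/64);
  // Lookup order: "MaxCacheSizeFor2GBTo4GB", then "MaxCacheSize", then 64.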
177
src/base/allocator/miracle_parameter.h
Normal file
@ -0,0 +1,177 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
#define BASE_ALLOCATOR_MIRACLE_PARAMETER_H_

#include "base/base_export.h"
#include "base/containers/span.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

// This is a mirror copy of the //components/miracle_parameter/ to resolve the
// dependency cycle of (base->miracle_parameter->base).
// Eventually the miracle_parameter component will have a public interface in
// //base/ and this could be removed.
// TODO(crbug.com/40279826): remove miracle_parameter from
// //base/allocator/.

namespace base {

namespace miracle_parameter {

namespace {

template <typename Enum>
Enum GetFieldTrialParamByFeatureAsEnum(
    const base::Feature& feature,
    const std::string& param_name,
    const Enum default_value,
    const base::span<const typename base::FeatureParam<Enum>::Option>&
        options) {
  std::string string_value =
      base::GetFieldTrialParamValueByFeature(feature, param_name);
  if (string_value.empty()) {
    return default_value;
  }

  for (const auto& option : options) {
    if (string_value == option.name) {
      return option.value;
    }
  }

  base::LogInvalidEnumValue(feature, param_name, string_value,
                            static_cast<int>(default_value));
  return default_value;
}

}  // namespace

constexpr int kMiracleParameterMemory512MB = 512;
constexpr int kMiracleParameterMemory1GB = 1024;
constexpr int kMiracleParameterMemory2GB = 2 * 1024;
constexpr int kMiracleParameterMemory4GB = 4 * 1024;
constexpr int kMiracleParameterMemory8GB = 8 * 1024;
constexpr int kMiracleParameterMemory16GB = 16 * 1024;

// GetParamNameWithSuffix appends a suffix to the parameter name based on
// the amount of physical memory.
//
// - "ForLessThan512MB" for less than 512MB memory devices.
// - "For512MBTo1GB" for 512MB to 1GB memory devices.
// - "For1GBTo2GB" for 1GB to 2GB memory devices.
// - "For2GBTo4GB" for 2GB to 4GB memory devices.
// - "For4GBTo8GB" for 4GB to 8GB memory devices.
// - "For8GBTo16GB" for 8GB to 16GB memory devices.
// - "For16GBAndAbove" for 16GB memory and above devices.
BASE_EXPORT
std::string GetParamNameWithSuffix(const std::string& param_name);

// Provides similar behavior to FeatureParam<std::string> except the return
// value is determined by the amount of physical memory.
BASE_EXPORT
std::string GetMiracleParameterAsString(const base::Feature& feature,
                                        const std::string& param_name,
                                        const std::string& default_value);

// Provides similar behavior to FeatureParam<double> except the return value
// is determined by the amount of physical memory.
BASE_EXPORT
double GetMiracleParameterAsDouble(const base::Feature& feature,
                                   const std::string& param_name,
                                   double default_value);

// Provides similar behavior to FeatureParam<int> except the return value is
// determined by the amount of physical memory.
BASE_EXPORT
int GetMiracleParameterAsInt(const base::Feature& feature,
                             const std::string& param_name,
                             int default_value);

// Provides similar behavior to FeatureParam<bool> except the return value
// is determined by the amount of physical memory.
BASE_EXPORT
bool GetMiracleParameterAsBool(const base::Feature& feature,
                               const std::string& param_name,
                               bool default_value);

// Provides similar behavior to FeatureParam<base::TimeDelta> except the
// return value is determined by the amount of physical memory.
BASE_EXPORT
base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
                                               const std::string& param_name,
                                               base::TimeDelta default_value);

// Provides similar behavior to FeatureParam<Enum> except the return value
// is determined by the amount of physical memory.
template <typename Enum>
Enum GetMiracleParameterAsEnum(
    const base::Feature& feature,
    const std::string& param_name,
    const Enum default_value,
    const base::span<const typename base::FeatureParam<Enum>::Option>
        options) {
  return GetFieldTrialParamByFeatureAsEnum(
      feature, GetParamNameWithSuffix(param_name),
      GetFieldTrialParamByFeatureAsEnum(feature, param_name, default_value,
                                        options),
      options);
}

#define MIRACLE_PARAMETER_FOR_STRING(function_name, feature, param_name,    \
                                     default_value)                         \
  std::string function_name() {                                             \
    static const std::string value =                                        \
        miracle_parameter::GetMiracleParameterAsString(feature, param_name, \
                                                       default_value);      \
    return value;                                                           \
  }

#define MIRACLE_PARAMETER_FOR_DOUBLE(function_name, feature, param_name,    \
                                     default_value)                         \
  double function_name() {                                                  \
    static const double value =                                             \
        miracle_parameter::GetMiracleParameterAsDouble(feature, param_name, \
                                                       default_value);      \
    return value;                                                           \
  }

#define MIRACLE_PARAMETER_FOR_INT(function_name, feature, param_name,     \
                                  default_value)                          \
  int function_name() {                                                   \
    static const int value = miracle_parameter::GetMiracleParameterAsInt( \
        feature, param_name, default_value);                              \
    return value;                                                         \
  }

#define MIRACLE_PARAMETER_FOR_BOOL(function_name, feature, param_name,      \
                                   default_value)                           \
  bool function_name() {                                                    \
    static const bool value = miracle_parameter::GetMiracleParameterAsBool( \
        feature, param_name, default_value);                                \
    return value;                                                           \
  }

#define MIRACLE_PARAMETER_FOR_TIME_DELTA(function_name, feature, param_name,  \
                                         default_value)                       \
  base::TimeDelta function_name() {                                           \
    static const base::TimeDelta value =                                      \
        miracle_parameter::GetMiracleParameterAsTimeDelta(feature,            \
                                                          param_name,         \
                                                          default_value);     \
    return value;                                                             \
  }

#define MIRACLE_PARAMETER_FOR_ENUM(function_name, feature, param_name,      \
                                   default_value, type, options)            \
  type function_name() {                                                    \
    static const type value = miracle_parameter::GetMiracleParameterAsEnum( \
        feature, param_name, default_value, base::span(options));           \
    return value;                                                           \
  }

}  // namespace miracle_parameter

}  // namespace base

#endif  // BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
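A usage sketch of the macros above (feature and parameter names are invented
for illustration):

  BASE_FEATURE(kMyTuning, "MyTuning", base::FEATURE_ENABLED_BY_DEFAULT);

  // Declares `int GetMyBufferSize()` with a process-lifetime cached value.
  MIRACLE_PARAMETER_FOR_INT(GetMyBufferSize, kMyTuning, "BufferSize", 1024)

  // The generated getter checks the memory-suffixed param (e.g.
  // "BufferSizeFor4GBTo8GB") first, then plain "BufferSize", then 1024.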
src/base/allocator/partition_alloc_features.cc
@ -1,116 +1,500 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_alloc_features.h"

#include "base/allocator/miracle_parameter.h"
#include "base/base_export.h"
#include "base/feature_list.h"
#include "base/features.h"
#include "base/metrics/field_trial_params.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/thread_cache.h"

namespace base {
namespace features {
namespace base::features {

#if defined(PA_ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
const Feature kPartitionAllocPCScan{"PartitionAllocPCScan",
                                    FEATURE_DISABLED_BY_DEFAULT};
#endif  // defined(PA_ALLOW_PCSCAN)
namespace {

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
const Feature kPartitionAllocPCScanBrowserOnly{
    "PartitionAllocPCScanBrowserOnly", FEATURE_DISABLED_BY_DEFAULT};
static constexpr char kPAFeatureEnabledProcessesStr[] = "enabled-processes";
static constexpr char kBrowserOnlyStr[] = "browser-only";
static constexpr char kBrowserAndRendererStr[] = "browser-and-renderer";
static constexpr char kNonRendererStr[] = "non-renderer";
static constexpr char kAllProcessesStr[] = "all-processes";

// If enabled, PCScan is turned on only for the renderer's malloc partition.
const Feature kPartitionAllocPCScanRendererOnly{
    "PartitionAllocPCScanRendererOnly", FEATURE_DISABLED_BY_DEFAULT};
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
static constexpr char kRendererOnlyStr[] = "renderer-only";
static constexpr char kAllChildProcessesStr[] = "all-child-processes";
#endif  // PA_CONFIG(ENABLE_SHADOW_METADATA)

// If enabled, this instance belongs to the Control group of the BackupRefPtr
// binary experiment.
const Feature kPartitionAllocBackupRefPtrControl{
    "PartitionAllocBackupRefPtrControl", FEATURE_DISABLED_BY_DEFAULT};
}  // namespace

// Use a larger maximum thread cache cacheable bucket size.
const Feature kPartitionAllocLargeThreadCacheSize{
    "PartitionAllocLargeThreadCacheSize",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
    // Not unconditionally enabled on 32 bit Android, since it is a more
    // memory-constrained platform.
    FEATURE_DISABLED_BY_DEFAULT
#else
    FEATURE_ENABLED_BY_DEFAULT
#endif
};
BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr,
             "PartitionAllocUnretainedDanglingPtr",
             FEATURE_ENABLED_BY_DEFAULT);

constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
    kUnretainedDanglingPtrModeOption[] = {
        {UnretainedDanglingPtrMode::kCrash, "crash"},
        {UnretainedDanglingPtrMode::kDumpWithoutCrashing,
         "dump_without_crashing"},
};
// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<UnretainedDanglingPtrMode>
    kUnretainedDanglingPtrModeParam = {
        &kPartitionAllocUnretainedDanglingPtr,
        "mode",
        UnretainedDanglingPtrMode::kCrash,
        &kUnretainedDanglingPtrModeOption,
};
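
An enum param like the one above is read through FeatureParam<T>::Get(), which falls back to the declared default when the feature is disabled or the param is absent. A minimal sketch; the ResolveUnretainedDanglingPtrMode() helper is hypothetical, not part of this file:

  UnretainedDanglingPtrMode ResolveUnretainedDanglingPtrMode() {
    // Returns kCrash unless the field trial supplies
    // "mode"="dump_without_crashing".
    return kUnretainedDanglingPtrModeParam.Get();
  }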

const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing{
    "PartitionAllocLargeEmptySlotSpanRing", FEATURE_DISABLED_BY_DEFAULT};
// Note: DPD conflicts with no-op `free()` (see
// `base::allocator::MakeFreeNoOp()`). No-op `free()` stands down in the
// presence of DPD, but hypothetically fully launching DPD should prompt
// a rethink of no-op `free()`.
BASE_FEATURE(kPartitionAllocDanglingPtr,
             "PartitionAllocDanglingPtr",
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG)
             FEATURE_ENABLED_BY_DEFAULT
#else
             FEATURE_DISABLED_BY_DEFAULT
#endif
);

const Feature kPartitionAllocBackupRefPtr{"PartitionAllocBackupRefPtr",
                                          FEATURE_DISABLED_BY_DEFAULT};
constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
    {DanglingPtrMode::kCrash, "crash"},
    {DanglingPtrMode::kLogOnly, "log_only"},
};
// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
    &kPartitionAllocDanglingPtr,
    "mode",
    DanglingPtrMode::kCrash,
    &kDanglingPtrModeOption,
};
constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
    {DanglingPtrType::kAll, "all"},
    {DanglingPtrType::kCrossTask, "cross_task"},
};
// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
    &kPartitionAllocDanglingPtr,
    "type",
    DanglingPtrType::kAll,
    &kDanglingPtrTypeOption,
};

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Use a larger maximum thread cache cacheable bucket size.
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
             "PartitionAllocLargeThreadCacheSize",
             FEATURE_ENABLED_BY_DEFAULT);

MIRACLE_PARAMETER_FOR_INT(GetPartitionAllocLargeThreadCacheSizeValue,
                          kPartitionAllocLargeThreadCacheSize,
                          "PartitionAllocLargeThreadCacheSizeValue",
                          ::partition_alloc::kThreadCacheLargeSizeThreshold)

MIRACLE_PARAMETER_FOR_INT(
    GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid,
    kPartitionAllocLargeThreadCacheSize,
    "PartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid",
    ::partition_alloc::kThreadCacheDefaultSizeThreshold)
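
MIRACLE_PARAMETER_FOR_INT generates a getter function named by its first argument, so callers obtain the effective threshold without touching FieldTrialParams directly. A hedged usage sketch; the call site shown is illustrative, not from this file:

  // Somewhere in allocator configuration code (illustrative):
  const int large_size_threshold =
      GetPartitionAllocLargeThreadCacheSizeValue();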

BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
             "PartitionAllocLargeEmptySlotSpanRing",
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
             FEATURE_ENABLED_BY_DEFAULT);
#else
             FEATURE_DISABLED_BY_DEFAULT);
#endif

BASE_FEATURE(kPartitionAllocWithAdvancedChecks,
             "PartitionAllocWithAdvancedChecks",
             FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>::Option
    kPartitionAllocWithAdvancedChecksEnabledProcessesOptions[] = {
        {PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
         kBrowserOnlyStr},
        {PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserAndRenderer,
         kBrowserAndRendererStr},
        {PartitionAllocWithAdvancedChecksEnabledProcesses::kNonRenderer,
         kNonRendererStr},
        {PartitionAllocWithAdvancedChecksEnabledProcesses::kAllProcesses,
         kAllProcessesStr}};
// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
    kPartitionAllocWithAdvancedChecksEnabledProcessesParam{
        &kPartitionAllocWithAdvancedChecks, kPAFeatureEnabledProcessesStr,
        PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
        &kPartitionAllocWithAdvancedChecksEnabledProcessesOptions};

BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine,
             "PartitionAllocSchedulerLoopQuarantine",
             FEATURE_DISABLED_BY_DEFAULT);
// Scheduler Loop Quarantine's per-branch capacity in bytes.
// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<int>
    kPartitionAllocSchedulerLoopQuarantineBranchCapacity{
        &kPartitionAllocSchedulerLoopQuarantine,
        "PartitionAllocSchedulerLoopQuarantineBranchCapacity", 0};
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
BASE_FEATURE_PARAM(int,
                   kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity,
                   &kPartitionAllocSchedulerLoopQuarantine,
                   "PartitionAllocSchedulerLoopQuarantineBrowserUICapacity",
                   0);
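
Both quarantine capacities are plain integer params expressed in bytes, and the zero default leaves the quarantine effectively empty unless an experiment supplies a capacity. A sketch of how a capacity might be read at a hypothetical call site:

  const size_t capacity_in_bytes = static_cast<size_t>(
      kPartitionAllocSchedulerLoopQuarantineBranchCapacity.Get());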

BASE_FEATURE(kPartitionAllocZappingByFreeFlags,
             "PartitionAllocZappingByFreeFlags",
             FEATURE_DISABLED_BY_DEFAULT);

BASE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory,
             "PartitionAllocEventuallyZeroFreedMemory",
             FEATURE_DISABLED_BY_DEFAULT);

BASE_FEATURE(kPartitionAllocFewerMemoryRegions,
             "PartitionAllocFewerMemoryRegions",
             FEATURE_DISABLED_BY_DEFAULT);
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

BASE_FEATURE(kPartitionAllocBackupRefPtr,
             "PartitionAllocBackupRefPtr",
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG)
             FEATURE_ENABLED_BY_DEFAULT
#else
             FEATURE_DISABLED_BY_DEFAULT
#endif
);

constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
    kBackupRefPtrEnabledProcessesOptions[] = {
        {BackupRefPtrEnabledProcesses::kBrowserOnly, "browser-only"},
        {BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
         "browser-and-renderer"},
        {BackupRefPtrEnabledProcesses::kNonRenderer, "non-renderer"},
        {BackupRefPtrEnabledProcesses::kAllProcesses, "all-processes"}};
constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
    kBackupRefPtrEnabledProcessesOptions[] = {
        {BackupRefPtrEnabledProcesses::kBrowserOnly, kBrowserOnlyStr},
        {BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
         kBrowserAndRendererStr},
        {BackupRefPtrEnabledProcesses::kNonRenderer, kNonRendererStr},
        {BackupRefPtrEnabledProcesses::kAllProcesses, kAllProcessesStr}};

const base::FeatureParam<BackupRefPtrEnabledProcesses>
    kBackupRefPtrEnabledProcessesParam{
        &kPartitionAllocBackupRefPtr, "enabled-processes",
        BackupRefPtrEnabledProcesses::kBrowserOnly,
        &kBackupRefPtrEnabledProcessesOptions};
BASE_FEATURE_ENUM_PARAM(BackupRefPtrEnabledProcesses,
                        kBackupRefPtrEnabledProcessesParam,
                        &kPartitionAllocBackupRefPtr,
                        kPAFeatureEnabledProcessesStr,
#if PA_BUILDFLAG(IS_MAC) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
                        BackupRefPtrEnabledProcesses::kNonRenderer,
#else
                        BackupRefPtrEnabledProcesses::kAllProcesses,
#endif
                        &kBackupRefPtrEnabledProcessesOptions);
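
For reference, these enum params resolve their value by matching the supplied string against the Option table. Using Chromium's standard feature-param command-line syntax, an override would look roughly like the following sketch; the binary name is illustrative:

  chrome --enable-features="PartitionAllocBackupRefPtr:enabled-processes/browser-only"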

constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
    {BackupRefPtrMode::kDisabled, "disabled"},
    {BackupRefPtrMode::kEnabled, "enabled"},
    {BackupRefPtrMode::kDisabledButSplitPartitions2Way,
     "disabled-but-2-way-split"},
    {BackupRefPtrMode::kDisabledButSplitPartitions3Way,
     "disabled-but-3-way-split"},
};

const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
    &kPartitionAllocBackupRefPtr, "brp-mode", BackupRefPtrMode::kEnabled,
    &kBackupRefPtrModeOptions};
BASE_FEATURE_ENUM_PARAM(BackupRefPtrMode,
                        kBackupRefPtrModeParam,
                        &kPartitionAllocBackupRefPtr,
                        "brp-mode",
                        BackupRefPtrMode::kEnabled,
                        &kBackupRefPtrModeOptions);
// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<int> kBackupRefPtrExtraExtrasSizeParam{
    &kPartitionAllocBackupRefPtr, "brp-extra-extras-size", 0};

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

// If enabled, switches the bucket distribution to an alternate one. The
// alternate distribution must have buckets that are a subset of the default
// one.
const Feature kPartitionAllocUseAlternateDistribution{
    "PartitionAllocUseAlternateDistribution", FEATURE_DISABLED_BY_DEFAULT};

// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself.
const Feature kPartitionAllocPCScanMUAwareScheduler{
    "PartitionAllocPCScanMUAwareScheduler", FEATURE_ENABLED_BY_DEFAULT};

// If enabled, PCScan unconditionally frees all quarantined objects.
// This is a performance testing feature.
const Feature kPartitionAllocPCScanImmediateFreeing{
    "PartitionAllocPCScanImmediateFreeing", FEATURE_DISABLED_BY_DEFAULT};

// If enabled, PCScan clears eagerly (synchronously) on free().
const Feature kPartitionAllocPCScanEagerClearing{
    "PartitionAllocPCScanEagerClearing", FEATURE_DISABLED_BY_DEFAULT};

// In addition to the heap, also scan the stack of the current mutator.
const Feature kPartitionAllocPCScanStackScanning {
    "PartitionAllocPCScanStackScanning",
#if defined(PA_PCSCAN_STACK_SUPPORTED)
    FEATURE_ENABLED_BY_DEFAULT
#else
    FEATURE_DISABLED_BY_DEFAULT
#endif  // defined(PA_PCSCAN_STACK_SUPPORTED)
};
BASE_FEATURE(kPartitionAllocMemoryTagging,
             "PartitionAllocMemoryTagging",
#if PA_BUILDFLAG(USE_FULL_MTE) || BUILDFLAG(IS_ANDROID)
             FEATURE_ENABLED_BY_DEFAULT
#else
             FEATURE_DISABLED_BY_DEFAULT
#endif
);

constexpr FeatureParam<MemtagMode>::Option kMemtagModeOptions[] = {
    {MemtagMode::kSync, "sync"},
    {MemtagMode::kAsync, "async"}};

// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<MemtagMode> kMemtagModeParam{
    &kPartitionAllocMemoryTagging, "memtag-mode",
#if PA_BUILDFLAG(USE_FULL_MTE)
    MemtagMode::kSync,
#else
    MemtagMode::kAsync,
#endif
    &kMemtagModeOptions};

constexpr FeatureParam<RetagMode>::Option kRetagModeOptions[] = {
    {RetagMode::kIncrement, "increment"},
    {RetagMode::kRandom, "random"},
};

const Feature kPartitionAllocDCScan{"PartitionAllocDCScan",
                                    FEATURE_DISABLED_BY_DEFAULT};
// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<RetagMode> kRetagModeParam{
    &kPartitionAllocMemoryTagging, "retag-mode", RetagMode::kIncrement,
    &kRetagModeOptions};

}  // namespace features
}  // namespace base
constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option
    kMemoryTaggingEnabledProcessesOptions[] = {
        {MemoryTaggingEnabledProcesses::kBrowserOnly, kBrowserOnlyStr},
        {MemoryTaggingEnabledProcesses::kNonRenderer, kNonRendererStr},
        {MemoryTaggingEnabledProcesses::kAllProcesses, kAllProcessesStr}};

// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<MemoryTaggingEnabledProcesses>
    kMemoryTaggingEnabledProcessesParam{
        &kPartitionAllocMemoryTagging, kPAFeatureEnabledProcessesStr,
#if PA_BUILDFLAG(USE_FULL_MTE)
        MemoryTaggingEnabledProcesses::kAllProcesses,
#else
        MemoryTaggingEnabledProcesses::kNonRenderer,
#endif
        &kMemoryTaggingEnabledProcessesOptions};

BASE_FEATURE(kKillPartitionAllocMemoryTagging,
             "KillPartitionAllocMemoryTagging",
             FEATURE_DISABLED_BY_DEFAULT);

BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
BASE_FEATURE(kPartitionAllocPermissiveMte,
             "PartitionAllocPermissiveMte",
#if PA_BUILDFLAG(USE_FULL_MTE)
             // We want to actually crash if USE_FULL_MTE is enabled.
             FEATURE_DISABLED_BY_DEFAULT
#else
             FEATURE_ENABLED_BY_DEFAULT
#endif
);

BASE_FEATURE(kAsanBrpDereferenceCheck,
             "AsanBrpDereferenceCheck",
             FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE(kAsanBrpExtractionCheck,
             "AsanBrpExtractionCheck",  // Not much noise at the moment to
             FEATURE_DISABLED_BY_DEFAULT);  // enable by default.
BASE_FEATURE(kAsanBrpInstantiationCheck,
             "AsanBrpInstantiationCheck",
             FEATURE_ENABLED_BY_DEFAULT);

// If enabled, switches the bucket distribution to a denser one.
//
// We enable this by default everywhere except for 32-bit Android, since we saw
// regressions there.
BASE_FEATURE(kPartitionAllocUseDenserDistribution,
             "PartitionAllocUseDenserDistribution",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
             FEATURE_DISABLED_BY_DEFAULT
#else
             FEATURE_ENABLED_BY_DEFAULT
#endif  // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
);
const FeatureParam<BucketDistributionMode>::Option
    kPartitionAllocBucketDistributionOption[] = {
        {BucketDistributionMode::kDefault, "default"},
        {BucketDistributionMode::kDenser, "denser"},
};
// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<BucketDistributionMode>
    kPartitionAllocBucketDistributionParam{
        &kPartitionAllocUseDenserDistribution, "mode",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
        BucketDistributionMode::kDefault,
#else
        BucketDistributionMode::kDenser,
#endif  // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
        &kPartitionAllocBucketDistributionOption};
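
A minimal consumer sketch for the param above; the helper name is hypothetical and assumes the usual FeatureList initialization has already happened:

  bool UseDenserBucketDistribution() {
    return kPartitionAllocBucketDistributionParam.Get() ==
           BucketDistributionMode::kDenser;
  }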

BASE_FEATURE(kPartitionAllocMemoryReclaimer,
             "PartitionAllocMemoryReclaimer",
             FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE_PARAM(TimeDelta,
                   kPartitionAllocMemoryReclaimerInterval,
                   &kPartitionAllocMemoryReclaimer,
                   "interval",
                   TimeDelta()  // Defaults to zero.
);
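
The zero default acts as a sentinel for "use the built-in reclaim cadence". As far as Chromium's standard param parsing goes, a TimeDelta param can be overridden with a duration string, so a trial or command-line override might look like this sketch:

  --enable-features="PartitionAllocMemoryReclaimer:interval/10s"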

// Configures whether we set a lower limit for renderers that do not have a main
// frame, similar to the limit that is already done for backgrounded renderers.
BASE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers,
             "LowerPAMemoryLimitForNonMainRenderers",
             FEATURE_DISABLED_BY_DEFAULT);

// Whether to straighten free lists for larger slot spans in PurgeMemory() ->
// ... -> PartitionPurgeSlotSpan().
BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
             "PartitionAllocStraightenLargerSlotSpanFreeLists",
             FEATURE_ENABLED_BY_DEFAULT);
const FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>::
    Option kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
        {partition_alloc::StraightenLargerSlotSpanFreeListsMode::
             kOnlyWhenUnprovisioning,
         "only-when-unprovisioning"},
        {partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways,
         "always"},
};
// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<
    partition_alloc::StraightenLargerSlotSpanFreeListsMode>
    kPartitionAllocStraightenLargerSlotSpanFreeListsMode = {
        &kPartitionAllocStraightenLargerSlotSpanFreeLists,
        "mode",
        partition_alloc::StraightenLargerSlotSpanFreeListsMode::
            kOnlyWhenUnprovisioning,
        &kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption,
};

// Whether to sort free lists for smaller slot spans in PurgeMemory().
BASE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists,
             "PartitionAllocSortSmallerSlotSpanFreeLists",
             FEATURE_ENABLED_BY_DEFAULT);

// Whether to sort the active slot spans in PurgeMemory().
BASE_FEATURE(kPartitionAllocSortActiveSlotSpans,
             "PartitionAllocSortActiveSlotSpans",
             FEATURE_DISABLED_BY_DEFAULT);

#if BUILDFLAG(IS_WIN)
// Whether to retry allocations when commit fails.
BASE_FEATURE(kPageAllocatorRetryOnCommitFailure,
             "PageAllocatorRetryOnCommitFailure",
             FEATURE_DISABLED_BY_DEFAULT);
#endif

#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// A parameter controlling whether PartitionAllocSupport is excluded from
// PartialLowEndModeOnMidRangeDevices. This is used to see how it affects
// renderer performance, e.g. the blink_perf.parser benchmark.
// The feature kPartialLowEndModeOnMidRangeDevices is defined in
// //base/features.cc. Since the following feature param is related to
// PartitionAlloc, define the param here.
BASE_FEATURE_PARAM(bool,
                   kPartialLowEndModeExcludePartitionAllocSupport,
                   &kPartialLowEndModeOnMidRangeDevices,
                   "exclude-partition-alloc-support",
                   false);
#endif
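
A sketch of how this exclusion bit might be consulted; the call site is hypothetical, and kPartialLowEndModeOnMidRangeDevices comes from //base/features.cc as noted above:

  if (FeatureList::IsEnabled(kPartialLowEndModeOnMidRangeDevices) &&
      !kPartialLowEndModeExcludePartitionAllocSupport.Get()) {
    // Apply the partial low-end-mode savings to PartitionAllocSupport too.
  }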

BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier,
             "EnableConfigurableThreadCacheMultiplier",
             base::FEATURE_DISABLED_BY_DEFAULT);

MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplier,
                             kEnableConfigurableThreadCacheMultiplier,
                             "ThreadCacheMultiplier",
                             2.)

MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplierForAndroid,
                             kEnableConfigurableThreadCacheMultiplier,
                             "ThreadCacheMultiplierForAndroid",
                             1.)

constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
    TimeDelta time_delta) {
  return partition_alloc::internal::base::Microseconds(
      time_delta.InMicroseconds());
}

constexpr TimeDelta FromPartitionAllocTimeDelta(
    partition_alloc::internal::base::TimeDelta time_delta) {
  return Microseconds(time_delta.InMicroseconds());
}
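
Both helpers convert between base::TimeDelta and PartitionAlloc's internal TimeDelta by round-tripping through a microsecond count, so the mapping is lossless at microsecond granularity. A compile-time sketch, assuming both TimeDelta types keep constexpr equality as their constexpr constructors suggest:

  static_assert(FromPartitionAllocTimeDelta(
                    ToPartitionAllocTimeDelta(Seconds(2))) == Seconds(2));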

BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
             "EnableConfigurableThreadCachePurgeInterval",
             FEATURE_DISABLED_BY_DEFAULT);

MIRACLE_PARAMETER_FOR_TIME_DELTA(
    GetThreadCacheMinPurgeIntervalValue,
    kEnableConfigurableThreadCachePurgeInterval,
    "ThreadCacheMinPurgeInterval",
    FromPartitionAllocTimeDelta(partition_alloc::kMinPurgeInterval))

MIRACLE_PARAMETER_FOR_TIME_DELTA(
    GetThreadCacheMaxPurgeIntervalValue,
    kEnableConfigurableThreadCachePurgeInterval,
    "ThreadCacheMaxPurgeInterval",
    FromPartitionAllocTimeDelta(partition_alloc::kMaxPurgeInterval))

MIRACLE_PARAMETER_FOR_TIME_DELTA(
    GetThreadCacheDefaultPurgeIntervalValue,
    kEnableConfigurableThreadCachePurgeInterval,
    "ThreadCacheDefaultPurgeInterval",
    FromPartitionAllocTimeDelta(partition_alloc::kDefaultPurgeInterval))

const partition_alloc::internal::base::TimeDelta
GetThreadCacheMinPurgeInterval() {
  return ToPartitionAllocTimeDelta(GetThreadCacheMinPurgeIntervalValue());
}

const partition_alloc::internal::base::TimeDelta
GetThreadCacheMaxPurgeInterval() {
  return ToPartitionAllocTimeDelta(GetThreadCacheMaxPurgeIntervalValue());
}

const partition_alloc::internal::base::TimeDelta
GetThreadCacheDefaultPurgeInterval() {
  return ToPartitionAllocTimeDelta(GetThreadCacheDefaultPurgeIntervalValue());
}

BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
             "EnableConfigurableThreadCacheMinCachedMemoryForPurging",
             FEATURE_DISABLED_BY_DEFAULT);

MIRACLE_PARAMETER_FOR_INT(
    GetThreadCacheMinCachedMemoryForPurgingBytes,
    kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
    "ThreadCacheMinCachedMemoryForPurgingBytes",
    partition_alloc::kMinCachedMemoryForPurgingBytes)

// An apparent quarantine leak in the buffer partition unacceptably
// bloats memory when MiraclePtr is enabled in the renderer process.
// We believe we have found and patched the leak, but out of an
// abundance of caution, we provide this toggle that allows us to
// wholly disable MiraclePtr in the buffer partition, if necessary.
//
// TODO(crbug.com/40064499): this is unneeded once
// MiraclePtr-for-Renderer launches.
BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
             "PartitionAllocDisableBRPInBufferPartition",
             FEATURE_DISABLED_BY_DEFAULT);

BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
             "PartitionAllocAdjustSizeWhenInForeground",
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
             FEATURE_ENABLED_BY_DEFAULT);
#else
             FEATURE_DISABLED_BY_DEFAULT);
#endif

BASE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans,
             "PartitionAllocUseSmallSingleSlotSpans",
             FEATURE_ENABLED_BY_DEFAULT);

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
BASE_FEATURE(kPartitionAllocShadowMetadata,
             "PartitionAllocShadowMetadata",
             FEATURE_DISABLED_BY_DEFAULT);

constexpr FeatureParam<ShadowMetadataEnabledProcesses>::Option
    kShadowMetadataEnabledProcessesOptions[] = {
        {ShadowMetadataEnabledProcesses::kRendererOnly, kRendererOnlyStr},
        {ShadowMetadataEnabledProcesses::kAllChildProcesses,
         kAllChildProcessesStr}};

// Note: Do not use the prepared macro here, as no local cache is needed.
constinit const FeatureParam<ShadowMetadataEnabledProcesses>
    kShadowMetadataEnabledProcessesParam{
        &kPartitionAllocShadowMetadata, kPAFeatureEnabledProcessesStr,
        ShadowMetadataEnabledProcesses::kRendererOnly,
        &kShadowMetadataEnabledProcessesOptions};
#endif  // PA_CONFIG(ENABLE_SHADOW_METADATA)

}  // namespace base::features