Compare commits

...

No commits in common. "v127.0.6533.64-1" and "master" have entirely different histories.

16,498 changed files with 1,311,323 additions and 674,564 deletions


@@ -49,7 +49,7 @@ jobs:
wget https://snapshot.debian.org/archive/debian/20230611T210420Z/pool/main/q/qemu/qemu-user-static_8.0%2Bdfsg-4_amd64.deb
fi
cache-toolchains-win:
runs-on: windows-2019
runs-on: windows-2022
steps:
- uses: actions/checkout@v4
- name: Cache toolchains
@@ -179,9 +179,13 @@ jobs:
abi: armeabi-v7a
env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="android"'
BUNDLE: naiveproxy-plugin-${{ github.event.release.tag_name || 'v1' }}-${{ matrix.abi }}.apk
BUNDLE: naiveproxy-plugin-${{ github.event.release.tag_name || 'v1.1.1.1-1' }}-${{ matrix.abi }}.apk
steps:
- uses: actions/checkout@v4
- uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: 17
- name: Cache toolchains (Linux, OpenWrt, Android)
uses: actions/cache@v4
with:
@@ -236,7 +240,7 @@ jobs:
working-directory: apk
env:
APK_ABI: ${{ matrix.abi }}
APK_VERSION_NAME: ${{ github.event.release.tag_name || 'v1' }}
APK_VERSION_NAME: ${{ github.event.release.tag_name || 'v1.1.1.1-1' }}
KEYSTORE_PASS: ${{ secrets.KEYSTORE_PASS }}
run: |
mkdir -p app/libs/$APK_ABI
@@ -256,7 +260,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
win:
needs: cache-toolchains-win
runs-on: windows-2019
runs-on: windows-2022
strategy:
fail-fast: false
matrix:
@@ -371,40 +375,6 @@ jobs:
run: gh release upload "${GITHUB_REF##*/}" ${{ env.BUNDLE }}.tar.xz --clobber
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ios:
needs: cache-toolchains-mac
runs-on: macos-13
strategy:
fail-fast: false
matrix:
arch: [arm64]
env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="ios" ios_enable_code_signing=false'
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- name: Cache toolchains and PGO
uses: actions/cache@v4
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/chrome/build/pgo_profiles/chrome-mac-*
src/gn/
key: toolchains-pgo-mac-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- id: ccache-timestamp
run: echo "CCACHE_TIMESTAMP=$(date +%s)" >>$GITHUB_OUTPUT
- name: Cache ccache files
uses: actions/cache@v4
with:
path: ~/Library/Caches/ccache
key: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
restore-keys: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- run: brew install ninja ccache
- run: pip install setuptools
- run: ./get-clang.sh
- run: ccache -z
- run: ./build.sh
- run: ccache -s
openwrt:
needs: cache-toolchains-posix
runs-on: ubuntu-22.04
@@ -416,7 +386,7 @@ jobs:
openwrt: "target=x86 subtarget=64"
target_cpu: x64
- arch: x86
openwrt: "target=x86 subtarget=generic"
openwrt: "target=x86 subtarget=geode"
target_cpu: x86
- arch: aarch64_cortex-a53
openwrt: "target=sunxi subtarget=cortexa53"
@@ -425,7 +395,9 @@ jobs:
- arch: aarch64_cortex-a53-static
openwrt: "target=sunxi subtarget=cortexa53"
target_cpu: arm64
extra: 'arm_cpu="cortex-a53" build_static=true no_madvise_syscall=true'
extra: 'arm_cpu="cortex-a53" build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: aarch64_cortex-a72
openwrt: "target=mvebu subtarget=cortexa72"
target_cpu: arm64
@@ -433,16 +405,26 @@ jobs:
- arch: aarch64_cortex-a72-static
openwrt: "target=mvebu subtarget=cortexa72"
target_cpu: arm64
extra: 'arm_cpu="cortex-a72" build_static=true no_madvise_syscall=true'
extra: 'arm_cpu="cortex-a72" build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: aarch64_cortex-a76
openwrt: "target=bcm27xx subtarget=bcm2712"
target_cpu: arm64
extra: 'arm_cpu="cortex-a76"'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: aarch64_generic
openwrt: "target=rockchip subtarget=armv8"
openwrt: "target=layerscape subtarget=armv8_64b"
target_cpu: arm64
- arch: aarch64_generic-static
openwrt: "target=rockchip subtarget=armv8"
openwrt: "target=layerscape subtarget=armv8_64b"
target_cpu: arm64
extra: "build_static=true no_madvise_syscall=true"
extra: "build_static=true use_allocator_shim=false use_partition_alloc=false"
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: arm_arm1176jzf-s_vfp
openwrt: "target=bcm27xx subtarget=bcm2708"
openwrt: "target=brcm2708 subtarget=bcm2708"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="arm1176jzf-s" arm_fpu="vfp" arm_float_abi="hard" arm_use_neon=false arm_use_thumb=false'
- arch: arm_arm926ej-s
@@ -450,29 +432,35 @@ jobs:
target_cpu: arm
extra: 'arm_version=0 arm_cpu="arm926ej-s" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false'
- arch: arm_cortex-a15_neon-vfpv4
openwrt: "target=armsr subtarget=armv7"
openwrt: "target=ipq806x subtarget=generic"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a15" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a5_vfpv4
openwrt: "target=at91 subtarget=sama5"
openwrt: "target=at91 subtarget=sama5d3"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a5" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_cortex-a7
openwrt: "target=mediatek subtarget=mt7629"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_float_abi="soft" arm_use_neon=false'
openwrt_release: '21.02.0'
openwrt_gcc_ver: '8.4.0'
- arch: arm_cortex-a7_neon-vfpv4
openwrt: "target=sunxi subtarget=cortexa7"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a7_neon-vfpv4-static
openwrt: "target=sunxi subtarget=cortexa7"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: arm_cortex-a7_vfpv4
openwrt: "target=at91 subtarget=sama7"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_cortex-a7_neon-vfpv4-static
openwrt: "target=sunxi subtarget=cortexa7"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true build_static=true no_madvise_syscall=true'
openwrt_release: '22.03.0'
openwrt_gcc_ver: '11.2.0'
- arch: arm_cortex-a8_vfpv3
openwrt: "target=sunxi subtarget=cortexa8"
target_cpu: arm
@@ -484,13 +472,15 @@ jobs:
- arch: arm_cortex-a9-static
openwrt: "target=bcm53xx subtarget=generic"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false build_static=true no_madvise_syscall=true'
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: arm_cortex-a9_neon
openwrt: "target=zynq subtarget=generic"
openwrt: "target=imx6 subtarget=generic"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a9_vfpv3-d16
openwrt: "target=tegra subtarget=generic"
openwrt: "target=mvebu subtarget=cortexa9"
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="vfpv3-d16" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_mpcore
@@ -508,17 +498,21 @@ jobs:
- arch: mipsel_24kc-static
openwrt: "target=ramips subtarget=rt305x"
target_cpu: mipsel
extra: 'mips_arch_variant="r2" mips_float_abi="soft" build_static=true no_madvise_syscall=true'
extra: 'mips_arch_variant="r2" mips_float_abi="soft" build_static=true use_allocator_shim=false use_partition_alloc=false'
openwrt_release: '24.10.0'
openwrt_gcc_ver: '13.3.0'
- arch: mipsel_mips32
openwrt: "target=bcm47xx subtarget=generic"
openwrt: "target=brcm47xx subtarget=legacy"
target_cpu: mipsel
extra: 'mips_arch_variant="r1" mips_float_abi="soft"'
- arch: riscv64
openwrt: "target=sifiveu subtarget=generic"
target_cpu: riscv64
openwrt_release: '23.05.0'
openwrt_gcc_ver: '12.3.0'
env:
EXTRA_FLAGS: target_cpu="${{ matrix.target_cpu }}" target_os="openwrt" ${{ matrix.extra }}
OPENWRT_FLAGS: arch=${{ matrix.arch }} release=23.05.0 gcc_ver=12.3.0 ${{ matrix.openwrt }}
EXTRA_FLAGS: target_cpu="${{ matrix.target_cpu }}" target_os="openwrt" ${{ matrix.extra }} enable_shadow_metadata=false
OPENWRT_FLAGS: arch=${{ matrix.arch }} release=${{ matrix.openwrt_release || '18.06.0' }} gcc_ver=${{ matrix.openwrt_gcc_ver || '7.3.0' }} ${{ matrix.openwrt }}
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
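
For readers skimming the matrix above: the per-entry `openwrt_release` / `openwrt_gcc_ver` keys feed the `||` fallbacks in `OPENWRT_FLAGS`, so only entries that need a newer OpenWrt SDK pin one. A minimal Kotlin-script sketch of that resolution logic (illustration only; the function name is made up, the defaults and sample values come from the matrix and env block above):

// Sketch of release=${{ matrix.openwrt_release || '18.06.0' }} and
// gcc_ver=${{ matrix.openwrt_gcc_ver || '7.3.0' }}.
fun openwrtFlags(arch: String, target: String,
                 release: String? = null, gccVer: String? = null) =
    "arch=$arch release=${release ?: "18.06.0"} gcc_ver=${gccVer ?: "7.3.0"} $target"

// riscv64 pins 23.05.0/12.3.0; mipsel_mips32 falls back to the 18.06.0 defaults.
println(openwrtFlags("riscv64", "target=sifiveu subtarget=generic", "23.05.0", "12.3.0"))
println(openwrtFlags("mipsel_mips32", "target=brcm47xx subtarget=legacy"))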


@@ -1 +1 @@
127.0.6533.64
135.0.7049.38


@@ -21,7 +21,7 @@ The Naïve server here works as a forward proxy and a packet length padding laye
## Download NaïveProxy
Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [Exclave](https://github.com/dyhkwong/Exclave), [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Users should always use the latest version to keep signatures identical to Chrome.
@@ -82,7 +82,6 @@ Or `quic://user:pass@example.com`, if it works better. See also [parameter usage
* [v2rayN](https://github.com/2dust/v2rayN), GUI client, Windows
* [NekoBox for Android](https://github.com/MatsuriDayo/NekoBoxForAndroid), Proxy toolchain, Android
* [NekoRay / NekoBox For PC](https://github.com/MatsuriDayo/nekoray), Qt based GUI, Windows, Linux
* [Yet Another Shadow Socket](https://github.com/Chilledheart/yass), NaïveProxy-compatible forward proxy, Android, iOS, Windows, macOS, Linux, FreeBSD
## Notes for downstream
@@ -114,7 +113,7 @@ Further reads and writes after `kFirstPaddings` are unpadded to avoid performanc
### H2 RST_STREAM frame padding
In experiments, NaïveProxy tends to send too many RST_STREAM frames per session, an uncommon behavior from regular browsers. To solve this, an END_STREAM DATA frame padded with total length distributed in [48, 72] is prepended to the RST_STREAM frame so it looks like a HEADERS frame. The server often replies to this with a WINDOW_UPDATE because padding is accounted in flow control. Whether this results in a new uncommon behavior is still unclear.
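
A hedged sketch of the length choice described above, assuming a uniform draw over [48, 72] (Kotlin; this illustrates the documented behavior and is not NaïveProxy's actual implementation):

import kotlin.random.Random

// Total length for the END_STREAM DATA frame prepended to RST_STREAM,
// drawn from [48, 72]; nextInt's upper bound is exclusive, hence 73.
fun paddedDataFrameLength(): Int = Random.nextInt(48, 73)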
### H2 HEADERS frame padding
@@ -130,7 +129,7 @@ The first CONNECT request to a server cannot use "Fast Open" to send payload bef
## Changes from Chromium upstream
- Minimize source code and build size (1% of the original)
- Minimize source code and build size (0.3% of the original)
- Disable exceptions and RTTI, except on Mac and Android.
- Support OpenWrt builds
- (Android, Linux) Use the builtin verifier instead of the system verifier (drop dependency of NSS on Linux) and read the system trust store from (following Go's behavior in crypto/x509/root_unix.go and crypto/x509/root_linux.go):

apk/.gitignore vendored (1 line changed)

@@ -1,2 +1,3 @@
.gradle/
app/build/
app/libs/


@@ -4,7 +4,7 @@ plugins {
}
android {
namespace = "moe.matsuri.exe.naive"
namespace = "io.nekohasekai.sagernet.plugin.naive"
signingConfigs {
create("release") {
@@ -17,23 +17,21 @@ android {
buildTypes {
getByName("release") {
proguardFiles(
getDefaultProguardFile("proguard-android-optimize.txt"),
file("proguard-rules.pro")
)
isMinifyEnabled = true
signingConfig = signingConfigs.getByName("release")
}
}
compileSdk = 33
buildToolsVersion = "35.0.0"
compileSdk = 35
defaultConfig {
minSdk = 21
targetSdk = 33
minSdk = 24
targetSdk = 35
applicationId = "moe.matsuri.exe.naive"
versionCode = System.getenv("APK_VERSION_NAME").removePrefix("v").split(".")[0].toInt()
applicationId = "io.nekohasekai.sagernet.plugin.naive"
versionCode = System.getenv("APK_VERSION_NAME").removePrefix("v").split(".")[0].toInt() * 10 + System.getenv("APK_VERSION_NAME").removePrefix("v").split("-")[1].toInt()
versionName = System.getenv("APK_VERSION_NAME").removePrefix("v")
splits.abi {
isEnable = true
@@ -44,12 +42,8 @@ android {
}
compileOptions {
sourceCompatibility = JavaVersion.VERSION_1_8
targetCompatibility = JavaVersion.VERSION_1_8
}
kotlinOptions {
jvmTarget = "1.8"
sourceCompatibility = JavaVersion.VERSION_17
targetCompatibility = JavaVersion.VERSION_17
}
lint {
@@ -59,6 +53,10 @@ android {
warningsAsErrors = true
}
packaging {
jniLibs.useLegacyPackaging = true
}
applicationVariants.all {
outputs.all {
this as com.android.build.gradle.internal.api.BaseVariantOutputImpl
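
The rewritten versionCode expression above packs two values into one integer: the Chromium major version and the naiveproxy packaging revision. A worked Kotlin example, assuming release tags of the form v135.0.7049.38-1 (the CHROMIUM_VERSION in this diff plus a packaging suffix; the helper name is hypothetical):

// Mirrors the versionCode arithmetic in the diff above.
fun versionCodeOf(tag: String): Int {
    val name = tag.removePrefix("v")          // "135.0.7049.38-1"
    val major = name.split(".")[0].toInt()    // 135
    val revision = name.split("-")[1].toInt() // 1
    return major * 10 + revision              // 1351
}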


@@ -13,13 +13,12 @@
<application
android:allowBackup="false"
android:extractNativeLibs="true"
android:icon="@mipmap/ic_launcher"
android:label="Naïve For NekoBox"
android:label="Naïve Plugin"
android:roundIcon="@mipmap/ic_launcher_round">
<provider
android:name=".BinaryProvider"
android:authorities="moe.matsuri.exe.naive.BinaryProvider"
android:authorities="io.nekohasekai.sagernet.plugin.naive.BinaryProvider"
android:directBootAware="true"
android:exported="true"
tools:ignore="ExportedContentProvider">
@@ -29,7 +28,7 @@
<intent-filter>
<action android:name="io.nekohasekai.sagernet.plugin.ACTION_NATIVE_PLUGIN" />
<data
android:host="moe.matsuri.lite"
android:host="io.nekohasekai.sagernet"
android:path="/naive-plugin"
android:scheme="plugin" />
</intent-filter>


@@ -17,7 +17,7 @@
* *
******************************************************************************/
package moe.matsuri.exe.naive
package io.nekohasekai.sagernet.plugin.naive
import android.net.Uri
import android.os.ParcelFileDescriptor


@@ -5,8 +5,8 @@ buildscript {
mavenCentral()
}
dependencies {
classpath 'com.android.tools.build:gradle:7.3.1'
classpath 'org.jetbrains.kotlin:kotlin-gradle-plugin:1.6.10'
classpath 'com.android.tools.build:gradle:8.6.0'
classpath 'org.jetbrains.kotlin:kotlin-gradle-plugin:2.0.20'
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files

Binary file not shown.


@@ -1,6 +1,7 @@
#Thu Jan 27 22:42:44 HKT 2022
distributionBase=GRADLE_USER_HOME
distributionUrl=https\://services.gradle.org/distributions/gradle-7.4-bin.zip
distributionPath=wrapper/dists
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

apk/gradlew vendored (297 lines changed)

@@ -1,7 +1,7 @@
#!/usr/bin/env sh
#!/bin/sh
#
# Copyright 2015 the original author or authors.
# Copyright © 2015-2021 the original authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,69 +15,104 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
##############################################################################
##
## Gradle start up script for UN*X
##
#
# Gradle start up script for POSIX generated by Gradle.
#
# Important for running:
#
# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
# noncompliant, but you have some other compliant shell such as ksh or
# bash, then to run this script, type that shell name before the whole
# command line, like:
#
# ksh Gradle
#
# Busybox and similar reduced shells will NOT work, because this script
# requires all of these POSIX shell features:
# * functions;
# * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
# «${var#prefix}», «${var%suffix}», and «$( cmd )»;
# * compound commands having a testable exit status, especially «case»;
# * various built-in commands including «command», «set», and «ulimit».
#
# Important for patching:
#
# (2) This script targets any POSIX shell, so it avoids extensions provided
# by Bash, Ksh, etc; in particular arrays are avoided.
#
# The "traditional" practice of packing multiple parameters into a
# space-separated string is a well documented source of bugs and security
# problems, so this is (mostly) avoided, by progressively accumulating
# options in "$@", and eventually passing that to Java.
#
# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
# see the in-line comments for details.
#
# There are tweaks for specific operating systems such as AIX, CygWin,
# Darwin, MinGW, and NonStop.
#
# (3) This script is generated from the Groovy template
# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
# within the Gradle project.
#
# You can find Gradle at https://github.com/gradle/gradle/.
#
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
app_path=$0
# Need this for daisy-chained symlinks.
while
APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path
[ -h "$app_path" ]
do
ls=$( ls -ld "$app_path" )
link=${ls#*' -> '}
case $link in #(
/*) app_path=$link ;; #(
*) app_path=$APP_HOME$link ;;
esac
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# This is normally unused
# shellcheck disable=SC2034
APP_BASE_NAME=${0##*/}
# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s
' "$PWD" ) || exit
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
MAX_FD=maximum
warn () {
echo "$*"
}
} >&2
die () {
echo
echo "$*"
echo
exit 1
}
} >&2
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
case "$( uname )" in #(
CYGWIN* ) cygwin=true ;; #(
Darwin* ) darwin=true ;; #(
MSYS* | MINGW* ) msys=true ;; #(
NONSTOP* ) nonstop=true ;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
@@ -87,9 +122,9 @@ CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
JAVACMD=$JAVA_HOME/jre/sh/java
else
JAVACMD="$JAVA_HOME/bin/java"
JAVACMD=$JAVA_HOME/bin/java
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
@@ -98,88 +133,120 @@ Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
JAVACMD=java
if ! command -v java >/dev/null 2>&1
then
die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin or MSYS, switch paths to Windows format before running java
if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=`expr $i + 1`
done
case $i in
0) set -- ;;
1) set -- "$args0" ;;
2) set -- "$args0" "$args1" ;;
3) set -- "$args0" "$args1" "$args2" ;;
4) set -- "$args0" "$args1" "$args2" "$args3" ;;
5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
case $MAX_FD in #(
max*)
# In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked.
# shellcheck disable=SC2039,SC3045
MAX_FD=$( ulimit -H -n ) ||
warn "Could not query maximum file descriptor limit"
esac
case $MAX_FD in #(
'' | soft) :;; #(
*)
# In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked.
# shellcheck disable=SC2039,SC3045
ulimit -n "$MAX_FD" ||
warn "Could not set maximum file descriptor limit to $MAX_FD"
esac
fi
# Escape application args
save () {
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
echo " "
}
APP_ARGS=`save "$@"`
# Collect all arguments for the java command, stacking in reverse order:
# * args from the command line
# * the main class name
# * -classpath
# * -D...appname settings
# * --module-path (only if needed)
# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
# For Cygwin or MSYS, switch paths to Windows format before running java
if "$cygwin" || "$msys" ; then
APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )
JAVACMD=$( cygpath --unix "$JAVACMD" )
# Now convert the arguments - kludge to limit ourselves to /bin/sh
for arg do
if
case $arg in #(
-*) false ;; # don't mess with options #(
/?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath
[ -e "$t" ] ;; #(
*) false ;;
esac
then
arg=$( cygpath --path --ignore --mixed "$arg" )
fi
# Roll the args list around exactly as many times as the number of
# args, so each arg winds up back in the position where it started, but
# possibly modified.
#
# NB: a `for` loop captures its iteration list before it begins, so
# changing the positional parameters here affects neither the number of
# iterations, nor the values presented in `arg`.
shift # remove old arg
set -- "$@" "$arg" # push replacement arg
done
fi
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# Collect all arguments for the java command:
# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
# and any embedded shellness will be escaped.
# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
# treated as '${Hostname}' itself on the command line.
set -- \
"-Dorg.gradle.appname=$APP_BASE_NAME" \
-classpath "$CLASSPATH" \
org.gradle.wrapper.GradleWrapperMain \
"$@"
# Stop when "xargs" is not available.
if ! command -v xargs >/dev/null 2>&1
then
die "xargs is not available"
fi
# Use "xargs" to parse quoted args.
#
# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
#
# In Bash we could simply go:
#
# readarray ARGS < <( xargs -n1 <<<"$var" ) &&
# set -- "${ARGS[@]}" "$@"
#
# but POSIX shell has neither arrays nor command substitution, so instead we
# post-process each arg (as a line of input to sed) to backslash-escape any
# character that might be a shell metacharacter, then use eval to reverse
# that process (while maintaining the separation between arguments), and wrap
# the whole thing up as a single "set" statement.
#
# This will of course break if any of these variables contains a newline or
# an unmatched quote.
#
eval "set -- $(
printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
xargs -n1 |
sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
tr '\n' ' '
)" '"$@"'
exec "$JAVACMD" "$@"

apk/gradlew.bat vendored (new file, 94 lines)

@@ -0,0 +1,94 @@
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@rem SPDX-License-Identifier: Apache-2.0
@rem
@if "%DEBUG%"=="" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%"=="" set DIRNAME=.
@rem This is normally unused
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute
echo. 1>&2
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto execute
echo. 1>&2
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
:end
@rem End local scope for the variables with windows NT shell
if %ERRORLEVEL% equ 0 goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
set EXIT_CODE=%ERRORLEVEL%
if %EXIT_CODE% equ 0 set EXIT_CODE=1
if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
exit /b %EXIT_CODE%
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega


@@ -5,6 +5,6 @@ dependencyResolutionManagement {
mavenCentral()
}
}
rootProject.name = "Matsuri Plugins"
rootProject.name = "Naive Plugin"
include ':app'


@@ -28,10 +28,10 @@ IncludeCategories:
# LINT.IfChange(winheader)
- Regex: '^<objbase\.h>' # This has to be before initguid.h.
Priority: 1
- Regex: '^<(initguid|mmdeviceapi|windows|winsock2|ws2tcpip|shobjidl|atlbase|ole2|unknwn|tchar|ocidl)\.h>'
- Regex: '^<(atlbase|initguid|mmdeviceapi|ocidl|ole2|shobjidl|tchar|unknwn|windows|winsock2|winternl|ws2tcpip)\.h>'
Priority: 2
# LINT.ThenChange(/tools/add_header.py:winheader)
# UIAutomation*.h need to be after base/win/atl.h.
# UIAutomation*.h needs to be after base/win/atl.h.
# Note the low priority number.
- Regex: '^<UIAutomation.*\.h>'
Priority: 6
@@ -39,8 +39,11 @@ IncludeCategories:
- Regex: '^<.*\.h>'
Priority: 3
# C++ standard library headers.
- Regex: '^<.*'
- Regex: '^<.*>'
Priority: 4
# windows_h_disallowed.h should appear last. Note the low priority number.
- Regex: '"(.*/)?windows_h_disallowed\.h"'
Priority: 7
# Other libraries.
- Regex: '.*'
Priority: 5


@@ -90,7 +90,7 @@ no_check_targets = [
"//v8:v8_libplatform", # 2 errors
]
# These are the list of GN files that run exec_script. This whitelist exists
# These are the list of GN files that run exec_script. This allowlist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#
@@ -145,11 +145,11 @@ no_check_targets = [
# this situation much easier to create. if the build always lists the
# files and passes them to a script, it will always be correct.
exec_script_whitelist =
build_dotfile_settings.exec_script_whitelist +
exec_script_allowlist =
build_dotfile_settings.exec_script_allowlist +
angle_dotfile_settings.exec_script_whitelist +
[
# Whitelist entries for //build should go into
# Allowlist entries for //build should go into
# //build/dotfile_settings.gni instead, so that they can be shared
# with other repos. The entries in this list should be only for files
# in the Chromium repo outside of //build.


@@ -17,6 +17,7 @@ Aaron Jacobs <samusaaron3@gmail.com>
Aaron Leventhal <aaronlevbugs@gmail.com>
Aaron Randolph <aaron.randolph@gmail.com>
Aaryaman Vasishta <jem456.vasishta@gmail.com>
AbdAlRahman Gad <abdobngad@gmail.com>
Abdu Ameen <abdu.ameen000@gmail.com>
Abdullah Abu Tasneem <a.tasneem@samsung.com>
Abhijeet Kandalkar <abhijeet.k@samsung.com>
@@ -48,6 +49,7 @@ Ahmed Elwasefi <a.m.elwasefi@gmail.com>
Ahmet Emir Ercin <ahmetemiremir@gmail.com>
Aidarbek Suleimenov <suleimenov.aidarbek@gmail.com>
Aiden Grossman <aidengrossmanpso@gmail.com>
Airing Deng <airingdeng@gmail.com>
Ajay Berwal <a.berwal@samsung.com>
Ajay Berwal <ajay.berwal@samsung.com>
Ajay Sharma <ajay.sh@samsung.com>
@@ -61,7 +63,6 @@ Aldo Culquicondor <alculquicondor@gmail.com>
Alec Petridis <alecthechop@gmail.com>
Aleksandar Stojiljkovic <aleksandar.stojiljkovic@intel.com>
Aleksei Gurianov <gurianov@gmail.com>
Aleksey Khoroshilov <akhoroshilov@brave.com>
Alesandro Ortiz <alesandro@alesandroortiz.com>
Alessandro Astone <ales.astone@gmail.com>
Alex Chronopoulos <achronop@gmail.com>
@@ -117,6 +118,7 @@ Andreas Papacharalampous <andreas@apap04.com>
Andrei Borza <andrei.borza@gmail.com>
Andrei Parvu <andrei.prv@gmail.com>
Andrei Parvu <parvu@adobe.com>
Andrei Volykhin <andrei.volykhin@gmail.com>
Andres Salomon <dilinger@queued.net>
Andreu Botella <andreu@andreubotella.com>
Andrew Boyarshin <andrew.boyarshin@gmail.com>
@@ -192,6 +194,7 @@ Ben Noordhuis <ben@strongloop.com>
Benedek Heilig <benecene@gmail.com>
Benjamin Dupont <bedupont@cisco.com>
Benjamin Jemlich <pcgod99@gmail.com>
Beomsik Min <beomsikm@gmail.com>
Bernard Cafarelli <voyageur@gentoo.org>
Bernhard M. Wiedemann <bwiedemann@suse.de>
Bert Belder <bertbelder@gmail.com>
@@ -209,7 +212,6 @@ Brendan Kirby <brendan.kirby@imgtec.com>
Brendan Long <self@brendanlong.com>
Brendon Tiszka <btiszka@gmail.com>
Brett Lewis <brettlewis@brettlewis.us>
Brian Clifton <clifton@brave.com>
Brian Dunn <brian@theophil.us>
Brian G. Merrell <bgmerrell@gmail.com>
Brian Konzman, SJ <b.g.konzman@gmail.com>
@@ -238,6 +240,7 @@ Cameron Gutman <aicommander@gmail.com>
Camille Viot <viot.camille@outlook.com>
Can Liu <peter.can.liu@gmail.com>
Carlos Santa <carlos.santa@intel.com>
Casey Primozic <me@ameo.link>
Catalin Badea <badea@adobe.com>
Cathie Chen <cathiechen@tencent.com>
Cem Kocagil <cem.kocagil@gmail.com>
@@ -277,6 +280,7 @@ Chris Szurgot <szurgotc@amazon.com>
Chris Tserng <tserng@amazon.com>
Chris Vasselli <clindsay@gmail.com>
Chris Ye <hawkoyates@gmail.com>
Christian Liebel <christianliebel@gmail.com>
Christoph Staengle <christoph142@gmx.com>
Christophe Dumez <ch.dumez@samsung.com>
Christopher Dale <chrelad@gmail.com>
@@ -297,6 +301,7 @@ Daiwei Li <daiweili@suitabletech.com>
Damien Marié <damien@dam.io>
Dan McCombs <overridex@gmail.com>
Daniel Adams <msub2official@gmail.com>
Daniel Bertalan <dani@danielbertalan.dev>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel Bomar <dbdaniel42@gmail.com>
Daniel Carvalho Liedke <dliedke@gmail.com>
@@ -308,9 +313,11 @@ Daniel Lockyer <thisisdaniellockyer@gmail.com>
Daniel Nishi <dhnishi@gmail.com>
Daniel Platz <daplatz@googlemail.com>
Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
Daniel Richard G. <iskunk@gmail.com>
Daniel Shaulov <dshaulov@ptc.com>
Daniel Trebbien <dtrebbien@gmail.com>
Daniel Waxweiler <daniel.waxweiler@gmail.com>
Daniel Zhao <zhaodani@amazon.com>
Dániel Bátyai <dbatyai@inf.u-szeged.hu>
Dániel Vince <vinced@inf.u-szeged.hu>
Daniil Suvorov <severecloud@gmail.com>
@@ -318,13 +325,16 @@ Danny Weiss <danny.weiss.fr@gmail.com>
Danylo Boiko <danielboyko02@gmail.com>
Daoming Qiu <daoming.qiu@intel.com>
Darik Harter <darik.harter@gmail.com>
Darryl Pogue <darryl@dpogue.ca>
Darshan Sen <raisinten@gmail.com>
Darshini KN <kn.darshini@samsung.com>
Dave Vandyke <kzar@kzar.co.uk>
David Benjamin <davidben@mit.edu>
David Brown <develop.david.brown@gmail.com>
David Cernoch <dcernoch@uplandsoftware.com>
David Davidovic <david@davidovic.io>
David Erceg <erceg.david@gmail.com>
David Faden <dfaden@gmail.com>
David Fox <david@davidjfox.com>
David Futcher <david.mike.futcher@gmail.com>
David Jin <davidjin@amazon.com>
@@ -333,6 +343,7 @@ David Leen <davileen@amazon.com>
David Manouchehri <david@davidmanouchehri.com>
David McAllister <mcdavid@amazon.com>
David Michael Barr <david.barr@samsung.com>
David Redondo <kde@david-redondo.de>
David Sanders <dsanders11@ucsbalum.com>
David Spellman <dspell@amazon.com>
David Valachovic <adenflorian@gmail.com>
@@ -340,6 +351,7 @@ Dax Kelson <dkelson@gurulabs.com>
Dean Leitersdorf <dean.leitersdorf@gmail.com>
Debadree Chatterjee <debadree333@gmail.com>
Debashish Samantaray <d.samantaray@samsung.com>
Debin Zhang <debinzhang3@gmail.com>
Debug Wang <debugwang@tencent.com>
Deep Shah <deep.shah@samsung.com>
Deepak Dilip Borade <deepak.db@samsung.com>
@@ -359,6 +371,7 @@ Diana Suvorova <diana.suvorova@gmail.com>
Diego Fernández Santos <agujaydedal@gmail.com>
Diego Ferreiro Val <elfogris@gmail.com>
Dillon Sellars <dill.sellars@gmail.com>
Dingming Liu <liudingming@bytedance.com>
Divya Bansal <divya.bansal@samsung.com>
Dmitry Shachnev <mitya57@gmail.com>
Dmitry Sokolov <dimanne@gmail.com>
@@ -409,6 +422,7 @@ Erik Kurzinger <ekurzinger@gmail.com>
Erik Sjölund <erik.sjolund@gmail.com>
Eriq Augustine <eriq.augustine@gmail.com>
Ernesto Mudu <ernesto.mudu@gmail.com>
Ethan Chen <randomgamingdev@gmail.com>
Ethan Wong <bunnnywong@gmail.com>
Etienne Laurin <etienne@atnnn.com>
Eugene Kim <eugene70kim@gmail.com>
@@ -435,7 +449,6 @@ Finbar Crago <finbar.crago@gmail.com>
François Beaufort <beaufort.francois@gmail.com>
François Devatine <devatine@verizonmedia.com>
Francois Kritzinger <francoisk777@gmail.com>
Francois Marier <francois@brave.com>
Francois Rauch <leopardb@gmail.com>
Frankie Dintino <fdintino@theatlantic.com>
Franklin Ta <fta2012@gmail.com>
@@ -478,6 +491,7 @@ Greg Visser <gregvis@gmail.com>
Gregory Davis <gpdavis.chromium@gmail.com>
Grzegorz Czajkowski <g.czajkowski@samsung.com>
Guangzhen Li <guangzhen.li@intel.com>
Guobin Wu <wuguobin.1229@bytedance.com>
Gurpreet Kaur <k.gurpreet@samsung.com>
Gustav Tiger <gustav.tiger@sonymobile.com>
Gyuyoung Kim <gyuyoung.kim@navercorp.com>
@@ -494,6 +508,7 @@ Hansel Lee <mr.hansel.lee@gmail.com>
Hanwen Zheng <eserinc.z@gmail.com>
Hao Li <hao.x.li@intel.com>
Haojian Wu <hokein.wu@gmail.com>
Haoran Tang <haoran.tang.personal@gmail.com>
Haoxuan Zhang <zhanghaoxuan.59@bytedance.com>
Hari Singh <hari.singh1@samsung.com>
Harpreet Singh Khurana <harpreet.sk@samsung.com>
@@ -567,6 +582,7 @@ Ivan Naydonov <samogot@gmail.com>
Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com>
Ivan Sham <ivansham@amazon.com>
Ivan Sidorov <ivansid@gmail.com>
Jacek Fedoryński <jfedor@gmail.com>
Jack Bates <jack@nottheoilrig.com>
Jack Shi <flystone2020@gmail.com>
Jackson Loeffler <j@jloeffler.com>
@@ -574,11 +590,13 @@ Jacky Hu <flameddd@gmail.com>
Jacob Clark <jacob.jh.clark@googlemail.com>
Jacob Mandelson <jacob@mandelson.org>
Jaehun Lim <ljaehun.lim@samsung.com>
Jaehyun Chung <jaehyun.chung@amd.com>
Jaehyun Ko <jaehyun.dev@gmail.com>
Jaehyun Lee <j-hyun.lee@samsung.com>
Jaekyeom Kim <btapiz@gmail.com>
Jaemin Seo <jaemin86.seo@samsung.com>
Jaemo Koo <jaemok@amazon.com>
Jaemo Koo <koo2434@gmail.com>
Jaeseok Yoon <yjaeseok@gmail.com>
Jaewon Choi <jaewon.james.choi@gmail.com>
Jaewon Jung <jw.jung@navercorp.com>
@@ -593,6 +611,7 @@ Jakob Weigert <jakob.j.w@googlemail.com>
Jakub Machacek <xtreit@gmail.com>
James Burton <jb@0.me.uk>
James Choi <jchoi42@pha.jhu.edu>
James Crosby <crosby.james@gmail.com>
James Raphael Tiovalen <jamestiotio@gmail.com>
James Stanley <james@apphaus.co.uk>
James Vega <vega.james@gmail.com>
@@ -611,8 +630,10 @@ Jared Wein <weinjared@gmail.com>
Jari Karppanen <jkarp@amazon.com>
Jason Gronn <jasontopia03@gmail.com>
Javayhu <javayhu@gmail.com>
Jay Kapadia <jaykapadia389@gmail.com>
Jay Oster <jay@kodewerx.org>
Jay Soffian <jaysoffian@gmail.com>
Jay Yang <sjyang1126@gmail.com>
Jeado Ko <haibane84@gmail.com>
Jeffrey C <jeffreyca16@gmail.com>
Jeffrey Yeung <jeffrey.yeung@poly.com>
@@ -631,6 +652,7 @@ Jesper Storm Bache <jsbache@gmail.com>
Jesper van den Ende <jespertheend@gmail.com>
Jesse Miller <jesse@jmiller.biz>
Jesus Sanchez-Palencia <jesus.sanchez-palencia.fernandez.fil@intel.com>
Jia Yu <yujia.1019@bytedance.com>
Jiadong Chen <chenjiadong@huawei.com>
Jiadong Zhu <jiadong.zhu@linaro.org>
Jiahao Lu <lujjjh@gmail.com>
@@ -686,6 +708,7 @@ John Kleinschmidt <kleinschmidtorama@gmail.com>
John Yani <vanuan@gmail.com>
John Yoo <nearbyh13@gmail.com>
Johnson Lin <johnson.lin@intel.com>
Jojo R <rjiejie@gmail.com>
Jon Jensen <jonj@netflix.com>
Jonathan Frazer <listedegarde@gmail.com>
Jonathan Garbee <jonathan@garbee.me>
@@ -699,6 +722,7 @@ JongKwon Lee <jongkwon.lee@navercorp.com>
Jongmok Kim <jongmok.kim@navercorp.com>
Jongmok Kim <johny.kimc@gmail.com>
Jongsoo Lee <leejongsoo@gmail.com>
Joonas Halinen <joonashalinen@outlook.com>
Joone Hur <joone.hur@intel.com>
Joonghun Park <pjh0718@gmail.com>
Jorge Villatoro <jorge@tomatocannon.com>
@@ -708,6 +732,7 @@ Joseph Lolak <joseph.lolak@samsung.com>
Josh Triplett <josh.triplett@intel.com>
Josh Triplett <josh@joshtriplett.org>
Joshua Lock <joshua.lock@intel.com>
Joshua Olaoye <joshuaolaoye46@gmail.com>
Joshua Roesslein <jroesslein@gmail.com>
Josué Ratelle <jorat1346@gmail.com>
Josyula Venkat Narasimham <venkat.nj@samsung.com>
@@ -734,6 +759,7 @@ Junmin Zhu <junmin.zhu@intel.com>
Junsang Mo <mojunsang26@gmail.com>
Junsong Li <ljs.darkfish@gmail.com>
Jun Wang <wangjuna@uniontech.com>
Jun Xu <jun1.xu@intel.com>
Jun Zeng <hjunzeng6@gmail.com>
Justin Okamoto <justmoto@amazon.com>
Justin Ribeiro <justin@justinribeiro.com>
@@ -741,7 +767,7 @@ Jüri Valdmann <juri.valdmann@qt.io>
Juyoung Kim <chattank05@gmail.com>
Jingge Yu <jinggeyu423@gmail.com>
Jing Peiyang <jingpeiyang@eswincomputing.com>
Jinli Wu <wujinli.cn@gmail.com>
Jinli Wu <wujinli@bytedance.com>
K. M. Merajul Arefin <m.arefin@samsung.com>
Kai Jiang <jiangkai@gmail.com>
Kai Köhne <kai.koehne@qt.io>
@@ -756,6 +782,7 @@ Kangyuan Shu <kangyuan.shu@intel.com>
Karan Thakkar <karanjthakkar@gmail.com>
Karel Král <kralkareliv@gmail.com>
Karl <karlpolicechromium@gmail.com>
Karl Piper <karl4piper@gmail.com>
Kartikey Bhatt <kartikey@amazon.com>
Kaspar Brand <googlecontrib@velox.ch>
Kaushalendra Mishra <k.mishra@samsung.com>
@@ -782,6 +809,8 @@ Ketan Goyal <ketan.goyal@samsung.com>
Kevin Gibbons <bakkot@gmail.com>
Kevin Lee Helpingstine <sig11@reprehensible.net>
Kevin M. McCormick <mckev@amazon.com>
Kexy Biscuit <kexybiscuit@aosc.io>
Kexy Biscuit <kexybiscuit@gmail.com>
Keyou <qqkillyou@gmail.com>
Khasim Syed Mohammed <khasim.mohammed@linaro.org>
Khem Raj <raj.khem@gmail.com>
@@ -817,6 +846,7 @@ Kyungtae Kim <ktf.kim@samsung.com>
Kyungyoung Heo <bbvch13531@gmail.com>
Kyutae Lee <gorisanson@gmail.com>
Lalit Chandivade <lalit.chandivade@einfochips.com>
Lalit Rana <lalitrn44@gmail.com>
Lam Lu <lamlu@amazon.com>
Laszlo Gombos <l.gombos@samsung.com>
Laszlo Radanyi <bekkra@gmail.com>
@@ -846,6 +876,7 @@ Lin Peng <penglin220@gmail.com>
Lin Peng <penglin22@huawei.com>
Lingqi Chi <someway.bit@gmail.com>
Lingyun Cai <lingyun.cai@intel.com>
Linnan Li <lilinnan0903@gmail.com>
Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Lisha Guo <lisha.guo@intel.com>
Lizhi Fan <lizhi.fan@samsung.com>
@@ -881,6 +912,7 @@ Malcolm Wang <malcolm.2.wang@gmail.com>
Mallikarjuna Rao V <vm.arjun@samsung.com>
Manish Chhajer <chhajer.m@samsung.com>
Manish Jethani <m.jethani@eyeo.com>
Manjunath Babu <10manju@gmail.com>
Manojkumar Bhosale <manojkumar.bhosale@imgtec.com>
Manuel Braun <thembrown@gmail.com>
Manuel Lagana <manuel.lagana.dev@gmail.com>
@@ -910,6 +942,7 @@ Martin Persson <mnpn03@gmail.com>
Martin Rogalla <martin@martinrogalla.com>
Martina Kollarova <martina.kollarova@intel.com>
Martino Fontana <tinozzo123@gmail.com>
Marvin Giessing <marvin.giessing@gmail.com>
Masahiro Yado <yado.masa@gmail.com>
Masaru Nishida <msr.i386@gmail.com>
Masayuki Wakizaka <mwakizaka0108@gmail.com>
@@ -919,6 +952,8 @@ Mathias Bynens <mathias@qiwi.be>
Mathieu Meisser <mmeisser@logitech.com>
Matt Arpidone <mma.public@gmail.com>
Matt Fysh <mattfysh@gmail.com>
Matt Harding <majaharding@gmail.com>
Matt Jolly <kangie@gentoo.org>
Matt Strum <mstrum@amazon.com>
Matt Zeunert <matt@mostlystatic.com>
Matthew "strager" Glazar <strager.nds@gmail.com>
@@ -933,7 +968,6 @@ Matthieu Rigolot <matthieu.rigolot@gmail.com>
Matthieu Vlad Hauglustaine <matt.hauglustaine@gmail.com>
Mattias Buelens <mattias.buelens@gmail.com>
Max Coplan <mchcopl@gmail.com>
Max Karolinskiy <max@brave.com>
Max Perepelitsyn <pph34r@gmail.com>
Max Schmitt <max@schmitt.mx>
Max Vujovic <mvujovic@adobe.com>
@@ -943,16 +977,19 @@ Mc Zeng <zengmcong@gmail.com>
Md Abdullah Al Alamin <a.alamin.cse@gmail.com>
Md. Hasanur Rashid <hasanur.r@samsung.com>
Md Hasibul Hasan <hasibulhasan873@gmail.com>
Md Hasibul Hasan <hasibul.h@samsung.com>
Md Jobed Hossain <jobed.h@samsung.com>
Md Raiyan bin Sayeed <mrbsayee@uwaterloo.ca>
Md. Sadiqul Amin <sadiqul.amin@samsung.com>
Md Sami Uddin <md.sami@samsung.com>
Mego Tan <tannal2409@gmail.com>
Merajul Arefin <merajularefin@gmail.com>
Micha Hanselmann <micha.hanselmann@gmail.com>
Michael Cirone <mikecirone@gmail.com>
Michael Constant <mconst@gmail.com>
Michael Forney <mforney@mforney.org>
Michael Gilbert <floppymaster@gmail.com>
Michael Herrmann <michael@herrmann.io>
Michael Kolomeytsev <michael.kolomeytsev@gmail.com>
Michael Lopez <lopes92290@gmail.com>
Michael Morrison <codebythepound@gmail.com>
@@ -969,11 +1006,11 @@ Mihai Tica <mitica@adobe.com>
Mike Pennisi <mike@mikepennisi.com>
Mike Tilburg <mtilburg@adobe.com>
Mikhail Pozdnyakov <mikhail.pozdnyakov@intel.com>
Mikhail Atuchin <matuchin@brave.com>
Milko Leporis <milko.leporis@imgtec.com>
Milton Chiang <milton.chiang@mediatek.com>
Milutin Smiljanic <msmiljanic.gm@gmail.com>
Minchul Kang <tegongkang@gmail.com>
Ming Lei <minggeorgelei@gmail.com>
Mingeun Park <mindal99546@gmail.com>
Minggang Wang <minggang.wang@intel.com>
Mingmin Xie <melvinxie@gmail.com>
@@ -990,10 +1027,14 @@ Mitchell Cohen <mitchell@agilebits.com>
Miyoung Shin <myid.shin@navercorp.com>
Mohamed I. Hammad <ibraaaa@gmail.com>
Mohamed Mansour <m0.interactive@gmail.com>
Mohamed Hany Youns <mohamedhyouns@gmail.com>
Mohammad Azam <m.azam@samsung.com>
MohammadSabri <mohammad.kh.sabri@exalt.ps>
Mohammed Ashraf <mohammedashraf4599@gmail.com>
Mohammed Wajahat Ali Siddiqui <wajahat.s@samsung.com>
Mohan Reddy <mohan.reddy@samsung.com>
Mohit Bhalla <bhallam@amazon.com>
Mohraiel Matta <mohraielmatta@gmail.com>
Moiseanu Rares-Marian <moiseanurares@gmail.com>
Momoka Yamamoto <momoka.my6@gmail.com>
Momoko Hattori <momohatt10@gmail.com>
@@ -1029,7 +1070,9 @@ Nedeljko Babic <nedeljko.babic@imgtec.com>
Neehit Goyal <neehit.goyal@samsung.com>
Nidhi Jaju <nidhijaju127@gmail.com>
Niek van der Maas <mail@niekvandermaas.nl>
Nik Pavlov <nikita.pavlov.dev@gmail.com>
Nikhil Bansal <n.bansal@samsung.com>
Nikhil Meena <iakhilmeena@gmail.com>
Nikhil Sahni <nikhil.sahni@samsung.com>
Nikita Ofitserov <himikof@gmail.com>
Niklas Hambüchen <mail@nh2.me>
@@ -1043,6 +1086,7 @@ Nivedan Sharma <ni.sharma@samsung.com>
Noam Rosenthal <noam.j.rosenthal@gmail.com>
Noj Vek <nojvek@gmail.com>
Nolan Cao <nolan.robin.cao@gmail.com>
Nourhan Hasan <nourhan.m.hasan@gmail.com>
Oleksii Kadurin <ovkadurin@gmail.com>
Oliver Dunk <oliver@oliverdunk.com>
Olivier Tilloy <olivier+chromium@tilloy.net>
@@ -1076,6 +1120,7 @@ Paul Wicks <pwicks86@gmail.com>
Pavan Kumar Emani <pavan.e@samsung.com>
Pavel Golikov <paullo612@ya.ru>
Pavel Ivanov <paivanof@gmail.com>
Pawan Udassi <pawanudassi@hotmail.com>
Pawel Forysiuk <p.forysiuk@samsung.com>
Paweł Hajdan jr <phajdan.jr@gmail.com>
Paweł Stanek <pawel@gener8ads.com>
@@ -1110,7 +1155,6 @@ Po-Chun Chang <pochang0403@gmail.com>
Prakhar Shrivastav <p.shri@samsung.com>
Pramod Begur Srinath <pramod.bs@samsung.com>
Pranay Kumar <pranay.kumar@samsung.com>
Pranjal Jumde <pranjal@brave.com>
Prashant Hiremath <prashhir@cisco.com>
Prashant Nevase <prashant.n@samsung.com>
Prashant Patil <prashant.patil@imgtec.com>
@@ -1137,7 +1181,6 @@ Rahul Gupta <rahul.g@samsung.com>
Rahul Yadav <rahul.yadav@samsung.com>
Rajesh Mahindra <rmahindra@uber.com>
Rajneesh Rana <rajneesh.r@samsung.com>
Ralph Giles <rgiles@brave.com>
Raman Tenneti <raman.tenneti@gmail.com>
Ramkumar Gokarnesan <ramkumar.gokarnesan@gmail.com>
Ramkumar Ramachandra <artagnon@gmail.com>
@@ -1203,6 +1246,7 @@ Ryan Manuel <rfmanuel@gmail.com>
Ryan Norton <rnorton10@gmail.com>
Ryan Sleevi <ryan-chromium-dev@sleevi.com>
Ryan Yoakum <ryoakum@skobalt.com>
Ryan Huen <ryanhuenprivate@gmail.com>
Rye Zhang <ryezhang@tencent.com>
Ryo Ogawa <negibokken@gmail.com>
Ryuan Choi <ryuan.choi@samsung.com>
@@ -1257,6 +1301,7 @@ Sergei Poletaev <spylogsster@gmail.com>
Sergei Romanov <rsv.981@gmail.com>
Sergey Romanov <svromanov@sberdevices.ru>
Sergey Kipet <sergey.kipet@gmail.com>
Sergey Markelov <sergionso@gmail.com>
Sergey Putilin <p.sergey@samsung.com>
Sergey Shekyan <shekyan@gmail.com>
Sergey Talantov <sergey.talantov@gmail.com>
@@ -1267,6 +1312,7 @@ Serhii Matrunchyk <sergiy.matrunchyk@gmail.com>
Seshadri Mahalingam <seshadri.mahalingam@gmail.com>
Seungkyu Lee <zx6658@gmail.com>
Sevan Janiyan <venture37@geeklan.co.uk>
Shaheen Fazim <fazim.pentester@gmail.com>
Shahriar Rostami <shahriar.rostami@gmail.com>
Shail Singhal <shail.s@samsung.com>
Shane Hansen <shanemhansen@gmail.com>
@@ -1324,6 +1370,7 @@ Sooho Park <sooho1000@gmail.com>
Soojung Choi <crystal2840@gmail.com>
Soorya R <soorya.r@samsung.com>
Soren Dreijer <dreijerbit@gmail.com>
Spencer Wilson <spencer@spencerwilson.org>
Sreerenj Balachandran <sreerenj.balachandran@intel.com>
Srirama Chandra Sekhar Mogali <srirama.m@samsung.com>
Stacy Kim <stacy.kim@ucla.edu>
@@ -1347,6 +1394,7 @@ Sunchang Li <johnstonli@tencent.com>
Sundoo Kim <nerdooit@gmail.com>
Sundoo Kim <0xd00d00b@gmail.com>
Suneel Kota <suneel.kota@samsung.com>
Sung Lee <sung.lee@amd.com>
Sungguk Lim <limasdf@gmail.com>
Sunghyeok Kang <sh0528.kang@samsung.com>
Sungmann Cho <sungmann.cho@gmail.com>
@@ -1386,6 +1434,7 @@ Takuya Kurimoto <takuya004869@gmail.com>
Tanay Chowdhury <tanay.c@samsung.com>
Tanvir Rizvi <tanvir.rizvi@samsung.com>
Tao Wang <tao.wang.2261@gmail.com>
Tao Xiong <taox4@illinois.edu>
Tapu Kumar Ghose <ghose.tapu@gmail.com>
Taylor Price <trprice@gmail.com>
Ted Kim <neot0000@gmail.com>
@@ -1405,6 +1454,7 @@ Tibor Dusnoki <tibor.dusnoki.91@gmail.com>
Tibor Dusnoki <tdusnoki@inf.u-szeged.hu>
Tien Hock Loh <tienhock.loh@starfivetech.com>
Tim Ansell <mithro@mithis.com>
Tim Barry <oregongraperoot@gmail.com>
Tim Niederhausen <tim@rnc-ag.de>
Tim Steiner <twsteiner@gmail.com>
Timo Gurr <timo.gurr@gmail.com>
@@ -1420,6 +1470,7 @@ Tom Harwood <tfh@skip.org>
Tomas Popela <tomas.popela@gmail.com>
Tomasz Edward Posłuszny <tom@devpeer.net>
Tony Shen <legendmastertony@gmail.com>
Topi Lassila <tolassila@gmail.com>
Torsten Kurbad <google@tk-webart.de>
Toshihito Kikuchi <leamovret@gmail.com>
Toshiaki Tanaka <zokutyou2@gmail.com>
@@ -1463,6 +1514,7 @@ Vishal Bhatnagar <vishal.b@samsung.com>
Vishal Lingam <vishal.reddy@samsung.com>
Vitaliy Kharin <kvserr@gmail.com>
Vivek Galatage <vivek.vg@samsung.com>
Vlad Zahorodnii <vlad.zahorodnii@kde.org>
Volker Sorge <volker.sorge@gmail.com>
Waihung Fu <fufranci@amazon.com>
wafuwafu13 <mariobaske@i.softbank.jp>
@@ -1470,9 +1522,11 @@ Wojciech Bielawski <wojciech.bielawski@gmail.com>
Wang Chen <wangchen20@iscas.ac.cn>
Wang Chen <unicornxw@gmail.com>
Wang Weiwei <wangww@dingdao.com>
Wang Zirui <kingzirvi@gmail.com>
Wangyang Dai <jludwy@gmail.com>
Wanming Lin <wanming.lin@intel.com>
Wei Li <wei.c.li@intel.com>
Weicong Yu <yuweicong666@gmail.com>
Wen Fan <fanwen1@huawei.com>
Wenxiang Qian <leonwxqian@gmail.com>
WenSheng He <wensheng.he@samsung.com>
@@ -1538,6 +1592,7 @@ Yong Shin <sy3620@gmail.com>
Yong Wang <ccyongwang@tencent.com>
Yonggang Luo <luoyonggang@gmail.com>
Yongha Lee <yongha78.lee@samsung.com>
Yongsang Park <yongsangpark980813@gmail.com>
Yongseok Choi <yongseok.choi@navercorp.com>
Yongsheng Zhu <yongsheng.zhu@intel.com>
Yoonjae Cho <yoonjae.cho92@gmail.com>
@@ -1570,11 +1625,13 @@ Yuta Kasai <kasai.yuta0810@gmail.com>
Yuvanesh Natarajan <yuvanesh.n1@samsung.com>
Zach Bjornson <zbbjornson@gmail.com>
Zachary Capalbo <zach.geek@gmail.com>
Zehan Li <synclzhhans@gmail.com>
Zeno Albisser <zeno.albisser@digia.com>
Zeqin Chen <talonchen@tencent.com>
Zhanbang He <hezhanbang@gmail.com>
Zhang Hao <zhanghao.m@bytedance.com>
Zhang Hao <15686357310a@gmail.com>
Zhao Qin <qzmiss@gmail.com>
Zhaoming Jiang <zhaoming.jiang@intel.com>
Zhaoze Zhou <zhaoze.zhou@partner.samsung.com>
Zheda Chen <zheda.chen@intel.com>
@@ -1600,6 +1657,7 @@ Zsolt Borbely <zsborbely.u-szeged@partner.samsung.com>
迷渡 <justjavac@gmail.com>
郑苏波 (Super Zheng) <superzheng@tencent.com>
一丝 (Yisi) <yiorsi@gmail.com>
林训杰 (XunJie Lin) <wick.linxunjie@gmail.com>
# Please DO NOT APPEND here. See comments at the top of the file.
# END individuals section.
@@ -1611,6 +1669,7 @@ Akamai Inc. <*@akamai.com>
ARM Holdings <*@arm.com>
BlackBerry Limited <*@blackberry.com>
Bocoup <*@bocoup.com>
Brave Software Inc. <*@brave.com>
Canonical Limited <*@canonical.com>
Cloudflare, Inc. <*@cloudflare.com>
CloudMosa, Inc. <*@cloudmosa.com>
@@ -1628,6 +1687,7 @@ EngFlow, Inc. <*@engflow.com>
Estimote, Inc. <*@estimote.com>
Google Inc. <*@google.com>
Grammarly, Inc. <*@grammarly.com>
Here Inc. <*@here.io>
Hewlett-Packard Development Company, L.P. <*@hp.com>
HyperConnect Inc. <*@hpcnt.com>
IBM Inc. <*@*.ibm.com>
@@ -1657,6 +1717,7 @@ NVIDIA Corporation <*@nvidia.com>
OpenFin Inc. <*@openfin.co>
Opera Software ASA <*@opera.com>
Optical Tone Ltd <*@opticaltone.com>
Palo Alto Networks, Inc. <*@paloaltonetworks.com>
Pengutronix e.K. <*@pengutronix.de>
Quality First Software GmbH <*@qf-software.com>
Rakuten Kobo Inc. <*@kobo.com>


@@ -12,7 +12,6 @@ import("//build/config/compiler/compiler.gni")
import("//build/config/cronet/config.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/features.gni")
import("//build/config/ios/config.gni")
import("//build/config/rust.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/ui.gni")

src/DEPS (2901 lines changed)

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,17 +1,12 @@
include_rules = [
# `#include "partition_alloc/..."` is prefered to
# `#include "base/allocator/partition_allocator/src/partition_alloc/..."`.
"+partition_alloc",
"-base/allocator/partition_allocator",
"+third_party/ashmem",
"+third_party/apple_apsl",
"+third_party/boringssl/src/include",
"+third_party/ced",
"+third_party/fuzztest",
# We are moving the old jni_generator to jni_zero, some references will remain
# in //base.
"+third_party/jni_zero",
"+third_party/libevent",
"+third_party/libunwindstack/src/libunwindstack/include",
"+third_party/lss",
"+third_party/modp_b64",
@@ -25,6 +20,7 @@ include_rules = [
"+third_party/test_fonts",
# JSON Deserialization.
"+third_party/rust/serde_json_lenient/v0_2/wrapper",
"+third_party/zlib",
# These are implicitly brought in from the root, and we don't want them.
"-ipc",


@@ -4,7 +4,6 @@ set noparent
# NOTE: keep this in sync with global-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.
altimin@chromium.org
danakj@chromium.org
dcheng@chromium.org
fdoray@chromium.org
gab@chromium.org


@@ -9,5 +9,4 @@
# yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org
rsesek@chromium.org
tsepez@chromium.org


@@ -1,5 +1,4 @@
lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org
per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS


@@ -5,7 +5,7 @@
#include "base/allocator/allocator_check.h"
#include "build/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if BUILDFLAG(IS_WIN)
#include "partition_alloc/shim/winheap_stubs_win.h"


@@ -21,4 +21,4 @@ constexpr size_t kMaximumNumberOfObservers = 4;
} // namespace base::allocator::dispatcher::configuration
#endif // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_


@@ -8,7 +8,7 @@
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/no_destructor.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/shim/allocator_shim.h"
#if DCHECK_IS_ON()
@@ -16,7 +16,7 @@
#endif
#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_hooks.h"
#include "partition_alloc/partition_alloc_hooks.h" // nogncheck
#endif
namespace base::allocator::dispatcher {
@@ -34,7 +34,7 @@ struct Dispatcher::Impl {
void Reset() {
#if DCHECK_IS_ON()
DCHECK([&]() {
DCHECK([&] {
auto const was_set = is_initialized_check_flag_.test_and_set();
is_initialized_check_flag_.clear();
return was_set;


@@ -5,11 +5,11 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#include <memory>
#include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h"
#include <memory>
namespace base::allocator::dispatcher {
namespace internal {


@@ -5,13 +5,13 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#include <tuple>
#include <utility>
#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/internal/tools.h"
#include <tuple>
#include <utility>
namespace base::allocator::dispatcher {
namespace internal {

View file

@@ -3,7 +3,8 @@
// found in the LICENSE file.
#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
namespace base::allocator::dispatcher::internal {

View file

@@ -7,14 +7,14 @@
#include "base/base_export.h"
#include "build/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_hooks.h"
#include "partition_alloc/partition_alloc_hooks.h" // nogncheck
#endif
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "partition_alloc/shim/allocator_shim.h"
#include "partition_alloc/shim/allocator_shim.h" // nogncheck
#endif
namespace base::allocator::dispatcher::internal {
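
Several hunks in this area append `// nogncheck` to includes that sit behind a buildflag. The annotation tells `gn check` to skip include-dependency validation for that line, which would otherwise flag the conditionally compiled include; a sketch of the pattern as it appears above:

    #if PA_BUILDFLAG(USE_PARTITION_ALLOC)
    // Compiled only when PartitionAlloc is in use; tell the static
    // dependency checker not to flag the include.
    #include "partition_alloc/partition_alloc_hooks.h"  // nogncheck
    #endif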

View file

@@ -13,10 +13,10 @@
#include "base/allocator/dispatcher/subsystem.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_allocation_data.h"
#include "partition_alloc/partition_alloc_allocation_data.h" // nogncheck
#endif
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
@@ -125,149 +125,165 @@ struct DispatcherImpl {
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC)
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
static void* AllocFn(const AllocatorDispatch* self,
size_t size,
void* context) {
void* const address = self->next->alloc_function(self->next, size, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
static void* AllocFn(size_t size, void* context) {
void* const address =
self->next->alloc_unchecked_function(self->next, size, context);
allocator_dispatch_.next->alloc_function(size, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AllocZeroInitializedFn(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context) {
void* const address = self->next->alloc_zero_initialized_function(
self->next, n, size, context);
static void* AllocUncheckedFn(size_t size, void* context) {
void* const address =
allocator_dispatch_.next->alloc_unchecked_function(size, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AllocZeroInitializedFn(size_t n, size_t size, void* context) {
void* const address =
allocator_dispatch_.next->alloc_zero_initialized_function(n, size,
context);
DoNotifyAllocationForShim(address, n * size);
return address;
}
static void* AllocAlignedFn(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
void* const address = self->next->alloc_aligned_function(
self->next, alignment, size, context);
static void* AllocAlignedFn(size_t alignment, size_t size, void* context) {
void* const address = allocator_dispatch_.next->alloc_aligned_function(
alignment, size, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* ReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
static void* ReallocFn(void* address, size_t size, void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
void* const reallocated_address =
self->next->realloc_function(self->next, address, size, context);
allocator_dispatch_.next->realloc_function(address, size, context);
DoNotifyAllocationForShim(reallocated_address, size);
return reallocated_address;
}
static void FreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
static void* ReallocUncheckedFn(void* address, size_t size, void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
void* const reallocated_address =
allocator_dispatch_.next->realloc_unchecked_function(address, size,
context);
DoNotifyAllocationForShim(reallocated_address, size);
return reallocated_address;
}
static void FreeFn(void* address, void* context) {
// Note: DoNotifyFree should be called before free_function (here and in
// other places). That is because observers need to handle the allocation
// being freed before calling free_function, as once the latter is executed
// the address becomes available and can be allocated by another thread.
// That would be racy otherwise.
DoNotifyFreeForShim(address);
self->next->free_function(self->next, address, context);
MUSTTAIL return allocator_dispatch_.next->free_function(address, context);
}
static unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
static unsigned BatchMallocFn(size_t size,
void** results,
unsigned num_requested,
void* context) {
unsigned const num_allocated = self->next->batch_malloc_function(
self->next, size, results, num_requested, context);
unsigned const num_allocated =
allocator_dispatch_.next->batch_malloc_function(size, results,
num_requested, context);
for (unsigned i = 0; i < num_allocated; ++i) {
DoNotifyAllocationForShim(results[i], size);
}
return num_allocated;
}
static void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
static void BatchFreeFn(void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
for (unsigned i = 0; i < num_to_be_freed; ++i) {
DoNotifyFreeForShim(to_be_freed[i]);
}
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
context);
MUSTTAIL return allocator_dispatch_.next->batch_free_function(
to_be_freed, num_to_be_freed, context);
}
static void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
static void FreeDefiniteSizeFn(void* address, size_t size, void* context) {
DoNotifyFreeForShim(address);
self->next->free_definite_size_function(self->next, address, size, context);
MUSTTAIL return allocator_dispatch_.next->free_definite_size_function(
address, size, context);
}
static void TryFreeDefaultFn(const AllocatorDispatch* self,
void* address,
void* context) {
static void TryFreeDefaultFn(void* address, void* context) {
DoNotifyFreeForShim(address);
self->next->try_free_default_function(self->next, address, context);
MUSTTAIL return allocator_dispatch_.next->try_free_default_function(
address, context);
}
static void* AlignedMallocFn(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context) {
void* const address = self->next->aligned_malloc_function(
self->next, size, alignment, context);
static void* AlignedMallocFn(size_t size, size_t alignment, void* context) {
void* const address = allocator_dispatch_.next->aligned_malloc_function(
size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AlignedReallocFn(const AllocatorDispatch* self,
void* address,
static void* AlignedMallocUncheckedFn(size_t size,
size_t alignment,
void* context) {
void* const address =
allocator_dispatch_.next->aligned_malloc_unchecked_function(
size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AlignedReallocFn(void* address,
size_t size,
size_t alignment,
void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
address = self->next->aligned_realloc_function(self->next, address, size,
alignment, context);
address = allocator_dispatch_.next->aligned_realloc_function(
address, size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
static void* AlignedReallocUncheckedFn(void* address,
size_t size,
size_t alignment,
void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
self->next->aligned_free_function(self->next, address, context);
address = allocator_dispatch_.next->aligned_realloc_unchecked_function(
address, size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void AlignedFreeFn(void* address, void* context) {
DoNotifyFreeForShim(address);
MUSTTAIL return allocator_dispatch_.next->aligned_free_function(address,
context);
}
ALWAYS_INLINE static void DoNotifyAllocationForShim(void* address,
@@ -308,23 +324,26 @@ std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
template <typename... ObserverTypes>
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
AllocFn, // alloc_function
AllocUncheckedFn, // alloc_unchecked_function
AllocZeroInitializedFn, // alloc_zero_initialized_function
AllocAlignedFn, // alloc_aligned_function
ReallocFn, // realloc_function
FreeFn, // free_function
nullptr, // get_size_estimate_function
nullptr, // good_size_function
nullptr, // claimed_address_function
BatchMallocFn, // batch_malloc_function
BatchFreeFn, // batch_free_function
FreeDefiniteSizeFn, // free_definite_size_function
TryFreeDefaultFn, // try_free_default_function
AlignedMallocFn, // aligned_malloc_function
AlignedReallocFn, // aligned_realloc_function
AlignedFreeFn, // aligned_free_function
nullptr // next
AllocFn, // alloc_function
AllocUncheckedFn, // alloc_unchecked_function
AllocZeroInitializedFn, // alloc_zero_initialized_function
AllocAlignedFn, // alloc_aligned_function
ReallocFn, // realloc_function
ReallocUncheckedFn, // realloc_unchecked_function
FreeFn, // free_function
nullptr, // get_size_estimate_function
nullptr, // good_size_function
nullptr, // claimed_address_function
BatchMallocFn, // batch_malloc_function
BatchFreeFn, // batch_free_function
FreeDefiniteSizeFn, // free_definite_size_function
TryFreeDefaultFn, // try_free_default_function
AlignedMallocFn, // aligned_malloc_function
AlignedMallocUncheckedFn, // aligned_malloc_unchecked_function
AlignedReallocFn, // aligned_realloc_function
AlignedReallocUncheckedFn, // aligned_realloc_unchecked_function
AlignedFreeFn, // aligned_free_function
nullptr // next
};
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
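
The bulk of this file tracks an upstream `AllocatorDispatch` signature change: shim hooks no longer receive a `const AllocatorDispatch* self` argument and instead reach the next link in the chain through the static `allocator_dispatch_.next`, with the free-family hooks forwarding via `MUSTTAIL` tail calls. A condensed sketch of a pass-through hook pair in the new shape, drawn from the hunks above:

    // Allocation hook: let the next dispatch allocate, then notify observers.
    static void* AllocFn(size_t size, void* context) {
      void* const address =
          allocator_dispatch_.next->alloc_function(size, context);
      DoNotifyAllocationForShim(address, size);
      return address;
    }
    // Free hook: observers must run *before* the memory is released, since
    // another thread could reallocate the address immediately afterwards.
    static void FreeFn(void* address, void* context) {
      DoNotifyFreeForShim(address);
      MUSTTAIL return allocator_dispatch_.next->free_function(address, context);
    }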

View file

@@ -10,7 +10,7 @@
#include "base/allocator/dispatcher/memory_tagging.h"
#include "base/allocator/dispatcher/subsystem.h"
#include "base/base_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
namespace base::allocator::dispatcher {

View file

@@ -33,8 +33,9 @@ struct BASE_EXPORT ReentryGuard {
}
ALWAYS_INLINE ~ReentryGuard() {
if (LIKELY(allowed_))
if (allowed_) [[likely]] {
pthread_setspecific(entered_key_, nullptr);
}
}
explicit operator bool() const noexcept { return allowed_; }
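
This hunk migrates from the old `LIKELY()` macro to the standard C++20 `[[likely]]` attribute, which annotates the taken branch rather than wrapping the condition; a minimal sketch of the two spellings:

    // Before: if (LIKELY(allowed_))
    //   pthread_setspecific(entered_key_, nullptr);
    // After: the attribute follows the condition and the body gains braces.
    if (allowed_) [[likely]] {
      pthread_setspecific(entered_key_, nullptr);
    }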

View file

@@ -24,4 +24,4 @@ enum class AllocationSubsystem {
};
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
#endif // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_

View file

@@ -24,4 +24,4 @@ struct DispatcherTest : public ::testing::Test {
} // namespace base::allocator::dispatcher::testing
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_

View file

@@ -30,4 +30,4 @@ struct ObserverMock {
} // namespace testing
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_

View file

@@ -8,14 +8,14 @@
#if USE_LOCAL_TLS_EMULATION()
#include <sys/mman.h>
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/debug/crash_logging.h"
#include "base/immediate_crash.h"
#include "build/build_config.h"
#include <sys/mman.h>
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#include <sys/prctl.h>
#endif
@@ -96,7 +96,7 @@ PThreadTLSSystem& PThreadTLSSystem::operator=(PThreadTLSSystem&& other) {
bool PThreadTLSSystem::Setup(
OnThreadTerminationFunction thread_termination_function,
const std::string_view instance_id) {
std::string_view instance_id) {
#if DCHECK_IS_ON()
// Initialize must happen outside of the allocation path. Therefore, it is
// safe to verify with DCHECK.

View file

@@ -17,17 +17,21 @@
#endif
#if USE_LOCAL_TLS_EMULATION()
#include <pthread.h>
#include <algorithm>
#include <atomic>
#include <functional>
#include <memory>
#include <mutex>
#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "partition_alloc/partition_alloc_constants.h"
#include <pthread.h>
#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_constants.h" // nogncheck
#endif
#if HAS_FEATURE(thread_sanitizer)
#define DISABLE_TSAN_INSTRUMENTATION __attribute__((no_sanitize("thread")))
@@ -111,7 +115,7 @@ class BASE_EXPORT PThreadTLSSystem {
// @param thread_termination_function An optional function which will be
// invoked upon termination of a thread.
bool Setup(OnThreadTerminationFunction thread_termination_function,
const std::string_view instance_id);
std::string_view instance_id);
// Tear down the TLS system. After completing tear down, the thread
// termination function passed to Setup will not be invoked anymore.
bool TearDownForTesting();
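
The `const std::string_view` → `std::string_view` changes in this header are signature-neutral: `string_view` is a small value type passed by value, and a top-level `const` on a by-value parameter is not part of the function type. A self-contained sketch:

    #include <string_view>
    // These two lines declare the same function; the top-level const on a
    // by-value parameter does not participate in the signature.
    bool Setup(std::string_view instance_id);
    bool Setup(const std::string_view instance_id);
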
@@ -199,7 +203,7 @@ template <typename PayloadType,
size_t AllocationChunkSize,
bool IsDestructibleForTesting>
struct ThreadLocalStorage {
explicit ThreadLocalStorage(const std::string_view instance_id)
explicit ThreadLocalStorage(std::string_view instance_id)
: root_(AllocateAndInitializeChunk()) {
Initialize(instance_id);
}
@@ -207,7 +211,7 @@ struct ThreadLocalStorage {
// Create a new instance of |ThreadLocalStorage| using the passed allocator
// and TLS system. This initializes the underlying TLS system and creates the
// first chunk of data.
ThreadLocalStorage(const std::string_view instance_id,
ThreadLocalStorage(std::string_view instance_id,
AllocatorType allocator,
TLSSystemType tls_system)
: allocator_(std::move(allocator)),
@@ -244,7 +248,7 @@ struct ThreadLocalStorage {
auto* slot = static_cast<SingleSlot*>(tls_system.GetThreadSpecificData());
if (UNLIKELY(slot == nullptr)) {
if (slot == nullptr) [[unlikely]] {
slot = FindAndAllocateFreeSlot(root_.load(std::memory_order_relaxed));
// We might be called in the course of handling a memory allocation. We do
@@ -360,7 +364,7 @@ struct ThreadLocalStorage {
}
// Perform common initialization during construction of an instance.
void Initialize(const std::string_view instance_id) {
void Initialize(std::string_view instance_id) {
// The constructor must be called outside of the allocation path. Therefore,
// it is safe to verify with CHECK.

View file

@@ -7,7 +7,7 @@
#include <mach/mach.h>
#include <malloc/malloc.h>
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/shim/early_zone_registration_constants.h"
// BASE_EXPORT tends to be defined as soon as anything from //base is included.

View file

@@ -8,22 +8,7 @@
#include "base/strings/strcat.h"
#include "base/system/sys_info.h"
namespace base {
namespace miracle_parameter {
namespace {
std::string GetFieldTrialParamByFeatureAsString(
const base::Feature& feature,
const std::string& param_name,
const std::string& default_value) {
const std::string value =
base::GetFieldTrialParamValueByFeature(feature, param_name);
return value.empty() ? default_value : value;
}
} // namespace
namespace base::miracle_parameter {
std::string GetParamNameWithSuffix(const std::string& param_name) {
// `base::SysInfo::AmountOfPhysicalMemoryMB()` refers to CommandLine
@@ -88,6 +73,4 @@ base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
default_value));
}
} // namespace miracle_parameter
} // namespace base
} // namespace base::miracle_parameter
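
The surrounding edits collapse the nested `namespace base { namespace miracle_parameter {` pair into the C++17 nested-namespace form, removing one level of nesting and one closing brace; a minimal sketch:

    // Pre-C++17:
    //   namespace base { namespace miracle_parameter { /* ... */ } }
    // C++17 nested form adopted here:
    namespace base::miracle_parameter {
    // declarations live at the same depth as before
    }  // namespace base::miracle_parameter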

View file

@@ -166,7 +166,7 @@ Enum GetMiracleParameterAsEnum(
default_value, type, options) \
type function_name() { \
static const type value = miracle_parameter::GetMiracleParameterAsEnum( \
feature, param_name, default_value, base::make_span(options)); \
feature, param_name, default_value, base::span(options)); \
return value; \
}
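
`base::make_span(options)` becomes a direct `base::span(options)` construction, leaning on class template argument deduction, so the factory helper is no longer needed. A sketch of the same deduction with `std::span`:

    #include <span>
    int main() {
      int options[] = {1, 2, 3};
      std::span s{options};  // CTAD deduces std::span<int, 3>
      return static_cast<int>(s.size());
    }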

View file

@@ -12,16 +12,29 @@
#include "base/time/time.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
#include "build/chromeos_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/thread_cache.h"
namespace base {
namespace features {
namespace base::features {
namespace {
static constexpr char kPAFeatureEnabledProcessesStr[] = "enabled-processes";
static constexpr char kBrowserOnlyStr[] = "browser-only";
static constexpr char kBrowserAndRendererStr[] = "browser-and-renderer";
static constexpr char kNonRendererStr[] = "non-renderer";
static constexpr char kAllProcessesStr[] = "all-processes";
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
static constexpr char kRendererOnlyStr[] = "renderer-only";
static constexpr char kAllChildProcessesStr[] = "all-child-processes";
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace
BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr,
"PartitionAllocUnretainedDanglingPtr",
@@ -33,7 +46,8 @@ constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
{UnretainedDanglingPtrMode::kDumpWithoutCrashing,
"dump_without_crashing"},
};
const base::FeatureParam<UnretainedDanglingPtrMode>
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam = {
&kPartitionAllocUnretainedDanglingPtr,
"mode",
@@ -41,6 +55,10 @@ const base::FeatureParam<UnretainedDanglingPtrMode>
&kUnretainedDanglingPtrModeOption,
};
// Note: DPD conflicts with no-op `free()` (see
// `base::allocator::MakeFreeNoOp()`). No-op `free()` stands down in the
// presence of DPD, but hypothetically fully launching DPD should prompt
// a rethink of no-op `free()`.
BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr",
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG)
@@ -54,7 +72,8 @@ constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogOnly, "log_only"},
};
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr,
"mode",
DanglingPtrMode::kCrash,
@@ -64,32 +83,15 @@ constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
{DanglingPtrType::kAll, "all"},
{DanglingPtrType::kCrossTask, "cross_task"},
};
const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
&kPartitionAllocDanglingPtr,
"type",
DanglingPtrType::kAll,
&kDanglingPtrTypeOption,
};
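
A pattern repeated throughout this file: hand-written feature params gain `constinit`, which makes the compiler enforce constant initialization (no static initializer runs), while params that do want the generated accessor move to the `BASE_FEATURE_PARAM` macro, which per the notes above also sets up a local cache. Condensed from the hunks in this file:

    // Hand-written, constant-initialized, no local cache:
    constinit const FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
        &kPartitionAllocDanglingPtr, "mode", DanglingPtrMode::kCrash,
        &kDanglingPtrModeOption};
    // Macro form, generating the param together with a cached accessor:
    BASE_FEATURE_PARAM(int,
                       kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity,
                       &kPartitionAllocSchedulerLoopQuarantine,
                       "PartitionAllocSchedulerLoopQuarantineBrowserUICapacity",
                       0);
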
#if PA_BUILDFLAG(USE_STARSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan,
"PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanBrowserOnly,
"PartitionAllocPCScanBrowserOnly",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan is turned on only for the renderer's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanRendererOnly,
"PartitionAllocPCScanRendererOnly",
FEATURE_DISABLED_BY_DEFAULT);
// Use a larger maximum thread cache cacheable bucket size.
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSize",
@@ -108,32 +110,64 @@ MIRACLE_PARAMETER_FOR_INT(
BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
"PartitionAllocLargeEmptySlotSpanRing",
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
FEATURE_ENABLED_BY_DEFAULT);
#else
FEATURE_DISABLED_BY_DEFAULT);
#endif
BASE_FEATURE(kPartitionAllocWithAdvancedChecks,
"PartitionAllocWithAdvancedChecks",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>::Option
kPartitionAllocWithAdvancedChecksEnabledProcessesOptions[] = {
{PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
kBrowserOnlyStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserAndRenderer,
kBrowserAndRendererStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kNonRenderer,
kNonRendererStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kAllProcesses,
kAllProcessesStr}};
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
kPartitionAllocWithAdvancedChecksEnabledProcessesParam{
&kPartitionAllocWithAdvancedChecks, kPAFeatureEnabledProcessesStr,
PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
&kPartitionAllocWithAdvancedChecksEnabledProcessesOptions};
BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantine",
FEATURE_DISABLED_BY_DEFAULT);
// Scheduler Loop Quarantine's per-branch capacity in bytes.
const base::FeatureParam<int>
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBranchCapacity{
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBranchCapacity", 0};
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
BASE_FEATURE_PARAM(int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity,
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBrowserUICapacity",
0);
BASE_FEATURE(kPartitionAllocZappingByFreeFlags,
"PartitionAllocZappingByFreeFlags",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory,
"PartitionAllocEventuallyZeroFreedMemory",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocFewerMemoryRegions,
"PartitionAllocFewerMemoryRegions",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS) || \
(BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CASTOS)) || \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG)
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
@@ -142,30 +176,41 @@ BASE_FEATURE(kPartitionAllocBackupRefPtr,
constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
kBackupRefPtrEnabledProcessesOptions[] = {
{BackupRefPtrEnabledProcesses::kBrowserOnly, "browser-only"},
{BackupRefPtrEnabledProcesses::kBrowserOnly, kBrowserOnlyStr},
{BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
"browser-and-renderer"},
{BackupRefPtrEnabledProcesses::kNonRenderer, "non-renderer"},
{BackupRefPtrEnabledProcesses::kAllProcesses, "all-processes"}};
kBrowserAndRendererStr},
{BackupRefPtrEnabledProcesses::kNonRenderer, kNonRendererStr},
{BackupRefPtrEnabledProcesses::kAllProcesses, kAllProcessesStr}};
const base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam{
&kPartitionAllocBackupRefPtr, "enabled-processes",
BackupRefPtrEnabledProcesses::kNonRenderer,
&kBackupRefPtrEnabledProcessesOptions};
BASE_FEATURE_ENUM_PARAM(BackupRefPtrEnabledProcesses,
kBackupRefPtrEnabledProcessesParam,
&kPartitionAllocBackupRefPtr,
kPAFeatureEnabledProcessesStr,
#if PA_BUILDFLAG(IS_MAC) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
BackupRefPtrEnabledProcesses::kNonRenderer,
#else
BackupRefPtrEnabledProcesses::kAllProcesses,
#endif
&kBackupRefPtrEnabledProcessesOptions);
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"},
};
const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
&kPartitionAllocBackupRefPtr, "brp-mode", BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions};
BASE_FEATURE_ENUM_PARAM(BackupRefPtrMode,
kBackupRefPtrModeParam,
&kPartitionAllocBackupRefPtr,
"brp-mode",
BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions);
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<int> kBackupRefPtrExtraExtrasSizeParam{
&kPartitionAllocBackupRefPtr, "brp-extra-extras-size", 0};
BASE_FEATURE(kPartitionAllocMemoryTagging,
"PartitionAllocMemoryTagging",
#if PA_BUILDFLAG(USE_FULL_MTE)
#if PA_BUILDFLAG(USE_FULL_MTE) || BUILDFLAG(IS_ANDROID)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
@@ -176,7 +221,8 @@ constexpr FeatureParam<MemtagMode>::Option kMemtagModeOptions[] = {
{MemtagMode::kSync, "sync"},
{MemtagMode::kAsync, "async"}};
const base::FeatureParam<MemtagMode> kMemtagModeParam{
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<MemtagMode> kMemtagModeParam{
&kPartitionAllocMemoryTagging, "memtag-mode",
#if PA_BUILDFLAG(USE_FULL_MTE)
MemtagMode::kSync,
@@ -185,19 +231,30 @@ const base::FeatureParam<MemtagMode> kMemtagModeParam{
#endif
&kMemtagModeOptions};
constexpr FeatureParam<RetagMode>::Option kRetagModeOptions[] = {
{RetagMode::kIncrement, "increment"},
{RetagMode::kRandom, "random"},
};
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<RetagMode> kRetagModeParam{
&kPartitionAllocMemoryTagging, "retag-mode", RetagMode::kIncrement,
&kRetagModeOptions};
constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option
kMemoryTaggingEnabledProcessesOptions[] = {
{MemoryTaggingEnabledProcesses::kBrowserOnly, "browser-only"},
{MemoryTaggingEnabledProcesses::kNonRenderer, "non-renderer"},
{MemoryTaggingEnabledProcesses::kAllProcesses, "all-processes"}};
{MemoryTaggingEnabledProcesses::kBrowserOnly, kBrowserOnlyStr},
{MemoryTaggingEnabledProcesses::kNonRenderer, kNonRendererStr},
{MemoryTaggingEnabledProcesses::kAllProcesses, kAllProcessesStr}};
const base::FeatureParam<MemoryTaggingEnabledProcesses>
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<MemoryTaggingEnabledProcesses>
kMemoryTaggingEnabledProcessesParam{
&kPartitionAllocMemoryTagging, "enabled-processes",
&kPartitionAllocMemoryTagging, kPAFeatureEnabledProcessesStr,
#if PA_BUILDFLAG(USE_FULL_MTE)
MemoryTaggingEnabledProcesses::kAllProcesses,
#else
MemoryTaggingEnabledProcesses::kBrowserOnly,
MemoryTaggingEnabledProcesses::kNonRenderer,
#endif
&kMemoryTaggingEnabledProcessesOptions};
@@ -216,13 +273,15 @@ BASE_FEATURE(kPartitionAllocPermissiveMte,
#endif
);
const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true};
const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-extraction-check",
false}; // Not much noise at the moment to enable by default.
const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true};
BASE_FEATURE(kAsanBrpDereferenceCheck,
"AsanBrpDereferenceCheck",
FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE(kAsanBrpExtractionCheck,
"AsanBrpExtractionCheck", // Not much noise at the moment to
FEATURE_DISABLED_BY_DEFAULT); // enable by default.
BASE_FEATURE(kAsanBrpInstantiationCheck,
"AsanBrpInstantiationCheck",
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, switches the bucket distribution to a denser one.
//
@@ -236,29 +295,31 @@ BASE_FEATURE(kPartitionAllocUseDenserDistribution,
FEATURE_ENABLED_BY_DEFAULT
#endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
);
const base::FeatureParam<BucketDistributionMode>::Option
const FeatureParam<BucketDistributionMode>::Option
kPartitionAllocBucketDistributionOption[] = {
{BucketDistributionMode::kDefault, "default"},
{BucketDistributionMode::kDenser, "denser"},
};
const base::FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam {
&kPartitionAllocUseDenserDistribution, "mode",
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam{
&kPartitionAllocUseDenserDistribution, "mode",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
BucketDistributionMode::kDefault,
BucketDistributionMode::kDefault,
#else
BucketDistributionMode::kDenser,
BucketDistributionMode::kDenser,
#endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
&kPartitionAllocBucketDistributionOption
};
&kPartitionAllocBucketDistributionOption};
BASE_FEATURE(kPartitionAllocMemoryReclaimer,
"PartitionAllocMemoryReclaimer",
FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam<TimeDelta> kPartitionAllocMemoryReclaimerInterval = {
&kPartitionAllocMemoryReclaimer, "interval",
TimeDelta(), // Defaults to zero.
};
BASE_FEATURE_PARAM(TimeDelta,
kPartitionAllocMemoryReclaimerInterval,
&kPartitionAllocMemoryReclaimer,
"interval",
TimeDelta() // Defaults to zero.
);
// Configures whether we set a lower limit for renderers that do not have a main
// frame, similar to the limit already applied to backgrounded renderers.
@@ -266,52 +327,22 @@ BASE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers,
"LowerPAMemoryLimitForNonMainRenderers",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself.
BASE_FEATURE(kPartitionAllocPCScanMUAwareScheduler,
"PartitionAllocPCScanMUAwareScheduler",
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, PCScan frees unconditionally all quarantined objects.
// This is a performance testing feature.
BASE_FEATURE(kPartitionAllocPCScanImmediateFreeing,
"PartitionAllocPCScanImmediateFreeing",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan clears eagerly (synchronously) on free().
BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
"PartitionAllocPCScanEagerClearing",
FEATURE_DISABLED_BY_DEFAULT);
// In addition to heap, scan also the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning,
"PartitionAllocPCScanStackScanning",
#if PA_BUILDFLAG(STACK_SCAN_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif // PA_BUILDFLAG(STACK_SCAN_SUPPORTED)
);
BASE_FEATURE(kPartitionAllocDCScan,
"PartitionAllocDCScan",
FEATURE_DISABLED_BY_DEFAULT);
// Whether to straighten free lists for larger slot spans in PurgeMemory() ->
// ... -> PartitionPurgeSlotSpan().
BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
"PartitionAllocStraightenLargerSlotSpanFreeLists",
FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam<
partition_alloc::StraightenLargerSlotSpanFreeListsMode>::Option
kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
const FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>::
Option kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
{partition_alloc::StraightenLargerSlotSpanFreeListsMode::
kOnlyWhenUnprovisioning,
"only-when-unprovisioning"},
{partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways,
"always"},
};
const base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<
partition_alloc::StraightenLargerSlotSpanFreeListsMode>
kPartitionAllocStraightenLargerSlotSpanFreeListsMode = {
&kPartitionAllocStraightenLargerSlotSpanFreeLists,
"mode",
@@ -344,9 +375,11 @@ BASE_FEATURE(kPageAllocatorRetryOnCommitFailure,
// The feature: kPartialLowEndModeOnMidRangeDevices is defined in
// //base/features.cc. Since the following feature param is related to
// PartitionAlloc, define the param here.
const FeatureParam<bool> kPartialLowEndModeExcludePartitionAllocSupport{
&kPartialLowEndModeOnMidRangeDevices, "exclude-partition-alloc-support",
false};
BASE_FEATURE_PARAM(bool,
kPartialLowEndModeExcludePartitionAllocSupport,
&kPartialLowEndModeOnMidRangeDevices,
"exclude-partition-alloc-support",
false);
#endif
BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier,
@@ -364,19 +397,19 @@ MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplierForAndroid,
1.)
constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
base::TimeDelta time_delta) {
TimeDelta time_delta) {
return partition_alloc::internal::base::Microseconds(
time_delta.InMicroseconds());
}
constexpr base::TimeDelta FromPartitionAllocTimeDelta(
constexpr TimeDelta FromPartitionAllocTimeDelta(
partition_alloc::internal::base::TimeDelta time_delta) {
return base::Microseconds(time_delta.InMicroseconds());
return Microseconds(time_delta.InMicroseconds());
}
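
These two helpers convert between `base::TimeDelta` and PartitionAlloc's internal time type by round-tripping through a microsecond count; both are `constexpr`, so the conversion costs nothing at runtime. An illustrative round trip (a sketch, not code from the diff):

    constexpr base::TimeDelta kInterval = base::Seconds(5);
    constexpr auto pa_interval = ToPartitionAllocTimeDelta(kInterval);
    static_assert(FromPartitionAllocTimeDelta(pa_interval) == kInterval);
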
BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
"EnableConfigurableThreadCachePurgeInterval",
base::FEATURE_DISABLED_BY_DEFAULT);
FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_TIME_DELTA(
GetThreadCacheMinPurgeIntervalValue,
@@ -413,7 +446,7 @@ GetThreadCacheDefaultPurgeInterval() {
BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
"EnableConfigurableThreadCacheMinCachedMemoryForPurging",
base::FEATURE_DISABLED_BY_DEFAULT);
FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_INT(
GetThreadCacheMinCachedMemoryForPurgingBytes,
@@ -433,64 +466,9 @@ BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
"PartitionAllocDisableBRPInBufferPartition",
FEATURE_DISABLED_BY_DEFAULT);
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
BASE_FEATURE(kUsePoolOffsetFreelists,
"PartitionAllocUsePoolOffsetFreelists",
base::FEATURE_DISABLED_BY_DEFAULT);
#endif
BASE_FEATURE(kPartitionAllocMakeFreeNoOpOnShutdown,
"PartitionAllocMakeFreeNoOpOnShutdown",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<WhenFreeBecomesNoOp>::Option
kPartitionAllocMakeFreeNoOpOnShutdownOptions[] = {
{WhenFreeBecomesNoOp::kBeforePreShutdown, "before-preshutdown"},
{WhenFreeBecomesNoOp::kBeforeHaltingStartupTracingController,
"before-halting-startup-tracing-controller"},
{
WhenFreeBecomesNoOp::kBeforeShutDownThreads,
"before-shutdown-threads",
},
{
WhenFreeBecomesNoOp::kInShutDownThreads,
"in-shutdown-threads",
},
{
WhenFreeBecomesNoOp::kAfterShutDownThreads,
"after-shutdown-threads",
},
};
const base::FeatureParam<WhenFreeBecomesNoOp>
kPartitionAllocMakeFreeNoOpOnShutdownParam{
&kPartitionAllocMakeFreeNoOpOnShutdown, "callsite",
WhenFreeBecomesNoOp::kBeforeShutDownThreads,
&kPartitionAllocMakeFreeNoOpOnShutdownOptions};
void MakeFreeNoOp(WhenFreeBecomesNoOp callsite) {
CHECK(base::FeatureList::GetInstance());
// Ignoring `free()` during Shutdown would allow developers to introduce new
// dangling pointers, so we avoid ignoring `free()` while the detector is
// enabled.
// Note: For now, the DanglingPointerDetector is enabled only on 5 bots and
// in the Linux non-official configuration.
// TODO(b/40802063): Reconsider this decision after the experiment.
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
if (base::FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
return;
}
#endif // PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
if (base::FeatureList::IsEnabled(kPartitionAllocMakeFreeNoOpOnShutdown) &&
kPartitionAllocMakeFreeNoOpOnShutdownParam.Get() == callsite) {
allocator_shim::InsertNoOpOnFreeAllocatorShimOnShutDown();
}
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
}
BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
"PartitionAllocAdjustSizeWhenInForeground",
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
FEATURE_ENABLED_BY_DEFAULT);
#else
FEATURE_DISABLED_BY_DEFAULT);
@@ -498,7 +476,25 @@ BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
BASE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans,
"PartitionAllocUseSmallSingleSlotSpans",
base::FEATURE_DISABLED_BY_DEFAULT);
FEATURE_ENABLED_BY_DEFAULT);
} // namespace features
} // namespace base
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
BASE_FEATURE(kPartitionAllocShadowMetadata,
"PartitionAllocShadowMetadata",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<ShadowMetadataEnabledProcesses>::Option
kShadowMetadataEnabledProcessesOptions[] = {
{ShadowMetadataEnabledProcesses::kRendererOnly, kRendererOnlyStr},
{ShadowMetadataEnabledProcesses::kAllChildProcesses,
kAllChildProcessesStr}};
// Note: Do not use the prepared macro here; no local cache is needed.
constinit const FeatureParam<ShadowMetadataEnabledProcesses>
kShadowMetadataEnabledProcessesParam{
&kPartitionAllocShadowMetadata, kPAFeatureEnabledProcessesStr,
ShadowMetadataEnabledProcesses::kRendererOnly,
&kShadowMetadataEnabledProcessesOptions};
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace base::features

View file

@@ -9,23 +9,40 @@
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "base/strings/string_piece.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_root.h"
namespace base {
namespace features {
namespace base::features {
extern const BASE_EXPORT Feature kPartitionAllocUnretainedDanglingPtr;
namespace internal {
enum class PAFeatureEnabledProcesses {
// Enabled only in the browser process.
kBrowserOnly,
// Enabled only in the browser and renderer processes.
kBrowserAndRenderer,
// Enabled in all processes, except renderer.
kNonRenderer,
// Enabled only in renderer processes.
kRendererOnly,
// Enabled in all child processes, except zygote.
kAllChildProcesses,
// Enabled in all processes.
kAllProcesses,
};
} // namespace internal
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUnretainedDanglingPtr);
enum class UnretainedDanglingPtrMode {
kCrash,
kDumpWithoutCrashing,
};
extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(UnretainedDanglingPtrMode,
kUnretainedDanglingPtrModeParam);
// See /docs/dangling_ptr.md
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
@@ -44,8 +61,7 @@ enum class DanglingPtrMode {
// Note: This will be extended with a single shot DumpWithoutCrashing.
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrMode, kDanglingPtrModeParam);
enum class DanglingPtrType {
// Act on any dangling raw_ptr released after being freed.
kAll, // (default)
@@ -56,39 +72,47 @@ enum class DanglingPtrType {
// Note: This will be extended with LongLived
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrType>
kDanglingPtrTypeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrType, kDanglingPtrTypeParam);
using PartitionAllocWithAdvancedChecksEnabledProcesses =
internal::PAFeatureEnabledProcesses;
#if PA_BUILDFLAG(USE_STARSCAN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan);
#endif
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize);
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValue();
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocWithAdvancedChecks);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
PartitionAllocWithAdvancedChecksEnabledProcesses,
kPartitionAllocWithAdvancedChecksEnabledProcessesParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSchedulerLoopQuarantine);
// Scheduler Loop Quarantine's per-thread capacity in bytes.
extern const BASE_EXPORT base::FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBranchCapacity;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
int,
kPartitionAllocSchedulerLoopQuarantineBranchCapacity);
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
// TODO(https://crbug.com/387470567): Support more thread types.
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags);
// Eventually zero out most PartitionAlloc memory. This is not meant as a
// security guarantee, but to increase the compression ratio of PartitionAlloc's
// fragmented super pages.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory);
// Whether to make PartitionAlloc use fewer memory regions. This matters on
// Linux-based systems, where there is a per-process limit that we hit in some
// cases. See the comment in PartitionBucket::SlotSpanCommittedSize() for
// details.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocFewerMemoryRegions);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
enum class BackupRefPtrEnabledProcesses {
// BRP enabled only in the browser process.
kBrowserOnly,
// BRP enabled only in the browser and renderer processes.
kBrowserAndRenderer,
// BRP enabled in all processes, except renderer.
kNonRenderer,
// BRP enabled in all processes.
kAllProcesses,
};
using BackupRefPtrEnabledProcesses = internal::PAFeatureEnabledProcesses;
enum class BackupRefPtrMode {
// BRP is disabled across all partitions. Equivalent to the Finch flag being
@@ -107,76 +131,54 @@ enum class MemtagMode {
kAsync,
};
enum class MemoryTaggingEnabledProcesses {
// Memory tagging enabled only in the browser process.
kBrowserOnly,
// Memory tagging enabled in all processes, except renderer.
kNonRenderer,
// Memory tagging enabled in all processes.
kAllProcesses,
enum class RetagMode {
// Allocations are retagged by incrementing the current tag.
kIncrement,
// Allocations are retagged with a random tag.
kRandom,
};
using MemoryTaggingEnabledProcesses = internal::PAFeatureEnabledProcesses;
enum class BucketDistributionMode : uint8_t {
kDefault,
kDenser,
};
// Parameter for 'kPartitionAllocMakeFreeNoOpOnShutdown' feature which
// controls when free() becomes a no-op during Shutdown()
enum class WhenFreeBecomesNoOp {
kBeforePreShutdown,
kBeforeHaltingStartupTracingController,
kBeforeShutDownThreads,
kInShutDownThreads,
kAfterShutDownThreads,
};
// Inserts a no-op on 'free()' allocator shim at the front of the
// dispatch chain if called from the appropriate callsite.
BASE_EXPORT void MakeFreeNoOp(WhenFreeBecomesNoOp callsite);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMakeFreeNoOpOnShutdown);
extern const BASE_EXPORT base::FeatureParam<WhenFreeBecomesNoOp>
kPartitionAllocMakeFreeNoOpOnShutdownParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr);
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
kBackupRefPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrEnabledProcesses,
kBackupRefPtrEnabledProcessesParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrMode,
kBackupRefPtrModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(int,
kBackupRefPtrExtraExtrasSizeParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryTagging);
extern const BASE_EXPORT base::FeatureParam<MemtagMode> kMemtagModeParam;
extern const BASE_EXPORT base::FeatureParam<MemoryTaggingEnabledProcesses>
kMemoryTaggingEnabledProcessesParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemtagMode, kMemtagModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(RetagMode, kRetagModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemoryTaggingEnabledProcesses,
kMemoryTaggingEnabledProcessesParam);
// Kill switch for memory tagging. Skips any code related to memory tagging when
// enabled.
BASE_EXPORT BASE_DECLARE_FEATURE(kKillPartitionAllocMemoryTagging);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableDereferenceCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableExtractionCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableInstantiationCheckParam;
extern const BASE_EXPORT base::FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpDereferenceCheck);
BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpExtractionCheck);
BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpInstantiationCheck);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BucketDistributionMode,
kPartitionAllocBucketDistributionParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanMUAwareScheduler);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanStackScanning);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDCScan);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanImmediateFreeing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanEagerClearing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer);
extern const BASE_EXPORT base::FeatureParam<TimeDelta>
kPartitionAllocMemoryReclaimerInterval;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(TimeDelta,
kPartitionAllocMemoryReclaimerInterval);
BASE_EXPORT BASE_DECLARE_FEATURE(
kPartitionAllocStraightenLargerSlotSpanFreeLists);
extern const BASE_EXPORT
base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
kPartitionAllocStraightenLargerSlotSpanFreeListsMode;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
partition_alloc::StraightenLargerSlotSpanFreeListsMode,
kPartitionAllocStraightenLargerSlotSpanFreeListsMode);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);
@@ -185,8 +187,9 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPageAllocatorRetryOnCommitFailure);
#endif
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
extern const base::FeatureParam<bool>
kPartialLowEndModeExcludePartitionAllocSupport;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
bool,
kPartialLowEndModeExcludePartitionAllocSupport);
#endif
BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier);
@@ -207,13 +210,6 @@ BASE_EXPORT int GetThreadCacheMinCachedMemoryForPurgingBytes();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition);
// This feature is additionally gated behind a buildflag because
// pool offset freelists cannot be represented when PartitionAlloc uses
// 32-bit pointers.
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
BASE_EXPORT BASE_DECLARE_FEATURE(kUsePoolOffsetFreelists);
#endif
// When set, partitions use a larger ring buffer and free memory less
// aggressively when in the foreground.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground);
@@ -224,7 +220,14 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground);
// See also: https://crbug.com/333443437
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans);
} // namespace features
} // namespace base
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
using ShadowMetadataEnabledProcesses = internal::PAFeatureEnabledProcesses;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocShadowMetadata);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(ShadowMetadataEnabledProcesses,
kShadowMetadataEnabledProcessesParam);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace base::features
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

View file

@@ -4,6 +4,7 @@
#include "base/allocator/partition_alloc_support.h"
#include <algorithm>
#include <array>
#include <cinttypes>
#include <cstdint>
@@ -15,6 +16,7 @@
#include "base/allocator/partition_alloc_features.h"
#include "base/at_exit.h"
#include "base/check.h"
#include "base/containers/span.h"
#include "base/cpu.h"
#include "base/debug/dump_without_crashing.h"
#include "base/debug/stack_trace.h"
@@ -30,7 +32,6 @@
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/pending_task.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
@@ -42,12 +43,13 @@
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#include "partition_alloc/allocation_guard.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@@ -57,17 +59,10 @@
#include "partition_alloc/pointers/raw_ptr.h"
#include "partition_alloc/shim/allocator_shim.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/stack/stack.h"
#include "partition_alloc/thread_cache.h"
#if PA_BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/shim/nonscannable_allocator.h"
#include "partition_alloc/starscan/pcscan.h"
#include "partition_alloc/starscan/pcscan_scheduling.h"
#include "partition_alloc/starscan/stats_collector.h"
#include "partition_alloc/starscan/stats_reporter.h"
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(IS_ANDROID)
#include "base/system/sys_info.h"
#endif
@@ -76,6 +71,11 @@
#include "partition_alloc/memory_reclaimer.h"
#endif
#if PA_BUILDFLAG( \
ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT)
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.h"
#endif
#if BUILDFLAG(IS_ANDROID) && PA_BUILDFLAG(HAS_MEMORY_TAGGING)
#include <sys/system_properties.h>
#endif
@@ -107,12 +107,9 @@ BootloaderOverride GetBootloaderOverride() {
}
#endif
// When under this experiment avoid running periodic purging or reclaim for the
// first minute after the first attempt. This is based on the insight that
// processes often don't live past this minute.
static BASE_FEATURE(kDelayFirstPeriodicPAPurgeOrReclaim,
"DelayFirstPeriodicPAPurgeOrReclaim",
base::FEATURE_ENABLED_BY_DEFAULT);
// Avoid running periodic purging or reclaim for the first minute after the
// first attempt. This is based on the insight that processes often don't live
// past this minute.
constexpr base::TimeDelta kFirstPAPurgeOrReclaimDelay = base::Minutes(1);
// This is defined in content/public/common/content_switches.h, which is not
@@ -120,129 +117,10 @@ constexpr base::TimeDelta kFirstPAPurgeOrReclaimDelay = base::Minutes(1);
namespace switches {
[[maybe_unused]] constexpr char kRendererProcess[] = "renderer";
constexpr char kZygoteProcess[] = "zygote";
#if PA_BUILDFLAG(USE_STARSCAN)
constexpr char kGpuProcess[] = "gpu-process";
constexpr char kUtilityProcess[] = "utility";
#endif
} // namespace switches
#if PA_BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(ENABLE_BASE_TRACING)
constexpr const char* ScannerIdToTracingString(
partition_alloc::internal::StatsCollector::ScannerId id) {
switch (id) {
case partition_alloc::internal::StatsCollector::ScannerId::kClear:
return "PCScan.Scanner.Clear";
case partition_alloc::internal::StatsCollector::ScannerId::kScan:
return "PCScan.Scanner.Scan";
case partition_alloc::internal::StatsCollector::ScannerId::kSweep:
return "PCScan.Scanner.Sweep";
case partition_alloc::internal::StatsCollector::ScannerId::kOverall:
return "PCScan.Scanner";
case partition_alloc::internal::StatsCollector::ScannerId::kNumIds:
__builtin_unreachable();
}
}
constexpr const char* MutatorIdToTracingString(
partition_alloc::internal::StatsCollector::MutatorId id) {
switch (id) {
case partition_alloc::internal::StatsCollector::MutatorId::kClear:
return "PCScan.Mutator.Clear";
case partition_alloc::internal::StatsCollector::MutatorId::kScanStack:
return "PCScan.Mutator.ScanStack";
case partition_alloc::internal::StatsCollector::MutatorId::kScan:
return "PCScan.Mutator.Scan";
case partition_alloc::internal::StatsCollector::MutatorId::kOverall:
return "PCScan.Mutator";
case partition_alloc::internal::StatsCollector::MutatorId::kNumIds:
__builtin_unreachable();
}
}
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
class StatsReporterImpl final : public partition_alloc::StatsReporter {
public:
void ReportTraceEvent(
partition_alloc::internal::StatsCollector::ScannerId id,
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time.
const char* tracing_id = ScannerIdToTracingString(id);
const TimeTicks start_time =
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
const TimeTicks end_time =
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
void ReportTraceEvent(
partition_alloc::internal::StatsCollector::MutatorId id,
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time.
const char* tracing_id = MutatorIdToTracingString(id);
const TimeTicks start_time =
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
const TimeTicks end_time =
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
void ReportSurvivedQuarantineSize(size_t survived_size) override {
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantineSize",
survived_size);
}
void ReportSurvivedQuarantinePercent(double survived_rate) override {
// Multiply by 1000 since TRACE_COUNTER1 expects integer. In catapult,
// divide back.
// TODO(bikineev): Remove after switching to perfetto.
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantinePercent",
1000 * survived_rate);
}
void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
TimeDelta sample = Microseconds(sample_in_usec);
UmaHistogramTimes(stats_name, sample);
}
private:
static constexpr char kTraceCategory[] = "partition_alloc";
};
#endif // PA_BUILDFLAG(USE_STARSCAN)
} // namespace
#if PA_BUILDFLAG(USE_STARSCAN)
void RegisterPCScanStatsReporter() {
static StatsReporterImpl s_reporter;
static bool registered = false;
DCHECK(!registered);
partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
registered = true;
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
namespace {
void RunThreadCachePeriodicPurge() {
@@ -268,7 +146,7 @@ BASE_FEATURE(kDisableMemoryReclaimerInBackground,
// exceeded.
BASE_FEATURE(kPartitionAllocShortMemoryReclaim,
"PartitionAllocShortMemoryReclaim",
base::FEATURE_DISABLED_BY_DEFAULT);
base::FEATURE_ENABLED_BY_DEFAULT);
// static
MemoryReclaimerSupport& MemoryReclaimerSupport::Instance() {
@@ -290,6 +168,8 @@ void MemoryReclaimerSupport::Start(scoped_refptr<TaskRunner> task_runner) {
return;
}
task_runner_ = task_runner;
// The caller of the API fully controls where the reclaim runs.
// However, there are a few reasons to recommend that the caller run
// it on the main thread:
@@ -305,13 +185,7 @@ void MemoryReclaimerSupport::Start(scoped_refptr<TaskRunner> task_runner) {
// seconds is useful. Since this is meant to run during idle time only, it is
// a reasonable starting point balancing effectiveness vs. cost. See
// crbug.com/942512 for details and experimental results.
TimeDelta delay;
if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
}
task_runner_ = task_runner;
MaybeScheduleTask(delay);
MaybeScheduleTask(kFirstPAPurgeOrReclaimDelay);
}
void MemoryReclaimerSupport::SetForegrounded(bool in_foreground) {
@ -372,12 +246,9 @@ void MemoryReclaimerSupport::MaybeScheduleTask(TimeDelta delay) {
void StartThreadCachePeriodicPurge() {
auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
TimeDelta delay =
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
}
TimeDelta delay = std::max(
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds()),
kFirstPAPurgeOrReclaimDelay);
SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
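// For example, if the registry suggests a 1-second purge interval while
// kFirstPAPurgeOrReclaimDelay is larger, std::max() defers the first purge
// to the longer delay, keeping this work out of early startup.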
@ -458,6 +329,40 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
return trials;
}
namespace {
bool ShouldEnableFeatureOnProcess(
features::internal::PAFeatureEnabledProcesses enabled_processes,
const std::string& process_type) {
switch (enabled_processes) {
case features::internal::PAFeatureEnabledProcesses::kBrowserOnly:
return process_type.empty();
case features::internal::PAFeatureEnabledProcesses::kNonRenderer:
return process_type != switches::kRendererProcess;
case features::internal::PAFeatureEnabledProcesses::kBrowserAndRenderer:
return process_type.empty() || process_type == switches::kRendererProcess;
case features::internal::PAFeatureEnabledProcesses::kRendererOnly:
return process_type == switches::kRendererProcess;
case features::internal::PAFeatureEnabledProcesses::kAllChildProcesses:
return !process_type.empty() && process_type != switches::kZygoteProcess;
case features::internal::PAFeatureEnabledProcesses::kAllProcesses:
return true;
}
}
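// For example, kNonRenderer returns true for the browser process (empty
// process_type) and any non-renderer child process, but false for renderers;
// kBrowserOnly returns true only when process_type is empty.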
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
bool ShouldEnableShadowMetadata(const std::string& process_type) {
if (!base::FeatureList::IsEnabled(
base::features::kPartitionAllocShadowMetadata)) {
return false;
}
return ShouldEnableFeatureOnProcess(
features::kShadowMetadataEnabledProcessesParam.Get(), process_type);
}
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
namespace {
@ -550,8 +455,8 @@ std::string ExtractDanglingPtrSignature(std::string stacktrace) {
size_t caller_index = 0;
for (size_t i = 0; i < lines.size(); ++i) {
for (const auto& patterns : callee_patterns) {
if (ranges::all_of(patterns, [&](std::string_view pattern) {
return lines[i].find(pattern) != StringPiece::npos;
if (std::ranges::all_of(patterns, [&](std::string_view pattern) {
return lines[i].find(pattern) != std::string_view::npos;
})) {
caller_index = i + 1;
}
@ -673,38 +578,55 @@ void DanglingRawPtrReleased(uintptr_t id) {
std::string dangling_signature = ExtractDanglingPtrSignature(
free_info, stack_trace_release, task_trace_release);
static const char dangling_ptr_footer[] =
"\n"
"\n"
"Please check for more information on:\n"
"https://chromium.googlesource.com/chromium/src/+/main/docs/"
"dangling_ptr_guide.md\n"
"\n"
"Googlers: Please give us your feedback about the dangling pointer\n"
" detector at:\n"
" http://go/dangling-ptr-cq-survey\n";
if (free_info) {
LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n"
<< dangling_signature << "\n\n"
<< "The memory was freed at:\n"
<< free_info->stack_trace << "\n"
<< free_info->task_trace << "\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << "\n"
<< task_trace_release << dangling_ptr_footer;
} else {
LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< dangling_signature << "\n\n"
<< "It was not recorded where the memory was freed.\n\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << "\n"
<< task_trace_release << dangling_ptr_footer;
{
// Log the full error in a single LogMessage. Printing StackTrace is
// expensive, so we want to avoid interleaving the output with other logs.
logging::LogMessage log_message(__FILE__, __LINE__, logging::LOGGING_ERROR);
std::ostream& error = log_message.stream();
// The dangling signature can be used by scripts to locate the origin of
// every dangling pointer.
error << "\n\n"
<< ExtractDanglingPtrSignature(free_info, stack_trace_release,
task_trace_release)
<< "\n\n";
error << "[DanglingPtr](1/3) A raw_ptr/raw_ref is dangling.\n\n";
auto print_traces = [](debug::StackTrace stack_trace,
debug::TaskTrace task_trace, std::ostream& error) {
error << "Stack trace:\n";
error << stack_trace << "\n";
// Printing "Task trace:" is implied by the TaskTrace itself.
if (!task_trace.empty()) {
error << task_trace << "\n";
}
};
error << "[DanglingPtr](2/3) ";
if (free_info) {
error << "First, the memory was freed at:\n\n";
print_traces(free_info->stack_trace, free_info->task_trace, error);
} else {
error << "It was not recorded where the memory was freed.\n";
}
error << "[DanglingPtr](3/3) Later, the dangling raw_ptr was released "
"at:\n\n";
print_traces(stack_trace_release, task_trace_release, error);
error << "Please check for more information on:\n";
error << "https://chromium.googlesource.com/chromium/src/+/main/docs/";
error << "dangling_ptr_guide.md\n";
error << "\n";
}
if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
ImmediateCrash();
// We use `PA_IMMEDIATE_CRASH()` instead of base's ImmediateCrash() to avoid
// printing the raw_ptr release stack trace twice.
PA_IMMEDIATE_CRASH();
}
}
@ -729,14 +651,22 @@ void CheckDanglingRawPtrBufferEmpty() {
<< entry->task_trace << "\n"
<< entry->stack_trace << "\n";
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER)
auto is_frame_ptr_not_null = [](const void* frame_ptr) {
return frame_ptr != nullptr;
};
std::vector<std::array<const void*, 32>> stack_traces =
internal::InstanceTracer::GetStackTracesForDanglingRefs(entry->id);
for (const auto& raw_stack_trace : stack_traces) {
CHECK(std::ranges::is_partitioned(raw_stack_trace, is_frame_ptr_not_null))
<< "`raw_stack_trace` is expected to be partitioned: non-null values "
"at the begining followed by `nullptr`s.";
LOG(ERROR) << "Dangling reference from:\n";
LOG(ERROR) << debug::StackTrace(raw_stack_trace.data(),
raw_stack_trace.size() -
static_cast<size_t>(ranges::count(
raw_stack_trace, nullptr)))
LOG(ERROR) << debug::StackTrace(
// This call truncates the `nullptr` tail of the stack
// trace (see the `is_partitioned` CHECK above).
span(raw_stack_trace.begin(),
std::ranges::partition_point(
raw_stack_trace, is_frame_ptr_not_null)))
<< "\n";
}
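// For example, a raw_stack_trace of {&f0, &f1, nullptr, nullptr} satisfies
// the is_partitioned CHECK above, std::ranges::partition_point() points at
// the first nullptr, and the span handed to debug::StackTrace() has size 2.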
#else
@ -855,102 +785,28 @@ void InstallUnretainedDanglingRawPtrChecks() {
}
}
namespace {
#if PA_BUILDFLAG(USE_STARSCAN)
void SetProcessNameForPCScan(const std::string& process_type) {
const char* name = [&process_type] {
if (process_type.empty()) {
// Empty means browser process.
return "Browser";
}
if (process_type == switches::kRendererProcess) {
return "Renderer";
}
if (process_type == switches::kGpuProcess) {
return "Gpu";
}
if (process_type == switches::kUtilityProcess) {
return "Utility";
}
return static_cast<const char*>(nullptr);
}();
if (name) {
partition_alloc::internal::PCScan::SetProcessName(name);
}
}
bool EnablePCScanForMallocPartitionsIfNeeded() {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
&base::PlatformThread::SetName);
using Config = partition_alloc::internal::PCScan::InitConfig;
DCHECK(base::FeatureList::GetInstance());
if (base::FeatureList::IsEnabled(base::features::kPartitionAllocPCScan)) {
allocator_shim::EnablePCScan({Config::WantedWriteProtectionMode::kEnabled,
Config::SafepointMode::kEnabled});
base::allocator::RegisterPCScanStatsReporter();
return true;
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false;
}
bool EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded() {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
using Config = partition_alloc::internal::PCScan::InitConfig;
DCHECK(base::FeatureList::GetInstance());
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanBrowserOnly)) {
const Config::WantedWriteProtectionMode wp_mode =
base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
? Config::WantedWriteProtectionMode::kEnabled
: Config::WantedWriteProtectionMode::kDisabled;
#if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
<< "DCScan is currently only supported on Linux based systems";
#endif
allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kEnabled});
base::allocator::RegisterPCScanStatsReporter();
return true;
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false;
}
bool EnablePCScanForMallocPartitionsInRendererProcessIfNeeded() {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
using Config = partition_alloc::internal::PCScan::InitConfig;
DCHECK(base::FeatureList::GetInstance());
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanRendererOnly)) {
const Config::WantedWriteProtectionMode wp_mode =
base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
? Config::WantedWriteProtectionMode::kEnabled
: Config::WantedWriteProtectionMode::kDisabled;
#if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
<< "DCScan is currently only supported on Linux based systems";
#endif
allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kDisabled});
base::allocator::RegisterPCScanStatsReporter();
return true;
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false;
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
} // namespace
void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
DCHECK_NE(process_type, switches::kZygoteProcess);
// TODO(keishi): Move the code to enable BRP back here after Finch
// experiments.
}
void MakeFreeNoOp() {
// Ignoring `free()` during shutdown would allow developers to introduce new
// dangling pointers, so we avoid ignoring `free()` while the
// DanglingPointerDetector is enabled.
// Note: For now, the DanglingPointerDetector is only enabled on 5 bots and
// in Linux non-official configurations.
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
CHECK(base::FeatureList::GetInstance());
if (base::FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
return;
}
#endif // PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator_shim::InsertNoOpOnFreeAllocatorShimOnShutDown();
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
}
PartitionAllocSupport* PartitionAllocSupport::Get() {
static auto* singleton = new PartitionAllocSupport();
return singleton;
@ -982,14 +838,8 @@ bool PartitionAllocSupport::ShouldEnableMemoryTagging(
base::features::kKillPartitionAllocMemoryTagging)) {
return false;
}
switch (base::features::kMemoryTaggingEnabledProcessesParam.Get()) {
case base::features::MemoryTaggingEnabledProcesses::kBrowserOnly:
return process_type.empty();
case base::features::MemoryTaggingEnabledProcesses::kNonRenderer:
return process_type != switches::kRendererProcess;
case base::features::MemoryTaggingEnabledProcesses::kAllProcesses:
return true;
}
return ShouldEnableFeatureOnProcess(
base::features::kMemoryTaggingEnabledProcessesParam.Get(), process_type);
}
// static
@ -997,55 +847,51 @@ bool PartitionAllocSupport::ShouldEnableMemoryTaggingInRendererProcess() {
return ShouldEnableMemoryTagging(switches::kRendererProcess);
}
// static
bool PartitionAllocSupport::ShouldEnablePartitionAllocWithAdvancedChecks(
const std::string& process_type) {
#if !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false;
#else
if (!base::FeatureList::IsEnabled(
base::features::kPartitionAllocWithAdvancedChecks)) {
return false;
}
return ShouldEnableFeatureOnProcess(
base::features::kPartitionAllocWithAdvancedChecksEnabledProcessesParam
.Get(),
process_type);
#endif // !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
// static
PartitionAllocSupport::BrpConfiguration
PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
CHECK(base::FeatureList::GetInstance());
bool process_affected_by_brp_flag = false;
#if (PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr)) {
// No specified process type means this is the Browser process.
switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
process_affected_by_brp_flag = process_type.empty();
break;
case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
process_affected_by_brp_flag =
process_type.empty() ||
(process_type == switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
process_affected_by_brp_flag =
(process_type != switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
process_affected_by_brp_flag = true;
break;
}
}
#endif // (PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
// PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
const bool enable_brp =
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// kDisabled is equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
process_affected_by_brp_flag &&
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
!PA_BUILDFLAG(FORCE_DISABLE_BACKUP_REF_PTR_FEATURE)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr) &&
base::features::kBackupRefPtrModeParam.Get() !=
base::features::BackupRefPtrMode::kDisabled;
#else
false;
base::features::BackupRefPtrMode::kDisabled &&
ShouldEnableFeatureOnProcess(
base::features::kBackupRefPtrEnabledProcessesParam.Get(),
process_type)) {
return {
.enable_brp = true,
.extra_extras_size = static_cast<size_t>(
base::features::kBackupRefPtrExtraExtrasSizeParam.Get()),
};
}
#endif
return {
enable_brp,
process_affected_by_brp_flag,
.enable_brp = false,
.extra_extras_size = 0,
};
}
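// For example (illustrative configuration): in a build with BRP support
// compiled in and not force-disabled, enabling kPartitionAllocBackupRefPtr
// with a mode other than kDisabled and a processes param of kBrowserOnly
// yields enable_brp=true in the browser process (empty process_type), while
// a renderer falls through to the all-disabled default configuration.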
@ -1148,20 +994,26 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
[[maybe_unused]] BrpConfiguration brp_config =
GetBrpConfiguration(process_type);
#if PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (brp_config.process_affected_by_brp_flag) {
base::RawPtrAsanService::GetInstance().Configure(
base::EnableDereferenceCheck(
base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
base::EnableExtractionCheck(
base::features::kBackupRefPtrAsanEnableExtractionCheckParam.Get()),
base::EnableInstantiationCheck(
base::features::kBackupRefPtrAsanEnableInstantiationCheckParam
.Get()));
// Configure ASAN hooks to report the `MiraclePtr status`. This is enabled
// only if BackupRefPtr is normally enabled in the current process for the
// current platform. Note that CastOS and iOS aren't protected by BackupRefPtr
at the moment, so they are excluded.
#if PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && !PA_BUILDFLAG(IS_CASTOS) && \
!PA_BUILDFLAG(IS_IOS)
if (ShouldEnableFeatureOnProcess(
base::features::kBackupRefPtrEnabledProcessesParam.Get(),
process_type)) {
RawPtrAsanService::GetInstance().Configure(
EnableDereferenceCheck(
FeatureList::IsEnabled(features::kAsanBrpDereferenceCheck)),
EnableExtractionCheck(
FeatureList::IsEnabled(features::kAsanBrpExtractionCheck)),
EnableInstantiationCheck(
FeatureList::IsEnabled(features::kAsanBrpInstantiationCheck)));
} else {
base::RawPtrAsanService::GetInstance().Configure(
base::EnableDereferenceCheck(false), base::EnableExtractionCheck(false),
base::EnableInstantiationCheck(false));
RawPtrAsanService::GetInstance().Configure(EnableDereferenceCheck(false),
EnableExtractionCheck(false),
EnableInstantiationCheck(false));
}
#endif // PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
@ -1186,13 +1038,10 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
.Get());
const bool zapping_by_free_flags = base::FeatureList::IsEnabled(
base::features::kPartitionAllocZappingByFreeFlags);
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
const bool use_pool_offset_freelists =
base::FeatureList::IsEnabled(base::features::kUsePoolOffsetFreelists);
#else
const bool use_pool_offset_freelists = false;
#endif // PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
const bool eventually_zero_freed_memory = base::FeatureList::IsEnabled(
base::features::kPartitionAllocEventuallyZeroFreedMemory);
const bool fewer_memory_regions = base::FeatureList::IsEnabled(
base::features::kPartitionAllocFewerMemoryRegions);
bool enable_memory_tagging = false;
partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode =
@ -1203,17 +1052,37 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
// check here too to wrap the GetMemoryTaggingModeForCurrentThread() call.
if (!base::FeatureList::IsEnabled(
base::features::kKillPartitionAllocMemoryTagging)) {
// If synchronous mode is enabled from startup it means this is a test and
// memory tagging should be enabled.
if (partition_alloc::internal::GetMemoryTaggingModeForCurrentThread() ==
// If synchronous mode is enabled from startup, it means this is a test or it
// was force-enabled in Chrome somehow, so honor that choice.
partition_alloc::TagViolationReportingMode
startup_memory_tagging_reporting_mode =
partition_alloc::internal::GetMemoryTaggingModeForCurrentThread();
if (startup_memory_tagging_reporting_mode ==
partition_alloc::TagViolationReportingMode::kSynchronous) {
enable_memory_tagging = true;
memory_tagging_reporting_mode =
partition_alloc::TagViolationReportingMode::kSynchronous;
// Not enabling permissive mode as this config is used to crash and detect
// bugs.
VLOG(1) << "PartitionAlloc: Memory tagging enabled in SYNC mode at "
"startup (Process: "
<< process_type << ")";
} else {
enable_memory_tagging = ShouldEnableMemoryTagging(process_type);
#if BUILDFLAG(IS_ANDROID)
// Android Scudo does not allow MTE to be re-enabled if MTE was disabled.
if (enable_memory_tagging &&
startup_memory_tagging_reporting_mode ==
partition_alloc::TagViolationReportingMode::kDisabled) {
LOG(ERROR) << "PartitionAlloc: Failed to enable memory tagging due to "
"MTE disabled at startup (Process: "
<< process_type << ")";
debug::DumpWithoutCrashing();
enable_memory_tagging = false;
}
if (enable_memory_tagging) {
// Configure MTE.
switch (base::features::kMemtagModeParam.Get()) {
case base::features::MemtagMode::kSync:
memory_tagging_reporting_mode =
@ -1224,15 +1093,28 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
partition_alloc::TagViolationReportingMode::kAsynchronous;
break;
}
partition_alloc::PermissiveMte::SetEnabled(base::FeatureList::IsEnabled(
base::features::kPartitionAllocPermissiveMte));
bool enable_permissive_mte = base::FeatureList::IsEnabled(
base::features::kPartitionAllocPermissiveMte);
partition_alloc::PermissiveMte::SetEnabled(enable_permissive_mte);
CHECK(partition_alloc::internal::
ChangeMemoryTaggingModeForAllThreadsPerProcess(
memory_tagging_reporting_mode));
CHECK_EQ(
partition_alloc::internal::GetMemoryTaggingModeForCurrentThread(),
memory_tagging_reporting_mode);
VLOG(1)
<< "PartitionAlloc: Memory tagging enabled in "
<< (memory_tagging_reporting_mode ==
partition_alloc::TagViolationReportingMode::kSynchronous
? "SYNC"
: "ASYNC")
<< " mode (Process: " << process_type << ")";
if (enable_permissive_mte) {
VLOG(1) << "PartitionAlloc: Permissive MTE enabled (Process: "
<< process_type << ")";
}
} else if (base::CPU::GetInstanceNoAllocation().has_mte()) {
// Disable MTE.
memory_tagging_reporting_mode =
partition_alloc::TagViolationReportingMode::kDisabled;
CHECK(partition_alloc::internal::
@ -1241,36 +1123,28 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
CHECK_EQ(
partition_alloc::internal::GetMemoryTaggingModeForCurrentThread(),
memory_tagging_reporting_mode);
VLOG(1) << "PartitionAlloc: Memory tagging disabled (Process: "
<< process_type << ")";
}
#endif // BUILDFLAG(IS_ANDROID)
}
}
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
if (enable_memory_tagging) {
CHECK((memory_tagging_reporting_mode ==
partition_alloc::TagViolationReportingMode::kSynchronous) ||
(memory_tagging_reporting_mode ==
partition_alloc::TagViolationReportingMode::kAsynchronous));
} else {
CHECK((memory_tagging_reporting_mode ==
partition_alloc::TagViolationReportingMode::kUndefined) ||
(memory_tagging_reporting_mode ==
partition_alloc::TagViolationReportingMode::kDisabled));
}
allocator_shim::UseSmallSingleSlotSpans use_small_single_slot_spans(
base::FeatureList::IsEnabled(
features::kPartitionAllocUseSmallSingleSlotSpans));
allocator_shim::ConfigurePartitions(
allocator_shim::EnableBrp(brp_config.enable_brp),
brp_config.extra_extras_size,
allocator_shim::EnableMemoryTagging(enable_memory_tagging),
memory_tagging_reporting_mode, bucket_distribution,
allocator_shim::SchedulerLoopQuarantine(scheduler_loop_quarantine),
scheduler_loop_quarantine_branch_capacity_in_bytes,
allocator_shim::ZappingByFreeFlags(zapping_by_free_flags),
allocator_shim::UsePoolOffsetFreelists(use_pool_offset_freelists),
allocator_shim::EventuallyZeroFreedMemory(eventually_zero_freed_memory),
allocator_shim::FewerMemoryRegions(fewer_memory_regions),
use_small_single_slot_spans);
const uint32_t extras_size = allocator_shim::GetMainPartitionRootExtrasSize();
@ -1279,68 +1153,44 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
// 100 is a reasonable cap for this value.
UmaHistogramCounts100("Memory.PartitionAlloc.PartitionRoot.ExtrasSize",
int(extras_size));
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If BRP is not enabled, check if any of PCScan flags is enabled.
[[maybe_unused]] bool scan_enabled = false;
#if PA_BUILDFLAG(USE_STARSCAN)
if (!brp_config.enable_brp) {
scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
// No specified process type means this is the Browser process.
if (process_type.empty()) {
scan_enabled = scan_enabled ||
EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded();
}
if (process_type == switches::kRendererProcess) {
scan_enabled = scan_enabled ||
EnablePCScanForMallocPartitionsInRendererProcessIfNeeded();
}
if (scan_enabled) {
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanStackScanning)) {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
partition_alloc::internal::PCScan::EnableStackScanning();
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanImmediateFreeing)) {
partition_alloc::internal::PCScan::EnableImmediateFreeing();
}
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanEagerClearing)) {
partition_alloc::internal::PCScan::SetClearType(
partition_alloc::internal::PCScan::ClearType::kEager);
}
SetProcessNameForPCScan(process_type);
}
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_BUILDFLAG(USE_STARSCAN)
#if !defined(__MUSL__)
// This call causes a hang in pthread_getattr_np() under qemu-user, see
// https://www.openwall.com/lists/musl/2017/06/15/9.
partition_alloc::internal::StackTopRegistry::Get().NotifyThreadCreated(
partition_alloc::internal::GetStackTop());
#endif
// Non-quarantinable partition is dealing with hot V8's zone allocations.
// In case PCScan is enabled in Renderer, enable thread cache on this
// partition. At the same time, thread cache on the main(malloc) partition
// must be disabled, because only one partition can have it on.
if (scan_enabled && process_type == switches::kRendererProcess) {
allocator_shim::NonQuarantinableAllocator::Instance()
.root()
->EnableThreadCacheIfSupported();
} else
#endif // PA_BUILDFLAG(USE_STARSCAN)
{
allocator_shim::internal::PartitionAllocMalloc::Allocator()
->EnableThreadCacheIfSupported();
}
allocator_shim::internal::PartitionAllocMalloc::Allocator()
->EnableThreadCacheIfSupported();
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocLargeEmptySlotSpanRing)) {
allocator_shim::internal::PartitionAllocMalloc::Allocator()
->EnableLargeEmptySlotSpanRing();
}
if (process_type == "" &&
base::FeatureList::IsEnabled(
base::features::kPartitionAllocSchedulerLoopQuarantine)) {
// `ReconfigureAfterTaskRunnerInit()` is called on the UI thread.
const size_t capacity_in_bytes = static_cast<size_t>(
base::features::kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity
.Get());
allocator_shim::internal::PartitionAllocMalloc::Allocator()
->SetSchedulerLoopQuarantineThreadLocalBranchCapacity(
capacity_in_bytes);
}
#if PA_BUILDFLAG( \
ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT)
bool enable_pa_with_advanced_checks =
ShouldEnablePartitionAllocWithAdvancedChecks(process_type);
if (enable_pa_with_advanced_checks) {
allocator_shim::InstallCustomDispatchForPartitionAllocWithAdvancedChecks();
}
#endif // PA_BUILDFLAG(
// ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT)
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(IS_WIN)
@ -1436,20 +1286,6 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
// PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_BUILDFLAG(USE_STARSCAN)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanMUAwareScheduler)) {
// Assign PCScan a task-based scheduling backend.
static base::NoDestructor<
partition_alloc::internal::MUAwareTaskBasedBackend>
mu_aware_task_based_backend{
partition_alloc::internal::PCScan::scheduler(),
&partition_alloc::internal::PCScan::PerformDelayedScan};
partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend(
*mu_aware_task_based_backend.get());
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
base::allocator::StartMemoryReclaimer(
base::SingleThreadTaskRunner::GetCurrentDefault());
@ -1466,6 +1302,15 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
partition_alloc::PartitionRoot::SetSortActiveSlotSpansEnabled(
base::FeatureList::IsEnabled(
base::features::kPartitionAllocSortActiveSlotSpans));
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
if (ShouldEnableShadowMetadata(process_type)) {
partition_alloc::PartitionRoot::EnableShadowMetadata(
partition_alloc::internal::PoolHandleMask::kRegular |
partition_alloc::internal::PoolHandleMask::kBRP |
partition_alloc::internal::PoolHandleMask::kConfigurable);
}
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
}
void PartitionAllocSupport::OnForegrounded(bool has_main_frame) {
@ -1520,9 +1365,8 @@ void PartitionAllocSupport::OnBackgrounded() {
// TODO(lizeb): Remove once/if the behavior of idle tasks changes.
base::PostDelayedMemoryReductionTask(
base::SingleThreadTaskRunner::GetCurrentDefault(), FROM_HERE,
base::BindOnce([]() {
::partition_alloc::MemoryReclaimer::Instance()->ReclaimAll();
}),
base::BindOnce(
[] { ::partition_alloc::MemoryReclaimer::Instance()->ReclaimAll(); }),
base::Seconds(10));
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)

View file

@ -14,16 +14,12 @@
#include "base/synchronization/lock.h"
#include "base/task/sequenced_task_runner.h"
#include "base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/thread_cache.h"
namespace base::allocator {
#if PA_BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter();
#endif
// Starts a periodic timer on the current thread to purge all thread caches.
BASE_EXPORT void StartThreadCachePeriodicPurge();
@ -41,12 +37,21 @@ BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();
BASE_EXPORT void InstallDanglingRawPtrChecks();
BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();
// Once called, makes `free()` do nothing. This is done to reduce
// shutdown hangs on CrOS.
// Does nothing if Dangling Pointer Detector (`docs/dangling_ptr.md`)
// is not active.
// Does nothing if allocator shim support is not built.
BASE_EXPORT void MakeFreeNoOp();
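// For example, a fast-shutdown path could call
// base::allocator::MakeFreeNoOp() just before process teardown so late
// frees become no-ops instead of slow heap work (call site illustrative).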
// Allows re-configuring PartitionAlloc at run-time.
class BASE_EXPORT PartitionAllocSupport {
public:
struct BrpConfiguration {
bool enable_brp = false;
bool process_affected_by_brp_flag = false;
// TODO(https://crbug.com/371135823): Remove after the investigation.
size_t extra_extras_size = 0;
};
// Reconfigure* functions re-configure PartitionAlloc. It is impossible to
@ -102,6 +107,11 @@ class BASE_EXPORT PartitionAllocSupport {
// For calling from within third_party/blink/.
static bool ShouldEnableMemoryTaggingInRendererProcess();
// Returns true if PA advanced checks should be enabled, when available, for
// the given process type. May be called multiple times per process.
static bool ShouldEnablePartitionAllocWithAdvancedChecks(
const std::string& process_type);
private:
PartitionAllocSupport();

View file

@ -0,0 +1,7 @@
---
Checks: 'google-build-namespaces,
readability-redundant-smartptr-get,
readability-static-accessed-through-instance'
InheritParentConfig: true
HeaderFilterRegex: 'partition_alloc/*'
...

View file

@ -11,8 +11,14 @@ group("buildflags") {
public_deps = [ "src/partition_alloc:buildflags" ]
}
if (is_clang_or_gcc) {
if (use_partition_alloc && is_clang_or_gcc) {
group("partition_alloc") {
public_deps = [ "src/partition_alloc:partition_alloc" ]
}
}
if (use_allocator_shim) {
group("allocator_shim") {
public_deps = [ "src/partition_alloc:allocator_shim" ]
}
}

View file

@ -9,35 +9,48 @@ noparent = True
# `partition_alloc` can depend only on itself, via its `include_dirs`.
include_rules = [ "+partition_alloc" ]
# TODO(crbug.com/40158212): Depending on what is tested, split the tests
# between chromium and partition_alloc. Remove these exceptions:
specific_include_rules = {
".*_(perf|unit)test\.cc$": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
"+base/allocator/dispatcher/dispatcher.h",
"+base/debug/allocation_trace.h",
"+base/debug/debugging_buildflags.h",
"+base/debug/proc_maps_linux.h",
"+base/system/sys_info.h",
"+base/test/gtest_util.h",
"+base/timer/lap_timer.h",
"+base/win/windows_version.h",
# Dependencies on //testing:
".*_(perf|unit)?test.*\.(h|cc)": [
"+testing/gmock/include/gmock/gmock.h",
"+testing/gtest/include/gtest/gtest.h",
"+testing/perf/perf_result_reporter.h",
],
"extended_api\.cc$": [
"gtest_util.h": [
"+testing/gtest/include/gtest/gtest.h",
],
# Dependencies on //base:
"extended_api\.cc": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
],
"raw_(ptr|ref)_unittest\.cc$": [
"+base",
"+third_party/abseil-cpp/absl/types/optional.h",
"+third_party/abseil-cpp/absl/types/variant.h",
"partition_alloc_perftest\.cc": [
"+base/allocator/dispatcher/dispatcher.h",
"+base/debug/allocation_trace.h",
"+base/debug/debugging_buildflags.h",
"+base/timer/lap_timer.h",
],
"raw_ptr_test_support\.h$": [
"+testing/gmock/include/gmock/gmock.h",
"+third_party/abseil-cpp/absl/types/optional.h",
"partition_lock_perftest\.cc": [
"+base/timer/lap_timer.h",
],
"use_death_tests\.h$": [
"+testing/gtest/include/gtest/gtest.h",
"raw_ptr_unittest\.cc": [
"+base/allocator/partition_alloc_features.h",
"+base/allocator/partition_alloc_support.h",
"+base/cpu.h",
"+base/debug/asan_service.h",
"+base/metrics/histogram_base.h",
"+base/test/bind.h",
"+base/test/gtest_util.h",
"+base/test/memory/dangling_ptr_instrumentation.h",
"+base/test/scoped_feature_list.h",
"+base/types/to_address.h",
],
"raw_ref_unittest\.cc": [
"+base/debug/asan_service.h",
"+base/memory/raw_ptr_asan_service.h",
"+base/test/gtest_util.h",
],
}

View file

@ -1,4 +1,3 @@
bartekn@chromium.org
haraken@chromium.org
keishi@chromium.org
lizeb@chromium.org

View file

@ -13,6 +13,12 @@ PRESUBMIT_VERSION = '2.0.0'
# chromium repository. PRESUBMIT.py is executed from chromium.
_PARTITION_ALLOC_BASE_PATH = 'base/allocator/partition_allocator/src/'
# Pattern matching C/C++ source files, for use in allowlist args.
_SOURCE_FILE_PATTERN = r'.*\.(h|hpp|c|cc|cpp)$'
# Similar pattern, matching GN files.
_BUILD_FILE_PATTERN = r'.*\.(gn|gni)$'
# This is adapted from Chromium's PRESUBMIT.py. The differences are:
# - Base path: It is relative to the partition_alloc's source directory instead
# of chromium.
@ -99,25 +105,145 @@ def CheckForIncludeGuards(input_api, output_api):
# overrides the default build settings and forwards the dependencies to
# partition_alloc.
def CheckNoExternalImportInGn(input_api, output_api):
def gn_files(file):
return file.LocalPath().endswith('.gn') or \
file.LocalPath().endswith('.gni')
# Match and capture <path> from import("<path>").
import_re = input_api.re.compile(r'^ *import\("([^"]+)"\)')
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
files_to_skip=[],
files_to_check=[_BUILD_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(gn_files):
for line_number, line in enumerate(input_api.ReadFile(f).splitlines()):
for f in input_api.AffectedSourceFiles(sources):
for line_number, line in f.ChangedContents():
match = import_re.search(line)
if not match:
continue
import_path = match.group(1)
if import_path.startswith('//build_overrides/'):
continue;
continue
if not import_path.startswith('//'):
continue;
errors.append(output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallow external import: %s' %
(f.LocalPath(), line_number + 1, import_path)))
return errors;
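# For example, import("//third_party/foo/foo.gni") starts with '//' but not
# with '//build_overrides/', so it is reported; a relative
# import("local/vars.gni") and import("//build_overrides/build.gni") are both
# allowed.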
# partition_alloc still supports C++17, because Skia still uses C++17.
def CheckCpp17CompatibleHeaders(input_api, output_api):
CPP_20_HEADERS = [
"barrier",
"bit",
#"compare", Three-way comparison may be used under appropriate guards.
"format",
"numbers",
"ranges",
"semaphore",
"source_location",
"span",
"stop_token",
"syncstream",
"version",
]
CPP_23_HEADERS = [
"expected",
"flat_map",
"flat_set",
"generator",
"mdspan",
"print",
"spanstream",
"stacktrace",
"stdatomic.h",
"stdfloat",
]
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
# compiler_specific.h may use these headers in guarded ways.
files_to_skip=[
r'.*partition_alloc_base/augmentations/compiler_specific\.h'
],
files_to_check=[_SOURCE_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
# for line_number, line in f.ChangedContents():
for line_number, line in enumerate(f.NewContents()):
for header in CPP_20_HEADERS:
if not "#include <%s>" % header in line:
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++20 headers: <%s>'
% (f.LocalPath(), line_number + 1, header)))
for header in CPP_23_HEADERS:
if not "#include <%s>" % header in line:
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++23 headers: <%s>'
% (f.LocalPath(), line_number + 1, header)))
return errors
def CheckCpp17CompatibleKeywords(input_api, output_api):
CPP_20_KEYWORDS = [
"concept",
"consteval",
"constinit",
"co_await",
"co_return",
"co_yield",
"requires",
"std::hardware_",
"std::is_constant_evaluated",
"std::bit_cast",
"std::midpoint",
"std::to_array",
]
# Note: C++23 doesn't introduce new keywords.
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
# compiler_specific.h may use these keywords in guarded macros.
files_to_skip=[r'.*partition_alloc_base/compiler_specific\.h'],
files_to_check=[_SOURCE_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
for line_number, line in f.ChangedContents():
for keyword in CPP_20_KEYWORDS:
if not keyword in line:
continue
# Skip if part of a comment
if '//' in line and line.index('//') < line.index(keyword):
continue
# Make sure there are word separators around the keyword:
regex = r'\b%s\b' % keyword
if not input_api.re.search(regex, line):
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++20 keywords: %s'
% (f.LocalPath(), line_number + 1, keyword)))
return errors
# Check `NDEBUG` is not used inside partition_alloc. We prefer to use the
# buildflags `#if PA_BUILDFLAG(IS_DEBUG)` instead.
def CheckNoNDebug(input_api, output_api):
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
files_to_skip=[],
files_to_check=[_SOURCE_FILE_PATTERN])
errors = []
for f in input_api.AffectedSourceFiles(sources):
for line_number, line in f.ChangedContents():
if 'NDEBUG' in line:
errors.append(output_api.PresubmitError('%s:%d\nPartitionAlloc '
% (f.LocalPath(), line_number + 1)
+ 'disallows NDEBUG, use PA_BUILDFLAG(IS_DEBUG) instead'))
return errors

View file

@ -119,7 +119,7 @@ partition page that holds metadata (32B struct per partition page).
of each super page).
* In some configurations, PartitionAlloc stores more metadata than can
fit in the one system page at the front. These are the bitmaps for
StarScan and `MTECheckedPtr<T>`, and they are relegated to the head of
`MTECheckedPtr<T>`, and they are relegated to the head of
what would otherwise be usable space for slot spans. These bitmaps
may or may not be present, depending on build configuration,
runtime configuration, and type of allocation.

View file

@ -1,9 +0,0 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file will be used to check out PartitionAlloc and to build it as
# standalone library. In this case, PartitionAlloc needs to define
# build_with_chromium. If building PartitionAlloc as a part of chromium,
# chromium will provide build_with_chromium=true.
build_with_chromium = false

View file

@ -2,7 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build_overrides/build.gni")
# By definition, PartitionAlloc standalone builds outside of chromium.
build_with_chromium = false
# This is the default build configuration when building PartitionAlloc
# as a standalone library.
@ -14,9 +15,13 @@ use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
assert_cpp20_default = true
enable_ios_corruption_hardening_default = false
# This is the default build configuration for pointers/raw_ptr*.
raw_ptr_zero_on_construct_default = true
raw_ptr_zero_on_move_default = true
raw_ptr_zero_on_destruct_default = false
# PartitionAlloc needs to support cpp17 for standalone builds, as long as Skia
# supports it.
assert_cpp20_default = false

View file

@ -169,7 +169,7 @@ tracking a non-contiguous set of allocations using a bitmap.
The usable area of a super page in which slot spans
reside. While generally this means "everything between the first
and last guard partition pages in a super page," the presence of
other metadata (e.g. StarScan bitmaps) can bump the starting offset
other metadata can bump the starting offset
forward. While this term is entrenched in the code, the team
considers it suboptimal and is actively looking for a replacement.

View file

@ -8,7 +8,6 @@
# - skia: //gn/BUILDCONFIG.gn
# - chromium: //build/config/BUILDCONFIG.gn
is_partition_alloc_standalone = true
build_with_chromium = false
is_asan = false
@ -53,7 +52,6 @@ is_nacl = false
is_win = current_os == "win" || current_os == "winuwp"
is_cast_android = false
is_castos = false
is_chromeos_ash = false
is_cronet_build = false
enable_expensive_dchecks = false
dcheck_is_configurable = false

View file

@ -13,7 +13,7 @@ config("default") {
"-fvisibility=hidden",
]
cflags_cc = [
"-std=c++20",
"-std=c++17",
"-fvisibility-inlines-hidden",
]
cflags_objcc = cflags_cc

View file

@ -4,6 +4,56 @@
import("//build_overrides/partition_alloc.gni")
# -----------------------------------------------------------------------------
# Note on the use of `xxx_default` variable in partition_alloc.
#
# GN provides the default_args() instruction. It is meant to be used by embedders,
# to override the default args declared by the embeddees (e.g. partition_alloc).
# This is the intended way to use GN. It properly interacts with the args.gn
# user's file.
#
# Unfortunately, Chrome and other embedders aren't using it. Instead, they
# expect embeddees to import a global '.gni' file from the embedder, e.g.
# `//build_overrides/partition_alloc.gni`. This file sets some `xxx_default`
# variable that will be transferred to the declared args. For instance
# a library would use:
# ```
# import("//build_overrides/library.gni")
# declare_args() {
# xxx = xxx_default
# }
# ```
#
# We don't really want to break embedders when introducing new args. Ideally,
# we would have liked to have defaults for the default variables, but that
# would be a recursive problem. To resolve it, we sometimes use the `defined(...)`
# instruction to check if the embedder has defined the `xxx_default` variable or
# not.
#
# In general, we should aim to support the embedders that are using GN normally,
# and avoid requiring them to define `xxx_default` in the `//build_overrides`
# -----------------------------------------------------------------------------
# Some embedders use `is_debug`; it can be used to set the default value of
# `partition_alloc_is_debug_default`.
if (!defined(partition_alloc_is_debug_default)) {
if (defined(is_debug)) {
partition_alloc_is_debug_default = is_debug
} else {
partition_alloc_is_debug_default = false
}
}
# Some embedders use `dcheck_always_on`; it can be used to set the default
# value of `partition_alloc_dcheck_always_on_default`.
if (!defined(partition_alloc_dcheck_always_on_default)) {
if (defined(dcheck_always_on)) {
partition_alloc_dcheck_always_on_default = dcheck_always_on
} else {
partition_alloc_dcheck_always_on_default = false
}
}
# PartitionAlloc has limited support for MSVC's cl.exe compiler. It can only
# access the generated "buildflags" and the "raw_ptr" definitions implemented
# with RawPtrNoOpImpl. Everything else is considered not supported.
@ -28,8 +78,16 @@ if (is_nacl) {
assert(false, "Unknown CPU: $current_cpu")
}
# Increases the size of the empty slot span ring.
use_large_empty_slot_span_ring = is_mac
# Makes the number of empty slot spans that can remain committed larger in
# foreground mode compared to background mode
# (see `PartitionRoot::AdjustFor(Background|Foreground)`).
#
# Foreground/background modes are used by default on macOS and Windows so this
# must be true on these platforms. It's also true on other platforms to allow
# experiments.
#
# TODO(crbug.com/329199197): Clean this up when experiments are complete.
use_large_empty_slot_span_ring = true
# Disabled for Android ARM64 because it actually requires API 31+.
# See partition_alloc/tagging.cc:
@ -40,6 +98,12 @@ has_memory_tagging =
current_cpu == "arm64" && is_clang && !is_asan && is_linux && current_os != "openwrt"
declare_args() {
# Debug configuration.
partition_alloc_is_debug = partition_alloc_is_debug_default
# Enable PA_DCHECKs in PartitionAlloc in release mode.
partition_alloc_dcheck_always_on = partition_alloc_dcheck_always_on_default
# Causes all the allocations to be routed via allocator_shim.cc. Usually,
# the allocator shim will, in turn, route them to PartitionAlloc, but
# other allocators are also supported by the allocator shim.
@ -83,26 +147,57 @@ declare_args() {
use_partition_alloc_as_malloc_default
}
assert(!use_allocator_shim || (is_android || is_apple || is_chromeos ||
is_fuchsia || is_linux || is_win),
"The allocator shim does not (yet) support the platform.")
declare_args() {
# Whether PartitionAlloc dispatch can be replaced with another dispatch with
# some more safety checks at runtime or not. When true, the allocator shim
# provides an extended API to swap PartitionAlloc.
enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support =
use_partition_alloc_as_malloc
}
declare_args() {
# This is a flag for binary experiment on iOS. When BRP for iOS is enabled,
# we see some un-actionable `DoubleFreeOrCorruptionDetected` crashes.
# This flag enables some extra `CHECK`s to get actionable crash reports.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
enable_ios_corruption_hardening = use_partition_alloc_as_malloc && is_ios &&
enable_ios_corruption_hardening_default
}
assert(
!enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support || use_partition_alloc_as_malloc,
"PartitionAlloc with advanced checks requires PartitionAlloc itself.")
assert(!use_allocator_shim || !is_nacl,
"The allocator shim supports every platform, except nacl")
if (use_allocator_shim && is_win) {
# It's hard to override CRT's malloc family in every case in the component
# build, and it's very easy to override it only partially, leaving
# allocations and deallocations inconsistent. Then, we'll crash when PA
# deallocates a memory region allocated by the CRT's malloc or vice versa.
assert(!is_component_build,
"The allocator shim doesn't work for the component build on Windows.")
# Since PartitionAlloc depends on libc++, it is difficult to link libc++.dll
# with PartitionAlloc to replace its allocator with PartitionAlloc.
# If using libcxx_is_shared=true,
# a. since inline methods or inline functions defined in some libc++ headers,
# e.g. vector, use new, malloc(), and so on, the memory allocation will
# be done inside a client code.
# b. on the other hand, libc++.dll deallocates the memory allocated by the
# inline methods or inline functions. It will not be run inside the client
# code.
# So a.'s allocation is done by PartitionAlloc, but b.'s deallocation is
# done by the system allocator. This will cause a heap check failure (WinHeap
# doesn't know about PartitionAlloc) and a crash.
# If libcxx_is_shared=false, libc++ is a static library. All libc++ code
# will be run inside the client. The above issue will disappear.
assert(
!is_component_build || (!libcxx_is_shared && !partition_alloc_is_debug),
"The allocator shim for the Windows component build needs !libcxx_is_shared && !partition_alloc_is_debug.")
}
declare_args() {
use_freeslot_bitmap = false
# Puts the regular and BRP pools right next to each other, so that we can
# check "belongs to one of the two pools" with a single bitmask operation.
glue_core_pools = false
# Introduces pointer compression support in PA. These are 4-byte
# pointers that can point within the core pools (regular and BRP).
#
@ -124,6 +219,23 @@ declare_args() {
# through malloc. Useful for using with tools that intercept malloc, e.g.
# heaptrack.
forward_through_malloc = false
# Enable reentrancy checks at `partition_alloc::internal::Lock`.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
enable_partition_lock_reentrancy_check = enable_ios_corruption_hardening
# This will write a fixed cookie pattern at the end of each allocation, and
# later verify the pattern remain unchanged to ensure there is no OOB write.
# It comes with performance and memory cost, hence enabled only in debug.
use_partition_cookie =
partition_alloc_is_debug || partition_alloc_dcheck_always_on ||
enable_ios_corruption_hardening
# This will change partition cookie size to 4B or 8B, whichever equivalent to
# size of InSlotMetadata. This option is useful for InSlotMetadata corruption
# investigation.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
smaller_partition_cookie = enable_ios_corruption_hardening
}
declare_args() {
@ -193,12 +305,21 @@ declare_args() {
# Enable the feature flag required to activate backup ref pointers. That is to
# say `PartitionAllocBackupRefPtr`.
#
# This is meant to be used primarily on bots. It is much easier to override
# the feature flags using a binary flag instead of updating multiple bots's
# scripts to pass command line arguments.
# This is meant to be modified primarily on bots. It is much easier to
# override the feature flags using a binary flag instead of updating multiple
# bots' scripts to pass command line arguments.
#
# TODO(328104161): Remove this flag.
enable_backup_ref_ptr_feature_flag = false
enable_backup_ref_ptr_feature_flag =
enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl &&
# Platforms where BackupRefPtr hasn't shipped yet:
!is_castos && !is_ios
# While keeping BRP support, override the feature flag into its disabled
# state. This will overwrite `enable_backup_ref_ptr_feature_flag`.
# TODO(https://crbug.com/372183586): Fix the bug and remove this arg.
force_disable_backup_ref_ptr_feature =
enable_backup_ref_ptr_support && enable_ios_corruption_hardening
# Build support for Dangling Ptr Detection (DPD) via BackupRefPtr (BRP),
# making the raw_ptr<T> implementation RawPtrBackupRefImpl if active.
@ -216,9 +337,9 @@ declare_args() {
# Enable the feature flag required to check for dangling pointers. That is to
# say `PartitionAllocDanglingPtr`.
#
# This is meant to be used primarily on bots. It is much easier to override
# the feature flags using a binary flag instead of updating multiple bots's
# scripts to pass command line arguments.
# This is meant to be modified primarily on bots. It is much easier to
# override the feature flags using a binary flag instead of updating multiple
# bots' scripts to pass command line arguments.
#
# TODO(328104161): Remove this flag.
enable_dangling_raw_ptr_feature_flag = enable_dangling_raw_ptr_checks
@ -232,7 +353,7 @@ declare_args() {
declare_args() {
# Shadow metadata is still under development and only supports Linux
# for now.
enable_shadow_metadata = false
enable_shadow_metadata = is_linux && has_64_bit_pointers
}
declare_args() {
@ -245,9 +366,6 @@ declare_args() {
use_full_mte = false
}
# *Scan is currently only used by Chromium, and supports only 64-bit.
use_starscan = build_with_chromium && has_64_bit_pointers
stack_scan_supported =
current_cpu == "x64" || current_cpu == "x86" || current_cpu == "arm" ||
current_cpu == "arm64" || current_cpu == "riscv64"
@ -257,11 +375,23 @@ stack_scan_supported =
# PartitionAlloc at all. If `use_partition_alloc` is false, we jam all
# related args to `false`.
#
# We also disable PA-Everywhere and PA-based features in two types of
# toolchains:
# - Toolchains that disable PA-Everywhere explicitly.
# - The rust host build tools toolchain, which builds DLLs to dlopen into the
# compiler for proc macros. We would want any allocations to use the same
# paths as the compiler.
#
# Do not clear the following, as they can function outside of PartitionAlloc
# - has_64_bit_pointers
# - has_memory_tagging
if (!use_partition_alloc) {
if (!use_partition_alloc ||
(defined(toolchain_allows_use_partition_alloc_as_malloc) &&
!toolchain_allows_use_partition_alloc_as_malloc) ||
(defined(toolchain_for_rust_host_build_tools) &&
toolchain_for_rust_host_build_tools)) {
use_partition_alloc_as_malloc = false
glue_core_pools = false
enable_backup_ref_ptr_support = false
use_raw_ptr_backup_ref_impl = false
use_asan_backup_ref_ptr = false
@ -272,8 +402,8 @@ if (!use_partition_alloc) {
enable_dangling_raw_ptr_feature_flag = false
enable_pointer_subtraction_check = false
backup_ref_ptr_poison_oob_ptr = false
backup_ref_ptr_extra_oob_checks = false
enable_backup_ref_ptr_instance_tracer = false
use_starscan = false
use_full_mte = false
}
@ -409,17 +539,3 @@ declare_args() {
# Embedders may opt-out of using C++ 20 build.
assert_cpp20 = assert_cpp20_default
}
declare_args() {
# Enables compilation of the freelist dispatcher, which we'll use to
# carry out runtime evaluation of PartitionAlloc's two freelist
# implementations: the existing encoded-next freelist and the new
# pool offset freelist. When false, the latter is not built.
#
# This is being exposed as a GN arg because of an undiagnosed crashy
# interaction with Mac PGO builders: crbug.com/338094768#comment20
use_freelist_dispatcher = has_64_bit_pointers && false
}
assert(has_64_bit_pointers || !use_freelist_dispatcher,
"freelist dispatcher can't be used without 64-bit pointers")

View file

@ -15,7 +15,34 @@ if (!defined(partition_alloc_remove_configs)) {
partition_alloc_remove_configs = []
}
# Add partition_alloc.gni and import it for partition_alloc configs.
# Enables compilation of the freelist dispatcher, which we'll use to
# carry out runtime evaluation of PartitionAlloc's two freelist
# implementations: the existing encoded-next freelist and the new
# pool offset freelist. When false, the latter is not built.
use_freelist_dispatcher = has_64_bit_pointers
assert(has_64_bit_pointers || !use_freelist_dispatcher,
"freelist dispatcher can't be used without 64-bit pointers")
record_alloc_info = false
# GWP-ASan is tied to BRP's enablement.
enable_gwp_asan_support = enable_backup_ref_ptr_support
# Pointer compression requires 64-bit pointers.
enable_pointer_compression =
enable_pointer_compression_support && has_64_bit_pointers
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`, but avails it
# as a buildflag.
partition_alloc_dchecks_are_on =
partition_alloc_is_debug || partition_alloc_dcheck_always_on
# Building PartitionAlloc for Windows component build.
# Currently this uses build_with_chromium so as not to affect any third_party
# code; if any third_party code wants to use it, remove build_with_chromium.
use_partition_alloc_as_malloc_on_win_component_build =
build_with_chromium && is_win && is_component_build
# TODO(crbug.com/40276913): Split PartitionAlloc into public and
# private parts. The public config would add the "./include" dir and
@ -54,6 +81,7 @@ config("dependants_extra_warnings") {
"-Wduplicate-enum",
"-Wextra-semi",
"-Wextra-semi-stmt",
"-Widiomatic-parentheses",
"-Wimplicit-fallthrough",
"-Winconsistent-missing-destructor-override",
"-Winvalid-offsetof",
@ -90,11 +118,105 @@ config("wexit_time_destructors") {
}
}
source_set("buildflag_macro") {
sources = [ "buildflag.h" ]
public_configs = [ ":public_includes" ]
}
# When developers are repeatedly growing a buffer with `realloc`, they are
# expected to request a new size that is larger than the current size by
# some growth factor. This growth factor allows amortizing the cost of
# memcpy. Unfortunately, some nVidia drivers have a bug where they repeatedly
# increase the buffer by only 4144 bytes.
#
# In particular, most Skia Linux bots are using the affected nVidia driver. So
# this flag is used as a workaround for Skia standalone, not in production.
#
# External link:
# https://forums.developer.nvidia.com/t/550-54-14-very-bad-performance-due-to-bunch-of-reallocations-during-glcore-initialization/287027
#
# Internal discussion at @chrome-memory-safety:
# https://groups.google.com/a/google.com/d/msgid/chrome-memory-safety/CAAzos5HrexY2njz2YzWrffTq1xEfkx15GVpSvHUyQED6wBSXvA%40mail.gmail.com?utm_medium=email&utm_source=footer
declare_args() {
partition_alloc_realloc_growth_factor_mitigation = false
}
pa_buildflag_header("buildflags") {
header = "buildflags.h"
flags = [
"ASSERT_CPP_20=$assert_cpp20",
"BACKUP_REF_PTR_EXTRA_OOB_CHECKS=$backup_ref_ptr_extra_oob_checks",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
"DCHECKS_ARE_ON=$partition_alloc_dchecks_are_on",
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT=$enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_GWP_ASAN_SUPPORT=$enable_gwp_asan_support",
"ENABLE_PARTITION_LOCK_REENTRANCY_CHECK=$enable_partition_lock_reentrancy_check",
"ENABLE_PKEYS=$enable_pkeys",
"ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
"ENABLE_POINTER_COMPRESSION=$enable_pointer_compression",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"FORCE_DISABLE_BACKUP_REF_PTR_FEATURE=$force_disable_backup_ref_ptr_feature",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
"FORWARD_THROUGH_MALLOC=$forward_through_malloc",
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"HAS_MEMORY_TAGGING=$has_memory_tagging",
"IS_ANDROID=$is_android",
"IS_CASTOS=$is_castos",
"IS_CAST_ANDROID=$is_cast_android",
"IS_CHROMEOS=$is_chromeos",
"IS_DEBUG=$partition_alloc_is_debug",
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"REALLOC_GROWTH_FACTOR_MITIGATION=$partition_alloc_realloc_growth_factor_mitigation",
"RECORD_ALLOC_INFO=$record_alloc_info",
"SMALLER_PARTITION_COOKIE=$smaller_partition_cookie",
"STACK_SCAN_SUPPORTED=$stack_scan_supported",
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_FREELIST_DISPATCHER=$use_freelist_dispatcher",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"USE_FULL_MTE=$use_full_mte",
"USE_LARGE_EMPTY_SLOT_SPAN_RING=$use_large_empty_slot_span_ring",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"USE_PARTITION_COOKIE=$use_partition_cookie",
"USE_RAW_PTR_ASAN_UNOWNED_IMPL=$use_raw_ptr_asan_unowned_impl",
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
]
}
# TODO(crbug.com/41481467): Remove this alias.
# Temporary alias, kept while partition_alloc dependants are updated.
# Currently needed by pdfium and dawn.
source_set("partition_alloc_buildflags") {
public = [ "partition_alloc_buildflags.h" ]
public_deps = [ ":buildflags" ]
}
# Provides platform and architecture detection from the compiler defines.
source_set("build_config") {
sources = [
"build_config.h",
"buildflag.h",
]
public_deps = [
":buildflag_macro", # Provides 'PA_BUILDFLAG()' macro.
":buildflags", # Provides `IS_CHROMEOS` definition.
]
public_configs = [ ":public_includes" ]
}
component("raw_ptr") {
@ -132,11 +254,13 @@ component("raw_ptr") {
sources += [ "pointers/raw_ptr_noop_impl.h" ]
sources += [ "pointers/empty.cc" ]
}
public_deps = [ ":build_config" ]
public_deps = [
":build_config",
":buildflags",
]
if (use_partition_alloc) {
public_deps += [ ":partition_alloc" ]
}
deps = [ ":buildflags" ]
# See also: `partition_alloc_base/component_export.h`
defines = [ "IS_RAW_PTR_IMPL" ]
@ -146,135 +270,6 @@ component("raw_ptr") {
configs += [ ":dependants_extra_warnings" ]
}
pa_buildflag_header("partition_alloc_buildflags") {
header = "partition_alloc_buildflags.h"
_record_alloc_info = false
# GWP-ASan is tied to BRP's enablement.
_enable_gwp_asan_support = enable_backup_ref_ptr_support
# Pools are a logical concept when address space is 32-bit.
_glue_core_pools = glue_core_pools && has_64_bit_pointers
# Pointer compression requires 64-bit pointers.
_enable_pointer_compression =
enable_pointer_compression_support && has_64_bit_pointers
# TODO(crbug.com/40158212): Need to refactor the following buildflags.
# The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
# PartitionAlloc. For PartitionAlloc,
# gen/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h
# defines them and PartitionAlloc includes that header. For chrome,
# gen/base/allocator/buildflags.h defines them and chrome includes it.
flags = [
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"HAS_MEMORY_TAGGING=$has_memory_tagging",
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_LARGE_EMPTY_SLOT_SPAN_RING=$use_large_empty_slot_span_ring",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
"BACKUP_REF_PTR_EXTRA_OOB_CHECKS=$backup_ref_ptr_extra_oob_checks",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_RAW_PTR_ASAN_UNOWNED_IMPL=$use_raw_ptr_asan_unowned_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
"ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support",
# TODO(bartekn): Remove once PDFium switches to
# USE_RAW_PTR_ASAN_UNOWNED_IMPL.
"USE_ASAN_UNOWNED_PTR=$use_raw_ptr_asan_unowned_impl",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
"USE_FULL_MTE=$use_full_mte",
"RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"GLUE_CORE_POOLS=$_glue_core_pools",
"ENABLE_POINTER_COMPRESSION=$_enable_pointer_compression",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"USE_FREELIST_DISPATCHER=$use_freelist_dispatcher",
"USE_STARSCAN=$use_starscan",
"STACK_SCAN_SUPPORTED=$stack_scan_supported",
"ENABLE_PKEYS=$enable_pkeys",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
"FORWARD_THROUGH_MALLOC=$forward_through_malloc",
"ASSERT_CPP_20=$assert_cpp20",
"IS_DEBUG=$is_debug",
]
}
pa_buildflag_header("raw_ptr_buildflags") {
header = "raw_ptr_buildflags.h"
flags = [
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
]
}
pa_buildflag_header("chromecast_buildflags") {
header = "chromecast_buildflags.h"
# TODO(https://crbug.com/41481467): Remove the "PA_" prefix, because it
# will already be part of the define and the `PA_BUILDFLAG` macro.
flags = [
"PA_IS_CAST_ANDROID=$is_cast_android",
"PA_IS_CASTOS=$is_castos",
]
}
pa_buildflag_header("chromeos_buildflags") {
header = "chromeos_buildflags.h"
flags = [ "IS_CHROMEOS=$is_chromeos" ]
}
pa_buildflag_header("debugging_buildflags") {
header = "debugging_buildflags.h"
header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
# but exposes it as a buildflag.
_dcheck_is_on = is_debug || dcheck_always_on
# TODO(https://crbug.com/41481467): Remove the "PA_" prefix, because it
# will already be part of the define and the `PA_BUILDFLAG` macro.
flags = [
"PA_DCHECK_IS_ON=$_dcheck_is_on",
"PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"PA_CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
]
}
group("buildflags") {
public_deps = [
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
":raw_ptr_buildflags",
]
public_configs = [ ":public_includes" ]
}
if (is_clang_or_gcc) {
config("partition_alloc_implementation") {
# See also: `partition_alloc_base/component_export.h`
@ -355,7 +350,7 @@ if (is_clang_or_gcc) {
}
}
if (enable_pkeys && is_debug) {
if (enable_pkeys && partition_alloc_is_debug) {
config("no_stack_protector") {
cflags = [ "-fno-stack-protector" ]
}
@ -366,12 +361,40 @@ if (is_clang_or_gcc) {
":allocator_base",
":allocator_core",
":allocator_shim",
":buildflags",
]
}
if (is_win && is_component_build) {
group("win_component_build_adapter") {
# This target is currently guarded with build_with_chromium to avoid
# issues in third_party builds. If any third_party code wants to use
# allocator_shim for its own component build, we will remove this
# guard.
if (build_with_chromium) {
if (use_allocator_shim) {
public_deps = [
":allocator_base",
":allocator_shim",
]
}
}
# Outside of chromium, this currently does nothing.
}
}
component("allocator_core") {
visibility = [ ":*" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# Since common_deps defined in //build/config/BUILD.gn depends on
# PartitionAlloc for PartitionAlloc-Everywhere, we need no_default_deps
# here, because the default deps include the common_deps dependency.
# Without no_default_deps=true, we would see cyclic deps:
# common_deps=>PartitionAlloc=>common_deps
no_default_deps = true
}
sources = [
"aarch64_support.h",
"address_pool_manager.cc",
@ -424,6 +447,7 @@ if (is_clang_or_gcc) {
"partition_bucket.cc",
"partition_bucket.h",
"partition_bucket_lookup.h",
"partition_cookie.cc",
"partition_cookie.h",
"partition_dcheck_helper.cc",
"partition_dcheck_helper.h",
@ -438,6 +462,7 @@ if (is_clang_or_gcc) {
"partition_page_constants.h",
"partition_root.cc",
"partition_root.h",
"partition_shared_mutex.h",
"partition_stats.cc",
"partition_stats.h",
"partition_superpage_extent_entry.h",
@ -463,29 +488,6 @@ if (is_clang_or_gcc) {
"yield_processor.h",
]
if (use_starscan) {
sources += [
"starscan/logging.h",
"starscan/pcscan.cc",
"starscan/pcscan.h",
"starscan/pcscan_internal.cc",
"starscan/pcscan_internal.h",
"starscan/pcscan_scheduling.cc",
"starscan/pcscan_scheduling.h",
"starscan/raceful_worklist.h",
"starscan/scan_loop.h",
"starscan/snapshot.cc",
"starscan/snapshot.h",
"starscan/starscan_fwd.h",
"starscan/state_bitmap.h",
"starscan/stats_collector.cc",
"starscan/stats_collector.h",
"starscan/stats_reporter.h",
"starscan/write_protector.cc",
"starscan/write_protector.h",
]
}
defines = []
if (is_win) {
sources += [
@ -534,10 +536,7 @@ if (is_clang_or_gcc) {
public_deps = [
":build_config",
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
":buildflags",
]
configs += [
@ -546,11 +545,13 @@ if (is_clang_or_gcc) {
":wexit_time_destructors",
]
deps = [ ":allocator_base" ]
public_configs = []
if (is_android) {
# tagging.cc requires __arm_mte_set_* functions.
deps += [ "//third_party/cpu_features:ndk_compat" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# We need to add an explicit libc++ dependency here because of
# no_default_deps=true.
deps += [ "//buildtools/third_party/libc++:libc++" ]
}
public_configs = []
if (is_fuchsia) {
deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp",
@ -586,13 +587,21 @@ if (is_clang_or_gcc) {
# We want to be able to test pkey mode without access to the default pkey.
# This is incompatible with stack protectors since the TLS won't be pkey-tagged.
if (enable_pkeys && is_debug) {
if (enable_pkeys && partition_alloc_is_debug) {
configs += [ ":no_stack_protector" ]
}
}
component("allocator_base") {
visibility = [ ":*" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# Since common_deps defined in //build/config/BUILD.gn depends on
# PartitionAlloc for PartitionAlloc-Everywhere, we need no_default_deps
# here, because the default deps include the common_deps dependency.
# Without no_default_deps=true, we would see cyclic deps:
# common_deps=>PartitionAlloc=>common_deps
no_default_deps = true
}
sources = [
"partition_alloc_base/atomic_ref_count.h",
@ -611,6 +620,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/debug/stack_trace.cc",
"partition_alloc_base/debug/stack_trace.h",
"partition_alloc_base/export_template.h",
"partition_alloc_base/files/platform_file.h",
"partition_alloc_base/immediate_crash.h",
"partition_alloc_base/log_message.cc",
"partition_alloc_base/log_message.h",
@ -656,11 +666,13 @@ if (is_clang_or_gcc) {
"partition_alloc_base/time/time.h",
"partition_alloc_base/time/time_override.cc",
"partition_alloc_base/time/time_override.h",
"partition_alloc_base/types/same_as_any.h",
"partition_alloc_base/types/strong_alias.h",
"partition_alloc_base/win/win_handle_types.h",
"partition_alloc_base/win/win_handle_types_list.inc",
"partition_alloc_base/win/windows_types.h",
]
libs = []
if (is_win) {
sources += [
@ -672,7 +684,12 @@ if (is_clang_or_gcc) {
"partition_alloc_base/threading/platform_thread_win.cc",
"partition_alloc_base/time/time_win.cc",
]
} else if (is_posix) {
libs += [
"winmm.lib", # For timeGetTime.
]
}
if (is_posix) {
sources += [
"partition_alloc_base/debug/stack_trace_posix.cc",
"partition_alloc_base/files/file_util.h",
@ -703,7 +720,9 @@ if (is_clang_or_gcc) {
} else {
sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
}
} else if (is_fuchsia) {
}
if (is_fuchsia) {
sources += [
"partition_alloc_base/fuchsia/fuchsia_logging.cc",
"partition_alloc_base/fuchsia/fuchsia_logging.h",
@ -717,6 +736,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/time/time_fuchsia.cc",
]
}
if (is_android) {
# Only the Android build requires native_library, and native_library
# depends on file_path, so file_path is added when is_android = true.
@ -729,6 +749,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/native_library_posix.cc",
]
}
if (is_apple) {
# Apple-specific utilities
sources += [
@ -757,10 +778,7 @@ if (is_clang_or_gcc) {
public_deps = [
":build_config",
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
":buildflags",
]
public_configs = [ ":public_includes" ]
configs += [
@ -769,6 +787,11 @@ if (is_clang_or_gcc) {
]
deps = []
if (use_partition_alloc_as_malloc_on_win_component_build) {
# We need to add an explicit libc++ dependency here because of
# no_default_deps=true.
deps += [ "//buildtools/third_party/libc++:libc++" ]
}
if (is_fuchsia) {
public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/fit" ]
}
@ -787,8 +810,6 @@ if (is_clang_or_gcc) {
}
component("allocator_shim") {
visibility = [ ":*" ]
sources = []
deps = []
all_dependent_configs = []
@ -819,14 +840,14 @@ if (is_clang_or_gcc) {
"shim/allocator_shim_dispatch_to_noop_on_free.h",
]
if (use_partition_alloc) {
shim_sources += [
"shim/allocator_shim_default_dispatch_to_partition_alloc.cc",
"shim/nonscannable_allocator.cc",
]
shim_headers += [
"shim/allocator_shim_default_dispatch_to_partition_alloc.h",
"shim/nonscannable_allocator.h",
]
shim_sources +=
[ "shim/allocator_shim_default_dispatch_to_partition_alloc.cc" ]
shim_headers +=
[ "shim/allocator_shim_default_dispatch_to_partition_alloc.h" ]
}
if (enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support) {
shim_sources += [ "shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.cc" ]
shim_headers += [ "shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.h" ]
}
if (is_android) {
shim_headers += [
@ -929,6 +950,199 @@ if (is_clang_or_gcc) {
]
}
} # if (is_clang_or_gcc)
# TODO(crbug.com/40158212): After making partition_alloc a standalone library,
# move test code here, i.e. test("partition_alloc_tests") { ... } and
# test("partition_alloc_perftests").
# TODO(crbug.com/40158212): Consider supporting building tests outside of
# chromium and having a dedicated 'partition_alloc_unittests' target.
if (build_with_chromium) {
source_set("unittests") {
testonly = true
sources = [ "partition_alloc_base/test/gtest_util.h" ]
if (is_linux || is_chromeos || is_android) {
sources += [
"partition_alloc_base/debug/proc_maps_linux.cc",
"partition_alloc_base/debug/proc_maps_linux.h",
]
}
if (is_android) {
sources += [
"partition_alloc_base/files/file_path_pa_unittest.cc",
"partition_alloc_base/native_library_pa_unittest.cc",
]
}
if (use_partition_alloc) {
sources += [
"address_pool_manager_unittest.cc",
"address_space_randomization_unittest.cc",
"compressed_pointer_unittest.cc",
"freeslot_bitmap_unittest.cc",
"hardening_unittest.cc",
"lightweight_quarantine_unittest.cc",
"memory_reclaimer_unittest.cc",
"page_allocator_unittest.cc",
"partition_alloc_base/bits_pa_unittest.cc",
"partition_alloc_base/component_export_pa_unittest.cc",
"partition_alloc_base/cpu_pa_unittest.cc",
"partition_alloc_base/logging_pa_unittest.cc",
"partition_alloc_base/no_destructor_pa_unittest.cc",
"partition_alloc_base/rand_util_pa_unittest.cc",
"partition_alloc_base/scoped_clear_last_error_pa_unittest.cc",
"partition_alloc_base/strings/cstring_builder_pa_unittest.cc",
"partition_alloc_base/strings/safe_sprintf_pa_unittest.cc",
"partition_alloc_base/strings/string_util_pa_unittest.cc",
"partition_alloc_base/strings/stringprintf_pa_unittest.cc",
"partition_alloc_base/thread_annotations_pa_unittest.cc",
"partition_alloc_unittest.cc",
"partition_lock_unittest.cc",
"reverse_bytes_unittest.cc",
"slot_start_unittest.cc",
"thread_cache_unittest.cc",
"use_death_tests.h",
]
}
if (is_fuchsia) {
sources +=
[ "partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc" ]
}
if (use_allocator_shim) {
sources += [
"shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc",
]
if (is_win) {
sources += [ "shim/winheap_stubs_win_unittest.cc" ]
}
if (is_ios) {
sources += [
"shim/allocator_interception_apple_unittest.mm",
"shim/malloc_zone_functions_apple_unittest.cc",
]
}
}
if ((is_android || is_linux) && target_cpu == "arm64") {
cflags = [
"-Xclang",
"-target-feature",
"-Xclang",
"+mte",
]
}
if (enable_pkeys && partition_alloc_is_debug && !is_component_build) {
# This test requires RELRO, which is not enabled in component builds.
# Also, a debug build is required, since we only disable stack protectors
# in debug builds in PartitionAlloc (see below for why that's needed).
sources += [ "thread_isolation/pkey_unittest.cc" ]
# We want to test the pkey code without access to memory that is not
# pkey-tagged, which lets us catch unintended memory accesses that could
# break our security assumptions. The stack protector reads a value from
# the TLS, which won't be pkey-tagged, hence it is disabled for this
# test. (A sketch of the pkey mechanism follows this block.)
configs += [ ":no_stack_protector" ]
}
frameworks = []
if (is_mac) {
frameworks += [
"Foundation.framework",
"OpenCL.framework",
]
}
deps = [
":partition_alloc",
":test_support",
"//testing/gmock",
"//testing/gtest",
]
configs -= partition_alloc_remove_configs
configs += partition_alloc_add_configs
}
}
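For context on the pkey comments in the unittests target above, here is a minimal sketch of Linux memory protection keys (glibc 2.27+ on MPK-capable x86-64 hardware; error handling elided; this is illustrative, not the test's code):

```cpp
#include <sys/mman.h>  // pkey_alloc, pkey_mprotect, pkey_set (glibc 2.27+)
#include <cstdio>

int main() {
  void* p = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  int pkey = pkey_alloc(/*flags=*/0, /*access_rights=*/0);
  pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);  // tag the pages
  pkey_set(pkey, PKEY_DISABLE_ACCESS);  // revoke access for this thread
  // Any access to `p` now faults -- including implicit loads such as the
  // stack protector's TLS read, if the TLS pages carried this pkey. That
  // is why the test builds with -fno-stack-protector.
  std::printf("p is tagged and inaccessible\n");
}
```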
source_set("test_support") {
testonly = true
sources = [
"extended_api.cc",
"extended_api.h",
"partition_alloc_base/threading/platform_thread_for_testing.h",
"partition_alloc_for_testing.h",
"pointers/raw_ptr_counting_impl_for_test.h",
]
if (is_posix) {
sources += [
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_fuchsia) {
sources += [
"partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc",
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_win) {
sources +=
[ "partition_alloc_base/threading/platform_thread_win_for_testing.cc" ]
}
if (is_apple) {
sources += [
"partition_alloc_base/threading/platform_thread_apple_for_testing.mm",
]
}
if (is_linux || is_chromeos) {
sources += [
"partition_alloc_base/threading/platform_thread_linux_for_testing.cc",
]
}
if (is_android) {
sources += [
"partition_alloc_base/threading/platform_thread_android_for_testing.cc",
]
}
public_deps = [
":arm_bti_testfunctions",
":buildflags",
":partition_alloc",
":raw_ptr",
]
public_configs = []
if (is_fuchsia) {
public_deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_hlcpp",
"//third_party/fuchsia-sdk/sdk/pkg/fit",
"//third_party/fuchsia-sdk/sdk/pkg/sync",
"//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/zx",
]
# Needed for users of spinning_mutex.h which, for performance reasons,
# contains inlined calls to `libsync` inside the header file.
# It appends an entry to the "libs" section of the dependent target.
public_configs += [ ":fuchsia_sync_lib" ]
}
configs -= partition_alloc_remove_configs
configs += partition_alloc_add_configs
}
source_set("arm_bti_testfunctions") {
testonly = true
sources = []
if (target_cpu == "arm64" && (is_linux || is_android)) {
sources = [
"arm_bti_test_functions.S",
"arm_bti_test_functions.h",
]
}
}

View file

@ -8,7 +8,7 @@
#include <stdint.h>
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if defined(__MUSL__)
// Musl does not support ifunc.

View file

@ -11,11 +11,10 @@
#include "partition_alloc/address_space_stats.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/reservation_offset_table.h"
@ -27,7 +26,7 @@
namespace partition_alloc::internal {
AddressPoolManager AddressPoolManager::singleton_;
PA_CONSTINIT AddressPoolManager AddressPoolManager::singleton_;
// static
AddressPoolManager& AddressPoolManager::GetInstance() {
@ -125,7 +124,7 @@ void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
PA_CHECK(!(ptr & kSuperPageOffsetMask));
PA_CHECK(!(length & kSuperPageOffsetMask));
address_begin_ = ptr;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
address_end_ = ptr + length;
PA_DCHECK(address_begin_ < address_end_);
#endif
@ -204,7 +203,7 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
bit_hint_ = end_bit;
}
uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(address + requested_size <= address_end_);
#endif
return address;
@ -246,7 +245,7 @@ void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
PA_DCHECK(!(free_size & kSuperPageOffsetMask));
PA_DCHECK(address_begin_ <= address);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(address + free_size <= address_end_);
#endif
@ -557,7 +556,7 @@ void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields
// in Pool.
// in Pool. It is never called.
void AddressPoolManager::AssertThreadIsolatedLayout() {
constexpr size_t last_pool_offset =
offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1);

View file

@ -10,12 +10,11 @@
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h"
@ -114,6 +113,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool GetStats(AddressSpaceStats* stats);
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields
// in Pool. It is never called.
static void AssertThreadIsolatedLayout();
#endif // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
@ -162,7 +163,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
size_t total_bits_ = 0;
uintptr_t address_begin_ = 0;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
uintptr_t address_end_ = 0;
#endif
@ -201,7 +202,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
#endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
static PA_CONSTINIT AddressPoolManager singleton_;
PA_CONSTINIT static AddressPoolManager singleton_;
};
} // namespace partition_alloc::internal

View file

@ -4,7 +4,7 @@
#include "partition_alloc/address_pool_manager_bitmap.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#if !PA_BUILDFLAG(HAS_64_BIT_POINTERS)

View file

@ -11,9 +11,9 @@
#include <limits>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h"

View file

@ -5,7 +5,7 @@
#include "partition_alloc/address_space_randomization.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/random.h"

View file

@ -178,8 +178,18 @@ AslrMask(uintptr_t bits) {
}
#else // !PA_BUILDFLAG(IS_AIX) && !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN)
#if PA_BUILDFLAG(IS_LINUX)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t ASLRMask() {
return AslrMask(46);
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t ASLROffset() {
return AslrAddress(0);
}
#else
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(46);
}
@ -187,6 +197,8 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0);
}
#endif
#endif // !PA_BUILDFLAG(IS_AIX) && !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN)
#elif PA_BUILDFLAG(PA_ARCH_CPU_S390X)
@ -280,7 +292,7 @@ AslrMask(uintptr_t bits) {
#endif // PA_BUILDFLAG(PA_ARCH_CPU_32_BITS)
// clang-format on
// clang-format on
} // namespace internal

View file

@ -7,8 +7,8 @@
#include <cstddef>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
namespace partition_alloc {

View file

@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "partition_alloc/allocation_guard.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_config.h"

View file

@ -16,9 +16,8 @@
// This file contains the following definitions:
//
// Operating system:
// IS_IOS / IS_AIX / IS_ANDROID / IS_ASMJS / IS_FREEBSD / IS_FUCHSIA /
// IS_LINUX / IS_MAC / IS_NACL / IS_NETBSD / IS_OPENBSD / IS_QNX /
// IS_SOLARIS / IS_WIN
// IS_IOS / IS_AIX / IS_ASMJS / IS_FREEBSD / IS_FUCHSIA / IS_LINUX / IS_MAC /
// IS_NACL / IS_NETBSD / IS_OPENBSD / IS_QNX / IS_SOLARIS / IS_WIN
//
// Operating system family:
// IS_APPLE / IS_BSD / IS_POSIX
@ -48,7 +47,7 @@
#include "partition_alloc/buildflag.h" // IWYU pragma: export
// Definition of PA_BUILDFLAG(IS_CHROMEOS).
#include "partition_alloc/chromeos_buildflags.h" // IWYU pragma: export
#include "partition_alloc/buildflags.h" // IWYU pragma: export
// Clangd does not detect PA_BUILDFLAG_INTERNAL_* indirect usage, so mark the
// header as "always_keep" to avoid "unused include" warning.
@ -59,8 +58,14 @@
#if defined(__native_client__)
// __native_client__ must be first, so that other IS_ defines are not set.
#define PA_IS_NACL
#elif defined(ANDROID)
#define PA_IS_ANDROID
#elif PA_BUILDFLAG(IS_ANDROID)
// The IS_ANDROID PA_BUILDFLAG macro is defined in buildflags.h.
//
// PartitionAlloc's embedders (Chromium, Dawn, Pdfium, Skia) define different
// macros for Android builds: "ANDROID" or "SK_BUILD_FOR_ANDROID".
//
// To avoid relying on these external definitions, PartitionAlloc uses its own
// dedicated build flag.
#elif defined(__APPLE__)
// Only include TargetConditionals after testing ANDROID as some Android builds
// on the Mac have this header available and it's not needed unless the target
@ -74,7 +79,7 @@
#elif defined(__linux__)
#if !PA_BUILDFLAG(IS_CHROMEOS)
// Do not define PA_IS_LINUX on Chrome OS build.
// The IS_CHROMEOS PA_BUILDFLAG macro is defined in chromeos_buildflags.h.
// The IS_CHROMEOS PA_BUILDFLAG macro is defined in buildflags.h.
#define PA_IS_LINUX
#endif // !PA_BUILDFLAG(IS_CHROMEOS)
// Include a system header to pull in features.h for glibc/uclibc macros.
@ -114,11 +119,11 @@
#define PA_IS_BSD
#endif
#if defined(PA_IS_AIX) || defined(PA_IS_ANDROID) || defined(PA_IS_ASMJS) || \
defined(PA_IS_FREEBSD) || defined(PA_IS_IOS) || defined(PA_IS_LINUX) || \
defined(PA_IS_CHROMEOS) || defined(PA_IS_MAC) || defined(PA_IS_NACL) || \
defined(PA_IS_NETBSD) || defined(PA_IS_OPENBSD) || defined(PA_IS_QNX) || \
defined(PA_IS_SOLARIS) || PA_BUILDFLAG(IS_CHROMEOS)
#if defined(PA_IS_AIX) || defined(PA_IS_ASMJS) || defined(PA_IS_FREEBSD) || \
defined(PA_IS_IOS) || defined(PA_IS_LINUX) || defined(PA_IS_CHROMEOS) || \
defined(PA_IS_MAC) || defined(PA_IS_NACL) || defined(PA_IS_NETBSD) || \
defined(PA_IS_OPENBSD) || defined(PA_IS_QNX) || defined(PA_IS_SOLARIS) || \
PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_CHROMEOS)
#define PA_IS_POSIX
#endif
@ -388,13 +393,6 @@
#endif
#undef PA_IS_AIX
#if defined(PA_IS_ANDROID)
#define PA_BUILDFLAG_INTERNAL_IS_ANDROID() (1)
#else
#define PA_BUILDFLAG_INTERNAL_IS_ANDROID() (0)
#endif
#undef PA_IS_ANDROID
#if defined(PA_IS_APPLE)
#define PA_BUILDFLAG_INTERNAL_IS_APPLE() (1)
#else

View file

@ -116,6 +116,6 @@ template("pa_buildflag_header") {
"visibility",
])
public_deps = [ "${_current_dir}:build_config" ]
public_deps = [ "${_current_dir}:buildflag_macro" ]
}
}

View file

@ -3,7 +3,8 @@
// found in the LICENSE file.
#include "partition_alloc/compressed_pointer.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)

View file

@ -5,14 +5,14 @@
#ifndef PARTITION_ALLOC_COMPRESSED_POINTER_H_
#define PARTITION_ALLOC_COMPRESSED_POINTER_H_
#include <bit>
#include <climits>
#include <type_traits>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
@ -78,7 +78,7 @@ constexpr bool IsDecayedSame =
class CompressedPointerBaseGlobal final {
public:
static constexpr size_t kUsefulBits =
std::countr_zero(PartitionAddressSpace::CorePoolsSize());
base::bits::CountrZero(PartitionAddressSpace::CorePoolsSize());
static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT);
static constexpr size_t kBitsToShift =
kUsefulBits - sizeof(uint32_t) * CHAR_BIT;
@ -102,11 +102,11 @@ class CompressedPointerBaseGlobal final {
static constexpr uintptr_t kUsefulBitsMask =
PartitionAddressSpace::CorePoolsSize() - 1;
static union alignas(kPartitionCachelineSize)
PA_CONSTINIT static union alignas(kPartitionCachelineSize)
PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base {
uintptr_t base;
char cache_line[kPartitionCachelineSize];
} g_base_ PA_CONSTINIT;
} g_base_;
PA_ALWAYS_INLINE static bool IsBaseConsistent() {
return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask);
@ -232,7 +232,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
static constexpr size_t kMinimalRequiredAlignment = 8;
static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment ==
0);
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
@ -243,7 +243,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
PA_DCHECK(!ptr ||
(base & kCorePoolsBaseMask) ==
(reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask));
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
const auto uptr = reinterpret_cast<uintptr_t>(ptr);
// Shift the pointer and truncate.
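The compression arithmetic is easiest to see in a simplified model. Assuming a hypothetical 2^35-byte pool and 8-byte alignment, so that exactly 32 bits survive (the real class derives its constants from `CorePoolsSize()` and additionally handles nullptr normalization and pool-base mixing), a 64-bit pointer round-trips through 32 bits like this:

```cpp
// Toy model of shift-and-truncate pointer compression; not
// PartitionAlloc's actual algorithm.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kPoolSize = uintptr_t{1} << 35;  // assumed pool size
constexpr unsigned kAlignShift = 3;                  // 8-byte alignment

uint32_t Compress(uintptr_t ptr, uintptr_t pool_base) {
  assert(ptr % 8 == 0 && ptr - pool_base < kPoolSize);
  // 35 significant bits minus 3 alignment bits fit exactly in 32 bits.
  return static_cast<uint32_t>((ptr - pool_base) >> kAlignShift);
}

uintptr_t Decompress(uint32_t c, uintptr_t pool_base) {
  return pool_base + (static_cast<uintptr_t>(c) << kAlignShift);
}
```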

View file

@ -9,11 +9,10 @@
#include <cstdint>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap.h"
#include "partition_alloc/partition_alloc-inl.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@ -177,16 +176,16 @@ class EncodedNextFreelistEntry {
// SetNext() is either called on the freelist head, when provisioning new
// slots, or when GetNext() has been called before, no need to pass the
// size.
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Regular freelists always point to an entry within the same super page.
//
// This is most likely a PartitionAlloc bug if this triggers.
if (PA_UNLIKELY(entry &&
(SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
if (entry && (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask))
[[unlikely]] {
FreelistCorruptionDetected(0);
}
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
encoded_next_ = EncodedFreelistPtr(entry);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
@ -221,7 +220,7 @@ class EncodedNextFreelistEntry {
}
auto* ret = encoded_next_.Decode();
if (PA_UNLIKELY(!IsWellFormed<for_thread_cache>(this, ret))) {
if (!IsWellFormed<for_thread_cache>(this, ret)) [[unlikely]] {
if constexpr (crash_on_corruption) {
// Put the corrupted data on the stack, it may give us more information
// about what kind of corruption that was.

View file

@ -4,7 +4,7 @@
#include "partition_alloc/extended_api.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "partition_alloc/thread_cache.h"
@ -77,7 +77,17 @@ ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
// Replace ThreadCache's PartitionRoot.
ThreadCache::SwapForTesting(root_);
} else {
if (!regular_was_enabled_) {
bool regular_was_disabled = !regular_was_enabled_;
#if PA_BUILDFLAG(IS_WIN)
// ThreadCache may be a tombstone left over from the previous test. In
// that case, we have to remove the tombstone and re-create ThreadCache
// for the new test.
if (ThreadCache::IsTombstone(ThreadCache::Get())) {
ThreadCache::RemoveTombstoneForTesting();
regular_was_disabled = true;
}
#endif
if (regular_was_disabled) {
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_);
}
@ -89,6 +99,7 @@ ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PA_CHECK(ThreadCache::Get());
PA_CHECK(!ThreadCache::IsTombstone(ThreadCache::Get()));
}
ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {

View file

@ -5,7 +5,7 @@
#ifndef PARTITION_ALLOC_EXTENDED_API_H_
#define PARTITION_ALLOC_EXTENDED_API_H_
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#include "partition_alloc/thread_cache.h"

View file

@ -46,12 +46,6 @@ constexpr inline IfEnum<EnumType, bool> ContainsFlags(EnumType superset,
return (superset & subset) == subset;
}
// Removes flags `target` from `from`.
template <typename EnumType>
constexpr inline IfEnum<EnumType> RemoveFlags(EnumType from, EnumType target) {
return from & ~target;
}
// A macro to define binary arithmetic over `EnumType`.
// Use inside `namespace partition_alloc::internal`.
#define PA_DEFINE_OPERATORS_FOR_FLAGS(EnumType) \

View file

@ -9,10 +9,10 @@
#include <cstdint>
#include <utility>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap_constants.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
@ -92,7 +92,7 @@ PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
*cell &= ~CellWithAOne(bit_index);
}
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Checks if the cells that are meant to contain only unset bits are really 0.
auto [begin_cell, begin_bit_index] =
GetFreeSlotBitmapCellPtrAndBitIndex(begin_addr);
@ -131,7 +131,7 @@ PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
for (FreeSlotBitmapCellType* cell = begin_cell; cell < end_cell; ++cell) {
PA_DCHECK(*cell == 0u);
}
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
}
} // namespace partition_alloc::internal

View file

@ -7,9 +7,9 @@
#include <cstdint>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/reservation_offset_table.h"

View file

@ -19,17 +19,21 @@
namespace partition_alloc {
namespace {
PartitionOptions GwpAsanPartitionOptions() {
PartitionOptions options;
options.backup_ref_ptr = PartitionOptions::kEnabled;
return options;
}
} // namespace
// static
void* GwpAsanSupport::MapRegion(size_t slot_count,
std::vector<uint16_t>& free_list) {
PA_CHECK(slot_count > 0);
constexpr PartitionOptions kConfig = []() {
PartitionOptions opts;
opts.backup_ref_ptr = PartitionOptions::kEnabled;
return opts;
}();
static internal::base::NoDestructor<PartitionRoot> root(kConfig);
static internal::base::NoDestructor<PartitionRoot> root(
GwpAsanPartitionOptions());
const size_t kSlotSize = 2 * internal::SystemPageSize();
uint16_t bucket_index = PartitionRoot::SizeToBucketIndex(
@ -38,8 +42,7 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
const size_t kSuperPagePayloadStartOffset =
internal::SuperPagePayloadStartOffset(
/* is_managed_by_normal_buckets = */ true,
/* with_quarantine = */ false);
/* is_managed_by_normal_buckets = */ true);
PA_CHECK(kSuperPagePayloadStartOffset % kSlotSize == 0);
const size_t kSuperPageGwpAsanSlotAreaBeginOffset =
kSuperPagePayloadStartOffset;
@ -90,15 +93,16 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
partition_page_idx += bucket->get_pages_per_slot_span()) {
auto* slot_span_metadata =
&page_metadata[partition_page_idx].slot_span_metadata;
bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata);
bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata, root.get());
auto slot_span_start =
internal::SlotSpanMetadata::ToSlotSpanStart(slot_span_metadata);
internal::SlotSpanMetadata<internal::MetadataKind::kReadOnly>::
ToSlotSpanStart(slot_span_metadata);
for (uintptr_t slot_idx = 0; slot_idx < kSlotsPerSlotSpan; ++slot_idx) {
auto slot_start = slot_span_start + slot_idx * kSlotSize;
PartitionRoot::InSlotMetadataPointerFromSlotStartAndSize(slot_start,
kSlotSize)
->InitalizeForGwpAsan();
->InitializeForGwpAsan();
size_t global_slot_idx = (slot_start - super_page_span_start -
kSuperPageGwpAsanSlotAreaBeginOffset) /
kSlotSize;

View file

@ -5,8 +5,8 @@
#ifndef PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
#define PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)

View file

@ -6,28 +6,23 @@
#define PARTITION_ALLOC_IN_SLOT_METADATA_H_
#include <atomic>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <limits>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/tagging.h"
#if PA_BUILDFLAG(IS_APPLE)
#include "partition_alloc/partition_alloc_base/bits.h"
#endif // PA_BUILDFLAG(IS_APPLE)
namespace partition_alloc::internal {
// Aligns up (on 8B boundary) `in_slot_metadata_size` on Mac as a workaround for
@ -40,10 +35,10 @@ namespace partition_alloc::internal {
// Placed outside `PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)`
// intentionally to accommodate usage in contexts also outside
// this gating.
PA_ALWAYS_INLINE size_t
AlignUpInSlotMetadataSizeForApple(size_t in_slot_metadata_size) {
PA_ALWAYS_INLINE constexpr size_t AlignUpInSlotMetadataSizeForApple(
size_t in_slot_metadata_size) {
#if PA_BUILDFLAG(IS_APPLE)
return internal::base::bits::AlignUp<size_t>(in_slot_metadata_size, 8);
return base::bits::AlignUp<size_t>(in_slot_metadata_size, 8);
#else
return in_slot_metadata_size;
#endif // PA_BUILDFLAG(IS_APPLE)
@ -51,7 +46,6 @@ AlignUpInSlotMetadataSizeForApple(size_t in_slot_metadata_size) {
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
namespace {
// Utility functions to define a bit field.
template <typename CountType>
static constexpr CountType SafeShift(CountType lhs, int rhs) {
@ -69,7 +63,6 @@ struct BitField {
~(SafeShift<CountType>(1, lo) - 1);
}
};
} // namespace
// Special-purpose atomic bit field class mainly used by RawPtrBackupRefImpl.
// Formerly known as `PartitionRefCount`, but renamed to support usage that is
@ -171,9 +164,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
std::numeric_limits<CountType>::max());
static constexpr auto kPtrInc =
SafeShift<CountType>(1, std::countr_zero(kPtrCountMask));
SafeShift<CountType>(1, base::bits::CountrZero(kPtrCountMask));
static constexpr auto kUnprotectedPtrInc =
SafeShift<CountType>(1, std::countr_zero(kUnprotectedPtrCountMask));
SafeShift<CountType>(1, base::bits::CountrZero(kUnprotectedPtrCountMask));
PA_ALWAYS_INLINE explicit InSlotMetadata(bool needs_mac11_malloc_size_hack);
@ -220,8 +213,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// If a dangling raw_ptr<> was detected, report it.
if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
kDanglingRawPtrDetectedBit)) {
if ((old_count & kDanglingRawPtrDetectedBit) == kDanglingRawPtrDetectedBit)
[[unlikely]] {
partition_alloc::internal::DanglingRawPtrReleased(
reinterpret_cast<uintptr_t>(this));
}
@ -269,13 +262,13 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// detection mechanism isn't perfect, because in-slot-metadata can be
// overwritten by the freelist pointer (or its shadow) for very small slots,
// thus masking the error away.
if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
if (!(old_count & kMemoryHeldByAllocatorBit)) [[unlikely]] {
DoubleFreeOrCorruptionDetected(old_count);
}
// Release memory when no raw_ptr<> exists anymore:
static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
if (PA_LIKELY((old_count & mask) == 0)) {
if ((old_count & mask) == 0) [[likely]] {
std::atomic_thread_fence(std::memory_order_acquire);
// The allocation is about to get freed, so clear the cookie.
ClearCookieIfSupported();
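The release path above is easier to follow in a reduced model. Here is a toy version with an assumed bit layout (a single "held by allocator" bit; the real InSlotMetadata packs several more fields and cookie checks):

```cpp
// Toy release path: clear the "held by allocator" bit, detect
// double-free, and report whether the slot can really be freed.
#include <atomic>
#include <cstdint>
#include <cstdlib>

constexpr uint32_t kMemoryHeldByAllocatorBit = 1u << 0;

bool ReleaseFromAllocator(std::atomic<uint32_t>& count) {
  uint32_t old_count =
      count.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
  if (!(old_count & kMemoryHeldByAllocatorBit)) {
    std::abort();  // Double-free or corruption: bit was already cleared.
  }
  if ((old_count & ~kMemoryHeldByAllocatorBit) == 0) {
    std::atomic_thread_fence(std::memory_order_acquire);
    return true;  // No raw_ptr<> left; the slot may be freed now.
  }
  return false;  // A raw_ptr<> still points here; defer the free.
}
```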
@ -322,6 +315,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// Request to quarantine this allocation. The request might be ignored if
// the allocation is already freed.
// TODO(crbug.com/329027914) This is an unused function. Start using it in
// tests and/or in production code.
PA_ALWAYS_INLINE void SetQuarantineRequest() {
CountType old_count =
count_.fetch_or(kRequestQuarantineBit, std::memory_order_relaxed);
@ -330,6 +325,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
}
// Get and clear out quarantine request.
// TODO(crbug.com/329027914) This is an unused function. Start using it in
// tests and/or in production code.
PA_ALWAYS_INLINE bool PopQuarantineRequest() {
CountType old_count =
count_.fetch_and(~kRequestQuarantineBit, std::memory_order_acq_rel);
@ -342,7 +339,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// make sure the `raw_ptr<T>` release operation will never attempt to call the
// PA `free` on such a slot. GWP-ASan takes the extra reference into account
// when determining whether the slot can be reused.
PA_ALWAYS_INLINE void InitalizeForGwpAsan() {
PA_ALWAYS_INLINE void InitializeForGwpAsan() {
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
brp_cookie_ = CalculateCookie();
#endif
@ -374,7 +371,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// The `kPtrCountMask` counts the number of raw_ptr<T>. It is expected to be
// zero when there are no unexpected dangling pointers.
if (PA_LIKELY((count & kPtrCountMask) == 0)) {
if ((count & kPtrCountMask) == 0) [[likely]] {
return;
}
@ -409,9 +406,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// - A raw_ptr<T, DisableDanglingPtrDetection>
//
// Assuming this raw_ptr is not dangling, the memory must still be held at
// least by the allocator, so this is PA_LIKELY true.
if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
kUnprotectedPtrCountMask)))) {
// least by the allocator, so this is `[[likely]]`.
if ((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
kUnprotectedPtrCountMask))) [[likely]] {
return false; // Do not release the memory.
}
@ -547,10 +544,10 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
// the InSlotMetadata object out-of-line in this case, specifically in a
// special table after the super page metadata (see InSlotMetadataTable in
// partition_alloc_constants.h).
if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
if (slot_start & SystemPageOffsetMask()) [[likely]] {
uintptr_t refcount_address =
slot_start + slot_size - sizeof(InSlotMetadata);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON) || \
#if PA_BUILDFLAG(DCHECKS_ARE_ON) || \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_CHECK(refcount_address % alignof(InSlotMetadata) == 0);
#endif
@ -563,7 +560,7 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
(slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
<< GetInSlotMetadataIndexMultiplierShift();
#if PA_BUILDFLAG(PA_DCHECK_IS_ON) || \
#if PA_BUILDFLAG(DCHECKS_ARE_ON) || \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_CHECK(sizeof(InSlotMetadata) * index <= SystemPageSize());
#endif
@ -575,7 +572,7 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
static inline constexpr size_t kInSlotMetadataSizeAdjustment =
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
sizeof(InSlotMetadata);
AlignUpInSlotMetadataSizeForApple(sizeof(InSlotMetadata));
#else
0ul;
#endif

View file

@ -7,7 +7,7 @@
namespace partition_alloc::internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
PartitionRoot& InternalAllocatorRoot() {
static internal::base::NoDestructor<PartitionRoot> allocator([]() {
static internal::base::NoDestructor<PartitionRoot> allocator([] {
// Disable features using the internal root to avoid reentrancy issue.
PartitionOptions opts;
opts.thread_cache = PartitionOptions::kDisabled;
@ -37,8 +37,4 @@ void InternalPartitionAllocated::operator delete(void* ptr, std::align_val_t) {
InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
// A deleter for `std::unique_ptr<T>`.
void InternalPartitionDeleter::operator()(void* ptr) const {
InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
} // namespace partition_alloc::internal

View file

@ -26,7 +26,7 @@ PartitionRoot& InternalAllocatorRoot();
// A class that meets C++ named requirements, Allocator.
template <typename T>
InternalAllocator<T>::value_type* InternalAllocator<T>::allocate(
typename InternalAllocator<T>::value_type* InternalAllocator<T>::allocate(
std::size_t count) {
PA_CHECK(count <=
std::numeric_limits<std::size_t>::max() / sizeof(value_type));
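The hunk shows only the overflow check, so here is a minimal stand-in that satisfies the same C++ named requirement "Allocator"; the real InternalAllocator allocates from `InternalAllocatorRoot()` rather than malloc/free, and this sketch is illustrative only:

```cpp
#include <cstddef>
#include <cstdlib>
#include <limits>
#include <new>
#include <vector>

// Minimal Allocator-conforming stand-in.
template <typename T>
struct MinimalAllocator {
  using value_type = T;
  MinimalAllocator() = default;
  template <typename U>
  MinimalAllocator(const MinimalAllocator<U>&) {}
  T* allocate(std::size_t n) {
    if (n > std::numeric_limits<std::size_t>::max() / sizeof(T))
      throw std::bad_alloc();  // Same overflow guard as the PA_CHECK above.
    if (void* p = std::malloc(n * sizeof(T))) return static_cast<T*>(p);
    throw std::bad_alloc();
  }
  void deallocate(T* p, std::size_t) { std::free(p); }
};
template <typename T, typename U>
bool operator==(const MinimalAllocator<T>&, const MinimalAllocator<U>&) {
  return true;
}

// Usage mirrors how internal containers avoid the default heap:
using IntVector = std::vector<int, MinimalAllocator<int>>;
```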

View file

@ -71,8 +71,9 @@ template <typename T>
void DestroyAtInternalPartition(T* ptr);
// A deleter for `std::unique_ptr<T>`.
struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) InternalPartitionDeleter final {
void operator()(void* ptr) const;
template <typename T>
struct InternalPartitionDeleter final {
void operator()(T* ptr) const { DestroyAtInternalPartition(ptr); }
};
} // namespace partition_alloc::internal
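A hypothetical call site for the templated deleter, assuming the `ConstructAtInternalPartition<T>()` helper declared in this header (the `Buffer` type and `MakeBuffer` function are illustrative, not code from the tree):

```cpp
#include <memory>

struct Buffer {
  char bytes[4096];
};

using BufferPtr =
    std::unique_ptr<Buffer,
                    partition_alloc::internal::InternalPartitionDeleter<Buffer>>;

BufferPtr MakeBuffer() {
  // On destruction, the deleter calls DestroyAtInternalPartition(ptr).
  return BufferPtr(
      partition_alloc::internal::ConstructAtInternalPartition<Buffer>());
}
```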

View file

@ -9,19 +9,51 @@
#include "partition_alloc/partition_root.h"
namespace partition_alloc::internal {
namespace {
// A utility to lock only if a condition is met.
class PA_SCOPED_LOCKABLE ConditionalScopedGuard {
// Utility classes to lock only if a condition is met.
template <>
class PA_SCOPED_LOCKABLE
LightweightQuarantineBranch::CompileTimeConditionalScopedGuard<
LightweightQuarantineBranch::LockRequired::kNotRequired> {
public:
PA_ALWAYS_INLINE ConditionalScopedGuard(bool condition, Lock& lock)
PA_ALWAYS_INLINE explicit CompileTimeConditionalScopedGuard(Lock& lock)
PA_EXCLUSIVE_LOCK_FUNCTION(lock) {}
// For some reason, defaulting this causes a thread safety annotation failure.
PA_ALWAYS_INLINE
~CompileTimeConditionalScopedGuard() // NOLINT(modernize-use-equals-default)
PA_UNLOCK_FUNCTION() {}
};
template <>
class PA_SCOPED_LOCKABLE
LightweightQuarantineBranch::CompileTimeConditionalScopedGuard<
LightweightQuarantineBranch::LockRequired::kRequired> {
public:
PA_ALWAYS_INLINE explicit CompileTimeConditionalScopedGuard(Lock& lock)
PA_EXCLUSIVE_LOCK_FUNCTION(lock)
: lock_(lock) {
lock_.Acquire();
}
PA_ALWAYS_INLINE ~CompileTimeConditionalScopedGuard() PA_UNLOCK_FUNCTION() {
lock_.Release();
}
private:
Lock& lock_;
};
class PA_SCOPED_LOCKABLE
LightweightQuarantineBranch::RuntimeConditionalScopedGuard {
public:
PA_ALWAYS_INLINE RuntimeConditionalScopedGuard(bool condition, Lock& lock)
PA_EXCLUSIVE_LOCK_FUNCTION(lock)
: condition_(condition), lock_(lock) {
if (condition_) {
lock_.Acquire();
}
}
PA_ALWAYS_INLINE ~ConditionalScopedGuard() PA_UNLOCK_FUNCTION() {
PA_ALWAYS_INLINE ~RuntimeConditionalScopedGuard() PA_UNLOCK_FUNCTION() {
if (condition_) {
lock_.Release();
}
@ -32,8 +64,6 @@ class PA_SCOPED_LOCKABLE ConditionalScopedGuard {
Lock& lock_;
};
} // namespace
LightweightQuarantineBranch LightweightQuarantineRoot::CreateBranch(
const LightweightQuarantineBranchConfig& config) {
return LightweightQuarantineBranch(*this, config);
@ -44,7 +74,12 @@ LightweightQuarantineBranch::LightweightQuarantineBranch(
const LightweightQuarantineBranchConfig& config)
: root_(root),
lock_required_(config.lock_required),
branch_capacity_in_bytes_(config.branch_capacity_in_bytes) {}
branch_capacity_in_bytes_(config.branch_capacity_in_bytes) {
if (lock_required_) {
to_be_freed_working_memory_ =
ConstructAtInternalPartition<ToBeFreedArray>();
}
}
LightweightQuarantineBranch::LightweightQuarantineBranch(
LightweightQuarantineBranch&& b)
@ -55,56 +90,23 @@ LightweightQuarantineBranch::LightweightQuarantineBranch(
branch_capacity_in_bytes_(
b.branch_capacity_in_bytes_.load(std::memory_order_relaxed)) {
b.branch_size_in_bytes_ = 0;
if (lock_required_) {
to_be_freed_working_memory_.store(b.to_be_freed_working_memory_.exchange(
nullptr, std::memory_order_relaxed),
std::memory_order_relaxed);
}
}
LightweightQuarantineBranch::~LightweightQuarantineBranch() {
Purge();
slots_.clear();
}
bool LightweightQuarantineBranch::Quarantine(void* object,
SlotSpanMetadata* slot_span,
uintptr_t slot_start) {
const auto usable_size = root_.allocator_root_.GetSlotUsableSize(slot_span);
const size_t capacity_in_bytes =
branch_capacity_in_bytes_.load(std::memory_order_relaxed);
{
ConditionalScopedGuard guard(lock_required_, lock_);
if (capacity_in_bytes < usable_size) {
// Even if this branch dequarantines all entries held by it, this entry
// cannot fit within the capacity.
root_.allocator_root_.FreeNoHooksImmediate(object, slot_span, slot_start);
root_.quarantine_miss_count_.fetch_add(1u, std::memory_order_relaxed);
return false;
}
// Dequarantine some entries as required.
PurgeInternal(capacity_in_bytes - usable_size);
// Put the entry onto the list.
branch_size_in_bytes_ += usable_size;
slots_.emplace_back(slot_start, usable_size);
// Swap randomly so that the quarantine list remains shuffled.
// This is not uniformly random, but sufficiently random.
const size_t random_index = random_.RandUint32() % slots_.size();
std::swap(slots_[random_index], slots_.back());
if (lock_required_) {
DestroyAtInternalPartition(to_be_freed_working_memory_.exchange(
nullptr, std::memory_order_relaxed));
}
// Update stats (not locked).
root_.count_.fetch_add(1, std::memory_order_relaxed);
root_.size_in_bytes_.fetch_add(usable_size, std::memory_order_relaxed);
root_.cumulative_count_.fetch_add(1, std::memory_order_relaxed);
root_.cumulative_size_in_bytes_.fetch_add(usable_size,
std::memory_order_relaxed);
return true;
}
bool LightweightQuarantineBranch::IsQuarantinedForTesting(void* object) {
ConditionalScopedGuard guard(lock_required_, lock_);
RuntimeConditionalScopedGuard guard(lock_required_, lock_);
uintptr_t slot_start =
root_.allocator_root_.ObjectToSlotStartUnchecked(object);
for (const auto& slot : slots_) {
@ -120,26 +122,139 @@ void LightweightQuarantineBranch::SetCapacityInBytes(size_t capacity_in_bytes) {
}
void LightweightQuarantineBranch::Purge() {
ConditionalScopedGuard guard(lock_required_, lock_);
RuntimeConditionalScopedGuard guard(lock_required_, lock_);
PurgeInternal(0);
slots_.shrink_to_fit();
}
template <LightweightQuarantineBranch::LockRequired lock_required>
bool LightweightQuarantineBranch::QuarantineInternal(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size) {
PA_DCHECK(lock_required_ ? lock_required == LockRequired::kRequired
: lock_required == LockRequired::kNotRequired);
PA_DCHECK(usable_size == root_.allocator_root_.GetSlotUsableSize(slot_span));
const size_t capacity_in_bytes =
branch_capacity_in_bytes_.load(std::memory_order_relaxed);
if (capacity_in_bytes < usable_size) [[unlikely]] {
// Even if this branch dequarantines all entries held by it, this entry
// cannot fit within the capacity.
root_.allocator_root_.FreeNoHooksImmediate(object, slot_span, slot_start);
root_.quarantine_miss_count_.fetch_add(1u, std::memory_order_relaxed);
return false;
}
if constexpr (lock_required == LockRequired::kNotRequired) {
// Although there is no need to actually acquire the lock when
// LockRequired::kNotRequired is specified, a
// CompileTimeConditionalScopedGuard is still necessary to touch `slots_`,
// because `slots_` is annotated with `PA_GUARDED_BY(lock_)`.
// CompileTimeConditionalScopedGuard's ctor and dtor behave as
// PA_EXCLUSIVE_LOCK_FUNCTION and PA_UNLOCK_FUNCTION.
CompileTimeConditionalScopedGuard<lock_required> guard(lock_);
// Dequarantine some entries as required.
PurgeInternal(capacity_in_bytes - usable_size);
// Put the entry onto the list.
branch_size_in_bytes_ += usable_size;
slots_.push_back({slot_start, usable_size});
// Swap randomly so that the quarantine list remains shuffled.
// This is not uniformly random, but sufficiently random.
const size_t random_index = random_.RandUint32() % slots_.size();
std::swap(slots_[random_index], slots_.back());
} else {
std::unique_ptr<ToBeFreedArray, InternalPartitionDeleter<ToBeFreedArray>>
to_be_freed;
size_t num_of_slots = 0;
{
CompileTimeConditionalScopedGuard<lock_required> guard(lock_);
// Borrow the reserved working memory from to_be_freed_working_memory_,
// storing nullptr there to indicate that it is in use.
to_be_freed.reset(to_be_freed_working_memory_.exchange(nullptr));
if (!to_be_freed) {
// If the reserved working memory is already in use by another thread,
// fall back to allocating another chunk of working memory.
to_be_freed.reset(ConstructAtInternalPartition<ToBeFreedArray>());
}
// Dequarantine some entries as required. Save the objects to be
// deallocated into `to_be_freed`.
PurgeInternalWithDefferedFree(capacity_in_bytes - usable_size,
*to_be_freed, num_of_slots);
// Put the entry onto the list.
branch_size_in_bytes_ += usable_size;
slots_.push_back({slot_start, usable_size});
// Swap randomly so that the quarantine list remains shuffled.
// This is not uniformly random, but sufficiently random.
const size_t random_index = random_.RandUint32() % slots_.size();
std::swap(slots_[random_index], slots_.back());
}
// Actually deallocate the dequarantined objects.
BatchFree(*to_be_freed, num_of_slots);
// Return the possibly-borrowed working memory to
// to_be_freed_working_memory_. It does not matter much whether it was
// really borrowed or locally allocated. The important facts are 1)
// to_be_freed is non-null, and 2) to_be_freed_working_memory_ is quite
// likely null (because this or another thread has borrowed it). It is
// simply good to make to_be_freed_working_memory_ non-null whenever
// possible, since yet another thread may be about to borrow it.
to_be_freed.reset(
to_be_freed_working_memory_.exchange(to_be_freed.release()));
}
// Update stats (not locked).
root_.count_.fetch_add(1, std::memory_order_relaxed);
root_.size_in_bytes_.fetch_add(usable_size, std::memory_order_relaxed);
root_.cumulative_count_.fetch_add(1, std::memory_order_relaxed);
root_.cumulative_size_in_bytes_.fetch_add(usable_size,
std::memory_order_relaxed);
return true;
}
template bool LightweightQuarantineBranch::QuarantineInternal<
LightweightQuarantineBranch::LockRequired::kNotRequired>(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
template bool LightweightQuarantineBranch::QuarantineInternal<
LightweightQuarantineBranch::LockRequired::kRequired>(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
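The swap-with-the-back step above is the whole trick that keeps the quarantine list shuffled without ever reshuffling it. A minimal standalone sketch of the same maintenance strategy, assuming a plain std::vector store and std::mt19937 in place of PartitionAlloc's internal RNG (all names hypothetical):

#include <cstddef>
#include <cstdint>
#include <random>
#include <utility>
#include <vector>

struct Slot {
  uintptr_t start;
  size_t size;
};

class ShuffledQuarantine {
 public:
  void Push(const Slot& s) {
    slots_.push_back(s);
    // Swap the new entry with a randomly chosen one, so that popping from
    // the back later is equivalent to popping a random entry. Not uniformly
    // random over all permutations, but random enough for this purpose.
    const size_t i = rng_() % slots_.size();
    std::swap(slots_[i], slots_.back());
  }
  Slot PopRandom() {
    Slot s = slots_.back();
    slots_.pop_back();
    return s;
  }
  bool empty() const { return slots_.empty(); }

 private:
  std::vector<Slot> slots_;
  std::mt19937 rng_{std::random_device{}()};
};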
PA_ALWAYS_INLINE void LightweightQuarantineBranch::PurgeInternal(
size_t target_size_in_bytes) {
int64_t freed_count = 0;
int64_t freed_size_in_bytes = 0;
// Dequarantine some entries as required.
while (!slots_.empty() && target_size_in_bytes < branch_size_in_bytes_) {
while (target_size_in_bytes < branch_size_in_bytes_) {
PA_DCHECK(!slots_.empty());
    // As quarantined entries are shuffled, picking the last entry is
    // equivalent to picking a random entry.
const auto& to_free = slots_.back();
size_t to_free_size = to_free.usable_size;
auto* slot_span = SlotSpanMetadata::FromSlotStart(to_free.slot_start);
auto* slot_span = SlotSpanMetadata<MetadataKind::kReadOnly>::FromSlotStart(
to_free.slot_start);
void* object = root_.allocator_root_.SlotStartToObject(to_free.slot_start);
PA_DCHECK(slot_span == SlotSpanMetadata::FromObject(object));
PA_DCHECK(slot_span ==
SlotSpanMetadata<MetadataKind::kReadOnly>::FromObject(object));
PA_DCHECK(to_free.slot_start);
root_.allocator_root_.FreeNoHooksImmediate(object, slot_span,
@ -157,4 +272,53 @@ PA_ALWAYS_INLINE void LightweightQuarantineBranch::PurgeInternal(
root_.count_.fetch_sub(freed_count, std::memory_order_relaxed);
}
PA_ALWAYS_INLINE void
LightweightQuarantineBranch::PurgeInternalWithDefferedFree(
size_t target_size_in_bytes,
ToBeFreedArray& to_be_freed,
size_t& num_of_slots) {
num_of_slots = 0;
int64_t freed_size_in_bytes = 0;
// Dequarantine some entries as required.
while (target_size_in_bytes < branch_size_in_bytes_) {
PA_DCHECK(!slots_.empty());
    // As quarantined entries are shuffled, picking the last entry is
    // equivalent to picking a random entry.
const QuarantineSlot& to_free = slots_.back();
const size_t to_free_size = to_free.usable_size;
to_be_freed[num_of_slots++] = to_free.slot_start;
slots_.pop_back();
freed_size_in_bytes += to_free_size;
branch_size_in_bytes_ -= to_free_size;
if (num_of_slots >= kMaxFreeTimesPerPurge) {
break;
}
}
root_.size_in_bytes_.fetch_sub(freed_size_in_bytes,
std::memory_order_relaxed);
root_.count_.fetch_sub(num_of_slots, std::memory_order_relaxed);
}
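The shape of the two-phase purge is easier to see in isolation. A hedged sketch with std::mutex standing in for PartitionAlloc's lock and ::operator new/delete for the allocator (all names hypothetical):

#include <array>
#include <cstddef>
#include <mutex>
#include <vector>

constexpr size_t kMaxFreePerPurge = 1024;

std::mutex g_lock;
std::vector<void*> g_quarantined;  // Guarded by g_lock; from ::operator new.

void PurgeSome() {
  std::array<void*, kMaxFreePerPurge> to_free;
  size_t n = 0;
  {
    std::lock_guard<std::mutex> guard(g_lock);
    // Phase 1: under the lock, only move pointers out; no deallocation here,
    // so the critical section stays short.
    while (!g_quarantined.empty() && n < kMaxFreePerPurge) {
      to_free[n++] = g_quarantined.back();
      g_quarantined.pop_back();
    }
  }
  // Phase 2: deallocate outside the lock, so other threads can keep
  // quarantining while the (potentially slow) frees run.
  for (size_t i = 0; i < n; ++i) {
    ::operator delete(to_free[i]);
  }
}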
PA_ALWAYS_INLINE void LightweightQuarantineBranch::BatchFree(
const ToBeFreedArray& to_be_freed,
size_t num_of_slots) {
for (size_t i = 0; i < num_of_slots; ++i) {
const uintptr_t slot_start = to_be_freed[i];
PA_DCHECK(slot_start);
auto* slot_span =
SlotSpanMetadata<MetadataKind::kReadOnly>::FromSlotStart(slot_start);
void* object = root_.allocator_root_.SlotStartToObject(slot_start);
PA_DCHECK(slot_span ==
SlotSpanMetadata<MetadataKind::kReadOnly>::FromObject(object));
root_.allocator_root_.FreeNoHooksImmediate(object, slot_span, slot_start);
}
}
} // namespace partition_alloc::internal

View file

@ -108,9 +108,35 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
  // as much as possible. If the object is too large, this may return
  // `false`, meaning that the quarantine request has failed (and the object
  // has been freed immediately). Otherwise, returns `true`.
bool Quarantine(void* object,
SlotSpanMetadata* slot_span,
uintptr_t slot_start);
PA_ALWAYS_INLINE bool Quarantine(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size) {
return lock_required_ ? QuarantineWithAcquiringLock(object, slot_span,
slot_start, usable_size)
: QuarantineWithoutAcquiringLock(
object, slot_span, slot_start, usable_size);
}
  // Even though LightweightQuarantineBranchConfig::lock_required_ is already
  // specified, we provide the two versions `With/WithoutAcquiringLock` so that
  // callers can avoid the overhead of a runtime conditional branch.
PA_ALWAYS_INLINE bool QuarantineWithAcquiringLock(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size) {
PA_MUSTTAIL return QuarantineInternal<LockRequired::kRequired>(
object, slot_span, slot_start, usable_size);
}
PA_ALWAYS_INLINE bool QuarantineWithoutAcquiringLock(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size) {
PA_MUSTTAIL return QuarantineInternal<LockRequired::kNotRequired>(
object, slot_span, slot_start, usable_size);
}
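The dispatch pattern here, one runtime branch selecting between two compile-time instantiations, can be sketched independently; `if constexpr` below plays the role of CompileTimeConditionalScopedGuard, and all names are placeholders:

#include <mutex>

enum class LockRequired { kNotRequired, kRequired };

class Branch {
 public:
  explicit Branch(bool lock_required) : lock_required_(lock_required) {}

  bool Push(int v) {
    // The flag is checked exactly once here; inside PushImpl the condition
    // is a template parameter, so the hot path carries no repeated checks.
    return lock_required_ ? PushImpl<LockRequired::kRequired>(v)
                          : PushImpl<LockRequired::kNotRequired>(v);
  }

 private:
  template <LockRequired kLock>
  bool PushImpl(int v) {
    if constexpr (kLock == LockRequired::kRequired) {
      std::lock_guard<std::mutex> guard(lock_);
      return DoPush(v);
    } else {
      return DoPush(v);
    }
  }
  bool DoPush(int v) {
    last_ = v;
    return true;
  }

  const bool lock_required_;
  std::mutex lock_;
  int last_ = 0;
};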
  // Dequarantine all entries **held by this branch**.
  // Another branch may still hold entries; those remain untouched.
@ -129,9 +155,27 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
void SetCapacityInBytes(size_t capacity_in_bytes);
private:
enum class LockRequired { kNotRequired, kRequired };
template <LockRequired lock_required>
class PA_SCOPED_LOCKABLE CompileTimeConditionalScopedGuard;
class PA_SCOPED_LOCKABLE RuntimeConditionalScopedGuard;
  // `ToBeFreedArray` is used in `PurgeInternalWithDefferedFree` and
  // `BatchFree`. See the comments on those functions for its purpose.
  // In order to avoid reentrancy issues, we must not deallocate any object in
  // `Quarantine`, so std::vector is not an option. std::array doesn't
  // deallocate, and it has performance advantages as well.
static constexpr size_t kMaxFreeTimesPerPurge = 1024;
using ToBeFreedArray = std::array<uintptr_t, kMaxFreeTimesPerPurge>;
LightweightQuarantineBranch(Root& root,
const LightweightQuarantineBranchConfig& config);
template <LockRequired lock_required>
bool QuarantineInternal(void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
  // Try to dequarantine entries so that the following holds:
// root_.size_in_bytes_ <= target_size_in_bytes
// It is possible that this branch cannot satisfy the
@ -139,6 +183,19 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
// constraint, call `Purge()` for each branch in sequence, synchronously.
PA_ALWAYS_INLINE void PurgeInternal(size_t target_size_in_bytes)
PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
// In order to reduce thread contention, dequarantines entries in two phases:
// Phase 1) With the lock acquired, saves `slot_start`s of the quarantined
// objects in an array, and shrinks `slots_`. Then, releases the lock so
// that another thread can quarantine an object.
// Phase 2) Without the lock acquired, deallocates objects saved in the
// array in Phase 1. This may take some time, but doesn't block other
// threads.
PA_ALWAYS_INLINE void PurgeInternalWithDefferedFree(
size_t target_size_in_bytes,
ToBeFreedArray& to_be_freed,
size_t& num_of_slots) PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
PA_ALWAYS_INLINE void BatchFree(const ToBeFreedArray& to_be_freed,
size_t num_of_slots);
Root& root_;
@ -160,9 +217,35 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
// Using `std::atomic` here so that other threads can update this value.
std::atomic_size_t branch_capacity_in_bytes_;
  // This working memory is needed only temporarily, while dequarantining
  // objects in slots_ when lock_required_ is true. However, allocating it on
  // the stack may cause stack overflow [1], and allocating and deallocating it
  // on the heap for each dequarantine pass incurs a non-negligible performance
  // penalty. So, we reserve one chunk of working memory on the heap for the
  // entire lifetime of this branch object and try to reuse it among threads.
  // Only when thread contention occurs do we allocate and deallocate another
  // chunk of working memory.
  // [1] https://issues.chromium.org/issues/387508217
std::atomic<ToBeFreedArray*> to_be_freed_working_memory_ = nullptr;
friend class LightweightQuarantineRoot;
};
extern template PA_COMPONENT_EXPORT(
PARTITION_ALLOC) bool LightweightQuarantineBranch::
QuarantineInternal<LightweightQuarantineBranch::LockRequired::kNotRequired>(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
extern template PA_COMPONENT_EXPORT(
PARTITION_ALLOC) bool LightweightQuarantineBranch::
QuarantineInternal<LightweightQuarantineBranch::LockRequired::kRequired>(
void* object,
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
uintptr_t slot_start,
size_t usable_size);
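The reserve-and-exchange scheme behind `to_be_freed_working_memory_` generalizes to any scratch buffer shared across threads. A hedged sketch using std::atomic and std::unique_ptr (names hypothetical):

#include <array>
#include <atomic>
#include <cstdint>
#include <memory>

using Scratch = std::array<uintptr_t, 1024>;

// One chunk is reserved for the whole program lifetime.
std::atomic<Scratch*> g_reserved{new Scratch};

void WithScratch() {
  // Borrow the reserved buffer; the nullptr left behind signals "in use".
  std::unique_ptr<Scratch> buf(g_reserved.exchange(nullptr));
  if (!buf) {
    // Contention: another thread holds the reserve, so allocate a fresh one.
    buf = std::make_unique<Scratch>();
  }
  // ... fill and consume *buf ...
  // Put a buffer back into the slot; whatever was there (usually nullptr)
  // is adopted and destroyed by the unique_ptr at scope exit.
  buf.reset(g_reserved.exchange(buf.release()));
}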
} // namespace internal
} // namespace partition_alloc

View file

@ -4,16 +4,12 @@
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc.h"
#include "partition_alloc/partition_alloc_base/no_destructor.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#if PA_BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/pcscan.h"
#endif
namespace partition_alloc {
// static
@ -63,26 +59,6 @@ void MemoryReclaimer::Reclaim(int flags) {
internal::ScopedGuard lock(
lock_); // Has to protect from concurrent (Un)Register calls.
// PCScan quarantines freed slots. Trigger the scan first to let it call
// FreeNoHooksImmediate on slots that pass the quarantine.
//
// In turn, FreeNoHooksImmediate may add slots to the thread cache. Purge it
// next so that the slots are actually freed. (This is done synchronously
// only for the current thread.)
//
// Lastly, decommit empty slot spans and try to discard unused pages at the
// end of the remaining active slots.
#if PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) && PA_BUILDFLAG(USE_STARSCAN)
{
using PCScan = internal::PCScan;
const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
? PCScan::InvocationMode::kForcedBlocking
: PCScan::InvocationMode::kBlocking;
PCScan::PerformScanIfNeeded(invocation_mode);
}
#endif // PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) &&
// PA_BUILDFLAG(USE_STARSCAN)
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
// Don't completely empty the thread cache outside of low memory situations,
// as there is periodic purge which makes sure that it doesn't take too much

View file

@ -15,6 +15,7 @@
#include <array>
#include <cstdlib>
#include <limits>
#endif // PA_BUILDFLAG(IS_WIN)
namespace partition_alloc {
@ -26,23 +27,50 @@ namespace internal {
// Crash server classifies base::internal::OnNoMemoryInternal as OOM.
// TODO(crbug.com/40158212): Update to
// partition_alloc::internal::base::internal::OnNoMemoryInternal
PA_NOINLINE void OnNoMemoryInternal(size_t size) {
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void OnNoMemoryInternal(
size_t size) {
g_oom_size = size;
size_t tmp_size = size;
internal::base::debug::Alias(&tmp_size);
#if PA_BUILDFLAG(IS_WIN)
// Create an exception vector with:
// [0] the size of the allocation, in bytes
// [1] "current committed memory limit for the system or the current process,
// whichever is smaller, in bytes"
// [2] "maximum amount of memory the current process can commit, in bytes"
//
// Citations from
// https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex
//
// System commit constraints (which may be different from the process commit
// constraints) are in the stability_report.SystemMemoryState.WindowsMemory
// proto attached to crash reports.
//
// Note: Both the process commit constraints in the exception vector and the
// system commit constraints in the proto are collected *after* the OOM and
// may therefore not reflect the state at the time of the OOM (e.g. another
// process may have exited or the page file may have been resized).
constexpr size_t kInvalid = std::numeric_limits<ULONG_PTR>::max();
ULONG_PTR exception_args[] = {size, kInvalid, kInvalid};
MEMORYSTATUSEX memory_status = {};
memory_status.dwLength = sizeof(memory_status);
if (::GlobalMemoryStatusEx(&memory_status) != 0) {
exception_args[1] = memory_status.ullTotalPageFile;
exception_args[2] = memory_status.ullAvailPageFile;
}
internal::base::debug::Alias(&memory_status);
  // Kill the process. This is important for security, since most code
// does not check the result of memory allocation.
// https://msdn.microsoft.com/en-us/library/het71c37.aspx
// Pass the size of the failed request in an exception argument.
ULONG_PTR exception_args[] = {size};
// Documentation: https://msdn.microsoft.com/en-us/library/het71c37.aspx
::RaiseException(win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE,
std::size(exception_args), exception_args);
// Safety check, make sure process exits here.
_exit(win::kOomExceptionCode);
#else
size_t tmp_size = size;
internal::base::debug::Alias(&tmp_size);
// Note: Don't add anything that may allocate here. Depending on the
// allocator, this may be called from within the allocator (e.g. with
// PartitionAlloc), and would deadlock as our locks are not recursive.
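On the consuming side, a crash handler can read those exception arguments back out of the EXCEPTION_RECORD. A hedged Win32 sketch; the 0xE0000008 value is an assumption standing in for win::kOomExceptionCode, which is defined elsewhere:

#include <windows.h>

#include <cstdio>

// Assumed to match the code passed to RaiseException above.
constexpr DWORD kAssumedOomExceptionCode = 0xE0000008;

LONG WINAPI OomFilter(EXCEPTION_POINTERS* info) {
  const EXCEPTION_RECORD* record = info->ExceptionRecord;
  if (record->ExceptionCode == kAssumedOomExceptionCode &&
      record->NumberParameters >= 1) {
    // [0] allocation size; [1] and [2] carry commit limits when available.
    std::fprintf(stderr, "OOM raised for %zu bytes\n",
                 static_cast<size_t>(record->ExceptionInformation[0]));
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Usage sketch: SetUnhandledExceptionFilter(&OomFilter);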

View file

@ -22,8 +22,8 @@ namespace partition_alloc {
// |size| is the size of the failed allocation, or 0 if not known.
// Crash reporting classifies such crashes as OOM.
// Must be allocation-safe.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void TerminateBecauseOutOfMemory(size_t size);
[[noreturn]] PA_NOT_TAIL_CALLED PA_COMPONENT_EXPORT(
PARTITION_ALLOC) void TerminateBecauseOutOfMemory(size_t size);
// Records the size of the allocation that caused the current OOM crash, for
// consumption by Breakpad.

View file

@ -5,12 +5,12 @@
#include "partition_alloc/page_allocator.h"
#include <atomic>
#include <bit>
#include <cstdint>
#include "partition_alloc/address_space_randomization.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/page_allocator_internal.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_lock.h"
@ -79,7 +79,7 @@ uintptr_t TrimMapping(uintptr_t base_address,
uintptr_t alignment_offset,
PageAccessibilityConfiguration accessibility) {
PA_DCHECK(base_length >= trim_length);
PA_DCHECK(std::has_single_bit(alignment));
PA_DCHECK(internal::base::bits::HasSingleBit(alignment));
PA_DCHECK(alignment_offset < alignment);
uintptr_t new_base =
NextAlignedWithOffset(base_address, alignment, alignment_offset);
@ -108,7 +108,7 @@ uintptr_t TrimMapping(uintptr_t base_address,
uintptr_t NextAlignedWithOffset(uintptr_t address,
uintptr_t alignment,
uintptr_t requested_offset) {
PA_DCHECK(std::has_single_bit(alignment));
PA_DCHECK(internal::base::bits::HasSingleBit(alignment));
PA_DCHECK(requested_offset < alignment);
uintptr_t actual_offset = address & (alignment - 1);
@ -183,7 +183,7 @@ uintptr_t AllocPagesWithAlignOffset(
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(align >= internal::PageAllocationGranularity());
// Alignment must be power of 2 for masking math to work.
PA_DCHECK(std::has_single_bit(align));
PA_DCHECK(internal::base::bits::HasSingleBit(align));
PA_DCHECK(align_offset < align);
PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
@ -363,6 +363,14 @@ void DiscardSystemPages(void* address, size_t length) {
DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
}
bool SealSystemPages(uintptr_t address, size_t length) {
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
return internal::SealSystemPagesInternal(address, length);
}
bool SealSystemPages(void* address, size_t length) {
return SealSystemPages(reinterpret_cast<uintptr_t>(address), length);
}
bool ReserveAddressSpace(size_t size) {
// To avoid deadlock, call only SystemAllocPages.
internal::ScopedGuard guard(GetReserveLock());

View file

@ -9,10 +9,10 @@
#include <cstdint>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/thread_isolation/thread_isolation.h"
namespace partition_alloc {
@ -173,6 +173,14 @@ void FreePages(void* address, size_t length);
//
// Returns true if the permission change succeeded. In most cases you must
// |CHECK| the result.
//
// Note: On Windows, setting permissions to `PAGE_NOACCESS` will also decommit
// pages. This is desirable because clients assume that pages with no access
// rights should be "free" from a resource standpoint. In particular this allows
// clients to map a large amount of memory, set its access rights to
// `PAGE_NOACCESS` and not worry about commit limit exhaustion.
// On the flip side, this means that changing permissions can often fail on this
// platform.
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
uintptr_t address,
size_t length,
@ -187,6 +195,8 @@ void FreePages(void* address, size_t length);
// bytes.
//
// Performs a CHECK that the operation succeeds.
//
// See the note above for Windows-specific behavior.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetSystemPagesAccess(uintptr_t address,
size_t length,
@ -323,11 +333,36 @@ void RecommitSystemPages(
// that the page is required again. Once written to, the content of the page is
// guaranteed stable once more. After being written to, the page content may be
// based on the original page content, or a page of zeroes.
//
// WARNING: Do not discard a large number of pages for a potentially long
// duration. Discarded pages are *not* decommitted on Windows, where total
// system-wide committed memory is limited. As most Chromium OOM crashes are
// commit limit related, this will both impact Private Memory Footprint (which
// reports committed memory) and stability (since we will bump into the limit
// more often).
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DiscardSystemPages(uintptr_t address, size_t length);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DiscardSystemPages(void* address, size_t length);
// Seal a number of system pages starting at |address|. Returns |true| on
// success.
//
// This blocks various modifications to the pages such as unmapping, remapping
// or changing page permissions. Note that it doesn't change the accessibility
// of the memory; sealed writable pages will still be writable.
//
// This is mainly useful for non-writable memory (either via page permissions or
// other hardware features like pkeys) that is bound to the process lifetime.
//
// While unmapping the pages is blocked, it may still be possible to release
// the memory using |DiscardSystemPages()|, though note that, at least on
// Linux, this requires write access to the page to succeed.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool SealSystemPages(uintptr_t address, size_t length);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool SealSystemPages(void* address, size_t length);
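A hedged sketch of what a sealing call boils down to on Linux (mseal landed in kernel 6.10; __NR_mseal may be absent from older headers, as the POSIX implementation further below also notes):

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <cstddef>

bool SealRange(void* addr, size_t len) {
#if defined(__NR_mseal)
  // Once sealed, later mprotect()/munmap()/mremap() on this range fail with
  // EPERM for the remaining lifetime of the process.
  return syscall(__NR_mseal, addr, len, 0) == 0;
#else
  (void)addr;
  (void)len;
  return false;  // Kernel headers predate mseal.
#endif
}

// Usage sketch:
//   void* p = mmap(nullptr, 4096, PROT_READ,
//                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   SealRange(p, 4096);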
// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t

View file

@ -26,7 +26,8 @@
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
#elif (PA_BUILDFLAG(IS_ANDROID) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)) || \
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64))
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)) || \
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_PPC64))
// This should work for all POSIX (if needed), but currently all other
// supported OS/architecture combinations use either hard-coded values
// (such as x86) or have means to determine these values without needing
@ -38,6 +39,7 @@
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
#include <unistd.h>
#include <atomic>
namespace partition_alloc::internal {
@ -68,9 +70,30 @@ extern PageCharacteristics page_characteristics;
// Ability to name anonymous VMAs is available on some, but not all Linux-based
// systems.
#if PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX)
#if PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX) || \
PA_BUILDFLAG(IS_CHROMEOS)
#include <sys/prctl.h>
#if (PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)) && \
!(defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME))
// The PR_SET_VMA* symbols are originally from
// https://android.googlesource.com/platform/bionic/+/lollipop-release/libc/private/bionic_prctl.h
// and were subsequently added to mainline Linux in Jan 2022, see
// https://github.com/torvalds/linux/commit/9a10064f5625d5572c3626c1516e0bebc6c9fe9b.
//
// Define them to support compiling with older headers.
#if !defined(PR_SET_VMA)
#define PR_SET_VMA 0x53564d41
#endif
#if !defined(PR_SET_VMA_ANON_NAME)
#define PR_SET_VMA_ANON_NAME 0
#endif
#endif // (PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)) &&
// !(defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME))
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
#define LINUX_NAME_REGION 1
#endif
@ -86,7 +109,27 @@ PageAllocationGranularity();
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PageAllocationGranularityShift() {
#if PA_BUILDFLAG(IS_WIN) || PA_BUILDFLAG(PA_ARCH_CPU_PPC64)
#if defined(PARTITION_ALLOCATOR_CONSTANTS_POSIX_NONCONST_PAGE_SIZE)
// arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
// page sizes. Retrieve from or initialize cache.
size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
if (shift == 0) [[unlikely]] {
shift = static_cast<size_t>(
__builtin_ctz((unsigned int)PageAllocationGranularity()));
page_characteristics.shift.store(shift, std::memory_order_relaxed);
}
return shift;
#elif PA_BUILDFLAG(IS_WIN) || PA_BUILDFLAG(PA_ARCH_CPU_PPC64)
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
// sizes. Since 64kB is the de facto standard on the platform and binaries
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
// here.
return 16; // 64kB
#elif defined(_MIPS_ARCH_LOONGSON) || PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64)
return 14; // 16kB
#elif PA_BUILDFLAG(IS_APPLE) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);
#elif PA_BUILDFLAG(IS_WIN) || PA_BUILDFLAG(PA_ARCH_CPU_PPC64)
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
// sizes. Since 64kB is the de facto standard on the platform and binaries
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
@ -96,16 +139,6 @@ PageAllocationGranularityShift() {
return 14; // 16kB
#elif PA_BUILDFLAG(IS_APPLE) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);
#elif defined(PARTITION_ALLOCATOR_CONSTANTS_POSIX_NONCONST_PAGE_SIZE)
// arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
// page sizes. Retrieve from or initialize cache.
size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
if (PA_UNLIKELY(shift == 0)) {
shift = static_cast<size_t>(
__builtin_ctz((unsigned int)PageAllocationGranularity()));
page_characteristics.shift.store(shift, std::memory_order_relaxed);
}
return shift;
#else
return 12; // 4kB
#endif
@ -121,7 +154,7 @@ PageAllocationGranularity() {
// arm64 supports 4kb, 16kb, and 64kb page sizes. Retrieve from or
// initialize cache.
size_t size = page_characteristics.size.load(std::memory_order_relaxed);
if (PA_UNLIKELY(size == 0)) {
if (size == 0) [[unlikely]] {
size = static_cast<size_t>(getpagesize());
page_characteristics.size.store(size, std::memory_order_relaxed);
}
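The cache-the-shift idiom used above can be reproduced in isolation. A hedged POSIX sketch, assuming GCC/Clang for __builtin_ctz and that getpagesize() returns a power of two:

#include <unistd.h>

#include <atomic>
#include <cstddef>

std::atomic<size_t> g_page_shift{0};

size_t PageShift() {
  size_t shift = g_page_shift.load(std::memory_order_relaxed);
  if (shift == 0) {
    // For a power of two, counting trailing zeros yields log2 exactly.
    shift = static_cast<size_t>(
        __builtin_ctz(static_cast<unsigned>(getpagesize())));
    g_page_shift.store(shift, std::memory_order_relaxed);
  }
  return shift;  // Racy duplicate initialization is benign: same value.
}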

View file

@ -28,8 +28,6 @@
namespace partition_alloc::internal {
namespace {
zx::resource GetVmexResource() {
auto vmex_resource_client =
component::Connect<fuchsia_kernel::VmexResource>();
@ -94,8 +92,6 @@ zx_vm_option_t PageAccessibilityToZxVmOptions(
PA_NOTREACHED();
}
} // namespace
// zx_vmar_map() will fail if the VMO cannot be mapped at |vmar_offset|, i.e.
// |hint| is not advisory.
constexpr bool kHintIsAdvisory = false;
@ -210,6 +206,10 @@ void DiscardSystemPagesInternal(uint64_t address, size_t length) {
PA_ZX_CHECK(status == ZX_OK, status);
}
bool SealSystemPagesInternal(uint64_t address, size_t length) {
return false;
}
void DecommitSystemPagesInternal(
uint64_t address,
size_t length,

View file

@ -5,12 +5,41 @@
#include <sys/mman.h>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#if PA_BUILDFLAG(IS_APPLE)
#include "partition_alloc/partition_alloc_base/apple/foundation_util.h"
#if PA_BUILDFLAG(IS_IOS)
#include "partition_alloc/partition_alloc_base/ios/ios_util.h"
#elif PA_BUILDFLAG(IS_MAC)
#include "partition_alloc/partition_alloc_base/mac/mac_util.h"
#else
#error "Unknown platform"
#endif
#include <Availability.h>
#include <Security/Security.h>
#include <mach/mach.h>
#include "partition_alloc/partition_alloc_base/apple/scoped_cftyperef.h"
#endif
#if PA_BUILDFLAG(IS_MAC)
// SecTaskGetCodeSignStatus is marked as unavailable on macOS, although it's
// available on iOS and other Apple operating systems. It is, in fact, present
// on the system since macOS 10.12.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wavailability"
uint32_t SecTaskGetCodeSignStatus(SecTaskRef task) API_AVAILABLE(macos(10.12));
#pragma clang diagnostic pop
#endif // PA_BUILDFLAG(IS_MAC)
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING) || \
(defined(__ARM_FEATURE_BTI_DEFAULT) && (__ARM_FEATURE_BTI_DEFAULT == 1) && \
!defined(__MUSL__))
__has_include(<sys/ifunc.h>))
struct __ifunc_arg_t;
#include "partition_alloc/aarch64_support.h"
@ -91,4 +120,88 @@ int GetAccessFlags(PageAccessibilityConfiguration accessibility)
}
#endif
#if defined(LINUX_NAME_REGION)
void NameRegion(void* start, size_t length, PageTag page_tag) {
// Important: All the names should be string literals. As per prctl.h in
// //third_party/android_toolchain/ndk the kernel keeps a pointer to the name
// instead of copying it.
//
// Having the name in .rodata ensures that the pointer remains valid as
// long as the mapping is alive.
const char* name = nullptr;
switch (page_tag) {
case PageTag::kSimulation:
name = "simulation";
break;
case PageTag::kBlinkGC:
name = "blink_gc";
break;
case PageTag::kPartitionAlloc:
name = "partition_alloc";
break;
case PageTag::kChromium:
name = "chromium";
break;
case PageTag::kV8:
name = "v8";
break;
default:
PA_NOTREACHED();
}
// No error checking on purpose, used for debugging only.
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, length, name);
}
#endif // defined(LINUX_NAME_REGION)
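Outside PartitionAlloc, the same call names any anonymous mapping. A hedged Linux sketch; the name must be a string literal (or otherwise outlive the mapping), exactly as the comment above requires:

#include <sys/mman.h>
#include <sys/prctl.h>

#include <cstddef>

#ifndef PR_SET_VMA
#define PR_SET_VMA 0x53564d41
#endif
#ifndef PR_SET_VMA_ANON_NAME
#define PR_SET_VMA_ANON_NAME 0
#endif

void* AllocNamed(size_t len) {
  void* p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p != MAP_FAILED) {
    // Shows up as "[anon:demo_region]" in /proc/self/maps on kernels built
    // with CONFIG_ANON_VMA_NAME; fails harmlessly (EINVAL) elsewhere.
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, len, "demo_region");
  }
  return p;
}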
#if PA_BUILDFLAG(IS_MAC)
// Tests whether the version of macOS supports the MAP_JIT flag and if the
// current process is signed with the hardened runtime and the allow-jit
// entitlement, returning whether MAP_JIT should be used to allocate regions
// that will contain JIT-compiled executable code.
bool UseMapJit() {
// Until determining that the hardened runtime is enabled, early returns will
// return true, so that MAP_JIT will be used. This is important on arm64,
// which only allows pages to be simultaneously writable and executable when
// in a region allocated with MAP_JIT, regardless of code signing options. On
// arm64, an attempt to set a non-MAP_JIT page as simultaneously writable and
// executable fails with EPERM. Although this is not enforced on x86_64,
// MAP_JIT is harmless in that case.
base::apple::ScopedCFTypeRef<SecTaskRef> task(
SecTaskCreateFromSelf(kCFAllocatorDefault));
if (!task) {
return true;
}
uint32_t flags = SecTaskGetCodeSignStatus(task);
if (!(flags & kSecCodeSignatureRuntime)) {
// The hardened runtime is not enabled. Note that kSecCodeSignatureRuntime
// == CS_RUNTIME.
return true;
}
// The hardened runtime is enabled. From this point on, early returns must
// return false, indicating that MAP_JIT is not to be used. Its an error
// (EINVAL) to use MAP_JIT with the hardened runtime unless the JIT
// entitlement is specified.
base::apple::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
SecTaskCopyValueForEntitlement(
task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
if (!jit_entitlement) {
return false;
}
return base::apple::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
kCFBooleanTrue;
}
#elif PA_BUILDFLAG(IS_IOS)
bool UseMapJit() {
return true;
}
#endif // PA_BUILDFLAG(IS_IOS)
} // namespace partition_alloc::internal
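For context, a hedged macOS-only sketch of what a MAP_JIT allocation plus the per-thread W^X toggle looks like in practice (mmap with MAP_JIT, and pthread_jit_write_protect_np, available since macOS 11):

#include <pthread.h>
#include <sys/mman.h>

#include <cstddef>

void* AllocJitRegion(size_t len) {
  // On arm64 macOS, RW+X is only permitted together inside a MAP_JIT region.
  void* p = mmap(nullptr, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  if (p == MAP_FAILED) {
    return nullptr;
  }
  pthread_jit_write_protect_np(0);  // This thread: writable, not executable.
  // ... emit machine code into p ...
  pthread_jit_write_protect_np(1);  // This thread: executable, not writable.
  return p;
}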

View file

@ -6,6 +6,7 @@
#define PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
#include <sys/mman.h>
#include <sys/syscall.h>
#include <algorithm>
#include <atomic>
@ -14,33 +15,18 @@
#include <cstring>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/oom.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/thread_isolation/thread_isolation.h"
#if PA_BUILDFLAG(IS_APPLE)
#include "partition_alloc/partition_alloc_base/apple/foundation_util.h"
#if PA_BUILDFLAG(IS_IOS)
#include "partition_alloc/partition_alloc_base/ios/ios_util.h"
#elif PA_BUILDFLAG(IS_MAC)
#include "partition_alloc/partition_alloc_base/mac/mac_util.h"
#else
#error "Unknown platform"
#endif
#include "partition_alloc/partition_alloc_base/apple/scoped_cftyperef.h"
#include <Availability.h>
#include <Security/Security.h>
#include <mach/mach.h>
#endif
#if PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX)
#include <sys/prctl.h>
#endif
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
#include <sys/resource.h>
#endif
@ -49,114 +35,19 @@
#define MAP_ANONYMOUS MAP_ANON
#endif
#if PA_BUILDFLAG(IS_MAC)
// SecTaskGetCodeSignStatus is marked as unavailable on macOS, although it's
// available on iOS and other Apple operating systems. It is, in fact, present
// on the system since macOS 10.12.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wavailability"
uint32_t SecTaskGetCodeSignStatus(SecTaskRef task) API_AVAILABLE(macos(10.12));
#pragma clang diagnostic pop
#endif // PA_BUILDFLAG(IS_MAC)
namespace partition_alloc::internal {
namespace {
#if defined(LINUX_NAME_REGION)
void NameRegion(void* start, size_t length, PageTag page_tag) {
// Important: All the names should be string literals. As per prctl.h in
// //third_party/android_toolchain/ndk the kernel keeps a pointer to the name
// instead of copying it.
//
// Having the name in .rodata ensures that the pointer remains valid as
// long as the mapping is alive.
const char* name = nullptr;
switch (page_tag) {
case PageTag::kSimulation:
name = "simulation";
break;
case PageTag::kBlinkGC:
name = "blink_gc";
break;
case PageTag::kPartitionAlloc:
name = "partition_alloc";
break;
case PageTag::kChromium:
name = "chromium";
break;
case PageTag::kV8:
name = "v8";
break;
default:
PA_NOTREACHED();
break;
}
// No error checking on purpose, testing only.
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, length, name);
}
void NameRegion(void* start, size_t length, PageTag page_tag);
#endif // defined(LINUX_NAME_REGION)
#if PA_BUILDFLAG(IS_MAC)
#if PA_BUILDFLAG(IS_APPLE)
// Tests whether the version of macOS supports the MAP_JIT flag and if the
// current process is signed with the hardened runtime and the allow-jit
// entitlement, returning whether MAP_JIT should be used to allocate regions
// that will contain JIT-compiled executable code.
bool UseMapJit() {
// Until determining that the hardened runtime is enabled, early returns will
// return true, so that MAP_JIT will be used. This is important on arm64,
// which only allows pages to be simultaneously writable and executable when
// in a region allocated with MAP_JIT, regardless of code signing options. On
// arm64, an attempt to set a non-MAP_JIT page as simultaneously writable and
// executable fails with EPERM. Although this is not enforced on x86_64,
// MAP_JIT is harmless in that case.
base::apple::ScopedCFTypeRef<SecTaskRef> task(
SecTaskCreateFromSelf(kCFAllocatorDefault));
if (!task) {
return true;
}
uint32_t flags = SecTaskGetCodeSignStatus(task);
if (!(flags & kSecCodeSignatureRuntime)) {
// The hardened runtime is not enabled. Note that kSecCodeSignatureRuntime
// == CS_RUNTIME.
return true;
}
// The hardened runtime is enabled. From this point on, early returns must
  // return false, indicating that MAP_JIT is not to be used. It's an error
// (EINVAL) to use MAP_JIT with the hardened runtime unless the JIT
// entitlement is specified.
base::apple::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
SecTaskCopyValueForEntitlement(
task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
if (!jit_entitlement) {
return false;
}
return base::apple::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
kCFBooleanTrue;
}
#elif PA_BUILDFLAG(IS_IOS)
bool UseMapJit() {
// Always enable MAP_JIT in simulator as it is supported unconditionally.
#if TARGET_IPHONE_SIMULATOR
return true;
#else
  // TODO(crbug.com/40255826): Fill this out when the API is
  // available.
return false;
#endif // TARGET_IPHONE_SIMULATOR
}
bool UseMapJit();
#endif // PA_BUILDFLAG(IS_IOS)
} // namespace
// |mmap| uses a nearby address if the hint address is blocked.
constexpr bool kHintIsAdvisory = true;
@ -192,6 +83,13 @@ uintptr_t SystemAllocPagesInternal(uintptr_t hint,
PageAccessibilityConfiguration::kInaccessibleWillJitLater &&
kUseMapJit) {
map_flags |= MAP_JIT;
// iOS devices do not support toggling the page permissions after a MAP_JIT
    // call; they must be set initially. iOS has per-thread W^X state that
// takes precedence over the mapping's permissions for MAP_JIT regions.
// See https://developer.apple.com/forums/thread/672804
#if PA_BUILDFLAG(IS_IOS)
access_flag = PROT_READ | PROT_WRITE | PROT_EXEC;
#endif
}
#endif
@ -299,14 +197,14 @@ void DecommitSystemPagesInternal(
bool change_permissions =
accessibility_disposition == PageAccessibilityDisposition::kRequireUpdate;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// This is not guaranteed, show that we're serious.
//
// More specifically, several callers have had issues with assuming that
// memory is zeroed, this would hopefully make these bugs more visible. We
// don't memset() everything, because ranges can be very large, and doing it
// over the entire range could make Chrome unusable with
// PA_BUILDFLAG(PA_DCHECK_IS_ON).
// PA_BUILDFLAG(DCHECKS_ARE_ON).
//
// Only do it when we are about to change the permissions, since we don't know
// the previous permissions, and cannot restore them.
@ -432,6 +330,17 @@ void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
#endif // PA_BUILDFLAG(IS_APPLE)
}
bool SealSystemPagesInternal(uintptr_t address, size_t length) {
// TODO(sroettger): we either need to ensure that __NR_mseal is defined in the
// headers used by builders or define it ourselves.
#if PA_BUILDFLAG(IS_LINUX) && defined(__NR_mseal)
long ret = syscall(__NR_mseal, address, length, 0);
return ret == 0;
#else
return false;
#endif
}
} // namespace partition_alloc::internal
#endif // PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_POSIX_H_

View file

@ -7,11 +7,11 @@
#include <cstdint>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/oom.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_internal.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
namespace partition_alloc::internal {
@ -238,6 +238,10 @@ void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
}
}
bool SealSystemPagesInternal(uintptr_t address, size_t length) {
return false;
}
} // namespace partition_alloc::internal
#endif // PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_WIN_H_

View file

@ -5,19 +5,21 @@
#include "partition_alloc/partition_address_space.h"
#include <array>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <ostream>
#include <string>
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/compressed_pointer.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_base/files/platform_file.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@ -80,7 +82,7 @@ PA_NOINLINE void HandlePoolAllocFailure() {
} // namespace
PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;
PA_CONSTINIT PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0;
@ -88,9 +90,12 @@ std::ptrdiff_t PartitionAddressSpace::brp_pool_shadow_offset_ = 0;
std::ptrdiff_t PartitionAddressSpace::configurable_pool_shadow_offset_ = 0;
// File descriptors for shared mappings.
int PartitionAddressSpace::regular_pool_fd_ = -1;
int PartitionAddressSpace::brp_pool_fd_ = -1;
int PartitionAddressSpace::configurable_pool_fd_ = -1;
base::PlatformFile PartitionAddressSpace::regular_pool_fd_ =
base::kInvalidPlatformFile;
base::PlatformFile PartitionAddressSpace::brp_pool_fd_ =
base::kInvalidPlatformFile;
base::PlatformFile PartitionAddressSpace::configurable_pool_fd_ =
base::kInvalidPlatformFile;
uintptr_t PartitionAddressSpace::pool_shadow_address_ =
PartitionAddressSpace::kUninitializedPoolBaseAddress;
@ -101,8 +106,7 @@ uintptr_t PartitionAddressSpace::pool_shadow_address_ =
#error Dynamic pool size is only supported on iOS.
#endif
namespace {
bool IsIOSTestProcess() {
bool PartitionAddressSpace::IsIOSTestProcess() {
// On iOS, only applications with the extended virtual addressing entitlement
// can use a large address space. Since Earl Grey test runner apps cannot get
// entitlements, they must use a much smaller pool size. Similarly,
@ -133,28 +137,15 @@ bool IsIOSTestProcess() {
return has_suffix("Runner") || has_suffix("ios_web_view_inttests");
}
} // namespace
PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
return IsIOSTestProcess() ? kRegularPoolSizeForIOSTestProcess
: kRegularPoolSize;
}
PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
return IsIOSTestProcess() ? kBRPPoolSizeForIOSTestProcess : kBRPPoolSize;
}
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
size_t PartitionAddressSpace::RegularPoolShadowSize() {
return (RegularPoolSize() >> kSuperPageShift) << SystemPageShift();
}
size_t PartitionAddressSpace::BRPPoolShadowSize() {
return (BRPPoolSize() >> kSuperPageShift) << SystemPageShift();
size_t PartitionAddressSpace::CorePoolShadowSize() {
return CorePoolSize();
}
size_t PartitionAddressSpace::ConfigurablePoolShadowSize() {
return (kConfigurablePoolMaxSize >> kSuperPageShift) << SystemPageShift();
return kConfigurablePoolMaxSize;
}
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
@ -163,115 +154,72 @@ void PartitionAddressSpace::Init() {
return;
}
const size_t regular_pool_size = RegularPoolSize();
const size_t brp_pool_size = BRPPoolSize();
const size_t core_pool_size = CorePoolSize();
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
  // Gluing core pools (regular & BRP) makes sense only when both pools are of
  // the same size. This is the only way we can check membership in either of
  // the two with a single bitmask operation.
PA_CHECK(regular_pool_size == brp_pool_size);
// TODO(crbug.com/40238514): Support PA_ENABLE_SHADOW_METADATA.
int pools_fd = -1;
size_t glued_pool_sizes = regular_pool_size * 2;
size_t glued_pool_sizes = core_pool_size * 2;
  // Note: the BRP pool must be preceded by a "forbidden zone", which is
  // conveniently provided by the last guard page of the regular pool.
setup_.regular_pool_base_address_ =
AllocPages(glued_pool_sizes, glued_pool_sizes,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, pools_fd);
PageTag::kPartitionAlloc);
#if PA_BUILDFLAG(IS_ANDROID)
  // On Android, the Adreno-GSL library fails to mmap if we take address
  // 0x400000000. Find a different address instead.
if (setup_.regular_pool_base_address_ == 0x400000000) {
uintptr_t new_base_address =
AllocPages(glued_pool_sizes, glued_pool_sizes,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc);
FreePages(setup_.regular_pool_base_address_, glued_pool_sizes);
setup_.regular_pool_base_address_ = new_base_address;
}
#endif // PA_BUILDFLAG(IS_ANDROID)
if (!setup_.regular_pool_base_address_) {
HandlePoolAllocFailure();
}
setup_.brp_pool_base_address_ =
setup_.regular_pool_base_address_ + regular_pool_size;
#else // PA_BUILDFLAG(GLUE_CORE_POOLS)
setup_.regular_pool_base_address_ =
AllocPages(regular_pool_size, regular_pool_size,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc);
if (!setup_.regular_pool_base_address_) {
HandlePoolAllocFailure();
}
// Reserve an extra allocation granularity unit before the BRP pool, but keep
// the pool aligned at BRPPoolSize(). A pointer immediately past an allocation
// is a valid pointer, and having a "forbidden zone" before the BRP pool
// prevents such a pointer from "sneaking into" the pool.
const size_t kForbiddenZoneSize = PageAllocationGranularity();
uintptr_t base_address = AllocPagesWithAlignOffset(
0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
brp_pool_size - kForbiddenZoneSize,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, -1);
if (!base_address) {
HandlePoolAllocFailure();
}
setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#endif // PA_BUILDFLAG(GLUE_CORE_POOLS)
setup_.regular_pool_base_address_ + core_pool_size;
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
// When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
// regular pool, effectively forming one virtual pool of a twice bigger
// size. Adjust the mask appropriately.
setup_.core_pools_base_mask_ = setup_.regular_pool_base_mask_ << 1;
PA_DCHECK(setup_.core_pools_base_mask_ == (setup_.brp_pool_base_mask_ << 1));
#endif
setup_.core_pool_base_mask_ = ~(core_pool_size - 1);
// The BRP pool is placed at the end of the regular pool, effectively forming
// one virtual pool of a twice bigger size. Adjust the mask appropriately.
setup_.glued_pools_base_mask_ = setup_.core_pool_base_mask_ << 1;
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
AddressPoolManager::GetInstance().Add(
kRegularPoolHandle, setup_.regular_pool_base_address_, regular_pool_size);
kRegularPoolHandle, setup_.regular_pool_base_address_, core_pool_size);
AddressPoolManager::GetInstance().Add(
kBRPPoolHandle, setup_.brp_pool_base_address_, brp_pool_size);
kBRPPoolHandle, setup_.brp_pool_base_address_, core_pool_size);
// Sanity check pool alignment.
PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
PA_DCHECK(!(setup_.regular_pool_base_address_ & (core_pool_size - 1)));
PA_DCHECK(!(setup_.brp_pool_base_address_ & (core_pool_size - 1)));
PA_DCHECK(!(setup_.regular_pool_base_address_ & (glued_pool_sizes - 1)));
#endif
// Sanity check pool belonging.
PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
regular_pool_size - 1));
PA_DCHECK(
!IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));
IsInRegularPool(setup_.regular_pool_base_address_ + core_pool_size - 1));
PA_DCHECK(
!IsInRegularPool(setup_.regular_pool_base_address_ + core_pool_size));
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size));
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + core_pool_size - 1));
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + core_pool_size));
PA_DCHECK(!IsInCorePools(setup_.regular_pool_base_address_ - 1));
PA_DCHECK(IsInCorePools(setup_.regular_pool_base_address_));
PA_DCHECK(
IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size - 1));
PA_DCHECK(
IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size));
IsInCorePools(setup_.regular_pool_base_address_ + core_pool_size - 1));
PA_DCHECK(IsInCorePools(setup_.regular_pool_base_address_ + core_pool_size));
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ - 1));
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_));
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size - 1));
PA_DCHECK(!IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size));
#endif // PA_BUILDFLAG(GLUE_CORE_POOLS)
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
// Reserve memory for PCScan quarantine card table.
uintptr_t requested_address = setup_.regular_pool_base_address_;
uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
kRegularPoolHandle, requested_address, kSuperPageSize);
PA_CHECK(requested_address == actual_address)
<< "QuarantineCardTable is required to be allocated at the beginning of "
"the regular pool";
#endif // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ + core_pool_size - 1));
PA_DCHECK(!IsInCorePools(setup_.brp_pool_base_address_ + core_pool_size));
#if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
CompressedPointerBaseGlobal::SetBase(setup_.regular_pool_base_address_);
@ -295,7 +243,7 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
PA_CHECK(pool_base);
PA_CHECK(size <= kConfigurablePoolMaxSize);
PA_CHECK(size >= kConfigurablePoolMinSize);
PA_CHECK(std::has_single_bit(size));
PA_CHECK(base::bits::HasSingleBit(size));
PA_CHECK(pool_base % size == 0);
setup_.configurable_pool_base_address_ = pool_base;
@ -353,18 +301,9 @@ void PartitionAddressSpace::UninitForTesting() {
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
UninitThreadIsolatedPoolForTesting(); // IN-TEST
#endif
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
// The core pools (regular & BRP) were allocated using a single allocation of
// double size.
FreePages(setup_.regular_pool_base_address_, 2 * RegularPoolSize());
#else // PA_BUILDFLAG(GLUE_CORE_POOLS)
FreePages(setup_.regular_pool_base_address_, RegularPoolSize());
// For BRP pool, the allocation region includes a "forbidden zone" before the
// pool.
const size_t kForbiddenZoneSize = PageAllocationGranularity();
FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
BRPPoolSize() + kForbiddenZoneSize);
#endif // PA_BUILDFLAG(GLUE_CORE_POOLS)
FreePages(setup_.regular_pool_base_address_, 2 * CorePoolSize());
// Do not free pages for the configurable pool, because its memory is owned
// by someone else, but deinitialize it nonetheless.
setup_.regular_pool_base_address_ = kUninitializedPoolBaseAddress;
@ -401,7 +340,7 @@ void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
void PartitionAddressSpace::UninitThreadIsolatedPoolForTesting() {
if (IsThreadIsolatedPoolInitialized()) {
UnprotectThreadIsolatedGlobals();
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
ThreadIsolationSettings::settings.enabled = false;
#endif
@ -418,9 +357,10 @@ void PartitionAddressSpace::UninitThreadIsolatedPoolForTesting() {
namespace {
int CreateAnonymousFileForMapping([[maybe_unused]] const char* name,
[[maybe_unused]] size_t size) {
int fd = -1;
base::PlatformFile CreateAnonymousFileForMapping(
[[maybe_unused]] const char* name,
[[maybe_unused]] size_t size) {
base::PlatformFile fd = base::kInvalidPlatformFile;
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
// TODO(crbug.com/40238514): if memfd_secret() is available, try
// memfd_secret() first.
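A hedged sketch of what such an anonymous memory file amounts to with plain memfd_create (glibc 2.27+); per the TODO above, memfd_secret() would be a drop-in alternative on kernels that support it:

#include <sys/mman.h>
#include <unistd.h>

#include <cstddef>

// Assumes _GNU_SOURCE (defined by default under libstdc++) for memfd_create.
int CreateAnonFile(const char* name, size_t size) {
  int fd = memfd_create(name, MFD_CLOEXEC);
  if (fd < 0) {
    return -1;
  }
  if (ftruncate(fd, static_cast<off_t>(size)) != 0) {
    close(fd);
    return -1;
  }
  return fd;
}

// The fd can then back several mappings that share the same pages, e.g. a
// writable view and a read-only view:
//   void* w = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
//   void* r = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);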
@ -441,7 +381,7 @@ void PartitionAddressSpace::InitShadowMetadata(PoolHandleMask mask) {
// Reserve 1 address space for all pools.
const size_t shadow_pool_size =
std::max(ConfigurablePoolShadowSize(),
std::max(RegularPoolShadowSize(), BRPPoolShadowSize()));
std::max(CorePoolShadowSize(), CorePoolShadowSize()));
// Reserve virtual address space for the shadow pool.
uintptr_t pool_shadow_address =
@ -458,7 +398,7 @@ void PartitionAddressSpace::InitShadowMetadata(PoolHandleMask mask) {
// Set up a memory file for the given pool, and init |offset|.
if (ContainsFlags(mask, PoolHandleMask::kConfigurable)) {
if (configurable_pool_fd_ == -1) {
if (configurable_pool_fd_ == base::kInvalidPlatformFile) {
PA_DCHECK(pool_shadow_address_);
PA_DCHECK(configurable_pool_shadow_offset_ == 0);
configurable_pool_fd_ = CreateAnonymousFileForMapping(
@ -469,22 +409,22 @@ void PartitionAddressSpace::InitShadowMetadata(PoolHandleMask mask) {
}
}
if (ContainsFlags(mask, PoolHandleMask::kBRP)) {
if (brp_pool_fd_ == -1) {
if (brp_pool_fd_ == base::kInvalidPlatformFile) {
PA_DCHECK(pool_shadow_address_);
PA_DCHECK(brp_pool_shadow_offset_ == 0);
brp_pool_fd_ =
CreateAnonymousFileForMapping("brp_pool_shadow", BRPPoolShadowSize());
brp_pool_fd_ = CreateAnonymousFileForMapping("brp_pool_shadow",
CorePoolShadowSize());
brp_pool_shadow_offset_ =
pool_shadow_address_ - BRPPoolBase() +
SystemPageSize() * kSystemPageOffsetOfBRPPoolShadow;
}
}
if (ContainsFlags(mask, PoolHandleMask::kRegular)) {
if (regular_pool_fd_ == -1) {
if (regular_pool_fd_ == base::kInvalidPlatformFile) {
PA_DCHECK(pool_shadow_address_);
PA_DCHECK(regular_pool_shadow_offset_ == 0);
regular_pool_fd_ = CreateAnonymousFileForMapping("regular_pool_shadow",
RegularPoolShadowSize());
CorePoolShadowSize());
regular_pool_shadow_offset_ =
pool_shadow_address_ - RegularPoolBase() +
SystemPageSize() * kSystemPageOffsetOfRegularPoolShadow;
@ -499,7 +439,7 @@ void PartitionAddressSpace::MapMetadata(uintptr_t super_page,
PA_DCHECK(pool_shadow_address_);
PA_DCHECK(0u == (super_page & kSuperPageOffsetMask));
std::ptrdiff_t offset;
int pool_fd = -1;
base::PlatformFile pool_fd = base::kInvalidPlatformFile;
uintptr_t base_address;
if (IsInRegularPool(super_page)) {
@ -530,7 +470,7 @@ void PartitionAddressSpace::MapMetadata(uintptr_t super_page,
PA_CHECK(ptr != MAP_FAILED);
PA_CHECK(ptr == reinterpret_cast<void*>(writable_metadata));
if (PA_UNLIKELY(copy_metadata)) {
if (copy_metadata) [[unlikely]] {
// Copy the metadata from the private and copy-on-write page to
// the shared page. (=update the memory file)
memcpy(reinterpret_cast<void*>(writable_metadata),
@ -558,13 +498,13 @@ void PartitionAddressSpace::UnmapShadowMetadata(uintptr_t super_page,
switch (pool) {
case kRegularPoolHandle:
PA_DCHECK(RegularPoolBase() <= super_page);
PA_DCHECK((super_page - RegularPoolBase()) < RegularPoolSize());
PA_DCHECK((super_page - RegularPoolBase()) < CorePoolSize());
PA_DCHECK(IsShadowMetadataEnabled(kRegularPoolHandle));
offset = regular_pool_shadow_offset_;
break;
case kBRPPoolHandle:
PA_DCHECK(BRPPoolBase() <= super_page);
PA_DCHECK((super_page - BRPPoolBase()) < BRPPoolSize());
PA_DCHECK((super_page - BRPPoolBase()) < CorePoolSize());
PA_DCHECK(IsShadowMetadataEnabled(kBRPPoolHandle));
offset = brp_pool_shadow_offset_;
break;

View file

@ -5,17 +5,18 @@
#ifndef PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
#define PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
#include <bit>
#include <cstddef>
#include <utility>
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/files/platform_file.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@ -46,18 +47,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
};
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
PA_ALWAYS_INLINE static uintptr_t BRPPoolBaseMask() {
return setup_.brp_pool_base_mask_;
}
PA_ALWAYS_INLINE static uintptr_t RegularPoolBaseMask() {
return setup_.regular_pool_base_mask_;
PA_ALWAYS_INLINE static uintptr_t CorePoolBaseMask() {
return setup_.core_pool_base_mask_;
}
#else
PA_ALWAYS_INLINE static constexpr uintptr_t BRPPoolBaseMask() {
return kBRPPoolBaseMask;
}
PA_ALWAYS_INLINE static constexpr uintptr_t RegularPoolBaseMask() {
return kRegularPoolBaseMask;
PA_ALWAYS_INLINE static constexpr uintptr_t CorePoolBaseMask() {
return kCorePoolBaseMask;
}
#endif
@ -73,13 +68,13 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
if (IsInBRPPool(address)) {
pool = kBRPPoolHandle;
base = setup_.brp_pool_base_address_;
base_mask = BRPPoolBaseMask();
base_mask = CorePoolBaseMask();
} else
#endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (IsInRegularPool(address)) {
pool = kRegularPoolHandle;
base = setup_.regular_pool_base_address_;
base_mask = RegularPoolBaseMask();
base_mask = CorePoolBaseMask();
} else if (IsInConfigurablePool(address)) {
PA_DCHECK(IsConfigurablePoolInitialized());
pool = kConfigurablePoolHandle;
@ -150,9 +145,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInRegularPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
const uintptr_t regular_pool_base_mask = setup_.core_pool_base_mask_;
#else
constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
constexpr uintptr_t regular_pool_base_mask = kCorePoolBaseMask;
#endif
return (address & regular_pool_base_mask) ==
setup_.regular_pool_base_address_;
@ -165,34 +160,29 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInBRPPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
const uintptr_t brp_pool_base_mask = setup_.core_pool_base_mask_;
#else
constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
constexpr uintptr_t brp_pool_base_mask = kCorePoolBaseMask;
#endif
return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
}
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
PA_ALWAYS_INLINE static uintptr_t BRPPoolBase() {
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
return RegularPoolBase() + RegularPoolSize();
#else
return setup_.brp_pool_base_address_;
#endif // PA_BUILDFLAG(GLUE_CORE_POOLS)
return RegularPoolBase() + CorePoolSize();
}
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
// Checks whether the address belongs to either regular or BRP pool.
// Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInCorePools(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
const uintptr_t core_pools_base_mask = setup_.glued_pools_base_mask_;
#else
// When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
// regular pool, effectively forming one virtual pool of a twice bigger
// size. Adjust the mask appropriately.
constexpr uintptr_t core_pools_base_mask = kRegularPoolBaseMask << 1;
// The BRP pool is placed at the end of the regular pool, effectively
// forming one virtual pool of a twice bigger size. Adjust the mask
// appropriately.
constexpr uintptr_t core_pools_base_mask = kCorePoolBaseMask << 1;
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
bool ret =
(address & core_pools_base_mask) == setup_.regular_pool_base_address_;
@ -200,15 +190,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return ret;
}
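
The shifted mask above is the whole trick behind the glued-pools check: clearing one more low bit makes a single compare cover two equally sized, adjacent pools. A toy illustration, assuming (as the code does) that the regular pool base is aligned to twice the pool size and the BRP pool sits immediately after it; all constants here are invented:

```cpp
#include <cstdint>

constexpr uintptr_t kPoolSize = 1 << 20;                 // 1 MiB per pool (toy value)
constexpr uintptr_t kPoolBaseMask = ~(kPoolSize - 1);
constexpr uintptr_t kRegularBase = 0x40000000;           // aligned to 2 * kPoolSize
constexpr uintptr_t kBrpBase = kRegularBase + kPoolSize; // BRP glued right after

// Shifting the base mask left by one clears one more low bit, so the
// compare now ignores offsets up to 2 * kPoolSize: one check, two pools.
constexpr uintptr_t kGluedMask = kPoolBaseMask << 1;

constexpr bool IsInGluedPools(uintptr_t a) {
  return (a & kGluedMask) == kRegularBase;
}

static_assert(IsInGluedPools(kRegularBase));
static_assert(IsInGluedPools(kBrpBase));                 // also covers the BRP pool
static_assert(IsInGluedPools(kBrpBase + kPoolSize - 1)); // last byte of BRP pool
static_assert(!IsInGluedPools(kRegularBase - 1));
static_assert(!IsInGluedPools(kBrpBase + kPoolSize));    // past both pools

int main() { return 0; }
```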
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
PA_ALWAYS_INLINE static size_t CorePoolsSize() {
return RegularPoolSize() * 2;
}
PA_ALWAYS_INLINE static size_t CorePoolsSize() { return CorePoolSize() * 2; }
#else
PA_ALWAYS_INLINE static constexpr size_t CorePoolsSize() {
return RegularPoolSize() * 2;
return CorePoolSize() * 2;
}
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#endif // PA_BUILDFLAG(GLUE_CORE_POOLS)
PA_ALWAYS_INLINE static uintptr_t OffsetInBRPPool(uintptr_t address) {
PA_DCHECK(IsInBRPPool(address));
@ -235,15 +222,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
PA_ALWAYS_INLINE static bool IsShadowMetadataEnabledOnRegularPool() {
return regular_pool_fd_ != -1;
return regular_pool_fd_ != base::kInvalidPlatformFile;
}
PA_ALWAYS_INLINE static bool IsShadowMetadataEnabledOnBRPPool() {
return brp_pool_fd_ != -1;
return brp_pool_fd_ != base::kInvalidPlatformFile;
}
PA_ALWAYS_INLINE static bool IsShadowMetadataEnabledOnConfigurablePool() {
return configurable_pool_fd_ != -1;
return configurable_pool_fd_ != base::kInvalidPlatformFile;
}
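
The change from a literal -1 to base::kInvalidPlatformFile matters because the "no file" sentinel is platform-dependent: in Chromium's base, PlatformFile is an int file descriptor on POSIX but a HANDLE on Windows. A simplified sketch of the idea (stand-in types only; the real definitions live in base/files/platform_file.h):

```cpp
#include <cstdio>

// Simplified stand-ins for base::PlatformFile / base::kInvalidPlatformFile.
#if defined(_WIN32)
using PlatformFile = void*;                            // HANDLE on Windows
const PlatformFile kInvalidPlatformFile =
    (void*)(long long)-1;                              // INVALID_HANDLE_VALUE
#else
using PlatformFile = int;                              // a file descriptor on POSIX
constexpr PlatformFile kInvalidPlatformFile = -1;
#endif

static PlatformFile g_pool_fd = kInvalidPlatformFile;

// Comparing against the named sentinel works on every platform, while a
// hard-coded `!= -1` only happens to be right for POSIX fds.
bool IsShadowMetadataEnabled() {
  return g_pool_fd != kInvalidPlatformFile;
}

int main() {
  std::printf("enabled: %d\n", IsShadowMetadataEnabled());  // 0 until a file is set
  return 0;
}
```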
PA_ALWAYS_INLINE static bool IsShadowMetadataEnabled(pool_handle pool) {
@ -290,8 +277,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static constexpr size_t kSystemPageOffsetOfBRPPoolShadow = 2u;
static constexpr size_t kSystemPageOffsetOfConfigurablePoolShadow = 4u;
static size_t RegularPoolShadowSize();
static size_t BRPPoolShadowSize();
static size_t CorePoolShadowSize();
static size_t ConfigurablePoolShadowSize();
PA_ALWAYS_INLINE static std::ptrdiff_t RegularPoolShadowOffset() {
@ -315,20 +301,22 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
"because the test process cannot use an extended virtual address space. "
"Temporarily disable ShadowMetadata feature on iOS");
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Check whether the given |ptr| points to an address inside the address space
// reserved for the regular and brp shadow. However, a |true| result doesn't
// mean the given |ptr| is valid, because we don't use the entire address
// space for the shadow: only 2 * SystemPageSize() / kSuperPageSize (%)
// of the space. See PoolShadowOffset().
// of the space.
//
// TODO(crbug.com/40238514) This is an unused function. Start using it in
// tests and/or in production code.
PA_ALWAYS_INLINE static bool IsInPoolShadow(const void* ptr) {
uintptr_t ptr_as_uintptr = reinterpret_cast<uintptr_t>(ptr);
return (pool_shadow_address_ <= ptr_as_uintptr &&
(ptr_as_uintptr < pool_shadow_address_ + RegularPoolSize() ||
ptr_as_uintptr < pool_shadow_address_ + BRPPoolSize() ||
(ptr_as_uintptr < pool_shadow_address_ + CorePoolSize() ||
ptr_as_uintptr < pool_shadow_address_ + kConfigurablePoolMaxSize));
}
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
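
As the comment says, a |true| from IsInPoolShadow() is only a coarse reservation-range test, not a validity proof. The sketch below (invented constants) mirrors its shape and shows that OR-ing the two upper bounds is equivalent to comparing against the larger one:

```cpp
#include <algorithm>
#include <cstdint>

// Toy reservation: a shadow region starting at a fixed, hypothetical base.
constexpr uintptr_t kShadowBase = 0x70000000;
constexpr uintptr_t kCorePoolSize = 1 << 20;             // 1 MiB (toy)
constexpr uintptr_t kConfigurablePoolMaxSize = 2 << 20;  // 2 MiB (toy)

// Mirrors the shape of IsInPoolShadow(): an address below *either* upper
// bound passes, which is the same as comparing against the larger bound.
constexpr bool IsInShadow(uintptr_t p) {
  return kShadowBase <= p && (p < kShadowBase + kCorePoolSize ||
                              p < kShadowBase + kConfigurablePoolMaxSize);
}

static_assert(IsInShadow(kShadowBase));
static_assert(IsInShadow(kShadowBase + kCorePoolSize));  // under the larger bound
static_assert(!IsInShadow(kShadowBase + kConfigurablePoolMaxSize));
static_assert(IsInShadow(kShadowBase + 42) ==
              (kShadowBase + 42 <
               kShadowBase + std::max(kCorePoolSize, kConfigurablePoolMaxSize)));

int main() { return 0; }
```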
static void InitShadowMetadata(PoolHandleMask pool);
static void MapMetadata(uintptr_t super_page, bool copy_metadata);
@ -343,15 +331,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
private:
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
PA_ALWAYS_INLINE static size_t RegularPoolSize();
PA_ALWAYS_INLINE static size_t BRPPoolSize();
static bool IsIOSTestProcess();
PA_ALWAYS_INLINE static size_t CorePoolSize() {
return IsIOSTestProcess() ? kCorePoolSizeForIOSTestProcess : kCorePoolSize;
}
#else
// The pool sizes should be as large as the maximum whenever possible.
PA_ALWAYS_INLINE static constexpr size_t RegularPoolSize() {
return kRegularPoolSize;
}
PA_ALWAYS_INLINE static constexpr size_t BRPPoolSize() {
return kBRPPoolSize;
PA_ALWAYS_INLINE static constexpr size_t CorePoolSize() {
return kCorePoolSize;
}
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
@ -380,19 +368,17 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// certain PA allocations must be located inside a given virtual address
// region. One use case for this Pool is V8 Sandbox, which requires that
// ArrayBuffers be located inside of it.
static constexpr size_t kRegularPoolSize = kPoolMaxSize;
static constexpr size_t kBRPPoolSize = kPoolMaxSize;
static_assert(std::has_single_bit(kRegularPoolSize));
static_assert(std::has_single_bit(kBRPPoolSize));
static constexpr size_t kCorePoolSize = kPoolMaxSize;
static_assert(base::bits::HasSingleBit(kCorePoolSize));
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
static constexpr size_t kThreadIsolatedPoolSize = kGiB / 4;
static_assert(std::has_single_bit(kThreadIsolatedPoolSize));
static_assert(base::bits::HasSingleBit(kThreadIsolatedPoolSize));
#endif
static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
static_assert(std::has_single_bit(kConfigurablePoolMaxSize));
static_assert(std::has_single_bit(kConfigurablePoolMinSize));
static_assert(base::bits::HasSingleBit(kConfigurablePoolMaxSize));
static_assert(base::bits::HasSingleBit(kConfigurablePoolMinSize));
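
The power-of-two requirement enforced by these static_asserts is the usual single-bit test. Presumably base::bits::HasSingleBit computes the same predicate as std::has_single_bit, which this diff swaps out; a quick sketch of the equivalence using the classic `x & (x - 1)` formulation:

```cpp
#include <bit>
#include <cstddef>

// A power-of-two check equivalent to std::has_single_bit(), written the
// pre-C++20 way; base::bits::HasSingleBit is assumed to behave like this.
constexpr bool HasSingleBit(std::size_t x) {
  return x != 0 && (x & (x - 1)) == 0;
}

static_assert(HasSingleBit(std::size_t{1} << 30) ==
              std::has_single_bit(std::size_t{1} << 30));
static_assert(!HasSingleBit(0));           // zero has no bits set
static_assert(HasSingleBit(4096));         // a typical page size
static_assert(!HasSingleBit(4096 + 512));  // not a power of two
static_assert(std::has_single_bit(4096u));

int main() { return 0; }
```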
#if PA_BUILDFLAG(IS_IOS)
@ -403,22 +389,16 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// We can't afford pool sizes as large as kPoolMaxSize in iOS EarlGrey tests,
// since the test process cannot use an extended virtual address space (see
// crbug.com/1250788).
static constexpr size_t kRegularPoolSizeForIOSTestProcess = kGiB / 4;
static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
static_assert(std::has_single_bit(kRegularPoolSizeForIOSTestProcess));
static_assert(std::has_single_bit(kBRPPoolSizeForIOSTestProcess));
static constexpr size_t kCorePoolSizeForIOSTestProcess = kGiB / 4;
static_assert(kCorePoolSizeForIOSTestProcess < kCorePoolSize);
static_assert(base::bits::HasSingleBit(kCorePoolSizeForIOSTestProcess));
#endif // PA_BUILDFLAG(IS_IOS)
#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
// Masks used to easily determine whether an address belongs to a pool.
static constexpr uintptr_t kRegularPoolOffsetMask =
static_cast<uintptr_t>(kRegularPoolSize) - 1;
static constexpr uintptr_t kRegularPoolBaseMask = ~kRegularPoolOffsetMask;
static constexpr uintptr_t kBRPPoolOffsetMask =
static_cast<uintptr_t>(kBRPPoolSize) - 1;
static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
static constexpr uintptr_t kCorePoolOffsetMask =
static_cast<uintptr_t>(kCorePoolSize) - 1;
static constexpr uintptr_t kCorePoolBaseMask = ~kCorePoolOffsetMask;
#endif // !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
@ -448,11 +428,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
kUninitializedPoolBaseAddress;
#endif
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
uintptr_t regular_pool_base_mask_ = 0;
uintptr_t brp_pool_base_mask_ = 0;
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
uintptr_t core_pools_base_mask_ = 0;
#endif
uintptr_t core_pool_base_mask_ = 0;
uintptr_t glued_pools_base_mask_ = 0;
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
uintptr_t configurable_pool_base_mask_ = 0;
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
@ -472,16 +449,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// These are write-once fields, frequently accessed thereafter. Make sure they
// don't share a cacheline with other, potentially writable data, through
// alignment and padding.
static PoolSetup setup_ PA_CONSTINIT;
PA_CONSTINIT static PoolSetup setup_;
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
static std::ptrdiff_t regular_pool_shadow_offset_;
static std::ptrdiff_t brp_pool_shadow_offset_;
static std::ptrdiff_t configurable_pool_shadow_offset_;
// TODO(crbug.com/40238514): Use platform file handles instead of |int|.
static int regular_pool_fd_;
static int brp_pool_fd_;
static int configurable_pool_fd_;
static base::PlatformFile regular_pool_fd_;
static base::PlatformFile brp_pool_fd_;
static base::PlatformFile configurable_pool_fd_;
static uintptr_t pool_shadow_address_;
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
@ -513,10 +489,8 @@ PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
#if !PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
#endif
return internal::PartitionAddressSpace::IsInRegularPool(address)
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|| internal::PartitionAddressSpace::IsInBRPPool(address)
#endif
return internal::PartitionAddressSpace::IsInCorePools(address)
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
|| internal::PartitionAddressSpace::IsInThreadIsolatedPool(address)
#endif
@ -533,13 +507,11 @@ PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
return internal::PartitionAddressSpace::IsInBRPPool(address);
}
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
// Checks whether the address belongs to either regular or BRP pool.
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocCorePools(uintptr_t address) {
return internal::PartitionAddressSpace::IsInCorePools(address);
}
#endif // PA_BUILDFLAG(GLUE_CORE_POOLS)
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(


@ -9,9 +9,9 @@
#include <cstring>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/in_slot_metadata.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/random.h"
#include "partition_alloc/tagging.h"
@ -57,7 +57,7 @@ PA_ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
#pragma optimize("", on)
#endif
#if PA_BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
#if PA_BUILDFLAG(EXPENSIVE_DCHECKS_ARE_ON)
// Used to memset() memory for debugging purposes only.
PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
// Only set the first 512kiB of the allocation. This is enough to detect uses
@ -65,21 +65,21 @@ PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
// faster. Note that for direct-mapped allocations, memory is decommitted at
// free() time, so use of freed memory cannot happen.
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION) && PA_BUILDFLAG(ENABLE_PKEYS)
LiftThreadIsolationScope lift_thread_isolation_restrictions;
#endif
size_t size_to_memset = std::min(size, size_t{1} << 19);
memset(ptr, value, size_to_memset);
}
#endif // PA_BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
#endif // PA_BUILDFLAG(EXPENSIVE_DCHECKS_ARE_ON)
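
For reference, size_t{1} << 19 is 2^19 = 524,288 bytes, i.e. the 512 KiB cap mentioned in the comment; clamping with std::min keeps the debug poisoning cheap for huge allocations while still covering the region a buggy caller is most likely to touch. A standalone sketch of the same pattern (names invented):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <vector>

// Poison at most the first 512 KiB of a freed allocation, like DebugMemset.
void PoisonForDebug(void* ptr, unsigned char value, std::size_t size) {
  constexpr std::size_t kMaxPoison = std::size_t{1} << 19;  // 2^19 B = 512 KiB
  std::memset(ptr, value, std::min(size, kMaxPoison));
}

int main() {
  std::vector<unsigned char> big(4 << 20);  // 4 MiB, zero-initialized
  PoisonForDebug(big.data(), 0xCD, big.size());
  // Only the first 512 KiB were touched:
  std::printf("%x %x\n", big[0], big[1 << 19]);  // prints "cd 0"
  return 0;
}
```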
// Returns true if we've hit the end of a random-length period. We don't want to
// invoke `RandomValue` too often, because we call this function in a hot spot
// (`Free`), and `RandomValue` incurs the cost of atomics.
#if !PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if !PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_ALWAYS_INLINE bool RandomPeriod() {
static thread_local uint8_t counter = 0;
if (PA_UNLIKELY(counter == 0)) {
if (counter == 0) [[unlikely]] {
// It's OK to truncate this value.
counter = static_cast<uint8_t>(RandomValue());
}
@ -87,7 +87,7 @@ PA_ALWAYS_INLINE bool RandomPeriod() {
counter--;
return counter == 0;
}
#endif // !PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // !PA_BUILDFLAG(DCHECKS_ARE_ON)
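
The surrounding function is a nice pattern in its own right: one RNG call (which touches atomics) buys up to 256 cheap thread-local decrements, and the C++20 [[unlikely]] attribute that replaces PA_UNLIKELY marks the reseed branch as cold. A self-contained sketch with a stand-in RNG:

```cpp
#include <cstdint>
#include <cstdio>
#include <random>

// Stand-in for PartitionAlloc's RandomValue(); the real one touches atomics,
// which is exactly why it should be called rarely on the Free() fast path.
uint32_t RandomValue() {
  static std::mt19937 rng{std::random_device{}()};
  return rng();
}

// Returns true once per random-length period of 1..256 calls.
bool RandomPeriod() {
  static thread_local uint8_t counter = 0;
  if (counter == 0) [[unlikely]] {
    // Truncating to uint8_t is fine; if the value truncates to 0, the
    // decrement below wraps to 255, yielding a full 256-call period.
    counter = static_cast<uint8_t>(RandomValue());
  }
  counter--;
  return counter == 0;
}

int main() {
  int hits = 0;
  for (int i = 0; i < 100000; i++) hits += RandomPeriod();
  std::printf("hits: %d\n", hits);  // roughly 100000 / 128.5 on average
  return 0;
}
```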
PA_ALWAYS_INLINE uintptr_t ObjectInnerPtr2Addr(const void* ptr) {
return UntagPtr(ptr);


@ -9,10 +9,9 @@
#include <memory>
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_hooks.h"
#include "partition_alloc/partition_direct_map_extent.h"
#include "partition_alloc/partition_oom.h"
@ -20,10 +19,6 @@
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#if PA_BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/pcscan.h"
#endif
namespace partition_alloc {
void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
@ -51,7 +46,12 @@ void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
(internal::PartitionPageSize() & internal::SystemPageOffsetMask()) == 0,
"ok partition page multiple");
static_assert(
sizeof(internal::PartitionPageMetadata) <= internal::kPageMetadataSize,
sizeof(
internal::PartitionPageMetadata<internal::MetadataKind::kReadOnly>) <=
internal::kPageMetadataSize &&
sizeof(internal::PartitionPageMetadata<
internal::MetadataKind::kWritable>) <=
internal::kPageMetadataSize,
"PartitionPage should not be too big");
STATIC_ASSERT_OR_PA_CHECK(
internal::kPageMetadataSize * internal::NumPartitionPagesPerSuperPage() <=


@ -7,8 +7,8 @@
#include <cstdint>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/tagging.h"
namespace partition_alloc {

Some files were not shown because too many files have changed in this diff.