Compare commits

..

No commits in common. "v133.0.6943.49-1" and "master" have entirely different histories.

4500 changed files with 271439 additions and 180981 deletions

View file

@ -1 +1 @@
133.0.6943.49
135.0.7049.38

View file

@ -90,7 +90,7 @@ no_check_targets = [
"//v8:v8_libplatform", # 2 errors
]
# These are the list of GN files that run exec_script. This whitelist exists
# These are the list of GN files that run exec_script. This allowlist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#
@ -145,11 +145,11 @@ no_check_targets = [
# this situation much easier to create. if the build always lists the
# files and passes them to a script, it will always be correct.
exec_script_whitelist =
build_dotfile_settings.exec_script_whitelist +
exec_script_allowlist =
build_dotfile_settings.exec_script_allowlist +
angle_dotfile_settings.exec_script_whitelist +
[
# Whitelist entries for //build should go into
# Allowlist entries for //build should go into
# //build/dotfile_settings.gni instead, so that they can be shared
# with other repos. The entries in this list should be only for files
# in the Chromium repo outside of //build.

View file

@ -118,6 +118,7 @@ Andreas Papacharalampous <andreas@apap04.com>
Andrei Borza <andrei.borza@gmail.com>
Andrei Parvu <andrei.prv@gmail.com>
Andrei Parvu <parvu@adobe.com>
Andrei Volykhin <andrei.volykhin@gmail.com>
Andres Salomon <dilinger@queued.net>
Andreu Botella <andreu@andreubotella.com>
Andrew Boyarshin <andrew.boyarshin@gmail.com>
@ -312,6 +313,7 @@ Daniel Lockyer <thisisdaniellockyer@gmail.com>
Daniel Nishi <dhnishi@gmail.com>
Daniel Platz <daplatz@googlemail.com>
Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
Daniel Richard G. <iskunk@gmail.com>
Daniel Shaulov <dshaulov@ptc.com>
Daniel Trebbien <dtrebbien@gmail.com>
Daniel Waxweiler <daniel.waxweiler@gmail.com>
@ -329,6 +331,7 @@ Darshini KN <kn.darshini@samsung.com>
Dave Vandyke <kzar@kzar.co.uk>
David Benjamin <davidben@mit.edu>
David Brown <develop.david.brown@gmail.com>
David Cernoch <dcernoch@uplandsoftware.com>
David Davidovic <david@davidovic.io>
David Erceg <erceg.david@gmail.com>
David Faden <dfaden@gmail.com>
@ -348,6 +351,7 @@ Dax Kelson <dkelson@gurulabs.com>
Dean Leitersdorf <dean.leitersdorf@gmail.com>
Debadree Chatterjee <debadree333@gmail.com>
Debashish Samantaray <d.samantaray@samsung.com>
Debin Zhang <debinzhang3@gmail.com>
Debug Wang <debugwang@tencent.com>
Deep Shah <deep.shah@samsung.com>
Deepak Dilip Borade <deepak.db@samsung.com>
@ -938,6 +942,7 @@ Martin Persson <mnpn03@gmail.com>
Martin Rogalla <martin@martinrogalla.com>
Martina Kollarova <martina.kollarova@intel.com>
Martino Fontana <tinozzo123@gmail.com>
Marvin Giessing <marvin.giessing@gmail.com>
Masahiro Yado <yado.masa@gmail.com>
Masaru Nishida <msr.i386@gmail.com>
Masayuki Wakizaka <mwakizaka0108@gmail.com>
@ -977,6 +982,7 @@ Md Jobed Hossain <jobed.h@samsung.com>
Md Raiyan bin Sayeed <mrbsayee@uwaterloo.ca>
Md. Sadiqul Amin <sadiqul.amin@samsung.com>
Md Sami Uddin <md.sami@samsung.com>
Mego Tan <tannal2409@gmail.com>
Merajul Arefin <merajularefin@gmail.com>
Micha Hanselmann <micha.hanselmann@gmail.com>
Michael Cirone <mikecirone@gmail.com>
@ -1004,6 +1010,7 @@ Milko Leporis <milko.leporis@imgtec.com>
Milton Chiang <milton.chiang@mediatek.com>
Milutin Smiljanic <msmiljanic.gm@gmail.com>
Minchul Kang <tegongkang@gmail.com>
Ming Lei <minggeorgelei@gmail.com>
Mingeun Park <mindal99546@gmail.com>
Minggang Wang <minggang.wang@intel.com>
Mingmin Xie <melvinxie@gmail.com>
@ -1023,6 +1030,7 @@ Mohamed Mansour <m0.interactive@gmail.com>
Mohamed Hany Youns <mohamedhyouns@gmail.com>
Mohammad Azam <m.azam@samsung.com>
MohammadSabri <mohammad.kh.sabri@exalt.ps>
Mohammed Ashraf <mohammedashraf4599@gmail.com>
Mohammed Wajahat Ali Siddiqui <wajahat.s@samsung.com>
Mohan Reddy <mohan.reddy@samsung.com>
Mohit Bhalla <bhallam@amazon.com>
@ -1304,6 +1312,7 @@ Serhii Matrunchyk <sergiy.matrunchyk@gmail.com>
Seshadri Mahalingam <seshadri.mahalingam@gmail.com>
Seungkyu Lee <zx6658@gmail.com>
Sevan Janiyan <venture37@geeklan.co.uk>
Shaheen Fazim <fazim.pentester@gmail.com>
Shahriar Rostami <shahriar.rostami@gmail.com>
Shail Singhal <shail.s@samsung.com>
Shane Hansen <shanemhansen@gmail.com>
@ -1461,6 +1470,7 @@ Tom Harwood <tfh@skip.org>
Tomas Popela <tomas.popela@gmail.com>
Tomasz Edward Posłuszny <tom@devpeer.net>
Tony Shen <legendmastertony@gmail.com>
Topi Lassila <tolassila@gmail.com>
Torsten Kurbad <google@tk-webart.de>
Toshihito Kikuchi <leamovret@gmail.com>
Toshiaki Tanaka <zokutyou2@gmail.com>
@ -1512,6 +1522,7 @@ Wojciech Bielawski <wojciech.bielawski@gmail.com>
Wang Chen <wangchen20@iscas.ac.cn>
Wang Chen <unicornxw@gmail.com>
Wang Weiwei <wangww@dingdao.com>
Wang Zirui <kingzirvi@gmail.com>
Wangyang Dai <jludwy@gmail.com>
Wanming Lin <wanming.lin@intel.com>
Wei Li <wei.c.li@intel.com>
@ -1646,6 +1657,7 @@ Zsolt Borbely <zsborbely.u-szeged@partner.samsung.com>
迷渡 <justjavac@gmail.com>
郑苏波 (Super Zheng) <superzheng@tencent.com>
一丝 (Yisi) <yiorsi@gmail.com>
林训杰 (XunJie Lin) <wick.linxunjie@gmail.com>
# Please DO NOT APPEND here. See comments at the top of the file.
# END individuals section.

800
src/DEPS

File diff suppressed because it is too large Load diff

View file

@ -25,7 +25,6 @@ import("//build/buildflag_header.gni")
import("//build/config/arm.gni")
import("//build/config/c++/c++.gni")
import("//build/config/cast.gni")
import("//build/config/chromeos/ui_mode.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/cronet/config.gni")
import("//build/config/dcheck_always_on.gni")
@ -41,7 +40,7 @@ import("//build/util/process_version.gni")
import("//build_overrides/build.gni")
if (is_ios) {
# Used to access target_environment.
# Used to access target_environment and target_platform.
import("//build/config/apple/mobile_config.gni")
# Used to access ios_is_app_extension variable definition.
@ -244,8 +243,6 @@ component("base") {
"containers/unique_ptr_adapters.h",
"containers/util.h",
"containers/vector_buffer.h",
"cpu_reduction_experiment.cc",
"cpu_reduction_experiment.h",
"critical_closure.h",
"dcheck_is_on.h",
"debug/alias.cc",
@ -531,6 +528,7 @@ component("base") {
"profiler/periodic_sampling_scheduler.h",
"profiler/profile_builder.h",
"profiler/register_context.h",
"profiler/register_context_registers.h",
"profiler/sample_metadata.cc",
"profiler/sample_metadata.h",
"profiler/sampling_profiler_thread_token.cc",
@ -549,19 +547,19 @@ component("base") {
"profiler/stack_unwind_data.h",
"profiler/suspendable_thread_delegate.h",
"profiler/thread_delegate.h",
"profiler/thread_group_profiler.cc",
"profiler/thread_group_profiler.h",
"profiler/thread_group_profiler_client.h",
"profiler/unwinder.cc",
"profiler/unwinder.h",
"rand_util.cc",
"rand_util.h",
"ranges/algorithm.h",
"ranges/from_range.h",
"ranges/functional.h",
"ranges/ranges.h",
"run_loop.cc",
"run_loop.h",
"sampling_heap_profiler/lock_free_address_hash_set.cc",
"sampling_heap_profiler/lock_free_address_hash_set.h",
"sampling_heap_profiler/lock_free_bloom_filter.cc",
"sampling_heap_profiler/lock_free_bloom_filter.h",
"sampling_heap_profiler/poisson_allocation_sampler.cc",
"sampling_heap_profiler/poisson_allocation_sampler.h",
"sampling_heap_profiler/sampling_heap_profiler.cc",
@ -586,6 +584,7 @@ component("base") {
"strings/abseil_string_number_conversions.cc",
"strings/abseil_string_number_conversions.h",
"strings/cstring_view.h",
"strings/durable_string_view.h",
"strings/escape.cc",
"strings/escape.h",
"strings/latin1_string_conversions.cc",
@ -691,7 +690,6 @@ component("base") {
"task/sequence_manager/enqueue_order_generator.h",
"task/sequence_manager/fence.cc",
"task/sequence_manager/fence.h",
"task/sequence_manager/lazily_deallocated_deque.h",
"task/sequence_manager/sequence_manager.cc",
"task/sequence_manager/sequence_manager.h",
"task/sequence_manager/sequence_manager_impl.cc",
@ -870,6 +868,8 @@ component("base") {
"trace_event/heap_profiler_allocation_context.h",
"trace_event/heap_profiler_allocation_context_tracker.cc",
"trace_event/heap_profiler_allocation_context_tracker.h",
"trace_event/histogram_scope.cc",
"trace_event/histogram_scope.h",
"trace_event/memory_allocator_dump_guid.cc",
"trace_event/memory_allocator_dump_guid.h",
"trace_event/named_trigger.cc",
@ -879,6 +879,7 @@ component("base") {
"traits_bag.h",
"tuple.h",
"types/always_false.h",
"types/cxx23_from_range.h",
"types/cxx23_is_scoped_enum.h",
"types/cxx23_to_underlying.h",
"types/expected.h",
@ -895,6 +896,7 @@ component("base") {
"types/same_as_any.h",
"types/strong_alias.h",
"types/supports_ostream_operator.h",
"types/supports_to_string.h",
"types/to_address.h",
"types/token_type.h",
"types/variant_util.h",
@ -1060,10 +1062,13 @@ component("base") {
"//base/numerics:base_numerics",
"//base/third_party/icu",
"//build:chromecast_buildflags",
"//build:chromeos_buildflags",
"//third_party/abseil-cpp:absl",
]
# TODO(crbug.com/354842935): Remove this dependency once other modules don't
# accidentally (transitively) depend on it anymore.
public_deps += [ "//build:chromeos_buildflags" ]
# Needed for <atomic> if using newer C++ library than sysroot, except if
# building inside the cros_sdk environment - use host_toolchain as a
# more robust check for this.
@ -1183,7 +1188,6 @@ component("base") {
if (is_robolectric) {
# Make jni.h available.
configs += [ "//third_party/jdk" ]
deps += [ ":base_robolectric_jni" ]
}
if (is_robolectric) {
sources += [
@ -1192,6 +1196,8 @@ component("base") {
"android/callback_android.cc",
"android/callback_android.h",
"android/command_line_android.cc",
"android/int_string_callback.cc",
"android/int_string_callback.h",
"android/java_exception_reporter.cc",
"android/java_exception_reporter.h",
"android/jni_android.cc",
@ -1200,6 +1206,8 @@ component("base") {
"android/jni_array.h",
"android/jni_bytebuffer.cc",
"android/jni_bytebuffer.h",
"android/jni_callback.cc",
"android/jni_callback.h",
"android/jni_registrar.cc",
"android/jni_registrar.h",
"android/jni_string.cc",
@ -1207,8 +1215,13 @@ component("base") {
"android/jni_utils.cc",
"android/jni_utils.h",
"android/jni_weak_ref.h",
"android/library_loader/anchor_functions.cc",
"android/library_loader/anchor_functions.h",
"android/library_loader/library_loader_hooks.cc",
"android/library_loader/library_loader_hooks.h",
"android/library_loader/library_prefetcher.cc",
"android/library_loader/library_prefetcher.h",
"android/library_loader/library_prefetcher_hooks.cc",
"android/native_uma_recorder.cc",
"android/scoped_java_ref.h",
"android/token_android.cc",
@ -1881,8 +1894,8 @@ component("base") {
"apple/call_with_eh_frame.cc",
"apple/call_with_eh_frame.h",
"apple/call_with_eh_frame_asm.S",
"apple/dispatch_source_mach.cc",
"apple/dispatch_source_mach.h",
"apple/dispatch_source.cc",
"apple/dispatch_source.h",
"apple/foundation_util.h",
"apple/foundation_util.mm",
"apple/mach_logging.cc",
@ -1913,10 +1926,9 @@ component("base") {
"message_loop/message_pump_apple.h",
"message_loop/message_pump_apple.mm",
"native_library_apple.mm",
"process/process_metrics_apple.cc",
"process/process_metrics_apple.mm",
"profiler/module_cache_apple.cc",
"strings/sys_string_conversions_apple.mm",
"synchronization/waitable_event_apple.cc",
"system/sys_info_apple.mm",
"threading/platform_thread_apple.mm",
"time/time_apple.mm",
@ -1928,6 +1940,10 @@ component("base") {
"Foundation.framework",
"Security.framework",
]
if (!is_ios || !use_blink) {
sources += [ "synchronization/waitable_event_apple.cc" ]
}
}
# Desktop Mac.
@ -1989,7 +2005,7 @@ component("base") {
"process/port_provider_mac.cc",
"process/port_provider_mac.h",
"process/process_handle_mac.cc",
"process/process_info_mac.cc",
"process/process_info_mac.mm",
"process/process_iterator_mac.cc",
"process/process_mac.cc",
"process/process_metrics_mac.cc",
@ -2064,12 +2080,20 @@ component("base") {
"process/process_handle_mac.cc",
"process/process_ios.cc",
"process/process_iterator_mac.cc",
"process/process_mac.cc",
"process/process_posix.cc",
"sync_socket_posix.cc",
"synchronization/waitable_event_watcher_mac.cc",
"synchronization/waitable_event_posix.cc",
"synchronization/waitable_event_watcher_posix.cc",
]
if (target_platform == "tvos") {
# tvOS apps must be single-process. Build a Process version that does
# the bare minimum and does not use Mach ports.
sources += [ "process/process_tvos.cc" ]
} else {
sources += [ "process/process_mac.cc" ]
}
# We include launch_mac on simulator builds so unittests can fork.
if (target_environment == "simulator") {
sources += [
@ -2092,6 +2116,11 @@ component("base") {
"message_loop/message_pump_io_ios.cc",
"message_loop/message_pump_io_ios.h",
]
} else if (use_blink) {
sources += [
"message_loop/message_pump_io_ios_libdispatch.cc",
"message_loop/message_pump_io_ios_libdispatch.h",
]
} else {
sources += [
"message_loop/message_pump_kqueue.cc",
@ -2247,7 +2276,6 @@ component("base") {
if (enable_base_tracing) {
sources += [
"trace_event/auto_open_close_event.h",
"trace_event/builtin_categories.cc",
"trace_event/builtin_categories.h",
"trace_event/heap_profiler.h",
"trace_event/interned_args_helper.cc",
@ -2450,7 +2478,7 @@ buildflag_header("debugging_buildflags") {
buildflag_header("feature_list_buildflags") {
header = "feature_list_buildflags.h"
if (is_chromeos_ash) {
if (is_chromeos) {
flags = [
"ENABLE_BANNED_BASE_FEATURE_PREFIX=true",
"BANNED_BASE_FEATURE_PREFIX=\"CrOSLateBoot\"",
@ -2572,10 +2600,7 @@ static_library("base_static") {
"immediate_crash.h",
]
deps = [
":fuzzing_buildflags",
"//build:chromeos_buildflags",
]
deps = [ ":fuzzing_buildflags" ]
if (is_win) {
sources += [

View file

@ -12,7 +12,6 @@
#include "base/time/time.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
#include "build/chromeos_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_constants.h"
@ -20,8 +19,7 @@
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/thread_cache.h"
namespace base {
namespace features {
namespace base::features {
namespace {
@ -48,7 +46,8 @@ constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
{UnretainedDanglingPtrMode::kDumpWithoutCrashing,
"dump_without_crashing"},
};
const base::FeatureParam<UnretainedDanglingPtrMode>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam = {
&kPartitionAllocUnretainedDanglingPtr,
"mode",
@ -73,7 +72,8 @@ constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogOnly, "log_only"},
};
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr,
"mode",
DanglingPtrMode::kCrash,
@ -83,7 +83,8 @@ constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
{DanglingPtrType::kAll, "all"},
{DanglingPtrType::kCrossTask, "cross_task"},
};
const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
&kPartitionAllocDanglingPtr,
"type",
DanglingPtrType::kAll,
@ -128,7 +129,8 @@ constexpr FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>::Option
kNonRendererStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kAllProcesses,
kAllProcessesStr}};
const base::FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
kPartitionAllocWithAdvancedChecksEnabledProcessesParam{
&kPartitionAllocWithAdvancedChecks, kPAFeatureEnabledProcessesStr,
PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
@ -138,15 +140,17 @@ BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantine",
FEATURE_DISABLED_BY_DEFAULT);
// Scheduler Loop Quarantine's per-branch capacity in bytes.
const base::FeatureParam<int>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBranchCapacity{
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBranchCapacity", 0};
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
const base::FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity{
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBrowserUICapacity", 0};
BASE_FEATURE_PARAM(int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity,
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBrowserUICapacity",
0);
BASE_FEATURE(kPartitionAllocZappingByFreeFlags,
"PartitionAllocZappingByFreeFlags",
@ -155,6 +159,10 @@ BASE_FEATURE(kPartitionAllocZappingByFreeFlags,
BASE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory,
"PartitionAllocEventuallyZeroFreedMemory",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocFewerMemoryRegions,
"PartitionAllocFewerMemoryRegions",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocBackupRefPtr,
@ -174,25 +182,30 @@ constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
{BackupRefPtrEnabledProcesses::kNonRenderer, kNonRendererStr},
{BackupRefPtrEnabledProcesses::kAllProcesses, kAllProcessesStr}};
const base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam{
&kPartitionAllocBackupRefPtr, kPAFeatureEnabledProcessesStr,
BASE_FEATURE_ENUM_PARAM(BackupRefPtrEnabledProcesses,
kBackupRefPtrEnabledProcessesParam,
&kPartitionAllocBackupRefPtr,
kPAFeatureEnabledProcessesStr,
#if PA_BUILDFLAG(IS_MAC) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
BackupRefPtrEnabledProcesses::kNonRenderer,
BackupRefPtrEnabledProcesses::kNonRenderer,
#else
BackupRefPtrEnabledProcesses::kAllProcesses,
BackupRefPtrEnabledProcesses::kAllProcesses,
#endif
&kBackupRefPtrEnabledProcessesOptions};
&kBackupRefPtrEnabledProcessesOptions);
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"},
};
const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
&kPartitionAllocBackupRefPtr, "brp-mode", BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions};
const base::FeatureParam<int> kBackupRefPtrExtraExtrasSizeParam{
BASE_FEATURE_ENUM_PARAM(BackupRefPtrMode,
kBackupRefPtrModeParam,
&kPartitionAllocBackupRefPtr,
"brp-mode",
BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions);
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<int> kBackupRefPtrExtraExtrasSizeParam{
&kPartitionAllocBackupRefPtr, "brp-extra-extras-size", 0};
BASE_FEATURE(kPartitionAllocMemoryTagging,
@ -208,7 +221,8 @@ constexpr FeatureParam<MemtagMode>::Option kMemtagModeOptions[] = {
{MemtagMode::kSync, "sync"},
{MemtagMode::kAsync, "async"}};
const base::FeatureParam<MemtagMode> kMemtagModeParam{
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<MemtagMode> kMemtagModeParam{
&kPartitionAllocMemoryTagging, "memtag-mode",
#if PA_BUILDFLAG(USE_FULL_MTE)
MemtagMode::kSync,
@ -222,7 +236,8 @@ constexpr FeatureParam<RetagMode>::Option kRetagModeOptions[] = {
{RetagMode::kRandom, "random"},
};
const base::FeatureParam<RetagMode> kRetagModeParam{
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<RetagMode> kRetagModeParam{
&kPartitionAllocMemoryTagging, "retag-mode", RetagMode::kIncrement,
&kRetagModeOptions};
@ -232,7 +247,8 @@ constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option
{MemoryTaggingEnabledProcesses::kNonRenderer, kNonRendererStr},
{MemoryTaggingEnabledProcesses::kAllProcesses, kAllProcessesStr}};
const base::FeatureParam<MemoryTaggingEnabledProcesses>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<MemoryTaggingEnabledProcesses>
kMemoryTaggingEnabledProcessesParam{
&kPartitionAllocMemoryTagging, kPAFeatureEnabledProcessesStr,
#if PA_BUILDFLAG(USE_FULL_MTE)
@ -257,13 +273,15 @@ BASE_FEATURE(kPartitionAllocPermissiveMte,
#endif
);
const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true};
const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-extraction-check",
false}; // Not much noise at the moment to enable by default.
const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true};
BASE_FEATURE(kAsanBrpDereferenceCheck,
"AsanBrpDereferenceCheck",
FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE(kAsanBrpExtractionCheck,
"AsanBrpExtractionCheck", // Not much noise at the moment to
FEATURE_DISABLED_BY_DEFAULT); // enable by default.
BASE_FEATURE(kAsanBrpInstantiationCheck,
"AsanBrpInstantiationCheck",
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, switches the bucket distribution to a denser one.
//
@ -277,12 +295,13 @@ BASE_FEATURE(kPartitionAllocUseDenserDistribution,
FEATURE_ENABLED_BY_DEFAULT
#endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
);
const base::FeatureParam<BucketDistributionMode>::Option
const FeatureParam<BucketDistributionMode>::Option
kPartitionAllocBucketDistributionOption[] = {
{BucketDistributionMode::kDefault, "default"},
{BucketDistributionMode::kDenser, "denser"},
};
const base::FeatureParam<BucketDistributionMode>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam{
&kPartitionAllocUseDenserDistribution, "mode",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
@ -295,10 +314,12 @@ const base::FeatureParam<BucketDistributionMode>
BASE_FEATURE(kPartitionAllocMemoryReclaimer,
"PartitionAllocMemoryReclaimer",
FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam<TimeDelta> kPartitionAllocMemoryReclaimerInterval = {
&kPartitionAllocMemoryReclaimer, "interval",
TimeDelta(), // Defaults to zero.
};
BASE_FEATURE_PARAM(TimeDelta,
kPartitionAllocMemoryReclaimerInterval,
&kPartitionAllocMemoryReclaimer,
"interval",
TimeDelta() // Defaults to zero.
);
// Configures whether we set a lower limit for renderers that do not have a main
// frame, similar to the limit that is already done for backgrounded renderers.
@ -311,16 +332,17 @@ BASE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers,
BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
"PartitionAllocStraightenLargerSlotSpanFreeLists",
FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam<
partition_alloc::StraightenLargerSlotSpanFreeListsMode>::Option
kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
const FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>::
Option kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
{partition_alloc::StraightenLargerSlotSpanFreeListsMode::
kOnlyWhenUnprovisioning,
"only-when-unprovisioning"},
{partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways,
"always"},
};
const base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<
partition_alloc::StraightenLargerSlotSpanFreeListsMode>
kPartitionAllocStraightenLargerSlotSpanFreeListsMode = {
&kPartitionAllocStraightenLargerSlotSpanFreeLists,
"mode",
@ -353,9 +375,11 @@ BASE_FEATURE(kPageAllocatorRetryOnCommitFailure,
// The feature: kPartialLowEndModeOnMidRangeDevices is defined in
// //base/features.cc. Since the following feature param is related to
// PartitionAlloc, define the param here.
const FeatureParam<bool> kPartialLowEndModeExcludePartitionAllocSupport{
&kPartialLowEndModeOnMidRangeDevices, "exclude-partition-alloc-support",
false};
BASE_FEATURE_PARAM(bool,
kPartialLowEndModeExcludePartitionAllocSupport,
&kPartialLowEndModeOnMidRangeDevices,
"exclude-partition-alloc-support",
false);
#endif
BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier,
@ -373,19 +397,19 @@ MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplierForAndroid,
1.)
constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
base::TimeDelta time_delta) {
TimeDelta time_delta) {
return partition_alloc::internal::base::Microseconds(
time_delta.InMicroseconds());
}
constexpr base::TimeDelta FromPartitionAllocTimeDelta(
constexpr TimeDelta FromPartitionAllocTimeDelta(
partition_alloc::internal::base::TimeDelta time_delta) {
return base::Microseconds(time_delta.InMicroseconds());
return Microseconds(time_delta.InMicroseconds());
}
BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
"EnableConfigurableThreadCachePurgeInterval",
base::FEATURE_DISABLED_BY_DEFAULT);
FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_TIME_DELTA(
GetThreadCacheMinPurgeIntervalValue,
@ -422,7 +446,7 @@ GetThreadCacheDefaultPurgeInterval() {
BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
"EnableConfigurableThreadCacheMinCachedMemoryForPurging",
base::FEATURE_DISABLED_BY_DEFAULT);
FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_INT(
GetThreadCacheMinCachedMemoryForPurgingBytes,
@ -442,12 +466,6 @@ BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
"PartitionAllocDisableBRPInBufferPartition",
FEATURE_DISABLED_BY_DEFAULT);
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
BASE_FEATURE(kUsePoolOffsetFreelists,
"PartitionAllocUsePoolOffsetFreelists",
base::FEATURE_ENABLED_BY_DEFAULT);
#endif
BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
"PartitionAllocAdjustSizeWhenInForeground",
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
@ -458,12 +476,12 @@ BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
BASE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans,
"PartitionAllocUseSmallSingleSlotSpans",
base::FEATURE_ENABLED_BY_DEFAULT);
FEATURE_ENABLED_BY_DEFAULT);
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
BASE_FEATURE(kPartitionAllocShadowMetadata,
"PartitionAllocShadowMetadata",
base::FEATURE_DISABLED_BY_DEFAULT);
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<ShadowMetadataEnabledProcesses>::Option
kShadowMetadataEnabledProcessesOptions[] = {
@ -471,12 +489,12 @@ constexpr FeatureParam<ShadowMetadataEnabledProcesses>::Option
{ShadowMetadataEnabledProcesses::kAllChildProcesses,
kAllChildProcessesStr}};
const base::FeatureParam<ShadowMetadataEnabledProcesses>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<ShadowMetadataEnabledProcesses>
kShadowMetadataEnabledProcessesParam{
&kPartitionAllocShadowMetadata, kPAFeatureEnabledProcessesStr,
ShadowMetadataEnabledProcesses::kRendererOnly,
&kShadowMetadataEnabledProcessesOptions};
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace features
} // namespace base
} // namespace base::features

View file

@ -15,8 +15,7 @@
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_root.h"
namespace base {
namespace features {
namespace base::features {
namespace internal {
@ -37,13 +36,13 @@ enum class PAFeatureEnabledProcesses {
} // namespace internal
extern const BASE_EXPORT Feature kPartitionAllocUnretainedDanglingPtr;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUnretainedDanglingPtr);
enum class UnretainedDanglingPtrMode {
kCrash,
kDumpWithoutCrashing,
};
extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(UnretainedDanglingPtrMode,
kUnretainedDanglingPtrModeParam);
// See /docs/dangling_ptr.md
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
@ -62,8 +61,7 @@ enum class DanglingPtrMode {
// Note: This will be extended with a single shot DumpWithoutCrashing.
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrMode, kDanglingPtrModeParam);
enum class DanglingPtrType {
// Act on any dangling raw_ptr released after being freed.
kAll, // (default)
@ -74,8 +72,7 @@ enum class DanglingPtrType {
// Note: This will be extended with LongLived
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrType>
kDanglingPtrTypeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrType, kDanglingPtrTypeParam);
using PartitionAllocWithAdvancedChecksEnabledProcesses =
internal::PAFeatureEnabledProcesses;
@ -88,17 +85,19 @@ BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocWithAdvancedChecks);
extern const BASE_EXPORT
base::FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
kPartitionAllocWithAdvancedChecksEnabledProcessesParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
PartitionAllocWithAdvancedChecksEnabledProcesses,
kPartitionAllocWithAdvancedChecksEnabledProcessesParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSchedulerLoopQuarantine);
// Scheduler Loop Quarantine's per-thread capacity in bytes.
extern const BASE_EXPORT base::FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBranchCapacity;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
int,
kPartitionAllocSchedulerLoopQuarantineBranchCapacity);
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
// TODO(https://crbug.com/387470567): Support more thread types.
extern const BASE_EXPORT base::FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags);
@ -106,6 +105,11 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags);
// security guarantee, but to increase the compression ratio of PartitionAlloc's
// fragmented super pages.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory);
// Whether to make PartitionAlloc use fewer memory regions. This matters on
// Linux-based systems, where there is a per-process limit that we hit in some
// cases. See the comment in PartitionBucket::SlotSpanCOmmitedSize() for detail.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocFewerMemoryRegions);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
using BackupRefPtrEnabledProcesses = internal::PAFeatureEnabledProcesses;
@ -143,41 +147,38 @@ enum class BucketDistributionMode : uint8_t {
};
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr);
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
kBackupRefPtrModeParam;
extern const BASE_EXPORT base::FeatureParam<int>
kBackupRefPtrExtraExtrasSizeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrEnabledProcesses,
kBackupRefPtrEnabledProcessesParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrMode,
kBackupRefPtrModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(int,
kBackupRefPtrExtraExtrasSizeParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryTagging);
extern const BASE_EXPORT base::FeatureParam<MemtagMode> kMemtagModeParam;
extern const BASE_EXPORT base::FeatureParam<RetagMode> kRetagModeParam;
extern const BASE_EXPORT base::FeatureParam<MemoryTaggingEnabledProcesses>
kMemoryTaggingEnabledProcessesParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemtagMode, kMemtagModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(RetagMode, kRetagModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemoryTaggingEnabledProcesses,
kMemoryTaggingEnabledProcessesParam);
// Kill switch for memory tagging. Skips any code related to memory tagging when
// enabled.
BASE_EXPORT BASE_DECLARE_FEATURE(kKillPartitionAllocMemoryTagging);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableDereferenceCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableExtractionCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableInstantiationCheckParam;
extern const BASE_EXPORT base::FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpDereferenceCheck);
BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpExtractionCheck);
BASE_EXPORT BASE_DECLARE_FEATURE(kAsanBrpInstantiationCheck);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BucketDistributionMode,
kPartitionAllocBucketDistributionParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer);
extern const BASE_EXPORT base::FeatureParam<TimeDelta>
kPartitionAllocMemoryReclaimerInterval;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(TimeDelta,
kPartitionAllocMemoryReclaimerInterval);
BASE_EXPORT BASE_DECLARE_FEATURE(
kPartitionAllocStraightenLargerSlotSpanFreeLists);
extern const BASE_EXPORT
base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
kPartitionAllocStraightenLargerSlotSpanFreeListsMode;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
partition_alloc::StraightenLargerSlotSpanFreeListsMode,
kPartitionAllocStraightenLargerSlotSpanFreeListsMode);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);
@ -186,8 +187,9 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPageAllocatorRetryOnCommitFailure);
#endif
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
extern const base::FeatureParam<bool>
kPartialLowEndModeExcludePartitionAllocSupport;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
bool,
kPartialLowEndModeExcludePartitionAllocSupport);
#endif
BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier);
@ -208,13 +210,6 @@ BASE_EXPORT int GetThreadCacheMinCachedMemoryForPurgingBytes();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition);
// This feature is additionally gated behind a buildflag because
// pool offset freelists cannot be represented when PartitionAlloc uses
// 32-bit pointers.
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
BASE_EXPORT BASE_DECLARE_FEATURE(kUsePoolOffsetFreelists);
#endif
// When set, partitions use a larger ring buffer and free memory less
// aggressively when in the foreground.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground);
@ -229,11 +224,10 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans);
using ShadowMetadataEnabledProcesses = internal::PAFeatureEnabledProcesses;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocShadowMetadata);
extern const BASE_EXPORT base::FeatureParam<ShadowMetadataEnabledProcesses>
kShadowMetadataEnabledProcessesParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(ShadowMetadataEnabledProcesses,
kShadowMetadataEnabledProcessesParam);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace features
} // namespace base
} // namespace base::features
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

View file

@ -4,6 +4,7 @@
#include "base/allocator/partition_alloc_support.h"
#include <algorithm>
#include <array>
#include <cinttypes>
#include <cstdint>
@ -31,7 +32,6 @@
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/pending_task.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
@ -107,12 +107,9 @@ BootloaderOverride GetBootloaderOverride() {
}
#endif
// When under this experiment avoid running periodic purging or reclaim for the
// first minute after the first attempt. This is based on the insight that
// processes often don't live paste this minute.
static BASE_FEATURE(kDelayFirstPeriodicPAPurgeOrReclaim,
"DelayFirstPeriodicPAPurgeOrReclaim",
base::FEATURE_ENABLED_BY_DEFAULT);
// Avoid running periodic purging or reclaim for the first minute after the
// first attempt. This is based on the insight that processes often don't live
// past this minute.
constexpr base::TimeDelta kFirstPAPurgeOrReclaimDelay = base::Minutes(1);
// This is defined in content/public/common/content_switches.h, which is not
@ -171,6 +168,8 @@ void MemoryReclaimerSupport::Start(scoped_refptr<TaskRunner> task_runner) {
return;
}
task_runner_ = task_runner;
// The caller of the API fully controls where the reclaim runs.
// However, there are a few reasons to recommend that the caller runs
// it on the main thread:
@ -186,13 +185,7 @@ void MemoryReclaimerSupport::Start(scoped_refptr<TaskRunner> task_runner) {
// seconds is useful. Since this is meant to run during idle time only, it is
// a reasonable starting point balancing effectiveness vs cost. See
// crbug.com/942512 for details and experimental results.
TimeDelta delay;
if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
}
task_runner_ = task_runner;
MaybeScheduleTask(delay);
MaybeScheduleTask(kFirstPAPurgeOrReclaimDelay);
}
void MemoryReclaimerSupport::SetForegrounded(bool in_foreground) {
@ -253,12 +246,9 @@ void MemoryReclaimerSupport::MaybeScheduleTask(TimeDelta delay) {
void StartThreadCachePeriodicPurge() {
auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
TimeDelta delay =
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
}
TimeDelta delay = std::max(
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds()),
kFirstPAPurgeOrReclaimDelay);
SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
@ -465,7 +455,7 @@ std::string ExtractDanglingPtrSignature(std::string stacktrace) {
size_t caller_index = 0;
for (size_t i = 0; i < lines.size(); ++i) {
for (const auto& patterns : callee_patterns) {
if (ranges::all_of(patterns, [&](std::string_view pattern) {
if (std::ranges::all_of(patterns, [&](std::string_view pattern) {
return lines[i].find(pattern) != std::string_view::npos;
})) {
caller_index = i + 1;
@ -667,7 +657,7 @@ void CheckDanglingRawPtrBufferEmpty() {
std::vector<std::array<const void*, 32>> stack_traces =
internal::InstanceTracer::GetStackTracesForDanglingRefs(entry->id);
for (const auto& raw_stack_trace : stack_traces) {
CHECK(ranges::is_partitioned(raw_stack_trace, is_frame_ptr_not_null))
CHECK(std::ranges::is_partitioned(raw_stack_trace, is_frame_ptr_not_null))
<< "`raw_stack_trace` is expected to be partitioned: non-null values "
"at the begining followed by `nullptr`s.";
LOG(ERROR) << "Dangling reference from:\n";
@ -675,8 +665,8 @@ void CheckDanglingRawPtrBufferEmpty() {
// This call truncates the `nullptr` tail of the stack
// trace (see the `is_partitioned` CHECK above).
span(raw_stack_trace.begin(),
ranges::partition_point(raw_stack_trace,
is_frame_ptr_not_null)))
std::ranges::partition_point(
raw_stack_trace, is_frame_ptr_not_null)))
<< "\n";
}
#else
@ -880,43 +870,28 @@ PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
CHECK(base::FeatureList::GetInstance());
bool process_affected_by_brp_flag = false;
#if (PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
!PA_BUILDFLAG(FORCE_DISABLE_BACKUP_REF_PTR_FEATURE)) || \
PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr)) {
// No specified process type means this is the Browser process.
process_affected_by_brp_flag = ShouldEnableFeatureOnProcess(
base::features::kBackupRefPtrEnabledProcessesParam.Get(), process_type);
}
#endif // (PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)&&
// !PA_BUILDFLAG(FORCE_DISABLE_BACKUP_REF_PTR_FEATURE)) ||
// PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
const bool enable_brp =
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// kDisabled is equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
process_affected_by_brp_flag &&
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
!PA_BUILDFLAG(FORCE_DISABLE_BACKUP_REF_PTR_FEATURE)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr) &&
base::features::kBackupRefPtrModeParam.Get() !=
base::features::BackupRefPtrMode::kDisabled;
#else
false;
base::features::BackupRefPtrMode::kDisabled &&
ShouldEnableFeatureOnProcess(
base::features::kBackupRefPtrEnabledProcessesParam.Get(),
process_type)) {
return {
.enable_brp = true,
.extra_extras_size = static_cast<size_t>(
base::features::kBackupRefPtrExtraExtrasSizeParam.Get()),
};
}
#endif
size_t extra_extras_size = 0;
if (enable_brp) {
extra_extras_size = static_cast<size_t>(
base::features::kBackupRefPtrExtraExtrasSizeParam.Get());
}
return {
enable_brp,
process_affected_by_brp_flag,
extra_extras_size,
.enable_brp = false,
.extra_extras_size = 0,
};
}
@ -1019,20 +994,26 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
[[maybe_unused]] BrpConfiguration brp_config =
GetBrpConfiguration(process_type);
#if PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (brp_config.process_affected_by_brp_flag) {
base::RawPtrAsanService::GetInstance().Configure(
base::EnableDereferenceCheck(
base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
base::EnableExtractionCheck(
base::features::kBackupRefPtrAsanEnableExtractionCheckParam.Get()),
base::EnableInstantiationCheck(
base::features::kBackupRefPtrAsanEnableInstantiationCheckParam
.Get()));
// Configure ASAN hooks to report the `MiraclePtr` status. This is enabled
// only if BackupRefPtr is normally enabled in the current process for the
// current platform. Note that CastOS and iOS aren't protected by BackupRefPtr
// at the moment, so they are excluded.
#if PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && !PA_BUILDFLAG(IS_CASTOS) && \
!PA_BUILDFLAG(IS_IOS)
if (ShouldEnableFeatureOnProcess(
base::features::kBackupRefPtrEnabledProcessesParam.Get(),
process_type)) {
RawPtrAsanService::GetInstance().Configure(
EnableDereferenceCheck(
FeatureList::IsEnabled(features::kAsanBrpDereferenceCheck)),
EnableExtractionCheck(
FeatureList::IsEnabled(features::kAsanBrpExtractionCheck)),
EnableInstantiationCheck(
FeatureList::IsEnabled(features::kAsanBrpInstantiationCheck)));
} else {
base::RawPtrAsanService::GetInstance().Configure(
base::EnableDereferenceCheck(false), base::EnableExtractionCheck(false),
base::EnableInstantiationCheck(false));
RawPtrAsanService::GetInstance().Configure(EnableDereferenceCheck(false),
EnableExtractionCheck(false),
EnableInstantiationCheck(false));
}
#endif // PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
@ -1059,13 +1040,8 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
base::features::kPartitionAllocZappingByFreeFlags);
const bool eventually_zero_freed_memory = base::FeatureList::IsEnabled(
base::features::kPartitionAllocEventuallyZeroFreedMemory);
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
const bool use_pool_offset_freelists =
base::FeatureList::IsEnabled(base::features::kUsePoolOffsetFreelists);
#else
const bool use_pool_offset_freelists = false;
#endif // PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
const bool fewer_memory_regions = base::FeatureList::IsEnabled(
base::features::kPartitionAllocFewerMemoryRegions);
bool enable_memory_tagging = false;
partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode =
@ -1168,7 +1144,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
scheduler_loop_quarantine_branch_capacity_in_bytes,
allocator_shim::ZappingByFreeFlags(zapping_by_free_flags),
allocator_shim::EventuallyZeroFreedMemory(eventually_zero_freed_memory),
allocator_shim::UsePoolOffsetFreelists(use_pool_offset_freelists),
allocator_shim::FewerMemoryRegions(fewer_memory_regions),
use_small_single_slot_spans);
const uint32_t extras_size = allocator_shim::GetMainPartitionRootExtrasSize();

View file

@ -49,7 +49,6 @@ class BASE_EXPORT PartitionAllocSupport {
public:
struct BrpConfiguration {
bool enable_brp = false;
bool process_affected_by_brp_flag = false;
// TODO(https://crbug.com/371135823): Remove after the investigation.
size_t extra_extras_size = 0;

View file

@ -230,3 +230,20 @@ def CheckCpp17CompatibleKeywords(input_api, output_api):
'%s:%d\nPartitionAlloc disallows C++20 keywords: %s'
% (f.LocalPath(), line_number + 1, keyword)))
return errors
# Check `NDEBUG` is not used inside partition_alloc. We prefer to use the
# buildflags `#if PA_BUILDFLAG(IS_DEBUG)` instead.
def CheckNoNDebug(input_api, output_api):
  """Checks that `NDEBUG` is not used inside PartitionAlloc.

  PartitionAlloc prefers the buildflag `#if PA_BUILDFLAG(IS_DEBUG)` over
  `NDEBUG`, because the buildflag is explicit and configurable per build.

  Args:
    input_api: presubmit InputApi object for the current change.
    output_api: presubmit OutputApi object used to create results.

  Returns:
    A list of PresubmitError results, one per changed line containing NDEBUG.
  """
  sources = lambda affected_file: input_api.FilterSourceFile(
      affected_file,
      files_to_skip=[],
      files_to_check=[_SOURCE_FILE_PATTERN])
  errors = []
  for f in input_api.AffectedSourceFiles(sources):
    for line_number, line in f.ChangedContents():
      if 'NDEBUG' in line:
        # Note: a space was missing between "PartitionAlloc" and "disallows"
        # in the original concatenated message; build it as one string.
        errors.append(
            output_api.PresubmitError(
                '%s:%d\nPartitionAlloc disallows NDEBUG, '
                'use PA_BUILDFLAG(IS_DEBUG) instead' %
                (f.LocalPath(), line_number + 1)))
  return errors

View file

@ -52,7 +52,6 @@ is_nacl = false
is_win = current_os == "win" || current_os == "winuwp"
is_cast_android = false
is_castos = false
is_chromeos_ash = false
is_cronet_build = false
enable_expensive_dchecks = false
dcheck_is_configurable = false

View file

@ -4,6 +4,56 @@
import("//build_overrides/partition_alloc.gni")
# -----------------------------------------------------------------------------
# Note on the use of `xxx_default` variable in partition_alloc.
#
# GN provides default_args() instruction. It is meant to be used by embedders,
# to override the default args declared by the embeddees (e.g. partition_alloc).
# This is the intended way to use GN. It properly interacts with the args.gn
# user's file.
#
# Unfortunately, Chrome and others embedders aren't using it. Instead, they
# expect embeddees to import global '.gni' file from the embedder, e.g.
# `//build_overrides/partition_alloc.gni`. This file sets some `xxx_default`
# variable that will be transferred to the declared args. For instance
# a library would use:
# ```
# import("//build_overrides/library.gni")
# declare_args() {
# xxx = xxx_default
# }
# ```
#
# We don't really want to break embedders when introducing new args. Ideally,
# we would have liked to have defaults for default variables. That would be
# a recursive problem. To resolve it, we sometimes use the `defined(...)`
# instruction to check if the embedder has defined the `xxx_default` variable or
# not.
#
# In general, we should aim to support the embedders that are using GN normally,
# and avoid requiring them to define `xxx_default` in the `//build_overrides`
# files.
# Some embedders use `is_debug`; it can be used to set the default value of
# `partition_alloc_is_debug_default`.
if (!defined(partition_alloc_is_debug_default)) {
if (defined(is_debug)) {
partition_alloc_is_debug_default = is_debug
} else {
partition_alloc_is_debug_default = false
}
}
# Some embedders use `dcheck_always_on`; it can be used to set the default
# value of `partition_alloc_dcheck_always_on_default`.
if (!defined(partition_alloc_dcheck_always_on_default)) {
if (defined(dcheck_always_on)) {
partition_alloc_dcheck_always_on_default = dcheck_always_on
} else {
partition_alloc_dcheck_always_on_default = false
}
}
# PartitionAlloc has limited support for MSVC's cl.exe compiler. It can only
# access the generated "buildflags" and the "raw_ptr" definitions implemented
# with RawPtrNoOpImpl. Everything else is considered not supported.
@ -48,6 +98,12 @@ has_memory_tagging =
current_cpu == "arm64" && is_clang && !is_asan && is_linux && current_os != "openwrt"
declare_args() {
# Debug configuration.
partition_alloc_is_debug = partition_alloc_is_debug_default
# Enable PA_DCHECKs in PartitionAlloc in release mode.
partition_alloc_dcheck_always_on = partition_alloc_dcheck_always_on_default
# Causes all the allocations to be routed via allocator_shim.cc. Usually,
# the allocator shim will, in turn, route them to PartitionAlloc, but
# other allocators are also supported by the allocator shim.
@ -135,8 +191,8 @@ if (use_allocator_shim && is_win) {
# If libcxx_is_shared=false, libc++ is a static library. All libc++ code
# will be run inside the client. The above issue will disappear.
assert(
!is_component_build || (!libcxx_is_shared && !is_debug),
"The allocator shim for the Windows component build needs !libcxx_is_shared && !is_debug.")
!is_component_build || (!libcxx_is_shared && !partition_alloc_is_debug),
"The allocator shim for the Windows component build needs !libcxx_is_shared && !partition_alloc_is_debug.")
}
declare_args() {
@ -172,7 +228,8 @@ declare_args() {
# later verify the pattern remain unchanged to ensure there is no OOB write.
# It comes with performance and memory cost, hence enabled only in debug.
use_partition_cookie =
is_debug || dcheck_always_on || enable_ios_corruption_hardening
partition_alloc_is_debug || partition_alloc_dcheck_always_on ||
enable_ios_corruption_hardening
# This will change partition cookie size to 4B or 8B, whichever equivalent to
# size of InSlotMetadata. This option is useful for InSlotMetadata corruption

View file

@ -35,7 +35,8 @@ enable_pointer_compression =
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`, but avails it
# as a buildflag.
dchecks_are_on = is_debug || dcheck_always_on
partition_alloc_dchecks_are_on =
partition_alloc_is_debug || partition_alloc_dcheck_always_on
# Building PartitionAlloc for Windows component build.
# Currently use build_with_chromium not to affect any third_party code,
@ -122,12 +123,34 @@ source_set("buildflag_macro") {
public_configs = [ ":public_includes" ]
}
# When developers are repeatedly growing a buffer with `realloc`, they are
# expected to request a new size that is larger than the current size by
# some growth factor. This growth factor allows amortizing the cost of
# memcpy. Unfortunately, some nVidia drivers have a bug where they repeatedly
# increase the buffer by only 4144 bytes.
#
# In particular, most Skia Linux bots are using the affected nVidia driver. So
# this flag is used as a workaround for Skia standalone, not in production.
#
# External link:
# https://forums.developer.nvidia.com/t/550-54-14-very-bad-performance-due-to-bunch-of-reallocations-during-glcore-initialization/287027
#
# Internal discussion at @chrome-memory-safety:
# https://groups.google.com/a/google.com/d/msgid/chrome-memory-safety/CAAzos5HrexY2njz2YzWrffTq1xEfkx15GVpSvHUyQED6wBSXvA%40mail.gmail.com?utm_medium=email&utm_source=footer
declare_args() {
partition_alloc_realloc_growth_factor_mitigation = false
}
pa_buildflag_header("buildflags") {
header = "buildflags.h"
flags = [
"ASSERT_CPP_20=$assert_cpp20",
"BACKUP_REF_PTR_EXTRA_OOB_CHECKS=$backup_ref_ptr_extra_oob_checks",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
"DCHECKS_ARE_ON=$partition_alloc_dchecks_are_on",
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT=$enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
@ -142,6 +165,7 @@ pa_buildflag_header("buildflags") {
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"FORCE_DISABLE_BACKUP_REF_PTR_FEATURE=$force_disable_backup_ref_ptr_feature",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
"FORWARD_THROUGH_MALLOC=$forward_through_malloc",
@ -151,10 +175,11 @@ pa_buildflag_header("buildflags") {
"IS_CASTOS=$is_castos",
"IS_CAST_ANDROID=$is_cast_android",
"IS_CHROMEOS=$is_chromeos",
"IS_DEBUG=$is_debug",
"IS_DEBUG=$partition_alloc_is_debug",
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"REALLOC_GROWTH_FACTOR_MITIGATION=$partition_alloc_realloc_growth_factor_mitigation",
"RECORD_ALLOC_INFO=$record_alloc_info",
"SMALLER_PARTITION_COOKIE=$smaller_partition_cookie",
"STACK_SCAN_SUPPORTED=$stack_scan_supported",
@ -170,11 +195,6 @@ pa_buildflag_header("buildflags") {
"USE_RAW_PTR_ASAN_UNOWNED_IMPL=$use_raw_ptr_asan_unowned_impl",
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
"ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT=$enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support",
"DCHECKS_ARE_ON=$dchecks_are_on",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
]
}
@ -330,7 +350,7 @@ if (is_clang_or_gcc) {
}
}
if (enable_pkeys && is_debug) {
if (enable_pkeys && partition_alloc_is_debug) {
config("no_stack_protector") {
cflags = [ "-fno-stack-protector" ]
}
@ -567,7 +587,7 @@ if (is_clang_or_gcc) {
# We want to be able to test pkey mode without access to the default pkey.
# This is incompatible with stack protectors since the TLS won't be pkey-tagged.
if (enable_pkeys && is_debug) {
if (enable_pkeys && partition_alloc_is_debug) {
configs += [ ":no_stack_protector" ]
}
}
@ -1015,7 +1035,7 @@ if (build_with_chromium) {
]
}
if (enable_pkeys && is_debug && !is_component_build) {
if (enable_pkeys && partition_alloc_is_debug && !is_component_build) {
# This test requires RELRO, which is not enabled in component builds.
# Also, require a debug build, since we only disable stack protectors in
# debug builds in PartitionAlloc (see below why it's needed).

View file

@ -37,8 +37,4 @@ void InternalPartitionAllocated::operator delete(void* ptr, std::align_val_t) {
InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
// A deleter for `std::unique_ptr<T>`.
void InternalPartitionDeleter::operator()(void* ptr) const {
InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
} // namespace partition_alloc::internal

View file

@ -48,8 +48,6 @@ T* ConstructAtInternalPartition(Args&&... args) {
}
// Destroy an object on heap in the internal partition.
// TODO(crbug.com/40274826) This is an unused function. Start using it in tests
// and/or in production code.
template <typename T>
void DestroyAtInternalPartition(T* ptr) {
// Destroying an array is not supported.

View file

@ -67,14 +67,13 @@ template <typename T, typename... Args>
T* ConstructAtInternalPartition(Args&&... args);
// Destroy an object on heap in the internal partition.
// TODO(crbug.com/40274826) This is an unused function. Start using it in tests
// and/or in production code.
template <typename T>
void DestroyAtInternalPartition(T* ptr);
// A deleter for `std::unique_ptr<T>`.
struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) InternalPartitionDeleter final {
void operator()(void* ptr) const;
template <typename T>
struct InternalPartitionDeleter final {
void operator()(T* ptr) const { DestroyAtInternalPartition(ptr); }
};
} // namespace partition_alloc::internal

View file

@ -19,7 +19,10 @@ class PA_SCOPED_LOCKABLE
public:
PA_ALWAYS_INLINE explicit CompileTimeConditionalScopedGuard(Lock& lock)
PA_EXCLUSIVE_LOCK_FUNCTION(lock) {}
PA_ALWAYS_INLINE ~CompileTimeConditionalScopedGuard() PA_UNLOCK_FUNCTION() {}
// For some reason, defaulting this causes a thread safety annotation failure.
PA_ALWAYS_INLINE
~CompileTimeConditionalScopedGuard() // NOLINT(modernize-use-equals-default)
PA_UNLOCK_FUNCTION() {}
};
template <>
@ -71,7 +74,12 @@ LightweightQuarantineBranch::LightweightQuarantineBranch(
const LightweightQuarantineBranchConfig& config)
: root_(root),
lock_required_(config.lock_required),
branch_capacity_in_bytes_(config.branch_capacity_in_bytes) {}
branch_capacity_in_bytes_(config.branch_capacity_in_bytes) {
if (lock_required_) {
to_be_freed_working_memory_ =
ConstructAtInternalPartition<ToBeFreedArray>();
}
}
LightweightQuarantineBranch::LightweightQuarantineBranch(
LightweightQuarantineBranch&& b)
@ -82,10 +90,19 @@ LightweightQuarantineBranch::LightweightQuarantineBranch(
branch_capacity_in_bytes_(
b.branch_capacity_in_bytes_.load(std::memory_order_relaxed)) {
b.branch_size_in_bytes_ = 0;
if (lock_required_) {
to_be_freed_working_memory_.store(b.to_be_freed_working_memory_.exchange(
nullptr, std::memory_order_relaxed),
std::memory_order_relaxed);
}
}
LightweightQuarantineBranch::~LightweightQuarantineBranch() {
Purge();
if (lock_required_) {
DestroyAtInternalPartition(to_be_freed_working_memory_.exchange(
nullptr, std::memory_order_relaxed));
}
}
bool LightweightQuarantineBranch::IsQuarantinedForTesting(void* object) {
@ -151,16 +168,26 @@ bool LightweightQuarantineBranch::QuarantineInternal(
const size_t random_index = random_.RandUint32() % slots_.size();
std::swap(slots_[random_index], slots_.back());
} else {
ToBeFreedArray to_be_freed;
std::unique_ptr<ToBeFreedArray, InternalPartitionDeleter<ToBeFreedArray>>
to_be_freed;
size_t num_of_slots = 0;
{
CompileTimeConditionalScopedGuard<lock_required> guard(lock_);
// Borrow the reserved working memory from to_be_freed_working_memory_,
// and set nullptr to it indicating that it's in use.
to_be_freed.reset(to_be_freed_working_memory_.exchange(nullptr));
if (!to_be_freed) {
// When the reserved working memory has already been in use by another
// thread, fall back to allocate another chunk of working memory.
to_be_freed.reset(ConstructAtInternalPartition<ToBeFreedArray>());
}
// Dequarantine some entries as required. Save the objects to be
// deallocated into `to_be_freed`.
PurgeInternalWithDefferedFree(capacity_in_bytes - usable_size,
to_be_freed, num_of_slots);
*to_be_freed, num_of_slots);
// Put the entry onto the list.
branch_size_in_bytes_ += usable_size;
@ -173,7 +200,17 @@ bool LightweightQuarantineBranch::QuarantineInternal(
}
// Actually deallocate the dequarantined objects.
BatchFree(to_be_freed, num_of_slots);
BatchFree(*to_be_freed, num_of_slots);
// Return the possibly-borrowed working memory to
// to_be_freed_working_memory_. It doesn't matter much if it's really
// borrowed or locally-allocated. The important facts are 1) to_be_freed is
// non-null, and 2) to_be_freed_working_memory_ may likely be null (because
// this or another thread has already borrowed it). It's simply good to make
// to_be_freed_working_memory_ non-null whenever possible. Maybe yet another
// thread would be about to borrow the working memory.
to_be_freed.reset(
to_be_freed_working_memory_.exchange(to_be_freed.release()));
}
// Update stats (not locked).

View file

@ -217,6 +217,17 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
// Using `std::atomic` here so that other threads can update this value.
std::atomic_size_t branch_capacity_in_bytes_;
// This working memory is temporarily needed only while dequarantining
// objects in slots_ when lock_required_ is true. However, allocating this
// working memory on stack may cause stack overflow [1]. Plus, it's a non-
// negligible perf penalty to allocate and deallocate this working memory on
// heap only while dequarantining. So, we reserve one chunk of working memory
// on heap during the entire lifetime of this branch object and try to reuse
// this working memory among threads. Only when thread contention occurs, we
// allocate and deallocate another chunk of working memory.
// [1] https://issues.chromium.org/issues/387508217
std::atomic<ToBeFreedArray*> to_be_freed_working_memory_ = nullptr;
friend class LightweightQuarantineRoot;
};

View file

@ -70,9 +70,30 @@ extern PageCharacteristics page_characteristics;
// Ability to name anonymous VMAs is available on some, but not all Linux-based
// systems.
#if PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX)
#if PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX) || \
PA_BUILDFLAG(IS_CHROMEOS)
#include <sys/prctl.h>
#if (PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)) && \
!(defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME))
// The PR_SET_VMA* symbols are originally from
// https://android.googlesource.com/platform/bionic/+/lollipop-release/libc/private/bionic_prctl.h
// and were subsequently added to mainline Linux in Jan 2022, see
// https://github.com/torvalds/linux/commit/9a10064f5625d5572c3626c1516e0bebc6c9fe9b.
//
// Define them to support compiling with older headers.
#if !defined(PR_SET_VMA)
#define PR_SET_VMA 0x53564d41
#endif
#if !defined(PR_SET_VMA_ANON_NAME)
#define PR_SET_VMA_ANON_NAME 0
#endif
#endif // (PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)) &&
// !(defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME))
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
#define LINUX_NAME_REGION 1
#endif

View file

@ -150,7 +150,7 @@ void NameRegion(void* start, size_t length, PageTag page_tag) {
PA_NOTREACHED();
}
// No error checking on purpose, testing only.
// No error checking on purpose, used for debugging only.
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, length, name);
}
@ -200,14 +200,7 @@ bool UseMapJit() {
}
#elif PA_BUILDFLAG(IS_IOS)
bool UseMapJit() {
// Always enable MAP_JIT in simulator as it is supported unconditionally.
#if TARGET_IPHONE_SIMULATOR
return true;
#else
// TODO(crbug.com/40255826): Fill this out when the API it is
// available.
return false;
#endif // TARGET_IPHONE_SIMULATOR
}
#endif // PA_BUILDFLAG(IS_IOS)

View file

@ -83,6 +83,13 @@ uintptr_t SystemAllocPagesInternal(uintptr_t hint,
PageAccessibilityConfiguration::kInaccessibleWillJitLater &&
kUseMapJit) {
map_flags |= MAP_JIT;
// iOS devices do not support toggling the page permissions after a MAP_JIT
// call, they must be set initially. iOS has per-thread W^X state that
// takes precedence over the mapping's permissions for MAP_JIT regions.
// See https://developer.apple.com/forums/thread/672804
#if PA_BUILDFLAG(IS_IOS)
access_flag = PROT_READ | PROT_WRITE | PROT_EXEC;
#endif
}
#endif

View file

@ -65,7 +65,7 @@ PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
// faster. Note that for direct-mapped allocations, memory is decomitted at
// free() time, so freed memory usage cannot happen.
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION) && PA_BUILDFLAG(ENABLE_PKEYS)
LiftThreadIsolationScope lift_thread_isolation_restrictions;
#endif
size_t size_to_memset = std::min(size, size_t{1} << 19);

View file

@ -20,11 +20,12 @@
// This header defines the CHECK, DCHECK, and DPCHECK macros.
//
// CHECK dies with a fatal error if its condition is not true. It is not
// controlled by NDEBUG, so the check will be executed regardless of compilation
// mode.
// controlled by PA_BUILDFLAG(IS_DEBUG), so the check will be executed
// regardless of compilation mode.
//
// DCHECK, the "debug mode" check, is enabled depending on NDEBUG and
// DCHECK_ALWAYS_ON, and its severity depends on DCHECK_IS_CONFIGURABLE.
// DCHECK, the "debug mode" check, is enabled depending on
// PA_BUILDFLAG(IS_DEBUG) and PA_BUILDFLAG(DCHECK_ALWAYS_ON), and its severity
// depends on PA_BUILDFLAG(DCHECK_IS_CONFIGURABLE).
//
// (D)PCHECK is like (D)CHECK, but includes the system error code (c.f.
// perror(3)).
@ -141,9 +142,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) NotImplemented
} // namespace check_error
#if defined(OFFICIAL_BUILD) && !defined(NDEBUG)
#if defined(OFFICIAL_BUILD) && PA_BUILDFLAG(IS_DEBUG)
#error "Debug builds are not expected to be optimized as official builds."
#endif // defined(OFFICIAL_BUILD) && !defined(NDEBUG)
#endif // defined(OFFICIAL_BUILD) && BUILDFLAG(IS_DEBUG)
#if defined(OFFICIAL_BUILD) && !PA_BUILDFLAG(DCHECKS_ARE_ON)

View file

@ -6,6 +6,7 @@
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
// A wrapper around `__has_cpp_attribute()`, which is in C++20 and thus not yet
// available for all targets PA supports (since PA's minimum C++ version is 17).
@ -87,7 +88,7 @@
//
// Since `ALWAYS_INLINE` is performance-oriented but can hamper debugging,
// ignore it in debug mode.
#if defined(NDEBUG)
#if !PA_BUILDFLAG(IS_DEBUG)
#if PA_HAS_CPP_ATTRIBUTE(clang::always_inline)
#define PA_ALWAYS_INLINE [[clang::always_inline]] inline
#elif PA_HAS_CPP_ATTRIBUTE(gnu::always_inline)
@ -95,7 +96,7 @@
#elif defined(PA_COMPILER_MSVC)
#define PA_ALWAYS_INLINE __forceinline
#endif
#endif
#endif // !PA_BUILDFLAG(IS_DEBUG)
#if !defined(PA_ALWAYS_INLINE)
#define PA_ALWAYS_INLINE inline
#endif

View file

@ -9,6 +9,8 @@
#include <limits>
#include <type_traits>
#include "partition_alloc/buildflags.h"
namespace partition_alloc::internal::base::internal {
// The std library doesn't provide a binary max_exponent for integers, however
@ -83,10 +85,10 @@ constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
// TODO(jschuh): Debug builds don't reliably propagate constants, so we restrict
// some accelerated runtime paths to release builds until this can be forced
// with consteval support in C++20 or C++23.
#if defined(NDEBUG)
constexpr bool kEnableAsmCode = true;
#if PA_BUILDFLAG(IS_DEBUG)
inline constexpr bool kEnableAsmCode = false;
#else
constexpr bool kEnableAsmCode = false;
inline constexpr bool kEnableAsmCode = true;
#endif
// Forces a crash, like a NOTREACHED(). Used for numeric boundary errors.

View file

@ -18,6 +18,7 @@
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(IS_POSIX)
#include <cerrno>
@ -31,7 +32,7 @@ template <typename Fn>
inline auto WrapEINTR(Fn fn) {
return [fn](auto&&... args) {
int out = -1;
#if defined(NDEBUG)
#if !PA_BUILDFLAG(IS_DEBUG)
while (true)
#else
for (int retry_count = 0; retry_count < 100; ++retry_count)

View file

@ -7,13 +7,8 @@
#include <cstddef>
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#if !PA_BUILDFLAG(IS_WIN)
#include <unistd.h>
#endif
namespace partition_alloc::internal::base::strings {
// Similar to std::ostringstream, but creates a C string, i.e. nul-terminated

View file

@ -10,8 +10,9 @@
#include <limits>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#if !defined(NDEBUG)
#if PA_BUILDFLAG(IS_DEBUG)
// In debug builds, we use RAW_CHECK() to print useful error messages, if
// SafeSPrintf() is called with broken arguments.
// As our contract promises that SafeSPrintf() can be called from any
@ -41,7 +42,7 @@
if (x) { \
} \
} while (0)
#endif
#endif // PA_BUILDFLAG(IS_DEBUG)
namespace partition_alloc::internal::base::strings {
@ -74,7 +75,7 @@ const char kUpCaseHexDigits[] = "0123456789ABCDEF";
const char kDownCaseHexDigits[] = "0123456789abcdef";
} // namespace
#if defined(NDEBUG)
#if !PA_BUILDFLAG(IS_DEBUG)
// We would like to define kSSizeMax as std::numeric_limits<ssize_t>::max(),
// but C++ doesn't allow us to do that for constants. Instead, we have to
// use careful casting and shifting. We later use a static_assert to
@ -82,7 +83,7 @@ const char kDownCaseHexDigits[] = "0123456789abcdef";
namespace {
const size_t kSSizeMax = kSSizeMaxConst;
}
#else // defined(NDEBUG)
#else // !PA_BUILDFLAG(IS_DEBUG)
// For efficiency, we really need kSSizeMax to be a constant. But for unit
// tests, it should be adjustable. This allows us to verify edge cases without
// having to fill the entire available address space. As a compromise, we make
@ -101,7 +102,7 @@ size_t GetSafeSPrintfSSizeMaxForTest() {
return kSSizeMax;
}
} // namespace internal
#endif // defined(NDEBUG)
#endif // !PA_BUILDFLAG(IS_DEBUG)
namespace {
class Buffer {
@ -111,10 +112,7 @@ class Buffer {
// to ensure that the buffer is at least one byte in size, so that it fits
// the trailing NUL that will be added by the destructor. The buffer also
// must be smaller or equal to kSSizeMax in size.
Buffer(char* buffer, size_t size)
: buffer_(buffer),
size_(size - 1), // Account for trailing NUL byte
count_(0) {
Buffer(char* buffer, size_t size) : buffer_(buffer), size_(size - 1) {
// MSVS2013's standard library doesn't mark max() as constexpr yet. cl.exe
// supports static_cast but doesn't really implement constexpr yet so it doesn't
// complain, but clang does.
@ -276,7 +274,7 @@ class Buffer {
// Number of bytes that would have been emitted to the buffer, if the buffer
// was sufficiently big. This number always excludes the trailing NUL byte
// and it is guaranteed to never grow bigger than kSSizeMax-1.
size_t count_;
size_t count_ = 0;
};
bool Buffer::IToASCII(bool sign,

View file

@ -28,6 +28,10 @@
#include <zircon/process.h>
#endif
#if defined(__MUSL__)
#include "partition_alloc/shim/allocator_shim.h"
#endif
namespace partition_alloc::internal::base {
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
@ -59,8 +63,21 @@ thread_local bool g_is_main_thread = true;
class InitAtFork {
public:
InitAtFork() {
#if !defined(__MUSL__)
#if defined(__MUSL__)
allocator_shim::AllocatorDispatch d =
*allocator_shim::GetAllocatorDispatchChainHeadForTesting();
d.alloc_function = +[](size_t size, void*) -> void* {
// The scratch buffer is sized to match struct atfork_funcs in Musl's pthread_atfork.c.
static char scratch[5 * sizeof(void*)];
return size != sizeof(scratch) ? nullptr : scratch;
};
allocator_shim::InsertAllocatorDispatch(&d);
#endif
pthread_atfork(nullptr, nullptr, internal::InvalidateTidCache);
#if defined(__MUSL__)
allocator_shim::RemoveAllocatorDispatchForTesting(&d);
#endif
}
};

View file

@ -61,11 +61,11 @@
// Expensive dchecks that run within *Scan. These checks are only enabled in
// debug builds with dchecks enabled.
#if !defined(NDEBUG)
#if PA_BUILDFLAG(IS_DEBUG)
#define PA_SCAN_DCHECK_IS_ON() PA_BUILDFLAG(DCHECKS_ARE_ON)
#else
#define PA_SCAN_DCHECK_IS_ON() 0
#endif
#endif // PA_BUILDFLAG(IS_DEBUG)
#if PA_SCAN_DCHECK_IS_ON()
#define PA_SCAN_DCHECK(expr) PA_DCHECK(expr)

View file

@ -63,9 +63,7 @@ enum class FreeFlags {
kNoHooks = 1 << 1, // Internal.
// Quarantine for a while to ensure no UaF from on-stack pointers.
kSchedulerLoopQuarantine = 1 << 2,
// Zap the object region on `Free()`.
kZap = 1 << 3,
kMaxValue = kZap,
kMaxValue = kSchedulerLoopQuarantine,
};
PA_DEFINE_OPERATORS_FOR_FLAGS(FreeFlags);
} // namespace internal

View file

@ -684,7 +684,7 @@ PartitionBucket::AllocNewSlotSpan(PartitionRoot* root,
for (auto* page = gap_start_page->ToWritable(root);
page < gap_end_page->ToWritable(root); ++page) {
PA_DCHECK(!page->is_valid);
page->has_valid_span_after_this = 1;
page->has_valid_span_after_this = true;
}
root->next_partition_page =
adjusted_next_partition_page + slot_span_reservation_size;
@ -708,7 +708,7 @@ PartitionBucket::AllocNewSlotSpan(PartitionRoot* root,
PA_DEBUG_DATA_ON_STACK("spancmt", slot_span_committed_size);
root->RecommitSystemPagesForData(
slot_span_start, slot_span_committed_size,
slot_span_start, SlotSpanCommittedSize(root),
PageAccessibilityDisposition::kRequireUpdate,
slot_size <= kMaxMemoryTaggingSize);
}
@ -1590,4 +1590,63 @@ void PartitionBucket::InitializeSlotSpanForGwpAsan(
InitializeSlotSpan(slot_span, root);
}
size_t PartitionBucket::SlotSpanCommittedSize(PartitionRoot* root) const {
  // With lazy commit, we certainly don't want to commit more than
  // necessary. This is not reached, but keep the CHECK() as documentation.
  PA_CHECK(!kUseLazyCommit);
  // Memory is reserved in units of PartitionPage, but a given slot span may be
  // smaller than the reserved area. For instance (assuming 4k pages), for a
  // bucket where the slot span size is 40kiB, we reserve 4 PartitionPage = 16 *
  // 4 = 48kiB, but only ever commit 40kiB out of it.
  //
  // This means that the address space then looks like, assuming that the
  // PartitionPage next to it is committed:
  // [SlotSpan range, 40kiB] rw-p
  // [Unused area in the last PartitionPage, 8kiB] ---p
  // [Next PartitionPages, size unknown ] rw-p
  //
  // So we have a "hole" of inaccessible memory, and 3 memory regions. If
  // instead we commit the full PartitionPages, we get (due to the kernel
  // merging neighboring regions with uniform permissions):
  //
  // [SlotSpan range, 40kiB + Unused area, 8kiB + next PartitionPages] rw-p
  //
  // So 1 memory region rather than 3. This matters, because on Linux kernels,
  // there is a maximum number of VMAs per process, with the default limit a bit
  // less than 2^16, and Chromium sometimes hits the limit (see
  // /proc/sys/vm/max_map_count for the current limit), largely because of
  // PartitionAlloc contributing thousands of regions. Locally, on a Linux
  // system, this reduces the number of PartitionAlloc regions by up to ~4x.
  //
  // Why is it safe?
  // The extra memory is not used by anything, so committing it doesn't make a
  // difference. It makes it accessible though.
  //
  // How much does it cost?
  // Almost nothing. On Linux, "committing" memory merely changes its
  // permissions, it doesn't cost any memory until the pages are touched, which
  // they are not. However, mprotect()-ed areas that are writable count towards
  // the RLIMIT_DATA resource limit, which is used by the sandbox. So, while
  // this change costs 0 physical memory (and actually saves some, by reducing
  // the size of the VMA red-black tree in the kernel), it might increase
  // slightly the cases where we bump into the sandbox memory limit.
  //
  // Is it safe to do while running?
  // Since this is decided through root settings, the value changes at runtime,
  // so we may decommit memory that was never committed. This is safe on Linux,
  // since decommitting is just changing permissions back to PROT_NONE, which
  // the tail end would already have.
  //
  // Can we do better?
  // For simplicity, we do not "fix" the regions that were committed before the
  // settings are changed (after feature list initialization). This means that
  // we end up with more regions than we could. The intent is to run a field
  // experiment, then change the default value, at which point we get the full
  // impact, so this is only temporary.
  return root->settings.fewer_memory_regions
             ? (get_pages_per_slot_span() << PartitionPageShift())
             : get_bytes_per_span();
}
} // namespace partition_alloc::internal

View file

@ -1,7 +1,6 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_PARTITION_BUCKET_H_
#define PARTITION_ALLOC_PARTITION_BUCKET_H_
@ -171,6 +170,8 @@ struct PartitionBucket {
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
PartitionRoot* root);
size_t SlotSpanCommittedSize(PartitionRoot* root) const;
private:
// Sets `this->can_store_raw_size`.
void InitCanStoreRawSize(bool use_small_single_slot_spans);

View file

@ -248,7 +248,7 @@ void SlotSpanMetadata<MetadataKind::kWritable>::Decommit(PartitionRoot* root) {
size_t dirty_size =
base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());
size_t size_to_decommit =
kUseLazyCommit ? dirty_size : bucket->get_bytes_per_span();
kUseLazyCommit ? dirty_size : bucket->SlotSpanCommittedSize(root);
PA_DCHECK(root->empty_slot_spans_dirty_bytes >= dirty_size);
root->empty_slot_spans_dirty_bytes -= dirty_size;

View file

@ -51,6 +51,10 @@
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
#endif // PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
#if defined(__MUSL__)
#include "partition_alloc/shim/allocator_shim.h"
#endif
namespace partition_alloc::internal {
#if PA_BUILDFLAG(RECORD_ALLOC_INFO)
@ -297,11 +301,7 @@ void PartitionAllocMallocInitOnce() {
return;
}
#if defined(__MUSL__)
static_cast<void>(BeforeForkInParent);
static_cast<void>(AfterForkInParent);
static_cast<void>(AfterForkInChild);
#elif PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
// When fork() is called, only the current thread continues to execute in the
// child process. If the lock is held, but *not* by this thread when fork() is
// called, we have a deadlock.
@ -323,9 +323,25 @@ void PartitionAllocMallocInitOnce() {
// However, no perfect solution really exists to make threads + fork()
// cooperate, but deadlocks are real (and fork() is used in DEATH_TEST()s),
// and other malloc() implementations use the same techniques.
#if defined(__MUSL__)
allocator_shim::AllocatorDispatch d =
*allocator_shim::GetAllocatorDispatchChainHeadForTesting();
d.alloc_function = +[](size_t size, void*) -> void* {
// The scratch buffer is sized to match struct atfork_funcs in Musl's pthread_atfork.c.
static char scratch[5 * sizeof(void*)];
return size != sizeof(scratch) ? nullptr : scratch;
};
allocator_shim::InsertAllocatorDispatch(&d);
#endif
int err =
pthread_atfork(BeforeForkInParent, AfterForkInParent, AfterForkInChild);
PA_CHECK(err == 0);
#if defined(__MUSL__)
allocator_shim::RemoveAllocatorDispatchForTesting(&d);
#endif
#endif // PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
}
@ -1158,6 +1174,8 @@ void PartitionRoot::Init(PartitionOptions opts) {
opts.zapping_by_free_flags == PartitionOptions::kEnabled;
settings.eventually_zero_freed_memory =
opts.eventually_zero_freed_memory == PartitionOptions::kEnabled;
settings.fewer_memory_regions =
opts.fewer_memory_regions == PartitionOptions::kEnabled;
settings.scheduler_loop_quarantine =
opts.scheduler_loop_quarantine == PartitionOptions::kEnabled;

View file

@ -182,6 +182,7 @@ struct PartitionOptions {
// compression ratio of freed memory inside partially allocated pages (due to
// fragmentation).
EnableToggle eventually_zero_freed_memory = kDisabled;
EnableToggle fewer_memory_regions = kDisabled;
struct {
EnableToggle enabled = kDisabled;
@ -261,9 +262,14 @@ struct alignas(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
size_t in_slot_metadata_size = 0;
#endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
bool use_configurable_pool = false;
// Despite its name, `FreeFlags` for zapping is deleted and does not exist.
// This value is used for SchedulerLoopQuarantine.
// TODO(https://crbug.com/351974425): group this setting and quarantine
// setting in one place.
bool zapping_by_free_flags = false;
bool eventually_zero_freed_memory = false;
bool scheduler_loop_quarantine = false;
bool fewer_memory_regions = false;
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
bool memory_tagging_enabled_ = false;
bool use_random_memory_tagging_ = false;
@ -1516,16 +1522,11 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
// cacheline ping-pong.
PA_PREFETCH(slot_span);
// Further down, we may zap the memory, no point in doing it twice. We may
// zap twice if kZap is enabled without kSchedulerLoopQuarantine. Make sure it
// does not happen. This is not a hard requirement: if this is deemed cheap
// enough, it can be relaxed, the static_assert() is here to make it a
// conscious decision.
static_assert(!ContainsFlags(flags, FreeFlags::kZap) ||
ContainsFlags(flags, FreeFlags::kSchedulerLoopQuarantine),
"kZap and kSchedulerLoopQuarantine should be used together to "
"avoid double zapping");
if constexpr (ContainsFlags(flags, FreeFlags::kZap)) {
// TODO(crbug.com/40287058): Collecting objects for
// `kSchedulerLoopQuarantineBranch` here means it "delays" other checks (BRP
// refcount, cookie, etc.)
// For better debuggability, we should do these checks before quarantining.
if constexpr (ContainsFlags(flags, FreeFlags::kSchedulerLoopQuarantine)) {
// No need to zap direct mapped allocations, as they are unmapped right
// away. This also ensures that we don't needlessly memset() very large
// allocations.
@ -1534,12 +1535,7 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
internal::SecureMemset(object, internal::kFreedByte,
GetSlotUsableSize(slot_span));
}
}
// TODO(crbug.com/40287058): Collecting objects for
// `kSchedulerLoopQuarantineBranch` here means it "delays" other checks (BRP
// refcount, cookie, etc.)
// For better debuggability, we should do these checks before quarantining.
if constexpr (ContainsFlags(flags, FreeFlags::kSchedulerLoopQuarantine)) {
if (settings.scheduler_loop_quarantine) {
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// TODO(keishi): Add `[[likely]]` when brp is fully enabled as
@ -2482,7 +2478,7 @@ void* PartitionRoot::ReallocInline(void* ptr,
constexpr bool no_hooks = ContainsFlags(alloc_flags, AllocFlags::kNoHooks);
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
bool overridden = false;
size_t old_usable_size;
size_t old_usable_size = 0;
if (!no_hooks && hooks_enabled) [[unlikely]] {
overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
&old_usable_size, ptr);
@ -2525,6 +2521,21 @@ void* PartitionRoot::ReallocInline(void* ptr,
}
}
#if PA_BUILDFLAG(REALLOC_GROWTH_FACTOR_MITIGATION)
// Some nVidia drivers have a performance bug where they repeatedly realloc a
// buffer with a small 4144 byte increment instead of using a growth factor to
// amortize the cost of a memcpy. To work around this, we apply a growth
// factor to the new size to avoid this issue. This workaround is only
// intended to be used for Skia bots, and is not intended to be a general
// solution.
if (new_size > old_usable_size && new_size > 12 << 20) {
// 1.5x growth factor.
// Note that in case of integer overflow, the std::max ensures that the
// new_size is at least as large as the old_usable_size.
new_size = std::max(new_size, old_usable_size * 3 / 2);
}
#endif
// This realloc cannot be resized in-place. Sadness.
void* ret = AllocInternal<alloc_flags>(
new_size, internal::PartitionPageSize(), type_name);

View file

@ -1255,6 +1255,34 @@ struct pointer_traits<::raw_ptr<T, Traits>> {
}
};
#if PA_BUILDFLAG(ASSERT_CPP_20)
// Mark `raw_ptr<T>` and `T*` as having a common reference type (the type to
// which both can be converted or bound) of `T*`. This makes them satisfy
// `std::equality_comparable`, which allows usage like:
// ```
// std::vector<raw_ptr<T>> v;
// T* e;
// auto it = std::ranges::find(v, e);
// ```
// Without this, the `find()` call above would fail to compile with a cryptic
// error about being unable to invoke `std::ranges::equal_to()`.
template <typename T,
base::RawPtrTraits Traits,
template <typename> typename TQ,
template <typename> typename UQ>
struct basic_common_reference<raw_ptr<T, Traits>, T*, TQ, UQ> {
using type = T*;
};
template <typename T,
base::RawPtrTraits Traits,
template <typename> typename TQ,
template <typename> typename UQ>
struct basic_common_reference<T*, raw_ptr<T, Traits>, TQ, UQ> {
using type = T*;
};
#endif // PA_BUILDFLAG(ASSERT_CPP_20)
} // namespace std
#endif // PARTITION_ALLOC_POINTERS_RAW_PTR_H_

View file

@ -27,7 +27,7 @@ class RandomGenerator {
}
private:
::partition_alloc::internal::Lock lock_ = {};
::partition_alloc::internal::Lock lock_;
bool initialized_ PA_GUARDED_BY(lock_) = false;
union {
internal::base::InsecureRandomGenerator instance_ PA_GUARDED_BY(lock_);

View file

@ -8,7 +8,6 @@
#include <cstddef>
#include <cstdint>
#include <limits>
#include <tuple>
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/build_config.h"

View file

@ -150,9 +150,9 @@ using ZappingByFreeFlags =
bool>;
using EventuallyZeroFreedMemory = partition_alloc::internal::base::
StrongAlias<class EventuallyZeroFreedMemoryTag, bool>;
using UsePoolOffsetFreelists = partition_alloc::internal::base::
StrongAlias<class UsePoolOffsetFreelistsTag, bool>;
using FewerMemoryRegions =
partition_alloc::internal::base::StrongAlias<class FewerMemoryRegionsTag,
bool>;
using UseSmallSingleSlotSpans = partition_alloc::internal::base::
StrongAlias<class UseSmallSingleSlotSpansTag, bool>;
@ -170,7 +170,7 @@ void ConfigurePartitions(
size_t scheduler_loop_quarantine_branch_capacity_in_bytes,
ZappingByFreeFlags zapping_by_free_flags,
EventuallyZeroFreedMemory eventually_zero_freed_memory,
UsePoolOffsetFreelists use_pool_offset_freelists,
FewerMemoryRegions fewer_memory_regions,
UseSmallSingleSlotSpans use_small_single_slot_spans);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM) uint32_t GetMainPartitionRootExtrasSize();

View file

@ -26,6 +26,7 @@
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#include "partition_alloc/shim/allocator_dispatch.h"
#include "partition_alloc/shim/allocator_shim.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_internal.h"
#include "partition_alloc/shim/allocator_shim_internals.h"
@ -570,7 +571,6 @@ template class PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
PartitionAllocFunctionsInternal<
partition_alloc::AllocFlags::kNoHooks,
partition_alloc::FreeFlags::kNoHooks |
partition_alloc::FreeFlags::kZap |
partition_alloc::FreeFlags::kSchedulerLoopQuarantine>;
// static
@ -619,7 +619,7 @@ void ConfigurePartitions(
size_t scheduler_loop_quarantine_branch_capacity_in_bytes,
ZappingByFreeFlags zapping_by_free_flags,
EventuallyZeroFreedMemory eventually_zero_freed_memory,
UsePoolOffsetFreelists use_pool_offset_freelists,
FewerMemoryRegions fewer_memory_regions,
UseSmallSingleSlotSpans use_small_single_slot_spans) {
// Calling Get() is actually important, even if the return value isn't
// used, because it has a side effect of initializing the variable, if it
@ -643,6 +643,7 @@ void ConfigurePartitions(
opts.backup_ref_ptr =
enable_brp ? partition_alloc::PartitionOptions::kEnabled
: partition_alloc::PartitionOptions::kDisabled;
opts.backup_ref_ptr_extra_extras_size = brp_extra_extras_size;
opts.zapping_by_free_flags =
zapping_by_free_flags
? partition_alloc::PartitionOptions::kEnabled
@ -651,6 +652,9 @@ void ConfigurePartitions(
eventually_zero_freed_memory
? partition_alloc::PartitionOptions::kEnabled
: partition_alloc::PartitionOptions::kDisabled;
opts.fewer_memory_regions =
fewer_memory_regions ? partition_alloc::PartitionOptions::kEnabled
: partition_alloc::PartitionOptions::kDisabled;
opts.scheduler_loop_quarantine =
scheduler_loop_quarantine
? partition_alloc::PartitionOptions::kEnabled
@ -663,9 +667,7 @@ void ConfigurePartitions(
: partition_alloc::PartitionOptions::kDisabled,
.reporting_mode = memory_tagging_reporting_mode};
opts.use_pool_offset_freelists =
use_pool_offset_freelists
? partition_alloc::PartitionOptions::kEnabled
: partition_alloc::PartitionOptions::kDisabled;
partition_alloc::PartitionOptions::kEnabled;
opts.use_small_single_slot_spans =
use_small_single_slot_spans
? partition_alloc::PartitionOptions::kEnabled

View file

@ -135,7 +135,6 @@ using PartitionAllocWithAdvancedChecksFunctions =
PartitionAllocFunctionsInternal<
partition_alloc::AllocFlags::kNoHooks,
partition_alloc::FreeFlags::kNoHooks |
partition_alloc::FreeFlags::kZap |
partition_alloc::FreeFlags::kSchedulerLoopQuarantine>;
// `PartitionAllocFunctions` in instantiated in cc file.
@ -147,7 +146,6 @@ extern template class PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
PartitionAllocFunctionsInternal<
partition_alloc::AllocFlags::kNoHooks,
partition_alloc::FreeFlags::kNoHooks |
partition_alloc::FreeFlags::kZap |
partition_alloc::FreeFlags::kSchedulerLoopQuarantine>;
} // namespace internal
@ -182,7 +180,7 @@ PA_ALWAYS_INLINE void ConfigurePartitionsForTesting() {
size_t scheduler_loop_quarantine_capacity_in_bytes = 0;
auto zapping_by_free_flags = ZappingByFreeFlags(false);
auto eventually_zero_freed_memory = EventuallyZeroFreedMemory(false);
auto use_pool_offset_freelists = UsePoolOffsetFreelists(true);
auto fewer_memory_regions = FewerMemoryRegions(false);
auto use_small_single_slot_spans = UseSmallSingleSlotSpans(true);
ConfigurePartitions(enable_brp, brp_extra_extras_size, enable_memory_tagging,
@ -190,7 +188,7 @@ PA_ALWAYS_INLINE void ConfigurePartitionsForTesting() {
scheduler_loop_quarantine,
scheduler_loop_quarantine_capacity_in_bytes,
zapping_by_free_flags, eventually_zero_freed_memory,
use_pool_offset_freelists, use_small_single_slot_spans);
fewer_memory_regions, use_small_single_slot_spans);
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

View file

@ -276,7 +276,7 @@ errno_t _wdupenv_s(wchar_t** buffer,
}
#endif
#if !defined(NDEBUG)
#if PA_BUILDFLAG(IS_DEBUG)
typedef void (*_CRT_DUMP_CLIENT)(void*, size_t);
int _crtDbgFlag = 0;
@ -451,7 +451,7 @@ errno_t _wdupenv_s_dbg(wchar_t** buffer,
}
#endif // defined(COMPONENT_BUILD)
#endif // !defined(NDEBUG)
#endif // PA_BUILDFLAG(IS_DEBUG)
} // extern "C"
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)

View file

@ -143,7 +143,12 @@ void SpinningMutex::LockSlow() {
#elif PA_BUILDFLAG(IS_APPLE)
// TODO(verwaest): We should use the constants from the header, but they aren't
// exposed until macOS 15.
// exposed until macOS 15. See their definition here:
// https://github.com/apple-oss-distributions/libplatform/blob/4f6349dfea579c35b8fa838d785644e441d14e0e/private/os/lock_private.h#L265
//
// The first flag prevents the runtime from creating more threads in response to
// contention. The second will spin in the kernel if the lock owner is currently
// running.
#define OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION 0x00010000
#define OS_UNFAIR_LOCK_ADAPTIVE_SPIN 0x00040000

View file

@ -0,0 +1,196 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/android/android_info.h"
#include <cstring>
#include <mutex>
#include <string>
#include <variant>
#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/android/scoped_java_ref.h"
#include "base/check.h"
#include "base/strings/string_number_conversions.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/android_info_jni/AndroidInfo_jni.h"
namespace base::android::android_info {
namespace {
// Snapshot of android.os.Build-derived values, filled exactly once from Java
// via JNI_AndroidInfo_FillFields() and treated as immutable afterwards.
struct AndroidInfo {
  // Const char* is used instead of std::strings because these values must be
  // available even if the process is in a crash state. Sadly
  // std::string.c_str() doesn't guarantee that memory won't be allocated when
  // it is called.
  const char* device;
  const char* manufacturer;
  const char* model;
  const char* brand;
  const char* android_build_id;
  const char* build_type;
  const char* board;
  const char* android_build_fp;
  int sdk_int;
  bool is_debug_android;
  const char* version_incremental;
  const char* hardware;
  bool is_at_least_u;
  const char* codename;
  // Available only on android S+. For S-, this method returns empty string.
  const char* soc_manufacturer;
  bool is_at_least_t;
  const char* abi_name;
};
std::optional<AndroidInfo> holder;
// Returns the process-wide AndroidInfo, performing one-time initialization on
// first use. The thread-safe function-local static guarantees that the Java
// side is asked to fill the fields exactly once.
const AndroidInfo& get_android_info() {
  [[maybe_unused]] static const bool initialized = [] {
    Java_AndroidInfo_nativeReadyForFields(AttachCurrentThread());
    return true;
  }();
  // Java calls back into the native FillFields() before
  // nativeReadyForFields() returns, so the holder must be populated here.
  DCHECK(holder.has_value());
  return *holder;
}
} // namespace
// Called once from Java (in response to nativeReadyForFields) to populate the
// process-wide AndroidInfo singleton.
//
// NOTE(review): the strdup() copies made below are never freed; they appear
// to be intentional process-lifetime allocations so the values stay readable
// from a crash handler — confirm this is the intended ownership model.
static void JNI_AndroidInfo_FillFields(
    JNIEnv* env,
    const jni_zero::JavaParamRef<jstring>& brand,
    const jni_zero::JavaParamRef<jstring>& device,
    const jni_zero::JavaParamRef<jstring>& buildId,
    const jni_zero::JavaParamRef<jstring>& manufacturer,
    const jni_zero::JavaParamRef<jstring>& model,
    const jni_zero::JavaParamRef<jstring>& type,
    const jni_zero::JavaParamRef<jstring>& board,
    const jni_zero::JavaParamRef<jstring>& androidBuildFingerprint,
    const jni_zero::JavaParamRef<jstring>& versionIncremental,
    const jni_zero::JavaParamRef<jstring>& hardware,
    const jni_zero::JavaParamRef<jstring>& codeName,
    const jni_zero::JavaParamRef<jstring>& socManufacturer,
    const jni_zero::JavaParamRef<jstring>& supportedAbis,
    jint sdkInt,
    jboolean isDebugAndroid,
    jboolean isAtleastU,
    jboolean isAtleastT) {
  // Must only run once: a second call would overwrite (and leak) the
  // previously duplicated strings.
  DCHECK(!holder.has_value());
  // Copies each Java string into a malloc'ed, NUL-terminated C string.
  auto java_string_to_const_char =
      [](const jni_zero::JavaParamRef<jstring>& str) {
        return strdup(ConvertJavaStringToUTF8(str).c_str());
      };
  holder = AndroidInfo{
      .device = java_string_to_const_char(device),
      .manufacturer = java_string_to_const_char(manufacturer),
      .model = java_string_to_const_char(model),
      .brand = java_string_to_const_char(brand),
      .android_build_id = java_string_to_const_char(buildId),
      .build_type = java_string_to_const_char(type),
      .board = java_string_to_const_char(board),
      .android_build_fp = java_string_to_const_char(androidBuildFingerprint),
      .sdk_int = sdkInt,
      .is_debug_android = static_cast<bool>(isDebugAndroid),
      .version_incremental = java_string_to_const_char(versionIncremental),
      .hardware = java_string_to_const_char(hardware),
      .is_at_least_u = static_cast<bool>(isAtleastU),
      .codename = java_string_to_const_char(codeName),
      .soc_manufacturer = java_string_to_const_char(socManufacturer),
      .is_at_least_t = static_cast<bool>(isAtleastT),
      .abi_name = java_string_to_const_char(supportedAbis),
  };
}
// Public accessors. Each returns one field of the process-wide AndroidInfo
// singleton; the first call to any of them triggers the one-time JNI
// initialization in get_android_info(). Returned pointers are owned by the
// singleton and remain valid for the lifetime of the process.
const char* device() {
  return get_android_info().device;
}

const char* manufacturer() {
  return get_android_info().manufacturer;
}

const char* model() {
  return get_android_info().model;
}

const char* brand() {
  return get_android_info().brand;
}

const char* android_build_id() {
  return get_android_info().android_build_id;
}

const char* build_type() {
  return get_android_info().build_type;
}

const char* board() {
  return get_android_info().board;
}

const char* android_build_fp() {
  return get_android_info().android_build_fp;
}

int sdk_int() {
  return get_android_info().sdk_int;
}

bool is_debug_android() {
  return get_android_info().is_debug_android;
}

const char* version_incremental() {
  return get_android_info().version_incremental;
}

const char* hardware() {
  return get_android_info().hardware;
}

bool is_at_least_u() {
  return get_android_info().is_at_least_u;
}

const char* codename() {
  return get_android_info().codename;
}

// Available only on android S+. For S-, this method returns empty string.
const char* soc_manufacturer() {
  return get_android_info().soc_manufacturer;
}

bool is_at_least_t() {
  return get_android_info().is_at_least_t;
}

const char* abi_name() {
  return get_android_info().abi_name;
}
} // namespace base::android::android_info

View file

@ -0,0 +1,74 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_ANDROID_INFO_H_
#define BASE_ANDROID_ANDROID_INFO_H_
#include "base/base_export.h"
namespace base::android::android_info {
// This enumeration maps to the values returned by AndroidInfo::sdk_int(),
// indicating the Android release associated with a given SDK version.
// NOTE(review): values mirror android.os.Build.VERSION_CODES — keep in sync
// when new Android releases are added.
enum SdkVersion {
  SDK_VERSION_JELLY_BEAN = 16,
  SDK_VERSION_JELLY_BEAN_MR1 = 17,
  SDK_VERSION_JELLY_BEAN_MR2 = 18,
  SDK_VERSION_KITKAT = 19,
  SDK_VERSION_KITKAT_WEAR = 20,
  SDK_VERSION_LOLLIPOP = 21,
  SDK_VERSION_LOLLIPOP_MR1 = 22,
  SDK_VERSION_MARSHMALLOW = 23,
  SDK_VERSION_NOUGAT = 24,
  SDK_VERSION_NOUGAT_MR1 = 25,
  SDK_VERSION_OREO = 26,
  SDK_VERSION_O_MR1 = 27,
  SDK_VERSION_P = 28,
  SDK_VERSION_Q = 29,
  SDK_VERSION_R = 30,
  SDK_VERSION_S = 31,
  SDK_VERSION_Sv2 = 32,
  SDK_VERSION_T = 33,
  SDK_VERSION_U = 34,
  SDK_VERSION_V = 35,
};
// Accessors for android.os.Build-derived values, populated once over JNI.
// Returned pointers remain valid for the lifetime of the process.
// NOTE(review): BASE_EXPORT appears only on the symbols used outside of
// //base — confirm before relying on the others from other components.
const char* device();
const char* manufacturer();
const char* model();
BASE_EXPORT const char* brand();
const char* android_build_id();
const char* build_type();
const char* board();
const char* android_build_fp();
BASE_EXPORT int sdk_int();
bool is_debug_android();
const char* version_incremental();
BASE_EXPORT const char* hardware();
bool is_at_least_u();
const char* codename();
// Available only on android S+. For S-, this method returns empty string.
const char* soc_manufacturer();
bool is_at_least_t();
const char* abi_name();
} // namespace base::android::android_info
#endif // BASE_ANDROID_ANDROID_INFO_H_

View file

@ -0,0 +1,131 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/android/apk_info.h"

#include <cstring>
#include <optional>
#include <string>
#include <variant>

#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/android/scoped_java_ref.h"
#include "base/strings/string_number_conversions.h"

// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/build_info_jni/ApkInfo_jni.h"

namespace base::android::apk_info {

namespace {

// Snapshot of APK / host-package data received once from the Java side.
struct ApkInfo {
  // Const char* is used instead of std::strings because these values must be
  // available even if the process is in a crash state. Sadly
  // std::string.c_str() doesn't guarantee that memory won't be allocated when
  // it is called.
  const char* host_package_name;
  const char* host_version_code;
  const char* host_package_label;
  const char* package_version_code;
  const char* package_version_name;
  const char* package_name;
  const char* resources_version;
  const char* installer_package_name;
  bool is_debug_app;
  bool targets_at_least_u;
  int target_sdk_version;
};

// Populated exactly once by JNI_ApkInfo_FillFields(); see get_apk_info().
std::optional<ApkInfo> holder;

ApkInfo& get_apk_info() {
  // On first use, notify Java that native is ready; Java is expected to call
  // back synchronously into JNI_ApkInfo_FillFields(), populating |holder|.
  [[maybe_unused]] static auto once = [] {
    Java_ApkInfo_nativeReadyForFields(AttachCurrentThread());
    return std::monostate();
  }();
  // holder should be initialized as the java is supposed to call the native
  // method FillFields which will initialize the fields within the holder.
  DCHECK(holder.has_value());
  return *holder;
}

}  // namespace

// Called once from Java with every field value. The strings are strdup()'ed
// and intentionally leaked so they stay readable even in a crash state.
static void JNI_ApkInfo_FillFields(
    JNIEnv* env,
    const jni_zero::JavaParamRef<jstring>& hostPackageName,
    const jni_zero::JavaParamRef<jstring>& hostVersionCode,
    const jni_zero::JavaParamRef<jstring>& hostPackageLabel,
    const jni_zero::JavaParamRef<jstring>& packageVersionCode,
    const jni_zero::JavaParamRef<jstring>& packageVersionName,
    const jni_zero::JavaParamRef<jstring>& packageName,
    const jni_zero::JavaParamRef<jstring>& resourcesVersion,
    const jni_zero::JavaParamRef<jstring>& installerPackageName,
    jboolean isDebugApp,
    jboolean targetsAtleastU,
    jint targetSdkVersion) {
  DCHECK(!holder.has_value());
  auto java_string_to_const_char =
      [](const jni_zero::JavaParamRef<jstring>& str) {
        return strdup(ConvertJavaStringToUTF8(str).c_str());
      };
  holder = ApkInfo{
      .host_package_name = java_string_to_const_char(hostPackageName),
      .host_version_code = java_string_to_const_char(hostVersionCode),
      .host_package_label = java_string_to_const_char(hostPackageLabel),
      .package_version_code = java_string_to_const_char(packageVersionCode),
      .package_version_name = java_string_to_const_char(packageVersionName),
      .package_name = java_string_to_const_char(packageName),
      .resources_version = java_string_to_const_char(resourcesVersion),
      .installer_package_name = java_string_to_const_char(installerPackageName),
      .is_debug_app = static_cast<bool>(isDebugApp),
      .targets_at_least_u = static_cast<bool>(targetsAtleastU),
      .target_sdk_version = targetSdkVersion};
}

const char* host_package_name() {
  return get_apk_info().host_package_name;
}
const char* host_version_code() {
  return get_apk_info().host_version_code;
}
const char* host_package_label() {
  return get_apk_info().host_package_label;
}
const char* package_version_code() {
  return get_apk_info().package_version_code;
}
const char* package_version_name() {
  return get_apk_info().package_version_name;
}
const char* package_name() {
  return get_apk_info().package_name;
}
const char* resources_version() {
  return get_apk_info().resources_version;
}
const char* installer_package_name() {
  return get_apk_info().installer_package_name;
}
bool is_debug_app() {
  return get_apk_info().is_debug_app;
}
int target_sdk_version() {
  return get_apk_info().target_sdk_version;
}
bool targets_at_least_u() {
  return get_apk_info().targets_at_least_u;
}
}  // namespace base::android::apk_info

View file

@ -0,0 +1,43 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_APK_INFO_H_
#define BASE_ANDROID_APK_INFO_H_
// Accessors for properties of this APK and of the embedding ("host") app.
// Values are initialized lazily on first use; the returned C strings remain
// valid for the process lifetime.
namespace base::android::apk_info {
// The package name of the host app which has loaded WebView, retrieved from
// the application context. In the context of the SDK Runtime, the package
// name of the app that owns this particular instance of the SDK Runtime will
// also be included. e.g.
// com.google.android.sdksandbox:com:com.example.myappwithads
const char* host_package_name();
// By default: same as versionCode. For WebView: versionCode of the embedding
// app. In the context of the SDK Runtime, this is the versionCode of the app
// that owns this particular instance of the SDK Runtime.
const char* host_version_code();
// The application name (e.g. "Chrome"). For WebView, this is name of the
// embedding app. In the context of the SDK Runtime, this is the name of the
// app that owns this particular instance of the SDK Runtime.
const char* host_package_label();
// versionCode / versionName / package name of this APK itself.
const char* package_version_code();
const char* package_version_name();
const char* package_name();
const char* resources_version();
// Package name of the installer of this APK.
const char* installer_package_name();
// True if the APK is a debuggable app build (value supplied by the Java
// side).
bool is_debug_app();
// The APK's target SDK version.
int target_sdk_version();
// Whether the APK targets Android U or later (computed Java-side).
bool targets_at_least_u();
} // namespace base::android::apk_info
#endif // BASE_ANDROID_APK_INFO_H_

View file

@ -22,8 +22,8 @@ const base::Feature* const kFeaturesExposedToJava[] = {
// static
base::android::FeatureMap* GetFeatureMap() {
static base::NoDestructor<base::android::FeatureMap> kFeatureMap(std::vector(
std::begin(kFeaturesExposedToJava), std::end(kFeaturesExposedToJava)));
static base::NoDestructor<base::android::FeatureMap> kFeatureMap(
kFeaturesExposedToJava);
return kFeatureMap.get();
}

View file

@ -6,6 +6,9 @@
#include <string>
#include "base/android/android_info.h"
#include "base/android/apk_info.h"
#include "base/android/device_info.h"
#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
@ -16,35 +19,13 @@
#include "base/strings/string_number_conversions.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/base_jni/BuildInfo_jni.h"
#include "base/build_info_jni/BuildInfo_jni.h"
namespace base {
namespace android {
namespace {
// We are leaking these strings.
const char* StrDupParam(const std::vector<std::string>& params, size_t index) {
return strdup(params[index].c_str());
}
int GetIntParam(const std::vector<std::string>& params, size_t index) {
int ret = 0;
bool success = StringToInt(params[index], &ret);
DCHECK(success);
return ret;
}
} // namespace
struct BuildInfoSingletonTraits {
static BuildInfo* New() {
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobjectArray> params_objs = Java_BuildInfo_getAll(env);
std::vector<std::string> params;
AppendJavaStringArrayToStringVector(env, params_objs, &params);
return new BuildInfo(params);
}
static BuildInfo* New() { return new BuildInfo(); }
static void Delete(BuildInfo* x) {
// We're leaking this type, see kRegisterAtExit.
@ -57,51 +38,50 @@ struct BuildInfoSingletonTraits {
#endif
};
BuildInfo::BuildInfo(const std::vector<std::string>& params)
: brand_(StrDupParam(params, 0)),
device_(StrDupParam(params, 1)),
android_build_id_(StrDupParam(params, 2)),
manufacturer_(StrDupParam(params, 3)),
model_(StrDupParam(params, 4)),
sdk_int_(GetIntParam(params, 5)),
build_type_(StrDupParam(params, 6)),
board_(StrDupParam(params, 7)),
host_package_name_(StrDupParam(params, 8)),
host_version_code_(StrDupParam(params, 9)),
host_package_label_(StrDupParam(params, 10)),
package_name_(StrDupParam(params, 11)),
package_version_code_(StrDupParam(params, 12)),
package_version_name_(StrDupParam(params, 13)),
android_build_fp_(StrDupParam(params, 14)),
gms_version_code_(StrDupParam(params, 15)),
installer_package_name_(StrDupParam(params, 16)),
abi_name_(StrDupParam(params, 17)),
custom_themes_(StrDupParam(params, 18)),
resources_version_(StrDupParam(params, 19)),
target_sdk_version_(GetIntParam(params, 20)),
is_debug_android_(GetIntParam(params, 21)),
is_tv_(GetIntParam(params, 22)),
version_incremental_(StrDupParam(params, 23)),
hardware_(StrDupParam(params, 24)),
is_at_least_t_(GetIntParam(params, 25)),
is_automotive_(GetIntParam(params, 26)),
is_at_least_u_(GetIntParam(params, 27)),
targets_at_least_u_(GetIntParam(params, 28)),
codename_(StrDupParam(params, 29)),
vulkan_deqp_level_(GetIntParam(params, 30)),
is_foldable_(GetIntParam(params, 31)),
soc_manufacturer_(StrDupParam(params, 32)),
is_debug_app_(GetIntParam(params, 33)),
is_desktop_(GetIntParam(params, 34)) {}
BuildInfo::BuildInfo()
: brand_(android_info::brand()),
device_(android_info::device()),
android_build_id_(android_info::android_build_id()),
manufacturer_(android_info::manufacturer()),
model_(android_info::model()),
sdk_int_(android_info::sdk_int()),
build_type_(android_info::build_type()),
board_(android_info::board()),
host_package_name_(apk_info::host_package_name()),
host_version_code_(apk_info::host_version_code()),
host_package_label_(apk_info::host_package_label()),
package_name_(apk_info::package_name()),
package_version_code_(apk_info::package_version_code()),
package_version_name_(apk_info::package_version_name()),
android_build_fp_(android_info::android_build_fp()),
installer_package_name_(apk_info::installer_package_name()),
abi_name_(android_info::abi_name()),
resources_version_(apk_info::resources_version()),
target_sdk_version_(apk_info::target_sdk_version()),
is_debug_android_(android_info::is_debug_android()),
is_tv_(device_info::is_tv()),
version_incremental_(android_info::version_incremental()),
hardware_(android_info::hardware()),
is_at_least_t_(android_info::is_at_least_t()),
is_automotive_(device_info::is_automotive()),
is_at_least_u_(android_info::is_at_least_u()),
targets_at_least_u_(apk_info::targets_at_least_u()),
codename_(android_info::codename()),
vulkan_deqp_level_(device_info::vulkan_deqp_level()),
is_foldable_(device_info::is_foldable()),
soc_manufacturer_(android_info::soc_manufacturer()),
is_debug_app_(apk_info::is_debug_app()),
is_desktop_(device_info::is_desktop()) {}
BuildInfo::~BuildInfo() = default;
const char* BuildInfo::gms_version_code() const {
return device_info::gms_version_code();
}
void BuildInfo::set_gms_version_code_for_test(
const std::string& gms_version_code) {
// This leaks the string, just like production code.
gms_version_code_ = strdup(gms_version_code.c_str());
Java_BuildInfo_setGmsVersionCodeForTest(AttachCurrentThread(),
gms_version_code);
device_info::set_gms_version_code_for_test(gms_version_code);
}
std::string BuildInfo::host_signing_cert_sha256() {

View file

@ -10,36 +10,42 @@
#include <string>
#include <vector>
#include "base/android/android_info.h"
#include "base/base_export.h"
#include "base/memory/singleton.h"
namespace base::android {
// DEPRECATED: Please use android_info::SdkVersion.
//
// This enumeration maps to the values returned by BuildInfo::sdk_int(),
// indicating the Android release associated with a given SDK version.
enum SdkVersion {
SDK_VERSION_JELLY_BEAN = 16,
SDK_VERSION_JELLY_BEAN_MR1 = 17,
SDK_VERSION_JELLY_BEAN_MR2 = 18,
SDK_VERSION_KITKAT = 19,
SDK_VERSION_KITKAT_WEAR = 20,
SDK_VERSION_LOLLIPOP = 21,
SDK_VERSION_LOLLIPOP_MR1 = 22,
SDK_VERSION_MARSHMALLOW = 23,
SDK_VERSION_NOUGAT = 24,
SDK_VERSION_NOUGAT_MR1 = 25,
SDK_VERSION_OREO = 26,
SDK_VERSION_O_MR1 = 27,
SDK_VERSION_P = 28,
SDK_VERSION_Q = 29,
SDK_VERSION_R = 30,
SDK_VERSION_S = 31,
SDK_VERSION_Sv2 = 32,
SDK_VERSION_T = 33,
SDK_VERSION_U = 34,
SDK_VERSION_V = 35,
SDK_VERSION_JELLY_BEAN = android_info::SDK_VERSION_JELLY_BEAN,
SDK_VERSION_JELLY_BEAN_MR1 = android_info::SDK_VERSION_JELLY_BEAN_MR1,
SDK_VERSION_JELLY_BEAN_MR2 = android_info::SDK_VERSION_JELLY_BEAN_MR2,
SDK_VERSION_KITKAT = android_info::SDK_VERSION_KITKAT,
SDK_VERSION_KITKAT_WEAR = android_info::SDK_VERSION_KITKAT_WEAR,
SDK_VERSION_LOLLIPOP = android_info::SDK_VERSION_LOLLIPOP,
SDK_VERSION_LOLLIPOP_MR1 = android_info::SDK_VERSION_LOLLIPOP_MR1,
SDK_VERSION_MARSHMALLOW = android_info::SDK_VERSION_MARSHMALLOW,
SDK_VERSION_NOUGAT = android_info::SDK_VERSION_NOUGAT,
SDK_VERSION_NOUGAT_MR1 = android_info::SDK_VERSION_NOUGAT_MR1,
SDK_VERSION_OREO = android_info::SDK_VERSION_OREO,
SDK_VERSION_O_MR1 = android_info::SDK_VERSION_O_MR1,
SDK_VERSION_P = android_info::SDK_VERSION_P,
SDK_VERSION_Q = android_info::SDK_VERSION_Q,
SDK_VERSION_R = android_info::SDK_VERSION_R,
SDK_VERSION_S = android_info::SDK_VERSION_S,
SDK_VERSION_Sv2 = android_info::SDK_VERSION_Sv2,
SDK_VERSION_T = android_info::SDK_VERSION_T,
SDK_VERSION_U = android_info::SDK_VERSION_U,
SDK_VERSION_V = android_info::SDK_VERSION_V,
};
// DEPRECATED: Use AndroidInfo, DeviceInfo or ApkInfo instead.
// These are more efficient because they only retrieve the data being queried.
//
// BuildInfo is a singleton class that stores android build and device
// information. It will be called from Android specific code and gets used
// primarily in crash reporting.
@ -72,7 +78,7 @@ class BASE_EXPORT BuildInfo {
const char* android_build_fp() const { return android_build_fp_; }
const char* gms_version_code() const { return gms_version_code_; }
const char* gms_version_code() const;
void set_gms_version_code_for_test(const std::string& gms_version_code);
@ -103,8 +109,6 @@ class BASE_EXPORT BuildInfo {
const char* package_name() const { return package_name_; }
const char* custom_themes() const { return custom_themes_; }
const char* resources_version() const { return resources_version_; }
const char* build_type() const { return build_type_; }
@ -158,7 +162,7 @@ class BASE_EXPORT BuildInfo {
private:
friend struct BuildInfoSingletonTraits;
explicit BuildInfo(const std::vector<std::string>& params);
explicit BuildInfo();
// Const char* is used instead of std::strings because these values must be
// available even if the process is in a crash state. Sadly
@ -179,11 +183,8 @@ class BASE_EXPORT BuildInfo {
const char* const package_version_code_;
const char* const package_version_name_;
const char* const android_build_fp_;
// Can be overridden in tests.
const char* gms_version_code_ = nullptr;
const char* const installer_package_name_;
const char* const abi_name_;
const char* const custom_themes_;
const char* const resources_version_;
// Not needed by breakpad.
const int target_sdk_version_;

View file

@ -22,7 +22,7 @@ namespace base {
namespace android {
struct BuildInfoSingletonTraits {
static BuildInfo* New() { return new BuildInfo({}); }
static BuildInfo* New() { return new BuildInfo(); }
static void Delete(BuildInfo* x) {
// We're leaking this type, see kRegisterAtExit.
@ -32,7 +32,7 @@ struct BuildInfoSingletonTraits {
static const bool kRegisterAtExit = false;
};
BuildInfo::BuildInfo(const std::vector<std::string>& params)
BuildInfo::BuildInfo()
: brand_(""),
device_(""),
android_build_id_(""),
@ -48,10 +48,8 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
package_version_code_(""),
package_version_name_(""),
android_build_fp_(""),
gms_version_code_(""),
installer_package_name_(""),
abi_name_(""),
custom_themes_(""),
resources_version_(""),
target_sdk_version_(0),
is_debug_android_(false),

View file

@ -10,13 +10,9 @@
#include "base/android/scoped_java_ref.h"
#include "base/time/time.h"
#include "base/types/optional_ref.h"
#include "build/robolectric_buildflags.h"
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/Callback_jni.h" // nogncheck
#else
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/callback_jni/Callback_jni.h"
#endif
namespace base {
namespace android {

View file

@ -5,14 +5,9 @@
#include "base/command_line.h"
#include "base/android/jni_string.h"
#include "build/robolectric_buildflags.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/CommandLine_jni.h" // nogncheck
#else
#include "base/command_line_jni/CommandLine_jni.h"
#endif
using base::CommandLine;
using base::android::JavaParamRef;
@ -41,21 +36,8 @@ static std::string JNI_CommandLine_GetSwitchValue(JNIEnv* env,
return CommandLine::ForCurrentProcess()->GetSwitchValueNative(switch_string);
}
static std::vector<std::string> JNI_CommandLine_GetSwitchesFlattened(
JNIEnv* env) {
// JNI doesn't support returning Maps. Instead, express this map as a 1
// dimensional array: [ key1, value1, key2, value2, ... ]
std::vector<std::string> keys_and_values;
for (const auto& entry : CommandLine::ForCurrentProcess()->GetSwitches()) {
keys_and_values.push_back(entry.first);
keys_and_values.push_back(entry.second);
}
return keys_and_values;
}
static void JNI_CommandLine_AppendSwitch(JNIEnv* env,
std::string& switch_string) {
CommandLine::ForCurrentProcess()->AppendSwitch(switch_string);
static CommandLine::SwitchMap JNI_CommandLine_GetSwitches(JNIEnv* env) {
return CommandLine::ForCurrentProcess()->GetSwitches();
}
static void JNI_CommandLine_AppendSwitchWithValue(JNIEnv* env,

View file

@ -0,0 +1,101 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/android/device_info.h"

#include <cstdint>
#include <cstring>
#include <optional>
#include <string>
#include <variant>

#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/android/scoped_java_ref.h"
#include "base/strings/string_number_conversions.h"

// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/build_info_jni/DeviceInfo_jni.h"
#include "base/synchronization/lock.h"

namespace base::android::device_info {
namespace {

// Snapshot of device data received once from the Java side.
struct DeviceInfo {
  // Const char* is used instead of std::strings because these values must be
  // available even if the process is in a crash state. Sadly
  // std::string.c_str() doesn't guarantee that memory won't be allocated when
  // it is called.
  const char* gms_version_code;
  bool is_tv;
  bool is_automotive;
  bool is_foldable;
  bool is_desktop;
  // Available only on Android T+.
  int32_t vulkan_deqp_level;
};

// Populated exactly once by JNI_DeviceInfo_FillFields(); see
// get_device_info().
std::optional<DeviceInfo> holder;

DeviceInfo& get_device_info() {
  // On first use, notify Java that native is ready; Java is expected to call
  // back synchronously into JNI_DeviceInfo_FillFields(), populating |holder|.
  [[maybe_unused]] static auto once = [] {
    Java_DeviceInfo_nativeReadyForFields(AttachCurrentThread());
    return std::monostate();
  }();
  // holder should be initialized as the java is supposed to call the native
  // method FillFields which will initialize the fields within the holder.
  DCHECK(holder.has_value());
  return *holder;
}

}  // namespace

// Called once from Java with every field value. The string is strdup()'ed and
// intentionally leaked so it stays readable even in a crash state.
static void JNI_DeviceInfo_FillFields(
    JNIEnv* env,
    const jni_zero::JavaParamRef<jstring>& gmsVersionCode,
    jboolean isTV,
    jboolean isAutomotive,
    jboolean isFoldable,
    jboolean isDesktop,
    jint vulkanDeqpLevel) {
  DCHECK(!holder.has_value());
  auto java_string_to_const_char =
      [](const jni_zero::JavaParamRef<jstring>& str) {
        return strdup(ConvertJavaStringToUTF8(str).c_str());
      };
  holder =
      DeviceInfo{.gms_version_code = java_string_to_const_char(gmsVersionCode),
                 .is_tv = static_cast<bool>(isTV),
                 .is_automotive = static_cast<bool>(isAutomotive),
                 .is_foldable = static_cast<bool>(isFoldable),
                 .is_desktop = static_cast<bool>(isDesktop),
                 .vulkan_deqp_level = vulkanDeqpLevel};
}

const char* gms_version_code() {
  return get_device_info().gms_version_code;
}

void set_gms_version_code_for_test(const std::string& gms_version_code) {
  // The previous value is deliberately leaked, matching how these strings are
  // leaked in production.
  get_device_info().gms_version_code = strdup(gms_version_code.c_str());
  Java_DeviceInfo_setGmsVersionCodeForTest(AttachCurrentThread(),
                                           gms_version_code);
}

bool is_tv() {
  return get_device_info().is_tv;
}
bool is_automotive() {
  return get_device_info().is_automotive;
}
bool is_foldable() {
  return get_device_info().is_foldable;
}
bool is_desktop() {
  return get_device_info().is_desktop;
}
// Available only on Android T+.
int32_t vulkan_deqp_level() {
  return get_device_info().vulkan_deqp_level;
}
}  // namespace base::android::device_info

View file

@ -0,0 +1,24 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_DEVICE_INFO_H_
#define BASE_ANDROID_DEVICE_INFO_H_

#include <cstdint>
#include <string>

// Accessors for device-level properties. Values are initialized lazily on
// first use; the returned C string remains valid for the process lifetime.
namespace base::android::device_info {
// Version code string of GMS ("Google Play services") on the device.
const char* gms_version_code();
// Replaces the value returned by gms_version_code(); the old value is leaked.
void set_gms_version_code_for_test(const std::string& gms_version_code);
// Form-factor queries (populated from the Java side).
bool is_tv();
bool is_automotive();
bool is_foldable();
bool is_desktop();
// Available only on Android T+.
int32_t vulkan_deqp_level();
}  // namespace base::android::device_info
#endif  // BASE_ANDROID_DEVICE_INFO_H_

View file

@ -28,8 +28,8 @@ static void JNI_EarlyTraceEvent_RecordEarlyBeginEvent(JNIEnv* env,
TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(internal::kJavaTraceCategory);
trace_event_internal::AddTraceEventWithThreadIdAndTimestamps(
TRACE_EVENT_PHASE_BEGIN, category_group_enabled, name.c_str(),
/*scope=*/nullptr, trace_event_internal::kNoId, thread_id,
TimeTicks::FromJavaNanoTime(time_ns),
/*scope=*/nullptr, trace_event_internal::kNoId,
PlatformThreadId(thread_id), TimeTicks::FromJavaNanoTime(time_ns),
ThreadTicks() + Milliseconds(thread_time_ms),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
@ -45,8 +45,8 @@ static void JNI_EarlyTraceEvent_RecordEarlyEndEvent(JNIEnv* env,
TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(internal::kJavaTraceCategory);
trace_event_internal::AddTraceEventWithThreadIdAndTimestamps(
TRACE_EVENT_PHASE_END, category_group_enabled, name.c_str(),
/*scope=*/nullptr, trace_event_internal::kNoId, thread_id,
TimeTicks::FromJavaNanoTime(time_ns),
/*scope=*/nullptr, trace_event_internal::kNoId,
PlatformThreadId(thread_id), TimeTicks::FromJavaNanoTime(time_ns),
ThreadTicks() + Milliseconds(thread_time_ms),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
@ -64,8 +64,8 @@ static void JNI_EarlyTraceEvent_RecordEarlyToplevelBeginEvent(
internal::kToplevelTraceCategory);
trace_event_internal::AddTraceEventWithThreadIdAndTimestamps(
TRACE_EVENT_PHASE_BEGIN, category_group_enabled, name.c_str(),
/*scope=*/nullptr, trace_event_internal::kNoId, thread_id,
TimeTicks::FromJavaNanoTime(time_ns),
/*scope=*/nullptr, trace_event_internal::kNoId,
PlatformThreadId(thread_id), TimeTicks::FromJavaNanoTime(time_ns),
ThreadTicks() + Milliseconds(thread_time_ms),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
@ -83,8 +83,8 @@ static void JNI_EarlyTraceEvent_RecordEarlyToplevelEndEvent(
internal::kToplevelTraceCategory);
trace_event_internal::AddTraceEventWithThreadIdAndTimestamps(
TRACE_EVENT_PHASE_END, category_group_enabled, name.c_str(),
/*scope=*/nullptr, trace_event_internal::kNoId, thread_id,
TimeTicks::FromJavaNanoTime(time_ns),
/*scope=*/nullptr, trace_event_internal::kNoId,
PlatformThreadId(thread_id), TimeTicks::FromJavaNanoTime(time_ns),
ThreadTicks() + Milliseconds(thread_time_ms),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
@ -109,7 +109,7 @@ static void JNI_EarlyTraceEvent_RecordEarlyAsyncEndEvent(JNIEnv* env,
perfetto::Track(static_cast<uint64_t>(id)));
}
bool GetBackgroundStartupTracingFlag() {
bool GetBackgroundStartupTracingFlagFromJava() {
JNIEnv* env = jni_zero::AttachCurrentThread();
return base::android::Java_EarlyTraceEvent_getBackgroundStartupTracingFlag(
env);

View file

@ -12,7 +12,7 @@ namespace android {
// Returns true if background startup tracing flag was set on the previous
// startup.
BASE_EXPORT bool GetBackgroundStartupTracingFlag();
BASE_EXPORT bool GetBackgroundStartupTracingFlagFromJava();
// Sets a flag to chrome application preferences to enable startup tracing next
// time the app is started.

View file

@ -24,7 +24,8 @@ std::pair<std::string_view, const Feature*> MakeNameToFeaturePair(
return std::make_pair(feature->name, feature);
}
FeatureMap::FeatureMap(std::vector<const Feature*> features_exposed_to_java) {
FeatureMap::FeatureMap(
base::span<const Feature* const> features_exposed_to_java) {
mapping_ =
MakeFlatMap<std::string_view, raw_ptr<const Feature, CtnExperimental>>(
features_exposed_to_java, {}, &MakeNameToFeaturePair);

View file

@ -9,6 +9,7 @@
#include "base/base_export.h"
#include "base/containers/flat_map.h"
#include "base/containers/span.h"
#include "base/feature_list.h"
#include "base/memory/raw_ptr.h"
@ -22,7 +23,8 @@ namespace base::android {
// Each component should have its own FeatureMap.
class BASE_EXPORT FeatureMap {
public:
explicit FeatureMap(std::vector<const Feature*> featuresExposedToJava);
explicit FeatureMap(
base::span<const Feature* const> features_exposed_to_java);
~FeatureMap();
// Map a |feature_name| to a Feature*.

View file

@ -73,6 +73,8 @@ static std::string JNI_FieldTrialList_GetVariationParameter(
// friend the JNI function and is, in turn, friended by
// FieldTrialListIncludingLowAnonymity which allows for the private
// GetActiveFieldTrialGroups() to be reached.
static void JNI_FieldTrialList_LogActiveTrials(JNIEnv* env);
class AndroidFieldTrialListLogActiveTrialsFriendHelper {
private:
friend void ::JNI_FieldTrialList_LogActiveTrials(JNIEnv* env);
@ -110,3 +112,5 @@ static jboolean JNI_FieldTrialList_CreateFieldTrial(JNIEnv* env,
return base::FieldTrialList::CreateFieldTrial(trial_name, group_name) !=
nullptr;
}
DEFINE_JNI_FOR_FieldTrialList()

View file

@ -46,6 +46,7 @@ BASE_FEATURE(kYieldWithInputHint,
// Min time delta between checks for the input hint. Must be a smaller than
// time to produce a frame, but a bit longer than the time it takes to retrieve
// the hint.
// Note: Do not use the prepared macro as of no need for a local cache.
const base::FeatureParam<int> kPollIntervalMillisParam{&kYieldWithInputHint,
"poll_interval_ms", 1};

View file

@ -13,7 +13,7 @@
#include "base/android/jni_string.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/base_jni/IntStringCallback_jni.h"
#include "base/base_minimal_jni/IntStringCallback_jni.h"
namespace base {
namespace android {
@ -27,3 +27,5 @@ void RunIntStringCallbackAndroid(const JavaRef<jobject>& callback,
} // namespace android
} // namespace base
DEFINE_JNI_FOR_IntStringCallback()

View file

@ -11,16 +11,10 @@
#include "base/functional/callback.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "build/robolectric_buildflags.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/JavaExceptionReporter_jni.h" // nogncheck
#else
#include "base/base_jni/JavaExceptionReporter_jni.h"
#endif
#include "base/base_minimal_jni/JavaExceptionReporter_jni.h"
using jni_zero::JavaParamRef;
using jni_zero::JavaRef;
namespace base {
@ -83,7 +77,7 @@ void SetJavaException(const char* exception) {
void JNI_JavaExceptionReporter_ReportJavaException(
JNIEnv* env,
jboolean crash_after_report,
const JavaParamRef<jthrowable>& e) {
const JavaRef<jthrowable>& e) {
std::string exception_info = base::android::GetJavaExceptionInfo(env, e);
bool should_report_exception = g_java_exception_filter.Get().Run(e);
if (should_report_exception) {
@ -108,3 +102,5 @@ void JNI_JavaExceptionReporter_ReportJavaStackTrace(JNIEnv* env,
} // namespace android
} // namespace base
DEFINE_JNI_FOR_JavaExceptionReporter()

View file

@ -19,11 +19,8 @@
#include "build/robolectric_buildflags.h"
#include "third_party/jni_zero/jni_zero.h"
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/JniAndroid_jni.h" // nogncheck
#else
#include "base/base_jni/JniAndroid_jni.h"
#endif
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/base_minimal_jni/JniAndroid_jni.h"
namespace base {
namespace android {
@ -268,3 +265,5 @@ std::string GetJavaStackTraceIfPresent() {
} // namespace android
} // namespace base
DEFINE_JNI_FOR_JniAndroid()

View file

@ -0,0 +1,116 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/android/jni_callback.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/base_minimal_jni/JniCallbackUtils_jni.h"
#include "base/base_minimal_jni/JniOnceCallback_jni.h"
#include "base/base_minimal_jni/JniRepeatingCallback_jni.h"
namespace base::android {
namespace {
// Owns a heap-allocated wrapped OnceCallback until TransferToJava() hands the
// raw pointer to a Java JniOnceCallback object. After the transfer, the Java
// side is responsible for freeing the native wrapper via
// JNI_JniCallbackUtils_OnResult() (which runs it) or
// JNI_JniCallbackUtils_Destroy() (which discards it).
class JniOnceCallback {
public:
explicit JniOnceCallback(JniOnceWrappedCallbackType&& on_complete)
: wrapped_callback_(std::make_unique<JniOnceWrappedCallbackType>(
std::move(on_complete))) {}
~JniOnceCallback() = default;
JniOnceCallback(const JniOnceCallback&) = delete;
const JniOnceCallback& operator=(const JniOnceCallback&) = delete;
// Releases ownership of the wrapped callback into the new Java object as an
// opaque jlong pointer.
jni_zero::ScopedJavaLocalRef<jobject> TransferToJava(JNIEnv* env) && {
CHECK(wrapped_callback_);
CHECK(!wrapped_callback_->is_null());
return Java_JniOnceCallback_Constructor(
env, reinterpret_cast<jlong>(wrapped_callback_.release()));
}
private:
std::unique_ptr<JniOnceWrappedCallbackType> wrapped_callback_;
};
// Repeating-callback analogue of JniOnceCallback; same ownership protocol.
class JniRepeatingCallback {
public:
explicit JniRepeatingCallback(
const JniRepeatingWrappedCallbackType& on_complete)
: wrapped_callback_(
std::make_unique<JniRepeatingWrappedCallbackType>(on_complete)) {}
explicit JniRepeatingCallback(JniRepeatingWrappedCallbackType&& on_complete)
: wrapped_callback_(std::make_unique<JniRepeatingWrappedCallbackType>(
std::move(on_complete))) {}
~JniRepeatingCallback() = default;
// Releases ownership of the wrapped callback into the new Java object as an
// opaque jlong pointer.
jni_zero::ScopedJavaLocalRef<jobject> TransferToJava(JNIEnv* env) && {
CHECK(wrapped_callback_);
CHECK(!wrapped_callback_->is_null());
return Java_JniRepeatingCallback_Constructor(
env, reinterpret_cast<jlong>(wrapped_callback_.release()));
}
JniRepeatingCallback(const JniRepeatingCallback&) = delete;
const JniRepeatingCallback& operator=(const JniRepeatingCallback&) = delete;
private:
std::unique_ptr<JniRepeatingWrappedCallbackType> wrapped_callback_;
};
} // namespace
// Wraps a native once-callback as a Java callback object.
ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
JniOnceWrappedCallbackType&& callback) {
return JniOnceCallback(std::move(callback)).TransferToJava(env);
}
// Wraps a native repeating-callback as a Java callback object.
ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
JniRepeatingWrappedCallbackType&& callback) {
return JniRepeatingCallback(std::move(callback)).TransferToJava(env);
}
ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
const JniRepeatingWrappedCallbackType& callback) {
return JniRepeatingCallback(callback).TransferToJava(env);
}
// Called from Java when the callback fires. A once-callback is run and its
// native wrapper deleted here; a repeating callback is run but stays alive
// until JNI_JniCallbackUtils_Destroy().
void JNI_JniCallbackUtils_OnResult(
JNIEnv* env,
jlong callbackPtr,
jboolean isRepeating,
const jni_zero::JavaParamRef<jobject>& j_result) {
if (isRepeating) {
auto* callback =
reinterpret_cast<JniRepeatingWrappedCallbackType*>(callbackPtr);
callback->Run(j_result);
} else {
auto* callback = reinterpret_cast<JniOnceCallbackType*>(callbackPtr);
std::move(*callback).Run(j_result);
delete callback;
}
}
// Called from Java to free the native wrapper without running it.
void JNI_JniCallbackUtils_Destroy(JNIEnv* env,
jlong callbackPtr,
jboolean isRepeating) {
if (isRepeating) {
auto* callback =
reinterpret_cast<JniRepeatingWrappedCallbackType*>(callbackPtr);
// Call Reset to ensure all accidental use-after-frees fail loudly.
callback->Reset();
delete callback;
} else {
auto* callback = reinterpret_cast<JniOnceWrappedCallbackType*>(callbackPtr);
// Call Reset to ensure all accidental use-after-frees fail loudly.
callback->Reset();
delete callback;
}
}
} // namespace base::android
DEFINE_JNI_FOR_JniCallbackUtils()
DEFINE_JNI_FOR_JniOnceCallback()
DEFINE_JNI_FOR_JniRepeatingCallback()

View file

@ -0,0 +1,99 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_JNI_CALLBACK_H_
#define BASE_ANDROID_JNI_CALLBACK_H_
#include <jni.h>

#include <type_traits>
#include <utility>

#include "base/android/scoped_java_ref.h"
#include "base/base_export.h"
#include "base/functional/bind.h"
#include "base/functional/callback_forward.h"
#include "base/functional/callback_helpers.h"
#include "third_party/jni_zero/jni_zero.h"
namespace base::android {
// Signature of a native callback that receives the raw Java result object as
// passed across JNI. Callers convert the JavaRef themselves (or use the typed
// template overloads below, which do the conversion via jni_zero).
using JniOnceWrappedCallbackType =
    base::OnceCallback<void(const jni_zero::JavaRef<jobject>&)>;
using JniRepeatingWrappedCallbackType =
    base::RepeatingCallback<void(const jni_zero::JavaRef<jobject>&)>;
// Wraps |callback| in a Java object that can be handed across JNI; invoking
// the Java object routes the result back into |callback| on the native side.
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
    JNIEnv* env,
    JniOnceWrappedCallbackType&& callback);
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
    JNIEnv* env,
    JniRepeatingWrappedCallbackType&& callback);
// Const-reference overload for repeating callbacks; the callback is copied.
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
    JNIEnv* env,
    const JniRepeatingWrappedCallbackType& callback);
// Adapts a typed one-argument base::OnceCallback for use from Java. The Java
// result object is converted to |Arg| via jni_zero::FromJniType. Java
// callbacks carry no return value, so anything returned by |callback| is
// discarded.
template <typename R, typename Arg>
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
    JNIEnv* env,
    base::OnceCallback<R(Arg)>&& callback) {
  auto adapter = [](base::OnceCallback<R(Arg)> wrapped,
                    const jni_zero::JavaRef<jobject>& j_result) {
    Arg native_result =
        jni_zero::FromJniType<Arg>(jni_zero::AttachCurrentThread(), j_result);
    std::move(wrapped).Run(native_result);
  };
  return ToJniCallback(env, base::BindOnce(adapter, std::move(callback)));
}
// Adapts a zero-argument base::OnceCallback for use from Java. The Java-side
// result object is ignored, as is any value returned by |callback|.
template <typename R>
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
    JNIEnv* env,
    base::OnceCallback<R()>&& callback) {
  auto adapter = [](base::OnceCallback<R()> wrapped,
                    const jni_zero::JavaRef<jobject>&) {
    std::move(wrapped).Run();
  };
  return ToJniCallback(env, base::BindOnce(adapter, std::move(callback)));
}
// Adapts a typed one-argument base::RepeatingCallback for use from Java; the
// Java object may be invoked multiple times, each invocation converting the
// Java result to |Arg| via jni_zero::FromJniType. Java callbacks carry no
// return value, so anything returned by |callback| is discarded.
template <typename R, typename Arg>
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
    JNIEnv* env,
    const base::RepeatingCallback<R(Arg)>& callback) {
  auto adapter = [](const base::RepeatingCallback<R(Arg)>& wrapped,
                    const jni_zero::JavaRef<jobject>& j_result) {
    Arg native_result =
        jni_zero::FromJniType<Arg>(jni_zero::AttachCurrentThread(), j_result);
    wrapped.Run(native_result);
  };
  return ToJniCallback(env, base::BindRepeating(adapter, callback));
}
// Adapts a zero-argument base::RepeatingCallback for use from Java. The
// Java-side result object is ignored, as is any value returned by |callback|.
template <typename R>
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
    JNIEnv* env,
    const base::RepeatingCallback<R()>& callback) {
  auto adapter = [](const base::RepeatingCallback<R()>& wrapped,
                    const jni_zero::JavaRef<jobject>&) { wrapped.Run(); };
  return ToJniCallback(env, base::BindRepeating(adapter, callback));
}
} // namespace base::android
#endif // BASE_ANDROID_JNI_CALLBACK_H_

View file

@ -82,6 +82,10 @@ std::string ConvertJavaStringToUTF8(JNIEnv* env, const JavaRef<jstring>& str) {
ScopedJavaLocalRef<jstring> ConvertUTF8ToJavaString(JNIEnv* env,
std::string_view str) {
// ART allocates new empty strings, so use a singleton when applicable.
if (str.empty()) {
return jni_zero::g_empty_string.AsLocalRef(env);
}
// JNI's NewStringUTF expects "modified" UTF8 so instead create the string
// via our own UTF16 conversion utility.
// Further, Dalvik requires the string passed into NewStringUTF() to come from
@ -146,6 +150,10 @@ std::u16string ConvertJavaStringToUTF16(JNIEnv* env,
ScopedJavaLocalRef<jstring> ConvertUTF16ToJavaString(JNIEnv* env,
std::u16string_view str) {
// ART allocates new empty strings, so use a singleton when applicable.
if (str.empty()) {
return jni_zero::g_empty_string.AsLocalRef(env);
}
return ScopedJavaLocalRef<jstring>(env,
ConvertUTF16ToJavaStringImpl(env, str));
}

View file

@ -7,26 +7,29 @@
#include "base/android/jni_string.h"
#include "base/android/scoped_java_ref.h"
#include "base/containers/flat_map.h"
#include "base/lazy_instance.h"
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"
#include "build/robolectric_buildflags.h"
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/JNIUtils_jni.h" // nogncheck
#else
#include "base/base_jni/JNIUtils_jni.h"
#endif
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/base_minimal_jni/JNIUtils_jni.h"
namespace base {
namespace android {
jobject GetSplitClassLoader(JNIEnv* env, const char* split_name) {
struct LockAndMap {
base::Lock lock;
base::flat_map<const char*, ScopedJavaGlobalRef<jobject>> map;
};
namespace {
struct LockAndMap {
base::Lock lock;
base::flat_map<const char*, ScopedJavaGlobalRef<jobject>> map;
};
LockAndMap* GetLockAndMap() {
static base::NoDestructor<LockAndMap> lock_and_map;
return lock_and_map.get();
}
} // namespace
jobject GetSplitClassLoader(JNIEnv* env, const char* split_name) {
LockAndMap* lock_and_map = GetLockAndMap();
base::AutoLock guard(lock_and_map->lock);
auto it = lock_and_map->map.find(split_name);
if (it != lock_and_map->map.end()) {
@ -34,7 +37,7 @@ jobject GetSplitClassLoader(JNIEnv* env, const char* split_name) {
}
ScopedJavaGlobalRef<jobject> class_loader(
Java_JNIUtils_getSplitClassLoader(env, split_name));
env, Java_JNIUtils_getSplitClassLoader(env, split_name));
jobject class_loader_obj = class_loader.obj();
lock_and_map->map.insert({split_name, std::move(class_loader)});
return class_loader_obj;
@ -42,3 +45,5 @@ jobject GetSplitClassLoader(JNIEnv* env, const char* split_name) {
} // namespace android
} // namespace base
DEFINE_JNI_FOR_JNIUtils()

View file

@ -10,19 +10,15 @@
#include "base/android/library_loader/anchor_functions_buildflags.h"
#include "base/android/library_loader/library_prefetcher.h"
#include "base/android/orderfile/orderfile_buildflags.h"
#include "base/android/sys_utils.h"
#include "base/at_exit.h"
#include "base/base_switches.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "build/robolectric_buildflags.h"
#include "base/system/sys_info.h"
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/LibraryLoader_jni.h" // nogncheck
#else
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/library_loader_jni/LibraryLoader_jni.h"
#endif
#if BUILDFLAG(ORDERFILE_INSTRUMENTATION)
#include "base/android/orderfile/orderfile_instrumentation.h"
@ -46,7 +42,7 @@ LibraryProcessType GetLibraryProcessType() {
bool IsUsingOrderfileOptimization() {
#if BUILDFLAG(SUPPORTS_CODE_ORDERING)
return SysUtils::IsLowEndDeviceFromJni();
return SysInfo::IsLowEndDevice();
#else // !SUPPORTS_CODE_ORDERING
return false;
#endif

View file

@ -15,6 +15,7 @@
#include <sys/wait.h>
#include <unistd.h>
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <memory>
@ -28,9 +29,9 @@
#include "base/files/file.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/metrics/histogram_functions.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process_metrics.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
@ -195,6 +196,8 @@ void Prefetch(size_t start, size_t end) {
// These values were used in the past for recording
// "LibraryLoader.PrefetchDetailedStatus".
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused. See PrefetchStatus in enums.xml.
enum class PrefetchStatus {
kSuccess = 0,
kWrongOrdering = 1,
@ -275,7 +278,12 @@ void NativeLibraryPrefetcher::ForkAndPrefetchNativeLibrary(bool ordered_only) {
// would create a dump as well.
return;
#else
base::TimeTicks start_time = base::TimeTicks::Now();
PrefetchStatus status = ForkAndPrefetch(ordered_only);
base::UmaHistogramMediumTimes("Android.LibraryLoader.Prefetch.Duration",
base::TimeTicks::Now() - start_time);
base::UmaHistogramEnumeration("Android.LibraryLoader.Prefetch.Status",
status);
if (status != PrefetchStatus::kSuccess) {
LOG(WARNING) << "Cannot prefetch the library. status = "
<< static_cast<int>(status);
@ -296,7 +304,7 @@ int NativeLibraryPrefetcher::PercentageOfResidentCode(size_t start,
}
total_pages += residency.size();
resident_pages += static_cast<size_t>(
ranges::count_if(residency, [](unsigned char x) { return x & 1; }));
std::ranges::count_if(residency, [](unsigned char x) { return x & 1; }));
if (total_pages == 0) {
return -1;
}

View file

@ -25,6 +25,12 @@ static void JNI_LibraryPrefetcher_ForkAndPrefetchNativeLibrary(JNIEnv* env) {
#endif
}
static void JNI_LibraryPrefetcher_PrefetchNativeLibraryForWebView(JNIEnv* env) {
#if BUILDFLAG(SUPPORTS_CODE_ORDERING)
return NativeLibraryPrefetcher::ForkAndPrefetchNativeLibrary(false);
#endif
}
static jint JNI_LibraryPrefetcher_PercentageOfResidentNativeLibraryCode(
JNIEnv* env) {
#if BUILDFLAG(SUPPORTS_CODE_ORDERING)

View file

@ -30,7 +30,3 @@ static jboolean JNI_MemoryPurgeManager_IsOnPreFreezeMemoryTrimEnabled(
JNIEnv* env) {
return base::android::PreFreezeBackgroundMemoryTrimmer::ShouldUseModernTrim();
}
static jboolean JNI_MemoryPurgeManager_IsSelfFreezeEnabled(JNIEnv* env) {
return base::FeatureList::IsEnabled(base::android::kShouldFreezeSelf);
}

View file

@ -2,10 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <map>
#include "base/android/callback_android.h"
#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/containers/map_util.h"
#include "base/format_macros.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
@ -14,14 +17,9 @@
#include "base/metrics/user_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "build/robolectric_buildflags.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/NativeUmaRecorder_jni.h" // nogncheck
#else
#include "base/metrics_jni/NativeUmaRecorder_jni.h"
#endif
namespace base {
namespace android {
@ -29,25 +27,25 @@ namespace android {
namespace {
using HistogramsSnapshot =
std::map<std::string, std::unique_ptr<HistogramSamples>>;
std::map<std::string, std::unique_ptr<HistogramSamples>, std::less<>>;
std::string HistogramConstructionParamsToString(HistogramBase* histogram) {
std::string params_str = histogram->histogram_name();
std::string_view name = histogram->histogram_name();
switch (histogram->GetHistogramType()) {
case HISTOGRAM:
case LINEAR_HISTOGRAM:
case BOOLEAN_HISTOGRAM:
case CUSTOM_HISTOGRAM: {
Histogram* hist = static_cast<Histogram*>(histogram);
params_str += StringPrintf("/%d/%d/%" PRIuS, hist->declared_min(),
hist->declared_max(), hist->bucket_count());
break;
return StringPrintf("%.*s/%d/%d/%" PRIuS, name.length(), name.data(),
hist->declared_min(), hist->declared_max(),
hist->bucket_count());
}
case SPARSE_HISTOGRAM:
case DUMMY_HISTOGRAM:
break;
}
return params_str;
return std::string(name);
}
// Convert a jlong |histogram_hint| from Java to a HistogramBase* via a cast.
@ -279,9 +277,9 @@ JNI_NativeUmaRecorder_GetHistogramSamplesForTesting(JNIEnv* env,
std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
for (auto sampleCountIterator = samples->Iterator();
!sampleCountIterator->Done(); sampleCountIterator->Next()) {
HistogramBase::Sample min;
HistogramBase::Sample32 min;
int64_t max;
HistogramBase::Count count;
HistogramBase::Count32 count;
sampleCountIterator->Get(&min, &max, &count);
buckets.push_back(min);
buckets.push_back(max);
@ -294,7 +292,8 @@ JNI_NativeUmaRecorder_GetHistogramSamplesForTesting(JNIEnv* env,
jlong JNI_NativeUmaRecorder_CreateHistogramSnapshotForTesting(JNIEnv* env) {
HistogramsSnapshot* snapshot = new HistogramsSnapshot();
for (const auto* const histogram : StatisticsRecorder::GetHistograms()) {
(*snapshot)[histogram->histogram_name()] = histogram->SnapshotSamples();
InsertOrAssign(*snapshot, histogram->histogram_name(),
histogram->SnapshotSamples());
}
return reinterpret_cast<intptr_t>(snapshot);
}

View file

@ -28,6 +28,7 @@
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
#include "base/trace_event/named_trigger.h" // no-presubmit-check
namespace base::android {
namespace {
@ -73,8 +74,7 @@ std::string GetPreFreezeMetricName(std::string_view name,
std::string GetSelfCompactionMetricName(std::string_view name,
std::string_view suffix) {
const char* process_type = GetProcessType();
return StrCat({"Memory.SelfCompact.", process_type, ".", name, ".", suffix});
return StrCat({"Memory.SelfCompact2.Renderer.", name, ".", suffix});
}
class PrivateMemoryFootprintMetric
@ -221,7 +221,7 @@ void PreFreezeBackgroundMemoryTrimmer::RecordMetrics() {
// determine the current process, which is used for the names of metrics
// below.
CHECK(base::CommandLine::InitializedForCurrentProcess());
base::AutoLock locker(lock_);
base::AutoLock locker(lock());
if (metrics_.size() != values_before_.size()) {
UmaHistogramEnumeration("Memory.PreFreeze2.RecordMetricsFailureType",
MetricsFailure::kSizeMismatch);
@ -306,7 +306,7 @@ void PreFreezeBackgroundMemoryTrimmer::CompactionMetric::
return;
}
if (!ShouldContinueSelfCompaction(started_at_)) {
if (!ShouldContinueSelfCompaction(self_compaction_triggered_at_)) {
return;
}
@ -329,7 +329,7 @@ void PreFreezeBackgroundMemoryTrimmer::CompactionMetric::
void PreFreezeBackgroundMemoryTrimmer::CompactionMetric::RecordSmapsRollup(
std::optional<debug::SmapsRollup>* target) {
if (!ShouldContinueSelfCompaction(started_at_)) {
if (!ShouldContinueSelfCompaction(self_compaction_triggered_at_)) {
return;
}
@ -402,7 +402,7 @@ void PreFreezeBackgroundMemoryTrimmer::PostDelayedBackgroundTaskModern(
return;
}
base::AutoLock locker(lock_);
base::AutoLock locker(lock());
PostDelayedBackgroundTaskModernHelper(std::move(task_runner), from_here,
std::move(task), delay);
}
@ -423,7 +423,7 @@ PreFreezeBackgroundMemoryTrimmer::PostDelayedBackgroundTaskModernHelper(
// static
void PreFreezeBackgroundMemoryTrimmer::RegisterMemoryMetric(
const PreFreezeMetric* metric) {
base::AutoLock locker(Instance().lock_);
base::AutoLock locker(lock());
Instance().RegisterMemoryMetricInternal(metric);
}
@ -441,7 +441,7 @@ void PreFreezeBackgroundMemoryTrimmer::RegisterMemoryMetricInternal(
// static
void PreFreezeBackgroundMemoryTrimmer::UnregisterMemoryMetric(
const PreFreezeMetric* metric) {
base::AutoLock locker(Instance().lock_);
base::AutoLock locker(lock());
Instance().UnregisterMemoryMetricInternal(metric);
}
@ -457,6 +457,12 @@ void PreFreezeBackgroundMemoryTrimmer::UnregisterMemoryMetricInternal(
metrics_.erase(metrics_.begin() + index);
}
void PreFreezeBackgroundMemoryTrimmer::SetOnStartSelfCompactionCallback(
base::RepeatingCallback<void(void)> callback) {
base::AutoLock locker(lock());
Instance().on_self_compact_callback_ = callback;
}
// static
bool PreFreezeBackgroundMemoryTrimmer::SelfCompactionIsSupported() {
return IsMadvisePageoutSupported();
@ -464,10 +470,10 @@ bool PreFreezeBackgroundMemoryTrimmer::SelfCompactionIsSupported() {
// static
bool PreFreezeBackgroundMemoryTrimmer::ShouldContinueSelfCompaction(
base::TimeTicks self_compaction_started_at) {
base::AutoLock locker(Instance().lock_);
base::TimeTicks self_compaction_triggered_at) {
base::AutoLock locker(lock());
return Instance().self_compaction_last_cancelled_ <
self_compaction_started_at;
self_compaction_triggered_at;
}
void PreFreezeBackgroundMemoryTrimmer::MaybePostSelfCompactionTask(
@ -475,19 +481,18 @@ void PreFreezeBackgroundMemoryTrimmer::MaybePostSelfCompactionTask(
std::vector<debug::MappedMemoryRegion> regions,
scoped_refptr<CompactionMetric> metric,
uint64_t max_size,
base::TimeTicks started_at) {
base::TimeTicks triggered_at) {
TRACE_EVENT0("base", "MaybePostSelfCompactionTask");
if (ShouldContinueSelfCompaction(started_at) && !regions.empty()) {
if (ShouldContinueSelfCompaction(triggered_at) && !regions.empty()) {
task_runner->PostDelayedTask(
FROM_HERE,
// |base::Unretained| is safe here because we never destroy |this|.
base::BindOnce(&PreFreezeBackgroundMemoryTrimmer::SelfCompactionTask,
base::Unretained(this), std::move(task_runner),
std::move(regions), std::move(metric), max_size,
started_at),
base::Unretained(this), task_runner, std::move(regions),
std::move(metric), max_size, triggered_at),
GetDelayBetweenSelfCompaction());
} else {
FinishSelfCompaction(std::move(metric), started_at);
FinishSelfCompaction(std::move(metric), triggered_at);
}
}
@ -496,8 +501,8 @@ void PreFreezeBackgroundMemoryTrimmer::SelfCompactionTask(
std::vector<debug::MappedMemoryRegion> regions,
scoped_refptr<CompactionMetric> metric,
uint64_t max_size,
base::TimeTicks started_at) {
if (!ShouldContinueSelfCompaction(started_at)) {
base::TimeTicks triggered_at) {
if (!ShouldContinueSelfCompaction(triggered_at)) {
return;
}
@ -506,27 +511,45 @@ void PreFreezeBackgroundMemoryTrimmer::SelfCompactionTask(
CompactMemory(&regions, max_size);
MaybePostSelfCompactionTask(std::move(task_runner), std::move(regions),
std::move(metric), max_size, started_at);
std::move(metric), max_size, triggered_at);
}
void PreFreezeBackgroundMemoryTrimmer::StartSelfCompaction(
scoped_refptr<base::SequencedTaskRunner> task_runner,
std::vector<debug::MappedMemoryRegion> regions,
scoped_refptr<CompactionMetric> metric,
uint64_t max_bytes,
base::TimeTicks started_at) {
base::TimeTicks triggered_at) {
scoped_refptr<CompactionMetric> metric =
MakeRefCounted<CompactionMetric>(triggered_at, base::TimeTicks::Now());
TRACE_EVENT0("base", "StartSelfCompaction");
base::trace_event::EmitNamedTrigger("start-self-compaction");
{
base::AutoLock locker(lock());
process_compacted_metadata_.emplace(
"PreFreezeBackgroundMemoryTrimmer.ProcessCompacted",
/*is_compacted=*/1, base::SampleMetadataScope::kProcess);
if (on_self_compact_callback_) {
on_self_compact_callback_.Run();
}
}
metric->RecordBeforeMetrics();
SelfCompactionTask(std::move(task_runner), std::move(regions),
std::move(metric), max_bytes, started_at);
MaybePostSelfCompactionTask(std::move(task_runner), std::move(regions),
std::move(metric), max_bytes, triggered_at);
}
void PreFreezeBackgroundMemoryTrimmer::FinishSelfCompaction(
scoped_refptr<CompactionMetric> metric,
base::TimeTicks started_at) {
base::TimeTicks triggered_at) {
TRACE_EVENT0("base", "FinishSelfCompaction");
if (ShouldContinueSelfCompaction(started_at)) {
{
base::AutoLock locker(lock());
self_compaction_last_finished_ = base::TimeTicks::Now();
}
if (ShouldContinueSelfCompaction(triggered_at)) {
metric->RecordDelayedMetrics();
base::AutoLock locker(lock());
metric->RecordTimeMetrics(self_compaction_last_finished_,
self_compaction_last_cancelled_);
}
}
@ -539,57 +562,80 @@ PreFreezeBackgroundMemoryTrimmer::GetDelayBetweenSelfCompaction() {
}
// static
void PreFreezeBackgroundMemoryTrimmer::MaybeCancelSelfCompaction() {
Instance().MaybeCancelSelfCompactionInternal();
void PreFreezeBackgroundMemoryTrimmer::MaybeCancelSelfCompaction(
SelfCompactCancellationReason cancellation_reason) {
Instance().MaybeCancelSelfCompactionInternal(cancellation_reason);
}
void PreFreezeBackgroundMemoryTrimmer::MaybeCancelSelfCompactionInternal() {
base::AutoLock locker(lock_);
self_compaction_last_cancelled_ = base::TimeTicks::Now();
void PreFreezeBackgroundMemoryTrimmer::MaybeCancelSelfCompactionInternal(
SelfCompactCancellationReason cancellation_reason) {
base::AutoLock locker(lock());
process_compacted_metadata_.reset();
// Check for the last time cancelled here in order to avoid recording this
// metric multiple times. Also, only record this metric if a compaction is
// currently running.
if (self_compaction_last_cancelled_ < self_compaction_last_triggered_ &&
self_compaction_last_finished_ < self_compaction_last_triggered_) {
UmaHistogramEnumeration("Memory.SelfCompact2.Renderer.CancellationReason2",
cancellation_reason);
}
self_compaction_last_finished_ = self_compaction_last_cancelled_ =
base::TimeTicks::Now();
}
// static
void PreFreezeBackgroundMemoryTrimmer::CompactSelf() {
void PreFreezeBackgroundMemoryTrimmer::CompactSelf(
scoped_refptr<base::SequencedTaskRunner> task_runner,
base::TimeTicks triggered_at) {
// MADV_PAGEOUT was only added in Linux 5.4, so do nothing in earlier
// versions.
if (!SelfCompactionIsSupported()) {
return;
}
if (!ShouldContinueSelfCompaction(triggered_at)) {
return;
}
TRACE_EVENT0("base", "CompactSelf");
std::vector<debug::MappedMemoryRegion> regions;
std::string proc_maps;
if (!debug::ReadProcMaps(&proc_maps) || !ParseProcMaps(proc_maps, &regions)) {
return;
// We still start the task in the control group, in order to record metrics.
if (base::FeatureList::IsEnabled(kShouldFreezeSelf)) {
std::string proc_maps;
if (!debug::ReadProcMaps(&proc_maps) ||
!ParseProcMaps(proc_maps, &regions)) {
return;
}
if (regions.size() == 0) {
return;
}
}
if (regions.size() == 0) {
return;
}
auto started_at = base::TimeTicks::Now();
Instance().StartSelfCompaction(
base::ThreadPool::CreateSequencedTaskRunner(
{base::TaskPriority::BEST_EFFORT, MayBlock()}),
std::move(regions), MakeRefCounted<CompactionMetric>(started_at),
MiBToBytes(kShouldFreezeSelfMaxSize.Get()), started_at);
Instance().StartSelfCompaction(std::move(task_runner), std::move(regions),
MiBToBytes(kShouldFreezeSelfMaxSize.Get()),
triggered_at);
}
// static
std::optional<uint64_t> PreFreezeBackgroundMemoryTrimmer::CompactRegion(
debug::MappedMemoryRegion region) {
#if defined(MADV_PAGEOUT)
using Permission = debug::MappedMemoryRegion::Permission;
// Skip file-backed regions
if (region.inode != 0 || region.dev_major != 0) {
return 0;
}
// Skip shared regions
if ((region.permissions & debug::MappedMemoryRegion::Permission::PRIVATE) ==
0) {
if ((region.permissions & Permission::PRIVATE) == 0) {
return 0;
}
const bool is_inaccessible =
(region.permissions &
(Permission::READ | Permission::WRITE | Permission::EXECUTE)) == 0;
TRACE_EVENT1("base", __PRETTY_FUNCTION__, "size", region.end - region.start);
int error = madvise(reinterpret_cast<void*>(region.start),
@ -610,7 +656,7 @@ std::optional<uint64_t> PreFreezeBackgroundMemoryTrimmer::CompactRegion(
return 0;
}
return region.end - region.start;
return is_inaccessible ? 0 : region.end - region.start;
#else
return std::nullopt;
#endif
@ -646,23 +692,25 @@ void PreFreezeBackgroundMemoryTrimmer::PostMetricsTasksIfModern() {
// static
void PreFreezeBackgroundMemoryTrimmer::OnSelfFreeze() {
if (!base::FeatureList::IsEnabled(kShouldFreezeSelf)) {
return;
}
TRACE_EVENT0("base", "OnSelfFreeze");
Instance().OnSelfFreezeInternal();
auto task_runner = base::ThreadPool::CreateSequencedTaskRunner(
{base::TaskPriority::BEST_EFFORT, MayBlock()});
Instance().OnSelfFreezeInternal(std::move(task_runner));
}
void PreFreezeBackgroundMemoryTrimmer::OnSelfFreezeInternal() {
base::AutoLock locker(lock_);
RunPreFreezeTasks();
base::ThreadPool::PostDelayedTask(
FROM_HERE, {base::TaskPriority::BEST_EFFORT, MayBlock()},
void PreFreezeBackgroundMemoryTrimmer::OnSelfFreezeInternal(
scoped_refptr<SequencedTaskRunner> task_runner) {
const auto triggered_at = base::TimeTicks::Now();
base::AutoLock locker(lock());
self_compaction_last_triggered_ = triggered_at;
if (base::FeatureList::IsEnabled(kShouldFreezeSelf)) {
RunPreFreezeTasks();
}
task_runner->PostDelayedTask(
FROM_HERE,
base::BindOnce(&PreFreezeBackgroundMemoryTrimmer::CompactSelf,
base::Unretained(this)),
base::Unretained(this), task_runner, triggered_at),
base::Seconds(kShouldFreezeSelfDelayAfterPreFreezeTasks.Get()));
}
@ -671,7 +719,7 @@ void PreFreezeBackgroundMemoryTrimmer::OnPreFreeze() {
// If we have scheduled a self compaction task, cancel it, since App Freezer
// will handle the compaction for us, and we don't want to potentially run
// self compaction after we have resumed.
MaybeCancelSelfCompaction();
MaybeCancelSelfCompaction(SelfCompactCancellationReason::kAppFreezer);
Instance().OnPreFreezeInternal();
}
@ -696,13 +744,13 @@ void PreFreezeBackgroundMemoryTrimmer::RunPreFreezeTasks() {
// (1) To avoid holding it too long while running all the background tasks.
// (2) To prevent a deadlock if the |background_task| needs to acquire the
// lock (e.g. to post another task).
base::AutoUnlock unlocker(lock_);
base::AutoUnlock unlocker(lock());
BackgroundTask::RunNow(std::move(background_task));
}
}
void PreFreezeBackgroundMemoryTrimmer::OnPreFreezeInternal() {
base::AutoLock locker(lock_);
base::AutoLock locker(lock());
PostMetricsTasksIfModern();
if (!ShouldUseModernTrim()) {
@ -720,13 +768,13 @@ void PreFreezeBackgroundMemoryTrimmer::UnregisterBackgroundTask(
void PreFreezeBackgroundMemoryTrimmer::UnregisterBackgroundTaskInternal(
BackgroundTask* timer) {
base::AutoLock locker(lock_);
base::AutoLock locker(lock());
std::erase_if(background_tasks_, [&](auto& t) { return t.get() == timer; });
}
// static
void PreFreezeBackgroundMemoryTrimmer::RegisterPrivateMemoryFootprintMetric() {
base::AutoLock locker(Instance().lock_);
base::AutoLock locker(lock());
static base::NoDestructor<PrivateMemoryFootprintMetric> pmf_metric;
if (!PrivateMemoryFootprintMetric::did_register_) {
PrivateMemoryFootprintMetric::did_register_ = true;
@ -757,40 +805,41 @@ void PreFreezeBackgroundMemoryTrimmer::SetSupportsModernTrimForTesting(
// static
void PreFreezeBackgroundMemoryTrimmer::ClearMetricsForTesting() {
base::AutoLock locker(Instance().lock_);
base::AutoLock locker(lock());
Instance().metrics_.clear();
PrivateMemoryFootprintMetric::did_register_ = false;
}
bool PreFreezeBackgroundMemoryTrimmer::DidRegisterTasksForTesting() const {
base::AutoLock locker(lock_);
base::AutoLock locker(lock());
return metrics_.size() != 0;
}
size_t
PreFreezeBackgroundMemoryTrimmer::GetNumberOfPendingBackgroundTasksForTesting()
const {
base::AutoLock locker(lock_);
base::AutoLock locker(lock());
return background_tasks_.size();
}
size_t PreFreezeBackgroundMemoryTrimmer::GetNumberOfKnownMetricsForTesting()
const {
base::AutoLock locker(lock_);
base::AutoLock locker(lock());
return metrics_.size();
}
size_t PreFreezeBackgroundMemoryTrimmer::GetNumberOfValuesBeforeForTesting()
const {
base::AutoLock locker(lock_);
base::AutoLock locker(lock());
return values_before_.size();
}
// static
void PreFreezeBackgroundMemoryTrimmer::
ResetSelfCompactionLastCancelledForTesting() {
base::AutoLock locker(Instance().lock_);
void PreFreezeBackgroundMemoryTrimmer::ResetSelfCompactionForTesting() {
base::AutoLock locker(lock());
Instance().self_compaction_last_cancelled_ = base::TimeTicks::Min();
Instance().self_compaction_last_finished_ = base::TimeTicks::Min();
Instance().self_compaction_last_triggered_ = base::TimeTicks::Min();
}
// static
@ -870,8 +919,10 @@ PreFreezeBackgroundMemoryTrimmer::PreFreezeMetric::PreFreezeMetric(
PreFreezeBackgroundMemoryTrimmer::PreFreezeMetric::~PreFreezeMetric() = default;
PreFreezeBackgroundMemoryTrimmer::CompactionMetric::CompactionMetric(
base::TimeTicks triggered_at,
base::TimeTicks started_at)
: started_at_(started_at) {}
: self_compaction_triggered_at_(triggered_at),
self_compaction_started_at_(started_at) {}
PreFreezeBackgroundMemoryTrimmer::CompactionMetric::~CompactionMetric() =
default;
@ -887,4 +938,13 @@ void PreFreezeBackgroundMemoryTrimmer::CompactionMetric::
RecordSmapsRollupWithDelay(&smaps_after_60s_, base::Seconds(60));
}
void PreFreezeBackgroundMemoryTrimmer::CompactionMetric::RecordTimeMetrics(
base::TimeTicks last_finished,
base::TimeTicks last_cancelled) {
UmaHistogramMediumTimes("Memory.SelfCompact2.Renderer.SelfCompactionTime",
last_finished - self_compaction_started_at_);
UmaHistogramMediumTimes("Memory.SelfCompact2.Renderer.TimeSinceLastCancel",
last_finished - last_cancelled);
}
} // namespace base::android

View file

@ -13,6 +13,7 @@
#include "base/functional/callback.h"
#include "base/memory/post_delayed_memory_reduction_task.h"
#include "base/no_destructor.h"
#include "base/profiler/sample_metadata.h"
#include "base/task/delayed_task_handle.h"
#include "base/task/sequenced_task_runner.h"
#include "base/timer/timer.h"
@ -34,6 +35,14 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kShouldFreezeSelf);
// be frozen.
class BASE_EXPORT PreFreezeBackgroundMemoryTrimmer {
public:
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
enum class SelfCompactCancellationReason {
kAppFreezer,
kPageResumed,
kMaxValue = kPageResumed
};
static PreFreezeBackgroundMemoryTrimmer& Instance();
~PreFreezeBackgroundMemoryTrimmer() = delete;
@ -44,7 +53,7 @@ class BASE_EXPORT PreFreezeBackgroundMemoryTrimmer {
scoped_refptr<base::SequencedTaskRunner> task_runner,
const base::Location& from_here,
OnceCallback<void(void)> task,
base::TimeDelta delay) LOCKS_EXCLUDED(lock_) {
base::TimeDelta delay) LOCKS_EXCLUDED(lock()) {
PostDelayedBackgroundTask(
task_runner, from_here,
BindOnce(
@ -59,7 +68,7 @@ class BASE_EXPORT PreFreezeBackgroundMemoryTrimmer {
scoped_refptr<base::SequencedTaskRunner> task_runner,
const base::Location& from_here,
OnceCallback<void(MemoryReductionTaskContext)> task,
base::TimeDelta delay) LOCKS_EXCLUDED(lock_);
base::TimeDelta delay) LOCKS_EXCLUDED(lock());
class PreFreezeMetric {
public:
@ -101,38 +110,47 @@ class BASE_EXPORT PreFreezeBackgroundMemoryTrimmer {
// See "Memory.PreFreeze2.{process_type}.{name}.{suffix}" for details on the
// exact metrics.
static void RegisterMemoryMetric(const PreFreezeMetric* metric)
LOCKS_EXCLUDED(Instance().lock_);
LOCKS_EXCLUDED(lock());
static void UnregisterMemoryMetric(const PreFreezeMetric* metric)
LOCKS_EXCLUDED(Instance().lock_);
LOCKS_EXCLUDED(lock());
// The callback runs in the thread pool. The caller cannot make any thread
// safety assumptions for the callback execution (e.g. it could run
// concurrently with the thread that registered it).
static void SetOnStartSelfCompactionCallback(base::RepeatingClosure callback)
LOCKS_EXCLUDED(lock());
static bool SelfCompactionIsSupported();
// Compacts the memory for the process.
void CompactSelf();
void CompactSelf(scoped_refptr<SequencedTaskRunner> task_runner,
base::TimeTicks triggered_at);
// If we are currently running self compaction, cancel it.
static void MaybeCancelSelfCompaction();
// If we are currently running self compaction, cancel it. If it was running,
// record a metric with the reason for the cancellation.
static void MaybeCancelSelfCompaction(
SelfCompactCancellationReason cancellation_reason);
static void SetSupportsModernTrimForTesting(bool is_supported);
static void ClearMetricsForTesting() LOCKS_EXCLUDED(lock_);
static void ClearMetricsForTesting() LOCKS_EXCLUDED(lock());
size_t GetNumberOfPendingBackgroundTasksForTesting() const
LOCKS_EXCLUDED(lock_);
size_t GetNumberOfKnownMetricsForTesting() const LOCKS_EXCLUDED(lock_);
size_t GetNumberOfValuesBeforeForTesting() const LOCKS_EXCLUDED(lock_);
LOCKS_EXCLUDED(lock());
size_t GetNumberOfKnownMetricsForTesting() const LOCKS_EXCLUDED(lock());
size_t GetNumberOfValuesBeforeForTesting() const LOCKS_EXCLUDED(lock());
bool DidRegisterTasksForTesting() const;
static void OnPreFreezeForTesting() LOCKS_EXCLUDED(lock_) { OnPreFreeze(); }
static void ResetSelfCompactionLastCancelledForTesting();
static void OnPreFreezeForTesting() LOCKS_EXCLUDED(lock()) { OnPreFreeze(); }
static void ResetSelfCompactionForTesting();
static std::optional<uint64_t> CompactRegion(
debug::MappedMemoryRegion region);
// Called when Chrome is about to be frozen. Runs as many delayed tasks as
// possible immediately, before we are frozen.
static void OnPreFreeze() LOCKS_EXCLUDED(lock_);
static void OnPreFreeze() LOCKS_EXCLUDED(lock());
static void OnSelfFreeze() LOCKS_EXCLUDED(lock_);
static void OnSelfFreeze() LOCKS_EXCLUDED(lock());
static bool SupportsModernTrim();
static bool ShouldUseModernTrim();
@ -148,6 +166,7 @@ class BASE_EXPORT PreFreezeBackgroundMemoryTrimmer {
friend class PreFreezeSelfCompactionTest;
FRIEND_TEST_ALL_PREFIXES(PreFreezeSelfCompactionTest, Cancel);
FRIEND_TEST_ALL_PREFIXES(PreFreezeSelfCompactionTest, NotCanceled);
FRIEND_TEST_ALL_PREFIXES(PreFreezeSelfCompactionTest, OnSelfFreezeCancel);
// We use our own implementation here, based on |PostCancelableDelayedTask|,
// rather than relying on something like |base::OneShotTimer|, since
@ -189,20 +208,30 @@ class BASE_EXPORT PreFreezeBackgroundMemoryTrimmer {
private:
class CompactionMetric : public RefCountedThreadSafe<CompactionMetric> {
public:
explicit CompactionMetric(base::TimeTicks started_at);
CompactionMetric(base::TimeTicks triggered_at, base::TimeTicks started_at);
void RecordDelayedMetrics();
void RecordTimeMetrics(base::TimeTicks self_compaction_last_finished,
base::TimeTicks self_compaction_last_cancelled);
void RecordBeforeMetrics();
void MaybeRecordCompactionMetrics();
void MaybeRecordCompactionMetrics() LOCKS_EXCLUDED(lock());
private:
friend class RefCountedThreadSafe<CompactionMetric>;
~CompactionMetric();
void RecordSmapsRollup(std::optional<debug::SmapsRollup>* target);
void RecordSmapsRollup(std::optional<debug::SmapsRollup>* target)
LOCKS_EXCLUDED(lock());
void RecordSmapsRollupWithDelay(std::optional<debug::SmapsRollup>* target,
base::TimeDelta delay);
base::TimeTicks started_at_;
// When the self compaction was first triggered. There is a delay between
// this time and when we actually begin the compaction.
base::TimeTicks self_compaction_triggered_at_;
// When the self compaction first started. This should generally be
// |self_compaction_triggered_at_ +
// kShouldFreezeSelfDelayAfterPreFreezeTasks.Get()|, but may be longer if
// the task was delayed.
base::TimeTicks self_compaction_started_at_;
// We use std::optional here because:
// - We record these incrementally.
// - We may stop recording at some point.
@ -216,84 +245,84 @@ class BASE_EXPORT PreFreezeBackgroundMemoryTrimmer {
PreFreezeBackgroundMemoryTrimmer();
static base::Lock& lock() { return Instance().lock_; }
void StartSelfCompaction(scoped_refptr<base::SequencedTaskRunner> task_runner,
std::vector<debug::MappedMemoryRegion> regions,
scoped_refptr<CompactionMetric> metric,
uint64_t max_size,
base::TimeTicks started_at);
base::TimeTicks triggered_at) LOCKS_EXCLUDED(lock());
static base::TimeDelta GetDelayBetweenSelfCompaction();
void MaybePostSelfCompactionTask(
scoped_refptr<base::SequencedTaskRunner> task_runner,
std::vector<debug::MappedMemoryRegion> regions,
scoped_refptr<CompactionMetric> metric,
uint64_t max_size,
base::TimeTicks started_at);
base::TimeTicks triggered_at) LOCKS_EXCLUDED(lock());
void SelfCompactionTask(scoped_refptr<base::SequencedTaskRunner> task_runner,
std::vector<debug::MappedMemoryRegion> regions,
scoped_refptr<CompactionMetric> metric,
uint64_t max_size,
base::TimeTicks started_at);
base::TimeTicks triggered_at) LOCKS_EXCLUDED(lock());
void FinishSelfCompaction(scoped_refptr<CompactionMetric> metric,
base::TimeTicks started_at);
base::TimeTicks triggered_at)
LOCKS_EXCLUDED(lock());
static bool ShouldContinueSelfCompaction(
base::TimeTicks compaction_started_at) LOCKS_EXCLUDED(Instance().lock_);
base::TimeTicks self_compaction_triggered_at) LOCKS_EXCLUDED(lock());
static std::optional<uint64_t> CompactMemory(
std::vector<debug::MappedMemoryRegion>* regions,
const uint64_t max_bytes);
void RegisterMemoryMetricInternal(const PreFreezeMetric* metric)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
EXCLUSIVE_LOCKS_REQUIRED(lock());
void UnregisterMemoryMetricInternal(const PreFreezeMetric* metric)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
static void UnregisterBackgroundTask(BackgroundTask*) LOCKS_EXCLUDED(lock_);
EXCLUSIVE_LOCKS_REQUIRED(lock());
static void UnregisterBackgroundTask(BackgroundTask*) LOCKS_EXCLUDED(lock());
void UnregisterBackgroundTaskInternal(BackgroundTask*) LOCKS_EXCLUDED(lock_);
void UnregisterBackgroundTaskInternal(BackgroundTask*) LOCKS_EXCLUDED(lock());
static void RegisterPrivateMemoryFootprintMetric() LOCKS_EXCLUDED(lock_);
void RegisterPrivateMemoryFootprintMetricInternal() LOCKS_EXCLUDED(lock_);
static void RegisterPrivateMemoryFootprintMetric() LOCKS_EXCLUDED(lock());
void RegisterPrivateMemoryFootprintMetricInternal() LOCKS_EXCLUDED(lock());
void PostDelayedBackgroundTaskInternal(
scoped_refptr<base::SequencedTaskRunner> task_runner,
const base::Location& from_here,
OnceCallback<void(MemoryReductionTaskContext)> task,
base::TimeDelta delay) LOCKS_EXCLUDED(lock_);
base::TimeDelta delay) LOCKS_EXCLUDED(lock());
void PostDelayedBackgroundTaskModern(
scoped_refptr<base::SequencedTaskRunner> task_runner,
const base::Location& from_here,
OnceCallback<void(MemoryReductionTaskContext)> task,
base::TimeDelta delay) LOCKS_EXCLUDED(lock_);
base::TimeDelta delay) LOCKS_EXCLUDED(lock());
BackgroundTask* PostDelayedBackgroundTaskModernHelper(
scoped_refptr<base::SequencedTaskRunner> task_runner,
const base::Location& from_here,
OnceCallback<void(MemoryReductionTaskContext)> task,
base::TimeDelta delay) EXCLUSIVE_LOCKS_REQUIRED(lock_);
base::TimeDelta delay) EXCLUSIVE_LOCKS_REQUIRED(lock());
void OnPreFreezeInternal() LOCKS_EXCLUDED(lock_);
void RunPreFreezeTasks() EXCLUSIVE_LOCKS_REQUIRED(lock_);
void OnPreFreezeInternal() LOCKS_EXCLUDED(lock());
void RunPreFreezeTasks() EXCLUSIVE_LOCKS_REQUIRED(lock());
void OnSelfFreezeInternal();
void OnSelfFreezeInternal(scoped_refptr<SequencedTaskRunner> task_runner);
void MaybeCancelSelfCompactionInternal() LOCKS_EXCLUDED(lock_);
void MaybeCancelSelfCompactionInternal(
SelfCompactCancellationReason cancellation_reason) LOCKS_EXCLUDED(lock());
void PostMetricsTasksIfModern() EXCLUSIVE_LOCKS_REQUIRED(lock_);
void PostMetricsTask() EXCLUSIVE_LOCKS_REQUIRED(lock_);
void RecordMetrics() LOCKS_EXCLUDED(lock_);
void RecordSmapsRollup(std::optional<debug::SmapsRollup>* target,
base::TimeTicks started_at);
void PostMetricsTasksIfModern() EXCLUSIVE_LOCKS_REQUIRED(lock());
void PostMetricsTask() EXCLUSIVE_LOCKS_REQUIRED(lock());
void RecordMetrics() LOCKS_EXCLUDED(lock());
mutable base::Lock lock_;
std::deque<std::unique_ptr<BackgroundTask>> background_tasks_
GUARDED_BY(lock_);
std::vector<const PreFreezeMetric*> metrics_ GUARDED_BY(lock_);
GUARDED_BY(lock());
std::vector<const PreFreezeMetric*> metrics_ GUARDED_BY(lock());
// When a metrics task is posted (see |RecordMetrics|), the values of each
// metric before any tasks are run are saved here. The "i"th entry corresponds
// to the "i"th entry in |metrics_|. When there is no pending metrics task,
// |values_before_| should be empty.
std::vector<std::optional<uint64_t>> values_before_ GUARDED_BY(lock_);
std::vector<std::optional<uint64_t>> values_before_ GUARDED_BY(lock());
// Whether or not we should continue self compaction. There are two reasons
// why we would cancel:
// (1) We have resumed, meaning we are likely to touch much of the process
@ -303,8 +332,18 @@ class BASE_EXPORT PreFreezeBackgroundMemoryTrimmer {
// work for us. This situation should be relatively rare, because we
// attempt to not do self compaction if we know that we are going to
// frozen by App Freezer.
base::TimeTicks self_compaction_last_cancelled_ GUARDED_BY(lock_) =
base::TimeTicks self_compaction_last_cancelled_ GUARDED_BY(lock()) =
base::TimeTicks::Min();
// When we last triggered self compaction. Used to record metrics.
base::TimeTicks self_compaction_last_triggered_ GUARDED_BY(lock()) =
base::TimeTicks::Min();
// When we last finished self compaction (either successfully, or from
// being cancelled). Used to record metrics.
base::TimeTicks self_compaction_last_finished_ GUARDED_BY(lock()) =
base::TimeTicks::Min();
std::optional<base::ScopedSampleMetadata> process_compacted_metadata_
GUARDED_BY(lock());
base::RepeatingClosure on_self_compact_callback_ GUARDED_BY(lock());
bool supports_modern_trim_;
};

View file

@ -35,13 +35,6 @@
public <init>();
}
# Keep all enum values and valueOf methods. See
# http://proguard.sourceforge.net/index.html#manual/examples.html
# for the reason for this. Also, see http://crbug.com/248037.
-keepclassmembers enum !cr_allowunused,** {
public static **[] values();
}
# This is to workaround crbug.com/1204690 - an old GMS app version crashes when
# ObjectWrapper contains > 1 fields, and this prevents R8 from inserting a
# synthetic field.

View file

@ -17,22 +17,11 @@
namespace base {
namespace android {
bool SysUtils::IsLowEndDeviceFromJni() {
JNIEnv* env = AttachCurrentThread();
return Java_SysUtils_isLowEndDevice(env);
}
bool SysUtils::IsCurrentlyLowMemory() {
JNIEnv* env = AttachCurrentThread();
return Java_SysUtils_isCurrentlyLowMemory(env);
}
// static
int SysUtils::AmountOfPhysicalMemoryKB() {
JNIEnv* env = AttachCurrentThread();
return Java_SysUtils_amountOfPhysicalMemoryKB(env);
}
// Logs the number of minor / major page faults to tracing (and also the time to
// collect) the metrics. Does nothing if tracing is not enabled.
static void JNI_SysUtils_LogPageFaultCountToTracing(JNIEnv* env) {

View file

@ -12,12 +12,8 @@ namespace android {
class BASE_EXPORT SysUtils {
public:
// Returns true iff this is a low-end device.
static bool IsLowEndDeviceFromJni();
// Returns true if system has low available memory.
static bool IsCurrentlyLowMemory();
// Returns amount of physical ram detected in KB, or 0 if detection failed.
static int AmountOfPhysicalMemoryKB();
};
} // namespace android

View file

@ -36,15 +36,8 @@ TaskRunnerAndroid::UiThreadTaskRunnerCallback& GetUiThreadTaskRunnerCallback() {
return *callback;
}
void RunJavaTask(base::android::ScopedJavaGlobalRef<jobject> task,
const std::string& runnable_class_name) {
TRACE_EVENT("toplevel", nullptr, [&](::perfetto::EventContext& ctx) {
std::string event_name =
base::StrCat({"JniPostTask: ", runnable_class_name});
ctx.event()->set_name(event_name.c_str());
});
JNIEnv* env = jni_zero::AttachCurrentThread();
JNI_Runnable::Java_Runnable_run(env, task);
void RunJavaTask(jint task_index) {
Java_TaskRunnerImpl_runTask(jni_zero::AttachCurrentThread(), task_index);
}
} // namespace
@ -68,19 +61,13 @@ void TaskRunnerAndroid::Destroy(JNIEnv* env) {
delete this;
}
void TaskRunnerAndroid::PostDelayedTask(
JNIEnv* env,
const base::android::JavaRef<jobject>& task,
jlong delay,
std::string& runnable_class_name) {
void TaskRunnerAndroid::PostDelayedTask(JNIEnv* env,
jlong delay,
jint task_index) {
// This could be run on any java thread, so we can't cache |env| in the
// BindOnce because JNIEnv is thread specific.
task_runner_->PostDelayedTask(
FROM_HERE,
base::BindOnce(&RunJavaTask,
base::android::ScopedJavaGlobalRef<jobject>(task),
runnable_class_name),
Milliseconds(delay));
FROM_HERE, base::BindOnce(&RunJavaTask, task_index), Milliseconds(delay));
}
// static

View file

@ -32,10 +32,7 @@ class BASE_EXPORT TaskRunnerAndroid {
void Destroy(JNIEnv* env);
void PostDelayedTask(JNIEnv* env,
const base::android::JavaRef<jobject>& task,
jlong delay,
std::string& runnable_class_name);
void PostDelayedTask(JNIEnv* env, jlong delay, jint taskIndex);
bool BelongsToCurrentThread(JNIEnv* env);

View file

@ -4,15 +4,8 @@
#include "base/android/token_android.h"
#include "build/robolectric_buildflags.h"
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/TokenBase_jni.h" // nogncheck
#include "base/base_robolectric_jni/Token_jni.h" // nogncheck
#else
#include "base/base_jni/TokenBase_jni.h"
#include "base/base_jni/Token_jni.h"
#endif
#include "base/base_minimal_jni/TokenBase_jni.h"
#include "base/base_minimal_jni/Token_jni.h"
namespace base::android {
@ -36,3 +29,5 @@ static base::Token JNI_Token_CreateRandom(JNIEnv* env) {
}
} // namespace base::android
DEFINE_JNI_FOR_Token()

View file

@ -13,13 +13,6 @@
#include "base/metrics/histogram_macros.h"
#include "base/trace_event/base_tracing.h"
#include "base/tracing_buildflags.h"
#include "build/robolectric_buildflags.h"
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/TraceEvent_jni.h" // nogncheck
#else
#include "base/tasks_jni/TraceEvent_jni.h"
#endif
#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/trace_event_impl.h" // no-presubmit-check
@ -27,6 +20,9 @@
#include "third_party/perfetto/protos/perfetto/config/chrome/chrome_config.gen.h" // nogncheck
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/tasks_minimal_jni/TraceEvent_jni.h"
namespace base {
namespace android {
@ -310,21 +306,24 @@ static void JNI_TraceEvent_WebViewStartupStage1(JNIEnv* env,
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
static void JNI_TraceEvent_WebViewStartupStage2(JNIEnv* env,
jlong start_time_ms,
jlong duration_ms,
jboolean is_cold_startup) {
static void JNI_TraceEvent_WebViewStartupFirstInstance(
JNIEnv* env,
jlong start_time_ms,
jlong duration_ms,
jboolean included_global_startup) {
#if BUILDFLAG(ENABLE_BASE_TRACING)
auto t = perfetto::Track::ThreadScoped(
reinterpret_cast<void*>(trace_event::GetNextGlobalTraceId()));
if (is_cold_startup) {
TRACE_EVENT_BEGIN("android_webview.timeline",
"WebView.Startup.CreationTime.Stage2.ProviderInit.Cold",
t, TimeTicks() + Milliseconds(start_time_ms));
if (included_global_startup) {
TRACE_EVENT_BEGIN(
"android_webview.timeline",
"WebView.Startup.CreationTime.FirstInstanceWithGlobalStartup", t,
TimeTicks() + Milliseconds(start_time_ms));
} else {
TRACE_EVENT_BEGIN("android_webview.timeline",
"WebView.Startup.CreationTime.Stage2.ProviderInit.Warm",
t, TimeTicks() + Milliseconds(start_time_ms));
TRACE_EVENT_BEGIN(
"android_webview.timeline",
"WebView.Startup.CreationTime.FirstInstanceWithoutGlobalStartup", t,
TimeTicks() + Milliseconds(start_time_ms));
}
TRACE_EVENT_END("android_webview.timeline", t,
@ -332,6 +331,20 @@ static void JNI_TraceEvent_WebViewStartupStage2(JNIEnv* env,
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
// JNI entry point: records a "WebView.Startup.CreationTime.NotFirstInstance"
// trace event spanning [start_time_ms, start_time_ms + duration_ms] on the
// "android_webview.timeline" category. Compiles to a no-op when base tracing
// is disabled.
static void JNI_TraceEvent_WebViewStartupNotFirstInstance(JNIEnv* env,
                                                          jlong start_time_ms,
                                                          jlong duration_ms) {
#if BUILDFLAG(ENABLE_BASE_TRACING)
  // Use a fresh thread-scoped track keyed by a globally unique id so this
  // event gets its own track and does not nest with unrelated events.
  auto t = perfetto::Track::ThreadScoped(
      reinterpret_cast<void*>(trace_event::GetNextGlobalTraceId()));
  TRACE_EVENT_BEGIN("android_webview.timeline",
                    "WebView.Startup.CreationTime.NotFirstInstance", t,
                    TimeTicks() + Milliseconds(start_time_ms));
  TRACE_EVENT_END("android_webview.timeline", t,
                  TimeTicks() + Milliseconds(start_time_ms + duration_ms));
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}
static void JNI_TraceEvent_WebViewStartupStartChromiumLocked(
JNIEnv* env,
jlong start_time_ms,

View file

@ -4,16 +4,9 @@
#include "base/android/unguessable_token_android.h"
#include "build/robolectric_buildflags.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#if BUILDFLAG(IS_ROBOLECTRIC)
#include "base/base_robolectric_jni/TokenBase_jni.h" // nogncheck
#include "base/base_robolectric_jni/UnguessableToken_jni.h" // nogncheck
#else
#include "base/base_jni/TokenBase_jni.h"
#include "base/base_jni/UnguessableToken_jni.h"
#endif
#include "base/base_minimal_jni/TokenBase_jni.h"
#include "base/base_minimal_jni/UnguessableToken_jni.h"
namespace base {
namespace android {
@ -50,3 +43,5 @@ UnguessableTokenAndroid::ParcelAndUnparcelForTesting(
} // namespace android
} // namespace base
DEFINE_JNI_FOR_UnguessableToken()

View file

@ -0,0 +1,101 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/apple/dispatch_source.h"
#include "base/apple/scoped_dispatch_object.h"
namespace base::apple {
// Bundles the libdispatch objects owned by a DispatchSource. Kept behind a
// unique_ptr (pimpl) so the public header does not have to expose the
// ScopedDispatchObject wrapper types.
struct DispatchSource::Storage {
  // The dispatch queue used to service the source_.
  ScopedDispatchObject<dispatch_queue_t> queue;

  // Dispatch source type, either DISPATCH_SOURCE_TYPE_MACH_RECV,
  // DISPATCH_SOURCE_TYPE_READ or DISPATCH_SOURCE_TYPE_WRITE, depending on
  // which constructor was used.
  ScopedDispatchObject<dispatch_source_t> source;

  // Semaphore used to wait on the |source_|'s cancellation in the destructor.
  ScopedDispatchObject<dispatch_semaphore_t> source_canceled;
};
// Creates a new serial dispatch queue named |name| and delegates to the
// queue-based Mach-port constructor.
DispatchSource::DispatchSource(const char* name,
                               mach_port_t port,
                               void (^event_handler)())
    : DispatchSource(dispatch_queue_create(name, DISPATCH_QUEUE_SERIAL),
                     port,
                     event_handler) {
  // Since the queue was created above in the delegated constructor, and it was
  // subsequently retained, release it here. This balances the create+retain so
  // that |storage_->queue| is left as the sole owner.
  dispatch_release(storage_->queue.get());
}
// Creates a file-descriptor source scheduled on the caller-supplied |queue|.
// |type| must be DISPATCH_SOURCE_TYPE_READ or DISPATCH_SOURCE_TYPE_WRITE;
// |event_handler| runs on |queue| when |fd| is ready for that operation.
DispatchSource::DispatchSource(dispatch_queue_t queue,
                               int fd,
                               dispatch_source_type_t type,
                               void (^event_handler)())
    : storage_(std::make_unique<Storage>()) {
  DCHECK(type == DISPATCH_SOURCE_TYPE_READ ||
         type == DISPATCH_SOURCE_TYPE_WRITE);
  // Retain the caller's queue; Storage releases it on destruction.
  storage_->queue.reset(queue, base::scoped_policy::RETAIN);
  storage_->source.reset(dispatch_source_create(
      type, static_cast<uintptr_t>(fd), 0, storage_->queue.get()));
  storage_->source_canceled.reset(dispatch_semaphore_create(0));
  dispatch_source_set_event_handler(storage_->source.get(), event_handler);
  // The cancel handler signals the semaphore, letting the destructor block
  // until no further |event_handler| invocations are possible.
  dispatch_source_set_cancel_handler(storage_->source.get(), ^{
    dispatch_semaphore_signal(storage_->source_canceled.get());
  });
}
// Creates a MACH_RECV source scheduled on the caller-supplied |queue|.
// |event_handler| runs on |queue| when a Mach message arrives on |port|.
DispatchSource::DispatchSource(dispatch_queue_t queue,
                               mach_port_t port,
                               void (^event_handler)())
    : storage_(std::make_unique<Storage>()) {
  // Retain the caller's queue; Storage releases it on destruction.
  storage_->queue.reset(queue, base::scoped_policy::RETAIN);
  storage_->source.reset(dispatch_source_create(
      DISPATCH_SOURCE_TYPE_MACH_RECV, port, 0, storage_->queue.get()));
  storage_->source_canceled.reset(dispatch_semaphore_create(0));
  dispatch_source_set_event_handler(storage_->source.get(), event_handler);
  // The cancel handler signals the semaphore, letting the destructor block
  // until no further |event_handler| invocations are possible.
  dispatch_source_set_cancel_handler(storage_->source.get(), ^{
    dispatch_semaphore_signal(storage_->source_canceled.get());
  });
}
DispatchSource::~DispatchSource() {
  // A suspended dispatch object must not be released (per libdispatch rules),
  // so resume the source first if necessary.
  if (suspended_) {
    Resume();
  }
  // Cancel the source and wait for the semaphore to be signaled. This will
  // ensure the source managed by this class is not used after it is freed.
  dispatch_source_cancel(storage_->source.get());
  storage_->source.reset();
  dispatch_semaphore_wait(storage_->source_canceled.get(),
                          DISPATCH_TIME_FOREVER);
}
// Starts (or restarts) event delivery. Sources begin life suspended
// (|suspended_| defaults to true), so this must be called once before any
// events arrive. Calling it while already resumed is a no-op.
void DispatchSource::Resume() {
  if (suspended_) {
    suspended_ = false;
    dispatch_resume(storage_->source.get());
  }
}
// Pauses event delivery until the next Resume(). Calling it while already
// suspended is a no-op, which keeps the libdispatch suspend/resume counts
// balanced.
void DispatchSource::Suspend() {
  if (!suspended_) {
    suspended_ = true;
    dispatch_suspend(storage_->source.get());
  }
}
// Returns the queue servicing this source. Ownership is retained by
// |storage_|; the caller must not release the returned queue.
dispatch_queue_t DispatchSource::Queue() const {
  return storage_->queue.get();
}
} // namespace base::apple

View file

@ -0,0 +1,63 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_APPLE_DISPATCH_SOURCE_H_
#define BASE_APPLE_DISPATCH_SOURCE_H_
#include <dispatch/dispatch.h>
#include <memory>
#include "base/base_export.h"
namespace base::apple {
// This class encapsulates a dispatch source of type dispatch_source_type_t.
// When this object is destroyed, the source will be cancelled and it will wait
// for the source to stop executing work. The source can run on either a
// user-supplied queue, or it can create its own for the source.
class BASE_EXPORT DispatchSource {
 public:
  // Creates a new dispatch source for the |port| and schedules it on a new
  // serial queue that will be created with |name|. When a Mach message is
  // received, the |event_handler| will be called on that queue.
  DispatchSource(const char* name, mach_port_t port, void (^event_handler)());

  // Creates a new dispatch source with the same semantics as above, but rather
  // than creating a new queue, it schedules the source on |queue|.
  DispatchSource(dispatch_queue_t queue,
                 mach_port_t port,
                 void (^event_handler)());

  // Create a dispatch source for a file descriptor.
  // `type` should either be DISPATCH_SOURCE_TYPE_READ or
  // DISPATCH_SOURCE_TYPE_WRITE; `event_handler` runs on `queue` when `fd`
  // is ready for the corresponding operation.
  DispatchSource(dispatch_queue_t queue,
                 int fd,
                 dispatch_source_type_t type,
                 void (^event_handler)());

  DispatchSource(const DispatchSource&) = delete;
  DispatchSource& operator=(const DispatchSource&) = delete;

  // Cancels the source and waits for it to become fully cancelled before
  // releasing the source.
  ~DispatchSource();

  // Resumes the source. This must be called before any events will
  // be received; sources start out suspended.
  void Resume();

  // Pauses event delivery until the next Resume().
  void Suspend();

  // Returns the queue servicing this source (non-owning).
  dispatch_queue_t Queue() const;

 private:
  // Tracks the suspend state so Resume()/Suspend() stay balanced; libdispatch
  // sources are created suspended.
  bool suspended_ = true;

  // Pimpl holding the dispatch queue/source/semaphore (see .cc file).
  struct Storage;
  std::unique_ptr<Storage> storage_;
};
} // namespace base::apple
#endif // BASE_APPLE_DISPATCH_SOURCE_H_

View file

@ -1,66 +0,0 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/apple/dispatch_source_mach.h"
#include "base/apple/scoped_dispatch_object.h"
namespace base::apple {
// Bundles the libdispatch objects owned by a DispatchSourceMach (pimpl, so
// the header does not expose the ScopedDispatchObject wrapper types).
struct DispatchSourceMach::Storage {
  // The dispatch queue used to service the source_.
  ScopedDispatchObject<dispatch_queue_t> queue;

  // A MACH_RECV dispatch source.
  ScopedDispatchObject<dispatch_source_t> source;

  // Semaphore used to wait on the |source_|'s cancellation in the destructor.
  ScopedDispatchObject<dispatch_semaphore_t> source_canceled;
};
// Creates a new serial dispatch queue named |name| and delegates to the
// queue-based constructor.
DispatchSourceMach::DispatchSourceMach(const char* name,
                                       mach_port_t port,
                                       void (^event_handler)())
    : DispatchSourceMach(dispatch_queue_create(name, DISPATCH_QUEUE_SERIAL),
                         port,
                         event_handler) {
  // Since the queue was created above in the delegated constructor, and it was
  // subsequently retained, release it here so |storage_->queue| is left as the
  // sole owner.
  dispatch_release(storage_->queue.get());
}
// Creates a MACH_RECV source scheduled on the caller-supplied |queue|.
// |event_handler| runs on |queue| when a Mach message arrives on |port|.
DispatchSourceMach::DispatchSourceMach(dispatch_queue_t queue,
                                       mach_port_t port,
                                       void (^event_handler)())
    : storage_(std::make_unique<Storage>()) {
  // Retain the caller's queue; Storage releases it on destruction.
  storage_->queue.reset(queue, base::scoped_policy::RETAIN);
  storage_->source.reset(dispatch_source_create(
      DISPATCH_SOURCE_TYPE_MACH_RECV, port, 0, storage_->queue.get()));
  storage_->source_canceled.reset(dispatch_semaphore_create(0));
  dispatch_source_set_event_handler(storage_->source.get(), event_handler);
  // The cancel handler signals the semaphore, letting the destructor block
  // until no further |event_handler| invocations are possible.
  dispatch_source_set_cancel_handler(storage_->source.get(), ^{
    dispatch_semaphore_signal(storage_->source_canceled.get());
  });
}
DispatchSourceMach::~DispatchSourceMach() {
  // Cancel the source and wait for the semaphore to be signaled. This will
  // ensure the source managed by this class is not used after it is freed.
  dispatch_source_cancel(storage_->source.get());
  storage_->source.reset();
  dispatch_semaphore_wait(storage_->source_canceled.get(),
                          DISPATCH_TIME_FOREVER);
}
// Starts delivery of Mach messages to the event handler. Per the header,
// this must be called before any messages are received. NOTE(review): unlike
// DispatchSource, there is no guard against double-resume here — callers are
// presumably expected to call this exactly once; confirm at call sites.
void DispatchSourceMach::Resume() {
  dispatch_resume(storage_->source.get());
}
// Returns the queue servicing this source. Ownership is retained by
// |storage_|; the caller must not release the returned queue.
dispatch_queue_t DispatchSourceMach::Queue() const {
  return storage_->queue.get();
}
} // namespace base::apple

View file

@ -1,55 +0,0 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_APPLE_DISPATCH_SOURCE_MACH_H_
#define BASE_APPLE_DISPATCH_SOURCE_MACH_H_
#include <dispatch/dispatch.h>
#include <memory>
#include "base/base_export.h"
namespace base::apple {
// This class encapsulates a MACH_RECV dispatch source. When this object is
// destroyed, the source will be cancelled and it will wait for the source
// to stop executing work. The source can run on either a user-supplied queue,
// or it can create its own for the source.
class BASE_EXPORT DispatchSourceMach {
 public:
  // Creates a new dispatch source for the |port| and schedules it on a new
  // serial queue that will be created with |name|. When a Mach message is
  // received, the |event_handler| will be called on that queue.
  DispatchSourceMach(const char* name,
                     mach_port_t port,
                     void (^event_handler)());

  // Creates a new dispatch source with the same semantics as above, but rather
  // than creating a new queue, it schedules the source on |queue|.
  DispatchSourceMach(dispatch_queue_t queue,
                     mach_port_t port,
                     void (^event_handler)());

  DispatchSourceMach(const DispatchSourceMach&) = delete;
  DispatchSourceMach& operator=(const DispatchSourceMach&) = delete;

  // Cancels the source and waits for it to become fully cancelled before
  // releasing the source.
  ~DispatchSourceMach();

  // Resumes the source. This must be called before any Mach messages will
  // be received.
  void Resume();

  // Returns the queue servicing this source (non-owning).
  dispatch_queue_t Queue() const;

 private:
  // Pimpl holding the dispatch queue/source/semaphore (see .cc file).
  struct Storage;
  std::unique_ptr<Storage> storage_;
};
} // namespace base::apple
#endif // BASE_APPLE_DISPATCH_SOURCE_MACH_H_

View file

@ -10,6 +10,7 @@
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <vector>
#include "base/apple/bridging.h"
@ -22,7 +23,6 @@
#include "base/logging.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
#include "build/branding_buildflags.h"
@ -234,7 +234,7 @@ FilePath GetInnermostAppBundlePath(const FilePath& exec_name) {
return FilePath();
}
auto app = ranges::find_if(
auto app = std::ranges::find_if(
Reversed(components), [](const std::string& component) -> bool {
return component.size() > kExtLength && EndsWith(component, kExt);
});

View file

@ -69,7 +69,7 @@ constexpr size_t kMaxInfoPlistDataSize = 18 * 1024;
#endif
// This limit is arbitrary and can be safely increased in the future.
constexpr size_t kMaximumRendezvousPorts = 5;
constexpr size_t kMaximumRendezvousPorts = 6;
enum MachRendezvousMsgId : mach_msg_id_t {
kMachRendezvousMsgIdRequest = 'mrzv',
@ -295,7 +295,7 @@ MachPortRendezvousServerIOS::MachPortRendezvousServerIOS(
DCHECK_LT(ports_.size(), kMaximumRendezvousPorts);
bool res = apple::CreateMachPort(&server_port_, &send_right_);
CHECK(res) << "Failed to create mach server port";
dispatch_source_ = std::make_unique<apple::DispatchSourceMach>(
dispatch_source_ = std::make_unique<apple::DispatchSource>(
"MachPortRendezvousServer", server_port_.get(), ^{
HandleRequest();
});
@ -409,7 +409,7 @@ MachPortRendezvousServerMac::MachPortRendezvousServerMac() {
apple::ScopedMachReceiveRight::Receiver(server_port_).get());
BOOTSTRAP_CHECK(kr == KERN_SUCCESS, kr)
<< "bootstrap_check_in " << bootstrap_name;
dispatch_source_ = std::make_unique<apple::DispatchSourceMach>(
dispatch_source_ = std::make_unique<apple::DispatchSource>(
bootstrap_name.c_str(), server_port_.get(), ^{
HandleRequest();
});

View file

@ -15,7 +15,7 @@
#include <string>
#include <vector>
#include "base/apple/dispatch_source_mach.h"
#include "base/apple/dispatch_source.h"
#include "base/apple/scoped_mach_port.h"
#include "base/base_export.h"
#include "base/containers/buffer_iterator.h"
@ -99,7 +99,7 @@ class BASE_EXPORT MachPortRendezvousServerBase {
apple::ScopedMachReceiveRight server_port_;
// Mach message dispatch source for |server_port_|.
std::unique_ptr<apple::DispatchSourceMach> dispatch_source_;
std::unique_ptr<apple::DispatchSource> dispatch_source_;
// Ask for the associated ports associated with `audit_token`.
// Return `std::nullopt` if the client is not authorized to

View file

@ -81,7 +81,10 @@ bool Base64Decode(std::string_view input,
// in-place, but it violates the API contract that `output` is only modified
// on success.
std::string input_without_whitespace;
RemoveChars(input, kInfraAsciiWhitespace, &input_without_whitespace);
RemoveChars(input,
std::string_view(std::begin(kInfraAsciiWhitespace),
std::end(kInfraAsciiWhitespace)),
&input_without_whitespace);
// This means that the required size to decode is at most what was needed
// above, which means `decode_buf` will fit the decoded bytes at its current
// size and we don't need to call `modp_b64_decode_len()` again.

View file

@ -238,6 +238,17 @@ bool PathProviderWin(int key, FilePath* result) {
return false;
}
break;
case base::DIR_ONE_DRIVE: {
base::win::ScopedCoMem<wchar_t> path_buf;
// FOLDERID_OneDrive points to the user's OneDrive folder. The default path
// is %USERPROFILE%\OneDrive. It is formerly known as FOLDERID_SkyDrive.
if (FAILED(SHGetKnownFolderPath(FOLDERID_OneDrive, 0, NULL, &path_buf))) {
return false;
}
cur = FilePath(path_buf.get());
break;
}
default:
return false;
}

View file

@ -57,6 +57,7 @@ enum {
DIR_WINDOWS_FONTS, // Usually C:\Windows\Fonts.
DIR_SYSTEM_TEMP, // %SystemRoot%\SystemTemp or %ProgramFiles%;
// only for admin processes.
DIR_ONE_DRIVE, // The synced personal OneDrive directory.
PATH_WIN_END
};

View file

@ -5,7 +5,6 @@
#include "base/base_switches.h"
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
namespace switches {

View file

@ -8,7 +8,6 @@
#define BASE_BASE_SWITCHES_H_
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
namespace switches {

View file

@ -5,6 +5,7 @@
#ifndef BASE_CALLBACK_LIST_H_
#define BASE_CALLBACK_LIST_H_
#include <algorithm>
#include <list>
#include <memory>
#include <utility>
@ -15,7 +16,6 @@
#include "base/functional/bind.h"
#include "base/functional/callback.h"
#include "base/memory/weak_ptr.h"
#include "base/ranges/algorithm.h"
#include "base/types/is_instantiation.h"
// OVERVIEW:
@ -174,7 +174,7 @@ class CallbackListBase {
// Returns whether the list of registered callbacks is empty (from an external
// perspective -- meaning no remaining callbacks are live).
bool empty() const {
return ranges::all_of(
return std::ranges::all_of(
callbacks_, [](const auto& callback) { return callback.is_null(); });
}

View file

@ -2,6 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/390223051): Remove C-library calls to fix the errors.
#pragma allow_unsafe_libc_calls
#endif
#include "base/check.h"
#include <optional>
@ -164,7 +169,7 @@ class NotReachedLogMessage : public LogMessage {
class DCheckLogMessage : public LogMessage {
public:
DCheckLogMessage(const base::Location& location)
explicit DCheckLogMessage(const base::Location& location)
: LogMessage(location.file_name(),
location.line_number(),
LOGGING_DCHECK),

View file

@ -216,6 +216,9 @@ class BASE_EXPORT NotReachedNoreturnError : public CheckError {
// The weird ternary is to still generate an "is not contextually convertible to
// 'bool' when provided weird parameters (regardless of ANALYZER_ASSUME_TRUE's
// implementation). See base/check_nocompile.nc.
//
// The lambda is here to permit the compiler to out-of-line much of the
// CHECK-failure path and optimize better for the fast path.
#define LOGGING_CHECK_FUNCTION_IMPL(check_stream, condition) \
switch (0) \
case 0: \
@ -223,7 +226,7 @@ class BASE_EXPORT NotReachedNoreturnError : public CheckError {
if (ANALYZER_ASSUME_TRUE((condition) ? true : false)) \
[[likely]]; \
else \
(check_stream)
[&]() { return (check_stream); }()
// A helper macro like LOGGING_CHECK_FUNCTION_IMPL above but discarding any
// log-stream parameters rather than evaluate them on failure.

View file

@ -2,6 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/390223051): Remove C-library calls to fix the errors.
#pragma allow_unsafe_libc_calls
#endif
#include "base/check_op.h"
#include <string.h>

Some files were not shown because too many files have changed in this diff Show more