Compare commits

...

No commits in common. "v133.0.6943.49-1" and "master" have entirely different histories.

2560 changed files with 76856 additions and 48581 deletions

View file

@ -1 +1 @@
133.0.6943.49
134.0.6998.39

View file

@ -90,7 +90,7 @@ no_check_targets = [
"//v8:v8_libplatform", # 2 errors
]
# These are the list of GN files that run exec_script. This whitelist exists
# These are the list of GN files that run exec_script. This allowlist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#
@ -145,11 +145,11 @@ no_check_targets = [
# this situation much easier to create. if the build always lists the
# files and passes them to a script, it will always be correct.
exec_script_whitelist =
build_dotfile_settings.exec_script_whitelist +
exec_script_allowlist =
build_dotfile_settings.exec_script_allowlist +
angle_dotfile_settings.exec_script_whitelist +
[
# Whitelist entries for //build should go into
# Allowlist entries for //build should go into
# //build/dotfile_settings.gni instead, so that they can be shared
# with other repos. The entries in this list should be only for files
# in the Chromium repo outside of //build.

View file

@ -118,6 +118,7 @@ Andreas Papacharalampous <andreas@apap04.com>
Andrei Borza <andrei.borza@gmail.com>
Andrei Parvu <andrei.prv@gmail.com>
Andrei Parvu <parvu@adobe.com>
Andrei Volykhin <andrei.volykhin@gmail.com>
Andres Salomon <dilinger@queued.net>
Andreu Botella <andreu@andreubotella.com>
Andrew Boyarshin <andrew.boyarshin@gmail.com>
@ -312,6 +313,7 @@ Daniel Lockyer <thisisdaniellockyer@gmail.com>
Daniel Nishi <dhnishi@gmail.com>
Daniel Platz <daplatz@googlemail.com>
Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
Daniel Richard G. <iskunk@gmail.com>
Daniel Shaulov <dshaulov@ptc.com>
Daniel Trebbien <dtrebbien@gmail.com>
Daniel Waxweiler <daniel.waxweiler@gmail.com>
@ -329,6 +331,7 @@ Darshini KN <kn.darshini@samsung.com>
Dave Vandyke <kzar@kzar.co.uk>
David Benjamin <davidben@mit.edu>
David Brown <develop.david.brown@gmail.com>
David Cernoch <dcernoch@uplandsoftware.com>
David Davidovic <david@davidovic.io>
David Erceg <erceg.david@gmail.com>
David Faden <dfaden@gmail.com>
@ -938,6 +941,7 @@ Martin Persson <mnpn03@gmail.com>
Martin Rogalla <martin@martinrogalla.com>
Martina Kollarova <martina.kollarova@intel.com>
Martino Fontana <tinozzo123@gmail.com>
Marvin Giessing <marvin.giessing@gmail.com>
Masahiro Yado <yado.masa@gmail.com>
Masaru Nishida <msr.i386@gmail.com>
Masayuki Wakizaka <mwakizaka0108@gmail.com>
@ -977,6 +981,7 @@ Md Jobed Hossain <jobed.h@samsung.com>
Md Raiyan bin Sayeed <mrbsayee@uwaterloo.ca>
Md. Sadiqul Amin <sadiqul.amin@samsung.com>
Md Sami Uddin <md.sami@samsung.com>
Mego Tan <tannal2409@gmail.com>
Merajul Arefin <merajularefin@gmail.com>
Micha Hanselmann <micha.hanselmann@gmail.com>
Michael Cirone <mikecirone@gmail.com>
@ -1004,6 +1009,7 @@ Milko Leporis <milko.leporis@imgtec.com>
Milton Chiang <milton.chiang@mediatek.com>
Milutin Smiljanic <msmiljanic.gm@gmail.com>
Minchul Kang <tegongkang@gmail.com>
Ming Lei <minggeorgelei@gmail.com>
Mingeun Park <mindal99546@gmail.com>
Minggang Wang <minggang.wang@intel.com>
Mingmin Xie <melvinxie@gmail.com>
@ -1461,6 +1467,7 @@ Tom Harwood <tfh@skip.org>
Tomas Popela <tomas.popela@gmail.com>
Tomasz Edward Posłuszny <tom@devpeer.net>
Tony Shen <legendmastertony@gmail.com>
Topi Lassila <tolassila@gmail.com>
Torsten Kurbad <google@tk-webart.de>
Toshihito Kikuchi <leamovret@gmail.com>
Toshiaki Tanaka <zokutyou2@gmail.com>
@ -1512,6 +1519,7 @@ Wojciech Bielawski <wojciech.bielawski@gmail.com>
Wang Chen <wangchen20@iscas.ac.cn>
Wang Chen <unicornxw@gmail.com>
Wang Weiwei <wangww@dingdao.com>
Wang Zirui <kingzirvi@gmail.com>
Wangyang Dai <jludwy@gmail.com>
Wanming Lin <wanming.lin@intel.com>
Wei Li <wei.c.li@intel.com>
@ -1646,6 +1654,7 @@ Zsolt Borbely <zsborbely.u-szeged@partner.samsung.com>
迷渡 <justjavac@gmail.com>
郑苏波 (Super Zheng) <superzheng@tencent.com>
一丝 (Yisi) <yiorsi@gmail.com>
林训杰 (XunJie Lin) <wick.linxunjie@gmail.com>
# Please DO NOT APPEND here. See comments at the top of the file.
# END individuals section.

686
src/DEPS

File diff suppressed because it is too large Load diff

View file

@ -531,6 +531,7 @@ component("base") {
"profiler/periodic_sampling_scheduler.h",
"profiler/profile_builder.h",
"profiler/register_context.h",
"profiler/register_context_registers.h",
"profiler/sample_metadata.cc",
"profiler/sample_metadata.h",
"profiler/sampling_profiler_thread_token.cc",
@ -549,15 +550,13 @@ component("base") {
"profiler/stack_unwind_data.h",
"profiler/suspendable_thread_delegate.h",
"profiler/thread_delegate.h",
"profiler/thread_group_profiler.cc",
"profiler/thread_group_profiler.h",
"profiler/thread_group_profiler_client.h",
"profiler/unwinder.cc",
"profiler/unwinder.h",
"rand_util.cc",
"rand_util.h",
"ranges/algorithm.h",
"ranges/from_range.h",
"ranges/functional.h",
"ranges/ranges.h",
"run_loop.cc",
"run_loop.h",
"sampling_heap_profiler/lock_free_address_hash_set.cc",
@ -691,7 +690,6 @@ component("base") {
"task/sequence_manager/enqueue_order_generator.h",
"task/sequence_manager/fence.cc",
"task/sequence_manager/fence.h",
"task/sequence_manager/lazily_deallocated_deque.h",
"task/sequence_manager/sequence_manager.cc",
"task/sequence_manager/sequence_manager.h",
"task/sequence_manager/sequence_manager_impl.cc",
@ -879,6 +877,7 @@ component("base") {
"traits_bag.h",
"tuple.h",
"types/always_false.h",
"types/cxx23_from_range.h",
"types/cxx23_is_scoped_enum.h",
"types/cxx23_to_underlying.h",
"types/expected.h",
@ -1200,6 +1199,8 @@ component("base") {
"android/jni_array.h",
"android/jni_bytebuffer.cc",
"android/jni_bytebuffer.h",
"android/jni_callback.cc",
"android/jni_callback.h",
"android/jni_registrar.cc",
"android/jni_registrar.h",
"android/jni_string.cc",
@ -1989,7 +1990,7 @@ component("base") {
"process/port_provider_mac.cc",
"process/port_provider_mac.h",
"process/process_handle_mac.cc",
"process/process_info_mac.cc",
"process/process_info_mac.mm",
"process/process_iterator_mac.cc",
"process/process_mac.cc",
"process/process_metrics_mac.cc",

View file

@ -20,8 +20,7 @@
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/thread_cache.h"
namespace base {
namespace features {
namespace base::features {
namespace {
@ -48,7 +47,8 @@ constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
{UnretainedDanglingPtrMode::kDumpWithoutCrashing,
"dump_without_crashing"},
};
const base::FeatureParam<UnretainedDanglingPtrMode>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam = {
&kPartitionAllocUnretainedDanglingPtr,
"mode",
@ -73,7 +73,8 @@ constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogOnly, "log_only"},
};
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr,
"mode",
DanglingPtrMode::kCrash,
@ -83,7 +84,8 @@ constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
{DanglingPtrType::kAll, "all"},
{DanglingPtrType::kCrossTask, "cross_task"},
};
const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
&kPartitionAllocDanglingPtr,
"type",
DanglingPtrType::kAll,
@ -128,7 +130,8 @@ constexpr FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>::Option
kNonRendererStr},
{PartitionAllocWithAdvancedChecksEnabledProcesses::kAllProcesses,
kAllProcessesStr}};
const base::FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
kPartitionAllocWithAdvancedChecksEnabledProcessesParam{
&kPartitionAllocWithAdvancedChecks, kPAFeatureEnabledProcessesStr,
PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
@ -138,15 +141,17 @@ BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantine",
FEATURE_DISABLED_BY_DEFAULT);
// Scheduler Loop Quarantine's per-branch capacity in bytes.
const base::FeatureParam<int>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBranchCapacity{
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBranchCapacity", 0};
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
const base::FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity{
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBrowserUICapacity", 0};
BASE_FEATURE_PARAM(int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity,
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineBrowserUICapacity",
0);
BASE_FEATURE(kPartitionAllocZappingByFreeFlags,
"PartitionAllocZappingByFreeFlags",
@ -155,6 +160,10 @@ BASE_FEATURE(kPartitionAllocZappingByFreeFlags,
BASE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory,
"PartitionAllocEventuallyZeroFreedMemory",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocFewerMemoryRegions,
"PartitionAllocFewerMemoryRegions",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocBackupRefPtr,
@ -174,25 +183,30 @@ constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
{BackupRefPtrEnabledProcesses::kNonRenderer, kNonRendererStr},
{BackupRefPtrEnabledProcesses::kAllProcesses, kAllProcessesStr}};
const base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam{
&kPartitionAllocBackupRefPtr, kPAFeatureEnabledProcessesStr,
BASE_FEATURE_ENUM_PARAM(BackupRefPtrEnabledProcesses,
kBackupRefPtrEnabledProcessesParam,
&kPartitionAllocBackupRefPtr,
kPAFeatureEnabledProcessesStr,
#if PA_BUILDFLAG(IS_MAC) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
BackupRefPtrEnabledProcesses::kNonRenderer,
BackupRefPtrEnabledProcesses::kNonRenderer,
#else
BackupRefPtrEnabledProcesses::kAllProcesses,
BackupRefPtrEnabledProcesses::kAllProcesses,
#endif
&kBackupRefPtrEnabledProcessesOptions};
&kBackupRefPtrEnabledProcessesOptions);
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"},
};
const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
&kPartitionAllocBackupRefPtr, "brp-mode", BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions};
const base::FeatureParam<int> kBackupRefPtrExtraExtrasSizeParam{
BASE_FEATURE_ENUM_PARAM(BackupRefPtrMode,
kBackupRefPtrModeParam,
&kPartitionAllocBackupRefPtr,
"brp-mode",
BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions);
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<int> kBackupRefPtrExtraExtrasSizeParam{
&kPartitionAllocBackupRefPtr, "brp-extra-extras-size", 0};
BASE_FEATURE(kPartitionAllocMemoryTagging,
@ -208,7 +222,8 @@ constexpr FeatureParam<MemtagMode>::Option kMemtagModeOptions[] = {
{MemtagMode::kSync, "sync"},
{MemtagMode::kAsync, "async"}};
const base::FeatureParam<MemtagMode> kMemtagModeParam{
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<MemtagMode> kMemtagModeParam{
&kPartitionAllocMemoryTagging, "memtag-mode",
#if PA_BUILDFLAG(USE_FULL_MTE)
MemtagMode::kSync,
@ -222,7 +237,8 @@ constexpr FeatureParam<RetagMode>::Option kRetagModeOptions[] = {
{RetagMode::kRandom, "random"},
};
const base::FeatureParam<RetagMode> kRetagModeParam{
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<RetagMode> kRetagModeParam{
&kPartitionAllocMemoryTagging, "retag-mode", RetagMode::kIncrement,
&kRetagModeOptions};
@ -232,7 +248,8 @@ constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option
{MemoryTaggingEnabledProcesses::kNonRenderer, kNonRendererStr},
{MemoryTaggingEnabledProcesses::kAllProcesses, kAllProcessesStr}};
const base::FeatureParam<MemoryTaggingEnabledProcesses>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<MemoryTaggingEnabledProcesses>
kMemoryTaggingEnabledProcessesParam{
&kPartitionAllocMemoryTagging, kPAFeatureEnabledProcessesStr,
#if PA_BUILDFLAG(USE_FULL_MTE)
@ -257,13 +274,16 @@ BASE_FEATURE(kPartitionAllocPermissiveMte,
#endif
);
const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
// Note: Do not use the prepared macro to implement following FeatureParams
// as of no need for a local cache.
constinit const FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true};
const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
constinit const FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-extraction-check",
false}; // Not much noise at the moment to enable by default.
const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true};
constinit const FeatureParam<bool>
kBackupRefPtrAsanEnableInstantiationCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true};
// If enabled, switches the bucket distribution to a denser one.
//
@ -277,12 +297,13 @@ BASE_FEATURE(kPartitionAllocUseDenserDistribution,
FEATURE_ENABLED_BY_DEFAULT
#endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
);
const base::FeatureParam<BucketDistributionMode>::Option
const FeatureParam<BucketDistributionMode>::Option
kPartitionAllocBucketDistributionOption[] = {
{BucketDistributionMode::kDefault, "default"},
{BucketDistributionMode::kDenser, "denser"},
};
const base::FeatureParam<BucketDistributionMode>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam{
&kPartitionAllocUseDenserDistribution, "mode",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
@ -295,10 +316,12 @@ const base::FeatureParam<BucketDistributionMode>
BASE_FEATURE(kPartitionAllocMemoryReclaimer,
"PartitionAllocMemoryReclaimer",
FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam<TimeDelta> kPartitionAllocMemoryReclaimerInterval = {
&kPartitionAllocMemoryReclaimer, "interval",
TimeDelta(), // Defaults to zero.
};
BASE_FEATURE_PARAM(TimeDelta,
kPartitionAllocMemoryReclaimerInterval,
&kPartitionAllocMemoryReclaimer,
"interval",
TimeDelta() // Defaults to zero.
);
// Configures whether we set a lower limit for renderers that do not have a main
// frame, similar to the limit that is already done for backgrounded renderers.
@ -311,16 +334,17 @@ BASE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers,
BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
"PartitionAllocStraightenLargerSlotSpanFreeLists",
FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam<
partition_alloc::StraightenLargerSlotSpanFreeListsMode>::Option
kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
const FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>::
Option kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
{partition_alloc::StraightenLargerSlotSpanFreeListsMode::
kOnlyWhenUnprovisioning,
"only-when-unprovisioning"},
{partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways,
"always"},
};
const base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<
partition_alloc::StraightenLargerSlotSpanFreeListsMode>
kPartitionAllocStraightenLargerSlotSpanFreeListsMode = {
&kPartitionAllocStraightenLargerSlotSpanFreeLists,
"mode",
@ -353,9 +377,11 @@ BASE_FEATURE(kPageAllocatorRetryOnCommitFailure,
// The feature: kPartialLowEndModeOnMidRangeDevices is defined in
// //base/features.cc. Since the following feature param is related to
// PartitionAlloc, define the param here.
const FeatureParam<bool> kPartialLowEndModeExcludePartitionAllocSupport{
&kPartialLowEndModeOnMidRangeDevices, "exclude-partition-alloc-support",
false};
BASE_FEATURE_PARAM(bool,
kPartialLowEndModeExcludePartitionAllocSupport,
&kPartialLowEndModeOnMidRangeDevices,
"exclude-partition-alloc-support",
false);
#endif
BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier,
@ -373,19 +399,19 @@ MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplierForAndroid,
1.)
constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
base::TimeDelta time_delta) {
TimeDelta time_delta) {
return partition_alloc::internal::base::Microseconds(
time_delta.InMicroseconds());
}
constexpr base::TimeDelta FromPartitionAllocTimeDelta(
constexpr TimeDelta FromPartitionAllocTimeDelta(
partition_alloc::internal::base::TimeDelta time_delta) {
return base::Microseconds(time_delta.InMicroseconds());
return Microseconds(time_delta.InMicroseconds());
}
BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
"EnableConfigurableThreadCachePurgeInterval",
base::FEATURE_DISABLED_BY_DEFAULT);
FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_TIME_DELTA(
GetThreadCacheMinPurgeIntervalValue,
@ -422,7 +448,7 @@ GetThreadCacheDefaultPurgeInterval() {
BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
"EnableConfigurableThreadCacheMinCachedMemoryForPurging",
base::FEATURE_DISABLED_BY_DEFAULT);
FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_INT(
GetThreadCacheMinCachedMemoryForPurgingBytes,
@ -445,7 +471,7 @@ BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
BASE_FEATURE(kUsePoolOffsetFreelists,
"PartitionAllocUsePoolOffsetFreelists",
base::FEATURE_ENABLED_BY_DEFAULT);
FEATURE_ENABLED_BY_DEFAULT);
#endif
BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
@ -458,12 +484,12 @@ BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
BASE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans,
"PartitionAllocUseSmallSingleSlotSpans",
base::FEATURE_ENABLED_BY_DEFAULT);
FEATURE_ENABLED_BY_DEFAULT);
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
BASE_FEATURE(kPartitionAllocShadowMetadata,
"PartitionAllocShadowMetadata",
base::FEATURE_DISABLED_BY_DEFAULT);
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<ShadowMetadataEnabledProcesses>::Option
kShadowMetadataEnabledProcessesOptions[] = {
@ -471,12 +497,12 @@ constexpr FeatureParam<ShadowMetadataEnabledProcesses>::Option
{ShadowMetadataEnabledProcesses::kAllChildProcesses,
kAllChildProcessesStr}};
const base::FeatureParam<ShadowMetadataEnabledProcesses>
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<ShadowMetadataEnabledProcesses>
kShadowMetadataEnabledProcessesParam{
&kPartitionAllocShadowMetadata, kPAFeatureEnabledProcessesStr,
ShadowMetadataEnabledProcesses::kRendererOnly,
&kShadowMetadataEnabledProcessesOptions};
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace features
} // namespace base
} // namespace base::features

View file

@ -15,8 +15,7 @@
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_root.h"
namespace base {
namespace features {
namespace base::features {
namespace internal {
@ -37,13 +36,13 @@ enum class PAFeatureEnabledProcesses {
} // namespace internal
extern const BASE_EXPORT Feature kPartitionAllocUnretainedDanglingPtr;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUnretainedDanglingPtr);
enum class UnretainedDanglingPtrMode {
kCrash,
kDumpWithoutCrashing,
};
extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(UnretainedDanglingPtrMode,
kUnretainedDanglingPtrModeParam);
// See /docs/dangling_ptr.md
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
@ -62,8 +61,7 @@ enum class DanglingPtrMode {
// Note: This will be extended with a single shot DumpWithoutCrashing.
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrMode, kDanglingPtrModeParam);
enum class DanglingPtrType {
// Act on any dangling raw_ptr released after being freed.
kAll, // (default)
@ -74,8 +72,7 @@ enum class DanglingPtrType {
// Note: This will be extended with LongLived
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrType>
kDanglingPtrTypeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(DanglingPtrType, kDanglingPtrTypeParam);
using PartitionAllocWithAdvancedChecksEnabledProcesses =
internal::PAFeatureEnabledProcesses;
@ -88,17 +85,19 @@ BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocWithAdvancedChecks);
extern const BASE_EXPORT
base::FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
kPartitionAllocWithAdvancedChecksEnabledProcessesParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
PartitionAllocWithAdvancedChecksEnabledProcesses,
kPartitionAllocWithAdvancedChecksEnabledProcessesParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSchedulerLoopQuarantine);
// Scheduler Loop Quarantine's per-thread capacity in bytes.
extern const BASE_EXPORT base::FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBranchCapacity;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
int,
kPartitionAllocSchedulerLoopQuarantineBranchCapacity);
// Scheduler Loop Quarantine's capacity for the UI thread in bytes.
// TODO(https://crbug.com/387470567): Support more thread types.
extern const BASE_EXPORT base::FeatureParam<int>
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
int,
kPartitionAllocSchedulerLoopQuarantineBrowserUICapacity);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags);
@ -106,6 +105,11 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags);
// security guarantee, but to increase the compression ratio of PartitionAlloc's
// fragmented super pages.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory);
// Whether to make PartitionAlloc use fewer memory regions. This matters on
// Linux-based systems, where there is a per-process limit that we hit in some
// cases. See the comment in PartitionBucket::SlotSpanCOmmitedSize() for detail.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocFewerMemoryRegions);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
using BackupRefPtrEnabledProcesses = internal::PAFeatureEnabledProcesses;
@ -143,41 +147,44 @@ enum class BucketDistributionMode : uint8_t {
};
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr);
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
kBackupRefPtrModeParam;
extern const BASE_EXPORT base::FeatureParam<int>
kBackupRefPtrExtraExtrasSizeParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrEnabledProcesses,
kBackupRefPtrEnabledProcessesParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BackupRefPtrMode,
kBackupRefPtrModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(int,
kBackupRefPtrExtraExtrasSizeParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryTagging);
extern const BASE_EXPORT base::FeatureParam<MemtagMode> kMemtagModeParam;
extern const BASE_EXPORT base::FeatureParam<RetagMode> kRetagModeParam;
extern const BASE_EXPORT base::FeatureParam<MemoryTaggingEnabledProcesses>
kMemoryTaggingEnabledProcessesParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemtagMode, kMemtagModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(RetagMode, kRetagModeParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(MemoryTaggingEnabledProcesses,
kMemoryTaggingEnabledProcessesParam);
// Kill switch for memory tagging. Skips any code related to memory tagging when
// enabled.
BASE_EXPORT BASE_DECLARE_FEATURE(kKillPartitionAllocMemoryTagging);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableDereferenceCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableExtractionCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableInstantiationCheckParam;
extern const BASE_EXPORT base::FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
bool,
kBackupRefPtrAsanEnableDereferenceCheckParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
bool,
kBackupRefPtrAsanEnableExtractionCheckParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
bool,
kBackupRefPtrAsanEnableInstantiationCheckParam);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(BucketDistributionMode,
kPartitionAllocBucketDistributionParam);
BASE_EXPORT BASE_DECLARE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer);
extern const BASE_EXPORT base::FeatureParam<TimeDelta>
kPartitionAllocMemoryReclaimerInterval;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(TimeDelta,
kPartitionAllocMemoryReclaimerInterval);
BASE_EXPORT BASE_DECLARE_FEATURE(
kPartitionAllocStraightenLargerSlotSpanFreeLists);
extern const BASE_EXPORT
base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
kPartitionAllocStraightenLargerSlotSpanFreeListsMode;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
partition_alloc::StraightenLargerSlotSpanFreeListsMode,
kPartitionAllocStraightenLargerSlotSpanFreeListsMode);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);
@ -186,8 +193,9 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPageAllocatorRetryOnCommitFailure);
#endif
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
extern const base::FeatureParam<bool>
kPartialLowEndModeExcludePartitionAllocSupport;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
bool,
kPartialLowEndModeExcludePartitionAllocSupport);
#endif
BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier);
@ -229,11 +237,10 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseSmallSingleSlotSpans);
using ShadowMetadataEnabledProcesses = internal::PAFeatureEnabledProcesses;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocShadowMetadata);
extern const BASE_EXPORT base::FeatureParam<ShadowMetadataEnabledProcesses>
kShadowMetadataEnabledProcessesParam;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(ShadowMetadataEnabledProcesses,
kShadowMetadataEnabledProcessesParam);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
} // namespace features
} // namespace base
} // namespace base::features
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

View file

@ -4,6 +4,7 @@
#include "base/allocator/partition_alloc_support.h"
#include <algorithm>
#include <array>
#include <cinttypes>
#include <cstdint>
@ -31,7 +32,6 @@
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/pending_task.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
@ -465,7 +465,7 @@ std::string ExtractDanglingPtrSignature(std::string stacktrace) {
size_t caller_index = 0;
for (size_t i = 0; i < lines.size(); ++i) {
for (const auto& patterns : callee_patterns) {
if (ranges::all_of(patterns, [&](std::string_view pattern) {
if (std::ranges::all_of(patterns, [&](std::string_view pattern) {
return lines[i].find(pattern) != std::string_view::npos;
})) {
caller_index = i + 1;
@ -667,7 +667,7 @@ void CheckDanglingRawPtrBufferEmpty() {
std::vector<std::array<const void*, 32>> stack_traces =
internal::InstanceTracer::GetStackTracesForDanglingRefs(entry->id);
for (const auto& raw_stack_trace : stack_traces) {
CHECK(ranges::is_partitioned(raw_stack_trace, is_frame_ptr_not_null))
CHECK(std::ranges::is_partitioned(raw_stack_trace, is_frame_ptr_not_null))
<< "`raw_stack_trace` is expected to be partitioned: non-null values "
"at the begining followed by `nullptr`s.";
LOG(ERROR) << "Dangling reference from:\n";
@ -675,8 +675,8 @@ void CheckDanglingRawPtrBufferEmpty() {
// This call truncates the `nullptr` tail of the stack
// trace (see the `is_partitioned` CHECK above).
span(raw_stack_trace.begin(),
ranges::partition_point(raw_stack_trace,
is_frame_ptr_not_null)))
std::ranges::partition_point(
raw_stack_trace, is_frame_ptr_not_null)))
<< "\n";
}
#else
@ -880,43 +880,28 @@ PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
CHECK(base::FeatureList::GetInstance());
bool process_affected_by_brp_flag = false;
#if (PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
!PA_BUILDFLAG(FORCE_DISABLE_BACKUP_REF_PTR_FEATURE)) || \
PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr)) {
// No specified process type means this is the Browser process.
process_affected_by_brp_flag = ShouldEnableFeatureOnProcess(
base::features::kBackupRefPtrEnabledProcessesParam.Get(), process_type);
}
#endif // (PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)&&
// !PA_BUILDFLAG(FORCE_DISABLE_BACKUP_REF_PTR_FEATURE)) ||
// PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
const bool enable_brp =
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// kDisabled is equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
process_affected_by_brp_flag &&
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
!PA_BUILDFLAG(FORCE_DISABLE_BACKUP_REF_PTR_FEATURE)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr) &&
base::features::kBackupRefPtrModeParam.Get() !=
base::features::BackupRefPtrMode::kDisabled;
#else
false;
base::features::BackupRefPtrMode::kDisabled &&
ShouldEnableFeatureOnProcess(
base::features::kBackupRefPtrEnabledProcessesParam.Get(),
process_type)) {
return {
.enable_brp = true,
.extra_extras_size = static_cast<size_t>(
base::features::kBackupRefPtrExtraExtrasSizeParam.Get()),
};
}
#endif
size_t extra_extras_size = 0;
if (enable_brp) {
extra_extras_size = static_cast<size_t>(
base::features::kBackupRefPtrExtraExtrasSizeParam.Get());
}
return {
enable_brp,
process_affected_by_brp_flag,
extra_extras_size,
.enable_brp = false,
.extra_extras_size = 0,
};
}
@ -1019,8 +1004,15 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
[[maybe_unused]] BrpConfiguration brp_config =
GetBrpConfiguration(process_type);
#if PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (brp_config.process_affected_by_brp_flag) {
// Configure ASAN hooks to report the `MiraclePtr status`. This is enabled
// only if BackupRefPtr is normally enabled in the current process for the
// current platform. Note that CastOS and iOS aren't protected by BackupRefPtr
// a the moment, so they are excluded.
#if PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && !PA_BUILDFLAG(IS_CASTOS) && \
!PA_BUILDFLAG(IS_IOS)
if (ShouldEnableFeatureOnProcess(
base::features::kBackupRefPtrEnabledProcessesParam.Get(),
process_type)) {
base::RawPtrAsanService::GetInstance().Configure(
base::EnableDereferenceCheck(
base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
@ -1059,6 +1051,8 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
base::features::kPartitionAllocZappingByFreeFlags);
const bool eventually_zero_freed_memory = base::FeatureList::IsEnabled(
base::features::kPartitionAllocEventuallyZeroFreedMemory);
const bool fewer_memory_regions = base::FeatureList::IsEnabled(
base::features::kPartitionAllocFewerMemoryRegions);
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
const bool use_pool_offset_freelists =
@ -1168,6 +1162,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
scheduler_loop_quarantine_branch_capacity_in_bytes,
allocator_shim::ZappingByFreeFlags(zapping_by_free_flags),
allocator_shim::EventuallyZeroFreedMemory(eventually_zero_freed_memory),
allocator_shim::FewerMemoryRegions(fewer_memory_regions),
allocator_shim::UsePoolOffsetFreelists(use_pool_offset_freelists),
use_small_single_slot_spans);

View file

@ -49,7 +49,6 @@ class BASE_EXPORT PartitionAllocSupport {
public:
struct BrpConfiguration {
bool enable_brp = false;
bool process_affected_by_brp_flag = false;
// TODO(https://crbug.com/371135823): Remove after the investigation.
size_t extra_extras_size = 0;

View file

@ -230,3 +230,20 @@ def CheckCpp17CompatibleKeywords(input_api, output_api):
'%s:%d\nPartitionAlloc disallows C++20 keywords: %s'
% (f.LocalPath(), line_number + 1, keyword)))
return errors
# Check `NDEBUG` is not used inside partition_alloc. We prefer to use the
# buildflags `#if PA_BUILDFLAG(IS_DEBUG)` instead.
def CheckNoNDebug(input_api, output_api):
  """Presubmit check rejecting any changed line that mentions NDEBUG.

  PartitionAlloc code must use PA_BUILDFLAG(IS_DEBUG) rather than the
  compiler-provided NDEBUG macro.

  Args:
    input_api: the presubmit InputApi object.
    output_api: the presubmit OutputApi object.

  Returns:
    A list of PresubmitError objects, one per offending changed line.
  """
  sources = lambda affected_file: input_api.FilterSourceFile(
      affected_file,
      files_to_skip=[],
      files_to_check=[_SOURCE_FILE_PATTERN])
  errors = []
  for f in input_api.AffectedSourceFiles(sources):
    for line_number, line in f.ChangedContents():
      if 'NDEBUG' in line:
        # Note: a single format string is used here; the previous
        # concatenation of '...PartitionAlloc' + 'disallows...' rendered
        # the message as "PartitionAllocdisallows" (missing space).
        errors.append(output_api.PresubmitError(
            '%s:%d\nPartitionAlloc disallows NDEBUG, '
            'use PA_BUILDFLAG(IS_DEBUG) instead'
            % (f.LocalPath(), line_number + 1)))
  return errors

View file

@ -4,6 +4,56 @@
import("//build_overrides/partition_alloc.gni")
# -----------------------------------------------------------------------------
# Note on the use of `xxx_default` variable in partition_alloc.
#
# GN provides default_args() instruction. It is meant to be used by embedders,
# to override the default args declared by the embeddees (e.g. partition_alloc).
# This is the intended way to use GN. It properly interacts with the args.gn
# user's file.
#
# Unfortunately, Chrome and others embedders aren't using it. Instead, they
# expect embeddees to import global '.gni' file from the embedder, e.g.
# `//build_overrides/partition_alloc.gni`. This file sets some `xxx_default`
# variable that will be transferred to the declared args. For instance
# a library would use:
# ```
# import("//build_overrides/library.gni")
# declare_args() {
# xxx = xxx_default
# }
# ```
#
# We don't really want to break embedders when introducing new args. Ideally,
# we would have liked to have defaults for default variables. That would be
# a recursive problem. To resolve it, we sometimes use the `defined(...)`
# instruction to check if the embedder has defined the `xxx_default` variable or
# not.
#
# In general, we should aim to support the embedders that are using GN normally,
# and avoid requiring them to define `xxx_default` in the `//build_overrides`
# -----------------------------------------------------------------------------
# Some embedders use `is_debug`; it can be used to set the default value of
# `partition_alloc_is_debug_default`.
if (!defined(partition_alloc_is_debug_default)) {
if (defined(is_debug)) {
partition_alloc_is_debug_default = is_debug
} else {
partition_alloc_is_debug_default = false
}
}
# Some embedders use `dcheck_always_on`; it can be used to set the default
# value of `partition_alloc_dcheck_always_on_default`.
if (!defined(partition_alloc_dcheck_always_on_default)) {
if (defined(dcheck_always_on)) {
partition_alloc_dcheck_always_on_default = dcheck_always_on
} else {
partition_alloc_dcheck_always_on_default = false
}
}
# PartitionAlloc has limited support for MSVC's cl.exe compiler. It can only
# access the generated "buildflags" and the "raw_ptr" definitions implemented
# with RawPtrNoOpImpl. Everything else is considered not supported.
@ -48,6 +98,12 @@ has_memory_tagging =
current_cpu == "arm64" && is_clang && !is_asan && is_linux && current_os != "openwrt"
declare_args() {
# Debug configuration.
partition_alloc_is_debug = partition_alloc_is_debug_default
# Enable PA_DCHECKs in PartitionAlloc in release mode.
partition_alloc_dcheck_always_on = partition_alloc_dcheck_always_on_default
# Causes all the allocations to be routed via allocator_shim.cc. Usually,
# the allocator shim will, in turn, route them to PartitionAlloc, but
# other allocators are also supported by the allocator shim.
@ -135,8 +191,8 @@ if (use_allocator_shim && is_win) {
# If libcxx_is_shared=false, libc++ is a static library. All libc++ code
# will be run inside the client. The above issue will disappear.
assert(
!is_component_build || (!libcxx_is_shared && !is_debug),
"The allocator shim for the Windows component build needs !libcxx_is_shared && !is_debug.")
!is_component_build || (!libcxx_is_shared && !partition_alloc_is_debug),
"The allocator shim for the Windows component build needs !libcxx_is_shared && !partition_alloc_is_debug.")
}
declare_args() {
@ -172,7 +228,8 @@ declare_args() {
# later verify the pattern remain unchanged to ensure there is no OOB write.
# It comes with performance and memory cost, hence enabled only in debug.
use_partition_cookie =
is_debug || dcheck_always_on || enable_ios_corruption_hardening
partition_alloc_is_debug || partition_alloc_dcheck_always_on ||
enable_ios_corruption_hardening
# This will change partition cookie size to 4B or 8B, whichever equivalent to
# size of InSlotMetadata. This option is useful for InSlotMetadata corruption

View file

@ -35,7 +35,8 @@ enable_pointer_compression =
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`, but avails it
# as a buildflag.
dchecks_are_on = is_debug || dcheck_always_on
partition_alloc_dchecks_are_on =
partition_alloc_is_debug || partition_alloc_dcheck_always_on
# Building PartitionAlloc for Windows component build.
# Currently use build_with_chromium not to affect any third_party code,
@ -151,7 +152,7 @@ pa_buildflag_header("buildflags") {
"IS_CASTOS=$is_castos",
"IS_CAST_ANDROID=$is_cast_android",
"IS_CHROMEOS=$is_chromeos",
"IS_DEBUG=$is_debug",
"IS_DEBUG=$partition_alloc_is_debug",
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
@ -171,7 +172,7 @@ pa_buildflag_header("buildflags") {
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
"ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT=$enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support",
"DCHECKS_ARE_ON=$dchecks_are_on",
"DCHECKS_ARE_ON=$partition_alloc_dchecks_are_on",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
@ -330,7 +331,7 @@ if (is_clang_or_gcc) {
}
}
if (enable_pkeys && is_debug) {
if (enable_pkeys && partition_alloc_is_debug) {
config("no_stack_protector") {
cflags = [ "-fno-stack-protector" ]
}
@ -567,7 +568,7 @@ if (is_clang_or_gcc) {
# We want to be able to test pkey mode without access to the default pkey.
# This is incompatible with stack protectors since the TLS won't be pkey-tagged.
if (enable_pkeys && is_debug) {
if (enable_pkeys && partition_alloc_is_debug) {
configs += [ ":no_stack_protector" ]
}
}
@ -1015,7 +1016,7 @@ if (build_with_chromium) {
]
}
if (enable_pkeys && is_debug && !is_component_build) {
if (enable_pkeys && partition_alloc_is_debug && !is_component_build) {
# This test requires RELRO, which is not enabled in component builds.
# Also, require a debug build, since we only disable stack protectors in
# debug builds in PartitionAlloc (see below why it's needed).

View file

@ -37,8 +37,4 @@ void InternalPartitionAllocated::operator delete(void* ptr, std::align_val_t) {
InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
// A deleter for `std::unique_ptr<T>`.
void InternalPartitionDeleter::operator()(void* ptr) const {
  // Free via the internal partition root. kNoHooks skips allocator-shim
  // hooks, matching how the other internal-partition deallocations in this
  // file release memory.
  InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
} // namespace partition_alloc::internal

View file

@ -48,8 +48,6 @@ T* ConstructAtInternalPartition(Args&&... args) {
}
// Destroy an object on heap in the internal partition.
// TODO(crbug.com/40274826) This is an unused function. Start using it in tests
// and/or in production code.
template <typename T>
void DestroyAtInternalPartition(T* ptr) {
// Destroying an array is not supported.

View file

@ -67,14 +67,13 @@ template <typename T, typename... Args>
T* ConstructAtInternalPartition(Args&&... args);
// Destroy an object on heap in the internal partition.
// TODO(crbug.com/40274826) This is an unused function. Start using it in tests
// and/or in production code.
template <typename T>
void DestroyAtInternalPartition(T* ptr);
// A deleter for `std::unique_ptr<T>`.
struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) InternalPartitionDeleter final {
void operator()(void* ptr) const;
template <typename T>
struct InternalPartitionDeleter final {
void operator()(T* ptr) const { DestroyAtInternalPartition(ptr); }
};
} // namespace partition_alloc::internal

View file

@ -19,7 +19,10 @@ class PA_SCOPED_LOCKABLE
public:
PA_ALWAYS_INLINE explicit CompileTimeConditionalScopedGuard(Lock& lock)
PA_EXCLUSIVE_LOCK_FUNCTION(lock) {}
PA_ALWAYS_INLINE ~CompileTimeConditionalScopedGuard() PA_UNLOCK_FUNCTION() {}
// For some reason, defaulting this causes a thread safety annotation failure.
PA_ALWAYS_INLINE
~CompileTimeConditionalScopedGuard() // NOLINT(modernize-use-equals-default)
PA_UNLOCK_FUNCTION() {}
};
template <>
@ -71,7 +74,12 @@ LightweightQuarantineBranch::LightweightQuarantineBranch(
const LightweightQuarantineBranchConfig& config)
: root_(root),
lock_required_(config.lock_required),
branch_capacity_in_bytes_(config.branch_capacity_in_bytes) {}
branch_capacity_in_bytes_(config.branch_capacity_in_bytes) {
if (lock_required_) {
to_be_freed_working_memory_ =
ConstructAtInternalPartition<ToBeFreedArray>();
}
}
LightweightQuarantineBranch::LightweightQuarantineBranch(
LightweightQuarantineBranch&& b)
@ -82,10 +90,19 @@ LightweightQuarantineBranch::LightweightQuarantineBranch(
branch_capacity_in_bytes_(
b.branch_capacity_in_bytes_.load(std::memory_order_relaxed)) {
b.branch_size_in_bytes_ = 0;
if (lock_required_) {
to_be_freed_working_memory_.store(b.to_be_freed_working_memory_.exchange(
nullptr, std::memory_order_relaxed),
std::memory_order_relaxed);
}
}
// Drains the quarantine and releases the reserved dequarantine scratch array.
LightweightQuarantineBranch::~LightweightQuarantineBranch() {
  // Deallocate every object still held in quarantine before tear-down.
  Purge();
  if (lock_required_) {
    // Detach the pre-allocated working memory atomically and destroy it.
    // NOTE(review): assumes the stored pointer is non-null here (borrowers
    // return it after each use and no other thread races the destructor) —
    // confirm, since DestroyAtInternalPartition on nullptr may not be safe.
    DestroyAtInternalPartition(to_be_freed_working_memory_.exchange(
        nullptr, std::memory_order_relaxed));
  }
}
bool LightweightQuarantineBranch::IsQuarantinedForTesting(void* object) {
@ -151,16 +168,26 @@ bool LightweightQuarantineBranch::QuarantineInternal(
const size_t random_index = random_.RandUint32() % slots_.size();
std::swap(slots_[random_index], slots_.back());
} else {
ToBeFreedArray to_be_freed;
std::unique_ptr<ToBeFreedArray, InternalPartitionDeleter<ToBeFreedArray>>
to_be_freed;
size_t num_of_slots = 0;
{
CompileTimeConditionalScopedGuard<lock_required> guard(lock_);
// Borrow the reserved working memory from to_be_freed_working_memory_,
// and set nullptr to it indicating that it's in use.
to_be_freed.reset(to_be_freed_working_memory_.exchange(nullptr));
if (!to_be_freed) {
// When the reserved working memory has already been in use by another
// thread, fall back to allocate another chunk of working memory.
to_be_freed.reset(ConstructAtInternalPartition<ToBeFreedArray>());
}
// Dequarantine some entries as required. Save the objects to be
// deallocated into `to_be_freed`.
PurgeInternalWithDefferedFree(capacity_in_bytes - usable_size,
to_be_freed, num_of_slots);
*to_be_freed, num_of_slots);
// Put the entry onto the list.
branch_size_in_bytes_ += usable_size;
@ -173,7 +200,17 @@ bool LightweightQuarantineBranch::QuarantineInternal(
}
// Actually deallocate the dequarantined objects.
BatchFree(to_be_freed, num_of_slots);
BatchFree(*to_be_freed, num_of_slots);
// Return the possibly-borrowed working memory to
// to_be_freed_working_memory_. It doesn't matter much if it's really
// borrowed or locally-allocated. The important facts are 1) to_be_freed is
// non-null, and 2) to_be_freed_working_memory_ may likely be null (because
// this or another thread has already borrowed it). It's simply good to make
// to_be_freed_working_memory_ non-null whenever possible. Maybe yet another
// thread would be about to borrow the working memory.
to_be_freed.reset(
to_be_freed_working_memory_.exchange(to_be_freed.release()));
}
// Update stats (not locked).

View file

@ -217,6 +217,17 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
// Using `std::atomic` here so that other threads can update this value.
std::atomic_size_t branch_capacity_in_bytes_;
// This working memory is temporarily needed only while dequarantining
// objects in slots_ when lock_required_ is true. However, allocating this
// working memory on stack may cause stack overflow [1]. Plus, it's non-
// negligible perf penalty to allocate and deallocate this working memory on
// heap only while dequarantining. So, we reserve one chunk of working memory
// on heap during the entire lifetime of this branch object and try to reuse
// this working memory among threads. Only when thread contention occurs, we
// allocate and deallocate another chunk of working memory.
// [1] https://issues.chromium.org/issues/387508217
std::atomic<ToBeFreedArray*> to_be_freed_working_memory_ = nullptr;
friend class LightweightQuarantineRoot;
};

View file

@ -70,9 +70,30 @@ extern PageCharacteristics page_characteristics;
// Ability to name anonymous VMAs is available on some, but not all Linux-based
// systems.
#if PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX)
#if PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX) || \
PA_BUILDFLAG(IS_CHROMEOS)
#include <sys/prctl.h>
#if (PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)) && \
!(defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME))
// The PR_SET_VMA* symbols are originally from
// https://android.googlesource.com/platform/bionic/+/lollipop-release/libc/private/bionic_prctl.h
// and were subsequently added to mainline Linux in Jan 2022, see
// https://github.com/torvalds/linux/commit/9a10064f5625d5572c3626c1516e0bebc6c9fe9b.
//
// Define them to support compiling with older headers.
#if !defined(PR_SET_VMA)
#define PR_SET_VMA 0x53564d41
#endif
#if !defined(PR_SET_VMA_ANON_NAME)
#define PR_SET_VMA_ANON_NAME 0
#endif
#endif // (PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)) &&
// !(defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME))
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
#define LINUX_NAME_REGION 1
#endif

View file

@ -150,7 +150,7 @@ void NameRegion(void* start, size_t length, PageTag page_tag) {
PA_NOTREACHED();
}
// No error checking on purpose, testing only.
// No error checking on purpose, used for debugging only.
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, length, name);
}

View file

@ -20,11 +20,12 @@
// This header defines the CHECK, DCHECK, and DPCHECK macros.
//
// CHECK dies with a fatal error if its condition is not true. It is not
// controlled by NDEBUG, so the check will be executed regardless of compilation
// mode.
// controlled by PA_BUILDFLAG(IS_DEBUG), so the check will be executed
// regardless of compilation mode.
//
// DCHECK, the "debug mode" check, is enabled depending on NDEBUG and
// DCHECK_ALWAYS_ON, and its severity depends on DCHECK_IS_CONFIGURABLE.
// DCHECK, the "debug mode" check, is enabled depending on
// PA_BUILDFLAG(IS_DEBUG) and PA_BUILDFLAG(DCHECK_ALWAYS_ON), and its severity
// depends on PA_BUILDFLAG(DCHECK_IS_CONFIGURABLE).
//
// (D)PCHECK is like (D)CHECK, but includes the system error code (c.f.
// perror(3)).
@ -141,9 +142,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) NotImplemented
} // namespace check_error
#if defined(OFFICIAL_BUILD) && !defined(NDEBUG)
#if defined(OFFICIAL_BUILD) && PA_BUILDFLAG(IS_DEBUG)
#error "Debug builds are not expected to be optimized as official builds."
#endif // defined(OFFICIAL_BUILD) && !defined(NDEBUG)
#endif // defined(OFFICIAL_BUILD) && BUILDFLAG(IS_DEBUG)
#if defined(OFFICIAL_BUILD) && !PA_BUILDFLAG(DCHECKS_ARE_ON)

View file

@ -6,6 +6,7 @@
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
// A wrapper around `__has_cpp_attribute()`, which is in C++20 and thus not yet
// available for all targets PA supports (since PA's minimum C++ version is 17).
@ -87,7 +88,7 @@
//
// Since `ALWAYS_INLINE` is performance-oriented but can hamper debugging,
// ignore it in debug mode.
#if defined(NDEBUG)
#if !PA_BUILDFLAG(IS_DEBUG)
#if PA_HAS_CPP_ATTRIBUTE(clang::always_inline)
#define PA_ALWAYS_INLINE [[clang::always_inline]] inline
#elif PA_HAS_CPP_ATTRIBUTE(gnu::always_inline)
@ -95,7 +96,7 @@
#elif defined(PA_COMPILER_MSVC)
#define PA_ALWAYS_INLINE __forceinline
#endif
#endif
#endif // !PA_BUILDFLAG(IS_DEBUG)
#if !defined(PA_ALWAYS_INLINE)
#define PA_ALWAYS_INLINE inline
#endif

View file

@ -9,6 +9,8 @@
#include <limits>
#include <type_traits>
#include "partition_alloc/buildflags.h"
namespace partition_alloc::internal::base::internal {
// The std library doesn't provide a binary max_exponent for integers, however
@ -83,10 +85,10 @@ constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
// TODO(jschuh): Debug builds don't reliably propagate constants, so we restrict
// some accelerated runtime paths to release builds until this can be forced
// with consteval support in C++20 or C++23.
#if defined(NDEBUG)
constexpr bool kEnableAsmCode = true;
#if PA_BUILDFLAG(IS_DEBUG)
inline constexpr bool kEnableAsmCode = false;
#else
constexpr bool kEnableAsmCode = false;
inline constexpr bool kEnableAsmCode = true;
#endif
// Forces a crash, like a NOTREACHED(). Used for numeric boundary errors.

View file

@ -18,6 +18,7 @@
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(IS_POSIX)
#include <cerrno>
@ -31,7 +32,7 @@ template <typename Fn>
inline auto WrapEINTR(Fn fn) {
return [fn](auto&&... args) {
int out = -1;
#if defined(NDEBUG)
#if !PA_BUILDFLAG(IS_DEBUG)
while (true)
#else
for (int retry_count = 0; retry_count < 100; ++retry_count)

View file

@ -10,8 +10,9 @@
#include <limits>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#if !defined(NDEBUG)
#if PA_BUILDFLAG(IS_DEBUG)
// In debug builds, we use RAW_CHECK() to print useful error messages, if
// SafeSPrintf() is called with broken arguments.
// As our contract promises that SafeSPrintf() can be called from any
@ -41,7 +42,7 @@
if (x) { \
} \
} while (0)
#endif
#endif // PA_BUILDFLAG(IS_DEBUG)
namespace partition_alloc::internal::base::strings {
@ -74,7 +75,7 @@ const char kUpCaseHexDigits[] = "0123456789ABCDEF";
const char kDownCaseHexDigits[] = "0123456789abcdef";
} // namespace
#if defined(NDEBUG)
#if !PA_BUILDFLAG(IS_DEBUG)
// We would like to define kSSizeMax as std::numeric_limits<ssize_t>::max(),
// but C++ doesn't allow us to do that for constants. Instead, we have to
// use careful casting and shifting. We later use a static_assert to
@ -82,7 +83,7 @@ const char kDownCaseHexDigits[] = "0123456789abcdef";
namespace {
const size_t kSSizeMax = kSSizeMaxConst;
}
#else // defined(NDEBUG)
#else // !PA_BUILDFLAG(IS_DEBUG)
// For efficiency, we really need kSSizeMax to be a constant. But for unit
// tests, it should be adjustable. This allows us to verify edge cases without
// having to fill the entire available address space. As a compromise, we make
@ -101,7 +102,7 @@ size_t GetSafeSPrintfSSizeMaxForTest() {
return kSSizeMax;
}
} // namespace internal
#endif // defined(NDEBUG)
#endif // !PA_BUILDFLAG(IS_DEBUG)
namespace {
class Buffer {
@ -111,10 +112,7 @@ class Buffer {
// to ensure that the buffer is at least one byte in size, so that it fits
// the trailing NUL that will be added by the destructor. The buffer also
// must be smaller or equal to kSSizeMax in size.
Buffer(char* buffer, size_t size)
: buffer_(buffer),
size_(size - 1), // Account for trailing NUL byte
count_(0) {
Buffer(char* buffer, size_t size) : buffer_(buffer), size_(size - 1) {
// MSVS2013's standard library doesn't mark max() as constexpr yet. cl.exe
// supports static_cast but doesn't really implement constexpr yet so it doesn't
// complain, but clang does.
@ -276,7 +274,7 @@ class Buffer {
// Number of bytes that would have been emitted to the buffer, if the buffer
// was sufficiently big. This number always excludes the trailing NUL byte
// and it is guaranteed to never grow bigger than kSSizeMax-1.
size_t count_;
size_t count_ = 0;
};
bool Buffer::IToASCII(bool sign,

View file

@ -28,6 +28,10 @@
#include <zircon/process.h>
#endif
#if defined(__MUSL__)
#include "partition_alloc/shim/allocator_shim.h"
#endif
namespace partition_alloc::internal::base {
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
@ -59,8 +63,21 @@ thread_local bool g_is_main_thread = true;
class InitAtFork {
public:
InitAtFork() {
#if !defined(__MUSL__)
#if defined(__MUSL__)
allocator_shim::AllocatorDispatch d =
*allocator_shim::GetAllocatorDispatchChainHeadForTesting();
d.alloc_function = +[](size_t size, void*) -> void* {
// The size of the scratch fits struct atfork_funcs in Musl pthread_atfork.c.
static char scratch[5 * sizeof(void*)];
return size != sizeof(scratch) ? nullptr : scratch;
};
allocator_shim::InsertAllocatorDispatch(&d);
#endif
pthread_atfork(nullptr, nullptr, internal::InvalidateTidCache);
#if defined(__MUSL__)
allocator_shim::RemoveAllocatorDispatchForTesting(&d);
#endif
}
};

View file

@ -61,11 +61,11 @@
// Expensive dchecks that run within *Scan. These checks are only enabled in
// debug builds with dchecks enabled.
#if !defined(NDEBUG)
#if PA_BUILDFLAG(IS_DEBUG)
#define PA_SCAN_DCHECK_IS_ON() PA_BUILDFLAG(DCHECKS_ARE_ON)
#else
#define PA_SCAN_DCHECK_IS_ON() 0
#endif
#endif // PA_BUILDFLAG(IS_DEBUG)
#if PA_SCAN_DCHECK_IS_ON()
#define PA_SCAN_DCHECK(expr) PA_DCHECK(expr)

View file

@ -63,9 +63,7 @@ enum class FreeFlags {
kNoHooks = 1 << 1, // Internal.
// Quarantine for a while to ensure no UaF from on-stack pointers.
kSchedulerLoopQuarantine = 1 << 2,
// Zap the object region on `Free()`.
kZap = 1 << 3,
kMaxValue = kZap,
kMaxValue = kSchedulerLoopQuarantine,
};
PA_DEFINE_OPERATORS_FOR_FLAGS(FreeFlags);
} // namespace internal

View file

@ -684,7 +684,7 @@ PartitionBucket::AllocNewSlotSpan(PartitionRoot* root,
for (auto* page = gap_start_page->ToWritable(root);
page < gap_end_page->ToWritable(root); ++page) {
PA_DCHECK(!page->is_valid);
page->has_valid_span_after_this = 1;
page->has_valid_span_after_this = true;
}
root->next_partition_page =
adjusted_next_partition_page + slot_span_reservation_size;
@ -708,7 +708,7 @@ PartitionBucket::AllocNewSlotSpan(PartitionRoot* root,
PA_DEBUG_DATA_ON_STACK("spancmt", slot_span_committed_size);
root->RecommitSystemPagesForData(
slot_span_start, slot_span_committed_size,
slot_span_start, SlotSpanCommittedSize(root),
PageAccessibilityDisposition::kRequireUpdate,
slot_size <= kMaxMemoryTaggingSize);
}
@ -1590,4 +1590,63 @@ void PartitionBucket::InitializeSlotSpanForGwpAsan(
InitializeSlotSpan(slot_span, root);
}
size_t PartitionBucket::SlotSpanCommittedSize(PartitionRoot* root) const {
  // With lazy commit, we certainly don't want to commit more than
  // necessary. This is not reached, but keep the CHECK() as documentation.
  PA_CHECK(!kUseLazyCommit);
  // Memory is reserved in units of PartitionPage, but a given slot span may be
  // smaller than the reserved area. For instance (assuming 4k pages, i.e. a
  // 16kiB PartitionPage), for a bucket where the slot span size is 40kiB, we
  // reserve 3 PartitionPages = 3 * 16 = 48kiB, but only ever commit 40kiB out
  // of it.
  //
  // This means that the address space then looks like, assuming that the
  // PartitionPage next to it is committed:
  // [SlotSpan range, 40kiB] rw-p
  // [Unused area in the last PartitionPage, 8kiB] ---p
  // [Next PartitionPages, size unknown ] rw-p
  //
  // So we have a "hole" of inaccessible memory, and 3 memory regions. If
  // instead we commit the full PartitionPages, we get (due to the kernel
  // merging neighboring regions with uniform permissions):
  //
  // [SlotSpan range, 40kiB + Unused area, 8kiB + next PartitionPages] rw-p
  //
  // So 1 memory region rather than 3. This matters, because on Linux kernels,
  // there is a maximum number of VMAs per process, with the default limit a bit
  // less than 2^16, and Chromium sometimes hits the limit (see
  // /proc/sys/vm/max_map_count for the current limit), largely because of
  // PartitionAlloc contributing thousands of regions. Locally, on a Linux
  // system, this reduces the number of PartitionAlloc regions by up to ~4x.
  //
  // Why is it safe?
  // The extra memory is not used by anything, so committing it doesn't make a
  // difference. It makes it accessible though.
  //
  // How much does it cost?
  // Almost nothing. On Linux, "committing" memory merely changes its
  // permissions, it doesn't cost any memory until the pages are touched, which
  // they are not. However, mprotect()-ed areas that are writable count towards
  // the RLIMIT_DATA resource limit, which is used by the sandbox. So, while
  // this change costs 0 physical memory (and actually saves some, by reducing
  // the size of the VMA red-black tree in the kernel), it might increase
  // slightly the cases where we bump into the sandbox memory limit.
  //
  // Is it safe to do while running?
  // Since this is decided through root settings, the value changes at runtime,
  // so we may decommit memory that was never committed. This is safe on Linux,
  // since decommitting is just changing permissions back to PROT_NONE, which
  // the tail end would already have.
  //
  // Can we do better?
  // For simplicity, we do not "fix" the regions that were committed before the
  // settings are changed (after feature list initialization). This means that
  // we end up with more regions than we could. The intent is to run a field
  // experiment, then change the default value, at which point we get the full
  // impact, so this is only temporary.
  return root->settings.fewer_memory_regions
             ? (get_pages_per_slot_span() << PartitionPageShift())
             : get_bytes_per_span();
}
} // namespace partition_alloc::internal

View file

@ -1,7 +1,6 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_PARTITION_BUCKET_H_
#define PARTITION_ALLOC_PARTITION_BUCKET_H_
@ -171,6 +170,8 @@ struct PartitionBucket {
SlotSpanMetadata<MetadataKind::kReadOnly>* slot_span,
PartitionRoot* root);
size_t SlotSpanCommittedSize(PartitionRoot* root) const;
private:
// Sets `this->can_store_raw_size`.
void InitCanStoreRawSize(bool use_small_single_slot_spans);

View file

@ -248,7 +248,7 @@ void SlotSpanMetadata<MetadataKind::kWritable>::Decommit(PartitionRoot* root) {
size_t dirty_size =
base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());
size_t size_to_decommit =
kUseLazyCommit ? dirty_size : bucket->get_bytes_per_span();
kUseLazyCommit ? dirty_size : bucket->SlotSpanCommittedSize(root);
PA_DCHECK(root->empty_slot_spans_dirty_bytes >= dirty_size);
root->empty_slot_spans_dirty_bytes -= dirty_size;

View file

@ -51,6 +51,10 @@
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
#endif // PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
#if defined(__MUSL__)
#include "partition_alloc/shim/allocator_shim.h"
#endif
namespace partition_alloc::internal {
#if PA_BUILDFLAG(RECORD_ALLOC_INFO)
@ -297,11 +301,7 @@ void PartitionAllocMallocInitOnce() {
return;
}
#if defined(__MUSL__)
static_cast<void>(BeforeForkInParent);
static_cast<void>(AfterForkInParent);
static_cast<void>(AfterForkInChild);
#elif PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
// When fork() is called, only the current thread continues to execute in the
// child process. If the lock is held, but *not* by this thread when fork() is
// called, we have a deadlock.
@ -323,9 +323,25 @@ void PartitionAllocMallocInitOnce() {
// However, no perfect solution really exists to make threads + fork()
// cooperate, but deadlocks are real (and fork() is used in DEATH_TEST()s),
// and other malloc() implementations use the same techniques.
#if defined(__MUSL__)
allocator_shim::AllocatorDispatch d =
*allocator_shim::GetAllocatorDispatchChainHeadForTesting();
d.alloc_function = +[](size_t size, void*) -> void* {
// The size of the scratch fits struct atfork_funcs in Musl pthread_atfork.c.
static char scratch[5 * sizeof(void*)];
return size != sizeof(scratch) ? nullptr : scratch;
};
allocator_shim::InsertAllocatorDispatch(&d);
#endif
int err =
pthread_atfork(BeforeForkInParent, AfterForkInParent, AfterForkInChild);
PA_CHECK(err == 0);
#if defined(__MUSL__)
allocator_shim::RemoveAllocatorDispatchForTesting(&d);
#endif
#endif // PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
}
@ -1158,6 +1174,8 @@ void PartitionRoot::Init(PartitionOptions opts) {
opts.zapping_by_free_flags == PartitionOptions::kEnabled;
settings.eventually_zero_freed_memory =
opts.eventually_zero_freed_memory == PartitionOptions::kEnabled;
settings.fewer_memory_regions =
opts.fewer_memory_regions == PartitionOptions::kEnabled;
settings.scheduler_loop_quarantine =
opts.scheduler_loop_quarantine == PartitionOptions::kEnabled;

View file

@ -182,6 +182,7 @@ struct PartitionOptions {
// compression ratio of freed memory inside partially allocated pages (due to
// fragmentation).
EnableToggle eventually_zero_freed_memory = kDisabled;
EnableToggle fewer_memory_regions = kDisabled;
struct {
EnableToggle enabled = kDisabled;
@ -261,9 +262,14 @@ struct alignas(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
size_t in_slot_metadata_size = 0;
#endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
bool use_configurable_pool = false;
// Despite its name, `FreeFlags` for zapping is deleted and does not exist.
// This value is used for SchedulerLoopQuarantine.
// TODO(https://crbug.com/351974425): group this setting and quarantine
// setting in one place.
bool zapping_by_free_flags = false;
bool eventually_zero_freed_memory = false;
bool scheduler_loop_quarantine = false;
bool fewer_memory_regions = false;
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
bool memory_tagging_enabled_ = false;
bool use_random_memory_tagging_ = false;
@ -1516,16 +1522,11 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
// cacheline ping-pong.
PA_PREFETCH(slot_span);
// Further down, we may zap the memory, no point in doing it twice. We may
// zap twice if kZap is enabled without kSchedulerLoopQuarantine. Make sure it
// does not happen. This is not a hard requirement: if this is deemed cheap
// enough, it can be relaxed, the static_assert() is here to make it a
// conscious decision.
static_assert(!ContainsFlags(flags, FreeFlags::kZap) ||
ContainsFlags(flags, FreeFlags::kSchedulerLoopQuarantine),
"kZap and kSchedulerLoopQuarantine should be used together to "
"avoid double zapping");
if constexpr (ContainsFlags(flags, FreeFlags::kZap)) {
// TODO(crbug.com/40287058): Collecting objects for
// `kSchedulerLoopQuarantineBranch` here means it "delays" other checks (BRP
// refcount, cookie, etc.)
// For better debuggability, we should do these checks before quarantining.
if constexpr (ContainsFlags(flags, FreeFlags::kSchedulerLoopQuarantine)) {
// No need to zap direct mapped allocations, as they are unmapped right
// away. This also ensures that we don't needlessly memset() very large
// allocations.
@ -1534,12 +1535,7 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
internal::SecureMemset(object, internal::kFreedByte,
GetSlotUsableSize(slot_span));
}
}
// TODO(crbug.com/40287058): Collecting objects for
// `kSchedulerLoopQuarantineBranch` here means it "delays" other checks (BRP
// refcount, cookie, etc.)
// For better debuggability, we should do these checks before quarantining.
if constexpr (ContainsFlags(flags, FreeFlags::kSchedulerLoopQuarantine)) {
if (settings.scheduler_loop_quarantine) {
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// TODO(keishi): Add `[[likely]]` when brp is fully enabled as

View file

@ -1255,6 +1255,36 @@ struct pointer_traits<::raw_ptr<T, Traits>> {
}
};
// Mark `raw_ptr<T>` and `T*` as having a common reference type (the type to
// which both can be converted or bound) of `T*`. This makes them satisfy
// `std::equality_comparable`, which allows usage like:
// ```
// std::vector<raw_ptr<T>> v;
// T* e;
// auto it = std::ranges::find(v, e);
// ```
// Without this, the `find()` call above would fail to compile with a cryptic
// error about being unable to invoke `std::ranges::equal_to()`.
template <typename T,
base::RawPtrTraits Traits,
template <typename>
typename TQ,
template <typename>
typename UQ>
struct std::basic_common_reference<raw_ptr<T, Traits>, T*, TQ, UQ> {
using type = T*;
};
template <typename T,
base::RawPtrTraits Traits,
template <typename>
typename TQ,
template <typename>
typename UQ>
struct std::basic_common_reference<T*, raw_ptr<T, Traits>, TQ, UQ> {
using type = T*;
};
} // namespace std
#endif // PARTITION_ALLOC_POINTERS_RAW_PTR_H_

View file

@ -27,7 +27,7 @@ class RandomGenerator {
}
private:
::partition_alloc::internal::Lock lock_ = {};
::partition_alloc::internal::Lock lock_;
bool initialized_ PA_GUARDED_BY(lock_) = false;
union {
internal::base::InsecureRandomGenerator instance_ PA_GUARDED_BY(lock_);

View file

@ -8,7 +8,6 @@
#include <cstddef>
#include <cstdint>
#include <limits>
#include <tuple>
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/build_config.h"

View file

@ -150,6 +150,9 @@ using ZappingByFreeFlags =
bool>;
using EventuallyZeroFreedMemory = partition_alloc::internal::base::
StrongAlias<class EventuallyZeroFreedMemoryTag, bool>;
using FewerMemoryRegions =
partition_alloc::internal::base::StrongAlias<class FewerMemoryRegionsTag,
bool>;
using UsePoolOffsetFreelists = partition_alloc::internal::base::
StrongAlias<class UsePoolOffsetFreelistsTag, bool>;
@ -170,6 +173,7 @@ void ConfigurePartitions(
size_t scheduler_loop_quarantine_branch_capacity_in_bytes,
ZappingByFreeFlags zapping_by_free_flags,
EventuallyZeroFreedMemory eventually_zero_freed_memory,
FewerMemoryRegions fewer_memory_regions,
UsePoolOffsetFreelists use_pool_offset_freelists,
UseSmallSingleSlotSpans use_small_single_slot_spans);

View file

@ -26,6 +26,7 @@
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#include "partition_alloc/shim/allocator_dispatch.h"
#include "partition_alloc/shim/allocator_shim.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_internal.h"
#include "partition_alloc/shim/allocator_shim_internals.h"
@ -570,7 +571,6 @@ template class PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
PartitionAllocFunctionsInternal<
partition_alloc::AllocFlags::kNoHooks,
partition_alloc::FreeFlags::kNoHooks |
partition_alloc::FreeFlags::kZap |
partition_alloc::FreeFlags::kSchedulerLoopQuarantine>;
// static
@ -619,6 +619,7 @@ void ConfigurePartitions(
size_t scheduler_loop_quarantine_branch_capacity_in_bytes,
ZappingByFreeFlags zapping_by_free_flags,
EventuallyZeroFreedMemory eventually_zero_freed_memory,
FewerMemoryRegions fewer_memory_regions,
UsePoolOffsetFreelists use_pool_offset_freelists,
UseSmallSingleSlotSpans use_small_single_slot_spans) {
// Calling Get() is actually important, even if the return value isn't
@ -643,6 +644,7 @@ void ConfigurePartitions(
opts.backup_ref_ptr =
enable_brp ? partition_alloc::PartitionOptions::kEnabled
: partition_alloc::PartitionOptions::kDisabled;
opts.backup_ref_ptr_extra_extras_size = brp_extra_extras_size;
opts.zapping_by_free_flags =
zapping_by_free_flags
? partition_alloc::PartitionOptions::kEnabled
@ -651,6 +653,9 @@ void ConfigurePartitions(
eventually_zero_freed_memory
? partition_alloc::PartitionOptions::kEnabled
: partition_alloc::PartitionOptions::kDisabled;
opts.fewer_memory_regions =
fewer_memory_regions ? partition_alloc::PartitionOptions::kEnabled
: partition_alloc::PartitionOptions::kDisabled;
opts.scheduler_loop_quarantine =
scheduler_loop_quarantine
? partition_alloc::PartitionOptions::kEnabled

View file

@ -135,7 +135,6 @@ using PartitionAllocWithAdvancedChecksFunctions =
PartitionAllocFunctionsInternal<
partition_alloc::AllocFlags::kNoHooks,
partition_alloc::FreeFlags::kNoHooks |
partition_alloc::FreeFlags::kZap |
partition_alloc::FreeFlags::kSchedulerLoopQuarantine>;
// `PartitionAllocFunctions` in instantiated in cc file.
@ -147,7 +146,6 @@ extern template class PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
PartitionAllocFunctionsInternal<
partition_alloc::AllocFlags::kNoHooks,
partition_alloc::FreeFlags::kNoHooks |
partition_alloc::FreeFlags::kZap |
partition_alloc::FreeFlags::kSchedulerLoopQuarantine>;
} // namespace internal
@ -182,15 +180,16 @@ PA_ALWAYS_INLINE void ConfigurePartitionsForTesting() {
size_t scheduler_loop_quarantine_capacity_in_bytes = 0;
auto zapping_by_free_flags = ZappingByFreeFlags(false);
auto eventually_zero_freed_memory = EventuallyZeroFreedMemory(false);
auto fewer_memory_regions = FewerMemoryRegions(false);
auto use_pool_offset_freelists = UsePoolOffsetFreelists(true);
auto use_small_single_slot_spans = UseSmallSingleSlotSpans(true);
ConfigurePartitions(enable_brp, brp_extra_extras_size, enable_memory_tagging,
memory_tagging_reporting_mode, distribution,
scheduler_loop_quarantine,
scheduler_loop_quarantine_capacity_in_bytes,
zapping_by_free_flags, eventually_zero_freed_memory,
use_pool_offset_freelists, use_small_single_slot_spans);
ConfigurePartitions(
enable_brp, brp_extra_extras_size, enable_memory_tagging,
memory_tagging_reporting_mode, distribution, scheduler_loop_quarantine,
scheduler_loop_quarantine_capacity_in_bytes, zapping_by_free_flags,
eventually_zero_freed_memory, fewer_memory_regions,
use_pool_offset_freelists, use_small_single_slot_spans);
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

View file

@ -276,7 +276,7 @@ errno_t _wdupenv_s(wchar_t** buffer,
}
#endif
#if !defined(NDEBUG)
#if PA_BUILDFLAG(IS_DEBUG)
typedef void (*_CRT_DUMP_CLIENT)(void*, size_t);
int _crtDbgFlag = 0;
@ -451,7 +451,7 @@ errno_t _wdupenv_s_dbg(wchar_t** buffer,
}
#endif // defined(COMPONENT_BUILD)
#endif // !defined(NDEBUG)
#endif // PA_BUILDFLAG(IS_DEBUG)
} // extern "C"
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)

View file

@ -143,7 +143,12 @@ void SpinningMutex::LockSlow() {
#elif PA_BUILDFLAG(IS_APPLE)
// TODO(verwaest): We should use the constants from the header, but they aren't
// exposed until macOS 15.
// exposed until macOS 15. See their definition here:
// https://github.com/apple-oss-distributions/libplatform/blob/4f6349dfea579c35b8fa838d785644e441d14e0e/private/os/lock_private.h#L265
//
// The first flag prevents the runtime from creating more threads in response to
// contention. The second will spin in the kernel if the lock owner is currently
// running.
#define OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION 0x00010000
#define OS_UNFAIR_LOCK_ADAPTIVE_SPIN 0x00040000

View file

@ -22,8 +22,8 @@ const base::Feature* const kFeaturesExposedToJava[] = {
// static
base::android::FeatureMap* GetFeatureMap() {
static base::NoDestructor<base::android::FeatureMap> kFeatureMap(std::vector(
std::begin(kFeaturesExposedToJava), std::end(kFeaturesExposedToJava)));
static base::NoDestructor<base::android::FeatureMap> kFeatureMap(
kFeaturesExposedToJava);
return kFeatureMap.get();
}

View file

@ -41,21 +41,8 @@ static std::string JNI_CommandLine_GetSwitchValue(JNIEnv* env,
return CommandLine::ForCurrentProcess()->GetSwitchValueNative(switch_string);
}
static std::vector<std::string> JNI_CommandLine_GetSwitchesFlattened(
JNIEnv* env) {
// JNI doesn't support returning Maps. Instead, express this map as a 1
// dimensional array: [ key1, value1, key2, value2, ... ]
std::vector<std::string> keys_and_values;
for (const auto& entry : CommandLine::ForCurrentProcess()->GetSwitches()) {
keys_and_values.push_back(entry.first);
keys_and_values.push_back(entry.second);
}
return keys_and_values;
}
static void JNI_CommandLine_AppendSwitch(JNIEnv* env,
std::string& switch_string) {
CommandLine::ForCurrentProcess()->AppendSwitch(switch_string);
static CommandLine::SwitchMap JNI_CommandLine_GetSwitches(JNIEnv* env) {
return CommandLine::ForCurrentProcess()->GetSwitches();
}
static void JNI_CommandLine_AppendSwitchWithValue(JNIEnv* env,

View file

@ -24,7 +24,8 @@ std::pair<std::string_view, const Feature*> MakeNameToFeaturePair(
return std::make_pair(feature->name, feature);
}
FeatureMap::FeatureMap(std::vector<const Feature*> features_exposed_to_java) {
FeatureMap::FeatureMap(
base::span<const Feature* const> features_exposed_to_java) {
mapping_ =
MakeFlatMap<std::string_view, raw_ptr<const Feature, CtnExperimental>>(
features_exposed_to_java, {}, &MakeNameToFeaturePair);

View file

@ -9,6 +9,7 @@
#include "base/base_export.h"
#include "base/containers/flat_map.h"
#include "base/containers/span.h"
#include "base/feature_list.h"
#include "base/memory/raw_ptr.h"
@ -22,7 +23,8 @@ namespace base::android {
// Each component should have its own FeatureMap.
class BASE_EXPORT FeatureMap {
public:
explicit FeatureMap(std::vector<const Feature*> featuresExposedToJava);
explicit FeatureMap(
base::span<const Feature* const> features_exposed_to_java);
~FeatureMap();
// Map a |feature_name| to a Feature*.

View file

@ -46,6 +46,7 @@ BASE_FEATURE(kYieldWithInputHint,
// Min time delta between checks for the input hint. Must be a smaller than
// time to produce a frame, but a bit longer than the time it takes to retrieve
// the hint.
// Note: Do not use the prepared macro as of no need for a local cache.
const base::FeatureParam<int> kPollIntervalMillisParam{&kYieldWithInputHint,
"poll_interval_ms", 1};

View file

@ -0,0 +1,112 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/android/jni_callback.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/base_jni/JniCallbackUtils_jni.h"
#include "base/base_jni/JniOnceCallback_jni.h"
#include "base/base_jni/JniRepeatingCallback_jni.h"
namespace base::android {
namespace {
class JniOnceCallback {
public:
explicit JniOnceCallback(JniOnceWrappedCallbackType&& on_complete)
: wrapped_callback_(std::make_unique<JniOnceWrappedCallbackType>(
std::move(on_complete))) {}
~JniOnceCallback() = default;
JniOnceCallback(const JniOnceCallback&) = delete;
const JniOnceCallback& operator=(const JniOnceCallback&) = delete;
jni_zero::ScopedJavaLocalRef<jobject> TransferToJava(JNIEnv* env) && {
CHECK(wrapped_callback_);
CHECK(!wrapped_callback_->is_null());
return Java_JniOnceCallback_Constructor(
env, reinterpret_cast<jlong>(wrapped_callback_.release()));
}
private:
std::unique_ptr<JniOnceWrappedCallbackType> wrapped_callback_;
};
class JniRepeatingCallback {
public:
explicit JniRepeatingCallback(
const JniRepeatingWrappedCallbackType& on_complete)
: wrapped_callback_(
std::make_unique<JniRepeatingWrappedCallbackType>(on_complete)) {}
explicit JniRepeatingCallback(JniRepeatingWrappedCallbackType&& on_complete)
: wrapped_callback_(std::make_unique<JniRepeatingWrappedCallbackType>(
std::move(on_complete))) {}
~JniRepeatingCallback() = default;
jni_zero::ScopedJavaLocalRef<jobject> TransferToJava(JNIEnv* env) && {
CHECK(wrapped_callback_);
CHECK(!wrapped_callback_->is_null());
return Java_JniRepeatingCallback_Constructor(
env, reinterpret_cast<jlong>(wrapped_callback_.release()));
}
JniRepeatingCallback(const JniRepeatingCallback&) = delete;
const JniRepeatingCallback& operator=(const JniRepeatingCallback&) = delete;
private:
std::unique_ptr<JniRepeatingWrappedCallbackType> wrapped_callback_;
};
} // namespace
ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
JniOnceWrappedCallbackType&& callback) {
return JniOnceCallback(std::move(callback)).TransferToJava(env);
}
ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
JniRepeatingWrappedCallbackType&& callback) {
return JniRepeatingCallback(std::move(callback)).TransferToJava(env);
}
ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
const JniRepeatingWrappedCallbackType& callback) {
return JniRepeatingCallback(callback).TransferToJava(env);
}
void JNI_JniCallbackUtils_OnResult(
JNIEnv* env,
jlong callbackPtr,
jboolean isRepeating,
const jni_zero::JavaParamRef<jobject>& j_result) {
if (isRepeating) {
auto* callback =
reinterpret_cast<JniRepeatingWrappedCallbackType*>(callbackPtr);
callback->Run(j_result);
} else {
auto* callback = reinterpret_cast<JniOnceWrappedCallbackType*>(callbackPtr);
std::move(*callback).Run(j_result);
delete callback;
}
}
void JNI_JniCallbackUtils_Destroy(JNIEnv* env,
jlong callbackPtr,
jboolean isRepeating) {
if (isRepeating) {
auto* callback =
reinterpret_cast<JniRepeatingWrappedCallbackType*>(callbackPtr);
// Call Reset to ensure all accidental use-after-frees fail loudly.
callback->Reset();
delete callback;
} else {
auto* callback = reinterpret_cast<JniOnceWrappedCallbackType*>(callbackPtr);
// Call Reset to ensure all accidental use-after-frees fail loudly.
callback->Reset();
delete callback;
}
}
} // namespace base::android

View file

@ -0,0 +1,99 @@
// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_JNI_CALLBACK_H_
#define BASE_ANDROID_JNI_CALLBACK_H_
#include <jni.h>
#include <type_traits>
#include "base/android/scoped_java_ref.h"
#include "base/base_export.h"
#include "base/functional/callback_forward.h"
#include "base/functional/callback_helpers.h"
#include "third_party/jni_zero/jni_zero.h"
namespace base::android {
using JniOnceWrappedCallbackType =
base::OnceCallback<void(const jni_zero::JavaRef<jobject>&)>;
using JniRepeatingWrappedCallbackType =
base::RepeatingCallback<void(const jni_zero::JavaRef<jobject>&)>;
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
JniOnceWrappedCallbackType&& callback);
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
JniRepeatingWrappedCallbackType&& callback);
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
const JniRepeatingWrappedCallbackType& callback);
// Java Callbacks don't return a value so any return value by the passed in
// callback will be ignored.
template <typename R, typename Arg>
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
base::OnceCallback<R(Arg)>&& callback) {
return ToJniCallback(env, base::BindOnce(
[](base::OnceCallback<R(Arg)> captured_callback,
const jni_zero::JavaRef<jobject>& j_result) {
Arg result = jni_zero::FromJniType<Arg>(
jni_zero::AttachCurrentThread(),
j_result);
std::move(captured_callback).Run(result);
},
std::move(callback)));
}
// Java Callbacks don't return a value so any return value by the passed in
// callback will be ignored.
template <typename R>
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
base::OnceCallback<R()>&& callback) {
return ToJniCallback(env, base::BindOnce(
[](base::OnceCallback<R()> captured_callback,
const jni_zero::JavaRef<jobject>& j_result) {
std::move(captured_callback).Run();
},
std::move(callback)));
}
// Java Callbacks don't return a value so any return value by the passed in
// callback will be ignored.
template <typename R, typename Arg>
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
const base::RepeatingCallback<R(Arg)>& callback) {
return ToJniCallback(
env, base::BindRepeating(
[](const base::RepeatingCallback<R(Arg)>& captured_callback,
const jni_zero::JavaRef<jobject>& j_result) {
Arg result = jni_zero::FromJniType<Arg>(
jni_zero::AttachCurrentThread(), j_result);
captured_callback.Run(result);
},
callback));
}
// Java Callbacks don't return a value so any return value by the passed in
// callback will be ignored.
template <typename R>
BASE_EXPORT ScopedJavaLocalRef<jobject> ToJniCallback(
JNIEnv* env,
const base::RepeatingCallback<R()>& callback) {
return ToJniCallback(
env, base::BindRepeating(
[](const base::RepeatingCallback<R()>& captured_callback,
const jni_zero::JavaRef<jobject>& j_result) {
captured_callback.Run();
},
callback));
}
} // namespace base::android
#endif // BASE_ANDROID_JNI_CALLBACK_H_

View file

@ -82,6 +82,10 @@ std::string ConvertJavaStringToUTF8(JNIEnv* env, const JavaRef<jstring>& str) {
ScopedJavaLocalRef<jstring> ConvertUTF8ToJavaString(JNIEnv* env,
std::string_view str) {
// ART allocates new empty strings, so use a singleton when applicable.
if (str.empty()) {
return jni_zero::g_empty_string.AsLocalRef(env);
}
// JNI's NewStringUTF expects "modified" UTF8 so instead create the string
// via our own UTF16 conversion utility.
// Further, Dalvik requires the string passed into NewStringUTF() to come from
@ -146,6 +150,10 @@ std::u16string ConvertJavaStringToUTF16(JNIEnv* env,
ScopedJavaLocalRef<jstring> ConvertUTF16ToJavaString(JNIEnv* env,
std::u16string_view str) {
// ART allocates new empty strings, so use a singleton when applicable.
if (str.empty()) {
return jni_zero::g_empty_string.AsLocalRef(env);
}
return ScopedJavaLocalRef<jstring>(env,
ConvertUTF16ToJavaStringImpl(env, str));
}

View file

@ -10,12 +10,12 @@
#include "base/android/library_loader/anchor_functions_buildflags.h"
#include "base/android/library_loader/library_prefetcher.h"
#include "base/android/orderfile/orderfile_buildflags.h"
#include "base/android/sys_utils.h"
#include "base/at_exit.h"
#include "base/base_switches.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/system/sys_info.h"
#include "build/robolectric_buildflags.h"
#if BUILDFLAG(IS_ROBOLECTRIC)
@ -46,7 +46,7 @@ LibraryProcessType GetLibraryProcessType() {
bool IsUsingOrderfileOptimization() {
#if BUILDFLAG(SUPPORTS_CODE_ORDERING)
return SysUtils::IsLowEndDeviceFromJni();
return SysInfo::IsLowEndDevice();
#else // !SUPPORTS_CODE_ORDERING
return false;
#endif

View file

@ -15,6 +15,7 @@
#include <sys/wait.h>
#include <unistd.h>
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <memory>
@ -28,9 +29,9 @@
#include "base/files/file.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/metrics/histogram_functions.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process_metrics.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
@ -195,6 +196,8 @@ void Prefetch(size_t start, size_t end) {
// These values were used in the past for recording
// "LibraryLoader.PrefetchDetailedStatus".
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused. See PrefetchStatus in enums.xml.
enum class PrefetchStatus {
kSuccess = 0,
kWrongOrdering = 1,
@ -275,7 +278,12 @@ void NativeLibraryPrefetcher::ForkAndPrefetchNativeLibrary(bool ordered_only) {
// would create a dump as well.
return;
#else
base::TimeTicks start_time = base::TimeTicks::Now();
PrefetchStatus status = ForkAndPrefetch(ordered_only);
base::UmaHistogramMediumTimes("Android.LibraryLoader.Prefetch.Duration",
base::TimeTicks::Now() - start_time);
base::UmaHistogramEnumeration("Android.LibraryLoader.Prefetch.Status",
status);
if (status != PrefetchStatus::kSuccess) {
LOG(WARNING) << "Cannot prefetch the library. status = "
<< static_cast<int>(status);
@ -296,7 +304,7 @@ int NativeLibraryPrefetcher::PercentageOfResidentCode(size_t start,
}
total_pages += residency.size();
resident_pages += static_cast<size_t>(
ranges::count_if(residency, [](unsigned char x) { return x & 1; }));
std::ranges::count_if(residency, [](unsigned char x) { return x & 1; }));
if (total_pages == 0) {
return -1;
}

View file

@ -25,6 +25,12 @@ static void JNI_LibraryPrefetcher_ForkAndPrefetchNativeLibrary(JNIEnv* env) {
#endif
}
static void JNI_LibraryPrefetcher_PrefetchNativeLibraryForWebView(JNIEnv* env) {
#if BUILDFLAG(SUPPORTS_CODE_ORDERING)
return NativeLibraryPrefetcher::ForkAndPrefetchNativeLibrary(false);
#endif
}
static jint JNI_LibraryPrefetcher_PercentageOfResidentNativeLibraryCode(
JNIEnv* env) {
#if BUILDFLAG(SUPPORTS_CODE_ORDERING)

View file

@ -30,7 +30,3 @@ static jboolean JNI_MemoryPurgeManager_IsOnPreFreezeMemoryTrimEnabled(
JNIEnv* env) {
return base::android::PreFreezeBackgroundMemoryTrimmer::ShouldUseModernTrim();
}
static jboolean JNI_MemoryPurgeManager_IsSelfFreezeEnabled(JNIEnv* env) {
return base::FeatureList::IsEnabled(base::android::kShouldFreezeSelf);
}

View file

@ -279,9 +279,9 @@ JNI_NativeUmaRecorder_GetHistogramSamplesForTesting(JNIEnv* env,
std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
for (auto sampleCountIterator = samples->Iterator();
!sampleCountIterator->Done(); sampleCountIterator->Next()) {
HistogramBase::Sample min;
HistogramBase::Sample32 min;
int64_t max;
HistogramBase::Count count;
HistogramBase::Count32 count;
sampleCountIterator->Get(&min, &max, &count);
buckets.push_back(min);
buckets.push_back(max);

View file

@ -74,7 +74,7 @@ std::string GetPreFreezeMetricName(std::string_view name,
std::string GetSelfCompactionMetricName(std::string_view name,
std::string_view suffix) {
const char* process_type = GetProcessType();
return StrCat({"Memory.SelfCompact.", process_type, ".", name, ".", suffix});
return StrCat({"Memory.SelfCompact2.", process_type, ".", name, ".", suffix});
}
class PrivateMemoryFootprintMetric
@ -516,9 +516,15 @@ void PreFreezeBackgroundMemoryTrimmer::StartSelfCompaction(
uint64_t max_bytes,
base::TimeTicks started_at) {
TRACE_EVENT0("base", "StartSelfCompaction");
{
base::AutoLock locker(lock_);
process_compacted_metadata_.emplace(
"PreFreezeBackgroundMemoryTrimmer.ProcessCompacted",
/*is_compacted=*/1, base::SampleMetadataScope::kProcess);
}
metric->RecordBeforeMetrics();
SelfCompactionTask(std::move(task_runner), std::move(regions),
std::move(metric), max_bytes, started_at);
MaybePostSelfCompactionTask(std::move(task_runner), std::move(regions),
std::move(metric), max_bytes, started_at);
}
void PreFreezeBackgroundMemoryTrimmer::FinishSelfCompaction(
@ -545,6 +551,7 @@ void PreFreezeBackgroundMemoryTrimmer::MaybeCancelSelfCompaction() {
void PreFreezeBackgroundMemoryTrimmer::MaybeCancelSelfCompactionInternal() {
base::AutoLock locker(lock_);
process_compacted_metadata_.reset();
self_compaction_last_cancelled_ = base::TimeTicks::Now();
}
@ -559,13 +566,17 @@ void PreFreezeBackgroundMemoryTrimmer::CompactSelf() {
TRACE_EVENT0("base", "CompactSelf");
std::vector<debug::MappedMemoryRegion> regions;
std::string proc_maps;
if (!debug::ReadProcMaps(&proc_maps) || !ParseProcMaps(proc_maps, &regions)) {
return;
}
// We still start the task in the control group, in order to record metrics.
if (base::FeatureList::IsEnabled(kShouldFreezeSelf)) {
std::string proc_maps;
if (!debug::ReadProcMaps(&proc_maps) ||
!ParseProcMaps(proc_maps, &regions)) {
return;
}
if (regions.size() == 0) {
return;
if (regions.size() == 0) {
return;
}
}
auto started_at = base::TimeTicks::Now();
@ -646,10 +657,6 @@ void PreFreezeBackgroundMemoryTrimmer::PostMetricsTasksIfModern() {
// static
void PreFreezeBackgroundMemoryTrimmer::OnSelfFreeze() {
if (!base::FeatureList::IsEnabled(kShouldFreezeSelf)) {
return;
}
TRACE_EVENT0("base", "OnSelfFreeze");
Instance().OnSelfFreezeInternal();
@ -657,7 +664,9 @@ void PreFreezeBackgroundMemoryTrimmer::OnSelfFreeze() {
void PreFreezeBackgroundMemoryTrimmer::OnSelfFreezeInternal() {
base::AutoLock locker(lock_);
RunPreFreezeTasks();
if (base::FeatureList::IsEnabled(kShouldFreezeSelf)) {
RunPreFreezeTasks();
}
base::ThreadPool::PostDelayedTask(
FROM_HERE, {base::TaskPriority::BEST_EFFORT, MayBlock()},

View file

@ -13,6 +13,7 @@
#include "base/functional/callback.h"
#include "base/memory/post_delayed_memory_reduction_task.h"
#include "base/no_destructor.h"
#include "base/profiler/sample_metadata.h"
#include "base/task/delayed_task_handle.h"
#include "base/task/sequenced_task_runner.h"
#include "base/timer/timer.h"
@ -305,6 +306,8 @@ class BASE_EXPORT PreFreezeBackgroundMemoryTrimmer {
// frozen by App Freezer.
base::TimeTicks self_compaction_last_cancelled_ GUARDED_BY(lock_) =
base::TimeTicks::Min();
std::optional<base::ScopedSampleMetadata> process_compacted_metadata_
GUARDED_BY(lock_);
bool supports_modern_trim_;
};

View file

@ -35,13 +35,6 @@
public <init>();
}
# Keep all enum values and valueOf methods. See
# http://proguard.sourceforge.net/index.html#manual/examples.html
# for the reason for this. Also, see http://crbug.com/248037.
-keepclassmembers enum !cr_allowunused,** {
public static **[] values();
}
# This is to workaround crbug.com/1204690 - an old GMS app version crashes when
# ObjectWrapper contains > 1 fields, and this prevents R8 from inserting a
# synthetic field.

View file

@ -17,22 +17,11 @@
namespace base {
namespace android {
bool SysUtils::IsLowEndDeviceFromJni() {
JNIEnv* env = AttachCurrentThread();
return Java_SysUtils_isLowEndDevice(env);
}
bool SysUtils::IsCurrentlyLowMemory() {
JNIEnv* env = AttachCurrentThread();
return Java_SysUtils_isCurrentlyLowMemory(env);
}
// static
int SysUtils::AmountOfPhysicalMemoryKB() {
JNIEnv* env = AttachCurrentThread();
return Java_SysUtils_amountOfPhysicalMemoryKB(env);
}
// Logs the number of minor / major page faults to tracing (and also the time to
// collect) the metrics. Does nothing if tracing is not enabled.
static void JNI_SysUtils_LogPageFaultCountToTracing(JNIEnv* env) {

View file

@ -12,12 +12,8 @@ namespace android {
class BASE_EXPORT SysUtils {
public:
// Returns true iff this is a low-end device.
static bool IsLowEndDeviceFromJni();
// Returns true if system has low available memory.
static bool IsCurrentlyLowMemory();
// Returns amount of physical ram detected in KB, or 0 if detection failed.
static int AmountOfPhysicalMemoryKB();
};
} // namespace android

View file

@ -36,15 +36,8 @@ TaskRunnerAndroid::UiThreadTaskRunnerCallback& GetUiThreadTaskRunnerCallback() {
return *callback;
}
void RunJavaTask(base::android::ScopedJavaGlobalRef<jobject> task,
const std::string& runnable_class_name) {
TRACE_EVENT("toplevel", nullptr, [&](::perfetto::EventContext& ctx) {
std::string event_name =
base::StrCat({"JniPostTask: ", runnable_class_name});
ctx.event()->set_name(event_name.c_str());
});
JNIEnv* env = jni_zero::AttachCurrentThread();
JNI_Runnable::Java_Runnable_run(env, task);
void RunJavaTask(jint task_index) {
Java_TaskRunnerImpl_runTask(jni_zero::AttachCurrentThread(), task_index);
}
} // namespace
@ -68,19 +61,13 @@ void TaskRunnerAndroid::Destroy(JNIEnv* env) {
delete this;
}
void TaskRunnerAndroid::PostDelayedTask(
JNIEnv* env,
const base::android::JavaRef<jobject>& task,
jlong delay,
std::string& runnable_class_name) {
void TaskRunnerAndroid::PostDelayedTask(JNIEnv* env,
jlong delay,
jint task_index) {
// This could be run on any java thread, so we can't cache |env| in the
// BindOnce because JNIEnv is thread specific.
task_runner_->PostDelayedTask(
FROM_HERE,
base::BindOnce(&RunJavaTask,
base::android::ScopedJavaGlobalRef<jobject>(task),
runnable_class_name),
Milliseconds(delay));
FROM_HERE, base::BindOnce(&RunJavaTask, task_index), Milliseconds(delay));
}
// static

View file

@ -32,10 +32,7 @@ class BASE_EXPORT TaskRunnerAndroid {
void Destroy(JNIEnv* env);
void PostDelayedTask(JNIEnv* env,
const base::android::JavaRef<jobject>& task,
jlong delay,
std::string& runnable_class_name);
void PostDelayedTask(JNIEnv* env, jlong delay, jint taskIndex);
bool BelongsToCurrentThread(JNIEnv* env);

View file

@ -10,6 +10,7 @@
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <vector>
#include "base/apple/bridging.h"
@ -22,7 +23,6 @@
#include "base/logging.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
#include "build/branding_buildflags.h"
@ -234,7 +234,7 @@ FilePath GetInnermostAppBundlePath(const FilePath& exec_name) {
return FilePath();
}
auto app = ranges::find_if(
auto app = std::ranges::find_if(
Reversed(components), [](const std::string& component) -> bool {
return component.size() > kExtLength && EndsWith(component, kExt);
});

View file

@ -69,7 +69,7 @@ constexpr size_t kMaxInfoPlistDataSize = 18 * 1024;
#endif
// This limit is arbitrary and can be safely increased in the future.
constexpr size_t kMaximumRendezvousPorts = 5;
constexpr size_t kMaximumRendezvousPorts = 6;
enum MachRendezvousMsgId : mach_msg_id_t {
kMachRendezvousMsgIdRequest = 'mrzv',

View file

@ -5,6 +5,7 @@
#ifndef BASE_CALLBACK_LIST_H_
#define BASE_CALLBACK_LIST_H_
#include <algorithm>
#include <list>
#include <memory>
#include <utility>
@ -15,7 +16,6 @@
#include "base/functional/bind.h"
#include "base/functional/callback.h"
#include "base/memory/weak_ptr.h"
#include "base/ranges/algorithm.h"
#include "base/types/is_instantiation.h"
// OVERVIEW:
@ -174,7 +174,7 @@ class CallbackListBase {
// Returns whether the list of registered callbacks is empty (from an external
// perspective -- meaning no remaining callbacks are live).
bool empty() const {
return ranges::all_of(
return std::ranges::all_of(
callbacks_, [](const auto& callback) { return callback.is_null(); });
}

View file

@ -2,6 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/390223051): Remove C-library calls to fix the errors.
#pragma allow_unsafe_libc_calls
#endif
#include "base/check.h"
#include <optional>
@ -164,7 +169,7 @@ class NotReachedLogMessage : public LogMessage {
class DCheckLogMessage : public LogMessage {
public:
DCheckLogMessage(const base::Location& location)
explicit DCheckLogMessage(const base::Location& location)
: LogMessage(location.file_name(),
location.line_number(),
LOGGING_DCHECK),

View file

@ -2,6 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/390223051): Remove C-library calls to fix the errors.
#pragma allow_unsafe_libc_calls
#endif
#include "base/check_op.h"
#include <string.h>

View file

@ -9,6 +9,8 @@
#include "base/command_line.h"
#include <algorithm>
#include <array>
#include <ostream>
#include <string_view>
@ -20,7 +22,6 @@
#include "base/logging.h"
#include "base/notreached.h"
#include "base/numerics/checked_math.h"
#include "base/ranges/algorithm.h"
#include "base/strings/strcat.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
@ -54,10 +55,17 @@ constexpr CommandLine::CharType kSwitchValueSeparator[] =
// By putting slash last, we can control whether it is treaded as a switch
// value by changing the value of switch_prefix_count to be one less than
// the array size.
constexpr CommandLine::StringViewType kSwitchPrefixes[] = {L"--", L"-", L"/"};
constexpr auto kSwitchPrefixes = std::to_array<CommandLine::StringViewType>({
L"--",
L"-",
L"/",
});
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
// Unixes don't use slash as a switch.
constexpr CommandLine::StringViewType kSwitchPrefixes[] = {"--", "-"};
constexpr auto kSwitchPrefixes = std::to_array<CommandLine::StringViewType>({
"--",
"-",
});
#endif
size_t switch_prefix_count = std::size(kSwitchPrefixes);
@ -486,7 +494,7 @@ CommandLine::StringVector CommandLine::GetArgs() const {
// Gather all arguments after the last switch (may include kSwitchTerminator).
StringVector args(argv_.begin() + begin_args_, argv_.end());
// Erase only the first kSwitchTerminator (maybe "--" is a legitimate page?)
auto switch_terminator = ranges::find(args, kSwitchTerminator);
auto switch_terminator = std::ranges::find(args, kSwitchTerminator);
if (switch_terminator != args.end()) {
args.erase(switch_terminator);
}
@ -512,7 +520,7 @@ void CommandLine::AppendArgNative(StringViewType value) {
#if BUILDFLAG(ENABLE_COMMANDLINE_SEQUENCE_CHECKS)
sequence_checker_.Check();
#endif
argv_.push_back(StringType(value));
argv_.emplace_back(value);
}
void CommandLine::AppendArguments(const CommandLine& other,

View file

@ -59,7 +59,9 @@
// // This body will not be inlined into callers.
// }
// ```
#if __has_cpp_attribute(gnu::noinline)
#if __has_cpp_attribute(clang::noinline)
#define NOINLINE [[clang::noinline]]
#elif __has_cpp_attribute(gnu::noinline)
#define NOINLINE [[gnu::noinline]]
#elif __has_cpp_attribute(msvc::noinline)
#define NOINLINE [[msvc::noinline]]
@ -67,6 +69,24 @@
#define NOINLINE
#endif
// Annotates a call site indicating that the callee should not be inlined.
//
// See also:
// https://clang.llvm.org/docs/AttributeReference.html#noinline
//
// Usage:
// ```
// void Func() {
// // This specific call to `DoSomething` should not be inlined.
// NOINLINE_CALL DoSomething();
// }
// ```
#if __has_cpp_attribute(clang::noinline)
#define NOINLINE_CALL [[clang::noinline]]
#else
#define NOINLINE_CALL
#endif
// Annotates a function indicating it should not be optimized.
//
// See also:
@ -102,7 +122,9 @@
// Since `ALWAYS_INLINE` is performance-oriented but can hamper debugging,
// ignore it in debug mode.
#if defined(NDEBUG)
#if __has_cpp_attribute(gnu::always_inline)
#if __has_cpp_attribute(clang::always_inline)
#define ALWAYS_INLINE [[clang::always_inline]] inline
#elif __has_cpp_attribute(gnu::always_inline)
#define ALWAYS_INLINE [[gnu::always_inline]] inline
#elif defined(COMPILER_MSVC)
#define ALWAYS_INLINE __forceinline
@ -112,6 +134,30 @@
#define ALWAYS_INLINE inline
#endif
// Annotates a call site indicating the callee should always be inlined.
//
// See also:
// https://clang.llvm.org/docs/AttributeReference.html#always-inline-force-inline
//
// Usage:
// ```
// void Func() {
// // This specific call will be inlined if possible.
// ALWAYS_INLINE_CALL DoSomething();
// }
// ```
//
// Since `ALWAYS_INLINE_CALL` is performance-oriented but can hamper debugging,
// ignore it in debug mode.
#if defined(NDEBUG)
#if __has_cpp_attribute(clang::always_inline)
#define ALWAYS_INLINE_CALL [[clang::always_inline]]
#endif
#endif
#if !defined(ALWAYS_INLINE_CALL)
#define ALWAYS_INLINE_CALL
#endif
// Annotates a function indicating it should never be tail called. Useful to
// make sure callers of the annotated function are never omitted from call
// stacks. Often useful with `NOINLINE` to make sure the function itself is also

View file

@ -4,97 +4,105 @@
## What goes here
This directory contains some STL-like containers.
This directory contains some stdlib-like containers.
Things should be moved here that are generally applicable across the code base.
Don't add things here just because you need them in one place and think others
may someday want something similar. You can put specialized containers in
your component's directory and we can promote them here later if we feel there
is broad applicability.
may someday want something similar. You can put specialized containers in your
component's directory and we can promote them here later if we feel there is
broad applicability.
### Design and naming
Fundamental [//base principles](../README.md#design-and-naming) apply, i.e.:
Containers should adhere as closely to STL as possible. Functions and behaviors
not present in STL should only be added when they are related to the specific
data structure implemented by the container.
Containers should adhere as closely to stdlib as possible. Functions and
behaviors not present in stdlib should only be added when they are related to
the specific data structure implemented by the container.
For STL-like containers our policy is that they should use STL-like naming even
when it may conflict with the style guide. So functions and class names should
be lower case with underscores. Non-STL-like classes and functions should use
Google naming. Be sure to use the base namespace.
For stdlib-like containers our policy is that they should use stdlib-like naming
even when it may conflict with the style guide. So functions and class names
should be lower case with underscores. Non-stdlib-like classes and functions
should use Google naming. Be sure to use the base namespace.
## Map and set selection
### Usage advice
* Do not use `base::flat_map` or `base::flat_set` if the number of items will
be large or unbounded and elements will be inserted/deleted outside of the
containers constructor/destructor - they have O(n) performance on inserts
and deletes of individual items.
1. If you just need a generic map or set container without any additional
properties then prefer to use `absl::flat_hash_map` and
`absl::flat_hash_set`. These are versatile containers that have good
performance on both large and small sized data.
* Do not default to using `std::unordered_set` and `std::unordered_map`. In
the common case, query performance is unlikely to be sufficiently higher
than `std::map` to make a difference, insert performance is slightly worse,
and the memory overhead is high. This makes sense mostly for large tables
where you expect a lot of lookups.
1. Is pointer-stability of values (but not keys) required? Then use
`absl::flat_hash_map<Key, std::unique_ptr<Value>>`.
2. Is pointer-stability of keys required? Then use `absl::node_hash_map`
and `absl::node_hash_set`.
* Most maps and sets in Chrome are small and contain objects that can be moved
efficiently. In this case, consider `base::flat_map` and `base::flat_set`.
You need to be aware of the maximum expected size of the container since
individual inserts and deletes are O(n), giving O(n^2) construction time for
the entire map. But because it avoids mallocs in most cases, inserts are
better or comparable to other containers even for several dozen items, and
efficiently-moved types are unlikely to have performance problems for most
cases until you have hundreds of items. If your container can be constructed
in one shot, the constructor from vector gives O(n log n) construction times
and it should be strictly better than a `std::map`.
2. If you require sorted order, then the best choice depends on whether your
map is going to be written once and read many times, or if it is going to be
written frequently throughout its lifetime.
Conceptually inserting a range of n elements into a `base::flat_map` or
`base::flat_set` behaves as if insert() was called for each individually
element. Thus in case the input range contains repeated elements, only the
first one of these duplicates will be inserted into the container. This
behaviour applies to construction from a range as well.
1. If the map is written once, then `base::flat_map` and `base::flat_set`
are good choices. While they have poor asymptotic behavior on writes, on
a write-once container this performance is no worse than the standard
library tree containers and so they are strictly better in terms of
overhead.
2. If the map is always very small, then `base::flat_map` and
`base::flat_set` are again good choices, even if the map is being
written to multiple times. While mutations are O(n) this cost is
negligible for very small values of n compared to the cost of doing a
malloc on every mutation.
    3. If the map is written multiple times and is large then `std::map`
and `std::set` are the best choices.
4. If you require pointer stability (on either the key or value) then
`std::map` and `std::set` are the also the best choices.
* `base::small_map` has better runtime memory usage without the poor mutation
performance of large containers that `base::flat_map` has. But this
advantage is partially offset by additional code size. Prefer in cases where
you make many objects so that the code/heap tradeoff is good.
When using `base::flat_map` and `base::flat_set` there are also fixed versions
of these that are backed by a `std::array` instead of a `std::vector` and which
don't provide mutating operators, but which are constexpr friendly and support
stack allocation. If you are using the flat structures because your container is
only written once then the fixed versions may be an even better alternative,
particularly if you're looking for a structure that can be used as a
compile-time lookup table.
* Use `std::map` and `std::set` if you can't decide. Even if they're not
great, they're unlikely to be bad or surprising.
Note that this advice never suggests the use of `std::unordered_map` and
`std::unordered_set`. These containers provides similar features to the Abseil
flat hash containers but with worse performance. They should only be used if
absolutely required for compatibility with third-party code.
### Map and set details
Also note that this advice does not suggest the use of the Abseil btree
structures, `absl::btree_map` and `absl::btree_set`. This is because while these
types do provide good performance for cases where you need a sorted container
they have been found to introduce a very large code size penalty when using them
in Chromium. Until this problem can be resolved they should not be used in
Chromium code.
Sizes are on 64-bit platforms. Stable iterators aren't invalidated when the
container is mutated.
### Map and set implementation details
| Container | Empty size | Per-item overhead | Stable iterators? | Insert/delete complexity |
|:------------------------------------------ |:--------------------- |:----------------- |:----------------- |:-----------------------------|
| `std::map`, `std::set` | 16 bytes | 32 bytes | Yes | O(log n) |
| `std::unordered_map`, `std::unordered_set` | 128 bytes | 16 - 24 bytes | No | O(1) |
| `base::flat_map`, `base::flat_set` | 24 bytes | 0 (see notes) | No | O(n) |
| `base::small_map` | 24 bytes (see notes) | 32 bytes | No | depends on fallback map type |
Sizes are on 64-bit platforms. Ordered iterators means that iteration occurs in
the sorted key order. Stable iterators means that iterators are not invalidated
by unrelated modifications to the container. Stable pointers means that pointers
to keys and values are not invalidated by unrelated modifications to the
container.
**Takeaways:** `std::unordered_map` and `std::unordered_set` have high
overhead for small container sizes, so prefer these only for larger workloads.
The table lists the values for maps, but the same properties apply to the
corresponding set types.
Code size comparisons for a block of code (see appendix) on Windows using
strings as keys.
| Container | Code size |
|:-------------------- |:---------- |
| `std::unordered_map` | 1646 bytes |
| `std::map` | 1759 bytes |
| `base::flat_map` | 1872 bytes |
| `base::small_map` | 2410 bytes |
| Container | Empty size | Per-item overhead | Ordered iterators? | Stable iterators? | Stable pointers? | Lookup complexity | Mutate complexity |
|:--------------------- |:---------- |:----------------- |:------------------ |:----------------- |:---------------- |:----------------- |:----------------- |
| `std::map` | 16 bytes | 32 bytes | Yes | Yes | Yes | O(log n) | O(log n) |
| `std::unordered_map` | 128 bytes | 16-24 bytes | No | No | Yes | O(1) | O(1) |
| `base::flat_map` | 24 bytes | 0 bytes | Yes | No | No | O(log n) | O(n) |
| `absl::flat_hash_map` | 40 bytes | 1 byte | No | No | No | O(1) | O(1) |
| `absl::node_hash_map` | 40 bytes | 1 byte | No | No | Yes | O(1) | O(1) |
**Takeaways:** `base::small_map` generates more code because of the inlining of
both brute-force and red-black tree searching. This makes it less attractive
for random one-off uses. But if your code is called frequently, the runtime
memory benefits will be more important. The code sizes of the other maps are
close enough it's not worth worrying about.
Note that all of these containers except for `std::map` have some additional
memory overhead based on their load factor that isn't accounted for by their
per-item overhead. This includes `base::flat_map` which doesn't have a hash
table load factor but does have the `std::vector` equivalent, unused capacity
from its double-on-resize allocation strategy.
### std::map and std::set
@ -216,20 +224,43 @@ constexpr auto kMap = base::MakeFixedFlatMap<std::string_view, int>(
Both `MakeFixedFlatSet` and `MakeFixedFlatMap` require callers to explicitly
specify the key (and mapped) type.
### base::small\_map
### absl::flat\_hash\_map and absl::flat\_hash\_set
A small inline buffer that is brute-force searched that overflows into a full
`std::map` or `std::unordered_map`. This gives the memory benefit of
`base::flat_map` for small data sizes without the degenerate insertion
performance for large container sizes.
A hash table. These use Abseil's "swiss table" design which is elaborated on in
more detail at https://abseil.io/about/design/swisstables and
https://abseil.io/docs/cpp/guides/container#hash-tables. The short version is
that it uses an open addressing scheme with a lookup scheme that is designed to
minimize memory accesses and branch mispredicts.
Since instantiations require both code for a `std::map` and a brute-force search
of the inline container, plus a fancy iterator to cover both cases, code size
is larger.
The flat hash map structures also store the key and value directly in the hash
table slots, eliminating the need for additional memory allocations for
inserting or removing individual nodes. This comes at the cost of eliminating
pointer stability: unlike the standard library hash tables a rehash will not
only invalidate all iterators but also all pointers to the stored elements.
The initial size in the above table is assuming a very small inline table. The
actual size will be `sizeof(int) + min(sizeof(std::map), sizeof(T) *
inline_size)`.
In practical use these Abseil containers perform well enough that they are a
good default choice for a map or set container when you don't have any stronger
constraints. In fact, even when you require value pointer-stability it is still
generally better to wrap the value in a `std::unique_ptr` than to use an
alternative structure that provides such stability directly.
### absl::node\_hash\_map and absl::node\_hash\_set
A variant of the Abseil hash maps that stores the key-value pair in a separately
allocated node rather than directly in the hash table slots. This guarantees
pointer-stability for both the keys and values in the table, invalidating them
only when the element is deleted, but it comes at the cost of requiring an
additional allocation for every element inserted.
There are two main uses for this structure. One is for cases where you require a
map with pointer-stability for the key (not the value), which cannot be done
with the Abseil flat map or set. The other is for cases where you want a drop-in
replacement for an existing `std::unordered_map` or `std::unordered_set` and you
aren't sure if pointer-stability is required. If you know that pointer-stability
is unnecessary then it would be better to convert to the flat tables but this
may be difficult to prove when working on unfamiliar code or doing a large scale
change. In such cases the node hash maps are still generally superior to the
standard library maps.
## Deque
@ -363,32 +394,3 @@ require safety. There are several problems with that approach:
Therefore, the minimal checks that we are adding to these base classes are the
most efficient and effective way to achieve the beginning of the safety that we
need. (Note that we cannot account for undefined behavior in callers.)
## Appendix
### Code for map code size comparison
This just calls insert and query a number of times, with `printf`s that prevent
things from being dead-code eliminated.
```cpp
TEST(Foo, Bar) {
base::small_map<std::map<std::string, Flubber>> foo;
foo.insert(std::make_pair("foo", Flubber(8, "bar")));
foo.insert(std::make_pair("bar", Flubber(8, "bar")));
foo.insert(std::make_pair("foo1", Flubber(8, "bar")));
foo.insert(std::make_pair("bar1", Flubber(8, "bar")));
foo.insert(std::make_pair("foo", Flubber(8, "bar")));
foo.insert(std::make_pair("bar", Flubber(8, "bar")));
auto found = foo.find("asdf");
printf("Found is %d\n", (int)(found == foo.end()));
found = foo.find("foo");
printf("Found is %d\n", (int)(found == foo.end()));
found = foo.find("bar");
printf("Found is %d\n", (int)(found == foo.end()));
found = foo.find("asdfhf");
printf("Found is %d\n", (int)(found == foo.end()));
found = foo.find("bar1");
printf("Found is %d\n", (int)(found == foo.end()));
}
```

View file

@ -17,8 +17,7 @@
#include "base/memory/raw_ptr_exclusion.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/ranges/algorithm.h"
#include "base/ranges/from_range.h"
#include "base/types/cxx23_from_range.h"
#if DCHECK_IS_ON()
#include <ostream>
@ -1262,18 +1261,18 @@ class circular_deque {
// Implementations of base::Erase[If] (see base/stl_util.h).
template <class T, class Value>
size_t Erase(circular_deque<T>& container, const Value& value) {
auto it = ranges::remove(container, value);
size_t removed = std::distance(it, container.end());
container.erase(it, container.end());
return removed;
auto removed = std::ranges::remove(container, value);
size_t num_removed = removed.size();
container.erase(removed.begin(), removed.end());
return num_removed;
}
template <class T, class Predicate>
size_t EraseIf(circular_deque<T>& container, Predicate pred) {
auto it = ranges::remove_if(container, pred);
size_t removed = std::distance(it, container.end());
container.erase(it, container.end());
return removed;
auto removed = std::ranges::remove_if(container, pred);
size_t num_removed = removed.size();
container.erase(removed.begin(), removed.end());
return num_removed;
}
} // namespace base

View file

@ -5,25 +5,20 @@
#ifndef BASE_CONTAINERS_CONTAINS_H_
#define BASE_CONTAINERS_CONTAINS_H_
// Provides `Contains()`, a general purpose utility to check whether a container
// contains a value. This will probe whether a `contains` or `find` member
// function on `container` exists, and fall back to a generic linear search over
// `container`.
#include <algorithm>
#include <ranges>
#include <type_traits>
#include <utility>
#include "base/ranges/algorithm.h"
// TODO(dcheng): Remove this after fixing any IWYU errors.
#include "base/ranges/ranges.h"
#include <ranges>
namespace base {
namespace internal {
// Small helper to detect whether a given type has a nested `key_type` typedef.
// Used below to catch misuses of the API for associative containers.
template <typename T>
concept HasKeyType = requires { typename T::key_type; };
} // namespace internal
// A general purpose utility to check whether `container` contains `value`. This
// will probe whether a `contains` or `find` member function on `container`
// exists, and fall back to a generic linear search over `container`.
@ -39,22 +34,22 @@ constexpr bool Contains(const Container& container, const Value& value) {
return container.find(value) != container.end();
} else {
static_assert(
!internal::HasKeyType<Container>,
!requires { typename Container::key_type; },
"Error: About to perform linear search on an associative container. "
"Either use a more generic comparator (e.g. std::less<>) or, if a "
"linear search is desired, provide an explicit projection parameter.");
return ranges::find(container, value) != std::ranges::end(container);
return std::ranges::find(container, value) != std::ranges::end(container);
}
}
// Overload that allows to provide an additional projection invocable. This
// projection will be applied to every element in `container` before comparing
// it with `value`. This will always perform a linear search.
// Overload that allows callers to provide an additional projection invocable.
// This projection will be applied to every element in `container` before
// comparing it with `value`. This will always perform a linear search.
template <typename Container, typename Value, typename Proj>
constexpr bool Contains(const Container& container,
const Value& value,
Proj proj) {
return ranges::find(container, value, std::move(proj)) !=
return std::ranges::find(container, value, std::move(proj)) !=
std::ranges::end(container);
}

View file

@ -7,18 +7,17 @@
#include <set>
#include <string_view>
#include <string>
#include "base/containers/contains.h"
namespace base {
// The following code would perform a linear search through the set which is
// likely unexpected and not intended. This is because the expression
// `set.find(kFoo)` is ill-formed, since there is no implicit conversion from
// std::string_view to `std::string`. This means Contains would fall back to the
// general purpose `base::ranges::find(set, kFoo)` linear search.
// general purpose `std::ranges::find(set, kFoo)` linear search.
// To fix this clients can either use a more generic comparator like std::less<>
// (in this case `set.find()` accepts any type that is comparable to a
// std::string), or pass an explicit projection parameter to Contains, at which

View file

@ -380,8 +380,8 @@ constexpr flat_map<Key, Mapped, KeyCompare, Container> MakeFlatMap(
const Projection& proj = Projection()) {
Container elements;
internal::ReserveIfSupported(elements, unprojected_elements);
base::ranges::transform(unprojected_elements, std::back_inserter(elements),
proj);
std::ranges::transform(unprojected_elements, std::back_inserter(elements),
proj);
return flat_map<Key, Mapped, KeyCompare, Container>(std::move(elements),
comp);
}

View file

@ -5,11 +5,11 @@
#ifndef BASE_CONTAINERS_FLAT_SET_H_
#define BASE_CONTAINERS_FLAT_SET_H_
#include <algorithm>
#include <functional>
#include <vector>
#include "base/containers/flat_tree.h"
#include "base/ranges/algorithm.h"
namespace base {
@ -174,8 +174,8 @@ constexpr flat_set<Key, Compare, Container> MakeFlatSet(
const Projection& proj = Projection()) {
Container elements;
internal::ReserveIfSupported(elements, unprojected_elements);
base::ranges::transform(unprojected_elements, std::back_inserter(elements),
proj);
std::ranges::transform(unprojected_elements, std::back_inserter(elements),
proj);
return flat_set<Key, Compare, Container>(std::move(elements), comp);
}

View file

@ -20,7 +20,6 @@
#include "base/compiler_specific.h"
#include "base/containers/span.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/ranges/algorithm.h"
namespace base {
@ -41,7 +40,7 @@ constexpr bool is_sorted_and_unique(const Range& range, Comp comp) {
// Being unique implies that there are no adjacent elements that
// compare equal. So this checks that each element is strictly less
// than the element after it.
return ranges::adjacent_find(range, std::not_fn(comp)) ==
return std::ranges::adjacent_find(range, std::not_fn(comp)) ==
std::ranges::end(range);
}
@ -1029,7 +1028,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
const Key& key) const -> const_iterator {
KeyValueCompare comp(comp_);
return ranges::lower_bound(*this, key, comp);
return std::ranges::lower_bound(*this, key, comp);
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
@ -1050,7 +1049,7 @@ auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
const KeyTypeOrK<K>& key_ref = key;
KeyValueCompare comp(comp_);
return ranges::lower_bound(*this, key_ref, comp);
return std::ranges::lower_bound(*this, key_ref, comp);
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
@ -1063,7 +1062,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::upper_bound(
const Key& key) const -> const_iterator {
KeyValueCompare comp(comp_);
return ranges::upper_bound(*this, key, comp);
return std::ranges::upper_bound(*this, key, comp);
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
@ -1084,7 +1083,7 @@ auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::upper_bound(
const KeyTypeOrK<K>& key_ref = key;
KeyValueCompare comp(comp_);
return ranges::upper_bound(*this, key_ref, comp);
return std::ranges::upper_bound(*this, key_ref, comp);
}
// ----------------------------------------------------------------------------
@ -1151,10 +1150,10 @@ size_t EraseIf(
base::internal::flat_tree<Key, GetKeyFromValue, KeyCompare, Container>&
container,
Predicate pred) {
auto it = ranges::remove_if(container, pred);
size_t removed = std::distance(it, container.end());
container.erase(it, container.end());
return removed;
auto removed = std::ranges::remove_if(container, pred);
size_t num_removed = removed.size();
container.erase(removed.begin(), removed.end());
return num_removed;
}
} // namespace base

View file

@ -144,8 +144,7 @@
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/memory/ptr_util.h"
#include "base/ranges/algorithm.h"
#include "base/ranges/from_range.h"
#include "base/types/cxx23_from_range.h"
#include "third_party/abseil-cpp/absl/container/inlined_vector.h"
namespace base {
@ -479,7 +478,7 @@ class IntrusiveHeap {
}
// Repair the heap and ensure handles are pointing to the right index.
ranges::make_heap(impl_.heap_, value_comp());
std::ranges::make_heap(impl_.heap_, value_comp());
for (size_t i = 0; i < size(); ++i) {
SetHeapHandle(i);
}

View file

@ -6,9 +6,7 @@
#include "base/check_op.h"
namespace base {
namespace internal {
namespace base::internal {
LinkNodeBase::LinkNodeBase() = default;
@ -56,6 +54,4 @@ void LinkNodeBase::InsertAfterBase(LinkNodeBase* e) {
e->next_ = this;
}
} // namespace internal
} // namespace base
} // namespace base::internal

View file

@ -772,15 +772,15 @@ class GSL_POINTER span {
requires(std::is_const_v<element_type> &&
std::equality_comparable<const element_type>)
{
return std::ranges::equal(span<const element_type>(lhs),
span<const element_type>(rhs));
return std::ranges::equal(span<const element_type, extent>(lhs),
span<const element_type, extent>(rhs));
}
friend constexpr bool operator==(span lhs,
span<const element_type, extent> rhs)
requires(!std::is_const_v<element_type> &&
std::equality_comparable<const element_type>)
{
return std::ranges::equal(span<const element_type>(lhs), rhs);
return std::ranges::equal(span<const element_type, extent>(lhs), rhs);
}
template <typename OtherElementType,
size_t OtherExtent,
@ -791,7 +791,7 @@ class GSL_POINTER span {
friend constexpr bool operator==(
span lhs,
span<OtherElementType, OtherExtent, OtherInternalPtrType> rhs) {
return std::ranges::equal(span<const element_type>(lhs),
return std::ranges::equal(span<const element_type, extent>(lhs),
span<const OtherElementType, OtherExtent>(rhs));
}

View file

@ -78,11 +78,36 @@ class SpanWriter {
// For a SpanWriter over bytes, we can write integer values directly to those
// bytes as a memcpy. Returns true if there was room remaining and the bytes
// were written.
// were written. The macros below implement the following methods:
//
// This provides big, little, and native endian writing orders. Note that
// "native" order is almost never what you want; it only makes sense for byte
// buffers that stay in memory and are never written to the disk or network.
// bool WriteU8BigEndian(uint8_t)
// bool WriteU16BigEndian(uint16_t)
// bool WriteU32BigEndian(uint32_t)
// bool WriteU64BigEndian(uint64_t)
// bool WriteU8LittleEndian(uint8_t)
// bool WriteU16LittleEndian(uint16_t)
// bool WriteU32LittleEndian(uint32_t)
// bool WriteU64LittleEndian(uint64_t)
// bool WriteU8NativeEndian(uint8_t)
// bool WriteU16NativeEndian(uint16_t)
// bool WriteU32NativeEndian(uint32_t)
// bool WriteU64NativeEndian(uint64_t)
// bool WriteI8BigEndian(int8_t)
// bool WriteI16BigEndian(int16_t)
// bool WriteI32BigEndian(int32_t)
// bool WriteI64BigEndian(int64_t)
// bool WriteI8LittleEndian(int8_t)
// bool WriteI16LittleEndian(int16_t)
// bool WriteI32LittleEndian(int32_t)
// bool WriteI64LittleEndian(int64_t)
// bool WriteI8NativeEndian(int8_t)
// bool WriteI16NativeEndian(int16_t)
// bool WriteI32NativeEndian(int32_t)
// bool WriteI64NativeEndian(int64_t)
//
// Note that "native" order is almost never what you want; it only makes sense
// for byte buffers that stay in memory and are never written to the disk or
// network.
#define BASE_SPANWRITER_WRITE(signchar, bitsize, endian, typeprefix) \
constexpr bool Write##signchar##bitsize##endian##Endian( \
typeprefix##int##bitsize##_t value) \

View file

@ -5,6 +5,7 @@
#ifndef BASE_CONTAINERS_TO_VALUE_LIST_H_
#define BASE_CONTAINERS_TO_VALUE_LIST_H_
#include <algorithm>
#include <concepts>
#include <functional>
#include <iterator>
@ -12,8 +13,6 @@
#include <type_traits>
#include <utility>
#include "base/ranges/algorithm.h"
#include "base/ranges/ranges.h"
#include "base/values.h"
namespace base {
@ -29,12 +28,13 @@ concept AppendableToValueList =
// Complexity: Exactly `size(range)` applications of `proj`.
template <typename Range, typename Proj = std::identity>
requires std::ranges::sized_range<Range> && std::ranges::input_range<Range> &&
std::indirectly_unary_invocable<Proj, ranges::iterator_t<Range>> &&
std::indirectly_unary_invocable<Proj,
std::ranges::iterator_t<Range>> &&
internal::AppendableToValueList<
std::indirect_result_t<Proj, ranges::iterator_t<Range>>>
std::indirect_result_t<Proj, std::ranges::iterator_t<Range>>>
Value::List ToValueList(Range&& range, Proj proj = {}) {
auto container = Value::List::with_capacity(std::ranges::size(range));
ranges::for_each(
std::ranges::for_each(
std::forward<Range>(range),
[&]<typename T>(T&& value) { container.Append(std::forward<T>(value)); },
std::move(proj));

View file

@ -5,6 +5,7 @@
#ifndef BASE_CONTAINERS_TO_VECTOR_H_
#define BASE_CONTAINERS_TO_VECTOR_H_
#include <algorithm>
#include <functional>
#include <iterator>
#include <ranges>
@ -12,9 +13,6 @@
#include <utility>
#include <vector>
#include "base/ranges/algorithm.h"
#include "base/ranges/ranges.h"
namespace base {
// Maps a container to a std::vector<> with respect to the provided projection.
@ -28,14 +26,14 @@ namespace base {
// Complexity: Exactly `size(range)` applications of `proj`.
template <typename Range, typename Proj = std::identity>
requires std::ranges::sized_range<Range> && std::ranges::input_range<Range> &&
std::indirectly_unary_invocable<Proj, ranges::iterator_t<Range>>
std::indirectly_unary_invocable<Proj, std::ranges::iterator_t<Range>>
auto ToVector(Range&& range, Proj proj = {}) {
using ProjectedType =
std::projected<ranges::iterator_t<Range>, Proj>::value_type;
std::projected<std::ranges::iterator_t<Range>, Proj>::value_type;
std::vector<ProjectedType> container;
container.reserve(std::ranges::size(range));
ranges::transform(std::forward<Range>(range), std::back_inserter(container),
std::move(proj));
std::ranges::transform(std::forward<Range>(range),
std::back_inserter(container), std::move(proj));
return container;
}

View file

@ -64,7 +64,7 @@ struct UniquePtrComparator {
// Example usage:
// std::vector<std::unique_ptr<Foo>> vector;
// Foo* element = ...
// auto iter = base::ranges::find_if(vector, MatchesUniquePtr(element));
// auto iter = std::ranges::find_if(vector, MatchesUniquePtr(element));
//
// Example of erasing from container:
// EraseIf(v, MatchesUniquePtr(element));

View file

@ -2,6 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/390223051): Remove C-library calls to fix the errors.
#pragma allow_unsafe_libc_calls
#endif
#ifndef BASE_CONTAINERS_VECTOR_BUFFER_H_
#define BASE_CONTAINERS_VECTOR_BUFFER_H_

View file

@ -21,9 +21,10 @@
#include <asm/hwcap.h>
#include <sys/auxv.h>
#include <algorithm>
#include "base/files/file_util.h"
#include "base/numerics/checked_math.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"

View file

@ -6,12 +6,10 @@
#include "base/compiler_specific.h"
namespace base {
namespace debug {
namespace base::debug {
// This file/function should be excluded from LTO/LTCG to ensure that the
// compiler can't see this function's implementation when compiling calls to it.
NOINLINE void Alias(const void* var) {}
} // namespace debug
} // namespace base
} // namespace base::debug

View file

@ -8,6 +8,7 @@
#include <stddef.h>
#include "base/base_export.h"
#include "base/compiler_specific.h"
namespace base {
namespace debug {
@ -88,19 +89,19 @@ BASE_EXPORT size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size);
// Convenience macro that copies the null-terminated string from `c_str` into a
// stack-allocated char array named `var_name` that holds up to `array_size - 1`
// characters and should be preserved in memory dumps.
#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, array_size) \
char var_name[array_size] = {}; \
::base::strlcpy(var_name, (c_str), std::size(var_name)); \
#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, array_size) \
char var_name[array_size] = {}; \
UNSAFE_TODO(::base::strlcpy(var_name, (c_str), std::size(var_name))); \
::base::debug::Alias(var_name)
#define DEBUG_ALIAS_FOR_U16CSTR(var_name, c_str, array_size) \
char16_t var_name[array_size] = {}; \
::base::u16cstrlcpy(var_name, (c_str), std::size(var_name)); \
#define DEBUG_ALIAS_FOR_U16CSTR(var_name, c_str, array_size) \
char16_t var_name[array_size] = {}; \
UNSAFE_TODO(::base::u16cstrlcpy(var_name, (c_str), std::size(var_name))); \
::base::debug::Alias(var_name)
#define DEBUG_ALIAS_FOR_WCHARCSTR(var_name, c_str, array_size) \
wchar_t var_name[array_size] = {}; \
::base::wcslcpy(var_name, (c_str), std::size(var_name)); \
#define DEBUG_ALIAS_FOR_WCHARCSTR(var_name, c_str, array_size) \
wchar_t var_name[array_size] = {}; \
UNSAFE_TODO(::base::wcslcpy(var_name, (c_str), std::size(var_name))); \
::base::debug::Alias(var_name)
// Code folding is a linker optimization whereby the linker identifies functions

View file

@ -22,8 +22,7 @@
#include <windows.h>
#endif
namespace base {
namespace debug {
namespace base::debug {
namespace {
@ -107,5 +106,4 @@ void AsanCorruptHeap() {
#endif // BUILDFLAG(IS_WIN)
#endif // ADDRESS_SANITIZER
} // namespace debug
} // namespace base
} // namespace base::debug

View file

@ -13,8 +13,7 @@
#include "base/test/clang_profiling.h"
#endif
namespace base {
namespace debug {
namespace base::debug {
static bool is_debug_ui_suppressed = false;
@ -53,5 +52,4 @@ bool IsDebugUISuppressed() {
return is_debug_ui_suppressed;
}
} // namespace debug
} // namespace base
} // namespace base::debug

View file

@ -60,8 +60,7 @@
#include "base/third_party/symbolize/symbolize.h" // nogncheck
#endif
namespace base {
namespace debug {
namespace base::debug {
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_BSD)
@ -350,5 +349,4 @@ void BreakDebuggerAsyncSafe() {
#endif
}
} // namespace debug
} // namespace base
} // namespace base::debug

View file

@ -78,9 +78,7 @@ bool ShouldDumpWithoutCrashWithLocationAndUniqueId(
} // namespace
namespace base {
namespace debug {
namespace base::debug {
bool DumpWithoutCrashingUnthrottled() {
TRACE_EVENT0("base", "DumpWithoutCrashingUnthrottled");
@ -147,5 +145,4 @@ void ClearMapsForTesting() {
LocationAndUniqueIdentifierToTimestampMap().clear();
}
} // namespace debug
} // namespace base
} // namespace base::debug

View file

@ -9,7 +9,6 @@
#include "base/debug/dwarf_line_no.h"
#include "partition_alloc/pointers/raw_ref.h"
#ifdef USE_SYMBOLIZE
#include <stdlib.h>
@ -17,6 +16,7 @@
#include <unistd.h>
#include <algorithm>
#include <array>
#include <charconv>
#include <cstdint>
#include <limits>
@ -25,9 +25,9 @@
#include "base/debug/buffered_dwarf_reader.h"
#include "base/third_party/symbolize/symbolize.h"
#include "partition_alloc/pointers/raw_ptr.h"
#include "partition_alloc/pointers/raw_ref.h"
namespace base {
namespace debug {
namespace base::debug {
namespace {
@ -1339,7 +1339,7 @@ void GetDwarfCompileUnitOffsets(const void* const* trace,
uint64_t* cu_offsets,
size_t num_frames) {
// LINT.IfChange(max_stack_frames)
FrameInfo frame_info[250] = {};
std::array<FrameInfo, 250> frame_info = {};
// LINT.ThenChange(stack_trace.h:max_stack_frames)
for (size_t i = 0; i < num_frames; i++) {
// The `cu_offset` also encodes the original sort order.
@ -1382,8 +1382,7 @@ void GetDwarfCompileUnitOffsets(const void* const* trace,
}
}
} // namespace debug
} // namespace base
} // namespace base::debug
#else // USE_SYMBOLIZE

View file

@ -26,8 +26,7 @@
// must avoid dynamic memory allocation or using data structures which rely on
// dynamic allocation.
namespace base {
namespace debug {
namespace base::debug {
namespace {
// See https://refspecs.linuxbase.org/elf/elf.pdf for the ELF specification.
@ -159,7 +158,7 @@ std::optional<std::string_view> ReadElfLibraryName(
const Dyn* dynamic_end = reinterpret_cast<const Dyn*>(
header.p_vaddr + relocation_offset + header.p_memsz);
Xword soname_strtab_offset = 0;
const char* strtab_addr = 0;
const char* strtab_addr = nullptr;
for (const Dyn* dynamic_iter = dynamic_start; dynamic_iter < dynamic_end;
++dynamic_iter) {
if (dynamic_iter->d_tag == DT_STRTAB) {
@ -219,5 +218,4 @@ size_t GetRelocationOffset(const void* elf_mapped_base) {
reinterpret_cast<uintptr_t>(nullptr));
}
} // namespace debug
} // namespace base
} // namespace base::debug

View file

@ -18,6 +18,7 @@
#include "base/logging.h"
#include "base/notreached.h"
#include "base/process/process.h"
#include "base/win/scoped_gdi_object.h"
#include "base/win/scoped_handle.h"
#include "base/win/win_util.h"
#include "base/win/windows_version.h"
@ -355,11 +356,10 @@ NOINLINE void CrashIfCannotAllocateSmallBitmap(BITMAPINFOHEADER* header,
base::debug::Alias(&small_data);
header->biWidth = 5;
header->biHeight = -5;
HBITMAP small_bitmap =
base::win::ScopedGDIObject<HBITMAP> small_bitmap(
CreateDIBSection(nullptr, reinterpret_cast<BITMAPINFO*>(&header), 0,
&small_data, shared_section, 0);
CHECK(small_bitmap != nullptr);
DeleteObject(small_bitmap);
&small_data, shared_section, 0));
CHECK(small_bitmap.is_valid());
}
NOINLINE void GetProcessMemoryInfo(PROCESS_MEMORY_COUNTERS_EX* pmc) {

View file

@ -14,8 +14,7 @@
#include "base/win/pe_image.h"
#endif // BUILDFLAG(IS_WIN)
namespace base {
namespace debug {
namespace base::debug {
void StartProfiling(const std::string& name) {}
@ -122,5 +121,4 @@ MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
#endif // BUILDFLAG(IS_WIN)
} // namespace debug
} // namespace base
} // namespace base::debug

View file

@ -35,8 +35,7 @@ extern "C" void* __libc_stack_end;
#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
namespace base {
namespace debug {
namespace base::debug {
namespace {
@ -446,5 +445,4 @@ ScopedStackFrameLinker::~ScopedStackFrameLinker() {
#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
} // namespace debug
} // namespace base
} // namespace base::debug

View file

@ -2,6 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/390223051): Remove C-library calls to fix the errors.
#pragma allow_unsafe_libc_calls
#endif
#include "base/debug/stack_trace.h"
#include <errno.h>
@ -687,7 +692,7 @@ class SandboxSymbolizeHelper {
private:
friend struct DefaultSingletonTraits<SandboxSymbolizeHelper>;
SandboxSymbolizeHelper() : is_initialized_(false) { Init(); }
SandboxSymbolizeHelper() { Init(); }
~SandboxSymbolizeHelper() {
UnregisterCallback();
@ -963,7 +968,7 @@ class SandboxSymbolizeHelper {
}
// Set to true upon successful initialization.
bool is_initialized_;
bool is_initialized_ = false;
#if !defined(OFFICIAL_BUILD) || !defined(NO_UNWIND_TABLES)
// Mapping from file name to file descriptor. Includes file descriptors

View file

@ -17,7 +17,6 @@
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/memory/singleton.h"
#include "base/ranges/algorithm.h"
#include "base/strings/strcat_win.h"
#include "base/strings/string_util.h"
#include "base/synchronization/lock.h"
@ -338,7 +337,7 @@ StackTrace::StackTrace(const CONTEXT* context) {
void StackTrace::InitTrace(const CONTEXT* context_record) {
if (ShouldSuppressOutput()) {
CHECK_EQ(count_, 0U);
base::ranges::fill(trace_, nullptr);
std::ranges::fill(trace_, nullptr);
return;
}
@ -383,7 +382,7 @@ void StackTrace::InitTrace(const CONTEXT* context_record) {
trace_[count_++] = reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
}
base::ranges::fill(span(trace_).last(trace_.size() - count_), nullptr);
std::ranges::fill(span(trace_).last(trace_.size() - count_), nullptr);
}
// static

View file

@ -9,7 +9,6 @@
#include <sstream>
#include "base/pending_task.h"
#include "base/ranges/algorithm.h"
#include "base/task/common/task_annotator.h"
#include "build/build_config.h"
@ -19,8 +18,7 @@
#include "base/no_destructor.h"
#endif // BUILDFLAG(IS_ANDROID)
namespace base {
namespace debug {
namespace base::debug {
namespace {
#if BUILDFLAG(IS_ANDROID)
// Android sends stdout and stderr to /dev/null; logging should be done through
@ -54,7 +52,7 @@ TaskTrace::TaskTrace() {
}
std::array<const void*, PendingTask::kTaskBacktraceLength + 1> task_trace;
task_trace[0] = current_task->posted_from.program_counter();
ranges::copy(current_task->task_backtrace, task_trace.begin() + 1);
std::ranges::copy(current_task->task_backtrace, task_trace.begin() + 1);
size_t length = 0;
while (length < task_trace.size() && task_trace[length]) {
++length;
@ -100,9 +98,10 @@ size_t TaskTrace::GetAddresses(span<const void*> addresses) const {
return count;
}
span<const void* const> current_addresses = stack_trace_->addresses();
ranges::copy_n(current_addresses.begin(),
std::min(current_addresses.size(), addresses.size()),
addresses.begin());
std::ranges::copy_n(current_addresses.begin(),
static_cast<ptrdiff_t>(
std::min(current_addresses.size(), addresses.size())),
addresses.begin());
return current_addresses.size();
}
@ -111,5 +110,4 @@ std::ostream& operator<<(std::ostream& os, const TaskTrace& task_trace) {
return os;
}
} // namespace debug
} // namespace base
} // namespace base::debug

View file

@ -52,12 +52,19 @@ BASE_FEATURE(kUseRustJsonParser,
// If true, use the Rust JSON parser in-thread; otherwise, it runs in a thread
// pool.
const base::FeatureParam<bool> kUseRustJsonParserInCurrentSequence{
&kUseRustJsonParser, "UseRustJsonParserInCurrentSequence", false};
BASE_FEATURE_PARAM(bool,
kUseRustJsonParserInCurrentSequence,
&kUseRustJsonParser,
"UseRustJsonParserInCurrentSequence",
false);
// Use non default low memory device threshold.
// Value should be given via |LowMemoryDeviceThresholdMB|.
#if BUILDFLAG(IS_IOS)
#if BUILDFLAG(IS_ANDROID)
// LINT.IfChange
#define LOW_MEMORY_DEVICE_THRESHOLD_MB 1024
// LINT.ThenChange(//base/android/java/src/org/chromium/base/SysUtils.java)
#elif BUILDFLAG(IS_IOS)
// For M99, 45% of devices have 2GB of RAM, and 55% have more.
#define LOW_MEMORY_DEVICE_THRESHOLD_MB 1024
#else
@ -67,9 +74,11 @@ const base::FeatureParam<bool> kUseRustJsonParserInCurrentSequence{
BASE_FEATURE(kLowEndMemoryExperiment,
"LowEndMemoryExperiment",
FEATURE_DISABLED_BY_DEFAULT);
const base::FeatureParam<int> kLowMemoryDeviceThresholdMB{
&kLowEndMemoryExperiment, "LowMemoryDeviceThresholdMB",
LOW_MEMORY_DEVICE_THRESHOLD_MB};
BASE_FEATURE_PARAM(size_t,
kLowMemoryDeviceThresholdMB,
&kLowEndMemoryExperiment,
"LowMemoryDeviceThresholdMB",
LOW_MEMORY_DEVICE_THRESHOLD_MB);
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// Force to enable LowEndDeviceMode partially on Android 3Gb devices.

View file

@ -19,12 +19,12 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kFeatureParamWithCache);
BASE_EXPORT BASE_DECLARE_FEATURE(kUseRustJsonParser);
BASE_EXPORT extern const base::FeatureParam<bool>
kUseRustJsonParserInCurrentSequence;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(bool,
kUseRustJsonParserInCurrentSequence);
BASE_EXPORT BASE_DECLARE_FEATURE(kLowEndMemoryExperiment);
BASE_EXPORT extern const base::FeatureParam<int> kLowMemoryDeviceThresholdMB;
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(size_t, kLowMemoryDeviceThresholdMB);
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartialLowEndModeOn3GbDevices);

View file

@ -12,6 +12,7 @@
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
@ -30,6 +31,7 @@ std::optional<DriveInfo> GetFileDriveInfo(const FilePath& file_path) {
constexpr char kRotationalFormat[] =
"/sys/dev/block/%lu:%lu/queue/rotational";
constexpr char kRemovableFormat[] = "/sys/dev/block/%lu:%lu/removable";
constexpr char kDeviceFormat[] = "/sys/dev/block/%lu:%lu";
constexpr char kSizeFormat[] = "/sys/dev/block/%lu:%lu/size";
File file(file_path, File::FLAG_OPEN | File::FLAG_READ);
if (!file.IsValid()) {
@ -49,22 +51,48 @@ std::optional<DriveInfo> GetFileDriveInfo(const FilePath& file_path) {
std::string size_path = StringPrintf(kSizeFormat, MAJOR(path_stat.st_dev),
MINOR(path_stat.st_dev));
// Depending on the device, partitions may not expose the desired info. In the
// case when reading the device returned by Fstat() fails in both the
// rotational and removable attributes, attempt to crawl up one directory in
// the sysfs absolute path for the partition which was queried, which should
// give the device which contains this partition, and which should contain the
// desired info in the case where that info is not exposed for the partition
// itself.
std::string rotates;
if (ReadFileToString(base::FilePath(rotational_path), &rotates) &&
rotates.length() == 1 && (rotates[0] == '0' || rotates[0] == '1')) {
drive_info.has_seek_penalty = rotates[0] == '1';
std::string removable;
bool rotates_read = ReadFileToString(FilePath(rotational_path), &rotates);
bool removable_read = ReadFileToString(FilePath(removable_path), &removable);
if (!rotates_read && !removable_read) {
std::string device_path = StringPrintf(
kDeviceFormat, MAJOR(path_stat.st_dev), MINOR(path_stat.st_dev));
FilePath parent_device_path =
MakeAbsoluteFilePath(FilePath(device_path)).DirName();
rotates_read = ReadFileToString(
parent_device_path.Append("queue/rotational"), &rotates);
removable_read =
ReadFileToString(parent_device_path.Append("removable"), &removable);
}
std::string removable;
if (ReadFileToString(base::FilePath(removable_path), &removable) &&
removable.length() == 1 && (removable[0] == '0' || removable[0] == '1')) {
drive_info.is_removable = removable[0] == '1';
if (rotates_read) {
rotates = TrimString(rotates, "\n", TrimPositions::TRIM_TRAILING);
if (rotates.length() == 1 && (rotates[0] == '0' || rotates[0] == '1')) {
drive_info.has_seek_penalty = rotates[0] == '1';
}
}
if (removable_read) {
removable = TrimString(removable, "\n", TrimPositions::TRIM_TRAILING);
if (removable.length() == 1 &&
(removable[0] == '0' || removable[0] == '1')) {
drive_info.is_removable = removable[0] == '1';
}
}
std::string size;
uint64_t bytes;
if (ReadFileToString(FilePath(size_path), &size) &&
StringToUint64(size, &bytes)) {
StringToUint64(TrimString(size, "\n", TrimPositions::TRIM_TRAILING),
&bytes)) {
drive_info.size_bytes = bytes;
}

Some files were not shown because too many files have changed in this diff Show more