Background
The system_server process is one of the most important system processes in Android, and what people usually call AMS (ActivityManagerService) runs inside it. AMS owns a thread named CpuTracker whose job is to monitor system CPU usage. This article walks through the relevant source code and explains exactly how the CPU usage figures are computed.
CpuTracker initialization
[Figure: main-thread flow]
[Figure: CpuTracker-thread flow]
Source code analysis
frameworks/base/services/core/java/com/android/server/am/ActivityManagerService.java
public ActivityManagerService(Context systemContext) {
// Create the CpuTracker thread
mProcessCpuThread = new Thread("CpuTracker") {
@Override
public void run() {
synchronized (mProcessCpuTracker) {
mProcessCpuInitLatch.countDown();
mProcessCpuTracker.init();
}
while (true) {
try {
try {
synchronized(this) {
final long now = SystemClock.uptimeMillis();
long nextCpuDelay = (mLastCpuTime.get()+MONITOR_CPU_MAX_TIME)-now;
long nextWriteDelay = (mLastWriteTime+BATTERY_STATS_TIME)-now;
//Slog.i(TAG, "Cpu delay=" + nextCpuDelay
// + ", write delay=" + nextWriteDelay);
if (nextWriteDelay < nextCpuDelay) {
nextCpuDelay = nextWriteDelay;
}
// In effect, wake up at least once every 30 minutes (see the note after this block)
if (nextCpuDelay > 0) {
mProcessCpuMutexFree.set(true);
this.wait(nextCpuDelay);
}
}
} catch (InterruptedException e) {
}
updateCpuStatsNow();
} catch (Exception e) {
Slog.e(TAG, "Unexpected exception collecting process stats", e);
}
}
}
};
}
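A note on the timing constants before moving on: in AOSP, MONITOR_CPU_MIN_TIME is 5 s, MONITOR_CPU_MAX_TIME is 0x0fffffff ms (about 74 hours, i.e. effectively never), and BATTERY_STATS_TIME is 30 minutes, so it is the battery-stats write cadence that actually drives the wakeup. A minimal standalone sketch of the delay computation (constant values assumed from AOSP; the demo itself is not part of the excerpt above):
public class WakeupDelayDemo {
    // Values assumed from AOSP's ActivityManagerService.
    static final long MONITOR_CPU_MAX_TIME = 0x0fffffff;      // ~74.5 hours
    static final long BATTERY_STATS_TIME = 30 * 60 * 1000;    // 30 minutes

    public static void main(String[] args) {
        long now = 100_000;            // pretend SystemClock.uptimeMillis()
        long lastCpuTime = 90_000;     // last CPU update
        long lastWriteTime = 50_000;   // last battery-stats write
        long nextCpuDelay = (lastCpuTime + MONITOR_CPU_MAX_TIME) - now;
        long nextWriteDelay = (lastWriteTime + BATTERY_STATS_TIME) - now;
        // The smaller delay wins, so the 30-minute write cadence normally
        // decides how long the thread sleeps.
        System.out.println("sleep " + Math.min(nextCpuDelay, nextWriteDelay) + " ms");
    }
}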
private void start() {
removeAllProcessGroups();
mProcessCpuThread.start();
// Wait for the synchronized block started in mProcessCpuThread,
// so that any other access to mProcessCpuTracker from main thread
// will be blocked during mProcessCpuTracker initialization.
try {
mProcessCpuInitLatch.await();
} catch (InterruptedException e) {
Slog.wtf(TAG, "Interrupted wait during start", e);
Thread.currentThread().interrupt();
throw new IllegalStateException("Interrupted wait during start");
}
}
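The ordering here is what makes the handshake safe: the worker takes the mProcessCpuTracker lock before counting down, so once await() returns, any other synchronized access to the tracker blocks until init() has finished. A minimal sketch of the same pattern, with hypothetical names:
import java.util.concurrent.CountDownLatch;

public class InitHandshakeDemo {
    public static void main(String[] args) throws InterruptedException {
        final Object tracker = new Object();          // stands in for mProcessCpuTracker
        final CountDownLatch ready = new CountDownLatch(1);
        Thread worker = new Thread(() -> {
            synchronized (tracker) {
                ready.countDown();                    // lock is already held when the starter resumes
                // ... slow init() runs here while holding the lock ...
            }
        }, "CpuTracker");
        worker.start();
        ready.await();                                // from here on, synchronized (tracker)
        synchronized (tracker) {                      // blocks until the worker finishes init
            System.out.println("init finished");
        }
    }
}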
// Called during startProcessLocked(); wakes the CpuTracker thread, which then runs updateCpuStatsNow()
void updateCpuStats() {
final long now = SystemClock.uptimeMillis();
if (mLastCpuTime.get() >= now - MONITOR_CPU_MIN_TIME) {
return;
}
if (mProcessCpuMutexFree.compareAndSet(true, false)) {
synchronized (mProcessCpuThread) {
mProcessCpuThread.notify();
}
}
}
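The AtomicBoolean gate is worth spelling out: mProcessCpuMutexFree is set to true only right before the tracker thread parks in wait(), and updateCpuStatsNow() resets it to false. The compareAndSet therefore lets at most one caller deliver a notify(), and a caller whose CAS fails simply returns, since an update is already running or imminent.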
void updateCpuStatsNow() {
synchronized (mProcessCpuTracker) {
mProcessCpuMutexFree.set(false);
final long now = SystemClock.uptimeMillis();
boolean haveNewCpuStats = false;
// Only update if at least 5 s (MONITOR_CPU_MIN_TIME) have passed since the last CPU update
if (MONITOR_CPU_USAGE &&
mLastCpuTime.get() < (now-MONITOR_CPU_MIN_TIME)) {
mLastCpuTime.set(now);
mProcessCpuTracker.update();
if (mProcessCpuTracker.hasGoodLastStats()) {
haveNewCpuStats = true;
//Slog.i(TAG, mProcessCpu.printCurrentState());
//Slog.i(TAG, "Total CPU usage: "
// + mProcessCpu.getTotalCpuPercent() + "%");
// Slog the cpu usage if the property is set.
if ("true".equals(SystemProperties.get("events.cpu"))) {
int user = mProcessCpuTracker.getLastUserTime();
int system = mProcessCpuTracker.getLastSystemTime();
int iowait = mProcessCpuTracker.getLastIoWaitTime();
int irq = mProcessCpuTracker.getLastIrqTime();
int softIrq = mProcessCpuTracker.getLastSoftIrqTime();
int idle = mProcessCpuTracker.getLastIdleTime();
int total = user + system + iowait + irq + softIrq + idle;
if (total == 0) total = 1;
EventLog.writeEvent(EventLogTags.CPU,
((user+system+iowait+irq+softIrq) * 100) / total,
(user * 100) / total,
(system * 100) / total,
(iowait * 100) / total,
(irq * 100) / total,
(softIrq * 100) / total);
}
}
}
final BatteryStatsImpl bstats = mBatteryStatsService.getActiveStatistics();
synchronized(bstats) {
synchronized(mPidsSelfLocked) {
if (haveNewCpuStats) {
if (bstats.startAddingCpuLocked()) {
int totalUTime = 0;
int totalSTime = 0;
final int N = mProcessCpuTracker.countStats();
for (int i=0; i<N; i++) {
ProcessCpuTracker.Stats st = mProcessCpuTracker.getStats(i);
if (!st.working) {
continue;
}
ProcessRecord pr = mPidsSelfLocked.get(st.pid);
totalUTime += st.rel_utime;
totalSTime += st.rel_stime;
if (pr != null) {
BatteryStatsImpl.Uid.Proc ps = pr.curProcBatteryStats;
if (ps == null || !ps.isActive()) {
pr.curProcBatteryStats = ps = bstats.getProcessStatsLocked(
pr.info.uid, pr.processName);
}
ps.addCpuTimeLocked(st.rel_utime, st.rel_stime);
pr.curCpuTime += st.rel_utime + st.rel_stime;
if (pr.lastCpuTime == 0) {
pr.lastCpuTime = pr.curCpuTime;
}
} else {
BatteryStatsImpl.Uid.Proc ps = st.batteryStats;
if (ps == null || !ps.isActive()) {
st.batteryStats = ps = bstats.getProcessStatsLocked(
bstats.mapUid(st.uid), st.name);
}
ps.addCpuTimeLocked(st.rel_utime, st.rel_stime);
}
}
final int userTime = mProcessCpuTracker.getLastUserTime();
final int systemTime = mProcessCpuTracker.getLastSystemTime();
final int iowaitTime = mProcessCpuTracker.getLastIoWaitTime();
final int irqTime = mProcessCpuTracker.getLastIrqTime();
final int softIrqTime = mProcessCpuTracker.getLastSoftIrqTime();
final int idleTime = mProcessCpuTracker.getLastIdleTime();
bstats.finishAddingCpuLocked(totalUTime, totalSTime, userTime,
systemTime, iowaitTime, irqTime, softIrqTime, idleTime);
}
}
}
// Combined with the 30-minute wakeup above, this persists battery stats roughly every 30 minutes
if (mLastWriteTime < (now-BATTERY_STATS_TIME)) {
mLastWriteTime = now;
mBatteryStatsService.scheduleWriteToDisk();
}
}
}
}
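The percentage math in the EventLog branch above (and again in printProcessCPU() below) divides each bucket's relative ticks by a total that includes idle time. A worked example with made-up deltas:
public class CpuPercentDemo {
    public static void main(String[] args) {
        // Made-up relative tick counts for one sample interval.
        int user = 120, system = 60, iowait = 10, irq = 5, softIrq = 5, idle = 300;
        int total = user + system + iowait + irq + softIrq + idle;   // 500
        if (total == 0) total = 1;                                    // guard, as in the source
        int busy = ((user + system + iowait + irq + softIrq) * 100) / total;
        System.out.println(busy + "% busy, " + (user * 100) / total + "% user, "
                + (system * 100) / total + "% kernel");               // 40%, 24%, 12%
    }
}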
static class CpuBinder extends Binder {
ActivityManagerService mActivityManagerService;
private final PriorityDump.PriorityDumper mPriorityDumper =
new PriorityDump.PriorityDumper() {
@Override
public void dumpCritical(FileDescriptor fd, PrintWriter pw, String[] args,
boolean asProto) {
if (asProto) return;
if (!DumpUtils.checkDumpAndUsageStatsPermission(mActivityManagerService.mContext,
"cpuinfo", pw)) return;
synchronized (mActivityManagerService.mProcessCpuTracker) {
// Only prints the CPU load and usage; does not refresh the underlying data
pw.print(mActivityManagerService.mProcessCpuTracker.printCurrentLoad());
pw.print(mActivityManagerService.mProcessCpuTracker.printCurrentState(
SystemClock.uptimeMillis()));
}
}
};
CpuBinder(ActivityManagerService activityManagerService) {
mActivityManagerService = activityManagerService;
}
@Override
protected void dump(FileDescriptor fd, PrintWriter pw, String[] args) {
PriorityDump.dump(mPriorityDumper, fd, pw, args);
}
}
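CpuBinder is the service behind adb shell dumpsys cpuinfo. Note that dumpCritical() only formats the most recently collected sample via printCurrentLoad()/printCurrentState(); it never calls update(), so the figures it prints can be as stale as the last pass of updateCpuStatsNow().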
frameworks/base/core/java/com/android/internal/os/ProcessCpuTracker.java
public void init() {
if (DEBUG) Slog.v(TAG, "Init: " + this);
mFirst = true;
update();
}
// Refresh the CPU statistics
public void update() {
if (DEBUG) Slog.v(TAG, "Update: " + this);
final long nowUptime = SystemClock.uptimeMillis();
final long nowRealtime = SystemClock.elapsedRealtime();
final long nowWallTime = System.currentTimeMillis();
final long[] sysCpu = mSystemCpuData;
if (Process.readProcFile("/proc/stat", SYSTEM_CPU_FORMAT,
null, sysCpu, null)) {
// Total user time is user + nice time.
final long usertime = (sysCpu[0]+sysCpu[1]) * mJiffyMillis;
// Total system time is simply system time.
final long systemtime = sysCpu[2] * mJiffyMillis;
// Total idle time is simply idle time.
final long idletime = sysCpu[3] * mJiffyMillis;
// I/O wait, hardware irq and soft irq times are tracked separately.
final long iowaittime = sysCpu[4] * mJiffyMillis;
final long irqtime = sysCpu[5] * mJiffyMillis;
final long softirqtime = sysCpu[6] * mJiffyMillis;
// This code is trying to avoid issues with idle time going backwards,
// but currently it gets into situations where it triggers most of the time. :(
if (true || (usertime >= mBaseUserTime && systemtime >= mBaseSystemTime
&& iowaittime >= mBaseIoWaitTime && irqtime >= mBaseIrqTime
&& softirqtime >= mBaseSoftIrqTime && idletime >= mBaseIdleTime)) {
// The new readings are no smaller than the baselines, so this sample is valid; compute the relative (delta) values below.
mRelUserTime = (int)(usertime - mBaseUserTime);
mRelSystemTime = (int)(systemtime - mBaseSystemTime);
mRelIoWaitTime = (int)(iowaittime - mBaseIoWaitTime);
mRelIrqTime = (int)(irqtime - mBaseIrqTime);
mRelSoftIrqTime = (int)(softirqtime - mBaseSoftIrqTime);
mRelIdleTime = (int)(idletime - mBaseIdleTime);
mRelStatsAreGood = true;
if (DEBUG) {
Slog.i("Load", "Total U:" + (sysCpu[0]*mJiffyMillis)
+ " N:" + (sysCpu[1]*mJiffyMillis)
+ " S:" + (sysCpu[2]*mJiffyMillis) + " I:" + (sysCpu[3]*mJiffyMillis)
+ " W:" + (sysCpu[4]*mJiffyMillis) + " Q:" + (sysCpu[5]*mJiffyMillis)
+ " O:" + (sysCpu[6]*mJiffyMillis));
Slog.i("Load", "Rel U:" + mRelUserTime + " S:" + mRelSystemTime
+ " I:" + mRelIdleTime + " Q:" + mRelIrqTime);
}
// Update the baselines for the next pass
mBaseUserTime = usertime;
mBaseSystemTime = systemtime;
mBaseIoWaitTime = iowaittime;
mBaseIrqTime = irqtime;
mBaseSoftIrqTime = softirqtime;
mBaseIdleTime = idletime;
} else {
// The readings went backwards relative to the baselines; treat this sample as bad and skip the calculation.
mRelUserTime = 0;
mRelSystemTime = 0;
mRelIoWaitTime = 0;
mRelIrqTime = 0;
mRelSoftIrqTime = 0;
mRelIdleTime = 0;
mRelStatsAreGood = false;
Slog.w(TAG, "/proc/stats has gone backwards; skipping CPU update");
return;
}
}
// Update the three kinds of sample timestamps (uptime, elapsed realtime, wall clock)
mLastSampleTime = mCurrentSampleTime;
mCurrentSampleTime = nowUptime;
mLastSampleRealTime = mCurrentSampleRealTime;
mCurrentSampleRealTime = nowRealtime;
mLastSampleWallTime = mCurrentSampleWallTime;
mCurrentSampleWallTime = nowWallTime;
final StrictMode.ThreadPolicy savedPolicy = StrictMode.allowThreadDiskReads();
try {
mCurPids = collectStats("/proc", -1, mFirst, mCurPids, mProcStats);
} finally {
StrictMode.setThreadPolicy(savedPolicy);
}
final float[] loadAverages = mLoadAverageData;
if (Process.readProcFile("/proc/loadavg", LOAD_AVERAGE_FORMAT,
null, null, loadAverages)) {
// Read the 1/5/15-minute load averages
float load1 = loadAverages[0];
float load5 = loadAverages[1];
float load15 = loadAverages[2];
if (load1 != mLoad1 || load5 != mLoad5 || load15 != mLoad15) {
mLoad1 = load1;
mLoad5 = load5;
mLoad15 = load15;
onLoadChanged(load1, load5, load15);
}
}
if (DEBUG) Slog.i(TAG, "*** TIME TO COLLECT STATS: "
+ (SystemClock.uptimeMillis()-mCurrentSampleTime));
mWorkingProcsSorted = false;
mFirst = false;
}
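To make the /proc/stat arithmetic concrete: the SYSTEM_CPU_FORMAT fields land in sysCpu[0..6] as user, nice, system, idle, iowait, irq and softirq ticks (see proc(5)). A standalone sketch of the same delta calculation, assuming a 10 ms jiffy (USER_HZ = 100, the common case) and made-up tick values:
public class ProcStatDemo {
    public static void main(String[] args) {
        // Fields from the "cpu" line: user nice system idle iowait irq softirq
        long[] cur  = {4705, 356, 584, 3699, 23, 23, 0};   // current sample
        long[] base = {4600, 350, 560, 3500, 20, 22, 0};   // previous baseline
        long jiffyMillis = 10;                              // assumed jiffy length
        long user    = (cur[0] + cur[1] - base[0] - base[1]) * jiffyMillis; // user + nice
        long system  = (cur[2] - base[2]) * jiffyMillis;
        long idle    = (cur[3] - base[3]) * jiffyMillis;
        long iowait  = (cur[4] - base[4]) * jiffyMillis;
        long irq     = (cur[5] - base[5]) * jiffyMillis;
        long softirq = (cur[6] - base[6]) * jiffyMillis;
        long total = user + system + idle + iowait + irq + softirq;
        System.out.printf("cpu busy: %d%%%n",
                (user + system + iowait + irq + softirq) * 100 / total);  // 41%
    }
}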
private int[] collectStats(String statsFile, int parentPid, boolean first,
int[] curPids, ArrayList<Stats> allProcs) {
// Collect every pid under /proc; the kernel returns them in ascending order
int[] pids = Process.getPids(statsFile, curPids);
int NP = (pids == null) ? 0 : pids.length;
int NS = allProcs.size();
int curStatsIndex = 0;
for (int i=0; i<NP; i++) {
int pid = pids[i];
if (pid < 0) {
NP = pid;
break;
}
Stats st = curStatsIndex < NS ? allProcs.get(curStatsIndex) : null;
// Already-known pids take this branch; a new pid falls through to the branch below
if (st != null && st.pid == pid) {
// Update an existing process...
st.added = false;
st.working = false;
curStatsIndex++;
if (DEBUG) Slog.v(TAG, "Existing "
+ (parentPid < 0 ? "process" : "thread")
+ " pid " + pid + ": " + st);
if (st.interesting) {
final long uptime = SystemClock.uptimeMillis();
final long[] procStats = mProcessStatsData;
if (!Process.readProcFile(st.statFile.toString(),
PROCESS_STATS_FORMAT, null, procStats, null)) {
continue;
}
final long minfaults = procStats[PROCESS_STAT_MINOR_FAULTS];
final long majfaults = procStats[PROCESS_STAT_MAJOR_FAULTS];
final long utime = procStats[PROCESS_STAT_UTIME] * mJiffyMillis;
final long stime = procStats[PROCESS_STAT_STIME] * mJiffyMillis;
if (utime == st.base_utime && stime == st.base_stime) {
// No CPU time consumed since the last sample; move on to the next pid
st.rel_utime = 0;
st.rel_stime = 0;
st.rel_minfaults = 0;
st.rel_majfaults = 0;
if (st.active) {
st.active = false;
}
continue;
}
if (!st.active) {
st.active = true;
}
if (parentPid < 0) {
getName(st, st.cmdlineFile);
if (st.threadStats != null) {
// Recurse to collect per-thread CPU data for this pid
mCurThreadPids = collectStats(st.threadsDir, pid, false,
mCurThreadPids, st.threadStats);
}
}
if (DEBUG) Slog.v("Load", "Stats changed " + st.name + " pid=" + st.pid
+ " utime=" + utime + "-" + st.base_utime
+ " stime=" + stime + "-" + st.base_stime
+ " minfaults=" + minfaults + "-" + st.base_minfaults
+ " majfaults=" + majfaults + "-" + st.base_majfaults);
// Compute the CPU time consumed over this interval (current minus baseline)
st.rel_uptime = uptime - st.base_uptime;
st.base_uptime = uptime;
st.rel_utime = (int)(utime - st.base_utime);
st.rel_stime = (int)(stime - st.base_stime);
st.base_utime = utime;
st.base_stime = stime;
st.rel_minfaults = (int)(minfaults - st.base_minfaults);
st.rel_majfaults = (int)(majfaults - st.base_majfaults);
st.base_minfaults = minfaults;
st.base_majfaults = majfaults;
st.working = true;
}
continue;
}
// On the first run (or when a new pid appears) we get here and insert the pid into the list
if (st == null || st.pid > pid) {
// We have a new process!
st = new Stats(pid, parentPid, mIncludeThreads);
allProcs.add(curStatsIndex, st);
curStatsIndex++;
NS++;
if (DEBUG) Slog.v(TAG, "New "
+ (parentPid < 0 ? "process" : "thread")
+ " pid " + pid + ": " + st);
final String[] procStatsString = mProcessFullStatsStringData;
final long[] procStats = mProcessFullStatsData;
st.base_uptime = SystemClock.uptimeMillis();
String path = st.statFile.toString();
//Slog.d(TAG, "Reading proc file: " + path);
if (Process.readProcFile(path, PROCESS_FULL_STATS_FORMAT, procStatsString,
procStats, null)) {
// This is a possible way to filter out processes that
// are actually kernel threads... do we want to? Some
// of them do use CPU, but there can be a *lot* that are
// not doing anything.
st.vsize = procStats[PROCESS_FULL_STAT_VSIZE];
if (true || procStats[PROCESS_FULL_STAT_VSIZE] != 0) {
// Record the baseline values
st.interesting = true;
st.baseName = procStatsString[0];
st.base_minfaults = procStats[PROCESS_FULL_STAT_MINOR_FAULTS];
st.base_majfaults = procStats[PROCESS_FULL_STAT_MAJOR_FAULTS];
st.base_utime = procStats[PROCESS_FULL_STAT_UTIME] * mJiffyMillis;
st.base_stime = procStats[PROCESS_FULL_STAT_STIME] * mJiffyMillis;
} else {
Slog.i(TAG, "Skipping kernel process pid " + pid
+ " name " + procStatsString[0]);
st.baseName = procStatsString[0];
}
} else {
Slog.w(TAG, "Skipping unknown process pid " + pid);
st.baseName = "<unknown>";
st.base_utime = st.base_stime = 0;
st.base_minfaults = st.base_majfaults = 0;
}
if (parentPid < 0) {
getName(st, st.cmdlineFile);
if (st.threadStats != null) {
// Recurse to seed the per-thread stats for this process
mCurThreadPids = collectStats(st.threadsDir, pid, true,
mCurThreadPids, st.threadStats);
}
} else if (st.interesting) {
st.name = st.baseName;
st.nameWidth = onMeasureProcessName(st.name);
}
if (DEBUG) Slog.v("Load", "Stats added " + st.name + " pid=" + st.pid
+ " utime=" + st.base_utime + " stime=" + st.base_stime
+ " minfaults=" + st.base_minfaults + " majfaults=" + st.base_majfaults);
st.rel_utime = 0;
st.rel_stime = 0;
st.rel_minfaults = 0;
st.rel_majfaults = 0;
st.added = true;
if (!first && st.interesting) {
st.working = true;
}
continue;
}
// This process has gone away!
st.rel_utime = 0;
st.rel_stime = 0;
st.rel_minfaults = 0;
st.rel_majfaults = 0;
st.removed = true;
st.working = true;
allProcs.remove(curStatsIndex);
NS--;
if (DEBUG) Slog.v(TAG, "Removed "
+ (parentPid < 0 ? "process" : "thread")
+ " pid " + pid + ": " + st);
// Decrement the loop counter so that we process the current pid
// again the next time through the loop.
i--;
continue;
}
while (curStatsIndex < NS) {
// This process has gone away!
final Stats st = allProcs.get(curStatsIndex);
st.rel_utime = 0;
st.rel_stime = 0;
st.rel_minfaults = 0;
st.rel_majfaults = 0;
st.removed = true;
st.working = true;
allProcs.remove(curStatsIndex);
NS--;
if (localLOGV) Slog.v(TAG, "Removed pid " + st.pid + ": " + st);
}
return pids;
}
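collectStats() is a single-pass sorted-merge: both the pid array from Process.getPids() and the stats list are kept in ascending pid order, so one walk classifies every entry as existing, added, or removed. A minimal sketch of the same merge with simplified types:
import java.util.ArrayList;
import java.util.List;

public class PidDiffDemo {
    static void diff(int[] newPids, List<Integer> known) {
        int k = 0;
        for (int i = 0; i < newPids.length; i++) {
            int pid = newPids[i];
            Integer cur = k < known.size() ? known.get(k) : null;
            if (cur != null && cur == pid) { k++; continue; }              // existing: update in place
            if (cur == null || cur > pid) { known.add(k, pid); k++; continue; } // new: insert, keep order
            known.remove(k);                                               // gone: drop, and ...
            i--;                                                           // ... revisit the same pid
        }
        while (k < known.size()) known.remove(k);                          // trailing removals
    }

    public static void main(String[] args) {
        List<Integer> known = new ArrayList<>(List.of(1, 5, 9));
        diff(new int[]{1, 3, 9, 12}, known);   // 5 died; 3 and 12 appeared
        System.out.println(known);             // [1, 3, 9, 12]
    }
}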
// Only formats and prints the CPU data; does not refresh it
final public String printCurrentState(long now) {
final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
buildWorkingProcs();
StringWriter sw = new StringWriter();
PrintWriter pw = new FastPrintWriter(sw, false, 1024);
pw.print("CPU usage from ");
if (now > mLastSampleTime) {
pw.print(now-mLastSampleTime);
pw.print("ms to ");
pw.print(now-mCurrentSampleTime);
pw.print("ms ago");
} else {
pw.print(mLastSampleTime-now);
pw.print("ms to ");
pw.print(mCurrentSampleTime-now);
pw.print("ms later");
}
pw.print(" (");
pw.print(sdf.format(new Date(mLastSampleWallTime)));
pw.print(" to ");
pw.print(sdf.format(new Date(mCurrentSampleWallTime)));
pw.print(")");
long sampleTime = mCurrentSampleTime - mLastSampleTime;
long sampleRealTime = mCurrentSampleRealTime - mLastSampleRealTime;
long percAwake = sampleRealTime > 0 ? ((sampleTime*100) / sampleRealTime) : 0;
if (percAwake != 100) {
pw.print(" with ");
pw.print(percAwake);
pw.print("% awake");
}
pw.println(":");
final int totalTime = mRelUserTime + mRelSystemTime + mRelIoWaitTime
+ mRelIrqTime + mRelSoftIrqTime + mRelIdleTime;
if (DEBUG) Slog.i(TAG, "totalTime " + totalTime + " over sample time "
+ (mCurrentSampleTime-mLastSampleTime));
int N = mWorkingProcs.size();
for (int i=0; i<N; i++) {
Stats st = mWorkingProcs.get(i);
printProcessCPU(pw, st.added ? " +" : (st.removed ? " -": " "),
st.pid, st.name, (int)st.rel_uptime,
st.rel_utime, st.rel_stime, 0, 0, 0, st.rel_minfaults, st.rel_majfaults);
if (!st.removed && st.workingThreads != null) {
int M = st.workingThreads.size();
for (int j=0; j<M; j++) {
Stats tst = st.workingThreads.get(j);
printProcessCPU(pw,
tst.added ? " +" : (tst.removed ? " -": " "),
tst.pid, tst.name, (int)st.rel_uptime,
tst.rel_utime, tst.rel_stime, 0, 0, 0, 0, 0);
}
}
}
printProcessCPU(pw, "", -1, "TOTAL", totalTime, mRelUserTime, mRelSystemTime,
mRelIoWaitTime, mRelIrqTime, mRelSoftIrqTime, 0, 0);
pw.flush();
return sw.toString();
}
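Put together, printCurrentState() produces the familiar dumpsys cpuinfo block. An illustrative sample (all numbers made up):
CPU usage from 30007ms to 5002ms ago (2019-05-01 10:00:00.000 to 2019-05-01 10:00:25.005) with 98% awake:
  12% 1234/system_server: 7.9% user + 4% kernel / faults: 300 minor 2 major
  3.5% 2345/com.example.app: 2% user + 1.5% kernel
18% TOTAL: 11% user + 6% kernel + 0.5% iowait + 0.5% softirq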
// Build and sort the list of processes that did work in the last interval
final void buildWorkingProcs() {
if (!mWorkingProcsSorted) {
mWorkingProcs.clear();
final int N = mProcStats.size();
for (int i=0; i<N; i++) {
Stats stats = mProcStats.get(i);
if (stats.working) {
mWorkingProcs.add(stats);
if (stats.threadStats != null && stats.threadStats.size() > 1) {
stats.workingThreads.clear();
final int M = stats.threadStats.size();
for (int j=0; j<M; j++) {
Stats tstats = stats.threadStats.get(j);
if (tstats.working) {
stats.workingThreads.add(tstats);
}
}
Collections.sort(stats.workingThreads, sLoadComparator);
}
}
}
Collections.sort(mWorkingProcs, sLoadComparator);
mWorkingProcsSorted = true;
}
}
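sLoadComparator is not part of the excerpt; it orders entries by their relative CPU time (user + kernel), busiest first. A minimal equivalent, assuming only the Stats fields shown above:
// Hypothetical stand-in for sLoadComparator (not shown in the excerpt):
// order by rel_utime + rel_stime, descending.
static final java.util.Comparator<Stats> BY_LOAD_DESC =
        (a, b) -> Integer.compare(b.rel_utime + b.rel_stime,
                                  a.rel_utime + a.rel_stime);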
private void printProcessCPU(PrintWriter pw, String prefix, int pid, String label,
int totalTime, int user, int system, int iowait, int irq, int softIrq,
int minFaults, int majFaults) {
pw.print(prefix);
if (totalTime == 0) totalTime = 1;
printRatio(pw, user+system+iowait+irq+softIrq, totalTime);
pw.print("% ");
if (pid >= 0) {
pw.print(pid);
pw.print("/");
}
pw.print(label);
pw.print(": ");
printRatio(pw, user, totalTime);
pw.print("% user + ");
printRatio(pw, system, totalTime);
pw.print("% kernel");
if (iowait > 0) {
pw.print(" + ");
printRatio(pw, iowait, totalTime);
pw.print("% iowait");
}
if (irq > 0) {
pw.print(" + ");
printRatio(pw, irq, totalTime);
pw.print("% irq");
}
if (softIrq > 0) {
pw.print(" + ");
printRatio(pw, softIrq, totalTime);
pw.print("% softirq");
}
if (minFaults > 0 || majFaults > 0) {
pw.print(" / faults:");
if (minFaults > 0) {
pw.print(" ");
pw.print(minFaults);
pw.print(" minor");
}
if (majFaults > 0) {
pw.print(" ");
pw.print(majFaults);
pw.print(" major");
}
}
pw.println();
}
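printRatio() is not included above; in AOSP it prints numerator/denominator as a percentage, adding one decimal digit only when the integer part is below 10. Roughly:
private void printRatio(PrintWriter pw, long numerator, long denominator) {
    long thousands = (numerator * 1000) / denominator;   // percentage in tenths
    long hundreds = thousands / 10;                      // integer percent
    pw.print(hundreds);
    if (hundreds < 10) {                                 // only small values get a decimal
        long remainder = thousands - (hundreds * 10);
        if (remainder != 0) {
            pw.print('.');
            pw.print(remainder);
        }
    }
}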
system/core/libprocessgroup/processgroup.cpp
void removeAllProcessGroups()
{
LOG(VERBOSE) << "removeAllProcessGroups()";
const auto& cgroup_root_path = GetCgroupRootPath();
std::unique_ptr<DIR, decltype(&closedir)> root(opendir(cgroup_root_path.c_str()), closedir);
if (root == NULL) {
PLOG(ERROR) << "Failed to open " << cgroup_root_path;
} else {
dirent* dir;
while ((dir = readdir(root.get())) != nullptr) {
if (dir->d_type != DT_DIR) {
continue;
}
if (!StartsWith(dir->d_name, "uid_")) {
continue;
}
auto path = StringPrintf("%s/%s", cgroup_root_path.c_str(), dir->d_name);
RemoveUidProcessGroups(path);
LOG(VERBOSE) << "Removing " << path;
if (rmdir(path.c_str()) == -1) PLOG(WARNING) << "Failed to remove " << path;
}
}
}
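removeAllProcessGroups() is what start() calls before launching the tracker thread: it walks the cgroup root for uid_<uid> directories left over from a previous boot or a system_server restart, empties the pid_<pid> groups inside each one via RemoveUidProcessGroups(), and then removes the uid directory itself.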
static const std::string& GetCgroupRootPath() {
static std::string cgroup_root_path;
std::call_once(init_path_flag, [&]() {
#ifdef __ANDROID__
// low-ram devices use per-app memcg by default, unlike high-end ones
bool low_ram_device = GetBoolProperty("ro.config.low_ram", false);
bool per_app_memcg =
GetBoolProperty("ro.config.per_app_memcg", low_ram_device);
#else
// host does not support Android properties
bool per_app_memcg = false;
#endif
if (per_app_memcg) {
// Check if mem cgroup is mounted, only then check for
// write-access to avoid SELinux denials
cgroup_root_path =
(access(MEM_CGROUP_TASKS, F_OK) || access(MEM_CGROUP_PATH, W_OK) ?
ACCT_CGROUP_PATH : MEM_CGROUP_PATH);
} else {
cgroup_root_path = ACCT_CGROUP_PATH;
}
});
return cgroup_root_path;
}
static void RemoveUidProcessGroups(const std::string& uid_path) {
std::unique_ptr<DIR, decltype(&closedir)> uid(opendir(uid_path.c_str()), closedir);
if (uid != NULL) {
dirent* dir;
while ((dir = readdir(uid.get())) != nullptr) {
if (dir->d_type != DT_DIR) {
continue;
}
if (!StartsWith(dir->d_name, "pid_")) {
continue;
}
auto path = StringPrintf("%s/%s", uid_path.c_str(), dir->d_name);
LOG(VERBOSE) << "Removing " << path;
if (rmdir(path.c_str()) == -1) PLOG(WARNING) << "Failed to remove " << path;
}
}
}