diff --git a/mm/bede.c b/mm/bede.c
index 97101a230..6b216c0d8 100644
--- a/mm/bede.c
+++ b/mm/bede.c
@@ -7,7 +7,10 @@
 bool bede_flush_node_rss(struct mem_cgroup *memcg) { // work around for every time call policy_node for delayed
 	int nid;
 
-	// mem_cgroup_flush_stats();
+	if (mem_cgroup_disabled()) {
+		return false;
+	}
+	mem_cgroup_flush_stats();
 	for_each_node_state(nid, N_MEMORY) {
 		u64 size;
 		struct lruvec *lruvec;
@@ -17,7 +20,7 @@ bool bede_flush_node_rss(struct mem_cgroup *memcg) { // work around for every ti
 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 		if (!lruvec)
 			return false;
-		size = (lruvec_page_state(lruvec, NR_ANON_MAPPED)) * PAGE_SIZE;
+		size = lruvec_page_state_local(lruvec, NR_ANON_MAPPED) << PAGE_SHIFT;
 		memcg->node_rss[nid] = size >> 20;
 	}
 	return true;
@@ -129,7 +132,7 @@ void bede_do_page_walk_and_migration(struct work_struct *work)
 		return;
 	}
 	memcg = get_mem_cgroup_from_mm(task->mm);
-	bede_flush_node_rss(memcg);
+	// bede_flush_node_rss(memcg);
 	// res = bede_get_node(memcg, 0);
 	// // Here consider the control of node limit vs. node rss
 	// if (bede_work->should_migrate){ // The same as before requires it's filled to full
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eeb8f1d72..821ad914b 100755
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1875,16 +1875,15 @@ ALLOW_ERROR_INJECTION(policy_nodemask, TRUE);
 int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
 {
 	if (policy->mode != MPOL_BIND && bede_should_policy) {
-		rcu_read_lock();
 		struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 		if (memcg && root_mem_cgroup && memcg != root_mem_cgroup) {
-			if (bede_flush_node_rss(memcg)) {
+			// if (bede_flush_node_rss(memcg)) {
 			// bede_append_page_walk_and_migration(current->cgroups->dfl_cgrp->bede);
-				mem_cgroup_flush_stats();
 				nd = bede_get_node(memcg, nd);
-				rcu_read_unlock();
+				mem_cgroup_put(memcg);
 				return nd;
-			}
+			// }
 		}
+		mem_cgroup_put(memcg);
 	}
 	if (policy->mode == MPOL_PREFERRED) {