Linux Memory Controller (Part 2)

1. memcg_stock_pcp

// The per-CPU charge cache charges 32 pages from a memory cgroup in one batch, adding 32 pages to the cgroup's memory usage at once.
#define CHARGE_BATCH	32U
// When charging a memory cgroup, the current CPU's memcg_stock_pcp is checked first.
// If the cached cgroup (memcg_stock_pcp->cached) is exactly the cgroup being charged,
// and the stocked page count (memcg_stock_pcp->nr_pages) is at least the number of pages
// to charge, the charge is satisfied by simply subtracting those pages from the stock.
struct memcg_stock_pcp {
	// The cached memory cgroup (never the root cgroup).
	struct mem_cgroup *cached; /* this never be root cgroup */
	// Number of pre-charged (stocked) pages.
	unsigned int nr_pages;
	// Work item used to drain the stock asynchronously.
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
// A per-CPU charge cache is defined to reduce contention between CPUs and make charging more efficient.
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);
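
To make the batching effect concrete, here is a minimal userspace model (an illustration only, not kernel code; the names and numbers are assumptions) of the stock idea: a single-page charge that misses the stock charges a whole 32-page batch to the shared counter and keeps the surplus locally, so the following small charges on the same CPU never touch the shared counter at all.

#include <stdio.h>

#define CHARGE_BATCH 32U

static unsigned long counter_usage;   /* models the cgroup's memory usage  */
static unsigned int  stock_nr_pages;  /* models memcg_stock_pcp.nr_pages   */

/* Model of charging nr_pages (single-page case) on one CPU. */
static void charge(unsigned int nr_pages)
{
	if (stock_nr_pages >= nr_pages) {           /* the consume_stock() path */
		stock_nr_pages -= nr_pages;
		return;
	}
	counter_usage += CHARGE_BATCH;              /* charge a whole batch      */
	stock_nr_pages += CHARGE_BATCH - nr_pages;  /* keep the surplus stocked  */
}

int main(void)
{
	for (int i = 0; i < 40; i++)
		charge(1);
	/* 40 single-page charges touched the shared counter only twice. */
	printf("usage=%lu stocked=%u\n", counter_usage, stock_nr_pages); /* 64, 24 */
	return 0;
}

The real implementations of the two halves of this cycle are consume_stock() in Section 2.2 and refill_stock() in Section 2.6.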

2. mem_cgroup_try_charge

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
// Nonzero when swap (memsw) accounting is enabled.
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

// Try to charge one page.
// page: the page to be charged
// mm: memory descriptor of the process that is allocating the page
// gfp_mask: allocation mask used when the page was allocated
// memcgp: output parameter returning the memory cgroup that was charged
// compound: whether to charge as a compound page or as a single page
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound)
{
	struct mem_cgroup *memcg = NULL;
	// For a compound page the page count must be computed; otherwise it is a single page.
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret = 0;

	// If memory cgroups are disabled, return 0.
	if (mem_cgroup_disabled())
		goto out;

	// The page is in the swap cache.
	if (PageSwapCache(page)) {
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
		 * already charged pages, too.  The USED bit is protected by
		 * the page lock, which serializes swap cache removal, which
		 * in turn serializes uncharging.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (compound_head(page)->mem_cgroup)
			goto out;

		if (do_swap_account) {
			// Get the swap entry from page->private.
			swp_entry_t ent = { .val = page_private(page), };
			// Look up the memory cgroup id recorded for this swap entry.
			unsigned short id = lookup_swap_cgroup_id(ent);

			rcu_read_lock();
			// Look up the memory cgroup by id.
			memcg = mem_cgroup_from_id(id);
			// If the cgroup exists, try to take a reference on it.
			if (memcg && !css_tryget_online(&memcg->css))
				memcg = NULL;
			rcu_read_unlock();
		}
	}

	// If no cgroup was found, get the cgroup the process belongs to from its memory descriptor.
	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	// Enter the real charge attempt [see Section 2.1].
	ret = try_charge(memcg, gfp_mask, nr_pages);

	// Drop the reference on the memory cgroup.
	css_put(&memcg->css);
out:
	// The output parameter memcgp returns the memory cgroup that was charged.
	*memcgp = memcg;
	return ret;
}
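
The three entry points discussed in this article are designed to be used together: try reserves the charge, commit binds the page to the cgroup once it is safely installed, and cancel rolls the reservation back on an error path. The following is a hedged sketch of that usual calling pattern, not code taken from the kernel (install_page_somewhere() is a hypothetical placeholder):

// Sketch of the try -> commit / cancel pattern around the memcg charge API.
static int charge_new_page(struct page *page, struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	// Step 1: reserve the charge against the task's memory cgroup.
	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
		return -ENOMEM;			/* over the limit and reclaim did not help */

	if (install_page_somewhere(page)) {	/* hypothetical helper */
		// Step 2a: installation failed, undo the counter charge [Section 4].
		mem_cgroup_cancel_charge(page, memcg, false);
		return -EFAULT;
	}

	// Step 2b: success, bind page->mem_cgroup and update statistics [Section 3].
	mem_cgroup_commit_charge(page, memcg, false, false);
	return 0;
}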

2.1 try_charge

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	// batch is the larger of 32 and the number of pages to charge.
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	// Retry at most 5 times.
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	// Records the cgroup whose (hard) memory limit was exceeded.
	struct mem_cgroup *mem_over_limit;
	// The page counter being charged.
	struct page_counter *counter;
	// Number of pages reclaimed.
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	// The task belongs to the root cgroup: the root cgroup has no memory limit,
	// so nothing needs to be charged and we return immediately.
	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	// If the current CPU's charge cache has enough pages stocked from the cgroup
	// being charged, subtract the requested pages from the stock (no need to touch
	// the cgroup's counters) and return success [see Section 2.2].
	if (consume_stock(memcg, nr_pages))
		return 0;

	// 1. Without memory+swap accounting, go straight to charging memcg->memory [see Section 2.3].
	// 2. With memory+swap accounting, charge memsw first and, on success, charge memory.
	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		// If neither this cgroup nor any ancestor with hierarchical accounting
		// exceeds its limit, take the success path.
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		// Charging memcg->memory failed; with memory+swap accounting enabled,
		// the memsw charge made just above must be undone.
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		// Record the cgroup whose memory usage exceeded its limit.
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		// Record the cgroup whose memory+swap usage exceeded its limit.
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	// If batch is larger than the number of pages actually requested, retry with the exact count.
	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(tsk_is_oom_victim(current) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	// Record a MEMCG_MAX event in mem_cgroup_stat_cpu.
	mem_cgroup_event(mem_over_limit, MEMCG_MAX);

	// mem_over_limit is the cgroup that exceeded its limit; try to reclaim
	// memory from it and record the number of pages reclaimed.
	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	// If, after reclaim, the remaining headroom covers the requested pages, retry [see Section 2.4].
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	// drained (initially false) records whether the per-CPU charge caches have
	// already been drained back to the cgroups.
	if (!drained) {
		// Return the pages stocked in the per-CPU caches to the cgroups and retry [see Section 2.5].
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	// If the allocation does not allow retries, take the nomem path.
	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	// If some pages were reclaimed and no more than 8 pages are being charged, retry.
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	// Retry at most 5 times.
	if (nr_retries--)
		goto retry;

	// If the allocation must not fail, force the charge and let usage exceed the limit.
	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	// Record a MEMCG_OOM event in mem_cgroup_stat_cpu.
	mem_cgroup_event(mem_over_limit, MEMCG_OOM);

	// Put the task into memcg out-of-memory state.
	mem_cgroup_oom(mem_over_limit, gfp_mask,
		       get_order(nr_pages * PAGE_SIZE));
nomem:
	// If the allocation is allowed to fail, return -ENOMEM.
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage go over the limit
	 * temporarily by force charging it.
	 */
	// The allocation must not fail: force the charge, letting the usage
	// temporarily exceed the hard limit.
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	// Take one reference on the cgroup per charged page.
	css_get_many(&memcg->css, nr_pages);

	return 0;

done_restock:
	// Take one reference on the cgroup per charged page (batch).
	css_get_many(&memcg->css, batch);
	// Stock the surplus pages in the current CPU's charge cache [see Section 2.6].
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		if (page_counter_read(&memcg->memory) > memcg->high) {
			/* Don't bother a random interrupted task */
			if (in_interrupt()) {
				schedule_work(&memcg->high_work);
				break;
			}
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}
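
Note how the batch interacts with the paths above: a successful single-page charge still charges batch = 32 pages to the counters and parks the surplus 31 pages in the per-CPU stock via refill_stock(), while a failed attempt with batch > nr_pages first retries with the exact page count, so a charge is never rejected merely because the 32-page batch did not fit under the limit.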

2.2 consume_stock

static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	// If more pages are requested than the maximum stock size (32), bail out
	// and charge the memory cgroup directly.
	if (nr_pages > CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	// The current CPU's charge cache.
	stock = this_cpu_ptr(&memcg_stock);
	// Serve the charge from the stock when both conditions hold:
	// 1. the cgroup being charged is the one cached in this stock
	// 2. the stocked page count is at least the number of pages requested
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		// Subtract the charged pages from the stock.
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

2.3 page_counter_try_charge

// counter: a page counter of the mem_cgroup (memory, memsw, kmem, tcpmem or swap)
// nr_pages: number of pages to charge
// fail: output parameter recording the page counter that exceeded its limit
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;

	// Walk this page counter and the counters of all ancestors with
	// hierarchical accounting enabled.
	for (c = counter; c; c = c->parent) {
		long new;

		// Add the charged pages to the counter's current usage.
		new = atomic_long_add_return(nr_pages, &c->count);
		// Does the new usage exceed the counter's hard limit?
		if (new > c->limit) {
			// Over the limit: subtract the charged pages again.
			atomic_long_sub(nr_pages, &c->count);
			/*
			 * This is racy, but we can live with some
			 * inaccuracy in the failcnt.
			 */
			// Bump the counter's over-limit failure count and report
			// the counter that hit its limit.
			c->failcnt++;
			*fail = c;
			goto failed;
		}
		/*
		 * Just like with failcnt, we can live with some
		 * inaccuracy in the watermark.
		 */
		// Under the limit: if the new usage exceeds the recorded historical
		// maximum, update the watermark.
		if (new > c->watermark)
			c->watermark = new;
	}
	return true;

failed:
	// Exceeding a hard limit means the charge failed, so cancel the charge on
	// all counters below the one that failed.
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}
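
The hierarchical walk is easy to model in isolation. The sketch below is a userspace illustration with made-up limits (not the kernel implementation); it shows the two properties that matter: a charge must fit under every ancestor's limit, and when an ancestor rejects it, the counters already charged further down are rolled back.

#include <stdbool.h>
#include <stdio.h>

struct counter {
	long count;
	long limit;
	struct counter *parent;
};

/* Simplified model of page_counter_try_charge(): charge bottom-up, roll
 * back the already-charged descendants if an ancestor is over its limit. */
static bool model_try_charge(struct counter *c, long nr, struct counter **fail)
{
	for (struct counter *i = c; i; i = i->parent) {
		i->count += nr;
		if (i->count > i->limit) {
			i->count -= nr;
			*fail = i;
			for (struct counter *j = c; j != *fail; j = j->parent)
				j->count -= nr;	/* undo the lower levels */
			return false;
		}
	}
	return true;
}

int main(void)
{
	struct counter root  = { .limit = 100 };		/* assumed limits */
	struct counter child = { .limit = 200, .parent = &root };
	struct counter *fail;

	printf("%d\n", model_try_charge(&child, 60, &fail));	/* 1: fits both levels      */
	printf("%d\n", model_try_charge(&child, 60, &fail));	/* 0: root would hit 120>100 */
	printf("child=%ld root=%ld\n", child.count, root.count); /* 60 60: child rolled back */
	return 0;
}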

2.4 mem_cgroup_margin

static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	// Current usage of the memory counter.
	count = page_counter_read(&memcg->memory);
	// Hard limit of the memory counter.
	limit = READ_ONCE(memcg->memory.limit);
	// Remaining headroom below the memory limit.
	if (count < limit)
		margin = limit - count;

	// If memory+swap accounting is enabled...
	if (do_memsw_account()) {
		// Current usage of the memsw counter.
		count = page_counter_read(&memcg->memsw);
		// Hard limit of the memsw counter.
		limit = READ_ONCE(memcg->memsw.limit);
		// The overall margin is the smaller of the memory headroom and the
		// memory+swap headroom.
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
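
A worked example with assumed numbers: if memory.limit is 1000 pages with 900 in use, and memsw.limit is 1200 pages with 1150 in use, the memory headroom is 100 pages but the memory+swap headroom is only 50, so mem_cgroup_margin() returns 50. A 64-page charge would therefore still go through reclaim in try_charge() even though the memory counter alone has room for it.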

2.5 drain_all_stock

/*
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
 * of the hierarchy under it.
 */
// Return the pages stocked in every per-CPU charge cache to their memory cgroups.
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that system-wide "drain" is running
	 * We do not care about races with the cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	// Get (and pin) the current CPU.
	curcpu = get_cpu();
	// Walk every online CPU.
	for_each_online_cpu(cpu) {
		// This CPU's per-CPU charge cache.
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		// The memory cgroup cached in this CPU's stock.
		memcg = stock->cached;
		// Skip this CPU if no cgroup is cached, the stock holds no pages,
		// or a reference on the cgroup cannot be taken.
		if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
			continue;
		// Skip cgroups that are not descendants of root_memcg.
		if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
			css_put(&memcg->css);
			continue;
		}
		// Mark the stock with the FLUSHING_CACHED_CHARGE flag.
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			// On the current CPU, drain the stock directly back to the
			// cgroup [see Section 2.5.1]; otherwise queue the drain work
			// on that CPU.
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
		css_put(&memcg->css);
	}
	put_cpu();
	mutex_unlock(&percpu_charge_mutex);
}

2.5.1 drain_local_stock

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	/*
	 * The only protection from memory hotplug vs. drain_stock races is
	 * that we always operate on local CPU stock here with IRQ disabled
	 */
	local_irq_save(flags);

	// The current CPU's per-CPU charge cache.
	stock = this_cpu_ptr(&memcg_stock);
	// Return the stocked pages to the cgroup [see Section 2.5.2].
	drain_stock(stock);
	// Clear the FLUSHING_CACHED_CHARGE flag.
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

2.5.2 drain_stock

/*
 * Returns stocks cached in percpu and reset cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	// The memory cgroup currently cached in this stock.
	struct mem_cgroup *old = stock->cached;

	// The stock holds pre-charged pages.
	if (stock->nr_pages) {
		// Returning them means the cgroup's memory usage drops by the same amount.
		page_counter_uncharge(&old->memory, stock->nr_pages);
		// With memory+swap accounting, the memory+swap usage drops as well.
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		// Reset the stocked page count to 0.
		stock->nr_pages = 0;
	}
	// Reset the cached memory cgroup to NULL.
	stock->cached = NULL;
}

2.6 refill_stock

static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	// The current CPU's charge cache.
	stock = this_cpu_ptr(&memcg_stock);
	// The stock currently caches a different memory cgroup.
	if (stock->cached != memcg) { /* reset if necessary */
		// Return its stocked pages to that cgroup...
		drain_stock(stock);
		// ...and switch the cache over to the cgroup being charged.
		stock->cached = memcg;
	}
	// Add the surplus pages to the stock.
	stock->nr_pages += nr_pages;

	// If the stock now holds more than 32 pages, return it all to the cgroup.
	if (stock->nr_pages > CHARGE_BATCH)
		drain_stock(stock);

	local_irq_restore(flags);
}

3. mem_cgroup_commit_charge

// Commit the charge.
// page: the page being charged
// memcg: the memory cgroup that was charged
// lrucare: whether the page may already be on an LRU list
// compound: whether this is a compound page
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_PAGE(!page->mapping, page);
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	// The main work is done by commit_charge() [see Section 3.1].
	commit_charge(page, memcg, lrucare);

	local_irq_disable();
	// Update the memory cgroup's charge statistics [see Section 3.2].
	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();

	if (do_memsw_account() && PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry, nr_pages);
	}
}

3.1 commit_charge

static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);
	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
	// If the page may already be on another memory cgroup's private LRU list,
	// remove it from that list first [see Section 3.1.1].
	if (lrucare)
		lock_page_lru(page, &isolated);
	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	// Make the page belong to this memory cgroup.
	page->mem_cgroup = memcg;

	// If the page was on an LRU list before, add it back to the LRU list
	// private to the charged memory cgroup [see Section 3.1.2].
	if (lrucare)
		unlock_page_lru(page, isolated);
}

3.1.1 lock_page_lru

static void lock_page_lru(struct page *page, int *isolated)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(zone_lru_lock(zone));
	// The page is already on an LRU list.
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		// Get the lruvec of the memory cgroup the page currently belongs to.
		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		// Clear the page's LRU flag...
		ClearPageLRU(page);
		// ...and remove it from that private lruvec.
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

3.1.2 unlock_page_lru

static void unlock_page_lru(struct page *page, int isolated)
{
	struct zone *zone = page_zone(page);

	if (isolated) {
		struct lruvec *lruvec;

		// Get the lruvec of the memory cgroup the page now belongs to.
		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		// Set the page's LRU flag...
		SetPageLRU(page);
		// ...and add it to that private lruvec.
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(zone_lru_lock(zone));
}

3.2 mem_cgroup_charge_statistics

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	// Anonymous page: increase the RSS count.
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
	else {
		// Page-cache page: increase the cache count.
		__this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
		// Swap-backed (shmem/tmpfs) page: increase the shmem count as well.
		if (PageSwapBacked(page))
			__this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
	}

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	// Positive nr_pages: pages entered the cgroup, count a PGPGIN event.
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[PGPGIN]);
	else {
		// Negative nr_pages: pages left the cgroup, count a PGPGOUT event.
		__this_cpu_inc(memcg->stat->events[PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

4. mem_cgroup_cancel_charge

// Cancel a charge made by mem_cgroup_try_charge() when the page ends up not
// being used (e.g. a later step of the operation failed).
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	// The main work is done by cancel_charge() [see Section 4.1].
	cancel_charge(memcg, nr_pages);
}

4.1 cancel_charge

static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	// The root cgroup has no memory limit and is never charged, so there is
	// nothing to cancel.
	if (mem_cgroup_is_root(memcg))
		return;

	// Subtract the charged pages from the memory usage of this cgroup and of
	// all ancestors with hierarchical accounting enabled.
	page_counter_uncharge(&memcg->memory, nr_pages);
	// If memory+swap accounting is enabled, subtract the charged pages from
	// the memory+swap usage of this cgroup and those ancestors as well.
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	// Drop one reference on the cgroup per charged page.
	css_put_many(&memcg->css, nr_pages);
}
