4.19内核SLUB内存分配器

article/2025/11/10 0:21:58

初始化

内核的大部分管理数据结构都是通过kmalloc分配内存的,那么slab本身结构的内存管理就出现了一个鸡与蛋的问题,slab数据结构所需内存远小于一整页的内存块,这些最适合kmalloc分配,而kmalloc只有在slab初始化完之后才能使用。

start_kernel->mm_init->kmem_cache_init

/*
 * Bootstrap the slab allocator itself.  Chicken-and-egg: struct kmem_cache
 * and struct kmem_cache_node are normally allocated from slab caches, which
 * do not exist yet, so two static boot-time instances are used first and
 * later migrated into properly allocated caches via bootstrap().
 */
void __init kmem_cache_init(void)
{
	/* Static storage for the two bootstrap caches. */
	static __initdata struct kmem_cache boot_kmem_cache,
		boot_kmem_cache_node;

	if (debug_guardpage_minorder())
		slub_max_order = 0;

	kmem_cache_node = &boot_kmem_cache_node;
	kmem_cache = &boot_kmem_cache;

	create_boot_cache(kmem_cache_node, "kmem_cache_node",
		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);

	register_hotmemory_notifier(&slab_memory_callback_nb);

	/* Able to allocate the per node structures */
	slab_state = PARTIAL;

	create_boot_cache(kmem_cache, "kmem_cache",
			offsetof(struct kmem_cache, node) +
				nr_node_ids * sizeof(struct kmem_cache_node *),
			SLAB_HWCACHE_ALIGN, 0, 0);

	/* Re-allocate both bootstrap caches out of the now-working allocator. */
	kmem_cache = bootstrap(&boot_kmem_cache);
	kmem_cache_node = bootstrap(&boot_kmem_cache_node);

	/* Now we can use the kmem_cache to allocate kmalloc slabs */
	setup_kmalloc_cache_index_table();
	create_kmalloc_caches(0);

	/* Setup random freelists for each cache */
	init_freelist_randomization();

	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
				  slub_cpu_dead);

	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
		cache_line_size(),
		slub_min_order, slub_max_order, slub_min_objects,
		nr_cpu_ids, nr_node_ids);
}

create_kmalloc_caches中循环通过new_kmalloc_cache创建kmem_cache。 

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __ro_after_init;
EXPORT_SYMBOL(kmalloc_caches);#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1] __ro_after_init;
EXPORT_SYMBOL(kmalloc_dma_caches);
#endifvoid __init create_kmalloc_caches(slab_flags_t flags)
{int i, type;for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {if (!kmalloc_caches[type][i])new_kmalloc_cache(i, type, flags); //创建kmem_cache数组。/** Caches that are not of the two-to-the-power-of size.* These have to be created immediately after the* earlier power of two caches*/if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&!kmalloc_caches[type][1])new_kmalloc_cache(1, type, flags);if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&!kmalloc_caches[type][2])new_kmalloc_cache(2, type, flags);}}/* Kmalloc array is now usable */slab_state = UP;#ifdef CONFIG_ZONE_DMAfor (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];if (s) {unsigned int size = kmalloc_size(i);const char *n = kmalloc_cache_name("dma-kmalloc", size);BUG_ON(!n);kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(n, size, SLAB_CACHE_DMA | flags, 0, 0);}}
#endif
}

 new_kmalloc_cache创建内核预先准备的固定大小的内存块。 

/*
 * Create one of the pre-sized kmalloc caches.
 *
 * Fix: the caller (create_kmalloc_caches, shown earlier in this article)
 * invokes new_kmalloc_cache(i, type, flags) and checks
 * kmalloc_caches[type][i], so the function must take the cache type and
 * store into the two-dimensional array; the old two-argument, 1-D form was
 * from a pre-4.19 kernel and does not match.
 *
 * NOTE(review): upstream 4.19 additionally derives a "kmalloc-rcl-*" name
 * and sets SLAB_RECLAIM_ACCOUNT for KMALLOC_RECLAIM caches; that detail is
 * elided in this excerpt.
 */
static void __init new_kmalloc_cache(int idx, int type, slab_flags_t flags)
{
	kmalloc_caches[type][idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags, 0,
					kmalloc_info[idx].size);
}

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
/* Name/size table for the fixed kmalloc caches; index = kmalloc_index(size). */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	{NULL,                      0},		{"kmalloc-96",             96},
	{"kmalloc-192",           192},		{"kmalloc-8",               8},
	{"kmalloc-16",             16},		{"kmalloc-32",             32},
	{"kmalloc-64",             64},		{"kmalloc-128",           128},
	{"kmalloc-256",           256},		{"kmalloc-512",           512},
	{"kmalloc-1024",         1024},		{"kmalloc-2048",         2048},
	{"kmalloc-4096",         4096},		{"kmalloc-8192",         8192},
	{"kmalloc-16384",       16384},		{"kmalloc-32768",       32768},
	{"kmalloc-65536",       65536},		{"kmalloc-131072",     131072},
	{"kmalloc-262144",     262144},		{"kmalloc-524288",     524288},
	{"kmalloc-1048576",   1048576},		{"kmalloc-2097152",   2097152},
	{"kmalloc-4194304",   4194304},		{"kmalloc-8388608",   8388608},
	{"kmalloc-16777216", 16777216},		{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

kmem_cache_create

内核通过kmem_cache_create()接口来创建一个slab缓存。

一共有5个参数。

name: 要创建的slab对象的名称
size: slab对象的大小
align: slab对象的对齐大小
flags: slab内存分配器的掩码和标志位, 比如常用的SLAB_HWCACHE_ALIGN标志位,创建的kmem_cache管理的object按照硬件cache 对齐
ctor: 对象的构造函数

/*
 * Public entry point for creating a slab cache.  Simply forwards to
 * kmem_cache_create_usercopy() with an empty usercopy region
 * (useroffset = usersize = 0).
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache *cache;

	cache = kmem_cache_create_usercopy(name, size, align, flags,
					   0, 0, ctor);
	return cache;
}
/* trimmed for brevity */
/*
 * Create a slab cache, optionally marking a region of each object as valid
 * for usercopy.  Trimmed excerpt: upstream also takes slab_mutex and cpu/mem
 * hotplug locks around this sequence.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		unsigned int size, unsigned int align,
		slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize,
		void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	/*
	 * Try to reuse (alias) an existing compatible cache first; a
	 * usercopy region forbids merging.
	 */
	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	/* The cache keeps its own copy of the name. */
	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/* Actually build the cache via create_cache() */
	s = create_cache(cache_name, size,
			calculate_alignment(flags, align, size),
			flags, useroffset, usersize, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	return s;
}


create_cache函数

/*
 * Allocate and initialize a new struct kmem_cache, run the allocator's
 * backend setup, and link the cache into the global slab_caches list.
 */
static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	/* The usercopy window must lie entirely inside the object. */
	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	/* kmem_cache is the boot-time cache of struct kmem_cache objects. */
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
	s->useroffset = useroffset;
	s->usersize = usersize;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);	/* allocator-specific setup */
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);	/* publish on the global slab_caches list */
	memcg_link_cache(s);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

__kmem_cache_create函数 

/*
 * SLUB backend of cache creation: open the cache (layout + per-node/per-cpu
 * structures) and, once the slab subsystem is fully up, expose it in sysfs.
 */
int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
	int err;

	err = kmem_cache_open(s, flags);
	if (err)
		return err;

	/* Mutex is not taken during early boot */
	if (slab_state <= UP)
		return 0;

	memcg_propagate_slab_attrs(s);
	err = sysfs_slab_add(s);
	if (err)
		__kmem_cache_release(s);

	return err;
}
/* trimmed excerpt */
/*
 * Trimmed excerpt: compute the object layout, then allocate the per-node
 * and per-cpu management structures for this cache.
 */
static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
....................
	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/* Per-cache secret used to obfuscate stored freelist pointers. */
	s->random = get_random_long();
#endif
	if (!calculate_sizes(s, -1))
		goto error;
	if (!init_kmem_cache_nodes(s))	/* allocate the cache's struct kmem_cache_node *node[MAX_NUMNODES] */
		goto error;
	if (alloc_kmem_cache_cpus(s))	/* allocate the cache's struct kmem_cache_cpu __percpu *cpu_slab */
...........
}



 

kmalloc函数

从kmalloc函数看,内核如何分配小块内存:

KMALLOC(SLUB):
/*
 * kmalloc() fast path.  For compile-time-constant sizes the cache index can
 * be resolved statically; otherwise fall through to __kmalloc().
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		/* Too big for the kmalloc caches: go straight to the page allocator. */
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}

/* Runtime-sized kmalloc: look up the cache by size, then allocate from it. */
void *__kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *s;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return kmalloc_large(size, flags);

	s = kmalloc_slab(size, flags);		/* 1: pick the kmem_cache */

	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	ret = slab_alloc(s, flags, _RET_IP_);	/* 2: allocate one object */

	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);

	ret = kasan_kmalloc(s, ret, size, flags);

	return ret;
}

/* Thin wrapper: allocate with no NUMA node preference. */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, unsigned long addr)
{
	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
}

1:kmalloc_slab获取kmem_cache

/*
 * Map a runtime size and gfp flags to the matching pre-built kmalloc cache.
 *
 * Fix: create_kmalloc_caches() (shown earlier) populates the
 * two-dimensional kmalloc_caches[type][idx] array, so the lookup must also
 * select the type via kmalloc_type(flags).  The previous one-dimensional
 * lookup with a separate kmalloc_dma_caches[] array was pre-4.19 code and
 * inconsistent with the rest of this article.
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	unsigned int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;
		/* Small sizes: table handles the non-power-of-two 96/192 caches. */
		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

	/* Select the NORMAL / RECLAIM / DMA variant from the gfp flags. */
	return kmalloc_caches[kmalloc_type(flags)][index];
}

2 slab_alloc函数

/* Allocate one object from @s with no NUMA placement preference. */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, unsigned long addr)
{
	void *obj;

	obj = slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
	return obj;
}

kmem_cache_alloc 

相比于kmalloc，kmem_cache_alloc分配内存时需要携带kmem_cache *指针，其分配的内存块大小是固定的。因为kmalloc所利用的内存块的大小是事先定义好的，所以很多情况下会产生内部碎片、浪费空间，而kmem_cache_alloc由于内存大小也是量身定做的缘故则不会。

/*
 * Allocate one object from a specific cache.  Unlike kmalloc(), the object
 * size is fixed by the cache, so there is no rounding-up internal waste.
 */
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	void *ret = slab_alloc(s, gfpflags, _RET_IP_);	/* same slab_alloc() fast path as kmalloc */

	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
				s->size, gfpflags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);

/* Fast-path entry: allocate from any NUMA node. */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, unsigned long addr)
{
	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
}

/*
 * Lockless per-cpu fast path: pop the first object off c->freelist with a
 * cmpxchg_double on (freelist, tid); fall back to __slab_alloc() when the
 * cpu slab is empty or belongs to the wrong node.
 */
static __always_inline void *slab_alloc_node(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr)
{
	void *object;
	struct kmem_cache_cpu *c;
	struct page *page;
	unsigned long tid;

	s = slab_pre_alloc_hook(s, gfpflags);
	if (!s)
		return NULL;
redo:
	/*
	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
	 * enabled. We may switch back and forth between cpus while
	 * reading from one cpu area. That does not matter as long
	 * as we end up on the original cpu again when doing the cmpxchg.
	 *
	 * We should guarantee that tid and kmem_cache are retrieved on
	 * the same cpu. It could be different if CONFIG_PREEMPT so we need
	 * to check if it is matched or not.
	 */
	do {
		tid = this_cpu_read(s->cpu_slab->tid);
		c = raw_cpu_ptr(s->cpu_slab);
	} while (IS_ENABLED(CONFIG_PREEMPT) &&
		 unlikely(tid != READ_ONCE(c->tid)));

	/*
	 * Irqless object alloc/free algorithm used here depends on sequence
	 * of fetching cpu_slab's data. tid should be fetched before anything
	 * on c to guarantee that object and page associated with previous tid
	 * won't be used with current tid. If we fetch tid first, object and
	 * page could be one associated with next tid and our alloc/free
	 * request will be failed. In this case, we will retry. So, no problem.
	 */
	barrier();

	/*
	 * The transaction ids are globally unique per cpu and per operation on
	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
	 * occurs on the right processor and that there was no operation on the
	 * linked list in between.
	 */
	object = c->freelist;
	page = c->page;
	if (unlikely(!object || !node_match(page, node))) {
		/* per-cpu freelist empty or node mismatch: refill via slow path */
		object = __slab_alloc(s, gfpflags, node, addr, c);
		stat(s, ALLOC_SLOWPATH);
	} else {
		void *next_object = get_freepointer_safe(s, object);

		/*
		 * The cmpxchg will only match if there was no additional
		 * operation and if we are on the right processor.
		 *
		 * The cmpxchg does the following atomically (without lock
		 * semantics!)
		 * 1. Relocate first pointer to the current per cpu area.
		 * 2. Verify that tid and freelist have not been changed
		 * 3. If they were not changed replace tid and freelist
		 *
		 * Since this is without lock semantics the protection is only
		 * against code executing on this cpu *not* from access by
		 * other cpus.
		 */
		/*
		 * Swap cpu_slab->freelist to next_object so that freelist
		 * points at the object handed out by the next allocation.
		 */
		if (unlikely(!this_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				object, tid,
				next_object, next_tid(tid)))) {
			note_cmpxchg_failure("slab_alloc", s, tid);
			goto redo;
		}
		prefetch_freepointer(s, next_object);
		stat(s, ALLOC_FASTPATH);
	}

	if (unlikely(gfpflags & __GFP_ZERO) && object)
		memset(object, 0, s->object_size);

	slab_post_alloc_hook(s, gfpflags, 1, &object);

	return object;
}

slab_alloc_node函数

slab_alloc_node
    __slab_alloc
        ___slab_alloc
            new_slab_objects
                new_slab
                    allocate_slab
                        alloc_slab_page
                        shuffle_freelist  //会循环填充page中的object中的地址内容。
                c=raw_cpu_ptr
                c->page = page

          c->freelist = get_freepointer
            
    get_freepointer_safe  //获取下一个object的地址

___slab_alloc 精简 

/*
 * Slow path of slab_alloc_node() (trimmed excerpt).  Called when the
 * per-cpu freelist is empty or on the wrong node: refill c->page /
 * c->freelist, possibly by allocating a brand-new slab.
 */
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			unsigned long addr, struct kmem_cache_cpu *c)
{
	void *freelist;
	struct page *page;

	page = c->page;
	if (!page)
		goto new_slab;
redo:
.....
load_freelist:
	/*
	 * freelist is pointing to the list of objects to be used.
	 * page is pointing to the page from which the objects are obtained.
	 * That page must be frozen for per cpu allocations to work.
	 */
	VM_BUG_ON(!c->page->frozen);
	c->freelist = get_freepointer(s, freelist);
	c->tid = next_tid(c->tid);
	return freelist;

new_slab:
	freelist = new_slab_objects(s, gfpflags, node, &c);

	page = c->page;
	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
		goto load_freelist;	/* jump back to install c->freelist */
........
}

 shuffle_freelist函数

/*
 * Build the initial freelist of a fresh slab page in a randomized order
 * (CONFIG_SLAB_FREELIST_RANDOM), chaining every object to the next via
 * set_freepointer().  Returns false when randomization is not applicable.
 */
static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	if (page->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	pos = get_random_int() % freelist_count;	/* random starting point */

	page_limit = page->objects * s->size;
	start = fixup_red_left(s, page_address(page));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, page, &pos, start, page_limit,
				freelist_count);
	page->freelist = cur;

	for (idx = 1; idx < page->objects; idx++) {
		setup_object(s, page, cur);
		next = next_freelist_entry(s, page, &pos, start, page_limit,
			freelist_count);
		set_freepointer(s, cur, next);	/* store next object's address inside cur */
		cur = next;
	}
	setup_object(s, page, cur);
	set_freepointer(s, cur, NULL);	/* last object terminates the list */

	return true;
}

/*
 * Store the next-object pointer @fp at offset s->offset inside @object.
 */
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif
	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}

slab缓存区的object布局: 

/*
 * Read the next-object pointer stored inside @object.  The "safe" variant
 * uses probe_kernel_read() so a fault on a debug-unmapped page is survived.
 */
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	/* Read sizeof(void *) bytes at object + s->offset: the next object's address. */
	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

/*
 * Decode/encode a stored freelist pointer.  With SLAB_FREELIST_HARDENED the
 * stored value is XORed with a per-cache secret and its own address.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
#else
	return ptr;
#endif
}


http://chatgpt.dhexx.cn/article/jQpwLwge.shtml

相关文章

SLUB内存管理之slub初始化

在讲slub内存管理涉及的四个函数之前&#xff0c;先从slub内存分配算法的初始化开始。系统启动时&#xff0c;会进行slub内存分配算法的初始化&#xff0c;函数流程是&#xff1a;start_kernel() -> mm_init()->kmem_cache_init()。在start_kernel()函数中的setup_arch()…

SLUB DEBUG原理

1. 前言 在工作中&#xff0c;经常会遇到由于越界导致的各种奇怪的问题。为什么越界访问导致的问题很奇怪呢&#xff1f;在工作差不多半年的时间里我就遇到了很多越界访问导致的问题&#xff08;不得不吐槽下IC厂商提供的driver&#xff0c;总是隐藏着bug&#xff09;。比如说…

slub allocator工作原理

前言 在Linux中&#xff0c;伙伴系统&#xff08;buddy system&#xff09;是以页&#xff08;1page等于4K&#xff09;为单位管理和分配内存。对于小内存的分配&#xff0c;如果还是使用伙伴系统进行内存分配&#xff0c;就会导致严重浪费内存。此时&#xff0c;slab分配器就应…

SLUB缓存管理

注&#xff1a;本文分析基于linux-4.18.0-193.14.2.el8_2内核版本&#xff0c;即CentOS 8.2 1、关于SLUB 目前大多数系统都不使用slab作为缓存的管理模式&#xff0c;转而使用slub&#xff0c;比如CentOS 7和8默认都是使用slub管理器。slub是基于slab的进一步优化改进&#x…

slub debug(linux4.16.1)

在命令行选项中添加slub_debugUFPZ&#xff0c;以使能相关检测功能&#xff0c;其含义如下&#xff1a; &#xff08;1&#xff09;U&#xff1a;跟踪该slab内存的相关属性&#xff0c;如其创建时所属的cpu、进程、时间 &#xff08;2&#xff09;F&#xff1a;开启sanity检查功…

linux内核内存管理slub

一、概述 linux内存管理核心是伙伴系统&#xff0c;slab&#xff0c;slub&#xff0c;slob是基于伙伴系统之上提供api&#xff0c;用于内核内存分配释放管理&#xff0c;适用于小内存&#xff08;小于&#xff11;页&#xff09;分配与释放&#xff0c;当然大于&#xff11;页…

内存管理 slub算法

内核管理页面使用了2个算法&#xff1a;伙伴算法和slub算法&#xff0c;伙伴算法以页为单位管理内存&#xff0c;但在大多数情况下&#xff0c;程序需要的并不是一整页&#xff0c;而是几个、几十个字节的小内存。于是需要另外一套系统来完成对小内存的管理&#xff0c;这就是s…

linux slub分配器,slub分配器

原标题&#xff1a;slub分配器 概述&#xff1a; Linux的物理内存管理采用了以页为单位的buddy system(伙伴系统)&#xff0c;但是很多情况下&#xff0c;内核仅仅需要一个较小的对象空间&#xff0c;而且这些小块的空间对于不同对象又是变化的、不可预测的&#xff0c;所以需要…

Linux内存管理(八): slub分配器和kmalloc

kernel: 5.10 Arch: aarch64 上文 介绍过&#xff0c; 伙伴系统在分配内存时是以物理页为单位的&#xff0c;但在很多场景下&#xff0c;内存需要分配的大小是以字节为单位的&#xff0c;达不到一个物理页的大小。如果继续使用伙伴系统进行内存分配&#xff0c; 那么就会出现严…

Linux 踩内存 slub,Linux SLUB 内存分配器分析

本文简介 本文主要介绍了Linux SLUB分配的产生原因、设计思路及其代码分析。适合于对Linux内核&#xff0c;特别是对Linux内存分配器感兴趣的读者。 1.为何需要SLUB&#xff1f; Linux SLUB内存分配器合入Linux主分支已经整整10年了&#xff01;并且是Linux目前默认的内存分配器…

SLUB

 内核管理页面使用了2个算法:伙伴算法和slub算法,伙伴算法以页为单位管理内存,但在大多数情况下,程序需要的并不是一整页,而是几个、几十个字节的小内存。于是需要另外一套系统来完成对小内存的管理,这就是slub系统。slub系统运行在伙伴系统之上,为内核提供小内存管…

linux内存源码分析 - SLUB分配器概述

本文为原创&#xff0c;转载请注明&#xff1a;http://www.cnblogs.com/tolimit/ SLUB和SLAB的区别 首先为什么要说slub分配器&#xff0c;内核里小内存分配一共有三种&#xff0c;SLAB/SLUB/SLOB&#xff0c;slub分配器是slab分配器的进化版&#xff0c;而slob是一种精简的小内…

Linux 内存管理(三)—— SLUB

目录 一、概述 二、SLUB 2.1 数据结构 2.2 初始化 2.2.1 静态建立过程 2.3 API 2.3.1 kmem_cache_create 2.3.2 kmem_cache_alloc 2.3.3 kmem_cache_free 2.3.4 kmalloc/kfree 三、参考 一、概述 伙伴系统最小的分配单位是页&#xff0c;这对于较小的内存需求会造…

SLUB内存管理的4个主要接口函数介绍(4)

slub内存管理的4个主要接口函数如下&#xff08;参考kernel-4.19&#xff09;&#xff1a; //slab缓存的创建 struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); //slab object的分配 void *k…

Linux内核:内存管理——SLUB分配器

SLUB和SLAB的区别 首先为什么要说slub分配器&#xff0c;内核里小内存分配一共有三种&#xff0c;SLAB/SLUB/SLOB&#xff0c;slub分配器是slab分配器的进化版&#xff0c;而slob是一种精简的小内存分配算法&#xff0c;主要用于嵌入式系统。慢慢的slab分配器或许会被slub取代&…

SLUB的引入及举例说明

我们都知道Buddy分配器是按照页的单位分配的&#xff08;Buddy系统分配器实现&#xff09;&#xff0c;如果我们需要分配几十个字节&#xff0c;几百个字节的时候&#xff0c;就需要用到SLAB分配器。 SLAB分配器专门是针对小内存分配而设计的&#xff0c;比如我们驱动中常见的…

电信光猫改桥接模式

如果只是改桥接 可以试试下面这两个地址&#xff1a;http://192.168.1.1/bridge_route.gchhttp://192.168.1.1:8080/bridge_route.gch 转载于:https://www.cnblogs.com/Devopser/p/11257535.html

电信光猫改桥接还在苦苦破解超级密码吗?

电信光猫路由改桥接&#xff0c;不同的地区有不通的方法。比较幸运的地区和终端&#xff0c;有通用的超级密码。但是不幸的地区&#xff0c;就需要通过破解这个超级密码。我就属于比较不幸的地区&#xff0c;遇到不幸的终端&#xff1a;天翼网关TEWA-708G。然后按照网上大神的破…

获取电信光猫TEWA-600超级管理密码,修改电信光猫为桥接模式

文章转载&#xff1a;玩转盒子 前些年各地运营商响应国家政策&#xff0c;光进铜退小区宽带由网线入户改成光缆入户&#xff0c;这样必须用光猫把光信号转换成电信号。我用的是中国电信的宽带&#xff0c;2017年我们小区也进行了网络改造&#xff0c;网线入户变成了光缆入户。…

友华PT921G光猫破解获取超级密码和更改桥接模式

获取超级密码 1.登陆光猫管理地址192.168.1.1 2.打开新的窗口输入&#xff1a;http://192.168.1.1/romfile.cfg ,就能下载到配置文件 3.用记事本打开romfile.cfg&#xff0c;点击编辑–>查找–>输入telecomadmin->点击查找下一个 4.查找到username“telecomadmin”,而…