re_design {}
Re helps its partners develop a story they can tell, tools they can use, and experiences that engage people in their world. The team consists of design thinkers and makers across the UK, Europe, Australia, the UAE, and North America.
- Responsive design matters
- Navigation should flow
- Personalise experiences
- Scalable architecture wins
- Content drives engagement
- Follow design patterns
- Stay platform agnostic
- Create user personas
- Intuitive interfaces win
- Support retina displays
- Enhance with animations
- Create reusable components
- Follow design trends
- Quality over quantity
- Prototype before dev
- Secure your website
- Simplify navigation flow
- Include human elements
- Mobile-first approach
- Regular updates matter
- Engaging visuals convert
- SEO improves visibility
- Test across devices
- Optimise image sizes
- Respect users’ time
- Prioritise data security
- Adaptive design wins
- User experience first
<Tag="re_design">
#include <linux/error-injection.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/sections.h>

static LIST_HEAD(error_injection_list);
static DEFINE_MUTEX(ei_mutex);
struct ei_entry {
    struct list_head list;
    unsigned long start_addr;
    unsigned long end_addr;
    int etype;
    void *priv;
};

bool within_error_injection_list(unsigned long addr)
{
    struct ei_entry *ent;
    bool ret = false;

    mutex_lock(&ei_mutex);
    list_for_each_entry(ent, &error_injection_list, list) {
        if (addr >= ent->start_addr && addr < ent->end_addr) {
            ret = true;
            break;
        }
    }
    mutex_unlock(&ei_mutex);
    return ret;
}

int get_injectable_error_type(unsigned long addr)
{
    struct ei_entry *ent;
    int ei_type = -EINVAL;

    mutex_lock(&ei_mutex);
    list_for_each_entry(ent, &error_injection_list, list) {
        if (addr >= ent->start_addr && addr < ent->end_addr) {
            ei_type = ent->etype;
            break;
        }
    }
    mutex_unlock(&ei_mutex);

    return ei_type;
}

static void populate_error_injection_list(struct error_injection_entry *start,
                                          struct error_injection_entry *end,
                                          void *priv)
{
    struct error_injection_entry *iter;
    struct ei_entry *ent;
    unsigned long entry, offset = 0, size = 0;

    mutex_lock(&ei_mutex);
    for (iter = start; iter < end; iter++) {
        entry = (unsigned long)dereference_symbol_descriptor((void *)iter->addr);

        if (!kernel_text_address(entry) ||
            !kallsyms_lookup_size_offset(entry, &size, &offset)) {
            pr_err("%p\n", (void *)entry);
            continue;
        }

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
            break;
        ent->start_addr = entry;
        ent->end_addr = entry + size;
        ent->etype = iter->etype;
        ent->priv = priv;
        INIT_LIST_HEAD(&ent->list);
        list_add_tail(&ent->list, &error_injection_list);
    }
    mutex_unlock(&ei_mutex);
}

extern struct error_injection_entry __start_error_injection_whitelist[];
extern struct error_injection_entry __stop_error_injection_whitelist[];

static void __init populate_kernel_ei_list(void)
{
    populate_error_injection_list(__start_error_injection_whitelist,
                                  __stop_error_injection_whitelist,
                                  NULL);
}

#ifdef CONFIG_MODULES
static void module_load_ei_list(struct module *mod)
{
    if (!mod->num_ei_funcs)
        return;

    populate_error_injection_list(mod->ei_funcs,
                                  mod->ei_funcs + mod->num_ei_funcs, mod);
}

static void module_unload_ei_list(struct module *mod)
{
    struct ei_entry *ent, *n;

    if (!mod->num_ei_funcs)
        return;

    mutex_lock(&ei_mutex);
    list_for_each_entry_safe(ent, n, &error_injection_list, list) {
        if (ent->priv == mod) {
            list_del_init(&ent->list);
            kfree(ent);
        }
    }
    mutex_unlock(&ei_mutex);
}

static int ei_module_callback(struct notifier_block *nb,
                              unsigned long val, void *data)
{
    struct module *mod = data;

    if (val == MODULE_STATE_COMING)
        module_load_ei_list(mod);
    else if (val == MODULE_STATE_GOING)
        module_unload_ei_list(mod);

    return NOTIFY_DONE;
}

static struct notifier_block ei_module_nb = {
    .notifier_call = ei_module_callback,
    .priority = 0
};

static __init int module_ei_init(void)
{
    return register_module_notifier(&ei_module_nb);
}
#else /* !CONFIG_MODULES */
#define module_ei_init() (0)
#endif

static void *ei_seq_start(struct seq_file *m, loff_t *pos)
{
    mutex_lock(&ei_mutex);
    return seq_list_start(&error_injection_list, *pos);
}

static void ei_seq_stop(struct seq_file *m, void *v)
{
    mutex_unlock(&ei_mutex);
}

static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
    return seq_list_next(v, &error_injection_list, pos);
}

static const char *error_type_string(int etype)
{
    switch (etype) {
    case EI_ETYPE_NULL:
        return "NULL";
    case EI_ETYPE_ERRNO:
        return "ERRNO";
    case EI_ETYPE_ERRNO_NULL:
        return "ERRNO_NULL";
    case EI_ETYPE_TRUE:
        return "TRUE";
    default:
        return "(unknown)";
    }
}

static int ei_seq_show(struct seq_file *m, void *v)
{
    struct ei_entry *ent = list_entry(v, struct ei_entry, list);

    seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
               error_type_string(ent->etype));
    return 0;
}

static const struct seq_operations ei_sops = {
    .start = ei_seq_start,
    .next  = ei_seq_next,
    .stop  = ei_seq_stop,
    .show  = ei_seq_show,
};

DEFINE_SEQ_ATTRIBUTE(ei);

static int __init ei_debugfs_init(void)
{
    struct dentry *dir, *file;

    dir = debugfs_create_dir("error_injection", NULL);

    file = debugfs_create_file("list", 0444, dir, NULL, &ei_fops);
    if (!file) {
        debugfs_remove(dir);
        return -ENOMEM;
    }

    return 0;
}

static int __init init_error_injection(void)
{
    populate_kernel_ei_list();

    if (!module_ei_init())
        ei_debugfs_init();

    return 0;
}
late_initcall(init_error_injection);
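For context, a minimal sketch of how a function opts in to the whitelist that populate_kernel_ei_list() above walks at boot; it assumes the standard ALLOW_ERROR_INJECTION() macro from <linux/error-injection.h>, and example_open_resource is a made-up name used only for illustration.

#include <linux/errno.h>
#include <linux/error-injection.h>

/* Hypothetical function, for illustration only. */
static int example_open_resource(void)
{
    /* real work would go here */
    return 0;
}

/*
 * Record the function in the _error_injection_whitelist section; the
 * injected return value is then interpreted as an errno (EI_ETYPE_ERRNO).
 */
ALLOW_ERROR_INJECTION(example_open_resource, ERRNO);

Once whitelisted, within_error_injection_list() reports the function as overridable, and the current whitelist can be read back through the debugfs file created by ei_debugfs_init() (error_injection/list), which ei_seq_show() formats as one symbol and error type per line.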
ockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmallocsmall_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/ngotoout_ nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmallocsmall_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/ngotoout_ 
nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*siz*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse 
<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/ngotoout_undo_partial_alloc;/ngroup_info->blocks[i]=b;/n}/n}/nreturngroup_info;/n/n/nout_undo_partial_alloc:/n/nwhile(--i>=0){/n/nfree_page((unsignedlong)group_info->blocks[i]);/n/n}/n/nkfree(group_info);/n/nreturnNULL;/n/n}/n/n/n/nEXPORT_SYMBOL(groups_alloc);/n/n/n/nvoidgroups_free(structgroup_info*group_info)/n/n{/n/nif(group_info->blocks[0]!=group_info->small_block){/n/ninti;/n/nfor(i=0;i<group_info->nblocks;i++)/n/n/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/ngotoout_undo_partial_alloc;/ngroup_info->blocks[i]=b;/n}/n}/nreturngroup_info;/n/n/nout_undo_partial_alloc:/n/nwhile(--i>=0){/n/nfree_page((unsignedlong)group_info->blocks[i]);/n/n}/n/nkfree(group_info);/n/nreturnNULL;/n/n}/n/n/n/nEXPORT_SYMBOL(groups_alloc);/n/n/n/nvoidgroups_free(structgroup_info*group_info)/n/n{/n/nif(group_info->blocks[0]!=group_info->small_block){/n/ninti;/n/nfor(i=0;i<group_info->nblocks;i++)/n/n/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)/NGROUPS_PER_BLOCKnblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgro
up_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize <pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/ngotoout_undo_partial_alloc;/ngroup_info->blocks[i]=b;/n}/n}/nreturngroup_info;/n/n/nout_undo_partial_alloc:/n/nwhile(--i>=0){/n/nfree_page((unsignedlong)group_info->blocks[i]);/n/n}/n/nkfree(group_info);/n/nreturnNULL;/n/n}/n/n/n/nEXPORT_SYMBOL(groups_alloc);/n/n/n/nvoidgroups_free(structgroup_info*group_info)/n/n{/n/nif(group_info->blocks[0]!=group_info->small_block){/n/ninti;/n/nfor(i=0;i<group_info->nblocks;i++)/n/n/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/ngotoout_undo_partial_alloc;/ngroup_info->blocks[i]=b;/n}/n}/nreturngroup_info;/n/n/nout_undo_partial_alloc:/n/nwhile(--i>=0){/n/nfree_page((unsignedlong)group_info->blocks[i]);/n/n}/n/nkfree(group_info);/n/nreturnNULL;/n/n}/n/n/n/nEXPORT_SYMBOL(groups_alloc);/n/n/n/nvoidgroups_free(structgroup_info*group_info)/n/n{/n/nif(group_info->blocks[0]!=group_info->small_block){/n/ninti;/n/nfor(i=0;i<group_info->nblocks;i++)/n/n/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)/NGROUPS_PER_BLOCKnblocks*sizeof(gid_t*),G
FP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsizennblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse<pclass="text"data-text="/nstructgroup_infoinit_groups={.usage=ATOMIC_INIT(2)};/n/nstructgroup_info*groups_alloc(intgidsetsize){/nstructgroup_info*group_info;/nintnblocks;/ninti;/n/n/nnblocks=(gidsetsize+NGROUPS_PER_BLOCK-1)>/NGROUPS_PER_BLOCK;/n/*Makesurewealwaysallocateatleastoneindirectblockpointer*//nnblocks=nblocks?:1;/ngroup_info=kmalloc(sizeof(*group_info)+nblocks*sizeof(gid_t*),GFP_USER);/nif(!group_info)/nreturnNULL;/n/ngroup_info->ngroups=gidsetsize;/ngroup_info->nblocks=nblocks;/natomic_set(&group_info->usage,1);/n/nif(gidsetsize<=NGROUPS_SMALL)/ngroup_info->blocks[0]=group_info->small_block;/nelse{/nfor(i=0;i<nblocks;i++){/ngid_t*b;/nb=(void*)__get_free_page(GFP_USER);/nif(!b)/ngotoout_ #include <linux/error-injection.h>\n#include <linux/debugfs.h>\n#include <linux/kallsyms.h>\n#include <linux/kprobes.h>\n#include <linux/module.h>\n#include <linux/mutex.h>\n#include <linux/list.h>\n#include <linux/slab.h>\n#include <asm/sections.h>\n\nstatic LIST_HEAD(error_injection_list);\nstatic DEFINE_MUTEX(ei_mutex);\nstruct ei_entry {\nstruct list_head list;\nunsigned long start_addr;\nunsigned long end_addr;\nint etype;\nvoid *priv;\n};\n\nbool within_error_injection_list(unsigned long addr)\n{\nstruct ei_entry *ent;\nbool ret = false;\n\nmutex_lock(&ei_mutex);\nlist_for_each_entry(ent, &error_injection_list, list) {\nif (addr >= ent->start_addr && addr < ent->end_addr) {\nret = true;\nbreak;\n}\n}\nmutex_unlock(&ei_mutex);\nreturn ret;\n}\n\nint get_injectable_error_type(unsigned long addr)\n{\nstruct ei_entry *ent;\nint ei_type = -EINVAL;\n\nmutex_lock(&ei_mutex);\nlist_for_each_entry(ent, &error_injection_list, list) {\nif (addr >= ent->start_addr && addr < ent->end_addr) {\nei_type = 
ent->etype;\nbreak;\n}\n}\nmutex_unlock(&ei_mutex);\n\nreturn ei_type;\n}\n\nstatic voidpopulate_error_injection_list(struct error_injection_entry*start,\nstruct error_injection_entry*end,\nvoid *priv)\n{\nstruct error_injection_entry *iter;\nstruct ei_entry *ent;\nunsigned long entry, offset = 0, size = 0;\n\nmutex_lock(&ei_mutex);\nfor (iter = start; iter < end; iter++) {\nentry = (unsigned long)dereference_symbol_descriptor((void *)iter->addr);\n\nif (!kernel_text_address(entry) ||\n!kallsyms_lookup_size_offset(entry, &size, &offset)) {\npr_err("%p\n",\n(void *)entry);\ncontinue;\n}\n\nent = kmalloc(sizeof(*ent), GFP_KERNEL);\nif (!ent)\nbreak;\nent->start_addr = entry;\nent->end_addr = entry + size;\nent->etype = iter->etype;\nent->priv = priv;\nINIT_LIST_HEAD(&ent->list);\nlist_add_tail(&ent->list, &error_injection_list);\n}\nmutex_unlock(&ei_mutex);\n}\n\nextern struct error_injection_entry__start_error_injection_whitelist[];\nextern struct error_injection_entry__stop_error_injection_whitelist[];\n\nstatic void __init populate_kernel_ei_list(void)\n{\npopulate_error_injection_list(__start_error_injection_whitelist,\n__stop_error_injection_whitelist,\nNULL);\n}\n\n#ifdef CONFIG_MODULES\nstatic void module_load_ei_list(struct module *mod)\n {\npopulate_error_injection_list(__start_error_injection_whitelist,\n__stop_error_injection_whitelist,\nNULL);\n}\n\n#ifdef CONFIG_MODULES\nstatic void module_load_ei_list(struct module *mod)\n{\nif (!mod->num_ei_funcs)\nreturn;\n\npopulate_error_injection_list(mod->ei_funcs,\nmod->ei_funcs + mod->num_ei_funcs, mod);\n}\n\nstatic void module_unload_ei_list(struct module *mod)\n{\nstruct ei_entry *ent, *n;\n\nif (!mod->num_ei_funcs)\nreturn;\n\nmutex_lock(&ei_mutex);\nlist_for_each_entry_safe(ent, n, &error_injection_list, list) {\nif (ent->priv == mod){\nlist_del_init(&ent->list);\nkfree(ent);\n}\n}\nmutex_unlock(&ei_mutex);\n}\n\nstatic int ei_module_callback(struct notifier_block *nb,\nunsigned long val, void *data)\n{\nstruct module *mod = data;\n\nif (val == MODULE_STATE_COMING)\nmodule_load_ei_list(mod);\nelse if (val == MODULE_STATE_GOING)\nmodule_unload_ei_list(mod);\n\nreturn NOTIFY_DONE;\n}\n\nstatic struct notifier_block ei_module_nb = {\n.notifier_call = ei_module_callback,\n.priority = 0\n};\n\nstatic __init int module_ei_init(void)\n{\nreturn register_module_notifier(&ei_module_nb);\n}\n#else /* !CONFIG_MODULES */\n#define module_ei_init() (0)\n#endif\n\nstatic void *ei_seq_start(struct seq_file *m, loff_t *pos)\n{\nmutex_lock(&ei_mutex);\nreturn seq_list_start(&error_injection_list, *pos);\n}\n\nstatic void ei_seq_stop(struct seq_file *m, void *v)\n{\nmutex_unlock(&ei_mutex);\n}\n\nstatic void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)\n{\nreturn seq_list_next(v, &error_injection_list, pos);\n}\n\nstatic const char *error_type_string(int etype)\n{\nswitch (etype) {\ncase EI_ETYPE_NULL:\nreturn "NULL";\ncase EI_ETYPE_ERRNO:\nreturn "ERRNO";\ncase EI_ETYPE_ERRNO_NULL:\nreturn "ERRNO_NULL";\ncase EI_ETYPE_TRUE:\nreturn "TRUE";\ndefault:\nreturn "(unknown)";\n}\n}\n\nstatic int ei_seq_show(struct seq_file *m, void *v)\n{\nstruct ei_entry *ent = list_entry(v, struct ei_entry, list);\n\nseq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,\nerror_type_string(ent->etype));\nreturn 0;\n}\n\nstatic const struct seq_operations ei_sops = {\n.start = ei_seq_start,\n.next = ei_seq_next,\n.stop = ei_seq_stop,\n.show = ei_seq_show,\n};\n\nDEFINE_SEQ_ATTRIBUTE(ei);\n\nstatic int __init ei_debugfs_init(void)\n{\nstruct dentry *dir, 
#ifdef CONFIG_MODULES
static void module_load_ei_list(struct module *mod)
{
	if (!mod->num_ei_funcs)
		return;

	populate_error_injection_list(mod->ei_funcs,
				      mod->ei_funcs + mod->num_ei_funcs,
				      mod);
}

static void module_unload_ei_list(struct module *mod)
{
	struct ei_entry *ent, *n;

	if (!mod->num_ei_funcs)
		return;

	mutex_lock(&ei_mutex);
	list_for_each_entry_safe(ent, n, &error_injection_list, list) {
		if (ent->priv == mod) {
			list_del_init(&ent->list);
			kfree(ent);
		}
	}
	mutex_unlock(&ei_mutex);
}

/* Module notifier: add entries when a module comes up, drop them on unload. */
static int ei_module_callback(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_COMING)
		module_load_ei_list(mod);
	else if (val == MODULE_STATE_GOING)
		module_unload_ei_list(mod);

	return NOTIFY_DONE;
}

static struct notifier_block ei_module_nb = {
	.notifier_call = ei_module_callback,
	.priority = 0
};

static __init int module_ei_init(void)
{
	return register_module_notifier(&ei_module_nb);
}
#else /* !CONFIG_MODULES */
#define module_ei_init()	(0)
#endif

/* seq_file plumbing so the whitelist can be dumped through debugfs. */
static void *ei_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ei_mutex);
	return seq_list_start(&error_injection_list, *pos);
}

static void ei_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&ei_mutex);
}

static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &error_injection_list, pos);
}

static const char *error_type_string(int etype)
{
	switch (etype) {
	case EI_ETYPE_NULL:
		return "NULL";
	case EI_ETYPE_ERRNO:
		return "ERRNO";
	case EI_ETYPE_ERRNO_NULL:
		return "ERRNO_NULL";
	case EI_ETYPE_TRUE:
		return "TRUE";
	default:
		return "(unknown)";
	}
}

static int ei_seq_show(struct seq_file *m, void *v)
{
	struct ei_entry *ent = list_entry(v, struct ei_entry, list);

	seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
		   error_type_string(ent->etype));
	return 0;
}

static const struct seq_operations ei_sops = {
	.start = ei_seq_start,
	.next  = ei_seq_next,
	.stop  = ei_seq_stop,
	.show  = ei_seq_show,
};

DEFINE_SEQ_ATTRIBUTE(ei);

static int __init ei_debugfs_init(void)
{
	struct dentry *dir, *file;

	dir = debugfs_create_dir("error_injection", NULL);

	file = debugfs_create_file("list", 0444, dir, NULL, &ei_fops);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

static int __init init_error_injection(void)
{
	populate_kernel_ei_list();

	if (!module_ei_init())
		ei_debugfs_init();

	return 0;
}
late_initcall(init_error_injection);
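The listing above only consumes whitelist entries; functions opt in at their definition site with the ALLOW_ERROR_INJECTION() macro from <linux/error-injection.h>, which is what fills the __start/__stop_error_injection_whitelist section for built-in code and mod->ei_funcs for modules. Below is a minimal sketch, assuming CONFIG_FUNCTION_ERROR_INJECTION is enabled; my_fault_point() and the other demo names are invented for illustration. Once such a module loads, the function should show up in debugfs under error_injection/list with type ERRNO.

#include <linux/module.h>
#include <linux/error-injection.h>

/* Hypothetical fault point; real work would happen here. */
static noinline int my_fault_point(void)
{
	return 0;
}
/* Record my_fault_point() in this module's ei_funcs table as EI_ETYPE_ERRNO,
 * so module_load_ei_list() above adds it to error_injection_list at load time. */
ALLOW_ERROR_INJECTION(my_fault_point, ERRNO);

static int __init ei_demo_init(void)
{
	return my_fault_point();
}
module_init(ei_demo_init);

static void __exit ei_demo_exit(void)
{
}
module_exit(ei_demo_exit);

MODULE_LICENSE("GPL");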
/* Supplementary group (gid_t) set allocation and teardown. */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks * sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;

	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;

			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;

		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
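A minimal caller sketch for the two helpers above; the caller, its loop, and the header choices are illustrative assumptions, not part of the original listing. Entry g of the flattened GID set lives at blocks[g / NGROUPS_PER_BLOCK][g % NGROUPS_PER_BLOCK]; in real kernel code the last reference is normally dropped through the reference-counted put_group_info() wrapper rather than by calling groups_free() directly.

#include <linux/cred.h>		/* struct group_info, groups_alloc() -- assumed header for this era */
#include <linux/errno.h>

static int demo_build_groups(void)
{
	struct group_info *gi;
	int g;

	/* NGROUPS_SMALL + 1 entries forces the per-page (indirect block) path. */
	gi = groups_alloc(NGROUPS_SMALL + 1);
	if (!gi)
		return -ENOMEM;

	/* Fill the flattened GID set; the values here are arbitrary demo GIDs. */
	for (g = 0; g < gi->ngroups; g++)
		gi->blocks[g / NGROUPS_PER_BLOCK][g % NGROUPS_PER_BLOCK] = g;

	groups_free(gi);	/* frees the extra pages and the struct itself */
	return 0;
}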