Commit 4b8feff2 authored by Paul Moore

netlabel: fix the horribly broken catmap functions

The NetLabel secattr catmap functions, and the SELinux import/export
glue routines, were broken in many horrible ways and the SELinux glue
code fiddled with the NetLabel catmap structures in ways that we
probably shouldn't allow.  At some point this "worked", but that was
likely due to a bit of dumb luck and sub-par testing (both inflicted
by yours truly).  This patch corrects these problems by basically
gutting the code in favor of something less obtuse and restoring the
NetLabel abstractions in the SELinux catmap glue code.

Everything is working now, and if it decides to break itself in the
future this code will be much easier to debug than the code it
replaces.

One noteworthy side effect of the changes is that it is no longer
necessary to allocate a NetLabel catmap before calling one of the
NetLabel APIs to set a bit in the catmap.  NetLabel will automatically
allocate the catmap nodes when needed, resulting in fewer allocations
when the lowest bit is greater than 255 and less code in the LSMs.
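
As a rough illustration (a hypothetical LSM caller; the secattr, bit,
and rc names are placeholders, not code taken verbatim from this
patch), the calling pattern changes from:

	/* before: the caller had to allocate the catmap up front */
	secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
	if (secattr->attr.mls.cat == NULL)
		return -ENOMEM;
	rc = netlbl_secattr_catmap_setbit(&secattr->attr.mls.cat,
					  bit, GFP_ATOMIC);

to simply:

	/* after: setbit/setrng/setlong allocate catmap nodes on demand */
	secattr->attr.mls.cat = NULL;
	rc = netlbl_secattr_catmap_setbit(&secattr->attr.mls.cat,
					  bit, GFP_ATOMIC);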

Cc: stable@vger.kernel.org
Reported-by: Christian Evans <frodox@zoho.com>
Signed-off-by: Paul Moore <pmoore@redhat.com>
Tested-by: Casey Schaufler <casey@schaufler-ca.com>
parent 41c3bd20
@@ -285,11 +285,11 @@ static inline void netlbl_secattr_catmap_free(
 {
 	struct netlbl_lsm_secattr_catmap *iter;
 
-	do {
+	while (catmap) {
 		iter = catmap;
 		catmap = catmap->next;
 		kfree(iter);
-	} while (catmap);
+	}
 }
 
 /**
@@ -394,6 +394,9 @@ int netlbl_secattr_catmap_walk(struct netlbl_lsm_secattr_catmap *catmap,
 				u32 offset);
 int netlbl_secattr_catmap_walk_rng(struct netlbl_lsm_secattr_catmap *catmap,
 				   u32 offset);
+int netlbl_secattr_catmap_getlong(struct netlbl_lsm_secattr_catmap *catmap,
+				  u32 *offset,
+				  unsigned long *bitmap);
 int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap **catmap,
 				 u32 bit,
 				 gfp_t flags);
@@ -401,6 +404,10 @@ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap **catmap,
 				 u32 start,
 				 u32 end,
 				 gfp_t flags);
+int netlbl_secattr_catmap_setlong(struct netlbl_lsm_secattr_catmap **catmap,
+				  u32 offset,
+				  unsigned long bitmap,
+				  gfp_t flags);
 
 /*
  * LSM protocol operations (NetLabel LSM/kernel API)
@@ -504,6 +511,13 @@ static inline int netlbl_secattr_catmap_walk_rng(
 {
 	return -ENOENT;
 }
+static inline int netlbl_secattr_catmap_getlong(
+				    struct netlbl_lsm_secattr_catmap *catmap,
+				    u32 *offset,
+				    unsigned long *bitmap)
+{
+	return 0;
+}
 static inline int netlbl_secattr_catmap_setbit(
 				     struct netlbl_lsm_secattr_catmap **catmap,
 				     u32 bit,
@@ -519,6 +533,14 @@ static inline int netlbl_secattr_catmap_setrng(
 {
 	return 0;
 }
+static int netlbl_secattr_catmap_setlong(
+				     struct netlbl_lsm_secattr_catmap **catmap,
+				     u32 offset,
+				     unsigned long bitmap,
+				     gfp_t flags)
+{
+	return 0;
+}
 static inline int netlbl_enabled(void)
 {
 	return 0;
...
@@ -1335,10 +1335,6 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
 	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
 
 	if (tag_len > 4) {
-		secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
-		if (secattr->attr.mls.cat == NULL)
-			return -ENOMEM;
-
 		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
 						    &tag[4],
 						    tag_len - 4,
@@ -1430,10 +1426,6 @@ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
 	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
 
 	if (tag_len > 4) {
-		secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
-		if (secattr->attr.mls.cat == NULL)
-			return -ENOMEM;
-
 		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
 						     &tag[4],
 						     tag_len - 4,
@@ -1524,10 +1516,6 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
 	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
 
 	if (tag_len > 4) {
-		secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
-		if (secattr->attr.mls.cat == NULL)
-			return -ENOMEM;
-
 		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
 						    &tag[4],
 						    tag_len - 4,
...
@@ -405,6 +405,63 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
  * Security Attribute Functions
  */
 
+#define _CM_F_NONE	0x00000000
+#define _CM_F_ALLOC	0x00000001
+
+/**
+ * _netlbl_secattr_catmap_getnode - Get an individual node from a catmap
+ * @catmap: pointer to the category bitmap
+ * @offset: the requested offset
+ * @cm_flags: catmap flags, see _CM_F_*
+ * @gfp_flags: memory allocation flags
+ *
+ * Description:
+ * Iterate through the catmap looking for the node associated with @offset; if
+ * the _CM_F_ALLOC flag is set in @cm_flags and there is no associated node,
+ * one will be created and inserted into the catmap.  Returns a pointer to the
+ * node on success, NULL on failure.
+ *
+ */
+static struct netlbl_lsm_secattr_catmap *_netlbl_secattr_catmap_getnode(
+				struct netlbl_lsm_secattr_catmap **catmap,
+				u32 offset,
+				unsigned int cm_flags,
+				gfp_t gfp_flags)
+{
+	struct netlbl_lsm_secattr_catmap *iter = *catmap;
+	struct netlbl_lsm_secattr_catmap *prev = NULL;
+
+	if (iter == NULL || offset < iter->startbit)
+		goto secattr_catmap_getnode_alloc;
+	while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
+		prev = iter;
+		iter = iter->next;
+	}
+	if (iter == NULL || offset < iter->startbit)
+		goto secattr_catmap_getnode_alloc;
+
+	return iter;
+
+secattr_catmap_getnode_alloc:
+	if (!(cm_flags & _CM_F_ALLOC))
+		return NULL;
+
+	iter = netlbl_secattr_catmap_alloc(gfp_flags);
+	if (iter == NULL)
+		return NULL;
+	iter->startbit = offset & ~(NETLBL_CATMAP_SIZE - 1);
+
+	if (prev == NULL) {
+		iter->next = *catmap;
+		*catmap = iter;
+	} else {
+		iter->next = prev->next;
+		prev->next = iter;
+	}
+
+	return iter;
+}
+
 /**
  * netlbl_secattr_catmap_walk - Walk a LSM secattr catmap looking for a bit
  * @catmap: the category bitmap
@@ -520,6 +577,54 @@ int netlbl_secattr_catmap_walk_rng(struct netlbl_lsm_secattr_catmap *catmap,
 	return -ENOENT;
 }
 
+/**
+ * netlbl_secattr_catmap_getlong - Export an unsigned long bitmap
+ * @catmap: pointer to the category bitmap
+ * @offset: pointer to the requested offset
+ * @bitmap: the exported bitmap
+ *
+ * Description:
+ * Export a bitmap with an offset greater than or equal to @offset and return
+ * it in @bitmap.  The @offset must be aligned to an unsigned long and will be
+ * updated on return if different from what was requested; if the catmap is
+ * empty at the requested offset and beyond, the @offset is set to (u32)-1.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+int netlbl_secattr_catmap_getlong(struct netlbl_lsm_secattr_catmap *catmap,
+				  u32 *offset,
+				  unsigned long *bitmap)
+{
+	struct netlbl_lsm_secattr_catmap *iter;
+	u32 off = *offset;
+	u32 idx;
+
+	/* only allow aligned offsets */
+	if ((off & (BITS_PER_LONG - 1)) != 0)
+		return -EINVAL;
+
+	if (off < catmap->startbit) {
+		off = catmap->startbit;
+		*offset = off;
+	}
+	iter = _netlbl_secattr_catmap_getnode(&catmap, off, _CM_F_NONE, 0);
+	if (iter == NULL) {
+		*offset = (u32)-1;
+		return 0;
+	}
+
+	if (off < iter->startbit) {
+		off = iter->startbit;
+		*offset = off;
+	} else
+		off -= iter->startbit;
+	idx = off / NETLBL_CATMAP_MAPSIZE;
+	*bitmap = iter->bitmap[idx] >> (off % NETLBL_CATMAP_SIZE);
+
+	return 0;
+}
+
 /**
  * netlbl_secattr_catmap_setbit - Set a bit in a LSM secattr catmap
  * @catmap: pointer to the category bitmap
@@ -535,32 +640,16 @@ int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap **catmap,
 				 u32 bit,
 				 gfp_t flags)
 {
-	struct netlbl_lsm_secattr_catmap *iter = *catmap;
-	u32 node_bit;
-	u32 node_idx;
+	struct netlbl_lsm_secattr_catmap *iter;
+	u32 idx;
 
-	while (iter->next != NULL &&
-	       bit >= (iter->startbit + NETLBL_CATMAP_SIZE))
-		iter = iter->next;
-	if (bit < iter->startbit) {
-		iter = netlbl_secattr_catmap_alloc(flags);
-		if (iter == NULL)
-			return -ENOMEM;
-		iter->next = *catmap;
-		iter->startbit = bit & ~(NETLBL_CATMAP_SIZE - 1);
-		*catmap = iter;
-	} else if (bit >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
-		iter->next = netlbl_secattr_catmap_alloc(flags);
-		if (iter->next == NULL)
-			return -ENOMEM;
-		iter = iter->next;
-		iter->startbit = bit & ~(NETLBL_CATMAP_SIZE - 1);
-	}
+	iter = _netlbl_secattr_catmap_getnode(catmap, bit, _CM_F_ALLOC, flags);
+	if (iter == NULL)
+		return -ENOMEM;
 
-	/* gcc always rounds to zero when doing integer division */
-	node_idx = (bit - iter->startbit) / NETLBL_CATMAP_MAPSIZE;
-	node_bit = bit - iter->startbit - (NETLBL_CATMAP_MAPSIZE * node_idx);
-	iter->bitmap[node_idx] |= NETLBL_CATMAP_BIT << node_bit;
+	bit -= iter->startbit;
+	idx = bit / NETLBL_CATMAP_MAPSIZE;
+	iter->bitmap[idx] |= NETLBL_CATMAP_BIT << (bit % NETLBL_CATMAP_MAPSIZE);
 
 	return 0;
 }
@@ -582,34 +671,61 @@ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap **catmap,
 				 u32 end,
 				 gfp_t flags)
 {
-	int ret_val = 0;
-	struct netlbl_lsm_secattr_catmap *iter = *catmap;
-	u32 iter_max_spot;
-	u32 spot;
-	u32 orig_spot = iter->startbit;
+	int rc = 0;
+	u32 spot = start;
 
-	/* XXX - This could probably be made a bit faster by combining writes
-	 * to the catmap instead of setting a single bit each time, but for
-	 * right now skipping to the start of the range in the catmap should
-	 * be a nice improvement over calling the individual setbit function
-	 * repeatedly from a loop. */
-
-	while (iter->next != NULL &&
-	       start >= (iter->startbit + NETLBL_CATMAP_SIZE))
-		iter = iter->next;
-	iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE;
-	for (spot = start; spot <= end && ret_val == 0; spot++) {
-		if (spot >= iter_max_spot && iter->next != NULL) {
-			iter = iter->next;
-			iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE;
-		}
-		ret_val = netlbl_secattr_catmap_setbit(&iter, spot, flags);
-		if (iter->startbit < orig_spot)
-			*catmap = iter;
+	while (rc == 0 && spot <= end) {
+		if (((spot & (BITS_PER_LONG - 1)) != 0) &&
+		    ((end - spot) > BITS_PER_LONG)) {
+			rc = netlbl_secattr_catmap_setlong(catmap,
+							   spot,
+							   (unsigned long)-1,
+							   flags);
+			spot += BITS_PER_LONG;
+		} else
+			rc = netlbl_secattr_catmap_setbit(catmap,
+							  spot++,
+							  flags);
 	}
 
-	return ret_val;
+	return rc;
+}
+
+/**
+ * netlbl_secattr_catmap_setlong - Import an unsigned long bitmap
+ * @catmap: pointer to the category bitmap
+ * @offset: offset to the start of the imported bitmap
+ * @bitmap: the bitmap to import
+ * @flags: memory allocation flags
+ *
+ * Description:
+ * Import the bitmap specified in @bitmap into @catmap, using the offset
+ * in @offset.  The offset must be aligned to an unsigned long.  Returns zero
+ * on success, negative values on failure.
+ *
+ */
+int netlbl_secattr_catmap_setlong(struct netlbl_lsm_secattr_catmap **catmap,
+				  u32 offset,
+				  unsigned long bitmap,
+				  gfp_t flags)
+{
+	struct netlbl_lsm_secattr_catmap *iter;
+	u32 idx;
+
+	/* only allow aligned offsets */
+	if ((offset & (BITS_PER_LONG - 1)) != 0)
+		return -EINVAL;
+
+	iter = _netlbl_secattr_catmap_getnode(catmap,
+					      offset, _CM_F_ALLOC, flags);
+	if (iter == NULL)
+		return -ENOMEM;
+
+	offset -= iter->startbit;
+	idx = offset / NETLBL_CATMAP_MAPSIZE;
+	iter->bitmap[idx] |= bitmap << (offset % NETLBL_CATMAP_MAPSIZE);
+
+	return 0;
 }
 
 /*
...
@@ -89,48 +89,33 @@ int ebitmap_netlbl_export(struct ebitmap *ebmap,
 			  struct netlbl_lsm_secattr_catmap **catmap)
 {
 	struct ebitmap_node *e_iter = ebmap->node;
-	struct netlbl_lsm_secattr_catmap *c_iter;
-	u32 cmap_idx, cmap_sft;
-	int i;
-
-	/* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64,
-	 * however, it is not always compatible with an array of unsigned long
-	 * in ebitmap_node.
-	 * In addition, you should pay attention the following implementation
-	 * assumes unsigned long has a width equal with or less than 64-bit.
-	 */
+	unsigned long e_map;
+	u32 offset;
+	unsigned int iter;
+	int rc;
 
 	if (e_iter == NULL) {
 		*catmap = NULL;
 		return 0;
 	}
 
-	c_iter = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
-	if (c_iter == NULL)
-		return -ENOMEM;
-	*catmap = c_iter;
-	c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1);
+	if (*catmap != NULL)
+		netlbl_secattr_catmap_free(*catmap);
+	*catmap = NULL;
 
 	while (e_iter) {
-		for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
-			unsigned int delta, e_startbit, c_endbit;
-
-			e_startbit = e_iter->startbit + i * EBITMAP_UNIT_SIZE;
-			c_endbit = c_iter->startbit + NETLBL_CATMAP_SIZE;
-			if (e_startbit >= c_endbit) {
-				c_iter->next
-					= netlbl_secattr_catmap_alloc(GFP_ATOMIC);
-				if (c_iter->next == NULL)
-					goto netlbl_export_failure;
-				c_iter = c_iter->next;
-				c_iter->startbit
-					= e_startbit & ~(NETLBL_CATMAP_SIZE - 1);
+		offset = e_iter->startbit;
+		for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
+			e_map = e_iter->maps[iter];
+			if (e_map != 0) {
+				rc = netlbl_secattr_catmap_setlong(catmap,
+								   offset,
+								   e_map,
+								   GFP_ATOMIC);
+				if (rc != 0)
+					goto netlbl_export_failure;
 			}
-			delta = e_startbit - c_iter->startbit;
-			cmap_idx = delta / NETLBL_CATMAP_MAPSIZE;
-			cmap_sft = delta % NETLBL_CATMAP_MAPSIZE;
-			c_iter->bitmap[cmap_idx]
-				|= e_iter->maps[i] << cmap_sft;
+			offset += EBITMAP_UNIT_SIZE;
 		}
 		e_iter = e_iter->next;
 	}
@@ -155,56 +140,42 @@ int ebitmap_netlbl_export(struct ebitmap *ebmap,
 int ebitmap_netlbl_import(struct ebitmap *ebmap,
 			  struct netlbl_lsm_secattr_catmap *catmap)
 {
+	int rc;
 	struct ebitmap_node *e_iter = NULL;
-	struct ebitmap_node *emap_prev = NULL;
-	struct netlbl_lsm_secattr_catmap *c_iter = catmap;
-	u32 c_idx, c_pos, e_idx, e_sft;
-
-	/* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64,
-	 * however, it is not always compatible with an array of unsigned long
-	 * in ebitmap_node.
-	 * In addition, you should pay attention the following implementation
-	 * assumes unsigned long has a width equal with or less than 64-bit.
-	 */
-
-	do {
-		for (c_idx = 0; c_idx < NETLBL_CATMAP_MAPCNT; c_idx++) {
-			unsigned int delta;
-			u64 map = c_iter->bitmap[c_idx];
-
-			if (!map)
-				continue;
-
-			c_pos = c_iter->startbit
-				+ c_idx * NETLBL_CATMAP_MAPSIZE;
-			if (!e_iter
-			    || c_pos >= e_iter->startbit + EBITMAP_SIZE) {
-				e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
-				if (!e_iter)
-					goto netlbl_import_failure;
-				e_iter->startbit
-					= c_pos - (c_pos % EBITMAP_SIZE);
-				if (emap_prev == NULL)
-					ebmap->node = e_iter;
-				else
-					emap_prev->next = e_iter;
-				emap_prev = e_iter;
-			}
-			delta = c_pos - e_iter->startbit;
-			e_idx = delta / EBITMAP_UNIT_SIZE;
-			e_sft = delta % EBITMAP_UNIT_SIZE;
-			while (map) {
-				e_iter->maps[e_idx++] |= map & (-1UL);
-				map = EBITMAP_SHIFT_UNIT_SIZE(map);
-			}
-		}
-		c_iter = c_iter->next;
-	} while (c_iter);
-	if (e_iter != NULL)
-		ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
-	else
-		ebitmap_destroy(ebmap);
+	struct ebitmap_node *e_prev = NULL;
+	u32 offset = 0, idx;
+	unsigned long bitmap;
+
+	for (;;) {
+		rc = netlbl_secattr_catmap_getlong(catmap, &offset, &bitmap);
+		if (rc < 0)
+			goto netlbl_import_failure;
+		if (offset == (u32)-1)
+			return 0;
+
+		if (e_iter == NULL ||
+		    offset >= e_iter->startbit + EBITMAP_SIZE) {
+			e_prev = e_iter;
+			e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
+			if (e_iter == NULL)
+				goto netlbl_import_failure;
+			e_iter->startbit = offset & ~(EBITMAP_SIZE - 1);
+			if (e_prev == NULL)
+				ebmap->node = e_iter;
+			else
+				e_prev->next = e_iter;
+			ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
+		}
+
+		/* offset will always be aligned to an unsigned long */
+		idx = EBITMAP_NODE_INDEX(e_iter, offset);
+		e_iter->maps[idx] = bitmap;
+
+		/* next */
+		offset += EBITMAP_UNIT_SIZE;
+	}
 
+	/* NOTE: we should never reach this return */
 	return 0;
 
 netlbl_import_failure:
...
@@ -435,10 +435,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
 	sap->flags |= NETLBL_SECATTR_MLS_CAT;
 	sap->attr.mls.lvl = level;
-	sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
-	if (!sap->attr.mls.cat)
-		return -ENOMEM;
-	sap->attr.mls.cat->startbit = 0;
+	sap->attr.mls.cat = NULL;
 
 	for (cat = 1, cp = catset, byte = 0; byte < len; cp++, byte++)
 		for (m = 0x80; m != 0; m >>= 1, cat++) {
...