Commit d3eb004b
authored Sep 11, 2002 by David Woodhouse

Merge infradead.org:/home/torvalds/BK/linux-2.5
into infradead.org:/home/dwmw2/BK/rbtree-2.5

parents 0e9387ab 972d3674
Showing 6 changed files with 151 additions and 84 deletions:

include/linux/mm.h      +1  -1
include/linux/rbtree.h  +26 -19
include/linux/sched.h   +1  -1
lib/rbtree.c            +87 -18
mm/mmap.c               +18 -16
net/sched/sch_htb.c     +18 -29
include/linux/mm.h
@@ -54,7 +54,7 @@ struct vm_area_struct {
         pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
         unsigned long vm_flags;         /* Flags, listed below. */
 
-        rb_node_t vm_rb;
+        struct rb_node vm_rb;
 
         /*
          * For areas with an address space and backing store,
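The change above only swaps the type of the node embedded in each VMA. For orientation, the containing vm_area_struct is recovered from such an embedded struct rb_node with the rb_entry() macro from <linux/rbtree.h>; a minimal sketch (the rb_to_vma helper name is illustrative, not something added by this commit):

#include <linux/rbtree.h>
#include <linux/mm.h>

/* Illustrative only: map a node embedded as ->vm_rb back to its VMA. */
static inline struct vm_area_struct *rb_to_vma(struct rb_node *node)
{
        return rb_entry(node, struct vm_area_struct, vm_rb);
}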
include/linux/rbtree.h
@@ -34,7 +34,7 @@
 static inline struct page * rb_search_page_cache(struct inode * inode,
                                                  unsigned long offset)
 {
-        rb_node_t * n = inode->i_rb_page_cache.rb_node;
+        struct rb_node * n = inode->i_rb_page_cache.rb_node;
         struct page * page;
 
         while (n)
@@ -53,10 +53,10 @@ static inline struct page * rb_search_page_cache(struct inode * inode,
 static inline struct page * __rb_insert_page_cache(struct inode * inode,
                                                    unsigned long offset,
-                                                   rb_node_t * node)
+                                                   struct rb_node * node)
 {
-        rb_node_t ** p = &inode->i_rb_page_cache.rb_node;
-        rb_node_t * parent = NULL;
+        struct rb_node ** p = &inode->i_rb_page_cache.rb_node;
+        struct rb_node * parent = NULL;
         struct page * page;
 
         while (*p)
@@ -79,7 +79,7 @@ static inline struct page * __rb_insert_page_cache(struct inode * inode,
 static inline struct page * rb_insert_page_cache(struct inode * inode,
                                                  unsigned long offset,
-                                                 rb_node_t * node)
+                                                 struct rb_node * node)
 {
         struct page * ret;
         if ((ret = __rb_insert_page_cache(inode, offset, node)))
@@ -97,31 +97,38 @@ static inline struct page * rb_insert_page_cache(struct inode * inode,
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 
-typedef struct rb_node_s
+struct rb_node
 {
-        struct rb_node_s * rb_parent;
+        struct rb_node * rb_parent;
         int rb_color;
 #define RB_RED          0
 #define RB_BLACK        1
-        struct rb_node_s * rb_right;
-        struct rb_node_s * rb_left;
-}
-rb_node_t;
+        struct rb_node * rb_right;
+        struct rb_node * rb_left;
+};
 
-typedef struct rb_root_s
+struct rb_root
 {
-        struct rb_node_s * rb_node;
-}
-rb_root_t;
+        struct rb_node * rb_node;
+};
 
-#define RB_ROOT (rb_root_t) { NULL, }
+#define RB_ROOT (struct rb_root) { NULL, }
 #define rb_entry(ptr, type, member)                                     \
         ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
 
-extern void rb_insert_color(rb_node_t *, rb_root_t *);
-extern void rb_erase(rb_node_t *, rb_root_t *);
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(struct rb_node *);
+extern struct rb_node *rb_prev(struct rb_node *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                            struct rb_root *root);
 
-static inline void rb_link_node(rb_node_t * node, rb_node_t * parent, rb_node_t ** rb_link)
+static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
+                                struct rb_node ** rb_link)
 {
         node->rb_parent = parent;
         node->rb_color = RB_RED;
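The header's own __rb_insert_page_cache() example above already shows the usage pattern under the new names: walk from the struct rb_root down to the empty link where the node belongs, hang it there with rb_link_node(), then let rb_insert_color() rebalance. A small self-contained sketch of the same pattern, assuming the post-change header; struct my_item, my_insert and the key field are illustrative names, not part of the commit:

#include <linux/rbtree.h>

struct my_item {
        struct rb_node node;            /* embedded node, like vm_area_struct.vm_rb */
        unsigned long key;
};

/* Insert 'new' keyed by new->key; return the existing item on a duplicate key. */
static struct my_item *my_insert(struct rb_root *root, struct my_item *new)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;

        while (*p) {
                struct my_item *item = rb_entry(*p, struct my_item, node);

                parent = *p;
                if (new->key < item->key)
                        p = &(*p)->rb_left;
                else if (new->key > item->key)
                        p = &(*p)->rb_right;
                else
                        return item;            /* key already present */
        }

        rb_link_node(&new->node, parent, p);    /* link as a red leaf... */
        rb_insert_color(&new->node, root);      /* ...then rebalance the tree */
        return NULL;
}

A root would be declared as struct rb_root my_root = RB_ROOT; using the new initializer.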
include/linux/sched.h
@@ -172,7 +172,7 @@ struct namespace;
 struct kioctx;
 
 struct mm_struct {
         struct vm_area_struct * mmap;           /* list of VMAs */
-        rb_root_t mm_rb;
+        struct rb_root mm_rb;
         struct vm_area_struct * mmap_cache;     /* last find_vma result */
         pgd_t * pgd;
         atomic_t mm_users;                      /* How many users with user space? */
lib/rbtree.c
 /*
   Red Black Trees
   (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -22,9 +23,9 @@
 #include <linux/rbtree.h>
 #include <linux/module.h>
 
-static void __rb_rotate_left(rb_node_t * node, rb_root_t * root)
+static void __rb_rotate_left(struct rb_node * node, struct rb_root * root)
 {
-        rb_node_t * right = node->rb_right;
+        struct rb_node * right = node->rb_right;
 
         if ((node->rb_right = right->rb_left))
                 right->rb_left->rb_parent = node;
@@ -42,9 +43,9 @@ static void __rb_rotate_left(rb_node_t * node, rb_root_t * root)
         node->rb_parent = right;
 }
 
-static void __rb_rotate_right(rb_node_t * node, rb_root_t * root)
+static void __rb_rotate_right(struct rb_node * node, struct rb_root * root)
 {
-        rb_node_t * left = node->rb_left;
+        struct rb_node * left = node->rb_left;
 
         if ((node->rb_left = left->rb_right))
                 left->rb_right->rb_parent = node;
@@ -62,9 +63,9 @@ static void __rb_rotate_right(rb_node_t * node, rb_root_t * root)
         node->rb_parent = left;
 }
 
-void rb_insert_color(rb_node_t * node, rb_root_t * root)
+void rb_insert_color(struct rb_node * node, struct rb_root * root)
 {
-        rb_node_t * parent, * gparent;
+        struct rb_node * parent, * gparent;
 
         while ((parent = node->rb_parent) && parent->rb_color == RB_RED)
         {
@@ -73,7 +74,7 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
                 if (parent == gparent->rb_left)
                 {
                         {
-                                register rb_node_t * uncle = gparent->rb_right;
+                                register struct rb_node * uncle = gparent->rb_right;
                                 if (uncle && uncle->rb_color == RB_RED)
                                 {
                                         uncle->rb_color = RB_BLACK;
@@ -86,7 +87,7 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
                         if (parent->rb_right == node)
                         {
-                                register rb_node_t * tmp;
+                                register struct rb_node * tmp;
                                 __rb_rotate_left(parent, root);
                                 tmp = parent;
                                 parent = node;
@@ -98,7 +99,7 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
                         __rb_rotate_right(gparent, root);
                 } else {
                         {
-                                register rb_node_t * uncle = gparent->rb_left;
+                                register struct rb_node * uncle = gparent->rb_left;
                                 if (uncle && uncle->rb_color == RB_RED)
                                 {
                                         uncle->rb_color = RB_BLACK;
@@ -111,7 +112,7 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
                         if (parent->rb_left == node)
                         {
-                                register rb_node_t * tmp;
+                                register struct rb_node * tmp;
                                 __rb_rotate_right(parent, root);
                                 tmp = parent;
                                 parent = node;
@@ -128,10 +129,10 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
 }
 EXPORT_SYMBOL(rb_insert_color);
 
-static void __rb_erase_color(rb_node_t * node, rb_node_t * parent,
-                             rb_root_t * root)
+static void __rb_erase_color(struct rb_node * node, struct rb_node * parent,
+                             struct rb_root * root)
 {
-        rb_node_t * other;
+        struct rb_node * other;
 
         while ((!node || node->rb_color == RB_BLACK) && node != root->rb_node)
         {
@@ -159,7 +160,7 @@ static void __rb_erase_color(rb_node_t * node, rb_node_t * parent,
                                 if (!other->rb_right ||
                                     other->rb_right->rb_color == RB_BLACK)
                                 {
-                                        register rb_node_t * o_left;
+                                        register struct rb_node * o_left;
                                         if ((o_left = other->rb_left))
                                                 o_left->rb_color = RB_BLACK;
                                         other->rb_color = RB_RED;
@@ -199,7 +200,7 @@ static void __rb_erase_color(rb_node_t * node, rb_node_t * parent,
                                 if (!other->rb_left ||
                                     other->rb_left->rb_color == RB_BLACK)
                                 {
-                                        register rb_node_t * o_right;
+                                        register struct rb_node * o_right;
                                         if ((o_right = other->rb_right))
                                                 o_right->rb_color = RB_BLACK;
                                         other->rb_color = RB_RED;
@@ -220,9 +221,9 @@ static void __rb_erase_color(rb_node_t * node, rb_node_t * parent,
                 node->rb_color = RB_BLACK;
 }
 
-void rb_erase(rb_node_t * node, rb_root_t * root)
+void rb_erase(struct rb_node * node, struct rb_root * root)
 {
-        rb_node_t * child, * parent;
+        struct rb_node * child, * parent;
         int color;
 
         if (!node->rb_left)
@@ -231,7 +232,7 @@ void rb_erase(rb_node_t * node, rb_root_t * root)
                 child = node->rb_left;
         else
         {
-                rb_node_t * old = node, * left;
+                struct rb_node * old = node, * left;
 
                 node = node->rb_right;
                 while ((left = node->rb_left))
@@ -294,3 +295,71 @@ void rb_erase(rb_node_t * node, rb_root_t * root)
                 __rb_erase_color(child, parent, root);
 }
 EXPORT_SYMBOL(rb_erase);
+
+struct rb_node *rb_next(struct rb_node *node)
+{
+        /* If we have a right-hand child, go down and then left as far
+           as we can. */
+        if (node->rb_right) {
+                node = node->rb_right;
+                while (node->rb_left)
+                        node = node->rb_left;
+                return node;
+        }
+
+        /* No right-hand children.  Everything down and left is
+           smaller than us, so any 'next' node must be in the general
+           direction of our parent. Go up the tree; any time the
+           ancestor is a right-hand child of its parent, keep going
+           up. First time it's a left-hand child of its parent, said
+           parent is our 'next' node. */
+        while (node->rb_parent && node == node->rb_parent->rb_right)
+                node = node->rb_parent;
+
+        return node->rb_parent;
+}
+EXPORT_SYMBOL(rb_next);
+
+struct rb_node *rb_prev(struct rb_node *node)
+{
+        /* If we have a left-hand child, go down and then right as far
+           as we can. */
+        if (node->rb_left) {
+                node = node->rb_left;
+                while (node->rb_right)
+                        node = node->rb_right;
+                return node;
+        }
+
+        /* No left-hand children. Go up till we find an ancestor which
+           is a right-hand child of its parent */
+        while (node->rb_parent && node == node->rb_parent->rb_left)
+                node = node->rb_parent;
+
+        return node->rb_parent;
+}
+EXPORT_SYMBOL(rb_prev);
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                     struct rb_root *root)
+{
+        struct rb_node *parent = victim->rb_parent;
+
+        /* Set the surrounding nodes to point to the replacement */
+        if (parent) {
+                if (victim == parent->rb_left)
+                        parent->rb_left = new;
+                else
+                        parent->rb_right = new;
+        } else {
+                root->rb_node = new;
+        }
+        if (victim->rb_left)
+                victim->rb_left->rb_parent = new;
+        if (victim->rb_right)
+                victim->rb_right->rb_parent = new;
+
+        /* Copy the pointers/colour from the victim to the replacement */
+        *new = *victim;
+}
+EXPORT_SYMBOL(rb_replace_node);
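The new rb_next()/rb_prev() walk the tree in key order using only the parent pointers, so a full in-order traversal is: descend to the leftmost node, then call rb_next() until it returns NULL. A hedged sketch, reusing the illustrative struct my_item from the sketch after include/linux/rbtree.h:

#include <linux/kernel.h>
#include <linux/rbtree.h>

/* Illustrative in-order walk of a tree of struct my_item (defined above). */
static void my_walk(struct rb_root *root)
{
        struct rb_node *n = root->rb_node;

        if (!n)
                return;
        while (n->rb_left)                      /* leftmost node holds the smallest key */
                n = n->rb_left;
        for (; n; n = rb_next(n)) {
                struct my_item *item = rb_entry(n, struct my_item, node);

                printk("key %lu\n", item->key); /* placeholder visit */
        }
}

rb_replace_node() copies the victim's parent, colour and child pointers into the replacement and repoints the neighbours at it, so it is only safe when the new node sorts at exactly the same position as the victim; anything else still needs rb_erase() followed by a fresh insert.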
mm/mmap.c
@@ -245,7 +245,7 @@ static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flag
 }
 
 #ifdef DEBUG_MM_RB
-static int browse_rb(rb_node_t * rb_node) {
+static int browse_rb(struct rb_node * rb_node) {
         int i = 0;
         if (rb_node) {
                 i++;
@@ -277,10 +277,11 @@ static void validate_mm(struct mm_struct * mm) {
 static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned long addr,
                                                 struct vm_area_struct ** pprev,
-                                                rb_node_t *** rb_link, rb_node_t ** rb_parent)
+                                                struct rb_node *** rb_link,
+                                                struct rb_node ** rb_parent)
 {
         struct vm_area_struct * vma;
-        rb_node_t ** __rb_link, * __rb_parent, * rb_prev;
+        struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
 
         __rb_link = &mm->mm_rb.rb_node;
         rb_prev = __rb_parent = NULL;
@@ -311,8 +312,8 @@ static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned
         return vma;
 }
 
-static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
-                                   rb_node_t * rb_parent)
+static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma,
+                                   struct vm_area_struct * prev, struct rb_node * rb_parent)
 {
         if (prev) {
                 vma->vm_next = prev->vm_next;
@@ -327,7 +328,7 @@ static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct
 }
 
 static inline void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma,
-                                 rb_node_t ** rb_link, rb_node_t * rb_parent)
+                                 struct rb_node ** rb_link, struct rb_node * rb_parent)
 {
         rb_link_node(&vma->vm_rb, rb_parent, rb_link);
         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
@@ -353,7 +354,7 @@ static inline void __vma_link_file(struct vm_area_struct * vma)
 }
 
 static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
-                       rb_node_t ** rb_link, rb_node_t * rb_parent)
+                       struct rb_node ** rb_link, struct rb_node * rb_parent)
 {
         __vma_link_list(mm, vma, prev, rb_parent);
         __vma_link_rb(mm, vma, rb_link, rb_parent);
@@ -361,7 +362,7 @@ static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, stru
 }
 
 static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
-                            rb_node_t ** rb_link, rb_node_t * rb_parent)
+                            struct rb_node ** rb_link, struct rb_node * rb_parent)
 {
         spin_lock(&mm->page_table_lock);
         lock_vma_mappings(vma);
@@ -374,7 +375,8 @@ static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma,
 }
 
 static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
-                     rb_node_t * rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags)
+                     struct rb_node * rb_parent,
+                     unsigned long addr, unsigned long end, unsigned long vm_flags)
 {
         spinlock_t * lock = &mm->page_table_lock;
         if (!prev) {
@@ -426,7 +428,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
         unsigned int vm_flags;
         int correct_wcount = 0;
         int error;
-        rb_node_t ** rb_link, * rb_parent;
+        struct rb_node ** rb_link, * rb_parent;
         unsigned long charged = 0;
 
         if (file && (!file->f_op || !file->f_op->mmap))
@@ -698,7 +700,7 @@ struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
         /* (Cache hit rate is typically around 35%.) */
         vma = mm->mmap_cache;
         if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-                rb_node_t * rb_node;
+                struct rb_node * rb_node;
 
                 rb_node = mm->mm_rb.rb_node;
                 vma = NULL;
@@ -728,7 +730,7 @@ struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
                                       struct vm_area_struct ** pprev)
 {
         struct vm_area_struct * vma = NULL, * prev = NULL;
-        rb_node_t * rb_node;
+        struct rb_node * rb_node;
         if (!mm)
                 goto out;
@@ -1158,7 +1160,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
         struct mm_struct * mm = current->mm;
         struct vm_area_struct * vma, * prev;
         unsigned long flags;
-        rb_node_t ** rb_link, * rb_parent;
+        struct rb_node ** rb_link, * rb_parent;
 
         len = PAGE_ALIGN(len);
         if (!len)
@@ -1236,7 +1238,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 void build_mmap_rb(struct mm_struct * mm)
 {
         struct vm_area_struct * vma;
-        rb_node_t ** rb_link, * rb_parent;
+        struct rb_node ** rb_link, * rb_parent;
 
         mm->mm_rb = RB_ROOT;
         rb_link = &mm->mm_rb.rb_node;
@@ -1319,7 +1321,7 @@ void exit_mmap(struct mm_struct * mm)
 void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 {
         struct vm_area_struct * __vma, * prev;
-        rb_node_t ** rb_link, * rb_parent;
+        struct rb_node ** rb_link, * rb_parent;
 
         __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
         if (__vma && __vma->vm_start < vma->vm_end)
@@ -1332,7 +1334,7 @@ void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 {
         struct vm_area_struct * __vma, * prev;
-        rb_node_t ** rb_link, * rb_parent;
+        struct rb_node ** rb_link, * rb_parent;
 
         __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
         if (__vma && __vma->vm_start < vma->vm_end)
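Most of the hunks above are mechanical renames, but find_vma() (context in the -698 hunk) is the clearest illustration of why the VMAs sit in an rbtree: it descends mm->mm_rb to the lowest VMA whose vm_end lies above addr. A simplified sketch of that lookup with the new types, without the mmap_cache fast path (vma_lookup_sketch is an illustrative name, not the kernel function):

#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Simplified find_vma(): lowest VMA with vm_end > addr, or NULL. */
static struct vm_area_struct *vma_lookup_sketch(struct mm_struct *mm,
                                                unsigned long addr)
{
        struct rb_node *rb_node = mm->mm_rb.rb_node;
        struct vm_area_struct *vma = NULL;

        while (rb_node) {
                struct vm_area_struct *tmp =
                        rb_entry(rb_node, struct vm_area_struct, vm_rb);

                if (tmp->vm_end > addr) {
                        vma = tmp;              /* candidate; look for a lower one */
                        if (tmp->vm_start <= addr)
                                break;          /* addr falls inside this VMA */
                        rb_node = rb_node->rb_left;
                } else
                        rb_node = rb_node->rb_right;
        }
        return vma;
}

find_vma_prepare() follows the same descent but keeps the struct rb_node **rb_link / *rb_parent pair, which is exactly what rb_link_node() needs in __vma_link_rb().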
net/sched/sch_htb.c
@@ -162,12 +162,12 @@ struct htb_class
                         struct list_head drop_list;
                 } leaf;
                 struct htb_class_inner {
-                        rb_root_t feed[TC_HTB_NUMPRIO];  /* feed trees */
-                        rb_node_t *ptr[TC_HTB_NUMPRIO];  /* current class ptr */
+                        struct rb_root feed[TC_HTB_NUMPRIO];  /* feed trees */
+                        struct rb_node *ptr[TC_HTB_NUMPRIO];  /* current class ptr */
                 } inner;
         } un;
-        rb_node_t node[TC_HTB_NUMPRIO];          /* node for self or feed tree */
-        rb_node_t pq_node;                       /* node for event queue */
+        struct rb_node node[TC_HTB_NUMPRIO];     /* node for self or feed tree */
+        struct rb_node pq_node;                  /* node for event queue */
         unsigned long pq_key;                    /* the same type as jiffies global */
 
         int prio_activity;                       /* for which prios are we active */
@@ -207,12 +207,12 @@ struct htb_sched
         struct list_head drops[TC_HTB_NUMPRIO];  /* active leaves (for drops) */
 
         /* self list - roots of self generating tree */
-        rb_root_t row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
+        struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
         int row_mask[TC_HTB_MAXDEPTH];
-        rb_node_t *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
+        struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
 
         /* self wait list - roots of wait PQs per row */
-        rb_root_t wait_pq[TC_HTB_MAXDEPTH];
+        struct rb_root wait_pq[TC_HTB_MAXDEPTH];
 
         /* time of nearest event per level (row) */
         unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
@@ -324,9 +324,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
 }
 
 #ifdef HTB_DEBUG
-static void htb_next_rb_node(rb_node_t **n);
+static void htb_next_rb_node(struct rb_node **n);
 #define HTB_DUMTREE(root,memb) if(root) { \
-        rb_node_t *n = (root)->rb_node; \
+        struct rb_node *n = (root)->rb_node; \
         while (n->rb_left) n = n->rb_left; \
         while (n) { \
                 struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
@@ -375,10 +375,10 @@ static void htb_debug_dump (struct htb_sched *q)
  * Routine adds class to the list (actually tree) sorted by classid.
  * Make sure that class is not already on such list for given prio.
  */
-static void htb_add_to_id_tree (HTB_ARGQ rb_root_t *root,
+static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
                 struct htb_class *cl,int prio)
 {
-        rb_node_t **p = &root->rb_node, *parent = NULL;
+        struct rb_node **p = &root->rb_node, *parent = NULL;
         HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
 #ifdef HTB_DEBUG
         if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
@@ -411,7 +411,7 @@ static void htb_add_to_id_tree (HTB_ARGQ rb_root_t *root,
 static void htb_add_to_wait_tree (struct htb_sched *q,
                 struct htb_class *cl,long delay,int debug_hint)
 {
-        rb_node_t **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
+        struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
         HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
 #ifdef HTB_DEBUG
         if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
@@ -447,20 +447,9 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
  * When we are past last key we return NULL.
  * Average complexity is 2 steps per call.
  */
-static void htb_next_rb_node(rb_node_t **n)
+static void htb_next_rb_node(struct rb_node **n)
 {
-        rb_node_t *p;
-        if ((*n)->rb_right) {
-                *n = (*n)->rb_right;
-                while ((*n)->rb_left)
-                        *n = (*n)->rb_left;
-                return;
-        }
-        while ((p = (*n)->rb_parent) != NULL) {
-                if (p->rb_left == *n) break;
-                *n = p;
-        }
-        *n = p;
+        *n = rb_next(*n);
 }
 
 /**
@@ -869,7 +858,7 @@ static long htb_do_events(struct htb_sched *q,int level)
         for (i = 0; i < 500; i++) {
                 struct htb_class *cl;
                 long diff;
-                rb_node_t *p = q->wait_pq[level].rb_node;
+                struct rb_node *p = q->wait_pq[level].rb_node;
                 if (!p) return 0;
                 while (p->rb_left) p = p->rb_left;
@@ -906,12 +895,12 @@ static long htb_do_events(struct htb_sched *q,int level)
  * Find leaf where current feed pointers points to.
  */
 static struct htb_class *
-htb_lookup_leaf(rb_root_t *tree,int prio,rb_node_t **pptr)
+htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr)
 {
         int i;
         struct {
-                rb_node_t *root;
-                rb_node_t **pptr;
+                struct rb_node *root;
+                struct rb_node **pptr;
         } stk[TC_HTB_MAXDEPTH], *sp = stk;
 
         sp->root = tree->rb_node;
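With rb_next() in the shared library, htb_next_rb_node() collapses to a single call: it advances a per-priority round-robin pointer to the in-order successor in a feed tree, and yields NULL once past the last class. A hedged sketch of how a caller might restart from the leftmost class when that happens (htb_advance_ptr_sketch is an illustrative name; the actual wrap-around handling in sch_htb.c is not shown in this diff):

#include <linux/rbtree.h>

/* Illustrative round-robin step: advance *ptr, wrapping to the leftmost node. */
static void htb_advance_ptr_sketch(struct rb_root *root, struct rb_node **ptr)
{
        if (*ptr)
                *ptr = rb_next(*ptr);          /* NULL once past the last node */
        if (!*ptr) {
                struct rb_node *n = root->rb_node;

                while (n && n->rb_left)
                        n = n->rb_left;
                *ptr = n;                      /* restart from the smallest key */
        }
}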