/* NOTE(review): tail of an INTERVAL_TREE_DEFINE() invocation (see
 * include/linux/interval_tree_generic.h) whose opening line is outside
 * this chunk.  It instantiates the vma_interval_tree_* helpers keyed by
 * pgoff, with shared.linear.rb_subtree_last as the augmented subtree-max
 * field.  The apparent double comma before vma_interval_tree is presumably
 * the empty ISSTATIC argument (extern linkage) -- confirm against the
 * macro's parameter list. */
25 unsigned long,
shared.linear.rb_subtree_last,
26 vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)
/* NOTE(review): fragment of vma_interval_tree_insert_after() -- the
 * enclosing signature and the rb-tree descent that sets `parent` and
 * `link` are not visible in this chunk; the lines below are its prologue
 * and epilogue only. */
/* Cache the node's interval end once; it seeds the augmented value. */
35 unsigned long last = vma_last_pgoff(node);
/* Caller contract: node must share its interval start with prev. */
37 VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev));
/* A leaf's subtree-max is its own `last`; fixed up by the augmented
 * insert below as the node is rebalanced into place. */
56 node->shared.linear.rb_subtree_last = last;
57 rb_link_node(&node->shared.linear.rb, &parent->
shared.
linear.rb, link);
/* Augmented insert keeps rb_subtree_last consistent on rotation. */
58 rb_insert_augmented(&node->shared.linear.rb, root,
59 &vma_interval_tree_augment);
62 static inline unsigned long avc_start_pgoff(
struct anon_vma_chain *avc)
64 return vma_start_pgoff(avc->
vma);
67 static inline unsigned long avc_last_pgoff(
struct anon_vma_chain *avc)
69 return vma_last_pgoff(avc->
vma);
/* NOTE(review): tail of a second INTERVAL_TREE_DEFINE() invocation whose
 * opening line is outside this chunk.  `static inline` fills the ISSTATIC
 * slot, so the generated __anon_vma_interval_tree_* helpers are private
 * to this file and wrapped by the extern functions below. */
73 avc_start_pgoff, avc_last_pgoff,
74 static inline, __anon_vma_interval_tree)
/* NOTE(review): interior of anon_vma_interval_tree_insert(); its
 * signature, the #endif closing the block below, and the function's
 * closing brace are not visible in this chunk. */
79 #ifdef CONFIG_DEBUG_VM_RB
/* Snapshot the keys at insert time so the verify path can later detect
 * a vma whose pgoff range changed while still linked into the tree. */
80 node->cached_vma_start = avc_start_pgoff(node);
81 node->cached_vma_last = avc_last_pgoff(node);
/* Delegate to the macro-generated static-inline insert. */
83 __anon_vma_interval_tree_insert(node, root);
/* NOTE(review): interior of anon_vma_interval_tree_remove() -- a thin
 * extern wrapper over the macro-generated remove; signature and braces
 * are outside this chunk. */
89 __anon_vma_interval_tree_remove(node, root);
/* NOTE(review): tail of anon_vma_interval_tree_iter_first()'s parameter
 * list and its body; the return type and opening of the signature are
 * outside this chunk.  [first, last] is presumably an inclusive pgoff
 * range, matching the generated iterator -- confirm upstream. */
94 unsigned long first,
unsigned long last)
96 return __anon_vma_interval_tree_iter_first(root, first, last);
/* NOTE(review): tail of anon_vma_interval_tree_iter_next()'s parameter
 * list and its body; continues iteration from `node` over entries
 * overlapping [first, last] via the macro-generated helper. */
101 unsigned long first,
unsigned long last)
103 return __anon_vma_interval_tree_iter_next(node, first, last);
/* NOTE(review): debug-only verify helper; the function signature between
 * the #ifdef and these statements, the closing brace, and the matching
 * #endif are outside this chunk. */
106 #ifdef CONFIG_DEBUG_VM_RB
/* Warn (once per site) if the node's pgoff keys drifted from the values
 * cached at insert time -- i.e. the vma moved while still in the tree,
 * which would corrupt interval-tree ordering. */
109 WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
110 WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));