@@ -907,7 +907,7 @@ static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
907907 if (radix_tree_insert (root , idx , t_page )) {
908908 null_free_page (t_page );
909909 t_page = radix_tree_lookup (root , idx );
910- WARN_ON (!t_page || t_page -> page -> index != idx );
910+ WARN_ON (!t_page || t_page -> page -> private != idx );
911911 } else if (is_cache )
912912 nullb -> dev -> curr_cache += PAGE_SIZE ;
913913
@@ -930,7 +930,7 @@ static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
930930 (void * * )t_pages , pos , FREE_BATCH );
931931
932932 for (i = 0 ; i < nr_pages ; i ++ ) {
933- pos = t_pages [i ]-> page -> index ;
933+ pos = t_pages [i ]-> page -> private ;
934934 ret = radix_tree_delete_item (root , pos , t_pages [i ]);
935935 WARN_ON (ret != t_pages [i ]);
936936 null_free_page (ret );
@@ -956,7 +956,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
956956
957957 root = is_cache ? & nullb -> dev -> cache : & nullb -> dev -> data ;
958958 t_page = radix_tree_lookup (root , idx );
959- WARN_ON (t_page && t_page -> page -> index != idx );
959+ WARN_ON (t_page && t_page -> page -> private != idx );
960960
961961 if (t_page && (for_write || test_bit (sector_bit , t_page -> bitmap )))
962962 return t_page ;
@@ -999,7 +999,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
999999
10001000 spin_lock_irq (& nullb -> lock );
10011001 idx = sector >> PAGE_SECTORS_SHIFT ;
1002- t_page -> page -> index = idx ;
1002+ t_page -> page -> private = idx ;
10031003 t_page = null_radix_tree_insert (nullb , idx , t_page , !ignore_cache );
10041004 radix_tree_preload_end ();
10051005
@@ -1019,7 +1019,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
10191019 struct nullb_page * t_page , * ret ;
10201020 void * dst , * src ;
10211021
1022- idx = c_page -> page -> index ;
1022+ idx = c_page -> page -> private ;
10231023
10241024 t_page = null_insert_page (nullb , idx << PAGE_SECTORS_SHIFT , true);
10251025
@@ -1078,7 +1078,7 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n)
10781078 * avoid race, we don't allow page free
10791079 */
10801080 for (i = 0 ; i < nr_pages ; i ++ ) {
1081- nullb -> cache_flush_pos = c_pages [i ]-> page -> index ;
1081+ nullb -> cache_flush_pos = c_pages [i ]-> page -> private ;
10821082 /*
10831083 * We found the page which is being flushed to disk by other
10841084 * threads