@@ -104,7 +104,7 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
 atomic_long_t _totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(_totalhigh_pages);
 
-unsigned int __nr_free_highpages (void)
+unsigned int __nr_free_highpages(void)
 {
	struct zone *zone;
	unsigned int pages = 0;
@@ -120,7 +120,7 @@ unsigned int __nr_free_highpages (void)
 static int pkmap_count[LAST_PKMAP];
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
 
-pte_t * pkmap_page_table;
+pte_t *pkmap_page_table;
 
 /*
  * Most architectures have no use for kmap_high_get(), so let's abstract
@@ -147,6 +147,7 @@ struct page *__kmap_to_page(void *vaddr)
 
	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);
+
		return pte_page(pkmap_page_table[i]);
	}
 
@@ -278,9 +279,8 @@ void *kmap_high(struct page *page)
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
-	return (void*) vaddr;
+	return (void *)vaddr;
 }
-
 EXPORT_SYMBOL(kmap_high);
 
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
@@ -305,7 +305,7 @@ void *kmap_high_get(struct page *page)
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
-	return (void*) vaddr;
+	return (void *)vaddr;
 }
 #endif
 
@@ -737,7 +737,6 @@ void *page_address(const struct page *page)
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
 }
-
 EXPORT_SYMBOL(page_address);
 
 /**
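
The hunks above are pure style cleanup: no space between a function name and its parameter list, the `*` attached to the declared name rather than to the type, `(void *)` casts written with the space inside the parentheses, and no blank line between a function's closing brace and its EXPORT_SYMBOL() line. These are the patterns scripts/checkpatch.pl flags. A minimal before/after sketch of the same rules (the identifiers below are hypothetical, not from this patch):

    /* Hypothetical declarations for illustration only. */
    typedef unsigned long pte_t;	/* stand-in so this compiles standalone */

    /* Style checkpatch.pl warns about: */
    pte_t * bad_table;			/* "foo * bar" should be "foo *bar" */
    unsigned int bad_count (void);	/* space prohibited before '(' */

    /* Style the patch converges on: */
    pte_t *good_table;			/* '*' binds to the declared name */
    unsigned int good_count(void);	/* no space before the parameter list */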