
Commit 920c7a5

hharrison authored and Linus Torvalds committed
mm: remove fastcall from mm/
fastcall is always defined to be empty, remove it.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 1e548de commit 920c7a5
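
For context on what is being deleted: on i386, fastcall historically expanded to a regparm attribute so that annotated functions took their first arguments in registers; once regparm(3) became the kernel-wide default calling convention, the arch override went away and only an empty fallback remained. The following is a from-memory sketch of those two definitions (header paths and exact contents are assumptions, not quoted from this commit):

	/* older include/asm-i386/linkage.h: pass the first three
	   arguments in registers rather than on the stack */
	#define fastcall	__attribute__((regparm(3)))

	/* generic include/linux/linkage.h fallback, the only
	   definition left by the time of this commit: expands
	   to nothing */
	#ifndef fastcall
	#define fastcall
	#endif

Since every remaining definition was empty, stripping the annotation changes no generated code.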

7 files changed: 24 additions, 23 deletions

mm/filemap.c

Lines changed: 5 additions & 5 deletions
@@ -527,7 +527,7 @@ static inline void wake_up_page(struct page *page, int bit)
 	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
@@ -551,7 +551,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
 	if (!TestClearPageLocked(page))
@@ -585,7 +585,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -606,7 +606,7 @@ int fastcall __lock_page_killable(struct page *page)
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -1276,7 +1276,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page;

mm/highmem.c

Lines changed: 2 additions & 2 deletions
@@ -163,7 +163,7 @@ static inline unsigned long map_new_virtual(struct page *page)
 	return vaddr;
 }
 
-void fastcall *kmap_high(struct page *page)
+void *kmap_high(struct page *page)
 {
 	unsigned long vaddr;
 
@@ -185,7 +185,7 @@ void fastcall *kmap_high(struct page *page)
 
 EXPORT_SYMBOL(kmap_high);
 
-void fastcall kunmap_high(struct page *page)
+void kunmap_high(struct page *page)
 {
 	unsigned long vaddr;
 	unsigned long nr;

mm/internal.h

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ static inline void __put_page(struct page *page)
 	atomic_dec(&page->_count);
 }
 
-extern void fastcall __init __free_pages_bootmem(struct page *page,
+extern void __init __free_pages_bootmem(struct page *page,
 						unsigned int order);
 
 /*

mm/memory.c

Lines changed: 2 additions & 1 deletion
@@ -1109,7 +1109,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
-pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
+pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+			spinlock_t **ptl)
 {
 	pgd_t * pgd = pgd_offset(mm, addr);
 	pud_t * pud = pud_alloc(mm, pgd, addr);

mm/page-writeback.c

Lines changed: 1 addition & 1 deletion
@@ -1073,7 +1073,7 @@ static int __set_page_dirty(struct page *page)
 	return 0;
 }
 
-int fastcall set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
 	int ret = __set_page_dirty(page);
 	if (ret)

mm/page_alloc.c

Lines changed: 8 additions & 8 deletions
@@ -537,7 +537,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	if (order == 0) {
 		__ClearPageReserved(page);
@@ -974,7 +974,7 @@ void mark_free_pages(struct zone *zone)
 /*
  * Free a 0-order page
  */
-static void fastcall free_hot_cold_page(struct page *page, int cold)
+static void free_hot_cold_page(struct page *page, int cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -1007,12 +1007,12 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	put_cpu();
 }
 
-void fastcall free_hot_page(struct page *page)
+void free_hot_page(struct page *page)
 {
 	free_hot_cold_page(page, 0);
 }
 
-void fastcall free_cold_page(struct page *page)
+void free_cold_page(struct page *page)
 {
 	free_hot_cold_page(page, 1);
 }
@@ -1641,7 +1641,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
  * Common helper functions.
  */
-fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page * page;
 	page = alloc_pages(gfp_mask, order);
@@ -1652,7 +1652,7 @@ fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 
 EXPORT_SYMBOL(__get_free_pages);
 
-fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
 	struct page * page;
 
@@ -1678,7 +1678,7 @@ void __pagevec_free(struct pagevec *pvec)
 		free_hot_cold_page(pvec->pages[i], pvec->cold);
 }
 
-fastcall void __free_pages(struct page *page, unsigned int order)
+void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
 		if (order == 0)
@@ -1690,7 +1690,7 @@ fastcall void __free_pages(struct page *page, unsigned int order)
 
 EXPORT_SYMBOL(__free_pages);
 
-fastcall void free_pages(unsigned long addr, unsigned int order)
+void free_pages(unsigned long addr, unsigned int order)
 {
 	if (addr != 0) {
 		VM_BUG_ON(!virt_addr_valid((void *)addr));

mm/swap.c

Lines changed: 5 additions & 5 deletions
@@ -41,7 +41,7 @@ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
  */
-static void fastcall __page_cache_release(struct page *page)
+static void __page_cache_release(struct page *page)
 {
 	if (PageLRU(page)) {
 		unsigned long flags;
@@ -165,7 +165,7 @@ int rotate_reclaimable_page(struct page *page)
 /*
  * FIXME: speed this up?
  */
-void fastcall activate_page(struct page *page)
+void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
 
@@ -186,7 +186,7 @@ void fastcall activate_page(struct page *page)
  * inactive,referenced	->	active,unreferenced
  * active,unreferenced	->	active,referenced
  */
-void fastcall mark_page_accessed(struct page *page)
+void mark_page_accessed(struct page *page)
 {
 	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
 		activate_page(page);
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(mark_page_accessed);
  * lru_cache_add: add a page to the page lists
  * @page: the page to add
  */
-void fastcall lru_cache_add(struct page *page)
+void lru_cache_add(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
 
@@ -212,7 +212,7 @@ void fastcall lru_cache_add(struct page *page)
 	put_cpu_var(lru_add_pvecs);
 }
 
-void fastcall lru_cache_add_active(struct page *page)
+void lru_cache_add_active(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
 