Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c    |   3
-rw-r--r--   mm/highmem.c    |  14
-rw-r--r--   mm/page_alloc.c |  10
-rw-r--r--   mm/slab.c       | 136
4 files changed, 112 insertions, 51 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 43f1de720..58f642b30 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2515,8 +2515,9 @@ generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 		status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
 		if (status)
 			goto unlock;
-		kaddr = (char*)page_address(page);
+		kaddr = page_address(page);
 		status = copy_from_user(kaddr+offset, buf, bytes);
+		flush_dcache_page(page);
 		if (status)
 			goto fail_write;
 		status = mapping->a_ops->commit_write(file, page, offset, offset+bytes);
diff --git a/mm/highmem.c b/mm/highmem.c
index e11b5d0b1..411f20c52 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -83,7 +83,7 @@ struct page * replace_with_highmem(struct page * page)
 	}
 
 	vaddr = kmap(highpage);
-	copy_page((void *)vaddr, (void *)page_address(page));
+	copy_page((void *)vaddr, page_address(page));
 	kunmap(highpage);
 
 	if (page->mapping)
@@ -137,7 +137,7 @@ static void flush_all_zero_pkmaps(void)
 			BUG();
 		pte_clear(pkmap_page_table+i);
 		page = pte_page(pte);
-		page->virtual = 0;
+		page->virtual = NULL;
 	}
 	flush_tlb_all();
 }
@@ -176,17 +176,17 @@ start:
 
 			/* Somebody else might have mapped it while we slept */
 			if (page->virtual)
-				return page->virtual;
+				return (unsigned long) page->virtual;
 
 			/* Re-start */
 			goto start;
 		}
 	}
 	vaddr = PKMAP_ADDR(last_pkmap_nr);
-	set_pte(pkmap_page_table + last_pkmap_nr, mk_pte(page, kmap_prot));
+	set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
 
 	pkmap_count[last_pkmap_nr] = 1;
-	page->virtual = vaddr;
+	page->virtual = (void *) vaddr;
 
 	return vaddr;
 }
@@ -202,7 +202,7 @@ unsigned long kmap_high(struct page *page)
 	 * We cannot call this from interrupts, as it may block
 	 */
 	spin_lock(&kmap_lock);
-	vaddr = page->virtual;
+	vaddr = (unsigned long) page->virtual;
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
@@ -218,7 +218,7 @@ void kunmap_high(struct page *page)
 	unsigned long nr;
 
 	spin_lock(&kmap_lock);
-	vaddr = page->virtual;
+	vaddr = (unsigned long) page->virtual;
 	if (!vaddr)
 		BUG();
 	nr = PKMAP_NR(vaddr);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 941cb5909..420f91f92 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -326,7 +326,7 @@ unsigned long __get_free_pages(int gfp_mask, unsigned long order)
 	page = alloc_pages(gfp_mask, order);
 	if (!page)
 		return 0;
-	return page_address(page);
+	return (unsigned long) page_address(page);
 }
 
 unsigned long get_zeroed_page(int gfp_mask)
@@ -335,9 +335,9 @@ unsigned long get_zeroed_page(int gfp_mask)
 
 	page = alloc_pages(gfp_mask, 0);
 	if (page) {
-		unsigned long address = page_address(page);
-		clear_page((void *)address);
-		return address;
+		void *address = page_address(page);
+		clear_page(address);
+		return (unsigned long) address;
 	}
 	return 0;
 }
@@ -639,7 +639,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 			struct page *page = mem_map + offset + i;
 			page->zone = zone;
 			if (j != ZONE_HIGHMEM) {
-				page->virtual = (unsigned long)(__va(zone_start_paddr));
+				page->virtual = __va(zone_start_paddr);
 				zone_start_paddr += PAGE_SIZE;
 			}
 		}
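Note on the hunks above: page->virtual is now a void * rather than an unsigned long, so page_address() hands back a pointer and callers cast to unsigned long only where an integer address is really wanted; filemap.c additionally flushes the D-cache after writing through the kernel mapping. A minimal sketch of the resulting caller pattern follows (the helper name is hypothetical; page_address(), copy_from_user() and flush_dcache_page() are the existing 2.4 interfaces):

/* Hypothetical helper showing the caller pattern: write user data into a
 * page cache page through its kernel mapping, then keep virtually indexed
 * D-caches coherent before the page can be seen through user mappings. */
static int copy_user_into_page(struct page *page, unsigned long offset,
			       const char *buf, size_t bytes)
{
	char *kaddr = page_address(page);	/* void * now, no cast needed */
	unsigned long left;

	left = copy_from_user(kaddr + offset, buf, bytes);
	flush_dcache_page(page);		/* as generic_file_write() now does */
	return left ? -EFAULT : 0;
}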
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -43,16 +43,13 @@
  * of the entries in the array are given back into the global cache.
  * This reduces the number of spinlock operations.
  *
- * The c_cpuarray can be changed with a smp_call_function call,
- * it may not be read with enabled local interrupts.
+ * The c_cpuarray may not be read with enabled local interrupts.
  *
  * SMP synchronization:
  *  constructors and destructors are called without any locking.
  *  Several members in kmem_cache_t and slab_t never change, they
  *	are accessed without any locking.
  *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
- *  smp_call_function() is used if one cpu must flush the arrays from
- *  other cpus.
  *  The non-constant members are protected with a per-cache irq spinlock.
  *
  * Further notes from the original documentation:
@@ -372,7 +369,6 @@ static kmem_cache_t *clock_searchp = &cache_cache;
  */
 static int g_cpucache_up;
 
-static void drain_cache (void *__cachep);
 static void enable_cpucache (kmem_cache_t *cachep);
 static void enable_all_cpucaches (void);
 #endif
@@ -463,14 +459,17 @@ void __init kmem_cache_sizes_init(void)
 	} while (sizes->cs_size);
 }
 
-void __init kmem_cpucache_init(void)
+int __init kmem_cpucache_init(void)
 {
 #ifdef CONFIG_SMP
 	g_cpucache_up = 1;
 	enable_all_cpucaches();
 #endif
+	return 0;
 }
 
+__initcall(kmem_cpucache_init);
+
 /* Interface to system's page allocator. No need to hold the cache-lock.
  */
 static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
@@ -838,17 +837,50 @@ static int is_chained_kmem_cache(kmem_cache_t * cachep)
 	return ret;
 }
 
+#ifdef CONFIG_SMP
+static DECLARE_MUTEX(cache_drain_sem);
+static kmem_cache_t *cache_to_drain = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(cache_drain_wait);
+unsigned long slab_cache_drain_mask;
+
+static void drain_cpu_caches(kmem_cache_t *cachep)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long cpu_mask = 0;
+	int i;
+
+	for (i = 0; i < smp_num_cpus; i++)
+		cpu_mask |= (1UL << cpu_logical_map(i));
+
+	down(&cache_drain_sem);
+
+	cache_to_drain = cachep;
+	slab_cache_drain_mask = cpu_mask;
+
+	slab_drain_local_cache();
+
+	add_wait_queue(&cache_drain_wait, &wait);
+	current->state = TASK_UNINTERRUPTIBLE;
+	while (slab_cache_drain_mask != 0UL)
+		schedule();
+	current->state = TASK_RUNNING;
+	remove_wait_queue(&cache_drain_wait, &wait);
+
+	cache_to_drain = NULL;
+
+	up(&cache_drain_sem);
+}
+#else
+#define drain_cpu_caches(cachep)	do { } while (0)
+#endif
+
 static int __kmem_cache_shrink(kmem_cache_t *cachep)
 {
 	slab_t *slabp;
 	int ret;
 
-#ifdef CONFIG_SMP
-	smp_call_function(drain_cache, cachep, 1, 1);
-	local_irq_disable();
-	drain_cache(cachep);
-	local_irq_enable();
-#endif
+	drain_cpu_caches(cachep);
+
 	spin_lock_irq(&cachep->spinlock);
 
 	/* If the cache is growing, stop shrinking. */
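Note on the drain protocol added above: drain_cpu_caches() publishes the target cache in cache_to_drain, sets one bit per online CPU in slab_cache_drain_mask, drains its own CPU directly and then sleeps on cache_drain_wait until every bit is cleared. The remote CPUs are expected to notice the mask from their per-cpu timer interrupt; that arch-side hook is not part of this mm/ diff, so the sketch below is only an assumption about how it could look (the hook name is hypothetical; slab_drain_local_cache() is defined in the following hunk):

/* Assumed arch-side hook, *not* in this mm/ diff: called from each CPU's
 * timer interrupt so that remote CPUs take part in a drain or ccupdate.
 * slab_drain_local_cache() flushes or swaps the local cpucache, clears
 * this CPU's bit in slab_cache_drain_mask and wakes the waiter on
 * cache_drain_wait once the mask reaches zero. */
extern unsigned long slab_cache_drain_mask;
extern void slab_drain_local_cache(void);

static inline void slab_drain_timer_tick(void)	/* hypothetical name */
{
	if (test_bit(smp_processor_id(), &slab_cache_drain_mask))
		slab_drain_local_cache();
}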
@@ -1554,36 +1586,67 @@ kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
 }
 
 #ifdef CONFIG_SMP
-/*
- * called with local interrupts disabled
- */
-static void drain_cache (void* __cachep)
+
+typedef struct ccupdate_struct_s
 {
-	kmem_cache_t *cachep = __cachep;
-	cpucache_t *cc = cc_data(cachep);
+	kmem_cache_t *cachep;
+	cpucache_t *new[NR_CPUS];
+} ccupdate_struct_t;
 
-	if (cc && cc->avail) {
-		free_block(cachep, cc_entry(cc), cc->avail);
-		cc->avail = 0;
+static ccupdate_struct_t *ccupdate_state = NULL;
+
+/* Called from per-cpu timer interrupt.
+ */
+void slab_drain_local_cache(void)
+{
+	local_irq_disable();
+	if (ccupdate_state != NULL) {
+		ccupdate_struct_t *new = ccupdate_state;
+		cpucache_t *old = cc_data(new->cachep);
+
+		cc_data(new->cachep) = new->new[smp_processor_id()];
+		new->new[smp_processor_id()] = old;
+	} else {
+		kmem_cache_t *cachep = cache_to_drain;
+		cpucache_t *cc = cc_data(cachep);
+
+		if (cc && cc->avail) {
+			free_block(cachep, cc_entry(cc), cc->avail);
+			cc->avail = 0;
+		}
 	}
+	local_irq_enable();
+
+	clear_bit(smp_processor_id(), &slab_cache_drain_mask);
+	if (slab_cache_drain_mask == 0)
+		wake_up(&cache_drain_wait);
 }
 
-typedef struct ccupdate_struct_s
+static void do_ccupdate(ccupdate_struct_t *data)
 {
-	kmem_cache_t* cachep;
-	cpucache_t* new[NR_CPUS];
-} ccupdate_struct_t;
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long cpu_mask = 0;
+	int i;
 
-/*
- * called with local interrupts disabled
- */
-static void ccupdate_callback (void* __new)
-{
-	ccupdate_struct_t* new = __new;
-	cpucache_t *old = cc_data(new->cachep);
+	for (i = 0; i < smp_num_cpus; i++)
+		cpu_mask |= (1UL << cpu_logical_map(i));
+
+	down(&cache_drain_sem);
 
-	cc_data(new->cachep) = new->new[smp_processor_id()];
-	new->new[smp_processor_id()] = old;
+	ccupdate_state = data;
+	slab_cache_drain_mask = cpu_mask;
+
+	slab_drain_local_cache();
+
+	add_wait_queue(&cache_drain_wait, &wait);
+	current->state = TASK_UNINTERRUPTIBLE;
+	while (slab_cache_drain_mask != 0UL)
+		schedule();
+	current->state = TASK_RUNNING;
+	remove_wait_queue(&cache_drain_wait, &wait);
+
+	ccupdate_state = NULL;
+
+	up(&cache_drain_sem);
 }
 
 /* called with cache_chain_sem acquired. */
@@ -1624,10 +1687,7 @@ static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
 	cachep->batchcount = batchcount;
 	spin_unlock_irq(&cachep->spinlock);
 
-	smp_call_function(ccupdate_callback,&new,1,1);
-	local_irq_disable();
-	ccupdate_callback(&new);
-	local_irq_enable();
+	do_ccupdate(&new);
 
 	for (i = 0; i < smp_num_cpus; i++) {
 		cpucache_t* ccold = new.new[cpu_logical_map(i)];
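Note on the ccupdate path: do_ccupdate() publishes a ccupdate_struct_t through ccupdate_state, each CPU swaps its cc_data() pointer against its slot in new[] inside slab_drain_local_cache(), and the displaced per-cpu arrays come back to the caller for freeing. A hedged sketch of a caller, mirroring the tail of kmem_tune_cpucache() above (the function name and the ccnew argument are hypothetical; the prepared cpucache_t's and their limit/batchcount setup are assumed to exist as in the real tuning path):

/* Hypothetical caller: ccnew[] holds freshly allocated, already
 * initialised cpucache_t pointers, one per CPU, as kmem_tune_cpucache()
 * prepares them above. */
static void example_install_cpucaches(kmem_cache_t *cachep, cpucache_t **ccnew)
{
	ccupdate_struct_t new;
	int i;

	memset(&new, 0, sizeof(new));
	new.cachep = cachep;
	for (i = 0; i < smp_num_cpus; i++)
		new.new[cpu_logical_map(i)] = ccnew[i];

	do_ccupdate(&new);		/* every CPU swaps in its new array */

	/* The old per-cpu arrays came back in new.new[]: drain and free them. */
	for (i = 0; i < smp_num_cpus; i++) {
		cpucache_t *ccold = new.new[cpu_logical_map(i)];
		if (!ccold)
			continue;
		local_irq_disable();
		free_block(cachep, cc_entry(ccold), ccold->avail);
		local_irq_enable();
		kfree(ccold);
	}
}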