author | Ralf Baechle <ralf@linux-mips.org> | 2001-04-05 04:55:58 +0000
---|---|---
committer | Ralf Baechle <ralf@linux-mips.org> | 2001-04-05 04:55:58 +0000
commit | 74a9f2e1b4d3ab45a9f72cb5b556c9f521524ab3 (patch) |
tree | 7c4cdb103ab1b388c9852a88bd6fb1e73eba0b5c /include/asm-mips |
parent | ee6374c8b0d333c08061c6a97bc77090d7461225 (diff) |
Merge with Linux 2.4.3.
Note that mingetty no longer works with the serial console; you have to
switch to another getty such as getty_ps. This commit also includes a
fix for a setitimer bug which prevented getty_ps from working on
older kernels.
Diffstat (limited to 'include/asm-mips')
-rw-r--r-- | include/asm-mips/atomic.h | 107
-rw-r--r-- | include/asm-mips/pci.h | 9
-rw-r--r-- | include/asm-mips/pgalloc.h | 87
-rw-r--r-- | include/asm-mips/pgtable.h | 17
4 files changed, 152 insertions, 68 deletions
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 12760cc3c..326471707 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -21,7 +21,23 @@ typedef struct { volatile int counter; } atomic_t;
 #ifdef __KERNEL__
 #define ATOMIC_INIT(i)    { (i) }
 
+/*
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
 #define atomic_read(v)	((v)->counter)
+
+/*
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
 #define atomic_set(v,i)	((v)->counter = (i))
 
 #if !defined(CONFIG_CPU_HAS_LLSC)
@@ -31,6 +47,13 @@ typedef struct { volatile int counter; } atomic_t;
 /*
  * The MIPS I implementation is only atomic with respect to
  * interrupts.  R3000 based multiprocessor machines are rare anyway ...
+ *
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.  Note that the guaranteed useful range
+ * of an atomic_t is only 24 bits.
  */
 extern __inline__ void atomic_add(int i, atomic_t * v)
 {
@@ -42,6 +65,14 @@ extern __inline__ void atomic_add(int i, atomic_t * v)
 	restore_flags(flags);
 }
 
+/*
+ * atomic_sub - subtract the atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
 extern __inline__ void atomic_sub(int i, atomic_t * v)
 {
 	int	flags;
@@ -87,6 +118,14 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
  * implementation is SMP safe ...
  */
 
+/*
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.  Note that the guaranteed useful range
+ * of an atomic_t is only 24 bits.
+ */
 extern __inline__ void atomic_add(int i, atomic_t * v)
 {
 	unsigned long temp;
@@ -100,6 +139,14 @@ extern __inline__ void atomic_add(int i, atomic_t * v)
 	: "Ir" (i), "m" (v->counter));
 }
 
+/*
+ * atomic_sub - subtract the atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
 extern __inline__ void atomic_sub(int i, atomic_t * v)
 {
 	unsigned long temp;
@@ -160,11 +207,71 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
+/*
+ * atomic_sub_and_test - test variable then subtract
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+
+/*
+ * atomic_inc_and_test - increment by 1 and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(1, (v)) == 0)
+
+/*
+ * atomic_dec_and_test - decrement by 1 and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 
+/*
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
 #define atomic_inc(v) atomic_add(1,(v))
+
+/*
+ * atomic_dec - decrement the atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
 #define atomic_dec(v) atomic_sub(1,(v))
 
+/*
+ * atomic_add_negative - add and test if negative
+ * @v: pointer of type atomic_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ *
+ * Currently not implemented for MIPS.
+ */
+
 #endif /* defined(__KERNEL__) */
 
 #endif /* __ASM_ATOMIC_H */
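The hunk above is purely documentation: it annotates the existing atomic_t operations without changing their semantics. One wrinkle worth noting is that the newly added atomic_inc_and_test() expands to atomic_inc_return(1, (v)), yet atomic_inc_return() is defined just above as a one-argument macro; the definition only survives because nothing in the tree uses it yet. As a quick orientation to the API being documented, here is a minimal, hypothetical refcount sketch; sketch_refcount, sketch_get(), and sketch_put() are invented names, not part of this commit:

```c
/*
 * Hypothetical refcount built on the atomic_t API documented above.
 * A sketch only; values must stay inside the guaranteed 24-bit
 * useful range of atomic_t.
 */
#include <asm/atomic.h>

static atomic_t sketch_refcount = ATOMIC_INIT(1);	/* invented counter */

static void sketch_get(void)
{
	atomic_inc(&sketch_refcount);	/* atomic_add(1, v) underneath */
}

static int sketch_put(void)
{
	/* atomic_dec_and_test() is true only when the count reaches 0 */
	return atomic_dec_and_test(&sketch_refcount);
}
```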
diff --git a/include/asm-mips/pci.h b/include/asm-mips/pci.h
index 78f6a63e9..a5a4ee7b8 100644
--- a/include/asm-mips/pci.h
+++ b/include/asm-mips/pci.h
@@ -209,9 +209,18 @@ extern inline void pci_dma_sync_sg(struct pci_dev *hwdev,
  */
 extern inline int pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
 {
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < 0x00ffffff)
+		return 0;
+
 	return 1;
 }
+
 
 /*
  * These macros should be used after a pci_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
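The new body of pci_dma_supported() encodes the MIPS policy directly: constrained allocations can only fall back to GFP_DMA, so any mask tighter than 0x00ffffff is rejected. Here is a sketch of how a 2.4-era driver probe might consult it before setting up DMA; sketch_dev_init() and SKETCH_DMA_MASK are hypothetical names, not from this commit:

```c
/*
 * Sketch of a 2.4-style DMA capability check; the function and the
 * mask constant are invented for illustration.
 */
#include <linux/pci.h>
#include <linux/errno.h>

#define SKETCH_DMA_MASK	0xffffffff	/* device can address all 32 bits */

static int sketch_dev_init(struct pci_dev *pdev)
{
	/*
	 * Masks narrower than 0x00ffffff fail here, because the MIPS
	 * implementation can only fall back to GFP_DMA allocations.
	 */
	if (!pci_dma_supported(pdev, SKETCH_DMA_MASK))
		return -EIO;
	return 0;
}
```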
diff --git a/include/asm-mips/pgalloc.h b/include/asm-mips/pgalloc.h
index 0fa39f666..82ac65aac 100644
--- a/include/asm-mips/pgalloc.h
+++ b/include/asm-mips/pgalloc.h
@@ -33,9 +33,7 @@ extern inline void flush_tlb_pgtables(struct mm_struct *mm,
 
 /*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
+ * Allocate and free page tables.
  */
 
 #define pgd_quicklist (current_cpu_data.pgd_quick)
@@ -43,6 +41,8 @@ extern inline void flush_tlb_pgtables(struct mm_struct *mm,
 #define pte_quicklist (current_cpu_data.pte_quick)
 #define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
 
+#define pmd_populate(mm, pmd, pte)	pmd_set(pmd, pte)
+
 extern __inline__ pgd_t *get_pgd_slow(void)
 {
 	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
@@ -82,7 +82,6 @@ extern __inline__ void free_pgd_slow(pgd_t *pgd)
 }
 
 extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
 
 extern __inline__ pte_t *get_pte_fast(void)
 {
@@ -123,67 +122,53 @@ extern __inline__ void free_pmd_slow(pmd_t *pmd)
 }
 
 extern void __bad_pte(pmd_t *pmd);
-extern void __bad_pte_kernel(pmd_t *pmd);
-
-#define pte_free_kernel(pte)	free_pte_fast(pte)
-#define pte_free(pte)		free_pte_fast(pte)
-#define pgd_free(pgd)		free_pgd_fast(pgd)
-#define pgd_alloc()		get_pgd_fast()
 
-extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
+static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-
-	if (pmd_none(*pmd)) {
-		pte_t *page = get_pte_fast();
-		if (page) {
-			pmd_val(*pmd) = (unsigned long)page;
-			return page + address;
-		}
-		return get_pte_kernel_slow(pmd, address);
-	}
-	if (pmd_bad(*pmd)) {
-		__bad_pte_kernel(pmd);
-		return NULL;
-	}
-	return (pte_t *) pmd_page(*pmd) + address;
+	pte_t *pte;
+
+	pte = (pte_t *) __get_free_page(GFP_KERNEL);
+	if (pte)
+		clear_page(pte);
+	return pte;
 }
 
-extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
+static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
 {
-	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-
-	if (pmd_none(*pmd)) {
-		pte_t *page = get_pte_fast();
-		if (page) {
-			pmd_val(*pmd) = (unsigned long)page;
-			return page + address;
-		}
-		return get_pte_slow(pmd, address);
-	}
-	if (pmd_bad(*pmd)) {
-		__bad_pte(pmd);
-		return NULL;
+	unsigned long *ret;
+
+	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
+		pte_quicklist = (unsigned long *)(*ret);
+		ret[0] = ret[1];
+		pgtable_cache_size--;
 	}
-	return (pte_t *) pmd_page(*pmd) + address;
+	return (pte_t *)ret;
 }
 
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-extern inline void pmd_free(pmd_t * pmd)
+extern __inline__ void pte_free_fast(pte_t *pte)
 {
+	*(unsigned long *)pte = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pte;
+	pgtable_cache_size++;
 }
 
-extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
+extern __inline__ void pte_free_slow(pte_t *pte)
 {
-	/* Two level page tables. This level is a nop */
-	return (pmd_t *) pgd;
+	free_page((unsigned long)pte);
 }
 
-#define pmd_free_kernel pmd_free
-#define pmd_alloc_kernel pmd_alloc
+#define pte_free(pte)		pte_free_slow(pte)
+#define pgd_free(pgd)		free_pgd_fast(pgd)
+#define pgd_alloc()		get_pgd_fast()
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pmd_alloc_one_fast(mm, addr)	({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)			do { } while (0)
+#define pgd_populate(mm, pmd, pte)	BUG()
 
 extern int do_check_pgt_cache(int, int);
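The pgalloc.h rewrite retires the pmd-walking pte_alloc()/pte_alloc_kernel() pair in favor of the split allocators the generic 2.4.3 mm code drives: pte_alloc_one_fast() pops a recycled page table off the per-CPU quicklist, while pte_alloc_one() falls back to allocating and zeroing a fresh page. A minimal sketch of that calling convention, assuming a hypothetical wrapper named sketch_alloc_pte():

```c
/*
 * Sketch of the fast-then-slow convention the split allocators above
 * are designed for; sketch_alloc_pte() is an invented wrapper.
 */
#include <asm/pgalloc.h>

static pte_t *sketch_alloc_pte(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = pte_alloc_one_fast(mm, address);	/* recycled, from pte_quicklist */
	if (!pte)
		pte = pte_alloc_one(mm, address);	/* fresh zeroed page */
	return pte;
}
```

The release path is the same split in reverse: pte_free_fast() pushes the page table back onto the quicklist, and do_check_pgt_cache() is what trims the cache when it grows past its limit.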
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 7ff340656..6b4ebcbc3 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -222,21 +222,9 @@ extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-/*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern pte_t __bad_page(void);
-extern pte_t *__bad_pagetable(void);
-
 extern unsigned long empty_zero_page;
 extern unsigned long zero_page_mask;
 
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
 #define ZERO_PAGE(vaddr) \
 	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
@@ -458,11 +446,6 @@ extern inline pte_t *pte_offset(pmd_t * dir, unsigned long address)
  */
 extern void pgd_init(unsigned long page);
 
-extern void __bad_pte(pmd_t *pmd);
-extern void __bad_pte_kernel(pmd_t *pmd);
-
-#define pte_free_kernel(pte)	free_pte_fast(pte)
-#define pte_free(pte)		free_pte_fast(pte)
 #define pgd_free(pgd)		free_pgd_fast(pgd)
 #define pgd_alloc()		get_pgd_fast()
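With BAD_PAGE and BAD_PAGETABLE gone, ZERO_PAGE(vaddr) is the one special page left in this header. Note that the MIPS version keys the lookup on vaddr through zero_page_mask; on virtually indexed caches this appears to select, among several replicated copies of the zero page, the one whose cache color matches the mapping. A hypothetical illustration, with sketch_zero_fault() as an invented name:

```c
/*
 * Illustration of the ZERO_PAGE() idiom this cleanup keeps: a read
 * fault on anonymous memory can be backed by a shared zero page
 * instead of a freshly allocated one.  sketch_zero_fault() is
 * invented for illustration, not code from this commit.
 */
#include <asm/pgtable.h>

static struct page *sketch_zero_fault(unsigned long vaddr)
{
	/* zero_page_mask picks the zero-page alias matching vaddr */
	return ZERO_PAGE(vaddr);
}
```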