author     Ralf Baechle <ralf@linux-mips.org>   2000-03-07 15:45:24 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2000-03-07 15:45:24 +0000
commit     9f9f3e6e8548a596697778337110a423c384b6f3 (patch)
tree       5dd4b290ef532cf5ecb058e1a92cd3435afeac8c /include/asm-alpha
parent     d5c9a365ee7d2fded249aa5abfc5e89587583029 (diff)
Merge with Linux 2.3.49.
Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/hardirq.h       13
-rw-r--r--  include/asm-alpha/hw_irq.h        17
-rw-r--r--  include/asm-alpha/machvec.h        6
-rw-r--r--  include/asm-alpha/mmu_context.h   86
-rw-r--r--  include/asm-alpha/pci.h           17
-rw-r--r--  include/asm-alpha/pgalloc.h       78
-rw-r--r--  include/asm-alpha/pgtable.h        3
-rw-r--r--  include/asm-alpha/smp.h            2
-rw-r--r--  include/asm-alpha/socket.h         2
9 files changed, 129 insertions, 95 deletions
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index 6f8b9ffe3..1c8101d58 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -46,7 +46,16 @@ extern unsigned long __irq_attempt[];
extern int global_irq_holder;
extern spinlock_t global_irq_lock;
-extern atomic_t global_irq_count;
+
+static inline int irqs_running (void)
+{
+ int i;
+
+ for (i = 0; i < smp_num_cpus; i++)
+ if (local_irq_count(i))
+ return 1;
+ return 0;
+}
static inline void release_irqlock(int cpu)
{
@@ -60,7 +69,6 @@ static inline void release_irqlock(int cpu)
static inline void irq_enter(int cpu, int irq)
{
++local_irq_count(cpu);
- atomic_inc(&global_irq_count);
while (spin_is_locked(&global_irq_lock))
barrier();
@@ -68,7 +76,6 @@ static inline void irq_enter(int cpu, int irq)
static inline void irq_exit(int cpu, int irq)
{
- atomic_dec(&global_irq_count);
--local_irq_count(cpu);
}
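
The hardirq.h change retires the global atomic interrupt counter: irq_enter() and irq_exit() now touch only their own CPU's counter, and the new irqs_running() scans those counters on demand. A standalone model of the resulting scheme, with NR_CPUS, smp_num_cpus and the counter array as stand-ins for the kernel's own symbols:

#include <stdio.h>

#define NR_CPUS 4

static int smp_num_cpus = NR_CPUS;
static int local_irq_count[NR_CPUS];    /* stand-in for the per-CPU macro */

static inline int irqs_running(void)
{
        int i;

        for (i = 0; i < smp_num_cpus; i++)
                if (local_irq_count[i])
                        return 1;
        return 0;
}

int main(void)
{
        local_irq_count[2]++;                       /* CPU 2 enters an IRQ */
        printf("running: %d\n", irqs_running());   /* 1 */
        local_irq_count[2]--;                       /* ...and leaves it */
        printf("running: %d\n", irqs_running());   /* 0 */
        return 0;
}

The trade is deliberate: the hot enter/exit paths stop bouncing a shared cache line on every interrupt, while the rare "is anything in flight?" query pays an O(cpus) scan instead.
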
diff --git a/include/asm-alpha/hw_irq.h b/include/asm-alpha/hw_irq.h
index 1bf6629b4..40ac4056d 100644
--- a/include/asm-alpha/hw_irq.h
+++ b/include/asm-alpha/hw_irq.h
@@ -1,5 +1,14 @@
-/* This exists merely to satisfy <linux/irq.h>. There is
- nothing that would go here of general interest.
+#ifndef _ALPHA_HW_IRQ_H
+#define _ALPHA_HW_IRQ_H
- Everything of consequence is in arch/alpha/kernel/irq_impl.h,
- to be used only in arch/alpha/kernel/. */
+#include <linux/config.h>
+
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+
+#ifdef CONFIG_ALPHA_GENERIC
+#define ACTUAL_NR_IRQS alpha_mv.nr_irqs
+#else
+#define ACTUAL_NR_IRQS NR_IRQS
+#endif
+
+#endif
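
hw_irq.h stops being a placeholder: it gains an include guard, a no-op hw_resend_irq() to satisfy <linux/irq.h>, and ACTUAL_NR_IRQS, which a generic kernel resolves through the machine vector at run time while a platform-specific build folds it to the NR_IRQS constant. A minimal model of that compile-time/run-time split; the scaffolding around the two macros is invented for illustration:

#include <stdio.h>

#define NR_IRQS 64                          /* compile-time maximum */

struct machine_vector { int nr_irqs; };
static struct machine_vector alpha_mv = { .nr_irqs = 48 };  /* set at boot */

#ifdef CONFIG_ALPHA_GENERIC
# define ACTUAL_NR_IRQS alpha_mv.nr_irqs    /* decided when the box boots */
#else
# define ACTUAL_NR_IRQS NR_IRQS             /* decided when the kernel builds */
#endif

int main(void)
{
        printf("handling %d irqs\n", ACTUAL_NR_IRQS);
        return 0;
}

Built plain it prints 64; built with -DCONFIG_ALPHA_GENERIC it prints whatever the vector was filled in with.
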
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
index b8a2a1a96..0aad19668 100644
--- a/include/asm-alpha/machvec.h
+++ b/include/asm-alpha/machvec.h
@@ -22,7 +22,6 @@ struct linux_hose_info;
struct pci_dev;
struct pci_ops;
struct pci_controler;
-struct irqaction;
struct alpha_machine_vector
{
@@ -67,10 +66,9 @@ struct alpha_machine_vector
void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
struct task_struct *, long);
- void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *, long);
+ void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
void (*mv_flush_tlb_current)(struct mm_struct *);
- void (*mv_flush_tlb_other)(struct mm_struct *);
void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr);
@@ -82,7 +80,7 @@ struct alpha_machine_vector
void (*init_arch)(void);
void (*init_irq)(void);
- void (*init_rtc)(struct irqaction *);
+ void (*init_rtc)(void);
void (*init_pci)(void);
void (*kill_arch)(int);
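
For context, alpha_machine_vector is an ops table: each platform fills in its function pointers once and generic code calls through them, which is why the signature changes above (mv_activate_mm losing its cpu argument, init_rtc losing its irqaction, mv_flush_tlb_other disappearing) ripple through every port at once. A toy sketch of the dispatch shape; the ev4 bodies and main() are invented for illustration:

#include <stdio.h>

struct mm_struct;                       /* opaque here, as in the header */

struct alpha_machine_vector {
        void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
        void (*init_rtc)(void);
};

static void ev4_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        printf("ev4 activate_mm\n");
}

static void ev4_init_rtc(void)
{
        printf("ev4 rtc init\n");
}

static struct alpha_machine_vector alpha_mv = {
        .mv_activate_mm = ev4_activate_mm,
        .init_rtc       = ev4_init_rtc,
};

int main(void)
{
        alpha_mv.init_rtc();                 /* generic caller, platform body */
        alpha_mv.mv_activate_mm(NULL, NULL);
        return 0;
}
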
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 1d150d523..6b268f4e6 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -22,7 +22,8 @@
#include <asm/io.h>
#endif
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
@@ -58,8 +59,7 @@ __reload_thread(struct thread_struct *pcb)
* in use) and the Valid bit set, then entries can also effectively be
* made coherent by assigning a new, unused ASN to the currently
* running process and not reusing the previous ASN before calling the
- * appropriate PALcode routine to invalidate the translation buffer
- * (TB)".
+ * appropriate PALcode routine to invalidate the translation buffer (TB)".
*
* In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
* work correctly and can thus not be used (explaining the lack of PAL-code
@@ -123,8 +123,6 @@ extern unsigned long last_asn;
#define __MMU_EXTERN_INLINE
#endif
-extern void get_new_mm_context(struct task_struct *p, struct mm_struct *mm);
-
static inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
@@ -133,6 +131,7 @@ __get_new_mm_context(struct mm_struct *mm, long cpu)
if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
tbiap();
+ imb();
next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
}
cpu_last_asn(cpu) = next;
@@ -140,35 +139,6 @@ __get_new_mm_context(struct mm_struct *mm, long cpu)
}
__EXTERN_INLINE void
-ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *next, long cpu)
-{
- /* As described, ASN's are broken. But we can optimize for
- switching between threads -- if the mm is unchanged from
- current we needn't flush. */
- /* ??? May not be needed because EV4 PALcode recognizes that
- ASN's are broken and does a tbiap itself on swpctx, under
- the "Must set ASN or flush" rule. At least this is true
- for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
- I'm going to leave this here anyway, just to Be Sure. -- r~ */
-
- if (prev_mm != next_mm)
- tbiap();
-}
-
-__EXTERN_INLINE void
-ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, long cpu)
-{
- /* This is only called after changing mm on current. */
- tbiap();
-
- current->thread.ptbr
- = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
-
- __reload_thread(&current->thread);
-}
-
-__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
struct task_struct *next, long cpu)
{
@@ -193,28 +163,50 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
}
__EXTERN_INLINE void
-ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, long cpu)
+ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+ struct task_struct *next, long cpu)
{
- unsigned long mmc = __get_new_mm_context(next_mm, cpu);
- next_mm->context = mmc;
- current->thread.asn = mmc & HARDWARE_ASN_MASK;
- current->thread.ptbr
- = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+ /* As described, ASN's are broken for TLB usage. But we can
+ optimize for switching between threads -- if the mm is
+ unchanged from current we needn't flush. */
+ /* ??? May not be needed because EV4 PALcode recognizes that
+ ASN's are broken and does a tbiap itself on swpctx, under
+ the "Must set ASN or flush" rule. At least this is true
+ for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
+ I'm going to leave this here anyway, just to Be Sure. -- r~ */
+ if (prev_mm != next_mm)
+ tbiap();
+
+ /* Do continue to allocate ASNs, because we can still use them
+ to avoid flushing the icache. */
+ ev5_switch_mm(prev_mm, next_mm, next, cpu);
+}
- __reload_thread(&current->thread);
+extern void __load_new_mm_context(struct mm_struct *);
+
+__EXTERN_INLINE void
+ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
+{
+ __load_new_mm_context(next_mm);
}
+__EXTERN_INLINE void
+ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
+{
+ __load_new_mm_context(next_mm);
+ tbiap();
+}
#ifdef CONFIG_ALPHA_GENERIC
-# define switch_mm alpha_mv.mv_switch_mm
-# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y),smp_processor_id())
+# define switch_mm(a,b,c,d) alpha_mv.mv_switch_mm((a),(b),(c),(d))
+# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
-# define switch_mm ev4_switch_mm
-# define activate_mm(x,y) ev4_activate_mm((x),(y),smp_processor_id())
+# define switch_mm(a,b,c,d) ev4_switch_mm((a),(b),(c),(d))
+# define activate_mm(x,y) ev4_activate_mm((x),(y))
# else
-# define switch_mm ev5_switch_mm
-# define activate_mm(x,y) ev5_activate_mm((x),(y),smp_processor_id())
+# define switch_mm(a,b,c,d) ev5_switch_mm((a),(b),(c),(d))
+# define activate_mm(x,y) ev5_activate_mm((x),(y))
# endif
#endif
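
The heart of this file is __get_new_mm_context(): ASNs come from a per-CPU counter whose low bits are the hardware ASN and whose high bits are a software generation. When the hardware field is exhausted, the translation buffer is flushed once (tbiap, now followed by imb) and the generation is bumped, so ASNs from the older generation can never match again. A standalone model that mirrors the hunk's arithmetic, with deliberately tiny toy constants (the real field width and MAX_ASN depend on the CPU):

#include <stdio.h>

#define HARDWARE_ASN_MASK   0x7fUL                   /* toy 7-bit ASN field */
#define MAX_ASN             4                        /* toy: force early wrap */
#define ASN_FIRST_VERSION   (HARDWARE_ASN_MASK + 1)  /* one generation step */

static unsigned long cpu_last_asn = ASN_FIRST_VERSION;

static unsigned long get_new_mm_context(void)
{
        unsigned long asn = cpu_last_asn;
        unsigned long next = asn + 1;

        if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
                printf("  tbiap(); imb();  /* flush TB, new generation */\n");
                next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
        }
        cpu_last_asn = next;
        return next;
}

int main(void)
{
        int i;

        for (i = 0; i < 8; i++) {
                unsigned long ctx = get_new_mm_context();
                printf("context %#lx (hw asn %lu)\n",
                       ctx, ctx & HARDWARE_ASN_MASK);
        }
        return 0;
}
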
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
index 4695112ce..1a68d43a3 100644
--- a/include/asm-alpha/pci.h
+++ b/include/asm-alpha/pci.h
@@ -12,22 +12,7 @@
struct pci_dev;
struct pci_bus;
struct resource;
-
-/* A PCI IOMMU allocation arena. There are typically two of these
- regions per bus. */
-/* ??? The 8400 has a 32-byte pte entry, and the entire table apparently
- lives directly on the host bridge (no tlb?). We don't support this
- machine, but if we ever did, we'd need to parameterize all this quite
- a bit further. Probably with per-bus operation tables. */
-
-struct pci_iommu_arena
-{
- spinlock_t lock;
- unsigned long *ptes;
- dma_addr_t dma_base;
- unsigned int size;
- unsigned int alloc_hint;
-};
+struct pci_iommu_arena;
/* A controler. Used to manage multiple PCI busses. */
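
Replacing the full struct pci_iommu_arena definition with a bare forward declaration is the standard C opaque-type move: every includer of asm/pci.h stops depending on the arena's layout (and on spinlock_t), and only the code that actually dereferences the pointer needs the real definition. A minimal sketch of the pattern, with invented helper names:

/* what the public header exposes */
struct pci_iommu_arena;                     /* incomplete type */
struct pci_iommu_arena *arena_new(unsigned int size);
void arena_free(struct pci_iommu_arena *a);

/* what one private .c file knows */
#include <stdlib.h>

struct pci_iommu_arena {
        unsigned long *ptes;
        unsigned int size;
};

struct pci_iommu_arena *arena_new(unsigned int size)
{
        struct pci_iommu_arena *a = calloc(1, sizeof(*a));

        if (a) {
                a->size = size;
                a->ptes = calloc(size, sizeof(*a->ptes));
        }
        return a;
}

void arena_free(struct pci_iommu_arena *a)
{
        if (a)
                free(a->ptes);
        free(a);
}

Callers hold and pass the pointer freely; only this one file can change the layout, which is exactly the coupling the hunk removes from the header.
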
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
index e4689c758..f3d42ed96 100644
--- a/include/asm-alpha/pgalloc.h
+++ b/include/asm-alpha/pgalloc.h
@@ -3,50 +3,84 @@
#include <linux/config.h>
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __MMU_EXTERN_INLINE
+#endif
+
+extern void __load_new_mm_context(struct mm_struct *);
+
+
/* Caches aren't brain-dead on the Alpha. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
-/*
- * The icache is not coherent with the dcache on alpha, thus before
- * running self modified code like kernel modules we must always run
- * an imb().
- */
+
+/* Note that the following two definitions are _highly_ dependent
+ on the contexts in which they are used in the kernel. I personally
+ think it is criminal how loosely defined these macros are. */
+
+/* We need to flush the kernel's icache after loading modules. The
+ only other use of this macro is in load_aout_interp which is not
+ used on Alpha.
+
+ Note that this definition should *not* be used for userspace
+ icache flushing. While functional, it is _way_ overkill. The
+ icache is tagged with ASNs and it suffices to allocate a new ASN
+ for the process. */
#ifndef __SMP__
#define flush_icache_range(start, end) imb()
#else
#define flush_icache_range(start, end) smp_imb()
extern void smp_imb(void);
#endif
-#define flush_icache_page(vma, page) do { } while (0)
+
+/* We need to flush the userspace icache after setting breakpoints in
+ ptrace. I don't think it's needed in do_swap_page, or do_no_page,
+ but I don't know how to get rid of it either.
+
+ Instead of indiscriminately using imb, take advantage of the fact
+ that icache entries are tagged with the ASN and load a new mm context. */
+/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
+
+#ifndef __SMP__
+static inline void
+flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ if (vma->vm_flags & VM_EXEC) {
+ struct mm_struct *mm = vma->vm_mm;
+ mm->context = 0;
+ if (current->active_mm == mm)
+ __load_new_mm_context(mm);
+ }
+}
+#else
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+#endif
+
/*
* Use a few helper functions to hide the ugly broken ASN
* numbers on early Alphas (ev4 and ev45)
*/
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __MMU_EXTERN_INLINE
-#endif
-
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
+ __load_new_mm_context(mm);
tbiap();
}
__EXTERN_INLINE void
-ev4_flush_tlb_other(struct mm_struct *mm)
+ev5_flush_tlb_current(struct mm_struct *mm)
{
+ __load_new_mm_context(mm);
}
-extern void ev5_flush_tlb_current(struct mm_struct *mm);
-
-__EXTERN_INLINE void
-ev5_flush_tlb_other(struct mm_struct *mm)
+extern inline void
+flush_tlb_other(struct mm_struct *mm)
{
mm->context = 0;
}
@@ -62,7 +96,12 @@ ev4_flush_tlb_current_page(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr)
{
- tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
+ int tbi_flag = 2;
+ if (vma->vm_flags & VM_EXEC) {
+ __load_new_mm_context(mm);
+ tbi_flag = 3;
+ }
+ tbi(tbi_flag, addr);
}
__EXTERN_INLINE void
@@ -71,7 +110,7 @@ ev5_flush_tlb_current_page(struct mm_struct * mm,
unsigned long addr)
{
if (vma->vm_flags & VM_EXEC)
- ev5_flush_tlb_current(mm);
+ __load_new_mm_context(mm);
else
tbi(2, addr);
}
@@ -79,16 +118,13 @@ ev5_flush_tlb_current_page(struct mm_struct * mm,
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current alpha_mv.mv_flush_tlb_current
-# define flush_tlb_other alpha_mv.mv_flush_tlb_other
# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
# define flush_tlb_current ev4_flush_tlb_current
-# define flush_tlb_other ev4_flush_tlb_other
# define flush_tlb_current_page ev4_flush_tlb_current_page
# else
# define flush_tlb_current ev5_flush_tlb_current
-# define flush_tlb_other ev5_flush_tlb_other
# define flush_tlb_current_page ev5_flush_tlb_current_page
# endif
#endif
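
The rewritten flush_icache_page() is the key trick in this file: because Alpha icache entries are tagged with the ASN, flushing a process's icache does not need a heavyweight imb(). Zeroing mm->context invalidates the ASN, and the next context load allocates a fresh one, leaving any stale icache lines unmatchable. A userspace model of that mechanism (structures simplified, and the vma/page arguments reduced to a flag):

#include <stdio.h>

struct mm_struct { unsigned long context; };

static struct mm_struct some_mm = { .context = 42 };
static struct mm_struct *active_mm = &some_mm;

static void __load_new_mm_context(struct mm_struct *mm)
{
        static unsigned long next_asn = 100;

        mm->context = next_asn++;   /* fresh ASN: old icache tags are dead */
        printf("reloaded mm, asn now %lu\n", mm->context);
}

static void flush_icache_page(struct mm_struct *mm, int vm_exec)
{
        if (vm_exec) {
                mm->context = 0;    /* force a new ASN at next context load */
                if (active_mm == mm)
                        __load_new_mm_context(mm);
        }
}

int main(void)
{
        flush_icache_page(&some_mm, 1);  /* e.g. after a ptrace breakpoint */
        return 0;
}
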
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index ff8c6e9d1..0c5052788 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -40,6 +40,7 @@
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD ((1UL << (PAGE_SHIFT-3))-1)
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_PGD_NR 0
/* Number of pointers that fit on a page: this will go away. */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
@@ -306,4 +307,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+extern void paging_init(void);
+
#endif /* _ALPHA_PGTABLE_H */
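
The constants around the two additions are simple arithmetic given the Alpha's 8 KB pages (PAGE_SHIFT of 13) and 8-byte table entries: one page holds 2^(13-3) = 1024 pointers, and PTRS_PER_PGD holds back the top slot (reserved on Alpha for the kernel's own mapping of the page tables), giving 1023. A two-line check:

#include <stdio.h>

#define PAGE_SHIFT 13   /* Alpha: 8 KB pages */

#define PTRS_PER_PMD (1UL << (PAGE_SHIFT - 3))
#define PTRS_PER_PGD ((1UL << (PAGE_SHIFT - 3)) - 1)

int main(void)
{
        printf("PTRS_PER_PMD = %lu\n", PTRS_PER_PMD);   /* 1024 */
        printf("PTRS_PER_PGD = %lu\n", PTRS_PER_PGD);   /* 1023 */
        return 0;
}
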
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index c189b6569..12ebc5087 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -55,6 +55,8 @@ extern int __cpu_logical_map[NR_CPUS];
#define hard_smp_processor_id() __hard_smp_processor_id()
#define smp_processor_id() (current->processor)
+extern unsigned long cpu_present_mask;
+
#endif /* __SMP__ */
#define NO_PROC_ID (-1)
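
cpu_present_mask, newly exported here, is a plain unsigned long bitmask with one bit per CPU that came up, the same style of per-CPU bookkeeping the new irqs_running() in hardirq.h loops over. A sketch of how such a mask is typically walked (the mask value is invented):

#include <stdio.h>

#define NR_CPUS 64

static unsigned long cpu_present_mask = 0x5UL;  /* toy: CPUs 0 and 2 present */

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_present_mask & (1UL << cpu))
                        printf("cpu %d present\n", cpu);
        return 0;
}
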
diff --git a/include/asm-alpha/socket.h b/include/asm-alpha/socket.h
index a40786fad..f8b4154bd 100644
--- a/include/asm-alpha/socket.h
+++ b/include/asm-alpha/socket.h
@@ -43,6 +43,8 @@
#define SO_ATTACH_FILTER 26
#define SO_DETACH_FILTER 27
+#define SO_PEERNAME 28
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 19
#define SO_SECURITY_ENCRYPTION_TRANSPORT 20
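
SO_PEERNAME assigns the Alpha number for the new generic option: getsockopt() with it returns the connected peer's address, covering the same ground as getpeername(). A hedged usage sketch (modern userspace headers, error handling trimmed; a socketpair stands in for a real connection):

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int sv[2];
        struct sockaddr_storage ss;
        socklen_t len = sizeof(ss);

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
                return 1;

        if (getsockopt(sv[0], SOL_SOCKET, SO_PEERNAME, &ss, &len) == 0)
                printf("peer family %d, %u address bytes\n",
                       ss.ss_family, (unsigned) len);
        else
                perror("SO_PEERNAME");
        return 0;
}
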