Diffstat (limited to 'include/asm-ia64')
-rw-r--r-- | include/asm-ia64/asmmacro.h | 48
-rw-r--r-- | include/asm-ia64/dma.h | 2
-rw-r--r-- | include/asm-ia64/ia32.h | 19
-rw-r--r-- | include/asm-ia64/iosapic.h | 2
-rw-r--r-- | include/asm-ia64/offsets.h | 86
-rw-r--r-- | include/asm-ia64/page.h | 6
-rw-r--r-- | include/asm-ia64/pal.h | 132
-rw-r--r-- | include/asm-ia64/pci.h | 5
-rw-r--r-- | include/asm-ia64/pgtable.h | 20
-rw-r--r-- | include/asm-ia64/processor.h | 31
-rw-r--r-- | include/asm-ia64/ptrace.h | 12
-rw-r--r-- | include/asm-ia64/ptrace_offsets.h | 14
-rw-r--r-- | include/asm-ia64/sal.h | 28
-rw-r--r-- | include/asm-ia64/siginfo.h | 3
-rw-r--r-- | include/asm-ia64/spinlock.h | 92
-rw-r--r-- | include/asm-ia64/stat.h | 21
-rw-r--r-- | include/asm-ia64/string.h | 3
-rw-r--r-- | include/asm-ia64/system.h | 52
-rw-r--r-- | include/asm-ia64/unistd.h | 11
-rw-r--r-- | include/asm-ia64/unwind.h | 158
20 files changed, 548 insertions, 197 deletions
diff --git a/include/asm-ia64/asmmacro.h b/include/asm-ia64/asmmacro.h new file mode 100644 index 000000000..4991bb26e --- /dev/null +++ b/include/asm-ia64/asmmacro.h @@ -0,0 +1,48 @@ +#ifndef _ASM_IA64_ASMMACRO_H +#define _ASM_IA64_ASMMACRO_H + +/* + * Copyright (C) 2000 Hewlett-Packard Co + * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#if 1 + +/* + * This is a hack that's necessary as long as we support old versions + * of gas, that have no unwind support. + */ +#include <linux/config.h> + +#ifdef CONFIG_IA64_NEW_UNWIND +# define UNW(args...) args +#else +# define UNW(args...) +#endif + +#endif + +#define ENTRY(name) \ + .align 16; \ + .proc name; \ +name: + +#define GLOBAL_ENTRY(name) \ + .global name; \ + ENTRY(name) + +#define END(name) \ + .endp name + +/* + * Helper macros to make unwind directives more readable: + */ + +/* prologue_gr: */ +#define ASM_UNW_PRLG_RP 0x8 +#define ASM_UNW_PRLG_PFS 0x4 +#define ASM_UNW_PRLG_PSP 0x2 +#define ASM_UNW_PRLG_PR 0x1 +#define ASM_UNW_PRLG_GRSAVE(ninputs) (32+(ninputs)) + +#endif /* _ASM_IA64_ASMMACRO_H */ diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h index 4e9b30a91..1d9eb2a41 100644 --- a/include/asm-ia64/dma.h +++ b/include/asm-ia64/dma.h @@ -21,7 +21,7 @@ #define dma_inb inb #define MAX_DMA_CHANNELS 8 -#define MAX_DMA_ADDRESS (~0UL) /* no limits on DMAing, for now */ +#define MAX_DMA_ADDRESS 0xffffffffUL extern spinlock_t dma_spin_lock; diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h index 00219b00d..884f1314a 100644 --- a/include/asm-ia64/ia32.h +++ b/include/asm-ia64/ia32.h @@ -112,10 +112,16 @@ struct sigaction32 { sigset32_t sa_mask; /* A 32 bit mask */ }; +typedef struct sigaltstack_ia32 { + unsigned int ss_sp; + int ss_flags; + unsigned int ss_size; +} stack_ia32_t; + struct ucontext_ia32 { - unsigned long uc_flags; - struct ucontext_ia32 *uc_link; - stack_t uc_stack; + unsigned int uc_flags; + unsigned int uc_link; + stack_ia32_t uc_stack; struct sigcontext_ia32 uc_mcontext; sigset_t uc_sigmask; /* mask last for extensibility */ }; @@ -276,7 +282,8 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #define ELF_PLATFORM 0 #ifdef __KERNEL__ -#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) +# define SET_PERSONALITY(EX,IBCS2) \ + (current->personality = (IBCS2) ? 
PER_SVR4 : PER_LINUX) #endif #define IA32_EFLAG 0x200 @@ -342,8 +349,8 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; * IA32 floating point control registers starting values */ -#define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */ -#define IA32_FCR_DEFAULT 0x33f /* single precision, all masks */ +#define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */ +#define IA32_FCR_DEFAULT 0x17800000037fULL /* extended precision, all masks */ #define IA32_PTRACE_GETREGS 12 #define IA32_PTRACE_SETREGS 13 diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h index 95934da1e..995c948ba 100644 --- a/include/asm-ia64/iosapic.h +++ b/include/asm-ia64/iosapic.h @@ -92,7 +92,7 @@ extern struct intr_routing_entry intr_routing[]; * } */ extern unsigned int iosapic_version(unsigned long); -extern void iosapic_init(unsigned long); +extern void iosapic_init(unsigned long, int); struct iosapic_vector { unsigned long iosapic_base; /* IOSAPIC Base address */ diff --git a/include/asm-ia64/offsets.h b/include/asm-ia64/offsets.h index de309ee56..25cf32c44 100644 --- a/include/asm-ia64/offsets.h +++ b/include/asm-ia64/offsets.h @@ -25,11 +25,95 @@ #define IA64_TASK_PID_OFFSET 188 /* 0xbc */ #define IA64_TASK_MM_OFFSET 88 /* 0x58 */ #define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */ +#define IA64_PT_REGS_CR_IIP_OFFSET 8 /* 0x8 */ +#define IA64_PT_REGS_CR_IFS_OFFSET 16 /* 0x10 */ +#define IA64_PT_REGS_AR_UNAT_OFFSET 24 /* 0x18 */ +#define IA64_PT_REGS_AR_PFS_OFFSET 32 /* 0x20 */ +#define IA64_PT_REGS_AR_RSC_OFFSET 40 /* 0x28 */ +#define IA64_PT_REGS_AR_RNAT_OFFSET 48 /* 0x30 */ +#define IA64_PT_REGS_AR_BSPSTORE_OFFSET 56 /* 0x38 */ +#define IA64_PT_REGS_PR_OFFSET 64 /* 0x40 */ +#define IA64_PT_REGS_B6_OFFSET 72 /* 0x48 */ +#define IA64_PT_REGS_LOADRS_OFFSET 80 /* 0x50 */ +#define IA64_PT_REGS_R1_OFFSET 88 /* 0x58 */ +#define IA64_PT_REGS_R2_OFFSET 96 /* 0x60 */ +#define IA64_PT_REGS_R3_OFFSET 104 /* 0x68 */ #define IA64_PT_REGS_R12_OFFSET 112 /* 0x70 */ +#define IA64_PT_REGS_R13_OFFSET 120 /* 0x78 */ +#define IA64_PT_REGS_R14_OFFSET 128 /* 0x80 */ +#define IA64_PT_REGS_R15_OFFSET 136 /* 0x88 */ #define IA64_PT_REGS_R8_OFFSET 144 /* 0x90 */ +#define IA64_PT_REGS_R9_OFFSET 152 /* 0x98 */ +#define IA64_PT_REGS_R10_OFFSET 160 /* 0xa0 */ +#define IA64_PT_REGS_R11_OFFSET 168 /* 0xa8 */ #define IA64_PT_REGS_R16_OFFSET 176 /* 0xb0 */ -#define IA64_SWITCH_STACK_B0_OFFSET 464 /* 0x1d0 */ +#define IA64_PT_REGS_R17_OFFSET 184 /* 0xb8 */ +#define IA64_PT_REGS_R18_OFFSET 192 /* 0xc0 */ +#define IA64_PT_REGS_R19_OFFSET 200 /* 0xc8 */ +#define IA64_PT_REGS_R20_OFFSET 208 /* 0xd0 */ +#define IA64_PT_REGS_R21_OFFSET 216 /* 0xd8 */ +#define IA64_PT_REGS_R22_OFFSET 224 /* 0xe0 */ +#define IA64_PT_REGS_R23_OFFSET 232 /* 0xe8 */ +#define IA64_PT_REGS_R24_OFFSET 240 /* 0xf0 */ +#define IA64_PT_REGS_R25_OFFSET 248 /* 0xf8 */ +#define IA64_PT_REGS_R26_OFFSET 256 /* 0x100 */ +#define IA64_PT_REGS_R27_OFFSET 264 /* 0x108 */ +#define IA64_PT_REGS_R28_OFFSET 272 /* 0x110 */ +#define IA64_PT_REGS_R29_OFFSET 280 /* 0x118 */ +#define IA64_PT_REGS_R30_OFFSET 288 /* 0x120 */ +#define IA64_PT_REGS_R31_OFFSET 296 /* 0x128 */ +#define IA64_PT_REGS_AR_CCV_OFFSET 304 /* 0x130 */ +#define IA64_PT_REGS_AR_FPSR_OFFSET 312 /* 0x138 */ +#define IA64_PT_REGS_B0_OFFSET 320 /* 0x140 */ +#define IA64_PT_REGS_B7_OFFSET 328 /* 0x148 */ +#define IA64_PT_REGS_F6_OFFSET 336 /* 0x150 */ +#define IA64_PT_REGS_F7_OFFSET 352 /* 0x160 */ +#define IA64_PT_REGS_F8_OFFSET 368 /* 0x170 */ +#define IA64_PT_REGS_F9_OFFSET 384 /* 0x180 */ #define 
IA64_SWITCH_STACK_CALLER_UNAT_OFFSET 0 /* 0x0 */ +#define IA64_SWITCH_STACK_AR_FPSR_OFFSET 8 /* 0x8 */ +#define IA64_SWITCH_STACK_F2_OFFSET 16 /* 0x10 */ +#define IA64_SWITCH_STACK_F3_OFFSET 32 /* 0x20 */ +#define IA64_SWITCH_STACK_F4_OFFSET 48 /* 0x30 */ +#define IA64_SWITCH_STACK_F5_OFFSET 64 /* 0x40 */ +#define IA64_SWITCH_STACK_F10_OFFSET 80 /* 0x50 */ +#define IA64_SWITCH_STACK_F11_OFFSET 96 /* 0x60 */ +#define IA64_SWITCH_STACK_F12_OFFSET 112 /* 0x70 */ +#define IA64_SWITCH_STACK_F13_OFFSET 128 /* 0x80 */ +#define IA64_SWITCH_STACK_F14_OFFSET 144 /* 0x90 */ +#define IA64_SWITCH_STACK_F15_OFFSET 160 /* 0xa0 */ +#define IA64_SWITCH_STACK_F16_OFFSET 176 /* 0xb0 */ +#define IA64_SWITCH_STACK_F17_OFFSET 192 /* 0xc0 */ +#define IA64_SWITCH_STACK_F18_OFFSET 208 /* 0xd0 */ +#define IA64_SWITCH_STACK_F19_OFFSET 224 /* 0xe0 */ +#define IA64_SWITCH_STACK_F20_OFFSET 240 /* 0xf0 */ +#define IA64_SWITCH_STACK_F21_OFFSET 256 /* 0x100 */ +#define IA64_SWITCH_STACK_F22_OFFSET 272 /* 0x110 */ +#define IA64_SWITCH_STACK_F23_OFFSET 288 /* 0x120 */ +#define IA64_SWITCH_STACK_F24_OFFSET 304 /* 0x130 */ +#define IA64_SWITCH_STACK_F25_OFFSET 320 /* 0x140 */ +#define IA64_SWITCH_STACK_F26_OFFSET 336 /* 0x150 */ +#define IA64_SWITCH_STACK_F27_OFFSET 352 /* 0x160 */ +#define IA64_SWITCH_STACK_F28_OFFSET 368 /* 0x170 */ +#define IA64_SWITCH_STACK_F29_OFFSET 384 /* 0x180 */ +#define IA64_SWITCH_STACK_F30_OFFSET 400 /* 0x190 */ +#define IA64_SWITCH_STACK_F31_OFFSET 416 /* 0x1a0 */ +#define IA64_SWITCH_STACK_R4_OFFSET 432 /* 0x1b0 */ +#define IA64_SWITCH_STACK_R5_OFFSET 440 /* 0x1b8 */ +#define IA64_SWITCH_STACK_R6_OFFSET 448 /* 0x1c0 */ +#define IA64_SWITCH_STACK_R7_OFFSET 456 /* 0x1c8 */ +#define IA64_SWITCH_STACK_B0_OFFSET 464 /* 0x1d0 */ +#define IA64_SWITCH_STACK_B1_OFFSET 472 /* 0x1d8 */ +#define IA64_SWITCH_STACK_B2_OFFSET 480 /* 0x1e0 */ +#define IA64_SWITCH_STACK_B3_OFFSET 488 /* 0x1e8 */ +#define IA64_SWITCH_STACK_B4_OFFSET 496 /* 0x1f0 */ +#define IA64_SWITCH_STACK_B5_OFFSET 504 /* 0x1f8 */ +#define IA64_SWITCH_STACK_AR_PFS_OFFSET 512 /* 0x200 */ +#define IA64_SWITCH_STACK_AR_LC_OFFSET 520 /* 0x208 */ +#define IA64_SWITCH_STACK_AR_UNAT_OFFSET 528 /* 0x210 */ +#define IA64_SWITCH_STACK_AR_RNAT_OFFSET 536 /* 0x218 */ +#define IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET 544 /* 0x220 */ +#define IA64_SWITCH_STACK_PR_OFFSET 464 /* 0x1d0 */ #define IA64_SIGCONTEXT_AR_BSP_OFFSET 72 /* 0x48 */ #define IA64_SIGCONTEXT_AR_RNAT_OFFSET 80 /* 0x50 */ #define IA64_SIGCONTEXT_FLAGS_OFFSET 0 /* 0x0 */ diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 648ff8a12..445e42376 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -127,6 +127,12 @@ typedef union ia64_va { #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;}) #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;}) +#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;}) +#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) + +#define REGION_SIZE REGION_NUMBER(1) +#define REGION_KERNEL 7 + #define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0) #define PAGE_BUG(page) do { BUG(); } while (0) diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index afae33050..5169b3f82 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h @@ -4,11 +4,12 @@ /* * Processor Abstraction Layer definitions. * - * This is based on version 2.4 of the manual "Enhanced Mode Processor - * Abstraction Layer". 
+ * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0 + * chapter 11 IA-64 Processor Abstraction Layer * * Copyright (C) 1998-2000 Hewlett-Packard Co * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com> @@ -16,6 +17,8 @@ * 99/10/01 davidm Make sure we pass zero for reserved parameters. * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6. * 00/03/23 cfleck Modified processor min-state save area to match updated PAL & SAL info + * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added + * 00/05/25 eranian Support for stack calls, and statis physical calls */ /* @@ -127,8 +130,8 @@ typedef struct pal_freq_ratio { typedef union pal_cache_config_info_1_s { struct { u64 u : 1, /* 0 Unified cache ? */ - reserved : 5, /* 7-3 Reserved */ at : 2, /* 2-1 Cache mem attr*/ + reserved : 5, /* 7-3 Reserved */ associativity : 8, /* 16-8 Associativity*/ line_size : 8, /* 23-17 Line size */ stride : 8, /* 31-24 Stride */ @@ -164,8 +167,8 @@ typedef struct pal_cache_config_info_s { u64 pcci_reserved; } pal_cache_config_info_t; -#define pcci_ld_hint pcci_info_1.pcci1.load_hints -#define pcci_st_hint pcci_info_1.pcci1_bits.store_hints +#define pcci_ld_hints pcci_info_1.pcci1_bits.load_hints +#define pcci_st_hints pcci_info_1.pcci1_bits.store_hints #define pcci_ld_latency pcci_info_1.pcci1_bits.load_latency #define pcci_st_latency pcci_info_1.pcci1_bits.store_latency #define pcci_stride pcci_info_1.pcci1_bits.stride @@ -640,23 +643,13 @@ struct ia64_pal_retval { * (generally 0) MUST be passed. Reserved parameters are not optional * parameters. */ -#ifdef __GCC_MULTIREG_RETVALS__ - extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64); - /* - * If multi-register return values are returned according to the - * ia-64 calling convention, we can call ia64_pal_call_static - * directly. - */ -# define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0,a1, a2, a3) -#else - extern void ia64_pal_call_static (struct ia64_pal_retval *, u64, u64, u64, u64); - /* - * If multi-register return values are returned through an aggregate - * allocated in the caller, we need to use the stub implemented in - * sal-stub.S. 
- */ -# define PAL_CALL(iprv,a0,a1,a2,a3) ia64_pal_call_static(&iprv, a0, a1, a2, a3) -#endif +extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64); +extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64); +extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64); + +#define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0, a1, a2, a3) +#define PAL_CALL_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_stacked(a0, a1, a2, a3) +#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_static(a0, a1, a2, a3) typedef int (*ia64_pal_handler) (u64, ...); extern ia64_pal_handler ia64_pal; @@ -716,7 +709,7 @@ ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail, pal_bus_features_u_t *features_control) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0); + PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0); if (features_avail) features_avail->pal_bus_features_val = iprv.v0; if (features_status) @@ -725,15 +718,54 @@ ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail, features_control->pal_bus_features_val = iprv.v2; return iprv.status; } + /* Enables/disables specific processor bus features */ extern inline s64 ia64_pal_bus_set_features (pal_bus_features_u_t feature_select) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0); + PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0); return iprv.status; } +/* Get detailed cache information */ +extern inline s64 +ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf) +{ + struct ia64_pal_retval iprv; + + PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0); + + if (iprv.status == 0) { + conf->pcci_status = iprv.status; + conf->pcci_info_1.pcci1_data = iprv.v0; + conf->pcci_info_2.pcci2_data = iprv.v1; + conf->pcci_reserved = iprv.v2; + } + return iprv.status; + +} + +/* Get detailed cche protection information */ +extern inline s64 +ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot) +{ + struct ia64_pal_retval iprv; + + PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0); + + if (iprv.status == 0) { + prot->pcpi_status = iprv.status; + prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff; + prot->pcp_info[1].pcpi_data = iprv.v0 >> 32; + prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff; + prot->pcp_info[3].pcpi_data = iprv.v1 >> 32; + prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff; + prot->pcp_info[5].pcpi_data = iprv.v2 >> 32; + } + return iprv.status; +} + /* * Flush the processor instruction or data caches. *PROGRESS must be * initialized to zero before calling this for the first time.. @@ -909,16 +941,19 @@ typedef union pal_power_mgmt_info_u { struct { u64 exit_latency : 16, entry_latency : 16, - power_consumption : 32; + power_consumption : 28, + im : 1, + co : 1, + reserved : 2; } pal_power_mgmt_info_s; } pal_power_mgmt_info_u_t; /* Return information about processor's optional power management capabilities. 
*/ extern inline s64 ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf) -{ +{ struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0); + PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0); return iprv.status; } @@ -1027,7 +1062,7 @@ ia64_pal_mem_attrib (u64 *mem_attrib) struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0); if (mem_attrib) - *mem_attrib = iprv.v0; + *mem_attrib = iprv.v0 & 0xff; return iprv.status; } @@ -1090,28 +1125,32 @@ ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr) return iprv.status; } -#ifdef TBD struct pal_features_s; /* Provide information about configurable processor features */ extern inline s64 -ia64_pal_proc_get_features (struct pal_features_s *features_avail, - struct pal_features_s *features_status, - struct pal_features_s *features_control) +ia64_pal_proc_get_features (u64 *features_avail, + u64 *features_status, + u64 *features_control) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0); + PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0); + if (iprv.status == 0) { + *features_avail = iprv.v0; + *features_status = iprv.v1; + *features_control = iprv.v2; + } return iprv.status; } + /* Enable/disable processor dependent features */ extern inline s64 -ia64_pal_proc_set_features (feature_select) +ia64_pal_proc_set_features (u64 feature_select) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0); + PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0); return iprv.status; } -#endif /* * Put everything in a struct so we avoid the global offset table whenever * possible. @@ -1220,12 +1259,16 @@ typedef union pal_version_u { /* Return PAL version information */ extern inline s64 -ia64_pal_version (pal_version_u_t *pal_version) +ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version) { struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_VERSION, 0, 0, 0); - if (pal_version) - pal_version->pal_version_val = iprv.v0; + if (pal_min_version) + pal_min_version->pal_version_val = iprv.v0; + + if (pal_cur_version) + pal_cur_version->pal_version_val = iprv.v1; + return iprv.status; } @@ -1242,7 +1285,14 @@ typedef union pal_tc_info_u { } pal_tc_info_s; } pal_tc_info_u_t; - +#define tc_reduce_tr pal_tc_info_s.reduce_tr +#define tc_unified pal_tc_info_s.unified +#define tc_pf pal_tc_info_s.pf +#define tc_num_entries pal_tc_info_s.num_entries +#define tc_associativity pal_tc_info_s.associativity +#define tc_num_sets pal_tc_info_s.num_sets + + /* Return information about the virtual memory characteristics of the processor * implementation. */ @@ -1278,7 +1328,7 @@ typedef union pal_vm_info_1_u { struct { u64 vw : 1, phys_add_size : 7, - key_size : 16, + key_size : 8, max_pkr : 8, hash_tag_id : 8, max_dtr_entry : 8, diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h index bbabe63e8..0c40b0e6b 100644 --- a/include/asm-ia64/pci.h +++ b/include/asm-ia64/pci.h @@ -16,6 +16,11 @@ extern inline void pcibios_set_master(struct pci_dev *dev) /* No special bus mastering setup handling */ } +extern inline void pcibios_penalize_isa_irq(int irq) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + /* * Dynamic DMA mapping API. * IA-64 has everything mapped statically. 
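The pal.h changes above replace the old __GCC_MULTIREG_RETVALS__ stubs with three call paths (PAL_CALL, PAL_CALL_STK, PAL_CALL_PHYS) and rework several wrappers; in particular ia64_pal_version() now returns both the minimum and the current PAL version words. A minimal caller sketch, for illustration only (the function name and printk format are hypothetical, not part of the patch):

/* Hypothetical example: report the two PAL version words returned by the
 * reworked ia64_pal_version(). */
static void
print_pal_version (void)
{
	pal_version_u_t min_ver, cur_ver;

	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
		return;		/* non-zero status: the PAL call failed */
	printk("PAL version: min=0x%lx current=0x%lx\n",
	       min_ver.pal_version_val, cur_ver.pal_version_val);
}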
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index a7f5ceb56..ce0dea3fe 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h @@ -12,27 +12,18 @@ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com> */ -#include <linux/config.h> - #include <asm/mman.h> #include <asm/page.h> +#include <asm/processor.h> #include <asm/types.h> -/* Size of virtuaql and physical address spaces: */ -#ifdef CONFIG_ITANIUM -# define IA64_IMPL_VA_MSB 50 -# define IA64_PHYS_BITS 44 /* Itanium PRM defines 44 bits of ppn */ -#else -# define IA64_IMPL_VA_MSB 60 /* maximum value (bits 61-63 are region bits) */ -# define IA64_PHYS_BITS 50 /* EAS2.6 allows up to 50 bits of ppn */ -#endif -#define IA64_PHYS_SIZE (__IA64_UL(1) << IA64_PHYS_BITS) +#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */ /* Is ADDR a valid kernel address? */ #define kern_addr_valid(addr) ((addr) >= TASK_SIZE) /* Is ADDR a valid physical address? */ -#define phys_addr_valid(addr) ((addr) < IA64_PHYS_SIZE) +#define phys_addr_valid(addr) (((addr) & my_cpu_data.unimpl_pa_mask) == 0) /* * First, define the various bits in a PTE. Note that the PTE format @@ -63,7 +54,7 @@ #define _PAGE_AR_SHIFT 9 #define _PAGE_A (1 << 5) /* page accessed bit */ #define _PAGE_D (1 << 6) /* page dirty bit */ -#define _PAGE_PPN_MASK ((IA64_PHYS_SIZE - 1) & ~0xfffUL) +#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL) #define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */ #define _PAGE_PROTNONE (__IA64_UL(1) << 63) @@ -120,7 +111,6 @@ #include <asm/bitops.h> #include <asm/mmu_context.h> -#include <asm/processor.h> #include <asm/system.h> /* @@ -133,7 +123,7 @@ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) -#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RW) +#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) /* * Next come the mappings that determine how mmap() protection bits diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index d702b7f0c..fa3721bde 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -10,6 +10,7 @@ * * 11/24/98 S.Eranian added ia64_set_iva() * 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API + * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support */ #include <linux/config.h> @@ -237,6 +238,8 @@ struct cpuinfo_ia64 { __u64 proc_freq; /* frequency of processor */ __u64 cyc_per_usec; /* itc_freq/1000000 */ __u64 usec_per_cyc; /* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */ + __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */ + __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */ #ifdef CONFIG_SMP __u64 loops_per_sec; __u64 ipi_count; @@ -264,7 +267,8 @@ typedef struct { #define SET_UNALIGN_CTL(task,value) \ ({ \ - (task)->thread.flags |= ((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK; \ + (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \ + | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \ 0; \ }) #define GET_UNALIGN_CTL(task,addr) \ @@ -288,10 +292,13 @@ struct thread_struct { __u64 fcr; /* IA32 floating pt control reg */ __u64 fir; /* IA32 fp except. instr. reg */ __u64 fdr; /* IA32 fp except. 
data reg */ + __u64 csd; /* IA32 code selector descriptor */ + __u64 ssd; /* IA32 stack selector descriptor */ + __u64 tssd; /* IA32 TSS descriptor */ union { __u64 sigmask; /* aligned mask for sigsuspend scall */ } un; -# define INIT_THREAD_IA32 , 0, 0, 0, 0, 0, {0} +# define INIT_THREAD_IA32 , 0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, {0} #else # define INIT_THREAD_IA32 #endif /* CONFIG_IA32_SUPPORT */ @@ -318,6 +325,7 @@ struct thread_struct { set_fs(USER_DS); \ ia64_psr(regs)->cpl = 3; /* set user mode */ \ ia64_psr(regs)->ri = 0; /* clear return slot number */ \ + ia64_psr(regs)->is = 0; /* IA-64 instruction set */ \ regs->cr_iip = new_ip; \ regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ @@ -436,6 +444,14 @@ ia64_srlz_d (void) __asm__ __volatile__ (";; srlz.d" ::: "memory"); } +extern inline __u64 +ia64_get_rr (__u64 reg_bits) +{ + __u64 r; + __asm__ __volatile__ ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory"); + return r; +} + extern inline void ia64_set_rr (__u64 reg_bits, __u64 rr_val) { @@ -645,14 +661,17 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat) extern inline unsigned long thread_saved_pc (struct thread_struct *t) { - struct ia64_frame_info info; + struct unw_frame_info info; + unsigned long ip; + /* XXX ouch: Linus, please pass the task pointer to thread_saved_pc() instead! */ struct task_struct *p = (void *) ((unsigned long) t - IA64_TASK_THREAD_OFFSET); - ia64_unwind_init_from_blocked_task(&info, p); - if (ia64_unwind_to_previous_frame(&info) < 0) + unw_init_from_blocked_task(&info, p); + if (unw_unwind(&info) < 0) return 0; - return ia64_unwind_get_ip(&info); + unw_get_ip(&info, &ip); + return ip; } /* diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h index caae43a3d..b71acee5f 100644 --- a/include/asm-ia64/ptrace.h +++ b/include/asm-ia64/ptrace.h @@ -136,8 +136,8 @@ struct pt_regs { unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ - unsigned long ar_ccv; /* compare/exchange value */ - unsigned long ar_fpsr; /* floating point status*/ + unsigned long ar_ccv; /* compare/exchange value (scratch) */ + unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long b0; /* return pointer (bp) */ unsigned long b7; /* scratch */ @@ -219,11 +219,19 @@ struct switch_stack { extern void show_regs (struct pt_regs *); extern long ia64_peek (struct pt_regs *, struct task_struct *, unsigned long addr, long *val); extern long ia64_poke (struct pt_regs *, struct task_struct *, unsigned long addr, long val); + extern void ia64_sync_fph (struct task_struct *t); +#ifdef CONFIG_IA64_NEW_UNWIND + /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */ + extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat); + /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */ + extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat); +#else /* get nat bits for r1-r31 such that bit N==1 iff rN is a NaT */ extern long ia64_get_nat_bits (struct pt_regs *pt, struct switch_stack *sw); /* put nat bits for r1-r31 such that rN is a NaT iff bit N==1 */ extern void ia64_put_nat_bits (struct pt_regs *pt, struct switch_stack *sw, unsigned long nat); +#endif extern void ia64_increment_ip (struct pt_regs *pt); extern void ia64_decrement_ip (struct pt_regs *pt); diff --git a/include/asm-ia64/ptrace_offsets.h 
b/include/asm-ia64/ptrace_offsets.h index fbbe9bff4..b32b6c89d 100644 --- a/include/asm-ia64/ptrace_offsets.h +++ b/include/asm-ia64/ptrace_offsets.h @@ -118,8 +118,8 @@ #define PT_F126 0x05e0 #define PT_F127 0x05f0 /* switch stack: */ -#define PT_CALLER_UNAT 0x0600 -#define PT_KERNEL_FPSR 0x0608 +#define PT_NAT_BITS 0x0600 + #define PT_F2 0x0610 #define PT_F3 0x0620 #define PT_F4 0x0630 @@ -150,23 +150,19 @@ #define PT_R5 0x07b8 #define PT_R6 0x07c0 #define PT_R7 0x07c8 -#define PT_K_B0 0x07d0 + #define PT_B1 0x07d8 #define PT_B2 0x07e0 #define PT_B3 0x07e8 #define PT_B4 0x07f0 #define PT_B5 0x07f8 -#define PT_K_AR_PFS 0x0800 + #define PT_AR_LC 0x0808 -#define PT_K_AR_UNAT 0x0810 -#define PT_K_AR_RNAT 0x0818 -#define PT_K_AR_BSPSTORE 0x0820 -#define PT_K_PR 0x0828 + /* pt_regs */ #define PT_CR_IPSR 0x0830 #define PT_CR_IIP 0x0838 #define PT_CFM 0x0840 -#define PT_CR_IFS PT_CFM /* Use of PT_CR_IFS is deprecated */ #define PT_AR_UNAT 0x0848 #define PT_AR_PFS 0x0850 #define PT_AR_RSC 0x0858 diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h index 5fadec55f..06096644b 100644 --- a/include/asm-ia64/sal.h +++ b/include/asm-ia64/sal.h @@ -23,17 +23,7 @@ extern spinlock_t sal_lock; -#ifdef __GCC_MULTIREG_RETVALS__ - /* If multi-register return values are returned according to the - ia-64 calling convention, we can call ia64_sal directly. */ -# define __SAL_CALL(result,args...) result = (*ia64_sal)(args) -#else - /* If multi-register return values are returned through an aggregate - allocated in the caller, we need to use the stub implemented in - sal-stub.S. */ - extern struct ia64_sal_retval ia64_sal_stub (u64 index, ...); -# define __SAL_CALL(result,args...) result = ia64_sal_stub(args) -#endif +#define __SAL_CALL(result,args...) result = (*ia64_sal)(args) #ifdef CONFIG_SMP # define SAL_CALL(result,args...) do { \ @@ -494,7 +484,19 @@ extern inline s64 ia64_sal_pci_config_read (u64 pci_config_addr, u64 size, u64 *value) { struct ia64_sal_retval isrv; +#ifdef CONFIG_ITANIUM_A1_SPECIFIC + extern spinlock_t ivr_read_lock; + unsigned long flags; + + /* + * Avoid PCI configuration read/write overwrite -- A0 Interrupt loss workaround + */ + spin_lock_irqsave(&ivr_read_lock, flags); +#endif SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size); +#ifdef CONFIG_ITANIUM_A1_SPECIFIC + spin_unlock_irqrestore(&ivr_read_lock, flags); +#endif if (value) *value = isrv.v0; return isrv.status; @@ -505,7 +507,7 @@ extern inline s64 ia64_sal_pci_config_write (u64 pci_config_addr, u64 size, u64 value) { struct ia64_sal_retval isrv; -#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) && !defined(SAPIC_FIXED) +#ifdef CONFIG_ITANIUM_A1_SPECIFIC extern spinlock_t ivr_read_lock; unsigned long flags; @@ -515,7 +517,7 @@ ia64_sal_pci_config_write (u64 pci_config_addr, u64 size, u64 value) spin_lock_irqsave(&ivr_read_lock, flags); #endif SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value); -#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) && !defined(SAPIC_FIXED) +#ifdef CONFIG_ITANIUM_A1_SPECIFIC spin_unlock_irqrestore(&ivr_read_lock, flags); #endif return isrv.status; diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h index f997b468d..7222fb285 100644 --- a/include/asm-ia64/siginfo.h +++ b/include/asm-ia64/siginfo.h @@ -56,6 +56,8 @@ typedef struct siginfo { struct { void *_addr; /* faulting insn/memory ref. 
*/ int _imm; /* immediate value for "break" */ + int _pad0; + unsigned long _isr; /* isr */ } _sigfault; /* SIGPOLL */ @@ -79,6 +81,7 @@ typedef struct siginfo { #define si_ptr _sifields._rt._sigval.sival_ptr #define si_addr _sifields._sigfault._addr #define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */ +#define si_isr _sifields._sigfault._isr /* valid if si_code==FPE_FLTxxx */ #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h index 97a9511e8..fedd8f8c6 100644 --- a/include/asm-ia64/spinlock.h +++ b/include/asm-ia64/spinlock.h @@ -9,6 +9,8 @@ * This file is used for SMP configurations only. */ +#include <linux/kernel.h> + #include <asm/system.h> #include <asm/bitops.h> #include <asm/atomic.h> @@ -40,7 +42,7 @@ typedef struct { "cmp4.eq p0,p7 = r0, r2\n" \ "(p7) br.cond.spnt.few 1b\n" \ ";;\n" \ - :: "m" __atomic_fool_gcc((x)) : "r2", "r29") + :: "m" __atomic_fool_gcc((x)) : "r2", "r29", "memory") #else #define spin_lock(x) \ @@ -55,22 +57,12 @@ typedef struct { #define spin_is_locked(x) ((x)->lock != 0) -#define spin_unlock(x) (((spinlock_t *) x)->lock = 0) +#define spin_unlock(x) ({((spinlock_t *) x)->lock = 0; barrier();}) /* Streamlined !test_and_set_bit(0, (x)) */ -#define spin_trylock(x) \ -({ \ - spinlock_t *__x = (x); \ - __u32 old; \ - \ - do { \ - old = __x->lock; \ - } while (cmpxchg_acq(&__x->lock, old, 1) != old); \ - old == 0; \ -}) - -#define spin_unlock_wait(x) \ - ({ do { barrier(); } while(((volatile spinlock_t *)x)->lock); }) +#define spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) + +#define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); }) typedef struct { volatile int read_counter:31; @@ -78,45 +70,49 @@ typedef struct { } rwlock_t; #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } -#define read_lock(rw) \ -do { \ - int tmp = 0; \ - __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = %1, 1\n" \ - ";;\n" \ - "tbit.nz p6,p0 = %0, 31\n" \ - "(p6) br.cond.sptk.few 2f\n" \ - ".section .text.lock,\"ax\"\n" \ - "2:\tfetchadd4.rel %0 = %1, -1\n" \ - ";;\n" \ - "3:\tld4.acq %0 = %1\n" \ - ";;\n" \ - "tbit.nz p6,p0 = %0, 31\n" \ - "(p6) br.cond.sptk.few 3b\n" \ - "br.cond.sptk.few 1b\n" \ - ";;\n" \ - ".previous\n": "=r" (tmp), "=m" (__atomic_fool_gcc(rw))); \ +#define read_lock(rw) \ +do { \ + int tmp = 0; \ + __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = %1, 1\n" \ + ";;\n" \ + "tbit.nz p6,p0 = %0, 31\n" \ + "(p6) br.cond.sptk.few 2f\n" \ + ".section .text.lock,\"ax\"\n" \ + "2:\tfetchadd4.rel %0 = %1, -1\n" \ + ";;\n" \ + "3:\tld4.acq %0 = %1\n" \ + ";;\n" \ + "tbit.nz p6,p0 = %0, 31\n" \ + "(p6) br.cond.sptk.few 3b\n" \ + "br.cond.sptk.few 1b\n" \ + ";;\n" \ + ".previous\n" \ + : "=r" (tmp), "=m" (__atomic_fool_gcc(rw)) \ + :: "memory"); \ } while(0) -#define read_unlock(rw) \ -do { \ - int tmp = 0; \ - __asm__ __volatile__ ("fetchadd4.rel %0 = %1, -1\n" \ - : "=r" (tmp) : "m" (__atomic_fool_gcc(rw))); \ +#define read_unlock(rw) \ +do { \ + int tmp = 0; \ + __asm__ __volatile__ ("fetchadd4.rel %0 = %1, -1\n" \ + : "=r" (tmp) \ + : "m" (__atomic_fool_gcc(rw)) \ + : "memory"); \ } while(0) #define write_lock(rw) \ -while(1) { \ +do { \ do { \ - } while (!test_and_set_bit(31, (rw))); \ - if ((rw)->read_counter) { \ - clear_bit(31, (rw)); \ - while ((rw)->read_counter) \ - ; \ - } else { \ - break; \ - } \ -} + while ((rw)->write_lock); \ + } while (test_and_set_bit(31, (rw))); \ + while ((rw)->read_counter); \ + barrier(); \ +} while (0) -#define 
write_unlock(x) (clear_bit(31, (x))) +/* + * clear_bit() has "acq" semantics; we're really need "rel" semantics, + * but for simplicity, we simply do a fence for now... + */ +#define write_unlock(x) ({clear_bit(31, (x)); mb();}) #endif /* _ASM_IA64_SPINLOCK_H */ diff --git a/include/asm-ia64/stat.h b/include/asm-ia64/stat.h index c261a337e..b9dd64bd5 100644 --- a/include/asm-ia64/stat.h +++ b/include/asm-ia64/stat.h @@ -7,6 +7,27 @@ */ struct stat { + unsigned long st_dev; + unsigned long st_ino; + unsigned long st_nlink; + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad0; + unsigned long st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long __reserved0; /* reserved for atime.nanoseconds */ + unsigned long st_mtime; + unsigned long __reserved1; /* reserved for mtime.nanoseconds */ + unsigned long st_ctime; + unsigned long __reserved2; /* reserved for ctime.nanoseconds */ + unsigned long st_blksize; + long st_blocks; + unsigned long __unused[3]; +}; + +struct ia64_oldstat { unsigned int st_dev; unsigned int st_ino; unsigned int st_mode; diff --git a/include/asm-ia64/string.h b/include/asm-ia64/string.h index 09a99daf4..2b7292067 100644 --- a/include/asm-ia64/string.h +++ b/include/asm-ia64/string.h @@ -12,4 +12,7 @@ #define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */ #define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */ +extern __kernel_size_t strlen (const char *); +extern void *memset (void *,int,__kernel_size_t); + #endif /* _ASM_IA64_STRING_H */ diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index a7ba6daf7..689be6df6 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -33,9 +33,9 @@ struct pci_vector_struct { __u16 bus; /* PCI Bus number */ - __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ - __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ - __u8 irq; /* IRQ assigned */ + __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ + __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ + __u8 irq; /* IRQ assigned */ }; extern struct ia64_boot_param { @@ -54,6 +54,8 @@ extern struct ia64_boot_param { __u16 num_pci_vectors; /* number of ACPI derived PCI IRQ's*/ __u64 pci_vectors; /* physical address of PCI data (pci_vector_struct)*/ __u64 fpswa; /* physical address of the the fpswa interface */ + __u64 initrd_start; + __u64 initrd_size; } ia64_boot_param; extern inline void @@ -135,7 +137,7 @@ do { \ do { \ unsigned long ip, old_psr, psr = (x); \ \ - __asm__ __volatile__ ("mov %0=psr; mov psr.l=%1;; srlz.d" \ + __asm__ __volatile__ (";;mov %0=psr; mov psr.l=%1;; srlz.d" \ : "=&r" (old_psr) : "r" (psr) : "memory"); \ if ((old_psr & (1UL << 14)) && !(psr & (1UL << 14))) { \ __asm__ ("mov %0=ip" : "=r"(ip)); \ @@ -149,7 +151,7 @@ do { \ : "=r" (x) :: "memory") # define local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory") /* (potentially) setting psr.i requires data serialization: */ -# define local_irq_restore(x) __asm__ __volatile__ ("mov psr.l=%0;; srlz.d" \ +# define local_irq_restore(x) __asm__ __volatile__ (";; mov psr.l=%0;; srlz.d" \ :: "r" (x) : "memory") #endif /* !CONFIG_IA64_DEBUG_IRQ */ @@ -394,32 +396,13 @@ struct __xchg_dummy { unsigned long a[100]; }; #ifdef __KERNEL__ -extern void ia64_save_debug_regs (unsigned long *save_area); -extern void ia64_load_debug_regs (unsigned long *save_area); - #define prepare_to_switch() do { } while(0) #ifdef CONFIG_IA32_SUPPORT # define 
IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0) -# define IA32_STATE(prev,next) \ - if (IS_IA32_PROCESS(ia64_task_regs(prev))) { \ - __asm__ __volatile__("mov %0=ar.eflag":"=r"((prev)->thread.eflag)); \ - __asm__ __volatile__("mov %0=ar.fsr":"=r"((prev)->thread.fsr)); \ - __asm__ __volatile__("mov %0=ar.fcr":"=r"((prev)->thread.fcr)); \ - __asm__ __volatile__("mov %0=ar.fir":"=r"((prev)->thread.fir)); \ - __asm__ __volatile__("mov %0=ar.fdr":"=r"((prev)->thread.fdr)); \ - } \ - if (IS_IA32_PROCESS(ia64_task_regs(next))) { \ - __asm__ __volatile__("mov ar.eflag=%0"::"r"((next)->thread.eflag)); \ - __asm__ __volatile__("mov ar.fsr=%0"::"r"((next)->thread.fsr)); \ - __asm__ __volatile__("mov ar.fcr=%0"::"r"((next)->thread.fcr)); \ - __asm__ __volatile__("mov ar.fir=%0"::"r"((next)->thread.fir)); \ - __asm__ __volatile__("mov ar.fdr=%0"::"r"((next)->thread.fdr)); \ - } -#else /* !CONFIG_IA32_SUPPORT */ -# define IA32_STATE(prev,next) +#else # define IS_IA32_PROCESS(regs) 0 -#endif /* CONFIG_IA32_SUPPORT */ +#endif /* * Context switch from one thread to another. If the two threads have @@ -432,15 +415,18 @@ extern void ia64_load_debug_regs (unsigned long *save_area); * ia64_ret_from_syscall_clear_r8. */ extern struct task_struct *ia64_switch_to (void *next_task); + +extern void ia64_save_extra (struct task_struct *task); +extern void ia64_load_extra (struct task_struct *task); + #define __switch_to(prev,next,last) do { \ + if (((prev)->thread.flags & IA64_THREAD_DBG_VALID) \ + || IS_IA32_PROCESS(ia64_task_regs(prev))) \ + ia64_save_extra(prev); \ + if (((next)->thread.flags & IA64_THREAD_DBG_VALID) \ + || IS_IA32_PROCESS(ia64_task_regs(next))) \ + ia64_load_extra(next); \ ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \ - if ((prev)->thread.flags & IA64_THREAD_DBG_VALID) { \ - ia64_save_debug_regs(&(prev)->thread.dbr[0]); \ - } \ - if ((next)->thread.flags & IA64_THREAD_DBG_VALID) { \ - ia64_load_debug_regs(&(next)->thread.dbr[0]); \ - } \ - IA32_STATE(prev,next); \ (last) = ia64_switch_to((next)); \ } while (0) diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index 5be533112..41ffaaf2d 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h @@ -109,9 +109,9 @@ #define __NR_syslog 1117 #define __NR_setitimer 1118 #define __NR_getitimer 1119 -#define __NR_stat 1120 -#define __NR_lstat 1121 -#define __NR_fstat 1122 +#define __NR_old_stat 1120 +#define __NR_old_lstat 1121 +#define __NR_old_fstat 1122 #define __NR_vhangup 1123 #define __NR_lchown 1124 #define __NR_vm86 1125 @@ -199,6 +199,9 @@ #define __NR_sys_pivot_root 1207 #define __NR_mincore 1208 #define __NR_madvise 1209 +#define __NR_stat 1210 +#define __NR_lstat 1211 +#define __NR_fstat 1212 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) @@ -269,7 +272,7 @@ type \ name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ { \ return __ia64_syscall((long) arg1, (long) arg2, (long) arg3, \ - (long) arg4, (long), __NR_##name); \ + (long) arg4, (long) arg5, __NR_##name); \ } #ifdef __KERNEL_SYSCALLS__ diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h index 038edb798..60bb46cf9 100644 --- a/include/asm-ia64/unwind.h +++ b/include/asm-ia64/unwind.h @@ -2,8 +2,8 @@ #define _ASM_IA64_UNWIND_H /* - * Copyright (C) 1999 Hewlett-Packard Co - * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 1999-2000 Hewlett-Packard Co + * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com> * * A simple API for unwinding kernel stacks. 
This is used for * debugging and error reporting purposes. The kernel doesn't need @@ -16,27 +16,72 @@ struct task_struct; /* forward declaration */ struct switch_stack; /* forward declaration */ +enum unw_application_register { + UNW_AR_BSP, + UNW_AR_BSPSTORE, + UNW_AR_PFS, + UNW_AR_RNAT, + UNW_AR_UNAT, + UNW_AR_LC, + UNW_AR_EC, + UNW_AR_FPSR, + UNW_AR_RSC, + UNW_AR_CCV +}; + /* * The following declarations are private to the unwind * implementation: */ -struct ia64_stack { - unsigned long *limit; - unsigned long *top; +struct unw_stack { + unsigned long limit; + unsigned long top; }; +#define UNW_FLAG_INTERRUPT_FRAME (1UL << 0) + /* * No user of this module should every access this structure directly * as it is subject to change. It is declared here solely so we can * use automatic variables. */ -struct ia64_frame_info { - struct ia64_stack regstk; - unsigned long *bsp; - unsigned long top_rnat; /* RSE NaT collection at top of backing store */ - unsigned long cfm; +struct unw_frame_info { + struct unw_stack regstk; + struct unw_stack memstk; + unsigned int flags; + short hint; + short prev_script; + unsigned long bsp; + unsigned long sp; /* stack pointer */ + unsigned long psp; /* previous sp */ unsigned long ip; /* instruction pointer */ + unsigned long pr_val; /* current predicates */ + unsigned long *cfm; + + struct task_struct *task; + struct switch_stack *sw; + + /* preserved state: */ + unsigned long *pbsp; /* previous bsp */ + unsigned long *bspstore; + unsigned long *pfs; + unsigned long *rnat; + unsigned long *rp; + unsigned long *pri_unat; + unsigned long *unat; + unsigned long *pr; + unsigned long *lc; + unsigned long *fpsr; + struct unw_ireg { + unsigned long *loc; + struct unw_ireg_nat { + int type : 3; /* enum unw_nat_type */ + signed int off; /* NaT word is at loc+nat.off */ + } nat; + } r4, r5, r6, r7; + unsigned long *b1, *b2, *b3, *b4, *b5; + struct ia64_fpreg *f2, *f3, *f4, *f5, *fr[16]; }; /* @@ -44,10 +89,22 @@ struct ia64_frame_info { */ /* + * Initialize unwind support. + */ +extern void unw_init (void); + +extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp, + void *table_start, void *table_end); + +extern void unw_remove_unwind_table (void *handle); + +/* * Prepare to unwind blocked task t. */ -extern void ia64_unwind_init_from_blocked_task (struct ia64_frame_info *info, - struct task_struct *t); +extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t); + +extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, + struct switch_stack *sw); /* * Prepare to unwind the current task. For this to work, the kernel @@ -63,15 +120,82 @@ extern void ia64_unwind_init_from_blocked_task (struct ia64_frame_info *info, * | struct switch_stack | * +---------------------+ */ -extern void ia64_unwind_init_from_current (struct ia64_frame_info *info, struct pt_regs *regs); +extern void unw_init_from_current (struct unw_frame_info *info, struct pt_regs *regs); + +/* + * Prepare to unwind the currently running thread. + */ +extern void unw_init_running (void (*callback)(struct unw_frame_info *info, void *arg), void *arg); /* * Unwind to previous to frame. Returns 0 if successful, negative * number in case of an error. */ -extern int ia64_unwind_to_previous_frame (struct ia64_frame_info *info); +extern int unw_unwind (struct unw_frame_info *info); + +/* + * Unwind until the return pointer is in user-land (or until an error + * occurs). 
Returns 0 if successful, negative number in case of + * error. + */ +extern int unw_unwind_to_user (struct unw_frame_info *info); + +#define unw_get_ip(info,vp) ({*(vp) = (info)->ip; 0;}) +#define unw_get_sp(info,vp) ({*(vp) = (unsigned long) (info)->sp; 0;}) +#define unw_get_psp(info,vp) ({*(vp) = (unsigned long) (info)->psp; 0;}) +#define unw_get_bsp(info,vp) ({*(vp) = (unsigned long) (info)->bsp; 0;}) +#define unw_get_cfm(info,vp) ({*(vp) = *(info)->cfm; 0;}) +#define unw_set_cfm(info,val) ({*(info)->cfm = (val); 0;}) + +static inline int +unw_get_rp (struct unw_frame_info *info, unsigned long *val) +{ + if (!info->rp) + return -1; + *val = *info->rp; + return 0; +} + +extern int unw_access_gr (struct unw_frame_info *, int, unsigned long *, char *, int); +extern int unw_access_br (struct unw_frame_info *, int, unsigned long *, int); +extern int unw_access_fr (struct unw_frame_info *, int, struct ia64_fpreg *, int); +extern int unw_access_ar (struct unw_frame_info *, int, unsigned long *, int); +extern int unw_access_pr (struct unw_frame_info *, unsigned long *, int); + +static inline int +unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat) +{ + return unw_access_gr(i, n, &v, &nat, 1); +} + +static inline int +unw_set_br (struct unw_frame_info *i, int n, unsigned long v) +{ + return unw_access_br(i, n, &v, 1); +} + +static inline int +unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v) +{ + return unw_access_fr(i, n, &v, 1); +} + +static inline int +unw_set_ar (struct unw_frame_info *i, int n, unsigned long v) +{ + return unw_access_ar(i, n, &v, 1); +} + +static inline int +unw_set_pr (struct unw_frame_info *i, unsigned long v) +{ + return unw_access_pr(i, &v, 1); +} -#define ia64_unwind_get_ip(info) ((info)->ip) -#define ia64_unwind_get_bsp(info) ((unsigned long) (info)->bsp) +#define unw_get_gr(i,n,v,nat) unw_access_gr(i,n,v,nat,0) +#define unw_get_br(i,n,v) unw_access_br(i,n,v,0) +#define unw_get_fr(i,n,v) unw_access_fr(i,n,v,0) +#define unw_get_ar(i,n,v) unw_access_ar(i,n,v,0) +#define unw_get_pr(i,v) unw_access_pr(i,v,0) -#endif /* _ASM_IA64_UNWIND_H */ +#endif /* _ASM_UNWIND_H */ |
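The renamed unwind API is used the same way as the updated thread_saved_pc() in processor.h above: initialize an unw_frame_info for a blocked task, step frames with unw_unwind(), and read state through the accessors. A short sketch assuming the usual kernel headers; the function name is hypothetical:

/* Hypothetical example: print the kernel backtrace of a blocked task using
 * the unw_* API declared above. */
static void
print_blocked_backtrace (struct task_struct *task)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, task);
	do {
		unw_get_ip(&info, &ip);		/* stores the frame's ip, returns 0 */
		if (!ip)
			break;
		printk(" [<%016lx>]\n", ip);
	} while (unw_unwind(&info) >= 0);	/* stop when unwinding reports an error */
}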