author     Michael Ellerman <michael@ellerman.id.au>    2005-06-30 15:07:09 +1000
committer  Paul Mackerras <paulus@samba.org>            2005-06-30 15:07:09 +1000
commit     bea248fb30c3122ece8c34798527fac431c1d7b0
tree       9158d7a089312f92abcb6c8d5a8d942d543be24b     /arch/ppc64
parent     b1bdfbd0a29d6da4dbe42736faac02c43a9afe76
[PATCH] ppc64: Remove lpqueue pointer from the paca on iSeries
The iSeries code keeps a pointer to the ItLpQueue in its paca struct, but
all these pointers end up pointing to the same place, i.e. xItLpQueue.
So remove the pointer from the paca struct and just refer to xItLpQueue
directly where needed.
The only complication is that the spread_lpevents logic was implemented by
having a NULL lpqueue pointer in the paca on CPUs that weren't supposed to
process events. Instead, we simply compare the spread_lpevents value to the
processor id to get the same behaviour.
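For context, a minimal user-space sketch of the behavioural equivalence described
above: the old scheme marked non-participating CPUs with a NULL lpqueue pointer in
their paca, while the new scheme compares the CPU number against a single
spread_lpevents threshold. The names spread_lpevents and NR_CPUS follow the patch;
the harness around them (the old_paca struct, the fake CPU loop, main) is
hypothetical and only models the check, not the actual kernel code paths.

/* Sketch only: models the old NULL-pointer test vs. the new threshold test
 * outside the kernel.  Not the real paca or ItLpQueue code. */
#include <stdio.h>

#define NR_CPUS 4

/* Old scheme: each CPU's paca held its own lpqueue pointer; CPUs that were
 * not meant to process events had it left as NULL. */
struct old_paca {
        void *lpqueue_ptr;
};

/* New scheme: one global threshold, set from the spread_lpevents= boot
 * parameter, decides which CPUs take part. */
static unsigned long spread_lpevents = 1;

static int old_cpu_processes_events(const struct old_paca *paca)
{
        return paca->lpqueue_ptr != NULL;
}

static int new_cpu_processes_events(unsigned int cpu)
{
        /* Equivalent to the patch's early exit:
         * if (smp_processor_id() >= spread_lpevents) return 0; */
        return cpu < spread_lpevents;
}

int main(void)
{
        struct old_paca paca[NR_CPUS] = { { 0 } };
        unsigned int cpu;

        /* Pretend spread_lpevents=2 was given on the command line. */
        spread_lpevents = 2;
        paca[0].lpqueue_ptr = (void *)1;        /* old scheme: copy the pointer... */
        paca[1].lpqueue_ptr = (void *)1;        /* ...to each participating CPU */

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu %u: old=%d new=%d\n", cpu,
                       old_cpu_processes_events(&paca[cpu]),
                       new_cpu_processes_events(cpu));

        return 0;
}

Both columns agree for every CPU, which is the point of the simplification: the
per-CPU pointer carried no information beyond "is this CPU below the threshold".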
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/kernel/ItLpQueue.c     | 16
-rw-r--r--  arch/ppc64/kernel/iSeries_setup.c |  6
-rw-r--r--  arch/ppc64/kernel/idle.c          |  4
-rw-r--r--  arch/ppc64/kernel/irq.c           |  6
-rw-r--r--  arch/ppc64/kernel/mf.c            |  5
-rw-r--r--  arch/ppc64/kernel/pacaData.c      |  1
-rw-r--r--  arch/ppc64/kernel/time.c          |  7
7 files changed, 19 insertions(+), 26 deletions(-)
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index cdea00d7707f..e90dca8bd136 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -69,15 +69,17 @@ struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
         return nextLpEvent;
 }
 
+unsigned long spread_lpevents = 1;
+
 int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
 {
-        int retval = 0;
-        struct HvLpEvent * nextLpEvent;
-        if ( lpQueue ) {
-                nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
-                retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
-        }
-        return retval;
+        struct HvLpEvent *next_event;
+
+        if (smp_processor_id() >= spread_lpevents)
+                return 0;
+
+        next_event = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
+        return next_event->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
 }
 
 void ItLpQueue_clearValid( struct HvLpEvent * event )
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index 86966ce76b58..2049b6dbafc7 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -855,17 +855,15 @@ late_initcall(iSeries_src_init);
 
 static int set_spread_lpevents(char *str)
 {
-        unsigned long i;
         unsigned long val = simple_strtoul(str, NULL, 0);
+        extern unsigned long spread_lpevents;
 
         /*
          * The parameter is the number of processors to share in processing
          * lp events.
          */
         if (( val > 0) && (val <= NR_CPUS)) {
-                for (i = 1; i < val; ++i)
-                        paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
-
+                spread_lpevents = val;
                 printk("lpevent processing spread over %ld processors\n", val);
         } else {
                 printk("invalid spread_lpevents %ld\n", val);
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index bdf13b4dc1c8..63977a7a3094 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -88,7 +88,7 @@ static int iSeries_idle(void)
 
         while (1) {
                 if (lpaca->lppaca.shared_proc) {
-                        if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
+                        if (ItLpQueue_isLpIntPending(&xItLpQueue))
                                 process_iSeries_events();
                         if (!need_resched())
                                 yield_shared_processor();
@@ -100,7 +100,7 @@ static int iSeries_idle(void)
 
                         while (!need_resched()) {
                                 HMT_medium();
-                                if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
+                                if (ItLpQueue_isLpIntPending(&xItLpQueue))
                                         process_iSeries_events();
                                 HMT_low();
                         }
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index 3defc8c33adf..b1e6acb02a9a 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -269,7 +269,6 @@ out:
 void do_IRQ(struct pt_regs *regs)
 {
         struct paca_struct *lpaca;
-        struct ItLpQueue *lpq;
 
         irq_enter();
 
@@ -295,9 +294,8 @@ void do_IRQ(struct pt_regs *regs)
                         iSeries_smp_message_recv(regs);
                 }
 #endif /* CONFIG_SMP */
-        lpq = lpaca->lpqueue_ptr;
-        if (lpq && ItLpQueue_isLpIntPending(lpq))
-                lpevent_count += ItLpQueue_process(lpq, regs);
+        if (ItLpQueue_isLpIntPending(&xItLpQueue))
+                lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
 
         irq_exit();
 
diff --git a/arch/ppc64/kernel/mf.c b/arch/ppc64/kernel/mf.c
index d98bebf7042f..d6a297a4feb3 100644
--- a/arch/ppc64/kernel/mf.c
+++ b/arch/ppc64/kernel/mf.c
@@ -802,9 +802,8 @@ int mf_get_boot_rtc(struct rtc_time *tm)
         /* We need to poll here as we are not yet taking interrupts */
         while (rtc_data.busy) {
                 extern unsigned long lpevent_count;
-                struct ItLpQueue *lpq = get_paca()->lpqueue_ptr;
-                if (lpq && ItLpQueue_isLpIntPending(lpq))
-                        lpevent_count += ItLpQueue_process(lpq, NULL);
+                if (ItLpQueue_isLpIntPending(&xItLpQueue))
+                        lpevent_count += ItLpQueue_process(&xItLpQueue, NULL);
         }
         return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
 }
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/ppc64/kernel/pacaData.c
index a3e0975c26c1..ebfb517019ef 100644
--- a/arch/ppc64/kernel/pacaData.c
+++ b/arch/ppc64/kernel/pacaData.c
@@ -45,7 +45,6 @@ extern unsigned long __toc_start;
 #ifdef CONFIG_PPC_ISERIES
 #define EXTRA_INITS(number, lpq) \
         .lppaca_ptr = &paca[number].lppaca,                     \
-        .lpqueue_ptr = (lpq),           /* &xItLpQueue, */      \
         .reg_save_ptr = &paca[number].reg_save,                 \
         .reg_save = {                                           \
                 .xDesc = 0xd397d9e2,    /* "LpRS" */            \
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 2a532db9138a..cdc43afb563e 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -367,11 +367,8 @@ int timer_interrupt(struct pt_regs * regs)
                 set_dec(next_dec);
 
 #ifdef CONFIG_PPC_ISERIES
-        {
-                struct ItLpQueue *lpq = lpaca->lpqueue_ptr;
-                if (lpq && ItLpQueue_isLpIntPending(lpq))
-                        lpevent_count += ItLpQueue_process(lpq, regs);
-        }
+        if (ItLpQueue_isLpIntPending(&xItLpQueue))
+                lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
 #endif
 
         /* collect purr register values often, for accurate calculations */