Make ksoftirqd a normal per-cpu variable.

Post by Rusty Russell » Fri, 04 Jul 2003 08:30:12



Linus, please apply.  Small diff overlap with previous patch, and next
patch.

Moves the ksoftirqd pointers out of the irq_stat struct and uses a
normal per-cpu variable instead.  It's not that time-critical, nor
referenced in assembler.  This moves us closer to making irq_stat a per-cpu variable.

Because some archs have hardcoded asm references to offsets in this
structure, I haven't touched non-x86.  The __ksoftirqd_task field
is unused in other archs, too.

Name: ksoftirqds in per-cpu variable
Author: Rusty Russell
Status: Tested on 2.5.74
Depends: Percpu/irq_syscall_count_removal.patch.gz

D: Moves the ksoftirqd pointers out of the irq_stat struct and uses a
D: normal per-cpu variable instead.  It's not that time-critical, nor
D: referenced in assembler.  This moves us closer to making irq_stat a per-cpu variable.
D:
D: Because some archs have hardcoded asm references to offsets in this
D: structure, I haven't touched non-x86.  The __ksoftirqd_task field
D: is unused in other archs, too.
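
(For reference, the per-cpu pattern the patch converts to looks roughly
like the sketch below.  DEFINE_PER_CPU, per_cpu() and __get_cpu_var()
are the real interfaces used in the diff; the two wrapper functions are
purely illustrative and are not part of the patch.)

	#include <linux/percpu.h>
	#include <linux/sched.h>

	/* One pointer per CPU, replacing the __ksoftirqd_task field that
	 * used to live in each CPU's irq_stat entry. */
	static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

	/* Local-CPU access: the caller must already have preemption (or
	 * interrupts) disabled, which is why the patch can drop the explicit
	 * smp_processor_id() argument from wakeup_softirqd(). */
	static inline struct task_struct *local_ksoftirqd(void)
	{
		return __get_cpu_var(ksoftirqd);
	}

	/* Access to another CPU's pointer, e.g. from the CPU hotplug
	 * notifier while waiting for that CPU's thread to come up. */
	static inline struct task_struct *cpu_ksoftirqd(int cpu)
	{
		return per_cpu(ksoftirqd, cpu);
	}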

diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal .5097-2.5.73-bk8-ksoftirqd_percpu.pre/include/asm-i386/hardirq.h .5097-2.5.73-bk8-ksoftirqd_percpu/include/asm-i386/hardirq.h
--- .5097-2.5.73-bk8-ksoftirqd_percpu.pre/include/asm-i386/hardirq.h    2003-07-01 15:26:58.000000000 +1000

 typedef struct {
        unsigned int __softirq_pending;
-       struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
        unsigned long idle_timestamp;
        unsigned int __nmi_count;       /* arch dependent */
        unsigned int apic_timer_irqs;   /* arch dependent */
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal .5097-2.5.73-bk8-ksoftirqd_percpu.pre/include/linux/irq_cpustat.h .5097-2.5.73-bk8-ksoftirqd_percpu/include/linux/irq_cpustat.h
--- .5097-2.5.73-bk8-ksoftirqd_percpu.pre/include/linux/irq_cpustat.h   2003-07-01 15:26:58.000000000 +1000

   /* arch independent irq_stat fields */
 #define softirq_pending(cpu)   __IRQ_STAT((cpu), __softirq_pending)
 #define local_softirq_pending()        softirq_pending(smp_processor_id())
-#define ksoftirqd_task(cpu)    __IRQ_STAT((cpu), __ksoftirqd_task)
-#define local_ksoftirqd_task() ksoftirqd_task(smp_processor_id())

   /* arch dependent irq_stat fields */
 #define nmi_count(cpu)         __IRQ_STAT((cpu), __nmi_count)  /* i386 */
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal .5097-2.5.73-bk8-ksoftirqd_percpu.pre/kernel/softirq.c .5097-2.5.73-bk8-ksoftirqd_percpu/kernel/softirq.c
--- .5097-2.5.73-bk8-ksoftirqd_percpu.pre/kernel/softirq.c      2003-06-15 11:30:11.000000000 +1000

 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/notifier.h>
+#include <linux/percpu.h>
 #include <linux/cpu.h>


 static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

+static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static inline void wakeup_softirqd(unsigned cpu)
+static inline void wakeup_softirqd(void)
 {
-       struct task_struct * tsk = ksoftirqd_task(cpu);
+       /* Interrupts are disabled: no need to stop preemption */
+       struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)

                        goto restart;
                }
                if (pending)
-                       wakeup_softirqd(smp_processor_id());
+                       wakeup_softirqd();
                __local_bh_enable();
        }

         * schedule the softirq soon.
         */
        if (!in_interrupt())
-               wakeup_softirqd(cpu);
+               wakeup_softirqd();
 }


        __set_current_state(TASK_INTERRUPTIBLE);
        mb();

-       local_ksoftirqd_task() = current;
+       __get_cpu_var(ksoftirqd) = current;

        for (;;) {

                        return NOTIFY_BAD;
                }

-               while (!ksoftirqd_task(hotcpu))
+               while (!per_cpu(ksoftirqd, hotcpu))
                        yield();
        }
        return NOTIFY_OK;
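
(A side note on the __get_cpu_var() use above: it does not disable
preemption by itself, which is fine in wakeup_softirqd() because it is
only called with interrupts off.  In a context that could be preempted
you would use the get_cpu_var()/put_cpu_var() pair from the same API
instead.  A rough sketch, not part of the patch, with a hypothetical
caller name:)

	/* Hypothetical helper running with preemption and interrupts enabled. */
	static void poke_local_ksoftirqd(void)
	{
		/* get_cpu_var() disables preemption, so "this CPU" cannot
		 * change underneath us while we use the pointer. */
		struct task_struct *tsk = get_cpu_var(ksoftirqd);

		if (tsk && tsk->state != TASK_RUNNING)
			wake_up_process(tsk);

		put_cpu_var(ksoftirqd);		/* re-enables preemption */
	}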

--
  Anyone who quotes me in their sig is an idiot. -- Rusty Russell.

Make ksoftirqd a normal per-cpu variable.

Post by Linus Torvalds » Fri, 04 Jul 2003 09:50:07



> Linus, please apply.  Small diff overlap with previous patch, and next
> patch.

This arrived in the wrong order; because it was dependent on another
patch and there was no explicit ordering, it didn't apply.

If you have interdependent patches, PLEASE PLEASE PLEASE make that very
clear in the subject line. Make it say something like

        [PATCH 2/2] Make ksoftirqd a normal per-cpu variable

and then call the patch it depends on "[PATCH 1/2] xxxx"

(Even if they don't necessarily depend on each other, if you have tested
them in some order this is a good idea. They may have dependencies that
you didn't think of).

                Linus


Make ksoftirqd a normal per-cpu variable.

Post by Rusty Russell » Fri, 04 Jul 2003 10:20:08




> > Linus, please apply.  Small diff overlap with previous patch, and next
> > patch.

> This arrived in the wrong order, and because it was dependent on another
> patch, and there was no explicit ordering, it didn't apply.

As ordered, here is 1/4:

No one seems to use __syscall_count.  Remove the field from the i386
irq_cpustat_t struct, and the generic accessor macros.

Because some archs have hardcoded asm references to offsets in this
structure, I haven't touched non-x86, but doing so is usually
trivial.
--
  Anyone who quotes me in their sig is an idiot. -- Rusty Russell.

Name: Remove unused __syscall_count from irq_stat struct.
Author: Rusty Russell
Status: Tested on 2.5.74

D: No one seems to use __syscall_count.  Remove the field from the i386
D: irq_cpustat_t struct, and the generic accessor macros.
D:
D: Because some archs have hardcoded asm references to offsets in this
D: structure, I haven't touched non-x86, but doing so is usually
D: trivial.
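
(For context, the accessors being removed are thin wrappers over a
per-cpu irq_stat[] array.  __IRQ_STAT() in that era expanded to roughly
the following -- a from-memory sketch, not quoted from the tree -- which
is why even an unused field still costs space in every CPU's entry, and
why asm code that hardcodes field offsets makes touching the layout on
other archs risky:)

	/* include/linux/irq_cpustat.h, approximately */
	extern irq_cpustat_t irq_stat[];	/* one entry per possible CPU */

	#ifdef CONFIG_SMP
	#define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
	#else
	#define __IRQ_STAT(cpu, member)	((void)(cpu), irq_stat[0].member)
	#endif

	/* So syscall_count(cpu) was simply irq_stat[cpu].__syscall_count;
	 * with no users left, the field and both generic macros can go. */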

diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal .2143-linux-2.5.73-bk8/include/asm-i386/hardirq.h .2143-linux-2.5.73-bk8.updated/include/asm-i386/hardirq.h
--- .2143-linux-2.5.73-bk8/include/asm-i386/hardirq.h   2003-04-08 11:14:55.000000000 +1000

 typedef struct {
        unsigned int __softirq_pending;
-       unsigned int __syscall_count;
        struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
        unsigned long idle_timestamp;
        unsigned int __nmi_count;       /* arch dependent */
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal .2143-linux-2.5.73-bk8/include/linux/irq_cpustat.h .2143-linux-2.5.73-bk8.updated/include/linux/irq_cpustat.h
--- .2143-linux-2.5.73-bk8/include/linux/irq_cpustat.h  2003-06-15 11:30:08.000000000 +1000

   /* arch independent irq_stat fields */
 #define softirq_pending(cpu)   __IRQ_STAT((cpu), __softirq_pending)
 #define local_softirq_pending()        softirq_pending(smp_processor_id())
-#define syscall_count(cpu)     __IRQ_STAT((cpu), __syscall_count)
-#define local_syscall_count()  syscall_count(smp_processor_id())
 #define ksoftirqd_task(cpu)    __IRQ_STAT((cpu), __ksoftirqd_task)
 #define local_ksoftirqd_task() ksoftirqd_task(smp_processor_id())
