about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Marius Strobl <marius@FreeBSD.org>	2010-07-07 21:00:17 +0000
committer	Marius Strobl <marius@FreeBSD.org>	2010-07-07 21:00:17 +0000
commit62e702af841e9756db93cf070ab4e41652acdb8e (patch)
treec94f6138192aeaec8b9a2096e3788acb6a96ddc5
parenta602ad907648df8f565f7a40990524141c603052 (diff)
downloadsrc-62e702af841e9756db93cf070ab4e41652acdb8e.tar.gz
src-62e702af841e9756db93cf070ab4e41652acdb8e.zip
MFC: r209695
- Pin the IPI cache and TLB demap functions in order to prevent migration
  between determining the other CPUs and calling cpu_ipi_selected(), which
  apart from generally doing the wrong thing can lead to a panic when a CPU
  is told to IPI itself (which sun4u doesn't support).
  Reported and tested by: Nathaniel W Filardo
- Add __unused where appropriate.

Approved by: re (kib)
Notes
Notes: svn path=/releng/8.1/; revision=209781
-rw-r--r--	sys/sparc64/include/smp.h	32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/sys/sparc64/include/smp.h b/sys/sparc64/include/smp.h
index 467c6b639114..eda6d6fb0079 100644
--- a/sys/sparc64/include/smp.h
+++ b/sys/sparc64/include/smp.h
@@ -38,6 +38,9 @@
#ifndef LOCORE
+#include <sys/proc.h>
+#include <sys/sched.h>
+
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/tte.h>
@@ -139,6 +142,7 @@ ipi_dcache_page_inval(void *func, vm_paddr_t pa)
if (smp_cpus == 1)
return (NULL);
+ sched_pin();
ica = &ipi_cache_args;
mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
@@ -154,6 +158,7 @@ ipi_icache_page_inval(void *func, vm_paddr_t pa)
if (smp_cpus == 1)
return (NULL);
+ sched_pin();
ica = &ipi_cache_args;
mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
@@ -170,8 +175,11 @@ ipi_tlb_context_demap(struct pmap *pm)
if (smp_cpus == 1)
return (NULL);
- if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
+ sched_pin();
+ if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
+ sched_unpin();
return (NULL);
+ }
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
@@ -189,8 +197,11 @@ ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
if (smp_cpus == 1)
return (NULL);
- if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
+ sched_pin();
+ if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
+ sched_unpin();
return (NULL);
+ }
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
@@ -208,8 +219,11 @@ ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
if (smp_cpus == 1)
return (NULL);
- if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
+ sched_pin();
+ if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
+ sched_unpin();
return (NULL);
+ }
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
@@ -230,6 +244,7 @@ ipi_wait(void *cookie)
while (*mask != 0)
;
mtx_unlock_spin(&ipi_mtx);
+ sched_unpin();
}
}
@@ -242,35 +257,36 @@ ipi_wait(void *cookie)
#ifndef LOCORE
static __inline void *
-ipi_dcache_page_inval(void *func, vm_paddr_t pa)
+ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{
return (NULL);
}
static __inline void *
-ipi_icache_page_inval(void *func, vm_paddr_t pa)
+ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{
return (NULL);
}
static __inline void *
-ipi_tlb_context_demap(struct pmap *pm)
+ipi_tlb_context_demap(struct pmap *pm __unused)
{
return (NULL);
}
static __inline void *
-ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
+ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
{
return (NULL);
}
static __inline void *
-ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
+ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
+ __unused vm_offset_t end)
{
return (NULL);