#ifndef __ASM_SMP_H
#define __ASM_SMP_H
/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/ptrace.h>
#endif
#ifdef CONFIG_X86_LOCAL_APIC
#ifndef __ASSEMBLY__
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/mpspec.h>
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
#endif
#include <asm/apic.h>
#endif
#endif
#ifdef CONFIG_SMP
# ifdef CONFIG_MULTIQUAD
# define TARGET_CPUS 0xf /* all CPUs in *THIS* quad */
# define INT_DELIVERY_MODE 0 /* physical delivery on LOCAL quad */
# else
# define TARGET_CPUS cpu_online_map
# define INT_DELIVERY_MODE 1 /* logical delivery broadcast to all procs */
# endif
#else
# define INT_DELIVERY_MODE 1 /* logical delivery */
# define TARGET_CPUS 0x01
#endif
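/*
 * Illustrative sketch, not part of the original header: how a sender
 * might plug TARGET_CPUS and INT_DELIVERY_MODE into the local APIC's
 * interrupt command register. The helper name and the exact ICR field
 * handling are assumptions for illustration; the real senders live in
 * arch/i386/kernel/smp.c.
 */
#if 0
static void example_send_ipi(int vector)
{
	/* Destination field of ICR2: who receives the IPI. */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(TARGET_CPUS));
	/* Logical vs. physical destination mode, plus the vector. */
	apic_write_around(APIC_ICR,
		(INT_DELIVERY_MODE ? APIC_DEST_LOGICAL : 0) | vector);
}
#endif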
#ifndef clustered_apic_mode
#ifdef CONFIG_MULTIQUAD
#define clustered_apic_mode (1)
#define esr_disable (1)
#else /* !CONFIG_MULTIQUAD */
#define clustered_apic_mode (0)
#define esr_disable (0)
#endif /* CONFIG_MULTIQUAD */
#endif
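/*
 * Sketch, not part of the original header: esr_disable being set on
 * MULTIQUAD means APIC setup code must skip touching the error status
 * register entirely. Condensed from the pattern used by
 * setup_local_APIC(); treat the details as an assumption.
 */
#if 0
static void example_clear_esr(void)
{
	if (!esr_disable) {
		apic_write(APIC_ESR, 0);	/* arm the ESR... */
		apic_read(APIC_ESR);		/* ...and flush stale errors */
	}
}
#endif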
#ifdef CONFIG_SMP
#ifndef __ASSEMBLY__
/*
 * Private routines/data
 */
extern void smp_alloc_memory(void);
extern unsigned long phys_cpu_present_map;
extern unsigned long cpu_online_map;
extern volatile unsigned long smp_invalidate_needed;
extern int pic_mode;
extern void smp_flush_tlb(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_send_reschedule(int cpu);
extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
/*
 * On x86 all CPUs are mapped 1:1 to the APIC space.
 * This simplifies scheduling and IPI sending and
 * compresses data structures.
 */
static inline int cpu_logical_map(int cpu)
{
	return cpu;
}

static inline int cpu_number_map(int cpu)
{
	return cpu;
}
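/*
 * Sketch, not part of the original header: because both maps above are
 * the identity on x86, walking the logical CPUs is a plain linear loop.
 * smp_num_cpus is the 2.4-era count of booted CPUs; the function name
 * below is made up for illustration.
 */
#if 0
extern int smp_num_cpus;

static void example_for_each_cpu(void (*fn)(int))
{
	int i;

	for (i = 0; i < smp_num_cpus; i++)
		fn(cpu_logical_map(i));		/* == fn(i) on x86 */
}
#endif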
/*
 * Some lowlevel functions might want to know about
 * the real APIC ID <-> CPU # mapping.
 */
#define MAX_APICID 256
extern volatile int cpu_to_physical_apicid[NR_CPUS];
extern volatile int physical_apicid_to_cpu[MAX_APICID];
extern volatile int cpu_to_logical_apicid[NR_CPUS];
extern volatile int logical_apicid_to_cpu[MAX_APICID];
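/*
 * Sketch, not part of the original header: translating both ways with
 * the tables above. The -1 "no such CPU" convention is an assumption;
 * see arch/i386/kernel/smpboot.c for how the slots are really
 * initialized.
 */
#if 0
static int example_cpu_for_apicid(int apicid)
{
	if (apicid < 0 || apicid >= MAX_APICID)
		return -1;
	return physical_apicid_to_cpu[apicid];	/* assumed -1 if unused */
}

static int example_apicid_for_cpu(int cpu)
{
	return cpu_to_physical_apicid[cpu];
}
#endif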
/*
 * General functions that each host system must provide.
 */
extern void smp_boot_cpus(void);
extern void smp_store_cpu_info(int id);	/* Store per CPU info (like the initial udelay numbers) */
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
#define smp_processor_id() (current->processor)
static __inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
}
static __inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
}
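/*
 * Sketch, not part of the original header: cross-checking the
 * scheduler's idea of the current CPU against what the APIC hardware
 * reports, in the spirit of the verification done at boot. The
 * function name is hypothetical.
 */
#if 0
static int example_ids_consistent(void)
{
	return cpu_to_physical_apicid[smp_processor_id()]
			== hard_smp_processor_id();
}
#endif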
#endif /* !__ASSEMBLY__ */
#define NO_PROC_ID 0xFF /* No processor magic marker */
/*
 * This magic constant controls our willingness to transfer
 * a process across CPUs. Such a transfer incurs misses on the L1
 * cache, and on a P6 or P5 with multiple L2 caches it also costs
 * lost L2 hits. My gut feeling is the right value will vary by
 * board. For a board with separate L2 caches it probably also
 * depends on the RSS, and for a board with a shared L2 cache it
 * ought to decay fast as other processes are run.
 */
#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
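/*
 * Sketch, not part of the original header: the 2.4 scheduler's
 * goodness() uses this constant as a bonus for staying put, which is
 * what turns the "penalty" into a preference for the cache-warm CPU.
 * Condensed paraphrase; see kernel/sched.c for the real calculation.
 */
#if 0
static int example_affinity_bonus(struct task_struct *p, int this_cpu,
				  int weight)
{
	if (p->processor == this_cpu)
		weight += PROC_CHANGE_PENALTY;	/* prefer the warm cache */
	return weight;
}
#endif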
#endif
#endif