1 | /* $NetBSD: cpu.h,v 1.41 2014/05/19 20:39:23 rmind Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2007 YAMAMOTO Takashi, |
5 | * All rights reserved. |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions |
9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. |
15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
17 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
20 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
21 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
22 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
24 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
26 | * SUCH DAMAGE. |
27 | */ |
28 | |
29 | #ifndef _SYS_CPU_H_ |
30 | #define _SYS_CPU_H_ |
31 | |
32 | #ifndef _LOCORE |
33 | |
34 | #include <machine/cpu.h> |
35 | |
36 | #include <sys/lwp.h> |
37 | |
38 | struct cpu_info; |
39 | |
40 | #ifdef _KERNEL |
41 | #ifndef cpu_idle |
42 | void cpu_idle(void); |
43 | #endif |
44 | |
45 | #ifdef CPU_UCODE |
46 | #include <sys/cpuio.h> |
47 | #include <dev/firmload.h> |
48 | #ifdef COMPAT_60 |
49 | #include <compat/sys/cpuio.h> |
50 | #endif |
51 | #endif |
52 | |
53 | /* |
54 | * cpu_need_resched() must always be called with the target CPU |
55 | * locked (via spc_lock() or another route), unless called locally. |
56 | * If called locally, the caller need only be at IPL_SCHED. |
57 | */ |
58 | #ifndef cpu_need_resched |
59 | void cpu_need_resched(struct cpu_info *, int); |
60 | #endif |
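
/*
 * Sketch of a typical cross-CPU resched request (illustrative only,
 * not taken from this file): the caller takes the target CPU's
 * run-queue lock via the spc_lock()/spc_unlock() scheduler helpers
 * mentioned above before asking for an immediate context switch;
 * "ci" is assumed to point at the target CPU.
 *
 *	spc_lock(ci);
 *	cpu_need_resched(ci, RESCHED_IMMED);
 *	spc_unlock(ci);
 */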
61 | |
62 | #ifndef cpu_did_resched |
63 | #define cpu_did_resched(l) /* nothing */ |
64 | #endif |
65 | |
66 | #ifndef CPU_INFO_ITERATOR |
67 | #define CPU_INFO_ITERATOR int |
68 | #define CPU_INFO_FOREACH(cii, ci) \ |
69 | (void)cii, ci = curcpu(); ci != NULL; ci = NULL |
70 | #endif |
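
/*
 * The usual idiom for walking the attached CPUs (a sketch, not taken
 * from this file):
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		... per-CPU work on ci ...
 *	}
 *
 * On ports that do not provide their own CPU_INFO_FOREACH, the
 * fallback definition above visits only curcpu().
 */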
71 | |
72 | #ifndef CPU_IS_PRIMARY |
73 | #define CPU_IS_PRIMARY(ci) ((void)ci, 1) |
74 | #endif |
75 | |
76 | #ifdef __HAVE_MD_CPU_OFFLINE |
77 | void cpu_offline_md(void); |
78 | #endif |
79 | |
80 | struct lwp *cpu_switchto(struct lwp *, struct lwp *, bool); |
81 | struct cpu_info *cpu_lookup(u_int); |
82 | int cpu_setmodel(const char *fmt, ...) __printflike(1, 2); |
83 | const char *cpu_getmodel(void); |
84 | int cpu_setstate(struct cpu_info *, bool); |
85 | int cpu_setintr(struct cpu_info *, bool); |
86 | bool cpu_intr_p(void); |
87 | bool cpu_softintr_p(void); |
88 | bool cpu_kpreempt_enter(uintptr_t, int); |
89 | void cpu_kpreempt_exit(uintptr_t); |
90 | bool cpu_kpreempt_disabled(void); |
91 | int cpu_lwp_setprivate(struct lwp *, void *); |
92 | void cpu_intr_redistribute(void); |
93 | u_int cpu_intr_count(struct cpu_info *); |
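
/*
 * Example (sketch, not from this file): machine-dependent attach code
 * typically records the CPU model string once at boot using the
 * printf-like cpu_setmodel(); "vendor" and "name" are hypothetical
 * local variables here.
 *
 *	cpu_setmodel("%s %s", vendor, name);
 *	printf("cpu0: %s\n", cpu_getmodel());
 */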
94 | #endif |
95 | |
96 | #ifdef _KERNEL |
97 | extern kmutex_t cpu_lock; |
98 | extern u_int maxcpus; |
99 | extern struct cpu_info **cpu_infos; |
100 | extern kcpuset_t *kcpuset_attached; |
101 | extern kcpuset_t *kcpuset_running; |
102 | |
103 | static inline u_int |
104 | cpu_index(const struct cpu_info *ci) |
105 | { |
106 | return ci->ci_index; |
107 | } |
108 | |
109 | static inline char * |
110 | cpu_name(struct cpu_info *ci) |
111 | { |
112 | return ci->ci_data.cpu_name; |
113 | } |
114 | |
115 | #ifdef CPU_UCODE |
116 | struct cpu_ucode_softc { |
117 | int loader_version; |
118 | char *sc_blob; |
119 | off_t sc_blobsize; |
120 | }; |
121 | |
122 | int cpu_ucode_get_version(struct cpu_ucode_version *); |
123 | int cpu_ucode_apply(const struct cpu_ucode *); |
124 | #ifdef COMPAT_60 |
125 | int compat6_cpu_ucode_get_version(struct compat6_cpu_ucode *); |
126 | int compat6_cpu_ucode_apply(const struct compat6_cpu_ucode *); |
127 | #endif |
128 | int cpu_ucode_load(struct cpu_ucode_softc *, const char *); |
129 | int cpu_ucode_md_open(firmware_handle_t *, int, const char *); |
130 | #endif |
131 | |
132 | #endif |
133 | #endif /* !_LOCORE */ |
134 | |
135 | /* flags for cpu_need_resched */ |
136 | #define RESCHED_LAZY 0x01 /* request a ctx switch */ |
137 | #define RESCHED_IMMED 0x02 /* request an immediate ctx switch */ |
138 | #define RESCHED_KPREEMPT 0x04 /* request in-kernel preemption */ |
139 | |
140 | #endif /* !_SYS_CPU_H_ */ |
141 | |