/* $NetBSD: xen.h,v 1.37 2016/07/07 06:55:40 msaitoh Exp $ */

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif


#ifndef _LOCORE

#include <machine/cpufunc.h>

struct xen_netinfo {
	uint32_t xi_ifno;
	char *xi_root;
	uint32_t xi_ip[5];
};
union xen_cmdline_parseinfo {
	char xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo xcp_netinfo;
	char xcp_console[16];
	char xcp_pcidevs[64];
};

#define XEN_PARSE_BOOTDEV 0
#define XEN_PARSE_NETINFO 1
#define XEN_PARSE_CONSOLE 2
#define XEN_PARSE_BOOTFLAGS 3
#define XEN_PARSE_PCIBACK 4

void xen_parse_cmdline(int, union xen_cmdline_parseinfo *);
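
/*
 * Usage sketch (illustrative, not part of the interface): a caller
 * typically declares the union on the stack, zeroes it, and asks for one
 * class of information at a time, e.g. the boot device:
 *
 *	union xen_cmdline_parseinfo xcp;
 *
 *	memset(&xcp, 0, sizeof(xcp));
 *	xen_parse_cmdline(XEN_PARSE_BOOTDEV, &xcp);
 *	if (xcp.xcp_bootdev[0] != '\0')
 *		... boot from the device named in xcp.xcp_bootdev ...
 */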

void xenconscn_attach(void);

void xenprivcmd_init(void);

void xbdback_init(void);
void xennetback_init(void);
void xen_shm_init(void);

void xenevt_event(int);
void xenevt_setipending(int, int);
void xenevt_notify(void);

void idle_block(void);

/* xen_machdep.c */
void sysctl_xen_suspend_setup(void);

#if defined(XENDEBUG) || 1 /* XXX */
#include <sys/stdarg.h>

void printk(const char *, ...);
void vprintk(const char *, va_list);
#endif

#endif

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in xen-public/xen.h, but that header can't be
 * pulled in here because these values are used in start-of-day assembly.
 * The .h files need a bit more cleanup...
 */

#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS 0xe019	/* GDT index 0x1c03, RPL 1 */
#define FLAT_RING1_DS 0xe021	/* GDT index 0x1c04, RPL 1 */
#define FLAT_RING1_SS 0xe021	/* GDT index 0x1c04, RPL 1 */
#define FLAT_RING3_CS 0xe02b	/* GDT index 0x1c05, RPL 3 */
#define FLAT_RING3_DS 0xe033	/* GDT index 0x1c06, RPL 3 */
#define FLAT_RING3_SS 0xe033	/* GDT index 0x1c06, RPL 3 */
#endif
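
/*
 * For reference: an x86 selector encodes (index << 3) | table-indicator | RPL,
 * so e.g. FLAT_RING1_CS 0xe019 names GDT index 0xe019 >> 3 == 0x1c03 with
 * RPL 0xe019 & 3 == 1, i.e. one of the descriptors Xen reserves near the top
 * of the GDT for guest use.
 */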

#define __KERNEL_CS FLAT_RING1_CS
#define __KERNEL_DS FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

/* some function prototypes */
void trap_init(void);
void xpq_flush_cache(void);

#define xendomain_is_dom0() (xen_start_info.flags & SIF_INITDOMAIN)
#define xendomain_is_privileged() (xen_start_info.flags & SIF_PRIVILEGED)

/*
 * STI/CLI equivalents. These set and clear the virtual
 * evtchn_upcall_mask in our vcpu_info structure. Note that when
 * events are re-enabled there may already be pending events to handle,
 * in which case we force a call into the hypervisor callback directly.
 */

#define __save_flags(x) \
do { \
	(x) = curcpu()->ci_vcpu->evtchn_upcall_mask; \
} while (0)

#define __restore_flags(x) \
do { \
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu; \
	__insn_barrier(); \
	if ((_vci->evtchn_upcall_mask = (x)) == 0) { \
		x86_lfence(); \
		if (__predict_false(_vci->evtchn_upcall_pending)) \
			hypervisor_force_callback(); \
	} \
} while (0)

#define __cli() \
do { \
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1; \
	x86_lfence(); \
} while (0)

#define __sti() \
do { \
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu; \
	__insn_barrier(); \
	_vci->evtchn_upcall_mask = 0; \
	x86_lfence(); /* unmask then check (avoid races) */ \
	if (__predict_false(_vci->evtchn_upcall_pending)) \
		hypervisor_force_callback(); \
} while (0)

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) do { \
	__save_flags(x); \
	__cli(); \
} while (/* CONSTCOND */ 0)
#define save_and_sti(x) do { \
	__save_flags(x); \
	__sti(); \
} while (/* CONSTCOND */ 0)
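
/*
 * Usage sketch (illustrative, not part of this header): the usual pattern
 * is to save the current mask, block event delivery across a critical
 * section, and then restore the previous state:
 *
 *	u_long flags;
 *
 *	save_and_cli(flags);
 *	... touch state shared with the event callback ...
 *	restore_flags(flags);
 *
 * restore_flags() re-checks evtchn_upcall_pending, so events that arrived
 * while masked are handled as soon as the mask is cleared again.
 */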

/*
 * Always assume we're on a multiprocessor; we don't know how many CPUs
 * the underlying hardware has.
 */
#define __LOCK_PREFIX "lock; "

#define XATOMIC_T u_long
#ifdef __x86_64__
#define LONG_SHIFT 6
#define LONG_MASK 63
#else /* __x86_64__ */
#define LONG_SHIFT 5
#define LONG_MASK 31
#endif /* __x86_64__ */
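
/*
 * LONG_SHIFT/LONG_MASK split a flat bit number into a word index and a
 * bit within that word: for example, on amd64 bit 70 lives in word
 * 70 >> 6 == 1 at position 70 & 63 == 6.  xen_constant_test_bit() below
 * relies on exactly this decomposition.
 */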

#define xen_ffs __builtin_ffsl

static __inline XATOMIC_T
xen_atomic_xchg(volatile XATOMIC_T *ptr, unsigned long val)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "xchgq %0,%1"
#else
	    "xchgl %0,%1"
#endif
	    :"=r" (result)
	    :"m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

static __inline uint16_t
xen_atomic_cmpxchg16(volatile uint16_t *ptr, uint16_t val, uint16_t newval)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
	    "cmpxchgw %w1,%2"
	    :"=a" (result)
	    :"q"(newval), "m" (*ptr), "0" (val)
	    :"memory");

	return result;
}
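
/*
 * Usage sketch (hypothetical caller): xen_atomic_cmpxchg16() returns the
 * value it found at *ptr and only stores newval when that value matched
 * val, so a lock-free read-modify-write retries until it wins:
 *
 *	static volatile uint16_t word;
 *	uint16_t old, nval;
 *
 *	do {
 *		old = word;
 *		nval = old | 0x1;
 *	} while (xen_atomic_cmpxchg16(&word, old, nval) != old);
 */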

static __inline void
xen_atomic_setbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
{
#ifdef __x86_64__
	__asm volatile("lock ; orq %1,%0" : "=m" (*ptr) : "ir" (bits));
#else
	__asm volatile("lock ; orl %1,%0" : "=m" (*ptr) : "ir" (bits));
#endif
}

static __inline void
xen_atomic_clearbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
{
#ifdef __x86_64__
	__asm volatile("lock ; andq %1,%0" : "=m" (*ptr) : "ir" (~bits));
#else
	__asm volatile("lock ; andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
#endif
}

static __inline XATOMIC_T
xen_atomic_test_and_clear_bit(volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btrq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btrl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline XATOMIC_T
xen_atomic_test_and_set_bit(volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btsq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btsl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline int
xen_constant_test_bit(const volatile void *ptr, unsigned long bitno)
{
	return ((1UL << (bitno & LONG_MASK)) &
	    (((const volatile XATOMIC_T *) ptr)[bitno >> LONG_SHIFT])) != 0;
}

static __inline XATOMIC_T
xen_variable_test_bit(const volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(
#ifdef __x86_64__
	    "btq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result)
	    :"m" (*(const volatile XATOMIC_T *)(ptr)), "Ir" (bitno));
	return result;
}

#define xen_atomic_test_bit(ptr, bitno) \
	(__builtin_constant_p(bitno) ? \
	    xen_constant_test_bit((ptr),(bitno)) : \
	    xen_variable_test_bit((ptr),(bitno)))
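
/*
 * Example (sketch): this interface operates on arrays of unsigned longs,
 * such as the event-channel pending/mask bitmaps.  A compile-time constant
 * bit number is tested with a plain shift and mask; a variable bit number
 * (`port' below is just a stand-in) goes through the bt instruction:
 *
 *	unsigned long pending[2] = { 0x20UL, 0 };
 *
 *	if (xen_atomic_test_bit(pending, 5))	// constant: inline C test
 *		... bit 5 is set ...
 *	if (xen_atomic_test_bit(pending, port))	// variable: btq/btl
 *		... bit `port' is set ...
 */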

static __inline void
xen_atomic_set_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btsq %1,%0"
#else
	    "btsl %1,%0"
#endif
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}

static __inline void
xen_atomic_clear_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btrq %1,%0"
#else
	    "btrl %1,%0"
#endif
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}

#undef XATOMIC_T

void wbinvd(void);

#include <xen/xen-public/features.h>
#include <sys/systm.h>

extern bool xen_feature_tables[];
void xen_init_features(void);
static __inline bool
xen_feature(int f)
{
	KASSERT(f < XENFEAT_NR_SUBMAPS * 32);
	return xen_feature_tables[f];
}
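
/*
 * Usage sketch: xen_init_features() is expected to have filled
 * xen_feature_tables[] (presumably via the XENVER_get_features interface)
 * before anyone tests individual flags from xen-public/features.h, e.g.
 *
 *	if (xen_feature(XENFEAT_auto_translated_physmap))
 *		... the guest runs with an auto-translated physmap ...
 */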

#endif /* !_LOCORE */

#endif /* _OS_H_ */