/* $NetBSD: vmt.c,v 1.15 2016/11/10 03:32:04 ozaki-r Exp $ */
/* $OpenBSD: vmt.c,v 1.11 2011/01/27 21:29:25 dtucker Exp $ */

/*
 * Copyright (c) 2007 David Crawshaw <david@zentus.com>
 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Protocol reverse engineered by Ken Kato:
 * http://chitchat.at.infoseek.co.jp/vmware/backdoor.html
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/callout.h>
#include <sys/reboot.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/timetc.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <netinet/in.h>

#include <machine/cpuvar.h>

#include <dev/sysmon/sysmonvar.h>
#include <dev/sysmon/sysmon_taskq.h>

/* #define VMT_DEBUG */

/* OS name to report to host */
#ifdef __i386__
#define VM_OS_NAME	"other"
#else
#define VM_OS_NAME	"other-64"
#endif

/* "The" magic number, always occupies the EAX register. */
#define VM_MAGIC	0x564D5868

/* Port numbers, passed on EDX.LOW. */
#define VM_PORT_CMD	0x5658
#define VM_PORT_RPC	0x5659

/* Commands, passed on ECX.LOW. */
#define VM_CMD_GET_SPEED	0x01
#define VM_CMD_APM		0x02
#define VM_CMD_GET_MOUSEPOS	0x04
#define VM_CMD_SET_MOUSEPOS	0x05
#define VM_CMD_GET_CLIPBOARD_LEN	0x06
#define VM_CMD_GET_CLIPBOARD	0x07
#define VM_CMD_SET_CLIPBOARD_LEN	0x08
#define VM_CMD_SET_CLIPBOARD	0x09
#define VM_CMD_GET_VERSION	0x0a
#define  VM_VERSION_UNMANAGED	0x7fffffff
#define VM_CMD_GET_DEVINFO	0x0b
#define VM_CMD_DEV_ADDREMOVE	0x0c
#define VM_CMD_GET_GUI_OPTIONS	0x0d
#define VM_CMD_SET_GUI_OPTIONS	0x0e
#define VM_CMD_GET_SCREEN_SIZE	0x0f
#define VM_CMD_GET_HWVER	0x11
#define VM_CMD_POPUP_OSNOTFOUND	0x12
#define VM_CMD_GET_BIOS_UUID	0x13
#define VM_CMD_GET_MEM_SIZE	0x14
/*#define VM_CMD_GET_TIME	0x17 */	/* deprecated */
#define VM_CMD_RPC		0x1e
#define VM_CMD_GET_TIME_FULL	0x2e

/* RPC sub-commands, passed on ECX.HIGH. */
#define VM_RPC_OPEN		0x00
#define VM_RPC_SET_LENGTH	0x01
#define VM_RPC_SET_DATA		0x02
#define VM_RPC_GET_LENGTH	0x03
#define VM_RPC_GET_DATA		0x04
#define VM_RPC_GET_END		0x05
#define VM_RPC_CLOSE		0x06

/* RPC magic numbers, passed on EBX. */
#define VM_RPC_OPEN_RPCI	0x49435052UL	/* with VM_RPC_OPEN. */
#define VM_RPC_OPEN_TCLO	0x4F4C4354UL	/* with VM_RPC_OPEN. */
#define VM_RPC_ENH_DATA		0x00010000UL	/* with enhanced RPC data calls. */

#define VM_RPC_FLAG_COOKIE	0x80000000UL

/* RPC reply flags */
#define VM_RPC_REPLY_SUCCESS	0x0001
#define VM_RPC_REPLY_DORECV	0x0002	/* incoming message available */
#define VM_RPC_REPLY_CLOSED	0x0004	/* RPC channel is closed */
#define VM_RPC_REPLY_UNSENT	0x0008	/* incoming message was removed? */
#define VM_RPC_REPLY_CHECKPOINT	0x0010	/* checkpoint occurred -> retry */
#define VM_RPC_REPLY_POWEROFF	0x0020	/* underlying device is powering off */
#define VM_RPC_REPLY_TIMEOUT	0x0040
#define VM_RPC_REPLY_HB		0x0080	/* high-bandwidth tx/rx available */

/* VM state change IDs */
#define VM_STATE_CHANGE_HALT	1
#define VM_STATE_CHANGE_REBOOT	2
#define VM_STATE_CHANGE_POWERON	3
#define VM_STATE_CHANGE_RESUME	4
#define VM_STATE_CHANGE_SUSPEND	5

/* VM guest info keys */
#define VM_GUEST_INFO_DNS_NAME		1
#define VM_GUEST_INFO_IP_ADDRESS	2
#define VM_GUEST_INFO_DISK_FREE_SPACE	3
#define VM_GUEST_INFO_BUILD_NUMBER	4
#define VM_GUEST_INFO_OS_NAME_FULL	5
#define VM_GUEST_INFO_OS_NAME		6
#define VM_GUEST_INFO_UPTIME		7
#define VM_GUEST_INFO_MEMORY		8
#define VM_GUEST_INFO_IP_ADDRESS_V2	9

/* RPC responses */
#define VM_RPC_REPLY_OK			"OK "
#define VM_RPC_RESET_REPLY		"OK ATR toolbox"
#define VM_RPC_REPLY_ERROR		"ERROR Unknown command"
#define VM_RPC_REPLY_ERROR_IP_ADDR	"ERROR Unable to find guest IP address"

/* A register. */
union vm_reg {
	struct {
		uint16_t low;
		uint16_t high;
	} part;
	uint32_t word;
#ifdef __amd64__
	struct {
		uint32_t low;
		uint32_t high;
	} words;
	uint64_t quad;
#endif
} __packed;

/* A register frame. */
/* XXX 'volatile' as a workaround because BACKDOOR_OP is likely broken */
struct vm_backdoor {
	volatile union vm_reg eax;
	volatile union vm_reg ebx;
	volatile union vm_reg ecx;
	volatile union vm_reg edx;
	volatile union vm_reg esi;
	volatile union vm_reg edi;
	volatile union vm_reg ebp;
} __packed;

/* RPC context. */
struct vm_rpc {
	uint16_t channel;
	uint32_t cookie1;
	uint32_t cookie2;
};

static int	vmt_match(device_t, cfdata_t, void *);
static void	vmt_attach(device_t, device_t, void *);
static int	vmt_detach(device_t, int);

struct vmt_event {
	struct sysmon_pswitch	ev_smpsw;
	int			ev_code;
};

struct vmt_softc {
	device_t		sc_dev;

	struct sysctllog	*sc_log;
	struct vm_rpc		sc_tclo_rpc;
	bool			sc_tclo_rpc_open;
	char			*sc_rpc_buf;
	int			sc_rpc_error;
	int			sc_tclo_ping;
	int			sc_set_guest_os;
#define VMT_RPC_BUFLEN			256

	struct callout		sc_tick;
	struct callout		sc_tclo_tick;

#define VMT_CLOCK_SYNC_PERIOD_SECONDS	60
	int			sc_clock_sync_period_seconds;
	struct callout		sc_clock_sync_tick;

	struct vmt_event	sc_ev_power;
	struct vmt_event	sc_ev_reset;
	struct vmt_event	sc_ev_sleep;
	bool			sc_smpsw_valid;

	char			sc_hostname[MAXHOSTNAMELEN];
};

CFATTACH_DECL_NEW(vmt, sizeof(struct vmt_softc),
	vmt_match, vmt_attach, vmt_detach, NULL);

static int vmt_sysctl_setup_root(device_t);
static int vmt_sysctl_setup_clock_sync(device_t, const struct sysctlnode *);
static int vmt_sysctl_update_clock_sync_period(SYSCTLFN_PROTO);

static void vm_cmd(struct vm_backdoor *);
static void vm_ins(struct vm_backdoor *);
static void vm_outs(struct vm_backdoor *);

/* Functions for communicating with the VM Host. */
static int vm_rpc_open(struct vm_rpc *, uint32_t);
static int vm_rpc_close(struct vm_rpc *);
static int vm_rpc_send(const struct vm_rpc *, const uint8_t *, uint32_t);
static int vm_rpc_send_str(const struct vm_rpc *, const uint8_t *);
static int vm_rpc_get_length(const struct vm_rpc *, uint32_t *, uint16_t *);
static int vm_rpc_get_data(const struct vm_rpc *, char *, uint32_t, uint16_t);
static int vm_rpc_send_rpci_tx_buf(struct vmt_softc *, const uint8_t *, uint32_t);
static int vm_rpc_send_rpci_tx(struct vmt_softc *, const char *, ...)
    __printflike(2, 3);
static int vm_rpci_response_successful(struct vmt_softc *);

static void vmt_tclo_state_change_success(struct vmt_softc *, int, char);
static void vmt_do_reboot(struct vmt_softc *);
static void vmt_do_shutdown(struct vmt_softc *);

static void vmt_update_guest_info(struct vmt_softc *);
static void vmt_update_guest_uptime(struct vmt_softc *);
static void vmt_sync_guest_clock(struct vmt_softc *);

static void vmt_tick(void *);
static void vmt_tclo_tick(void *);
static void vmt_clock_sync_tick(void *);
static bool vmt_shutdown(device_t, int);
static void vmt_pswitch_event(void *);

extern char hostname[MAXHOSTNAMELEN];

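/*
 * Probe for the backdoor by issuing a GET_VERSION command; a hypervisor
 * that implements the interface echoes VM_MAGIC back in EBX.
 */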
static bool
vmt_probe(uint32_t *type)
{
	struct vm_backdoor frame;

	memset(&frame, 0, sizeof(frame));

	frame.eax.word = VM_MAGIC;
	frame.ebx.word = ~VM_MAGIC;
	frame.ecx.part.low = VM_CMD_GET_VERSION;
	frame.ecx.part.high = 0xffff;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = 0;

	vm_cmd(&frame);

	if (frame.eax.word == 0xffffffff ||
	    frame.ebx.word != VM_MAGIC)
		return false;

	if (type)
		*type = frame.ecx.word;

	return true;
}

static int
vmt_match(device_t parent, cfdata_t match, void *aux)
{
	struct cpufeature_attach_args *cfaa = aux;
	struct cpu_info *ci = cfaa->ci;

	if (strcmp(cfaa->name, "vm") != 0)
		return 0;
	if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0)
		return 0;

	return vmt_probe(NULL);
}

static const char *
vmt_type(void)
{
	uint32_t vmwaretype = 0;

	vmt_probe(&vmwaretype);

	switch (vmwaretype) {
	case 1: return "Express";
	case 2: return "ESX Server";
	case 3: return "VMware Server";
	case 4: return "Workstation";
	default: return "Unknown";
	}
}

static void
vmt_attach(device_t parent, device_t self, void *aux)
{
	int rv;
	struct vmt_softc *sc = device_private(self);

	aprint_naive("\n");
	aprint_normal(": %s\n", vmt_type());

	sc->sc_dev = self;
	sc->sc_log = NULL;

	callout_init(&sc->sc_tick, 0);
	callout_init(&sc->sc_tclo_tick, 0);
	callout_init(&sc->sc_clock_sync_tick, 0);

	sc->sc_clock_sync_period_seconds = VMT_CLOCK_SYNC_PERIOD_SECONDS;

	rv = vmt_sysctl_setup_root(self);
	if (rv != 0) {
		aprint_error_dev(self, "failed to initialize sysctl "
		    "(err %d)\n", rv);
		goto free;
	}

	sc->sc_rpc_buf = kmem_alloc(VMT_RPC_BUFLEN, KM_SLEEP);
	if (sc->sc_rpc_buf == NULL) {
		aprint_error_dev(self, "unable to allocate buffer for RPC\n");
		goto free;
	}

	if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
		aprint_error_dev(self, "failed to open backdoor RPC channel (TCLO protocol)\n");
		goto free;
	}
	sc->sc_tclo_rpc_open = true;

	/* don't know if this is important at all yet */
	if (vm_rpc_send_rpci_tx(sc, "tools.capability.hgfs_server toolbox 1") != 0) {
		aprint_error_dev(self, "failed to set HGFS server capability\n");
		goto free;
	}

	pmf_device_register1(self, NULL, NULL, vmt_shutdown);

	sysmon_task_queue_init();

	sc->sc_ev_power.ev_smpsw.smpsw_type = PSWITCH_TYPE_POWER;
	sc->sc_ev_power.ev_smpsw.smpsw_name = device_xname(self);
	sc->sc_ev_power.ev_code = PSWITCH_EVENT_PRESSED;
	sysmon_pswitch_register(&sc->sc_ev_power.ev_smpsw);
	sc->sc_ev_reset.ev_smpsw.smpsw_type = PSWITCH_TYPE_RESET;
	sc->sc_ev_reset.ev_smpsw.smpsw_name = device_xname(self);
	sc->sc_ev_reset.ev_code = PSWITCH_EVENT_PRESSED;
	sysmon_pswitch_register(&sc->sc_ev_reset.ev_smpsw);
	sc->sc_ev_sleep.ev_smpsw.smpsw_type = PSWITCH_TYPE_SLEEP;
	sc->sc_ev_sleep.ev_smpsw.smpsw_name = device_xname(self);
	sc->sc_ev_sleep.ev_code = PSWITCH_EVENT_RELEASED;
	sysmon_pswitch_register(&sc->sc_ev_sleep.ev_smpsw);
	sc->sc_smpsw_valid = true;

	callout_setfunc(&sc->sc_tick, vmt_tick, sc);
	callout_schedule(&sc->sc_tick, hz);

	callout_setfunc(&sc->sc_tclo_tick, vmt_tclo_tick, sc);
	callout_schedule(&sc->sc_tclo_tick, hz);
	sc->sc_tclo_ping = 1;

	callout_setfunc(&sc->sc_clock_sync_tick, vmt_clock_sync_tick, sc);
	callout_schedule(&sc->sc_clock_sync_tick,
	    mstohz(sc->sc_clock_sync_period_seconds * 1000));

	vmt_sync_guest_clock(sc);

	return;

free:
	if (sc->sc_rpc_buf)
		kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN);
	pmf_device_register(self, NULL, NULL);
	if (sc->sc_log)
		sysctl_teardown(&sc->sc_log);
}

static int
vmt_detach(device_t self, int flags)
{
	struct vmt_softc *sc = device_private(self);

	if (sc->sc_tclo_rpc_open)
		vm_rpc_close(&sc->sc_tclo_rpc);

	if (sc->sc_smpsw_valid) {
		sysmon_pswitch_unregister(&sc->sc_ev_sleep.ev_smpsw);
		sysmon_pswitch_unregister(&sc->sc_ev_reset.ev_smpsw);
		sysmon_pswitch_unregister(&sc->sc_ev_power.ev_smpsw);
	}

	callout_halt(&sc->sc_tick, NULL);
	callout_destroy(&sc->sc_tick);

	callout_halt(&sc->sc_tclo_tick, NULL);
	callout_destroy(&sc->sc_tclo_tick);

	callout_halt(&sc->sc_clock_sync_tick, NULL);
	callout_destroy(&sc->sc_clock_sync_tick);

	if (sc->sc_rpc_buf)
		kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN);

	if (sc->sc_log) {
		sysctl_teardown(&sc->sc_log);
		sc->sc_log = NULL;
	}

	return 0;
}

static int
vmt_sysctl_setup_root(device_t self)
{
	const struct sysctlnode *machdep_node, *vmt_node;
	struct vmt_softc *sc = device_private(self);
	int rv;

	rv = sysctl_createv(&sc->sc_log, 0, NULL, &machdep_node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
	if (rv != 0)
		goto fail;

	rv = sysctl_createv(&sc->sc_log, 0, &machdep_node, &vmt_node,
	    0, CTLTYPE_NODE, device_xname(self), NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (rv != 0)
		goto fail;

	rv = vmt_sysctl_setup_clock_sync(self, vmt_node);
	if (rv != 0)
		goto fail;

	return 0;

fail:
	sysctl_teardown(&sc->sc_log);
	sc->sc_log = NULL;

	return rv;
}

static int
vmt_sysctl_setup_clock_sync(device_t self, const struct sysctlnode *root_node)
{
	const struct sysctlnode *node, *period_node;
	struct vmt_softc *sc = device_private(self);
	int rv;

	rv = sysctl_createv(&sc->sc_log, 0, &root_node, &node,
	    0, CTLTYPE_NODE, "clock_sync", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (rv != 0)
		return rv;

	rv = sysctl_createv(&sc->sc_log, 0, &node, &period_node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "period",
	    SYSCTL_DESCR("Period, in seconds, at which to update the "
	        "guest's clock"),
	    vmt_sysctl_update_clock_sync_period, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL);
	return rv;
}

static int
vmt_sysctl_update_clock_sync_period(SYSCTLFN_ARGS)
{
	int error, period;
	struct sysctlnode node;
	struct vmt_softc *sc;

	node = *rnode;
	sc = (struct vmt_softc *)node.sysctl_data;

	period = sc->sc_clock_sync_period_seconds;
	node.sysctl_data = &period;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (sc->sc_clock_sync_period_seconds != period) {
		callout_halt(&sc->sc_clock_sync_tick, NULL);
		sc->sc_clock_sync_period_seconds = period;
		if (sc->sc_clock_sync_period_seconds > 0)
			callout_schedule(&sc->sc_clock_sync_tick,
			    mstohz(sc->sc_clock_sync_period_seconds * 1000));
	}
	return 0;
}

static void
vmt_clock_sync_tick(void *xarg)
{
	struct vmt_softc *sc = xarg;

	vmt_sync_guest_clock(sc);

	callout_schedule(&sc->sc_clock_sync_tick,
	    mstohz(sc->sc_clock_sync_period_seconds * 1000));
}

static void
vmt_update_guest_uptime(struct vmt_softc *sc)
{
	/* host wants uptime in hundredths of a second */
	if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %" PRId64 "00",
	    VM_GUEST_INFO_UPTIME, time_uptime) != 0) {
		device_printf(sc->sc_dev, "unable to set guest uptime\n");
		sc->sc_rpc_error = 1;
	}
}

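/*
 * Report the hostname to the host whenever it changes, and send the
 * guest OS identification once.
 */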
static void
vmt_update_guest_info(struct vmt_softc *sc)
{
	if (strncmp(sc->sc_hostname, hostname, sizeof(sc->sc_hostname)) != 0) {
		strlcpy(sc->sc_hostname, hostname, sizeof(sc->sc_hostname));

		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s",
		    VM_GUEST_INFO_DNS_NAME, sc->sc_hostname) != 0) {
			device_printf(sc->sc_dev, "unable to set hostname\n");
			sc->sc_rpc_error = 1;
		}
	}

	/*
	 * we're supposed to pass the full network address information back here,
	 * but that involves xdr (sunrpc) data encoding, which seems a bit unreasonable.
	 */

	if (sc->sc_set_guest_os == 0) {
		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s %s %s",
		    VM_GUEST_INFO_OS_NAME_FULL, ostype, osrelease, machine_arch) != 0) {
			device_printf(sc->sc_dev, "unable to set full guest OS\n");
			sc->sc_rpc_error = 1;
		}

		/*
		 * host doesn't like it if we send an OS name it doesn't recognise,
		 * so use "other" for i386 and "other-64" for amd64
		 */
		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s",
		    VM_GUEST_INFO_OS_NAME, VM_OS_NAME) != 0) {
			device_printf(sc->sc_dev, "unable to set guest OS\n");
			sc->sc_rpc_error = 1;
		}

		sc->sc_set_guest_os = 1;
	}
}

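/*
 * Fetch the host's idea of the time (seconds in ESI:EDX, microseconds
 * in EBX) and step the guest clock to it.
 */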
static void
vmt_sync_guest_clock(struct vmt_softc *sc)
{
	struct vm_backdoor frame;
	struct timespec ts;

	memset(&frame, 0, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ecx.part.low = VM_CMD_GET_TIME_FULL;
	frame.edx.part.low = VM_PORT_CMD;
	vm_cmd(&frame);

	if (frame.eax.word != 0xffffffff) {
		ts.tv_sec = ((uint64_t)frame.esi.word << 32) | frame.edx.word;
		ts.tv_nsec = frame.ebx.word * 1000;
		tc_setclock(&ts);
	}
}

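/* Periodic housekeeping: refresh guest info and uptime every 15 seconds. */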
static void
vmt_tick(void *xarg)
{
	struct vmt_softc *sc = xarg;

	vmt_update_guest_info(sc);
	vmt_update_guest_uptime(sc);

	callout_schedule(&sc->sc_tick, hz * 15);
}

static void
vmt_tclo_state_change_success(struct vmt_softc *sc, int success, char state)
{
	if (vm_rpc_send_rpci_tx(sc, "tools.os.statechange.status %d %d",
	    success, state) != 0) {
		device_printf(sc->sc_dev, "unable to send state change result\n");
		sc->sc_rpc_error = 1;
	}
}

static void
vmt_do_shutdown(struct vmt_softc *sc)
{
	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_HALT);
	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);

	device_printf(sc->sc_dev, "host requested shutdown\n");
	sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_power);
}

static void
vmt_do_reboot(struct vmt_softc *sc)
{
	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_REBOOT);
	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);

	device_printf(sc->sc_dev, "host requested reboot\n");
	sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_reset);
}

static void
vmt_do_resume(struct vmt_softc *sc)
{
	device_printf(sc->sc_dev, "guest resuming from suspended state\n");

	vmt_sync_guest_clock(sc);

	/* force guest info update */
	sc->sc_hostname[0] = '\0';
	sc->sc_set_guest_os = 0;
	vmt_update_guest_info(sc);

	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_RESUME);
	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
		device_printf(sc->sc_dev, "error sending resume response\n");
		sc->sc_rpc_error = 1;
	}

	sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_sleep);
}

static bool
vmt_shutdown(device_t self, int flags)
{
	struct vmt_softc *sc = device_private(self);

	if (vm_rpc_send_rpci_tx(sc, "tools.capability.hgfs_server toolbox 0") != 0) {
		device_printf(sc->sc_dev, "failed to disable hgfs server capability\n");
	}

	if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
		device_printf(sc->sc_dev, "failed to send shutdown ping\n");
	}

	vm_rpc_close(&sc->sc_tclo_rpc);

	return true;
}

static void
vmt_pswitch_event(void *xarg)
{
	struct vmt_event *ev = xarg;

	sysmon_pswitch_event(&ev->ev_smpsw, ev->ev_code);
}

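/*
 * Poll the TCLO channel for requests from the host and dispatch them.
 * The channel is reopened here if a previous error forced it closed.
 */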
static void
vmt_tclo_tick(void *xarg)
{
	struct vmt_softc *sc = xarg;
	u_int32_t rlen;
	u_int16_t ack;

	/* reopen tclo channel if it's currently closed */
	if (sc->sc_tclo_rpc.channel == 0 &&
	    sc->sc_tclo_rpc.cookie1 == 0 &&
	    sc->sc_tclo_rpc.cookie2 == 0) {
		if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
			device_printf(sc->sc_dev, "unable to reopen TCLO channel\n");
			callout_schedule(&sc->sc_tclo_tick, hz * 15);
			return;
		}

		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_RESET_REPLY) != 0) {
			device_printf(sc->sc_dev, "failed to send reset reply\n");
			sc->sc_rpc_error = 1;
			goto out;
		} else {
			sc->sc_rpc_error = 0;
		}
	}

	if (sc->sc_tclo_ping) {
		if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
			device_printf(sc->sc_dev, "failed to send TCLO outgoing ping\n");
			sc->sc_rpc_error = 1;
			goto out;
		}
	}

	if (vm_rpc_get_length(&sc->sc_tclo_rpc, &rlen, &ack) != 0) {
		device_printf(sc->sc_dev, "failed to get length of incoming TCLO data\n");
		sc->sc_rpc_error = 1;
		goto out;
	}

	if (rlen == 0) {
		sc->sc_tclo_ping = 1;
		goto out;
	}

	if (rlen >= VMT_RPC_BUFLEN) {
		rlen = VMT_RPC_BUFLEN - 1;
	}
	if (vm_rpc_get_data(&sc->sc_tclo_rpc, sc->sc_rpc_buf, rlen, ack) != 0) {
		device_printf(sc->sc_dev, "failed to get incoming TCLO data\n");
		sc->sc_rpc_error = 1;
		goto out;
	}
	sc->sc_tclo_ping = 0;

#ifdef VMT_DEBUG
	printf("vmware: received message '%s'\n", sc->sc_rpc_buf);
#endif

	if (strcmp(sc->sc_rpc_buf, "reset") == 0) {

		if (sc->sc_rpc_error != 0) {
			device_printf(sc->sc_dev, "resetting rpc\n");
			vm_rpc_close(&sc->sc_tclo_rpc);
			/* reopen and send the reset reply next time around */
			goto out;
		}

		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_RESET_REPLY) != 0) {
			device_printf(sc->sc_dev, "failed to send reset reply\n");
			sc->sc_rpc_error = 1;
		}

	} else if (strcmp(sc->sc_rpc_buf, "ping") == 0) {

		vmt_update_guest_info(sc);
		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
			device_printf(sc->sc_dev, "error sending ping response\n");
			sc->sc_rpc_error = 1;
		}

	} else if (strcmp(sc->sc_rpc_buf, "OS_Halt") == 0) {
		vmt_do_shutdown(sc);
	} else if (strcmp(sc->sc_rpc_buf, "OS_Reboot") == 0) {
		vmt_do_reboot(sc);
	} else if (strcmp(sc->sc_rpc_buf, "OS_PowerOn") == 0) {
		vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_POWERON);
		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
			device_printf(sc->sc_dev, "error sending poweron response\n");
			sc->sc_rpc_error = 1;
		}
	} else if (strcmp(sc->sc_rpc_buf, "OS_Suspend") == 0) {
		log(LOG_KERN | LOG_NOTICE, "VMware guest entering suspended state\n");

		vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_SUSPEND);
		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
			device_printf(sc->sc_dev, "error sending suspend response\n");
			sc->sc_rpc_error = 1;
		}
	} else if (strcmp(sc->sc_rpc_buf, "OS_Resume") == 0) {
		vmt_do_resume(sc);
	} else if (strcmp(sc->sc_rpc_buf, "Capabilities_Register") == 0) {

		/* don't know if this is important at all */
		if (vm_rpc_send_rpci_tx(sc, "vmx.capability.unified_loop toolbox") != 0) {
			device_printf(sc->sc_dev, "unable to set unified loop\n");
			sc->sc_rpc_error = 1;
		}
		if (vm_rpci_response_successful(sc) == 0) {
			device_printf(sc->sc_dev, "host rejected unified loop setting\n");
		}

		/* the trailing space is apparently important here */
		if (vm_rpc_send_rpci_tx(sc, "tools.capability.statechange ") != 0) {
			device_printf(sc->sc_dev, "unable to send statechange capability\n");
			sc->sc_rpc_error = 1;
		}
		if (vm_rpci_response_successful(sc) == 0) {
			device_printf(sc->sc_dev, "host rejected statechange capability\n");
		}

		if (vm_rpc_send_rpci_tx(sc, "tools.set.version %u", VM_VERSION_UNMANAGED) != 0) {
			device_printf(sc->sc_dev, "unable to set tools version\n");
			sc->sc_rpc_error = 1;
		}

		vmt_update_guest_uptime(sc);

		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
			device_printf(sc->sc_dev, "error sending capabilities_register response\n");
			sc->sc_rpc_error = 1;
		}
	} else if (strcmp(sc->sc_rpc_buf, "Set_Option broadcastIP 1") == 0) {
		struct ifaddr *iface_addr = NULL;
		struct ifnet *iface;
		struct sockaddr_in *guest_ip;
		int s;
		struct psref psref;

		/* find first available ipv4 address */
		guest_ip = NULL;
		s = pserialize_read_enter();
		IFNET_READER_FOREACH(iface) {

			/* skip loopback */
			if (strncmp(iface->if_xname, "lo", 2) == 0 &&
			    iface->if_xname[2] >= '0' && iface->if_xname[2] <= '9') {
				continue;
			}

			IFADDR_READER_FOREACH(iface_addr, iface) {
				if (iface_addr->ifa_addr->sa_family != AF_INET) {
					continue;
				}

				guest_ip = satosin(iface_addr->ifa_addr);
				ifa_acquire(iface_addr, &psref);
				goto got;
			}
		}
 got:
		pserialize_read_exit(s);

		if (guest_ip != NULL) {
			if (vm_rpc_send_rpci_tx(sc, "info-set guestinfo.ip %s",
			    inet_ntoa(guest_ip->sin_addr)) != 0) {
				device_printf(sc->sc_dev, "unable to send guest IP address\n");
				sc->sc_rpc_error = 1;
			}
			ifa_release(iface_addr, &psref);

			if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
				device_printf(sc->sc_dev, "error sending broadcastIP response\n");
				sc->sc_rpc_error = 1;
			}
		} else {
			if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_ERROR_IP_ADDR) != 0) {
				device_printf(sc->sc_dev,
				    "error sending broadcastIP error response\n");
				sc->sc_rpc_error = 1;
			}
		}
	} else {
		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_ERROR) != 0) {
			device_printf(sc->sc_dev, "error sending unknown command reply\n");
			sc->sc_rpc_error = 1;
		}
	}

out:
	callout_schedule(&sc->sc_tclo_tick, sc->sc_tclo_ping ? hz : 1);
}

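/*
 * BACKDOOR_OP loads the CPU registers from the vm_backdoor frame,
 * executes the given I/O instruction against the hypervisor's magic
 * port, and stores the resulting register values back into the frame.
 */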
#define BACKDOOR_OP_I386(op, frame)		\
	__asm__ __volatile__ (			\
		"pushal;"			\
		"pushl %%eax;"			\
		"movl 0x18(%%eax), %%ebp;"	\
		"movl 0x14(%%eax), %%edi;"	\
		"movl 0x10(%%eax), %%esi;"	\
		"movl 0x0c(%%eax), %%edx;"	\
		"movl 0x08(%%eax), %%ecx;"	\
		"movl 0x04(%%eax), %%ebx;"	\
		"movl 0x00(%%eax), %%eax;"	\
		op				\
		"xchgl %%eax, 0x00(%%esp);"	\
		"movl %%ebp, 0x18(%%eax);"	\
		"movl %%edi, 0x14(%%eax);"	\
		"movl %%esi, 0x10(%%eax);"	\
		"movl %%edx, 0x0c(%%eax);"	\
		"movl %%ecx, 0x08(%%eax);"	\
		"movl %%ebx, 0x04(%%eax);"	\
		"popl 0x00(%%eax);"		\
		"popal;"			\
		:				\
		:"a"(frame)			\
	)

#define BACKDOOR_OP_AMD64(op, frame)		\
	__asm__ __volatile__ (			\
		"pushq %%rbp; \n\t"		\
		"pushq %%rax; \n\t"		\
		"movq 0x30(%%rax), %%rbp; \n\t"	\
		"movq 0x28(%%rax), %%rdi; \n\t"	\
		"movq 0x20(%%rax), %%rsi; \n\t"	\
		"movq 0x18(%%rax), %%rdx; \n\t"	\
		"movq 0x10(%%rax), %%rcx; \n\t"	\
		"movq 0x08(%%rax), %%rbx; \n\t"	\
		"movq 0x00(%%rax), %%rax; \n\t"	\
		op "\n\t"			\
		"xchgq %%rax, 0x00(%%rsp); \n\t" \
		"movq %%rbp, 0x30(%%rax); \n\t"	\
		"movq %%rdi, 0x28(%%rax); \n\t"	\
		"movq %%rsi, 0x20(%%rax); \n\t"	\
		"movq %%rdx, 0x18(%%rax); \n\t"	\
		"movq %%rcx, 0x10(%%rax); \n\t"	\
		"movq %%rbx, 0x08(%%rax); \n\t"	\
		"popq 0x00(%%rax); \n\t"	\
		"popq %%rbp; \n\t"		\
		: /* No outputs. */ : "a" (frame) \
		/* No pushal on amd64 so warn gcc about the clobbered registers. */ \
		: "rbx", "rcx", "rdx", "rdi", "rsi", "cc", "memory" \
	)


#ifdef __i386__
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_I386(op, frame)
#else
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_AMD64(op, frame)
#endif

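/*
 * vm_cmd issues a single backdoor command with "inl" on the command
 * port; vm_ins and vm_outs move bulk RPC data with "rep insb" and
 * "rep outsb".  The port itself is chosen by the caller via frame->edx.
 */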
static void
vm_cmd(struct vm_backdoor *frame)
{
	BACKDOOR_OP("inl %%dx, %%eax;", frame);
}

static void
vm_ins(struct vm_backdoor *frame)
{
	BACKDOOR_OP("cld;\n\trep insb;", frame);
}

static void
vm_outs(struct vm_backdoor *frame)
{
	BACKDOOR_OP("cld;\n\trep outsb;", frame);
}

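/*
 * Open an RPC channel of the given protocol (RPCI for guest-to-host
 * commands, TCLO for host-to-guest requests) and record the channel
 * number and cookies for later calls.
 */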
static int
vm_rpc_open(struct vm_rpc *rpc, uint32_t proto)
{
	struct vm_backdoor frame;

	memset(&frame, 0, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = proto | VM_RPC_FLAG_COOKIE;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_OPEN;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = 0;

	vm_cmd(&frame);

	if (frame.ecx.part.high != 1 || frame.edx.part.low != 0) {
		/* open-vm-tools retries without VM_RPC_FLAG_COOKIE here.. */
		printf("vmware: open failed, eax=%08x, ecx=%08x, edx=%08x\n",
		    frame.eax.word, frame.ecx.word, frame.edx.word);
		return EIO;
	}

	rpc->channel = frame.edx.part.high;
	rpc->cookie1 = frame.esi.word;
	rpc->cookie2 = frame.edi.word;

	return 0;
}

static int
vm_rpc_close(struct vm_rpc *rpc)
{
	struct vm_backdoor frame;

	memset(&frame, 0, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = 0;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_CLOSE;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = rpc->channel;
	frame.edi.word = rpc->cookie2;
	frame.esi.word = rpc->cookie1;

	vm_cmd(&frame);

	if (frame.ecx.part.high == 0 || frame.ecx.part.low != 0) {
		printf("vmware: close failed, eax=%08x, ecx=%08x\n",
		    frame.eax.word, frame.ecx.word);
		return EIO;
	}

	rpc->channel = 0;
	rpc->cookie1 = 0;
	rpc->cookie2 = 0;

	return 0;
}

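/*
 * Send an RPC message in two steps: announce the length with a
 * conventional backdoor call, then transfer the payload with the
 * enhanced (rep outsb) interface.
 */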
static int
vm_rpc_send(const struct vm_rpc *rpc, const uint8_t *buf, uint32_t length)
{
	struct vm_backdoor frame;

	/* Send the length of the command. */
	memset(&frame, 0, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = length;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_SET_LENGTH;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = rpc->channel;
	frame.esi.word = rpc->cookie1;
	frame.edi.word = rpc->cookie2;

	vm_cmd(&frame);

	if ((frame.ecx.part.high & VM_RPC_REPLY_SUCCESS) == 0) {
		printf("vmware: sending length failed, eax=%08x, ecx=%08x\n",
		    frame.eax.word, frame.ecx.word);
		return EIO;
	}

	if (length == 0)
		return 0; /* Only need to poke once if command is null. */

	/* Send the command using enhanced RPC. */
	memset(&frame, 0, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = VM_RPC_ENH_DATA;
	frame.ecx.word = length;
	frame.edx.part.low = VM_PORT_RPC;
	frame.edx.part.high = rpc->channel;
	frame.ebp.word = rpc->cookie1;
	frame.edi.word = rpc->cookie2;
#ifdef __amd64__
	frame.esi.quad = (uint64_t)buf;
#else
	frame.esi.word = (uint32_t)buf;
#endif

	vm_outs(&frame);

	if (frame.ebx.word != VM_RPC_ENH_DATA) {
		/* open-vm-tools retries on VM_RPC_REPLY_CHECKPOINT */
		printf("vmware: send failed, ebx=%08x\n", frame.ebx.word);
		return EIO;
	}

	return 0;
}

static int
vm_rpc_send_str(const struct vm_rpc *rpc, const uint8_t *str)
{
	return vm_rpc_send(rpc, str, strlen(str));
}

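/*
 * Read a pending message with the enhanced (rep insb) interface and
 * acknowledge it.  The result is NUL-terminated, so the buffer must
 * have room for length + 1 bytes.
 */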
static int
vm_rpc_get_data(const struct vm_rpc *rpc, char *data, uint32_t length,
    uint16_t dataid)
{
	struct vm_backdoor frame;

	/* Get data using enhanced RPC. */
	memset(&frame, 0, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = VM_RPC_ENH_DATA;
	frame.ecx.word = length;
	frame.edx.part.low = VM_PORT_RPC;
	frame.edx.part.high = rpc->channel;
	frame.esi.word = rpc->cookie1;
#ifdef __amd64__
	frame.edi.quad = (uint64_t)data;
#else
	frame.edi.word = (uint32_t)data;
#endif
	frame.ebp.word = rpc->cookie2;

	vm_ins(&frame);

	/* NUL-terminate the data */
	data[length] = '\0';

	if (frame.ebx.word != VM_RPC_ENH_DATA) {
		printf("vmware: get data failed, ebx=%08x\n",
		    frame.ebx.word);
		return EIO;
	}

	/* Acknowledge data received. */
	memset(&frame, 0, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = dataid;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_GET_END;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = rpc->channel;
	frame.esi.word = rpc->cookie1;
	frame.edi.word = rpc->cookie2;

	vm_cmd(&frame);

	if (frame.ecx.part.high == 0) {
		printf("vmware: ack data failed, eax=%08x, ecx=%08x\n",
		    frame.eax.word, frame.ecx.word);
		return EIO;
	}

	return 0;
}

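/*
 * Ask whether a message is waiting on the channel; returns its length
 * and id, or zero for both if there is nothing to receive.
 */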
static int
vm_rpc_get_length(const struct vm_rpc *rpc, uint32_t *length, uint16_t *dataid)
{
	struct vm_backdoor frame;

	memset(&frame, 0, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = 0;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_GET_LENGTH;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = rpc->channel;
	frame.esi.word = rpc->cookie1;
	frame.edi.word = rpc->cookie2;

	vm_cmd(&frame);

	if ((frame.ecx.part.high & VM_RPC_REPLY_SUCCESS) == 0) {
		printf("vmware: get length failed, eax=%08x, ecx=%08x\n",
		    frame.eax.word, frame.ecx.word);
		return EIO;
	}
	if ((frame.ecx.part.high & VM_RPC_REPLY_DORECV) == 0) {
		*length = 0;
		*dataid = 0;
	} else {
		*length = frame.ebx.word;
		*dataid = frame.edx.part.high;
	}

	return 0;
}

static int
vm_rpci_response_successful(struct vmt_softc *sc)
{
	return (sc->sc_rpc_buf[0] == '1' && sc->sc_rpc_buf[1] == ' ');
}

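/*
 * Send a guest-to-host command over a temporary RPCI channel and leave
 * any response in sc_rpc_buf.
 */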
static int
vm_rpc_send_rpci_tx_buf(struct vmt_softc *sc, const uint8_t *buf, uint32_t length)
{
	struct vm_rpc rpci;
	u_int32_t rlen;
	u_int16_t ack;
	int result = 0;

	if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0) {
		device_printf(sc->sc_dev, "rpci channel open failed\n");
		return EIO;
	}

	if (vm_rpc_send(&rpci, sc->sc_rpc_buf, length) != 0) {
		device_printf(sc->sc_dev, "unable to send rpci command\n");
		result = EIO;
		goto out;
	}

	if (vm_rpc_get_length(&rpci, &rlen, &ack) != 0) {
		device_printf(sc->sc_dev, "failed to get length of rpci response data\n");
		result = EIO;
		goto out;
	}

	if (rlen > 0) {
		if (rlen >= VMT_RPC_BUFLEN) {
			rlen = VMT_RPC_BUFLEN - 1;
		}

		if (vm_rpc_get_data(&rpci, sc->sc_rpc_buf, rlen, ack) != 0) {
			device_printf(sc->sc_dev, "failed to get rpci response data\n");
			result = EIO;
			goto out;
		}
	}

out:
	if (vm_rpc_close(&rpci) != 0) {
		device_printf(sc->sc_dev, "unable to close rpci channel\n");
	}

	return result;
}

static int
vm_rpc_send_rpci_tx(struct vmt_softc *sc, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(sc->sc_rpc_buf, VMT_RPC_BUFLEN, fmt, args);
	va_end(args);

	if (len >= VMT_RPC_BUFLEN) {
		device_printf(sc->sc_dev, "rpci command didn't fit in buffer\n");
		return EIO;
	}

	return vm_rpc_send_rpci_tx_buf(sc, sc->sc_rpc_buf, len);
}

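/* Leftover debugging snippet: dump the register frame around a GET_VERSION call. */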
#if 0
	struct vm_backdoor frame;

	memset(&frame, 0, sizeof(frame));

	frame.eax.word = VM_MAGIC;
	frame.ecx.part.low = VM_CMD_GET_VERSION;
	frame.edx.part.low = VM_PORT_CMD;

	printf("\n");
	printf("eax 0x%08x\n", frame.eax.word);
	printf("ebx 0x%08x\n", frame.ebx.word);
	printf("ecx 0x%08x\n", frame.ecx.word);
	printf("edx 0x%08x\n", frame.edx.word);
	printf("ebp 0x%08x\n", frame.ebp.word);
	printf("edi 0x%08x\n", frame.edi.word);
	printf("esi 0x%08x\n", frame.esi.word);

	vm_cmd(&frame);

	printf("-\n");
	printf("eax 0x%08x\n", frame.eax.word);
	printf("ebx 0x%08x\n", frame.ebx.word);
	printf("ecx 0x%08x\n", frame.ecx.word);
	printf("edx 0x%08x\n", frame.edx.word);
	printf("ebp 0x%08x\n", frame.ebp.word);
	printf("edi 0x%08x\n", frame.edi.word);
	printf("esi 0x%08x\n", frame.esi.word);
#endif

/*
 * Notes on tracing backdoor activity in vmware-guestd:
 *
 * - Find the addresses of the inl / rep insb / rep outsb
 *   instructions used to perform backdoor operations.
 *   One way to do this is to disassemble vmware-guestd:
 *
 *   $ objdump -S /emul/freebsd/sbin/vmware-guestd > vmware-guestd.S
 *
 *   and search for '<tab>in ' in the resulting file.  The rep insb and
 *   rep outsb code is directly below that.
 *
 * - Run vmware-guestd under gdb, setting up breakpoints as follows:
 *   (the addresses shown here are the ones from VMware-server-1.0.10-203137,
 *   the last version that actually works in FreeBSD emulation on OpenBSD)
 *
 * break *0x805497b	(address of 'in' instruction)
 * commands 1
 * silent
 * echo INOUT\n
 * print/x $ecx
 * print/x $ebx
 * print/x $edx
 * continue
 * end
 * break *0x805497c	(address of instruction after 'in')
 * commands 2
 * silent
 * echo ===\n
 * print/x $ecx
 * print/x $ebx
 * print/x $edx
 * echo \n
 * continue
 * end
 * break *0x80549b7	(address of instruction before 'rep insb')
 * commands 3
 * silent
 * set variable $inaddr = $edi
 * set variable $incount = $ecx
 * continue
 * end
 * break *0x80549ba	(address of instruction after 'rep insb')
 * commands 4
 * silent
 * echo IN\n
 * print $incount
 * x/s $inaddr
 * echo \n
 * continue
 * end
 * break *0x80549fb	(address of instruction before 'rep outsb')
 * commands 5
 * silent
 * echo OUT\n
 * print $ecx
 * x/s $esi
 * echo \n
 * continue
 * end
 *
 * This will produce a log of the backdoor operations, including the
 * data sent and received and the relevant register values.  You can then
 * match the register values to the various constants in this file.
 */

MODULE(MODULE_CLASS_DRIVER, vmt, "sysmon_power,sysmon_taskq");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vmt_modcmd(modcmd_t cmd, void *aux)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_vmt,
		    cfattach_ioconf_vmt, cfdata_ioconf_vmt);
#endif
		break;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_vmt,
		    cfattach_ioconf_vmt, cfdata_ioconf_vmt);
#endif
		break;
	case MODULE_CMD_AUTOUNLOAD:
		error = EBUSY;
		break;
	default:
		error = ENOTTY;
		break;
	}

	return error;
}