#include "captive/mm.h" /* for captive_mmap_map_get() */
#include <sys/mman.h>
#include "reactos/internal/mm.h" /* for PAGE_SIZE */
+#include "captive/ldr.h" /* for captive_ModuleList_patchpoint_find() */
+#include "captive/ldr_exports.h" /* for struct captive_ModuleList_patchpoint */
-static greg_t val_exceptionstack_top;
+int _abnormal_termination_orig(void);
+extern greg_t fs_KPCR_ExceptionList;
+
+/**
+ * _abnormal_termination_wrap:
+ *
+ * This call can also be accessed as AbnormalTermination() or _abnormal_termination().
+ * It is a captive wrapper around _abnormal_termination() function.
+ *
+ * Returns whether some exception occurred in the current #try block we are currently
+ * #finish -ing. Any functions called from current #finish block will be considered
+ * for returning zero back again. It is forbidden to call this function outside
+ * of #finish block, result of such call is undefined.
+ *
+ * Exception handlers are registered from W32 binary in stack frames stored in "fs:[0x00000000]"
+ * value which gets mapped by libcaptive/ps/signal.c to #fs_KPCR_ExceptionList
+ * variable.
+ *
+ * If no exception handler was registered yet this function returns zero.
+ *
+ * See also RtlpDispatchException().
+ *
+ * Returns: non-zero if some exception is now being handled as pending.
+ */
+int _abnormal_termination_wrap(void)
+{
+ /* No handler registered yet? ntoskrnl _abnormal_termination() does not handle it
+ * and I do not want to bother with registering toplevel handler.
+ */
+ if (fs_KPCR_ExceptionList==(greg_t)-1)
+ return 0;
+
+ return _abnormal_termination_orig();
+}
+
+
+#if 0
+
+/**
+ * RtlpDispatchException:
+ * @ExceptionRecord: Ignored by libcaptive.
+ * @Context: Ignored by libcaptive.
+ *
+ * Function definition to prevent inclusion of real RtlpDispatchException() implementation.
+ * Currently libcaptive never raises any exception - fix _abnormal_termination() if it changes.
+ *
+ * Returns: Never returns. Value %0 if it returns although it is impossible.
+ */
+ULONG RtlpDispatchException(IN PEXCEPTION_RECORD ExceptionRecord,IN PCONTEXT Context)
+{
+ g_assert_not_reached();
+ g_return_val_if_reached(0);
+}
+
+#endif
+
+
+/* =='KeGetCurrentKPCR()->ExceptionList';
+ * libcaptive has reduced KPCR (named 'captive_KPCR') which
+ * does not contain this field
+ */
+greg_t fs_KPCR_ExceptionList=(greg_t)-1;
+
+/* FIXME */
+static greg_t fs_KPCR_Unknown638=0;
+
static gboolean instr_mov_greg_to_fsmem(int greg,const void *fsmem,struct ucontext *ucontext)
{
- if (fsmem==0x00000000) { /* exception stack top pointer */
+ if (fsmem==(const void *)0x00000000) { /* exception stack top pointer */
/* moving from %esp is required to pass! */
- val_exceptionstack_top=ucontext->uc_mcontext.gregs[greg];
+ fs_KPCR_ExceptionList=ucontext->uc_mcontext.gregs[greg];
+ return TRUE;
+ }
+ g_return_val_if_reached(FALSE);
+}
+
+static gboolean instr_mov_immed_to_fsmem(greg_t immed,const void *fsmem,struct ucontext *ucontext)
+{
+ if (fsmem==(const void *)0x00000000) { /* exception stack top pointer */
+ fs_KPCR_ExceptionList=immed;
return TRUE;
}
g_return_val_if_reached(FALSE);
static gboolean instr_mov_fsmem_to_greg(const void *fsmem,int greg,struct ucontext *ucontext)
{
- if (fsmem==0x00000000) { /* exception stack top pointer */
+ if (fsmem==(const void *)0x00000000) { /* exception stack top pointer */
/* moving to %esp is required to pass! */
- ucontext->uc_mcontext.gregs[greg]=val_exceptionstack_top;
+ ucontext->uc_mcontext.gregs[greg]=fs_KPCR_ExceptionList;
+ return TRUE;
+ }
+ if (fsmem==(const void *)0x00000051) { /* =='KeGetCurrentKPCR()->Number' */
+ g_return_val_if_fail(greg!=REG_ESP,FALSE);
+ ucontext->uc_mcontext.gregs[greg]=(greg_t)0; /* ==libcaptive version of KeGetCurrentProcessorNumber() */
+ return TRUE;
+ }
+ if (fsmem==(const void *)0x00000124) { /* =='KeGetCurrentKPCR()->CurrentThread' */
+ g_return_val_if_fail(greg!=REG_ESP,FALSE);
+ ucontext->uc_mcontext.gregs[greg]=(greg_t)captive_KeGetCurrentKPCR()->CurrentThread;
+ return TRUE;
+ }
+ g_return_val_if_reached(FALSE);
+}
+
+static gboolean instr_push_fsmem(const void *fsmem,struct ucontext *ucontext)
+{
+ if (fsmem==(const void *)0x00000000) { /* exception stack top pointer */
+ ucontext->uc_mcontext.gregs[REG_ESP]-=4;
+ *(greg_t *)ucontext->uc_mcontext.gregs[REG_ESP]=fs_KPCR_ExceptionList;
+ return TRUE;
+ }
+ g_return_val_if_reached(FALSE);
+}
+
+static gboolean instr_pop_fsmem(const void *fsmem,struct ucontext *ucontext)
+{
+ if (fsmem==(const void *)0x00000000) { /* exception stack top pointer */
+ fs_KPCR_ExceptionList=*(greg_t *)ucontext->uc_mcontext.gregs[REG_ESP];
+ ucontext->uc_mcontext.gregs[REG_ESP]+=4;
+ return TRUE;
+ }
+ g_return_val_if_reached(FALSE);
+}
+
+static gboolean instr_incl_fsmem(const void *fsmem,struct ucontext *ucontext)
+{
+ if (fsmem==(const void *)0x00000638) { /* Unknown638 */
+ fs_KPCR_Unknown638++;
return TRUE;
}
g_return_val_if_reached(FALSE);
static void sigaction_SIGSEGV(int signo,siginfo_t *siginfo,struct ucontext *ucontext)
{
-const guint8 *reg_eip;
+guint8 *reg_eip;
const void *reg_eip_aligned;
+static const void *reg_eip_aligned_last_valid=NULL; /* performance cache */
g_return_if_fail(signo==SIGSEGV);
g_return_if_fail(siginfo->si_signo==SIGSEGV);
*/
reg_eip_aligned=(const void *)(((char *)reg_eip)-(GPOINTER_TO_UINT(reg_eip)&(PAGE_SIZE-1)));
g_assert(reg_eip_aligned!=NULL);
- g_return_if_fail(!(captive_mmap_map_get(reg_eip_aligned)&PROT_EXEC));
+
+ /* We do not expect any pages can get un-PROT_EXEC-ed
+ * and therefore we never invalidate our cache 'reg_eip_aligned_last_valid'.
+ */
+ if (reg_eip_aligned_last_valid!=reg_eip_aligned) {
+ g_return_if_fail(!(captive_mmap_map_get(reg_eip_aligned)&PROT_EXEC));
+ reg_eip_aligned_last_valid=reg_eip_aligned;
+ }
/* all instruction notation comments are written in AT&T 'instr src,dest' syntax! */
if (*reg_eip==0x64) { /* prefix '%fs:' */
reg_eip++;
+ /* TODO:thread; %fs: is CPU-dependent */
+ if (*reg_eip==0x0F) { /* two-byte opcode */
+ reg_eip++;
+ if (*reg_eip==0xB6) { /* 0x0F 0xB6 == movzbl (movzx r32,r/m8) */
+ reg_eip++;
+ if (*reg_eip==0x05) { /* movzbl %fs:{reg_eip[1..4]},%eax */
+ reg_eip++;
+ if (instr_mov_fsmem_to_greg(*(const void **)reg_eip,REG_EAX,ucontext)) {
+ reg_eip+=4;
+ goto ok;
+ }
+ g_assert_not_reached();
+ }
+ g_assert_not_reached();
+ }
+ g_assert_not_reached();
+ }
if (*reg_eip==0xA3) { /* 'mov %eax,%fs:{reg_eip[1..4]}' */
reg_eip++;
if (instr_mov_greg_to_fsmem(REG_EAX,*(const void **)reg_eip,ucontext)) {
}
if (*reg_eip==0x8B) { /* prefix 0x8B */
reg_eip++;
- if ((*reg_eip & ~0x38)==0x05) { /* 'mov %fs:{reg_eip[1..4]},%{op_regcode_to_greg(*reg_eip[b3..b5])} */
+ if ((*reg_eip & ~0x38)==0x05) { /* 'mov %fs:{reg_eip[1..4]},%{op_regcode_to_greg(*reg_eip[b3..b5])}' */
reg_eip++;
if (instr_mov_fsmem_to_greg(*(const void **)reg_eip,op_regcode_to_greg(reg_eip[-1]>>3U),ucontext)) {
reg_eip+=4;
}
g_assert_not_reached();
}
+ if (*reg_eip==0xFF) { /* prefix 0xFF */
+ reg_eip++;
+ if (*reg_eip==0x05) { /* 'incl %fs:{reg_eip[1..4]}' */
+ reg_eip++;
+ if (instr_incl_fsmem(*(const void **)reg_eip,ucontext)) {
+ reg_eip+=4;
+ goto ok;
+ }
+ g_assert_not_reached();
+ }
+ if (*reg_eip==0x35) { /* 'pushl %fs:{reg_eip[1..4]}' */
+ reg_eip++;
+ if (instr_push_fsmem(*(const void **)reg_eip,ucontext)) {
+ reg_eip+=4;
+ goto ok;
+ }
+ g_assert_not_reached();
+ }
+ g_assert_not_reached();
+ }
+ if (*reg_eip==0x8F) { /* prefix 0x8F */
+ reg_eip++;
+ if (*reg_eip==0x05) { /* 'popl %fs:{reg_eip[1..4]}' */
+ reg_eip++;
+ if (instr_pop_fsmem(*(const void **)reg_eip,ucontext)) {
+ reg_eip+=4;
+ goto ok;
+ }
+ g_assert_not_reached();
+ }
+ g_assert_not_reached();
+ }
+ if (*reg_eip==0xC7) { /* prefix 0xC7 */
+ reg_eip++;
+ if (*reg_eip==0x05) { /* 'movl ${reg_eip[5..8]},%fs:{reg_eip[1..4]}' */
+ reg_eip++;
+ if (instr_mov_immed_to_fsmem(((greg_t *)reg_eip)[1],*(const void **)reg_eip,ucontext)) {
+ reg_eip+=4+4;
+ goto ok;
+ }
+ g_assert_not_reached();
+ }
+ g_assert_not_reached();
+ }
+ g_assert_not_reached();
+ }
+
+ /* all instruction notation comments are written in AT&T 'instr src,dest' syntax! */
+ if (*reg_eip==0x66) { /* prefix: 16-bit operand-size override */
+ reg_eip++;
+ /* TODO:thread; %fs: is CPU-dependent */
+ if (*reg_eip==0x8E) { /* two-byte opcode */
+ reg_eip++;
+ if (*reg_eip==0xE3) { /* 'mov %bx,%fs' */
+ reg_eip++;
+ g_assert(0x30==(0xFFFF&ucontext->uc_mcontext.gregs[REG_EBX]));
+ /* 'reload' of %fs can be ignored */
+ goto ok;
+ }
+ g_assert_not_reached();
+ }
g_assert_not_reached();
}
+
+ if (*reg_eip==0xF4) { /* hlt; from captive_ModuleList_patch() */
+struct captive_ModuleList_patchpoint *patchpoint;
+const gchar *funcname_disabled;
+
+ g_log(G_LOG_DOMAIN,G_LOG_LEVEL_DEBUG,"%s: reg_eip=%p; 0xF4 hit",G_STRLOC,reg_eip);
+
+ if ((funcname_disabled=captive_ModuleList_function_disable_find(
+ reg_eip))) { /* ExportAddress */
+ g_error("%s: Reached disabled W32 function: %s",G_STRLOC,funcname_disabled);
+ g_assert_not_reached();
+ }
+ patchpoint=captive_ModuleList_patchpoint_find(
+ reg_eip); /* ExportAddress */
+ g_assert(patchpoint!=NULL);
+ if (reg_eip==patchpoint->orig_w32_func) {
+ g_assert(0xF4 /* hlt */ ==*patchpoint->orig_w32_func);
+ g_assert(patchpoint->orig_w32_2ndinstr_byte ==*patchpoint->orig_w32_2ndinstr);
+ if (patchpoint->through_w32_func) {
+ *patchpoint->orig_w32_func=patchpoint->orig_w32_func_byte;
+ *patchpoint->orig_w32_2ndinstr=0xF4; /* hlt */
+ }
+ else { /* !patchpoint->through_w32_func */
+ reg_eip=(guint8 *)patchpoint->wrap_wrap_func;
+ }
+ goto ok;
+ }
+ if (reg_eip==patchpoint->orig_w32_2ndinstr) {
+ g_assert(patchpoint->orig_w32_func_byte ==*patchpoint->orig_w32_func);
+ g_assert(0xF4 /* hlt */ ==*patchpoint->orig_w32_2ndinstr);
+ g_assert(patchpoint->through_w32_func==TRUE);
+ *patchpoint->orig_w32_func=0xF4; /* hlt */
+ *patchpoint->orig_w32_2ndinstr=patchpoint->orig_w32_2ndinstr_byte;
+ patchpoint->through_w32_func=FALSE;
+ goto ok;
+ }
+ g_assert_not_reached();
+ }
+
+ if (*reg_eip==0xFA) { /* cli */
+ g_log(G_LOG_DOMAIN,G_LOG_LEVEL_DEBUG,"%s: reg_eip=%p; CLI neutralized",G_STRLOC,reg_eip);
+ *reg_eip=0x90; /* nop */
+ goto ok;
+ }
+
+ if (*reg_eip==0xFB) { /* sti */
+ g_log(G_LOG_DOMAIN,G_LOG_LEVEL_DEBUG,"%s: reg_eip=%p; STI neutralized",G_STRLOC,reg_eip);
+ *reg_eip=0x90; /* nop */
+ goto ok;
+ }
+
g_assert_not_reached();
ok:
*
* Currently emulated set is the access to %fs register offset %0
* where the exception stack top pointer is located.
+ *
+ * Returns: %TRUE if successful.
*/
gboolean captive_signal_init(void)
{