diff --git a/ext/libev/ev.c b/ext/libev/ev.c
index 39b9faf..dae87f1 100644
--- a/ext/libev/ev.c
+++ b/ext/libev/ev.c
@@ -37,6 +37,10 @@
  * either the BSD or the GPL.
  */
 
+/* ########## COOLIO PATCHERY HO! ########## */
+#include "ruby.h"
+/* ######################################## */
+
 /* this big block deduces configuration from config.h */
 #ifndef EV_STANDALONE
 # ifdef EV_CONFIG_H
@@ -107,7 +111,7 @@
 #  undef EV_USE_POLL
 #  define EV_USE_POLL 0
 # endif
-   
+
 # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
 #  ifndef EV_USE_EPOLL
 #   define EV_USE_EPOLL EV_FEATURE_BACKENDS
@@ -116,7 +120,7 @@
 #  undef EV_USE_EPOLL
 #  define EV_USE_EPOLL 0
 # endif
-   
+
 # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
 #  ifndef EV_USE_KQUEUE
 #   define EV_USE_KQUEUE EV_FEATURE_BACKENDS
@@ -125,7 +129,7 @@
 #  undef EV_USE_KQUEUE
 #  define EV_USE_KQUEUE 0
 # endif
-   
+
 # if HAVE_PORT_H && HAVE_PORT_CREATE
 #  ifndef EV_USE_PORT
 #   define EV_USE_PORT EV_FEATURE_BACKENDS
@@ -161,7 +165,7 @@
 #  undef EV_USE_EVENTFD
 #  define EV_USE_EVENTFD 0
 # endif
- 
+
 #endif
 
 #include <stdlib.h>
@@ -2174,7 +2178,7 @@ downheap (ANHE *heap, int N, int k)
 
       heap [k] = heap [c];
       ev_active (ANHE_w (heap [k])) = k;
-      
+
       k = c;
     }
 
@@ -2594,7 +2598,7 @@ ev_supported_backends (void) EV_THROW
   if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
   if (EV_USE_POLL  ) flags |= EVBACKEND_POLL;
   if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
-  
+
   return flags;
 }
 
@@ -3398,9 +3402,40 @@ time_update (EV_P_ ev_tstamp max_block)
     }
 }
 
+/* ########## COOLIO PATCHERY HO! ########## */
+#if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
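+/* rb_thread_call_without_gvl (and rb_thread_blocking_region) pass a single
+   void * argument to their callback, so the loop pointer and wait time are
+   bundled into this struct. */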
+struct ev_poll_args {
+  struct ev_loop *loop;
+  ev_tstamp waittime;
+};
+
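+/* Runs with the GVL released: unpack the bundled arguments and call the
+   real backend poll. */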
+static
+VALUE ev_backend_poll(void *ptr)
+{
+  struct ev_poll_args *args = (struct ev_poll_args *)ptr;
+  struct ev_loop *loop = args->loop;
+  backend_poll (EV_A_ args->waittime);
+  return Qnil;
+}
+#endif
+/* ######################################## */
+
 int
 ev_run (EV_P_ int flags)
 {
+/* ########## COOLIO PATCHERY HO! ########## */
+#if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
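+    /* Filled in at the patched backend_poll call site below, right before
+       the GVL is released. */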
+    struct ev_poll_args poll_args;
+#endif
+/* ######################################## */
+
 #if EV_FEATURE_API
   ++loop_depth;
 #endif
@@ -3518,7 +3553,68 @@ ev_run (EV_P_ int flags)
         ++loop_count;
 #endif
         assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
-        backend_poll (EV_A_ waittime);
+
+/*
+########################## COOLIO PATCHERY HO! ##########################
+
+According to the grandwizards of Ruby, locking and unlocking of the global
+interpreter lock are apparently too powerful a concept for a mere mortal to
+wield (although redefining what + and - do to numbers is totally cool).
+And so it came to pass that the only acceptable way to release the global
+interpreter lock is through a convoluted callback system that takes a
+function pointer. The grandwizard of libev foresaw this sort of scenario,
+but he too exposed it only through callbacks: one that runs before the
+system call, and one that runs immediately after.
+
+And so it came to pass that wrapping everything up in callbacks produced two
+incompatible APIs: Ruby's, which releases the global interpreter lock and
+reacquires it when its callback returns, and libev's, which wants two
+callbacks, one that runs before the polling operation starts and one that
+runs after it finishes.
+
+Both APIs use callbacks to solve the same problem, but libev's before/after
+pair and Ruby's "around" callback are diametrically opposed patterns that
+cannot be composed with one another.
+
+And thus we are left with no choice but to patch the internals of libev in
+order to release the GVL at precisely the right moment.
+
+This is a great example of a situation where granular locking and unlocking
+of the GVL is practically required. The goal is to get as close to the
+system call as possible, and to keep the GVL unlocked for the shortest
+amount of time possible.
+
+Perhaps Ruby could benefit from such an API, e.g.:
+
+rb_thread_unsafe_dangerous_crazy_blocking_region_begin(...);
+rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
+
+#######################################################################
+*/
+
+/*
+  Simulate rb_thread_call_without_gvl using rb_thread_blocking_region, as in:
+  https://github.com/brianmario/mysql2/blob/master/ext/mysql2/client.h#L8
+*/
+
+#ifndef HAVE_RB_THREAD_CALL_WITHOUT_GVL
+#ifdef HAVE_RB_THREAD_BLOCKING_REGION
+#define rb_thread_call_without_gvl(func, data1, ubf, data2) \
+  rb_thread_blocking_region((rb_blocking_function_t *)func, data1, ubf, data2)
+#endif
+#endif
+
+#if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
+        poll_args.loop = loop;
+        poll_args.waittime = waittime;
+        rb_thread_call_without_gvl(ev_backend_poll, (void *)&poll_args, RUBY_UBF_IO, 0);
+#else
+        backend_poll (EV_A_ waittime);
+#endif
+/*
+############################# END PATCHERY ############################
+*/
+
         assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
 
         pipe_write_wanted = 0; /* just an optimisation, no fence needed */
