Description: Fix test failures with NumPy 1.24, which removed the deprecated
 np.int and np.float aliases (deprecated since NumPy 1.20), by replacing them
 with the builtin int and float types in the documentation examples and in the
 Python/Cython sources.
Author: Bas Couwenberg <sebastic@debian.org>
Bug-Debian: https://bugs.debian.org/1027229

--- a/docs/source/dim_red.txt
+++ b/docs/source/dim_red.txt
@@ -18,10 +18,10 @@ Example:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 4.5], [[1,1],[1,2]], 20  # 20 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,1],[1,2]], 30 # 30 samples of class 2
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = 2 * np.ones(n2, dtype=np.int)
+>>> y2 = 2 * np.ones(n2, dtype=int)
 >>> x = np.concatenate((x1, x2), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2))
 >>> lda = mlpy.LDA()
@@ -50,10 +50,10 @@ Example - KNN in kernel fisher space:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 4.5], [[1,1],[1,2]], 20  # 20 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,1],[1,2]], 30 # 30 samples of class 2
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = 2 * np.ones(n2, dtype=np.int)
+>>> y2 = 2 * np.ones(n2, dtype=int)
 >>> x = np.concatenate((x1, x2), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2))
 >>> K = mlpy.kernel_gaussian(x, x, sigma=3) # compute the kernel matrix
@@ -174,7 +174,7 @@ Example:
 >>> np.random.seed(0)
 >>> np.random.seed(0)
 >>> x = np.zeros((150, 2))
->>> y = np.empty(150, dtype=np.int)
+>>> y = np.empty(150, dtype=int)
 >>> theta = np.random.normal(0, np.pi, 50)
 >>> r = np.random.normal(0, 0.1, 50)
 >>> x[0:50, 0] = r * np.cos(theta)
--- a/docs/source/liblinear.txt
+++ b/docs/source/liblinear.txt
@@ -56,13 +56,13 @@ Example:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 5], [[1,1],[1,2]], 200  # 200 samples of class 0
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.zeros(n1, dtype=np.int)
+>>> y1 = np.zeros(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,0],[0,1]], 300 # 300 samples of class 1
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = np.ones(n2, dtype=np.int)
+>>> y2 = np.ones(n2, dtype=int)
 >>> mean3, cov3, n3 = [5, 8], [[0.5,0],[0,0.5]], 200 # 200 samples of class 2
 >>> x3 = np.random.multivariate_normal(mean3, cov3, n3)
->>> y3 = 2 * np.ones(n3, dtype=np.int)
+>>> y3 = 2 * np.ones(n3, dtype=int)
 >>> x = np.concatenate((x1, x2, x3), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2, y3))
 >>> svm = mlpy.LibLinear(solver_type='l2r_l2loss_svc_dual', C=0.01)
--- a/docs/source/lin_class.txt
+++ b/docs/source/lin_class.txt
@@ -22,10 +22,10 @@ Binary classification:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 5], [[1,1],[1,2]], 200  # 200 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,0],[0,1]], 300 # 300 samples of class -1
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = -np.ones(n2, dtype=np.int)
+>>> y2 = -np.ones(n2, dtype=int)
 >>> x = np.concatenate((x1, x2), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2))
 >>> ldac = mlpy.LDAC()
@@ -59,13 +59,13 @@ Multiclass classification:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 25], [[1,1],[1,2]], 200  # 200 samples of class 0
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.zeros(n1, dtype=np.int)
+>>> y1 = np.zeros(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 22.5], [[1,0],[0,1]], 300 # 300 samples of class 1
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = np.ones(n2, dtype=np.int)
+>>> y2 = np.ones(n2, dtype=int)
 >>> mean3, cov3, n3 = [5, 28], [[0.5,0],[0,0.5]], 200 # 200 samples of class 2
 >>> x3 = np.random.multivariate_normal(mean3, cov3, n3)
->>> y3 = 2 * np.ones(n3, dtype=np.int)
+>>> y3 = 2 * np.ones(n3, dtype=int)
 >>> x = np.concatenate((x1, x2, x3), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2, y3))
 >>> ldac = mlpy.LDAC()
@@ -114,10 +114,10 @@ Examples
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 5], [[1,1],[1,2]], 200  # 200 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,0],[0,1]], 300 # 300 samples of class -1
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = -np.ones(n2, dtype=np.int)
+>>> y2 = -np.ones(n2, dtype=int)
 >>> x = np.concatenate((x1, x2), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2))
 >>> p = mlpy.Perceptron(alpha=0.1, thr=0.05, maxiters=100) # basic perceptron
@@ -163,10 +163,10 @@ Example:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 5], [[1,1],[1,2]], 200  # 200 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,0],[0,1]], 300 # 300 samples of class -1
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = -np.ones(n2, dtype=np.int)
+>>> y2 = -np.ones(n2, dtype=int)
 >>> x = np.concatenate((x1, x2), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2))
 >>> en = mlpy.ElasticNetC(lmb=0.01, eps=0.001)
@@ -219,13 +219,13 @@ Example:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 5], [[1,1],[1,2]], 200  # 200 samples of class 0
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.zeros(n1, dtype=np.int)
+>>> y1 = np.zeros(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,0],[0,1]], 300 # 300 samples of class 1
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = np.ones(n2, dtype=np.int)
+>>> y2 = np.ones(n2, dtype=int)
 >>> mean3, cov3, n3 = [5, 8], [[0.5,0],[0,0.5]], 200 # 200 samples of class 2
 >>> x3 = np.random.multivariate_normal(mean3, cov3, n3)
->>> y3 = 2 * np.ones(n3, dtype=np.int)
+>>> y3 = 2 * np.ones(n3, dtype=int)
 >>> x = np.concatenate((x1, x2, x3), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2, y3))
 >>> da = mlpy.DLDA(delta=0.1)
--- a/docs/source/nonlin_class.txt
+++ b/docs/source/nonlin_class.txt
@@ -17,10 +17,10 @@ Example:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 4.5], [[1,1],[1,2]], 20  # 20 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,1],[1,2]], 30 # 30 samples of class 2
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = 2 * np.ones(n2, dtype=np.int)
+>>> y2 = 2 * np.ones(n2, dtype=int)
 >>> x = np.concatenate((x1, x2), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2))
 >>> K = mlpy.kernel_gaussian(x, x, sigma=2) # kernel matrix
@@ -77,13 +77,13 @@ Example:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 5], [[1,1],[1,2]], 200  # 200 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,0],[0,1]], 300 # 300 samples of class 2
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = 2 * np.ones(n2, dtype=np.int)
+>>> y2 = 2 * np.ones(n2, dtype=int)
 >>> mean3, cov3, n3 = [5, 8], [[0.5,0],[0,0.5]], 200 # 200 samples of class 3
 >>> x3 = np.random.multivariate_normal(mean3, cov3, n3)
->>> y3 = 3 * np.ones(n3, dtype=np.int)
+>>> y3 = 3 * np.ones(n3, dtype=int)
 >>> x = np.concatenate((x1, x2, x3), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2, y3))
 >>> knn = mlpy.KNN(k=3)
@@ -129,13 +129,13 @@ Example:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 5], [[1,1],[1,2]], 200  # 200 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,0],[0,1]], 300 # 300 samples of class 2
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = 2 * np.ones(n2, dtype=np.int)
+>>> y2 = 2 * np.ones(n2, dtype=int)
 >>> mean3, cov3, n3 = [6, 8], [[0.5,0],[0,0.5]], 200 # 200 samples of class 3
 >>> x3 = np.random.multivariate_normal(mean3, cov3, n3)
->>> y3 = 3 * np.ones(n3, dtype=np.int)
+>>> y3 = 3 * np.ones(n3, dtype=int)
 >>> x = np.concatenate((x1, x2, x3), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2, y3))
 >>> tree = mlpy.ClassTree(minsize=10)
@@ -176,13 +176,13 @@ Example:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 5], [[1,1],[1,2]], 200  # 200 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,0],[0,1]], 300 # 300 samples of class 2
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = 2 * np.ones(n2, dtype=np.int)
+>>> y2 = 2 * np.ones(n2, dtype=int)
 >>> mean3, cov3, n3 = [6, 8], [[0.5,0],[0,0.5]], 200 # 200 samples of class 3
 >>> x3 = np.random.multivariate_normal(mean3, cov3, n3)
->>> y3 = 3 * np.ones(n3, dtype=np.int)
+>>> y3 = 3 * np.ones(n3, dtype=int)
 >>> x = np.concatenate((x1, x2, x3), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2, y3))
 >>> ml = mlpy.MaximumLikelihoodC()
--- a/docs/source/svm.txt
+++ b/docs/source/svm.txt
@@ -123,10 +123,10 @@ Example:
 >>> np.random.seed(0)
 >>> mean1, cov1, n1 = [1, 4.5], [[1,1],[1,2]], 20  # 20 samples of class 1
 >>> x1 = np.random.multivariate_normal(mean1, cov1, n1)
->>> y1 = np.ones(n1, dtype=np.int)
+>>> y1 = np.ones(n1, dtype=int)
 >>> mean2, cov2, n2 = [2.5, 2.5], [[1,1],[1,2]], 30 # 30 samples of class 2
 >>> x2 = np.random.multivariate_normal(mean2, cov2, n2)
->>> y2 = 2 * np.ones(n2, dtype=np.int)
+>>> y2 = 2 * np.ones(n2, dtype=int)
 >>> x = np.concatenate((x1, x2), axis=0) # concatenate the samples
 >>> y = np.concatenate((y1, y2))
 >>> K = mlpy.kernel_gaussian(x, x, sigma=2) # kernel matrix
--- a/docs/source/tutorial.txt
+++ b/docs/source/tutorial.txt
@@ -30,7 +30,7 @@ Load the modules:
 Load the Iris dataset:
 
 >>> iris = np.loadtxt('iris.csv', delimiter=',')
->>> x, y = iris[:, :4], iris[:, 4].astype(np.int) # x: (observations x attributes) matrix, y: classes (1: setosa, 2: versicolor, 3: virginica)
+>>> x, y = iris[:, :4], iris[:, 4].astype(int) # x: (observations x attributes) matrix, y: classes (1: setosa, 2: versicolor, 3: virginica)
 >>> x.shape
 (150, 4)
 >>> y.shape
--- a/mlpy/adatron/adatron.pyx
+++ b/mlpy/adatron/adatron.pyx
@@ -72,8 +72,8 @@ class KernelAdatron:
         cdef np.ndarray[np.float_t, ndim=1] alpha_arr
         cdef double margin
 
-        K_arr = np.ascontiguousarray(K, dtype=np.float)
-        y_arr = np.asarray(y, dtype=np.int)
+        K_arr = np.ascontiguousarray(K, dtype=float)
+        y_arr = np.asarray(y, dtype=int)
 
         if K_arr.ndim != 2:
             raise ValueError("K must be a 2d array_like object")
@@ -93,7 +93,7 @@ class KernelAdatron:
 
         ynew = np.where(y_arr==self._labels[0], -1, 1)
         n = K_arr.shape[0]
-        alpha_arr = np.zeros(n, dtype=np.float)
+        alpha_arr = np.zeros(n, dtype=float)
 
         steps = adatron(<long*> ynew.data, <double *> K_arr.data,
             <int> n, <double> self._C, <int> self._maxsteps,
@@ -121,7 +121,7 @@ class KernelAdatron:
         if self._alpha is None:
             raise ValueError("no model computed; run learn() first")
 
-        Kt_arr = np.asarray(Kt, dtype=np.float)
+        Kt_arr = np.asarray(Kt, dtype=float)
 
         try:
             s = np.sign(np.dot(self._alpha * self._y, Kt_arr.T))
@@ -129,7 +129,7 @@ class KernelAdatron:
             raise ValueError("Kt, alpha: shape mismatch")
         
         return np.where(s==-1, self._labels[0], self._labels[1]) \
-            .astype(np.int)
+            .astype(int)
 
     def margin(self):
         """Return the margin.
--- a/mlpy/bordacount/borda.py
+++ b/mlpy/bordacount/borda.py
@@ -59,7 +59,7 @@ def borda_count(x, k=None):
       * ...
     """
     
-    x_arr = np.asarray(x, dtype=np.int)
+    x_arr = np.asarray(x, dtype=int)
     n, p = x_arr.shape
 
     if k == None:
--- a/mlpy/canberra/canberra.pyx
+++ b/mlpy/canberra/canberra.pyx
@@ -16,8 +16,8 @@ def canberra(x, y):
     cdef np.ndarray[np.float64_t, ndim=1] x_arr
     cdef np.ndarray[np.float64_t, ndim=1] y_arr
     
-    x_arr = np.ascontiguousarray(x, dtype=np.float)
-    y_arr = np.ascontiguousarray(y, dtype=np.float)
+    x_arr = np.ascontiguousarray(x, dtype=float)
+    y_arr = np.ascontiguousarray(y, dtype=float)
 
     if x_arr.shape[0] != y_arr.shape[0]:
         raise ValueError("x, y: shape mismatch")
@@ -36,8 +36,8 @@ def canberra_location(x, y, k=None):
     cdef np.ndarray[np.int64_t, ndim=1] x_arr
     cdef np.ndarray[np.int64_t, ndim=1] y_arr
     
-    x_arr = np.ascontiguousarray(x, dtype=np.int)
-    y_arr = np.ascontiguousarray(y, dtype=np.int)
+    x_arr = np.ascontiguousarray(x, dtype=int)
+    y_arr = np.ascontiguousarray(y, dtype=int)
 
     if x_arr.shape[0] != y_arr.shape[0]:
         raise ValueError("x, y: shape mismatch")
@@ -75,7 +75,7 @@ def canberra_stability(x, k=None):
 
     cdef np.ndarray[np.int64_t, ndim=2] x_arr
         
-    x_arr = np.ascontiguousarray(x, dtype=np.int)
+    x_arr = np.ascontiguousarray(x, dtype=int)
     
     if k == None:
         k = x_arr.shape[1]
--- a/mlpy/crossval.py
+++ b/mlpy/crossval.py
@@ -73,11 +73,11 @@ def cv_kfold(n, k, strat=None, seed=0):
         raise ValueError("k must be > 1")
     
     if strat is not None:
-        _strat = np.asarray(strat, dtype=np.int)
+        _strat = np.asarray(strat, dtype=int)
         if n != _strat.shape[0]:
             raise ValueError("a, strat: shape mismatch")
     else:
-        _strat = np.zeros(n, dtype=np.int)
+        _strat = np.zeros(n, dtype=int)
     
     labels = np.unique(_strat)
 
@@ -152,11 +152,11 @@ def cv_random(n, k, p, strat=None, seed=
         raise ValueError("p must be in [0, 100]")
 
     if strat is not None:
-        _strat = np.asarray(strat, dtype=np.int)
+        _strat = np.asarray(strat, dtype=int)
         if n != _strat.shape[0]:
             raise ValueError("a, strat: shape mismatch")
     else:
-        _strat = np.zeros(n, dtype=np.int)
+        _strat = np.zeros(n, dtype=int)
 
     labels = np.unique(_strat)
 
--- a/mlpy/da.py
+++ b/mlpy/da.py
@@ -43,8 +43,8 @@ class LDAC:
               target values (N)
         """
         
-        xarr = np.asarray(x, dtype=np.float)
-        yarr = np.asarray(y, dtype=np.int)
+        xarr = np.asarray(x, dtype=float)
+        yarr = np.asarray(y, dtype=int)
         
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -58,9 +58,9 @@ class LDAC:
         if k < 2:
             raise ValueError("number of classes must be >= 2")     
         
-        p = np.empty(k, dtype=np.float)
-        mu = np.empty((k, xarr.shape[1]), dtype=np.float)
-        cov = np.zeros((xarr.shape[1], xarr.shape[1]), dtype=np.float)
+        p = np.empty(k, dtype=float)
+        mu = np.empty((k, xarr.shape[1]), dtype=float)
+        cov = np.zeros((xarr.shape[1], xarr.shape[1]), dtype=float)
 
         for i in range(k):
             wi = (yarr == self._labels[i])
@@ -71,8 +71,8 @@ class LDAC:
         cov /= float(xarr.shape[0] - k)
         covinv = np.linalg.inv(cov)
         
-        self._w = np.empty((k, xarr.shape[1]), dtype=np.float)
-        self._bias = np.empty(k, dtype=np.float)
+        self._w = np.empty((k, xarr.shape[1]), dtype=float)
+        self._bias = np.empty(k, dtype=float)
 
         for i in range(k):           
             self._w[i] = np.dot(covinv, mu[i])
@@ -131,16 +131,16 @@ class LDAC:
         if self._w is None:
             raise ValueError("no model computed.")
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
 
         if tarr.ndim == 1:
-            delta = np.empty(self._labels.shape[0], dtype=np.float)
+            delta = np.empty(self._labels.shape[0], dtype=float)
             for i in range(self._labels.shape[0]):
                 delta[i] = np.dot(tarr, self._w[i]) + self._bias[i]
             return self._labels[np.argmax(delta)]
         else:
             delta = np.empty((tarr.shape[0], self._labels.shape[0]),
-                        dtype=np.float)
+                        dtype=float)
             for i in range(self._labels.shape[0]):
                 delta[:, i] = np.dot(tarr, self._w[i]) + self._bias[i]
             return self._labels[np.argmax(delta, axis=1)]
@@ -178,8 +178,8 @@ class DLDA:
               target values (N)
         """
         
-        xarr = np.asarray(x, dtype=np.float)
-        yarr = np.asarray(y, dtype=np.int)
+        xarr = np.asarray(x, dtype=float)
+        yarr = np.asarray(y, dtype=int)
         
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -196,10 +196,10 @@ class DLDA:
         xm = np.mean(xarr, axis=0)
         self._xstd = np.std(xarr, axis=0, ddof=1)
         s0 = np.median(self._xstd)
-        self._dprime = np.empty((k, xarr.shape[1]), dtype=np.float)
-        self._xmprime = np.empty((k, xarr.shape[1]), dtype=np.float)
+        self._dprime = np.empty((k, xarr.shape[1]), dtype=float)
+        self._xmprime = np.empty((k, xarr.shape[1]), dtype=float)
         n = yarr.shape[0]
-        self._p = np.empty(k, dtype=np.float)
+        self._p = np.empty(k, dtype=float)
 
         for i in range(k):
             yi = (yarr == self._labels[i])
@@ -264,12 +264,12 @@ class DLDA:
         if self._xmprime is None:
             raise ValueError("no model computed.")
         
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
         
         if tarr.ndim == 1:
             return self._labels[np.argmax(self._score(tarr))]
         else:
-            ret = np.empty(tarr.shape[0], dtype=np.int)
+            ret = np.empty(tarr.shape[0], dtype=int)
             for i in range(tarr.shape[0]):
                 ret[i] = self._labels[np.argmax(self._score(tarr[i]))]
             return ret
@@ -282,13 +282,13 @@ class DLDA:
         if self._xmprime is None:
             raise ValueError("no model computed.")
         
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
 
         if tarr.ndim == 1:
             return self._prob(tarr)
         else:
             ret = np.empty((tarr.shape[0], self._labels.shape[0]),
-                dtype=np.float)
+                dtype=float)
             for i in range(tarr.shape[0]):
                 ret[i] = self._prob(tarr[i])
             return ret
@@ -337,8 +337,8 @@ class KFDAC:
               class labels (only two classes)
         """
 
-        Karr = np.asarray(K, dtype=np.float)
-        yarr = np.asarray(y, dtype=np.int)
+        Karr = np.asarray(K, dtype=float)
+        yarr = np.asarray(y, dtype=int)
 
         if Karr.ndim != 2:
             raise ValueError("K must be a 2d array_like object")
@@ -405,7 +405,7 @@ class KFDAC:
         if self._alpha is None:
             raise ValueError("no model computed; run learn()")
 
-        Ktarr = np.asarray(Kt, dtype=np.float)
+        Ktarr = np.asarray(Kt, dtype=float)
         if self._kernel is not None:
             Ktarr = self._kernel.kernel(Ktarr, self._x)
 
@@ -415,7 +415,7 @@ class KFDAC:
             raise ValueError("Kt, alpha: shape mismatch")
 
         return np.where(s==1, self._labels[0], self._labels[1]) \
-            .astype(np.int)
+            .astype(int)
 
     def alpha(self):
         """Return alpha.
--- a/mlpy/dimred.py
+++ b/mlpy/dimred.py
@@ -76,7 +76,7 @@ def lda(xarr, yarr):
     n, p = xarr.shape[0], xarr.shape[1]
     labels = np.unique(yarr)
     
-    sw = np.zeros((p, p), dtype=np.float)   
+    sw = np.zeros((p, p), dtype=float)   
     for i in labels:
         idx = np.where(yarr==i)[0]
         sw += np.cov(xarr[idx], rowvar=0) * \
@@ -121,7 +121,7 @@ def srda(xarr, yarr, alpha):
 
     # Point 1 in section 4.2
     yu = np.unique(yarr)
-    yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=np.float)
+    yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=float)
     yk[0] = 1.
     for i in range(1, yk.shape[0]):
         yk[i][yarr==yu[i-1]] = 1.
@@ -129,7 +129,7 @@ def srda(xarr, yarr, alpha):
     yk = yk[1:-1]
     
     # Point 2 in section 4.2
-    ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=np.float)
+    ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=float)
     for i in range(yk.shape[0]):
         ak[i] = ridge_base(xarr, yk[i], alpha)
 
@@ -258,14 +258,14 @@ def lda_fast(xarr, yarr):
     """
 
     yu = np.unique(yarr)
-    yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=np.float)
+    yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=float)
     yk[0] = 1.
     for i in range(1, yk.shape[0]):
         yk[i][yarr==yu[i-1]] = 1.
     gso(yk, norm=False) # orthogonalize yk
     yk = yk[1:-1]
     
-    ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=np.float)
+    ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=float)
     for i in range(yk.shape[0]):
         ak[i], _ = ols_base(xarr, yk[i], -1)
 
@@ -379,8 +379,8 @@ class LDA:
         variable, while the rows contain observations.
         """
 
-        xarr = np.asarray(x, dtype=np.float)
-        yarr = np.asarray(y, dtype=np.int)
+        xarr = np.asarray(x, dtype=float)
+        yarr = np.asarray(y, dtype=int)
         
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -405,7 +405,7 @@ class LDA:
         if self._coeff is None:
             raise ValueError("no model computed")
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
         
         try:
             return np.dot(tarr-self._mean, self._coeff)
@@ -444,8 +444,8 @@ class SRDA:
         variable, while the rows contain observations.
         """
 
-        xarr = np.asarray(x, dtype=np.float)
-        yarr = np.asarray(y, dtype=np.int)
+        xarr = np.asarray(x, dtype=float)
+        yarr = np.asarray(y, dtype=int)
         
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -467,7 +467,7 @@ class SRDA:
         if self._coeff is None:
             raise ValueError("no model computed")
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
         
         try:
             return np.dot(tarr-self._mean, self._coeff)
@@ -521,8 +521,8 @@ class KFDA:
               class labels (only two classes)
         """
 
-        Karr = np.array(K, dtype=np.float)
-        yarr = np.asarray(y, dtype=np.int)
+        Karr = np.array(K, dtype=float)
+        yarr = np.asarray(y, dtype=int)
         if yarr.ndim != 1:
             raise ValueError("y must be an 1d array_like object")
 
@@ -551,7 +551,7 @@ class KFDA:
         if self._coeff is None:
             raise ValueError("no model computed")
 
-        Ktarr = np.asarray(Kt, dtype=np.float)
+        Ktarr = np.asarray(Kt, dtype=float)
         if self._kernel is not None:
             Ktarr = self._kernel.kernel(Ktarr, self._x)
 
@@ -595,7 +595,7 @@ class PCA:
         variable, while the rows contain observations.
         """
 
-        xarr = np.asarray(x, dtype=np.float)
+        xarr = np.asarray(x, dtype=float)
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
         
@@ -604,7 +604,7 @@ class PCA:
 
         if self._whiten:
             self._coeff_inv = np.empty((self._coeff.shape[1], 
-                self._coeff.shape[0]), dtype=np.float)
+                self._coeff.shape[0]), dtype=float)
             
             for i in range(len(self._evals)):
                 eval_sqrt = np.sqrt(self._evals[i])
@@ -630,7 +630,7 @@ class PCA:
             raise ValueError("k must be in [1, %d] or None" % \
                                  self._coeff.shape[1])
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
 
         try:
             return np.dot(tarr-self._mean, self._coeff[:, :k])
@@ -645,7 +645,7 @@ class PCA:
         if self._coeff is None:
             raise ValueError("no PCA computed")
 
-        zarr = np.asarray(z, dtype=np.float)
+        zarr = np.asarray(z, dtype=float)
 
         return np.dot(zarr, self._coeff_inv[:zarr.shape[1]]) +\
             self._mean
@@ -699,7 +699,7 @@ class PCAFast:
         variable, while the rows contain observations.
         """
 
-        xarr = np.asarray(x, dtype=np.float)
+        xarr = np.asarray(x, dtype=float)
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
         
@@ -715,7 +715,7 @@ class PCAFast:
         if self._coeff is None:
             raise ValueError("no PCA computed")
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
 
         try:
             return np.dot(tarr-self._mean, self._coeff)
@@ -730,7 +730,7 @@ class PCAFast:
         if self._coeff is None:
             raise ValueError("no PCA computed")
 
-        zarr = np.asarray(z, dtype=np.float)
+        zarr = np.asarray(z, dtype=float)
         return np.dot(zarr, self._coeff_inv) + self._mean
         
     def coeff(self):
@@ -784,7 +784,7 @@ class KPCA:
               training data in input space (if kernel is a Kernel object)
         """
 
-        Karr = np.asarray(K, dtype=np.float)
+        Karr = np.asarray(K, dtype=float)
         if Karr.ndim != 2:
             raise ValueError("K must be a 2d array_like object")
         
@@ -818,7 +818,7 @@ class KPCA:
             raise ValueError("k must be in [1, %d] or None" % \
                                  self._coeff.shape[1])
 
-        Ktarr = np.asarray(Kt, dtype=np.float)
+        Ktarr = np.asarray(Kt, dtype=float)
         if self._kernel is not None:
             Ktarr = self._kernel.kernel(Ktarr, self._x)
         
--- a/mlpy/dtw/dtw.pyx
+++ b/mlpy/dtw/dtw.pyx
@@ -62,9 +62,9 @@ def dtw_std(x, y, dist_only=True, square
     cdef int i
     cdef int sq
 
-    x_arr = np.ascontiguousarray(x, dtype=np.float)
-    y_arr = np.ascontiguousarray(y, dtype=np.float)
-    cost_arr = np.empty((x_arr.shape[0], y_arr.shape[0]), dtype=np.float)
+    x_arr = np.ascontiguousarray(x, dtype=float)
+    y_arr = np.ascontiguousarray(y, dtype=float)
+    cost_arr = np.empty((x_arr.shape[0], y_arr.shape[0]), dtype=float)
 
     if squared: sq = 1
     else: sq = 0
@@ -77,8 +77,8 @@ def dtw_std(x, y, dist_only=True, square
     else:
         path(<double *> cost_arr.data, <int> cost_arr.shape[0], 
               <int> cost_arr.shape[1], -1, -1, &p)
-        px_arr = np.empty(p.k, dtype=np.int)
-        py_arr = np.empty(p.k, dtype=np.int)
+        px_arr = np.empty(p.k, dtype=int)
+        py_arr = np.empty(p.k, dtype=int)
         for i in range(p.k):
             px_arr[i] = p.px[i]
             py_arr[i] = p.py[i] 
@@ -122,9 +122,9 @@ def dtw_subsequence(x, y):
     cdef Path p
     cdef int i
     
-    x_arr = np.ascontiguousarray(x, dtype=np.float)
-    y_arr = np.ascontiguousarray(y, dtype=np.float)
-    cost_arr = np.empty((x_arr.shape[0], y_arr.shape[0]), dtype=np.float)
+    x_arr = np.ascontiguousarray(x, dtype=float)
+    y_arr = np.ascontiguousarray(y, dtype=float)
+    cost_arr = np.empty((x_arr.shape[0], y_arr.shape[0]), dtype=float)
 
     subsequence(<double *> x_arr.data, <double *> y_arr.data, 
                  <int> x_arr.shape[0], <int> y_arr.shape[0],
@@ -136,8 +136,8 @@ def dtw_subsequence(x, y):
     subsequence_path(<double *> cost_arr.data, <int> x_arr.shape[0],
                       <int> y_arr.shape[0], <int> idx, &p)
         
-    px_arr = np.empty(p.k, dtype=np.int)
-    py_arr = np.empty(p.k, dtype=np.int)
+    px_arr = np.empty(p.k, dtype=int)
+    py_arr = np.empty(p.k, dtype=int)
     
     for i in range(p.k):
         px_arr[i] = p.px[i]
--- a/mlpy/elasticnet.py
+++ b/mlpy/elasticnet.py
@@ -86,7 +86,7 @@ def elasticnet_base(x, y, lmb, eps, supp
     step = 1. / (np.linalg.eigvalsh(xx).max() * 1.1)
     lmb = lmb * n * step
     damp = 1. / (1 + lmb * eps)
-    beta0 = np.zeros(p, dtype=np.float)
+    beta0 = np.zeros(p, dtype=float)
 
     k, i = 0, 0
     kmax = 100000
@@ -166,8 +166,8 @@ class ElasticNet(object):
             response
         """
 
-        xarr = np.array(x, dtype=np.float, copy=True)
-        yarr = np.array(y, dtype=np.float, copy=True)
+        xarr = np.array(x, dtype=float, copy=True)
+        yarr = np.array(y, dtype=float, copy=True)
 
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -209,7 +209,7 @@ class ElasticNet(object):
         if self._beta0 is None:
             raise ValueError('no mode computed; run learn() first')
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
 
         if tarr.ndim > 2 or tarr.ndim < 1:
             raise ValueError("t must be an 1d or a 2d array_like object")
@@ -283,7 +283,7 @@ class ElasticNetC(ElasticNet):
             class labels
         """
 
-        yarr = np.asarray(y, dtype=np.int)
+        yarr = np.asarray(y, dtype=int)
         self._labels = np.unique(yarr)
         
         k = self._labels.shape[0]
--- a/mlpy/findpeaks/findpeaks.pyx
+++ b/mlpy/findpeaks/findpeaks.pyx
@@ -61,9 +61,9 @@ def findpeaks_win(x, span):
     if (span % 2 == 0) or (span < 3):
         raise ValueError("span must be >= 3 and odd")
     
-    xarr = np.ascontiguousarray(x, dtype=np.float)
+    xarr = np.ascontiguousarray(x, dtype=float)
     retc = fp_win(<double *> xarr.data, <int> xarr.shape[0], <int> span, &m)
-    ret = np.empty(m, dtype=np.int)
+    ret = np.empty(m, dtype=int)
     
     for i in range(m):
         ret[i] = retc[i]
@@ -107,7 +107,7 @@ def findpeaks_dist(x, mindist=2):
     if mindist < 2:
         raise ValueError("mindist must be >= 2")
 
-    _x = np.ascontiguousarray(x, dtype=np.float)
+    _x = np.ascontiguousarray(x, dtype=float)
     idx = findpeaks_win(_x, 3)
     tmp = np.empty_like(idx)
  
--- a/mlpy/golub.py
+++ b/mlpy/golub.py
@@ -47,8 +47,8 @@ class Golub:
               target values (N)
         """
         
-        xarr = np.asarray(x, dtype=np.float)
-        yarr = np.asarray(y, dtype=np.int)
+        xarr = np.asarray(x, dtype=float)
+        yarr = np.asarray(y, dtype=int)
         
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -85,7 +85,7 @@ class Golub:
         if self._w is None:
             raise ValueError("no model computed")
         
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
         if tarr.ndim > 2:
             raise ValueError("t must be an 1d or a 2d array_like object")
         
@@ -95,7 +95,7 @@ class Golub:
             raise ValueError("t, model: shape mismatch")
         
         return np.where(tmp>0, self._labels[1], self._labels[0]) \
-            .astype(np.int)
+            .astype(int)
     
     def w(self):
         """Returns the coefficients.
--- a/mlpy/gsl/gsl.pyx
+++ b/mlpy/gsl/gsl.pyx
@@ -12,7 +12,7 @@ def stats_quantile_from_sorted_data (sor
     cdef np.ndarray[np.float_t, ndim=1] sorted_data_arr
 
     try:
-        sorted_data_arr = np.array(sorted_data, dtype=np.float, order='C')
+        sorted_data_arr = np.array(sorted_data, dtype=float, order='C')
     except ValueError:
         raise ValueError("sorted_data must be a 2d array_like object")
 
--- a/mlpy/kernel/kernel.pyx
+++ b/mlpy/kernel/kernel.pyx
@@ -33,15 +33,15 @@ def kernel_linear(t, x):
     cdef np.ndarray[np.float64_t, ndim=2] k_arr
     cdef int i, j, nt, nx, pt, px
     
-    t_arr = np.array(t, dtype=np.float, order='C', ndmin=2)
-    x_arr = np.array(x, dtype=np.float, order='C', ndmin=2)
+    t_arr = np.array(t, dtype=float, order='C', ndmin=2)
+    x_arr = np.array(x, dtype=float, order='C', ndmin=2)
     nt, pt = t_arr.shape[0], t_arr.shape[1]
     nx, px = x_arr.shape[0], x_arr.shape[1]
 
     if pt != px:
         raise ValueError("t, x: shape mismatch")
         
-    k_arr = np.empty((nt, nx), dtype=np.float)
+    k_arr = np.empty((nt, nx), dtype=float)
     for i in range(nt):
         for j in range(nx):
             k_arr[i, j] = linear(<double *> t_arr.data + (i * pt),
@@ -60,15 +60,15 @@ def kernel_polynomial(t, x, gamma=1.0, b
     cdef np.ndarray[np.float64_t, ndim=2] k_arr
     cdef int i, j, nt, nx, pt, px
     
-    t_arr = np.array(t, dtype=np.float, order='C', ndmin=2)
-    x_arr = np.array(x, dtype=np.float, order='C', ndmin=2)
+    t_arr = np.array(t, dtype=float, order='C', ndmin=2)
+    x_arr = np.array(x, dtype=float, order='C', ndmin=2)
     nt, pt = t_arr.shape[0], t_arr.shape[1]
     nx, px = x_arr.shape[0], x_arr.shape[1]
 
     if pt != px:
         raise ValueError("t, x: shape mismatch")
         
-    k_arr = np.empty((nt, nx), dtype=np.float)
+    k_arr = np.empty((nt, nx), dtype=float)
     for i in range(nt):
         for j in range(nx):
             k_arr[i, j] = polynomial(<double *> t_arr.data + (i * pt),
@@ -87,15 +87,15 @@ def kernel_gaussian(t, x, sigma=1.0):
     cdef np.ndarray[np.float64_t, ndim=2] k_arr
     cdef int i, j, nt, nx, pt, px
     
-    t_arr = np.array(t, dtype=np.float, order='C', ndmin=2)
-    x_arr = np.array(x, dtype=np.float, order='C', ndmin=2)
+    t_arr = np.array(t, dtype=float, order='C', ndmin=2)
+    x_arr = np.array(x, dtype=float, order='C', ndmin=2)
     nt, pt = t_arr.shape[0], t_arr.shape[1]
     nx, px = x_arr.shape[0], x_arr.shape[1]
 
     if pt != px:
         raise ValueError("t, x: shape mismatch")
         
-    k_arr = np.empty((nt, nx), dtype=np.float)
+    k_arr = np.empty((nt, nx), dtype=float)
     for i in range(nt):
         for j in range(nx):
             k_arr[i, j] = gaussian(<double *> t_arr.data + (i * pt),
@@ -114,15 +114,15 @@ def kernel_exponential(t, x, sigma=1.0):
     cdef np.ndarray[np.float64_t, ndim=2] k_arr
     cdef int i, j, nt, nx, pt, px
     
-    t_arr = np.array(t, dtype=np.float, order='C', ndmin=2)
-    x_arr = np.array(x, dtype=np.float, order='C', ndmin=2)
+    t_arr = np.array(t, dtype=float, order='C', ndmin=2)
+    x_arr = np.array(x, dtype=float, order='C', ndmin=2)
     nt, pt = t_arr.shape[0], t_arr.shape[1]
     nx, px = x_arr.shape[0], x_arr.shape[1]
 
     if pt != px:
         raise ValueError("t, x: shape mismatch")
         
-    k_arr = np.empty((nt, nx), dtype=np.float)
+    k_arr = np.empty((nt, nx), dtype=float)
     for i in range(nt):
         for j in range(nx):
             k_arr[i, j] = exponential(<double *> t_arr.data + (i * pt),
@@ -141,15 +141,15 @@ def kernel_sigmoid(t, x, gamma=1.0, b=1.
     cdef np.ndarray[np.float64_t, ndim=2] k_arr
     cdef int i, j, nt, nx, pt, px
     
-    t_arr = np.array(t, dtype=np.float, order='C', ndmin=2)
-    x_arr = np.array(x, dtype=np.float, order='C', ndmin=2)
+    t_arr = np.array(t, dtype=float, order='C', ndmin=2)
+    x_arr = np.array(x, dtype=float, order='C', ndmin=2)
     nt, pt = t_arr.shape[0], t_arr.shape[1]
     nx, px = x_arr.shape[0], x_arr.shape[1]
 
     if pt != px:
         raise ValueError("t, x: shape mismatch")
         
-    k_arr = np.empty((nt, nx), dtype=np.float)
+    k_arr = np.empty((nt, nx), dtype=float)
     for i in range(nt):
         for j in range(nx):
             k_arr[i, j] = sigmoid(<double *> t_arr.data + (i * pt),
@@ -165,8 +165,8 @@ def kernel_center(Kt, K):
     """Kernel matrix centering.
     """
     
-    Kt_arr = np.array(Kt, dtype=np.float)
-    K_arr = np.array(K, dtype=np.float)
+    Kt_arr = np.array(Kt, dtype=float)
+    K_arr = np.array(K, dtype=float)
 
     if Kt_arr.ndim == 1:
         J1 = np.mean(Kt_arr)
--- a/mlpy/kmeans/kmeans.pyx
+++ b/mlpy/kmeans/kmeans.pyx
@@ -31,12 +31,12 @@ def kmeans(x, k, plus=False, seed=0):
     cdef np.ndarray[np.float64_t, ndim=2] means_arr
     cdef np.ndarray[np.int32_t, ndim=1] cls_arr
   
-    x_arr = np.ascontiguousarray(x, dtype=np.float)
+    x_arr = np.ascontiguousarray(x, dtype=float)
 
     if k <= 1 or k >= x_arr.shape[0]:
         raise ValueError("k must be in [2, N-1]")
 
-    means_arr = np.empty((k, x_arr.shape[1]), dtype=np.float)
+    means_arr = np.empty((k, x_arr.shape[1]), dtype=float)
     cls_arr = np.empty(x_arr.shape[0], dtype=np.int32)
     
     if plus:
@@ -52,4 +52,4 @@ def kmeans(x, k, plus=False, seed=0):
                 <int *> cls_arr.data, <int> x_arr.shape[0], 
                 <int> x_arr.shape[1], <int> k)
                 
-    return cls_arr.astype(np.int), means_arr, steps
+    return cls_arr.astype(int), means_arr, steps
--- a/mlpy/lars.py
+++ b/mlpy/lars.py
@@ -52,8 +52,8 @@ def lars_base(x, y, maxsteps=None):
     mu = np.ones(xarr.shape[0])
     active = []
     inactive = range(xarr.shape[1])
-    beta = np.zeros(xarr.shape[1], dtype=np.float)
-    est = np.zeros((maxsteps+1, xarr.shape[1]), dtype=np.float)
+    beta = np.zeros(xarr.shape[1], dtype=float)
+    est = np.zeros((maxsteps+1, xarr.shape[1]), dtype=float)
 
     for i in range(maxsteps):
 
@@ -140,8 +140,8 @@ class LARS():
             response
         """
 
-        xarr = np.array(x, dtype=np.float, copy=True)
-        yarr = np.array(y, dtype=np.float, copy=True)
+        xarr = np.array(x, dtype=float, copy=True)
+        yarr = np.array(y, dtype=float, copy=True)
 
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -183,7 +183,7 @@ class LARS():
         if not self._beta or not self._beta0:
             raise ValueError('no mode computed; run learn() first')
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
 
         if tarr.ndim > 2 or tarr.ndim < 1:
             raise ValueError("t must be an 1d or a 2d array_like object")
--- a/mlpy/lcs/lcs.pyx
+++ b/mlpy/lcs/lcs.pyx
@@ -68,8 +68,8 @@ def lcs_std(x, y):
     cdef Path p
     cdef int length
 
-    x_arr = np.ascontiguousarray(x, dtype=np.int)
-    y_arr = np.ascontiguousarray(y, dtype=np.int)
+    x_arr = np.ascontiguousarray(x, dtype=int)
+    y_arr = np.ascontiguousarray(y, dtype=int)
 
     b = <char **> malloc ((x_arr.shape[0]+1) * sizeof(char *))
     for i in range(x_arr.shape[0]+1):
@@ -84,8 +84,8 @@ def lcs_std(x, y):
         free (b[i])
     free(b)
 
-    px_arr = np.empty(p.k, dtype=np.int)
-    py_arr = np.empty(p.k, dtype=np.int)
+    px_arr = np.empty(p.k, dtype=int)
+    py_arr = np.empty(p.k, dtype=int)
     
     for i in range(p.k):
          px_arr[i] = p.px[i]
@@ -132,8 +132,8 @@ def lcs_real(x, y, eps, delta):
     cdef Path p
     cdef int length
 
-    x_arr = np.ascontiguousarray(x, dtype=np.float)
-    y_arr = np.ascontiguousarray(y, dtype=np.float)
+    x_arr = np.ascontiguousarray(x, dtype=float)
+    y_arr = np.ascontiguousarray(y, dtype=float)
 
     b = <char **> malloc ((x_arr.shape[0]+1) * sizeof(char *))
     for i in range(x_arr.shape[0]+1):
@@ -149,8 +149,8 @@ def lcs_real(x, y, eps, delta):
         free (b[i])
     free(b)
 
-    px_arr = np.empty(p.k, dtype=np.int)
-    py_arr = np.empty(p.k, dtype=np.int)
+    px_arr = np.empty(p.k, dtype=int)
+    py_arr = np.empty(p.k, dtype=int)
     
     for i in range(p.k):
          px_arr[i] = p.px[i]
--- a/mlpy/liblinear/liblinear.pyx
+++ b/mlpy/liblinear/liblinear.pyx
@@ -221,7 +221,7 @@ cdef class LibLinear:
             p = predict(self.model, test_node)
             free(test_node)
         else:
-            p = np.empty(tarr.shape[0], dtype=np.int)
+            p = np.empty(tarr.shape[0], dtype=int)
             for i in range(tarr.shape[0]):
                 test_node = array1d_to_node(tarr[i])
                 p[i] = predict(self.model, test_node)
@@ -267,14 +267,14 @@ cdef class LibLinear:
         dec_values = <double *> malloc (n * sizeof(double))
 
         if tarr.ndim == 1:
-            dec_values_arr = np.empty(n, dtype=np.float)
+            dec_values_arr = np.empty(n, dtype=float)
             test_node = array1d_to_node(tarr)
             p = predict_values(self.model, test_node, dec_values)
             free(test_node)
             for j in range(n):
                 dec_values_arr[j] = dec_values[j]
         else:
-            dec_values_arr = np.empty((tarr.shape[0], n), dtype=np.float)
+            dec_values_arr = np.empty((tarr.shape[0], n), dtype=float)
             for i in range(tarr.shape[0]):
                 test_node = array1d_to_node(tarr[i])
                 p = predict_values(self.model, test_node, dec_values)
@@ -316,7 +316,7 @@ cdef class LibLinear:
                                             sizeof(double))
 
         if tarr.ndim == 1:
-            prob_estimates_arr = np.empty(self.model.nr_class, dtype=np.float)
+            prob_estimates_arr = np.empty(self.model.nr_class, dtype=float)
             test_node = array1d_to_node(tarr)
             p = predict_probability(self.model, test_node, prob_estimates)
             free(test_node)
@@ -324,7 +324,7 @@ cdef class LibLinear:
                 prob_estimates_arr[j] = prob_estimates[j]
         else:
             prob_estimates_arr = np.empty((tarr.shape[0], self.model.nr_class), 
-                                      dtype=np.float)
+                                      dtype=float)
             for i in range(tarr.shape[0]):
                 test_node = array1d_to_node(tarr[i])
                 p = predict_probability(self.model, test_node, prob_estimates)
--- a/mlpy/libml/libml.pyx
+++ b/mlpy/libml/libml.pyx
@@ -63,8 +63,8 @@ cdef class KNN:
         cdef double **xpp
         cdef int i
 
-        xarr = np.ascontiguousarray(x, dtype=np.float)
-        yarr = np.ascontiguousarray(y, dtype=np.int)
+        xarr = np.ascontiguousarray(x, dtype=float)
+        yarr = np.ascontiguousarray(y, dtype=int)
         
         if self.k > xarr.shape[0]:
             raise ValueError("k must be smaller than number of samples")
@@ -126,7 +126,7 @@ cdef class KNN:
         if self.nn.x is NULL:
             raise ValueError("no model computed")
         
-        tarr = np.ascontiguousarray(t, dtype=np.float)
+        tarr = np.ascontiguousarray(t, dtype=float)
         if tarr.ndim > 2:
             raise ValueError("t must be an 1d or a 2d array_like object")
 
@@ -149,7 +149,7 @@ cdef class KNN:
                     ret = self.classes[p-1]
 
         else:
-            ret = np.empty(tarr.shape[0], dtype=np.int)
+            ret = np.empty(tarr.shape[0], dtype=int)
             for i in range(tarr.shape[0]):
                 tiarr = tarr[i]
                 tdata = <double *> tiarr.data
@@ -184,7 +184,7 @@ cdef class KNN:
         if self.nn.x is NULL:
             raise ValueError("no model computed")
         
-        ret = np.empty(self.nn.nclasses, dtype=np.int)
+        ret = np.empty(self.nn.nclasses, dtype=int)
         for i in range(self.nn.nclasses):
             ret[i] = self.classes[i]
 
@@ -257,8 +257,8 @@ cdef class ClassTree:
         cdef double **xpp
         cdef int i
 
-        xarr = np.ascontiguousarray(x, dtype=np.float)
-        yarr = np.ascontiguousarray(y, dtype=np.int)
+        xarr = np.ascontiguousarray(x, dtype=float)
+        yarr = np.ascontiguousarray(y, dtype=int)
        
         yu = np.unique(yarr)
         if yu.shape[0] <= 1:
@@ -317,7 +317,7 @@ cdef class ClassTree:
         if self.tree.x is NULL:
             raise ValueError("no model computed")
         
-        tarr = np.ascontiguousarray(t, dtype=np.float)
+        tarr = np.ascontiguousarray(t, dtype=float)
         if tarr.ndim > 2:
             raise ValueError("t must be an 1d or a 2d array_like object")
 
@@ -340,7 +340,7 @@ cdef class ClassTree:
                     ret = self.classes[p-1]
 
         else:
-            ret = np.empty(tarr.shape[0], dtype=np.int)
+            ret = np.empty(tarr.shape[0], dtype=int)
             for i in range(tarr.shape[0]):
                 tiarr = tarr[i]
                 tdata = <double *> tiarr.data
@@ -375,7 +375,7 @@ cdef class ClassTree:
         if self.tree.x is NULL:
             raise ValueError("no model computed")
         
-        ret = np.empty(self.tree.nclasses, dtype=np.int)
+        ret = np.empty(self.tree.nclasses, dtype=int)
         for i in range(self.tree.nclasses):
             ret[i] = self.classes[i]
 
@@ -452,8 +452,8 @@ cdef class MaximumLikelihoodC:
         cdef double **xpp
         cdef int i
 
-        xarr = np.ascontiguousarray(x, dtype=np.float)
-        yarr = np.ascontiguousarray(y, dtype=np.int)
+        xarr = np.ascontiguousarray(x, dtype=float)
+        yarr = np.ascontiguousarray(y, dtype=int)
 
         yu = np.unique(yarr)
         if yu.shape[0] <= 1:
@@ -512,7 +512,7 @@ cdef class MaximumLikelihoodC:
         if self.ml.mean is NULL:
             raise ValueError("no model computed")
         
-        tarr = np.ascontiguousarray(t, dtype=np.float)
+        tarr = np.ascontiguousarray(t, dtype=float)
         if tarr.ndim > 2:
             raise ValueError("t must be an 1d or a 2d array_like object")
 
@@ -535,7 +535,7 @@ cdef class MaximumLikelihoodC:
                     ret = self.classes[p-1]
 
         else:
-            ret = np.empty(tarr.shape[0], dtype=np.int)
+            ret = np.empty(tarr.shape[0], dtype=int)
             for i in range(tarr.shape[0]):
                 tiarr = tarr[i]
                 tdata = <double *> tiarr.data
@@ -570,7 +570,7 @@ cdef class MaximumLikelihoodC:
         if self.ml.mean is NULL:
             raise ValueError("no model computed")
         
-        ret = np.empty(self.ml.nclasses, dtype=np.int)
+        ret = np.empty(self.ml.nclasses, dtype=int)
         for i in range(self.ml.nclasses):
             ret[i] = self.classes[i]
 
--- a/mlpy/libsvm/libsvm.pyx
+++ b/mlpy/libsvm/libsvm.pyx
@@ -315,14 +315,14 @@ cdef class LibSvm:
         dec_values = <double *> malloc (n * sizeof(double))
         
         if tarr.ndim == 1:
-            dec_values_arr = np.empty(n, dtype=np.float)
+            dec_values_arr = np.empty(n, dtype=float)
             test_node = array1d_to_node(tarr)
             p = svm_predict_values(self.model, test_node, dec_values)
             free(test_node)
             for j in range(n):
                 dec_values_arr[j] = dec_values[j]
         else:
-            dec_values_arr = np.empty((tarr.shape[0], n), dtype=np.float)
+            dec_values_arr = np.empty((tarr.shape[0], n), dtype=float)
             for i in range(tarr.shape[0]):
                 test_node = array1d_to_node(tarr[i])
                 p = svm_predict_values(self.model, test_node, dec_values)
@@ -375,7 +375,7 @@ cdef class LibSvm:
                                            sizeof(double))
         
         if tarr.ndim == 1:
-            prob_estimates_arr = np.empty(self.model.nr_class, dtype=np.float)
+            prob_estimates_arr = np.empty(self.model.nr_class, dtype=float)
             test_node = array1d_to_node(tarr)
             p = svm_predict_probability(self.model, test_node,
                 prob_estimates)
@@ -384,7 +384,7 @@ cdef class LibSvm:
                 prob_estimates_arr[j] = prob_estimates[j]
         else:
             prob_estimates_arr = np.empty((tarr.shape[0], self.model.nr_class), 
-                                          dtype=np.float)
+                                          dtype=float)
             for i in range(tarr.shape[0]):
                 test_node = array1d_to_node(tarr[i])
                 p = svm_predict_probability(self.model, test_node,
--- a/mlpy/metrics.py
+++ b/mlpy/metrics.py
@@ -74,8 +74,8 @@ def error(t, p):
       error : float, in range [0.0, 1.0]
     """
   
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.int)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=int)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -97,8 +97,8 @@ def accuracy(t, p):
       accuracy : float, in range [0.0, 1.0]
     """
   
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.int)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=int)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -124,8 +124,8 @@ def error_p(t, p):
       errorp : float, in range [0.0, 1.0]
     """
 
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.int)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=int)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -161,8 +161,8 @@ def error_n(t, p):
       errorp : float, in range [0.0, 1.0]
     """
 
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.int)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=int)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -198,8 +198,8 @@ def sensitivity(t, p):
       sensitivity : float, in range [0.0, 1.0]
     """
 
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.int)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=int)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -235,8 +235,8 @@ def specificity(t, p):
       sensitivity : float, in range [0.0, 1.0]
     """
 
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.int)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=int)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -272,8 +272,8 @@ def ppv(t, p):
       PPV : float, in range [0.0, 1.0]
     """
 
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.int)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=int)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -309,8 +309,8 @@ def npv(t, p):
       NPV : float, in range [0.0, 1.0]
     """
 
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.int)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=int)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -353,8 +353,8 @@ def mcc(t, p):
       MCC : float, in range [-1.0, 1.0]
     """
 
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.int)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=int)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -372,7 +372,7 @@ def mcc(t, p):
     if den == 0.0:
         den = 1.0
 
-    num = np.float((tp*tn)-(fp*fn))
+    num = float((tp*tn)-(fp*fn))
     return num / den
 
 
@@ -391,8 +391,8 @@ def auc_wmw(t, p):
       AUC : float, in range [0.0, 1.0]
     """
 
-    tarr = np.asarray(t, dtype=np.int)
-    parr = np.asarray(p, dtype=np.float)
+    tarr = np.asarray(t, dtype=int)
+    parr = np.asarray(p, dtype=float)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -426,8 +426,8 @@ def mse(t, p):
       MSE : float
     """
 
-    tarr = np.asarray(t, dtype=np.float)
-    parr = np.asarray(p, dtype=np.float)
+    tarr = np.asarray(t, dtype=float)
+    parr = np.asarray(p, dtype=float)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -453,8 +453,8 @@ def r2(t, p):
       R^2 : float
     """
 
-    tarr = np.asarray(t, dtype=np.float)
-    parr = np.asarray(p, dtype=np.float)
+    tarr = np.asarray(t, dtype=float)
+    parr = np.asarray(p, dtype=float)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
@@ -480,8 +480,8 @@ def r2_corr(t, p):
       R^2 : float
     """
 
-    tarr = np.asarray(t, dtype=np.float)
-    parr = np.asarray(p, dtype=np.float)
+    tarr = np.asarray(t, dtype=float)
+    parr = np.asarray(p, dtype=float)
 
     if tarr.shape[0] != parr.shape[0]:
         raise ValueError("t, p: shape mismatch")
--- a/mlpy/ols.py
+++ b/mlpy/ols.py
@@ -77,8 +77,8 @@ class OLS:
               target values
         """
 
-        xarr = np.array(x, dtype=np.float, copy=True)
-        yarr = np.asarray(y, dtype=np.float)
+        xarr = np.array(x, dtype=float, copy=True)
+        yarr = np.asarray(y, dtype=float)
 
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -90,7 +90,7 @@ class OLS:
             raise ValueError("x, y shape mismatch")
 
         xarr = np.concatenate((np.ones((xarr.shape[0], 1),
-                   dtype=np.float), xarr), axis=1)
+                   dtype=float), xarr), axis=1)
 
         beta, self._rank = ols_base(xarr, yarr, self._tol)
         self._beta = beta[1:]
@@ -110,7 +110,7 @@ class OLS:
         if not self._beta or not self._beta0:
             raise ValueError('no mode computed; run learn() first')
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
 
         if tarr.ndim > 2 or tarr.ndim < 1:
             raise ValueError("t must be an 1d or a 2d array_like object")
--- a/mlpy/parzen.py
+++ b/mlpy/parzen.py
@@ -52,8 +52,8 @@ class Parzen:
               target values
         """
 
-        K_arr = np.asarray(K, dtype=np.float)
-        y_arr = np.asarray(y, dtype=np.int)
+        K_arr = np.asarray(K, dtype=float)
+        y_arr = np.asarray(y, dtype=int)
 
         if K_arr.ndim != 2:
             raise ValueError("K must be a 2d array_like object")
@@ -105,7 +105,7 @@ class Parzen:
         if self._alpha is None:
             raise ValueError("no model computed; run learn()")
 
-        Kt_arr = np.asarray(Kt, dtype=np.float)
+        Kt_arr = np.asarray(Kt, dtype=float)
         if self._kernel is not None:
             Kt_arr = self._kernel.kernel(Kt_arr, self._x)
 
@@ -115,7 +115,7 @@ class Parzen:
             raise ValueError("Kt, alpha: shape mismatch")
 
         return np.where(s==-1, self._labels[0], self._labels[1]) \
-            .astype(np.int)
+            .astype(int)
           
     def alpha(self):
         """Return alpha.
--- a/mlpy/perceptron.py
+++ b/mlpy/perceptron.py
@@ -57,8 +57,8 @@ class Perceptron:
               target values (N)
         """
 
-        xarr = np.asarray(x, dtype=np.float)
-        yarr = np.asarray(y, dtype=np.int)
+        xarr = np.asarray(x, dtype=float)
+        yarr = np.asarray(y, dtype=int)
         
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -77,7 +77,7 @@ class Perceptron:
         
         ynew = np.where(yarr == self._labels[0], 0, 1)
         
-        self._w = np.zeros(xarr.shape[1], dtype=np.float)
+        self._w = np.zeros(xarr.shape[1], dtype=float)
         self._bias = 0.0
         n = ynew.shape[0]
         
@@ -110,7 +110,7 @@ class Perceptron:
         if self._w is None:
             raise ValueError("no model computed")
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
         if tarr.ndim > 2:
             raise ValueError("t must be an 1d or a 2d array_like object")
         
@@ -120,7 +120,7 @@ class Perceptron:
             raise ValueError("t, model: shape mismatch")
 
         return np.where(tmp>0, self._labels[1], self._labels[0]) \
-            .astype(np.int)
+            .astype(int)
         
     def w(self):
         """Returns the coefficients.
--- a/mlpy/pls.py
+++ b/mlpy/pls.py
@@ -51,8 +51,8 @@ class PLS:
               target values
         """
 
-        xarr = np.array(x, dtype=np.float, copy=True)
-        yarr = np.array(y, dtype=np.float, copy=True)
+        xarr = np.array(x, dtype=float, copy=True)
+        yarr = np.array(y, dtype=float, copy=True)
 
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -71,9 +71,9 @@ class PLS:
         xarr = xarr - self._xmean
         yarr = yarr - self._beta0
         
-        u = np.empty((xarr.shape[1], self._iters), dtype=np.float)
-        c = np.empty((yarr.shape[1], self._iters), dtype=np.float)
-        p = np.empty((xarr.shape[1], self._iters), dtype=np.float)
+        u = np.empty((xarr.shape[1], self._iters), dtype=float)
+        c = np.empty((yarr.shape[1], self._iters), dtype=float)
+        p = np.empty((xarr.shape[1], self._iters), dtype=float)
         for i in range(self._iters):
             YX = np.dot(yarr.T, xarr)
             u[:, i] = YX[0] / np.linalg.norm(YX[0])
@@ -111,7 +111,7 @@ class PLS:
         if self._beta is None:
             raise ValueError("no model computed; run learn()")
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
         
         if tarr.ndim > 2 or tarr.ndim < 1:
             raise ValueError("t must be an 1d or a 2d array_like object")
--- a/mlpy/rfe.py
+++ b/mlpy/rfe.py
@@ -23,7 +23,7 @@ from da import KFDAC
 
 # used in rfe_kfda
 def rayleigh(x, kernel, lmb, alpha, idx1, idx2):
-    R = np.empty(x.shape[1], dtype=np.float)
+    R = np.empty(x.shape[1], dtype=float)
     idx = np.ones(x.shape[1], dtype=bool)
     for i in range(x.shape[1]):
         idx[i] = False
@@ -77,8 +77,8 @@ def rfe_kfda(x, y, p, lmb, kernel):
     if (p < 0.0) or (p > 1.0):
         raise ValueError("parameter p must be in [0.0, 1.0]")
 
-    xarr = np.asarray(x, dtype=np.float)
-    yarr = np.asarray(y, dtype=np.int)
+    xarr = np.asarray(x, dtype=float)
+    yarr = np.asarray(y, dtype=int)
 
     if xarr.ndim != 2:
         raise ValueError("x must be a 2d array_like object")
@@ -100,7 +100,7 @@ def rfe_kfda(x, y, p, lmb, kernel):
     idx2 = np.where(yarr==labels[1])[0]
 
     kfda = KFDAC(lmb=lmb, kernel=kernel)
-    idxglobal = np.arange(xarr.shape[1], dtype=np.int)
+    idxglobal = np.arange(xarr.shape[1], dtype=int)
     ranking = []
 
     while True:
@@ -158,8 +158,8 @@ def rfe_w2(x, y, p, classifier):
     if not (hasattr(classifier, 'learn') and hasattr(classifier, 'w')):
         raise ValueError("parameter classifier must have learn() and w() methods")
 
-    xarr = np.asarray(x, dtype=np.float)
-    yarr = np.asarray(y, dtype=np.int)
+    xarr = np.asarray(x, dtype=float)
+    yarr = np.asarray(y, dtype=int)
 
     if xarr.ndim != 2:
         raise ValueError("x must be a 2d array_like object")
@@ -174,7 +174,7 @@ def rfe_w2(x, y, p, classifier):
     if labels.shape[0] != 2:
         raise ValueError("number of classes must be = 2")
     
-    idxglobal = np.arange(xarr.shape[1], dtype=np.int)
+    idxglobal = np.arange(xarr.shape[1], dtype=int)
     ranking = []
 
     while True:
--- a/mlpy/ridge.py
+++ b/mlpy/ridge.py
@@ -110,8 +110,8 @@ class Ridge:
               target values
         """
 
-        xarr = np.array(x, dtype=np.float, copy=True)
-        yarr = np.asarray(y, dtype=np.float)
+        xarr = np.array(x, dtype=float, copy=True)
+        yarr = np.asarray(y, dtype=float)
 
         if xarr.ndim != 2:
             raise ValueError("x must be a 2d array_like object")
@@ -143,7 +143,7 @@ class Ridge:
         if self._beta is None:
             raise ValueError("no model computed; run learn()")
 
-        tarr = np.asarray(t, dtype=np.float)
+        tarr = np.asarray(t, dtype=float)
 
         if tarr.ndim > 2 or tarr.ndim < 1:
             raise ValueError("t must be an 1d or a 2d array_like object")
@@ -209,8 +209,8 @@ class KernelRidge:
               target values
         """
 
-        K_arr = np.asarray(K, dtype=np.float)
-        y_arr = np.asarray(y, dtype=np.float)
+        K_arr = np.asarray(K, dtype=float)
+        y_arr = np.asarray(y, dtype=float)
 
         if K_arr.ndim != 2:
             raise ValueError("K must be a 2d array_like object")
@@ -238,7 +238,7 @@ class KernelRidge:
         # as in G. C. Cawley, N. L. C. Nicola and O. Chapelle.
         # Estimating Predictive Variances with Kernel Ridge 
         # Regression.
-        A = np.empty((n+1, n+1), dtype=np.float)
+        A = np.empty((n+1, n+1), dtype=float)
         A[:n, :n] = K_arr + self._lmb * np.eye(n)
         A[n, :n], A[:n, n], A[n, n] = 1., 1., 0.
         g = np.linalg.solve(A, np.append(y_arr, 0))
@@ -260,7 +260,7 @@ class KernelRidge:
         if self._alpha is None:
             raise ValueError("no model computed; run learn()")
 
-        Kt_arr = np.asarray(Kt, dtype=np.float)
+        Kt_arr = np.asarray(Kt, dtype=float)
         if self._kernel is not None:
             Kt_arr = self._kernel.kernel(Kt_arr, self._x)
         
--- a/mlpy/stats.py
+++ b/mlpy/stats.py
@@ -45,7 +45,7 @@ def bootstrap_ci(x, B=1000, alpha=0.05,
 
     np.random.seed(seed)
     
-    bmean = np.empty(B, dtype=np.float)
+    bmean = np.empty(B, dtype=float)
     for b in range(B):
         idx = np.random.random_integers(0, x_arr.shape[0]-1, x_arr.shape[0])
         bmean[b] = np.mean(x_arr[idx])
@@ -65,7 +65,7 @@ def quantile(x, f):
     percentile `f` should have the value 0.75.
     """
 
-    xarr = np.array(x, dtype=np.float, copy=True)
+    xarr = np.array(x, dtype=float, copy=True)
     xarr = np.ravel(x)
     xarr.sort()
     
--- a/mlpy/wavelet/padding.py
+++ b/mlpy/wavelet/padding.py
@@ -70,7 +70,7 @@ def pad(x, method='reflection'):
         right_x = np.zeros(rdiff, dtype=x_arr.dtype)
         
     xp = np.concatenate((left_x, x_arr, right_x))
-    orig = np.ones(x_arr.shape[0] + diff, dtype=np.bool)
+    orig = np.ones(x_arr.shape[0] + diff, dtype=bool)
     orig[:ldiff] = False
     orig[-rdiff:] = False
    
--- a/mlpy/fastcluster/fastcluster.py
+++ b/mlpy/fastcluster/fastcluster.py
@@ -22,7 +22,7 @@ __all__ = ['single', 'complete', 'averag
 __version_info__ = ('1', '1', '2')
 __version__ = '.'.join(__version_info__)
 
-from numpy import double, empty, array, ndarray, var, cov, dot, bool, expand_dims, ceil, sqrt
+from numpy import double, empty, array, ndarray, var, cov, dot, expand_dims, ceil, sqrt
 from numpy.linalg import inv
 from scipy.spatial.distance import pdist
 
