diff --git a/Cargo.toml b/Cargo.toml
index e555d93..a0f014d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -43,11 +43,11 @@ num-complex = { version = "0.4.6", features = ["rand"] }
 plotly = { version = "0.10.0", features = ["plotly_ndarray"] }
 polars = { version = "0.43.1", features = ["lazy"] }
 prettytable-rs = "0.10.0"
-# pyo3 = { version = "0.22.3", features = ["extension-module", "abi3-py38"] }
 quadrature = "0.1.2"
 rand = "0.8.5"
 rand_distr = "0.4.3"
 rayon = "1.10.0"
+roots = "0.0.8"
 sci-rs = "0.4.1"
 scilib = "1.0.0"
 statrs = "0.17.1"
diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index 4a2bfe1..0000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,16 +0,0 @@
-[build-system]
-requires = ["maturin>=1.7,<2.0"]
-build-backend = "maturin"
-
-[project]
-name = "stochastic-py"
-requires-python = ">=3.8"
-classifiers = [
-    "Programming Language :: Rust",
-    "Programming Language :: Python :: Implementation :: CPython",
-    "Programming Language :: Python :: Implementation :: PyPy",
-]
-dynamic = ["version"]
-
-[tool.maturin]
-features = ["pyo3/extension-module", "pyo3/abi3-py38"]
\ No newline at end of file
diff --git a/src/stats/copulas/bivariate.rs b/src/stats/copulas/bivariate.rs
index c7ec9be..fc728fd 100644
--- a/src/stats/copulas/bivariate.rs
+++ b/src/stats/copulas/bivariate.rs
@@ -1,9 +1,11 @@
+use core::f64;
 use std::{cmp::Ordering, error::Error};
 
 use ndarray::{stack, Array1, Array2, Axis};
 use ndarray_rand::RandomExt;
 use ndarray_stats::QuantileExt;
 use rand_distr::Uniform;
+use roots::{find_root_brent, SimpleConvergency};
 
 pub mod clayton;
 pub mod frank;
@@ -18,6 +20,8 @@ pub enum CopulaType {
   Independence,
 }
 
+const EPSILON: f64 = 1e-12;
+
 pub trait Bivariate {
   fn r#type(&self) -> CopulaType;
 
@@ -121,41 +125,65 @@ pub trait Bivariate {
     Ok(())
   }
 
-  fn probability_density(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>>;
-
-  fn pdf(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>> {
-    self.probability_density(X)
-  }
+  fn pdf(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>>;
 
-  fn log_probability_density(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>> {
-    let pdf = self.probability_density(X)?;
+  fn log_pdf(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>> {
+    let pdf = self.pdf(X)?;
     let log_pdf = pdf.mapv(|val| (val + 1e-32).ln());
     Ok(log_pdf)
   }
 
-  fn cumulative_distribution(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>>;
-
-  fn cdf(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>> {
-    self.cumulative_distribution(X)
-  }
+  fn cdf(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>>;
 
   fn percent_point(&self, y: &Array1<f64>, V: &Array1<f64>) -> Result<Array1<f64>, Box<dyn Error>> {
-    todo!()
+    self.check_fit()?;
+    let mut results = Array1::zeros(y.len());
+
+    for i in 0..y.len() {
+      let y_i = y[i];
+      let v_i = V[i];
+
+      let f = |u| self.partial_derivative_scalar(u, v_i).unwrap() - y_i;
+      let mut convergency = SimpleConvergency {
+        eps: EPSILON,
+        max_iter: 50,
+      };
+      let root = find_root_brent(f64::EPSILON, 1.0, f, &mut convergency);
+      results[i] = root.unwrap_or(f64::EPSILON);
+    }
+
+    Ok(results)
   }
 
   fn ppf(&self, y: &Array1<f64>, V: &Array1<f64>) -> Result<Array1<f64>, Box<dyn Error>> {
     self.percent_point(y, V)
   }
 
-  fn partial_derivative(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>> {
-    todo!()
+  fn partial_derivative(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn Error>> {
+    let n = X.nrows();
+    let mut X_prime = X.clone();
+    let mut delta = Array1::zeros(n);
+    for i in 0..n {
+      delta[i] = if X[[i, 1]] > 0.5 { -0.0001 } else { 0.0001 };
+      X_prime[[i, 1]] = X[[i, 1]] + delta[i];
+    }
+
+    let f = self.cdf(X)?;
+    let f_prime = self.cdf(&X_prime)?;
+
+    let mut deriv = Array1::zeros(n);
+    for i in 0..n {
+      deriv[i] = (f_prime[i] - f[i]) / delta[i];
+    }
+
+    Ok(deriv)
   }
 
-  fn partial_derivative_scalar(
-    &self,
-    u: Array1<f64>,
-    v: Array1<f64>,
-  ) -> Result<f64, Box<dyn Error>> {
-    todo!()
+  fn partial_derivative_scalar(&self, U: f64, V: f64) -> Result<f64, Box<dyn Error>> {
+    self.check_fit()?;
+    let X = stack![Axis(1), Array1::from(vec![U]), Array1::from(vec![V])];
+    let out = self.partial_derivative(&X)?;
+
+    Ok(out[0])
   }
 }
diff --git a/src/stats/copulas/bivariate/clayton.rs b/src/stats/copulas/bivariate/clayton.rs
index 83c65f7..7b32d4b 100644
--- a/src/stats/copulas/bivariate/clayton.rs
+++ b/src/stats/copulas/bivariate/clayton.rs
@@ -55,21 +55,54 @@ impl Bivariate for Clayton {
   }
 
   fn generator(&self, t: &Array1<f64>) -> Result<Array1<f64>, Box<dyn std::error::Error>> {
-    todo!()
+    self.check_fit()?;
+
+    let theta = self.theta.unwrap();
+    Ok((1.0 / theta) * (t.powf(-theta) - 1.0))
   }
 
-  fn probability_density(
-    &self,
-    X: &Array2<f64>,
-  ) -> Result<Array1<f64>, Box<dyn std::error::Error>> {
-    todo!()
+  fn pdf(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn std::error::Error>> {
+    self.check_fit()?;
+
+    let U = X.column(0);
+    let V = X.column(1);
+
+    let theta = self.theta.unwrap();
+    let a = (theta + 1.0) * (&U * &V).powf(-theta - 1.0);
+    let b = U.powf(-theta) + V.powf(-theta) - 1.0;
+    let c = -(2.0 * theta + 1.0) / theta;
+    Ok(a * b.powf(c))
   }
 
-  fn cumulative_distribution(
-    &self,
-    X: &Array2<f64>,
-  ) -> Result<Array1<f64>, Box<dyn std::error::Error>> {
-    todo!()
+  fn cdf(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn std::error::Error>> {
+    self.check_fit()?;
+
+    let U = X.column(0);
+    let V = X.column(1);
+
+    let V_all_zeros = V.iter().all(|&v| v == 0.0);
+    let U_all_zeros = U.iter().all(|&u| u == 0.0);
+
+    if V_all_zeros || U_all_zeros {
+      let shape = V.shape();
+      return Ok(Array1::zeros(shape[0]));
+    }
+
+    let theta = self.theta.unwrap();
+    let mut cdfs = Array1::<f64>::zeros(U.len());
+
+    for i in 0..U.len() {
+      let u = U[i];
+      let v = V[i];
+
+      if u > 0.0 && v > 0.0 {
+        cdfs[i] = (u.powf(-theta) + v.powf(-theta) - 1.0).powf(-1.0 / theta);
+      } else {
+        cdfs[i] = 0.0;
+      }
+    }
+
+    Ok(cdfs)
   }
 
   fn percent_point(
@@ -77,11 +110,42 @@ impl Bivariate for Clayton {
     y: &Array1<f64>,
     V: &Array1<f64>,
   ) -> Result<Array1<f64>, Box<dyn std::error::Error>> {
-    todo!()
+    self.check_fit()?;
+
+    let theta = self.theta.unwrap();
+
+    if theta == 0.0 {
+      return Ok(V.clone());
+    }
+
+    let a = y.powf(theta / (-1.0 - theta));
+    let b = V.powf(theta);
+
+    let b_all_zeros = b.iter().all(|&v| v == 0.0);
+
+    if b_all_zeros {
+      return Ok(Array1::ones(V.len()));
+    }
+
+    Ok(((a + &b - 1.0) / b).powf(-1.0 / theta))
   }
 
   fn partial_derivative(&self, X: &Array2<f64>) -> Result<Array1<f64>, Box<dyn std::error::Error>> {
-    todo!()
+    self.check_fit()?;
+
+    let U = X.column(0);
+    let V = X.column(1);
+
+    let theta = self.theta.unwrap();
+    let A = V.powf(-theta - 1.0);
+
+    if A.is_all_infinite() {
+      return Ok(Array1::zeros(V.len()));
+    }
+
+    let B = V.powf(-theta) + U.powf(-theta) - 1.0;
+    let h = B.powf((-1.0 - theta) / theta);
+    Ok(A * h)
   }
 
   fn compute_theta(&self) -> f64 {
@@ -94,3 +158,25 @@ impl Bivariate for Clayton {
     2.0 * tau / (1.0 - tau)
   }
 }
+
+#[cfg(test)]
+mod tests {
+  use ndarray::array;
+
+  use crate::stats::copulas::bivariate::Bivariate;
+
+  use super::Clayton;
+
+  #[test]
+  fn test_fit() {
+    let data = array![[0.5, 0.5], [0.5, 0.4], [0.5, 0.3], [0.2, 0.2], [0.5, 0.1]];
+    let mut clayton = Clayton::new();
+    clayton.fit(&data).unwrap();
+
+    let data1 = array![0.5, 0.4, 0.3, 0.2, 0.1];
+    let data2 = array![0.5, 0.4, 0.3, 0.2, 0.1];
+
+    let percentile_value = clayton.percent_point(&data1, &data2).unwrap();
+    assert!(percentile_value.iter().all(|v| v.is_finite() && *v > 0.0 && *v <= 1.0));
+  }
+}