Skip to content

Stub pyi file autogen using pyo3-stub-gen #257

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 5 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
351 changes: 299 additions & 52 deletions Cargo.lock

Large diffs are not rendered by default.

10 changes: 6 additions & 4 deletions python/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ readme = "README.md"

[lib]
name = "egobox"
crate-type = ["cdylib"]
crate-type = ["cdylib", "rlib"]

[features]
default = []
Expand All @@ -31,9 +31,11 @@ egobox-ego = { version = "0.28.1", path = "../crates/ego", features = [
"persistent",
] }

pyo3 = { version = "0.22", features = ["extension-module"] }
pyo3-log = "0.11"
numpy = "0.22.1"
pyo3 = "0.24"
pyo3-log = "0.12"
pyo3-stub-gen = { git = "https://github.com/zao111222333/pyo3-stub-gen.git", features = ["numpy"] }
# pyo3-stub-gen = { version = "0.8", features = ["numpy"] }
numpy = "0.24.0"

linfa.workspace = true
ndarray.workspace = true
Expand Down
277 changes: 106 additions & 171 deletions python/egobox/egobox.pyi

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions python/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ build-backend = "maturin"
requires = ["maturin>=1.0, <2.0"]

[tool.maturin]
python-source = "egobox"
module-name = "egobox"
features = ["pyo3/extension-module"]
# Optional usage of BLAS backend
# cargo-extra-args = "--features linfa/intel-mkl-static"
Expand Down
8 changes: 8 additions & 0 deletions python/src/bin/stub_gen.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
use pyo3_stub_gen::Result;

fn main() -> Result<()> {
    // Default the log filter to "info" unless the user overrides it via RUST_LOG.
    let log_env = env_logger::Env::default().filter_or("RUST_LOG", "info");
    env_logger::Builder::from_env(log_env).init();

    // Collect the stub metadata recorded by the #[gen_stub_*] attribute macros
    // in the egobox crate, then emit the corresponding .pyi stub file.
    egobox::stub_info()?.generate()?;
    Ok(())
}
35 changes: 20 additions & 15 deletions python/src/egor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ use ndarray::{concatenate, Array1, Array2, ArrayView2, Axis};
use numpy::{IntoPyArray, PyArray1, PyArray2, PyArrayMethods, PyReadonlyArray2, ToPyArray};
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3_stub_gen::derive::{gen_stub_pyclass, gen_stub_pyfunction, gen_stub_pymethods};

/// Utility function converting `xlimits` float data list specifying bounds of x components
/// to x specified as a list of XType.Float types [egobox.XType]
Expand All @@ -29,17 +30,18 @@ use pyo3::prelude::*;
///
/// # Returns
/// xtypes: nx-size list of XSpec(XType(FLOAT), [lower_bound, upper_bounds]) where `nx` is the dimension of x
#[gen_stub_pyfunction]
#[pyfunction]
pub(crate) fn to_specs(py: Python, xlimits: Vec<Vec<f64>>) -> PyResult<PyObject> {
pub(crate) fn to_specs(py: Python, xlimits: Vec<Vec<f64>>) -> PyResult<pyo3::Bound<'_, PyAny>> {
if xlimits.is_empty() || xlimits[0].is_empty() {
let err = "Error: xspecs argument cannot be empty";
return Err(PyValueError::new_err(err.to_string()));
}
Ok(xlimits
xlimits
.iter()
.map(|xlimit| XSpec::new(XType::Float, xlimit.clone(), vec![]))
.collect::<Vec<XSpec>>()
.into_py(py))
.into_pyobject(py)
}

/// Optimizer constructor
Expand Down Expand Up @@ -163,6 +165,7 @@ pub(crate) fn to_specs(py: Python, xlimits: Vec<Vec<f64>>) -> PyResult<PyObject>
/// seed (int >= 0)
/// Random generator seed to allow computation reproducibility.
///
#[gen_stub_pyclass]
#[pyclass]
pub(crate) struct Egor {
pub xspecs: PyObject,
Expand Down Expand Up @@ -191,6 +194,7 @@ pub(crate) struct Egor {
pub seed: Option<u64>,
}

#[gen_stub_pyclass]
#[pyclass]
pub(crate) struct OptimResult {
#[pyo3(get)]
Expand All @@ -203,6 +207,7 @@ pub(crate) struct OptimResult {
y_doe: Py<PyArray2<f64>>,
}

#[gen_stub_pymethods]
#[pymethods]
impl Egor {
#[new]
Expand Down Expand Up @@ -336,7 +341,7 @@ impl Egor {
) -> PyResult<OptimResult> {
let obj = |x: &ArrayView2<f64>| -> Array2<f64> {
Python::with_gil(|py| {
let args = (x.to_owned().into_pyarray_bound(py),);
let args = (x.to_owned().into_pyarray(py),);
let res = fun.bind(py).call1(args).unwrap();
let pyarray = res.downcast_into::<PyArray2<f64>>().unwrap();
pyarray.to_owned_array()
Expand All @@ -350,12 +355,12 @@ impl Egor {
let cstr = |x: &[f64], g: Option<&mut [f64]>, _u: &mut InfillObjData<f64>| -> f64 {
Python::with_gil(|py| {
if let Some(g) = g {
let args = (Array1::from(x.to_vec()).into_pyarray_bound(py), true);
let args = (Array1::from(x.to_vec()).into_pyarray(py), true);
let grad = cstr.bind(py).call1(args).unwrap();
let grad = grad.downcast_into::<PyArray1<f64>>().unwrap().readonly();
g.copy_from_slice(grad.as_slice().unwrap())
}
let args = (Array1::from(x.to_vec()).into_pyarray_bound(py), false);
let args = (Array1::from(x.to_vec()).into_pyarray(py), false);
let res = cstr.bind(py).call1(args).unwrap().extract().unwrap();
res
})
Expand All @@ -378,10 +383,10 @@ impl Egor {
.run()
.expect("Egor should optimize the objective function")
});
let x_opt = res.x_opt.into_pyarray_bound(py).to_owned();
let y_opt = res.y_opt.into_pyarray_bound(py).to_owned();
let x_doe = res.x_doe.into_pyarray_bound(py).to_owned();
let y_doe = res.y_doe.into_pyarray_bound(py).to_owned();
let x_opt = res.x_opt.into_pyarray(py).to_owned();
let y_opt = res.y_opt.into_pyarray(py).to_owned();
let x_doe = res.x_doe.into_pyarray(py).to_owned();
let y_doe = res.y_doe.into_pyarray(py).to_owned();
Ok(OptimResult {
x_opt: x_opt.into(),
y_opt: y_opt.into(),
Expand Down Expand Up @@ -419,7 +424,7 @@ impl Egor {
.min_within_mixint_space(&xtypes);

let x_suggested = py.allow_threads(|| mixintegor.suggest(&x_doe, &y_doe));
x_suggested.to_pyarray_bound(py).into()
x_suggested.to_pyarray(py).into()
}

/// This function gives the best evaluation index given the outputs
Expand Down Expand Up @@ -467,10 +472,10 @@ impl Egor {
let n_fcstrs = 0;
let c_doe = Array2::zeros((y_doe.ncols(), n_fcstrs));
let idx = find_best_result_index(&y_doe, &c_doe, &self.cstr_tol(n_fcstrs));
let x_opt = x_doe.row(idx).to_pyarray_bound(py).into();
let y_opt = y_doe.row(idx).to_pyarray_bound(py).into();
let x_doe = x_doe.to_pyarray_bound(py).into();
let y_doe = y_doe.to_pyarray_bound(py).into();
let x_opt = x_doe.row(idx).to_pyarray(py).into();
let y_opt = y_doe.row(idx).to_pyarray(py).into();
let x_doe = x_doe.to_pyarray(py).into();
let y_doe = y_doe.to_pyarray(py).into();
OptimResult {
x_opt,
y_opt,
Expand Down
28 changes: 15 additions & 13 deletions python/src/gp_mix.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ use ndarray::{array, Array1, Array2, Axis, Ix1, Ix2, Zip};
use ndarray_rand::rand::SeedableRng;
use numpy::{IntoPyArray, PyArray1, PyArray2, PyReadonlyArray2, PyReadonlyArrayDyn};
use pyo3::prelude::*;
use pyo3_stub_gen::derive::{gen_stub_pyclass, gen_stub_pymethods};
use rand_xoshiro::Xoshiro256Plus;

/// Gaussian processes mixture builder
Expand Down Expand Up @@ -71,6 +72,7 @@ use rand_xoshiro::Xoshiro256Plus;
/// seed (int >= 0)
/// Random generator seed to allow computation reproducibility.
///
#[gen_stub_pyclass]
#[pyclass]
pub(crate) struct GpMix {
pub n_clusters: NbClusters,
Expand All @@ -84,6 +86,7 @@ pub(crate) struct GpMix {
pub seed: Option<u64>,
}

#[gen_stub_pymethods]
#[pymethods]
impl GpMix {
#[new]
Expand Down Expand Up @@ -226,9 +229,11 @@ impl GpMix {
}

/// A trained Gaussian processes mixture
#[gen_stub_pyclass]
#[pyclass]
pub(crate) struct Gpx(Box<GpMixture>);

#[gen_stub_pymethods]
#[pymethods]
impl Gpx {
/// Get Gaussian processes mixture builder aka `GpMix`
Expand Down Expand Up @@ -328,7 +333,7 @@ impl Gpx {
.predict(&x.as_array())
.unwrap()
.insert_axis(Axis(1))
.into_pyarray_bound(py)
.into_pyarray(py)
}

/// Predict variances at nsample points.
Expand All @@ -345,10 +350,7 @@ impl Gpx {
py: Python<'py>,
x: PyReadonlyArray2<f64>,
) -> Bound<'py, PyArray2<f64>> {
self.0
.predict_var(&x.as_array())
.unwrap()
.into_pyarray_bound(py)
self.0.predict_var(&x.as_array()).unwrap().into_pyarray(py)
}

/// Predict surrogate output derivatives at nsamples points.
Expand All @@ -369,7 +371,7 @@ impl Gpx {
self.0
.predict_gradients(&x.as_array())
.unwrap()
.into_pyarray_bound(py)
.into_pyarray(py)
}

/// Predict variance derivatives at nsamples points.
Expand All @@ -390,7 +392,7 @@ impl Gpx {
self.0
.predict_var_gradients(&x.as_array())
.unwrap()
.into_pyarray_bound(py)
.into_pyarray(py)
}

/// Sample gaussian process trajectories.
Expand All @@ -412,7 +414,7 @@ impl Gpx {
self.0
.sample(&x.as_array(), n_traj)
.unwrap()
.into_pyarray_bound(py)
.into_pyarray(py)
}

/// Get the input and output dimensions of the surrogate
Expand All @@ -435,8 +437,8 @@ impl Gpx {
) -> (Bound<'py, PyArray2<f64>>, Bound<'py, PyArray1<f64>>) {
let (xdata, ydata) = self.0.training_data();
(
xdata.to_owned().into_pyarray_bound(py),
ydata.to_owned().into_pyarray_bound(py),
xdata.to_owned().into_pyarray(py),
ydata.to_owned().into_pyarray(py),
)
}

Expand All @@ -452,7 +454,7 @@ impl Gpx {
Zip::from(thetas.rows_mut())
.and(experts)
.for_each(|mut theta, expert| theta.assign(expert.theta()));
thetas.into_pyarray_bound(py)
thetas.into_pyarray(py)
}

/// Get GP expert variance (ie posterior GP variance)
Expand All @@ -466,7 +468,7 @@ impl Gpx {
Zip::from(&mut variances)
.and(experts)
.for_each(|var, expert| *var = expert.variance());
variances.into_pyarray_bound(py)
variances.into_pyarray(py)
}

/// Get reduced likelihood values gotten when fitting the GP experts
Expand All @@ -482,6 +484,6 @@ impl Gpx {
Zip::from(&mut likelihoods)
.and(experts)
.for_each(|lkh, expert| *lkh = expert.likelihood());
likelihoods.into_pyarray_bound(py)
likelihoods.into_pyarray(py)
}
}
2 changes: 2 additions & 0 deletions python/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -56,3 +56,5 @@ fn egobox(_py: Python, m: &Bound<'_, PyModule>) -> PyResult<()> {

Ok(())
}

pyo3_stub_gen::define_stub_info_gatherer!(stub_info);
6 changes: 5 additions & 1 deletion python/src/sampling.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@ use egobox_doe::{LhsKind, SamplingMethod};
use egobox_ego::gpmix::mixint::MixintContext;
use numpy::{IntoPyArray, PyArray2};
use pyo3::prelude::*;
use pyo3_stub_gen::derive::{gen_stub_pyclass_enum, gen_stub_pyfunction};

#[gen_stub_pyclass_enum]
#[pyclass(eq, eq_int, rename_all = "SCREAMING_SNAKE_CASE")]
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Sampling {
Expand All @@ -27,6 +29,7 @@ pub enum Sampling {
/// # Returns
/// ndarray of shape (n_samples, n_variables)
///
#[gen_stub_pyfunction]
#[pyfunction]
#[pyo3(signature = (method, xspecs, n_samples, seed=None))]
pub fn sampling(
Expand Down Expand Up @@ -76,7 +79,7 @@ pub fn sampling(
}
}
.sample(n_samples);
doe.into_pyarray_bound(py)
doe.into_pyarray(py)
}

/// Samples generation using optimized Latin Hypercube Sampling
Expand All @@ -89,6 +92,7 @@ pub fn sampling(
/// # Returns
/// ndarray of shape (n_samples, n_variables)
///
#[gen_stub_pyfunction]
#[pyfunction]
#[pyo3(signature = (xspecs, n_samples, seed=None))]
pub(crate) fn lhs(
Expand Down
Loading
Loading