Source code for dasf.ml.mixture.gmm

#!/usr/bin/env python3

""" Gaussian Mixture Model algorithm module. """

from sklearn.mixture import GaussianMixture as GaussianMixture_CPU

from dasf.ml.mixture.classifier import MixtureClassifier


class GaussianMixture(MixtureClassifier):
    """Gaussian Mixture.

    Representation of a Gaussian mixture model probability distribution.
    This class allows estimation of the parameters of a Gaussian mixture
    distribution.

    Read more in the :ref:`User Guide <gmm>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    n_components : int, default=1
        The number of mixture components.

    covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full'
        String describing the type of covariance parameters to use.
        Must be one of:

        - 'full': each component has its own general covariance matrix.
        - 'tied': all components share the same general covariance matrix.
        - 'diag': each component has its own diagonal covariance matrix.
        - 'spherical': each component has its own single variance.

    tol : float, default=1e-3
        The convergence threshold. EM iterations will stop when the
        lower bound average gain is below this threshold.

    reg_covar : float, default=1e-6
        Non-negative regularization added to the diagonal of covariance.
        Ensures that the covariance matrices are all positive.

    max_iter : int, default=100
        The number of EM iterations to perform.

    n_init : int, default=1
        The number of initializations to perform. The best results are kept.

    init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \
            default='kmeans'
        The method used to initialize the weights, the means and the
        precisions. String must be one of:

        - 'kmeans' : responsibilities are initialized using kmeans.
        - 'k-means++' : use the k-means++ method to initialize.
        - 'random' : responsibilities are initialized randomly.
        - 'random_from_data' : initial means are randomly selected data points.

        .. versionchanged:: v1.1
            `init_params` now accepts 'random_from_data' and 'k-means++' as
            initialization methods.

    weights_init : array-like of shape (n_components, ), default=None
        The user-provided initial weights. If it is None, weights are
        initialized using the `init_params` method.

    means_init : array-like of shape (n_components, n_features), default=None
        The user-provided initial means. If it is None, means are initialized
        using the `init_params` method.

    precisions_init : array-like, default=None
        The user-provided initial precisions (inverse of the covariance
        matrices). If it is None, precisions are initialized using the
        'init_params' method. The shape depends on 'covariance_type'::

            (n_components,)                        if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'

    random_state : int, RandomState instance or None, default=None
        Controls the random seed given to the method chosen to initialize the
        parameters (see `init_params`). In addition, it controls the
        generation of random samples from the fitted distribution (see the
        method `sample`). Pass an int for reproducible output across multiple
        function calls. See :term:`Glossary <random_state>`.

    warm_start : bool, default=False
        If 'warm_start' is True, the solution of the last fitting is used as
        initialization for the next call of fit(). This can speed up
        convergence when fit is called several times on similar problems.
        In that case, 'n_init' is ignored and only a single initialization
        occurs upon the first call.
        See :term:`the Glossary <warm_start>`.

    verbose : int, default=0
        Enable verbose output. If 1 then it prints the current initialization
        and each iteration step. If greater than 1 then it also prints the
        log probability and the time needed for each step.

    verbose_interval : int, default=10
        Number of iterations done before the next print.
    Attributes
    ----------
    weights_ : array-like of shape (n_components,)
        The weights of each mixture component.

    means_ : array-like of shape (n_components, n_features)
        The mean of each mixture component.

    covariances_ : array-like
        The covariance of each mixture component.
        The shape depends on `covariance_type`::

            (n_components,)                        if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'

    precisions_ : array-like
        The precision matrices for each component in the mixture. A precision
        matrix is the inverse of a covariance matrix. A covariance matrix is
        symmetric positive definite so the mixture of Gaussians can be
        equivalently parameterized by the precision matrices. Storing the
        precision matrices instead of the covariance matrices makes it more
        efficient to compute the log-likelihood of new samples at test time.
        The shape depends on `covariance_type`::

            (n_components,)                        if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'

    precisions_cholesky_ : array-like
        The Cholesky decomposition of the precision matrices of each mixture
        component. A precision matrix is the inverse of a covariance matrix.
        A covariance matrix is symmetric positive definite so the mixture of
        Gaussians can be equivalently parameterized by the precision matrices.
        Storing the precision matrices instead of the covariance matrices
        makes it more efficient to compute the log-likelihood of new samples
        at test time. The shape depends on `covariance_type`::

            (n_components,)                        if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'

    converged_ : bool
        True when convergence of the best fit of EM was reached, False
        otherwise.

    n_iter_ : int
        Number of steps used by the best fit of EM to reach convergence.

    lower_bound_ : float
        Lower bound value on the log-likelihood (of the training data with
        respect to the model) of the best fit of EM.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    BayesianGaussianMixture : Gaussian mixture model fit with a variational
        inference.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.mixture import GaussianMixture
    >>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
    >>> gm = GaussianMixture(n_components=2, random_state=0).fit(X)
    >>> gm.means_
    array([[10.,  2.],
           [ 1.,  2.]])
    >>> gm.predict([[0, 0], [12, 3]])
    array([1, 0])
    """

    def __init__(
        self,
        n_components=1,
        *,
        covariance_type="full",
        tol=0.001,
        reg_covar=1e-06,
        max_iter=100,
        n_init=1,
        init_params="kmeans",
        weights_init=None,
        means_init=None,
        precisions_init=None,
        random_state=None,
        warm_start=False,
        verbose=0,
        verbose_interval=10,
    ):
        self.__gmm_cpu = GaussianMixture_CPU(
            n_components=n_components,
            covariance_type=covariance_type,
            tol=tol,
            reg_covar=reg_covar,
            max_iter=max_iter,
            n_init=n_init,
            init_params=init_params,
            weights_init=weights_init,
            means_init=means_init,
            precisions_init=precisions_init,
            random_state=random_state,
            warm_start=warm_start,
            verbose=verbose,
            verbose_interval=verbose_interval,
        )
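    # Commentary (added; not in the upstream source): this class holds a plain
    # scikit-learn estimator in ``self.__gmm_cpu`` and forwards every call to
    # it. The ``_*_cpu`` methods below are presumably selected by the
    # ``MixtureClassifier``/DASF base machinery when a pipeline executes on a
    # CPU backend; GPU backends would use ``_*_gpu`` counterparts.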
    def _fit_cpu(self, X, y=None):
        """Estimate Gaussian Mixture model parameters with the EM algorithm,
        using the CPU only.

        The method fits the model ``n_init`` times and sets the parameters
        with which the model has the largest likelihood or lower bound.
        Within each trial, the method iterates between E-step and M-step for
        ``max_iter`` times until the change of likelihood or lower bound is
        less than ``tol``; otherwise, a ``ConvergenceWarning`` is raised.
        If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single
        initialization is performed upon the first call. Upon consecutive
        calls, training starts where it left off.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row corresponds
            to a single data point.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            The fitted mixture.
        """
        return self.__gmm_cpu.fit(X=X, y=y)
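    # Illustrative sketch (added; hypothetical usage): since ``_fit_cpu``
    # simply forwards to the wrapped estimator, fitting behaves exactly like
    # scikit-learn's GaussianMixture. ``X`` is any (n_samples, n_features)
    # array:
    #
    #   gmm = GaussianMixture(n_components=2, random_state=0)
    #   model = gmm._fit_cpu(X)            # returns the fitted sklearn object
    #   model.converged_, model.n_iter_    # convergence diagnostics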
    def _fit_predict_cpu(self, X, y=None):
        """Estimate model parameters using X and predict the labels for X,
        using the CPU only.

        The method fits the model n_init times and sets the parameters with
        which the model has the largest likelihood or lower bound. Within
        each trial, the method iterates between E-step and M-step for
        `max_iter` times until the change of likelihood or lower bound is
        less than `tol`; otherwise, a
        :class:`~sklearn.exceptions.ConvergenceWarning` is raised. After
        fitting, it predicts the most probable label for the input data
        points.

        .. versionadded:: 0.20

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row corresponds
            to a single data point.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        return self.__gmm_cpu.fit_predict(X=X, y=y)
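    # Illustrative sketch (added; hypothetical usage): fit and label in one
    # step; the returned array assigns each row of X to a mixture component:
    #
    #   labels = gmm._fit_predict_cpu(X)   # shape (n_samples,)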
    def _predict_cpu(self, X, y=None):
        """Predict the labels for the data samples in X using the trained
        model, on the CPU only.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row corresponds
            to a single data point.

        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        return self.__gmm_cpu.predict(X=X)
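    # Illustrative sketch (added; hypothetical usage): prediction requires a
    # prior fit; each new point is assigned to the component with the highest
    # posterior probability:
    #
    #   gmm._fit_cpu(X_train)
    #   labels = gmm._predict_cpu(X_new)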
    def _set_params_cpu(self, **params):
        """Set the parameters of this estimator using the CPU only.

        The method works on simple estimators as well as on nested objects.
        The latter have parameters of the form ``<component>__<parameter>``
        so that it's possible to update each component of a nested object.

        Parameters
        ----------
        **params : dict
            Estimator parameters.

        Returns
        -------
        self : estimator instance
            Estimator instance.
        """
        return self.__gmm_cpu.set_params(**params)
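    # Illustrative sketch (added; hypothetical usage): parameter names follow
    # scikit-learn's get_params()/set_params() convention:
    #
    #   gmm._set_params_cpu(covariance_type="diag", max_iter=200)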
    def _get_params_cpu(self, deep=True):
        """Get parameters for this estimator using the CPU only.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        return self.__gmm_cpu.get_params(deep=deep)
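
# Minimal end-to-end sketch (added for illustration; not part of the upstream
# module). It exercises only the CPU delegates defined above and assumes
# NumPy and scikit-learn are installed; no DASF cluster setup is required.
if __name__ == "__main__":
    import numpy as np

    # Two well-separated clusters, as in the class docstring example.
    X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])

    gmm = GaussianMixture(n_components=2, random_state=0)

    # Fit and predict through the CPU delegates.
    labels = gmm._fit_predict_cpu(X)
    print("labels:", labels)

    # Inspect and tweak hyperparameters on the wrapped estimator.
    print("n_components:", gmm._get_params_cpu()["n_components"])
    gmm._set_params_cpu(covariance_type="diag")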