In [2]:
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.datasets import make_classification

In [3]:
# Synthetic binary-classification dataset: all 10 features informative,
# none redundant; fixed seed for reproducibility.
X, y = make_classification(
    n_samples=10000,
    n_features=10,
    n_informative=10,
    n_redundant=0,
    random_state=101,
)

In [4]:
# Prepend an intercept column (statsmodels does not add one automatically),
# then fit a logistic regression by maximum likelihood.
X_const = sm.add_constant(X)
logit_model = sm.Logit(y, X_const)
fitted_model = logit_model.fit()


Optimization terminated successfully.
         Current function value: 0.438685
         Iterations 7

In [5]:
# Show the fitted model's coefficient table (coef, std err, z, p-values,
# confidence intervals) together with fit statistics such as pseudo R-squared.
fitted_model.summary()


Out[5]:
Logit Regression Results
Dep. Variable: y No. Observations: 10000
Model: Logit Df Residuals: 9989
Method: MLE Df Model: 10
Date: Mon, 27 Feb 2017 Pseudo R-squ.: 0.3671
Time: 12:02:59 Log-Likelihood: -4386.8
converged: True LL-Null: -6931.5
LLR p-value: 0.000
coef std err z P>|z| [95.0% Conf. Int.]
const 0.4299 0.039 11.023 0.000 0.353 0.506
x1 0.0671 0.015 4.410 0.000 0.037 0.097
x2 -0.7828 0.019 -41.947 0.000 -0.819 -0.746
x3 0.1221 0.016 7.815 0.000 0.091 0.153
x4 0.2841 0.016 18.150 0.000 0.253 0.315
x5 0.1469 0.014 10.283 0.000 0.119 0.175
x6 -0.3414 0.019 -17.636 0.000 -0.379 -0.303
x7 0.0503 0.014 3.481 0.000 0.022 0.079
x8 -0.1393 0.014 -9.642 0.000 -0.168 -0.111
x9 0.1127 0.014 7.931 0.000 0.085 0.141
x10 -0.4792 0.018 -27.340 0.000 -0.514 -0.445

In [6]:
import urllib

In [7]:
# Bug fix: `import urllib` (earlier cell) does not import the `urllib.parse`
# submodule; this cell only worked because another library happened to load it.
# Import it explicitly so the notebook survives Restart & Run All.
import urllib.parse

dir(urllib.parse)


Out[7]:
['DefragResult',
 'DefragResultBytes',
 'MAX_CACHE_SIZE',
 'ParseResult',
 'ParseResultBytes',
 'Quoter',
 'ResultBase',
 'SplitResult',
 'SplitResultBytes',
 '_ALWAYS_SAFE',
 '_ALWAYS_SAFE_BYTES',
 '_DefragResultBase',
 '_NetlocResultMixinBase',
 '_NetlocResultMixinBytes',
 '_NetlocResultMixinStr',
 '_ParseResultBase',
 '_ResultMixinBytes',
 '_ResultMixinStr',
 '_SplitResultBase',
 '__all__',
 '__builtins__',
 '__cached__',
 '__doc__',
 '__file__',
 '__loader__',
 '__name__',
 '__package__',
 '__spec__',
 '_asciire',
 '_coerce_args',
 '_decode_args',
 '_encode_result',
 '_hexdig',
 '_hextobyte',
 '_hostprog',
 '_implicit_encoding',
 '_implicit_errors',
 '_noop',
 '_parse_cache',
 '_portprog',
 '_safe_quoters',
 '_splitnetloc',
 '_splitparams',
 '_typeprog',
 'clear_cache',
 'collections',
 'namedtuple',
 'non_hierarchical',
 'parse_qs',
 'parse_qsl',
 'quote',
 'quote_from_bytes',
 'quote_plus',
 're',
 'scheme_chars',
 'splitattr',
 'splithost',
 'splitnport',
 'splitpasswd',
 'splitport',
 'splitquery',
 'splittag',
 'splittype',
 'splituser',
 'splitvalue',
 'sys',
 'to_bytes',
 'unquote',
 'unquote_plus',
 'unquote_to_bytes',
 'unwrap',
 'urldefrag',
 'urlencode',
 'urljoin',
 'urlparse',
 'urlsplit',
 'urlunparse',
 'urlunsplit',
 'uses_fragment',
 'uses_netloc',
 'uses_params',
 'uses_query',
 'uses_relative']

In [8]:
import csv

In [12]:
# List every attribute the csv module exposes (same result as dir(csv):
# a module's dir() is its namespace keys, sorted).
sorted(vars(csv))


Out[12]:
['Dialect',
 'DictReader',
 'DictWriter',
 'Error',
 'QUOTE_ALL',
 'QUOTE_MINIMAL',
 'QUOTE_NONE',
 'QUOTE_NONNUMERIC',
 'Sniffer',
 'StringIO',
 '_Dialect',
 '__all__',
 '__builtins__',
 '__cached__',
 '__doc__',
 '__file__',
 '__loader__',
 '__name__',
 '__package__',
 '__spec__',
 '__version__',
 'excel',
 'excel_tab',
 'field_size_limit',
 'get_dialect',
 'list_dialects',
 're',
 'reader',
 'register_dialect',
 'unix_dialect',
 'unregister_dialect',
 'writer']