
100+ results for 'import pandas repo:fomcl/savreaderwriter' (1765 ms)

run_inversion_gridded2.py https://gitlab.com/flomertens/sph_img | Python | 252 lines
    1: import os
    2: import sys
    3: import time
    4: import shutil
    6: import matplotlib as mpl
    7: mpl.use('Agg')
    9: from libwise import scriptshelper as sh
    10: from libwise import profileutils
    12: import sphimg
    13: import util
    15: import numpy as np
    16: import pandas as pd

convo_net.py https://gitlab.com/Mounphuix/MNIST | Python | 251 lines
    1: from __future__ import print_function
    2: import os
    3: import sys
    4: import timeit
    5: import numpy as np
    6: import pandas as pd
    7: import theano
    8: import theano.tensor as T
    9: import six.moves.cPickle as pickle
    10: from network_layers import LogisticRegression
    11: from network_layers import HiddenLayer
    12: from network_layers import LeNetConvPoolLayer
    13: from utility_functions import load_data

multilinear.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 329 lines
    14: from patsy import dmatrix
    15: import pandas as pd
    16: from statsmodels.api import OLS
    17: from statsmodels.api import stats
    18: import numpy as np
    56:         formula description of the model
    57:     dataframe : pandas.dataframe
    58:         dataframe where the model will be evaluated
    87:     -------
    88:     summary : pandas.DataFrame
    89:         a dataframe containing an extract from the summary of the model
    120:     >>> import statsmodels.api as sm
    121:     >>> data = sm.datasets.longley.load_pandas()

wide_n_deep_tutorial.py https://gitlab.com/admin-github-cloud/tensorflow | Python | 212 lines
    15: """Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
    16: from __future__ import absolute_import
    17: from __future__ import division
    18: from __future__ import print_function
    20: import tempfile
    21: import urllib
    23: import pandas as pd
    24: import tensorflow as tf

zmethods.py https://gitlab.com/solstag/abstractology | Python | 253 lines
    20: import pandas
    21: from pathlib import Path
    25: from ..ioio import ioio
    35: and should return a pandas.Series of a scalar dtype and indexed by 'index'.
    36: """
    123:         x_groups = xblocks[xlevel].groupby(xblocks[xlevel])
    124:         count0 = pandas.Series(index=xblocks[xlevel].unique())
    125:         count1 = pandas.Series(index=xblocks[xlevel].unique())
    240:     values = pandas.Series(vals, index=pandas.MultiIndex.from_tuples(keys))

data_utils.py https://gitlab.com/lwd17/enhanced_examplar_ae | Python | 309 lines
    7: import csv
    8: import os
    9: import os.path as op
    10: import zipfile
    11: from functools import reduce
    12: from glob import glob
    13: from multiprocessing import cpu_count
    14: from typing import Any, Dict, List
    16: import numpy as np
    17: import pandas as pd
    18: import sentencepiece as sp

preprocessing.ipynb https://gitlab.com/aakansh9/SPHERE-Challenge | Jupyter | 188 lines
    11: import pandas as pd
    12: import numpy as np
    129: import xgboost
    146: <ipython-input-54-4c763af0299c> in <module>()
         ----> 1 xgboost.DMatrix(data = feats, label = targets)
    147: /opt/conda/lib/python3.5/site-packages/xgboost-0.4-py3.5.egg/xgboost/core.py in __init__(self, data, label, missing, weight, silent, feature_names, feature_types)
         --> 222         label = _maybe_pandas_label(label)
    148: /opt/conda/lib/python3.5/site-packages/xgboost-0.4-py3.5.egg/xgboost/core.py in _maybe_pandas_label(label)
         --> 166             raise ValueError('DataFrame for label cannot have multiple columns')

google_image_scraper.py https://gitlab.com/Quantza/DeepClassificationBot | Python | 245 lines
    6: from __future__ import absolute_import
    7: from __future__ import print_function
    8: import os
    9: import time
    10: import re
    11: import socket
    13: from selenium import webdriver
    14: from pattern.web import URL, DOM
    95:     def retrieve_source_fr_html(self, driver):
    96:         """ Make use of selenium. Retrieve from html table using pandas table.

demo.py https://gitlab.com/yaojian/RenjuAI | Python | 177 lines
    1: #  -*- coding: utf-8 -*-
    2: import os
    3: import sys
    4: from datetime import datetime
    5: import numpy as np
    6: import pandas as pd
    8: import mail
    38: def prepare_basic_model(date_zone, file_format='pkl'):
    39:     from mail_preprocess import parallel_preprocess_v2
    40:     path_prefix = global_dir + "/2015-12-%s_token." + file_format

prep_mtedx_data.py https://gitlab.com/lwd17/enhanced_examplar_ae | Python | 271 lines
    7: import argparse
    8: import logging
    9: import os
    10: from pathlib import Path
    11: import shutil
    12: from itertools import groupby
    13: from tempfile import NamedTemporaryFile
    14: from typing import Tuple
    16: import pandas as pd
    17: import soundfile as sf

tile.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 297 lines
    5: from pandas.core.api import DataFrame, Series
    6: from pandas.core.categorical import Categorical
    7: from pandas.core.index import _ensure_index
    8: import pandas.core.algorithms as algos
    9: import pandas.core.common as com
    10: import pandas.core.nanops as nanops
    11: from pandas.compat import zip
    13: import numpy as np

fix.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 229 lines
    1: """Supporting functions for the 'fix' command."""
    2: from __future__ import absolute_import, division, print_function
    4: import logging
    6: import numpy as np
    7: import pandas as pd
    9: from . import params, smoothing

prep_covost_data.py https://gitlab.com/lwd17/enhanced_examplar_ae | Python | 294 lines
    7: import argparse
    8: import csv
    9: import logging
    10: import os
    11: import os.path as op
    12: import shutil
    13: from tempfile import NamedTemporaryFile
    14: from typing import Optional, Tuple
    16: import pandas as pd
    17: import torchaudio
    18: from examples.speech_to_text.data_utils import (
    19:     create_zip,

ioio.py https://gitlab.com/solstag/abstractology | Python | 310 lines
    21: import pickle
    22: import json
    23: import gzip
    24: import lzma
    25: import pandas
    26: from pathlib import Path
    27: from itertools import chain
    28: from tempfile import NamedTemporaryFile
    55:             "module": lzma,
    56:             "pandas_arg": "xz",
    57:         },
    272:     @classmethod
    273:     def store_pandas(cls, obj, fpath, fmt=None, formatter_args={}):
    274:         """

example_enhanced_boxplots.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 98 lines
    1: import numpy as np
    2: import matplotlib.pyplot as plt
    4: import statsmodels.api as sm
    10: data = sm.datasets.anes96.load_pandas()
    11: party_ID = np.arange(7)

generate_baseline03.py https://gitlab.com/tianzhou2011/talkingdata | Python | 397 lines
    8: import keras
    9: import pandas as pd
    10: import os
    11: import sys
    12: import gc
    13: from random import shuffle
    14: from scipy import sparse
    16: from sklearn.cross_validation import train_test_split
    17: from sklearn.metrics import log_loss
    380:     ##################
    381:     from sklearn.datasets import dump_svmlight_file
    382:     # Metric

regression.py https://gitlab.com/SplatoonModdingHub/PTVS | Python | 303 lines
    45: from pandas import read_table
    46: import numpy as np
    47: import matplotlib.pyplot as plt
    50:     # [OPTIONAL] Seaborn makes plots nicer
    51:     import seaborn
    52: except ImportError:
    63:     # pandas.read_excel instead of read_table
    64:     #from pandas import read_excel
    65:     #frame = read_excel(URL)
    131:     # Normalize the entire data set
    132:     from sklearn.preprocessing import StandardScaler, MinMaxScaler
    133:     arr = MinMaxScaler().fit_transform(arr)

ReverseTrainSecondLayer.py https://gitlab.com/hyeonjik420/deep_learning_transcriptomics | Python | 86 lines
    1: from keras.layers import Input, Dense, Activation
    2: import numpy as np
    3: from keras.models import Model, load_model
    4: from keras import optimizers, activations
    5: import math
    6: from numpy import *
    7: import pandas as pd
    8: from keras.callbacks import ModelCheckpoint
    9: from keras.callbacks import CSVLogger
    10: from keras import optimizers
    11: import tensorflow as tf
    12: from keras.backend import tensorflow_backend as K
    13: import glob
    14: def sigmo(x):

FastFourierTransform.py https://gitlab.com/debasishk/PerformanceTest-Monitoring | Python | 328 lines
    5: import numpy as np
    6: import pandas as pd
    7: import matplotlib.pyplot as plt
    8: import datetime
    9: # from utils.mkdir import mkdir
    10: import os
    41:             out_path (str): The output path where results are stored
    42:             data (Pandas Dataframe): The input data where Outliers are to be found out
    43:             test_data (bool): Defaults to False. If model to be used for Unit Tests, explicitely specify this variable as True to bypass get_data() method.
    79:         Returns:
    80:             scores (Pandas DataFrame): FFT based outlier scores for each point stored in Pandas DataFrame indexed by tinestamp.
    81:         """
    113:         Returns:
    114:             test_output (Pandas DataFrame): Pandas Dataframe object containing 1 column which holds scores for each point
    115:         """

test_multiclass.py https://gitlab.com/admin-github-cloud/scikit-learn | Python | 347 lines
    2: from __future__ import division
    3: import numpy as np
    4: import scipy.sparse as sp
    5: from itertools import product
    7: from sklearn.externals.six.moves import xrange
    8: from sklearn.externals.six import iteritems
    10: from scipy.sparse import issparse
    11: from scipy.sparse import csc_matrix
    12: from scipy.sparse import csr_matrix
    13: from scipy.sparse import coo_matrix

TextAnalyzer.py https://gitlab.com/vicidroiddev/SouthParkMachineLearning | Python | 302 lines
    6: import logging
    7: import pandas
    8: import numpy as np
    9: import time
    10: import os
    11: import multiprocessing
    12: import matplotlib.pyplot as plt
    19: import sknn.mlp
    20: from sknn.backend import lasagne
    21: from sklearn.feature_extraction import DictVectorizer
    249:         output = pandas.DataFrame(
    250:             data={

test_buyback_auth.py https://gitlab.com/lbennett/zipline | Python | 227 lines
    3: """
    4: import blaze as bz
    5: from blaze.compute.core import swap_resources_into_scope
    6: import pandas as pd
    7: from six import iteritems
    9: from zipline.pipeline.common import(
    10:     BUYBACK_ANNOUNCEMENT_FIELD_NAME,
    19: )
    20: from zipline.pipeline.data import (
    21:     CashBuybackAuthorizations,
    23: )
    24: from zipline.pipeline.factors.events import (
    25:     BusinessDaysSinceCashBuybackAuth,

KalmanFilters.py https://gitlab.com/debasishk/PerformanceTest-Monitoring | Python | 310 lines
    6: import numpy as np
    7: from externals.pykalman import KalmanFilter
    8: import matplotlib.pyplot as plt
    9: import pandas as pd
    10: import datetime, platform, os
    11: from numpy.random import random
    12: from utils.mkdir import mkdir
    23:             out_path (str): The output path, to which specific path to be appended to save results
    24:             data (Pandas Dataframe): The data which needs to be fitted with ARIMA model. Defaults to Empty Dataframe
    25:             test_data (bool, Optional [Default False]): Defaults to False. If model to be used for Unit Tests, explicitely specify this variable as True to bypass get_data() method.
    80:         Args:
    81:             data (Pandas DataFrame): Input Dataset which needs to be smoothed out, and predicted values to be found

vectorology.py https://gitlab.com/solstag/abstractology | Python | 259 lines
    21:     import gensim
    22: except ImportError:
    23:     print("Warning: Failed to import `gensim`, some functions may not be available")
    25: import multiprocessing, numpy, os, pandas
    26: from os import path
    27: from pathlib import Path
    28: from collections import OrderedDict
    29: from itertools import combinations
    30: from copy import deepcopy
    31: from matplotlib import pyplot as plt, colors as colors
    33: from .ioio import ioio
    34: from .corporalogy import Corporalogy

AUC_plot.py https://gitlab.com/BudzikFUW/budzik_analiza | Python | 228 lines
    2: import pandas as pd
    3: import numpy as np
    5: import scipy.stats as st
    6: import sys
    7: import matplotlib.pyplot as plt
    8: import os
    9: import seaborn as sns
    10: from itertools import product
    11: from collections import defaultdict

misc.py https://gitlab.com/pschuprikov/lpm | Python | 430 lines
    1: #!/usr/bin/env python3
    2: from textwrap import dedent
    3: import math
    4: from typing import List, Optional, Tuple, Union, Sequence, NamedTuple
    5: from sys import stderr
    6: import os
    7: import os.path as path
    9: import click
    10: import pandas as pd
    11: import numpy as np

RemoveUnnecessaryCastCodeFixProvider.vb https://gitlab.com/sharadag/Roslyn | Visual Basic | 194 lines
    3: Imports System.Collections.Immutable
    4: Imports System.Composition
    5: Imports System.Threading
    6: Imports Microsoft.CodeAnalysis.CodeActions
    7: Imports Microsoft.CodeAnalysis.CodeFixes
    8: Imports Microsoft.CodeAnalysis.Diagnostics
    9: Imports Microsoft.CodeAnalysis.Formatting
    10: Imports Microsoft.CodeAnalysis.Simplification
    11: Imports Microsoft.CodeAnalysis.Text
    12: Imports Microsoft.CodeAnalysis.VisualBasic.Syntax

factorplots.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 211 lines
    26:     x : array-like
    27:         The `x` factor levels constitute the x-axis. If a `pandas.Series` is
    28:         given its name will be used in `xlabel` if `xlabel` is None.
    43:     xlabel : str, optional
    44:         Label to use for `x`. Default is 'X'. If `x` is a `pandas.Series` it
    45:         will use the series names.
    47:         Label to use for `response`. Default is 'func of response'. If
    48:         `response` is a `pandas.Series` it will use the series names.
    49:     colors : list, optional
    90:     from pandas import DataFrame
    91:     fig, ax = utils.create_mpl_ax(ax)
    183:     """
    184:     from pandas import Series
    185:     name = None

sampler.py https://gitlab.com/nexemjail/mir | Python | 223 lines
    1: import numpy as np
    2: import librosa
    3: import scipy
    4: import scipy.signal
    5: from math import sqrt
    6: from math import log
    7: import scipy.signal.spectral
    220:     #t = time.time()
    221:     #df = pandas.DataFrame(convert_with_processes(path))
    222:     #print time.time() - t

answer_key.py https://gitlab.com/lbennett/zipline | Python | 341 lines
    14: # limitations under the License.
    15: import datetime
    16: import hashlib
    17: import os
    19: import numpy as np
    20: import pandas as pd
    21: import pytz
    22: import xlrd
    23: import requests
    25: from six.moves import map

controller.py https://gitlab.com/shpeely/game-stats | Python | 197 lines
    1: import logging
    2: import pandas as pd
    4: from game_stats.config import config
    5: from game_stats.store import StoreFactory
    13: from game_stats.statistics.ultimate_score import UltimateScore
    14: from game_stats.statistics.game_result_stats import GameResultStats
    15: from game_stats.statistics.game_stats import GameStats
    16: from game_stats.statistics.player_stats import PlayerStats
    17: from game_stats.constants import *

test_form.py https://gitlab.com/braintech/psychopy-brain | Python | 194 lines
    4: from __future__ import division
    6: import os
    7: import pytest
    8: from pandas import DataFrame
    9: from psychopy.visual.window import Window
    10: from psychopy.visual.form import Form
    11: from psychopy.visual.text import TextStim
    12: from psychopy.visual.slider import Slider
    13: from psychopy import constants

__init__.py https://gitlab.com/pooja043/Globus_Docker_3 | Python | 107 lines
    1: from __future__ import (absolute_import, division, print_function,
    2:                         unicode_literals)
    4: import warnings
    5: from contextlib import contextmanager
    7: from matplotlib.cbook import is_string_like, iterable
    8: from matplotlib import rcParams, rcdefaults, use
    16: # stolen from pandas
    17: @contextmanager
    27:     >>> import warnings
    28:     >>> with assert_produces_warning():
    78:     # it during all of the tests.
    79:     import locale
    80:     import warnings

Exercises.ipynb https://gitlab.com/santosh.sivapurapu-ab/pandas_exercises | Jupyter | 179 lines
    20: ### Step 1. Import the necessary libraries
    36: ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv).
    75: ##### Have you noticed that the type of Year is int64. But pandas has a different type to work with Time Series. Let's see it now.

plot_and_correct_humidity095b.py https://gitlab.com/pkormos/phase_python | Python | 188 lines
    10: import netCDF4 as nc
    11: import numpy as np
    12: import matplotlib.pyplot as plt
    13: from pandas import Series, date_range
    29: p_list = [p_st.getncattr(tt_list[k]) for k in range(np.shape(pvar)[1])] # sorted rh station list
    30: ppt = Series(np.ma.filled(pvar[:,p_list.index(stn0)],np.nan).ravel(),index=date_range('1962-1-1', '2014-12-31 23:00', freq='H')) # make pandas dataset
    31: pn.close()
    39: r_list = [rh_st.getncattr(tt_list[k]) for k in range(np.shape(rvar)[1])] # sorted rh station list
    40: rh = Series(np.ma.filled(rvar[:,r_list.index(stn0)],np.nan).ravel(),index=date_range('18-Jun-1981 11:00:00', '01-Oct-2014', freq='H')) # make pandas dataset
    41: rn.close()

Exercises.ipynb https://gitlab.com/santosh.sivapurapu-ab/pandas_exercises | Jupyter | 156 lines
    18: ### Step 1. Import the necessary libraries
    34: ### Step 2. Import the first dataset [cars1](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/05_Merge/Auto_MPG/cars1.csv) and [cars2](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/05_Merge/Auto_MPG/cars2.csv).

retrieve_features.py https://gitlab.com/genehack-2/15a.ozerov.team_untitled | Python | 288 lines
    1: import cv2
    2: import numpy as np
    3: from matplotlib import pyplot as plt
    4: import pandas as pd
    5: import seaborn as sns
    6: import sys
    7: HOME_DIR = '/Users/agalicina/Term10/genehack/'  ##change for your machine
    8: sys.path.append(HOME_DIR+'genehack2016')
    9: from process import *
    10: from sklearn.metrics import roc_curve, auc
    11: import math

DynamicPricingAlgo .ipynb https://gitlab.com/thiagarajan.saravanan/dynamic-pricer | Jupyter | 496 lines
    23: import numpy as np
    24: import pandas as pd
    25: data = pd.ExcelFile("Breakfast.xlsx")
    171: Method 1
    172: import pandas as pd
    173: from pymongo import MongoClient
    210: import pymongo
    211: import pandas as pd
    212: from pymongo import Connection
    220: import numpy as np
    221: import pandas as pd
    242: import pymongo
    243: import pandas as pd
    244: #from pymongo import Connection

Multithreaded_server.py https://gitlab.com/Angiolillo/SOKE | Python | 348 lines
    1: from server import Clustering_dirichlet
    2: from sklearn.naive_bayes import MultinomialNB
    3: import numpy as np
    4: import pandas as pd
    5: import pickle
    6: from server import Preprocessing
    7: import threading
    8: import socket

dedup_columns.py https://gitlab.com/mbay/bosch | Python | 97 lines
    1: from subprocess import Popen, PIPE
    2: import pandas as pd
    3: import numpy as np
    4: import argparse
    5: import time
    6: from scipy.sparse import csr_matrix
    7: from sklearn import preprocessing

cell.py https://gitlab.com/abhi1tb/build | Python | 330 lines
    13: # Python stdlib imports
    14: from copy import copy
    15: import datetime
    16: import re
    19: from openpyxl.compat import (
    20:     NUMERIC_TYPES,
    24: from openpyxl.utils.exceptions import IllegalCharacterError
    40: try:
    41:     from pandas import Timestamp
    42:     TIME_TYPES = TIME_TYPES + (Timestamp,)

2015_10_26_Distance_effects.ipynb https://gitlab.com/jdebelius/Absloute-Power | Jupyter | 527 lines
    20: import numpy as np
    21: import matplotlib.pyplot as plt
    22: import scipy
    23: import pandas as pd
    24: import skbio
    26: import absloute_power.distance as dist

misc.py https://gitlab.com/pooja043/Globus_Docker_3 | Python | 386 lines
    1: from numpy import NaN
    2: from pandas import compat
    3: import numpy as np
    5: from pandas.core.api import Series, DataFrame, isnull, notnull
    6: from pandas.core.series import remove_na
    7: from pandas.compat import zip

engine.py https://gitlab.com/debasishk/PerformanceTest-Monitoring | Python | 274 lines
    1: from dataprep import DataPreparation
    2: from model_aggregators import ModelAggregation
    3: from modify_scores import ModifyScores
    4: import pandas as pd
    5: from config import *
    6: from multiprocessing import Process, Manager
    7: import warnings
    8: from utils.dbCon import connect_db
    12: # import sys
    13: # out_file = open("output.txt", "a+")

test_foreign.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 199 lines
    4: import os
    5: import warnings
    6: from datetime import datetime
    10: from pandas import DataFrame, isnull
    11: import pandas.util.testing as ptesting
    20: import pandas
    21: pandas_old = int(pandas.__version__.split('.')[1]) < 9
    43: def test_genfromdta_pandas():
    44:     from pandas.util.testing import assert_frame_equal
    45:     dta = macrodata.load_pandas().data
    196: if __name__ == "__main__":
    197:     import nose
    198:     nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],

homework2_correction.ipynb https://gitlab.com/gvallverdu/cours-python | Jupyter | 341 lines
    21: * Read the file by yourself by browsing it line by line
    22: * Use the read_csv() function of the pandas module to read the table.
    155: ## Part 2: read in the file with pandas
    157: First, you have to import the pandas module:
    166: import pandas as pd
    173: We now use pandas' [`read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) function to read the file. Here are the elements we have to control to read the :

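A minimal sketch of the read_csv() call these notebook excerpts describe, assuming a hypothetical comma-separated file data.csv with a header row (the file name, separator and columns are illustrative, not taken from the notebooks):

    import pandas as pd

    # Hypothetical file; the separator, header row and column names depend on the actual table.
    df = pd.read_csv("data.csv", sep=",", header=0)
    print(df.head())
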
pandas-0.19.1-seqf.patch https://gitlab.com/argent/portage | Patch | 357 lines
    44:  from dateutil.relativedelta import relativedelta
    45:  from pandas.compat import range, iteritems
    46: @@ -4851,6 +4852,7 @@ class TestDST(tm.TestCase):
    213:          else:
    214: diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
    215: index d4eaaa0b5..685de214c 100644
    242: @@ -412,17 +413,7 @@ class Timestamp(_Timestamp):
    243:              from pandas.tseries.frequencies import to_offset
    244:              freq = to_offset(freq)
    296: +        # setup components
    297: +        pandas_datetime_to_datetimestruct(value, PANDAS_FR_ns, &dts)
    298: +        dts.ps = self.nanosecond * 1000
    329: +        # reconstruct & check bounds
    330: +        value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
    331: +        if value != NPY_NAT:

tabular_comparison.ipynb https://gitlab.com/rldotai/td-variance | Jupyter | 439 lines
    11: import numpy as np
    12: from numpy.linalg import pinv
    14: import pandas as pd
    16: import networkx as nx
    17: import pydot
    18: from IPython.display import Image, display
    20: import matplotlib.pyplot as plt
    21: %matplotlib inline

generate_e4.py https://gitlab.com/tianzhou2011/talkingdata | Python | 213 lines
    1: #!/usr/bin/env python
    2: from __future__ import division
    3: from scipy import sparse
    4: from sklearn.datasets import dump_svmlight_file
    5: from sklearn.preprocessing import LabelEncoder, OneHotEncoder
    6: import argparse
    7: import logging
    8: import numpy as np
    9: import os
    10: import pandas as pd
    11: import time

nlp.py https://gitlab.com/lancezlin/ai | Python | 241 lines
    5: import pandas as pd
    6: from copy import deepcopy
    7: from sklearn.linear_model import SGDClassifier
    8: from sklearn.model_selection import GridSearchCV
    9: import re
    10: import os
    11: from sklearn.feature_extraction.text import CountVectorizer
    12: import nltk
    13: from nltk.stem.porter import PorterStemmer
    14: from nltk.corpus import stopwords

prep_covost_data.py https://gitlab.com/lwd17/enhanced_examplar_ae | Python | 279 lines
    7: import argparse
    8: import logging
    9: from pathlib import Path
    10: import shutil
    11: from tempfile import NamedTemporaryFile
    12: from typing import Optional, Tuple
    14: import pandas as pd
    15: import torchaudio
    16: from examples.speech_to_text.data_utils import (
    17:     create_zip,

pandas_sqlite_test.py https://github.com/chromium/chromium.git | Python | 78 lines
    8:   pass
    9: import unittest
    13: from cli_tools.soundwave import pandas_sqlite
    14: from core.external_modules import pandas
    17: @unittest.skipIf(pandas is None, 'pandas not available')
    18: class TestPandasSQLite(unittest.TestCase):
    30:   def testCreateTableIfNotExists_alreadyExists(self):
    31:     df = pandas_sqlite.DataFrame(
    32:         [('bug_id', int), ('summary', str), ('status', str)], index='bug_id')
    35:       self.assertFalse(pandas.io.sql.has_table('bugs', con))
    36:       pandas_sqlite.CreateTableIfNotExists(con, 'bugs', df)
    37:       self.assertTrue(pandas.io.sql.has_table('bugs', con))

adapterbokeh.py https://gitlab.com/oytunistrator/pythalesians | Python | 199 lines
    23: from bokeh.plotting import figure, output_file, show
    24: import pandas
    26: from pythalesians.graphics.graphs.lowleveladapters.adaptertemplate import AdapterTemplate
    27: from pythalesians.graphics.graphs.graphproperties import GraphProperties
    28: from pythalesians.util.constants import Constants
    30: import numpy
    54:         if type(data_frame.index) == pandas.tslib.Timestamp:
    55:             p1 = figure(

analytics-ipynb.html.md.erb https://gitlab.com/ggsaavedra/PredictionIO | Ruby HTML | 98 lines
    33: ```python
    34: import pandas as pd
    35: def rows_to_df(rows):
    36:     return pd.DataFrame(map(lambda e: e.asDict(), rows))
    37: from pyspark.sql import SQLContext
    38: sqlc = SQLContext(sc)
    69: ```python
    70: import matplotlib.pyplot as plt
    71: count = map(lambda e: e.c, summary)

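The rows_to_df() helper in the excerpt above turns PySpark Row objects into a pandas DataFrame. A minimal sketch of how such a helper can be exercised, assuming pyspark is installed and using made-up column names rather than anything from the PredictionIO tutorial:

    import pandas as pd
    from pyspark.sql import Row

    def rows_to_df(rows):
        # Convert each pyspark Row to a plain dict, then build a DataFrame from the dicts.
        return pd.DataFrame([e.asDict() for e in rows])

    # Hypothetical rows standing in for the result of a Spark SQL query.
    rows = [Row(user="alice", events=3), Row(user="bob", events=5)]
    print(rows_to_df(rows))
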
index.md https://gitlab.com/github-cloud-corporation/tensorflow | Markdown | 275 lines
    47: 3.  Install the pandas data analysis library. tf.learn doesn't require pandas, but it does support it, and this tutorial uses pandas. To install pandas:
    48:     1. Get `pip`:
    59:     2. Use `pip` to install pandas:
    61:        ```shell
    62:        $ sudo pip install pandas
    63:        ```
    65:     If you have trouble installing pandas, consult the [instructions]
    66: (http://pandas.pydata.org/pandas-docs/stable/install.html) on the pandas site.
    201: ```python
    202: import pandas as pd
    203: import urllib

Devoir2_correction.ipynb https://gitlab.com/gvallverdu/cours-python | Jupyter | 340 lines
    21: * Read the file yourself by going through it line by line
    22: * Use the read_csv() function of the pandas module to read the table.
    156: ## Part 2: reading the file with pandas
    158: Start by importing the pandas module:
    169: import pandas as pd
    176: We now use pandas' [`read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) function to read the file. Here are the elements we specify in order to read the file:

test_rplot.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 299 lines
    2: from pandas.compat import range
    3: import pandas.util.testing as tm
    4: from pandas import read_csv
    5: import os
    6: import nose
    8: with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
    9:     import pandas.tools.rplot as rplot
    251:     def test_rplot1(self):
    252:         import matplotlib.pyplot as plt
    253:         path = os.path.join(curpath(), 'data/tips.csv')
    262:     def test_rplot2(self):
    263:         import matplotlib.pyplot as plt
    264:         path = os.path.join(curpath(), 'data/tips.csv')

conf.py https://gitlab.com/vectorci/pandas-td | Python | 285 lines
    3: #
    4: # Pandas-TD documentation build configuration file, created by
    5: # sphinx-quickstart on Tue Nov 17 04:30:03 2015.
    16: import sys
    17: import os
    225: latex_documents = [
    226:   (master_doc, 'Pandas-TD.tex', 'Pandas-TD Documentation',
    227:    'Keisuke Nishida', 'manual'),
    255: man_pages = [
    256:     (master_doc, 'pandas-td', 'Pandas-TD Documentation',
    257:      [author], 1)
    269: texinfo_documents = [
    270:   (master_doc, 'Pandas-TD', 'Pandas-TD Documentation',
    271:    author, 'Pandas-TD', 'One line description of project.',

test_clean_functions.ipynb https://gitlab.com/ReturnDensityEstimation/LongMemoryModel | Jupyter | 393 lines
    25: import data_cleaning as clean
    26: import pandas as pd
    27: import numpy as np
    28: from BayesianKalman import kalmanfilter as kf

vary.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 339 lines
    1: """An array of genomic intervals, treated as variant loci."""
    2: from __future__ import absolute_import, division, print_function
    4: import logging
    6: import numpy as np
    7: import pandas as pd
    8: import vcf
    10: from . import gary

CalmeToi.py https://gitlab.com/tianzhou2011/talkingdata | Python | 361 lines
    8: import pandas as pd
    9: import sys
    10: import numpy as np
    11: import scipy as sp
    12: import matplotlib.pyplot as plt
    13: from sklearn.cross_validation import StratifiedKFold, KFold
    14: from sklearn.metrics import log_loss
    15: from sklearn.cluster import DBSCAN
    16: from sklearn import metrics as skmetrics
    17: from sklearn.preprocessing import StandardScaler

synthetic_data.py https://gitlab.com/razhangwei/news-bullying | Python | 489 lines
    4: import os.path as osp
    5: import sys
    6: import time
    7: from copy import deepcopy
    10: import numpy as np
    11: import pandas as pd
    12: import seaborn.apionly as sns
    13: from numpy.linalg import norm
    14: from scipy import sparse
    24: from lib.model.kernels import ExponentialKernel
    25: from lib.model.mark_density import TextualMarkDensity
    26: from lib.model.source_identify_model import SourceIdentifyModel

print_version.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 146 lines
    28:         import statsmodels
    29:         from statsmodels import version
    30:         has_sm = True
    31:     except ImportError:
    32:         has_sm = False
    63:     try:
    64:         import pandas
    65:         print("pandas: %s (%s)" % (safe_version(pandas, ['version',
    74:                                      dirname(dateutil.__file__)))
    75:     except ImportError:
    76:         print("    dateutil: not installed")
    78:     try:
    79:         import patsy
    80:         print("patsy: %s (%s)" % (safe_version(patsy),

dataframe_test.py https://gitlab.com/zharfi/GunSafety | Python | 149 lines
    18: from __future__ import absolute_import
    19: from __future__ import division
    20: from __future__ import print_function
    22: import tensorflow as tf
    24: from tensorflow.contrib.learn.python import learn
    25: from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
    81:   def test_set_item_pandas(self):
    82:     # TODO(jamieas)

timeseries_test.py https://github.com/chromium/chromium.git | Python | 247 lines
    5: import datetime
    6: import unittest
    8: from cli_tools.soundwave import pandas_sqlite
    9: from cli_tools.soundwave import tables
    10: from core.external_modules import pandas
    71: @unittest.skipIf(pandas is None, 'pandas not available')
    72: class TestTimeSeries(unittest.TestCase):
    183:     with tables.DbSession(':memory:') as con:
    184:       pandas_sqlite.InsertOrReplaceRecords(con, 'timeseries', timeseries_in)
    185:       timeseries_out = tables.timeseries.GetTimeSeries(con, test_path)

views.py https://gitlab.com/IPMsim/Virtual-IPM | Python | 368 lines
                    
16
                    
17import os
                    
18import re
                    
19
                    
20import matplotlib.pyplot as plt
                    
21# noinspection PyUnresolvedReferences
                    
22from mpl_toolkits.mplot3d import Axes3D
                    
23import matplotlib.backends.backend_qt5agg
                    
24import numpy as np
                    
25import pandas
                    
26from pandas.errors import ParserError
                    
27import PyQt5.QtCore as QtCore
                    
28import PyQt5.QtGui as QtGui
                    
                
faq.rst https://gitlab.com/0072016/0072016 | ReStructuredText | 277 lines
                    
57
                    
58* `pandas.io <http://pandas.pydata.org/pandas-docs/stable/io.html>`_ to load
                    
59  heterogeneously typed data from various file formats and database protocols
                    
64optimized file format such as HDF5 to reduce data load times. Various libraries
                    
65such as H5Py, PyTables and pandas provide a Python interface for reading and
                    
66writing data in that format.
                    
81
                    
82The contributor should support the importance of the proposed addition with
                    
83research papers and/or implementations in other similar packages, demonstrate
                    
182
                    
183    >>> from leven import levenshtein       # doctest: +SKIP
                    
184    >>> import numpy as np
                    
185    >>> from sklearn.cluster import dbscan
                    
186    >>> data = ["ACCTCCTAGAAG", "ACCTACTAGAAGTT", "GAATATTAGGCCGA"]
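
The FAQ excerpt makes two practical points: pandas.io handles heterogeneously typed input formats, and a binary format such as HDF5 (via H5Py, PyTables or pandas) cuts reload times. A small sketch of that caching step, assuming the optional PyTables dependency (the tables package) is installed:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"id": [1, 2, 3],
                       "label": ["a", "b", "c"],
                       "value": np.random.rand(3)})
    df.to_hdf("cache.h5", key="train", mode="w")   # fast to reload later
    df_again = pd.read_hdf("cache.h5", "train")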
                    
                
views.py https://gitlab.com/lidorle/Django-Assignment1 | Python | 109 lines
                    
2from __future__ import unicode_literals
                    
3import pandas as pd
                    
4from django.shortcuts import render
                    
5from .models import Car,Company,PriceList
                    
6from django.http import Http404
                    
7import json
                    
8from django.core import serializers
                    
9from django.core.serializers.json import DjangoJSONEncoder
                    
10from django.views.decorators.csrf import csrf_exempt
                    
11import urlparse
                    
12from django.http import HttpResponse
                    
                
LibFMFeatures.py https://gitlab.com/xbsd/kaggle-1 | Python | 191 lines
                    
1import string
                    
2from string import lower
                    
3
                    
4import numpy as np
                    
5from sklearn.feature_extraction import DictVectorizer
                    
6from sklearn.feature_extraction.text import TfidfVectorizer
                    
7from sklearn import preprocessing
                    
8from sklearn_pandas import DataFrameMapper
                    
9from sklearn.datasets.svmlight_format import dump_svmlight_file
                    
10from algo.base_mode import BaseModel
                    
11from sklearn_pandas import DataFrameMapper
                    
12from common.utils import FILE_SEPERATOR
                    
13from sklearn import pipeline
                    
14from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
                    
                
timeseriesio.py https://gitlab.com/oytunistrator/pythalesians | Python | 388 lines
                    
21
                    
22import pandas
                    
23import codecs
                    
24import datetime
                    
25from dateutil.parser import parse
                    
26import shutil
                    
27
                    
28from openpyxl import load_workbook
                    
29import os.path
                    
30
                    
31from pythalesians.util.constants import Constants
                    
32from pythalesians.util.loggermanager import LoggerManager
                    
                
calculate_path.py https://gitlab.com/mdornfe1/busy_cab | Python | 174 lines
                    
1import sqlalchemy as sa
                    
2import psycopg2
                    
3import pandas.io.sql as sqlio
                    
4import pandas as pd
                    
5import matplotlib.pyplot as plt
                    
6import numpy as np
                    
7import folium
                    
8import json
                    
9import requests
                    
10from IPython import embed
                    
11import datetime
                    
12from folium import plugins
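
pandas.io.sql (imported as sqlio above) can pull a Postgres query straight into a DataFrame through a SQLAlchemy engine. A sketch with a placeholder connection string and table name:

    import sqlalchemy as sa
    import pandas.io.sql as sqlio

    engine = sa.create_engine("postgresql+psycopg2://user:password@localhost:5432/taxi")
    trips = sqlio.read_sql_query("SELECT * FROM trips LIMIT 100", engine)
    print(trips.head())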
                    
                
test_with_sklearn.py https://gitlab.com/admin-github-cloud/xgboost | Python | 300 lines
                    
1import numpy as np
                    
2import random
                    
10    tm._skip_if_no_sklearn()
                    
11    from sklearn.datasets import load_digits
                    
12    from sklearn.cross_validation import KFold
                    
28    tm._skip_if_no_sklearn()
                    
29    from sklearn.datasets import load_iris
                    
30    from sklearn.cross_validation import KFold
                    
55
                    
56def test_feature_importances():
                    
57    tm._skip_if_no_sklearn()
                    
75    # numeric columns
                    
76    import pandas as pd
                    
77    y = pd.Series(digits['target'])
                    
118    from sklearn.grid_search import GridSearchCV
                    
119    from sklearn.datasets import load_boston
                    
120
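
test_feature_importances feeds pandas objects into the scikit-learn wrapper and checks the importances it exposes. A minimal sketch of that usage (hyperparameters are arbitrary, and modern scikit-learn provides KFold in sklearn.model_selection rather than the deprecated sklearn.cross_validation seen above):

    import pandas as pd
    from sklearn.datasets import load_digits
    from xgboost import XGBClassifier

    digits = load_digits(n_class=2)
    X = pd.DataFrame(digits["data"])
    y = pd.Series(digits["target"])
    clf = XGBClassifier(n_estimators=10).fit(X, y)
    print(clf.feature_importances_)    # one entry per column of X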
                    
                
bk_filter.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 79 lines
                    
1from __future__ import absolute_import
                    
2
                    
3import numpy as np
                    
4from scipy.signal import fftconvolve
                    
5from .utils import _maybe_get_pandas_wrapper
                    
6
                    
51    --------
                    
52    >>> import statsmodels.api as sm
                    
53    >>> dta = sm.datasets.macrodata.load()
                    
60    # Lancosz Sigma Factors np.sinc(2*j/(2.*K+1))
                    
61    _pandas_wrapper = _maybe_get_pandas_wrapper(X, K)
                    
62    X = np.asarray(X)
                    
75                                               # convolution
                    
76    if _pandas_wrapper is not None:
                    
77        return _pandas_wrapper(X)
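
bk_filter.py implements the Baxter-King band-pass filter and, when the input is a pandas object, wraps the filtered array back onto the original index via _maybe_get_pandas_wrapper. From user code the filter is normally reached through the public statsmodels API, roughly:

    import statsmodels.api as sm

    dta = sm.datasets.macrodata.load_pandas().data
    # keep cycles of 6-32 quarters, trimming K=12 observations at each end
    cycles = sm.tsa.filters.bkfilter(dta[["realgdp"]], low=6, high=32, K=12)
    print(cycles.head())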
                    
                
PeerAnalysis.py https://gitlab.com/debasishk/PerformanceTest-Monitoring | Python | 286 lines
                    
34from collections import OrderedDict
                    
35import pandas as pd
                    
36import datetime as dt
                    
37import numpy as np
                    
38import matplotlib.pyplot as plt
                    
39import sys
                    
40
                    
41from AnomalyDetection.Univariate.LOF import LOF
                    
42from ...Utils.InitializeOutlierDetectionParams import ModelParams
                    
43from AnomalyDetection import MedianAbsoluteDeviation
                    
44from AnomalyDetection import FastFourierTransform
                    
                
test_enum_add_cases.swift https://gitlab.com/dwiktor/swift | Swift | 233 lines
                    
3
                    
4import StdlibUnittest
                    
5import enum_add_cases
                    
193    -> [AddPayloadToMultiPayload] {
                    
194  return [.Cats(s), .Ponies(s), .Pandas]
                    
195}
                    
207      return 1
                    
208    case .Pandas:
                    
209      return 2
                    
                
dynamic.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 373 lines
                    
8import util
                    
9import plotting
                    
10
                    
15try:
                    
16    import pandas as pn
                    
17except ImportError:
                    
36    try:
                    
37        import pandas as pn
                    
38    except ImportError:
                    
39        raise ImportError('pandas is required to use this code (for now)')
                    
40
                    
363if __name__ == '__main__':
                    
364    import pandas.util.testing as ptest
                    
365
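
dynamic.py imports pandas twice: once at module level with the failure swallowed, and once inside the code path that actually needs it, where a clear ImportError is raised. A compact sketch of that guard (require_pandas is an illustrative name, not part of the file):

    try:
        import pandas as pn
    except ImportError:
        pn = None

    def require_pandas():
        if pn is None:
            raise ImportError("pandas is required to use this code (for now)")
        return pn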
                    
                
pd02-pandas-paris.ipynb https://gitlab.com/gvallverdu/cours-python | Jupyter | 392 lines
                    
34    "%matplotlib inline\n",
                    
35    "import pandas as pd\n",
                    
36    "import numpy as np\n",
                    
37    "import seaborn as sns\n",
                    
38    "import calendar\n",
                    
48    "<div class=\"alert alert-success\" style=\"margin-top:20px\">\n",
                    
49    "    <b>Exercise:</b> Read the file <code>place-de-la-nation-co2.csv</code> with pandas.\n",
                    
50    "</div>\n",
                    
312   "source": [
                    
313    "The CO2 level seems higher on Wednesdays, but is the variation significant?"
                    
314   ]
                    
328    "<div class=\"alert alert-success\" style=\"margin-top:20px\">\n",
                    
329    "    <b>Exercise:</b> Read the file <code>place-de-la-nation-temperature.csv</code> with pandas.\n",
                    
330    "</div>\n"
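
The notebook's exercises ask the reader to load the CSV files with pandas and then compare CO2 levels across weekdays. A sketch of one way to do it, assuming a semicolon-separated file whose first column holds the timestamps (the real layout may differ):

    import pandas as pd

    co2 = pd.read_csv("place-de-la-nation-co2.csv", sep=";",
                      parse_dates=[0], index_col=0)
    weekday_mean = co2.groupby(co2.index.dayofweek).mean()
    print(weekday_mean)   # Monday is 0, so Wednesday is row 2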
                    
                
mat2nc.py https://gitlab.com/pkormos/phase_python | Python | 193 lines
                    
11
                    
12import netCDF4 as nc
                    
13import numpy as np
                    
14from pandas import DataFrame, date_range
                    
15import h5py
                    
29#     'rc.usc-138044', 'rc.usc-d03','rc.usc-j10', 'rc.usc-l21'])
                    
30# ppt = DataFrame(np.transpose(pmat.get("ppt_corrected")), index=date_range('1962-1-1', '2014-12-31 23:00', freq='H'), columns = p_list) # make pandas dataset
                    
31# ppt = ppt.replace('NaN',-9999)
                    
74    'rc.tg.rme-176_tf'])
                    
75rh = DataFrame(np.transpose(rmat.get("rh")), index=date_range('18-Jun-1981 11:00:00', '01-Oct-2014', freq='H'), columns = r_list) # make pandas dataset
                    
76rh = rh.replace('NaN',-9999)
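
Lines 75-76 show the recurring pattern in this script: wrap a raw array in a DataFrame carrying an hourly DatetimeIndex built with date_range, then swap the 'NaN' sentinel for -9999. A self-contained sketch with made-up station names and random data:

    import numpy as np
    import pandas as pd

    stations = ["rc-1", "rc-2"]                                   # placeholders
    index = pd.date_range("1981-06-18 11:00", "1981-06-19 11:00", freq="H")
    rh = pd.DataFrame(np.random.rand(len(index), len(stations)),
                      index=index, columns=stations)
    rh = rh.replace("NaN", -9999)                                 # sentinel used above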
                    
                
utils.py https://gitlab.com/brenddongontijo/SMI-UnB | Python | 360 lines
                    
1import json
                    
2import matplotlib.dates as mdates
                    
3import matplotlib.patches as mpatches
                    
4import mpld3
                    
5import numpy
                    
6import pandas
                    
7
                    
8from django.utils import timezone
                    
9from django.utils.translation import ugettext as _
                    
10from dateutil.relativedelta import relativedelta
                    
11from matplotlib.figure import Figure
                    
                
ensemble2.ipynb https://gitlab.com/jeongyoonlee/allstate-claims-severity | Jupyter | 256 lines
                    
30   "source": [
                    
31    "from __future__ import division\n",
                    
32    "from scipy.optimize import minimize                                             \n",
                    
33    "from sklearn.metrics import mean_absolute_error as MAE\n",
                    
34    "from sklearn import base                                                        \n",
                    
35    "from sklearn.utils import check_random_state \n",
                    
36    "import pandas as pd\n",
                    
37    "import numpy as np\n",
                    
38    "\n",
                    
39    "from kaggler.data_io import load_data\n",
                    
40    "from const import SEED"
                    
                
05_pandas_reference.py https://gitlab.com/varunkothamachu/DAT3 | Python | 243 lines
                    
1'''
                    
2Pandas Reference Guide
                    
3
                    
12
                    
13import pandas as pd
                    
14import numpy as np
                    
101
                    
102### IMPORTING DATA ###
                    
103
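
The reference guide's "IMPORTING DATA" section itself is not shown in the excerpt; a typical opening example for such a section would be a delimited read with explicit column names (the file name and columns below are illustrative only):

    import pandas as pd

    users = pd.read_csv("users.psv", sep="|", header=None,
                        names=["user_id", "age", "gender", "occupation", "zip"])
    print(users.dtypes)
    print(users.head())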
                    
                
PaperPainting.py https://gitlab.com/m10774708/dotcode | Python | 309 lines
                    
1from pymongo import MongoClient
                    
2from sklearn import cluster, datasets, preprocessing, metrics
                    
3from sklearn.linear_model import LinearRegression
                    
4from scipy.spatial.distance import cdist
                    
5import scipy
                    
6import numpy as np
                    
7import matplotlib.pyplot as plt
                    
8import pandas as pd
                    
9import traceback
                    
10import csv
                    
11import os
                    
12import time
                    
13import datetime
                    
                
test_cparser.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 407 lines
                    
6from datetime import datetime
                    
7from pandas import compat
                    
8import csv
                    
18from pandas import DataFrame, Series, Index, isnull, MultiIndex
                    
19import pandas.io.parsers as parsers
                    
20from pandas.io.parsers import (read_csv, read_table, read_fwf,
                    
23                                 assert_series_equal, network)
                    
24import pandas.lib as lib
                    
25from pandas import compat
                    
27
                    
28import pandas.util.testing as tm
                    
29
                    
30from pandas.parser import TextReader
                    
31import pandas.parser as parser
                    
32
                    
                
grid.py https://gitlab.com/e0/qgrid | Python | 387 lines
                    
1import pandas as pd
                    
2import numpy as np
                    
3import uuid
                    
4import os
                    
5import json
                    
6from numbers import Integral
                    
7
                    
8from IPython.display import display_html, display_javascript
                    
9try:
                    
10    from ipywidgets import widgets
                    
11except ImportError:
                    
12    from IPython.html import widgets
                    
13from IPython.display import display, Javascript
                    
                
cmdp_collider_config.py https://gitlab.com/dreval/bmdp-python-scripts | Python | 273 lines
                    
11import numpy as np
                    
12import pandas as pd
                    
13import matplotlib
                    
14from typing import Mapping, Any
                    
15from scipy.stats import t
                    
17matplotlib.use('PDF')
                    
18from matplotlib import pyplot as plt
                    
19
                    
                
pyder.py https://gitlab.com/NREL-DER-training/PythonDER | Python | 299 lines
                    
1from __future__ import absolute_import
                    
2from __future__ import division
                    
3from __future__ import print_function
                    
4
                    
5import os
                    
6
                    
7import copy
                    
8
                    
9import pandas as pd
                    
10import networkx as nx
                    
11
                    
12from . import visualize
                    
13from . import templates
                    
                
data.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 70 lines
                    
37
                    
38from numpy import recfromtxt, column_stack, array
                    
39from statsmodels.datasets import utils as du
                    
40from os.path import dirname, abspath
                    
41
                    
53
                    
54def load_pandas():
                    
55    """
                    
63    data = _get_data()
                    
64    return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
                    
65
                    
                
test_math.py https://gitlab.com/pooja043/Globus_Docker_4 | Python | 67 lines
                    
1import nose
                    
2
                    
4from numpy.random import randn
                    
5import numpy as np
                    
6
                    
7from pandas.core.api import Series, DataFrame, date_range
                    
8from pandas.util.testing import assert_almost_equal
                    
9import pandas.core.datetools as datetools
                    
10import pandas.stats.moments as mom
                    
11import pandas.util.testing as tm
                    
12import pandas.stats.math as pmath
                    
13import pandas.tests.test_series as ts
                    
14from pandas import ols
                    
                
test_sklearn.py https://gitlab.com/peter__barnes/work | Python | 109 lines
                    
1import pandas as pd
                    
2import numpy as np
                    
3from sklearn.tree import DecisionTreeClassifier
                    
4from sklearn.naive_bayes import GaussianNB
                    
5from sklearn.metrics import confusion_matrix
                    
6from sklearn.metrics import accuracy_score, precision_score, recall_score
                    
                
test_dividends.py https://gitlab.com/lbennett/zipline | Python | 407 lines
                    
3"""
                    
4import blaze as bz
                    
5from blaze.compute.core import swap_resources_into_scope
                    
6import pandas as pd
                    
7from six import iteritems
                    
8
                    
9from zipline.pipeline.common import (
                    
10    ANNOUNCEMENT_FIELD_NAME,
                    
26)
                    
27from zipline.pipeline.data.dividends import (
                    
28    DividendsByAnnouncementDate,
                    
31)
                    
32from zipline.pipeline.factors.events import (
                    
33    BusinessDaysSinceDividendAnnouncement,
                    
                
feeding_functions.py https://gitlab.com/github-cloud-corporation/tensorflow | Python | 202 lines
                    
16
                    
17from __future__ import absolute_import
                    
18from __future__ import division
                    
19from __future__ import print_function
                    
20
                    
31from tensorflow.python.platform import tf_logging as logging
                    
32from tensorflow.python.training import queue_runner
                    
33
                    
35try:
                    
36  import pandas as pd
                    
37  HAS_PANDAS = True
                    
147      raise TypeError(
                    
148          "data must be either a numpy array or pandas DataFrame if pandas is "
                    
149          "installed; got {}".format(type(data).__name__))
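
The module treats pandas as optional: a HAS_PANDAS flag is set at import time, and the feed function later rejects anything that is neither an ndarray nor, when pandas is available, a DataFrame. A sketch of that check (check_feed_data is an illustrative name):

    import numpy as np

    try:
        import pandas as pd
        HAS_PANDAS = True
    except ImportError:
        HAS_PANDAS = False

    def check_feed_data(data):
        if isinstance(data, np.ndarray):
            return data
        if HAS_PANDAS and isinstance(data, pd.DataFrame):
            return data
        raise TypeError("data must be either a numpy array or pandas DataFrame if "
                        "pandas is installed; got {}".format(type(data).__name__))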
                    
                
tensorflow_dataframe_test.py https://gitlab.com/github-cloud-corporation/tensorflow | Python | 354 lines
                    
34try:
                    
35  import pandas as pd
                    
36  HAS_PANDAS = True
                    
152                              "penguin": list("abcdefghij")})
                    
153    tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
                    
154
                    
163
                    
164    self._assert_pandas_equals_tensorflow(pandas_df,
                    
165                                          tensorflow_df,
                    
194        default_values=default_values)
                    
195    self._assert_pandas_equals_tensorflow(pandas_df,
                    
196                                          tensorflow_df,
                    
252
                    
253    self._assert_pandas_equals_tensorflow(pandas_df,
                    
254                                          tensorflow_df,
                    
                
trainit.py https://gitlab.com/smartllvlab/cluster-fsl | Python | 194 lines
                    
1import torch
                    
2import argparse
                    
3import pandas as pd
                    
4import os
                    
5import json
                    
6import pprint
                    
7from src import utils as ut
                    
8
                    
9from src import datasets, models
                    
10from src.models import backbones
                    
11from torch.utils.data import DataLoader
                    
12
                    
                
climate.py https://gitlab.com/jargnar/climate-portal | Python | 190 lines
                    
38'''
                    
39import os
                    
40import yaml
                    
41import logging
                    
42import StringIO
                    
43import sqlalchemy
                    
44import pandas as pd
                    
45import json
                    
46from flask import Flask
                    
47from flask import request
                    
48from flask import make_response
                    
49from flask import send_from_directory
                    
                
test_consensus_estimates.py https://gitlab.com/lbennett/zipline | Python | 345 lines
                    
3"""
                    
4import blaze as bz
                    
5from blaze.compute.core import swap_resources_into_scope
                    
6import pandas as pd
                    
7from six import iteritems
                    
8
                    
9from zipline.pipeline.common import (
                    
10    ACTUAL_VALUE_FIELD_NAME,
                    
35    SID_FIELD_NAME)
                    
36from zipline.pipeline.data import ConsensusEstimates
                    
37from zipline.pipeline.loaders.consensus_estimates import (
                    
39)
                    
40from zipline.pipeline.loaders.blaze import BlazeConsensusEstimatesLoader
                    
41from zipline.pipeline.loaders.utils import (
                    
                
wyominglib.py https://gitlab.com/rvalenzuela/wyominglib | Python | 416 lines
                    
2    Download and parse wyoming soundings into
                    
3    pandas DataFrames, then store to HDF5 file
                    
4
                    
22    2)
                    
23    import wyominglib as wl
                    
24    wl.download_wyoming(region='samer',
                    
29    3)
                    
30    import wyominglib as wl
                    
31    wl.download_wyoming(region='samer',
                    
37
                    
38import re
                    
39import sys
                    
43import numpy as np
                    
44import pandas as pd
                    
45from bs4 import BeautifulSoup
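
The docstring describes the overall pipeline: download a sounding page, parse it into a pandas DataFrame, and persist everything to an HDF5 file. A generic sketch of the parse-and-store half, with a hypothetical helper name and table layout (the real wyominglib parsing is more involved, and HDFStore needs PyTables):

    from io import StringIO
    import pandas as pd

    def store_sounding(html_text, station, store_path="soundings.h5"):
        frames = pd.read_html(StringIO(html_text))      # needs lxml or html5lib
        df = frames[0]
        with pd.HDFStore(store_path) as store:
            store.append(station, df)                   # one growing table per station
        return df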
                    
                
test_basic.py https://gitlab.com/github-cloud-corporation/xgboost | Python | 209 lines
                    
1# -*- coding: utf-8 -*-
                    
2import numpy as np
                    
3import xgboost as xgb
                    
4import unittest
                    
5
                    
143
                    
144    def test_feature_importances(self):
                    
145        data = np.random.randn(100, 5)
                    
158
                    
159        # number of feature importances should == number of features
                    
160        scores1 = bst.get_score()
                    
161        scores2 = bst.get_score(importance_type='weight')
                    
162        scores3 = bst.get_score(importance_type='cover')
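
test_feature_importances trains a booster on random data and then reads Booster.get_score() under different importance types. A standalone sketch of that call pattern:

    import numpy as np
    import xgboost as xgb

    X = np.random.randn(100, 5)
    y = (X[:, 0] > 0).astype(int)
    dtrain = xgb.DMatrix(X, label=y, feature_names=["f%d" % i for i in range(5)])
    bst = xgb.train({"objective": "binary:logistic"}, dtrain, num_boost_round=10)

    print(bst.get_score())                            # default importance_type='weight'
    print(bst.get_score(importance_type="cover"))
    print(bst.get_score(importance_type="gain"))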
                    
                
plot_and_correct_humidity076.py https://gitlab.com/pkormos/phase_python | Python | 239 lines
                    
9
                    
10import netCDF4 as nc
                    
11import numpy as np
                    
12import matplotlib.pyplot as plt
                    
13from pandas import DataFrame, date_range
                    
14
                    
35pvar = pn.variables["precipitation"]
                    
36ppt = DataFrame(pvar[:], index=date_range('1962-1-1', '2014-12-31 23:00', freq='H'), columns = p_list) # make pandas dataset
                    
37pn.close()
                    
58rvar = rn.variables["relative_humidity"]
                    
59rh = DataFrame(rvar[:], index=date_range('18-Jun-1981 11:00:00', '01-Oct-2014', freq='H'), columns = r_list) # make pandas dataset
                    
60rn.close()
                    
                
anova.py https://gitlab.com/pooja043/Globus_Docker_2 | Python | 388 lines
                    
1import numpy as np
                    
2from scipy import stats
                    
3from pandas import DataFrame, Index
                    
4from statsmodels.formula.formulatools import (_remove_intercept_patsy,
                    
198            LVL = np.dot(np.dot(L1,robust_cov),L2.T)
                    
199            from scipy import linalg
                    
200            orth_compl,_ = linalg.qr(LVL)
                    
207            r = L1.shape[0]
                    
208        #from IPython.core.debugger import Pdb; Pdb().set_trace()
                    
209        if test == 'F':
                    
304    --------
                    
305    >>> import statsmodels.api as sm
                    
306    >>> from statsmodels.formula.api import ols
                    
367if __name__ == "__main__":
                    
368    import pandas
                    
369    from statsmodels.formula.api import ols
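
The module's doctest and __main__ block pair statsmodels' formula interface with anova_lm: fit an OLS model from a formula, then pass the fitted result to the ANOVA table builder. A tiny self-contained example of that flow:

    import pandas as pd
    import statsmodels.api as sm
    from statsmodels.formula.api import ols

    df = pd.DataFrame({"y": [1.0, 2.0, 1.5, 3.0, 2.5, 3.5, 2.0, 4.0, 3.0],
                       "group": list("aaabbbccc")})
    fit = ols("y ~ C(group)", data=df).fit()
    print(sm.stats.anova_lm(fit, typ=2))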
                    
                
synthetic.py https://gitlab.com/lbennett/zipline | Python | 257 lines
                    
1from itertools import product
                    
2from string import ascii_uppercase
                    
3
                    
4import pandas as pd
                    
5from pandas.tseries.offsets import MonthBegin
                    
6from six import iteritems
                    
7
                    
8from .futures import CME_CODE_TO_MONTH
                    
9
                    
                
classifier.py https://gitlab.com/SplatoonModdingHub/PTVS | Python | 290 lines
                    
39
                    
40from pandas import read_table
                    
41import numpy as np
                    
42import matplotlib.pyplot as plt
                    
43
                    
58    # pandas.read_excel instead of read_table
                    
59    #from pandas import read_excel
                    
60    #frame = read_excel(URL)
                    
63    # BlobService.get_blob_to_path() with read_table() or read_excel()
                    
64    #import azure.storage
                    
65    #service = azure.storage.BlobService(ACCOUNT_NAME, ACCOUNT_KEY)
                    
131    # Use 80% of the data for training; test against the rest
                    
132    from sklearn.cross_validation import train_test_split
                    
133    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
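
The last excerpt lines hold out 20% of the rows for testing. In current scikit-learn the same call lives in sklearn.model_selection (sklearn.cross_validation was removed in 0.20), so a present-day sketch reads:

    import numpy as np
    from sklearn.model_selection import train_test_split

    X = np.random.rand(50, 4)
    y = np.random.randint(0, 2, size=50)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=0)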
                    
                
Validation.py https://gitlab.com/gregas/Classwork | Python | 177 lines
                    
42except ImportError:
                    
43    sys.exit("Error : Numpy can not be imported or not installed.")
                    
44print
                    
47try:
                    
48    import matplotlib
                    
49    print "Matplotlib is installed and version is : ", matplotlib.__version__
                    
56try:
                    
57    import pandas
                    
58    print "Pandas is installed and the version used is : ", pandas.__version__
                    
60except ImportError:
                    
61    sys.exit("Error : Pandas can not be imported or not installed.")
                    
62print
                    
88except ImportError:
                    
89    sys.exit("Error : Setuptools can not be imported or not installed.")
                    
90print
                    
                
 
