/sklearn/datasets/kddcup99.py

http://github.com/scikit-learn/scikit-learn · Python · 366 lines · 302 code · 42 blank · 22 comment · 35 complexity · 759dc2c80bf14f58e796b0eff37d7c1b MD5 · raw file

  1. """KDDCUP 99 dataset.
  2. A classic dataset for anomaly detection.
  3. The dataset page is available from UCI Machine Learning Repository
  4. https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
  5. """
  6. import sys
  7. import errno
  8. from gzip import GzipFile
  9. from io import BytesIO
  10. import logging
  11. import os
  12. from os.path import exists, join
  13. try:
  14. from urllib2 import urlopen
  15. except ImportError:
  16. from urllib.request import urlopen
  17. import numpy as np
  18. from .base import get_data_home
  19. from .base import Bunch
  20. from ..externals import joblib, six
  21. from ..utils import check_random_state
  22. from ..utils import shuffle as shuffle_method
# Download locations on the UCI Machine Learning Repository:
# URL10 points at the 10%-sample of the dataset, URL at the full dataset.
URL10 = ('http://archive.ics.uci.edu/ml/'
         'machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz')

URL = ('http://archive.ics.uci.edu/ml/'
       'machine-learning-databases/kddcup99-mld/kddcup.data.gz')

# NOTE(review): this grabs the *root* logger; the usual convention is
# logging.getLogger(__name__) -- left unchanged to preserve behavior.
logger = logging.getLogger()
def fetch_kddcup99(subset=None, shuffle=False, random_state=None,
                   percent10=True, download_if_missing=True):
    """Load and return the kddcup 99 dataset (classification).

    The KDD Cup '99 dataset was created by processing the tcpdump portions
    of the 1998 DARPA Intrusion Detection System (IDS) Evaluation dataset,
    created by MIT Lincoln Lab [1]. The artificial data was generated using
    a closed network and hand-injected attacks to produce a large number of
    different types of attack with normal activity in the background.

    As the initial goal was to produce a large training set for supervised
    learning algorithms, there is a large proportion (80.1%) of abnormal
    data which is unrealistic in real world, and inappropriate for
    unsupervised anomaly detection which aims at detecting 'abnormal' data,
    ie:

    1) qualitatively different from normal data
    2) in large minority among the observations.

    We thus transform the KDD Data set into two different data sets: SA and
    SF.

    - SA is obtained by simply selecting all the normal data, and a small
      proportion of abnormal data to give an anomaly proportion of 1%.

    - SF is obtained as in [2] by simply picking up the data whose attribute
      logged_in is positive, thus focusing on the intrusion attack, which
      gives a proportion of 0.3% of attack.

    - http and smtp are two subsets of SF corresponding with third feature
      equal to 'http' (resp. to 'smtp').

    General KDD structure :

    ================ ==========================================
    Samples total    4898431
    Dimensionality   41
    Features         discrete (int) or continuous (float)
    Targets          str, 'normal.' or name of the anomaly type
    ================ ==========================================

    SA structure :

    ================ ==========================================
    Samples total    976158
    Dimensionality   41
    Features         discrete (int) or continuous (float)
    Targets          str, 'normal.' or name of the anomaly type
    ================ ==========================================

    SF structure :

    ================ ==========================================
    Samples total    699691
    Dimensionality   4
    Features         discrete (int) or continuous (float)
    Targets          str, 'normal.' or name of the anomaly type
    ================ ==========================================

    http structure :

    ================ ==========================================
    Samples total    619052
    Dimensionality   3
    Features         discrete (int) or continuous (float)
    Targets          str, 'normal.' or name of the anomaly type
    ================ ==========================================

    smtp structure :

    ================ ==========================================
    Samples total    95373
    Dimensionality   3
    Features         discrete (int) or continuous (float)
    Targets          str, 'normal.' or name of the anomaly type
    ================ ==========================================

    Parameters
    ----------
    subset : None, 'SA', 'SF', 'http', 'smtp'
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    random_state : int, RandomState instance or None, optional (default=None)
        Random state for shuffling the dataset.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    download_if_missing : bool, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn and 'target', the regression target for
        each sample.

    References
    ----------
    .. [1] Analysis and Results of the 1999 DARPA Off-Line Intrusion
           Detection Evaluation Richard Lippmann, Joshua W. Haines,
           David J. Fried, Jonathan Korba, Kumar Das

    .. [2] A Geometric Framework for Unsupervised Anomaly Detection:
           Detecting Intrusions in Unlabeled Data (2002) by Eleazar Eskin,
           Andrew Arnold, Michael Prerau, Leonid Portnoy, Sal Stolfo
    """
    kddcup99 = _fetch_brute_kddcup99(shuffle=shuffle, percent10=percent10,
                                     download_if_missing=download_if_missing)

    data = kddcup99.data
    target = kddcup99.target

    if subset == 'SA':
        # Keep every normal sample, plus 3377 attack samples drawn with
        # replacement, which yields an anomaly proportion of about 1%.
        s = target == b'normal.'
        t = np.logical_not(s)
        normal_samples = data[s, :]
        normal_targets = target[s]
        abnormal_samples = data[t, :]
        abnormal_targets = target[t]

        n_samples_abnormal = abnormal_samples.shape[0]
        # selected abnormal samples:
        random_state = check_random_state(random_state)
        r = random_state.randint(0, n_samples_abnormal, 3377)
        abnormal_samples = abnormal_samples[r]
        abnormal_targets = abnormal_targets[r]

        data = np.r_[normal_samples, abnormal_samples]
        target = np.r_[normal_targets, abnormal_targets]

    if subset == 'SF' or subset == 'http' or subset == 'smtp':
        # select all samples with positive logged_in attribute
        # (column 11 per the dtype list in _fetch_brute_kddcup99), then drop
        # that column.  Columns 0, 4 and 5 used below (duration, src_bytes,
        # dst_bytes) come before column 11, so their indices are unaffected.
        s = data[:, 11] == 1
        data = np.c_[data[s, :11], data[s, 12:]]
        target = target[s]

        # Log-transform the heavy-tailed count features; +0.1 keeps zero
        # counts finite.
        data[:, 0] = np.log((data[:, 0] + 0.1).astype(float))
        data[:, 4] = np.log((data[:, 4] + 0.1).astype(float))
        data[:, 5] = np.log((data[:, 5] + 0.1).astype(float))

        if subset == 'http':
            # Restrict to the 'http' service (column 2) and keep the three
            # transformed numeric features.
            s = data[:, 2] == b'http'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]

        if subset == 'smtp':
            # Restrict to the 'smtp' service (column 2) and keep the three
            # transformed numeric features.
            s = data[:, 2] == b'smtp'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]

        if subset == 'SF':
            # SF keeps the service column as a (byte-string) feature too.
            data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]

    return Bunch(data=data, target=target)
  159. def _fetch_brute_kddcup99(subset=None, data_home=None,
  160. download_if_missing=True, random_state=None,
  161. shuffle=False, percent10=False):
  162. """Load the kddcup99 dataset, downloading it if necessary.
  163. Parameters
  164. ----------
  165. subset : None, 'SA', 'SF', 'http', 'smtp'
  166. To return the corresponding classical subsets of kddcup 99.
  167. If None, return the entire kddcup 99 dataset.
  168. data_home : string, optional
  169. Specify another download and cache folder for the datasets. By default
  170. all scikit learn data is stored in '~/scikit_learn_data' subfolders.
  171. download_if_missing : boolean, default=True
  172. If False, raise a IOError if the data is not locally available
  173. instead of trying to download the data from the source site.
  174. random_state : int, RandomState instance or None, optional (default=None)
  175. Random state for shuffling the dataset.
  176. If int, random_state is the seed used by the random number generator;
  177. If RandomState instance, random_state is the random number generator;
  178. If None, the random number generator is the RandomState instance used
  179. by `np.random`.
  180. shuffle : bool, default=False
  181. Whether to shuffle dataset.
  182. percent10 : bool, default=False
  183. Whether to load only 10 percent of the data.
  184. Returns
  185. -------
  186. dataset : dict-like object with the following attributes:
  187. dataset.data : numpy array of shape (494021, 41)
  188. Each row corresponds to the 41 features in the dataset.
  189. dataset.target : numpy array of shape (494021,)
  190. Each value corresponds to one of the 21 attack types or to the
  191. label 'normal.'.
  192. dataset.DESCR : string
  193. Description of the kddcup99 dataset.
  194. """
  195. data_home = get_data_home(data_home=data_home)
  196. if sys.version_info[0] == 3:
  197. # The zlib compression format use by joblib is not compatible when
  198. # switching from Python 2 to Python 3, let us use a separate folder
  199. # under Python 3:
  200. dir_suffix = "-py3"
  201. else:
  202. # Backward compat for Python 2 users
  203. dir_suffix = ""
  204. if percent10:
  205. kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
  206. else:
  207. kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
  208. samples_path = join(kddcup_dir, "samples")
  209. targets_path = join(kddcup_dir, "targets")
  210. available = exists(samples_path)
  211. if download_if_missing and not available:
  212. _mkdirp(kddcup_dir)
  213. URL_ = URL10 if percent10 else URL
  214. logger.warning("Downloading %s" % URL_)
  215. f = BytesIO(urlopen(URL_).read())
  216. dt = [('duration', int),
  217. ('protocol_type', 'S4'),
  218. ('service', 'S11'),
  219. ('flag', 'S6'),
  220. ('src_bytes', int),
  221. ('dst_bytes', int),
  222. ('land', int),
  223. ('wrong_fragment', int),
  224. ('urgent', int),
  225. ('hot', int),
  226. ('num_failed_logins', int),
  227. ('logged_in', int),
  228. ('num_compromised', int),
  229. ('root_shell', int),
  230. ('su_attempted', int),
  231. ('num_root', int),
  232. ('num_file_creations', int),
  233. ('num_shells', int),
  234. ('num_access_files', int),
  235. ('num_outbound_cmds', int),
  236. ('is_host_login', int),
  237. ('is_guest_login', int),
  238. ('count', int),
  239. ('srv_count', int),
  240. ('serror_rate', float),
  241. ('srv_serror_rate', float),
  242. ('rerror_rate', float),
  243. ('srv_rerror_rate', float),
  244. ('same_srv_rate', float),
  245. ('diff_srv_rate', float),
  246. ('srv_diff_host_rate', float),
  247. ('dst_host_count', int),
  248. ('dst_host_srv_count', int),
  249. ('dst_host_same_srv_rate', float),
  250. ('dst_host_diff_srv_rate', float),
  251. ('dst_host_same_src_port_rate', float),
  252. ('dst_host_srv_diff_host_rate', float),
  253. ('dst_host_serror_rate', float),
  254. ('dst_host_srv_serror_rate', float),
  255. ('dst_host_rerror_rate', float),
  256. ('dst_host_srv_rerror_rate', float),
  257. ('labels', 'S16')]
  258. DT = np.dtype(dt)
  259. file_ = GzipFile(fileobj=f, mode='r')
  260. Xy = []
  261. for line in file_.readlines():
  262. if six.PY3:
  263. line = line.decode()
  264. Xy.append(line.replace('\n', '').split(','))
  265. file_.close()
  266. print('extraction done')
  267. Xy = np.asarray(Xy, dtype=object)
  268. for j in range(42):
  269. Xy[:, j] = Xy[:, j].astype(DT[j])
  270. X = Xy[:, :-1]
  271. y = Xy[:, -1]
  272. # XXX bug when compress!=0:
  273. # (error: 'Incorrect data length while decompressing[...] the file
  274. # could be corrupted.')
  275. joblib.dump(X, samples_path, compress=0)
  276. joblib.dump(y, targets_path, compress=0)
  277. try:
  278. X, y
  279. except NameError:
  280. X = joblib.load(samples_path)
  281. y = joblib.load(targets_path)
  282. if shuffle:
  283. X, y = shuffle_method(X, y, random_state=random_state)
  284. return Bunch(data=X, target=y, DESCR=__doc__)
  285. def _mkdirp(d):
  286. """Ensure directory d exists (like mkdir -p on Unix)
  287. No guarantee that the directory is writable.
  288. """
  289. try:
  290. os.makedirs(d)
  291. except OSError as e:
  292. if e.errno != errno.EEXIST:
  293. raise