
/notify_user/pymodules/python2.7/lib/python/cnvlib/fix.py

https://gitlab.com/pooja043/Globus_Docker_4
  1. """Supporting functions for the 'fix' command."""
  2. from __future__ import absolute_import, division, print_function
  3. import logging
  4. import numpy as np
  5. import pandas as pd
  6. from . import params, smoothing


def load_adjust_coverages(pset, ref_pset,
                          fix_gc, fix_edge, fix_rmask):
    """Load and filter probe coverages; correct using reference and GC."""
    if 'gc' in pset:
        # Don't choke on Picard-derived files that have the GC column
        pset = pset.drop_extra_columns()

    # No corrections needed if there are no data rows (e.g. no antitargets)
    if not len(pset):
        return pset

    ref_matched = match_ref_to_probes(ref_pset, pset)

    # Drop probes that had poor coverage in the pooled reference
    ok_cvg_indices = ~mask_bad_probes(ref_matched)
    logging.info("Keeping %d of %d bins", sum(ok_cvg_indices),
                 len(ref_matched))
    pset = pset[ok_cvg_indices]
    ref_matched = ref_matched[ok_cvg_indices]

    # Apply corrections for known systematic biases in coverage
    pset.center_all()
    if fix_gc:
        if 'gc' in ref_matched:
            logging.info("Correcting for GC bias...")
            pset = center_by_window(pset, .1, ref_matched['gc'])
        else:
            logging.warning("WARNING: Skipping correction for GC bias")
    if fix_edge:
        logging.info("Correcting for density bias...")
        edge_bias = get_edge_bias(pset, params.INSERT_SIZE)
        pset = center_by_window(pset, .1, edge_bias)
    if fix_rmask:
        if 'rmask' in ref_matched:
            logging.info("Correcting for RepeatMasker bias...")
            pset = center_by_window(pset, .1, ref_matched['rmask'])
        else:
            logging.warning("WARNING: Skipping correction for RepeatMasker bias")

    # Normalize coverages according to the reference
    # (Subtract the reference log2 copy number to get the log2 ratio)
    pset.data['log2'] -= ref_matched['log2']
    pset.center_all()
    return apply_weights(pset, ref_matched)
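
# Illustrative usage sketch -- not part of the original module; the variable
# names are hypothetical. Given a sample's binned coverages and a pooled
# reference, both as CNVkit array objects, the whole correction pipeline is
# one call:
#
#     >>> cnarr = load_adjust_coverages(sample_cnarr, reference_cnarr,
#     ...                               fix_gc=True, fix_edge=True,
#     ...                               fix_rmask=True)
#
# The result holds bias-corrected log2 ratios plus a 'weight' column.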


def mask_bad_probes(probes):
    """Flag the probes with excessively low or inconsistent coverage.

    Returns a bool array where True indicates probes that failed the checks.
    """
    mask = ((probes['log2'] < params.MIN_REF_COVERAGE) |
            (probes['spread'] > params.MAX_REF_SPREAD))
    if 'rmask' in probes:
        mask |= (probes['rmask'] > params.MAX_REPEAT_FRACTION)
    return mask
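
# Sketch of the filter logic above, with hypothetical threshold values: if
# params.MIN_REF_COVERAGE were -5.0 and params.MAX_REF_SPREAD were 1.0, a
# reference bin with log2 = -7.2 (nearly no pooled coverage) or with
# spread = 1.4 (inconsistent across the reference samples) would come back
# True here and be dropped by load_adjust_coverages.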


def match_ref_to_probes(ref_pset, probes):
    """Filter the reference probes to match the target or antitarget probe set.
    """
    probes_labeled = probes.data.set_index(pd.Index(probes.coords()))
    ref_labeled = ref_pset.data.set_index(pd.Index(ref_pset.coords()))
    # Safety: refuse to proceed if either set has duplicated coordinates
    for dset, name in ((probes_labeled, "probe"),
                       (ref_labeled, "reference")):
        dupes = dset.index.duplicated()
        if dupes.any():
            raise ValueError("Duplicated genomic coordinates in " + name +
                             " set:\n" + "\n".join(map(str,
                                                       dset.index[dupes])))
    ref_matched = ref_labeled.reindex(index=probes_labeled.index)
    # Check for signs that the wrong reference was used
    num_missing = pd.isnull(ref_matched.start).sum()
    if num_missing > 0:
        raise ValueError("Reference is missing %d bins found in %s"
                         % (num_missing, probes.sample_id))
    return ref_pset.as_dataframe(ref_matched.reset_index(drop=True))
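
# Sketch of the alignment step, assuming coords() yields one
# (chromosome, start, end) tuple per bin: both frames are keyed on those
# tuples, so reindex() reorders the reference rows to match the sample's
# bins and leaves NaN placeholders (detected via the 'start' column) for
# any bin the reference lacks, e.g.:
#
#     >>> idx = pd.Index([('chr1', 0, 100), ('chr1', 100, 200)])
#     >>> ref_labeled.reindex(index=idx)  # NaN rows = bins not in reference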


def center_by_window(cnarr, fraction, sort_key):
    """Smooth out biases according to the trait specified by sort_key.

    E.g. correct GC-biased probes by windowed averaging across similar-GC
    probes; or for similar interval sizes.
    """
    # Separate neighboring probes that could have the same key
    # (to avoid re-centering actual CNV regions -- only want an independently
    # sampled subset of presumably overall-CN-neutral probes)
    df = cnarr.data.reset_index(drop=True)
    shuffle_order = np.random.permutation(df.index)
    df = df.reindex(shuffle_order)
    # Apply the same shuffling to the key array as to the target probe set
    assert isinstance(sort_key, (np.ndarray, pd.Series))
    sort_key = sort_key[shuffle_order]
    # Sort the data according to the specified parameter
    order = np.argsort(sort_key, kind='mergesort')
    df = df.iloc[order]
    biases = smoothing.rolling_median(df['log2'], fraction)
    # biases = smoothing.smoothed(df['log2'], fraction)
    df['log2'] -= biases
    fixarr = cnarr.as_dataframe(df)
    fixarr.sort()
    return fixarr
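
# How the correction works, assuming smoothing.rolling_median(values,
# fraction) returns a same-length moving median whose window spans the
# given fraction of all points: after sorting bins by the key (e.g. GC
# content), each bin's local median log2 is its estimated bias at that key
# value, so subtracting it flattens the trend. The shuffle beforehand
# breaks up genomic runs of equal-key bins so a real CNV segment is not
# mistaken for a bias and centered away.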


def get_edge_bias(cnarr, margin):
    """Quantify the "edge effect" of the target tile and its neighbors.

    The result is proportional to the change in the target's coverage due to
    these edge effects, i.e. the expected loss of coverage near the target
    edges and, if there are close neighboring tiles, gain of coverage due
    to "spill over" reads from the neighbor tiles.

    (This is not the actual change in coverage. This is just a tribute.)
    """
    output_by_chrom = []
    for _chrom, subarr in cnarr.by_chromosome():
        tile_starts = np.asarray(subarr['start'])
        tile_ends = np.asarray(subarr['end'])
        tgt_sizes = tile_ends - tile_starts
        # Calculate coverage loss at (both edges of) each tile
        losses = edge_losses(tgt_sizes, margin)
        # Find tiled intervals within a margin (+/- bp) of the given probe
        # (excluding the probe itself), then calculate the relative coverage
        # "gain" due to the neighbors, if any
        gap_sizes = tile_starts[1:] - tile_ends[:-1]
        ok_gaps_mask = (gap_sizes < margin)
        ok_gaps = gap_sizes[ok_gaps_mask]
        left_gains = edge_gains(tgt_sizes[1:][ok_gaps_mask], ok_gaps, margin)
        right_gains = edge_gains(tgt_sizes[:-1][ok_gaps_mask], ok_gaps,
                                 margin)
        gains = np.zeros(len(subarr))
        gains[np.concatenate([[False], ok_gaps_mask])] += left_gains
        gains[np.concatenate([ok_gaps_mask, [False]])] += right_gains
        output_by_chrom.append(gains - losses)
    return np.concatenate(output_by_chrom)
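
# Worked sketch: take three adjacent 200 bp tiles separated by 50 bp gaps,
# with margin (insert size) 250 bp. Each tile loses 0.6 of its relative
# coverage at its edges (see the worked numbers after edge_losses below),
# and the middle tile regains 0.2 from each neighbor's spill-over (see
# edge_gains), so its net edge bias is 0.2 + 0.2 - 0.6 = -0.2.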


def edge_losses(target_sizes, insert_size):
    """Calculate coverage losses at the edges of baited regions.

    Letting i = insert size and t = target size, the proportional loss of
    coverage near the two edges of the baited region (combined) is::

        i/2t

    If the "shoulders" extend outside the bait (t < i), reduce by::

        (i-t)^2 / 4it

    on each side, or (i-t)^2 / 2it total.
    """
    losses = insert_size / (2 * target_sizes)
    # Drop the shoulder part that would extend past the bait
    small_mask = (target_sizes < insert_size)
    t_small = target_sizes[small_mask]
    losses[small_mask] -= ((insert_size - t_small)**2
                           / (2 * insert_size * t_small))
    return losses
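
# Worked example of the formulas above: for t = 200 and i = 250, the edge
# loss is i/2t = 250/400 = 0.625; since t < i, the shoulders overhang the
# bait and remove another (i-t)^2 / 2it = 2500/100000 = 0.025, giving 0.6:
#
#     >>> edge_losses(np.array([200.0]), 250)  # -> array([0.6])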


def edge_gains(target_sizes, gap_sizes, insert_size):
    """Calculate coverage gain from neighboring baits' flanking reads.

    Letting i = insert size, t = target size, g = gap to neighboring bait,
    the gain of coverage due to a nearby bait, if g < i, is::

        (i-g)^2 / 4it

    If the neighbor flank extends beyond the target (t+g < i), reduce by::

        (i-t-g)^2 / 4it

    If a neighbor overlaps the target, treat it as adjacent (gap size 0).
    """
    if not (gap_sizes <= insert_size).all():
        raise ValueError("Gaps greater than insert size:\n" +
                         str(gap_sizes[gap_sizes > insert_size]))
    gap_sizes = np.maximum(0, gap_sizes)
    gains = ((insert_size - gap_sizes)**2
             / (4 * insert_size * target_sizes))
    # Drop the flank part that extends past this baited region
    past_other_side_mask = (target_sizes + gap_sizes < insert_size)
    g_past = gap_sizes[past_other_side_mask]
    t_past = target_sizes[past_other_side_mask]
    gains[past_other_side_mask] -= ((insert_size - t_past - g_past)**2
                                    / (4 * insert_size * t_past))
    return gains
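
# Worked example of the formulas above: for t = 200, g = 50, i = 250, the
# spill-over gain is (i-g)^2 / 4it = 200^2 / 200000 = 0.2, and t + g = i
# means the neighbor's flank stops exactly at the far edge, so nothing is
# subtracted:
#
#     >>> edge_gains(np.array([200.0]), np.array([50.0]), 250)  # -> array([0.2])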


def apply_weights(cnarr, ref_matched, epsilon=1e-4):
    """Calculate weights for each bin.

    Weights are derived from:

    - bin sizes
    - average bin coverage depths in the reference
    - the "spread" column of the reference.
    """
    # Relative bin sizes
    sizes = ref_matched['end'] - ref_matched['start']
    weights = sizes / sizes.max()
    if (np.abs(np.mod(ref_matched['log2'], 1)) > epsilon).any():
        # NB: Not used with a flat reference
        logging.info("Weighting bins by relative coverage depths in reference")
        # Penalize bins that deviate from expected coverage
        flat_cvgs = ref_matched.expect_flat_cvg()
        weights *= 2 ** -np.abs(ref_matched['log2'] - flat_cvgs)
    if (ref_matched['spread'] > epsilon).any():
        # NB: Not used with a flat or paired reference
        logging.info("Weighting bins by coverage spread in reference")
        # Inverse of variance, 0--1
        variances = ref_matched['spread'] ** 2
        invvars = 1.0 - (variances / variances.max())
        weights = (weights + invvars) / 2
    # Avoid 0-value bins -- CBS doesn't like these
    weights = np.maximum(weights, epsilon)
    return cnarr.add_columns(weight=weights)
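
# Worked sketch of the weighting above (numbers hypothetical): a bin of
# half-maximal size starts at weight 0.5; if its reference log2 sits one
# unit below the expected flat coverage, the depth term scales that by
# 2**-1 to 0.25; and if its spread-derived inverse variance is 0.75, the
# final weight is (0.25 + 0.75) / 2 = 0.5, floored at epsilon so CBS never
# sees a zero-weight bin.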