
/src/java/org/apache/cassandra/db/CounterColumn.java

https://github.com/thepaul/cassandra
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.cassandra.db;

import java.io.IOException;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.util.Collection;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.db.context.IContext.ContextRelationship;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.exceptions.OverloadedException;
import org.apache.cassandra.exceptions.RequestExecutionException;
import org.apache.cassandra.io.IColumnSerializer;
import org.apache.cassandra.io.util.DataOutputBuffer;
import org.apache.cassandra.service.IWriteResponseHandler;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.utils.*;
/**
 * A column that represents a partitioned counter.
 */
public class CounterColumn extends Column
{
    private static final Logger logger = LoggerFactory.getLogger(CounterColumn.class);

    protected static final CounterContext contextManager = CounterContext.instance();
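
    // Timestamp of the latest delete this counter has already taken into
    // account (see reconcile()): a tombstone at or before this timestamp no
    // longer trumps the merged value.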
    private final long timestampOfLastDelete;

    public CounterColumn(ByteBuffer name, long value, long timestamp)
    {
        this(name, contextManager.create(value, HeapAllocator.instance), timestamp);
    }

    public CounterColumn(ByteBuffer name, long value, long timestamp, long timestampOfLastDelete)
    {
        this(name, contextManager.create(value, HeapAllocator.instance), timestamp, timestampOfLastDelete);
    }

    public CounterColumn(ByteBuffer name, ByteBuffer value, long timestamp)
    {
        this(name, value, timestamp, Long.MIN_VALUE);
    }

    public CounterColumn(ByteBuffer name, ByteBuffer value, long timestamp, long timestampOfLastDelete)
    {
        super(name, value, timestamp);
        this.timestampOfLastDelete = timestampOfLastDelete;
    }
    public static CounterColumn create(ByteBuffer name, ByteBuffer value, long timestamp, long timestampOfLastDelete, IColumnSerializer.Flag flag)
    {
        // A negative #elt (shard count) marks a context whose local deltas must
        // be cleared: clear them for any column arriving from a remote node, and
        // for local columns explicitly marked that way.
        short count = value.getShort(value.position());
        if (flag == IColumnSerializer.Flag.FROM_REMOTE || (flag == IColumnSerializer.Flag.LOCAL && count < 0))
            value = CounterContext.instance().clearAllDelta(value);
        return new CounterColumn(name, value, timestamp, timestampOfLastDelete);
    }
    public long timestampOfLastDelete()
    {
        return timestampOfLastDelete;
    }

    public long total()
    {
        return contextManager.total(value);
    }

    @Override
    public int dataSize()
    {
        /*
         * A counter column adds to a Column:
         *  + 8 bytes for timestampOfLastDelete
         */
        return super.dataSize() + TypeSizes.NATIVE.sizeof(timestampOfLastDelete);
    }

    @Override
    public int serializedSize(TypeSizes typeSizes)
    {
        return super.serializedSize(typeSizes) + typeSizes.sizeof(timestampOfLastDelete);
    }
    @Override
    public IColumn diff(IColumn column)
    {
        assert column instanceof CounterColumn : "Wrong class type.";

        if (timestamp() < column.timestamp())
            return column;
        if (timestampOfLastDelete() < ((CounterColumn)column).timestampOfLastDelete())
            return column;
        ContextRelationship rel = contextManager.diff(column.value(), value());
        if (ContextRelationship.GREATER_THAN == rel || ContextRelationship.DISJOINT == rel)
            return column;
        return null;
    }
    /*
     * We have to special case digest creation for counter column because
     * we don't want to include the information about which shard of the
     * context is a delta or not, since this information differs from node to
     * node.
     */
    @Override
    public void updateDigest(MessageDigest digest)
    {
        digest.update(name.duplicate());
        // We don't take the deltas into account in a digest
        contextManager.updateDigest(digest, value);
        DataOutputBuffer buffer = new DataOutputBuffer();
        try
        {
            buffer.writeLong(timestamp);
            buffer.writeByte(serializationFlags());
            buffer.writeLong(timestampOfLastDelete);
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        digest.update(buffer.getData(), 0, buffer.getLength());
    }
    @Override
    public IColumn reconcile(IColumn column, Allocator allocator)
    {
        assert (column instanceof CounterColumn) || (column instanceof DeletedColumn) : "Wrong class type.";

        if (column.isMarkedForDelete()) // live + tombstone: track last tombstone
        {
            if (timestamp() < column.timestamp()) // live < tombstone
            {
                return column;
            }
            // live last delete >= tombstone
            if (timestampOfLastDelete() >= column.timestamp())
            {
                return this;
            }
            // live last delete < tombstone
            return new CounterColumn(name(), value(), timestamp(), column.timestamp());
        }
        // live < live last delete
        if (timestamp() < ((CounterColumn)column).timestampOfLastDelete())
            return column;
        // live last delete > live
        if (timestampOfLastDelete() > column.timestamp())
            return this;
        // live + live: merge clocks; update value
        return new CounterColumn(
            name(),
            contextManager.merge(value(), column.value(), allocator),
            Math.max(timestamp(), column.timestamp()),
            Math.max(timestampOfLastDelete(), ((CounterColumn)column).timestampOfLastDelete()));
    }
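
    // Note that, unlike regular columns, two live counters are never resolved
    // by picking the one with the higher timestamp: their contexts are merged
    // shard by shard, which is what makes concurrent increments commutative.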
    @Override
    public boolean equals(Object o)
    {
        // super.equals() returns false if o is not a CounterColumn
        return super.equals(o) && timestampOfLastDelete == ((CounterColumn)o).timestampOfLastDelete;
    }

    @Override
    public int hashCode()
    {
        int result = super.hashCode();
        result = 31 * result + (int)(timestampOfLastDelete ^ (timestampOfLastDelete >>> 32));
        return result;
    }
    @Override
    public IColumn localCopy(ColumnFamilyStore cfs)
    {
        return new CounterColumn(cfs.internOrCopy(name, HeapAllocator.instance), ByteBufferUtil.clone(value), timestamp, timestampOfLastDelete);
    }

    @Override
    public IColumn localCopy(ColumnFamilyStore cfs, Allocator allocator)
    {
        return new CounterColumn(cfs.internOrCopy(name, allocator), allocator.clone(value), timestamp, timestampOfLastDelete);
    }

    @Override
    public String getString(AbstractType<?> comparator)
    {
        StringBuilder sb = new StringBuilder();
        sb.append(comparator.getString(name));
        sb.append(":");
        sb.append(isMarkedForDelete());
        sb.append(":");
        sb.append(contextManager.toString(value));
        sb.append("@");
        sb.append(timestamp());
        sb.append("!");
        sb.append(timestampOfLastDelete);
        return sb.toString();
    }
    @Override
    public int serializationFlags()
    {
        return ColumnSerializer.COUNTER_MASK;
    }

    @Override
    public void validateFields(CFMetaData metadata) throws MarshalException
    {
        validateName(metadata);
        // We cannot use the value validator as we would for other columns: the
        // CounterColumnType validates a long, which is not the internal
        // representation of counters.
        contextManager.validateContext(value());
    }
    /**
     * Checks if a given counterId is found in this CounterColumn's context.
     */
    public boolean hasCounterId(CounterId id)
    {
        return contextManager.hasCounterId(value(), id);
    }
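
    /*
     * The two helpers below implement the two phases of old-shard removal
     * documented on mergeAndRemoveOldShards(): computeOldShardMerger() builds
     * a context that folds old local shards into the current one (phase one),
     * and removeOldShards() drops shards that have been nullified for longer
     * than gcBefore (phase two).
     */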
    private CounterColumn computeOldShardMerger(int mergeBefore)
    {
        ByteBuffer bb = contextManager.computeOldShardMerger(value(), CounterId.getOldLocalCounterIds(), mergeBefore);
        if (bb == null)
            return null;
        else
            return new CounterColumn(name(), bb, timestamp(), timestampOfLastDelete);
    }

    private CounterColumn removeOldShards(int gcBefore)
    {
        ByteBuffer bb = contextManager.removeOldShards(value(), gcBefore);
        if (bb == value())
            return this;
        else
            return new CounterColumn(name(), bb, timestamp(), timestampOfLastDelete);
    }
    public static void mergeAndRemoveOldShards(DecoratedKey key, ColumnFamily cf, int gcBefore, int mergeBefore)
    {
        mergeAndRemoveOldShards(key, cf, gcBefore, mergeBefore, true);
    }
    /**
     * There are two phases to the removal of old shards.
     * First phase: we merge the old shard's value into the current shard and
     * 'nullify' the old one. We then send the counter context, with the old
     * shard nullified, to all other replicas.
     * Second phase: once an old shard has been nullified for longer than
     * gc_grace (so that all other replicas are guaranteed to have seen the
     * merge), we simply remove that old shard from the context (its value is 0).
     * This method does both phases.
     * (Note that the sendToOtherReplica flag is here only to facilitate
     * testing. It should be true in real code, so prefer the method above.)
     */
    public static void mergeAndRemoveOldShards(DecoratedKey key, ColumnFamily cf, int gcBefore, int mergeBefore, boolean sendToOtherReplica)
    {
        ColumnFamily remoteMerger = null;

        if (!cf.isSuper())
        {
            for (IColumn c : cf)
            {
                if (!(c instanceof CounterColumn))
                    continue;
                CounterColumn cc = (CounterColumn) c;
                CounterColumn shardMerger = cc.computeOldShardMerger(mergeBefore);
                CounterColumn merged = cc;
                if (shardMerger != null)
                {
                    merged = (CounterColumn) cc.reconcile(shardMerger);
                    if (remoteMerger == null)
                        remoteMerger = cf.cloneMeShallow();
                    remoteMerger.addColumn(merged);
                }
                CounterColumn cleaned = merged.removeOldShards(gcBefore);
                if (cleaned != cc)
                    cf.replace(cc, cleaned);
            }
        }
        else
        {
            for (IColumn col : cf)
            {
                SuperColumn c = (SuperColumn) col;
                for (IColumn subColumn : c.getSubColumns())
                {
                    if (!(subColumn instanceof CounterColumn))
                        continue;
                    CounterColumn cc = (CounterColumn) subColumn;
                    CounterColumn shardMerger = cc.computeOldShardMerger(mergeBefore);
                    CounterColumn merged = cc;
                    if (shardMerger != null)
                    {
                        merged = (CounterColumn) cc.reconcile(shardMerger);
                        if (remoteMerger == null)
                            remoteMerger = cf.cloneMeShallow();
                        remoteMerger.addColumn(c.name(), merged);
                    }
                    CounterColumn cleaned = merged.removeOldShards(gcBefore);
                    if (cleaned != subColumn)
                        c.replace(subColumn, cleaned);
                }
            }
        }

        if (remoteMerger != null && sendToOtherReplica)
        {
            try
            {
                sendToOtherReplica(key, remoteMerger);
            }
            catch (Exception e)
            {
                logger.error("Error while sending shard merger mutation to remote endpoints", e);
            }
        }
    }
    public IColumn markDeltaToBeCleared()
    {
        return new CounterColumn(name, contextManager.markDeltaToBeCleared(value), timestamp, timestampOfLastDelete);
    }
    private static void sendToOtherReplica(DecoratedKey key, ColumnFamily cf) throws RequestExecutionException, IOException
    {
        RowMutation rm = new RowMutation(cf.metadata().ksName, key.key);
        rm.add(cf);

        final InetAddress local = FBUtilities.getBroadcastAddress();
        String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getDatacenter(local);

        StorageProxy.performWrite(rm, ConsistencyLevel.ANY, localDataCenter, new StorageProxy.WritePerformer()
        {
            public void apply(IMutation mutation, Collection<InetAddress> targets, IWriteResponseHandler responseHandler, String localDataCenter, ConsistencyLevel consistency_level)
            throws IOException, OverloadedException
            {
                // We should only send to the remote replicas, not the local one
                targets.remove(local);
                // Fake a local response to be a good lad, but we won't wait on the responseHandler
                responseHandler.response(null);
                StorageProxy.sendToHintedEndpoints((RowMutation) mutation, targets, responseHandler, localDataCenter, consistency_level);
            }
        }, null);

        // we don't wait for answers
    }
}
  344. }