
/src/java/org/apache/cassandra/service/AbstractCassandraDaemon.java

https://github.com/stephenc/cassandra
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.cassandra.service;

import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.collect.Iterables;
import org.apache.log4j.PropertyConfigurator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.concurrent.NamedThreadFactory;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ConfigurationException;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Directories;
import org.apache.cassandra.db.SystemTable;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.db.commitlog.CommitLog;
import org.apache.cassandra.utils.CLibrary;
import org.apache.cassandra.utils.Mx4jTool;
  46. /**
  47. * The <code>CassandraDaemon</code> is an abstraction for a Cassandra daemon
  48. * service, which defines not only a way to activate and deactivate it, but also
  49. * hooks into its lifecycle methods (see {@link #setup()}, {@link #start()},
  50. * {@link #stop()} and {@link #setup()}).
  51. *
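 * A concrete daemon is expected to be driven roughly as sketched below. The
 * subclass name and the empty server methods are placeholders for illustration
 * only; the real transport wiring lives in the concrete daemon implementations.
 *
 * <pre>
 * public class MyDaemon extends AbstractCassandraDaemon
 * {
 *     protected void startServer() { }  // bind and serve the RPC transport
 *     protected void stopServer() { }   // shut the RPC transport down
 *
 *     public static void main(String[] args)
 *     {
 *         new MyDaemon().activate();    // setup(), then start()
 *     }
 * }
 * </pre>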
 */
public abstract class AbstractCassandraDaemon implements CassandraDaemon
{
    /**
     * Initialize logging in such a way that it checks for config changes every 10 seconds.
     */
    public static void initLog4j()
    {
        if (System.getProperty("log4j.defaultInitOverride", "false").equalsIgnoreCase("true"))
        {
            String config = System.getProperty("log4j.configuration", "log4j-server.properties");
            URL configLocation = null;
            try
            {
                // try loading from a physical location first.
                configLocation = new URL(config);
            }
            catch (MalformedURLException ex)
            {
                // then try loading from the classpath.
                configLocation = AbstractCassandraDaemon.class.getClassLoader().getResource(config);
            }

            if (configLocation == null)
                throw new RuntimeException("Couldn't figure out log4j configuration: " + config);

            // Now convert URL to a filename
            String configFileName = null;
            try
            {
                // first try URL.getFile() which works for opaque URLs (file:foo) and paths without spaces
                configFileName = configLocation.getFile();
                File configFile = new File(configFileName);
                // then try alternative approach which works for all hierarchical URLs with or without spaces
                if (!configFile.exists())
                    configFileName = new File(configLocation.toURI()).getCanonicalPath();
            }
            catch (Exception e)
            {
                throw new RuntimeException("Couldn't convert log4j configuration location to a valid file", e);
            }
            PropertyConfigurator.configureAndWatch(configFileName, 10000);
            org.apache.log4j.Logger.getLogger(AbstractCassandraDaemon.class).info("Logging initialized");
        }
    }

    private static final Logger logger = LoggerFactory.getLogger(AbstractCassandraDaemon.class);

    static final AtomicInteger exceptions = new AtomicInteger();

    protected InetAddress listenAddr;
    protected int listenPort;
    protected volatile boolean isRunning = false;

    /**
     * This is a hook for concrete daemons to initialize themselves suitably.
     *
     * Subclasses should override this to finish the job (listening on ports, etc.)
     *
     * @throws IOException
     */
    protected void setup() throws IOException
    {
        logger.info("JVM vendor/version: {}/{}", System.getProperty("java.vm.name"), System.getProperty("java.version"));
        logger.info("Heap size: {}/{}", Runtime.getRuntime().totalMemory(), Runtime.getRuntime().maxMemory());
        logger.info("Classpath: {}", System.getProperty("java.class.path"));
        CLibrary.tryMlockall();

        listenPort = DatabaseDescriptor.getRpcPort();
        listenAddr = DatabaseDescriptor.getRpcAddress();
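
        // Install a process-wide handler: count and log every uncaught exception,
        // and exit the JVM if an OutOfMemoryError is found anywhere in the cause chain.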
        Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler()
        {
            public void uncaughtException(Thread t, Throwable e)
            {
                exceptions.incrementAndGet();
                logger.error("Exception in thread " + t, e);
                for (Throwable e2 = e; e2 != null; e2 = e2.getCause())
                {
                    // some code, like FileChannel.map, will wrap an OutOfMemoryError in another exception
                    if (e2 instanceof OutOfMemoryError)
                        System.exit(100);
                }
            }
        });

        // check all directories (data, commitlog, saved cache) for existence and permission
        Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()),
                                                 Arrays.asList(new String[] { DatabaseDescriptor.getCommitLogLocation(),
                                                                              DatabaseDescriptor.getSavedCachesLocation() }));
        for (String dataDir : dirs)
        {
            logger.debug("Checking directory {}", dataDir);
            File dir = new File(dataDir);
            if (dir.exists())
                assert dir.isDirectory() && dir.canRead() && dir.canWrite() && dir.canExecute()
                    : String.format("Directory %s is not accessible.", dataDir);
        }

        // Migrate sstables from pre-#2749 to the correct location
        if (Directories.sstablesNeedsMigration())
            Directories.migrateSSTables();

        if (CacheService.instance == null) // should never happen
            throw new RuntimeException("Failed to initialize Cache Service.");

        // Check the system table to keep the user from shooting themselves in the foot by changing
        // the partitioner, cluster name, etc. We do a one-off scrub of the system table first; we
        // can't load the list of the rest of the tables until the system table is opened.
        for (CFMetaData cfm : Schema.instance.getTableMetaData(Table.SYSTEM_TABLE).values())
            ColumnFamilyStore.scrubDataDirectories(Table.SYSTEM_TABLE, cfm.cfName);
        try
        {
            SystemTable.checkHealth();
        }
        catch (ConfigurationException e)
        {
            logger.error("Fatal exception during initialization", e);
            System.exit(100);
        }

        // load keyspace descriptions.
        try
        {
            DatabaseDescriptor.loadSchemas();
        }
        catch (IOException e)
        {
            logger.error("Fatal exception during initialization", e);
            System.exit(100);
        }

        // clean up debris in the rest of the tables
        for (String table : Schema.instance.getTables())
        {
            for (CFMetaData cfm : Schema.instance.getTableMetaData(table).values())
            {
                ColumnFamilyStore.scrubDataDirectories(table, cfm.cfName);
            }
        }

        // initialize keyspaces
        for (String table : Schema.instance.getTables())
        {
            if (logger.isDebugEnabled())
                logger.debug("opening keyspace " + table);
            Table.open(table);
        }

        if (CacheService.instance.keyCache.size() > 0)
            logger.info("completed pre-loading ({} keys) key cache.", CacheService.instance.keyCache.size());

        if (CacheService.instance.rowCache.size() > 0)
            logger.info("completed pre-loading ({} keys) row cache.", CacheService.instance.rowCache.size());

        try
        {
            GCInspector.instance.start();
        }
        catch (Throwable t)
        {
            logger.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
        }

        // replay the log if necessary
        CommitLog.instance.recover();

        SystemTable.finishStartup();

        // start server internals
        StorageService.instance.registerDaemon(this);
        try
        {
            StorageService.instance.initServer();
        }
        catch (ConfigurationException e)
        {
            logger.error("Fatal configuration error", e);
            System.err.println(e.getMessage() + "\nFatal configuration error; unable to start server. See log for stacktrace.");
            System.exit(1);
        }
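
        // If the mx4j libraries are present on the classpath this exposes JMX over
        // HTTP; when they are absent the call is a no-op.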
        Mx4jTool.maybeLoad();
    }

    /**
     * Initialize the Cassandra Daemon based on the given <a
     * href="http://commons.apache.org/daemon/jsvc.html">Commons
     * Daemon</a>-specific arguments. To clarify, this is a hook for JSVC.
     *
     * @param arguments the arguments passed in from JSVC
     * @throws IOException
     */
    public void init(String[] arguments) throws IOException
    {
        setup();
    }

    /**
     * Start the Cassandra Daemon, assuming that it has already been
     * initialized via {@link #init(String[])}.
     *
     * Hook for JSVC
     */
    public void start()
    {
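        // The RPC server starts by default; launching with -Dcassandra.start_rpc=false
        // leaves it stopped until it is started explicitly (for example over JMX).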
        if (Boolean.parseBoolean(System.getProperty("cassandra.start_rpc", "true")))
        {
            startRPCServer();
        }
        else
        {
            logger.info("Not starting RPC server as requested. Use JMX (StorageService->startRPCServer()) to start it");
        }
    }

    /**
     * Stop the daemon, ideally in an idempotent manner.
     *
     * Hook for JSVC
     */
    public void stop()
    {
        // this doesn't entirely shut down Cassandra, just the RPC server.
        // jsvc takes care of taking the rest down
        logger.info("Cassandra shutting down...");
        stopRPCServer();
    }

    /**
     * Start the underlying RPC server in an idempotent manner.
     */
    public void startRPCServer()
    {
        if (!isRunning)
        {
            startServer();
            isRunning = true;
        }
    }

    /**
     * Stop the underlying RPC server in an idempotent manner.
     */
    public void stopRPCServer()
    {
        if (isRunning)
        {
            stopServer();
            isRunning = false;
        }
    }

    /**
     * Returns whether the underlying RPC server is running.
     */
    public boolean isRPCServerRunning()
    {
        return isRunning;
    }

    /**
     * Start the underlying RPC server.
     * This method should be able to restart a server stopped through stopServer().
     * Should throw a RuntimeException if the server cannot be started.
     */
    protected abstract void startServer();

    /**
     * Stop the underlying RPC server.
     * This method should be able to stop a server started through startServer().
     * Should throw a RuntimeException if the server cannot be stopped.
     */
    protected abstract void stopServer();

    /**
     * Clean up all resources obtained during the lifetime of the daemon. This
     * is a hook for JSVC.
     */
    public void destroy()
    {}

    /**
     * A convenience method to initialize and start the daemon in one shot.
     */
    public void activate()
    {
        String pidFile = System.getProperty("cassandra-pidfile");

        try
        {
            setup();
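
            // If a pid file was specified via -Dcassandra-pidfile, arrange for it
            // to be deleted when the JVM exits.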
            if (pidFile != null)
            {
                new File(pidFile).deleteOnExit();
            }
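
            // Unless started in the foreground (-Dcassandra-foreground), close stdout
            // and stderr so the daemon detaches cleanly from the console.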
            if (System.getProperty("cassandra-foreground") == null)
            {
                System.out.close();
                System.err.close();
            }

            start();
        }
        catch (Throwable e)
        {
            logger.error("Exception encountered during startup", e);

            // try to warn user on stdout too, if we haven't already detached
            e.printStackTrace();
            System.out.println("Exception encountered during startup: " + e.getMessage());

            System.exit(3);
        }
    }

    /**
     * A convenience method to stop and destroy the daemon in one shot.
     */
    public void deactivate()
    {
        stop();
        destroy();
    }

    /**
     * A subclass of Java's ThreadPoolExecutor which performs ClientState cleanup
     * after each task.
     *
     * (Note that the tasks being executed perform their own while-command-process
     * loop until the client disconnects.)
     */
    public static class CleaningThreadPool extends ThreadPoolExecutor
    {
        private final ThreadLocal<ClientState> state;

        public CleaningThreadPool(ThreadLocal<ClientState> state, int minWorkerThreads, int maxWorkerThreads)
        {
            super(minWorkerThreads, maxWorkerThreads, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new NamedThreadFactory("Thrift"));
            this.state = state;
        }
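
        // Runs after every client task finishes (i.e. the connection is closed):
        // surface any exception the task died with, then log out the thread-local
        // ClientState so the pooled worker thread starts its next connection clean.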
        @Override
        protected void afterExecute(Runnable r, Throwable t)
        {
            super.afterExecute(r, t);
            DebuggableThreadPoolExecutor.logExceptionsAfterExecute(r, t);
            state.get().logout();
        }
    }
}