PageRenderTime 56ms CodeModel.GetById 19ms RepoModel.GetById 0ms app.codeStats 1ms

/src/coin/Symphony/tm_func.cpp

https://github.com/jinankjain/ogdf
C++ | 3610 lines | 2853 code | 361 blank | 396 comment | 753 complexity | 576e13ef396bf1259b1880890d7790fe MD5 | raw file
Possible License(s): GPL-2.0, GPL-3.0

Large files are truncated; click here to view the full file

  1. /*===========================================================================*/
  2. /* */
  3. /* This file is part of the SYMPHONY MILP Solver Framework. */
  4. /* */
  5. /* SYMPHONY was jointly developed by Ted Ralphs (ted@lehigh.edu) and */
  6. /* Laci Ladanyi (ladanyi@us.ibm.com). */
  7. /* */
  8. /* (c) Copyright 2000-2011 Ted Ralphs. All Rights Reserved. */
  9. /* */
  10. /* This software is licensed under the Eclipse Public License. Please see */
  11. /* accompanying file for terms. */
  12. /* */
  13. /*===========================================================================*/
  14. #define COMPILING_FOR_TM
  15. #include <stdio.h>
  16. #include <stdlib.h>
  17. #include <string.h>
  18. #include <math.h>
  19. #if !defined(_MSC_VER) && !defined(__MNO_CYGWIN) && defined(SIGHANDLER)
  20. #include <signal.h>
  21. #if !defined(HAS_SRANDOM)
  22. extern int srandom PROTO((unsigned seed));
  23. #endif
  24. #if !defined(HAS_RANDOM)
  25. extern long random PROTO((void));
  26. #endif
  27. #endif
  28. #ifdef _OPENMP
  29. #include "omp.h"
  30. #endif
  31. #if !defined (_MSC_VER)
  32. #include <unistd.h> /* this defines sleep() */
  33. #endif
  34. #include "sym_tm.h"
  35. #include "sym_constants.h"
  36. #include "sym_types.h"
  37. #include "sym_macros.h"
  38. #include "sym_messages.h"
  39. #include "sym_proccomm.h"
  40. #include "sym_timemeas.h"
  41. #include "sym_pack_cut.h"
  42. #include "sym_pack_array.h"
  43. #ifdef COMPILE_IN_LP
  44. #include "sym_lp.h"
  45. #endif
  46. #ifdef COMPILE_IN_TM
  47. #include "sym_master.h"
  48. #else
  49. #include "sym_cp.h"
  50. #endif
  51. int c_count = 0;
  52. /*===========================================================================*/
  53. /*===========================================================================*\
  54. * This file contains basic functions associated with the treemanager process
  55. \*===========================================================================*/
  56. /*===========================================================================*/
  57. /*===========================================================================*\
  58. * This function receives the intitial parameters and data and sets up
  59. * the tree manager data structures, etc.
  60. \*===========================================================================*/
/* Receives the initial parameters/data (from the in-process master or over
 * PVM) and sets up the tree manager data structures: LP/CG/CP process or
 * thread pools, the cut list, and the root node.  Returns
 * FUNCTION_TERMINATED_NORMALLY on success, or a negative LP-init/warm-start
 * error code.  NOTE(review): on the ERROR__READING_WARM_START_FILE returns
 * below, `root` (and any termcodes buffer) is not freed — looks like a small
 * leak on the error path; confirm before changing. */
int tm_initialize(tm_prob *tm, base_desc *base, node_desc *rootdesc)
{
#ifndef COMPILE_IN_TM
   int r_bufid, bytes, msgtag, i;
#endif
   FILE *f = NULL;
   tm_params *par;
   bc_node *root = (bc_node *) calloc(1, sizeof(bc_node));
#ifdef COMPILE_IN_LP
   int i;
#else
#ifdef COMPILE_IN_TM
   int i;
#endif
   int s_bufid;
#endif
   int *termcodes = NULL;

#if !defined(_MSC_VER) && !defined(__MNO_CYGWIN) && defined(SIGHANDLER)
   /* Install the Ctrl-C handler so a SIGINT stops the search gracefully
      (see the c_count flag polled in solve()). */
   signal(SIGINT, sym_catch_c);
#endif

   par = &tm->par;

   /* Per-thread scratch arrays: one slot per active node (i.e. per OpenMP
      thread), or a single slot in the serial build. */
#ifdef _OPENMP
   tm->rpath =
      (bc_node ***) calloc(par->max_active_nodes, sizeof(bc_node **));
   tm->rpath_size = (int *) calloc(par->max_active_nodes, sizeof(int));
   tm->bpath =
      (branch_desc **) calloc(par->max_active_nodes, sizeof(branch_desc *));
   tm->bpath_size = (int *) calloc(par->max_active_nodes, sizeof(int));
   termcodes = (int *) calloc(par->max_active_nodes, sizeof(int));
#else
   tm->rpath = (bc_node ***) calloc(1, sizeof(bc_node **));
   tm->rpath_size = (int *) calloc(1, sizeof(int));
   tm->bpath = (branch_desc **) calloc(1, sizeof(branch_desc *));
   tm->bpath_size = (int *) calloc(1, sizeof(int));
   termcodes = (int *) calloc(1, sizeof(int));
#endif

   /*------------------------------------------------------------------------*\
    * Receive data from the master: directly from the shared structures when
    * compiled in, otherwise unpacked from a TM_DATA message.
   \*------------------------------------------------------------------------*/
#ifdef COMPILE_IN_TM
   tm->bvarnum = base->varnum;
   tm->bcutnum = base->cutnum;
#else
   r_bufid = receive_msg(ANYONE, TM_DATA);
   bufinfo(r_bufid, &bytes, &msgtag, &tm->master);
   receive_char_array((char *)par, sizeof(tm_params));
   receive_char_array(&tm->has_ub, 1);
   if (tm->has_ub)
      receive_dbl_array(&tm->ub, 1);
   receive_char_array(&tm->has_ub_estimate, 1);
   if (tm->has_ub_estimate)
      receive_dbl_array(&tm->ub_estimate, 1);
   READ_STR_LIST(par->lp_mach_num, MACH_NAME_LENGTH,
		 par->lp_machs[0], par->lp_machs);
   READ_STR_LIST(par->cg_mach_num, MACH_NAME_LENGTH,
		 par->cg_machs[0], par->cg_machs);
   READ_STR_LIST(par->cp_mach_num, MACH_NAME_LENGTH,
		 par->cp_machs[0], par->cp_machs);
   receive_int_array(&tm->bvarnum, 1);
   receive_int_array(&tm->bcutnum, 1);
#ifdef TRACE_PATH
   /* Optional known-feasible solution used to trace the optimal path. */
   receive_int_array(&tm->feas_sol_size, 1);
   if (tm->feas_sol_size){
      tm->feas_sol = (int *) calloc (tm->feas_sol_size, sizeof(int));
      receive_int_array(tm->feas_sol, tm->feas_sol_size);
   }
#endif
   freebuf(r_bufid);
#endif

   SRANDOM(par->random_seed);

#ifdef COMPILE_IN_LP
#ifdef _OPENMP
   /* Fix the thread count: one LP worker per allowed active node. */
   omp_set_dynamic(FALSE);
   omp_set_num_threads(par->max_active_nodes);
#else
   /* Serial build: exactly one active node at a time. */
   par->max_active_nodes = 1;
#endif
   tm->active_nodes = (bc_node **) calloc(par->max_active_nodes, sizeof(bc_node *));
#ifndef COMPILE_IN_TM
   tm->lpp = (lp_prob **)
      malloc(par->max_active_nodes * sizeof(lp_prob *));
   for (i = 0; i < par->max_active_nodes; i++){
      tm->lpp[i] = (lp_prob *) calloc(1, sizeof(lp_prob));
      tm->lpp[i]->proc_index = i;
   }
#ifdef COMPILE_IN_CG
   /* Cut generators live inside the LP workers; the standalone flag is off. */
   tm->cgp = (cg_prob **) malloc(par->max_active_nodes * sizeof(cg_prob *));
   for (i = 0; i < par->max_active_nodes; i++)
      tm->lpp[i]->cgp = tm->cgp[i] = (cg_prob *) calloc(1, sizeof(cg_prob));
   par->use_cg = FALSE;
#endif
#endif
   /* Initialize every LP worker in parallel; collect per-thread termcodes
      so a failure in any thread can be reported after the join. */
#pragma omp parallel for shared(tm)
   for (i = 0; i < par->max_active_nodes; i++){
      if ((termcodes[i] = lp_initialize(tm->lpp[i], 0)) < 0){
	 printf("LP initialization failed with error code %i in thread %i\n\n",
		termcodes[i], i);
      }
      tm->lpp[i]->tm = tm;
   }
   tm->lp.free_num = par->max_active_nodes;
   for (i = 0; i < par->max_active_nodes; i++){
      if (termcodes[i] < 0){
	 int tmp = termcodes[i];
	 FREE(termcodes);
	 return(tmp);
      }
   }
#else
   tm->active_nodes =
      (bc_node **) malloc(par->max_active_nodes * sizeof(bc_node *));
   /*------------------------------------------------------------------------*\
    * Start the lp, cg processes and send cg tid's to the lp's.
    * Also, start the cp, sp processes.
   \*------------------------------------------------------------------------*/
   tm->lp = start_processes(tm, par->max_active_nodes, par->lp_exe,
			    par->lp_debug, par->lp_mach_num, par->lp_machs);
#endif

   /* Lazily allocate the shared cut list (guarded for parallel callers). */
#pragma omp critical (cut_pool)
   if (!tm->cuts){
      tm->cuts = (cut_data **) malloc(BB_BUNCH * sizeof(cut_data *));
   }

   if (par->use_cg){
#ifndef COMPILE_IN_CG
      tm->cg = start_processes(tm, par->max_active_nodes, par->cg_exe,
			       par->cg_debug, par->cg_mach_num, par->cg_machs);
#ifdef COMPILE_IN_LP
      for (i = 0; i < par->max_active_nodes; i++)
	 tm->lpp[i]->cut_gen = tm->cg.procs[i];
#else
      /* Tell each LP process the tid of its cut generator. */
      for (i = 0; i < tm->lp.procnum; i++){
	 s_bufid = init_send(DataInPlace);
	 send_int_array(tm->cg.procs + i, 1);
	 send_msg(tm->lp.procs[i], LP__CG_TID_INFO);
      }
#endif
#endif
   }

   if (par->max_cp_num){
#ifdef COMPILE_IN_CP
#ifndef COMPILE_IN_TM
      tm->cpp = (cut_pool **) malloc(par->max_cp_num * sizeof(cut_pool *));
#endif
      for (i = 0; i < par->max_cp_num; i++){
#ifndef COMPILE_IN_TM
	 tm->cpp[i] = (cut_pool *) calloc(1, sizeof(cut_pool));
#endif
	 cp_initialize(tm->cpp[i], tm->master);
      }
      tm->cp.free_num = par->max_cp_num;
      tm->cp.procnum = par->max_cp_num;
      tm->cp.free_ind = (int *) malloc(par->max_cp_num * ISIZE);
      for (i = par->max_cp_num - 1; i >= 0; i--)
	 tm->cp.free_ind[i] = i;
#else
      tm->cp = start_processes(tm, par->max_cp_num, par->cp_exe,
			       par->cp_debug, par->cp_mach_num, par->cp_machs);
#endif
      /* Bookkeeping of how many (active) nodes each cut pool serves. */
      tm->nodes_per_cp = (int *) calloc(tm->par.max_cp_num, ISIZE);
      tm->active_nodes_per_cp = (int *) calloc(tm->par.max_cp_num, ISIZE);
   }else{
#ifdef COMPILE_IN_CP
      tm->cpp = (cut_pool **) calloc(1, sizeof(cut_pool *));
#endif
   }

   /*------------------------------------------------------------------------*\
    * Receive the root node and send out initial data to the LP processes
   \*------------------------------------------------------------------------*/
   FREE(termcodes);

   if (tm->par.warm_start){
      /* Warm start: restore the tree either from the log files or from an
	 already-present tm->rootnode.  Note that in the latter case f stays
	 NULL and read_subtree() is called with a NULL file — presumably it
	 then reads from memory; TODO confirm. */
      if (!tm->rootnode){
	 if (!(f = fopen(tm->par.warm_start_tree_file_name, "r"))){
	    printf("Error reading warm start file %s\n\n",
		   tm->par.warm_start_tree_file_name);
	    return(ERROR__READING_WARM_START_FILE);
	 }
	 read_tm_info(tm, f);
      }else{
	 free(root);
	 root = tm->rootnode;
      }
      read_subtree(tm, root, f);
      if (f)
	 fclose(f);
      if (!tm->rootnode){
	 if (!read_tm_cut_list(tm, tm->par.warm_start_cut_file_name)){
	    printf("Error reading warm start file %s\n\n",
		   tm->par.warm_start_cut_file_name);
	    return(ERROR__READING_WARM_START_FILE);
	 }
      }
      tm->rootnode = root;
      if(root->node_status != NODE_STATUS__WARM_STARTED)
	 root->node_status = NODE_STATUS__ROOT;
   }else{
#ifdef COMPILE_IN_TM
      (tm->rootnode = root)->desc = *rootdesc;
      /* Copy the root description in case it is still needed */
      root->desc.uind.list = (int *) malloc(rootdesc->uind.size*ISIZE);
      memcpy((char *)root->desc.uind.list, (char *)rootdesc->uind.list,
	     rootdesc->uind.size*ISIZE);
      root->bc_index = tm->stat.created++;
      root->lower_bound = -MAXDOUBLE;
      tm->stat.tree_size++;
      insert_new_node(tm, root);
      tm->phase = 0;
      tm->lb = 0;
#else
      r_bufid = receive_msg(tm->master, TM_ROOT_DESCRIPTION);
      receive_node_desc(tm, root);
      if (root->desc.cutind.size > 0){ /* Hey we got cuts, too! Unpack them. */
	 unpack_cut_set(tm, 0, 0, NULL);
      }
      freebuf(r_bufid);
#endif
#ifdef TRACE_PATH
      root->optimal_path = TRUE;
#endif
   }
   return(FUNCTION_TERMINATED_NORMALLY);
}
  282. /*===========================================================================*/
  283. /*===========================================================================*\
  284. * This is the main loop that solves the problem
  285. \*===========================================================================*/
  286. int solve(tm_prob *tm)
  287. {
  288. #ifndef COMPILE_IN_LP
  289. int r_bufid;
  290. #endif
  291. int termcode = 0;
  292. double start_time = tm->start_time;
  293. double no_work_start, ramp_up_tm = 0, ramp_down_time = 0;
  294. char ramp_down = FALSE, ramp_up = TRUE;
  295. double then, then2, then3, now;
  296. double timeout2 = 30, timeout3 = tm->par.logging_interval, timeout4 = 10;
  297. /*------------------------------------------------------------------------*\
  298. * The Main Loop
  299. \*------------------------------------------------------------------------*/
  300. no_work_start = wall_clock(NULL);
  301. termcode = TM_UNFINISHED;
  302. for (; tm->phase <= 1; tm->phase++){
  303. if (tm->phase == 1 && !tm->par.warm_start){
  304. if ((termcode = tasks_before_phase_two(tm)) ==
  305. FUNCTION_TERMINATED_NORMALLY){
  306. termcode = TM_FINISHED; /* Continue normally */
  307. }
  308. }
  309. then = wall_clock(NULL);
  310. then2 = wall_clock(NULL);
  311. then3 = wall_clock(NULL);
  312. #pragma omp parallel default(shared)
  313. {
  314. #ifdef _OPENMP
  315. int i, thread_num = omp_get_thread_num();
  316. #else
  317. int i, thread_num = 0;
  318. #endif
  319. while (tm->active_node_num > 0 || tm->samephase_candnum > 0){
  320. /*------------------------------------------------------------------*\
  321. * while there are nodes being processed or while there are nodes
  322. * waiting to be processed, continue to execute this loop
  323. \*------------------------------------------------------------------*/
  324. i = NEW_NODE__STARTED;
  325. while (tm->lp.free_num > 0 && (tm->par.time_limit >= 0.0 ?
  326. (wall_clock(NULL) - start_time < tm->par.time_limit) : TRUE) &&
  327. (tm->par.node_limit >= 0 ?
  328. tm->stat.analyzed < tm->par.node_limit : TRUE) &&
  329. ((tm->has_ub && (tm->par.gap_limit >= 0.0)) ?
  330. fabs(100*(tm->ub-tm->lb)/tm->ub) > tm->par.gap_limit : TRUE)
  331. && !(tm->par.find_first_feasible && tm->has_ub) && c_count <= 0){
  332. if (tm->samephase_candnum > 0){
  333. #pragma omp critical (tree_update)
  334. i = start_node(tm, thread_num);
  335. }else{
  336. i = NEW_NODE__NONE;
  337. }
  338. if (i != NEW_NODE__STARTED)
  339. break;
  340. if (ramp_up){
  341. ramp_up_tm += (wall_clock(NULL) -
  342. no_work_start) * (tm->lp.free_num + 1);
  343. }
  344. if (ramp_down){
  345. ramp_down_time += (wall_clock(NULL) -
  346. no_work_start) * (tm->lp.free_num + 1);
  347. }
  348. if (!tm->lp.free_num){
  349. ramp_down = FALSE;
  350. ramp_up = FALSE;
  351. }else if (ramp_up){
  352. no_work_start = wall_clock(NULL);
  353. }else{
  354. ramp_down = TRUE;
  355. no_work_start = wall_clock(NULL);
  356. }
  357. #ifdef COMPILE_IN_LP
  358. #ifdef _OPENMP
  359. if (tm->par.verbosity > 0)
  360. printf("Thread %i now processing node %i\n", thread_num,
  361. tm->lpp[thread_num]->bc_index);
  362. #endif
  363. if(tm->par.node_selection_rule == DEPTH_FIRST_THEN_BEST_FIRST &&
  364. tm->has_ub){
  365. tm->par.node_selection_rule = LOWEST_LP_FIRST;
  366. }
  367. switch(process_chain(tm->lpp[thread_num])){
  368. case FUNCTION_TERMINATED_NORMALLY:
  369. break;
  370. case ERROR__NO_BRANCHING_CANDIDATE:
  371. termcode = TM_ERROR__NO_BRANCHING_CANDIDATE;
  372. break;
  373. case ERROR__ILLEGAL_RETURN_CODE:
  374. termcode = TM_ERROR__ILLEGAL_RETURN_CODE;
  375. break;
  376. case ERROR__NUMERICAL_INSTABILITY:
  377. termcode = TM_ERROR__NUMERICAL_INSTABILITY;
  378. break;
  379. case ERROR__COMM_ERROR:
  380. termcode = TM_ERROR__COMM_ERROR;
  381. case ERROR__USER:
  382. termcode = TM_ERROR__USER;
  383. break;
  384. case ERROR__DUAL_INFEASIBLE:
  385. if(tm->lpp[thread_num]->bc_index < 1 ) {
  386. termcode = TM_UNBOUNDED;
  387. }else{
  388. termcode = TM_ERROR__NUMERICAL_INSTABILITY;
  389. }
  390. break;
  391. }
  392. #endif
  393. #pragma omp master
  394. {
  395. now = wall_clock(NULL);
  396. if (now - then2 > timeout2){
  397. if(tm->par.verbosity >= -1 ){
  398. print_tree_status(tm);
  399. }
  400. then2 = now;
  401. }
  402. if (now - then3 > timeout3){
  403. write_log_files(tm);
  404. then3 = now;
  405. }
  406. }
  407. }
  408. if (c_count > 0){
  409. termcode = TM_SIGNAL_CAUGHT;
  410. c_count = 0;
  411. break;
  412. }
  413. if (tm->par.time_limit >= 0.0 &&
  414. wall_clock(NULL) - start_time > tm->par.time_limit &&
  415. termcode != TM_FINISHED){
  416. termcode = TM_TIME_LIMIT_EXCEEDED;
  417. break;
  418. }
  419. if (tm->par.node_limit >= 0 && tm->stat.analyzed >=
  420. tm->par.node_limit && termcode != TM_FINISHED){
  421. if (tm->active_node_num + tm->samephase_candnum > 0){
  422. termcode = TM_NODE_LIMIT_EXCEEDED;
  423. }else{
  424. termcode = TM_FINISHED;
  425. }
  426. break;
  427. }
  428. if (tm->par.find_first_feasible && tm->has_ub){
  429. termcode = TM_FINISHED;
  430. break;
  431. }
  432. if (i == NEW_NODE__ERROR){
  433. termcode = SOMETHING_DIED;
  434. break;
  435. }
  436. if (tm->has_ub && (tm->par.gap_limit >= 0.0)){
  437. find_tree_lb(tm);
  438. if (fabs(100*(tm->ub-tm->lb)/tm->ub) <= tm->par.gap_limit){
  439. if (tm->lb < tm->ub){
  440. termcode = TM_TARGET_GAP_ACHIEVED;
  441. }else{
  442. termcode = TM_FINISHED;
  443. }
  444. break;
  445. }
  446. }
  447. if (i == NEW_NODE__NONE && tm->active_node_num == 0)
  448. break;
  449. #ifndef COMPILE_IN_LP
  450. struct timeval timeout = {5, 0};
  451. r_bufid = treceive_msg(ANYONE, ANYTHING, &timeout);
  452. if (r_bufid && !process_messages(tm, r_bufid)){
  453. find_tree_lb(tm);
  454. termcode = SOMETHING_DIED;
  455. break;
  456. }
  457. #endif
  458. now = wall_clock(NULL);
  459. if (now - then > timeout4){
  460. if (!processes_alive(tm)){
  461. find_tree_lb(tm);
  462. termcode = SOMETHING_DIED;
  463. break;
  464. }
  465. then = now;
  466. }
  467. #pragma omp master
  468. {
  469. for (i = 0; i < tm->par.max_active_nodes; i++){
  470. if (tm->active_nodes[i]){
  471. break;
  472. }
  473. }
  474. if (i == tm->par.max_active_nodes){
  475. tm->active_node_num = 0;
  476. }
  477. if (now - then2 > timeout2){
  478. if(tm->par.verbosity >=0 ){
  479. print_tree_status(tm);
  480. }
  481. then2 = now;
  482. }
  483. if (now - then3 > timeout3){
  484. write_log_files(tm);
  485. then3 = now;
  486. }
  487. }
  488. }
  489. }
  490. if(termcode == TM_UNBOUNDED) break;
  491. if (tm->samephase_candnum + tm->active_node_num == 0){
  492. termcode = TM_FINISHED;
  493. }
  494. if (tm->nextphase_candnum == 0)
  495. break;
  496. if (termcode != TM_UNFINISHED)
  497. break;
  498. }
  499. find_tree_lb(tm);
  500. tm->comp_times.ramp_up_tm = ramp_up_tm;
  501. tm->comp_times.ramp_down_time = ramp_down_time;
  502. write_log_files(tm);
  503. return(termcode);
  504. }
  505. /*===========================================================================*/
  506. /*==========================================================================*\
  507. * Write out the log files
  508. \*==========================================================================*/
  509. void write_log_files(tm_prob *tm)
  510. {
  511. #if !defined(COMPILE_IN_LP) || !defined(COMPILE_IN_CP)
  512. int s_bufid;
  513. #endif
  514. if (tm->par.logging){
  515. write_tm_info(tm, tm->par.tree_log_file_name, NULL, FALSE);
  516. write_subtree(tm->rootnode, tm->par.tree_log_file_name, NULL, TRUE,
  517. tm->par.logging);
  518. if (tm->par.logging != VBC_TOOL)
  519. write_tm_cut_list(tm, tm->par.cut_log_file_name, FALSE);
  520. }
  521. if (tm->par.max_cp_num > 0 && tm->par.cp_logging){
  522. #if defined(COMPILE_IN_LP) && defined(COMPILE_IN_CP)
  523. write_cp_cut_list(tm->cpp[0], tm->cpp[0]->par.log_file_name,
  524. FALSE);
  525. #else
  526. s_bufid = init_send(DataInPlace);
  527. send_msg(tm->cp.procs[0], WRITE_LOG_FILE);
  528. #endif
  529. }
  530. }
  531. /*===========================================================================*/
  532. /*==========================================================================*\
  533. * Prints out the current size of the tree and the gap *
  534. \*==========================================================================*/
  535. void print_tree_status(tm_prob *tm)
  536. {
  537. double elapsed_time;
  538. double obj_ub = SYM_INFINITY, obj_lb = -SYM_INFINITY;
  539. #ifdef SHOULD_SHOW_MEMORY_USAGE
  540. int i;
  541. int pid;
  542. int tmp_int;
  543. long unsigned vsize;
  544. char tmp_str[100], proc_filename[100];
  545. FILE *proc_file;
  546. double vsize_in_mb;
  547. #endif
  548. #if 0
  549. int *widths;
  550. double *gamma;
  551. int last_full_level = 0, max_width = 0, num_nodes_estimate = 1;
  552. int first_waist_level = 0, last_waist_level = 0, waist_level = 0;
  553. double average_node_time, estimated_time_remaining, user_time = 0.0;
  554. widths = (int *) calloc (tm->stat.max_depth + 1, ISIZE);
  555. gamma = (double *) calloc (tm->stat.max_depth + 1, DSIZE);
  556. calculate_widths(tm->rootnode, widths);
  557. last_full_level = tm->stat.max_depth;
  558. for (i = tm->stat.max_depth - 1; i > 0; i--){
  559. if ((double)(widths[i])/(double)(widths[i - 1]) < 2){
  560. last_full_level = i - 1;
  561. }
  562. if (widths[i] > max_width){
  563. max_width = widths[i];
  564. last_waist_level = i;
  565. first_waist_level = i;
  566. }
  567. if (widths[i] == max_width){
  568. first_waist_level = i;
  569. }
  570. }
  571. waist_level = (first_waist_level + last_waist_level)/2;
  572. for (i = 0; i < tm->stat.max_depth; i++){
  573. if (i < last_full_level){
  574. gamma[i] = 2.0;
  575. }else if (i < waist_level){
  576. gamma[i] = 2.0 - (double)((i - last_full_level + 1))/
  577. (double)((waist_level - last_full_level + 1));
  578. }else{
  579. gamma[i] = 1.0 - (double)(i - waist_level + 1)/
  580. (double)(tm->stat.max_depth - waist_level + 1);
  581. }
  582. }
  583. for (i = 1; i < tm->stat.max_depth; i++){
  584. gamma[i] *= gamma[i - 1];
  585. num_nodes_estimate += (int)(gamma[i] + 0.5);
  586. }
  587. elapsed_time = wall_clock(NULL) - tm->start_time;
  588. average_node_time = elapsed_time/tm->stat.analyzed;
  589. estimated_time_remaining =
  590. MAX(average_node_time*(num_nodes_estimate - tm->stat.analyzed), 0);
  591. #else
  592. elapsed_time = wall_clock(NULL) - tm->start_time;
  593. #endif
  594. #ifdef SHOULD_SHOW_MEMORY_USAGE
  595. pid = getpid();
  596. //printf("process id = %d\n",pid);
  597. sprintf(proc_filename,"/proc/%d/stat",pid);
  598. proc_file = fopen (proc_filename, "r");
  599. fscanf (proc_file, "%d %s %s", &tmp_int, tmp_str, tmp_str);
  600. for (i=0; i<19;i++) {
  601. fscanf (proc_file, "%d", &tmp_int);
  602. }
  603. fscanf (proc_file, "%lu", &vsize);
  604. fclose(proc_file);
  605. //printf("vsize = %lu\n",vsize);
  606. vsize_in_mb = vsize/1024.0/1024.0;
  607. if (tm->stat.max_vsize<vsize_in_mb) {
  608. tm->stat.max_vsize = vsize_in_mb;
  609. }
  610. printf("memory: %.2f MB ", vsize_in_mb);
  611. #endif
  612. printf("done: %i ", tm->stat.analyzed-tm->active_node_num);
  613. printf("left: %i ", tm->samephase_candnum+tm->active_node_num);
  614. if (tm->has_ub) {
  615. if (tm->obj_sense == SYM_MAXIMIZE){
  616. obj_lb = -tm->ub + tm->obj_offset;
  617. printf("lb: %.2f ", obj_lb);
  618. }else{
  619. obj_ub = tm->ub + tm->obj_offset;
  620. printf("ub: %.2f ", obj_ub);
  621. }
  622. } else {
  623. if (tm->obj_sense == SYM_MAXIMIZE){
  624. printf("lb: ?? ");
  625. }else{
  626. printf("ub: ?? ");
  627. }
  628. }
  629. find_tree_lb(tm);
  630. if(tm->lb > -SYM_INFINITY){
  631. if (tm->obj_sense == SYM_MAXIMIZE){
  632. obj_ub = -tm->lb + tm->obj_offset;
  633. printf("ub: %.2f ", obj_ub);
  634. }else{
  635. obj_lb = tm->lb + tm->obj_offset;
  636. printf("lb: %.2f ", obj_lb);
  637. }
  638. }else{
  639. if (tm->obj_sense == SYM_MAXIMIZE){
  640. printf("ub: ?? ");
  641. }else{
  642. printf("lb: ?? ");
  643. }
  644. }
  645. if (tm->has_ub && tm->ub && tm->lb > -SYM_INFINITY){
  646. printf("gap: %.2f ", fabs(100*(obj_ub-obj_lb)/obj_ub));
  647. }
  648. printf("time: %i\n", (int)(elapsed_time));
  649. #if 0
  650. printf("Estimated nodes remaining: %i\n", num_nodes_estimate);
  651. printf("Estimated time remaining: %i\n",
  652. (int)(estimated_time_remaining));
  653. #endif
  654. if (tm->par.vbc_emulation == VBC_EMULATION_FILE){
  655. FILE *f;
  656. #pragma omp critical(write_vbc_emulation_file)
  657. if (!(f = fopen(tm->par.vbc_emulation_file_name, "a"))){
  658. printf("\nError opening vbc emulation file\n\n");
  659. }else{
  660. PRINT_TIME(tm, f);
  661. fprintf(f, "L %.2f \n", tm->lb);
  662. fclose(f);
  663. }
  664. }else if (tm->par.vbc_emulation == VBC_EMULATION_LIVE){
  665. printf("$L %.2f\n", tm->lb);
  666. }
  667. #if 0
  668. FREE(widths);
  669. FREE(gamma);
  670. #endif
  671. }
  672. /*===========================================================================*/
  673. void calculate_widths(bc_node *node, int* widths)
  674. {
  675. int i;
  676. widths[node->bc_level] += 1;
  677. for (i = 0; i < node->bobj.child_num; i ++){
  678. calculate_widths(node->children[i], widths);
  679. }
  680. }
  681. /*===========================================================================*/
  682. /*===========================================================================*\
  683. * This function picks the "best" node off the active node list
  684. \*===========================================================================*/
/* Picks the "best" candidate node, assigns it an LP worker (the calling
 * thread when LPs are compiled in) and a cut pool, and ships it off for
 * processing.  Returns NEW_NODE__STARTED, NEW_NODE__NONE (no candidate
 * left) or NEW_NODE__ERROR (no cut pool could be assigned). */
int start_node(tm_prob *tm, int thread_num)
{
   int lp_ind, get_next, ind;
   bc_node *best_node = NULL;
   double time;
   time = wall_clock(NULL);
   /*------------------------------------------------------------------------*\
    * First choose the "best" node from the list of candidate nodes.
    * If the list for the current phase is empty then we return NEW_NODE__NONE.
    * Also, if the lower bound on the "best" node is above the current UB then
    * we just move that node the list of next phase candidates.
   \*------------------------------------------------------------------------*/
   get_next = TRUE;
   while (get_next){
      if ((best_node = del_best_node(tm)) == NULL)
	 return(NEW_NODE__NONE);
      /* A warm-started node with an infinite bound has no usable bound
	 information — process it regardless of the incumbent. */
      if (best_node->node_status == NODE_STATUS__WARM_STARTED){
	 if(best_node->lower_bound >= MAXDOUBLE)
	    break;
      }
      /* if no UB yet or lb is lower than UB then go ahead */
      if (!tm->has_ub ||
	  (tm->has_ub && best_node->lower_bound < tm->ub-tm->par.granularity))
	 break;
      /* ok, so we do have an UB and lb is higher than the UB. */
      /* in this switch we assume that there are only two phases! */
      /* Dispatch on (nf_status, phase) packed into one int. */
      switch (((best_node->desc.nf_status) << 8) + tm->phase){
       case (NF_CHECK_NOTHING << 8) + 0: /* prune these */
       case (NF_CHECK_NOTHING << 8) + 1:
	 if(!tm->par.sensitivity_analysis){
	    /* Release the node's cut pool slot if it was the last user. */
	    if (tm->par.max_cp_num > 0 && best_node->cp){
#ifdef COMPILE_IN_CP
	       ind = best_node->cp;
#else
	       ind = find_process_index(&tm->cp, best_node->cp);
#endif
	       tm->nodes_per_cp[ind]--;
	       if (tm->nodes_per_cp[ind] + tm->active_nodes_per_cp[ind] == 0)
		  tm->cp.free_ind[tm->cp.free_num++] = ind;
	    }
	    best_node->node_status = NODE_STATUS__PRUNED;
	    best_node->feasibility_status = OVER_UB_PRUNED;
	    if (tm->par.verbosity > 0){
	       printf("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
	       printf("+ TM: Pruning NODE %i LEVEL %i instead of sending it.\n",
		      best_node->bc_index, best_node->bc_level);
	       printf("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
	    }
	    if (tm->par.keep_description_of_pruned == KEEP_ON_DISK_VBC_TOOL ||
		tm->par.keep_description_of_pruned == KEEP_ON_DISK_FULL ||
		tm->par.keep_description_of_pruned == DISCARD){
	       if (tm->par.keep_description_of_pruned ==
		   KEEP_ON_DISK_VBC_TOOL ||
		   tm->par.keep_description_of_pruned == KEEP_ON_DISK_FULL){
#pragma omp critical (write_pruned_node_file)
		  write_pruned_nodes(tm, best_node);
	       }
#if 0
	       if (tm->par.vbc_emulation == VBC_EMULATION_FILE_NEW) {
		  purge_pruned_nodes(tm, best_node, VBC_PRUNED_FATHOMED);
	       } else {
		  purge_pruned_nodes(tm, best_node, VBC_PRUNED);
	       }
#else
	       purge_pruned_nodes(tm, best_node, VBC_PRUNED);
#endif
	    }
	    break;
	 }
	 /* NOTE(review): when sensitivity_analysis is set, control falls
	    through to the "work on these" cases below — looks intentional
	    (keep the node alive for sensitivity data); confirm. */
       case (NF_CHECK_ALL << 8) + 1: /* work on these */
       case (NF_CHECK_UNTIL_LAST << 8) + 1:
       case (NF_CHECK_AFTER_LAST << 8) + 1:
	 get_next = FALSE;
	 break;
       default:
	 /* i.e., phase == 0 and nf_status != NF_CHECK_NOTHING */
	 if (!(tm->par.colgen_strat[0] & FATHOM__GENERATE_COLS__RESOLVE)){
	    REALLOC(tm->nextphase_cand, bc_node *, tm->nextphase_cand_size,
		    tm->nextphase_candnum+1, BB_BUNCH);
	    tm->nextphase_cand[tm->nextphase_candnum++] = best_node;
	 }else{
	    get_next = FALSE;
	 }
	 break;
      }
   }
   /* Assign a free lp process */
#ifdef COMPILE_IN_LP
   lp_ind = thread_num;
#else
   lp_ind = tm->lp.free_ind[--tm->lp.free_num];
   best_node->lp = tm->lp.procs[lp_ind];
   best_node->cg = tm->par.use_cg ? tm->cg.procs[lp_ind] : 0;
#endif
   /* assign pools, too */
   best_node->cp = assign_pool(tm, best_node->cp, &tm->cp,
			       tm->active_nodes_per_cp, tm->nodes_per_cp);
   if (best_node->cp < 0) return(NEW_NODE__ERROR);
   /* It's time to put together the node and send it out */
   tm->active_nodes[lp_ind] = best_node;
   tm->active_node_num++;
   tm->stat.analyzed++;
   send_active_node(tm,best_node,tm->par.colgen_strat[tm->phase],thread_num);
   tm->comp_times.start_node += wall_clock(NULL) - time;
   return(NEW_NODE__STARTED);
}
  791. /*===========================================================================*/
  792. /*===========================================================================*\
  793. * Returns the "best" active node and deletes it from the list
  794. \*===========================================================================*/
/* Removes and returns the "best" candidate node.  The candidate list is a
 * 1-indexed implicit binary heap ordered by node_compar(): after taking the
 * top element (list[1]), the last element is sifted down to restore the
 * heap property.  Returns NULL when the list is empty. */
bc_node *del_best_node(tm_prob *tm)
{
   bc_node **list = tm->samephase_cand;
   int size = tm->samephase_candnum;
   bc_node *temp = NULL, *best_node;
   int pos, ch;
   int rule = tm->par.node_selection_rule;
   if (size == 0)
      return(NULL);
   /* The best node is at the root of the heap; move the last element into
      the root slot and shrink the heap before sifting it down. */
   best_node = list[1];
   temp = list[1] = list[size];
   tm->samephase_candnum = --size;
   if (tm->par.verbosity > 10)
      if (tm->samephase_candnum % 10 == 0)
	 printf("\nTM: tree size: %i , %i\n\n",
		tm->samephase_candnum, tm->nextphase_candnum);
   /* Sift temp down: at each step pick the better of the two children
      (node_compar(rule, a, b) != 0 means b should come before a). */
   pos = 1;
   while ((ch=2*pos) < size){
      if (node_compar(rule, list[ch], list[ch+1]))
	 ch++;
      if (node_compar(rule, list[ch], temp)){
	 /* temp beats the better child — it belongs here; done. */
	 list[pos] = temp;
	 return(best_node);
      }
      list[pos] = list[ch];
      pos = ch;
   }
   /* Handle the case of a node with a single (last) child. */
   if (ch == size){
      if (node_compar(rule, temp, list[ch])){
	 list[pos] = list[ch];
	 pos = ch;
      }
   }
   list[pos] = temp;
   return(best_node);
}
  831. /*===========================================================================*/
  832. /*===========================================================================*\
  833. * Insert a new active node into the active node list (kept as a binary tree)
  834. \*===========================================================================*/
  835. void insert_new_node(tm_prob *tm, bc_node *node)
  836. {
  837. int pos, ch, size = tm->samephase_candnum;
  838. bc_node **list;
  839. int rule = tm->par.node_selection_rule;
  840. tm->samephase_candnum = pos = ++size;
  841. if (tm->par.verbosity > 10)
  842. if (tm->samephase_candnum % 10 == 0)
  843. printf("\nTM: tree size: %i , %i\n\n",
  844. tm->samephase_candnum, tm->nextphase_candnum);
  845. REALLOC(tm->samephase_cand, bc_node *,
  846. tm->samephase_cand_size, size + 1, BB_BUNCH);
  847. list = tm->samephase_cand;
  848. while ((ch=pos>>1) != 0){
  849. if (node_compar(rule, list[ch], node)){
  850. list[pos] = list[ch];
  851. pos = ch;
  852. }else{
  853. break;
  854. }
  855. }
  856. list[pos] = node;
  857. }
  858. /*===========================================================================*/
  859. /*===========================================================================*\
  860. * This is the node comparison function used to order the list of active
  861. * Nodes are ordered differently depending on what the comparison rule is
  862. \*===========================================================================*/
  863. int node_compar(int rule, bc_node *node0, bc_node *node1)
  864. {
  865. switch(rule){
  866. case LOWEST_LP_FIRST:
  867. return(node1->lower_bound < node0->lower_bound ? 1:0);
  868. case HIGHEST_LP_FIRST:
  869. return(node1->lower_bound > node0->lower_bound ? 1:0);
  870. case BREADTH_FIRST_SEARCH:
  871. return(node1->bc_level < node0->bc_level ? 1:0);
  872. case DEPTH_FIRST_SEARCH:
  873. case DEPTH_FIRST_THEN_BEST_FIRST:
  874. return(node1->bc_level > node0->bc_level ? 1:0);
  875. }
  876. return(0); /* fake return */
  877. }
  878. /*===========================================================================*/
  879. /*===========================================================================*\
  880. * Nodes by default inherit their parent's pools. However if there is a free
  881. * pool then the node is moved over to the free pool.
  882. \*===========================================================================*/
/*
 * Decide which cut pool a node should use.  Nodes inherit their parent's
 * pool by default, but if a free pool is available the node may be moved
 * over to it (in the distributed build this triggers a pool copy via
 * message passing).  Returns the pool id to use, or NEW_NODE__ERROR if the
 * target pool process died while we waited for its acknowledgement.
 */
int assign_pool(tm_prob *tm, int oldpool, process_set *pools,
                int *active_nodes_per_pool, int *nodes_per_pool)
{
   int oldind = -1, ind, pool;
#ifndef COMPILE_IN_CP
   int s_bufid, r_bufid;
   struct timeval timeout = {5, 0};   /* 5s poll interval for the ack loop */
#endif

   if (pools->free_num == 0){
      /* No change in the pool assigned to this node */
      return(oldpool);
   }

   if (oldpool > 0){
      /* In-process pools are indexed directly; otherwise map the process
       * id back to an index in the pool set. */
#ifdef COMPILE_IN_CP
      oldind = oldpool;
#else
      oldind = find_process_index(pools, oldpool);
#endif
      if (nodes_per_pool[oldind] == 1){
         /* This node is the old pool's only waiting node: keep the pool and
          * just move the count from "waiting" to "active". */
         nodes_per_pool[oldind]--;
         active_nodes_per_pool[oldind]++;
         return(oldpool);
      }
   }

   /* Grab a free pool off the free list. */
   ind = pools->free_ind[--pools->free_num];
#ifdef COMPILE_IN_CP
   pool = ind;
#else
   pool = pools->procs[ind];
#endif
   if (! oldpool){
      /* If no pool is assigned yet then just assign the free one */
      active_nodes_per_pool[ind] = 1;
      return(pool);
   }

   /* finally when we really move the node from one pool to another */
   nodes_per_pool[oldind]--;
   active_nodes_per_pool[ind] = 1;
#ifdef COMPILE_IN_CP
   /*FIXME: Multiple Pools won't work in shared memory mode until I fill this
     in.*/
#else
   /* Tell the free pool to reset itself, then tell the old pool to copy its
    * cuts over to it, and wait (with liveness checks) for the reset ack. */
   s_bufid = init_send(DataInPlace);
   send_int_array(&oldpool, 1);
   send_msg(pool, POOL_YOU_ARE_USELESS);
   s_bufid = init_send(DataInPlace);
   send_int_array(&pool, 1);
   send_msg(oldpool, POOL_COPY_YOURSELF);
   freebuf(s_bufid);

   do{
      r_bufid = treceive_msg(pool, POOL_USELESSNESS_ACKNOWLEDGED, &timeout);
      if (r_bufid == 0)
         if (pstat(pool) != PROCESS_OK) return(NEW_NODE__ERROR);
   }while (r_bufid == 0);
   freebuf(r_bufid);
#endif
   return(pool);
}
  941. /*===========================================================================*/
  942. /*===========================================================================*\
  943. * Takes the branching object description and sets up data structures
  944. * for the resulting children and adds them to the list of candidates.
  945. \*===========================================================================*/
/*
 * Create the child nodes described by branching object 'bobj' under 'node',
 * prune those that can be fathomed immediately, and insert the survivors
 * into the candidate list (or keep one for diving).  Arrays objval[],
 * feasible[] and action[] are indexed per child and are compacted in place
 * when a child is discarded.  *keep is the index of the child the LP wants
 * to dive into (or negative); it is remapped when the arrays are compacted
 * and set to -1 if every child is pruned.  Returns the diving decision
 * (DO_DIVE / CHECK_BEFORE_DIVE / DO_NOT_DIVE).
 */
int generate_children(tm_prob *tm, bc_node *node, branch_obj *bobj,
                      double *objval, int *feasible, char *action,
                      int olddive, int *keep, int new_branching_cut)
{
   node_desc *desc;
   int np_cp = 0, np_sp = 0;
   int dive = DO_NOT_DIVE, i;
   bc_node *child;
   int child_num;
#ifdef TRACE_PATH
   int optimal_path = -1;
#endif

   /* before we start to generate the children we must figure out if we'll
    * dive so that we can put the kept child into the right location */
   if (*keep >= 0 && (olddive == CHECK_BEFORE_DIVE || olddive == DO_DIVE))
      dive = olddive == DO_DIVE ? DO_DIVE : shall_we_dive(tm, objval[*keep]);

   node->children = (bc_node **) calloc(bobj->child_num, sizeof(bc_node *));
   if (node->bc_level == tm->stat.max_depth)
      tm->stat.max_depth++;

   child_num = bobj->child_num;

#ifdef TRACE_PATH
   /* Debug build: mark which child (if any) stays on the known optimal path. */
   if (node->optimal_path && tm->feas_sol_size){
      for (i = 0; i < tm->feas_sol_size; i++)
         if (tm->feas_sol[i] == bobj->name)
            break;
      if (i < tm->feas_sol_size)
         optimal_path = 1;
      else
         optimal_path = 0;
      printf("\n\nNode %i is on the optimal path\n\n",
             tm->stat.tree_size + optimal_path);
   }
#endif

   for (i = 0; i < child_num; i++){
      /* calloc zeroes all fields we do not set explicitly below. */
      child = node->children[i] = (bc_node *) calloc(1, sizeof(bc_node));
      child->bc_index = tm->stat.tree_size++;
      child->bc_level = node->bc_level + 1;
      child->lower_bound = objval[i];
#ifdef COMPILE_IN_LP
      child->update_pc = bobj->is_est[i] ? TRUE : FALSE;
#endif
      child->parent = node;
      if (tm->par.verbosity > 10){
         printf("Generating node %i from %i...\n", child->bc_index,
                node->bc_index);
      }

      /* Emit the new node to the VBC tree-visualization output, if enabled. */
      if (tm->par.vbc_emulation == VBC_EMULATION_FILE){
         FILE *f;
#pragma omp critical(write_vbc_emulation_file)
         if (!(f = fopen(tm->par.vbc_emulation_file_name, "a"))){
            printf("\nError opening vbc emulation file\n\n");
         }else{
            PRINT_TIME(tm, f);
            fprintf(f, "N %i %i %i\n", node->bc_index+1, child->bc_index+1,
                    feasible[i] ? VBC_FEAS_SOL_FOUND :
                    ((dive != DO_NOT_DIVE && *keep == i) ?
                     VBC_ACTIVE_NODE : VBC_CAND_NODE));
            fclose(f);
         }
      } else if (tm->par.vbc_emulation == VBC_EMULATION_FILE_NEW) {
         FILE *f;
#pragma omp critical(write_vbc_emulation_file)
         if (!(f = fopen(tm->par.vbc_emulation_file_name, "a"))){
            printf("\nError opening vbc emulation file\n\n");
         }else{
            PRINT_TIME2(tm, f);
            char reason[50];
            char branch_dir = 'M';
            sprintf (reason, "%s %i %i", "candidate", child->bc_index+1,
                     node->bc_index+1);
            if (child->bc_index>0){
               /* Derive branch direction from the branching sense; 'G'
                * (>=) is rendered as a right branch. */
               if (node->children[0]==child) {
                  branch_dir = node->bobj.sense[0];
                  /*branch_dir = 'L';*/
               } else {
                  branch_dir = node->bobj.sense[1];
                  /*branch_dir = 'R';*/
               }
               if (branch_dir == 'G') {
                  branch_dir = 'R';
               }
            }
            if (action[i] == PRUNE_THIS_CHILD_FATHOMABLE ||
                action[i] == PRUNE_THIS_CHILD_INFEASIBLE){
               sprintf(reason,"%s %c", reason, branch_dir);
            }else{
               sprintf(reason,"%s %c %f", reason, branch_dir,
                       child->lower_bound);
            }
            fprintf(f,"%s\n",reason);
            fclose(f);
         }
      }else if (tm->par.vbc_emulation == VBC_EMULATION_LIVE){
         printf("$N %i %i %i\n", node->bc_index+1, child->bc_index+1,
                feasible[i] ? VBC_FEAS_SOL_FOUND :
                ((dive != DO_NOT_DIVE && *keep == i) ?
                 VBC_ACTIVE_NODE: VBC_CAND_NODE));
      }
#ifdef TRACE_PATH
      if (optimal_path == i)
         child->optimal_path = TRUE;
#endif
      tm->stat.created++;
#ifndef ROOT_NODE_ONLY
      /* Prune-on-creation test: explicit prune actions from the LP, or the
       * child's bound already exceeds the incumbent (when no column
       * generation check is pending). */
      if (action[i] == PRUNE_THIS_CHILD ||
          action[i] == PRUNE_THIS_CHILD_FATHOMABLE ||
          action[i] == PRUNE_THIS_CHILD_INFEASIBLE ||
          (tm->has_ub && tm->ub - tm->par.granularity < objval[i] &&
           node->desc.nf_status == NF_CHECK_NOTHING)){
         /* this last can happen if the TM got the new bound but it hasn't
          * been propagated to the LP yet */
#else /*We only want to process the root node in this case - discard others*/
      if (TRUE){
#endif
         if (tm->par.verbosity > 0){
            printf("++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
            printf("+ TM: Pruning NODE %i LEVEL %i while generating it.\n",
                   child->bc_index, child->bc_level);
            printf("++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
         }
         child->node_status = NODE_STATUS__PRUNED;
#ifdef TRACE_PATH
         if (child->optimal_path){
            printf("\n\nAttempting to prune the optimal path!!!!!!!!!\n\n");
            sleep(600);
            if (tm->par.logging){
               write_tm_info(tm, tm->par.tree_log_file_name, NULL, FALSE);
               write_subtree(tm->rootnode, tm->par.tree_log_file_name, NULL,
                             TRUE, tm->par.logging);
               write_tm_cut_list(tm, tm->par.cut_log_file_name, FALSE);
            }
            exit(1);
         }
#endif
         if (tm->par.keep_description_of_pruned == DISCARD ||
             tm->par.keep_description_of_pruned == KEEP_ON_DISK_VBC_TOOL){
            child->parent = node;
            if (tm->par.keep_description_of_pruned == KEEP_ON_DISK_VBC_TOOL)
#pragma omp critical (write_pruned_node_file)
               write_pruned_nodes(tm, child);
            if (tm->par.vbc_emulation == VBC_EMULATION_FILE_NEW) {
               int vbc_node_pr_reason;
               switch (action[i]) {
                case PRUNE_THIS_CHILD_INFEASIBLE:
                  vbc_node_pr_reason = VBC_PRUNED_INFEASIBLE;
                  break;
                case PRUNE_THIS_CHILD_FATHOMABLE:
                  vbc_node_pr_reason = VBC_PRUNED_FATHOMED;
                  break;
                default:
                  vbc_node_pr_reason = VBC_PRUNED;
               }
               /* following is no longer needed because this care is taken
                * care of in install_new_ub
                */
               /*
               if (feasible[i]) {
                  vbc_node_pr_reason = VBC_FEAS_SOL_FOUND;
               }
               */
#pragma omp critical (tree_update)
               purge_pruned_nodes(tm, child, vbc_node_pr_reason);
            } else {
#pragma omp critical (tree_update)
               purge_pruned_nodes(tm, child, feasible[i] ? VBC_FEAS_SOL_FOUND :
                                  VBC_PRUNED);
            }
            /* Compact the per-child arrays: overwrite slot i with the last
             * child's data and re-run this index (i--).  *keep is remapped
             * if it pointed at the moved slot. */
            if (--child_num == 0){
               *keep = -1;
               return(DO_NOT_DIVE);
            }
            if (*keep == child_num) *keep = i;
#ifdef TRACE_PATH
            if (optimal_path == child_num) optimal_path = i;
#endif
            action[i] = action[child_num];
            objval[i] = objval[child_num];
            feasible[i--] = feasible[child_num];
            continue;
         }
      }else{
         child->node_status = NODE_STATUS__CANDIDATE;
         /* child->lp = child->cg = 0;  zeroed out by calloc */
         child->cp = node->cp;
      }
#ifdef DO_TESTS
      /* Sanity: lower bounds must be monotone along a branch-and-bound path. */
      if (child->lower_bound < child->parent->lower_bound - .01){
         printf("#######Error: Child's lower bound (%.3f) is less than ",
                child->lower_bound);
         printf("parent's (%.3f)\n", child->parent->lower_bound);
      }
      if (child->lower_bound < tm->rootnode->lower_bound - .01){
         printf("#######Error: Node's lower bound (%.3f) is less than ",
                child->lower_bound);
         printf("root's (%.3f)\n", tm->rootnode->lower_bound);
      }
#endif
      /* child->children = NULL;  zeroed out by calloc */
      /* child->child_num = 0;  zeroed out by calloc */
      /* child->died = 0;  zeroed out by calloc */
      desc = &child->desc;
      /* all this is set by calloc
       * desc->uind.type = 0;  WRT_PARENT and no change
       * desc->uind.size = 0;
       * desc->uind.added = 0;
       * desc->uind.list = NULL;
       * desc->not_fixed.type = 0;  WRT_PARENT and no change
       * desc->not_fixed.size = 0;
       * desc->not_fixed.added = 0;
       * desc->not_fixed.list = NULL;
       * desc->cutind.type = 0;  WRT_PARENT and no change
       * desc->cutind.size = 0;
       * desc->cutind.added = 0;
       * desc->cutind.list = NULL;
       * desc->basis.basis_exists = FALSE;  This has to be validated!!!
       * desc->basis.{[base,extra][rows,vars]}
                 .type = 0;  WRT_PARENT and no change
                 .size = 0;
                 .list = NULL;
                 .stat = NULL;
       */
      if (node->desc.basis.basis_exists){
         desc->basis.basis_exists = TRUE;
      }

      /* If we have a non-base, new branching cut then few more things
         might have to be fixed */
      if (new_branching_cut && bobj->name >= 0){
         /* Fix cutind and the basis description */
         desc->cutind.size = 1;
         desc->cutind.added = 1;
         desc->cutind.list = (int *) malloc(ISIZE);
         desc->cutind.list[0] = bobj->name;
         if (desc->basis.basis_exists){
            desc->basis.extrarows.size = 1;
            desc->basis.extrarows.list = (int *) malloc(ISIZE);
            desc->basis.extrarows.list[0] = bobj->name;
            desc->basis.extrarows.stat = (int *) malloc(ISIZE);
            desc->basis.extrarows.stat[0] = SLACK_BASIC;
         }
      }
      desc->desc_size = node->desc.desc_size;
      desc->desc = node->desc.desc;
      desc->nf_status = node->desc.nf_status;

#ifdef SENSITIVITY_ANALYSIS
      if (tm->par.sensitivity_analysis &&
          action[i] != PRUNE_THIS_CHILD_INFEASIBLE){
         /* Take ownership of the duals from bobj (pointer is nulled so the
          * branching object does not free them). */
         child->duals = bobj->duals[i];
         bobj->duals[i] = 0;
      }
#endif

      if (child->node_status != NODE_STATUS__PRUNED && feasible[i]){
         if(tm->par.keep_description_of_pruned == KEEP_IN_MEMORY){
            /* Steal the solution arrays from bobj (ownership transfer). */
            child->sol_size = bobj->sol_sizes[i];
            child->sol_ind = bobj->sol_inds[i];
            bobj->sol_inds[i]=0;
            child->sol = bobj->solutions[i];
            bobj->solutions[i] = 0;
            child->feasibility_status = NOT_PRUNED_HAS_CAN_SOLUTION;
         }
      }

      if (child->node_status == NODE_STATUS__PRUNED){

         if(tm->par.keep_description_of_pruned == KEEP_IN_MEMORY){
            child->feasibility_status = OVER_UB_PRUNED;
            if (feasible[i]){
               child->sol_size = bobj->sol_sizes[i];
               child->sol_ind = bobj->sol_inds[i];
               bobj->sol_inds[i] = 0;
               child->sol = bobj->solutions[i];
               bobj->solutions[i] = 0;
               child->feasibility_status = FEASIBLE_PRUNED;
            }
            if (action[i] == PRUNE_THIS_CHILD_INFEASIBLE){
               child->feasibility_status = INFEASIBLE_PRUNED;
            }
         }

#ifdef TRACE_PATH
         if (child->optimal_path){
            printf("\n\nAttempting to prune the optimal path!!!!!!!!!\n\n");
            sleep(600);
            if (tm->par.logging){
               write_tm_info(tm, tm->par.tree_log_file_name, NULL, FALSE);
               write_subtree(tm->rootnode, tm->par.tree_log_file_name, NULL,
                             TRUE, tm->par.logging);
               write_tm_cut_list(tm, tm->par.cut_log_file_name, FALSE);
            }
            exit(1);
         }
#endif
         if (tm->par.keep_description_of_pruned == KEEP_ON_DISK_FULL ||
             tm->par.keep_description_of_pruned == KEEP_ON_DISK_VBC_TOOL){
#pragma omp critical (write_pruned_node_file)
            write_pruned_nodes(tm, child);
#pragma omp critical (tree_update)
            if (tm->par.vbc_emulation== VBC_EMULATION_FILE_NEW) {
               int vbc_node_pr_reason;
               switch (action[i]) {
                case PRUNE_THIS_CHILD_INFEASIBLE:
                  vbc_node_pr_reason = VBC_PRUNED_INFEASIBLE;
                  break;
                case PRUNE_THIS_CHILD_FATHOMABLE:
                  vbc_node_pr_reason = VBC_PRUNED_FATHOMED;
                  break;
                default:
                  vbc_node_pr_reason = VBC_PRUNED;
               }
               /* following is no longer needed because this care is taken
                * care of in install_new_ub
                */
               /*
               if (feasible[i]) {
                  vbc_node_pr_reason = VBC_FEAS_SOL_FOUND;
               }
               */
               purge_pruned_nodes(tm, child, vbc_node_pr_reason);
            } else {
               purge_pruned_nodes(tm, child, feasible[i] ? VBC_FEAS_SOL_FOUND :
                                  VBC_PRUNED);
            }
            /* Same compaction scheme as in the prune-on-creation branch. */
            if (--child_num == 0){
               *keep = -1;
               return(DO_NOT_DIVE);
            }
            if (*keep == child_num) *keep = i;
#ifdef TRACE_PATH
            if (optimal_path == child_num) optimal_path = i;
#endif
            action[i] = action[child_num];
            objval[i] = objval[child_num];
            feasible[i--] = feasible[child_num];
         }
         continue;
      }

      if (tm->phase == 0 &&
          !(tm->par.colgen_strat[0] & FATHOM__GENERATE_COLS__RESOLVE) &&
          (feasible[i] == LP_D_UNBOUNDED ||
           (tm->has_ub && tm->ub - tm->par.granularity < child->lower_bound))){
         /* it is kept for the next phase (==> do not dive) */
         if (*keep == i)
            dive = DO_NOT_DIVE;
         REALLOC(tm->nextphase_cand, bc_node *,
                 tm->nextphase_cand_size, tm->nextphase_candnum+1, BB_BUNCH);
         tm->nextphase_cand[tm->nextphase_candnum++] = child;
         np_cp++;
         np_sp++;
      }else{
         /* it will be processed in this phase (==> insert it if not kept) */
         if (*keep != i || dive == DO_NOT_DIVE){
#pragma omp critical (tree_update)
            insert_new_node(tm, child);
            np_cp++;
            np_sp++;
         }
      }
   }

   /* Credit the new nodes to the cut pool serving this subtree. */
   if (node->cp)
#ifdef COMPILE_IN_CP
      tm->nodes_per_cp[node->cp] += np_cp;
#else
      tm->nodes_per_cp[find_process_index(&tm->cp, node->cp)] += np_cp;
#endif

   return(dive);
}
  1308. /*===========================================================================*/
  1309. /*===========================================================================*\
  1310. * Determines whether or not the LP process should keep one of the
  1311. * children resulting from branching or whether it should get a new node
  1312. * from the candidate list.
  1313. \*===========================================================================*/
/*
 * Decide whether the LP process should keep processing a child node
 * ("dive") or pick a fresh node from the candidate list.  Returns FALSE /
 * DO_NOT_DIVE when a global limit (time, node count, gap) is hit or the
 * diving strategy rejects the child's bound, CHECK_BEFORE_DIVE when diving
 * is tentatively allowed, and DO_DIVE only for an unknown strategy.
 */
char shall_we_dive(tm_prob *tm, double objval)
{
   char dive;
   int i, k;
   double rand_num, average_lb;
   double cutoff = 0;
   double etol = 1e-3;   /* tolerance guarding against division by ~0 */

   /* Global stopping criteria: never dive once a limit has been reached. */
   if (tm->par.time_limit >= 0.0 &&
       wall_clock(NULL) - tm->start_time >= tm->par.time_limit){
      return(FALSE);
   }

   if (tm->par.node_limit >= 0 && tm->stat.analyzed >= tm->par.node_limit){
      return(FALSE);
   }

   if (tm->has_ub && (tm->par.gap_limit >= 0.0)){
      find_tree_lb(tm);
      if (100*(tm->ub-tm->lb)/(fabs(tm->ub)+etol) <= tm->par.gap_limit){
         return(FALSE);
      }
   }

   /* With probability unconditional_dive_frac, dive without any bound test. */
   rand_num = ((double)(RANDOM()))/((double)(MAXINT));

   if (tm->par.unconditional_dive_frac > 1 - rand_num){
      dive = CHECK_BEFORE_DIVE;
   }else{
      switch(tm->par.diving_strategy){
       case BEST_ESTIMATE:
         if (tm->has_ub_estimate){
            if (objval > tm->ub_estimate){
               dive = DO_NOT_DIVE;
               tm->stat.diving_halts++;
            }else{
               dive = CHECK_BEFORE_DIVE;
            }
            break;
         }
         /* fallthrough: without an upper-bound estimate, fall back to the
          * COMP_BEST_K comparison below */
       case COMP_BEST_K:
         /* Compare the child's bound against the average bound of the best
          * diving_k candidates (heap slots 1..k, slot 0 unused). */
         average_lb = 0;
#pragma omp critical (tree_update)
         for (k = 0, i = MIN(tm->samephase_candnum, tm->par.diving_k);
              i > 0; i--)
            if (tm->samephase_cand[i]->lower_bound < MAXDOUBLE/2){
               average_lb += tm->samephase_cand[i]->lower_bound;
               k++;
            }
         if (k){
            average_lb /= k;
         }else{
            dive = CHECK_BEFORE_DIVE;
            break;
         }
         /* Clamp near-zero values to +/-etol so the ratio below is defined.
          * NOTE(review): objval is only clamped when average_lb is also
          * near zero -- presumably intentional, since only the ratio
          * objval/average_lb is used. */
         if (fabs(average_lb) < etol) {
            average_lb = (average_lb > 0) ? etol : -etol;
            if (fabs(objval) < etol) {
               objval = (objval > 0) ? etol : -etol;
            }
         }
         if (fabs((objval/average_lb)-1) > tm->par.diving_threshold){
            dive = DO_NOT_DIVE;
            tm->stat.diving_halts++;
         }else{
            dive = CHECK_BEFORE_DIVE;
         }
         break;
       case COMP_BEST_K_GAP:
         /* Same best-k average, but the allowed slack is a fraction of the
          * current gap (or of the average bound when no UB exists). */
         average_lb = 0;
         for (k = 0, i = MIN(tm->samephase_candnum, tm->par.diving_k);
              i > 0; i--)
            if (tm->samephase_cand[i]->lower_bound < MAXDOUBLE/2){
               average_lb += tm->samephase_cand[i]->lower_bound;
               k++;
            }
         if (k){
            average_lb /= k;
         }else{
            dive = CHECK_BEFORE_DIVE;
            break;
         }
         if (tm->has_ub)
            cutoff = tm->par.diving_threshold*(tm->ub - average_lb);
         else
            cutoff = (1 + tm->par.diving_threshold)*average_lb;
         if (objval > average_lb + cutoff){
            dive = DO_NOT_DIVE;
            tm->stat.diving_halts++;
         }else{
            dive = CHECK_BEFORE_DIVE;
         }
         break;
       default:
         printf("Unknown diving strategy -- diving by default\n");
         dive = DO_DIVE;
         break;
      }
   }
   return(dive);
}
  1410. /*===========================================================================*/
  1411. /*===========================================================================*\
  1412. * This routine is entirely for saving memory. If there is no need to
  1413. * keep the description of the pruned nodes in memory, they are freed as
  1414. * soon as they are no longer needed. This can set off a chain reaction
  1415. * of other nodes that are no longer needed.
  1416. \*===========================================================================*/
  1417. int purge_pruned_nodes(tm_prob *tm, bc_node *node, int category)
  1418. {
  1419. int i, new_child_num;
  1420. branch_obj *bobj = &node->parent->bobj;
  1421. char reason[30];
  1422. char branch_dir = 'M';
  1423. if (tm->par.vbc_emulation != VBC_EMULATION_FILE_NEW &&
  1424. (category == VBC_PRUNED_INFEASIBLE || category == VBC_PRUNED_FATHOMED
  1425. || category == VBC_IGNORE)) {
  1426. printf("Error in purge_pruned_nodes.");
  1427. printf("category refers to VBC_EMULATION_FILE_NEW");
  1428. printf("when it is not used.\n");
  1429. exit(456);
  1430. }
  1431. if (tm->par.vbc_emulation == VBC_EMULATION_FILE_NEW) {
  1432. switch (category) {
  1433. case VBC_PRUNED_INFEASIBLE:
  1434. sprintf(reason,"%s","infeasible");
  1435. sprintf(reason,"%s %i %i",reason, node->bc_index+1,
  1436. node->parent->bc_index+1);
  1437. if (node->bc_index>0) {
  1438. if (node->parent->children[0]==node) {
  1439. branch_dir = node->parent->bobj.sense[0];
  1440. /*branch_dir = 'L';*/
  1441. } else {
  1442. branch_dir = node->parent->bobj.sense[1];
  1443. /*branch_dir = 'R';*/
  1444. }
  1445. if (branch_dir == 'G') {
  1446. branch_dir = 'R';
  1447. }
  1448. }
  1449. sprintf(reason,"%s %c %s", reason, branch_dir, "\n");
  1450. break;
  1451. case VBC_PRUNED_FATHOMED:
  1452. sprintf(reason,"%s","fathomed");
  1453. sprintf(reason,"%s %i %i",reason, node->bc_index+1,
  1454. node->parent->bc_index+1);
  1455. if (node->bc_index>0) {
  1456. if (node->parent->children[0]==node) {
  1457. branch_dir = node->parent->bobj.sense[0];
  1458. /*branch_dir = 'L';*/
  1459. } else {
  1460. branch_dir = node->parent->bobj.sense[1];
  1461. /*branch_dir = 'R';*/
  1462. }
  1463. if (branch_dir == 'G') {
  1464. branch_dir = 'R';
  1465. }
  1466. }
  1467. sprintf(reason,"%s %c %s", reason, branch_dir, "\…

Large files files are truncated, but you can click here to view the full file