/ass-2/mpi_dyn.c

https://github.com/ellbur/class-parallel-distributed · C · 154 lines · 121 code · 33 blank · 0 comment · 16 complexity · 37d4c0cfd29f3389b36ab6f3885b1dc2 MD5
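
Note: the listing depends on two project-local headers, fractal.h and measure.h, which are not shown on this page. Judging only from the call sites below, their interfaces presumably look roughly like the following sketch; this is a reconstruction for readability, and every declaration in it is an assumption, not the actual headers.

    /* fractal.h -- assumed shape, reconstructed from the call sites below */
    extern char *program;             /* set to argv[0] in main() */
    extern int width, height;         /* image dimensions in pixels */
    int calc_in(int x, int y);        /* assumed: 1 if pixel (x, y) is in the set, else 0 */

    /* measure.h -- assumed shape */
    void begin_computation(void);     /* assumed: start the master's wall-clock timer */
    void report_computation(void);    /* assumed: print total elapsed time */
    void begin_useful_work(void);     /* assumed: resume this rank's useful-work timer */
    void end_useful_work(void);       /* assumed: pause this rank's useful-work timer */
    void report_work(void);           /* assumed: print this rank's useful-work total */
    void print_hash(int len, char *data); /* assumed: print a digest of the image */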

#define _XOPEN_SOURCE // for crypt()

#include <mpi.h>
#include <complex.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "fractal.h"
#include "measure.h"
int num_children;

// One bit per pixel, so a row of `width` pixels packs into this many bytes.
#define buf_len ((width+7)/8)

static void collect_data(char *image_data);
static void start_row(
    MPI_Request *pending_requests,
    int *pending_rows,
    char *buf,
    int row,
    int child
);
static void collect_into_image(char *image_data, int row, char *buf);
static void send_shutdown(int child);
// Rank 0: time the computation, farm rows out to the workers, and
// hash the assembled image so runs can be checked against each other.
static void master_routine() {
    char image_data[width * height * 3];

    begin_computation();
    collect_data(image_data);
    print_hash(width*height*3, image_data);
    report_computation();

    MPI_Finalize();
}
// Dynamic load balancing: seed every child with one row, then hand a
// child its next row the moment its previous result arrives.
static void collect_data(char *image_data) {
    int buf_max_len = buf_len;
    char bufs[num_children][buf_max_len];
    MPI_Request pending_requests[num_children];
    int pending_rows[num_children];

    int sent_rows = 0;
    int completed_rows = 0;

    // Prime the pipeline with one row per child.
    for (int i=0; i<num_children; i++) {
        start_row(pending_requests, pending_rows, bufs[i], i, i);
        sent_rows++;
    }

    while (completed_rows < height) {
        int recvd;
        MPI_Waitany(num_children, pending_requests, &recvd, MPI_STATUS_IGNORE);

        if (recvd == MPI_UNDEFINED) {
            fprintf(stderr, "For some reason we received no more messages...\n");
            exit(1);
        }

        collect_into_image(image_data, pending_rows[recvd], bufs[recvd]);
        completed_rows++;

        // Reuse the finished child's buffer slot for its next row.
        if (sent_rows < height) {
            start_row(pending_requests, pending_rows, bufs[recvd], sent_rows, recvd);
            sent_rows++;
        }
    }

    for (int i=0; i<num_children; i++) {
        send_shutdown(i);
    }
}
// Assign `row` to `child`: send the row number, then post a nonblocking
// receive for the packed result. Worker ranks are offset by one because
// rank 0 is the master.
static void start_row(
    MPI_Request *pending_requests,
    int *pending_rows,
    char *buf,
    int row,
    int child
)
{
    MPI_Send(&row, 1, MPI_INT, child+1, 0, MPI_COMM_WORLD);
    MPI_Irecv(buf, buf_len, MPI_CHAR, child+1, 0,
        MPI_COMM_WORLD, pending_requests+child);
    pending_rows[child] = row;
}
// A row number of -1 is the sentinel that tells a child to exit.
static void send_shutdown(int child) {
    int msg = -1;
    MPI_Send(&msg, 1, MPI_INT, child+1, 0, MPI_COMM_WORLD);
}
// Unpack one row's bitmap into the RGB image. A set bit (pixel in the
// set) maps to 0x00 and a clear bit to 0xFF, via the in-1 wraparound.
static void collect_into_image(char *image_data, int row, char *buf) {
    for (int i=0; i<buf_len; i++)
        for (int j=0; j<8; j++) {
            int pix = row*width + i*8 + j;
            char in = (buf[i] >> j) & 1;
            image_data[3*pix+0] = in-1;
            image_data[3*pix+1] = in-1;
            image_data[3*pix+2] = in-1;
        }
}
// Worker loop: receive a row number, compute that row one bit per
// pixel, and send the packed bitmap back to the master. The
// begin/end_useful_work calls exclude time spent blocked on MPI.
static void child_routine(int proc) {
    begin_useful_work();
    while (true) {
        int row;
        MPI_Status status;

        end_useful_work();
        MPI_Recv(&row, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        if (row == -1) break;
        begin_useful_work();

        char buf[buf_len];
        for (int j=0; j<buf_len; j++) buf[j] = 0;
        for (int j=0; j<width; j++) {
            buf[j/8] |= calc_in(j, row) << (j % 8);
        }

        end_useful_work();
        MPI_Send(buf, buf_len, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
        begin_useful_work();
    }
    report_work();
    MPI_Finalize();
}
int main(int argc, char **argv) {
    int proc;
    int comm_size;

    program = argv[0];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &proc);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    num_children = comm_size - 1;

    if (proc == 0) {
        master_routine();
    }
    else child_routine(proc);

    return 0;
}
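
For reference, a minimal sketch of how a program like this is typically built and run, assuming an MPI toolchain such as Open MPI, that the helpers above live in fractal.c and measure.c, and that -lcrypt is needed for the crypt() mentioned at the top (all assumptions):

    mpicc -std=c99 -o mpi_dyn mpi_dyn.c fractal.c measure.c -lm -lcrypt
    mpirun -np 5 ./mpi_dyn

Since rank 0 only coordinates, -np N yields N-1 workers, so at least two ranks are needed before any rows get computed.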