CSC/ECE 506 Spring 2011/ch4a bm

== Message Passing ==
The following Fortran code implements Gaussian elimination via message passing, using MPI (see McGinn and Shaw [1]). In each iteration of the main loop, the root process recomputes how the remaining rows are partitioned, the pivot equation is broadcast to all processes, the matrix and right-hand side are scattered, each process performs forward elimination on its local chunk, and the results are gathered back to the root.
 ! main loop: one iteration per pivot row
 do pivot = 1, n-1
    ! root maintains communication
    if (my_rank .eq. 0) then
       ! adjust the chunk size
       if (MOD(pivot, p) .eq. 0) then
          chunk = chunk - n
       endif

       ! calculate chunk vectors
       rem = MOD((n**2 - (n*pivot)), chunk)
       tmp = 0
       do i = 1, p
          tmp = tmp + chunk
          if (tmp .le. (n**2 - (n*pivot))) then
             a_chnk_vec(i) = chunk
             b_chnk_vec(i) = chunk / n
          else
             a_chnk_vec(i) = rem
             b_chnk_vec(i) = rem / n
             rem = 0
          endif
       end do

       ! calculate displacement vectors
       a_disp_vec(1) = (pivot*n)
       b_disp_vec(1) = pivot
       do i = 2, p
          a_disp_vec(i) = a_disp_vec(i-1) + a_chnk_vec(i-1)
          b_disp_vec(i) = b_disp_vec(i-1) + b_chnk_vec(i-1)
       end do

       ! fetch the pivot equation
       do i = 1, n
          pivot_eqn(i) = a(n-(i-1), pivot)
       end do

       pivot_b = b(pivot)
    endif ! my_rank .eq. 0

    ! distribute the pivot equation
    call MPI_BCAST(pivot_eqn, n,                  &
                   MPI_DOUBLE_PRECISION,          &
                   root, MPI_COMM_WORLD, ierr)

    call MPI_BCAST(pivot_b, 1,                    &
                   MPI_DOUBLE_PRECISION,          &
                   root, MPI_COMM_WORLD, ierr)

    ! distribute the chunk vector
    call MPI_SCATTER(a_chnk_vec, 1, MPI_INTEGER,  &
                     chunk, 1, MPI_INTEGER,       &
                     root, MPI_COMM_WORLD, ierr)

    ! distribute the data
    call MPI_SCATTERV(a, a_chnk_vec, a_disp_vec,  &
                      MPI_DOUBLE_PRECISION,       &
                      local_a, chunk,             &
                      MPI_DOUBLE_PRECISION,       &
                      root, MPI_COMM_WORLD, ierr)

    call MPI_SCATTERV(b, b_chnk_vec, b_disp_vec,  &
                      MPI_DOUBLE_PRECISION,       &
                      local_b, chunk/n,           &
                      MPI_DOUBLE_PRECISION,       &
                      root, MPI_COMM_WORLD, ierr)

    ! forward elimination on the local chunk
    do j = 1, (chunk/n)
       xmult = local_a((n-(pivot-1)), j) / pivot_eqn(pivot)
       do i = (n-pivot), 1, -1
          local_a(i,j) = local_a(i,j) - (xmult * pivot_eqn(n-(i-1)))
       end do

       local_b(j) = local_b(j) - (xmult * pivot_b)
    end do

    ! restore the data to root
    call MPI_GATHERV(local_a, chunk,              &
                     MPI_DOUBLE_PRECISION,        &
                     a, a_chnk_vec, a_disp_vec,   &
                     MPI_DOUBLE_PRECISION,        &
                     root, MPI_COMM_WORLD, ierr)

    call MPI_GATHERV(local_b, chunk/n,            &
                     MPI_DOUBLE_PRECISION,        &
                     b, b_chnk_vec, b_disp_vec,   &
                     MPI_DOUBLE_PRECISION,        &
                     root, MPI_COMM_WORLD, ierr)
 end do ! end of main loop
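
The listing above shows only the main elimination loop; it assumes the usual MPI set-up and tear-down around it and that my_rank, p, root, chunk, and the arrays have already been declared and initialized. The sketch below is one possible way to provide that scaffolding; the problem size, the allocation sizes, and the initial chunk of n*n/p elements per process are assumptions made here for illustration, not part of the original code.

 ! minimal sketch of the surrounding set-up (assumed for illustration,
 ! not taken from the original listing)
 program gauss_mpi
    implicit none
    include 'mpif.h'
    integer, parameter :: n = 1024              ! assumed problem size
    integer :: my_rank, p, root, pivot, ierr
    integer :: chunk, rem, tmp, i, j
    integer, allocatable :: a_chnk_vec(:), b_chnk_vec(:)
    integer, allocatable :: a_disp_vec(:), b_disp_vec(:)
    double precision :: a(n,n), b(n), pivot_eqn(n), pivot_b, xmult
    double precision, allocatable :: local_a(:,:), local_b(:)

    call MPI_INIT(ierr)
    call MPI_COMM_RANK(MPI_COMM_WORLD, my_rank, ierr)
    call MPI_COMM_SIZE(MPI_COMM_WORLD, p, ierr)

    root  = 0
    chunk = (n*n) / p                           ! initial share of a per process
    allocate(a_chnk_vec(p), b_chnk_vec(p))
    allocate(a_disp_vec(p), b_disp_vec(p))
    allocate(local_a(n, n/p), local_b(n/p))     ! at most n/p columns per process

    if (my_rank .eq. root) then
       ! ... read or generate the coefficient matrix a and right-hand side b ...
    endif

    ! ... main elimination loop from the listing above goes here ...

    ! back substitution on root would follow once elimination is complete

    call MPI_FINALIZE(ierr)
 end program gauss_mpi

With this scaffolding in place the program can be built with an MPI Fortran wrapper such as mpif90 and launched with mpirun -np <processes>. Note that the fragment keeps all communication rooted at rank 0, which both coordinates the scatter/gather in every iteration and eliminates its own chunk of rows.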


= Definitions =

= References =

1. S.F. McGinn and R.E. Shaw, University of New Brunswick, "Parallel Gaussian Elimination Using OpenMP and MPI"
2. Ian Foster, Argonne National Laboratory, "Case Study: Gaussian Elimination"