x
Yes
No
Do you want to visit DriveHQ English website?
Inicio
Características
Precios
Prueba gratuita
Software cliente
Acerca de nosotros
Servidor de archivos
|
Solución de copias de seguridad
|
Servidor FTP
|
Servidor de correo electrónico
|
Alojamiento web
|
Software cliente
Servidor de archivos
Solución de copias de seguridad
Servidor FTP
Servidor de correo electrónico
Alojamiento web
Software cliente
all_to_all.hpp - Hosted on DriveHQ Cloud IT Platform
Arriba
Subir
Descargar
Compartir
Publicar
Nueva carpeta
Nuevo archivo
Copiar
Cortar
Eliminar
Pegar
Clasificación
Actualizar
Ruta de la carpeta: \\game3dprogramming\materials\GameFactory\GameFactoryDemo\references\boost_1_35_0\boost\mpi\collectives\all_to_all.hpp
Girar
Efecto
Propiedad
Historial
// Copyright (C) 2005, 2006 Douglas Gregor.

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Message Passing Interface 1.1 -- Section 4.8. All-to-all
#ifndef BOOST_MPI_ALL_TO_ALL_HPP
#define BOOST_MPI_ALL_TO_ALL_HPP

#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/assert.hpp>
#include <boost/mpi/collectives_fwd.hpp>
#include <boost/mpi/allocator.hpp>
namespace boost { namespace mpi { namespace detail { // We're performaing an all-to-all with a type that has an // associated MPI datatype, so we'll use MPI_Alltoall to do all of // the work. template
void all_to_all_impl(const communicator& comm, const T* in_values, int n, T* out_values, mpl::true_) { MPI_Datatype type = get_mpi_datatype
(*in_values); BOOST_MPI_CHECK_RESULT(MPI_Alltoall, (const_cast
(in_values), n, type, out_values, n, type, comm)); } // We're performing an all-to-all with a type that does not have an // associated MPI datatype, so we'll need to serialize // it. Unfortunately, this means that we cannot use MPI_Alltoall, so // we'll just have to send individual messages to the other // processes. template
void all_to_all_impl(const communicator& comm, const T* in_values, int n, T* out_values, mpl::false_) { int size = comm.size(); int rank = comm.rank(); // The amount of data to be sent to each process std::vector
send_sizes(size); // The displacements for each outgoing value. std::vector
send_disps(size); // The buffer that will store all of the outgoing values std::vector
> outgoing; // Pack the buffer with all of the outgoing values. for (int dest = 0; dest < size; ++dest) { // Keep track of the displacements send_disps[dest] = outgoing.size(); // Our own value will never be transmitted, so don't pack it. if (dest != rank) { packed_oarchive oa(comm, outgoing); for (int i = 0; i < n; ++i) oa << in_values[dest * n + i]; } // Keep track of the sizes send_sizes[dest] = outgoing.size() - send_disps[dest]; } // Determine how much data each process will receive. std::vector
recv_sizes(size); all_to_all(comm, send_sizes, recv_sizes); // Prepare a buffer to receive the incoming data. std::vector
recv_disps(size); int sum = 0; for (int src = 0; src < size; ++src) { recv_disps[src] = sum; sum += recv_sizes[src]; } std::vector
> incoming(sum > 0? sum : 1); // Make sure we don't try to reference an empty vector if (outgoing.empty()) outgoing.push_back(0); // Transmit the actual data BOOST_MPI_CHECK_RESULT(MPI_Alltoallv, (&outgoing[0], &send_sizes[0], &send_disps[0], MPI_PACKED, &incoming[0], &recv_sizes[0], &recv_disps[0], MPI_PACKED, comm)); // Deserialize data from the iarchive for (int src = 0; src < size; ++src) { if (src == rank) std::copy(in_values + src * n, in_values + (src + 1) * n, out_values + src * n); else { packed_iarchive ia(comm, incoming, boost::archive::no_header, recv_disps[src]); for (int i = 0; i < n; ++i) ia >> out_values[src * n + i]; } } } } // end namespace detail template
inline void all_to_all(const communicator& comm, const T* in_values, T* out_values) { detail::all_to_all_impl(comm, in_values, 1, out_values, is_mpi_datatype
()); } template
void all_to_all(const communicator& comm, const std::vector
& in_values, std::vector
& out_values) { BOOST_ASSERT((int)in_values.size() == comm.size()); out_values.resize(comm.size()); ::boost::mpi::all_to_all(comm, &in_values[0], &out_values[0]); } template
inline void all_to_all(const communicator& comm, const T* in_values, int n, T* out_values) { detail::all_to_all_impl(comm, in_values, n, out_values, is_mpi_datatype
()); } template
void all_to_all(const communicator& comm, const std::vector
& in_values, int n, std::vector
& out_values) { BOOST_ASSERT((int)in_values.size() == comm.size() * n); out_values.resize(comm.size() * n); ::boost::mpi::all_to_all(comm, &in_values[0], n, &out_values[0]); } } } // end namespace boost::mpi #endif // BOOST_MPI_ALL_TO_ALL_HPP
all_to_all.hpp
Dirección de la página
Dirección del archivo
Anterior
3/8
Siguiente
Descargar
( 5 KB )
Comments
Total ratings:
0
Average rating:
No clasificado
of 10
Would you like to comment?
Join now
, or
Logon
if you are already a member.