Skip to content

Commit

Permalink
Making inspect happy
Browse files Browse the repository at this point in the history
  • Loading branch information
hkaiser committed Aug 2, 2017
1 parent 0d234ed commit 6147af2
Show file tree
Hide file tree
Showing 20 changed files with 156 additions and 157 deletions.
5 changes: 3 additions & 2 deletions examples/resource_partitioner/shared_priority_scheduler.hpp
Expand Up @@ -4,8 +4,8 @@
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#if !defined(HPX_shared_priority_scheduler)
#define HPX_shared_priority_scheduler
#if !defined(HPX_SHARED_PRIORITY_SCHEDULER)
#define HPX_SHARED_PRIORITY_SCHEDULER

#include <hpx/config.hpp>
#include <hpx/compat/mutex.hpp>
Expand All @@ -27,6 +27,7 @@

#include <cstddef>
#include <cstdint>
#include <exception>
#include <memory>
#include <string>
#include <type_traits>
Expand Down
104 changes: 41 additions & 63 deletions examples/resource_partitioner/simple_resource_partitioner.cpp
Expand Up @@ -17,21 +17,26 @@
#include <hpx/include/runtime.hpp>
//
#include <cmath>
#include <cstddef>
#include <iostream>
#include <memory>
#include <set>
#include <utility>
//
#include "shared_priority_scheduler.hpp"
#include "system_characteristics.h"
#include "system_characteristics.hpp"

namespace resource { namespace pools
{
namespace resource {
namespace pools {
enum ids
{
DEFAULT = 0,
MPI = 1,
GPU = 2,
MATRIX = 3,
};
}}
}
}

static bool use_pools = false;
static bool use_scheduler = false;
Expand Down Expand Up @@ -119,16 +124,11 @@ int hpx_main(boost::program_options::variables_map& vm)
hpx::future<void> future_1 = hpx::async(mpi_executor, &do_stuff, 5, true);

hpx::future<void> future_2 = future_1.then(
mpi_executor,
[](hpx::future<void>&& f)
{
do_stuff(5, true);
});
mpi_executor, [](hpx::future<void>&& f) { do_stuff(5, true); });

hpx::future<void> future_3 = future_2.then(mpi_executor,
[mpi_executor, high_priority_executor, async_count](
hpx::future<void>&& f) mutable
{
hpx::future<void>&& f) mutable {
hpx::future<void> future_4, future_5;
for (std::size_t i = 0; i < async_count; i++)
{
Expand Down Expand Up @@ -156,12 +156,8 @@ int hpx_main(boost::program_options::variables_map& vm)
// test a parallel algorithm on custom pool with high priority
hpx::parallel::static_chunk_size fixed(1);
hpx::parallel::for_loop_strided(
hpx::parallel::execution::par
.with(fixed)
.on(high_priority_executor),
0, loop_count, 1,
[&](std::size_t i)
{
hpx::parallel::execution::par.with(fixed).on(high_priority_executor), 0,
loop_count, 1, [&](std::size_t i) {
std::lock_guard<hpx::lcos::local::mutex> lock(m);
if (thread_set.insert(std::this_thread::get_id()).second)
{
Expand All @@ -176,12 +172,8 @@ int hpx_main(boost::program_options::variables_map& vm)

// test a parallel algorithm on custom pool with normal priority
hpx::parallel::for_loop_strided(
hpx::parallel::execution::par
.with(fixed)
.on(normal_priority_executor),
0, loop_count, 1,
[&](std::size_t i)
{
hpx::parallel::execution::par.with(fixed).on(normal_priority_executor),
0, loop_count, 1, [&](std::size_t i) {
std::lock_guard<hpx::lcos::local::mutex> lock(m);
if (thread_set.insert(std::this_thread::get_id()).second)
{
Expand All @@ -197,12 +189,8 @@ int hpx_main(boost::program_options::variables_map& vm)

// test a parallel algorithm on mpi_executor
hpx::parallel::for_loop_strided(
hpx::parallel::execution::par
.with(fixed)
.on(mpi_executor),
0, loop_count, 1,
[&](std::size_t i)
{
hpx::parallel::execution::par.with(fixed).on(mpi_executor), 0,
loop_count, 1, [&](std::size_t i) {
std::lock_guard<hpx::lcos::local::mutex> lock(m);
if (thread_set.insert(std::this_thread::get_id()).second)
{
Expand All @@ -222,11 +210,9 @@ int hpx_main(boost::program_options::variables_map& vm)
// test a parallel algorithm on custom pool with high priority
hpx::parallel::for_loop_strided(
hpx::parallel::execution::par
.with(fixed/*, high_priority_async_policy*/)
.with(fixed /*, high_priority_async_policy*/)
.on(mpi_executor),
0, loop_count, 1,
[&](std::size_t i)
{
0, loop_count, 1, [&](std::size_t i) {
std::lock_guard<hpx::lcos::local::mutex> lock(m);
if (thread_set.insert(std::this_thread::get_id()).second)
{
Expand All @@ -249,13 +235,11 @@ int hpx_main(boost::program_options::variables_map& vm)
int main(int argc, char* argv[])
{
boost::program_options::options_description desc_cmdline("Test options");
desc_cmdline.add_options()
("use-pools,u", "Enable advanced HPX thread pools and executors")
("use-scheduler,s", "Enable custom priority scheduler")
("pool-threads,m",
boost::program_options::value<int>()->default_value(1),
"Number of threads to assign to custom pool")
;
desc_cmdline.add_options()(
"use-pools,u", "Enable advanced HPX thread pools and executors")(
"use-scheduler,s", "Enable custom priority scheduler")("pool-threads,m",
boost::program_options::value<int>()->default_value(1),
"Number of threads to assign to custom pool");

// HPX uses a boost program options variable map, but we need it before
// hpx-main, so we will create another one here and throw it away after use
Expand Down Expand Up @@ -285,28 +269,25 @@ int main(int argc, char* argv[])

    // create a thread pool and supply a lambda that returns a new pool with
    // a user-supplied scheduler attached
rp.create_thread_pool(
"default",
rp.create_thread_pool("default",
[](hpx::threads::policies::callback_notifier& notifier,
std::size_t num_threads, std::size_t thread_offset,
std::size_t pool_index, char const* pool_name)
-> std::unique_ptr<hpx::threads::detail::thread_pool>
{
std::size_t num_threads, std::size_t thread_offset,
std::size_t pool_index, char const* pool_name)
-> std::unique_ptr<hpx::threads::detail::thread_pool> {
std::cout << "User defined scheduler creation callback "
<< std::endl;

std::unique_ptr<high_priority_sched> scheduler(
new high_priority_sched(num_threads, 1,
false, false, "shared-priority-scheduler"));
new high_priority_sched(
num_threads, 1, false, false, "shared-priority-scheduler"));

auto mode = scheduler_mode(scheduler_mode::do_background_work |
scheduler_mode::delay_exit);

std::unique_ptr<hpx::threads::detail::thread_pool> pool(
new hpx::threads::detail::thread_pool_impl<
high_priority_sched
>(std::move(scheduler), notifier, pool_index, pool_name,
mode, thread_offset));
new hpx::threads::detail::thread_pool_impl<high_priority_sched>(
std::move(scheduler), notifier, pool_index, pool_name, mode,
thread_offset));
return pool;
});

Expand All @@ -318,26 +299,23 @@ int main(int argc, char* argv[])

    // create a thread pool and supply a lambda that returns a new pool with
    // a user-supplied scheduler attached
rp.create_thread_pool(
"mpi",
rp.create_thread_pool("mpi",
[](hpx::threads::policies::callback_notifier& notifier,
std::size_t num_threads, std::size_t thread_offset,
std::size_t pool_index, char const* pool_name)
-> std::unique_ptr<hpx::threads::detail::thread_pool>
{
std::size_t num_threads, std::size_t thread_offset,
std::size_t pool_index, char const* pool_name)
-> std::unique_ptr<hpx::threads::detail::thread_pool> {
std::cout << "User defined scheduler creation callback "
<< std::endl;
std::unique_ptr<high_priority_sched> scheduler(
new high_priority_sched(num_threads, 1,
false, false, "shared-priority-scheduler"));
new high_priority_sched(num_threads, 1, false, false,
"shared-priority-scheduler"));

auto mode = scheduler_mode(scheduler_mode::delay_exit);

std::unique_ptr<hpx::threads::detail::thread_pool> pool(
new hpx::threads::detail::thread_pool_impl<
high_priority_sched
>(std::move(scheduler), notifier, pool_index,
pool_name, mode, thread_offset));
high_priority_sched>(std::move(scheduler), notifier,
pool_index, pool_name, mode, thread_offset));
return pool;
});

Expand Down
Expand Up @@ -10,6 +10,8 @@
#include <hpx/runtime/threads/threadmanager.hpp>
#include <hpx/runtime_impl.hpp>

#include <iostream>

void print_system_characteristics()
{
std::cout << "[hpx-main] System queries: \n\n";
Expand Down Expand Up @@ -48,26 +50,6 @@ void print_system_characteristics()
<< "\n"
<< "command line : " << cfg.get_cmd_line() << "\n\n";

//! -------------------------------------- affinity data
/*
std::size_t num_of_affinity_masks(affdat_ptr->affinity_masks_.size());
unsigned long num_of_pu_nums(affdat_ptr->pu_nums_.size());
std::cout << "[Affinity Data]\n"
<< "number of threads : " << affdat_ptr->num_threads_ << "\n"
<< "affinity domain : " << affdat_ptr->affinity_domain_ << "\n"
<< "number of pu_nums_ : " << num_of_pu_nums << "\n"
<< "number of aff. masks : " << num_of_affinity_masks << "\n"
<< "affinity masks : " << "\n";
for(std::size_t i(0); i<num_of_affinity_masks; i++){
std::cout << " " << std::bitset<8>(affdat_ptr->affinity_masks_[i]) << "\n";
}
*/
/* std::cout << "pu_nums : " << "\n";
for(unsigned long i(0); i<num_of_pu_nums; i++){
std::cout << " " << std::bitset<8>(affdat_ptr->pu_nums_[i]) << ", " << affdat_ptr->pu_nums_[i] << "\n";
}*/

//! -------------------------------------- topology
topo.print_hwloc(std::cout);
}
Expand Down
1 change: 1 addition & 0 deletions hpx/runtime/resource_partitioner.hpp
Expand Up @@ -27,6 +27,7 @@
#include <algorithm>
#include <cstddef>
#include <iosfwd>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>
Expand Down
11 changes: 9 additions & 2 deletions hpx/runtime/threads/detail/thread_pool_impl.hpp
Expand Up @@ -17,7 +17,14 @@
#include <hpx/util/high_resolution_clock.hpp>
#include <hpx/util/unlock_guard.hpp>

#include <cstddef>
#include <cstdint>
#include <exception>
#include <functional>
#include <iosfwd>
#include <memory>
#include <utility>
#include <vector>

namespace hpx { namespace threads { namespace detail
{
Expand Down Expand Up @@ -620,8 +627,8 @@ namespace hpx { namespace threads { namespace detail
catch (std::exception const& e)
{
// Repackage exceptions to avoid slicing.
boost::throw_exception(boost::enable_error_info(
hpx::exception(unhandled_exception, e.what())));
hpx::throw_with_info(
hpx::exception(unhandled_exception, e.what()));
}
}
catch (...)
Expand Down
2 changes: 2 additions & 0 deletions hpx/runtime/threads/executors/customized_pool_executors.hpp
Expand Up @@ -13,6 +13,8 @@
#include <hpx/util/thread_description.hpp>
#include <hpx/util/unique_function.hpp>

#include <cstddef>
#include <cstdint>
#include <string>

#include <hpx/config/warnings_prefix.hpp>
Expand Down
9 changes: 2 additions & 7 deletions hpx/runtime/threads/executors/thread_pool_executors.hpp
Expand Up @@ -79,7 +79,8 @@ namespace hpx { namespace threads { namespace executors
mask_cref_type get_pu_mask(topology const& topology,
std::size_t num_thread) const
{
return hpx::get_resource_partitioner().get_pu_mask(num_thread, scheduler_.numa_sensitive());
return hpx::get_resource_partitioner().get_pu_mask(
num_thread, scheduler_.numa_sensitive());
}

/// Set the new scheduler mode
Expand Down Expand Up @@ -169,7 +170,6 @@ namespace hpx { namespace threads { namespace executors
};
#endif


struct HPX_EXPORT local_priority_queue_executor : public scheduled_executor
{
local_priority_queue_executor();
Expand All @@ -188,7 +188,6 @@ namespace hpx { namespace threads { namespace executors
};
#endif


#if defined(HPX_HAVE_THROTTLING_SCHEDULER)
struct HPX_EXPORT throttling_executor : public scheduled_executor
{
Expand All @@ -198,10 +197,6 @@ namespace hpx { namespace threads { namespace executors
std::size_t min_punits = 1);
};
#endif




}}}

#include <hpx/config/warnings_suffix.hpp>
Expand Down
1 change: 1 addition & 0 deletions hpx/runtime/threads/executors/thread_pool_os_executors.hpp
Expand Up @@ -22,6 +22,7 @@
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>

#include <hpx/config/warnings_prefix.hpp>
Expand Down
16 changes: 8 additions & 8 deletions hpx/runtime/threads/policies/hwloc_topology_info.hpp
Expand Up @@ -25,6 +25,7 @@

#include <cstddef>
#include <iosfwd>
#include <string>
#include <vector>

#if defined(HPX_NATIVE_MIC) && HWLOC_API_VERSION < 0x00010600
Expand All @@ -33,17 +34,15 @@

#include <hpx/config/warnings_prefix.hpp>

namespace hpx { namespace resource {
class resource_partitioner;
}
}
namespace hpx { namespace resource
{
class resource_partitioner;
}}

namespace hpx { namespace threads
{

struct HPX_EXPORT hwloc_topology_info : topology
{

friend resource::resource_partitioner;

hwloc_topology_info();
Expand Down Expand Up @@ -279,8 +278,9 @@ namespace hpx { namespace threads
// - Elements of the vector:
// Bitmasks of length equal to the number of PUs of the machine.
// The bitmasks indicate which PUs belong to which resource.
// For example, core_affinity_masks[0] is a bitmask, where the elements = 1
// indicate the PUs that belong to the core on which PU #0 (zero-based index) lies.
// For example, core_affinity_masks[0] is a bitmask, where the
// elements = 1 indicate the PUs that belong to the core on which
// PU #0 (zero-based index) lies.
mask_type machine_affinity_mask_;
std::vector<mask_type> socket_affinity_masks_;
std::vector<mask_type> numa_node_affinity_masks_;
Expand Down

0 comments on commit 6147af2

Please sign in to comment.