Template Numerical Library version main:4e6e2c1
Loading...
Searching...
No Matches
TNL::Graphs Namespace Reference

Graph algorithms and data structures. More...

Classes

struct  DirectedGraph
 Type tag for directed graphs. More...
struct  Edge
 Represents a weighted edge in a graph. More...
struct  Graph
 Graph class represents a mathematical graph using an adjacency matrix. More...
struct  GraphBase
 Base class for graphs represented by an adjacency matrix. More...
struct  GraphOrientation
 Template structure for specifying graph orientation (directed or undirected). More...
struct  GraphVertexView
 View type for accessing individual graph vertices and their edges. More...
struct  GraphVertexView< Matrices::DenseMatrixView< Real, Device, Index, Organization >, GraphType_ >
 Specialization of GraphVertexView for dense adjacency matrices. More...
struct  GraphVertexView< Matrices::SparseMatrixView< Real, Device, Index, MatrixType_, SegmentsView, ComputeRealType >, GraphType_ >
 Specialization of GraphVertexView for sparse adjacency matrices. More...
struct  GraphView
 View type for Graph class. More...
struct  UndirectedGraph
 Type tag for undirected graphs. More...

Functions

template<typename Graph, typename Function>
void forAllEdges (const Graph &graph, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges of all graph vertices of constant graph and applies the specified lambda function.
template<typename Graph, typename Function>
void forAllEdges (Graph &graph, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges of all graph vertices and applies the specified lambda function.
template<typename Graph, typename Condition, typename Function>
void forAllEdgesIf (const Graph &graph, Condition &&condition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges of all graph vertices based on a condition.
template<typename Graph, typename Condition, typename Function>
void forAllEdgesIf (Graph &graph, Condition &&condition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges of all graph vertices based on a condition.
template<typename Graph, typename Function>
void forAllVertices (const Graph &graph, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all graph vertices and applies the given lambda function to each vertex. This function is for constant matrices.
template<typename Graph, typename Function>
void forAllVertices (Graph &graph, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all graph vertices and applies the given lambda function to each vertex.
template<typename Graph, typename VertexCondition, typename Function>
void forAllVerticesIf (const Graph &graph, VertexCondition &&vertexCondition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all graph vertices, applying a condition to determine whether each vertex should be processed. This function is for constant matrices.
template<typename Graph, typename VertexCondition, typename Function>
void forAllVerticesIf (Graph &graph, VertexCondition &&vertexCondition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all graph vertices, applying a condition to determine whether each vertex should be processed.
template<typename Graph, typename Array, typename Function>
void forEdges (const Graph &graph, const Array &vertexIndexes, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges of graph vertices with the given indexes and applies the specified lambda function. This function is for constant matrices.
template<typename Graph, typename Array, typename IndexBegin, typename IndexEnd, typename Function>
void forEdges (const Graph &graph, const Array &vertexIndexes, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges of graph vertices with the given indexes and applies the specified lambda function. This function is for constant matrices.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Function>
void forEdges (const Graph &graph, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges of constant graph in the given range of graph vertices and applies the specified lambda function.
template<typename Graph, typename Array, typename Function>
void forEdges (Graph &graph, const Array &vertexIndexes, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges of graph vertices with the given indexes and applies the specified lambda function.
template<typename Graph, typename Array, typename IndexBegin, typename IndexEnd, typename Function>
void forEdges (Graph &graph, const Array &vertexIndexes, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges of graph vertices with the given indexes and applies the specified lambda function.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Function>
void forEdges (Graph &graph, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges in the given range of graph vertices and applies the specified lambda function.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Function>
void forEdgesIf (const Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges in a given range of vertices based on a condition. This function is for constant matrices.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Function>
void forEdgesIf (Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over all edges in a given range of vertices based on a condition.
template<typename Graph, typename Array, typename Function, typename T = std::enable_if_t< IsArrayType< Array >::value >>
void forVertices (const Graph &graph, const Array &vertexIndexes, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over graph vertices with the given indexes and applies the specified lambda function to each vertex. This function is for constant matrices.
template<typename Graph, typename Array, typename IndexBegin, typename IndexEnd, typename Function, typename T = std::enable_if_t< IsArrayType< Array >::value && std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void forVertices (const Graph &graph, const Array &vertexIndexes, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over graph vertices with the given indexes and applies the specified lambda function to each vertex. This function is for constant matrices.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Function, typename T = std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void forVertices (const Graph &graph, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over graph vertices within the specified range of vertex indexes and applies the given lambda function to each vertex. This function is for constant matrices.
template<typename Graph, typename Array, typename Function, typename T = std::enable_if_t< IsArrayType< Array >::value >>
void forVertices (Graph &graph, const Array &vertexIndexes, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over graph vertices with the given indexes and applies the specified lambda function to each vertex.
template<typename Graph, typename Array, typename IndexBegin, typename IndexEnd, typename Function, typename T = std::enable_if_t< IsArrayType< Array >::value && std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void forVertices (Graph &graph, const Array &vertexIndexes, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over graph vertices with the given indexes and applies the specified lambda function to each vertex.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Function, typename T = std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void forVertices (Graph &graph, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over graph vertices within the specified range of vertex indexes and applies the given lambda function to each vertex.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename VertexCondition, typename Function, typename T = std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void forVerticesIf (const Graph &graph, IndexBegin begin, IndexEnd end, VertexCondition &&vertexCondition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over vertices within the given range of vertex indexes, applying a condition to determine whether each vertex should be processed. This function is for constant matrices.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename VertexCondition, typename Function, typename T = std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void forVerticesIf (Graph &graph, IndexBegin begin, IndexEnd end, VertexCondition &&vertexCondition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Iterates in parallel over vertices within the given range of vertex indexes, applying a condition to determine whether each vertex should be processed.
template<typename Graph>
Graph::ValueType getTotalWeight (const Graph &graph)
 Computes the total weight of all edges in the graph.
constexpr std::false_type isGraph (...)
 This checks if a given type is a graph.
template<typename Value, typename Device, typename Index, typename Orientation, template< typename, typename, typename > class Segments, typename AdjacencyMatrix>
constexpr std::true_type isGraph (const Graph< Value, Device, Index, Orientation, Segments, AdjacencyMatrix > &)
template<typename Value, typename Device, typename Index, typename Orientation, typename AdjacencyMatrix>
File operator<< (File &&file, const GraphBase< Value, Device, Index, Orientation, AdjacencyMatrix > &graph)
 Serialization of graphs into binary files.
template<typename Value, typename Device, typename Index, typename Orientation, typename AdjacencyMatrix>
File operator<< (File &file, const GraphBase< Value, Device, Index, Orientation, AdjacencyMatrix > &graph)
 Serialization of graphs into binary files.
template<typename Real, typename Index>
std::ostream & operator<< (std::ostream &os, const Edge< Real, Index > &edge)
 Stream output operator for Edge.
template<typename Value, typename Device, typename Index, typename Orientation, typename AdjacencyMatrix>
std::ostream & operator<< (std::ostream &os, const GraphBase< Value, Device, Index, Orientation, AdjacencyMatrix > &graph)
 Output stream operator for the Graph class.
template<typename Value, typename Device, typename Index, typename Orientation, template< typename, typename, typename > class Segments, typename AdjacencyMatrix>
File operator>> (File &&file, Graph< Value, Device, Index, Orientation, Segments, AdjacencyMatrix > &graph)
 Deserialization of graphs from binary files.
template<typename Value, typename Device, typename Index, typename Orientation, template< typename, typename, typename > class Segments, typename AdjacencyMatrix>
File operator>> (File &file, Graph< Value, Device, Index, Orientation, Segments, AdjacencyMatrix > &graph)
 Deserialization of graphs from binary files.
template<typename Graph, typename Fetch, typename Reduction, typename Store, typename FetchValue>
void reduceAllVertices (const Graph &graph, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices (const version).
template<typename Graph, typename Fetch, typename Reduction, typename Store>
void reduceAllVertices (const Graph &graph, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices with automatic identity deduction (const version).
template<typename Graph, typename Fetch, typename Reduction, typename Store, typename FetchValue>
void reduceAllVertices (Graph &graph, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices.
template<typename Graph, typename Fetch, typename Reduction, typename Store>
void reduceAllVertices (Graph &graph, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices with automatic identity deduction.
template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue>
Graph::IndexType reduceAllVerticesIf (const Graph &graph, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices based on a condition (const version).
template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType reduceAllVerticesIf (const Graph &graph, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices based on a condition with automatic identity deduction (const version).
template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue>
Graph::IndexType reduceAllVerticesIf (Graph &graph, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices based on a condition.
template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType reduceAllVerticesIf (Graph &graph, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices based on a condition with automatic identity deduction.
template<typename Graph, typename Fetch, typename Reduction, typename Store, typename FetchValue>
void reduceAllVerticesWithArgument (const Graph &graph, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices while returning also the position of the edge of interest (const version).
template<typename Graph, typename Fetch, typename Reduction, typename Store>
void reduceAllVerticesWithArgument (const Graph &graph, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices while returning also the position of the edge of interest with automatic identity deduction (const version).
template<typename Graph, typename Fetch, typename Reduction, typename Store, typename FetchValue>
void reduceAllVerticesWithArgument (Graph &graph, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices while returning also the position of the edge of interest.
template<typename Graph, typename Fetch, typename Reduction, typename Store>
void reduceAllVerticesWithArgument (Graph &graph, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices while returning also the position of the edge of interest with automatic identity deduction.
template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue = decltype( std::declval< Fetch >()( 0, 0, std::declval< typename Graph::RealType >() ) )>
Graph::IndexType reduceAllVerticesWithArgumentIf (const Graph &graph, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices based on a condition while returning also the position of the edge of interest (const version).
template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType reduceAllVerticesWithArgumentIf (const Graph &graph, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices based on a condition while returning also the position of the edge of interest with automatic identity deduction (const version).
template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue = decltype( std::declval< Fetch >()( 0, 0, std::declval< typename Graph::RealType >() ) )>
Graph::IndexType reduceAllVerticesWithArgumentIf (Graph &graph, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices based on a condition while returning also the position of the edge of interest.
template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType reduceAllVerticesWithArgumentIf (Graph &graph, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over all vertices based on a condition while returning also the position of the edge of interest with automatic identity deduction.
template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void reduceVertices (const Graph &graph, const Array &vertexIndexes, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within graph vertices specified by a given set of vertex indexes (const version).
template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void reduceVertices (const Graph &graph, const Array &vertexIndexes, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within graph vertices specified by a given set of vertex indexes with automatic identity deduction (const version).
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void reduceVertices (const Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes (const version).
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void reduceVertices (const Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes with automatic identity deduction (const version).
template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void reduceVertices (Graph &graph, const Array &vertexIndexes, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within graph vertices specified by a given set of vertex indexes.
template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void reduceVertices (Graph &graph, const Array &vertexIndexes, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within graph vertices specified by a given set of vertex indexes with automatic identity deduction.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void reduceVertices (Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void reduceVertices (Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes with automatic identity deduction.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue>
Graph::IndexType reduceVerticesIf (const Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition (const version).
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType reduceVerticesIf (const Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition with automatic identity deduction (const version).
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue>
Graph::IndexType reduceVerticesIf (Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType reduceVerticesIf (Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition with automatic identity deduction.
template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void reduceVerticesWithArgument (const Graph &graph, const Array &vertexIndexes, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within graph vertices specified by a given set of vertex indexes while returning also the position of the edge of interest (const version).
template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void reduceVerticesWithArgument (const Graph &graph, const Array &vertexIndexes, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within graph vertices specified by a given set of vertex indexes while returning also the position of the edge of interest with automatic identity deduction (const version).
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void reduceVerticesWithArgument (const Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes while returning also the position of the edge of interest (const version).
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void reduceVerticesWithArgument (const Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes while returning also the position of the edge of interest with automatic identity deduction (const version).
template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void reduceVerticesWithArgument (Graph &graph, const Array &vertexIndexes, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within graph vertices specified by a given set of vertex indexes while returning also the position of the edge of interest.
template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void reduceVerticesWithArgument (Graph &graph, const Array &vertexIndexes, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within graph vertices specified by a given set of vertex indexes while returning also the position of the edge of interest with automatic identity deduction.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void reduceVerticesWithArgument (Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes while also returning the position of the edge of interest.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void reduceVerticesWithArgument (Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes while also returning the position of the edge of interest with automatic identity deduction.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue = decltype( std::declval< Fetch >()( 0, 0, std::declval< typename Graph::RealType >() ) )>
Graph::IndexType reduceVerticesWithArgumentIf (const Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition while also returning the position of the edge of interest (const version).
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType reduceVerticesWithArgumentIf (const Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition while also returning the position of the edge of interest with automatic identity deduction (const version).
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue = decltype( std::declval< Fetch >()( 0, 0, std::declval< typename Graph::RealType >() ) )>
Graph::IndexType reduceVerticesWithArgumentIf (Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition while also returning the position of the edge of interest.
template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType reduceVerticesWithArgumentIf (Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
 Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition while also returning the position of the edge of interest with automatic identity deduction.

Detailed Description

Graph algorithms and data structures.

Function Documentation

◆ forAllEdges() [1/2]

template<typename Graph, typename Function>
void TNL::Graphs::forAllEdges ( const Graph & graph,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges of all graph vertices of a constant graph and applies the specified lambda function.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
functionLambda function to be applied to each edge. See Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllEdgesExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Traverse edges in range [1, 4) and modify them.
32 */
33 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int& targetIdx, float& weight ) mutable
34 {
35 targetIdx = ( targetIdx + 1 ) % 5;
36 weight += 5;
37 };
38 TNL::Graphs::forEdges( graph, 1, 4, modifyEdge );
40
41 /***
42 * Print the modified graph.
43 */
44 std::cout << "Modified graph:\n" << graph << '\n';
45}
46
47int
48main( int argc, char* argv[] )
49{
50 std::cout << "Running on host:\n";
51 forAllEdgesExample< TNL::Devices::Host >();
52
53#ifdef __CUDACC__
54 std::cout << "Running on CUDA device:\n";
55 forAllEdgesExample< TNL::Devices::Cuda >();
56#endif
57
58 return EXIT_SUCCESS;
59}
#define __cuda_callable__
Definition Macros.h:49
void forEdges(Graph &graph, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
Iterates in parallel over all edges in the given range of graph vertices and applies the specified la...
Graph class represents a mathematical graph using an adjacency matrix.
Definition Graph.h:57
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forAllEdges() [2/2]

template<typename Graph, typename Function>
void TNL::Graphs::forAllEdges ( Graph & graph,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges of all graph vertices and applies the specified lambda function.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
functionLambda function to be applied to each edge. See Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllEdgesExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Traverse edges in range [1, 4) and modify them.
32 */
33 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int& targetIdx, float& weight ) mutable
34 {
35 targetIdx = ( targetIdx + 1 ) % 5;
36 weight += 5;
37 };
38 TNL::Graphs::forEdges( graph, 1, 4, modifyEdge );
40
41 /***
42 * Print the modified graph.
43 */
44 std::cout << "Modified graph:\n" << graph << '\n';
45}
46
47int
48main( int argc, char* argv[] )
49{
50 std::cout << "Running on host:\n";
51 forAllEdgesExample< TNL::Devices::Host >();
52
53#ifdef __CUDACC__
54 std::cout << "Running on CUDA device:\n";
55 forAllEdgesExample< TNL::Devices::Cuda >();
56#endif
57
58 return EXIT_SUCCESS;
59}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forAllEdgesIf() [1/2]

template<typename Graph, typename Condition, typename Function>
void TNL::Graphs::forAllEdgesIf ( const Graph & graph,
Condition && condition,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges of all graph vertices based on a condition.

See also: Overview of Graph Traversal Functions

This function is for constant graphs.

For each graph vertex, a condition lambda function is evaluated based on the vertex index. If the condition lambda function returns true, all edges of the vertex are traversed, and the specified lambda function is applied to each edge. If the condition lambda function returns false, the vertex is skipped.

Template Parameters
GraphThe type of the graph.
ConditionThe type of the condition lambda function.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
conditionLambda function to check vertex condition. See Condition Lambda.
functionLambda function to be applied to each edge. See Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllEdgesIfExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Define a condition: process only vertices with even indices.
32 */
33 auto condition = [] __cuda_callable__( int vertexIdx ) -> bool
34 {
35 return vertexIdx % 2 == 0;
36 };
38
40 /***
41 * Traverse edges only from vertices that satisfy the condition.
42 */
43 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int targetIdx, float weight ) mutable
44 {
45 targetIdx = ( targetIdx + 1 ) % 5;
46 weight += 5;
47 };
48 TNL::Graphs::forAllEdgesIf( graph, condition, modifyEdge );
50
51 /***
52 * Print the modified graph.
53 */
54 std::cout << "Modified graph:\n" << graph << '\n';
55}
56
57int
58main( int argc, char* argv[] )
59{
60 std::cout << "Running on host:\n";
61 forAllEdgesIfExample< TNL::Devices::Host >();
62
63#ifdef __CUDACC__
64 std::cout << "Running on CUDA device:\n";
65 forAllEdgesIfExample< TNL::Devices::Cuda >();
66#endif
67
68 return EXIT_SUCCESS;
69}
void forAllEdgesIf(Graph &graph, Condition &&condition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
Iterates in parallel over all edges of all graph vertices based on a condition.
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forAllEdgesIf() [2/2]

template<typename Graph, typename Condition, typename Function>
void TNL::Graphs::forAllEdgesIf ( Graph & graph,
Condition && condition,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges of all graph vertices based on a condition.

See also: Overview of Graph Traversal Functions

For each graph vertex, a condition lambda function is evaluated based on the vertex index. If the condition lambda function returns true, all edges of the vertex are traversed, and the specified lambda function is applied to each edge. If the condition lambda function returns false, the vertex is skipped.

Template Parameters
GraphThe type of the graph.
ConditionThe type of the condition lambda function.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
conditionLambda function to check vertex condition. See Condition Lambda.
functionLambda function to be applied to each edge. See Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllEdgesIfExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Define a condition: process only vertices with even indices.
32 */
33 auto condition = [] __cuda_callable__( int vertexIdx ) -> bool
34 {
35 return vertexIdx % 2 == 0;
36 };
38
40 /***
41 * Traverse edges only from vertices that satisfy the condition.
42 */
43 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int targetIdx, float weight ) mutable
44 {
45 targetIdx = ( targetIdx + 1 ) % 5;
46 weight += 5;
47 };
48 TNL::Graphs::forAllEdgesIf( graph, condition, modifyEdge );
50
51 /***
52 * Print the modified graph.
53 */
54 std::cout << "Modified graph:\n" << graph << '\n';
55}
56
57int
58main( int argc, char* argv[] )
59{
60 std::cout << "Running on host:\n";
61 forAllEdgesIfExample< TNL::Devices::Host >();
62
63#ifdef __CUDACC__
64 std::cout << "Running on CUDA device:\n";
65 forAllEdgesIfExample< TNL::Devices::Cuda >();
66#endif
67
68 return EXIT_SUCCESS;
69}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forAllVertices() [1/2]

template<typename Graph, typename Function>
void TNL::Graphs::forAllVertices ( const Graph & graph,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all graph vertices and applies the given lambda function to each vertex. This function is for constant graphs.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forVerticesExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Traverse vertices in range [1, 4) and modify their edges.
32 */
33 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
34 {
35 for( int i = 0; i < vertex.getDegree(); i++ )
36 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
37 };
38 TNL::Graphs::forVertices( graph, 1, 4, processVertex );
40
41 /***
42 * Print the modified graph.
43 */
44 std::cout << "Modified graph:\n" << graph << '\n';
45}
46
47int
48main( int argc, char* argv[] )
49{
50 std::cout << "Running on host:\n";
51 forVerticesExample< TNL::Devices::Host >();
52
53#ifdef __CUDACC__
54 std::cout << "Running on CUDA device:\n";
55 forVerticesExample< TNL::Devices::Cuda >();
56#endif
57
58 return EXIT_SUCCESS;
59}
void forVertices(Graph &graph, IndexBegin begin, IndexEnd end, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
Iterates in parallel over graph vertices within the specified range of vertex indexes and applies the...
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forAllVertices() [2/2]

template<typename Graph, typename Function>
void TNL::Graphs::forAllVertices ( Graph & graph,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all graph vertices and applies the given lambda function to each vertex.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forVerticesExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Traverse vertices in range [1, 4) and modify their edges.
32 */
33 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
34 {
35 for( int i = 0; i < vertex.getDegree(); i++ )
36 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
37 };
38 TNL::Graphs::forVertices( graph, 1, 4, processVertex );
40
41 /***
42 * Print the modified graph.
43 */
44 std::cout << "Modified graph:\n" << graph << '\n';
45}
46
47int
48main( int argc, char* argv[] )
49{
50 std::cout << "Running on host:\n";
51 forVerticesExample< TNL::Devices::Host >();
52
53#ifdef __CUDACC__
54 std::cout << "Running on CUDA device:\n";
55 forVerticesExample< TNL::Devices::Cuda >();
56#endif
57
58 return EXIT_SUCCESS;
59}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forAllVerticesIf() [1/2]

template<typename Graph, typename VertexCondition, typename Function>
void TNL::Graphs::forAllVerticesIf ( const Graph & graph,
VertexCondition && vertexCondition,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all graph vertices, applying a condition to determine whether each vertex should be processed. This function is for constant graphs.

See also: Overview of Graph Traversal Functions

For each vertex, a condition lambda function is evaluated based on the vertex index. If the condition lambda function returns true, the specified lambda function is executed for the vertex. If the condition lambda function returns false, the vertex is skipped.

Template Parameters
GraphThe type of the graph.
VertexConditionThe type of the condition lambda function.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
vertexConditionLambda function to check vertex condition. See Condition Lambda.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllVerticesIfExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Define a condition: process only vertices with more than one edge.
32 */
33 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
34 {
35 return graph.getVertexDegree( vertexIdx ) > 1;
36 };
38
40 /***
41 * Traverse vertices that satisfy the condition.
42 */
43 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
44 {
45 for( int i = 0; i < vertex.getDegree(); i++ )
46 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
47 };
48 TNL::Graphs::forAllVerticesIf( graph, condition, processVertex );
50
51 /***
52 * Print the modified graph.
53 */
54 std::cout << "Modified graph:\n" << graph << '\n';
55}
56
57int
58main( int argc, char* argv[] )
59{
60 std::cout << "Running on host:\n";
61 forAllVerticesIfExample< TNL::Devices::Host >();
62
63#ifdef __CUDACC__
64 std::cout << "Running on CUDA device:\n";
65 forAllVerticesIfExample< TNL::Devices::Cuda >();
66#endif
67
68 return EXIT_SUCCESS;
69}
void forAllVerticesIf(Graph &graph, VertexCondition &&vertexCondition, Function &&function, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
Iterates in parallel over all graph vertices, applying a condition to determine whether each vertex s...
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 3:60
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 3:60
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forAllVerticesIf() [2/2]

template<typename Graph, typename VertexCondition, typename Function>
void TNL::Graphs::forAllVerticesIf ( Graph & graph,
VertexCondition && vertexCondition,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all graph vertices, applying a condition to determine whether each vertex should be processed.

See also: Overview of Graph Traversal Functions

For each vertex, a condition lambda function is evaluated based on the vertex index. If the condition lambda function returns true, the specified lambda function is executed for the vertex. If the condition lambda function returns false, the vertex is skipped.

Template Parameters
GraphThe type of the graph.
VertexConditionThe type of the condition lambda function.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
vertexConditionLambda function to check vertex condition. See Condition Lambda.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllVerticesIfExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Define a condition: process only vertices with more than one edge.
32 */
33 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
34 {
35 return graph.getVertexDegree( vertexIdx ) > 1;
36 };
38
40 /***
41 * Traverse vertices that satisfy the condition.
42 */
43 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
44 {
45 for( int i = 0; i < vertex.getDegree(); i++ )
46 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
47 };
48 TNL::Graphs::forAllVerticesIf( graph, condition, processVertex );
50
51 /***
52 * Print the modified graph.
53 */
54 std::cout << "Modified graph:\n" << graph << '\n';
55}
56
57int
58main( int argc, char* argv[] )
59{
60 std::cout << "Running on host:\n";
61 forAllVerticesIfExample< TNL::Devices::Host >();
62
63#ifdef __CUDACC__
64 std::cout << "Running on CUDA device:\n";
65 forAllVerticesIfExample< TNL::Devices::Cuda >();
66#endif
67
68 return EXIT_SUCCESS;
69}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 3:60
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 3:60
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forEdges() [1/6]

template<typename Graph, typename Array, typename Function>
void TNL::Graphs::forEdges ( const Graph & graph,
const Array & vertexIndexes,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges of graph vertices with the given indexes and applies the specified lambda function. This function is for constant graphs.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the graph vertices to iterate over. This can be containers such as TNL::Containers::Array, TNL::Containers::ArrayView, TNL::Containers::Vector, or TNL::Containers::VectorView.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
vertexIndexesThe array containing the indexes of the graph vertices to iterate over.
functionLambda function to be applied to each edge. See Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10forEdgesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Create an array of vertex indices to process.
33 */
34 TNL::Containers::Vector< int, Device > vertices( { 0, 2, 3 } );
36
38 /***
39 * Traverse edges only from the specified vertices.
40 */
41 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int targetIdx, float weight ) mutable
42 {
43 targetIdx = ( targetIdx + 1 ) % 5;
44 weight += 5;
45 };
46 TNL::Graphs::forEdges( graph, vertices, modifyEdge );
48
49 /***
50 * Print the modified graph.
51 */
52 std::cout << "Modified graph:\n" << graph << '\n';
53}
54
55int
56main( int argc, char* argv[] )
57{
58 std::cout << "Running on host:\n";
59 forEdgesWithIndexesExample< TNL::Devices::Host >();
60
61#ifdef __CUDACC__
62 std::cout << "Running on CUDA device:\n";
63 forEdgesWithIndexesExample< TNL::Devices::Cuda >();
64#endif
65
66 return EXIT_SUCCESS;
67}
Vector extends Array with algebraic operations.
Definition Vector.h:37
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forEdges() [2/6]

template<typename Graph, typename Array, typename IndexBegin, typename IndexEnd, typename Function>
void TNL::Graphs::forEdges ( const Graph & graph,
const Array & vertexIndexes,
IndexBegin begin,
IndexEnd end,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges of the graph vertices whose indexes lie in the subrange [ begin, end ) of the given index array and applies the specified lambda function. This function is for constant graphs.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the graph vertices to iterate over. This can be containers such as TNL::Containers::Array, TNL::Containers::ArrayView, TNL::Containers::Vector, or TNL::Containers::VectorView.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertex indexes whose edges will be processed using the lambda function.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertex indexes whose edges will be processed using the lambda function.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
vertexIndexesThe array containing the indexes of the graph vertices to iterate over.
beginThe beginning of the interval [ begin, end ) of vertex indexes whose edges will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertex indexes whose edges will be processed using the lambda function.
functionLambda function to be applied to each edge. See Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10forEdgesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Create an array of vertex indices to process.
33 */
34 TNL::Containers::Vector< int, Device > vertices( { 0, 2, 3 } );
36
38 /***
39 * Traverse edges only from the specified vertices.
40 */
41 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int targetIdx, float weight ) mutable
42 {
43 targetIdx = ( targetIdx + 1 ) % 5;
44 weight += 5;
45 };
46 TNL::Graphs::forEdges( graph, vertices, modifyEdge );
48
49 /***
50 * Print the modified graph.
51 */
52 std::cout << "Modified graph:\n" << graph << '\n';
53}
54
55int
56main( int argc, char* argv[] )
57{
58 std::cout << "Running on host:\n";
59 forEdgesWithIndexesExample< TNL::Devices::Host >();
60
61#ifdef __CUDACC__
62 std::cout << "Running on CUDA device:\n";
63 forEdgesWithIndexesExample< TNL::Devices::Cuda >();
64#endif
65
66 return EXIT_SUCCESS;
67}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forEdges() [3/6]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Function>
void TNL::Graphs::forEdges ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges of a constant graph in the given range of graph vertices and applies the specified lambda function.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices whose edges we want to process using the lambda function.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices whose edges we want to process using the lambda function.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
beginThe beginning of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
functionLambda function to be applied to each edge. See Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllEdgesExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Traverse edges in range [1, 4) and modify them.
32 */
33 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int& targetIdx, float& weight ) mutable
34 {
35 targetIdx = ( targetIdx + 1 ) % 5;
36 weight += 5;
37 };
38 TNL::Graphs::forEdges( graph, 1, 4, modifyEdge );
40
41 /***
42 * Print the modified graph.
43 */
44 std::cout << "Modified graph:\n" << graph << '\n';
45}
46
47int
48main( int argc, char* argv[] )
49{
50 std::cout << "Running on host:\n";
51 forAllEdgesExample< TNL::Devices::Host >();
52
53#ifdef __CUDACC__
54 std::cout << "Running on CUDA device:\n";
55 forAllEdgesExample< TNL::Devices::Cuda >();
56#endif
57
58 return EXIT_SUCCESS;
59}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forEdges() [4/6]

template<typename Graph, typename Array, typename Function>
void TNL::Graphs::forEdges ( Graph & graph,
const Array & vertexIndexes,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges of graph vertices with the given indexes and applies the specified lambda function.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the graph vertices to iterate over. This can be containers such as TNL::Containers::Array, TNL::Containers::ArrayView, TNL::Containers::Vector, or TNL::Containers::VectorView.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
vertexIndexesThe array containing the indexes of the graph vertices to iterate over.
functionLambda function to be applied to each edge. See Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10forEdgesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Create an array of vertex indices to process.
33 */
34 TNL::Containers::Vector< int, Device > vertices( { 0, 2, 3 } );
36
38 /***
39 * Traverse edges only from the specified vertices.
40 */
41 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int targetIdx, float weight ) mutable
42 {
43 targetIdx = ( targetIdx + 1 ) % 5;
44 weight += 5;
45 };
46 TNL::Graphs::forEdges( graph, vertices, modifyEdge );
48
49 /***
50 * Print the modified graph.
51 */
52 std::cout << "Modified graph:\n" << graph << '\n';
53}
54
55int
56main( int argc, char* argv[] )
57{
58 std::cout << "Running on host:\n";
59 forEdgesWithIndexesExample< TNL::Devices::Host >();
60
61#ifdef __CUDACC__
62 std::cout << "Running on CUDA device:\n";
63 forEdgesWithIndexesExample< TNL::Devices::Cuda >();
64#endif
65
66 return EXIT_SUCCESS;
67}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forEdges() [5/6]

template<typename Graph, typename Array, typename IndexBegin, typename IndexEnd, typename Function>
void TNL::Graphs::forEdges ( Graph & graph,
const Array & vertexIndexes,
IndexBegin begin,
IndexEnd end,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges of the graph vertices whose indexes lie in the subrange [ begin, end ) of the given index array and applies the specified lambda function.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the graph vertices to iterate over. This can be containers such as TNL::Containers::Array, TNL::Containers::ArrayView, TNL::Containers::Vector, or TNL::Containers::VectorView.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertex indexes whose edges will be processed using the lambda function.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertex indexes whose edges will be processed using the lambda function.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
vertexIndexesThe array containing the indexes of the graph vertices to iterate over.
beginThe beginning of the interval [ begin, end ) of vertex indexes whose edges will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertex indexes whose edges will be processed using the lambda function.
functionLambda function to be applied to each edge. See Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10forEdgesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Create an array of vertex indices to process.
33 */
34 TNL::Containers::Vector< int, Device > vertices( { 0, 2, 3 } );
36
38 /***
39 * Traverse edges only from the specified vertices.
40 */
41 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int targetIdx, float weight ) mutable
42 {
43 targetIdx = ( targetIdx + 1 ) % 5;
44 weight += 5;
45 };
46 TNL::Graphs::forEdges( graph, vertices, modifyEdge );
48
49 /***
50 * Print the modified graph.
51 */
52 std::cout << "Modified graph:\n" << graph << '\n';
53}
54
55int
56main( int argc, char* argv[] )
57{
58 std::cout << "Running on host:\n";
59 forEdgesWithIndexesExample< TNL::Devices::Host >();
60
61#ifdef __CUDACC__
62 std::cout << "Running on CUDA device:\n";
63 forEdgesWithIndexesExample< TNL::Devices::Cuda >();
64#endif
65
66 return EXIT_SUCCESS;
67}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forEdges() [6/6]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Function>
void TNL::Graphs::forEdges ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges in the given range of graph vertices and applies the specified lambda function.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices whose edges we want to process using the lambda function.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices whose edges we want to process using the lambda function.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
beginThe beginning of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
functionLambda function to be applied to each edge. See Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllEdgesExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Traverse edges in range [1, 4) and modify them.
32 */
33 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int& targetIdx, float& weight ) mutable
34 {
35 targetIdx = ( targetIdx + 1 ) % 5;
36 weight += 5;
37 };
38 TNL::Graphs::forEdges( graph, 1, 4, modifyEdge );
40
41 /***
42 * Print the modified graph.
43 */
44 std::cout << "Modified graph:\n" << graph << '\n';
45}
46
47int
48main( int argc, char* argv[] )
49{
50 std::cout << "Running on host:\n";
51 forAllEdgesExample< TNL::Devices::Host >();
52
53#ifdef __CUDACC__
54 std::cout << "Running on CUDA device:\n";
55 forAllEdgesExample< TNL::Devices::Cuda >();
56#endif
57
58 return EXIT_SUCCESS;
59}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forEdgesIf() [1/2]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Function>
void TNL::Graphs::forEdgesIf ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges in a given range of vertices based on a condition. This function is for constant graphs.

See also: Overview of Graph Traversal Functions

For each graph vertex, a condition lambda function is evaluated based on the vertex index. If the condition lambda function returns true, all edges of the vertex are traversed, and the specified lambda function is applied to each edge. If the condition lambda function returns false, the vertex is skipped.

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
ConditionThe type of the condition lambda function.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
beginThe beginning of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
conditionLambda function to check vertex condition. See Condition Lambda.
functionLambda function to be applied to each edge. See Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllEdgesIfExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Define a condition: process only vertices with even indices.
32 */
33 auto condition = [] __cuda_callable__( int vertexIdx ) -> bool
34 {
35 return vertexIdx % 2 == 0;
36 };
38
40 /***
41 * Traverse edges only from vertices that satisfy the condition.
42 */
43 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int targetIdx, float weight ) mutable
44 {
45 targetIdx = ( targetIdx + 1 ) % 5;
46 weight += 5;
47 };
48 TNL::Graphs::forAllEdgesIf( graph, condition, modifyEdge );
50
51 /***
52 * Print the modified graph.
53 */
54 std::cout << "Modified graph:\n" << graph << '\n';
55}
56
57int
58main( int argc, char* argv[] )
59{
60 std::cout << "Running on host:\n";
61 forAllEdgesIfExample< TNL::Devices::Host >();
62
63#ifdef __CUDACC__
64 std::cout << "Running on CUDA device:\n";
65 forAllEdgesIfExample< TNL::Devices::Cuda >();
66#endif
67
68 return EXIT_SUCCESS;
69}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forEdgesIf() [2/2]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Function>
void TNL::Graphs::forEdgesIf ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over all edges in a given range of vertices based on a condition.

See also: Overview of Graph Traversal Functions

For each graph vertex, a condition lambda function is evaluated based on the vertex index. If the condition lambda function returns true, all edges of the vertex are traversed, and the specified lambda function is applied to each edge. If the condition lambda function returns false, the vertex is skipped.

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
ConditionThe type of the condition lambda function.
FunctionThe type of the lambda function to be applied to each edge.
Parameters
graphThe graph whose edges will be processed using the lambda function.
beginThe beginning of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertices whose edges will be processed using the lambda function.
conditionLambda function to check vertex condition. See Condition Lambda.
functionLambda function to be applied to each edge. See Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllEdgesIfExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Define a condition: process only vertices with even indices.
32 */
33 auto condition = [] __cuda_callable__( int vertexIdx ) -> bool
34 {
35 return vertexIdx % 2 == 0;
36 };
38
40 /***
41 * Traverse edges only from vertices that satisfy the condition.
42 */
43 auto modifyEdge = [] __cuda_callable__( int sourceIdx, int localIdx, int targetIdx, float weight ) mutable
44 {
45 targetIdx = ( targetIdx + 1 ) % 5;
46 weight += 5;
47 };
48 TNL::Graphs::forAllEdgesIf( graph, condition, modifyEdge );
50
51 /***
52 * Print the modified graph.
53 */
54 std::cout << "Modified graph:\n" << graph << '\n';
55}
56
57int
58main( int argc, char* argv[] )
59{
60 std::cout << "Running on host:\n";
61 forAllEdgesIfExample< TNL::Devices::Host >();
62
63#ifdef __CUDACC__
64 std::cout << "Running on CUDA device:\n";
65 forAllEdgesIfExample< TNL::Devices::Cuda >();
66#endif
67
68 return EXIT_SUCCESS;
69}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forVertices() [1/6]

template<typename Graph, typename Array, typename Function, typename T = std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::forVertices ( const Graph & graph,
const Array & vertexIndexes,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over graph vertices with the given indexes and applies the specified lambda function to each vertex. This function is for constant graphs.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the graph vertices to iterate over. This can be containers such as TNL::Containers::Array, TNL::Containers::ArrayView, TNL::Containers::Vector, or TNL::Containers::VectorView.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
vertexIndexesThe array containing the indexes of the graph vertices to iterate over.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10forVerticesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Create an array of vertex indices.
33 */
34 TNL::Containers::Vector< int, Device > vertices( { 0, 2, 4 } );
36
38 /***
39 * Traverse only the specified vertices.
40 */
41 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
42 {
43 for( int i = 0; i < vertex.getDegree(); i++ )
44 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
45 };
46 TNL::Graphs::forVertices( graph, vertices, processVertex );
48
49 /***
50 * Print the modified graph.
51 */
52 std::cout << "Modified graph:\n" << graph << '\n';
53}
54
55int
56main( int argc, char* argv[] )
57{
58 std::cout << "Running on host:\n";
59 forVerticesWithIndexesExample< TNL::Devices::Host >();
60
61#ifdef __CUDACC__
62 std::cout << "Running on CUDA device:\n";
63 forVerticesWithIndexesExample< TNL::Devices::Cuda >();
64#endif
65
66 return EXIT_SUCCESS;
67}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 4:65
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 4:65
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forVertices() [2/6]

template<typename Graph, typename Array, typename IndexBegin, typename IndexEnd, typename Function, typename T = std::enable_if_t< IsArrayType< Array >::value && std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::forVertices ( const Graph & graph,
const Array & vertexIndexes,
IndexBegin begin,
IndexEnd end,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over the graph vertices whose indexes lie in the subrange [ begin, end ) of the given index array and applies the specified lambda function to each vertex. This function is for constant graphs.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the graph vertices to iterate over. This can be containers such as TNL::Containers::Array, TNL::Containers::ArrayView, TNL::Containers::Vector, or TNL::Containers::VectorView.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices on which the lambda function will be applied.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices on which the lambda function will be applied.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
vertexIndexesThe array containing the indexes of the graph vertices to iterate over.
beginThe beginning of the interval [ begin, end ) of vertex indexes whose corresponding vertices will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertex indexes whose corresponding vertices will be processed using the lambda function.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10forVerticesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Create an array of vertex indices.
33 */
34 TNL::Containers::Vector< int, Device > vertices( { 0, 2, 4 } );
36
38 /***
39 * Traverse only the specified vertices.
40 */
41 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
42 {
43 for( int i = 0; i < vertex.getDegree(); i++ )
44 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
45 };
46 TNL::Graphs::forVertices( graph, vertices, processVertex );
48
49 /***
50 * Print the modified graph.
51 */
52 std::cout << "Modified graph:\n" << graph << '\n';
53}
54
55int
56main( int argc, char* argv[] )
57{
58 std::cout << "Running on host:\n";
59 forVerticesWithIndexesExample< TNL::Devices::Host >();
60
61#ifdef __CUDACC__
62 std::cout << "Running on CUDA device:\n";
63 forVerticesWithIndexesExample< TNL::Devices::Cuda >();
64#endif
65
66 return EXIT_SUCCESS;
67}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 4:65
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 4:65
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forVertices() [3/6]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Function, typename T = std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::forVertices ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over graph vertices within the specified range of vertex indexes and applies the given lambda function to each vertex. This function is for constant graphs.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of graph vertices on which the lambda function will be applied.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of graph vertices on which the lambda function will be applied.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
beginThe beginning of the interval [ begin, end ) of graph vertices that will be processed using the lambda function.
endThe end of the interval [ begin, end ) of graph vertices that will be processed using the lambda function.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forVerticesExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Traverse vertices in range [1, 4) and modify their edges.
32 */
33 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
34 {
35 for( int i = 0; i < vertex.getDegree(); i++ )
36 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
37 };
38 TNL::Graphs::forVertices( graph, 1, 4, processVertex );
40
41 /***
42 * Print the modified graph.
43 */
44 std::cout << "Modified graph:\n" << graph << '\n';
45}
46
47int
48main( int argc, char* argv[] )
49{
50 std::cout << "Running on host:\n";
51 forVerticesExample< TNL::Devices::Host >();
52
53#ifdef __CUDACC__
54 std::cout << "Running on CUDA device:\n";
55 forVerticesExample< TNL::Devices::Cuda >();
56#endif
57
58 return EXIT_SUCCESS;
59}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forVertices() [4/6]

template<typename Graph, typename Array, typename Function, typename T = std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::forVertices ( Graph & graph,
const Array & vertexIndexes,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over graph vertices with the given indexes and applies the specified lambda function to each vertex.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the graph vertices to iterate over. This can be containers such as TNL::Containers::Array, TNL::Containers::ArrayView, TNL::Containers::Vector, or TNL::Containers::VectorView.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
vertexIndexesThe array containing the indexes of the graph vertices to iterate over.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10forVerticesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Create an array of vertex indices.
33 */
34 TNL::Containers::Vector< int, Device > vertices( { 0, 2, 4 } );
36
38 /***
39 * Traverse only the specified vertices.
40 */
41 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
42 {
43 for( int i = 0; i < vertex.getDegree(); i++ )
44 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
45 };
46 TNL::Graphs::forVertices( graph, vertices, processVertex );
48
49 /***
50 * Print the modified graph.
51 */
52 std::cout << "Modified graph:\n" << graph << '\n';
53}
54
55int
56main( int argc, char* argv[] )
57{
58 std::cout << "Running on host:\n";
59 forVerticesWithIndexesExample< TNL::Devices::Host >();
60
61#ifdef __CUDACC__
62 std::cout << "Running on CUDA device:\n";
63 forVerticesWithIndexesExample< TNL::Devices::Cuda >();
64#endif
65
66 return EXIT_SUCCESS;
67}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 4:65
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 4:65
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forVertices() [5/6]

template<typename Graph, typename Array, typename IndexBegin, typename IndexEnd, typename Function, typename T = std::enable_if_t< IsArrayType< Array >::value && std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::forVertices ( Graph & graph,
const Array & vertexIndexes,
IndexBegin begin,
IndexEnd end,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over graph vertices whose indexes are stored in the given array, restricted to the sub-range [ begin, end ) of positions within that array, and applies the specified lambda function to each vertex.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the graph vertices to iterate over. This can be containers such as TNL::Containers::Array, TNL::Containers::ArrayView, TNL::Containers::Vector, or TNL::Containers::VectorView.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices on which the lambda function will be applied.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices on which the lambda function will be applied.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
vertexIndexesThe array containing the indexes of the graph vertices to iterate over.
beginThe beginning of the interval [ begin, end ) of vertex indexes whose corresponding vertices will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertex indexes whose corresponding vertices will be processed using the lambda function.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10forVerticesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Create an array of vertex indices.
33 */
34 TNL::Containers::Vector< int, Device > vertices( { 0, 2, 4 } );
36
38 /***
39 * Traverse only the specified vertices.
40 */
41 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
42 {
43 for( int i = 0; i < vertex.getDegree(); i++ )
44 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
45 };
46 TNL::Graphs::forVertices( graph, vertices, processVertex );
48
49 /***
50 * Print the modified graph.
51 */
52 std::cout << "Modified graph:\n" << graph << '\n';
53}
54
55int
56main( int argc, char* argv[] )
57{
58 std::cout << "Running on host:\n";
59 forVerticesWithIndexesExample< TNL::Devices::Host >();
60
61#ifdef __CUDACC__
62 std::cout << "Running on CUDA device:\n";
63 forVerticesWithIndexesExample< TNL::Devices::Cuda >();
64#endif
65
66 return EXIT_SUCCESS;
67}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 4:65
Row: 3 -> 0:70 4:80
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 4:65
Row: 3 -> 0:70 4:80
Row: 4 ->

◆ forVertices() [6/6]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Function, typename T = std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::forVertices ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over graph vertices within the specified range of vertex indexes and applies the given lambda function to each vertex.

See also: Overview of Graph Traversal Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of graph vertices on which the lambda function will be applied.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of graph vertices on which the lambda function will be applied.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
beginThe beginning of the interval [ begin, end ) of graph vertices that will be processed using the lambda function.
endThe end of the interval [ begin, end ) of graph vertices that will be processed using the lambda function.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forVerticesExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Traverse vertices in range [1, 4) and modify their edges.
32 */
33 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
34 {
35 for( int i = 0; i < vertex.getDegree(); i++ )
36 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
37 };
38 TNL::Graphs::forVertices( graph, 1, 4, processVertex );
40
41 /***
42 * Print the modified graph.
43 */
44 std::cout << "Modified graph:\n" << graph << '\n';
45}
46
47int
48main( int argc, char* argv[] )
49{
50 std::cout << "Running on host:\n";
51 forVerticesExample< TNL::Devices::Host >();
52
53#ifdef __CUDACC__
54 std::cout << "Running on CUDA device:\n";
55 forVerticesExample< TNL::Devices::Cuda >();
56#endif
57
58 return EXIT_SUCCESS;
59}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 4:65
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forVerticesIf() [1/2]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename VertexCondition, typename Function, typename T = std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::forVerticesIf ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
VertexCondition && vertexCondition,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over vertices within the given range of vertex indexes, applying a condition to determine whether each vertex should be processed. This function is for constant graphs.

See also: Overview of Graph Traversal Functions

For each vertex, a condition lambda function is evaluated based on the vertex index. If the condition lambda function returns true, the specified lambda function is executed for the vertex. If the condition lambda function returns false, the vertex is skipped.

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices on which the lambda function will be applied.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices on which the lambda function will be applied.
VertexConditionThe type of the condition lambda function.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
beginThe beginning of the interval [ begin, end ) of vertex indexes whose corresponding vertices will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertex indexes whose corresponding vertices will be processed using the lambda function.
vertexConditionLambda function to check vertex condition. See Condition Lambda.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllVerticesIfExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Define a condition: process only vertices with more than one edge.
32 */
33 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
34 {
35 return graph.getVertexDegree( vertexIdx ) > 1;
36 };
38
40 /***
41 * Traverse vertices that satisfy the condition.
42 */
43 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
44 {
45 for( int i = 0; i < vertex.getDegree(); i++ )
46 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
47 };
48 TNL::Graphs::forAllVerticesIf( graph, condition, processVertex );
50
51 /***
52 * Print the modified graph.
53 */
54 std::cout << "Modified graph:\n" << graph << '\n';
55}
56
57int
58main( int argc, char* argv[] )
59{
60 std::cout << "Running on host:\n";
61 forAllVerticesIfExample< TNL::Devices::Host >();
62
63#ifdef __CUDACC__
64 std::cout << "Running on CUDA device:\n";
65 forAllVerticesIfExample< TNL::Devices::Cuda >();
66#endif
67
68 return EXIT_SUCCESS;
69}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 3:60
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 3:60
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ forVerticesIf() [2/2]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename VertexCondition, typename Function, typename T = std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::forVerticesIf ( Graph & graph,
IndexBegin begin,
IndexEnd end,
VertexCondition && vertexCondition,
Function && function,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Iterates in parallel over vertices within the given range of vertex indexes, applying a condition to determine whether each vertex should be processed.

See also: Overview of Graph Traversal Functions

For each vertex, a condition lambda function is evaluated based on the vertex index. If the condition lambda function returns true, the specified lambda function is executed for the vertex. If the condition lambda function returns false, the vertex is skipped.

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices on which the lambda function will be applied.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices on which the lambda function will be applied.
VertexConditionThe type of the condition lambda function.
FunctionThe type of the lambda function to be executed on each vertex.
Parameters
graphThe graph on which the lambda function will be applied.
beginThe beginning of the interval [ begin, end ) of vertex indexes whose corresponding vertices will be processed using the lambda function.
endThe end of the interval [ begin, end ) of vertex indexes whose corresponding vertices will be processed using the lambda function.
vertexConditionLambda function to check vertex condition. See Condition Lambda.
functionLambda function to be applied to each vertex. See Vertex Traversal Function (Non-Const Graph).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/traverse.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6
7template< typename Device >
8void
9forAllVerticesIfExample()
10{
11 /***
12 * Create a directed graph with 5 vertices.
13 */
15 // clang-format off
16 GraphType graph( 5, // number of vertices
17 { // definition of edges with weights
18 { 0, 1, 10.0 }, { 0, 2, 20.0 },
19 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
20 { 2, 3, 60.0 },
21 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
22 // clang-format on
23
24 /***
25 * Print the graph.
26 */
27 std::cout << "Graph:\n" << graph << '\n';
28
30 /***
31 * Define a condition: process only vertices with more than one edge.
32 */
33 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
34 {
35 return graph.getVertexDegree( vertexIdx ) > 1;
36 };
38
40 /***
41 * Traverse vertices that satisfy the condition.
42 */
43 auto processVertex = [] __cuda_callable__( typename GraphType::VertexView vertex ) mutable
44 {
45 for( int i = 0; i < vertex.getDegree(); i++ )
46 vertex.setEdge( i, ( vertex.getTargetIndex( i ) + 1 ) % 5, vertex.getEdgeWeight( i ) + 5 );
47 };
48 TNL::Graphs::forAllVerticesIf( graph, condition, processVertex );
50
51 /***
52 * Print the modified graph.
53 */
54 std::cout << "Modified graph:\n" << graph << '\n';
55}
56
57int
58main( int argc, char* argv[] )
59{
60 std::cout << "Running on host:\n";
61 forAllVerticesIfExample< TNL::Devices::Host >();
62
63#ifdef __CUDACC__
64 std::cout << "Running on CUDA device:\n";
65 forAllVerticesIfExample< TNL::Devices::Cuda >();
66#endif
67
68 return EXIT_SUCCESS;
69}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 3:60
Row: 3 -> 1:75 0:85
Row: 4 ->
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Modified graph:
Row: 0 -> 2:15 3:25
Row: 1 -> 3:35 4:45 0:55
Row: 2 -> 3:60
Row: 3 -> 1:75 0:85
Row: 4 ->

◆ operator<<()

template<typename Real, typename Index>
std::ostream & TNL::Graphs::operator<< ( std::ostream & os,
const Edge< Real, Index > & edge )

Stream output operator for Edge.

Outputs the edge in the format: (source, target, weight)

Template Parameters
RealType for the edge weight.
IndexType for vertex indices.
Parameters
osOutput stream.
edgeThe edge to output.
Returns
Reference to the output stream.

◆ reduceAllVertices() [1/4]

template<typename Graph, typename Fetch, typename Reduction, typename Store, typename FetchValue>
void TNL::Graphs::reduceAllVertices ( const Graph & graph,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
fetchLambda function for fetching data. See For Const Graphs.
reductionLambda function for reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Basic Store (Vertex Index Only).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Compute maximum edge weight for vertices in range [1, 4).
32 */
34 TNL::Containers::Vector< float, Device > vertexMaxWeights( 5, -1 );
35 auto vertexMaxWeights_view = vertexMaxWeights.getView();
37
39 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
40 {
41 return weight;
42 };
44
46 auto store = [ = ] __cuda_callable__( int vertexIdx, const float& maxWeight ) mutable
47 {
48 vertexMaxWeights_view[ vertexIdx ] = maxWeight;
49 };
51
53 TNL::Graphs::reduceVertices( graph, 1, 4, fetch, TNL::Max{}, store );
55
56 /***
57 * Print results.
58 */
59 std::cout << "Maximum edge weight for vertices 1-3:" << vertexMaxWeights << '\n';
60}
61
62int
63main( int argc, char* argv[] )
64{
65 std::cout << "Running on host:\n";
66 reduceVerticesExample< TNL::Devices::Host >();
67
68#ifdef __CUDACC__
69 std::cout << "Running on CUDA device:\n";
70 reduceVerticesExample< TNL::Devices::Cuda >();
71#endif
72
73 return EXIT_SUCCESS;
74}
void reduceVertices(Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
Performs parallel reduction within each graph vertex over a given range of vertex indexes.
Function object implementing max(x, y).
Definition Functional.h:272
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]

◆ reduceAllVertices() [2/4]

template<typename Graph, typename Fetch, typename Reduction, typename Store>
void TNL::Graphs::reduceAllVertices ( const Graph & graph,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Basic Store (Vertex Index Only).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Compute maximum edge weight for vertices in range [1, 4).
32 */
34 TNL::Containers::Vector< float, Device > vertexMaxWeights( 5, -1 );
35 auto vertexMaxWeights_view = vertexMaxWeights.getView();
37
39 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
40 {
41 return weight;
42 };
44
46 auto store = [ = ] __cuda_callable__( int vertexIdx, const float& maxWeight ) mutable
47 {
48 vertexMaxWeights_view[ vertexIdx ] = maxWeight;
49 };
51
53 TNL::Graphs::reduceVertices( graph, 1, 4, fetch, TNL::Max{}, store );
55
56 /***
57 * Print results.
58 */
59 std::cout << "Maximum edge weight for vertices 1-3:" << vertexMaxWeights << '\n';
60}
61
62int
63main( int argc, char* argv[] )
64{
65 std::cout << "Running on host:\n";
66 reduceVerticesExample< TNL::Devices::Host >();
67
68#ifdef __CUDACC__
69 std::cout << "Running on CUDA device:\n";
70 reduceVerticesExample< TNL::Devices::Cuda >();
71#endif
72
73 return EXIT_SUCCESS;
74}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]

◆ reduceAllVertices() [3/4]

template<typename Graph, typename Fetch, typename Reduction, typename Store, typename FetchValue>
void TNL::Graphs::reduceAllVertices ( Graph & graph,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionLambda function for reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Basic Store (Vertex Index Only).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Compute maximum edge weight for vertices in range [1, 4).
32 */
34 TNL::Containers::Vector< float, Device > vertexMaxWeights( 5, -1 );
35 auto vertexMaxWeights_view = vertexMaxWeights.getView();
37
39 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
40 {
41 return weight;
42 };
44
46 auto store = [ = ] __cuda_callable__( int vertexIdx, const float& maxWeight ) mutable
47 {
48 vertexMaxWeights_view[ vertexIdx ] = maxWeight;
49 };
51
53 TNL::Graphs::reduceVertices( graph, 1, 4, fetch, TNL::Max{}, store );
55
56 /***
57 * Print results.
58 */
59 std::cout << "Maximum edge weight for vertices 1-3:" << vertexMaxWeights << '\n';
60}
61
62int
63main( int argc, char* argv[] )
64{
65 std::cout << "Running on host:\n";
66 reduceVerticesExample< TNL::Devices::Host >();
67
68#ifdef __CUDACC__
69 std::cout << "Running on CUDA device:\n";
70 reduceVerticesExample< TNL::Devices::Cuda >();
71#endif
72
73 return EXIT_SUCCESS;
74}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]

◆ reduceAllVertices() [4/4]

template<typename Graph, typename Fetch, typename Reduction, typename Store>
void TNL::Graphs::reduceAllVertices ( Graph & graph,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Basic Store (Vertex Index Only).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Compute maximum edge weight for vertices in range [1, 4).
32 */
34 TNL::Containers::Vector< float, Device > vertexMaxWeights( 5, -1 );
35 auto vertexMaxWeights_view = vertexMaxWeights.getView();
37
39 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
40 {
41 return weight;
42 };
44
46 auto store = [ = ] __cuda_callable__( int vertexIdx, const float& maxWeight ) mutable
47 {
48 vertexMaxWeights_view[ vertexIdx ] = maxWeight;
49 };
51
53 TNL::Graphs::reduceVertices( graph, 1, 4, fetch, TNL::Max{}, store );
55
56 /***
57 * Print results.
58 */
59 std::cout << "Maximum edge weight for vertices 1-3:" << vertexMaxWeights << '\n';
60}
61
62int
63main( int argc, char* argv[] )
64{
65 std::cout << "Running on host:\n";
66 reduceVerticesExample< TNL::Devices::Host >();
67
68#ifdef __CUDACC__
69 std::cout << "Running on CUDA device:\n";
70 reduceVerticesExample< TNL::Devices::Cuda >();
71#endif
72
73 return EXIT_SUCCESS;
74}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]

◆ reduceAllVerticesIf() [1/4]

template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue>
Graph::IndexType TNL::Graphs::reduceAllVerticesIf ( const Graph & graph,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices based on a condition (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
conditionLambda function for condition check. See Condition Check.
fetchLambda function for fetching data. See For Const Graphs.
reductionLambda function for reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Basic Store (Vertex Index Only).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute minimum edge weight for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 auto vertexMinWeights_view = vertexMinWeights.getView();
37 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
38
39 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
40 {
41 return graph.getVertexDegree( vertexIdx ) >= 2;
42 };
43
44 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
45 {
46 return weight;
47 };
48
49 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& minWeight ) mutable
50 {
51 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
52 vertexMinWeights_view[ vertexIdx ] = minWeight;
53 };
54
55 int reducedVertexCount = TNL::Graphs::reduceVerticesIf( graph, 1, 4, condition, fetch, TNL::Min{}, store );
57
58 /***
59 * Print results.
60 */
61 std::cout << "Number of reduced vertices: " << reducedVertexCount << '\n';
62 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
63 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, reducedVertexCount ) << '\n';
64}
65
66int
67main( int argc, char* argv[] )
68{
69 std::cout << "Running on host:\n";
70 reduceVerticesIfExample< TNL::Devices::Host >();
71
72#ifdef __CUDACC__
73 std::cout << "Running on CUDA device:\n";
74 reduceVerticesIfExample< TNL::Devices::Cuda >();
75#endif
76
77 return EXIT_SUCCESS;
78}
Graph::IndexType reduceVerticesIf(Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a ...
Function object implementing min(x, y).
Definition Functional.h:242
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]

◆ reduceAllVerticesIf() [2/4]

template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType TNL::Graphs::reduceAllVerticesIf ( const Graph & graph,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices based on a condition with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
conditionLambda function for condition check. See Condition Check.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Basic Store (Vertex Index Only).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute minimum edge weight for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 auto vertexMinWeights_view = vertexMinWeights.getView();
37 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
38
39 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
40 {
41 return graph.getVertexDegree( vertexIdx ) >= 2;
42 };
43
44 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
45 {
46 return weight;
47 };
48
49 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& minWeight ) mutable
50 {
51 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
52 vertexMinWeights_view[ vertexIdx ] = minWeight;
53 };
54
55 int reducedVertexCount = TNL::Graphs::reduceVerticesIf( graph, 1, 4, condition, fetch, TNL::Min{}, store );
57
58 /***
59 * Print results.
60 */
61 std::cout << "Number of reduced vertices: " << reducedVertexCount << '\n';
62 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
63 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, reducedVertexCount ) << '\n';
64}
65
66int
67main( int argc, char* argv[] )
68{
69 std::cout << "Running on host:\n";
70 reduceVerticesIfExample< TNL::Devices::Host >();
71
72#ifdef __CUDACC__
73 std::cout << "Running on CUDA device:\n";
74 reduceVerticesIfExample< TNL::Devices::Cuda >();
75#endif
76
77 return EXIT_SUCCESS;
78}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]

◆ reduceAllVerticesIf() [3/4]

template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue>
Graph::IndexType TNL::Graphs::reduceAllVerticesIf ( Graph & graph,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices based on a condition.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
conditionLambda function for condition check. See Condition Check.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionLambda function for reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Basic Store (Vertex Index Only).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute minimum edge weight for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 auto vertexMinWeights_view = vertexMinWeights.getView();
37 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
38
39 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
40 {
41 return graph.getVertexDegree( vertexIdx ) >= 2;
42 };
43
44 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
45 {
46 return weight;
47 };
48
49 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& minWeight ) mutable
50 {
51 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
52 vertexMinWeights_view[ vertexIdx ] = minWeight;
53 };
54
55 int reducedVertexCount = TNL::Graphs::reduceVerticesIf( graph, 1, 4, condition, fetch, TNL::Min{}, store );
57
58 /***
59 * Print results.
60 */
61 std::cout << "Number of reduced vertices: " << reducedVertexCount << '\n';
62 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
63 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, reducedVertexCount ) << '\n';
64}
65
66int
67main( int argc, char* argv[] )
68{
69 std::cout << "Running on host:\n";
70 reduceVerticesIfExample< TNL::Devices::Host >();
71
72#ifdef __CUDACC__
73 std::cout << "Running on CUDA device:\n";
74 reduceVerticesIfExample< TNL::Devices::Cuda >();
75#endif
76
77 return EXIT_SUCCESS;
78}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]

◆ reduceAllVerticesIf() [4/4]

template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType TNL::Graphs::reduceAllVerticesIf ( Graph & graph,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices based on a condition with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
conditionLambda function for condition check. See Condition Check.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Basic Store (Vertex Index Only).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute minimum edge weight for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 auto vertexMinWeights_view = vertexMinWeights.getView();
37 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
38
39 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
40 {
41 return graph.getVertexDegree( vertexIdx ) >= 2;
42 };
43
44 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
45 {
46 return weight;
47 };
48
49 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& minWeight ) mutable
50 {
51 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
52 vertexMinWeights_view[ vertexIdx ] = minWeight;
53 };
54
55 int reducedVertexCount = TNL::Graphs::reduceVerticesIf( graph, 1, 4, condition, fetch, TNL::Min{}, store );
57
58 /***
59 * Print results.
60 */
61 std::cout << "Number of reduced vertices: " << reducedVertexCount << '\n';
62 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
63 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, reducedVertexCount ) << '\n';
64}
65
66int
67main( int argc, char* argv[] )
68{
69 std::cout << "Running on host:\n";
70 reduceVerticesIfExample< TNL::Devices::Host >();
71
72#ifdef __CUDACC__
73 std::cout << "Running on CUDA device:\n";
74 reduceVerticesIfExample< TNL::Devices::Cuda >();
75#endif
76
77 return EXIT_SUCCESS;
78}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]

◆ reduceAllVerticesWithArgument() [1/4]

template<typename Graph, typename Fetch, typename Reduction, typename Store, typename FetchValue>
void TNL::Graphs::reduceAllVerticesWithArgument ( const Graph & graph,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices while also returning the position of the edge of interest (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
fetchLambda function for fetching data. See For Const Graphs.
reductionLambda function for reduction with argument tracking. See Reduction With Argument (Position Tracking).
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Find minimum edge weight and target vertex for vertices in range [1, 5).
32 */
36 auto minWeights_view = minWeights.getView();
37 auto minTargets_view = minTargets.getView();
39
41 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
42 {
43 return weight;
44 };
46
48 auto store = [ = ] __cuda_callable__( int vertexIdx, int localIdx, int targetIdx, float result, bool isolatedVertex ) mutable
49 {
50 minWeights_view[ vertexIdx ] = result;
51 if( ! isolatedVertex )
52 minTargets_view[ vertexIdx ] = targetIdx;
53 };
55
57 TNL::Graphs::reduceVerticesWithArgument( graph, 1, 5, fetch, TNL::MinWithArg{}, store );
59
60 /***
61 * Print results.
62 */
63 std::cout << "Minimum edge weight for vertices 1-4:" << minWeights << '\n';
64 std::cout << "Target vertex for minimum edge:" << minTargets << '\n';
65}
66
67int
68main( int argc, char* argv[] )
69{
70 std::cout << "Running on host:\n";
71 reduceVerticesWithArgumentExample< TNL::Devices::Host >();
72
73#ifdef __CUDACC__
74 std::cout << "Running on CUDA device:\n";
75 reduceVerticesWithArgumentExample< TNL::Devices::Cuda >();
76#endif
77
78 return EXIT_SUCCESS;
79}
void reduceVerticesWithArgument(Graph &graph, IndexBegin begin, IndexEnd end, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
Performs parallel reduction within each graph vertex over a given range of vertex indexes while retur...
Function object implementing argmin(x, y, i, j), i.e. returning the minimum value and its index.
Definition Functional.h:321
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]

◆ reduceAllVerticesWithArgument() [2/4]

template<typename Graph, typename Fetch, typename Reduction, typename Store>
void TNL::Graphs::reduceAllVerticesWithArgument ( const Graph & graph,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices while also returning the position of the edge of interest, with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Find minimum edge weight and target vertex for vertices in range [1, 5).
32 */
36 auto minWeights_view = minWeights.getView();
37 auto minTargets_view = minTargets.getView();
39
41 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
42 {
43 return weight;
44 };
46
48 auto store = [ = ] __cuda_callable__( int vertexIdx, int localIdx, int targetIdx, float result, bool isolatedVertex ) mutable
49 {
50 minWeights_view[ vertexIdx ] = result;
51 if( ! isolatedVertex )
52 minTargets_view[ vertexIdx ] = targetIdx;
53 };
55
57 TNL::Graphs::reduceVerticesWithArgument( graph, 1, 5, fetch, TNL::MinWithArg{}, store );
59
60 /***
61 * Print results.
62 */
63 std::cout << "Minimum edge weight for vertices 1-4:" << minWeights << '\n';
64 std::cout << "Target vertex for minimum edge:" << minTargets << '\n';
65}
66
67int
68main( int argc, char* argv[] )
69{
70 std::cout << "Running on host:\n";
71 reduceVerticesWithArgumentExample< TNL::Devices::Host >();
72
73#ifdef __CUDACC__
74 std::cout << "Running on CUDA device:\n";
75 reduceVerticesWithArgumentExample< TNL::Devices::Cuda >();
76#endif
77
78 return EXIT_SUCCESS;
79}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]

◆ reduceAllVerticesWithArgument() [3/4]

template<typename Graph, typename Fetch, typename Reduction, typename Store, typename FetchValue>
void TNL::Graphs::reduceAllVerticesWithArgument ( Graph & graph,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices while also returning the position of the edge of interest.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionLambda function for reduction with argument tracking. See Reduction With Argument (Position Tracking).
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Find minimum edge weight and target vertex for vertices in range [1, 5).
32 */
36 auto minWeights_view = minWeights.getView();
37 auto minTargets_view = minTargets.getView();
39
41 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
42 {
43 return weight;
44 };
46
48 auto store = [ = ] __cuda_callable__( int vertexIdx, int localIdx, int targetIdx, float result, bool isolatedVertex ) mutable
49 {
50 minWeights_view[ vertexIdx ] = result;
51 if( ! isolatedVertex )
52 minTargets_view[ vertexIdx ] = targetIdx;
53 };
55
57 TNL::Graphs::reduceVerticesWithArgument( graph, 1, 5, fetch, TNL::MinWithArg{}, store );
59
60 /***
61 * Print results.
62 */
63 std::cout << "Minimum edge weight for vertices 1-4:" << minWeights << '\n';
64 std::cout << "Target vertex for minimum edge:" << minTargets << '\n';
65}
66
67int
68main( int argc, char* argv[] )
69{
70 std::cout << "Running on host:\n";
71 reduceVerticesWithArgumentExample< TNL::Devices::Host >();
72
73#ifdef __CUDACC__
74 std::cout << "Running on CUDA device:\n";
75 reduceVerticesWithArgumentExample< TNL::Devices::Cuda >();
76#endif
77
78 return EXIT_SUCCESS;
79}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]

◆ reduceAllVerticesWithArgument() [4/4]

template<typename Graph, typename Fetch, typename Reduction, typename Store>
void TNL::Graphs::reduceAllVerticesWithArgument ( Graph & graph,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices, while also returning the position of the edge of interest, with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Find minimum edge weight and target vertex for vertices in range [1, 4).
32 */
36 auto minWeights_view = minWeights.getView();
37 auto minTargets_view = minTargets.getView();
39
41 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
42 {
43 return weight;
44 };
46
48 auto store = [ = ] __cuda_callable__( int vertexIdx, int localIdx, int targetIdx, float result, bool isolatedVertex ) mutable
49 {
50 minWeights_view[ vertexIdx ] = result;
51 if( ! isolatedVertex )
52 minTargets_view[ vertexIdx ] = targetIdx;
53 };
55
57 TNL::Graphs::reduceVerticesWithArgument( graph, 1, 5, fetch, TNL::MinWithArg{}, store );
59
60 /***
61 * Print results.
62 */
63 std::cout << "Minimum edge weight for vertices 1-4:" << minWeights << '\n';
64 std::cout << "Target vertex for minimum edge:" << minTargets << '\n';
65}
66
67int
68main( int argc, char* argv[] )
69{
70 std::cout << "Running on host:\n";
71 reduceVerticesWithArgumentExample< TNL::Devices::Host >();
72
73#ifdef __CUDACC__
74 std::cout << "Running on CUDA device:\n";
75 reduceVerticesWithArgumentExample< TNL::Devices::Cuda >();
76#endif
77
78 return EXIT_SUCCESS;
79}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]

◆ reduceAllVerticesWithArgumentIf() [1/4]

template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue = decltype( std::declval< Fetch >()( 0, 0, std::declval< typename Graph::RealType >() ) )>
Graph::IndexType TNL::Graphs::reduceAllVerticesWithArgumentIf ( const Graph & graph,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices based on a condition, while also returning the position of the edge of interest (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
conditionLambda function for vertex condition checking. See Condition Check.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
identityThe identity element for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Find minimum edge weight and target vertex for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 TNL::Containers::Vector< int, Device > vertexMinTargets( 5, -1 );
37 TNL::Containers::Vector< int, Device > compressedVertexMinTargets( 5 );
38 auto vertexMinWeights_view = vertexMinWeights.getView();
39 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
40 auto vertexMinTargets_view = vertexMinTargets.getView();
41 auto compressedVertexMinTargets_view = compressedVertexMinTargets.getView();
42
43 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
44 {
45 return graph.getVertexDegree( vertexIdx ) >= 2;
46 };
47
48 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
49 {
50 return weight;
51 };
52
53 auto store =
55 int indexOfVertexIdx, int vertexIdx, int localIdx, int targetIdx, float minWeight, bool isolatedVertex ) mutable
56 {
57 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
58 vertexMinWeights_view[ vertexIdx ] = minWeight;
59 if( ! isolatedVertex ) {
60 compressedVertexMinTargets_view[ indexOfVertexIdx ] = targetIdx;
61 vertexMinTargets_view[ vertexIdx ] = targetIdx;
62 }
63 };
64
65 int traversedVertexCount =
66 TNL::Graphs::reduceVerticesWithArgumentIf( graph, 1, 4, condition, fetch, TNL::MinWithArg{}, store );
68
69 /***
70 * Print results.
71 */
72 std::cout << "Number of traversed vertices: " << traversedVertexCount << '\n';
73 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
74 std::cout << "Target vertex for minimum edge:" << vertexMinTargets << '\n';
75 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, traversedVertexCount ) << '\n';
76 std::cout << "Compressed minimum targets:" << compressedVertexMinTargets.getView( 0, traversedVertexCount ) << '\n';
77}
78
79int
80main( int argc, char* argv[] )
81{
82 std::cout << "Running on host:\n";
83 reduceVerticesWithArgumentIfExample< TNL::Devices::Host >();
84
85#ifdef __CUDACC__
86 std::cout << "Running on CUDA device:\n";
87 reduceVerticesWithArgumentIfExample< TNL::Devices::Cuda >();
88#endif
89
90 return EXIT_SUCCESS;
91}
Graph::IndexType reduceVerticesWithArgumentIf(Graph &graph, IndexBegin begin, IndexEnd end, Condition &&condition, Fetch &&fetch, Reduction &&reduction, Store &&store, const FetchValue &identity, TNL::Algorithms::Segments::LaunchConfiguration launchConfig=TNL::Algorithms::Segments::LaunchConfiguration())
Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a ...
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]

◆ reduceAllVerticesWithArgumentIf() [2/4]

template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType TNL::Graphs::reduceAllVerticesWithArgumentIf ( const Graph & graph,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices based on a condition, while also returning the position of the edge of interest, with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
conditionLambda function for vertex condition checking. See Condition Check.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Find minimum edge weight and target vertex for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 TNL::Containers::Vector< int, Device > vertexMinTargets( 5, -1 );
37 TNL::Containers::Vector< int, Device > compressedVertexMinTargets( 5 );
38 auto vertexMinWeights_view = vertexMinWeights.getView();
39 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
40 auto vertexMinTargets_view = vertexMinTargets.getView();
41 auto compressedVertexMinTargets_view = compressedVertexMinTargets.getView();
42
43 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
44 {
45 return graph.getVertexDegree( vertexIdx ) >= 2;
46 };
47
48 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
49 {
50 return weight;
51 };
52
53 auto store =
55 int indexOfVertexIdx, int vertexIdx, int localIdx, int targetIdx, float minWeight, bool isolatedVertex ) mutable
56 {
57 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
58 vertexMinWeights_view[ vertexIdx ] = minWeight;
59 if( ! isolatedVertex ) {
60 compressedVertexMinTargets_view[ indexOfVertexIdx ] = targetIdx;
61 vertexMinTargets_view[ vertexIdx ] = targetIdx;
62 }
63 };
64
65 int traversedVertexCount =
66 TNL::Graphs::reduceVerticesWithArgumentIf( graph, 1, 4, condition, fetch, TNL::MinWithArg{}, store );
68
69 /***
70 * Print results.
71 */
72 std::cout << "Number of traversed vertices: " << traversedVertexCount << '\n';
73 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
74 std::cout << "Target vertex for minimum edge:" << vertexMinTargets << '\n';
75 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, traversedVertexCount ) << '\n';
76 std::cout << "Compressed minimum targets:" << compressedVertexMinTargets.getView( 0, traversedVertexCount ) << '\n';
77}
78
79int
80main( int argc, char* argv[] )
81{
82 std::cout << "Running on host:\n";
83 reduceVerticesWithArgumentIfExample< TNL::Devices::Host >();
84
85#ifdef __CUDACC__
86 std::cout << "Running on CUDA device:\n";
87 reduceVerticesWithArgumentIfExample< TNL::Devices::Cuda >();
88#endif
89
90 return EXIT_SUCCESS;
91}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]

◆ reduceAllVerticesWithArgumentIf() [3/4]

template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue = decltype( std::declval< Fetch >()( 0, 0, std::declval< typename Graph::RealType >() ) )>
Graph::IndexType TNL::Graphs::reduceAllVerticesWithArgumentIf ( Graph & graph,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices based on a condition, while also returning the position of the edge of interest.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
conditionLambda function for vertex condition checking. See Condition Check.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
identityThe identity element for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Find minimum edge weight and target vertex for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 TNL::Containers::Vector< int, Device > vertexMinTargets( 5, -1 );
37 TNL::Containers::Vector< int, Device > compressedVertexMinTargets( 5 );
38 auto vertexMinWeights_view = vertexMinWeights.getView();
39 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
40 auto vertexMinTargets_view = vertexMinTargets.getView();
41 auto compressedVertexMinTargets_view = compressedVertexMinTargets.getView();
42
43 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
44 {
45 return graph.getVertexDegree( vertexIdx ) >= 2;
46 };
47
48 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
49 {
50 return weight;
51 };
52
53 auto store =
55 int indexOfVertexIdx, int vertexIdx, int localIdx, int targetIdx, float minWeight, bool isolatedVertex ) mutable
56 {
57 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
58 vertexMinWeights_view[ vertexIdx ] = minWeight;
59 if( ! isolatedVertex ) {
60 compressedVertexMinTargets_view[ indexOfVertexIdx ] = targetIdx;
61 vertexMinTargets_view[ vertexIdx ] = targetIdx;
62 }
63 };
64
65 int traversedVertexCount =
66 TNL::Graphs::reduceVerticesWithArgumentIf( graph, 1, 4, condition, fetch, TNL::MinWithArg{}, store );
68
69 /***
70 * Print results.
71 */
72 std::cout << "Number of traversed vertices: " << traversedVertexCount << '\n';
73 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
74 std::cout << "Target vertex for minimum edge:" << vertexMinTargets << '\n';
75 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, traversedVertexCount ) << '\n';
76 std::cout << "Compressed minimum targets:" << compressedVertexMinTargets.getView( 0, traversedVertexCount ) << '\n';
77}
78
79int
80main( int argc, char* argv[] )
81{
82 std::cout << "Running on host:\n";
83 reduceVerticesWithArgumentIfExample< TNL::Devices::Host >();
84
85#ifdef __CUDACC__
86 std::cout << "Running on CUDA device:\n";
87 reduceVerticesWithArgumentIfExample< TNL::Devices::Cuda >();
88#endif
89
90 return EXIT_SUCCESS;
91}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]

◆ reduceAllVerticesWithArgumentIf() [4/4]

template<typename Graph, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType TNL::Graphs::reduceAllVerticesWithArgumentIf ( Graph & graph,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over all vertices based on a condition, while also returning the position of the edge of interest, with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
conditionLambda function for vertex condition checking. See Condition Check.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Find minimum edge weight and target vertex for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 TNL::Containers::Vector< int, Device > vertexMinTargets( 5, -1 );
37 TNL::Containers::Vector< int, Device > compressedVertexMinTargets( 5 );
38 auto vertexMinWeights_view = vertexMinWeights.getView();
39 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
40 auto vertexMinTargets_view = vertexMinTargets.getView();
41 auto compressedVertexMinTargets_view = compressedVertexMinTargets.getView();
42
43 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
44 {
45 return graph.getVertexDegree( vertexIdx ) >= 2;
46 };
47
48 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
49 {
50 return weight;
51 };
52
53 auto store =
55 int indexOfVertexIdx, int vertexIdx, int localIdx, int targetIdx, float minWeight, bool isolatedVertex ) mutable
56 {
57 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
58 vertexMinWeights_view[ vertexIdx ] = minWeight;
59 if( ! isolatedVertex ) {
60 compressedVertexMinTargets_view[ indexOfVertexIdx ] = targetIdx;
61 vertexMinTargets_view[ vertexIdx ] = targetIdx;
62 }
63 };
64
65 int traversedVertexCount =
66 TNL::Graphs::reduceVerticesWithArgumentIf( graph, 1, 4, condition, fetch, TNL::MinWithArg{}, store );
68
69 /***
70 * Print results.
71 */
72 std::cout << "Number of traversed vertices: " << traversedVertexCount << '\n';
73 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
74 std::cout << "Target vertex for minimum edge:" << vertexMinTargets << '\n';
75 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, traversedVertexCount ) << '\n';
76 std::cout << "Compressed minimum targets:" << compressedVertexMinTargets.getView( 0, traversedVertexCount ) << '\n';
77}
78
79int
80main( int argc, char* argv[] )
81{
82 std::cout << "Running on host:\n";
83 reduceVerticesWithArgumentIfExample< TNL::Devices::Host >();
84
85#ifdef __CUDACC__
86 std::cout << "Running on CUDA device:\n";
87 reduceVerticesWithArgumentIfExample< TNL::Devices::Cuda >();
88#endif
89
90 return EXIT_SUCCESS;
91}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]

◆ reduceVertices() [1/8]

template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::reduceVertices ( const Graph & graph,
const Array & vertexIndexes,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within graph vertices specified by a given set of vertex indexes (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the vertices to iterate over.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
vertexIndexesThe array containing the indexes of the vertices to iterate over.
fetchLambda function for fetching data. See For Const Graphs.
reductionLambda function for reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Store With Vertex Index Array Or Condition.
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute sum of edge weights for specific vertices (0, 2, 3).
33 */
34 TNL::Containers::Vector< int, Device > vertexIndices( { 0, 2, 3 } );
36 TNL::Containers::Vector< float, Device > compressedVertexSums( 3, -1 );
37 auto vertexSums_view = vertexSums.getView();
38 auto compressedVertexSums_view = compressedVertexSums.getView();
39
40 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
41 {
42 return weight;
43 };
44
45 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& sum ) mutable
46 {
47 compressedVertexSums_view[ indexOfVertexIdx ] = sum;
48 vertexSums_view[ vertexIdx ] = sum;
49 };
50
51 TNL::Graphs::reduceVertices( graph, vertexIndices, fetch, TNL::Plus{}, store );
53
54 /***
55 * Print results.
56 */
57 std::cout << "Sum of edge weights for specific vertices:" << vertexSums << '\n';
58 std::cout << "Compressed sums:" << compressedVertexSums.getView( 0, vertexIndices.getSize() ) << '\n';
59}
60
61int
62main( int argc, char* argv[] )
63{
64 std::cout << "Running on host:\n";
65 reduceVerticesWithIndexesExample< TNL::Devices::Host >();
66
67#ifdef __CUDACC__
68 std::cout << "Running on CUDA device:\n";
69 reduceVerticesWithIndexesExample< TNL::Devices::Cuda >();
70#endif
71
72 return EXIT_SUCCESS;
73}
Function object implementing x + y.
Definition Functional.h:34
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Sum of edge weights for specific vertices:[ 30, -1, 60, 150, -1 ]
Compressed sums:[ 30, 60, 150 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Sum of edge weights for specific vertices:[ 30, -1, 60, 150, -1 ]
Compressed sums:[ 30, 60, 150 ]

◆ reduceVertices() [2/8]

template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::reduceVertices ( const Graph & graph,
const Array & vertexIndexes,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within graph vertices specified by a given set of vertex indexes with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the vertices to iterate over.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
vertexIndexesThe array containing the indexes of the vertices to iterate over.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Store With Vertex Index Array Or Condition.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute sum of edge weights for specific vertices (0, 2, 3).
33 */
34 TNL::Containers::Vector< int, Device > vertexIndices( { 0, 2, 3 } );
36 TNL::Containers::Vector< float, Device > compressedVertexSums( 3, -1 );
37 auto vertexSums_view = vertexSums.getView();
38 auto compressedVertexSums_view = compressedVertexSums.getView();
39
40 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
41 {
42 return weight;
43 };
44
45 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& sum ) mutable
46 {
47 compressedVertexSums_view[ indexOfVertexIdx ] = sum;
48 vertexSums_view[ vertexIdx ] = sum;
49 };
50
51 TNL::Graphs::reduceVertices( graph, vertexIndices, fetch, TNL::Plus{}, store );
53
54 /***
55 * Print results.
56 */
57 std::cout << "Sum of edge weights for specific vertices:" << vertexSums << '\n';
58 std::cout << "Compressed sums:" << compressedVertexSums.getView( 0, vertexIndices.getSize() ) << '\n';
59}
60
61int
62main( int argc, char* argv[] )
63{
64 std::cout << "Running on host:\n";
65 reduceVerticesWithIndexesExample< TNL::Devices::Host >();
66
67#ifdef __CUDACC__
68 std::cout << "Running on CUDA device:\n";
69 reduceVerticesWithIndexesExample< TNL::Devices::Cuda >();
70#endif
71
72 return EXIT_SUCCESS;
73}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Sum of edge weights for specific vertices:[ 30, -1, 60, 150, -1 ]
Compressed sums:[ 30, 60, 150 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Sum of edge weights for specific vertices:[ 30, -1, 60, 150, -1 ]
Compressed sums:[ 30, 60, 150 ]

◆ reduceVertices() [3/8]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::reduceVertices ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
fetchLambda function for fetching data. See For Const Graphs.
reductionLambda function for the reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Basic Store (Vertex Index Only).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Compute maximum edge weight for vertices in range [1, 4).
32 */
34 TNL::Containers::Vector< float, Device > vertexMaxWeights( 5, -1 );
35 auto vertexMaxWeights_view = vertexMaxWeights.getView();
37
39 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
40 {
41 return weight;
42 };
44
46 auto store = [ = ] __cuda_callable__( int vertexIdx, const float& maxWeight ) mutable
47 {
48 vertexMaxWeights_view[ vertexIdx ] = maxWeight;
49 };
51
53 TNL::Graphs::reduceVertices( graph, 1, 4, fetch, TNL::Max{}, store );
55
56 /***
57 * Print results.
58 */
59 std::cout << "Maximum edge weight for vertices 1-3:" << vertexMaxWeights << '\n';
60}
61
62int
63main( int argc, char* argv[] )
64{
65 std::cout << "Running on host:\n";
66 reduceVerticesExample< TNL::Devices::Host >();
67
68#ifdef __CUDACC__
69 std::cout << "Running on CUDA device:\n";
70 reduceVerticesExample< TNL::Devices::Cuda >();
71#endif
72
73 return EXIT_SUCCESS;
74}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]

◆ reduceVertices() [4/8]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::reduceVertices ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Basic Store (Vertex Index Only).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Compute maximum edge weight for vertices in range [1, 4).
32 */
34 TNL::Containers::Vector< float, Device > vertexMaxWeights( 5, -1 );
35 auto vertexMaxWeights_view = vertexMaxWeights.getView();
37
39 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
40 {
41 return weight;
42 };
44
46 auto store = [ = ] __cuda_callable__( int vertexIdx, const float& maxWeight ) mutable
47 {
48 vertexMaxWeights_view[ vertexIdx ] = maxWeight;
49 };
51
53 TNL::Graphs::reduceVertices( graph, 1, 4, fetch, TNL::Max{}, store );
55
56 /***
57 * Print results.
58 */
59 std::cout << "Maximum edge weight for vertices 1-3:" << vertexMaxWeights << '\n';
60}
61
62int
63main( int argc, char* argv[] )
64{
65 std::cout << "Running on host:\n";
66 reduceVerticesExample< TNL::Devices::Host >();
67
68#ifdef __CUDACC__
69 std::cout << "Running on CUDA device:\n";
70 reduceVerticesExample< TNL::Devices::Cuda >();
71#endif
72
73 return EXIT_SUCCESS;
74}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]

◆ reduceVertices() [5/8]

template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::reduceVertices ( Graph & graph,
const Array & vertexIndexes,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within graph vertices specified by a given set of vertex indexes.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the vertices to iterate over.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
vertexIndexesThe array containing the indexes of the vertices to iterate over.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionLambda function for the reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Store With Vertex Index Array Or Condition.
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute sum of edge weights for specific vertices (0, 2, 3).
33 */
34 TNL::Containers::Vector< int, Device > vertexIndices( { 0, 2, 3 } );
36 TNL::Containers::Vector< float, Device > compressedVertexSums( 3, -1 );
37 auto vertexSums_view = vertexSums.getView();
38 auto compressedVertexSums_view = compressedVertexSums.getView();
39
40 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
41 {
42 return weight;
43 };
44
45 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& sum ) mutable
46 {
47 compressedVertexSums_view[ indexOfVertexIdx ] = sum;
48 vertexSums_view[ vertexIdx ] = sum;
49 };
50
51 TNL::Graphs::reduceVertices( graph, vertexIndices, fetch, TNL::Plus{}, store );
53
54 /***
55 * Print results.
56 */
57 std::cout << "Sum of edge weights for specific vertices:" << vertexSums << '\n';
58 std::cout << "Compressed sums:" << compressedVertexSums.getView( 0, vertexIndices.getSize() ) << '\n';
59}
60
61int
62main( int argc, char* argv[] )
63{
64 std::cout << "Running on host:\n";
65 reduceVerticesWithIndexesExample< TNL::Devices::Host >();
66
67#ifdef __CUDACC__
68 std::cout << "Running on CUDA device:\n";
69 reduceVerticesWithIndexesExample< TNL::Devices::Cuda >();
70#endif
71
72 return EXIT_SUCCESS;
73}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Sum of edge weights for specific vertices:[ 30, -1, 60, 150, -1 ]
Compressed sums:[ 30, 60, 150 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Sum of edge weights for specific vertices:[ 30, -1, 60, 150, -1 ]
Compressed sums:[ 30, 60, 150 ]

◆ reduceVertices() [6/8]

template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::reduceVertices ( Graph & graph,
const Array & vertexIndexes,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within graph vertices specified by a given set of vertex indexes with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the vertices to iterate over.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
vertexIndexesThe array containing the indexes of the vertices to iterate over.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Store With Vertex Index Array Or Condition.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithIndexesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute sum of edge weights for specific vertices (0, 2, 3).
33 */
34 TNL::Containers::Vector< int, Device > vertexIndices( { 0, 2, 3 } );
36 TNL::Containers::Vector< float, Device > compressedVertexSums( 3, -1 );
37 auto vertexSums_view = vertexSums.getView();
38 auto compressedVertexSums_view = compressedVertexSums.getView();
39
40 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
41 {
42 return weight;
43 };
44
45 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& sum ) mutable
46 {
47 compressedVertexSums_view[ indexOfVertexIdx ] = sum;
48 vertexSums_view[ vertexIdx ] = sum;
49 };
50
51 TNL::Graphs::reduceVertices( graph, vertexIndices, fetch, TNL::Plus{}, store );
53
54 /***
55 * Print results.
56 */
57 std::cout << "Sum of edge weights for specific vertices:" << vertexSums << '\n';
58 std::cout << "Compressed sums:" << compressedVertexSums.getView( 0, vertexIndices.getSize() ) << '\n';
59}
60
61int
62main( int argc, char* argv[] )
63{
64 std::cout << "Running on host:\n";
65 reduceVerticesWithIndexesExample< TNL::Devices::Host >();
66
67#ifdef __CUDACC__
68 std::cout << "Running on CUDA device:\n";
69 reduceVerticesWithIndexesExample< TNL::Devices::Cuda >();
70#endif
71
72 return EXIT_SUCCESS;
73}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Sum of edge weights for specific vertices:[ 30, -1, 60, 150, -1 ]
Compressed sums:[ 30, 60, 150 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Sum of edge weights for specific vertices:[ 30, -1, 60, 150, -1 ]
Compressed sums:[ 30, 60, 150 ]

◆ reduceVertices() [7/8]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::reduceVertices ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionLambda function for the reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Basic Store (Vertex Index Only).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Compute maximum edge weight for vertices in range [1, 4).
32 */
34 TNL::Containers::Vector< float, Device > vertexMaxWeights( 5, -1 );
35 auto vertexMaxWeights_view = vertexMaxWeights.getView();
37
39 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
40 {
41 return weight;
42 };
44
46 auto store = [ = ] __cuda_callable__( int vertexIdx, const float& maxWeight ) mutable
47 {
48 vertexMaxWeights_view[ vertexIdx ] = maxWeight;
49 };
51
53 TNL::Graphs::reduceVertices( graph, 1, 4, fetch, TNL::Max{}, store );
55
56 /***
57 * Print results.
58 */
59 std::cout << "Maximum edge weight for vertices 1-3:" << vertexMaxWeights << '\n';
60}
61
62int
63main( int argc, char* argv[] )
64{
65 std::cout << "Running on host:\n";
66 reduceVerticesExample< TNL::Devices::Host >();
67
68#ifdef __CUDACC__
69 std::cout << "Running on CUDA device:\n";
70 reduceVerticesExample< TNL::Devices::Cuda >();
71#endif
72
73 return EXIT_SUCCESS;
74}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]

◆ reduceVertices() [8/8]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::reduceVertices ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Basic Store (Vertex Index Only).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Compute maximum edge weight for vertices in range [1, 4).
32 */
34 TNL::Containers::Vector< float, Device > vertexMaxWeights( 5, -1 );
35 auto vertexMaxWeights_view = vertexMaxWeights.getView();
37
39 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
40 {
41 return weight;
42 };
44
46 auto store = [ = ] __cuda_callable__( int vertexIdx, const float& maxWeight ) mutable
47 {
48 vertexMaxWeights_view[ vertexIdx ] = maxWeight;
49 };
51
53 TNL::Graphs::reduceVertices( graph, 1, 4, fetch, TNL::Max{}, store );
55
56 /***
57 * Print results.
58 */
59 std::cout << "Maximum edge weight for vertices 1-3:" << vertexMaxWeights << '\n';
60}
61
62int
63main( int argc, char* argv[] )
64{
65 std::cout << "Running on host:\n";
66 reduceVerticesExample< TNL::Devices::Host >();
67
68#ifdef __CUDACC__
69 std::cout << "Running on CUDA device:\n";
70 reduceVerticesExample< TNL::Devices::Cuda >();
71#endif
72
73 return EXIT_SUCCESS;
74}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Maximum edge weight for vertices 1-3:[ -1, 50, 60, 80, -1 ]

◆ reduceVerticesIf() [1/4]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue>
Graph::IndexType TNL::Graphs::reduceVerticesIf ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
conditionLambda function for condition check. See Condition Check.
fetchLambda function for fetching data. See For Const Graphs.
reductionLambda function for reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Basic Store (Vertex Index Only).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute minimum edge weight for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 auto vertexMinWeights_view = vertexMinWeights.getView();
37 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
38
39 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
40 {
41 return graph.getVertexDegree( vertexIdx ) >= 2;
42 };
43
44 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
45 {
46 return weight;
47 };
48
49 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& minWeight ) mutable
50 {
51 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
52 vertexMinWeights_view[ vertexIdx ] = minWeight;
53 };
54
55 int reducedVertexCount = TNL::Graphs::reduceVerticesIf( graph, 1, 4, condition, fetch, TNL::Min{}, store );
57
58 /***
59 * Print results.
60 */
61 std::cout << "Number of reduced vertices: " << reducedVertexCount << '\n';
62 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
63 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, reducedVertexCount ) << '\n';
64}
65
66int
67main( int argc, char* argv[] )
68{
69 std::cout << "Running on host:\n";
70 reduceVerticesIfExample< TNL::Devices::Host >();
71
72#ifdef __CUDACC__
73 std::cout << "Running on CUDA device:\n";
74 reduceVerticesIfExample< TNL::Devices::Cuda >();
75#endif
76
77 return EXIT_SUCCESS;
78}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]

◆ reduceVerticesIf() [2/4]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType TNL::Graphs::reduceVerticesIf ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
conditionLambda function for condition check. See Condition Check.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Basic Store (Vertex Index Only).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute minimum edge weight for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 auto vertexMinWeights_view = vertexMinWeights.getView();
37 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
38
39 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
40 {
41 return graph.getVertexDegree( vertexIdx ) >= 2;
42 };
43
44 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
45 {
46 return weight;
47 };
48
49 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& minWeight ) mutable
50 {
51 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
52 vertexMinWeights_view[ vertexIdx ] = minWeight;
53 };
54
55 int reducedVertexCount = TNL::Graphs::reduceVerticesIf( graph, 1, 4, condition, fetch, TNL::Min{}, store );
57
58 /***
59 * Print results.
60 */
61 std::cout << "Number of reduced vertices: " << reducedVertexCount << '\n';
62 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
63 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, reducedVertexCount ) << '\n';
64}
65
66int
67main( int argc, char* argv[] )
68{
69 std::cout << "Running on host:\n";
70 reduceVerticesIfExample< TNL::Devices::Host >();
71
72#ifdef __CUDACC__
73 std::cout << "Running on CUDA device:\n";
74 reduceVerticesIfExample< TNL::Devices::Cuda >();
75#endif
76
77 return EXIT_SUCCESS;
78}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]

◆ reduceVerticesIf() [3/4]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue>
Graph::IndexType TNL::Graphs::reduceVerticesIf ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
conditionLambda function for condition check. See Condition Check.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionLambda function for reduction operation. See Basic Reduction (Without Arguments).
storeLambda function for storing results. See Basic Store (Vertex Index Only).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute minimum edge weight for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 auto vertexMinWeights_view = vertexMinWeights.getView();
37 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
38
39 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
40 {
41 return graph.getVertexDegree( vertexIdx ) >= 2;
42 };
43
44 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
45 {
46 return weight;
47 };
48
49 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& minWeight ) mutable
50 {
51 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
52 vertexMinWeights_view[ vertexIdx ] = minWeight;
53 };
54
55 int reducedVertexCount = TNL::Graphs::reduceVerticesIf( graph, 1, 4, condition, fetch, TNL::Min{}, store );
57
58 /***
59 * Print results.
60 */
61 std::cout << "Number of reduced vertices: " << reducedVertexCount << '\n';
62 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
63 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, reducedVertexCount ) << '\n';
64}
65
66int
67main( int argc, char* argv[] )
68{
69 std::cout << "Running on host:\n";
70 reduceVerticesIfExample< TNL::Devices::Host >();
71
72#ifdef __CUDACC__
73 std::cout << "Running on CUDA device:\n";
74 reduceVerticesIfExample< TNL::Devices::Cuda >();
75#endif
76
77 return EXIT_SUCCESS;
78}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]

◆ reduceVerticesIf() [4/4]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType TNL::Graphs::reduceVerticesIf ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
conditionLambda function for condition check. See Condition Check.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction operation. See Reduction Function Objects.
storeLambda function for storing results. See Basic Store (Vertex Index Only).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Compute minimum edge weight for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 auto vertexMinWeights_view = vertexMinWeights.getView();
37 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
38
39 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
40 {
41 return graph.getVertexDegree( vertexIdx ) >= 2;
42 };
43
44 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
45 {
46 return weight;
47 };
48
49 auto store = [ = ] __cuda_callable__( int indexOfVertexIdx, int vertexIdx, const float& minWeight ) mutable
50 {
51 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
52 vertexMinWeights_view[ vertexIdx ] = minWeight;
53 };
54
55 int reducedVertexCount = TNL::Graphs::reduceVerticesIf( graph, 1, 4, condition, fetch, TNL::Min{}, store );
57
58 /***
59 * Print results.
60 */
61 std::cout << "Number of reduced vertices: " << reducedVertexCount << '\n';
62 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
63 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, reducedVertexCount ) << '\n';
64}
65
66int
67main( int argc, char* argv[] )
68{
69 std::cout << "Running on host:\n";
70 reduceVerticesIfExample< TNL::Devices::Host >();
71
72#ifdef __CUDACC__
73 std::cout << "Running on CUDA device:\n";
74 reduceVerticesIfExample< TNL::Devices::Cuda >();
75#endif
76
77 return EXIT_SUCCESS;
78}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of reduced vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Compressed minimum weights:[ 30, 70 ]

◆ reduceVerticesWithArgument() [1/8]

template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::reduceVerticesWithArgument ( const Graph & graph,
const Array & vertexIndexes,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within graph vertices specified by a given set of vertex indexes while returning also the position of the edge of interest (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the vertices to iterate over.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
vertexIndexesThe array containing the indexes of the vertices to iterate over.
fetchLambda function for fetching data. See For Const Graphs.
reductionLambda function for reduction with argument tracking. See Reduction With Argument (Position Tracking).
storeLambda function for storing results with position tracking. See Store With Vertex Index Array and With Argument.
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
 31 * Find minimum edge weight and target vertex for vertices in range [1, 5).
32 */
36 auto minWeights_view = minWeights.getView();
37 auto minTargets_view = minTargets.getView();
39
41 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
42 {
43 return weight;
44 };
46
48 auto store = [ = ] __cuda_callable__( int vertexIdx, int localIdx, int targetIdx, float result, bool isolatedVertex ) mutable
49 {
50 minWeights_view[ vertexIdx ] = result;
51 if( ! isolatedVertex )
52 minTargets_view[ vertexIdx ] = targetIdx;
53 };
55
57 TNL::Graphs::reduceVerticesWithArgument( graph, 1, 5, fetch, TNL::MinWithArg{}, store );
59
60 /***
61 * Print results.
62 */
63 std::cout << "Minimum edge weight for vertices 1-4:" << minWeights << '\n';
64 std::cout << "Target vertex for minimum edge:" << minTargets << '\n';
65}
66
67int
68main( int argc, char* argv[] )
69{
70 std::cout << "Running on host:\n";
71 reduceVerticesWithArgumentExample< TNL::Devices::Host >();
72
73#ifdef __CUDACC__
74 std::cout << "Running on CUDA device:\n";
75 reduceVerticesWithArgumentExample< TNL::Devices::Cuda >();
76#endif
77
78 return EXIT_SUCCESS;
79}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]

◆ reduceVerticesWithArgument() [2/8]

template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::reduceVerticesWithArgument ( const Graph & graph,
const Array & vertexIndexes,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within graph vertices specified by a given set of vertex indexes while returning also the position of the edge of interest with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the vertices to iterate over.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
vertexIndexesThe array containing the indexes of the vertices to iterate over.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Vertex Index Array and With Argument.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
 31 * Find minimum edge weight and target vertex for vertices in range [1, 5).
32 */
36 auto minWeights_view = minWeights.getView();
37 auto minTargets_view = minTargets.getView();
39
41 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
42 {
43 return weight;
44 };
46
48 auto store = [ = ] __cuda_callable__( int vertexIdx, int localIdx, int targetIdx, float result, bool isolatedVertex ) mutable
49 {
50 minWeights_view[ vertexIdx ] = result;
51 if( ! isolatedVertex )
52 minTargets_view[ vertexIdx ] = targetIdx;
53 };
55
57 TNL::Graphs::reduceVerticesWithArgument( graph, 1, 5, fetch, TNL::MinWithArg{}, store );
59
60 /***
61 * Print results.
62 */
63 std::cout << "Minimum edge weight for vertices 1-4:" << minWeights << '\n';
64 std::cout << "Target vertex for minimum edge:" << minTargets << '\n';
65}
66
67int
68main( int argc, char* argv[] )
69{
70 std::cout << "Running on host:\n";
71 reduceVerticesWithArgumentExample< TNL::Devices::Host >();
72
73#ifdef __CUDACC__
74 std::cout << "Running on CUDA device:\n";
75 reduceVerticesWithArgumentExample< TNL::Devices::Cuda >();
76#endif
77
78 return EXIT_SUCCESS;
79}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]

◆ reduceVerticesWithArgument() [3/8]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::reduceVerticesWithArgument ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes while returning also the position of the edge of interest (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
fetchLambda function for fetching data. See For Const Graphs.
reductionLambda function for reduction operation with argument. See Reduction With Argument (Position Tracking).
storeLambda function for storing results. See Store With Argument (Position Tracking).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.

◆ reduceVerticesWithArgument() [4/8]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::reduceVerticesWithArgument ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes while returning also the position of the edge of interest with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction operation with argument. See Reduction Function Objects.
storeLambda function for storing results. See Store With Argument (Position Tracking).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.

◆ reduceVerticesWithArgument() [5/8]

template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::reduceVerticesWithArgument ( Graph & graph,
const Array & vertexIndexes,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within graph vertices specified by a given set of vertex indexes while returning also the position of the edge of interest.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the vertices to iterate over.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
vertexIndexesThe array containing the indexes of the vertices to iterate over.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionLambda function for reduction with argument tracking. See Reduction With Argument (Position Tracking).
storeLambda function for storing results with position tracking. See Store With Vertex Index Array and With Argument.
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
 31 * Find minimum edge weight and target vertex for vertices in range [1, 5).
32 */
36 auto minWeights_view = minWeights.getView();
37 auto minTargets_view = minTargets.getView();
39
41 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
42 {
43 return weight;
44 };
46
48 auto store = [ = ] __cuda_callable__( int vertexIdx, int localIdx, int targetIdx, float result, bool isolatedVertex ) mutable
49 {
50 minWeights_view[ vertexIdx ] = result;
51 if( ! isolatedVertex )
52 minTargets_view[ vertexIdx ] = targetIdx;
53 };
55
57 TNL::Graphs::reduceVerticesWithArgument( graph, 1, 5, fetch, TNL::MinWithArg{}, store );
59
60 /***
61 * Print results.
62 */
63 std::cout << "Minimum edge weight for vertices 1-4:" << minWeights << '\n';
64 std::cout << "Target vertex for minimum edge:" << minTargets << '\n';
65}
66
67int
68main( int argc, char* argv[] )
69{
70 std::cout << "Running on host:\n";
71 reduceVerticesWithArgumentExample< TNL::Devices::Host >();
72
73#ifdef __CUDACC__
74 std::cout << "Running on CUDA device:\n";
75 reduceVerticesWithArgumentExample< TNL::Devices::Cuda >();
76#endif
77
78 return EXIT_SUCCESS;
79}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]

◆ reduceVerticesWithArgument() [6/8]

template<typename Graph, typename Array, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< IsArrayType< Array >::value >>
void TNL::Graphs::reduceVerticesWithArgument ( Graph & graph,
const Array & vertexIndexes,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within graph vertices specified by a given set of vertex indexes while returning also the position of the edge of interest with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
ArrayThe type of the array containing the indexes of the vertices to iterate over.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
vertexIndexesThe array containing the indexes of the vertices to iterate over.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Vertex Index Array and With Argument.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
30 /***
31 * Find minimum edge weight and target vertex for vertices in range [1, 5).
32 */
33 TNL::Containers::Vector< float, Device > minWeights( 5, -1 );
34 TNL::Containers::Vector< int, Device > minTargets( 5, -1 );
35
36 auto minWeights_view = minWeights.getView();
37 auto minTargets_view = minTargets.getView();
39
41 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
42 {
43 return weight;
44 };
46
48 auto store = [ = ] __cuda_callable__( int vertexIdx, int localIdx, int targetIdx, float result, bool isolatedVertex ) mutable
49 {
50 minWeights_view[ vertexIdx ] = result;
51 if( ! isolatedVertex )
52 minTargets_view[ vertexIdx ] = targetIdx;
53 };
55
57 TNL::Graphs::reduceVerticesWithArgument( graph, 1, 5, fetch, TNL::MinWithArg{}, store );
59
60 /***
61 * Print results.
62 */
63 std::cout << "Minimum edge weight for vertices 1-4:" << minWeights << '\n';
64 std::cout << "Target vertex for minimum edge:" << minTargets << '\n';
65}
66
67int
68main( int argc, char* argv[] )
69{
70 std::cout << "Running on host:\n";
71 reduceVerticesWithArgumentExample< TNL::Devices::Host >();
72
73#ifdef __CUDACC__
74 std::cout << "Running on CUDA device:\n";
75 reduceVerticesWithArgumentExample< TNL::Devices::Cuda >();
76#endif
77
78 return EXIT_SUCCESS;
79}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Minimum edge weight for vertices 1-4:[ -1, 30, 60, 70, 3.40282e+38 ]
Target vertex for minimum edge:[ -1, 2, 3, 0, -1 ]

◆ reduceVerticesWithArgument() [7/8]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename FetchValue, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::reduceVerticesWithArgument ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes while returning also the position of the edge of interest.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionLambda function for reduction operation with argument. See Reduction With Argument (Position Tracking).
storeLambda function for storing results. See Store With Argument (Position Tracking).
identityThe initial value for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.

◆ reduceVerticesWithArgument() [8/8]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Fetch, typename Reduction, typename Store, typename T = typename std::enable_if_t< std::is_integral_v< IndexBegin > && std::is_integral_v< IndexEnd > >>
void TNL::Graphs::reduceVerticesWithArgument ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes while returning also the position of the edge of interest with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction operation with argument. See Reduction Function Objects.
storeLambda function for storing results. See Store With Argument (Position Tracking).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.

◆ reduceVerticesWithArgumentIf() [1/4]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue = decltype( std::declval< Fetch >()( 0, 0, std::declval< typename Graph::RealType >() ) )>
Graph::IndexType TNL::Graphs::reduceVerticesWithArgumentIf ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition while returning also the position of the edge of interest (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
conditionLambda function for vertex condition checking. See Condition Check.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
identityThe identity element for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Find minimum edge weight and target vertex for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 TNL::Containers::Vector< int, Device > vertexMinTargets( 5, -1 );
37 TNL::Containers::Vector< int, Device > compressedVertexMinTargets( 5 );
38 auto vertexMinWeights_view = vertexMinWeights.getView();
39 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
40 auto vertexMinTargets_view = vertexMinTargets.getView();
41 auto compressedVertexMinTargets_view = compressedVertexMinTargets.getView();
42
43 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
44 {
45 return graph.getVertexDegree( vertexIdx ) >= 2;
46 };
47
48 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
49 {
50 return weight;
51 };
52
53 auto store =
54 [ = ] __cuda_callable__(
55 int indexOfVertexIdx, int vertexIdx, int localIdx, int targetIdx, float minWeight, bool isolatedVertex ) mutable
56 {
57 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
58 vertexMinWeights_view[ vertexIdx ] = minWeight;
59 if( ! isolatedVertex ) {
60 compressedVertexMinTargets_view[ indexOfVertexIdx ] = targetIdx;
61 vertexMinTargets_view[ vertexIdx ] = targetIdx;
62 }
63 };
64
65 int traversedVertexCount =
66 TNL::Graphs::reduceVerticesWithArgumentIf( graph, 1, 4, condition, fetch, TNL::MinWithArg{}, store );
68
69 /***
70 * Print results.
71 */
72 std::cout << "Number of traversed vertices: " << traversedVertexCount << '\n';
73 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
74 std::cout << "Target vertex for minimum edge:" << vertexMinTargets << '\n';
75 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, traversedVertexCount ) << '\n';
76 std::cout << "Compressed minimum targets:" << compressedVertexMinTargets.getView( 0, traversedVertexCount ) << '\n';
77}
78
79int
80main( int argc, char* argv[] )
81{
82 std::cout << "Running on host:\n";
83 reduceVerticesWithArgumentIfExample< TNL::Devices::Host >();
84
85#ifdef __CUDACC__
86 std::cout << "Running on CUDA device:\n";
87 reduceVerticesWithArgumentIfExample< TNL::Devices::Cuda >();
88#endif
89
90 return EXIT_SUCCESS;
91}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]

◆ reduceVerticesWithArgumentIf() [2/4]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType TNL::Graphs::reduceVerticesWithArgumentIf ( const Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition while returning also the position of the edge of interest with automatic identity deduction (const version).

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
conditionLambda function for vertex condition checking. See Condition Check.
fetchLambda function for fetching data. See For Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Find minimum edge weight and target vertex for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 TNL::Containers::Vector< int, Device > vertexMinTargets( 5, -1 );
37 TNL::Containers::Vector< int, Device > compressedVertexMinTargets( 5 );
38 auto vertexMinWeights_view = vertexMinWeights.getView();
39 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
40 auto vertexMinTargets_view = vertexMinTargets.getView();
41 auto compressedVertexMinTargets_view = compressedVertexMinTargets.getView();
42
43 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
44 {
45 return graph.getVertexDegree( vertexIdx ) >= 2;
46 };
47
48 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
49 {
50 return weight;
51 };
52
53 auto store =
54 [ = ] __cuda_callable__(
55 int indexOfVertexIdx, int vertexIdx, int localIdx, int targetIdx, float minWeight, bool isolatedVertex ) mutable
56 {
57 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
58 vertexMinWeights_view[ vertexIdx ] = minWeight;
59 if( ! isolatedVertex ) {
60 compressedVertexMinTargets_view[ indexOfVertexIdx ] = targetIdx;
61 vertexMinTargets_view[ vertexIdx ] = targetIdx;
62 }
63 };
64
65 int traversedVertexCount =
66 TNL::Graphs::reduceVerticesWithArgumentIf( graph, 1, 4, condition, fetch, TNL::MinWithArg{}, store );
68
69 /***
70 * Print results.
71 */
72 std::cout << "Number of traversed vertices: " << traversedVertexCount << '\n';
73 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
74 std::cout << "Target vertex for minimum edge:" << vertexMinTargets << '\n';
75 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, traversedVertexCount ) << '\n';
76 std::cout << "Compressed minimum targets:" << compressedVertexMinTargets.getView( 0, traversedVertexCount ) << '\n';
77}
78
79int
80main( int argc, char* argv[] )
81{
82 std::cout << "Running on host:\n";
83 reduceVerticesWithArgumentIfExample< TNL::Devices::Host >();
84
85#ifdef __CUDACC__
86 std::cout << "Running on CUDA device:\n";
87 reduceVerticesWithArgumentIfExample< TNL::Devices::Cuda >();
88#endif
89
90 return EXIT_SUCCESS;
91}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]

◆ reduceVerticesWithArgumentIf() [3/4]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store, typename FetchValue = decltype( std::declval< Fetch >()( 0, 0, std::declval< typename Graph::RealType >() ) )>
Graph::IndexType TNL::Graphs::reduceVerticesWithArgumentIf ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
const FetchValue & identity,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition while returning also the position of the edge of interest.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
FetchValueThe type returned by the Fetch lambda function.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
conditionLambda function for vertex condition checking. See Condition Check.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
identityThe identity element for the reduction operation.
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Find minimum edge weight and target vertex for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 TNL::Containers::Vector< int, Device > vertexMinTargets( 5, -1 );
37 TNL::Containers::Vector< int, Device > compressedVertexMinTargets( 5 );
38 auto vertexMinWeights_view = vertexMinWeights.getView();
39 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
40 auto vertexMinTargets_view = vertexMinTargets.getView();
41 auto compressedVertexMinTargets_view = compressedVertexMinTargets.getView();
42
43 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
44 {
45 return graph.getVertexDegree( vertexIdx ) >= 2;
46 };
47
48 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
49 {
50 return weight;
51 };
52
53 auto store =
54 [ = ] __cuda_callable__(
55 int indexOfVertexIdx, int vertexIdx, int localIdx, int targetIdx, float minWeight, bool isolatedVertex ) mutable
56 {
57 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
58 vertexMinWeights_view[ vertexIdx ] = minWeight;
59 if( ! isolatedVertex ) {
60 compressedVertexMinTargets_view[ indexOfVertexIdx ] = targetIdx;
61 vertexMinTargets_view[ vertexIdx ] = targetIdx;
62 }
63 };
64
65 int traversedVertexCount =
66 TNL::Graphs::reduceVerticesWithArgumentIf( graph, 1, 4, condition, fetch, TNL::MinWithArg{}, store );
68
69 /***
70 * Print results.
71 */
72 std::cout << "Number of traversed vertices: " << traversedVertexCount << '\n';
73 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
74 std::cout << "Target vertex for minimum edge:" << vertexMinTargets << '\n';
75 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, traversedVertexCount ) << '\n';
76 std::cout << "Compressed minimum targets:" << compressedVertexMinTargets.getView( 0, traversedVertexCount ) << '\n';
77}
78
79int
80main( int argc, char* argv[] )
81{
82 std::cout << "Running on host:\n";
83 reduceVerticesWithArgumentIfExample< TNL::Devices::Host >();
84
85#ifdef __CUDACC__
86 std::cout << "Running on CUDA device:\n";
87 reduceVerticesWithArgumentIfExample< TNL::Devices::Cuda >();
88#endif
89
90 return EXIT_SUCCESS;
91}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]

◆ reduceVerticesWithArgumentIf() [4/4]

template<typename Graph, typename IndexBegin, typename IndexEnd, typename Condition, typename Fetch, typename Reduction, typename Store>
Graph::IndexType TNL::Graphs::reduceVerticesWithArgumentIf ( Graph & graph,
IndexBegin begin,
IndexEnd end,
Condition && condition,
Fetch && fetch,
Reduction && reduction,
Store && store,
TNL::Algorithms::Segments::LaunchConfiguration launchConfig = TNL::Algorithms::Segments::LaunchConfiguration() )

Performs parallel reduction within each graph vertex over a given range of vertex indexes based on a condition while returning also the position of the edge of interest with automatic identity deduction.

See also: Overview of Graph Reduction Functions

Template Parameters
GraphThe type of the graph.
IndexBeginThe type of the index defining the beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
IndexEndThe type of the index defining the end of the interval [ begin, end ) of vertices where the reduction will be performed.
ConditionThe type of the lambda function used for the condition check.
FetchThe type of the lambda function used for data fetching.
ReductionThe type of the function object defining the reduction operation.
StoreThe type of the lambda function used for storing results from individual vertices.
Parameters
graphThe graph on which the reduction will be performed.
beginThe beginning of the interval [ begin, end ) of vertices where the reduction will be performed.
endThe end of the interval [ begin, end ) of vertices where the reduction will be performed.
conditionLambda function for vertex condition checking. See Condition Check.
fetchLambda function for fetching data. See For Non-Const Graphs.
reductionFunction object for reduction with argument tracking. See Reduction Function Objects.
storeLambda function for storing results with position tracking. See Store With Argument (Position Tracking).
launchConfigThe configuration of the launch - see TNL::Algorithms::Segments::LaunchConfiguration.
Returns
The number of processed vertices, i.e. vertices for which the condition was true.
Example
1#include <iostream>
2#include <TNL/Graphs/Graph.h>
3#include <TNL/Graphs/reduce.h>
4#include <TNL/Devices/Host.h>
5#include <TNL/Devices/Cuda.h>
6#include <TNL/Containers/Vector.h>
7
8template< typename Device >
9void
10reduceVerticesWithArgumentIfExample()
11{
12 /***
13 * Create a directed graph with 5 vertices.
14 */
16 // clang-format off
17 GraphType graph( 5, // number of vertices
18 { // definition of edges with weights
19 { 0, 1, 10.0 }, { 0, 2, 20.0 },
20 { 1, 2, 30.0 }, { 1, 3, 40.0 }, { 1, 4, 50.0 },
21 { 2, 3, 60.0 },
22 { 3, 0, 70.0 }, { 3, 4, 80.0 } } );
23 // clang-format on
24
25 /***
26 * Print the graph.
27 */
28 std::cout << "Graph:\n" << graph << '\n';
29
31 /***
32 * Find minimum edge weight and target vertex for vertices in range [1, 4) with degree >= 2.
33 */
34 TNL::Containers::Vector< float, Device > vertexMinWeights( 5, -1 );
35 TNL::Containers::Vector< float, Device > compressedVertexMinWeights( 5 );
36 TNL::Containers::Vector< int, Device > vertexMinTargets( 5, -1 );
37 TNL::Containers::Vector< int, Device > compressedVertexMinTargets( 5 );
38 auto vertexMinWeights_view = vertexMinWeights.getView();
39 auto compressedVertexMinWeights_view = compressedVertexMinWeights.getView();
40 auto vertexMinTargets_view = vertexMinTargets.getView();
41 auto compressedVertexMinTargets_view = compressedVertexMinTargets.getView();
42
43 auto condition = [ = ] __cuda_callable__( int vertexIdx ) -> bool
44 {
45 return graph.getVertexDegree( vertexIdx ) >= 2;
46 };
47
48 auto fetch = [] __cuda_callable__( int sourceIdx, int targetIdx, const float& weight ) -> float
49 {
50 return weight;
51 };
52
53 auto store =
54 [ = ] __cuda_callable__(
55 int indexOfVertexIdx, int vertexIdx, int localIdx, int targetIdx, float minWeight, bool isolatedVertex ) mutable
56 {
57 compressedVertexMinWeights_view[ indexOfVertexIdx ] = minWeight;
58 vertexMinWeights_view[ vertexIdx ] = minWeight;
59 if( ! isolatedVertex ) {
60 compressedVertexMinTargets_view[ indexOfVertexIdx ] = targetIdx;
61 vertexMinTargets_view[ vertexIdx ] = targetIdx;
62 }
63 };
64
65 int traversedVertexCount =
66 TNL::Graphs::reduceVerticesWithArgumentIf( graph, 1, 4, condition, fetch, TNL::MinWithArg{}, store );
68
69 /***
70 * Print results.
71 */
72 std::cout << "Number of traversed vertices: " << traversedVertexCount << '\n';
73 std::cout << "Minimum edge weight for vertices 1-3 with degree >= 2:" << vertexMinWeights << '\n';
74 std::cout << "Target vertex for minimum edge:" << vertexMinTargets << '\n';
75 std::cout << "Compressed minimum weights:" << compressedVertexMinWeights.getView( 0, traversedVertexCount ) << '\n';
76 std::cout << "Compressed minimum targets:" << compressedVertexMinTargets.getView( 0, traversedVertexCount ) << '\n';
77}
78
79int
80main( int argc, char* argv[] )
81{
82 std::cout << "Running on host:\n";
83 reduceVerticesWithArgumentIfExample< TNL::Devices::Host >();
84
85#ifdef __CUDACC__
86 std::cout << "Running on CUDA device:\n";
87 reduceVerticesWithArgumentIfExample< TNL::Devices::Cuda >();
88#endif
89
90 return EXIT_SUCCESS;
91}
Output
Running on host:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]
Running on CUDA device:
Graph:
Row: 0 -> 1:10 2:20
Row: 1 -> 2:30 3:40 4:50
Row: 2 -> 3:60
Row: 3 -> 0:70 4:80
Row: 4 ->
Number of traversed vertices: 2
Minimum edge weight for vertices 1-3 with degree >= 2:[ -1, 30, -1, 70, -1 ]
Target vertex for minimum edge:[ -1, 2, -1, 0, -1 ]
Compressed minimum weights:[ 30, 70 ]
Compressed minimum targets:[ 2, 0 ]