Commit 69b63dd2 authored by pmla

further memory usage reductions

parent 01b14f30
Pipeline #24560416 failed
......@@ -502,7 +502,6 @@ bool GrainSegmentationEngine::modularitySegmentation()
std::vector<size_t> vIndex(_numSuperclusters, 0);
std::vector<size_t> atomCumulative(_numSuperclusters);
std::vector<size_t> particleToVertexId(numAtoms);
std::vector<size_t> vertexToParticleId(numAtoms);
std::vector<double> orderedRMSD(numAtoms);
atomCumulative[0] = 0;
std::partial_sum(_superclusterSizes.begin(), _superclusterSizes.end() - 1, atomCumulative.begin() + 1);
......@@ -511,9 +510,7 @@ bool GrainSegmentationEngine::modularitySegmentation()
size_t scIndex = _atomSuperclusters[particleIndex];
size_t numVertices = vIndex[scIndex];
particleToVertexId[particleIndex] = numVertices;
vertexToParticleId[atomCumulative[scIndex] + numVertices] = particleIndex;
orderedRMSD[atomCumulative[scIndex] + numVertices] = _results->rmsd()->getFloat(particleIndex);
vIndex[scIndex]++;
}
......@@ -553,6 +550,9 @@ bool GrainSegmentationEngine::modularitySegmentation()
scOffset[scIndex] += numEdges[offset];
}
for (size_t scIndex=0;scIndex<_numSuperclusters;scIndex++)
scOffset[scIndex] = edgeCumulative[atomCumulative[scIndex]];
// Place edges into contiguous adjacency list
std::fill(numEdges.begin(), numEdges.end(), 0);
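A purely illustrative sketch (names borrowed from the hunk above, sizes made up, not the committed code) of how the per-supercluster edge offsets fall out of two prefix sums: atomCumulative gives the first vertex of each supercluster, and edgeCumulative, the running sum of per-vertex edge counts, then gives its first edge.

#include <cstddef>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
    // Made-up example: 3 superclusters with 2, 3 and 1 vertices,
    // and per-vertex half-edge counts laid out in the same order.
    std::vector<size_t> superclusterSizes = {2, 3, 1};
    std::vector<size_t> numEdges = {1, 2, 2, 0, 3, 1};

    // First vertex of each supercluster (exclusive prefix sum of sizes).
    std::vector<size_t> atomCumulative(superclusterSizes.size(), 0);
    std::partial_sum(superclusterSizes.begin(), superclusterSizes.end() - 1,
                     atomCumulative.begin() + 1);

    // First edge of each vertex (exclusive prefix sum of edge counts).
    std::vector<size_t> edgeCumulative(numEdges.size(), 0);
    std::partial_sum(numEdges.begin(), numEdges.end() - 1,
                     edgeCumulative.begin() + 1);

    // First edge of each supercluster: compose the two prefix sums.
    for (size_t sc = 0; sc < superclusterSizes.size(); sc++)
        printf("supercluster %zu starts at edge %zu\n",
               sc, edgeCumulative[atomCumulative[sc]]);
    return 0;
}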
......@@ -585,29 +585,32 @@ bool GrainSegmentationEngine::modularitySegmentation()
adjweight[c2] = weight;
}
//totalWeight *= 8;
edgeCumulative.clear();
std::vector<size_t> community(numAtoms);
clock_t startTime = clock();
if(!parallelFor(_numSuperclusters, *this, [this, &atomCumulative, &vIndex, &particleToVertexId, &vertexToParticleId, &numEdges, &numSuperclusterEdges, &adjlist, &edgeCumulative, &adjweight, numAtoms, totalWeight, &orderedRMSD](size_t scIndex) {
if(!parallelFor(_numSuperclusters, *this, [this, &atomCumulative, &vIndex, &particleToVertexId, &numEdges, &numSuperclusterEdges, &adjlist, &scOffset, &adjweight, numAtoms, totalWeight, &orderedRMSD, &community](size_t scIndex) {
// Nothing to do for supercluster 0.
if(scIndex == 0) return;
printf("refining supercluster: %lu\n", scIndex);
size_t numVertices = vIndex[scIndex];
std::vector<size_t> community(numVertices);
assign_communities_halfedge( numVertices, numEdges.begin() + atomCumulative[scIndex], orderedRMSD.begin() + atomCumulative[scIndex],
numSuperclusterEdges[scIndex],
adjlist.begin() + edgeCumulative[atomCumulative[scIndex]],
adjweight.begin() + edgeCumulative[atomCumulative[scIndex]],
totalWeight, community);
for(size_t i = 0; i < numVertices; i++) {
int clusterId = community[i] + atomCumulative[scIndex] + 1;
size_t particleIndex = vertexToParticleId[atomCumulative[scIndex] + i];
_results->atomClusters()->setInt64(particleIndex, clusterId);
std::vector<size_t>::iterator cumulative_degrees = numEdges.begin() + atomCumulative[scIndex];
size_t c = 0;
for (size_t i=0;i<numVertices;i++) {
c += cumulative_degrees[i];
cumulative_degrees[i] = c;
}
assign_communities_halfedge( numVertices, cumulative_degrees, orderedRMSD.begin() + atomCumulative[scIndex],
numSuperclusterEdges[scIndex],
adjlist.begin() + scOffset[scIndex],
adjweight.begin() + scOffset[scIndex],
totalWeight, community.begin() + atomCumulative[scIndex]);
})) {
return false;
}
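As a side note, a minimal standalone sketch (made-up data, illustrative only, not the committed code) of the in-place conversion done just before the call above: the per-vertex half-edge counts already sitting in the shared buffer are overwritten with their running sum, so no separate cumulative-degree array has to be allocated per supercluster.

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    // Made-up per-vertex half-edge counts for one supercluster.
    std::vector<size_t> degrees = {2, 0, 3, 1};

    // Overwrite the counts with an inclusive running sum, in place.
    // Afterwards the same buffer acts as the cumulative-degree array:
    // vertex v owns the half-open edge range [degrees[v-1], degrees[v])
    // (with an implicit 0 in front of vertex 0).
    size_t c = 0;
    for (size_t i = 0; i < degrees.size(); i++) {
        c += degrees[i];
        degrees[i] = c;
    }

    for (size_t v = 0; v < degrees.size(); v++)
        printf("vertex %zu: edges [%zu, %zu)\n",
               v, v == 0 ? (size_t)0 : degrees[v - 1], degrees[v]);
    return 0;
}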
......@@ -617,6 +620,18 @@ clock_t clockTicksTaken = endTime - startTime;
double timeInSeconds = clockTicksTaken / (double) CLOCKS_PER_SEC;
printf("time taken: %f\n", timeInSeconds);
for(size_t particleIndex = 0; particleIndex < numAtoms; particleIndex++) {
size_t scIndex = _atomSuperclusters[particleIndex];
if (scIndex == 0) continue;
size_t index = atomCumulative[scIndex] + particleToVertexId[particleIndex];
int clusterId = community[index] + atomCumulative[scIndex] + 1;
_results->atomClusters()->setInt64(particleIndex, clusterId);
}
_clusterSizes.resize(numAtoms + 1, 0);
for(size_t particleIndex = 0; particleIndex < numAtoms; particleIndex++) {
qlonglong c = _results->atomClusters()->getInt64(particleIndex);
......
......@@ -43,12 +43,13 @@ class Graph {
public:
size_t num_nodes;
size_t num_edges;
std::vector<size_t> node_size;
std::vector<size_t> degrees;
std::vector<size_t> node_size;
std::vector<size_t> edges;
std::vector<double> weights;
std::vector<double> rmsds;
std::vector<size_t>::iterator degree_iterator;
std::vector<size_t>::iterator edge_iterator;
std::vector<double>::iterator weight_iterator;
std::vector<double>::iterator rmsd_iterator;
......@@ -56,6 +57,7 @@ public:
Graph(size_t _num_nodes, std::vector<size_t>& comm_size, std::vector<double>& comm_rmsd) {
num_nodes = _num_nodes;
degrees.resize(_num_nodes);
degree_iterator = degrees.begin();
for (auto c: comm_size)
node_size.push_back(c);
......@@ -74,23 +76,19 @@ public:
edge_iterator = adjlist;
weight_iterator = adjweight;
rmsd_iterator = _rmsds;
size_t c = 0;
for (size_t i=0;i<num_nodes;i++) {
c += adjdegrees[i];
degrees.push_back(c);
}
node_size.assign(num_nodes, 1);
degree_iterator = adjdegrees;
degrees.resize(1);
}
size_t num_neighbors(size_t node) {
assert(node >= 0 && node < num_nodes);
if (node == 0)
return degrees[0];
return degree_iterator[0];
else
return degrees[node] - degrees[node - 1];
return degree_iterator[node] - degree_iterator[node - 1];
}
double num_selfloops(size_t node) {
......@@ -121,7 +119,7 @@ public:
if (node == 0)
return make_pair(edge_iterator, weight_iterator);
else
return make_pair(edge_iterator + degrees[node - 1], weight_iterator + degrees[node - 1]);
return make_pair(edge_iterator + degree_iterator[node - 1], weight_iterator + degree_iterator[node - 1]);
}
};
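The accessors above read through iterators into caller-owned cumulative-degree, edge and weight buffers, so building a per-supercluster graph copies nothing. A stripped-down, hypothetical sketch of that pattern (not the class from this commit) might look like:

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Non-owning graph view: only iterators into external storage are kept,
// so constructing one allocates no per-supercluster edge arrays.
struct GraphView {
    size_t num_nodes;
    std::vector<size_t>::iterator cum_degrees; // inclusive running sum of degrees
    std::vector<size_t>::iterator edges;
    std::vector<double>::iterator weights;

    size_t num_neighbors(size_t node) const {
        assert(node < num_nodes);
        return node == 0 ? cum_degrees[0]
                         : cum_degrees[node] - cum_degrees[node - 1];
    }

    std::pair<std::vector<size_t>::iterator, std::vector<double>::iterator>
    neighbors(size_t node) const {
        size_t offset = node == 0 ? 0 : cum_degrees[node - 1];
        return {edges + offset, weights + offset};
    }
};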
......@@ -189,23 +187,6 @@ static void neigh_comm(Graph* g, size_t node, size_t& neigh_last, std::vector<si
}
}
#if 0
static std::vector<size_t> random_order(std::mt19937_64& mt_rand, size_t n)
{
std::vector<size_t> node_order(n);
std::iota(node_order.begin(), node_order.end(), 0);
for (size_t i=0;i<n-1;i++) {
size_t rand_pos = mt_rand() % (n - i) + i;
size_t tmp = node_order[i];
node_order[i] = node_order[rand_pos];
node_order[rand_pos] = tmp;
}
return node_order;
}
#endif
static std::vector<std::pair<size_t, double>> rmsd_order(size_t num_nodes, std::vector<double>::iterator rmsd_iterator, std::vector<size_t> node_size)
{
std::vector<std::pair<size_t, double>> node_order(num_nodes);
......@@ -218,7 +199,7 @@ static std::vector<std::pair<size_t, double>> rmsd_order(size_t num_nodes, std::
return node_order;
}
static Graph one_level(std::mt19937_64& mt_rand, Graph* g, std::vector< size_t > &prev_n2c, double total_weight, bool& improvement) {
static Graph one_level(Graph* g, size_t num_vertices, std::vector< size_t >::iterator prev_n2c, double total_weight, bool& improvement) {
size_t num_nodes = g->num_nodes;
std::vector<size_t> n2c(num_nodes); // community to which each node belongs
......@@ -239,7 +220,6 @@ static Graph one_level(std::mt19937_64& mt_rand, Graph* g, std::vector< size_t >
size_t nb_moves = 0;
double new_qual = quality(total_weight, g, in, tot);
double cur_qual = new_qual;
//std::vector<size_t> node_order = random_order(mt_rand, num_nodes);
std::vector<std::pair<size_t, double>> node_order = rmsd_order(num_nodes, g->rmsd_iterator, g->node_size);
// repeat while
......@@ -305,7 +285,7 @@ static Graph one_level(std::mt19937_64& mt_rand, Graph* g, std::vector< size_t >
for (size_t i=0;i<num_nodes;i++)
part[i] = renumber[n2c[i]];
for (size_t i=0;i<prev_n2c.size();i++)
for (size_t i=0;i<num_vertices;i++)
prev_n2c[i] = part[prev_n2c[i]];
//
......@@ -363,11 +343,8 @@ static Graph one_level(std::mt19937_64& mt_rand, Graph* g, std::vector< size_t >
}
size_t assign_communities_halfedge( size_t num_vertices, std::vector<size_t>::iterator adjdegrees, std::vector<double>::iterator rmsds,
size_t num_edges, std::vector<size_t>::iterator adjlist, std::vector<double>::iterator adjweight, double total_weight, std::vector< size_t > &n2c)
size_t num_edges, std::vector<size_t>::iterator adjlist, std::vector<double>::iterator adjweight, double total_weight, std::vector< size_t >::iterator n2c)
{
//thread-safe random number generation
std::mt19937_64 mt_rand(0);
Graph g(num_vertices, adjdegrees, rmsds, num_edges, adjlist, adjweight);
for (size_t i=0;i<num_vertices;i++)
n2c[i] = i;
......@@ -376,7 +353,7 @@ size_t assign_communities_halfedge( size_t num_vertices, std::vector<size_t>::it
bool improvement = true;
do {
g = one_level(mt_rand, &g, n2c, total_weight, improvement);
g = one_level(&g, num_vertices, n2c, total_weight, improvement);
numc = g.num_nodes;
} while (improvement);
......
......@@ -36,7 +36,7 @@
#include <vector>
size_t assign_communities_halfedge(size_t numVertices, std::vector<size_t>::iterator adjdegrees, std::vector<double>::iterator rmsds,
size_t numEdges, std::vector<size_t>::iterator adjlist, std::vector<double>::iterator adjweight, double total_weight, std::vector< size_t > &n2c);
size_t numEdges, std::vector<size_t>::iterator adjlist, std::vector<double>::iterator adjweight, double total_weight, std::vector< size_t >::iterator n2c);
#endif // LOUVAIN_H
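For context, a hedged sketch of how a caller might drive this iterator-based interface with one shared community buffer; the refine_all wrapper and its parameter names are illustrative, not part of the commit.

#include <cstddef>
#include <vector>

// Declaration as in louvain.h above.
size_t assign_communities_halfedge(size_t numVertices, std::vector<size_t>::iterator adjdegrees,
                                   std::vector<double>::iterator rmsds, size_t numEdges,
                                   std::vector<size_t>::iterator adjlist,
                                   std::vector<double>::iterator adjweight, double total_weight,
                                   std::vector<size_t>::iterator n2c);

// Illustrative wrapper: every supercluster writes its result into a slice of
// one shared community buffer instead of allocating its own vector per call.
void refine_all(size_t numSuperclusters,
                const std::vector<size_t>& vertexCount,   // vertices per supercluster
                const std::vector<size_t>& vertexOffset,  // first vertex of each supercluster
                const std::vector<size_t>& edgeCount,     // half-edges per supercluster
                const std::vector<size_t>& edgeOffset,    // first half-edge of each supercluster
                std::vector<size_t>& cumDegrees,          // per-vertex cumulative degrees
                std::vector<double>& rmsds,
                std::vector<size_t>& adjlist,
                std::vector<double>& adjweight,
                double totalWeight,
                std::vector<size_t>& community)           // sized for all vertices
{
    for (size_t sc = 0; sc < numSuperclusters; sc++) {
        assign_communities_halfedge(vertexCount[sc],
                                    cumDegrees.begin() + vertexOffset[sc],
                                    rmsds.begin() + vertexOffset[sc],
                                    edgeCount[sc],
                                    adjlist.begin() + edgeOffset[sc],
                                    adjweight.begin() + edgeOffset[sc],
                                    totalWeight,
                                    community.begin() + vertexOffset[sc]);
    }
}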
add resolution parameter
implement rmsd averaging/ordering
implement better orphan atom adoption strategy
implement better orientation averaging + standard deviation calculation
HCP/Diamond disorientation correction
Alex: implement stacking fault detection?
......