Need help for faster processing


Need help for faster processing
#1
For a while, I've been writing a custom LDraw to Collada converter for Bricklink Studio, as the one currently in the program, to put it lightly, sucks.

I have a problem with its performance when finding and merging identical vertex positions.

Here's the code I have currently:
Code:
static void MergeCell1(List<Vector3> cell)
{
     for (int i = 0; i < cell.Count - 1; i++)
     {
         for (int j = i + 1; j < cell.Count; j++)
             if (Vector3.Distance(cell[i], cell[j]) < MERGE_RADIUS)
                 cell[j] = cell[i];
     }
}
static void MergeCell2(List<Vector3> cell, List<Vector3> sharpCell)
{
     for (int i = 0; i < cell.Count; i++)
     {
         for (int j = 0; j < sharpCell.Count; j++)
             if (Vector3.Distance(cell[i], sharpCell[j]) < MERGE_RADIUS)
                 sharpCell[j] = cell[i];
     }
}
static void MergeCell3(List<Vector3> sharpCell)
{
     for (int i = 0; i < sharpCell.Count - 1; i++)
     {
         for (int j = i + 1; j < sharpCell.Count; j++)
             if (Vector3.Distance(sharpCell[i], sharpCell[j]) < MERGE_RADIUS)
                 sharpCell[j] = sharpCell[i];
     }
}
static void MergeCell4(List<Vector3> cell, List<Vector3> sharpCell)
{
     for (int i = 0; i < sharpCell.Count; i++)
     {
         for (int j = 0; j < cell.Count; j++)
             if (Vector3.Distance(sharpCell[i], cell[j]) < MERGE_RADIUS)
                 cell[j] = sharpCell[i];
     }
}

static void MergePoints(string model)
{
     if (preBuilt.ContainsKey(model)) return;

     float maxX = subMeshes[model].MaxBy(x => x.X).X;
     float maxY = subMeshes[model].MaxBy(x => x.Y).Y;
     float maxZ = subMeshes[model].MaxBy(x => x.Z).Z;
     float minX = subMeshes[model].MinBy(x => x.X).X;
     float minY = subMeshes[model].MinBy(x => x.Y).Y;
     float minZ = subMeshes[model].MinBy(x => x.Z).Z;

     int step = (int)Math.Round(Math.Pow(subMeshes[model].Count, 1.0 / 3.0));

     float stepX = (maxX - minX) / step;
     float stepY = (maxY - minY) / step;
     float stepZ = (maxZ - minZ) / step;

     List<Vector3>[,,] cells = new List<Vector3>[step, step, step];
     List<Vector3>[,,] cellsLines = new List<Vector3>[step, step, step];

     for (int i = 0; i < step; i++)
         for (int j = 0; j < step; j++)
             for (int k = 0; k < step; k++)
             {
                 // Collect the mesh vertices whose coordinates fall inside cell (i, j, k).
                 cells[i, j, k] = (from a in subMeshes[model]
                                   where
                                   a.X >= minX + (i * stepX) && a.X < minX + ((i + 1) * stepX) &&
                                   a.Y >= minY + (j * stepY) && a.Y < minY + ((j + 1) * stepY) &&
                                   a.Z >= minZ + (k * stepZ) && a.Z < minZ + ((k + 1) * stepZ)
                                   select a).ToList();
             }
     for (int i = 0; i < step; i++)
         for (int j = 0; j < step; j++)
             for (int k = 0; k < step; k++)
             {
                 // Same binning for the sharp-edge (line) vertices.
                 cellsLines[i, j, k] = (from a in subSharps[model]
                                        where
                                        a.X >= minX + (i * stepX) && a.X < minX + ((i + 1) * stepX) &&
                                        a.Y >= minY + (j * stepY) && a.Y < minY + ((j + 1) * stepY) &&
                                        a.Z >= minZ + (k * stepZ) && a.Z < minZ + ((k + 1) * stepZ)
                                        select a).ToList();
             }

     Console.WriteLine("Merging points of model " + model + ", step 1 - " + subMeshes[model].Count + " points");
     Parallel.ForEach(cells.Cast<List<Vector3>>(), MergeCell1);
     Console.WriteLine("Merging points of model " + model + ", step 2 - " + subMeshes[model].Count + " points");
     Parallel.ForEach(cells.Cast<List<Vector3>>(), x => Parallel.ForEach(cellsLines.Cast<List<Vector3>>(), y => MergeCell2(x, y)));
     Console.WriteLine("Merging points of model " + model + ", step 3 - " + subSharps[model].Count + " points");
     Parallel.ForEach(cellsLines.Cast<List<Vector3>>(), MergeCell3);
     Console.WriteLine("Merging points of model " + model + ", step 4 - " + subSharps[model].Count + " points");
     Parallel.ForEach(cells.Cast<List<Vector3>>(), x => Parallel.ForEach(cellsLines.Cast<List<Vector3>>(), y => MergeCell4(x, y)));
     /*for (int i = 0; i < subMeshes[model].Count - 1; i++)
     {
         for (int j = i + 1; j < subMeshes[model].Count; j++)
             if (Vector3.Distance(subMeshes[model][i], subMeshes[model][j]) < MERGE_RADIUS)
                 subMeshes[model][j] = subMeshes[model][i];
         //if (i % (subMeshes[model].Count / 100) == 0 && i != 0) Console.WriteLine("Merging points of model " + model + ", step 1: " + i / (subMeshes[model].Count / 100) + " % ");
     }

     Console.WriteLine("Merging points of model " + model + ", step 2 - " + subMeshes[model].Count + " points");
     for (int i = 0; i < subMeshes[model].Count; i++)
     {
         for (int j = 0; j < subSharps[model].Count; j++)
             if (Vector3.Distance(subMeshes[model][i], subSharps[model][j]) < MERGE_RADIUS)
                 subSharps[model][j] = subMeshes[model][i];
         //if (i % (subMeshes[model].Count / 100) == 0 && i != 0) Console.WriteLine("Merging points of model " + model + ", step 2: " + i / (subMeshes[model].Count / 100) + " % ");
     }

     Console.WriteLine("Merging points of model " + model + ", step 3 - " + subSharps[model].Count + " points");
     for (int i = 0; i < subSharps[model].Count - 1; i++)
     {
         for (int j = i + 1; j < subSharps[model].Count; j++)
             if (Vector3.Distance(subSharps[model][i], subSharps[model][j]) < MERGE_RADIUS)
                 subSharps[model][j] = subSharps[model][i];
         //if (i % (subSharps[model].Count / 100) == 0 && i != 0) Console.WriteLine("Merging points of model " + model + ", step 3: " + i / (subMeshes[model].Count / 100) + " % ");
     }

     Console.WriteLine("Merging points of model " + model + ", step 4 - " + subSharps[model].Count + " points");
     for (int i = 0; i < subSharps[model].Count; i++)
     {
         for (int j = 0; j < subMeshes[model].Count; j++)
             if (Vector3.Distance(subSharps[model][i], subMeshes[model][j]) < MERGE_RADIUS)
                 subMeshes[model][j] = subSharps[model][i];
         //if (i % (subSharps[model].Count / 100) == 0 && i != 0) Console.WriteLine("Merging points of model " + model + ", step 4: " + i / (subMeshes[model].Count / 100) + " % ");
     }*/

}

In short, it subdivides space into sections and, in each section, checks every pair of vertices to see whether they are close enough to be considered the same. I wonder if there is a way to make it faster.
RE: Need help for faster processing
#2
(2023-11-02, 21:12)Max Murtazin Wrote: For a while, I've been writing a custom LDraw to Collada converter for Bricklink Studio, as the one currently in the program, to put it lightly, sucks.

I have a problem with its performance when finding and merging identical vertex positions.

Here's the code I have currently:

...

In short, it subdivides space into sections and, in each section, checks every pair of vertices to see whether they are close enough to be considered the same. I wonder if there is a way to make it faster.

LDView has a custom vertex key class that it uses specifically for merging vertices. The class converts the coordinate values from floats to longs (with a scale factor that represents how close together they need to be in each axis) and implements the less-than operator. I then use std::set and std::map with these vertex keys, and nearby points automatically get merged. I'm not sure if this would be faster than what you're doing or not. Something similar could be done without conversion from float to long. On modern CPUs, I'm honestly not sure how much that conversion helps.
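
To make the idea concrete in the thread's language, here is a rough C# sketch of the same quantization approach. This is not LDView's actual code; the VertexKey and QuantizedMerge names are made up, and a hashed Dictionary stands in for the ordered std::set/std::map.
Code:
using System;
using System.Collections.Generic;
using System.Numerics;

// Hypothetical C# adaptation of the quantized vertex key idea: scale each
// coordinate by 1 / mergeRadius, round to a long, and let nearby points
// collapse onto the same key.
readonly struct VertexKey : IEquatable<VertexKey>
{
    private readonly long x, y, z;

    public VertexKey(Vector3 v, float mergeRadius)
    {
        x = (long)MathF.Round(v.X / mergeRadius);
        y = (long)MathF.Round(v.Y / mergeRadius);
        z = (long)MathF.Round(v.Z / mergeRadius);
    }

    public bool Equals(VertexKey other) => x == other.x && y == other.y && z == other.z;
    public override bool Equals(object obj) => obj is VertexKey k && Equals(k);
    public override int GetHashCode() => HashCode.Combine(x, y, z);
}

static class QuantizedMerge
{
    // Replaces every point with the first point seen in its quantized cell.
    public static void Merge(List<Vector3> points, float mergeRadius)
    {
        var representatives = new Dictionary<VertexKey, Vector3>();
        for (int i = 0; i < points.Count; i++)
        {
            var key = new VertexKey(points[i], mergeRadius);
            if (representatives.TryGetValue(key, out Vector3 rep))
                points[i] = rep;               // merge onto the existing representative
            else
                representatives[key] = points[i];
        }
    }
}

As with the quantization Travis describes, two points closer than the scale can still land in adjacent cells (e.g. 0.49 and 0.51 with a scale of 1.0), so this merges "nearby enough" points rather than guaranteeing that every pair within the radius is merged.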
RE: Need help for faster processing
#3
(2023-11-02, 21:26)Travis Cobbs Wrote: LDView has a custom vertex key class that it uses specifically for merging vertices. The class converts the coordinate values from floats to longs (with a scale factor that represents how close together they need to be in each axis) and implements the less-than operator. I then use std::set and std::map with these vertex keys, and nearby points automatically get merged. I'm not sure if this would be faster than what you're doing or not. Something similar could be done without conversion from float to long. On modern CPUs, I'm honestly not sure how much that conversion helps.

Note: I'm not sure what programming language you're using, but as long as it allows values to be placed in maps and sets with custom comparison functions, you can create a comparison function that includes your merge threshold and then use that. Any map or set type should be at worst around O(log₂ n) for insertion, versus your O(n²). So you would look up the new value in the set; if it is found (using the custom comparison function that allows for a merge threshold), you would assign the values equal to each other.
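
In C#, one way to sketch this is a SortedDictionary with a custom IComparer<Vector3> that treats points within the threshold as equal. The ToleranceComparer and ThresholdSetMerge names below are made up for illustration, and note that a tolerance comparison is not strictly transitive, so chains of nearby points can split into separate buckets or be missed; quantizing first, as LDView does, avoids that.
Code:
using System.Collections.Generic;
using System.Numerics;

// Hypothetical comparer: two vectors compare equal when every component is
// within mergeRadius; otherwise they are ordered component by component.
class ToleranceComparer : IComparer<Vector3>
{
    private readonly float eps;
    public ToleranceComparer(float mergeRadius) { eps = mergeRadius; }

    public int Compare(Vector3 a, Vector3 b)
    {
        if (a.X < b.X - eps) return -1;
        if (a.X > b.X + eps) return 1;
        if (a.Y < b.Y - eps) return -1;
        if (a.Y > b.Y + eps) return 1;
        if (a.Z < b.Z - eps) return -1;
        if (a.Z > b.Z + eps) return 1;
        return 0; // within the merge threshold on every axis
    }
}

static class ThresholdSetMerge
{
    // O(log n) lookup per point instead of the O(n) inner loop.
    public static void Merge(List<Vector3> points, float mergeRadius)
    {
        var reps = new SortedDictionary<Vector3, Vector3>(new ToleranceComparer(mergeRadius));
        for (int i = 0; i < points.Count; i++)
        {
            if (reps.TryGetValue(points[i], out Vector3 rep))
                points[i] = rep;          // an earlier point within the threshold exists
            else
                reps.Add(points[i], points[i]);
        }
    }
}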
RE: Need help for faster processing
#4
(2023-11-02, 21:12)Max Murtazin Wrote: For a while, I've been writing a custom LDraw to Collada converter for Bricklink Studio, as the one currently in the program, to put it lightly, sucks.

FYI: The latest LDCad has an experimental Collada export. It might suit your needs.
RE: Need help for faster processing
#5
Code:
float maxX = subMeshes[model].MaxBy(x => x.X).X;
float maxY = subMeshes[model].MaxBy(x => x.Y).Y;
float maxZ = subMeshes[model].MaxBy(x => x.Z).Z;
float minX = subMeshes[model].MinBy(x => x.X).X;
float minY = subMeshes[model].MinBy(x => x.Y).Y;
float minZ = subMeshes[model].MinBy(x => x.Z).Z;
Looks like you are going through the same list of vectors six times here. It's probably faster to use a for loop:
Code:
float minX = float.PositiveInfinity, maxX = float.NegativeInfinity;
// same for Y and Z
foreach (Vector3 vec in subMeshes[model])
{
    minX = Math.Min(minX, vec.X);
    maxX = Math.Max(maxX, vec.X);
    // same for Y and Z
}


Code:
Vector3.Distance(sharpCell[i], cell[j]) < MERGE_RADIUS
You are unnecessarily computing a square root in the Vector3.Distance call. Calculating roots is pretty expensive, and in your case the cost is multiplied by doing it inside a nested for loop. It's better to use something like
Code:
const float MERGE_RADIUS_SQUARED = MERGE_RADIUS * MERGE_RADIUS;
Vector3.DistanceSquared(sharpCell[i], cell[j]) < MERGE_RADIUS_SQUARED


If an approximate solution is enough, you can certainly try a map like Travis Cobbs said. His implementation is basically rounding the coordinates to 0.01 (I don't know the unit, but it's probably LDU) and then checking whether the rounded values are equal. This will group 0.009 and 0.011 together but not 0.014 and 0.016, despite the distance between the values being the same.
To get a precise (and still somewhat fast) algorithm, you will probably need to look into space-partitioning structures like a k-d tree.
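For comparison, here is a hedged sketch of the simplest space-partitioning variant: a uniform grid hash with cell size equal to the merge radius, rather than a full k-d tree. GridMerge and its parameter names are made up for illustration. Because any point within the merge radius of another lies in the same cell or one of the 26 neighbouring cells, every point that is within the radius of an already-kept point will find it.
Code:
using System;
using System.Collections.Generic;
using System.Numerics;

// Illustrative sketch (not the original poster's code): merge points by hashing
// them into a uniform grid whose cell size equals the merge radius, then only
// compare against points in the same or neighbouring cells.
static class GridMerge
{
    public static void Merge(List<Vector3> points, float mergeRadius)
    {
        var grid = new Dictionary<(long, long, long), List<Vector3>>();
        float r2 = mergeRadius * mergeRadius;

        (long, long, long) Cell(Vector3 v) =>
            ((long)MathF.Floor(v.X / mergeRadius),
             (long)MathF.Floor(v.Y / mergeRadius),
             (long)MathF.Floor(v.Z / mergeRadius));

        for (int i = 0; i < points.Count; i++)
        {
            var (cx, cy, cz) = Cell(points[i]);
            bool merged = false;

            // Search this cell and its 26 neighbours for an earlier representative.
            for (long dx = -1; dx <= 1 && !merged; dx++)
                for (long dy = -1; dy <= 1 && !merged; dy++)
                    for (long dz = -1; dz <= 1 && !merged; dz++)
                        if (grid.TryGetValue((cx + dx, cy + dy, cz + dz), out var bucket))
                            foreach (var rep in bucket)
                                if (Vector3.DistanceSquared(points[i], rep) < r2)
                                {
                                    points[i] = rep;   // snap to the existing representative
                                    merged = true;
                                    break;
                                }

            if (!merged)
            {
                // Keep this point as a new representative in its own cell.
                if (!grid.TryGetValue((cx, cy, cz), out var home))
                {
                    home = new List<Vector3>();
                    grid[(cx, cy, cz)] = home;
                }
                home.Add(points[i]);
            }
        }
    }
}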

It might also be faster to use a simple algorithm with bad time complexity for small coordinate lists and only use the complex algorithm for large lists, where time complexity actually makes a difference, so a hybrid approach might be the solution.
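A minimal sketch of that hybrid dispatch, assuming the GridMerge sketch above and an arbitrary, untuned cutoff:
Code:
// Hypothetical hybrid: brute force for small lists, space-partitioned merge for
// large ones. The 256-point cutoff is a guess and should be tuned with a profiler.
static void MergeHybrid(List<Vector3> points, float mergeRadius)
{
    const int cutoff = 256;
    if (points.Count <= cutoff)
        MergeCell1(points);                   // the existing O(n²) pass from the original post
    else
        GridMerge.Merge(points, mergeRadius); // grid-based pass sketched above
}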

Also: Use a profiler if you don't already.
RE: Need help for faster processing
#6
(2023-11-07, 8:34)Basil Bader Wrote: ...

Also: Use a profiler if you don't already.

What's a profiler?
RE: Need help for faster processing
#7
(2023-11-07, 13:25)Max Murtazin Wrote: What's a profiler?

A tool that shows you how many times each function is called and how much time it takes. It helps to identify the low-hanging fruit (parts of the code where you can achieve large performance improvements for relatively little effort).
Also: https://en.wikipedia.org/wiki/Profiling_...ogramming) and https://learn.microsoft.com/en-us/visual...ew=vs-2022
RE: Need help for faster processing
#8
ldr_tools uses special data structures for point comparisons and is multithreaded. It should give very good performance and reduce the amount of code to write if you're able to use Rust or Python.
https://github.com/ScanMountGoat/ldr_too...#ldr_tools