将调整大小算法分成两遍

Split resize algorithm into two passes

我已经编写了以下调整大小算法,可以正确地放大或缩小图像。但由于每处理一个输出像素,都要对两个权重集合做内层迭代,它的速度太慢了。

我相当确定我应该能够将算法分成两遍,就像使用两遍高斯模糊一样,这将大大降低操作复杂性并提高性能。不幸的是我无法让它工作。有人能帮忙吗?

// Single-pass resize: every target pixel is a full 2-D convolution over the
// vertical and horizontal weight windows, i.e. O(rows * cols * kernelH * kernelW).
// Rows are processed in parallel; each y iteration only touches its own row of
// 'target', so there is no write contention.
Parallel.For(
    startY,
    endY,
    y =>
    {
        // Skip rows that fall outside the destination section being rendered.
        if (y >= targetY && y < targetBottom)
        {
            // Precomputed kernel slice for this output row.
            Weight[] verticalValues = this.verticalWeights[y].Values;

            for (int x = startX; x < endX; x++)
            {
                // Precomputed kernel slice for this output column.
                Weight[] horizontalValues = this.horizontalWeights[x].Values;

                // Destination color components
                Color destination = new Color();

                // This is where there is too much operation complexity.
                // The 2-D kernel is separable (weight = yw.Value * xw.Value),
                // which is why the accepted fix below splits it into two 1-D passes.
                foreach (Weight yw in verticalValues)
                {
                    int originY = yw.Index;

                    foreach (Weight xw in horizontalValues)
                    {
                        int originX = xw.Index;
                        // NOTE(review): Expand presumably converts to a linear
                        // working space before accumulation — confirm against Color.
                        Color sourceColor = Color.Expand(source[originX, originY]);
                        float weight = yw.Value * xw.Value;
                        destination += sourceColor * weight;
                    }
                }

                destination = Color.Compress(destination);
                target[x, y] = destination;
            }
        }
    });

权重和索引的计算如下,每个维度各一个:

/// <summary>
/// Computes the weights to apply at each pixel when resizing.
/// </summary>
/// <param name="destinationSize">The destination section size.</param>
/// <param name="sourceSize">The source section size.</param>
/// <returns>
/// An array of <see cref="Weights"/>, one slice per destination column or row.
/// </returns>
private Weights[] PrecomputeWeights(int destinationSize, int sourceSize)
{
    IResampler sampler = this.Sampler;
    float ratio = sourceSize / (float)destinationSize;

    // When shrinking, broaden the effective kernel support so that we still
    // visit every source pixel; when enlarging the kernel keeps its natural size.
    float scale = ratio < 1 ? 1 : ratio;

    float scaledRadius = (float)Math.Ceiling(scale * sampler.Radius);
    Weights[] result = new Weights[destinationSize];

    // Make the weights slices, one source for each column or row.
    // Each iteration writes only result[i], so the parallel loop is race-free.
    Parallel.For(
        0,
        destinationSize,
        i =>
            {
                // Map the centre of destination pixel i back into source coordinates.
                float center = ((i + .5f) * ratio) - 0.5f;

                // Clamp the kernel window to the valid source range. The loop
                // below treats 'end' as exclusive, so sourceSize is a legal bound.
                int start = Math.Max(0, (int)Math.Ceiling(center - scaledRadius));
                int end = Math.Min(sourceSize, (int)Math.Floor(center + scaledRadius));

                // Guard against an inverted (empty) window.
                if (end < start)
                {
                    end = start;
                }

                float sum = 0;
                result[i] = new Weights();

                // Presize to the window width to avoid repeated list growth.
                List<Weight> builder = new List<Weight>(Math.Max(0, end - start));
                for (int a = start; a < end; a++)
                {
                    float w = sampler.GetValue((a - center) / scale);

                    // Keep only genuinely non-zero weights (this form also rejects NaN).
                    if (w > 0 || w < 0)
                    {
                        sum += w;
                        builder.Add(new Weight(a, w));
                    }
                }

                // Normalise the values so the kernel sums to one.
                // (Sum stores the pre-normalisation total.)
                if (sum > 0 || sum < 0)
                {
                    builder.ForEach(w => w.Value /= sum);
                }

                result[i].Values = builder.ToArray();
                result[i].Sum = sum;
            });

    return result;
}

/// <summary>
/// Represents the weight to be added to a scaled pixel.
/// </summary>
protected class Weight
{
    /// <summary>
    /// The pixel index.
    /// </summary>
    public readonly int Index;

    /// <summary>
    /// Gets or sets the result of the interpolation algorithm.
    /// </summary>
    public float Value { get; set; }

    /// <summary>
    /// Initializes a new instance of the <see cref="Weight"/> class.
    /// </summary>
    /// <param name="index">The source pixel index this weight applies to.</param>
    /// <param name="value">The sampled weight value.</param>
    public Weight(int index, float value)
    {
        this.Value = value;
        this.Index = index;
    }
}

/// <summary>
/// Represents a collection of weights and their sum.
/// </summary>
protected class Weights
{
    /// <summary>
    /// Gets or sets the pre-normalisation sum of the weight values.
    /// </summary>
    public float Sum { get; set; }

    /// <summary>
    /// Gets or sets the individual pixel weights for one kernel window.
    /// </summary>
    public Weight[] Values { get; set; }
}

每个 IResampler 都会根据给定的索引提供适当的权重系列。双三次重采样器的工作原理如下。

/// <summary>
/// The function implements the bicubic kernel algorithm W(x) as described on
/// <see href="https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm">Wikipedia</see>
/// A commonly used algorithm within imageprocessing that preserves sharpness better than triangle interpolation.
/// </summary>
public class BicubicResampler : IResampler
{
    /// <inheritdoc/>
    public float Radius => 2;

    /// <inheritdoc/>
    public float GetValue(float x)
    {
        // The kernel coefficient.
        const float a = -0.5f;

        // The kernel is symmetric, so evaluate on the absolute distance.
        if (x < 0)
        {
            x = -x;
        }

        // Near segment: |x| <= 1.
        if (x <= 1)
        {
            return (((1.5f * x) - 2.5f) * x * x) + 1;
        }

        // Far segment: 1 < |x| < 2.
        if (x < 2)
        {
            return (((((a * x) + 2.5f) * x) - 4) * x) + 2;
        }

        // Outside the kernel's support the weight is zero.
        return 0;
    }
}

这是一个由现有算法调整大小的图像示例。输出是正确的(注意保留了银色光泽)。

原图

使用双三次重采样器将图像大小减半。

该代码是我正在编写的更大 library 的一部分,用于将图像处理添加到 corefx。

您可以尝试加权 voronoi 图。随机选取一组点并计算 voronoi 图。使用 Lloyd 算法平滑多边形,并对多边形的颜色进行插值。使用权重计算加权 voronoi 图。例如 voronoi 点画和马赛克:http://www.mrl.nyu.edu/~ajsecord/stipples.html and http://www.evilmadscientist.com/2012/stipplegen-weighted-voronoi-stippling-and-tsp-paths-in-processing/.

从抽象的角度来看(对图像处理了解不多),我认为您在最内层的 foreach 循环中重复计算了 weight 和 sourceColor 的值(每当同一对索引再次出现时);简单地预先计算它们是否可行?您需要为 HorizontalWeight 和 VerticalWeight 矩阵计算一个"直积"矩阵(简单地将每对索引 (x, y) 的值相乘),并且还可以预先将 Color.Expand 应用于源图像。这些任务可以并行完成,"直积"(抱歉,我不知道那个概念的正确名称)在很多库中应该都有现成实现。

好的,这就是我的处理方式。

诀窍是首先只调整图像的宽度,保持与原始图像相同的高度。我们将生成的像素存储在临时图像中。

然后就是将该图像的大小调整为我们的最终输出。

如您所见,我们不再遍历每个像素上的两个权重集合。尽管必须通过外部像素循环迭代两次,但该算法的运行速度要快得多,在我的测试图像上平均要快 25% 左右。

// Interpolate the image using the calculated weights.
// First process the columns.
// Pass 1: horizontal-only resize. Each output pixel is a 1-D convolution
// along x at the source image's original height; results are cached in
// this.firstPass for the vertical pass that follows.
Parallel.For(
    0,
    sourceBottom,
    y =>
    {
        for (int x = startX; x < endX; x++)
        {
            // Precomputed kernel slice for this output column.
            Weight[] horizontalValues = this.HorizontalWeights[x].Values;

            // Destination color components
            Color destination = new Color();

            foreach (Weight xw in horizontalValues)
            {
                int originX = xw.Index;
                // NOTE(review): expanding then re-compressing per pass may cost
                // some precision in the intermediate image — confirm acceptable.
                Color sourceColor = Color.Expand(source[originX, y]);
                destination += sourceColor * xw.Value;
            }

            destination = Color.Compress(destination);
            this.firstPass[x, y] = destination;
        }
    });

// Now process the rows.
Parallel.For(
    startY,
    endY,
    y =>
    {
        if (y >= targetY && y < targetBottom)
        {
            Weight[] verticalValues = this.VerticalWeights[y].Values;

            for (int x = startX; x < endX; x++)
            {
                // Destination color components
                Color destination = new Color();

                foreach (Weight yw in verticalValues)
                {
                    int originY = yw.Index;
                    int originX = x;
                    Color sourceColor = Color.Expand(this.firstPass[originX, originY]);
                    destination += sourceColor * yw.Value;
                }

                destination = Color.Compress(destination);
                target[x, y] = destination;
            }
        }
    });