Slow performance when unpacking bits for a tensor layer in tensorflow

I'm working with data streamed over a WebSocket connection to the StarCraft II client to get image data from a running game. In some cases the image data may be set to a format with 1 bit per pixel. When this happens, I need to "unpack" the bits from each byte in the response (1 byte => 8 bits). This is done in the code below:

function unpackbits(uint8data) {
  const results = new Uint8Array(8 * uint8data.length)
  let byte
  let offset
  for (let i = 0; i < uint8data.length; i++) {
    byte = uint8data[i]
    offset = (8 * i)
    results[offset + 7] = ((byte & (1 << 0)) >> 0)
    results[offset + 6] = ((byte & (1 << 1)) >> 1)
    results[offset + 5] = ((byte & (1 << 2)) >> 2)
    results[offset + 4] = ((byte & (1 << 3)) >> 3)
    results[offset + 3] = ((byte & (1 << 4)) >> 4)
    results[offset + 2] = ((byte & (1 << 5)) >> 5)
    results[offset + 1] = ((byte & (1 << 6)) >> 6)
    results[offset + 0] = ((byte & (1 << 7)) >> 7)
  }
  return results
}

This gets fed into a tensor like so:

 static unpack_layer(plane) {
    // Return a correctly shaped tensor given the feature layer bytes.

    const size = point.Point.build(plane.getSize()) // { x, y }
    // Raw feature-layer bytes; the accessor name is assumed to follow the same
    // protobuf getter pattern as getSize()/getBitsPerPixel().
    let data = plane.getData()

    if (plane.getBitsPerPixel() === 1) {
      data = unpackbits(data)
      if (data.length !== (size.x * size.y)) {
        // This could happen if the correct length isn't a multiple of 8, leading
        // to some padding bits at the end of the string which are incorrectly
        // interpreted as data.
        data = data.slice(0, size.x * size.y)
      }
    }

    data = tf.tensor(data, [size.y, size.x], 'int32')
    return data
}

In one of my tests this code gets run 1900 times, taking 0.0737 seconds of execution time.

This is slow.

For comparison, the equivalent functionality in Python takes 0.0209 seconds to run those 1900 times. The Python code looks like this:

  def unpack_layer(plane):
    """Return a correctly shaped numpy array given the feature layer bytes."""

    size = point.Point.build(plane.size) # {x, y }
    data = np.frombuffer(plane.data, dtype=Feature.dtypes[plane.bits_per_pixel])

    if plane.bits_per_pixel == 1:
      data = np.unpackbits(data)
      if data.shape[0] != size.x *  size.y:
        # This could happen if the correct length isn't a multiple of 8, leading
        # to some padding bits at the end of the string which are incorrectly
        # interpreted as data.
        data = data[:size.x * size.y]
    return data.reshape(size.y, size.x)

In short, the JavaScript version takes roughly 3.5 times as long as the Python version.

I'm going to look at the numpy unpackbits documentation, since it appears to be far more efficient than my own approach --

However, I'd like to know whether anyone has ideas on how to better optimize my own unpackbits function, or a better way to get TensorFlow to do this for me?

It looks like tensorflow.js has no bitwise AND function, so I suspect getting the work done inside tensorflow.js would take some coding gymnastics...

One suggestion, though, is to create an array of 256 Uint8Arrays of size 8 and pre-populate it with the full list of byte-to-bits conversions. This greatly reduces the repeated computation for a byte stream that will likely contain repeated values in the range 0 - 255. For example, the first entry of the precomputed array represents the unpacking of byte 0 and is therefore a Uint8Array of size 8 filled with 0s; the next entry is another Uint8Array of size 8 filled with 00000001; and so on, up to the entry representing byte 255, which is a Uint8Array of size 8 filled with all 1s.

Then, when unpacking, simply use the typed array .set method to copy each precomputed unpacked representation into the results Uint8Array...
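
A minimal sketch of that idea (the names here are just illustrative; the edit below contains the full test harness actually measured):

// Precompute the 8-bit expansion of every possible byte value (0..255).
const BIT_LOOKUP = new Array(256);
for (let b = 0; b < 256; b++) {
  const bits = new Uint8Array(8);
  for (let j = 0; j < 8; j++) {
    bits[j] = (b >> (7 - j)) & 1; // most significant bit first
  }
  BIT_LOOKUP[b] = bits;
}

function unpackbitsLookup(uint8data) {
  const results = new Uint8Array(8 * uint8data.length);
  for (let i = 0; i < uint8data.length; i++) {
    // Copy the precomputed 8-entry row for this byte value.
    results.set(BIT_LOOKUP[uint8data[i]], 8 * i);
  }
  return results;
}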

Hope this helps.

EDIT: Created a number of variations of the unpacking algorithm to test the performance of inline computation versus memory lookup, and the results in Chrome were surprising. Some of the V8 compiler's optimizations are non-intuitive...

The differences between the versions...

  • unpackbits [FAST]: From the original question; this is the baseline the other variations are compared against.
  • unpackbits1 [FAST]: Modified by...
    • Specifying "|0" after every integer.
    • Using the unary increment operator ("++") rather than adding an increment to the offset index of the results array.
    • Substituting the actual value for the computed bit mask. (I.e., the function uses 32 rather than 1 << 5.)
  • unpackbits1a [FAST]: Same as unpackbits1, except...
    • It keeps the computed bit mask rather than the integer value. (I.e., it uses 1 << 5 rather than 32, as in the original question.) Counter-intuitively, this produces faster results!
  • unpackbits1b [SLOWER]: Same as unpackbits1a, except...
    • offset is not recomputed each time through the loop. I.e., offset = 0|0 is set initially, and thereafter offset is only incremented inside the loop, so offset = ( (8|0) * i ) is no longer computed for every byte. Counter-intuitively, this produces slower results!
  • unpackbits2 [SLOWEST]: This is the memory-lookup option I recommended above. Counter-intuitively, this implies that typed-array memory operations are significantly slower than computing the results as unpackbits does!
  • unpackbits3 [SLOWER]: This is the memory-lookup option I recommended above, with the following change.
    • Rather than using the typed array .set method, this version sets the eight bytes one by one. Counter-intuitively, this implies that the typed array .set method is slower than setting the values individually (at least for eight bytes)!
  • unpackbits4 [SLOWER]: This variation of the algorithm is comparable to the original and is a variation of the memory-lookup option. However, rather than 256 individual Uint8Arrays, it combines all the precomputed results into a single Uint8Array of length 256 * 8. And it does not use the typed array .set method.
  • unpackbits5 [SLOWER]: Same as unpackbits4, except...
    • Rather than using the unary "++" on the index into the lookup table, it computes the index for each of the 8 bytes being copied. As expected, computing the index each time is slower than using the unary "++" operator.
Here is the test. Note that it builds an initial array of 10M random bytes and then runs each unpacking algorithm against the same data. On my workstation the test runs in under 5 seconds.

var lookupTable = initializeLookupTable();

function initializeLookupTable() {
  let lookup = new Array( 256 );
  let v = new Uint8Array( 1 );
  for ( let i = 0; i < 256; i++ ) {
    v[ 0 ] = i;
    lookup[ i ] = unpackbits( v );
  }
  return lookup;
}

var lookupTable4 = initializeLookupTable4();

function initializeLookupTable4() {
  let lookup = new Uint8Array( 256 * 8 );
  let v = new Uint8Array( 1 );
  for ( let i = 0; i < 256; i++ ) {
    v[ 0 ] = i;
    let temp = unpackbits( v );
    lookup.set( temp, i * 8 );
  }
  return lookup;
}

function unpackbits(uint8data) {
  const results = new Uint8Array(8 * uint8data.length)
  let byte
  let offset
  for (let i = 0; i < uint8data.length; i++) {
    byte = uint8data[i]
    offset = (8 * i);
    results[offset + 7] = ((byte & (1 << 0)) >> 0)
    results[offset + 6] = ((byte & (1 << 1)) >> 1)
    results[offset + 5] = ((byte & (1 << 2)) >> 2)
    results[offset + 4] = ((byte & (1 << 3)) >> 3)
    results[offset + 3] = ((byte & (1 << 4)) >> 4)
    results[offset + 2] = ((byte & (1 << 5)) >> 5)
    results[offset + 1] = ((byte & (1 << 6)) >> 6)
    results[offset + 0] = ((byte & (1 << 7)) >> 7)
  }
  return results
}

function unpackbits1(uint8data) {
  const results = new Uint8Array(8 * uint8data.length)
  let byte;
  let offset;
  for (let i = 0|0, n = uint8data.length; i < n; i++) {
    byte = uint8data[i]|0
    offset = (8 * i)|0;
    results[offset++] = ((byte & 128)>>7)|0;
    results[offset++] = ((byte & 64)>>6)|0;
    results[offset++] = ((byte & 32)>>5)|0;
    results[offset++] = ((byte & 16)>>4)|0;
    results[offset++] = ((byte & 8)>>3)|0;
    results[offset++] = ((byte & 4)>>2)|0;
    results[offset++] = ((byte & 2)>>1)|0;
    results[offset++] = ((byte & 1)>>0)|0;
  }
  return results
}

function unpackbits1a(uint8data) {
  const results = new Uint8Array(8 * uint8data.length)
  let byte;
  let offset;
  for (let i = 0|0, n = uint8data.length; i < n; i++) {
    byte = uint8data[i]|0;
    offset = (8 * i)|0;
    results[offset++] = ((byte & (1 << 7))>>7)|0;
    results[offset++] = ((byte & (1 << 6))>>6)|0;
    results[offset++] = ((byte & (1 << 5))>>5)|0;
    results[offset++] = ((byte & (1 << 4))>>4)|0;
    results[offset++] = ((byte & (1 << 3))>>3)|0;
    results[offset++] = ((byte & (1 << 2))>>2)|0;
    results[offset++] = ((byte & (1 << 1))>>1)|0;
    results[offset++] = (byte & 1)|0;
  }
  return results
}

function unpackbits1b(uint8data) {
  const results = new Uint8Array(8 * uint8data.length)
  let byte;
  let offset = 0|0;
  for (let i = 0|0, n = uint8data.length; i < n; i++) {
    byte = uint8data[i]|0;
    results[offset++] = ((byte & (1 << 7))>>7)|0;
    results[offset++] = ((byte & (1 << 6))>>6)|0;
    results[offset++] = ((byte & (1 << 5))>>5)|0;
    results[offset++] = ((byte & (1 << 4))>>4)|0;
    results[offset++] = ((byte & (1 << 3))>>3)|0;
    results[offset++] = ((byte & (1 << 2))>>2)|0;
    results[offset++] = ((byte & (1 << 1))>>1)|0;
    results[offset++] = (byte & 1)|0;
  }
  return results
}

function unpackbits2( uint8data ) {
  const result = new Uint8Array( 8 * uint8data.length );
  for ( let i = 0|0, ri = 0|0, n = uint8data.length; i < n; i++, ri += 8 ) {
    result.set( lookupTable[ uint8data[ i ] ], ri );
  }
  return result;
}

function unpackbits3( uint8data ) {
  const result = new Uint8Array( 8 * uint8data.length );
  let ri = 0|0;
  for ( let i = 0|0, n = uint8data.length; i < n; i++ ) {
    //result.set( lookupTable[ uint8data[ i ] ], ri );
    let lv = lookupTable[ uint8data[ i ] ];
    result[ ri++ ] = lv [ 0|0 ];
    result[ ri++ ] = lv [ 1|0 ];
    result[ ri++ ] = lv [ 2|0 ];
    result[ ri++ ] = lv [ 3|0 ];
    result[ ri++ ] = lv [ 4|0 ];
    result[ ri++ ] = lv [ 5|0 ];
    result[ ri++ ] = lv [ 6|0 ];
    result[ ri++ ] = lv [ 7|0 ];
  }
  return result;
}

function unpackbits4( uint8data ) {
  const result = new Uint8Array( 8 * uint8data.length );
  let ri = 0|0;
  for ( let i = 0|0, n = uint8data.length; i < n; i++ ) {
    let li = (uint8data[ i ] * 8)|0;
    result[ ri++ ] = lookupTable4[ li++ ];
    result[ ri++ ] = lookupTable4[ li++ ];
    result[ ri++ ] = lookupTable4[ li++ ];
    result[ ri++ ] = lookupTable4[ li++ ];
    result[ ri++ ] = lookupTable4[ li++ ];
    result[ ri++ ] = lookupTable4[ li++ ];
    result[ ri++ ] = lookupTable4[ li++ ];
    result[ ri++ ] = lookupTable4[ li++ ];   
  }
  return result;
}

function unpackbits5( uint8data ) {
  const result = new Uint8Array( 8 * uint8data.length );
  let ri = 0|0;
  for ( let i = 0|0, n = uint8data.length; i < n; i++ ) {
    let li = (uint8data[ i ] * 8)|0;
    result[ ri++ ] = lookupTable4[ li ];
    result[ ri++ ] = lookupTable4[ li+1 ];
    result[ ri++ ] = lookupTable4[ li+2 ];
    result[ ri++ ] = lookupTable4[ li+3 ];
    result[ ri++ ] = lookupTable4[ li+4 ];
    result[ ri++ ] = lookupTable4[ li+5 ];
    result[ ri++ ] = lookupTable4[ li+6 ];
    result[ ri++ ] = lookupTable4[ li+7 ];   
  }
  return result;
}


// Test

console.log( 'Building array of 10,000,000 test values.' );
let buffer = new ArrayBuffer( 10000000 );
let testArray = new Uint8Array( buffer );
for ( let i = 0; i < testArray.length; i++ ) {
  testArray[ i ] = Math.floor( 256 * Math.random() );
}
console.log( 'Finished building test values.' );

console.log( 'Starting unpackbits.' );
console.time('u');
let u = unpackbits( testArray );
console.timeEnd('u');
console.log( 'Finished unpackbits.' );

console.log( 'Starting unpackbits1.' );
console.time('u1');
u = unpackbits1( testArray );
console.timeEnd('u1');
console.log( 'Finished unpackbits1.' );

console.log( 'Starting unpackbits1a.' );
console.time('u1a');
u = unpackbits1a( testArray );
console.timeEnd('u1a');
console.log( 'Finished unpackbits1a.' );

console.log( 'Starting unpackbits1b.' );
console.time('u1b');
u = unpackbits1b(testArray );
console.timeEnd('u1b');
console.log( 'Finished unpackbits1b.' );

console.log( 'Starting unpackbits2.' );
console.time('u2');
u = unpackbits2( testArray );
console.timeEnd('u2');
console.log( 'Finished unpackbits2.' );

console.log( 'Starting unpackbits3.' );
console.time('u3');
u = unpackbits3( testArray );
console.timeEnd('u3');
console.log( 'Finished unpackbits3.' );

console.log( 'Starting unpackbits4.' );
console.time('u4');
u = unpackbits4( testArray );
console.timeEnd('u4');
console.log( 'Finished unpackbits4.' );

console.log( 'Starting unpackbits5.' );
console.time('u5');
u = unpackbits5( testArray );
console.timeEnd('u5');
console.log( 'Finished unpackbits5.' );

This reply is a continuation of the comment chain under @Jon Trent's answer.

EDIT: Includes a TensorFlow comparison for the reshaping part.

I'm profiling the performance of two of the bit-unpacking methods, unpackbits1a and unpackbits (the original). I'm also profiling different ways of reshaping the data into an NxM grid, where N may or may not equal M. Here is what I got:

function unpackbits1a(uint8data) {
  const results = new Uint8Array(8 * uint8data.length)
  let byte;
  let offset;
  for (let i = 0|0, n = uint8data.length; i < n; i++) {
    byte = uint8data[i]
    offset = ((8|0) * i);  // The "|0" on this line cuts the time almost in half!
    results[offset++] = (byte & ((1|0) << (7|0)))>>7|0;
    results[offset++] = (byte & ((1|0) << (6|0)))>>6|0;
    results[offset++] = (byte & ((1|0) << (5|0)))>>5|0;
    results[offset++] = (byte & ((1|0) << (4|0)))>>4|0;
    results[offset++] = (byte & ((1|0) << (3|0)))>>3|0;
    results[offset++] = (byte & ((1|0) << (2|0)))>>2|0;
    results[offset++] = (byte & ((1|0) << (1|0)))>>1|0;
    results[offset++] = (byte & (1|0));
  }
  return results
}

function unpackbits(uint8data) {
  const results = new Uint8Array(8 * uint8data.length)
  let byte
  let offset
  for (let i = 0; i < uint8data.length; i++) {
    byte = uint8data[i]
    offset = 8 * i
    results[offset + 7] = ((byte & (1 << 0)) >> 0)
    results[offset + 6] = ((byte & (1 << 1)) >> 1)
    results[offset + 5] = ((byte & (1 << 2)) >> 2)
    results[offset + 4] = ((byte & (1 << 3)) >> 3)
    results[offset + 3] = ((byte & (1 << 4)) >> 4)
    results[offset + 2] = ((byte & (1 << 5)) >> 5)
    results[offset + 1] = ((byte & (1 << 6)) >> 6)
    results[offset + 0] = ((byte & (1 << 7)) >> 7)
  }
  return results
}


function unpackbitsToShape1(uint8data, shape = [1, 1]) {
  var data = unpackbits(uint8data)
  const dims = [shape[0] | 0, shape[1] | 0]
  const result = new Array(dims[0])
  let temp
  const width =  0 | dims[1]
  for (let i = 0 | 0; i < dims[0]; i++) {
    temp = new Array(dims[1])
    for (let j = 0| 0; j < dims[1]; j++) {
      temp[j] = data[i * width + j] // index into the unpacked bits, not the raw bytes
    }
    result[i] = temp
  }
  return result
}

function unpackbitsToShape2(uint8data, shape = [1, 1]) {
  var data = unpackbits(uint8data)
  const dims = [shape[0] | 0, shape[1] | 0]
  const result = new Array(dims[0])
  const width = dims[1]
  let offset
  for (let i = 0 | 0; i < dims[0]; i++) {
    offset = (width * i)
    result[i] = data.slice(offset, offset + width)
  }
  return result
}

function unpackbitsToShape3(uint8data, shape = [1, 1]) {
  const dims = [0 | shape[0], 0 | shape[1]]
  const result = new Array(dims[0])
  let position = 0 | 0
  const smallCount = 0 | (uint8data.length % dims[0])
  const bigCount = 0 | (uint8data.length - smallCount)
  const bigByteChunk = 0 | (bigCount / dims[0])
  const bigBitWidth = 0 | 8 * bigByteChunk
  const smallByteChunk = 0 | (smallCount / dims[0])
  const smallBitWidth = 0 | 8 * smallByteChunk
  if (smallCount) {
    let big
    let small
    let odd
    let temp
    for (let i = 0 | 0; i < dims[0]; i++) {
      temp = new Uint8Array(dims[1])
      odd = i % 2
      big = unpackbits(uint8data.subarray(position, position + bigByteChunk))
      position += bigByteChunk
      if (odd) {
        temp.set(small.subarray(smallBitWidth, 8), 0)
        temp.set(big, smallBitWidth)
        result[i] = temp
      } else {
        temp.set(big, 0)
        small = unpackbits(uint8data.subarray(position, position + 1))
        position++
        temp.set(small.subarray(0, smallBitWidth), bigBitWidth)
        result[i] = temp
      }
    }
    return result
  }
  for (let i = 0 | 0; i < dims[0]; i++) {
    // console.log('unpacking: ', uint8data.subarray(position, position + bigByteChunk))
    result[i] = unpackbits(uint8data.subarray(position, position + bigByteChunk))
    position += bigByteChunk
  }
  return result
}

var tf = require('@tensorflow/tfjs')
tf = require('@tensorflow/tfjs-node')
function unpackBitsToShapeTensorflow(uint8data, shape) {
  return tf.tensor(unpackbits(uint8data), shape, 'int32')
}

var test64by64 = new Uint8Array(512)
for (let i = 0; i < test64by64.length; i++) {
  test64by64[ i ] = Math.floor(256 * Math.random());
}
var test84by84 = new Uint8Array(882)
for (let i = 0; i < test84by84.length; i++) {
  test84by84[ i ] = Math.floor(256 * Math.random());
}
var test100by100 = new Uint8Array(1250)
for (let i = 0; i < test100by100.length; i++) {
  test100by100[ i ] = Math.floor(256 * Math.random());
}

function assert(condition, errMsg) {
  if (!condition) {
    console.error(errMsg)
  }
}

console.log('********* 64 x 64 *********\n\n')
console.log('Starting unpackbits1a.');
console.time('u1a');
var foo = unpackbits1a(test64by64);
console.timeEnd('u1a');
console.log('Finished unpackbits1a.');
console.log('Starting "unpackbits"');
console.time('u-orig');
foo = unpackbits(test64by64);
console.timeEnd('u-orig');
console.log('Finished unpackbits.');


console.log('Starting "unpackbitsToShape1"');
console.time('u1');
foo = unpackbitsToShape1(test64by64, [64, 64])
console.timeEnd('u1');
assert(
  foo.length === 64 && foo[0].length === 64,
  'foo.length === 64 && foo[0].length === 64'
)
console.log('Finished unpackbitsToShape1.');


console.log('Starting "unpackbitsToShape2"');
console.time('u2');
foo = unpackbitsToShape2(test64by64, [64, 64])
console.timeEnd('u2');
assert(
  foo.length === 64 && foo[0].length === 64,
  'foo.length === 64 && foo[0].length === 64'
)
console.log('Finished unpackbitsToShape2.');

console.log('Starting "unpackbitsToShape3"');
console.time('u3');
foo = unpackbitsToShape3(test64by64, [64, 64])
console.timeEnd('u3');
assert(
  foo.length === 64 && foo[0].length === 64,
  'foo.length === 64 && foo[0].length === 64'
)
console.log('Finished unpackbitsToShape3.');

console.log('\nStarting "unpackBitsToShapeTensorflow"')
console.time('u-tensor')
foo = unpackBitsToShapeTensorflow(test64by64, [64, 64])
console.timeEnd('u-tensor')
console.log('Finished unpackBitsToShapeTensorflow.');


console.log('\n\n********* 84 x 84 *********\n\n')
console.log('Starting unpackbits1a.');
console.time('u1a');
foo = unpackbits1a(test84by84);
console.timeEnd('u1a');
console.log('Finished unpackbits1a.');
console.log('Starting "unpackbits"');
console.time('u-orig');
foo = unpackbits(test84by84);
console.timeEnd('u-orig');
console.log('Finished unpackbits.');


console.log('Starting "unpackbitsToShape1"');
console.time('u1');
foo = unpackbitsToShape1(test84by84, [84, 84])
console.timeEnd('u1');
assert(
  foo.length === 84 && foo[0].length === 84,
  'foo.length === 84 && foo[0].length === 84'
)
console.log('Finished unpackbitsToShape1.');


console.log('Starting "unpackbitsToShape2"');
console.time('u2');
foo = unpackbitsToShape2(test84by84, [84, 84])
console.timeEnd('u2');
assert(
  foo.length === 84 && foo[0].length === 84,
  'foo.length === 84 && foo[0].length === 84'
)
console.log('Finished unpackbitsToShape2.');

console.log('Starting "unpackbitsToShape3"');
console.time('u3');
foo = unpackbitsToShape3(test84by84, [84, 84])
console.timeEnd('u3');
assert(
  foo.length === 84 && foo[0].length === 84,
  'foo.length === 84 && foo[0].length === 84'
)
console.log('Finished unpackbitsToShape3.');

console.log('\nStarting "unpackBitsToShapeTensorflow"')
console.time('u-tensor')
foo = unpackBitsToShapeTensorflow(test84by84, [84, 84])
console.timeEnd('u-tensor')
console.log('Finished unpackBitsToShapeTensorflow.');


console.log('\n\n********* 100 x 100 *********\n\n')
console.log('Starting unpackbits1a.');
console.time('u1a');
foo = unpackbits1a(test100by100);
console.timeEnd('u1a');
console.log('Finished unpackbits1a.');
console.log('Starting "unpackbits"');
console.time('u-orig');
foo = unpackbits(test100by100);
console.timeEnd('u-orig');
console.log('Finished unpackbits.');


console.log('Starting "unpackbitsToShape1"');
console.time('u1');
foo = unpackbitsToShape1(test100by100, [100, 100])
console.timeEnd('u1');
assert(
  foo.length === 100 && foo[0].length === 100,
  'foo.length === 100 && foo[0].length === 100'
)
console.log('Finished unpackbitsToShape1.');


console.log('Starting "unpackbitsToShape2"');
console.time('u2');
foo = unpackbitsToShape2(test100by100, [100, 100])
console.timeEnd('u2');
assert(
  foo.length === 100 && foo[0].length === 100,
  'foo.length === 100 && foo[0].length === 100'
)
console.log('Finished unpackbitsToShape2.');

console.log('Starting "unpackbitsToShape3"');
console.time('u3');
foo = unpackbitsToShape3(test100by100, [100, 100])
console.timeEnd('u3');
assert(
  foo.length === 100 && foo[0].length === 100,
  'foo.length === 100 && foo[0].length === 100'
)
console.log('Finished unpackbitsToShape3.');

console.log('\nStarting "unpackBitsToShapeTensorflow"')
console.time('u-tensor')
foo = unpackBitsToShapeTensorflow(test100by100, [100, 100])
console.timeEnd('u-tensor')
console.log('Finished unpackBitsToShapeTensorflow.');

I don't know how the browser's execution environment differs from Node's, but the results seem more stable in Node. Here is what I got:

********* 64 x 64 *********


Starting unpackbits1a.
u1a: 0.513ms
Finished unpackbits1a.
Starting "unpackbits"
u-orig: 0.189ms
Finished unpackbits.
Starting "unpackbitsToShape1"
u1: 0.434ms
Finished unpackbitsToShape1.
Starting "unpackbitsToShape2"
u2: 0.365ms
Finished unpackbitsToShape2.
Starting "unpackbitsToShape3"
u3: 0.590ms
Finished unpackbitsToShape3.

Starting "unpackBitsToShapeTensorflow"
u-tensor: 0.508ms
Finished unpackBitsToShapeTensorflow.


********* 84 x 84 *********


Starting unpackbits1a.
u1a: 0.222ms
Finished unpackbits1a.
Starting "unpackbits"
u-orig: 0.425ms
Finished unpackbits.
Starting "unpackbitsToShape1"
u1: 0.622ms
Finished unpackbitsToShape1.
Starting "unpackbitsToShape2"
u2: 0.303ms
Finished unpackbitsToShape2.
Starting "unpackbitsToShape3"
u3: 0.388ms
Finished unpackbitsToShape3.

Starting "unpackBitsToShapeTensorflow"
u-tensor: 0.175ms
Finished unpackBitsToShapeTensorflow.


********* 100 x 100 *********


Starting unpackbits1a.
u1a: 1.502ms
Finished unpackbits1a.
Starting "unpackbits"
u-orig: 0.018ms
Finished unpackbits.
Starting "unpackbitsToShape1"
u1: 1.631ms
Finished unpackbitsToShape1.
Starting "unpackbitsToShape2"
u2: 0.072ms
Finished unpackbitsToShape2.
Starting "unpackbitsToShape3"
u3: 0.159ms
Finished unpackbitsToShape3.

Starting "unpackBitsToShapeTensorflow"
u-tensor: 0.052ms
Finished unpackBitsToShapeTensorflow.

Not sure whether this helps, but I was aggravated that I was hung up on needing bitwise operators in tensorflow in order to convert the byte stream to a bit stream per the original question. Plain integer division and modulus can do the trick too!

In short, the algorithm is as follows. Given the byte stream [ 92 ]...

  • Divide and mod by 16, giving the two streams [ 5 ] and [ 12 ].
  • Interleave these results into the tensor [ 5, 12 ].
  • Take each value and divide and mod by 4, giving [ 1, 3 ] and [ 1, 0 ].
  • Interleave these results into the tensor [ 1, 1, 3, 0 ].
  • Divide and mod by 2, giving [ 0, 0, 1, 0 ] and [ 1, 1, 1, 0 ].
  • Interleave into [ 0, 1, 0, 1, 1, 1, 0, 0 ], which is the binary representation of 92.

Below are two versions of the same algorithm: one in tensorflow and one in pure javascript.

function tfDaC( stream ) {
  
  const stream8bit = tf.tensor( stream, undefined, 'int32' );
  
  console.time('in-tf');
  const stream4bitHi = tf.div(stream8bit, tf.scalar(16, 'int32' ));
  const stream4bitLo = tf.mod(stream8bit, tf.scalar(16, 'int32' ));
  const stream4bit = tf.stack([stream4bitHi, stream4bitLo],1).flatten();

  const stream2bitHi = tf.div( stream4bit, tf.scalar(4, 'int32' ));
  const stream2bitLo = tf.mod(stream4bit, tf.scalar(4, 'int32' ));
  const stream2bit = tf.stack([stream2bitHi, stream2bitLo],1).flatten();

  const stream1bitHi = tf.div(stream2bit, tf.scalar(2, 'int32' ));
  const stream1bitLo = tf.mod(stream2bit, tf.scalar(2, 'int32' ));
  const stream1bit = tf.stack([stream1bitHi, stream1bitLo],1).flatten().toBool();
  console.timeEnd('in-tf');
  
  return stream1bit.dataSync().buffer;
}


function jsDaC( stream ) {

  let result = new ArrayBuffer( stream.byteLength * 8 );

  let buffer32 = new Uint32Array( result );  // Pointer to every 4 bytes!  
  for ( let i = 0; i < stream.byteLength; i++ ) {
    let byte = stream[ i ];
    buffer32[ (i * 2) |0 ] = ( byte / 16) |0;
    buffer32[ (i * 2 + 1) |0 ] = ( byte % 16 ) |0;
  }
  
  let buffer16 = new Uint16Array( result );  // Pointer to every 2 bytes!  
  for ( let i = 0; i < buffer32.length; i++ ) {
    let byte = buffer32[ i ];
    buffer16[ (i * 2) |0 ] = ( byte / 4) |0;
    buffer16[ (i * 2 + 1) |0 ] = ( byte % 4 ) |0;
  }
  
  let buffer8 = new Uint8Array( result );  // Pointer to every byte!
  for ( let i = 0; i < buffer16.length; i++ ) {
    let byte = buffer16[ i ];
    buffer8[ (i * 2) |0 ] = ( byte / 2 ) |0;
    buffer8[ (i * 2 + 1) |0 ] = ( byte % 2 ) |0;
  }

  return result;
}

console.log( 'Generating array of 1M bytes' );
let buffer = new ArrayBuffer( 1000000 );
let testArray = new Uint8Array( buffer );
for ( let i = 0; i < testArray.length; i++ ) {
  testArray[ i ] = Math.floor( 256 * Math.random() );
}

let result;

console.log( 'Begin tensorflow divide & conquer test with 1M bytes.' );
console.time( 'tf' );
result = tfDaC( testArray );
console.timeEnd( 'tf' );
console.log( `End tensorflow test with 1M bytes resulting in array of ${result.byteLength} bytes` );

console.log( 'Begin javascript divide & conquer test with 1M bytes.' );
console.time( 'js' );
result = jsDaC( testArray );
console.timeEnd( 'js' );
console.log( `End javascript test with 1M bytes resulting in array of ${result.byteLength} bytes` );
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@2.0.1/dist/tf.min.js"></script>

The tensorflow performance on my workstation was terrible. I had to reduce the byte stream to 1M bytes because my GPU threw memory errors at the 10M-byte level of my earlier tests. Even with only 1M bytes, a handful of runs ranged from 1236ms to 1414ms. Not sure why it's so slow. Possibly the coercion of the numbers to int32 adds a lot of overhead, since my understanding is that GPUs are generally built primarily for floating-point operations. Marshalling the data into and out of the GPU also takes some time. It might be worth trying to convert this function to use only floats rather than int32...?! Or maybe I got hold of an inferior build of tensorflow.js...?! Interested to hear how it runs in your NodeJS configuration...

The javascript version, on the other hand, ranged from 30ms to 42ms for 1M bytes, almost 2 orders of magnitude faster than the GPU (!). However, extrapolating these results to 10M bytes, this algorithm is still slower than all of the previous algorithms...

So not sure whether this helps. It may simply help rule out tensorflow as an option, although it might still be worth trying floats rather than int32. I'm not very hopeful, though...
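
For reference, a float-only variant of tfDaC might look like the sketch below. This is only a sketch, not something I've benchmarked; it assumes tf.floorDiv performs floor division on float32 tensors and otherwise reuses the divide-and-conquer structure of tfDaC above.

function tfDaCFloat( stream ) {
  // Same divide-and-conquer idea as tfDaC, but staying in float32 throughout
  // and only casting to bool at the very end.
  const stream8bit = tf.tensor( stream, undefined, 'float32' );

  const stream4bitHi = tf.floorDiv( stream8bit, tf.scalar( 16 ) );
  const stream4bitLo = tf.mod( stream8bit, tf.scalar( 16 ) );
  const stream4bit = tf.stack( [ stream4bitHi, stream4bitLo ], 1 ).flatten();

  const stream2bitHi = tf.floorDiv( stream4bit, tf.scalar( 4 ) );
  const stream2bitLo = tf.mod( stream4bit, tf.scalar( 4 ) );
  const stream2bit = tf.stack( [ stream2bitHi, stream2bitLo ], 1 ).flatten();

  const stream1bitHi = tf.floorDiv( stream2bit, tf.scalar( 2 ) );
  const stream1bitLo = tf.mod( stream2bit, tf.scalar( 2 ) );
  const stream1bit = tf.stack( [ stream1bitHi, stream1bitLo ], 1 ).flatten().toBool();

  return stream1bit.dataSync().buffer;
}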