Why does Map() fail on a default texture despite UnifiedMemoryArchitecture and MapOnDefaultTextures?
The docs suggest that default-usage textures can be mapped on UMA architectures, e.g. Intel integrated GPUs, using Direct3D 11.3. I'm trying to implement this, but Map() always fails with E_INVALIDARG.

I'm fairly new to C++ and DirectX, but below is what I believe to be a minimal test case. Please don't hesitate to point out anything silly I've done.

I'm running this on a laptop with Windows 10 1809 and an Intel Skylake i5-6300U with HD Graphics 520.
#include "pch.h"
#include <iostream>
#include <dxgi1_6.h>
#include <d3d.h>
#include <d3d11_4.h>
#include <assert.h>
int main()
{
HRESULT res = S_OK;
ID3D11Device *Dev = nullptr;
ID3D11DeviceContext *Ctx = nullptr;
D3D_FEATURE_LEVEL Fl;
D3D_FEATURE_LEVEL fls[1] = { D3D_FEATURE_LEVEL_11_1 };
res = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, D3D11_CREATE_DEVICE_DEBUG | D3D11_CREATE_DEVICE_BGRA_SUPPORT, fls, 1, D3D11_SDK_VERSION, &Dev, &Fl, &Ctx);
assert(res == S_OK);
assert(Fl == D3D_FEATURE_LEVEL_11_1);
ID3D11Device5 *Dev5 = nullptr;
res = Dev->QueryInterface<ID3D11Device5>(&Dev5);
assert(res == S_OK);
Dev->Release();
Dev = nullptr;
ID3D11DeviceContext4 *Ctx4;
res = Ctx->QueryInterface<ID3D11DeviceContext4>(&Ctx4);
assert(res == S_OK);
Ctx->Release();
Ctx = nullptr;
D3D11_FEATURE_DATA_D3D11_OPTIONS2 opts2;
res = Dev5->CheckFeatureSupport(D3D11_FEATURE_D3D11_OPTIONS2, &opts2, sizeof(opts2));
assert(res == S_OK);
assert(opts2.MapOnDefaultTextures);
assert(opts2.UnifiedMemoryArchitecture);
D3D11_TEXTURE2D_DESC1 texDesc = { 0 };
texDesc.ArraySize = 1;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_UNORDERED_ACCESS;
texDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.Height = 256;
texDesc.Width = 256;
texDesc.MipLevels = 1;
texDesc.MiscFlags = 0;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.TextureLayout = D3D11_TEXTURE_LAYOUT_UNDEFINED;
texDesc.Usage = D3D11_USAGE_DEFAULT;
byte mem[256 * 256 * 4];
ZeroMemory(mem, 256 * 256 * 4);
D3D11_SUBRESOURCE_DATA data = { 0 };
data.pSysMem = mem;
data.SysMemPitch = 256 * 4;
ID3D11Texture2D1 *tex2d;
res = Dev5->CreateTexture2D1(&texDesc, &data, &tex2d);
assert(res == S_OK);
D3D11_MAPPED_SUBRESOURCE map = { 0 };
// I believe at least one of these should succeed, but all fail
res = Ctx4->Map(tex2d, 0, D3D11_MAP_READ, 0, &map);
//res = Ctx4->Map(tex2d, 0, D3D11_MAP_WRITE, 0, &map);
//res = Ctx4->Map(tex2d, 0, D3D11_MAP_READ_WRITE, 0, &map);
assert(res == S_OK); // E_INVALIDARG
}
I believe the Map() call should succeed, but it fails with E_INVALIDARG.
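For completeness: I know the conventional way to get CPU access to texture data is through a separate STAGING copy, and avoiding that extra copy on UMA hardware is exactly why I want map-on-default to work. For reference, the fallback I would otherwise use looks roughly like this (a sketch reusing Dev5, Ctx4, texDesc, tex2d, and map from above; untested here):

// Fallback: copy the DEFAULT texture into a STAGING texture and map that.
D3D11_TEXTURE2D_DESC1 stgDesc = texDesc;
stgDesc.Usage = D3D11_USAGE_STAGING;
stgDesc.BindFlags = 0;                             // staging resources cannot be bound
stgDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
stgDesc.TextureLayout = D3D11_TEXTURE_LAYOUT_UNDEFINED;
ID3D11Texture2D1 *stg = nullptr;
res = Dev5->CreateTexture2D1(&stgDesc, nullptr, &stg);
assert(res == S_OK);
Ctx4->CopyResource(stg, tex2d);                    // GPU-side copy of the whole resource
res = Ctx4->Map(stg, 0, D3D11_MAP_READ, 0, &map);  // mapping STAGING resources is the supported path
assert(res == S_OK);
Ctx4->Unmap(stg, 0);
stg->Release();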
EDIT: I also tried D3D11_TEXTURE_LAYOUT_ROW_MAJOR and D3D11_TEXTURE_LAYOUT_64K_STANDARD_SWIZZLE, but then CreateTexture2D1() fails with E_INVALIDARG. Perhaps my hardware doesn't support those layouts?
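Concretely, the only change in those attempts was the layout field (the second line shows the variant that was swapped in):

// Variants tried for the EDIT above; with either one, CreateTexture2D1() itself returns E_INVALIDARG here:
texDesc.TextureLayout = D3D11_TEXTURE_LAYOUT_ROW_MAJOR;
//texDesc.TextureLayout = D3D11_TEXTURE_LAYOUT_64K_STANDARD_SWIZZLE;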
I think the problem may be the one described in the documentation:
It is illegal to set CPU access flags on default textures without also setting TextureLayout to a value other than D3D11_TEXTURE_LAYOUT_UNDEFINED.
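One thing I haven't verified yet is whether the swizzled layout is even reported as supported: the same OPTIONS2 structure has a StandardSwizzle field next to the two fields I already assert on. A quick check, reusing opts2 from above:

// Does the driver claim support for D3D11_TEXTURE_LAYOUT_64K_STANDARD_SWIZZLE at all?
std::cout << "StandardSwizzle: " << (opts2.StandardSwizzle ? "yes" : "no") << std::endl;
// If this prints "no", the 64K swizzle attempt in the EDIT above is expected to fail.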