tus - 上传后访问控制允许来源错误
tus - Access-Control-Allow-Origin Error After Upload
我正在使用 tus-node-server 和 tus-js-client,尝试从 Web 浏览器将文件上传到我的服务器。对于较小的文件(约 10MB)它工作正常,但对于较大的文件(约 385MB)则会因 Access-Control-Allow-Origin 错误而失败。
上传进度被调用并一直完成直到 100% 然后失败并出现错误。这让我认为错误与某种类型的验证有关。
在控制台中抛出该错误后,它会重试,直到达到我设置的重试次数限制。
我已经发布了下面的错误。为什么会发生这种情况?
[Error] Origin https://example.com is not allowed by Access-Control-Allow-Origin.
[Error] XMLHttpRequest cannot load https://upload.example.com//saiudfhia1h due to access control checks.
[Error] Failed to load resource: Origin https://example.com is not allowed by Access-Control-Allow-Origin.
同样,在尝试了所有重试之后,它仍然失败并出现此错误。
tus: failed to upload chunk at offset 0, caused by [object XMLHttpRequestProgressEvent], originated from request (response code: 0, response text: )
前端 JS:
// Create a resumable tus upload for `file` and start it.
// `res._id` (from an earlier server response) doubles as the upload id/filename.
var upload = new tus.Upload(file, {
endpoint: "https://upload.example.com/?id=" + res._id,
retryDelays: [0, 1000, 3000, 5000],
metadata: {
filename: res._id,
filetype: file.type
},
onError: function(error) {
console.log("Failed because: " + error)
},
onProgress: function(bytesUploaded, bytesTotal) {
// BUG FIX: `percentage` was never defined (ReferenceError on every
// progress tick); derive it from the callback arguments instead.
var percentage = (bytesUploaded / bytesTotal * 100).toFixed(2)
console.log(bytesUploaded, bytesTotal, percentage + "%")
},
onSuccess: function() {
console.log("Download %s from %s", upload.file.name, upload.url)
alert("You have successfully uploaded your file");
}
})
// Start the upload
upload.start()
后端 JS:
// Persist incoming tus uploads on local disk under /files; the stored
// name is derived from the request URL by `fileNameFromUrl`.
server.datastore = new tus.FileStore({
directory: '/files',
path: '/',
namingFunction: fileNameFromUrl
});

// Once an upload completes, copy it to S3, then remove the local file.
server.on(EVENTS.EVENT_UPLOAD_COMPLETE, (event) => {
console.log(`Upload complete for file ${event.file.id}`);
const localPath = path.join("/files", event.file.id);
let params = {
Bucket: keys.awsBucketName,
Body: fs.createReadStream(localPath),
Key: `${event.file.id}/rawfile`
};
s3.upload(params, function(err, data) {
if (err) {
// BUG FIX: the local file used to be deleted even when the S3 upload
// failed, permanently losing the data. Keep it so the copy can be retried.
console.error('S3 upload failed:', err);
return;
}
console.log('S3 upload succeeded:', data);
fs.unlink(localPath, (unlinkErr) => {
// BUG FIX: `throw` inside an async callback crashes the whole process;
// log and move on instead — the upload itself already succeeded.
if (unlinkErr) {
console.error('Failed to delete local file:', unlinkErr);
return;
}
console.log('successfully deleted file');
});
});
});

const app = express();
const uploadApp = express();
// Route every method/path on the upload app through the tus server handler.
uploadApp.all('*', server.handle.bind(server));
app.use('/', uploadApp);
app.listen(3000);
问题原来是服务器位于 CloudFlare 后面,每个上传请求都有大小限制。设置 tus 客户端将上传分块到多个请求中解决了这个问题。
tus-js-client 中有一个 chunkSize 属性可以设置,合适的取值可能因客户端环境而异。此属性的默认值为 Infinity。
// Same upload as before, but with `chunkSize` set so each PATCH request
// stays under CloudFlare's per-request body-size limit.
var upload = new tus.Upload(file, {
endpoint: "https://upload.example.com/?id=" + res._id,
retryDelays: [0, 1000, 3000, 5000],
chunkSize: x, // Change `x` to the number representing the chunkSize you want
metadata: {
filename: res._id,
filetype: file.type
},
onError: function(error) {
console.log("Failed because: " + error)
},
onProgress: function(bytesUploaded, bytesTotal) {
// BUG FIX: `percentage` was never defined (ReferenceError on every
// progress tick); derive it from the callback arguments instead.
var percentage = (bytesUploaded / bytesTotal * 100).toFixed(2)
console.log(bytesUploaded, bytesTotal, percentage + "%")
},
onSuccess: function() {
console.log("Download %s from %s", upload.file.name, upload.url)
alert("You have successfully uploaded your file");
}
})
我正在使用 tus-node-server 和 tus-js-client,尝试从 Web 浏览器将文件上传到我的服务器。对于较小的文件(约 10MB)它工作正常,但对于较大的文件(约 385MB)则会因 Access-Control-Allow-Origin 错误而失败。
上传进度被调用并一直完成直到 100% 然后失败并出现错误。这让我认为错误与某种类型的验证有关。
在控制台中抛出该错误后,它会重试,直到达到我设置的重试次数限制。
我已经发布了下面的错误。为什么会发生这种情况?
[Error] Origin https://example.com is not allowed by Access-Control-Allow-Origin.
[Error] XMLHttpRequest cannot load https://upload.example.com//saiudfhia1h due to access control checks.
[Error] Failed to load resource: Origin https://example.com is not allowed by Access-Control-Allow-Origin.
同样,在尝试了所有重试之后,它仍然失败并出现此错误。
tus: failed to upload chunk at offset 0, caused by [object XMLHttpRequestProgressEvent], originated from request (response code: 0, response text: )
前端 JS:
// Create a resumable tus upload for `file` and start it.
// `res._id` (from an earlier server response) doubles as the upload id/filename.
var upload = new tus.Upload(file, {
endpoint: "https://upload.example.com/?id=" + res._id,
retryDelays: [0, 1000, 3000, 5000],
metadata: {
filename: res._id,
filetype: file.type
},
onError: function(error) {
console.log("Failed because: " + error)
},
onProgress: function(bytesUploaded, bytesTotal) {
// BUG FIX: `percentage` was never defined (ReferenceError on every
// progress tick); derive it from the callback arguments instead.
var percentage = (bytesUploaded / bytesTotal * 100).toFixed(2)
console.log(bytesUploaded, bytesTotal, percentage + "%")
},
onSuccess: function() {
console.log("Download %s from %s", upload.file.name, upload.url)
alert("You have successfully uploaded your file");
}
})
// Start the upload
upload.start()
后端 JS:
// Persist incoming tus uploads on local disk under /files; the stored
// name is derived from the request URL by `fileNameFromUrl`.
server.datastore = new tus.FileStore({
directory: '/files',
path: '/',
namingFunction: fileNameFromUrl
});

// Once an upload completes, copy it to S3, then remove the local file.
server.on(EVENTS.EVENT_UPLOAD_COMPLETE, (event) => {
console.log(`Upload complete for file ${event.file.id}`);
const localPath = path.join("/files", event.file.id);
let params = {
Bucket: keys.awsBucketName,
Body: fs.createReadStream(localPath),
Key: `${event.file.id}/rawfile`
};
s3.upload(params, function(err, data) {
if (err) {
// BUG FIX: the local file used to be deleted even when the S3 upload
// failed, permanently losing the data. Keep it so the copy can be retried.
console.error('S3 upload failed:', err);
return;
}
console.log('S3 upload succeeded:', data);
fs.unlink(localPath, (unlinkErr) => {
// BUG FIX: `throw` inside an async callback crashes the whole process;
// log and move on instead — the upload itself already succeeded.
if (unlinkErr) {
console.error('Failed to delete local file:', unlinkErr);
return;
}
console.log('successfully deleted file');
});
});
});

const app = express();
const uploadApp = express();
// Route every method/path on the upload app through the tus server handler.
uploadApp.all('*', server.handle.bind(server));
app.use('/', uploadApp);
app.listen(3000);
问题原来是服务器位于 CloudFlare 后面,每个上传请求都有大小限制。设置 tus 客户端将上传分块到多个请求中解决了这个问题。
tus-js-client 中有一个 chunkSize 属性可以设置,合适的取值可能因客户端环境而异。此属性的默认值为 Infinity。
// Same upload as before, but with `chunkSize` set so each PATCH request
// stays under CloudFlare's per-request body-size limit.
var upload = new tus.Upload(file, {
endpoint: "https://upload.example.com/?id=" + res._id,
retryDelays: [0, 1000, 3000, 5000],
chunkSize: x, // Change `x` to the number representing the chunkSize you want
metadata: {
filename: res._id,
filetype: file.type
},
onError: function(error) {
console.log("Failed because: " + error)
},
onProgress: function(bytesUploaded, bytesTotal) {
// BUG FIX: `percentage` was never defined (ReferenceError on every
// progress tick); derive it from the callback arguments instead.
var percentage = (bytesUploaded / bytesTotal * 100).toFixed(2)
console.log(bytesUploaded, bytesTotal, percentage + "%")
},
onSuccess: function() {
console.log("Download %s from %s", upload.file.name, upload.url)
alert("You have successfully uploaded your file");
}
})