Load a large 3D object (.scn file) in ARSCNView and aspect-fit it to the screen (ARKit, Swift, iOS)
I am developing an ARKit application that works with a 3D model. I load the model and have added gestures for moving, rotating, and scaling it.
I am now facing only one remaining problem, and I am not sure what it relates to: whether something is wrong with the 3D model itself, or whether something is missing from my code.
The problem is that the 3D model I use renders extremely large and extends beyond the screen. I have tried scaling it down, but it is still huge.
Here is my code:
@IBOutlet var mySceneView: ARSCNView!
var selectedNode = SCNNode()
var prevLoc = CGPoint()
var touchCount: Int = 0

override func viewDidLoad() {
    super.viewDidLoad()
    self.lblTitle.text = self.sceneTitle

    // Load the scene and attach it to the ARSCNView
    let mySCN = SCNScene(named: "art.scnassets/\(self.sceneImagename).scn")!
    self.mySceneView.scene = mySCN

    // Add a camera node at the origin
    let cameraNode = SCNNode()
    cameraNode.camera = SCNCamera()
    cameraNode.position = SCNVector3Make(0, 0, 0)
    self.mySceneView.scene.rootNode.addChildNode(cameraNode)

    self.mySceneView.allowsCameraControl = true
    self.mySceneView.autoenablesDefaultLighting = true

    // Register tap and pan gestures alongside the view's existing recognizers
    let tapGesture = UITapGestureRecognizer(target: self, action: #selector(detailPage.doHandleTap(_:)))
    let panGesture = UIPanGestureRecognizer(target: self, action: #selector(detailPage.doHandlePan(_:)))
    let gesturesArray = NSMutableArray()
    gesturesArray.add(tapGesture)
    gesturesArray.add(panGesture)
    gesturesArray.addObjects(from: self.mySceneView.gestureRecognizers!)
    self.mySceneView.gestureRecognizers = (gesturesArray as! [UIGestureRecognizer])
}
// MARK: - Handle Gesture
@objc func doHandlePan(_ sender: UIPanGestureRecognizer) {
    var delta = sender.translation(in: self.view)
    let loc = sender.location(in: self.view)

    if sender.state == .began {
        self.prevLoc = loc
        self.touchCount = sender.numberOfTouches
    } else if sender.state == .changed {
        delta = CGPoint(x: loc.x - prevLoc.x, y: loc.y - prevLoc.y)
        prevLoc = loc
        if self.touchCount != sender.numberOfTouches {
            return
        }

        // Two-finger pan builds a translation matrix, one-finger pan builds a rotation
        var rotMat = SCNMatrix4()
        if touchCount == 2 {
            rotMat = SCNMatrix4MakeTranslation(Float(delta.x * 0.025), Float(delta.y * -0.025), 0)
        } else {
            let rotMatX = SCNMatrix4Rotate(SCNMatrix4Identity, Float((1.0 / 100) * delta.y), 1, 0, 0)
            let rotMatY = SCNMatrix4Rotate(SCNMatrix4Identity, Float((1.0 / 100) * delta.x), 0, 1, 0)
            rotMat = SCNMatrix4Mult(rotMatX, rotMatY)
        }

        // Re-express the node's transform so the gesture matrix is applied about the
        // node's own position and relative to the camera's orientation, then convert back
        let transMat = SCNMatrix4MakeTranslation(selectedNode.position.x, selectedNode.position.y, selectedNode.position.z)
        selectedNode.transform = SCNMatrix4Mult(selectedNode.transform, SCNMatrix4Invert(transMat))

        let parentNodeTransMat = SCNMatrix4MakeTranslation((selectedNode.parent?.worldPosition.x)!, (selectedNode.parent?.worldPosition.y)!, (selectedNode.parent?.worldPosition.z)!)
        let parentNodeMatWOTrans = SCNMatrix4Mult(selectedNode.parent!.worldTransform, SCNMatrix4Invert(parentNodeTransMat))
        selectedNode.transform = SCNMatrix4Mult(selectedNode.transform, parentNodeMatWOTrans)

        let camorbitNodeTransMat = SCNMatrix4MakeTranslation((self.mySceneView.pointOfView?.worldPosition.x)!, (self.mySceneView.pointOfView?.worldPosition.y)!, (self.mySceneView.pointOfView?.worldPosition.z)!)
        let camorbitNodeMatWOTrans = SCNMatrix4Mult(self.mySceneView.pointOfView!.worldTransform, SCNMatrix4Invert(camorbitNodeTransMat))
        selectedNode.transform = SCNMatrix4Mult(selectedNode.transform, SCNMatrix4Invert(camorbitNodeMatWOTrans))

        selectedNode.transform = SCNMatrix4Mult(selectedNode.transform, rotMat)

        selectedNode.transform = SCNMatrix4Mult(selectedNode.transform, camorbitNodeMatWOTrans)
        selectedNode.transform = SCNMatrix4Mult(selectedNode.transform, SCNMatrix4Invert(parentNodeMatWOTrans))
        selectedNode.transform = SCNMatrix4Mult(selectedNode.transform, transMat)
    }
}
@objc func doHandleTap(_ sender: UITapGestureRecognizer) {
    let p = sender.location(in: self.mySceneView)
    let hitResults = self.mySceneView.hitTest(p, options: nil)

    // Tapping near the top or right edge toggles camera control
    if p.x > self.mySceneView.frame.size.width - 100 || p.y < 100 {
        self.mySceneView.allowsCameraControl = !self.mySceneView.allowsCameraControl
    }

    if hitResults.count > 0 {
        let result = hitResults[0]
        let material = result.node.geometry?.firstMaterial
        selectedNode = result.node

        // Briefly highlight the tapped node's material
        SCNTransaction.begin()
        SCNTransaction.animationDuration = 0.3
        SCNTransaction.completionBlock = {
            SCNTransaction.begin()
            SCNTransaction.animationDuration = 0.3
            SCNTransaction.commit()
        }
        material?.emission.contents = UIColor.white
        SCNTransaction.commit()
    }
}
My question is:
Can a 3D model of any size be aspect-fitted to the screen and centred on it? If there is a way to do this, please advise.
Any guidance or suggestions would be appreciated.
What you need is getBoundingSphereCenter to get the size of the bounding sphere, which you can then project onto the screen. Alternatively, take the ratio between that radius and the distance from the SceneKit camera to the object's position; that tells you how large the object will appear on screen. To scale the object down, you simply set its scale property.
For the second part, you can use projectPoint.
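Here is a minimal sketch of that idea (the function name and the modelNode / sceneView names are placeholders of mine, not from the question): it reads the node's boundingSphere (the Swift counterpart of getBoundingSphereCenter), scales the node so the sphere matches a chosen target radius, and then uses projectPoint to see where the model's centre lands on screen.

// Sketch only: fit a node to a target world-space radius and
// check where its centre projects in the view.
// `modelNode` and `sceneView` are placeholder names.
func fitAndProject(_ modelNode: SCNNode, in sceneView: ARSCNView) {
    // Bounding sphere in the node's local space (centre + radius)
    let (localCenter, radius) = modelNode.boundingSphere

    // Scale so the bounding sphere ends up with roughly a 0.25 m radius
    // (the target value is an example; pick whatever suits your scene)
    let targetRadius: Float = 0.25
    if radius > 0 {
        let scale = targetRadius / radius
        modelNode.scale = SCNVector3(scale, scale, scale)
    }

    // Project the sphere's centre into 2D view space to see where
    // the object sits on screen (x/y on screen, z is depth)
    let worldCenter = modelNode.convertPosition(localCenter, to: nil)
    let screenPoint = sceneView.projectPoint(worldCenter)
    print("model centre appears at:", screenPoint.x, screenPoint.y)
}

The 0.25 m target is only an example value; compare the projected point against the view bounds if you want to verify that the model actually fits on screen.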
The way I handle this is to make sure the 3D model always ends up with a fixed size.
For example, whether the 3D model is a small cup or a large house, I make sure its width in scene coordinate space is always 25 cm (while keeping the ratio between x, y, and z).
You can compute the width of a node's bounding box like this:
let mySCN = SCNScene(named: "art.scnassets/\(self.sceneImagename).scn")!
let minX = mySCN.rootNode.boundingBox.min.x
let maxX = mySCN.rootNode.boundingBox.max.x
// change 0.25 to whatever you need
// this value is in meters
let scaleValue = 0.25 / abs(minX - maxX)
// scale all axes of the node using `scaleValue`
// this maintains ratios and does not stretch the model
mySCN.rootNode.scale = SCNVector3(scaleValue, scaleValue, scaleValue)
self.mySceneView.scene = mySCN
You can also use the bounding box's y or z values to compute the scale from the model's height or depth.
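If you want the fit to hold no matter which dimension is the largest, a small variation of the same approach (again only a sketch; fitUniformly and the 0.25 m default are my own placeholders, not part of the answer above) scales against the largest side of the bounding box and recentres the node's pivot on the box's midpoint so the model stays centred:

// Sketch: uniformly fit a node so its largest bounding-box side is ~0.25 m,
// then shift the pivot so the geometric centre becomes the node's origin.
func fitUniformly(_ node: SCNNode, targetSize: Float = 0.25) {
    let (minVec, maxVec) = node.boundingBox
    let width  = abs(maxVec.x - minVec.x)
    let height = abs(maxVec.y - minVec.y)
    let depth  = abs(maxVec.z - minVec.z)

    // Scale against the largest extent so the whole model fits
    let maxExtent = max(width, max(height, depth))
    guard maxExtent > 0 else { return }
    let scaleValue = targetSize / maxExtent
    node.scale = SCNVector3(scaleValue, scaleValue, scaleValue)

    // Move the pivot to the bounding-box centre so the model stays
    // centred wherever the node is positioned in the scene
    let center = SCNVector3((minVec.x + maxVec.x) / 2,
                            (minVec.y + maxVec.y) / 2,
                            (minVec.z + maxVec.z) / 2)
    node.pivot = SCNMatrix4MakeTranslation(center.x, center.y, center.z)
}

You would call it right after loading the scene, e.g. fitUniformly(mySCN.rootNode), before assigning the scene to the view.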