The bounding box used for a dataset is the cloud's tightBoundingBox. /// site_model contains the dataset info. /// orientation in datasets is the rotation angle of the whole model around the z axis, initially 0.

Organization: dataset - building - floor - room, or location.

/// In filter:
Values that do NOT change after dataset alignment:
  dataset_orientation (always stores the quaternion of the very first pose; it does not change during rotation, so it equals the orientation before any rotation) ---- image.datasetOrientation
  dataset_floor_orientation (usually the same value as dataset_orientation)
  dataset_location (real 3D coordinates)
  dataset_floor_location
Values that DO change after dataset alignment:
  orientation ---- image.orientation (updates live during rotation; computed from the model's rotation angle and dataset_orientation, so if dataset_orientation is wrong, this comes out wrong)
  location ---- image.location (x/y are longitude/latitude)
  floor_location
------------------------------------------------
Global inspection:
var view = window.IV.getMainView()
view.currentImage.id
view.ImageService.images
view.DatasetRepository.dataMap

Split screen: enableSplitScreen
POI (points of interest): PoiService, PoiEditorDirective, PoiEntity
  t.prototype.isPreviewMenuVisible ---- canDisplayResultDetails --- PoiService.openedPoi - setOpenedPoi
MeasurementLineMaterial: measurement-line material; two states, a standard solid blue line and a transparent gray dashed line. For depthTexture see renderOffscreen.
Dataset alignment: saveAlignment = selectedDatasets; m2w_ stores the dataset's transform.
this.underlayScene.children[3] contains 32 child meshes and is the panorama sphere; its material's fragment shader is below. overlayScene holds the marker, name: "location"?
Point cloud: the last entry in this.scene.children, name: "PointCloudLayer"
LocationEntity: a pose/position? GeoTransformationService: this.TransformService.globalToLocal.transform converts coordinates; setLocalCoordinateSystem
Cropping: createCroppingJobDto
QuaternionFactory, VectorFactory
loadDepthImage loads the depth image; getDepth reads a depth value (used to update the reticule position). The depth image is used to modify the panorama sphere's gl_FragDepthEXT.
getCoordinates, doPointCloudPicking, doDepthImagePicking

View modes:
t.NORMAL = "normal", t.DATASET_ALIGNMENT = "datasetAlignment", t.GEO_REGISTRATION = "GeoRegistration", t.SITE_MODEL_EDITOR = "SiteModelEditor", t.NAV_GRAPH_EDITOR = "NavGraphEditor", t.DOWNLOAD_POINT_CLOUD = "DownloadPointCloud", t.MEASUREMENTS = "Measurements"

//-- The map and images on the map ------- keyword: mapSizeM
updateSubTiles updates map tiles, loading any that are missing.
// Image upload: https://testlaser.4dkankan.com/maxkk/t-iksBApb/locat/addDataSet.html

var QuaternionFactory = { // same as IndoorViewerAPI's QuaternionFactory.toArray
    toArray: function(quaternion) {
        var rot90 = new THREE.Quaternion().setFromAxisAngle(new THREE.Vector3(0, 0, 1), THREE.Math.degToRad(-90)) // added: rotate -90 degrees about z on the way in
          , rot90Invert = rot90.clone().inverse(); // added: rotate back on the way out
        var t1 = quaternion.clone().multiply(rot90Invert);
        var e = t1.toArray();
        return [e[3], e[0], e[1], e[2]]; // reorder [x,y,z,w] -> [w,x,y,z]
    }
}

// Get the rotation:
var getQuaternion = function(angle) { // angle: 0-360 degrees
    var quaternion = new THREE.Quaternion().setFromEuler(new THREE.Euler(0, 0, THREE.Math.degToRad(-angle)));
    return QuaternionFactory.toArray(quaternion);
}

// Get the scale:
var getSize = function(imgWidth, scale) { // imgWidth: image width; scale: scale factor (x == y)
    var level = imgWidth / 1024; // 1024 is the baseline
    return 95.54628610610962 * level * scale;
    // 95.54628610610962 = 38.21851444244385 * (2 + 0.5), where
    // 38.21851444244385 = mapSizeM / Math.pow(2, maxDepth) = 40075017 / Math.pow(2, 20),
    // probably the width of a single tile at zoom level 20.
    // The 0.5 was found by trial: the image layer has bias = 0.5; its purpose is unknown, so it was simply tried here...
    // Alternatively, maybe the factor isn't *2.5 but *256/100? Not sure how to test this precisely.
    // One error was seen where a 2048 image came out twice as large: the upload page, at scale 0.1
    // (i.e. the image displayed 1:1, in canvasFunction(extent, scale)), produced only 1024;
    // after refreshing and redoing it, it produced 2048 and the result was correct.
    // So the upload page was probably at fault.
}

// After switching to the images supplied by the algorithm team, this became:
var getSize = function(imgWidth, scale) { // imgWidth: image width, assumed square
    return imgWidth * 0.05; // because the image is 1 px = 0.05 m
}
// The position uses the longitude/latitude of the center point directly.
//-------------------------------------------------
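Back to QuaternionFactory above: toArray bakes the -90 degree z-rotation out of the quaternion and reorders the components to [w, x, y, z]. For reading stored values back (the same pattern appears in the 4dkk orientation snippet near the end of these notes), the reverse direction is useful. A minimal sketch; fromArray is a hypothetical helper name, not part of the IndoorViewerAPI:

// Hypothetical inverse of QuaternionFactory.toArray: reorder [w,x,y,z] -> (x,y,z,w)
// and re-apply the -90 degree z-rotation that toArray removed.
QuaternionFactory.fromArray = function(arr) { // arr: [w, x, y, z] as stored in the filter
    var rot90 = new THREE.Quaternion().setFromAxisAngle(new THREE.Vector3(0, 0, 1), THREE.Math.degToRad(-90));
    return new THREE.Quaternion(arr[1], arr[2], arr[3], arr[0]).multiply(rot90);
}

Round trip: fromArray(toArray(q)) should return q, since the rot90Invert applied on the way out cancels against the rot90 applied on the way in.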
decodeBitStream parses a quadtree string.
Input: "fccf7fffcff3bf7f"
Output:
0: {2: {…}, 3: {…}}
1: {2: {…}, 3: {…}}
2: {0: {…}, 1: {…}, 2: {…}, 3: {…}}
3: {0: {…}, 1: {…}, 2: {…}}

Meaning: the outer keys 0/1/2/3 mean the first level splits into four quadrants: top-left, top-right, bottom-left, bottom-right. Within the next level, quadrant 0 can again split into four, but here it only contains 2 and 3, i.e. bottom-left and bottom-right; and so on.

Parsing rule: take the characters one by one; each is a hex digit (call it o) whose bits say which of the four children 0/1/2/3 exist (e.g. quadrant 0 in the first output row only contains 2 and 3). The test is a bitwise AND: if (1 & o) is truthy the node has child 0, (2 & o) child 1, (4 & o) child 2, (8 & o) child 3. In this example, "fccf7fffcff3bf7f" has 16 characters: 1 for the outermost level + 4 for the second level + 11 for the third.

Code:
e.decodeBitStream = function(t) {
    for (var e = {}, n = [e], i = 0; i < t.length; i++) {
        var r = n.shift() // next node in breadth-first order
          , o = parseInt(t.substr(i, 1), 16); // current hex digit
        // decode which children this digit declares:
        if (1 & o) { var a = {}; r[0] = a, n.push(a) }
        if (2 & o) { a = {}; r[1] = a, n.push(a) }
        if (4 & o) { a = {}; r[2] = a, n.push(a) }
        if (8 & o) { a = {}; r[3] = a, n.push(a) }
    }
    return e // root of the decoded tree
}

What is needed now is the reverse algorithm: given the quadtree (built from an image), produce the string. A sketch follows.
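A minimal sketch of that reverse operation, assuming the same breadth-first layout as decodeBitStream: walk the tree level by level, emit one hex digit per node, and strip the trailing zeros the deepest (leaf) level would otherwise add, since decodeBitStream's input carries no digits for leaves. encodeBitStream is a name chosen here, not one that exists in the viewer:

var encodeBitStream = function(tree) {
    var out = "", queue = [tree];
    while (queue.length > 0) {
        var node = queue.shift(), o = 0; // next node in breadth-first order
        if (node[0]) { o |= 1; queue.push(node[0]); }
        if (node[1]) { o |= 2; queue.push(node[1]); }
        if (node[2]) { o |= 4; queue.push(node[2]); }
        if (node[3]) { o |= 8; queue.push(node[3]); }
        out += o.toString(16); // one hex digit per node
    }
    // decodeBitStream never reads digits for the deepest level,
    // so drop the trailing zeros those childless nodes produce
    return out.replace(/0+$/, "");
};

Round-tripping the example: encodeBitStream(decodeBitStream("fccf7fffcff3bf7f")) should return the original string, since both sides push children in the same 0/1/2/3 order.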
=======shader=======
Panorama fragment shader:

uniform sampler2D map;
uniform float opacity;
varying vec2 vUv;

#ifdef USE_ALPHAMAP
uniform sampler2D alphaMap;
#endif

#ifdef GL_EXT_frag_depth
uniform sampler2D depthMap;
uniform mat4 inverseProjectionMatrix;
uniform mat4 projectionMatrix;
uniform vec4 viewport;
#endif

void main() {
    vec4 color = texture2D(map, vUv);
    float alpha = opacity;
#ifdef USE_ALPHAMAP
    alpha *= texture2D(alphaMap, vUv).g;
#endif
    gl_FragColor = vec4(color.r, color.g, color.b, alpha);

#ifdef GL_EXT_frag_depth
    /*
     * Useful resources:
     *
     * https://www.khronos.org/opengl/wiki/Vertex_Post-Processing#Viewport_transform
     *   Clipping, perspective divide, viewport transform
     *
     * https://www.khronos.org/opengl/wiki/Compute_eye_space_from_window_space
     *   From window (viewport) space back to eye space in GLSL
     *
     * https://www.khronos.org/opengl/wiki/Vertex_Transformation
     *   Summary of transformations object -> world -> eye (camera, view) -> clip -> NDC -> window
     *
     * http://slideplayer.com/slide/6837153/#
     *   Overview presentation
     *
     * http://www.shaderific.com/glsl-variables/
     *   GLSL built-in variables
     */
    vec4 depth = texture2D(depthMap, vUv);
    //float distance = depth.r + 256. * (depth.g + 256. * depth.b);
    //distance *= 255. * .001; // distance is now in meters
    // Changed to:
    float distance = (depth.g + depth.r / 256.) * 255.;
    // Why multiply by 255? texture2D returns channels normalized to [0,1] (byte / 255),
    // so this recovers meters = byteG + byteR / 256, matching the JS side of getDepth:
    // return r[1] + r[0] / 256

    vec4 ndcPos;
    ndcPos.xy = ((2.0 * gl_FragCoord.xy) - (2.0 * viewport.xy)) / (viewport.zw) - 1.;
    ndcPos.z = (2.0 * gl_FragCoord.z - gl_DepthRange.near - gl_DepthRange.far) / (gl_DepthRange.far - gl_DepthRange.near);
    ndcPos.w = 1.0;
    vec4 clipPos = ndcPos / gl_FragCoord.w;
    vec4 eyePos = inverseProjectionMatrix * clipPos;

    distance += .1; // add a safety margin
    vec4 eyePos2 = vec4(normalize(eyePos.xyz) * distance, 1.);
    vec4 clipPos2 = projectionMatrix * eyePos2;
    vec4 ndcPos2 = clipPos2 / clipPos2.w;
    gl_FragDepthEXT = 0.5 * ((gl_DepthRange.far - gl_DepthRange.near) * ndcPos2.z + gl_DepthRange.near + gl_DepthRange.far);
#endif
}
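The same encoding read back on the JS side, following the getDepth comment above (return r[1] + r[0] / 256): the green byte holds whole meters and the red byte holds 1/256 fractions. A minimal sketch under that assumption; readDepthMeters and the ImageData plumbing are illustrative, not the viewer's actual getDepth:

// Illustrative only: decode one depth-image pixel to meters, assuming the
// g byte = whole meters and r byte = 1/256 fractions, which is exactly what
// the shader's (depth.g + depth.r / 256.) * 255. computes on normalized channels.
function readDepthMeters(imageData, x, y) { // imageData: from canvas getImageData()
    var i = (y * imageData.width + x) * 4; // RGBA, 4 bytes per pixel
    var r = imageData.data[i], g = imageData.data[i + 1]; // raw bytes 0-255
    return g + r / 256; // == (g/255 + (r/255)/256) * 255, as in the shader
}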
--------
MeasurementLineMaterial vertex shader:

attribute vec3 previous;
attribute vec3 next;
attribute float side;
attribute float width;
attribute float counters;

uniform vec2 resolution;
uniform float lineWidth;
uniform vec3 color;
uniform float opacity;
uniform float near;
uniform float far;
uniform float sizeAttenuation;
uniform vec3 dashColor;
uniform float dashOpacity;

varying vec2 vUV;
varying vec4 vColor;
varying vec4 vDashColor;
varying float vCounters;

vec2 fix(vec4 i, float aspect) {
    vec2 res = i.xy / i.w;
    res.x *= aspect;
    vCounters = counters;
    return res;
}

// This vertex shader is a copy of the one supplied by MeshLineMaterial.
// It supports drawing dashed lines.
void main() {
    float aspect = resolution.x / resolution.y;
    float pixelWidthRatio = 1.0 / (resolution.x * projectionMatrix[0][0]);

    vColor = vec4(color, opacity);
    vDashColor = vec4(dashColor, dashOpacity);
    vUV = uv;

    mat4 m = projectionMatrix * modelViewMatrix;
    vec4 finalPosition = m * vec4(position, 1.0);
    vec4 prevPos = m * vec4(previous, 1.0);
    vec4 nextPos = m * vec4(next, 1.0);

    vec2 currentP = fix(finalPosition, aspect);
    vec2 prevP = fix(prevPos, aspect);
    vec2 nextP = fix(nextPos, aspect);

    float pixelWidth = finalPosition.w * pixelWidthRatio;
    float w = 1.8 * pixelWidth * lineWidth * width;
    if (sizeAttenuation == 1.0) {
        w = 1.8 * lineWidth * width;
    }

    vec2 dir;
    if (nextP == currentP) {
        dir = normalize(currentP - prevP);
    } else if (prevP == currentP) {
        dir = normalize(nextP - currentP);
    } else {
        vec2 dir1 = normalize(currentP - prevP);
        vec2 dir2 = normalize(nextP - currentP);
        dir = normalize(dir1 + dir2);
        vec2 perp = vec2(-dir1.y, dir1.x);
        vec2 miter = vec2(-dir.y, dir.x);
    }
    vec2 normal = vec2(-dir.y, dir.x);
    normal.x /= aspect;
    normal *= .5 * w;

    vec4 offset = vec4(normal * side, 0.0, 1.0);
    finalPosition.xy += offset.xy;
    gl_Position = finalPosition;
}

--------
MeasurementLineMaterial fragment shader:

uniform sampler2D map;
uniform sampler2D alphaMap;
uniform float useMap;
uniform float useAlphaMap;
uniform float useDash;
uniform float dashArray;
uniform float dashOffset;
uniform float dashRatio;
uniform float visibility;
uniform float alphaTest;
uniform vec2 repeat;
uniform sampler2D depthTexture;
uniform sampler2D rgbaTexture;
uniform float nearPlane;
uniform float farPlane;
uniform float occlusionDistance;
uniform float clipDistance;
uniform vec2 viewportSize;
uniform vec2 viewportOffset;

varying vec2 vUV;
varying vec4 vColor;
varying vec4 vDashColor;
varying float vCounters;

// Converts the exponential depth value from the depth buffer to a linear value.
// See https://learnopengl.com/Advanced-OpenGL/Depth-testing for more information about this formula.
float convertToLinear(float zValue) {
    float z = zValue * 2.0 - 1.0;
    return (2.0 * nearPlane * farPlane) / (farPlane + nearPlane - z * (farPlane - nearPlane));
}

void main() {
    vec4 c = vDashColor;

    // <-- The following section of the shader is copied from MeshLineMaterial

    // Sample the fragment from a texture if such is supplied
    if (useMap == 1.0) {
        c *= texture2D(map, vUV * repeat);
    }
    // Sample the fragment's alpha value from an alpha texture if such is supplied
    if (useAlphaMap == 1.0) {
        c.a *= texture2D(alphaMap, vUV * repeat).a;
    }
    // Discard the fragment if below the alpha threshold
    if (c.a < alphaTest) {
        discard;
    }
    // If the line is dashed, set the alpha value of the fragment according to the line segment it belongs to
    if (useDash == 1.0) {
        c.a *= ceil(mod(vCounters + dashOffset, dashArray) - (dashArray * dashRatio));
    }
    // <-- end of copied code

#ifdef GL_EXT_frag_depth
    // mixFactor and clipFactor define the color mixing proportion between the states of
    // full visibility and occluded visibility
    // and
    // full visibility and total invisibility
    float mixFactor = 0.0;
    float clipFactor = 0.0;

    // The linear depth value of the current fragment
    float fragDepth = convertToLinear(gl_FragCoord.z);
    // The coordinates of the current fragment in the depth texture
    vec2 depthTxtCoords = vec2(gl_FragCoord.x - viewportOffset.x, gl_FragCoord.y) / viewportSize;
    // The linear depth value of the pixel occupied by this fragment in the depth buffer
    float textureDepth = convertToLinear(texture2D(depthTexture, depthTxtCoords).r);
    // The difference between the two depths
    float delta = textureDepth - fragDepth;

    if (delta < 0.0) {
        // occlusionDistance and clipDistance define the width of the respective zones, and
        // mixFactor and clipFactor express the interpolation between the two colors depending
        // on the position of the current fragment within those zones.
        mixFactor = clamp(delta / occlusionDistance, 0.0, 1.0);
        clipFactor = clamp(delta / clipDistance, 0.0, 1.0);
    }
    // If the fragment is totally transparent, don't bother drawing it
    if (clipFactor == 1.0) {
        discard;
    }
#else
    float mixFactor = 0.0;
    float clipFactor = 0.0;
#endif

    // Calculate the color of the dashed version of the line
    vec4 backColor = vec4(c.rgb, c.a * step(vCounters, visibility));
    // Mix between the solid and the dashed versions of the line according to the mixFactor
    gl_FragColor = mix(vColor, backColor, mixFactor);
    // Set the alpha value of the fragment according to the clipFactor
    // Note that clipFactor was previously clamped to [0.0, 1.0]
    gl_FragColor.a *= (1.0 - clipFactor);
}

/////////////////////////////////////
Reference: applyRotationToDataset, applyTranslationToDataset.
Given oldOrientation / oldLocation and newOrientation / newLocation:

var getTransfromMatrix = function(orientation, location) { // args: rotation angle about z, and translation vector
    var a1 = Math.cos(orientation), a2 = Math.sin(orientation);
    var mat = new THREE.Matrix4();
    // elements is column-major: z-rotation in the upper-left 2x2...
    mat.elements[0] = a1, mat.elements[1] = a2, mat.elements[4] = -a2, mat.elements[5] = a1;
    // ...and the translation in the last column
    mat.elements[12] = location.x;
    mat.elements[13] = location.y;
    mat.elements[14] = location.z;
    return mat;
}

var oldMatrix = getTransfromMatrix(oldOrientation, oldLocation);
var newMatrix = getTransfromMatrix(newOrientation, newLocation);
var oldMatrixInverse = new THREE.Matrix4().getInverse(oldMatrix);
var diffMatrix = new THREE.Matrix4().multiplyMatrices(oldMatrixInverse, newMatrix); // delta matrix relative to the previous transform

var newPoint = oldPoint.applyMatrix4(diffMatrix); // update every checkerboard vertex this way
// If modifying relative to the initial checkerboard instead, drop oldMatrix / diffMatrix
// and apply newMatrix to oldPoint directly.
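A quick self-contained sanity check of the delta matrix, with made-up values. It relies only on the identity oldMatrix * diffMatrix = newMatrix, which holds by construction since diffMatrix = oldMatrix^-1 * newMatrix:

// Illustrative check for getTransfromMatrix / diffMatrix (values invented):
// applying diffMatrix and then oldMatrix must equal applying newMatrix directly.
var oldMatrix = getTransfromMatrix(0.3, new THREE.Vector3(2, 1, 0));
var newMatrix = getTransfromMatrix(1.2, new THREE.Vector3(10, 5, 0));
var diffMatrix = new THREE.Matrix4().multiplyMatrices(
    new THREE.Matrix4().getInverse(oldMatrix), newMatrix);

var p = new THREE.Vector3(1, 2, 0);
var a = p.clone().applyMatrix4(diffMatrix).applyMatrix4(oldMatrix);
var b = p.clone().applyMatrix4(newMatrix);
console.log(a, b); // expected to match up to floating-point error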
============ Dataset initial alignment

var fakePositions = [
    { x: -1.208132028579712, y: 0.04820600152015686, z: -2.257599115371704 },
    { x: 1.6327489614486694, y: 0.056550998240709305, z: -2.1368539333343506 },
    { x: 0.05691400170326233, y: 0.04810800030827522, z: 0.97919100522995 },
    { x: -0.5570799708366394, y: 0.04639599844813347, z: 3.0515389442443848 }
]
var realPositions = [
    { x: 458249.577950831, y: 2474529.667443291 },
    { x: 458247.51758545433, y: 2474531.6324389814 },
    { x: 458250.7569026919, y: 2474532.9341176464 },
    { x: 458252.6196984933, y: 2474534.0980041157 }
] // the correct (surveyed) positions

fakePositions = fakePositions.map(e => new THREE.Vector3(e.x, -e.z, 0)) // flatten into the map plane; note y = -z
realPositions = realPositions.map(e => new THREE.Vector3(e.x, e.y, 0))

var moveVec = new THREE.Vector3().subVectors(realPositions[0], fakePositions[0]) // translation vector
var vec1 = new THREE.Vector3().subVectors(fakePositions[0], fakePositions[1]) // old direction vector
var vec2 = new THREE.Vector3().subVectors(realPositions[0], realPositions[1]) // new direction vector
var angle = vec1.angleTo(vec2)
if (vec1.clone().cross(vec2).z < 0) angle *= -1 // unsure whether this should be < 0 or > 0; see the atan2 sketch at the end of this section

var matrix = new THREE.Matrix4().setPosition(moveVec.clone().sub(realPositions[0])) // net effect: translate by -fakePositions[0]
var rotateMatrix = new THREE.Matrix4().makeRotationAxis(new THREE.Vector3(0, 0, 1), angle)
matrix.premultiply(rotateMatrix)
var moveBackMatrix = new THREE.Matrix4().setPosition(realPositions[0])
matrix.premultiply(moveBackMatrix)

var pos = fakePositions.map(e => e.clone().applyMatrix4(matrix))

========== 4dkk: converting with the given longitude/latitude control points; deriving the filter orientation value

var rot90 = new THREE.Quaternion().setFromAxisAngle(new THREE.Vector3(0, 0, 1), THREE.Math.degToRad(-90))
var rot90Invert = rot90.clone().inverse()
a.forEach(e => {
    var t = e.dataset_orientation
    var u = new THREE.Quaternion(t[1], t[2], t[3], t[0]).multiply(rot90) // [w,x,y,z] -> THREE.Quaternion, undoing toArray
    var ee = new THREE.Quaternion().setFromAxisAngle(new THREE.Vector3(0, 0, 1), 2.4223594240392305) // alignment rotation (radians)
    u.multiply(ee)
    var u1 = u.clone().multiply(rot90Invert)
    var e2 = u1.toArray()
    e.orientation = [e2[3], e2[0], e2[1], e2[2]] // back to [w,x,y,z] storage order
})

Note: the longitude/latitude obtained now does not necessarily line up exactly with 4dkankan; there is some error. The main problem probably lies in the computed translation and in the dataset-local coordinates of the walkthrough points.
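To remove the sign ambiguity flagged in the alignment block above: the signed angle that rotates vec1 onto vec2 about +z can be computed directly with atan2, making the cross-product sign test unnecessary. A minimal sketch under the same setup (both vectors lie in the z = 0 plane); signedAngle is a name chosen here:

// Signed angle from vec1 to vec2 about the +z axis, for vectors in the z = 0 plane.
// cross.z carries the sine of the signed angle and dot the cosine, so atan2 of the
// two gives the angle with the correct sign in one step. This agrees with the
// angleTo() value flipped when cross.z < 0.
var signedAngle = function(vec1, vec2) {
    return Math.atan2(vec1.clone().cross(vec2).z, vec1.dot(vec2));
};
// Drop-in replacement for the angleTo / sign-flip pair above:
var angle = signedAngle(vec1, vec2);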