笔记.js

dataset's boundingBox is cloud's tightBoundingBox
///site_model contains the dataset information
///orientation in datasets is the rotation angle of the whole model around the z-axis, initially 0
Organization:
Dataset
-Building
-Floor
-Room or Location:
///In filter:
Values that stay unchanged after dataset alignment:
dataset_orientation (always stores the quaternion of the very first position; it does not change during rotation, so it equals the orientation with no rotation applied) ---- image.datasetOrientation
dataset_floor_orientation (usually the same value as dataset_orientation)
dataset_location: the real 3D coordinates
dataset_floor_location
Values that change after dataset alignment:
orientation ---- image.orientation (updated in real time during rotation; computed from the model's rotation angle and dataset_orientation, so if dataset_orientation is wrong the result is wrong; see the sketch after this list)
location ---- image.location, where x/y are longitude/latitude
floor_location
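A minimal sketch of that orientation computation, lifted from the 4dkk snippet at the end of these notes (the -90° conjugation matches QuaternionFactory.toArray below); orientationFromDataset and modelAngleRad are names invented here:
var orientationFromDataset = function(datasetOrientation, modelAngleRad) { //datasetOrientation: [w, x, y, z] as stored in filter
    var rot90 = new THREE.Quaternion().setFromAxisAngle(new THREE.Vector3(0, 0, 1), THREE.Math.degToRad(-90));
    var u = new THREE.Quaternion(datasetOrientation[1], datasetOrientation[2], datasetOrientation[3], datasetOrientation[0]).multiply(rot90);
    u.multiply(new THREE.Quaternion().setFromAxisAngle(new THREE.Vector3(0, 0, 1), modelAngleRad)); //apply the model rotation
    var e = u.multiply(rot90.clone().inverse()).toArray(); //[x, y, z, w]
    return [e[3], e[0], e[1], e[2]]; //back to [w, x, y, z]
}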
------------------------------------------------
Global inspection:
var view = window.IV.getMainView()
view.currentImage.id
view.ImageService.images
view.DatasetRepository.dataMap
Split screen:
enableSplitScreen
POI (points of interest): PoiService PoiEditorDirective PoiEntity
t.prototype.isPreviewMenuVisible ---- canDisplayResultDetails --- PoiService.openedPoi - setOpenedPoi
MeasurementLineMaterial: the measurement-line material; it has two states, a standard blue solid line and a gray transparent dashed line. For depthTexture see renderOffscreen
Dataset alignment: saveAlignment = selectedDatasets; m2w_ stores the dataset transform
this.underlayScene.children[3] contains 32 child meshes and is the panorama sphere; its fragment shader is further below
overlayScene contains a marker, name: "location"?
Point cloud: the last entry in this.scene.children, name: "PointCloudLayer"
LocationEntity is a position? GeoTransformationService: this.TransformService.globalToLocal.transform converts coordinates; setLocalCoordinateSystem
Cropping: createCroppingJobDto
QuaternionFactory VectorFactory
loadDepthImage loads a depth image, getDepth reads a depth value (used to update the reticule position); the depth image is used to modify the panorama sphere's gl_FragDepthEXT (sketch below)
getCoordinates doPointCloudPicking doDepthImagePicking
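A hedged sketch of reading a depth value back in JS, matching the decode used in the panorama fragment shader further below ((depth.g + depth.r / 256.) * 255., i.e. G + R/256 in byte values, in meters); getDepthAt is a name invented here, the real getDepth lives in the viewer source:
var getDepthAt = function(imageData, x, y) { //imageData: an ImageData of the loaded depth image
    var i = (y * imageData.width + x) * 4;
    var R = imageData.data[i], G = imageData.data[i + 1];
    return G + R / 256; //meters
}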
t.NORMAL = "normal",
t.DATASET_ALIGNMENT = "datasetAlignment",
t.GEO_REGISTRATION = "GeoRegistration",
t.SITE_MODEL_EDITOR = "SiteModelEditor",
t.NAV_GRAPH_EDITOR = "NavGraphEditor",
t.DOWNLOAD_POINT_CLOUD = "DownloadPointCloud",
t.MEASUREMENTS = "Measurements"
//--About the map and the images on the map------- keyword: mapSizeM
updateSubTiles updates map tiles, loading any that do not exist yet
//Image upload: https://testlaser.4dkankan.com/maxkk/t-iksBApb/locat/addDataSet.html
var QuaternionFactory = { // same as IndoorViewerAPI's QuaternionFactory.toArray
    toArray: function(quaternion) {
        var rot90 = (new THREE.Quaternion).setFromAxisAngle(new THREE.Vector3(0, 0, 1), THREE.Math.degToRad(-90)) //added: rotate -90° on the way in
          , rot90Invert = rot90.clone().inverse() //added: rotate back +90° on the way out
        var t1 = quaternion.clone().multiply(rot90Invert);
        var e = t1.toArray();
        return [e[3], e[0], e[1], e[2]] //[x, y, z, w] -> [w, x, y, z]
    }
}
//Get rotation:
var getQuaternion = function(angle) { //angle: 0-360, in degrees
    var quaternion = new THREE.Quaternion().setFromEuler(new THREE.Euler(0, 0, THREE.Math.degToRad(-angle)));
    return QuaternionFactory.toArray(quaternion)
}
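Hand-computed sanity check (assuming THREE's quaternion conventions): getQuaternion(90) builds rot(-90°) about z, and toArray then multiplies by the +90° inverse; the two z-rotations cancel, so the result is the identity [1, 0, 0, 0].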
//Get scale
var getSize = function(imgWidth, scale) { //imgWidth: image width; scale: scale factor (x == y)
    var level = imgWidth / 1024; //1024 is the baseline
    return 95.54628610610962 * level * scale; // 95.54628610610962 = 38.21851444244385 * (2 + 0.5), where 38.21851444244385 = mapSizeM / Math.pow(2, maxDepth) = 40075017 / Math.pow(2, 20), probably the width of a single tile at zoom level 20
    //The 0.5 was found by trial and error: the image layer's bias = 0.5, and since I don't know what it does I just tried it...
    //Alternatively it may not be *2.5; perhaps *256/100? Not sure how to test this precisely.
    //One error showed up where a 2048 image came out twice as large; it turned out the upload page, at a scale of 0.1 (i.e. the image shown 1:1, in canvasFunction(extent, scale)), only produced 1024. After refreshing and redoing it, it was 2048 and correct, so the bug was probably in that page.
}
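Worked example: for a 2048 px image at scale 1, getSize(2048, 1) = 95.54628610610962 * 2 ≈ 191.09 m on the map.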
//After switching to the images provided by the algorithm team, this became:
var getSize = function(imgWidth, scale) { //imgWidth: image width; the image is assumed to be square
    return imgWidth * 0.05; //because the image is 1 px = 0.05 m
}
//For the position, use the longitude/latitude of the center point directly
//-------------------------------------------------
decodeBitStream parses a quadtree string
Input: "fccf7fffcff3bf7f"
Output:
0: {2: {…}, 3: {…}}
1: {2: {…}, 3: {…}}
2: {0: {…}, 1: {…}, 2: {…}, 3: {…}}
3: {0: {…}, 1: {…}, 2: {…}}
Meaning:
The outermost keys 0/1/2/3 are the four tiles the first level splits into: top-left, top-right, bottom-left, bottom-right. The 0 in the child layer can again split into four tiles, but here it only contains 2 and 3, i.e. bottom-left and bottom-right, and so on recursively.
Parsing rules:
Take each character in turn; it is a hex digit (call it o) whose bits say which of the children 0/1/2/3 exist (e.g. the 0 in the first output row only contains 2 and 3). The test is a bitwise AND:
if 1 & o is nonzero the node has child 0, if 2 & o is nonzero it has child 1, if 4 & o child 2, if 8 & o child 3.
In this example "fccf7fffcff3bf7f" has 16 characters: 1 for the outermost level plus 4 for the second level plus 11 for the third level.
Code:
e.decodeBitStream = function(t) {
    for (var e = {}, n = [e], i = 0; i < t.length; i++) {
        var r = n.shift() //take the next node off the breadth-first queue
          , o = parseInt(t.substr(i, 1), 16); //one hex character -> number
        //interpret the digit's bits:
        if (1 & o) {
            var a = {}; //new child node
            r[0] = a,
            n.push(a)
        }
        if (2 & o) {
            a = {};
            r[1] = a,
            n.push(a)
        }
        if (4 & o) {
            a = {};
            r[2] = a,
            n.push(a)
        }
        if (8 & o) {
            a = {};
            r[3] = a,
            n.push(a)
        }
    }
    return e //without returning the root the function is useless; presumably the minified original does this
}
Now we need the reverse algorithm: given the quadTree (built from an image), produce the string.
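A sketch of that reverse function (encodeBitStream is a name invented here, not from the viewer source): walk the tree in the same breadth-first order and child order 0-3 that decodeBitStream uses, set bit k of a digit when child k exists, emit one hex character per visited node, and strip the trailing zeros that childless leaf nodes would otherwise append:
var encodeBitStream = function(root) {
    var out = "", queue = [root];
    while (queue.length) {
        var node = queue.shift(), o = 0;
        for (var k = 0; k < 4; k++) {
            if (node[k]) {
                o |= 1 << k; //bits 1/2/4/8 mark children 0/1/2/3, mirroring decode
                queue.push(node[k]);
            }
        }
        out += o.toString(16);
    }
    return out.replace(/0+$/, ""); //trailing leaves produce '0' digits the original strings omit
}
With decodeBitStream patched to return its root as above, encodeBitStream(e.decodeBitStream("fccf7fffcff3bf7f")) should round-trip back to "fccf7fffcff3bf7f".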
=======shader=======
Panorama sphere fragment shader
uniform sampler2D map;
uniform float opacity;
varying vec2 vUv;
#ifdef USE_ALPHAMAP
uniform sampler2D alphaMap;
#endif
#ifdef GL_EXT_frag_depth
uniform sampler2D depthMap;
uniform mat4 inverseProjectionMatrix;
uniform mat4 projectionMatrix;
uniform vec4 viewport;
#endif
void main()
{
    vec4 color = texture2D(map, vUv);
    float alpha = opacity;
#ifdef USE_ALPHAMAP
    alpha *= texture2D(alphaMap, vUv).g;
#endif
    gl_FragColor = vec4(color.r, color.g, color.b, alpha);
#ifdef GL_EXT_frag_depth
    /*
     * Useful resources:
     *
     * https://www.khronos.org/opengl/wiki/Vertex_Post-Processing#Viewport_transform
     * Clipping, perspective divide, viewport transform
     *
     * https://www.khronos.org/opengl/wiki/Compute_eye_space_from_window_space
     * From window (viewport) space back to eye space in GLSL
     *
     * https://www.khronos.org/opengl/wiki/Vertex_Transformation
     * Summary of transformations object -> world -> eye (camera, view) -> clip -> NDC -> window
     *
     * http://slideplayer.com/slide/6837153/#
     * Overview presentation
     *
     * http://www.shaderific.com/glsl-variables/
     * GLSL built-in variables
     */
    vec4 depth = texture2D(depthMap, vUv);
    //float distance = depth.r + 256. * (depth.g + 256. * depth.b);
    //distance *= 255. * .001; // distance is now in meters
    //changed to:
    float distance = (depth.g + depth.r / 256.) * 255.; //why multiply by 255?
    // return r[1] + r[0] / 256
    vec4 ndcPos;
    ndcPos.xy = ((2.0 * gl_FragCoord.xy) - (2.0 * viewport.xy)) / (viewport.zw) - 1.;
    ndcPos.z = (2.0 * gl_FragCoord.z - gl_DepthRange.near - gl_DepthRange.far) /
        (gl_DepthRange.far - gl_DepthRange.near);
    ndcPos.w = 1.0;
    vec4 clipPos = ndcPos / gl_FragCoord.w;
    vec4 eyePos = inverseProjectionMatrix * clipPos;
    distance += .1; // add a safety margin
    vec4 eyePos2 = vec4(normalize(eyePos.xyz) * distance, 1.);
    vec4 clipPos2 = projectionMatrix * eyePos2;
    vec4 ndcPos2 = clipPos2 * 1. / clipPos2.w;
    gl_FragDepthEXT = 0.5 * ((gl_DepthRange.far - gl_DepthRange.near) * ndcPos2.z
        + gl_DepthRange.near + gl_DepthRange.far);
#endif
}
--------
MeasurementLineMaterial vertex
attribute vec3 previous;
attribute vec3 next;
attribute float side;
attribute float width;
attribute float counters;
uniform vec2 resolution;
uniform float lineWidth;
uniform vec3 color;
uniform float opacity;
uniform float near;
uniform float far;
uniform float sizeAttenuation;
uniform vec3 dashColor;
uniform float dashOpacity;
varying vec2 vUV;
varying vec4 vColor;
varying vec4 vDashColor;
varying float vCounters;
vec2 fix(vec4 i, float aspect)
{
    vec2 res = i.xy / i.w;
    res.x *= aspect;
    vCounters = counters;
    return res;
}
// This vertex shader is a copy of the one supplied by MeshLineMaterial.
// It supports drawing dashed lines.
void main()
{
    float aspect = resolution.x / resolution.y;
    float pixelWidthRatio = 1.0 / (resolution.x * projectionMatrix[0][0]);
    vColor = vec4(color, opacity);
    vDashColor = vec4(dashColor, dashOpacity);
    vUV = uv;
    mat4 m = projectionMatrix * modelViewMatrix;
    vec4 finalPosition = m * vec4(position, 1.0);
    vec4 prevPos = m * vec4(previous, 1.0);
    vec4 nextPos = m * vec4(next, 1.0);
    vec2 currentP = fix(finalPosition, aspect);
    vec2 prevP = fix(prevPos, aspect);
    vec2 nextP = fix(nextPos, aspect);
    float pixelWidth = finalPosition.w * pixelWidthRatio;
    float w = 1.8 * pixelWidth * lineWidth * width;
    if (sizeAttenuation == 1.0)
    {
        w = 1.8 * lineWidth * width;
    }
    vec2 dir;
    if (nextP == currentP)
    {
        dir = normalize(currentP - prevP);
    }
    else if (prevP == currentP)
    {
        dir = normalize(nextP - currentP);
    }
    else
    {
        vec2 dir1 = normalize(currentP - prevP);
        vec2 dir2 = normalize(nextP - currentP);
        dir = normalize(dir1 + dir2);
        vec2 perp = vec2(-dir1.y, dir1.x); // perp and miter are computed but never used,
        vec2 miter = vec2(-dir.y, dir.x);  // left over from the MeshLineMaterial copy
    }
    vec2 normal = vec2(-dir.y, dir.x);
    normal.x /= aspect;
    normal *= .5 * w;
    vec4 offset = vec4(normal * side, 0.0, 1.0);
    finalPosition.xy += offset.xy;
    gl_Position = finalPosition;
}
--------
MeasurementLineMaterial fragment
uniform sampler2D map;
uniform sampler2D alphaMap;
uniform float useMap;
uniform float useAlphaMap;
uniform float useDash;
uniform float dashArray;
uniform float dashOffset;
uniform float dashRatio;
uniform float visibility;
uniform float alphaTest;
uniform vec2 repeat;
uniform sampler2D depthTexture;
uniform sampler2D rgbaTexture;
uniform float nearPlane;
uniform float farPlane;
uniform float occlusionDistance;
uniform float clipDistance;
uniform vec2 viewportSize;
uniform vec2 viewportOffset;
varying vec2 vUV;
varying vec4 vColor;
varying vec4 vDashColor;
varying float vCounters;
// Converts the exponential depth value from the depth buffer to a linear value
// See https://learnopengl.com/Advanced-OpenGL/Depth-testing for more information about this formula
float convertToLinear(float zValue)
{
    float z = zValue * 2.0 - 1.0;
    return (2.0 * nearPlane * farPlane) / (farPlane + nearPlane - z * (farPlane - nearPlane));
}
void main()
{
    vec4 c = vDashColor;
    // <-- The following section of the shader is copied from MeshLineMaterial
    // Sample the fragment from a texture if such is supplied
    if (useMap == 1.0)
    {
        c *= texture2D(map, vUV * repeat);
    }
    // Sample the fragment's alpha value from an alpha texture if such is supplied
    if (useAlphaMap == 1.0)
    {
        c.a *= texture2D(alphaMap, vUV * repeat).a;
    }
    // Discard the fragment if below the alpha threshold
    if (c.a < alphaTest)
    {
        discard;
    }
    // If the line is dashed, set the alpha value of the fragment according to the line segment it belongs to
    if (useDash == 1.0)
    {
        c.a *= ceil(mod(vCounters + dashOffset, dashArray) - (dashArray * dashRatio));
    }
    // <-- end of copied code
#ifdef GL_EXT_frag_depth
    // mixFactor and clipFactor define the color mixing proportion between the states of
    // full visibility and occluded visibility
    // and
    // full visibility and total invisibility
    float mixFactor = 0.0;
    float clipFactor = 0.0;
    // The linear depth value of the current fragment
    float fragDepth = convertToLinear(gl_FragCoord.z);
    // The coordinates of the current fragment in the depth texture
    vec2 depthTxtCoords = vec2(gl_FragCoord.x - viewportOffset.x, gl_FragCoord.y) / viewportSize;
    // The linear depth value of the pixel occupied by this fragment in the depth buffer
    float textureDepth = convertToLinear(texture2D(depthTexture, depthTxtCoords).r);
    // The difference between the two depths
    float delta = textureDepth - fragDepth;
    if (delta < 0.0)
    {
        // occlusionDistance and clipDistance define the width of the respective zones and
        // mixFactor and clipFactor express the interpolation between the two colors depending on the position
        // of the current fragment within those zones.
        mixFactor = clamp(delta / occlusionDistance, 0.0, 1.0);
        clipFactor = clamp(delta / clipDistance, 0.0, 1.0);
    }
    // If the fragment is totally transparent, don't bother drawing it
    if (clipFactor == 1.0)
    {
        discard;
    }
#else
    float mixFactor = 0.0;
    float clipFactor = 0.0;
#endif
    // Calculate the color of the dashed version of the line
    vec4 backColor = vec4(c.rgb, c.a * step(vCounters, visibility));
    // Mix between the solid and the dashed versions of the line according to the mixFactor
    gl_FragColor = mix(vColor, backColor, mixFactor);
    // Set the alpha value of the fragment according to the clipFactor
    // Note that clipFactor was previously clamped [0.0;1.0]
    gl_FragColor.a *= (1.0 - clipFactor);
}
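A JS mirror of convertToLinear for quick console checks (the nearPlane/farPlane values below are made-up examples):
var convertToLinear = function(zValue, nearPlane, farPlane) {
    var z = zValue * 2 - 1; //window-space depth [0,1] -> NDC [-1,1]
    return (2 * nearPlane * farPlane) / (farPlane + nearPlane - z * (farPlane - nearPlane));
}
//convertToLinear(0, 0.1, 1000) = 0.1 (near plane), convertToLinear(1, 0.1, 1000) = 1000 (far plane)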
/////////////////////////////////////
See applyRotationToDataset and applyTranslationToDataset
Given:
oldOrientation, oldLocation
newOrientation, newLocation
var getTransformMatrix = function(orientation, location) { //params: rotation angle (radians) and translation vector
    var a1 = Math.cos(orientation), a2 = Math.sin(orientation);
    var mat = new THREE.Matrix4();
    mat.elements[0] = a1, //rotation about z in the upper-left 2x2
    mat.elements[1] = a2,
    mat.elements[4] = -a2,
    mat.elements[5] = a1
    mat.elements[12] = location.x; //translation in the last column
    mat.elements[13] = location.y;
    mat.elements[14] = location.z;
    return mat
}
var oldMatrix = getTransformMatrix(oldOrientation, oldLocation)
var newMatrix = getTransformMatrix(newOrientation, newLocation)
var oldMatrixInverse = new THREE.Matrix4().getInverse(oldMatrix)
var diffMatrix = new THREE.Matrix4().multiplyMatrices(oldMatrixInverse, newMatrix) //the delta matrix relative to the previous transform
var newPoint = oldPoint.applyMatrix4(diffMatrix) //apply to every checkerboard vertex (note applyMatrix4 mutates oldPoint)
//If modifying from the initial checkerboard instead, drop oldMatrix and diffMatrix and apply newMatrix to oldPoint directly
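Sanity check for that last remark (made-up numbers): with an identity old transform, diffMatrix reduces to newMatrix.
var oldM = getTransformMatrix(0, new THREE.Vector3(0, 0, 0)) //identity
var newM = getTransformMatrix(Math.PI / 2, new THREE.Vector3(5, 2, 0))
var diff = new THREE.Matrix4().multiplyMatrices(new THREE.Matrix4().getInverse(oldM), newM)
//diff.equals(newM) === true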
============
Dataset initial alignment
var fakePositions = [{
    x: -1.208132028579712,
    y: 0.04820600152015686,
    z: -2.257599115371704,
}, {
    x: 1.6327489614486694,
    y: 0.056550998240709305,
    z: -2.1368539333343506,
}, {
    x: 0.05691400170326233,
    y: 0.04810800030827522,
    z: 0.97919100522995,
}, {
    x: -0.5570799708366394,
    y: 0.04639599844813347,
    z: 3.0515389442443848,
}]
var realPositions = [
    { x: 458249.577950831, y: 2474529.667443291 },
    { x: 458247.51758545433, y: 2474531.6324389814 },
    { x: 458250.7569026919, y: 2474532.9341176464 },
    { x: 458252.6196984933, y: 2474534.0980041157 }
] //the ground-truth positions
fakePositions = fakePositions.map(e => {
    return new THREE.Vector3(e.x, -e.z, 0); //project the viewer-local coordinates onto the map plane
})
realPositions = realPositions.map(e => {
    return new THREE.Vector3(e.x, e.y, 0);
})
var moveVec = new THREE.Vector3().subVectors(realPositions[0], fakePositions[0]) //translation vector
var vec1 = new THREE.Vector3().subVectors(fakePositions[0], fakePositions[1]) //the old direction vector
var vec2 = new THREE.Vector3().subVectors(realPositions[0], realPositions[1]) //the new direction vector
var angle = vec1.angleTo(vec2)
if (vec1.clone().cross(vec2).z < 0) angle *= -1 //not sure whether this should be < 0 or > 0
var matrix = new THREE.Matrix4().setPosition(moveVec.clone().sub(realPositions[0])) //moveVec - realPositions[0] = -fakePositions[0]: move fakePositions[0] to the origin
var rotateMatrix = new THREE.Matrix4().makeRotationAxis(new THREE.Vector3(0, 0, 1), angle);
matrix.premultiply(rotateMatrix) //rotate about the origin
var moveBackMatrix = new THREE.Matrix4().setPosition(realPositions[0])
matrix.premultiply(moveBackMatrix) //move the anchor to realPositions[0]: p -> R(p - fake[0]) + real[0]
var pos = fakePositions.map(e => {
    return e.clone().applyMatrix4(matrix)
})
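Sanity check: pos[0] must land exactly on realPositions[0] (it is the anchor of the rigid transform), and pos[1..3] should come out close to realPositions[1..3]; the residuals show whatever scale or distortion a pure rotation plus translation cannot absorb.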
==========
4dkk conversion from the given longitude/latitude control points; how the filter orientation value is derived
var rot90 = (new THREE.Quaternion).setFromAxisAngle(new THREE.Vector3(0, 0, 1), THREE.Math.degToRad(-90))
var rot90Invert = rot90.clone().inverse()
a.forEach(e => { //a: the array of filter entries
    var t = e.dataset_orientation
    var u = new THREE.Quaternion(t[1], t[2], t[3], t[0]).multiply(rot90); //[w, x, y, z] -> THREE order
    var ee = new THREE.Quaternion().setFromAxisAngle(new THREE.Vector3(0, 0, 1), 2.4223594240392305); //the alignment rotation, in radians
    u.multiply(ee)
    var u1 = u.clone().multiply(rot90Invert);
    var e2 = u1.toArray();
    e.orientation = [e2[3], e2[0], e2[1], e2[2]] //back to [w, x, y, z]
})
Note: the current longitude/latitude does not necessarily line up exactly with 4DKanKan; there is some error, and the main problem is probably in the computed translation and in the dataset-local coordinates of the walkthrough points.