diff --git a/docs/.gitkeep b/docs/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/docs/docs.md b/docs/docs.md
new file mode 100644
index 0000000..2ee6fa7
--- /dev/null
+++ b/docs/docs.md
@@ -0,0 +1,29 @@
+---
+slug: '/'
+sidebar_class_name: hidden
+title: Docs
+description: Documentation
+---
+
+# Documentation
+
+8th Wall provides the complete solution to create WebAR, WebVR experiences and 3D games that run directly in a web browser.
+
+## 8th Wall Studio {#studio}
+
+8th Wall Studio is a real-time visual editor and game engine that combines the speed and power of the web with the tools needed to create hyper-immersive 3D and XR experiences. Studio has a visual 3D editor interface to create XR and web games across devices.
+
+[Studio Manual](/docs/studio)
+
+[Studio API](/docs/studio/api)
+
+## 8th Wall Engine {#engine}
+
+The 8th Wall AR Engine is a complete implementation of 8th Wall's Simultaneous Localization and Mapping (SLAM) engine, hyper-optimized for real-time WebAR on browsers. AR features include World Tracking, Image Targets, Face Effects,
+and Sky Segmentation.
+
+The engine is built-in to Studio projects, and is also easily integrated into modern 3D JavaScript frameworks such as [A-Frame](https://aframe.io), [three.js](https://threejs.org), [PlayCanvas](https://playcanvas.com), and [Babylon.js](https://www.babylonjs.com).
+
+[Engine Manual](/docs/engine)
+
+[Engine API](/docs/engine/api)
diff --git a/docs/engine/_category_.json b/docs/engine/_category_.json
new file mode 100644
index 0000000..463b3c5
--- /dev/null
+++ b/docs/engine/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "8th Wall Engine",
+ "position": 4
+}
diff --git a/docs/engine/api/_category_.json b/docs/engine/api/_category_.json
new file mode 100644
index 0000000..f444857
--- /dev/null
+++ b/docs/engine/api/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "API Reference",
+ "position": 3
+}
diff --git a/docs/engine/api/aframe/_category_.json b/docs/engine/api/aframe/_category_.json
new file mode 100644
index 0000000..f1321da
--- /dev/null
+++ b/docs/engine/api/aframe/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "AFrame",
+ "position": 3
+}
diff --git a/docs/engine/api/aframe/aframe.md b/docs/engine/api/aframe/aframe.md
new file mode 100644
index 0000000..3e95b98
--- /dev/null
+++ b/docs/engine/api/aframe/aframe.md
@@ -0,0 +1,72 @@
+# XR8.AFrame
+
+[A-Frame](https://aframe.io) is a web framework designed for building virtual reality experiences.
+By adding 8th Wall Web to your A-Frame project, you can now easily build **augmented reality**
+experiences for the web.
+
+## Components
+Component | Description
+--------- | -----------
+[xrconfig](/docs/engine/api/aframe/xrconfig) | Used to configure the camera feed
+[xrweb](/docs/engine/api/aframe/xrweb) | Used to configure world tracking
+[xrface](/docs/engine/api/aframe/xrface) | Used to configure face effects
+[xrlayers](/docs/engine/api/aframe/xrlayers) | Used to configure sky effects
+[xrlayerscene](/docs/engine/api/aframe/xrlayers) | Used to configure sky effects
+
+## Functions
+
+Function | Description
+-------- | -----------
+[xrconfigComponent](/docs/engine/api/aframe/xrconfig#xrconfigcomponent) | Creates an A-Frame component for configuring the camera which can be registered with `AFRAME.registerComponent()`. Generally won't need to be called directly.
+[xrwebComponent](/docs/engine/api/aframe/xrweb#xrwebcomponent) | Creates an A-Frame component for World Tracking and/or Image Target tracking which can be registered with `AFRAME.registerComponent()`. Generally won't need to be called directly.
+[xrfaceComponent](/docs/engine/api/aframe/xrface#xrfacecomponent) | Creates an A-Frame component for Face Effects tracking which can be registered with `AFRAME.registerComponent()`. Generally won't need to be called directly.
+[xrlayersComponent](/docs/engine/api/aframe/xrlayers#xrlayerscomponent) | Creates an A-Frame component for Layers tracking which can be registered with `AFRAME.registerComponent()`. Generally won't need to be called directly.
+[xrlayersceneComponent](/docs/engine/api/aframe/xrlayers#xrlayerscenecomponent) | Creates an A-Frame component for a Layer scene which can be registered with `AFRAME.registerComponent()`. Generally won't need to be called directly.
+
+## Examples
+
+#### Example - SLAM enabled (default) {#example---slam-enabled-default}
+
+```html
+
+```
+
+#### Example - SLAM disabled (image tracking only) {#example---slam-disabled-image-tracking-only}
+
+```html
+
+```
+
+#### Example - Front camera (image tracking only) {#example---front-camera-image-tracking-only}
+
+```html
+
+```
+
+#### Example - Front camera Sky Effects {#example---front-camera-sky-effects}
+
+```html
+
+```
+
+#### Example - Sky + SLAM {#example---sky--slam}
+
+```html
+
+
+
+
+
+```
+
+#### Example - Face Effects {#example---face-effects}
+
+```html
+
+```
+
+#### Example - Face Effects with Ears {#example---face-effects-ears}
+
+```html
+
+```
diff --git a/docs/engine/api/aframe/xrconfig.md b/docs/engine/api/aframe/xrconfig.md
new file mode 100644
index 0000000..0d451e6
--- /dev/null
+++ b/docs/engine/api/aframe/xrconfig.md
@@ -0,0 +1,46 @@
+# Camera Configuration
+
+To configure the camera feed, add the `xrconfig` component to your `a-scene`:
+
+``
+
+## xrconfig Attributes (all optional) {#xrconfig-attributes}
+
+Component | Type | Default | Description
+--------- | ---- | ------- | -----------
+cameraDirection | `String` | `'back'` | Desired camera to use. Choose from: `back` or `front`. Use `cameraDirection: front;` with `mirroredDisplay: true;` for selfie mode. Note that world tracking is only supported with `cameraDirection: back;`.
+allowedDevices | `String` | `'mobile-and-headsets'` | Supported device classes. Choose from: `'mobile-and-headsets'`, `'mobile'` or `'any'`. Use `'any'` to enable laptop or desktop-type devices with built-in or attached webcams. Note that world tracking is only supported on `'mobile-and-headsets'` or `'mobile'`.
+mirroredDisplay | `Boolean` | `false` | If true, flip left and right in the output geometry and reverse the direction of the camera feed. Use `'mirroredDisplay: true;'` with `'cameraDirection: front;'` for selfie mode. Should not be enabled if World Tracking (SLAM) is enabled.
+disableXrTablet | `Boolean` | `false` | Disable the tablet visible in immersive sessions.
+xrTabletStartsMinimized | `Boolean` | `false` | The tablet will start minimized.
+disableDefaultEnvironment | `Boolean` | `false` | Disable the default "void space" background.
+disableDesktopCameraControls | `Boolean` | `false` | Disable WASD and mouse look for camera.
+disableDesktopTouchEmulation | `Boolean` | `false` | Disable desktop fake touches.
+disableXrTouchEmulation | `Boolean` | `false` | Don’t emit touch events based on controller raycasts with the scene.
+disableCameraReparenting | `Boolean` | `false` | Disable camera -> controller object move
+defaultEnvironmentFloorScale | `Number` | `1` | Shrink or grow the floor texture.
+defaultEnvironmentFloorTexture | Asset | | Specify an alternative texture asset or URL for the tiled floor.
+defaultEnvironmentFloorColor | Hex Color | `#1A1C2A` | Set the floor color.
+defaultEnvironmentFogIntensity | `Number` | `1` | Increase or decrease fog density.
+defaultEnvironmentSkyTopColor | Hex Color | `#BDC0D6` | Set the color of the sky directly above the user.
+defaultEnvironmentSkyBottomColor | Hex Color | `#1A1C2A` | Set the color of the sky at the horizon.
+defaultEnvironmentSkyGradientStrength | `Number` | `1` | Control how sharply the sky gradient transitions.
+
+Notes:
+
+* `cameraDirection`: When using `xrweb` to provide world tracking (SLAM), only the `back` camera is
+supported. If you are using the `front` camera, you must disable world tracking by setting
+`disableWorldTracking: true` on `xrweb`.
+
+## xrconfigComponent()
+
+`XR8.AFrame.xrconfigComponent()`
+
+Creates an A-Frame component which can be registered with `AFRAME.registerComponent()`. This,
+however, generally won't need to be called directly. On 8th Wall Web script load, this component
+will be registered automatically if it is detected that A-Frame has loaded (i.e. if `window.AFRAME`
+exists).
+
+```javascript
+window.AFRAME.registerComponent('xrconfig', XR8.AFrame.xrconfigComponent())
+```
diff --git a/docs/engine/api/aframe/xrface.md b/docs/engine/api/aframe/xrface.md
new file mode 100644
index 0000000..0364bbc
--- /dev/null
+++ b/docs/engine/api/aframe/xrface.md
@@ -0,0 +1,34 @@
+# Face Effects
+
+If you want Face Effects tracking, add the `xrface` component to your `a-scene`:
+
+``
+
+## xrface Attributes {#xrface-attributes}
+
+Component | Type | Default | Description
+--------- | ---- | ------- | -----------
+meshGeometry | `Array` | `['face']` | Comma separated strings that configures which portions of the face mesh will have returned triangle indices. Can be any combination of `'face'`, `'eyes'`, `'iris'` and/or `'mouth'`.
+maxDetections [Optional] | `Number` | `1` | The maximum number of faces to detect. The available choices are 1, 2, or 3.
+uvType [Optional] | `String` | `[XR8.FaceController.UvType.STANDARD]` | Specifies which uvs are returned in the facescanning and faceloading event. Options are: `[XR8.FaceController.UvType.STANDARD, XR8.FaceController.UvType.PROJECTED]`
+enableEars [Optional] | `Boolean` | `false` | If true, runs ear detection simultaneously with Face Effects and returns ear attachment points.
+
+
+Notes:
+
+* `xrface` and `xrweb` cannot be used at the same time.
+* `xrface` and `xrlayers` cannot be used at the same time.
+* Best practice is to always use `xrconfig`; however, if you use `xrface` without `xrconfig` then `xrconfig` will be added automatically. When that happens all attributes which were set on `xrface` will be passed along to `xrconfig`.
+
+## xrfaceComponent()
+
+`XR8.AFrame.xrfaceComponent()`
+
+Creates an A-Frame component which can be registered with `AFRAME.registerComponent()`. This,
+however, generally won't need to be called directly. On 8th Wall Web script load, this component
+will be registered automatically if it is detected that A-Frame has loaded (i.e. if `window.AFRAME`
+exists).
+
+```javascript
+window.AFRAME.registerComponent('xrface', XR8.AFrame.xrfaceComponent())
+```
diff --git a/docs/engine/api/aframe/xrlayers.md b/docs/engine/api/aframe/xrlayers.md
new file mode 100644
index 0000000..b3b7890
--- /dev/null
+++ b/docs/engine/api/aframe/xrlayers.md
@@ -0,0 +1,58 @@
+# Sky Effects
+
+If you want Sky Effects:
+
+1. Add the `xrlayers` component to your `a-scene`
+2. Add the `xrlayerscene` component to an `a-entity` and add content you want to be in the sky under that `a-entity`.
+
+```html
+
+
+
+
+
+```
+
+## xrlayers Attributes {#xrlayers-attributes}
+
+None
+
+Notes:
+
+* `xrlayers` and `xrface` cannot be used at the same time.
+* `xrlayers` and `xrweb` can be used at the same time. You must use `xrconfig` when doing so.
+ * Best practice is to always use `xrconfig`; however, if you use `xrlayers` without `xrface` or `xrweb` or `xrconfig`, then `xrconfig` will be added automatically. When that happens all attributes which were set on `xrlayers` will be passed along to `xrconfig`.
+
+## xrlayerscene Attributes {#xrlayerscene-attributes}
+
+Component | Type | Default | Description
+--------- | ---- | ------- | -----------
+name | `String` | `''` | The layer name. Should correspond to a layer from [`XR8.LayersController`](../layerscontroller/layerscontroller.md). Only supported layer at this time is `sky`.
+invertLayerMask | `Boolean` | `false` | If true, content you place in your scene will occlude non-sky areas. If false, content you place in your scene will occlude sky areas.
+edgeSmoothness | `Number` | `0` | Amount to smooth the edges of the layer. Valid values between 0-1.
+
+## xrlayersceneComponent()
+
+`XR8.AFrame.xrlayersceneComponent()`
+
+Creates an A-Frame component which can be registered with `AFRAME.registerComponent()`. This,
+however, generally won't need to be called directly. On 8th Wall Web script load, this component
+will be registered automatically if it is detected that A-Frame has loaded (i.e. if `window.AFRAME`
+exists).
+
+```javascript
+window.AFRAME.registerComponent('xrlayerscene', XR8.AFrame.xrlayersceneComponent())
+```
+
+## xrlayersComponent()
+
+`XR8.AFrame.xrlayersComponent()`
+
+Creates an A-Frame component which can be registered with `AFRAME.registerComponent()`. This,
+however, generally won't need to be called directly. On 8th Wall Web script load, this component
+will be registered automatically if it is detected that A-Frame has loaded (i.e. if `window.AFRAME`
+exists).
+
+```javascript
+window.AFRAME.registerComponent('xrlayers', XR8.AFrame.xrlayersComponent())
+```
diff --git a/docs/engine/api/aframe/xrweb.md b/docs/engine/api/aframe/xrweb.md
new file mode 100644
index 0000000..95c9838
--- /dev/null
+++ b/docs/engine/api/aframe/xrweb.md
@@ -0,0 +1,36 @@
+# World Tracking & Image Targets
+
+If you want World Tracking or Image Targets, add the `xrweb` component to your `a-scene`:
+
+``
+
+## xrweb Attributes (all optional) {#xrweb-attributes}
+
+Component | Type | Default | Description
+--------- | ---- | ------- | -----------
+scale | `String` | `'responsive'` | Either `'responsive'` or `'absolute'`. `'responsive'` will return values so that the camera on frame 1 is at the origin defined via [`XR8.XrController.updateCameraProjectionMatrix()`](../xrcontroller/updatecameraprojectionmatrix). `'absolute'` will return the camera, image targets, etc in meters. The default is `'responsive'`. When using `'absolute'` the x-position, z-position, and rotation of the starting pose will respect the parameters set in [`XR8.XrController.updateCameraProjectionMatrix()`](../xrcontroller/updatecameraprojectionmatrix) once scale has been estimated. The y-position will depend on the camera's physical height from the ground plane.
+disableWorldTracking | `Boolean` | `false` | If true, turn off SLAM tracking for efficiency.
+
+Notes:
+
+* `xrweb` and `xrface` cannot be used at the same time.
+* `xrweb` and `xrlayers` can be used at the same time. You must use `xrconfig` when doing so.
+ * Best practice is to always use `xrconfig`; however, if you use `xrweb` without `xrface` or
+ `xrlayers` or `xrconfig`, then `xrconfig` will be added automatically. When that happens all
+ attributes which were set on `xrweb` will be passed along to `xrconfig`.
+* `cameraDirection`: World tracking (SLAM) is only supported on the `back` camera. If you are using
+ the `front` camera, you must disable world tracking by setting `disableWorldTracking: true`.
+* World tracking (SLAM) is only supported on mobile devices.
+
+## xrwebComponent()
+
+`XR8.AFrame.xrwebComponent()`
+
+Creates an A-Frame component which can be registered with `AFRAME.registerComponent()`. This,
+however, generally won't need to be called directly. On 8th Wall Web script load, this component
+will be registered automatically if it is detected that A-Frame has loaded (i.e. if `window.AFRAME`
+exists).
+
+```javascript
+window.AFRAME.registerComponent('xrweb', XR8.AFrame.xrwebComponent())
+```
diff --git a/docs/engine/api/aframeeventlisenters/_category_.json b/docs/engine/api/aframeeventlisenters/_category_.json
new file mode 100644
index 0000000..4e40049
--- /dev/null
+++ b/docs/engine/api/aframeeventlisenters/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "AFrame Event Listeners",
+ "position": 5
+}
diff --git a/docs/engine/api/aframeeventlisenters/aframeeventlisenters.md b/docs/engine/api/aframeeventlisenters/aframeeventlisenters.md
new file mode 100644
index 0000000..ecff691
--- /dev/null
+++ b/docs/engine/api/aframeeventlisenters/aframeeventlisenters.md
@@ -0,0 +1,13 @@
+# AFrame Event Listeners
+
+This section describes the events that are listened for by the "xrweb" A-Frame component
+
+You can emit these events in your web application to perform various actions:
+
+Event Listener | Description
+-------------- | -----------
+[hidecamerafeed](hidecamerafeed.md) | Hides the camera feed. Tracking does not stop.
+[recenter](recenter.md) | Recenters the camera feed to its origin. If a new origin is provided as an argument, the camera's origin will be reset to that, then it will recenter.
+[screenshotrequest](screenshotrequest.md) | Emits a request to the engine to capture a screenshot of the AFrame canvas. The engine will emit a [`screenshotready`](/docs/engine/api/aframeevents/screenshotready) event with the JPEG compressed image or [`screenshoterror`](/docs/engine/api/aframeevents/screenshoterror) if an error has occurred.
+[showcamerafeed](showcamerafeed.md) | Shows the camera feed.
+[stopxr](stopxr.md) | Stop the current XR session. While stopped, the camera feed is stopped and device motion is not tracked.
diff --git a/docs/engine/api/aframeeventlisenters/hidecamerafeed.md b/docs/engine/api/aframeeventlisenters/hidecamerafeed.md
new file mode 100644
index 0000000..a8ae491
--- /dev/null
+++ b/docs/engine/api/aframeeventlisenters/hidecamerafeed.md
@@ -0,0 +1,18 @@
+# hidecamerafeed
+
+`scene.emit('hidecamerafeed')`
+
+## Description {#description}
+
+Hides the camera feed. Tracking does not stop.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+let scene = this.el.sceneEl
+scene.emit('hidecamerafeed')
+```
diff --git a/docs/engine/api/aframeeventlisenters/recenter.md b/docs/engine/api/aframeeventlisenters/recenter.md
new file mode 100644
index 0000000..e7a0664
--- /dev/null
+++ b/docs/engine/api/aframeeventlisenters/recenter.md
@@ -0,0 +1,35 @@
+# recenter
+
+`scene.emit('recenter', {origin, facing})`
+
+## Description {#description}
+
+Recenters the camera feed to its origin. If a new origin is provided as an argument, the camera's origin will be reset to that, then it will recenter.
+
+If origin and facing are not provided, camera is reset to origin previously specified by a call to `recenter` or the last call to `XR8.XrController.updateCameraProjectionMatrix()` when using `xrweb` or `XR8.FaceController.configure({coordinates: {origin, scale, axes}})` / `XR8.LayersController.configure({coordinates: {origin, scale, axes}})` when using `xrface` or `xrlayers`.
+
+**IMPORTANT:** With A-Frame, `updateCameraProjectionMatrix()` and / or `configure()` is initially called based on initial camera position in the scene.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+origin: {x, y, z} [Optional] | The location of the new origin.
+facing: {w, x, y, z} [Optional] | A quaternion representing direction the camera should face at the origin.
+
+## Example - Recenter the scene {#example}
+
+```javascript
+let scene = this.el.sceneEl
+scene.emit('recenter')
+```
+
+## Example - Recenter the scene and update the origin {#example---update-origin}
+
+```javascript
+let scene = this.el.sceneEl
+scene.emit('recenter', {
+ origin: {x: 1, y: 4, z: 0},
+ facing: {w: 0.9856, x:0, y:0.169, z:0}
+})
+```
diff --git a/docs/engine/api/aframeeventlisenters/screenshotrequest.md b/docs/engine/api/aframeeventlisenters/screenshotrequest.md
new file mode 100644
index 0000000..4446b57
--- /dev/null
+++ b/docs/engine/api/aframeeventlisenters/screenshotrequest.md
@@ -0,0 +1,34 @@
+# screenshotrequest
+
+`scene.emit('screenshotrequest')`
+
+## Description {#description}
+
+Emits a request to the engine to capture a screenshot of the AFrame canvas. The engine will emit a
+[`screenshotready`](/docs/engine/api/aframeevents/screenshotready) event with the JPEG compressed image or
+[`screenshoterror`](/docs/engine/api/aframeevents/screenshoterror) if an error has occurred.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+const scene = this.el.sceneEl
+const photoButton = document.getElementById('photoButton')
+
+// Emit screenshotrequest when user taps
+photoButton.addEventListener('click', () => {
+ image.src = ""
+ scene.emit('screenshotrequest')
+})
+
+scene.addEventListener('screenshotready', event => {
+ image.src = 'data:image/jpeg;base64,' + event.detail
+})
+
+scene.addEventListener('screenshoterror', event => {
+ console.log("error")
+})
+```
diff --git a/docs/engine/api/aframeeventlisenters/showcamerafeed.md b/docs/engine/api/aframeeventlisenters/showcamerafeed.md
new file mode 100644
index 0000000..2038858
--- /dev/null
+++ b/docs/engine/api/aframeeventlisenters/showcamerafeed.md
@@ -0,0 +1,18 @@
+# showcamerafeed
+
+`scene.emit('showcamerafeed')`
+
+## Description {#description}
+
+Shows the camera feed.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+let scene = this.el.sceneEl
+scene.emit('showcamerafeed')
+```
diff --git a/docs/engine/api/aframeeventlisenters/stopxr.md b/docs/engine/api/aframeeventlisenters/stopxr.md
new file mode 100644
index 0000000..b6dcdd9
--- /dev/null
+++ b/docs/engine/api/aframeeventlisenters/stopxr.md
@@ -0,0 +1,18 @@
+# stopxr
+
+`scene.emit('stopxr')`
+
+## Description {#description}
+
+Stop the current XR session. While stopped, the camera feed is stopped and device motion is not tracked.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+let scene = this.el.sceneEl
+scene.emit('stopxr')
+```
diff --git a/docs/engine/api/aframeevents/_category_.json b/docs/engine/api/aframeevents/_category_.json
new file mode 100644
index 0000000..0855e3d
--- /dev/null
+++ b/docs/engine/api/aframeevents/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "AFrame Events",
+ "position": 4
+}
diff --git a/docs/engine/api/aframeevents/aframeevents.md b/docs/engine/api/aframeevents/aframeevents.md
new file mode 100644
index 0000000..44e3443
--- /dev/null
+++ b/docs/engine/api/aframeevents/aframeevents.md
@@ -0,0 +1,52 @@
+# AFrame Events
+
+This section describes events emitted by the `xrconfig`, `xrweb`, and `xrface` A-Frame components.
+
+You can listen for these events in your web application to call a function that handles the event.
+
+## Events Emitted by `xrconfig` {#xrconfig}
+
+The following events are emitted by `xrconfig` (which is automatically added if you only use `xrweb`, `xrface`, or `xrlayers`):
+
+Event Emitted | Description
+------------- | -----------
+[camerastatuschange](camerastatuschange.md) | This event is emitted when the status of the camera changes. See [`onCameraStatusChange`](/docs/engine/api/camerapipelinemodule/oncamerastatuschange) from [`XR8.addCameraPipelineModule()`](/docs/engine/api/xr8/addcamerapipelinemodule) for more information on the possible status.
+[realityerror](realityerror.md) | This event is emitted when an error has occurred when initializing 8th Wall Web. This is the recommended time at which any error messages should be displayed. The [`XR8.XrDevice()` API](/docs/engine/api/xrdevice) can help with determining what type of error messaging should be displayed.
+[realityready](realityready.md) | This event is emitted when 8th Wall Web has initialized and at least one frame has been successfully processed. This is the recommended time at which any loading elements should be hidden.
+[screenshoterror](screenshoterror.md) | This event is emitted in response to the [`screenshotrequest`](/docs/engine/api/aframeeventlisenters/screenshotrequest) event resulting in an error.
+[screenshotready](screenshotready.md) | This event is emitted in response to the [`screenshotrequest`](/docs/engine/api/aframeeventlisenters/screenshotrequest) event being completed successfully. The JPEG compressed image of the AFrame canvas will be provided.
+
+## Events Emitted by `xrweb` {#xrweb}
+
+Event Emitted | Description
+------------- | -----------
+[xrimageloading](xrimageloading.md) | This event is emitted when detection image loading begins.
+[xrimagescanning](xrimagescanning.md) | This event is emitted when all detection images have been loaded and scanning has begun.
+[xrimagefound](xrimagefound.md) | This event is emitted when an image target is first found.
+[xrimageupdated](xrimageupdated.md) | This event is emitted when an image target changes position, rotation or scale.
+[xrimagelost](xrimagelost.md) | This event is emitted when an image target is no longer being tracked.
+[xrtrackingstatus](xrtrackingstatus.md) | This event is emitted when [`XR8.XrController`](/docs/engine/api/xrcontroller) starts and any time tracking status or reason changes.
+
+## Events Emitted by `xrface` {#xrface}
+
+Event Emitted | Description
+------------- | -----------
+[xrfaceloading](xrfaceloading.md) | This event is emitted when loading begins for additional face AR resources.
+[xrfacescanning](xrfacescanning.md) | This event is emitted when AR resources have been loaded and scanning has begun.
+[xrfacefound](xrfacefound.md) | This event is emitted when a face is first found.
+[xrfaceupdated](xrfaceupdated.md) | This event is emitted when face is subsequently found.
+[xrfacelost](xrfacelost.md) | This event is emitted when a face is no longer being tracked.
+[xrmouthopened](xrmouthopened.md) | This event is emitted when a tracked face's mouth opens.
+[xrmouthclosed](xrmouthclosed.md) | This event is emitted when a tracked face's mouth closes.
+[xrlefteyeopened](xrlefteyeopened.md) | This event is emitted when a tracked face's left eye opens.
+[xrlefteyeclosed](xrlefteyeclosed.md) | This event is emitted when a tracked face's left eye closes.
+[xrrighteyeopened](xrrighteyeopened.md) | This event is emitted when a tracked face's right eye opens.
+[xrrighteyeclosed](xrrighteyeclosed.md) | This event is emitted when a tracked face's right eye closes.
+[xrlefteyebrowraised](xrlefteyebrowraised.md) | This event is emitted when a tracked face's left eyebrow is raised from its initial position when the face was found.
+[xrlefteyebrowlowered](xrlefteyebrowlowered.md) | This event is emitted when a tracked face's left eyebrow is lowered to its initial position when the face was found.
+[xrrighteyebrowraised](xrrighteyebrowraised.md) | This event is emitted when a tracked face's right eyebrow is raised from its initial position when the face was found.
+[xrrighteyebrowlowered](xrrighteyebrowlowered.md) | This event is emitted when a tracked face's right eyebrow is lowered to its initial position when the face was found.
+[xrlefteyewinked](xrlefteyewinked.md) | This event is emitted when a tracked face's left eye closes and opens within 750ms while the right eye remains open.
+[xrrighteyewinked](xrrighteyewinked.md) | This event is emitted when a tracked face's right eye closes and opens within 750ms while the left eye remains open.
+[xrblinked](xrblinked.md) | This event is emitted when a tracked face's eyes blink.
+[xrinterpupillarydistance](xrinterpupillarydistance.md) | This event is emitted when a tracked face's distance in millimeters between the centers of each pupil is first detected.
diff --git a/docs/engine/api/aframeevents/camerastatuschange.md b/docs/engine/api/aframeevents/camerastatuschange.md
new file mode 100644
index 0000000..2bb1475
--- /dev/null
+++ b/docs/engine/api/aframeevents/camerastatuschange.md
@@ -0,0 +1,32 @@
+# camerastatuschange
+
+## Description {#description}
+
+This event is emitted when the status of the camera changes. See
+[`onCameraStatusChange`](/docs/engine/api/camerapipelinemodule/oncamerastatuschange) from
+[`XR8.addCameraPipelineModule()`](/docs/engine/api/xr8/addcamerapipelinemodule) for more information on the possible
+status.
+
+## Example {#example}
+
+```javascript
+var handleCameraStatusChange = function handleCameraStatusChange(event) {
+ console.log('status change', event.detail.status);
+
+ switch (event.detail.status) {
+ case 'requesting':
+ // Do something
+ break;
+
+ case 'hasStream':
+ // Do something
+ break;
+
+ case 'failed':
+ event.target.emit('realityerror');
+ break;
+ }
+};
+let scene = this.el.sceneEl
+scene.addEventListener('camerastatuschange', handleCameraStatusChange)
+```
diff --git a/docs/engine/api/aframeevents/realityerror.md b/docs/engine/api/aframeevents/realityerror.md
new file mode 100644
index 0000000..7726c20
--- /dev/null
+++ b/docs/engine/api/aframeevents/realityerror.md
@@ -0,0 +1,25 @@
+# realityerror
+
+## Description {#description}
+
+This event is emitted when an error has occurred when initializing 8th Wall Web. This is the
+recommended time at which any error messages should be displayed. The [`XR8.XrDevice()` API](/docs/engine/api/xrdevice)
+can help with determining what type of error messaging should be displayed.
+
+## Example {#example}
+
+```javascript
+let scene = this.el.sceneEl
+ scene.addEventListener('realityerror', (event) => {
+ if (XR8.XrDevice.isDeviceBrowserCompatible()) {
+ // Browser is compatible. Print the exception for more information.
+ console.log(event.detail.error)
+ return
+ }
+
+ // Browser is not compatible. Check the reasons why it may not be.
+ for (let reason of XR8.XrDevice.incompatibleReasons()) {
+ // Handle each XR8.XrDevice.IncompatibilityReasons
+ }
+ })
+```
diff --git a/docs/engine/api/aframeevents/realityready.md b/docs/engine/api/aframeevents/realityready.md
new file mode 100644
index 0000000..661e181
--- /dev/null
+++ b/docs/engine/api/aframeevents/realityready.md
@@ -0,0 +1,14 @@
+# realityready
+
+## Description {#description}
+
+This event is emitted when 8th Wall Web has initialized and at least one frame has been successfully processed. This is the recommended time at which any loading elements should be hidden.
+
+## Example {#example}
+
+```javascript
+let scene = this.el.sceneEl
+scene.addEventListener('realityready', () => {
+ // Hide loading UI
+})
+```
diff --git a/docs/engine/api/aframeevents/screenshoterror.md b/docs/engine/api/aframeevents/screenshoterror.md
new file mode 100644
index 0000000..609ee69
--- /dev/null
+++ b/docs/engine/api/aframeevents/screenshoterror.md
@@ -0,0 +1,15 @@
+# screenshoterror
+
+## Description {#description}
+
+This event is emitted in response to the [`screenshotrequest`](/docs/engine/api/aframeeventlisenters/screenshotrequest) event resulting in an error.
+
+## Example {#example}
+
+```javascript
+let scene = this.el.sceneEl
+scene.addEventListener('screenshoterror', (event) => {
+ console.log(event.detail)
+ // Handle screenshot error.
+})
+```
diff --git a/docs/engine/api/aframeevents/screenshotready.md b/docs/engine/api/aframeevents/screenshotready.md
new file mode 100644
index 0000000..558f755
--- /dev/null
+++ b/docs/engine/api/aframeevents/screenshotready.md
@@ -0,0 +1,16 @@
+# screenshotready
+
+## Description {#description}
+
+This event is emitted in response to the [`screenshotrequest`](/docs/engine/api/aframeeventlisenters/screenshotrequest) event being completed successfully. The JPEG compressed image of the AFrame canvas will be provided.
+
+## Example {#example}
+
+```javascript
+let scene = this.el.sceneEl
+scene.addEventListener('screenshotready', (event) => {
+ // screenshotPreview is an HTML element
+ const image = document.getElementById('screenshotPreview')
+ image.src = 'data:image/jpeg;base64,' + event.detail
+})
+```
diff --git a/docs/engine/api/aframeevents/xrblinked.md b/docs/engine/api/aframeevents/xrblinked.md
new file mode 100644
index 0000000..95e208b
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrblinked.md
@@ -0,0 +1,11 @@
+# xrblinked
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's eyes blink.
+
+`xrblinked.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrfacefound.md b/docs/engine/api/aframeevents/xrfacefound.md
new file mode 100644
index 0000000..4ef3fb1
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrfacefound.md
@@ -0,0 +1,49 @@
+# xrfacefound
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a face is first found.
+
+`xrfacefound.detail : {id, transform, vertices, normals, attachmentPoints}`
+
+Property | Description
+--------- | -----------
+id | A numerical id of the located face.
+transform: `{position, rotation, scale, scaledWidth, scaledHeight, scaledDepth}` | Transform information of the located face.
+vertices: [{x, y, z}] | Position of face points, relative to transform.
+normals: [{x, y, z}] | Normal direction of vertices, relative to transform.
+attachmentPoints: `{ name, position: {x,y,z} }` | See [`XR8.FaceController.AttachmentPoints`](/docs/engine/api/facecontroller/attachmentpoints) for list of available attachment points. `position` is relative to the transform.
+uvsInCameraFrame `[{u, v}]` | The list of uv positions in the camera frame corresponding to the returned vertex points.
+
+`transform` is an object with the following properties:
+
+Property | Description
+--------- | -----------
+position {x, y, z} | The 3d position of the located face.
+rotation {w, x, y, z} | The 3d local orientation of the located face.
+scale | A scale factor that should be applied to objects attached to this face.
+scaledWidth | Approximate width of the head in the scene when multiplied by scale.
+scaledHeight | Approximate height of the head in the scene when multiplied by scale.
+scaledDepth | Approximate depth of the head in the scene when multiplied by scale.
+
+## Example {#example}
+
+```javascript
+const faceRigidComponent = {
+ init: function () {
+ const object3D = this.el.object3D
+ object3D.visible = false
+ const show = ({detail}) => {
+ const {position, rotation, scale} = detail.transform
+ object3D.position.copy(position)
+ object3D.quaternion.copy(rotation)
+ object3D.scale.set(scale, scale, scale)
+ object3D.visible = true
+ }
+ const hide = ({detail}) => { object3D.visible = false }
+ this.el.sceneEl.addEventListener('xrfacefound', show)
+ this.el.sceneEl.addEventListener('xrfaceupdated', show)
+ this.el.sceneEl.addEventListener('xrfacelost', hide)
+ }
+}
+```
diff --git a/docs/engine/api/aframeevents/xrfaceloading.md b/docs/engine/api/aframeevents/xrfaceloading.md
new file mode 100644
index 0000000..c6a30e3
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrfaceloading.md
@@ -0,0 +1,24 @@
+# xrfaceloading
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when loading begins for additional face AR resources.
+
+`xrfaceloading.detail : {maxDetections, pointsPerDetection, indices, uvs}`
+
+Property | Description
+--------- | -----------
+maxDetections | The maximum number of faces that can be simultaneously processed.
+pointsPerDetection | Number of vertices that will be extracted per face.
+indices: [{a, b, c}] | Indexes into the vertices array that form the triangles of the requested mesh, as specified with meshGeometry on configure.
+uvs: [{u, v}] | uv positions into a texture map corresponding to the returned vertex points.
+
+## Example {#example}
+
+```javascript
+const initMesh = ({detail}) => {
+ const {pointsPerDetection, uvs, indices} = detail
+ this.el.object3D.add(generateMeshGeometry({pointsPerDetection, uvs, indices}))
+}
+this.el.sceneEl.addEventListener('xrfaceloading', initMesh)
+```
diff --git a/docs/engine/api/aframeevents/xrfacelost.md b/docs/engine/api/aframeevents/xrfacelost.md
new file mode 100644
index 0000000..46c14b6
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrfacelost.md
@@ -0,0 +1,33 @@
+# xrfacelost
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a face is no longer being tracked.
+
+`xrfacelost.detail : {id}`
+
+Property | Description
+--------- | -----------
+id | A numerical id of the face that was lost.
+
+## Example {#example}
+
+```javascript
+const faceRigidComponent = {
+ init: function () {
+ const object3D = this.el.object3D
+ object3D.visible = false
+ const show = ({detail}) => {
+ const {position, rotation, scale} = detail.transform
+ object3D.position.copy(position)
+ object3D.quaternion.copy(rotation)
+ object3D.scale.set(scale, scale, scale)
+ object3D.visible = true
+ }
+ const hide = ({detail}) => { object3D.visible = false }
+ this.el.sceneEl.addEventListener('xrfacefound', show)
+ this.el.sceneEl.addEventListener('xrfaceupdated', show)
+ this.el.sceneEl.addEventListener('xrfacelost', hide)
+ }
+}
+```
diff --git a/docs/engine/api/aframeevents/xrfacescanning.md b/docs/engine/api/aframeevents/xrfacescanning.md
new file mode 100644
index 0000000..e896366
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrfacescanning.md
@@ -0,0 +1,24 @@
+# xrfacescanning
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when all face AR resources have been loaded and scanning has begun.
+
+`xrfacescanning.detail : {maxDetections, pointsPerDetection, indices, uvs}`
+
+Property | Description
+--------- | -----------
+maxDetections | The maximum number of faces that can be simultaneously processed.
+pointsPerDetection | Number of vertices that will be extracted per face.
+indices: [{a, b, c}] | Indexes into the vertices array that form the triangles of the requested mesh, as specified with meshGeometry on configure.
+uvs: [{u, v}] | uv positions into a texture map corresponding to the returned vertex points.
+
+## Example {#example}
+
+```javascript
+const initMesh = ({detail}) => {
+ const {pointsPerDetection, uvs, indices} = detail
+ this.el.object3D.add(generateMeshGeometry({pointsPerDetection, uvs, indices}))
+}
+this.el.sceneEl.addEventListener('xrfacescanning', initMesh)
+```
diff --git a/docs/engine/api/aframeevents/xrfaceupdated.md b/docs/engine/api/aframeevents/xrfaceupdated.md
new file mode 100644
index 0000000..b82e16f
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrfaceupdated.md
@@ -0,0 +1,49 @@
+# xrfaceupdated
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a face is subsequently found.
+
+`xrfaceupdated.detail : {id, transform, vertices, normals, attachmentPoints}`
+
+Property | Description
+--------- | -----------
+id | A numerical id of the located face.
+transform: `{position, rotation, scale, scaledWidth, scaledHeight, scaledDepth}` | Transform information of the located face.
+vertices: [{x, y, z}] | Position of face points, relative to transform.
+normals: [{x, y, z}] | Normal direction of vertices, relative to transform.
+attachmentPoints: { name, position: {x,y,z} } | See [`XR8.FaceController.AttachmentPoints`](/docs/engine/api/facecontroller/attachmentpoints) for list of available attachment points. `position` is relative to the transform.
+uvsInCameraFrame `[{u, v}]` | The list of uv positions in the camera frame corresponding to the returned vertex points.
+
+`transform` is an object with the following properties:
+
+Property | Description
+--------- | -----------
+position {x, y, z} | The 3d position of the located face.
+rotation {w, x, y, z} | The 3d local orientation of the located face.
+scale | A scale factor that should be applied to objects attached to this face.
+scaledWidth | Approximate width of the head in the scene when multiplied by scale.
+scaledHeight | Approximate height of the head in the scene when multiplied by scale.
+scaledDepth | Approximate depth of the head in the scene when multiplied by scale.
+
+## Example {#example}
+
+```javascript
+const faceRigidComponent = {
+ init: function () {
+ const object3D = this.el.object3D
+ object3D.visible = false
+ const show = ({detail}) => {
+ const {position, rotation, scale} = detail.transform
+ object3D.position.copy(position)
+ object3D.quaternion.copy(rotation)
+ object3D.scale.set(scale, scale, scale)
+ object3D.visible = true
+ }
+ const hide = ({detail}) => { object3D.visible = false }
+ this.el.sceneEl.addEventListener('xrfacefound', show)
+ this.el.sceneEl.addEventListener('xrfaceupdated', show)
+ this.el.sceneEl.addEventListener('xrfacelost', hide)
+ }
+}
+```
diff --git a/docs/engine/api/aframeevents/xrimagefound.md b/docs/engine/api/aframeevents/xrimagefound.md
new file mode 100644
index 0000000..7e7e626
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrimagefound.md
@@ -0,0 +1,68 @@
+# xrimagefound
+
+## Description {#description}
+
+This event is emitted by [`xrweb`](/docs/engine/api/aframe/xrweb) when an image target is first found.
+
+`xrimagefound.detail : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+Property | Description
+--------- | -----------
+name | The image's name.
+type | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+position: `{x, y, z}` | The 3d position of the located image.
+rotation: `{w, x, y, z}` | The 3d local orientation of the located image.
+scale | A scale factor that should be applied to object attached to this image.
+
+If type = `FLAT`:
+
+Property | Description
+--------- | -----------
+scaledWidth | The width of the image in the scene, when multiplied by scale.
+scaledHeight | The height of the image in the scene, when multiplied by scale.
+
+If type = `CYLINDRICAL` or `CONICAL`:
+
+Property | Description
+--------- | -----------
+height | Height of the curved target.
+radiusTop | Radius of the curved target at the top.
+radiusBottom | Radius of the curved target at the bottom.
+arcStartRadians | Starting angle in radians.
+arcLengthRadians | Central angle in radians.
+
+## Example {#example}
+
+```javascript
+AFRAME.registerComponent('my-named-image-target', {
+ schema: {
+ name: { type: 'string' }
+ },
+ init: function () {
+ const object3D = this.el.object3D
+ const name = this.data.name
+ object3D.visible = false
+
+ const showImage = ({detail}) => {
+ if (name != detail.name) {
+ return
+ }
+ object3D.position.copy(detail.position)
+ object3D.quaternion.copy(detail.rotation)
+ object3D.scale.set(detail.scale, detail.scale, detail.scale)
+ object3D.visible = true
+ }
+
+ const hideImage = ({detail}) => {
+ if (name != detail.name) {
+ return
+ }
+ object3D.visible = false
+ }
+
+ this.el.sceneEl.addEventListener('xrimagefound', showImage)
+ this.el.sceneEl.addEventListener('xrimageupdated', showImage)
+ this.el.sceneEl.addEventListener('xrimagelost', hideImage)
+ }
+})
+```
diff --git a/docs/engine/api/aframeevents/xrimageloading.md b/docs/engine/api/aframeevents/xrimageloading.md
new file mode 100644
index 0000000..3f02d26
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrimageloading.md
@@ -0,0 +1,27 @@
+# xrimageloading
+
+## Description {#description}
+
+This event is emitted by [`xrweb`](/docs/engine/api/aframe/xrweb) when detection image loading begins.
+
+`xrimageloading.detail : { imageTargets: {name, type, metadata} }`
+
+Property | Description
+--------- | -----------
+name | The image's name.
+type | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+metadata | User metadata.
+
+## Example {#example}
+
+```javascript
+const componentMap = {}
+
+const addComponents = ({detail}) => {
+ detail.imageTargets.forEach(({name, type, metadata}) => {
+ // ...
+ })
+}
+
+this.el.sceneEl.addEventListener('xrimageloading', addComponents)
+```
diff --git a/docs/engine/api/aframeevents/xrimagelost.md b/docs/engine/api/aframeevents/xrimagelost.md
new file mode 100644
index 0000000..c6243ff
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrimagelost.md
@@ -0,0 +1,68 @@
+# xrimagelost
+
+## Description {#description}
+
+This event is emitted by [`xrweb`](/docs/engine/api/aframe/xrweb) when an image target is no longer being tracked.
+
+`xrimagelost.detail : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+Property | Description
+--------- | -----------
+name | The image's name.
+type | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+position: `{x, y, z}` | The 3d position of the located image.
+rotation: `{w, x, y, z}` | The 3d local orientation of the located image.
+scale | A scale factor that should be applied to object attached to this image.
+
+If type = `FLAT`:
+
+Property | Description
+--------- | -----------
+scaledWidth | The width of the image in the scene, when multiplied by scale.
+scaledHeight | The height of the image in the scene, when multiplied by scale.
+
+If type = `CYLINDRICAL` or `CONICAL`:
+
+Property | Description
+--------- | -----------
+height | Height of the curved target.
+radiusTop | Radius of the curved target at the top.
+radiusBottom | Radius of the curved target at the bottom.
+arcStartRadians | Starting angle in radians.
+arcLengthRadians | Central angle in radians.
+
+## Example {#example}
+
+```javascript
+AFRAME.registerComponent('my-named-image-target', {
+ schema: {
+ name: { type: 'string' }
+ },
+ init: function () {
+ const object3D = this.el.object3D
+ const name = this.data.name
+ object3D.visible = false
+
+ const showImage = ({detail}) => {
+ if (name != detail.name) {
+ return
+ }
+ object3D.position.copy(detail.position)
+ object3D.quaternion.copy(detail.rotation)
+ object3D.scale.set(detail.scale, detail.scale, detail.scale)
+ object3D.visible = true
+ }
+
+ const hideImage = ({detail}) => {
+ if (name != detail.name) {
+ return
+ }
+ object3D.visible = false
+ }
+
+ this.el.sceneEl.addEventListener('xrimagefound', showImage)
+ this.el.sceneEl.addEventListener('xrimageupdated', showImage)
+ this.el.sceneEl.addEventListener('xrimagelost', hideImage)
+ }
+})
+```
diff --git a/docs/engine/api/aframeevents/xrimagescanning.md b/docs/engine/api/aframeevents/xrimagescanning.md
new file mode 100644
index 0000000..a5cb41b
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrimagescanning.md
@@ -0,0 +1,31 @@
+# xrimagescanning
+
+## Description {#description}
+
+This event is emitted by [`xrweb`](/docs/engine/api/aframe/xrweb) when all detection images have been loaded and scanning has begun.
+
+`xrimagescanning.detail : { imageTargets: {name, type, metadata, geometry} }`
+
+Property | Description
+--------- | -----------
+name | The image's name.
+type | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+metadata | User metadata.
+geometry | Object containing geometry data. If type=FLAT: `{scaledWidth, scaledHeight}`, else if type=CYLINDRICAL or type=CONICAL: `{height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians}`
+
+If type = `FLAT`, geometry:
+
+Property | Description
+--------- | -----------
+scaledWidth | The width of the image in the scene, when multiplied by scale.
+scaledHeight | The height of the image in the scene, when multiplied by scale.
+
+If type = `CYLINDRICAL` or `CONICAL`, geometry:
+
+Property | Description
+--------- | -----------
+height | Height of the curved target.
+radiusTop | Radius of the curved target at the top.
+radiusBottom | Radius of the curved target at the bottom.
+arcStartRadians | Starting angle in radians.
+arcLengthRadians | Central angle in radians.
diff --git a/docs/engine/api/aframeevents/xrimageupdated.md b/docs/engine/api/aframeevents/xrimageupdated.md
new file mode 100644
index 0000000..0c0981c
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrimageupdated.md
@@ -0,0 +1,68 @@
+# xrimageupdated
+
+## Description {#description}
+
+This event is emitted by [`xrweb`](/docs/engine/api/aframe/xrweb) when an image target changes position, rotation or scale.
+
+`xrimageupdated.detail : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+Property | Description
+--------- | -----------
+name | The image's name.
+type | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+position: `{x, y, z}` | The 3d position of the located image.
+rotation: `{w, x, y, z}` | The 3d local orientation of the located image.
+scale | A scale factor that should be applied to object attached to this image.
+
+If type = `FLAT`:
+
+Property | Description
+--------- | -----------
+scaledWidth | The width of the image in the scene, when multiplied by scale.
+scaledHeight | The height of the image in the scene, when multiplied by scale.
+
+If type = `CYLINDRICAL` or `CONICAL`:
+
+Property | Description
+--------- | -----------
+height | Height of the curved target.
+radiusTop | Radius of the curved target at the top.
+radiusBottom | Radius of the curved target at the bottom.
+arcStartRadians | Starting angle in radians.
+arcLengthRadians | Central angle in radians.
+
+## Example {#example}
+
+```javascript
+AFRAME.registerComponent('my-named-image-target', {
+ schema: {
+ name: { type: 'string' }
+ },
+ init: function () {
+ const object3D = this.el.object3D
+ const name = this.data.name
+ object3D.visible = false
+
+ const showImage = ({detail}) => {
+ if (name != detail.name) {
+ return
+ }
+ object3D.position.copy(detail.position)
+ object3D.quaternion.copy(detail.rotation)
+ object3D.scale.set(detail.scale, detail.scale, detail.scale)
+ object3D.visible = true
+ }
+
+ const hideImage = ({detail}) => {
+ if (name != detail.name) {
+ return
+ }
+ object3D.visible = false
+ }
+
+ this.el.sceneEl.addEventListener('xrimagefound', showImage)
+ this.el.sceneEl.addEventListener('xrimageupdated', showImage)
+ this.el.sceneEl.addEventListener('xrimagelost', hideImage)
+ }
+})
+```
diff --git a/docs/engine/api/aframeevents/xrinterpupillarydistance.md b/docs/engine/api/aframeevents/xrinterpupillarydistance.md
new file mode 100644
index 0000000..ea149a2
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrinterpupillarydistance.md
@@ -0,0 +1,12 @@
+# xrinterpupillarydistance
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's distance in millimeters between the centers of each pupil is first detected.
+
+`xrinterpupillarydistance.detail : {id, interpupillaryDistance}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+interpupillaryDistance | `Number` | Approximate distance in millimeters between the centers of each pupil.
diff --git a/docs/engine/api/aframeevents/xrlefteyebrowlowered.md b/docs/engine/api/aframeevents/xrlefteyebrowlowered.md
new file mode 100644
index 0000000..1cc0c6a
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrlefteyebrowlowered.md
@@ -0,0 +1,11 @@
+# xrlefteyebrowlowered
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's left eyebrow is lowered to its initial position when the face was found.
+
+`xrlefteyebrowlowered.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrlefteyebrowraised.md b/docs/engine/api/aframeevents/xrlefteyebrowraised.md
new file mode 100644
index 0000000..96f2418
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrlefteyebrowraised.md
@@ -0,0 +1,11 @@
+# xrlefteyebrowraised
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's left eyebrow is raised from its initial position when the face was found.
+
+`xrlefteyebrowraised.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrlefteyeclosed.md b/docs/engine/api/aframeevents/xrlefteyeclosed.md
new file mode 100644
index 0000000..249d2c5
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrlefteyeclosed.md
@@ -0,0 +1,11 @@
+# xrlefteyeclosed
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's left eye closes.
+
+`xrlefteyeclosed.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrlefteyeopened.md b/docs/engine/api/aframeevents/xrlefteyeopened.md
new file mode 100644
index 0000000..55bb303
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrlefteyeopened.md
@@ -0,0 +1,11 @@
+# xrlefteyeopened
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's left eye opens.
+
+`xrlefteyeopened.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrlefteyewinked.md b/docs/engine/api/aframeevents/xrlefteyewinked.md
new file mode 100644
index 0000000..9c9c657
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrlefteyewinked.md
@@ -0,0 +1,11 @@
+# xrlefteyewinked
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's left eye closes and opens within 750ms while the right eye remains open.
+
+`xrlefteyewinked.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrmouthclosed.md b/docs/engine/api/aframeevents/xrmouthclosed.md
new file mode 100644
index 0000000..58c97d5
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrmouthclosed.md
@@ -0,0 +1,11 @@
+# xrmouthclosed
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's mouth closes.
+
+`xrmouthclosed.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrmouthopened.md b/docs/engine/api/aframeevents/xrmouthopened.md
new file mode 100644
index 0000000..cc3eb1c
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrmouthopened.md
@@ -0,0 +1,11 @@
+# xrmouthopened
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's mouth opens.
+
+`xrmouthopened.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrrighteyebrowlowered.md b/docs/engine/api/aframeevents/xrrighteyebrowlowered.md
new file mode 100644
index 0000000..d305ba9
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrrighteyebrowlowered.md
@@ -0,0 +1,11 @@
+# xrrighteyebrowlowered
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's right eyebrow is lowered to its initial position when the face was found.
+
+`xrrighteyebrowlowered.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrrighteyebrowraised.md b/docs/engine/api/aframeevents/xrrighteyebrowraised.md
new file mode 100644
index 0000000..39861fc
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrrighteyebrowraised.md
@@ -0,0 +1,11 @@
+# xrrighteyebrowraised
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's right eyebrow is raised from its initial position when the face was found.
+
+`xrrighteyebrowraised.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrrighteyeclosed.md b/docs/engine/api/aframeevents/xrrighteyeclosed.md
new file mode 100644
index 0000000..65620bb
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrrighteyeclosed.md
@@ -0,0 +1,11 @@
+# xrrighteyeclosed
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's right eye closes.
+
+`xrrighteyeclosed.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrrighteyeopened.md b/docs/engine/api/aframeevents/xrrighteyeopened.md
new file mode 100644
index 0000000..c52ae6c
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrrighteyeopened.md
@@ -0,0 +1,11 @@
+# xrrighteyeopened
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's right eye opens.
+
+`xrrighteyeopened.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrrighteyewinked.md b/docs/engine/api/aframeevents/xrrighteyewinked.md
new file mode 100644
index 0000000..526f62b
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrrighteyewinked.md
@@ -0,0 +1,11 @@
+# xrrighteyewinked
+
+## Description {#description}
+
+This event is emitted by [`xrface`](/docs/engine/api/aframe/xrface) when a tracked face's right eye closes and opens within 750ms while the left eye remains open.
+
+`xrrighteyewinked.detail : {id}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
diff --git a/docs/engine/api/aframeevents/xrtrackingstatus.md b/docs/engine/api/aframeevents/xrtrackingstatus.md
new file mode 100644
index 0000000..5bee247
--- /dev/null
+++ b/docs/engine/api/aframeevents/xrtrackingstatus.md
@@ -0,0 +1,24 @@
+# xrtrackingstatus
+
+## Description {#description}
+
+This event is emitted by [`xrweb`](/docs/engine/api/aframe/xrweb) when [`XR8.XrController`](/docs/engine/api/xrcontroller) is loaded and any time tracking status or reason changes.
+
+`xrtrackingstatus.detail : { status, reason }`
+
+Property | Description
+--------- | -----------
+status | One of `'LIMITED'` or `'NORMAL'`.
+reason | One of `'INITIALIZING'` or `'UNDEFINED'`.
+
+## Example {#example}
+
+```javascript
+const updateScene = ({detail}) => {
+ const {status, reason} = detail
+ if (status === 'NORMAL') {
+ // Show scene
+ }
+}
+this.el.sceneEl.addEventListener('xrtrackingstatus', updateScene)
+```
diff --git a/docs/engine/api/api.md b/docs/engine/api/api.md
new file mode 100644
index 0000000..1fbac22
--- /dev/null
+++ b/docs/engine/api/api.md
@@ -0,0 +1,8 @@
+# 8th Wall Engine API
+
+The Engine API provides lower-level access to 8th Wall’s underlying AR engine, including:
+
+- **8th Wall Camera Pipeline Modules** — Camera pipeline modules developed by 8th Wall.
+- **Custom Camera Pipeline Modules** — Interface for working with the camera frame processing pipeline.
+
+Use the Engine API when you need fine-grained control over camera input, frame processing, or when integrating custom WebGL or computer vision workflows into your project.
diff --git a/docs/engine/api/babylonjs/_category_.json b/docs/engine/api/babylonjs/_category_.json
new file mode 100644
index 0000000..9aeb799
--- /dev/null
+++ b/docs/engine/api/babylonjs/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Babylonjs",
+ "position": 6
+}
diff --git a/docs/engine/api/babylonjs/babylonjs.md b/docs/engine/api/babylonjs/babylonjs.md
new file mode 100644
index 0000000..5f8b4ec
--- /dev/null
+++ b/docs/engine/api/babylonjs/babylonjs.md
@@ -0,0 +1,20 @@
+# XR8.Babylonjs
+
+Babylon.js is a complete JavaScript framework for building 3D games and
+experiences with HTML5 and WebGL. Combined with 8th Wall Web, you can create powerful Web AR
+experiences.
+
+Tutorial Video:
+
+* Integrating Babylon.js and 8th Wall Web:
+
+## Description {#description}
+
+Provides an integration that interfaces with the BabylonJS environment and lifecycle to drive the Babylon.js camera to do virtual overlays.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[xrCameraBehavior](xrcamerabehavior.md) | Get a behavior that can be attached to a Babylon camera to run World Tracking and/or Image Targets.
+[faceCameraBehavior](facecamerabehavior.md) | Get a behavior that can be attached to a Babylon camera to run Face Effects.
diff --git a/docs/engine/api/babylonjs/facecamerabehavior.md b/docs/engine/api/babylonjs/facecamerabehavior.md
new file mode 100644
index 0000000..2bb6980
--- /dev/null
+++ b/docs/engine/api/babylonjs/facecamerabehavior.md
@@ -0,0 +1,83 @@
+---
+sidebar_label: faceCameraBehavior()
+---
+# XR8.Babylonjs.faceCameraBehavior()
+
+`XR8.Babylonjs.faceCameraBehavior(config, faceConfig)`
+
+## Description {#description}
+
+Get a behavior that can be attached to a Babylon camera like so: `camera.addBehavior(XR8.Babylonjs.faceCameraBehavior())`
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+config [Optional] | Configuration parameters to pass to [`XR8.run()`](/docs/engine/api/xr8/run)
+faceConfig [Optional] | Face configuration parameters to pass to [`XR8.FaceController`](/docs/engine/api/facecontroller)
+
+`config` [Optional] is an object with the following properties:
+
+Property | Type | Default | Description
+-------- | ---- | ------- | -----------
+webgl2 [Optional] | `Boolean` | `false` | If true, use WebGL2 if available, otherwise fallback to WebGL1. If false, always use WebGL1.
+ownRunLoop [Optional] | `Boolean` | `true` | If true, XR should use its own run loop. If false, you will provide your own run loop and be responsible for calling [`runPreRender`](/docs/engine/api/xr8/runprerender) and [`runPostRender`](/docs/engine/api/xr8/runpostrender) yourself [Advanced Users only]
+cameraConfig: {direction} [Optional] | `Object` | `{direction: XR8.XrConfig.camera().BACK}` | Desired camera to use. Supported values for `direction` are `XR8.XrConfig.camera().BACK` or `XR8.XrConfig.camera().FRONT`
+glContextConfig [Optional] | `WebGLContextAttributes` | `null` | The attributes to configure the WebGL canvas context.
+allowedDevices [Optional] | [`XR8.XrConfig.device()`](/docs/engine/api/xrconfig/device) | `XR8.XrConfig.device().MOBILE` | Specify the class of devices that the pipeline should run on. If the current device is not in that class, running will fail prior to opening the camera. If allowedDevices is `XR8.XrConfig.device().ANY`, always open the camera. Note that world tracking can only be used with `XR8.XrConfig.device().MOBILE`.
+
+`faceConfig` [Optional] is an object with the following properties:
+
+Parameter | Description
+--------- | -----------
+nearClip [Optional] | The distance from the camera of the near clip plane. By default it will use the Babylon camera.minZ
+farClip [Optional] | The distance from the camera of the far clip plane. By default it will use the Babylon camera.maxZ
+meshGeometry [Optional] | List that contains which parts of the head geometry are visible. Options are: `[XR8.FaceController.MeshGeometry.FACE, XR8.FaceController.MeshGeometry.EYES, XR8.FaceController.MeshGeometry.MOUTH, XR8.FaceController.MeshGeometry.IRIS]`. The default is `[XR8.FaceController.MeshGeometry.FACE]`
+maxDetections [Optional] | The maximum number of faces to detect. The available choices are 1, 2, or 3. The default is 1.
+uvType [Optional] | Specifies which uvs are returned in the facescanning and faceloading event. Options are: `[XR8.FaceController.UvType.STANDARD, XR8.FaceController.UvType.PROJECTED]`. The default is `[XR8.FaceController.UvType.STANDARD]`.
+leftHandedAxes [Optional] | If true, use left-handed coordinates.
+imageTargets [Optional] | If true, flip left and right in the output.
+
+## Returns {#returns}
+
+A Babylon JS behavior that connects the Face Effects engine to the Babylon camera and starts the camera feed and tracking.
+
+## Example {#example}
+
+```javascript
+const startScene = (canvas) => {
+ const engine = new BABYLON.Engine(canvas, true /* antialias */)
+ const scene = new BABYLON.Scene(engine)
+ scene.useRightHandedSystem = false
+
+ const camera = new BABYLON.FreeCamera('camera', new BABYLON.Vector3(0, 0, 0), scene)
+ camera.rotation = new BABYLON.Vector3(0, scene.useRightHandedSystem ? Math.PI : 0, 0)
+ camera.minZ = 0.0001
+ camera.maxZ = 10000
+
+ // Add a light to the scene
+ const directionalLight =
+ new BABYLON.DirectionalLight("DirectionalLight", new BABYLON.Vector3(-5, -10, 7), scene)
+ directionalLight.intensity = 0.5
+
+ // Mesh logic
+ const faceMesh = new BABYLON.Mesh("face", scene);
+ const material = new BABYLON.StandardMaterial("boxMaterial", scene)
+ material.diffuseColor = new BABYLON.Color3(173 / 255.0, 80 / 255.0, 255 / 255.0)
+ faceMesh.material = material
+
+ let facePoints = []
+
+ const runConfig = {
+    cameraConfig: {direction: XR8.XrConfig.camera().FRONT},
+ allowedDevices: XR8.XrConfig.device().ANY,
+ verbose: true,
+ }
+
+ camera.addBehavior(XR8.Babylonjs.faceCameraBehavior(runConfig)) // Connect camera to XR and show camera feed.
+
+ engine.runRenderLoop(() => {
+ scene.render()
+ })
+}
+```
diff --git a/docs/engine/api/babylonjs/observables.md b/docs/engine/api/babylonjs/observables.md
new file mode 100644
index 0000000..c24070e
--- /dev/null
+++ b/docs/engine/api/babylonjs/observables.md
@@ -0,0 +1,147 @@
+# BabylonJS Observables
+
+## Image Target Observables {#image-target-observables}
+
+**onXrImageLoadingObservable**: Fires when detection image loading begins.
+
+`onXrImageLoadingObservable : { imageTargets: {name, type, metadata} }`
+
+**onXrImageScanningObservable**: Fires when all detection images have been loaded and scanning has begun.
+
+`onXrImageScanningObservable : { imageTargets: {name, type, metadata, geometry} }`
+
+**onXrImageFoundObservable**: Fires when an image target is first found.
+
+`onXrImageFoundObservable : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+**onXrImageUpdatedObservable**: Fires when an image target changes position, rotation or scale.
+
+`onXrImageUpdatedObservable : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+**onXrImageLostObservable**: Fires when an image target is no longer being tracked.
+
+`onXrImageLostObservable : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+## Face Effects Observables {#face-effects-observables}
+
+**onFaceLoadingObservable**: Fires when loading begins for additional face AR resources.
+
+`onFaceLoadingObservable : {maxDetections, pointsPerDetection, indices, uvs}`
+
+**onFaceScanningObservable**: Fires when all face AR resources have been loaded and scanning has begun.
+
+`onFaceScanningObservable: {maxDetections, pointsPerDetection, indices, uvs}`
+
+**onFaceFoundObservable**: Fires when a face is first found.
+
+`onFaceFoundObservable : {id, transform, attachmentPoints, vertices, normals, uvsInCameraFrame}`
+
+**onFaceUpdatedObservable**: Fires when a face is subsequently found.
+
+`onFaceUpdatedObservable : {id, transform, attachmentPoints, vertices, normals, uvsInCameraFrame}`
+
+**onFaceLostObservable**: Fires when a face is no longer being tracked.
+
+`onFaceLostObservable : {id}`
+
+**onMouthOpenedObservable**: Fires when a tracked face's mouth opens.
+
+`onMouthOpenedObservable : {id}`
+
+**onMouthClosedObservable**: Fires when a tracked face's mouth closes.
+
+`onMouthClosedObservable : {id}`
+
+**onLeftEyeOpenedObservable**: Fires when a tracked face's left eye opens.
+
+`onLeftEyeOpenedObservable : {id}`
+
+**onLeftEyeClosedObservable**: Fires when a tracked face's left eye closes.
+
+`onLeftEyeClosedObservable : {id}`
+
+**onRightEyeOpenedObservable**: Fires when a tracked face's right eye opens.
+
+`onRightEyeOpenedObservable : {id}`
+
+**onRightEyeClosedObservable**: Fires when a tracked face's right eye closes.
+
+`onRightEyeClosedObservable : {id}`
+
+**onLeftEyebrowRaisedObservable**: Fires when a tracked face's left eyebrow is raised from its initial position when the face was found.
+
+`onLeftEyebrowRaisedObservable : {id}`
+
+**onLeftEyebrowLoweredObservable**: Fires when a tracked face's left eyebrow is lowered to its initial position when the face was found.
+
+`onLeftEyebrowLoweredObservable : {id}`
+
+**onRightEyebrowRaisedObservable**: Fires when a tracked face's right eyebrow is raised from its position when the face was found.
+
+`onRightEyebrowRaisedObservable : {id}`
+
+**onRightEyebrowLoweredObservable**: Fires when a tracked face's right eyebrow is lowered to its initial position when the face was found.
+
+`onRightEyebrowLoweredObservable : {id}`
+
+**onLeftEyeWinkedObservable**: Fires when a tracked face's left eye closes and opens within 750ms while the right eye remains open.
+
+`onLeftEyeWinkedObservable : {id}`
+
+**onRightEyeWinkedObservable**: Fires when a tracked face's right eye closes and opens within 750ms while the left eye remains open.
+
+`onRightEyeWinkedObservable : {id}`
+
+**onBlinkedObservable**: Fires when a tracked face's eyes blink.
+
+`onBlinkedObservable : {id}`
+
+**onInterPupillaryDistanceObservable**: Fires when a tracked face's distance in millimeters between the centers of each pupil is first detected.
+
+`onInterPupillaryDistanceObservable : {id, interpupillaryDistance}`
+
+
+#### Image Target Example {#image-target-example}
+
+```javascript
+scene.onXrImageUpdatedObservable.add(e => {
+ target.position.copyFrom(e.position)
+ target.rotationQuaternion.copyFrom(e.rotation)
+ target.scaling.set(e.scale, e.scale, e.scale)
+})
+```
+
+#### Face Effects Example {#face-effects-example}
+
+```javascript
+// this is called when the face is first found. It provides the static information about the
+// face such as the UVs and indices
+scene.onFaceLoadingObservable.add((event) => {
+ const {indices, maxDetections, pointsPerDetection, uvs} = event
+
+ // Babylon expects all vertex data to be a flat list of numbers
+ facePoints = Array(pointsPerDetection)
+ for (let i = 0; i < pointsPerDetection; i++) {
+ const facePoint = BABYLON.MeshBuilder.CreateBox("box", {size: 0.02}, scene)
+ facePoint.material = material
+ facePoint.parent = faceMesh
+ facePoints[i] = facePoint
+ }
+})
+
+// this is called each time the face is updated which is on a per-frame basis
+scene.onFaceUpdatedObservable.add((event) => {
+ const {vertices, normals, transform} = event;
+ const {scale, position, rotation} = transform
+
+ vertices.map((v, i) => {
+ facePoints[i].position.x = v.x
+ facePoints[i].position.y = v.y
+ facePoints[i].position.z = v.z
+ })
+
+ faceMesh.scalingDeterminant = scale
+ faceMesh.position = position
+ faceMesh.rotationQuaternion = rotation
+})
+```
diff --git a/docs/engine/api/babylonjs/xrcamerabehavior.md b/docs/engine/api/babylonjs/xrcamerabehavior.md
new file mode 100644
index 0000000..f466835
--- /dev/null
+++ b/docs/engine/api/babylonjs/xrcamerabehavior.md
@@ -0,0 +1,71 @@
+---
+sidebar_label: xrCameraBehavior()
+---
+# XR8.Babylonjs.xrCameraBehavior()
+
+`XR8.Babylonjs.xrCameraBehavior(config, xrConfig)`
+
+## Description {#description}
+
+Get a behavior that can be attached to a Babylon camera like so: `camera.addBehavior(XR8.Babylonjs.xrCameraBehavior())`
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+config [Optional] | Configuration parameters to pass to [`XR8.run()`](/docs/engine/api/xr8/run)
+xrConfig [Optional] | Configuration parameters to pass to [`XR8.XrController`](/docs/engine/api/xrcontroller)
+
+`config` [Optional] is an object with the following properties:
+
+Property | Type | Default | Description
+-------- | ---- | ------- | -----------
+webgl2 [Optional] | `Boolean` | `false` | If true, use WebGL2 if available, otherwise fallback to WebGL1. If false, always use WebGL1.
+ownRunLoop [Optional] | `Boolean` | `false` | If true, XR should use its own run loop. If false, you will provide your own run loop and be responsible for calling [`runPreRender`](/docs/engine/api/xr8/runprerender) and [`runPostRender`](/docs/engine/api/xr8/runpostrender) yourself [Advanced Users only]
+cameraConfig: {direction} [Optional] | `Object` | `{direction: XR8.XrConfig.camera().BACK}` | Desired camera to use. Supported values for `direction` are `XR8.XrConfig.camera().BACK` or `XR8.XrConfig.camera().FRONT`
+glContextConfig [Optional] | `WebGLContextAttributes` | `null` | The attributes to configure the WebGL canvas context.
+allowedDevices [Optional] | [`XR8.XrConfig.device()`](/docs/engine/api/xrconfig/device) | `XR8.XrConfig.device().MOBILE` | Specify the class of devices that the pipeline should run on. If the current device is not in that class, running will fail prior to opening the camera. If allowedDevices is `XR8.XrConfig.device().ANY`, always open the camera. Note that world tracking can only be used with `XR8.XrConfig.device().MOBILE`.
+
+`xrConfig` [Optional] is an object with the following properties:
+
+Parameter | Description
+--------- | -----------
+enableLighting [Optional] | If true, return an estimate of lighting information.
+enableWorldPoints [Optional] | If true, return the map points used for tracking.
+disableWorldTracking [Optional] | If true, turn off SLAM tracking for efficiency.
+imageTargets [Optional] | List of names of the image target to detect. Can be modified at runtime. Note: All currently active image targets will be replaced with the ones specified in this list.
+leftHandedAxes [Optional] | If true, use left-handed coordinates.
+mirroredDisplay [Optional] | If true, flip left and right in the output.
+
+## Returns {#returns}
+
+A Babylon JS behavior that connects the XR engine to the Babylon camera and starts the camera feed and tracking.
+
+## Example {#example}
+
+```javascript
+let surface, engine, scene, camera
+
+const startScene = () => {
+ const canvas = document.getElementById('renderCanvas')
+
+ engine = new BABYLON.Engine(canvas, true, { stencil: true, preserveDrawingBuffer: true })
+ engine.enableOfflineSupport = false
+
+ scene = new BABYLON.Scene(engine)
+ camera = new BABYLON.FreeCamera('camera', new BABYLON.Vector3(0, 3, 0), scene)
+
+ initXrScene({ scene, camera }) // Add objects to the scene and set starting camera position.
+
+ // Connect the camera to the XR engine and show camera feed
+ camera.addBehavior(XR8.Babylonjs.xrCameraBehavior())
+
+ engine.runRenderLoop(() => {
+ scene.render()
+ })
+
+ window.addEventListener('resize', () => {
+ engine.resize()
+ })
+}
+```
diff --git a/docs/engine/api/camerapipelinemodule/_category_.json b/docs/engine/api/camerapipelinemodule/_category_.json
new file mode 100644
index 0000000..a7ed970
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "CameraPipelineModule",
+ "position": 8
+}
diff --git a/docs/engine/api/camerapipelinemodule/index.md b/docs/engine/api/camerapipelinemodule/index.md
new file mode 100644
index 0000000..cfadf37
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/index.md
@@ -0,0 +1,36 @@
+# CameraPipelineModule
+
+8th Wall camera applications are built using a camera pipeline module framework. Applications install modules which then control the behavior of the application at runtime.
+
+Refer to [`XR8.addCameraPipelineModule()`](/docs/engine/api/xr8/addcamerapipelinemodule) for details on adding camera pipeline modules to your application.
+
+A camera pipeline module object must have a **.name** string which is unique within the application. It should implement one or more of the following camera lifecycle methods. These methods will be executed at the appropriate point in the run loop.
+
+During the main runtime of an application, each camera frame goes through the following cycle:
+
+`onBeforeRun` -> `onCameraStatusChange` (`requesting` -> `hasStream` -> `hasVideo` | `failed`) -> `onStart` -> `onAttach` -> `onProcessGpu` -> `onProcessCpu` -> `onUpdate` -> `onRender`
+
+Camera modules should implement one or more of the following camera lifecycle methods:
+
+Function | Description
+-------- | -----------
+[onAppResourcesLoaded](onappresourcesloaded.md) | Called when we have received the resources attached to an app from the server.
+[onAttach](onattach.md) | Called before the first time a module receives frame updates. It is called on modules that were added either before or after the pipeline is running.
+[onBeforeRun](onbeforerun.md) | Called immediately after [`XR8.run()`](/docs/engine/api/xr8/run). If any promises are returned, XR will wait on all promises before continuing.
+[onCameraStatusChange](oncamerastatuschange.md) | Called when a change occurs during the camera permissions request.
+[onCanvasSizeChange](oncanvassizechange.md) | Called when the canvas changes size.
+[onDetach](ondetach.md) | is called after the last time a module receives frame updates. This is either after the engine is stopped or the module is manually removed from the pipeline, whichever comes first.
+[onDeviceOrientationChange](ondeviceorientationchange.md) | Called when the device changes landscape/portrait orientation.
+[onException](onexception.md) | Called when an error occurs in XR. Called with the error object.
+[onPaused](onpaused.md) | Called when [`XR8.pause()`](/docs/engine/api/xr8/pause) is called.
+[onProcessCpu](onprocesscpu.md) | Called to read results of GPU processing and return usable data.
+[onProcessGpu](onprocessgpu.md) | Called to start GPU processing.
+[onRemove](onremove.md) | is called when a module is removed from the pipeline.
+[onRender](onrender.md) | Called after onUpdate. This is the time for the rendering engine to issue any WebGL drawing commands. If an application is providing its own run loop and is relying on [`XR8.runPreRender()`](/docs/engine/api/xr8/runprerender) and [`XR8.runPostRender()`](/docs/engine/api/xr8/runpostrender), this method is not called and all rendering must be coordinated by the external run loop.
+[onResume](onresume.md) | Called when [`XR8.resume()`](/docs/engine/api/xr8/resume) is called.
+[onStart](onstart.md) | Called when XR starts. First callback after [`XR8.run()`](/docs/engine/api/xr8/run) is called.
+[onUpdate](onupdate.md) | Called to update the scene before render. Data returned by modules in [`onProcessGpu`](onprocessgpu.md) and [`onProcessCpu`](onprocesscpu.md) will be present as processGpu.modulename and processCpu.modulename where the name is given by module.name = "modulename".
+[onVideoSizeChange](onvideosizechange.md) | Called when the video feed changes size.
+[requiredPermissions](requiredpermissions.md) | Modules can indicate what browser capabilities they require that may need permissions requests. These can be used by the framework to request appropriate permissions if absent, or to create components that request the appropriate permissions before running XR.
+
+Note: Camera modules that implement [`onProcessGpu`](onprocessgpu.md) or [`onProcessCpu`](onprocesscpu.md) can provide data to subsequent stages of the pipeline. This is done by the module's name.
diff --git a/docs/engine/api/camerapipelinemodule/onappresourcesloaded.md b/docs/engine/api/camerapipelinemodule/onappresourcesloaded.md
new file mode 100644
index 0000000..aed516a
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onappresourcesloaded.md
@@ -0,0 +1,26 @@
+# onAppResourcesLoaded()
+
+`onAppResourcesLoaded: ({ framework, imageTargets, version })`
+
+## Description {#description}
+
+`onAppResourcesLoaded()` is called when we have received the resources attached to an app from the server.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+framework | The framework bindings for this module for dispatching events.
+imageTargets [Optional] | An array of image targets with the fields {imagePath, metadata, name}
+version | The engine version, e.g. 14.0.8.949
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+  name: 'myPipelineModule',
+  onAppResourcesLoaded: ({ framework, version, imageTargets }) => {
+ //...
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onattach.md b/docs/engine/api/camerapipelinemodule/onattach.md
new file mode 100644
index 0000000..ebe318b
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onattach.md
@@ -0,0 +1,35 @@
+# onAttach()
+
+`onAttach: ({framework, canvas, GLctx, computeCtx, isWebgl2, orientation, videoWidth, videoHeight, canvasWidth, canvasHeight, status, stream, video, version, imageTargets, config})`
+
+## Description {#description}
+
+`onAttach()` is called before the first time a module receives frame updates. It is called on modules that were added either before or after the pipeline is running. It includes all the most recent data available from:
+
+* [`onStart()`](./onstart.md)
+* [`onDeviceOrientationChange()`](./ondeviceorientationchange.md)
+* [`onCanvasSizeChange()`](./oncanvassizechange.md)
+* [`onVideoSizeChange()`](./onvideosizechange.md)
+* [`onCameraStatusChange()`](./oncamerastatuschange.md)
+* [`onAppResourcesLoaded()`](./onappresourcesloaded.md)
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+framework | The framework bindings for this module for dispatching events.
+canvas | The canvas that backs GPU processing and user display.
+GLctx | The drawing canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+computeCtx | The compute canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+isWebgl2 | True if `GLctx` is a `WebGL2RenderingContext`.
+orientation | The rotation of the UI from portrait, in degrees (-90, 0, 90, 180).
+videoWidth | The width of the camera feed, in pixels.
+videoHeight | The height of the camera feed, in pixels.
+canvasWidth | The width of the `GLctx` canvas, in pixels.
+canvasHeight | The height of the `GLctx` canvas, in pixels.
+status | One of [ `'requesting'`, `'hasStream'`, `'hasVideo'`, `'failed'` ]
+stream | The [`MediaStream`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStream) associated with the camera feed.
+video | The video dom element displaying the stream.
+version [Optional] | The engine version, e.g. 14.0.8.949, if app resources are loaded.
+imageTargets [Optional] | An array of image targets with the fields `{imagePath, metadata, name}`
+config | The configuration parameters that were passed to [`XR8.run()`](/docs/engine/api/xr8/run).
diff --git a/docs/engine/api/camerapipelinemodule/onbeforerun.md b/docs/engine/api/camerapipelinemodule/onbeforerun.md
new file mode 100644
index 0000000..c27459d
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onbeforerun.md
@@ -0,0 +1,13 @@
+# onBeforeRun()
+
+`onBeforeRun: ({ config })`
+
+## Description {#description}
+
+`onBeforeRun` is called immediately after [XR8.run()](/docs/engine/api/xr8/run). If any promises are returned, XR will wait on all promises before continuing.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+config | The configuration parameters that were passed to [XR8.run()](/docs/engine/api/xr8/run).
diff --git a/docs/engine/api/camerapipelinemodule/oncamerastatuschange.md b/docs/engine/api/camerapipelinemodule/oncamerastatuschange.md
new file mode 100644
index 0000000..8cc5caa
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/oncamerastatuschange.md
@@ -0,0 +1,48 @@
+# onCameraStatusChange()
+
+`onCameraStatusChange: ({ status, stream, video, config })`
+
+## Description {#description}
+
+`onCameraStatusChange()` is called when a change occurs during the camera permissions request.
+
+Called with the status, and, if applicable, a reference to the newly available data. The typical status flow will be:
+
+`requesting` -> `hasStream` -> `hasVideo`.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+status | One of [ `'requesting'`, `'hasStream'`, `'hasVideo'`, `'failed'` ]
+stream: [Optional] | The [`MediaStream`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStream) associated with the camera feed, if status is `'hasStream'`.
+video: [Optional] | The video DOM element displaying the stream, if status is `'hasVideo'`.
+config | The configuration parameters that were passed to [`XR8.run()`](/docs/engine/api/xr8/run), if status is `'requesting'`.
+
+The `status` parameter has the following states:
+
+State | Description
+----- | -----------
+requesting | In `'requesting'`, the browser is opening the camera, and if applicable, checking the user permissons. In this state, it is appropriate to display a prompt to the user to accept camera permissions.
+hasStream | Once the user permissions are granted and the camera is successfully opened, the status switches to `'hasStream'` and any user prompts regarding permissions can be dismissed.
+hasVideo | Once camera frame data starts to be available for processing, the status switches to `'hasVideo'`, and the camera feed can begin displaying.
+failed | If the camera feed fails to open, the status is `'failed'`. In this case it's possible that the user has denied permissions, and so helping them to re-enable permissions is advisable.
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'camerastartupmodule',
+  onCameraStatusChange: ({status}) => {
+ if (status == 'requesting') {
+ myApplication.showCameraPermissionsPrompt()
+ } else if (status == 'hasStream') {
+ myApplication.dismissCameraPermissionsPrompt()
+ } else if (status == 'hasVideo') {
+      myApplication.startMainApplication()
+ } else if (status == 'failed') {
+ myApplication.promptUserToChangeBrowserSettings()
+ }
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/oncanvassizechange.md b/docs/engine/api/camerapipelinemodule/oncanvassizechange.md
new file mode 100644
index 0000000..6892e0f
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/oncanvassizechange.md
@@ -0,0 +1,29 @@
+# onCanvasSizeChange()
+
+`onCanvasSizeChange: ({ GLctx, computeCtx, videoWidth, videoHeight, canvasWidth, canvasHeight })`
+
+## Description {#description}
+
+`onCanvasSizeChange()` is called when the canvas changes size. Called with dimensions of video and canvas.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+GLctx | The drawing canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+computeCtx | The compute canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+videoWidth | The width of the camera feed, in pixels.
+videoHeight | The height of the camera feed, in pixels.
+canvasWidth | The width of the `GLctx` canvas, in pixels.
+canvasHeight | The height of the `GLctx` canvas, in pixels.
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onCanvasSizeChange: ({ GLctx, videoWidth, videoHeight, canvasWidth, canvasHeight }) => {
+ myHandleResize({ GLctx, videoWidth, videoHeight, canvasWidth, canvasHeight })
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/ondetach.md b/docs/engine/api/camerapipelinemodule/ondetach.md
new file mode 100644
index 0000000..1c493e3
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/ondetach.md
@@ -0,0 +1,13 @@
+# onDetach()
+
+`onDetach: ({framework})`
+
+## Description {#description}
+
+`onDetach` is called after the last time a module receives frame updates. This is either after the engine is stopped or the module is manually removed from the pipeline, whichever comes first.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+framework | The framework bindings for this module for dispatching events.
diff --git a/docs/engine/api/camerapipelinemodule/ondeviceorientationchange.md b/docs/engine/api/camerapipelinemodule/ondeviceorientationchange.md
new file mode 100644
index 0000000..b10555d
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/ondeviceorientationchange.md
@@ -0,0 +1,28 @@
+# onDeviceOrientationChange()
+
+`onDeviceOrientationChange: ({ GLctx, computeCtx, videoWidth, videoHeight, orientation })`
+
+## Description {#description}
+
+`onDeviceOrientationChange()` is called when the device changes landscape/portrait orientation.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+GLctx | The drawing canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+computeCtx | The compute canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+videoWidth | The width of the camera feed, in pixels.
+videoHeight | The height of the camera feed, in pixels.
+orientation | The rotation of the UI from portrait, in degrees (-90, 0, 90, 180).
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onDeviceOrientationChange: ({ GLctx, videoWidth, videoHeight, orientation }) => {
+ // handleResize({ GLctx, videoWidth, videoHeight, orientation })
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onexception.md b/docs/engine/api/camerapipelinemodule/onexception.md
new file mode 100644
index 0000000..cb13688
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onexception.md
@@ -0,0 +1,24 @@
+# onException()
+
+`onException: (error)`
+
+## Description {#description}
+
+`onException()` is called when an error occurs in XR. Called with the error object.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+error | The error object that was thrown
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onException : (error) => {
+ console.error('XR threw an exception', error)
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onpaused.md b/docs/engine/api/camerapipelinemodule/onpaused.md
new file mode 100644
index 0000000..85bd33f
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onpaused.md
@@ -0,0 +1,22 @@
+# onPaused()
+
+`onPaused: ()`
+
+## Description {#description}
+
+`onPaused()` is called when [`XR8.pause()`](/docs/engine/api/xr8/pause) is called.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onPaused: () => {
+ console.log('pausing application')
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onprocesscpu.md b/docs/engine/api/camerapipelinemodule/onprocesscpu.md
new file mode 100644
index 0000000..b26cb54
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onprocesscpu.md
@@ -0,0 +1,40 @@
+# onProcessCpu()
+
+`onProcessCpu: ({ framework, frameStartResult, processGpuResult })`
+
+## Description {#description}
+
+`onProcessCpu()` is called to read results of GPU processing and return usable data. Called with
+`{ frameStartResult, processGpuResult }`. Data returned by modules in
+[`onProcessGpu`](onprocessgpu.md) will be present as `processGpu.modulename` where the name is given
+by module.name = "modulename".
+
+Parameter | Description
+--------- | -----------
+framework | The framework bindings for this module for dispatching events.
+frameStartResult | The data that was provided at the beginning of a frame.
+processGpuResult | Data returned by all installed modules during onProcessGpu.
+
+## Returns {#returns}
+
+Any data that you wish to provide to [`onUpdate`](onupdate.md) should be returned. It will be
+provided to that method as `processCpuResult.modulename`
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onProcessCpu: ({ frameStartResult, processGpuResult }) => {
+ const GLctx = frameStartResult.GLctx
+ const { cameraTexture } = frameStartResult
+ const { camerapixelarray, mycamerapipelinemodule } = processGpuResult
+
+ // Do something interesting with mycamerapipelinemodule.gpuDataA and mycamerapipelinemodule.gpuDataB
+ ...
+
+ // These fields will be provided to onUpdate
+ return {cpuDataA, cpuDataB}
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onprocessgpu.md b/docs/engine/api/camerapipelinemodule/onprocessgpu.md
new file mode 100644
index 0000000..a722aa8
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onprocessgpu.md
@@ -0,0 +1,56 @@
+# onProcessGpu()
+
+`onProcessGpu: ({ framework, frameStartResult })`
+
+## Description {#description}
+
+`onProcessGpu()` is called to start GPU processing.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+framework | { dispatchEvent(eventName, detail) } : Emits a named event with the supplied detail.
+frameStartResult | { cameraTexture, computeTexture, GLctx, computeCtx, textureWidth, textureHeight, orientation, videoTime, repeatFrame }
+
+The `frameStartResult` parameter has the following properties:
+
+Property | Description
+-------- | -----------
+cameraTexture | The drawing canvas's [`WebGLTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) containing camera feed data.
+computeTexture | The compute canvas's [`WebGLTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) containing camera feed data.
+GLctx | The drawing canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+computeCtx | The compute canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+textureWidth | The width (in pixels) of the camera feed texture.
+textureHeight | The height (in pixels) of the camera feed texture.
+orientation | The rotation of the UI from portrait, in degrees (-90, 0, 90, 180).
+videoTime | The timestamp of this video frame.
+repeatFrame | True if the camera feed has not updated since the last call.
+
+## Returns {#returns}
+
+Any data that you wish to provide to [`onProcessCpu`](onprocesscpu.md) and [`onUpdate`](onupdate.md) should be
+returned. It will be provided to those methods as `processGpuResult.modulename`
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onProcessGpu: ({frameStartResult}) => {
+ const {cameraTexture, GLctx, textureWidth, textureHeight} = frameStartResult
+
+ if(!cameraTexture.name){
+ console.error("[index] Camera texture does not have a name")
+ }
+
+ const restoreParams = XR8.GlTextureRenderer.getGLctxParameters(GLctx, [GLctx.TEXTURE0])
+ // Do relevant GPU processing here
+ ...
+ XR8.GlTextureRenderer.setGLctxParameters(GLctx, restoreParams)
+
+ // These fields will be provided to onProcessCpu and onUpdate
+ return {gpuDataA, gpuDataB}
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onremove.md b/docs/engine/api/camerapipelinemodule/onremove.md
new file mode 100644
index 0000000..7436ac3
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onremove.md
@@ -0,0 +1,13 @@
+# onRemove()
+
+`onRemove: ({framework})`
+
+## Description {#description}
+
+`onRemove` is called when a module is removed from the pipeline.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+framework | The framework bindings for this module for dispatching events.
diff --git a/docs/engine/api/camerapipelinemodule/onrender.md b/docs/engine/api/camerapipelinemodule/onrender.md
new file mode 100644
index 0000000..0919193
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onrender.md
@@ -0,0 +1,23 @@
+# onRender()
+
+`onRender: ()`
+
+## Description {#description}
+
+`onRender()` is called after [`onUpdate`](onupdate.md). This is the time for the rendering engine to issue any WebGL drawing commands. If an application is providing its own run loop and is relying on [`XR8.runPreRender()`](/docs/engine/api/xr8/runprerender) and [`XR8.runPostRender()`](/docs/engine/api/xr8/runpostrender), this method is not called and all rendering must be coordinated by the external run loop.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onRender: () => {
+ // This is already done by XR8.Threejs.pipelineModule() but is provided here as an illustration.
+ XR8.Threejs.xrScene().renderer.render()
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onresume.md b/docs/engine/api/camerapipelinemodule/onresume.md
new file mode 100644
index 0000000..779893c
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onresume.md
@@ -0,0 +1,22 @@
+# onResume()
+
+`onResume: ()`
+
+## Description {#description}
+
+`onResume()` is called when [`XR8.resume()`](/docs/engine/api/xr8/resume) is called.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onResume: () => {
+ console.log('resuming application')
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onstart.md b/docs/engine/api/camerapipelinemodule/onstart.md
new file mode 100644
index 0000000..1feea78
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onstart.md
@@ -0,0 +1,45 @@
+# onStart()
+
+`onStart: ({ canvas, GLctx, computeCtx, isWebgl2, orientation, videoWidth, videoHeight, canvasWidth, canvasHeight, config })`
+
+## Description {#description}
+
+`onStart()` is called when XR starts.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+canvas | The canvas that backs GPU processing and user display.
+GLctx | The drawing canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+computeCtx | The compute canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+isWebgl2 | True if `GLctx` is a `WebGL2RenderingContext`.
+orientation | The rotation of the UI from portrait, in degrees (-90, 0, 90, 180).
+videoWidth | The width of the camera feed, in pixels.
+videoHeight | The height of the camera feed, in pixels.
+canvasWidth | The width of the `GLctx` canvas, in pixels.
+canvasHeight | The height of the `GLctx` canvas, in pixels.
+config | The configuration parameters that were passed to [`XR8.run()`](/docs/engine/api/xr8/run).
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onStart: ({canvasWidth, canvasHeight}) => {
+ // Get the three.js scene. This was created by XR8.Threejs.pipelineModule().onStart(). The
+ // reason we can access it here now is because 'mycamerapipelinemodule' was installed after
+ // XR8.Threejs.pipelineModule().
+ const {scene, camera} = XR8.Threejs.xrScene()
+
+ // Add some objects to the scene and set the starting camera position.
+ myInitXrScene({scene, camera})
+
+ // Sync the xr controller's 6DoF position and camera parameters with our scene.
+ XR8.XrController.updateCameraProjectionMatrix({
+ origin: camera.position,
+ facing: camera.quaternion,
+ })
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onupdate.md b/docs/engine/api/camerapipelinemodule/onupdate.md
new file mode 100644
index 0000000..c52882f
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onupdate.md
@@ -0,0 +1,32 @@
+# onUpdate()
+
+`onUpdate: ({ framework, frameStartResult, processGpuResult, processCpuResult })`
+
+## Description {#description}
+
+`onUpdate()` is called to update the scene before render. Called with `{ framework, frameStartResult, processGpuResult, processCpuResult }`. Data returned by modules in [`onProcessGpu`](onprocessgpu.md) and [`onProcessCpu`](onprocesscpu.md) will be present as `processGpu.modulename` and `processCpu.modulename` where the name is given by module.name = "modulename".
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+framework | The framework bindings for this module for dispatching events.
+frameStartResult | The data that was provided at the beginning of a frame.
+processGpuResult | Data returned by all installed modules during [`onProcessGpu`](onprocessgpu.md).
+processCpuResult | Data returned by all installed modules during [`onProcessCpu`](onprocesscpu.md).
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onUpdate: ({ frameStartResult, processGpuResult, processCpuResult }) => {
+ if (!processCpuResult.reality) {
+ return
+ }
+ const {rotation, position, intrinsics} = processCpuResult.reality
+ const {cpuDataA, cpuDataB} = processCpuResult.mycamerapipelinemodule
+ // ...
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/onvideosizechange.md b/docs/engine/api/camerapipelinemodule/onvideosizechange.md
new file mode 100644
index 0000000..fd6cba5
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/onvideosizechange.md
@@ -0,0 +1,30 @@
+# onVideoSizeChange()
+
+`onVideoSizeChange: ({ GLctx, computeCtx, videoWidth, videoHeight, canvasWidth, canvasHeight, orientation })`
+
+## Description {#description}
+
+`onVideoSizeChange()` is called when the canvas changes size. Called with dimensions of video and canvas as well as device orientation.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+GLctx | The drawing canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+computeCtx | The compute canvas's `WebGLRenderingContext` or `WebGL2RenderingContext`.
+videoWidth | The width of the camera feed, in pixels.
+videoHeight | The height of the camera feed, in pixels.
+canvasWidth | The width of the `GLctx` canvas, in pixels.
+canvasHeight | The height of the `GLctx` canvas, in pixels.
+orientation | The rotation of the UI from portrait, in degrees (-90, 0, 90, 180).
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onVideoSizeChange: ({ GLctx, videoWidth, videoHeight, canvasWidth, canvasHeight }) => {
+ myHandleResize({ GLctx, videoWidth, videoHeight, canvasWidth, canvasHeight })
+ },
+})
+```
diff --git a/docs/engine/api/camerapipelinemodule/requiredpermissions.md b/docs/engine/api/camerapipelinemodule/requiredpermissions.md
new file mode 100644
index 0000000..8dc1697
--- /dev/null
+++ b/docs/engine/api/camerapipelinemodule/requiredpermissions.md
@@ -0,0 +1,22 @@
+# requiredPermissions()
+
+`requiredPermissions: ([permissions])`
+
+## Description {#description}
+
+`requiredPermissions` is used to define the list of permissions required by a pipeline module.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+permissions | An array of [`XR8.XrPermissions.permissions()`](/docs/engine/api/xrpermissions/permissions) required by the pipeline module.
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'request-gyro',
+ requiredPermissions: () => ([XR8.XrPermissions.permissions().DEVICE_ORIENTATION]),
+})
+```
diff --git a/docs/engine/api/camerapixelarray/_category_.json b/docs/engine/api/camerapixelarray/_category_.json
new file mode 100644
index 0000000..7f7498d
--- /dev/null
+++ b/docs/engine/api/camerapixelarray/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "CameraPixelArray",
+ "position": 9
+}
diff --git a/docs/engine/api/camerapixelarray/camerapixelarray.md b/docs/engine/api/camerapixelarray/camerapixelarray.md
new file mode 100644
index 0000000..9bb4461
--- /dev/null
+++ b/docs/engine/api/camerapixelarray/camerapixelarray.md
@@ -0,0 +1,11 @@
+# XR8.CameraPixelArray
+
+## Description {#description}
+
+Provides a camera pipeline module that gives access to camera data as a grayscale or color uint8 array.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[pipelineModule](pipelinemodule.md) | A pipeline module that provides the camera texture as an array of RGBA or grayscale pixel values that can be used for CPU image processing.
diff --git a/docs/engine/api/camerapixelarray/pipelinemodule.md b/docs/engine/api/camerapixelarray/pipelinemodule.md
new file mode 100644
index 0000000..0bc25a6
--- /dev/null
+++ b/docs/engine/api/camerapixelarray/pipelinemodule.md
@@ -0,0 +1,52 @@
+---
+sidebar_label: pipelineModule()
+---
+# XR8.CameraPixelArray.pipelineModule()
+
+`XR8.CameraPixelArray.pipelineModule({ luminance, maxDimension, width, height })`
+
+## Description {#description}
+
+A pipeline module that provides the camera texture as an array of RGBA or grayscale pixel values
+that can be used for CPU image processing.
+
+## Parameters {#parameters}
+
+Parameter | Default | Description
+--------- | ------- | -----------
+luminance [Optional] | `false` | If true, output grayscale instead of RGBA
+maxDimension [Optional] | | The size in pixels of the longest dimension of the output image. The shorter dimension will be scaled relative to the size of the camera input so that the image is resized without cropping or distortion.
+width [Optional] | The width of the camera feed texture. | Width of the output image. Ignored if `maxDimension` is specified.
+height [Optional] | The height of the camera feed texture. | Height of the output image. Ignored if `maxDimension` is specified.
+
+## Returns {#returns}
+
+Return value is an object made available to [`onProcessCpu`](/docs/engine/api/camerapipelinemodule/onprocesscpu) and
+[`onUpdate`](/docs/engine/api/camerapipelinemodule/onupdate) as:
+
+`processGpuResult.camerapixelarray: { rows, cols, rowBytes, pixels, srcTex }`
+
+Property | Description
+--------- | -----------
+rows | Height in pixels of the output image.
+cols | Width in pixels of the output image.
+rowBytes | Number of bytes per row of the output image.
+pixels | A `UInt8Array` of pixel data.
+srcTex | A texture containing the source image for the returned pixels.
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule(XR8.CameraPixelArray.pipelineModule({ luminance: true }))
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onProcessCpu: ({ processGpuResult }) => {
+ const { camerapixelarray } = processGpuResult
+ if (!camerapixelarray || !camerapixelarray.pixels) {
+ return
+ }
+ const { rows, cols, rowBytes, pixels } = camerapixelarray
+    // Process the grayscale pixel data here.
+  },
+})
+```
diff --git a/docs/engine/api/canvasscreenshot/_category_.json b/docs/engine/api/canvasscreenshot/_category_.json
new file mode 100644
index 0000000..b752a9d
--- /dev/null
+++ b/docs/engine/api/canvasscreenshot/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "CanvasScreenshot",
+ "position": 10
+}
diff --git a/docs/engine/api/canvasscreenshot/canvasscreenshot.md b/docs/engine/api/canvasscreenshot/canvasscreenshot.md
new file mode 100644
index 0000000..66770ee
--- /dev/null
+++ b/docs/engine/api/canvasscreenshot/canvasscreenshot.md
@@ -0,0 +1,14 @@
+# XR8.CanvasScreenshot
+
+## Description {#description}
+
+Provides a camera pipeline module that can generate screenshots of the current scene.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[configure](configure.md) | Configures the expected result of canvas screenshots.
+[pipelineModule](pipelinemodule.md) | Creates a camera pipeline module that, when installed, receives callbacks on when the camera has started and when the canvas size has changed.
+[setForegroundCanvas](setforegroundcanvas.md) | Sets a foreground canvas to be displayed on top of the camera canvas. This must be the same dimensions as the camera canvas.
+[takeScreenshot](takescreenshot.md) | Returns a Promise that when resolved, provides a buffer containing the JPEG compressed image. When rejected, an error message is provided.
diff --git a/docs/engine/api/canvasscreenshot/configure.md b/docs/engine/api/canvasscreenshot/configure.md
new file mode 100644
index 0000000..843dcd2
--- /dev/null
+++ b/docs/engine/api/canvasscreenshot/configure.md
@@ -0,0 +1,27 @@
+---
+sidebar_label: configure()
+---
+# XR8.CanvasScreenshot.configure()
+
+`XR8.CanvasScreenshot.configure({ maxDimension, jpgCompression })`
+
+## Description {#description}
+
+Configures the expected result of canvas screenshots.
+
+## Parameters {#parameters}
+
+Parameter | Default | Description
+--------- | ------- | -----------
+maxDimension [Optional] | `1280` | The value of the largest expected dimension.
+jpgCompression [Optional] | `75` | 1-100 value representing the JPEG compression quality. 100 is little to no loss, and 1 is a very low quality image.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.CanvasScreenshot.configure({ maxDimension: 640, jpgCompression: 50 })
+```
diff --git a/docs/engine/api/canvasscreenshot/pipelinemodule.md b/docs/engine/api/canvasscreenshot/pipelinemodule.md
new file mode 100644
index 0000000..4512dac
--- /dev/null
+++ b/docs/engine/api/canvasscreenshot/pipelinemodule.md
@@ -0,0 +1,24 @@
+---
+sidebar_label: pipelineModule()
+---
+# XR8.CanvasScreenshot.pipelineModule()
+
+`XR8.CanvasScreenshot.pipelineModule()`
+
+## Description {#description}
+
+Creates a camera pipeline module that, when installed, receives callbacks on when the camera has started and when the canvas size has changed.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A CanvasScreenshot pipeline module that can be added via [XR8.addCameraPipelineModule()](/docs/engine/api/xr8/addcamerapipelinemodule).
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule(XR8.CanvasScreenshot.pipelineModule())
+```
diff --git a/docs/engine/api/canvasscreenshot/setforegroundcanvas.md b/docs/engine/api/canvasscreenshot/setforegroundcanvas.md
new file mode 100644
index 0000000..5c81755
--- /dev/null
+++ b/docs/engine/api/canvasscreenshot/setforegroundcanvas.md
@@ -0,0 +1,29 @@
+---
+sidebar_label: setForegroundCanvas()
+---
+# XR8.CanvasScreenshot.setForegroundCanvas()
+
+`XR8.CanvasScreenshot.setForegroundCanvas(canvas)`
+
+## Description {#description}
+
+Sets a foreground canvas to be displayed on top of the camera canvas. This must be the same dimensions as the camera canvas.
+
+Only required if you use separate canvases for camera feed vs virtual objects.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+canvas | The canvas to use as a foreground in the screenshot
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+const myOtherCanvas = document.getElementById('canvas2')
+XR8.CanvasScreenshot.setForegroundCanvas(myOtherCanvas)
+```
diff --git a/docs/engine/api/canvasscreenshot/takescreenshot.md b/docs/engine/api/canvasscreenshot/takescreenshot.md
new file mode 100644
index 0000000..5ecbdf3
--- /dev/null
+++ b/docs/engine/api/canvasscreenshot/takescreenshot.md
@@ -0,0 +1,37 @@
+---
+sidebar_label: takeScreenshot()
+---
+# XR8.CanvasScreenshot.takeScreenshot()
+
+`XR8.CanvasScreenshot.takeScreenshot({ onProcessFrame })`
+
+## Description {#description}
+
+Returns a Promise that when resolved, provides a buffer containing the JPEG compressed image. When rejected, an error message is provided.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+onProcessFrame [Optional] | Callback where you can implement additional drawing to the screenshot 2d canvas.
+
+## Returns {#returns}
+
+A Promise.
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule(XR8.CanvasScreenshot.pipelineModule())
+XR8.CanvasScreenshot.takeScreenshot().then(
+ data => {
+ // myImage is an HTML element
+ const image = document.getElementById('myImage')
+ image.src = 'data:image/jpeg;base64,' + data
+ },
+ error => {
+ console.log(error)
+ // Handle screenshot error.
+  },
+)
+```
diff --git a/docs/engine/api/coachingoverlay/_category_.json b/docs/engine/api/coachingoverlay/_category_.json
new file mode 100644
index 0000000..25f3421
--- /dev/null
+++ b/docs/engine/api/coachingoverlay/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "CoachingOverlay",
+ "position": 11
+}
diff --git a/docs/engine/api/coachingoverlay/coachingoverlay.md b/docs/engine/api/coachingoverlay/coachingoverlay.md
new file mode 100644
index 0000000..7a8d072
--- /dev/null
+++ b/docs/engine/api/coachingoverlay/coachingoverlay.md
@@ -0,0 +1,12 @@
+# CoachingOverlay
+
+## Description {#description}
+
+Provides a module that generates a Coaching Overlay for your Absolute Scale Web AR experience.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[configure](configure.md) | Configures Coaching Overlay settings.
+[pipelineModule](pipelinemodule.md) | Creates a camera pipeline module that, when installed, adds coaching overlay functionality to your project.
diff --git a/docs/engine/api/coachingoverlay/configure.md b/docs/engine/api/coachingoverlay/configure.md
new file mode 100644
index 0000000..0fef99b
--- /dev/null
+++ b/docs/engine/api/coachingoverlay/configure.md
@@ -0,0 +1,32 @@
+---
+sidebar_label: configure()
+---
+# CoachingOverlay.configure()
+
+`CoachingOverlay.configure({ animationColor, promptColor, promptText, disablePrompt })`
+
+## Description {#description}
+
+Configures behavior and look of the Coaching Overlay.
+
+## Parameters (All Optional) {#parameters-all-optional}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+animationColor | `String` | `'white'` | Color of the Coaching Overlay animation. This parameter accepts valid CSS color arguments.
+promptColor | `String` | `'white'` | Color of all the Coaching Overlay text. This parameter accepts valid CSS color arguments.
+promptText | `String` | `'Move device forward and back'` | Sets the text string for the animation explainer text that informs users of the motion they need to make to generate scale.
+disablePrompt | `Boolean` | `false` | Set to true to hide default Coaching Overlay in order to use Coaching Overlay events for a custom overlay.
+
+## Returns {#returns}
+
+None
+
+## Example - Code {#example---code}
+
+```javascript
+CoachingOverlay.configure({
+ animationColor: '#E86FFF',
+ promptText: 'To generate scale push your phone forward and then pull back',
+})
+```
diff --git a/docs/engine/api/coachingoverlay/pipelinemodule.md b/docs/engine/api/coachingoverlay/pipelinemodule.md
new file mode 100644
index 0000000..95f03f1
--- /dev/null
+++ b/docs/engine/api/coachingoverlay/pipelinemodule.md
@@ -0,0 +1,40 @@
+---
+sidebar_label: pipelineModule()
+---
+# CoachingOverlay.pipelineModule()
+
+`CoachingOverlay.pipelineModule()`
+
+## Description {#description}
+
+Creates a pipeline module that, when installed, adds Coaching Overlay functionality to your absolute scale project.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A pipeline module that adds a Coaching Overlay to your project.
+
+## Non-AFrame Example {#non-aframe-example}
+
+```javascript
+// Configured here
+CoachingOverlay.configure({
+ animationColor: '#E86FFF',
+ promptText: 'To generate scale push your phone forward and then pull back',
+})
+XR8.addCameraPipelineModules([
+ XR8.GlTextureRenderer.pipelineModule(),
+ XR8.Threejs.pipelineModule(),
+ XR8.XrController.pipelineModule(),
+ XRExtras.FullWindowCanvas.pipelineModule(),
+ XRExtras.Loading.pipelineModule(),
+ XRExtras.RuntimeError.pipelineModule(),
+ LandingPage.pipelineModule(),
+ // Added here
+ CoachingOverlay.pipelineModule(),
+ ...
+])
+```
diff --git a/docs/engine/api/facecontroller/_category_.json b/docs/engine/api/facecontroller/_category_.json
new file mode 100644
index 0000000..ecdc720
--- /dev/null
+++ b/docs/engine/api/facecontroller/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "FaceController",
+ "position": 12
+}
diff --git a/docs/engine/api/facecontroller/attachmentpoints.md b/docs/engine/api/facecontroller/attachmentpoints.md
new file mode 100644
index 0000000..39d0ff8
--- /dev/null
+++ b/docs/engine/api/facecontroller/attachmentpoints.md
@@ -0,0 +1,55 @@
+---
+sidebar_label: AttachmentPoints
+---
+# XR8.FaceController.AttachmentPoints
+
+Enumeration
+
+## Description {#description}
+
+Points of the face you can anchor content to.
+
+## Properties {#properties}
+
+Property | Value | Description
+-------- | ----- | -----------
+FOREHEAD | `'forehead'` | Forehead
+RIGHT_EYEBROW_INNER | `'rightEyebrowInner'` | Inner side of right eyebrow
+RIGHT_EYEBROW_MIDDLE | `'rightEyebrowMiddle'` | Middle of right eyebrow
+RIGHT_EYEBROW_OUTER | `'rightEyebrowOuter'` | Outer side of right eyebrow
+LEFT_EYEBROW_INNER | `'leftEyebrowInner'` | Inner side of left eyebrow
+LEFT_EYEBROW_MIDDLE | `'leftEyebrowMiddle'` | Middle of left eyebrow
+LEFT_EYEBROW_OUTER | `'leftEyebrowOuter'` | Outer side of left eyebrow
+LEFT_EAR | `'leftEar'` | Left ear
+RIGHT_EAR | `'rightEar'` | Right ear
+LEFT_CHEEK | `'leftCheek'` | Left cheek
+RIGHT_CHEEK | `'rightCheek'` | Right cheek
+NOSE_BRIDGE | `'noseBridge'` | Bridge of the nose
+NOSE_TIP | `'noseTip'` | Tip of the nose
+LEFT_EYE | `'leftEye'` | Left eye
+RIGHT_EYE | `'rightEye'` | Right eye
+LEFT_EYE_OUTER_CORNER | `'leftEyeOuterCorner'` | Outer corner of left eye
+RIGHT_EYE_OUTER_CORNER | `'rightEyeOuterCorner'` | Outer corner of right eye
+LEFT_IRIS | `'leftIris'` | Iris of the left eye
+RIGHT_IRIS | `'rightIris'` | Iris of the right eye
+LEFT_UPPER_EYELID | `'leftUpperEyelid'` | Upper eyelid of the left eye
+RIGHT_UPPER_EYELID | `'rightUpperEyelid'` | Upper eyelid of the right eye
+LEFT_LOWER_EYELID | `'leftLowerEyelid'` | Lower eyelid of the left eye
+RIGHT_LOWER_EYELID | `'rightLowerEyelid'` | Lower eyelid of the right eye
+UPPER_LIP | `'upperLip'` | Upper lip
+LOWER_LIP | `'lowerLip'` | Lower lip
+MOUTH | `'mouth'` | Mouth
+MOUTH_RIGHT_CORNER | `'mouthRightCorner'` | Right corner of mouth
+MOUTH_LEFT_CORNER | `'mouthLeftCorner'` | Left corner of mouth
+CHIN | `'chin'` | Chin
+
+When `enableEars:true` ear detection runs simultaneously with Face Effects and returns the following ear attachment points:
+
+Property | Value | Description
+-------- | ----- | -----------
+EAR_LEFT_HELIX | `'leftHelix'` | Upper helix of the left ear
+EAR_LEFT_CANAL | `'leftCanal'` | Ear canal of the left ear
+EAR_LEFT_LOBE | `'leftLobe'` | Left earlobe
+EAR_RIGHT_HELIX | `'rightHelix'` | Upper helix of the right ear
+EAR_RIGHT_CANAL | `'rightCanal'` | Ear canal of the right ear
+EAR_RIGHT_LOBE | `'rightLobe'` | Right earlobe
diff --git a/docs/engine/api/facecontroller/configure.md b/docs/engine/api/facecontroller/configure.md
new file mode 100644
index 0000000..0d4adc0
--- /dev/null
+++ b/docs/engine/api/facecontroller/configure.md
@@ -0,0 +1,50 @@
+---
+sidebar_position: 1
+sidebar_label: configure()
+---
+# XR8.FaceController.configure()
+
+`XR8.FaceController.configure({ nearClip, farClip, meshGeometry, coordinates })`
+
+## Description {#description}
+
+Configures what processing is performed by FaceController.
+
+## Parameters {#parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+nearClip [Optional] | `Number` | `0.01` | The distance from the camera of the near clip plane, i.e. the closest distance to the camera at which scene objects are visible.
+farClip [Optional] | `Number` | `1000` | The distance from the camera of the far clip plane, i.e. the farthest distance to the camera at which scene objects are visible.
+meshGeometry [Optional] | `Array` | `[XR8.FaceController.MeshGeometry.FACE]` | Controls which parts of the head geometry are visible. Options: `[XR8.FaceController.MeshGeometry.FACE, XR8.FaceController.MeshGeometry.EYES, XR8.FaceController.MeshGeometry.IRIS, XR8.FaceController.MeshGeometry.MOUTH]`
+maxDetections [Optional] | `Number` | `1` | The maximum number of faces to detect. The available choices are 1, 2, or 3.
+enableEars [Optional] | `Boolean` | `false` | If true, runs ear detection simultaneously with Face Effects and returns ear attachment points.
+uvType [Optional] | `String` | `[XR8.FaceController.UvType.STANDARD]` | Specifies which uvs are returned in the facescanning and faceloading event. Options are: `[XR8.FaceController.UvType.STANDARD, XR8.FaceController.UvType.PROJECTED]`
+coordinates [Optional] | `Coordinates` | | The camera configuration.
+
+The `Coordinates` object has the following properties:
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+origin [Optional] | `{position: {x, y, z}, rotation: {w, x, y, z}}` | `{position: {x: 0, y: 0, z: 0}, rotation: {w: 1, x: 0, y: 0, z: 0}}` | The position and rotation of the camera.
+scale [Optional] | `Number` | `1` | Scale of the scene.
+axes [Optional] | `String` | `'RIGHT_HANDED'` | Can be either `'LEFT_HANDED'` or `'RIGHT_HANDED'`.
+mirroredDisplay [Optional] | `Boolean` | `false` | If true, flip left and right in the output.
+
+**IMPORTANT:** [`XR8.FaceController`](./facecontroller.md) cannot be used at the same time as [`XR8.XrController`](../xrcontroller/xrcontroller.md).
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+ XR8.FaceController.configure({
+ meshGeometry: [XR8.FaceController.MeshGeometry.FACE],
+ coordinates: {
+ mirroredDisplay: true,
+ axes: 'LEFT_HANDED',
+ },
+ })
+```
diff --git a/docs/engine/api/facecontroller/facecontroller.md b/docs/engine/api/facecontroller/facecontroller.md
new file mode 100644
index 0000000..f42e213
--- /dev/null
+++ b/docs/engine/api/facecontroller/facecontroller.md
@@ -0,0 +1,14 @@
+# XR8.FaceController
+
+## Description {#description}
+
+`FaceController` provides face detection and meshing, and interfaces for configuring tracking.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[configure](configure.md) | Configures what processing is performed by FaceController.
+[pipelineModule](pipelinemodule.md) | Creates a camera pipeline module that, when installed, receives callbacks on when the camera has started, camera processing events, and other state changes. These are used to calculate the camera's position.
+[AttachmentPoints](attachmentpoints.md) | Points on the face you can anchor content to.
+[MeshGeometry](meshgeometry.md) | Options for defining which portions of the face have mesh triangles returned.
diff --git a/docs/engine/api/facecontroller/meshgeometry.md b/docs/engine/api/facecontroller/meshgeometry.md
new file mode 100644
index 0000000..a0d4d3f
--- /dev/null
+++ b/docs/engine/api/facecontroller/meshgeometry.md
@@ -0,0 +1,19 @@
+---
+sidebar_label: MeshGeometry
+---
+# XR8.FaceController.MeshGeometry
+
+Enumeration
+
+## Description {#description}
+
+Options for defining which portions of the face have mesh triangles returned.
+
+## Properties {#properties}
+
+Property | Value | Description
+-------- | ----- | -----------
+FACE | `'face'` | Return geometry for the face.
+MOUTH | `'mouth'` | Return geometry for the mouth.
+EYES | `'eyes'` | Return geometry for the eyes.
+IRIS | `'iris'` | Return geometry for the irises.
diff --git a/docs/engine/api/facecontroller/pipelinemodule.md b/docs/engine/api/facecontroller/pipelinemodule.md
new file mode 100644
index 0000000..5b34e6f
--- /dev/null
+++ b/docs/engine/api/facecontroller/pipelinemodule.md
@@ -0,0 +1,265 @@
+---
+sidebar_position: 2
+sidebar_label: pipelineModule()
+---
+# XR8.FaceController.pipelineModule()
+
+`XR8.FaceController.pipelineModule()`
+
+## Description {#description}
+
+Creates a camera pipeline module that, when installed, receives callbacks on when the camera has started, camera processing events, and other state changes. These are used to calculate the camera's position.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+Return value is an object made available to [`onUpdate`](/docs/engine/api/camerapipelinemodule/onupdate) as:
+
+`processCpuResult.facecontroller: { rotation, position, intrinsics, cameraFeedTexture }`
+
+Property | Type | Description
+--------- | ---- | -----------
+rotation | `{w, x, y, z}` | The orientation (quaternion) of the camera in the scene.
+position | `{x, y, z}` | The position of the camera in the scene.
+intrinsics | `[Number]` | A 16 dimensional column-major 4x4 projection matrix that gives the scene camera the same field of view as the rendered camera feed.
+cameraFeedTexture | [`WebGLTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) | The texture containing camera feed data.
+
+## Dispatched Events {#dispatched-events}
+
+**faceloading**: Fires when loading begins for additional face AR resources.
+
+`faceloading.detail : {maxDetections, pointsPerDetection, indices, uvs}`
+
+Property | Type | Description
+--------- | ---- | -----------
+maxDetections | `Number` | The maximum number of faces that can be simultaneously processed.
+pointsPerDetection | `Number` | Number of vertices that will be extracted per face.
+indices | `[{a, b, c}]` | The list of indexes into the vertices array that form the triangles of the requested mesh, as specified with `meshGeometry` in [`XR8.FaceController.configure()`](configure.md).
+uvs | `[{u, v}]` | The list of uv positions into a texture map corresponding to the returned vertex points.
+
+**facescanning**: Fires when all face AR resources have been loaded and scanning has begun.
+
+`facescanning.detail : {maxDetections, pointsPerDetection, indices, uvs}`
+
+Property | Type | Description
+--------- | ---- | -----------
+maxDetections | `Number` | The maximum number of faces that can be simultaneously processed.
+pointsPerDetection | `Number` | Number of vertices that will be extracted per face.
+indices | `[{a, b, c}]` | The list of indexes into the vertices array that form the triangles of the requested mesh, as specified with `meshGeometry` in [`XR8.FaceController.configure()`](configure.md).
+uvs | `[{u, v}]` | The list of uv positions into a texture map corresponding to the returned vertex points.
+
+**facefound**: Fires when a face is first found.
+
+`facefound.detail : {id, transform, vertices, normals, attachmentPoints, uvsInCameraFrame}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+transform | `{position, rotation, scale, scaledWidth, scaledHeight, scaledDepth}` | Transform information of the located face.
+vertices | `[{x, y, z}]` | Position of face points, relative to transform.
+normals | `[{x, y, z}]` | Normal direction of vertices, relative to transform.
+attachmentPoints | `{name, position: {x,y,z}}` | See [`XR8.FaceController.AttachmentPoints`](attachmentpoints.md) for list of available attachment points. `position` is relative to the transform.
+uvsInCameraFrame | `[{u, v}]` | The list of uv positions in the camera frame corresponding to the returned vertex points.
+
+`transform` is an object with the following properties:
+
+Property | Type | Description
+--------- | ---- | -----------
+position | `{x, y, z}` | The 3d position of the located face.
+rotation | `{w, x, y, z}` | The 3d local orientation of the located face.
+scale | `Number` | A scale factor that should be applied to objects attached to this face.
+scaledWidth | `Number` | Approximate width of the head in the scene when multiplied by scale.
+scaledHeight | `Number` | Approximate height of the head in the scene when multiplied by scale.
+scaledDepth | `Number` | Approximate depth of the head in the scene when multiplied by scale.
+
+**faceupdated**: Fires when a face is subsequently found.
+
+`faceupdated.detail : {id, transform, vertices, normals, attachmentPoints, uvsInCameraFrame}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+transform | `{position, rotation, scale, scaledWidth, scaledHeight, scaledDepth}` | Transform information of the located face.
+vertices | `[{x, y, z}]` | Position of face points, relative to transform.
+normals | `[{x, y, z}]` | Normal direction of vertices, relative to transform.
+attachmentPoints | `{name, position: {x,y,z}}` | See [`XR8.FaceController.AttachmentPoints`](attachmentpoints.md) for list of available attachment points. `position` is relative to the transform.
+uvsInCameraFrame | `[{u, v}]` | The list of uv positions in the camera frame corresponding to the returned vertex points.
+
+`transform` is an object with the following properties:
+
+Property | Type | Description
+--------- | ---- | -----------
+position | `{x, y, z}` | The 3d position of the located face.
+rotation | `{w, x, y, z}` | The 3d local orientation of the located face.
+scale | `Number` | A scale factor that should be applied to objects attached to this face.
+scaledWidth | `Number` | Approximate width of the head in the scene when multiplied by scale.
+scaledHeight | `Number` | Approximate height of the head in the scene when multiplied by scale.
+scaledDepth | `Number` | Approximate depth of the head in the scene when multiplied by scale.
+
+**facelost**: Fires when a face is no longer being tracked.
+
+`facelost.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**mouthopened**: Fires when a tracked face's mouth opens.
+
+`mouthopened.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**mouthclosed**: Fires when a tracked face's mouth closes.
+
+`mouthclosed.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**lefteyeopened**: Fires when a tracked face's left eye opens.
+
+`lefteyeopened.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**lefteyeclosed**: Fires when a tracked face's left eye closes.
+
+`lefteyeclosed.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**righteyeopened**: Fires when a tracked face's right eye opens.
+
+`righteyeopened.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**righteyeclosed**: Fires when a tracked face's right eye closes.
+
+`righteyeclosed.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**lefteyebrowraised**: Fires when a tracked face's left eyebrow is raised from its initial position when the face was found.
+
+`lefteyebrowraised.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**lefteyebrowlowered**: Fires when a tracked face's left eyebrow is lowered to its initial position when the face was found.
+
+`lefteyebrowlowered.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**righteyebrowraised**: Fires when a tracked face's right eyebrow is raised from its position when the face was found.
+
+`righteyebrowraised.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**righteyebrowlowered**: Fires when a tracked face's right eyebrow is lowered to its initial position when the face was found.
+
+`righteyebrowlowered.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**lefteyewinked**: Fires when a tracked face's left eye closes and opens within 750ms while the right eye remains open.
+
+`lefteyewinked.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**righteyewinked**: Fires when a tracked face's right eye closes and opens within 750ms while the left eye remains open.
+
+`righteyewinked.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**blinked**: Fires when a tracked face's eyes blink.
+
+`blinked.detail : { id }`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+
+**interpupillarydistance**: Fires when a tracked face's distance in millimeters between the centers of each pupil is first detected.
+
+`interpupillarydistance.detail : {id, interpupillaryDistance}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face.
+interpupillaryDistance | `Number` | Approximate distance in millimeters between the centers of each pupil.
+
+When `enableEars:true` ear detection runs simultaneously with Face Effects and dispatches the following events:
+
+**earfound**: Fires when an ear is first found.
+
+`earfound.detail : {id, ear}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face which the ear is attached to.
+ear | `String` | Can be either `left` or `right`.
+
+**earpointfound**: Fires when an ear attachmentPoint is first found.
+
+`earpointfound.detail : {id, point}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face which the ear attachmentPoint is attached to.
+point | `String` | Can be either `leftHelix`, `leftCanal`, `leftLobe`, `rightHelix`, `rightCanal`, or `rightLobe`.
+
+**earlost**: Fires when an ear is no longer being tracked.
+
+`earlost.detail : {id, ear}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face which the ear is attached to.
+ear | `String` | Can be either `left` or `right`.
+
+**earpointlost**: Fires when an ear attachmentPoint is no longer being tracked.
+
+`earpointlost.detail : {id, point}`
+
+Property | Type | Description
+--------- | ---- | -----------
+id | `Number` | A numerical id of the located face which the ear attachmentPoint is attached to.
+point | `String` | Can be either `leftHelix`, `leftCanal`, `leftLobe`, `rightHelix`, `rightCanal`, or `rightLobe`.
+
+## Example - adding pipeline module {#example---adding-pipeline-module}
+
+```javascript
+XR8.addCameraPipelineModule(XR8.FaceController.pipelineModule())
+```
diff --git a/docs/engine/api/gltexturerenderer/_category_.json b/docs/engine/api/gltexturerenderer/_category_.json
new file mode 100644
index 0000000..747b5cf
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "GlTextureRenderer",
+ "position": 12
+}
diff --git a/docs/engine/api/gltexturerenderer/configure.md b/docs/engine/api/gltexturerenderer/configure.md
new file mode 100644
index 0000000..82bd799
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/configure.md
@@ -0,0 +1,44 @@
+---
+sidebar_label: configure()
+---
+# XR8.GlTextureRenderer.configure()
+
+`XR8.GlTextureRenderer.configure({ vertexSource, fragmentSource, toTexture, flipY, mirroredDisplay })`
+
+## Description {#description}
+
+Configures the pipeline module that draws the camera feed to the canvas.
+
+## Parameters {#parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+vertexSource [Optional] | `String` | A no-op vertex shader | The vertex shader source to use for rendering.
+fragmentSource [Optional] | `String` | A no-op fragment shader | The fragment shader source to use for rendering.
+toTexture [Optional] | [`WebGlTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) | The canvas | A texture to draw to. If no texture is provided, drawing will be to the canvas.
+flipY [Optional] | `Boolean` | `false` | If true, flip the rendering upside-down.
+mirroredDisplay [Optional] | `Boolean` | `false` | If true, flip the rendering left-right.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+const purpleShader =
+ // Purple.
+ ` precision mediump float;
+ varying vec2 texUv;
+ uniform sampler2D sampler;
+ void main() {
+ vec4 c = texture2D(sampler, texUv);
+ float y = dot(c.rgb, vec3(0.299, 0.587, 0.114));
+ vec3 p = vec3(.463, .067, .712);
+ vec3 p1 = vec3(1.0, 1.0, 1.0) - p;
+ vec3 rgb = y < .25 ? (y * 4.0) * p : ((y - .25) * 1.333) * p1 + p;
+ gl_FragColor = vec4(rgb, c.a);
+ }`
+
+XR8.GlTextureRenderer.configure({fragmentSource: purpleShader})
+```
diff --git a/docs/engine/api/gltexturerenderer/create.md b/docs/engine/api/gltexturerenderer/create.md
new file mode 100644
index 0000000..0b1996e
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/create.md
@@ -0,0 +1,47 @@
+---
+sidebar_label: create()
+---
+# XR8.GlTextureRenderer.create()
+
+`XR8.GlTextureRenderer.create({ GLctx, vertexSource, fragmentSource, toTexture, flipY, mirroredDisplay })`
+
+## Description {#description}
+
+Creates an object for rendering from a texture to a canvas or another texture.
+
+## Parameters {#parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+GLctx | `WebGlRenderingContext` or `WebGl2RenderingContext` | | The `WebGlRenderingContext` (or `WebGl2RenderingContext`) to use for rendering. If no `toTexture` is specified, content will be drawn to this context's canvas.
+vertexSource [Optional] | `String` | A no-op vertex shader | The vertex shader source to use for rendering.
+fragmentSource [Optional] | `String` | A no-op fragment shader | The fragment shader source to use for rendering.
+toTexture [Optional] | [`WebGlTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) | `GLctx`'s canvas | A texture to draw to. If no texture is provided, drawing will be to the canvas.
+flipY [Optional] | `Boolean` | `false` | If true, flip the rendering upside-down.
+mirroredDisplay [Optional] | `Boolean` | `false` | If true, flip the rendering left-right.
+
+## Returns {#returns}
+
+Returns an object: `{render, destroy, shader}`
+
+Property | Description
+--------- | -----------
+render({ renderTexture, viewport }) | A function that renders the renderTexture to the specified viewport. Depending on whether `toTexture` is supplied, the viewport is either on the canvas that created `GLctx`, or it's relative to the render texture provided.
+destroy | Clean up resources associated with this `GlTextureRenderer`.
+shader | Gets a handle to the shader being used to draw the texture.
+
+The `render` function has the following parameters:
+
+Parameter | Description
+--------- | -----------
+renderTexture | A [`WebGlTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) (source) to draw.
+viewport | The region of the canvas or output texture to draw to; this can be constructed manually, or using [`XR8.GlTextureRenderer.fillTextureViewport()`](filltextureviewport.md).
+
+The viewport is specified by `{ width, height, offsetX, offsetY }` :
+
+Property | Type | Description
+-------- | ---- | -----------
+width | `Number` | The width (in pixels) to draw.
+height | `Number` | The height (in pixels) to draw.
+offsetX [Optional] | `Number` | The minimum x-coordinate (in pixels) to draw to.
+offsetY [Optional] | `Number` | The minimum y-coordinate (in pixels) to draw to.
diff --git a/docs/engine/api/gltexturerenderer/filltextureviewport.md b/docs/engine/api/gltexturerenderer/filltextureviewport.md
new file mode 100644
index 0000000..bec0082
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/filltextureviewport.md
@@ -0,0 +1,32 @@
+---
+sidebar_label: fillTextureViewport()
+---
+# XR8.GlTextureRenderer.fillTextureViewport()
+
+`XR8.GlTextureRenderer.fillTextureViewport(srcWidth, srcHeight, destWidth, destHeight)`
+
+## Description {#description}
+
+Convenience method for getting a Viewport struct that fills a texture or canvas from a source
+without distortion. This is passed to the render method of the object created by
+[`XR8.GlTextureRenderer.create()`](create.md)
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+srcWidth | `Number` | The width of the texture you are rendering.
+srcHeight | `Number` | The height of the texture you are rendering.
+destWidth | `Number` | The width of the render target.
+destHeight | `Number` | The height of the render target.
+
+## Returns {#returns}
+
+An object: `{ width, height, offsetX, offsetY }`
+
+Property | Type | Description
+-------- | ---- | -----------
+width | `Number` | The width (in pixels) to draw.
+height | `Number` | The height (in pixels) to draw.
+offsetX | `Number` | The minimum x-coordinate (in pixels) to draw to.
+offsetY | `Number` | The minimum y-coordinate (in pixels) to draw to.
diff --git a/docs/engine/api/gltexturerenderer/getglctxparameters.md b/docs/engine/api/gltexturerenderer/getglctxparameters.md
new file mode 100644
index 0000000..cd65159
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/getglctxparameters.md
@@ -0,0 +1,31 @@
+---
+sidebar_label: getGLctxParameters()
+---
+# XR8.GlTextureRenderer.getGLctxParameters()
+
+`XR8.GlTextureRenderer.getGLctxParameters(GLctx, textureUnit)`
+
+## Description {#description}
+
+Gets the current set of WebGL bindings so that they can be restored later.
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+GLctx | `WebGlRenderingContext` or `WebGl2RenderingContext` | The `WebGLRenderingContext` or `WebGL2RenderingContext` to get bindings from.
+textureUnit | `[]` | The texture units to preserve state for, e.g. `[GLctx.TEXTURE0]`
+
+## Returns {#returns}
+
+A struct to pass to [setGLctxParameters](setglctxparameters.md).
+
+## Example {#example}
+
+```javascript
+const restoreParams = XR8.GlTextureRenderer.getGLctxParameters(GLctx, [GLctx.TEXTURE0])
+// Alter context parameters as needed
+...
+XR8.GlTextureRenderer.setGLctxParameters(GLctx, restoreParams)
+// Context parameters are restored to their previous state
+```
diff --git a/docs/engine/api/gltexturerenderer/gltexturerenderer.md b/docs/engine/api/gltexturerenderer/gltexturerenderer.md
new file mode 100644
index 0000000..7d1141f
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/gltexturerenderer.md
@@ -0,0 +1,18 @@
+# XR8.GlTextureRenderer
+
+## Description {#description}
+
+Provides a camera pipeline module that draws the camera feed to a canvas as well as extra utilities for GL drawing operations.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[configure](configure.md) | Configures the pipeline module that draws the camera feed to the canvas.
+[create](create.md) | Creates an object for rendering from a texture to a canvas or another texture.
+[fillTextureViewport](filltextureviewport.md) | Convenience method for getting a Viewport struct that fills a texture or canvas from a source without distortion. This is passed to the render method of the object created by [`XR8.GlTextureRenderer.create()`](create.md)
+[getGLctxParameters](getglctxparameters.md) | Gets the current set of WebGL bindings so that they can be restored later.
+[pipelineModule](pipelinemodule.md) | Creates a pipeline module that draws the camera feed to the canvas.
+[setGLctxParameters](setglctxparameters.md) | Restores the WebGL bindings that were saved with [`XR8.GlTextureRenderer.getGLctxParameters()`](getglctxparameters.md).
+[setTextureProvider](settextureprovider.md) | Sets a provider that passes the texture to draw.
+[setForegroundTextureProvider](setforegroundtextureprovider.md) | Sets a provider that passes a list of foreground textures and alpha masks to draw.
diff --git a/docs/engine/api/gltexturerenderer/pipelinemodule.md b/docs/engine/api/gltexturerenderer/pipelinemodule.md
new file mode 100644
index 0000000..dd97ddf
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/pipelinemodule.md
@@ -0,0 +1,58 @@
+---
+sidebar_label: pipelineModule()
+---
+# XR8.GlTextureRenderer.pipelineModule()
+
+`XR8.GlTextureRenderer.pipelineModule({ vertexSource, fragmentSource, toTexture, flipY })`
+
+## Description {#description}
+
+Creates a pipeline module that draws the camera feed to the canvas.
+
+## Parameters {#parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+vertexSource [Optional] | `String` | A no-op vertex shader | The vertex shader source to use for rendering.
+fragmentSource [Optional] | `String` | A no-op fragment shader | The fragment shader source to use for rendering.
+toTexture [Optional] | [`WebGlTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) | The canvas | A texture to draw to. If no texture is provided, drawing will be to the canvas.
+flipY [Optional] | `Boolean` | `false` | If true, flip the rendering upside-down.
+
+## Returns {#returns}
+
+Return value is an object `{viewport, shader}` made available to
+[`onProcessCpu`](/docs/engine/api/camerapipelinemodule/onprocesscpu) and
+[`onUpdate`](/docs/engine/api/camerapipelinemodule/onupdate) as:
+
+`processGpuResult.gltexturerenderer` with the following properties:
+
+Property | Type | Description
+-------- | ---- | -----------
+viewport | `{width, height, offsetX, offsetY}` | The region of the canvas or output texture to draw to; this can be constructed manually, or using [`XR8.GlTextureRenderer.fillTextureViewport()`](filltextureviewport.md).
+shader | | A handle to the shader being used to draw the texture.
+
+processGpuResult.gltexturerenderer.viewport: `{ width, height, offsetX, offsetY }`
+
+Property | Type | Description
+-------- | ---- | -----------
+width | `Number` | The width (in pixels) to draw.
+height | `Number` | The height (in pixels) to draw.
+offsetX | `Number` | The minimum x-coordinate (in pixels) to draw to.
+offsetY | `Number` | The minimum y-coordinate (in pixels) to draw to.
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule(XR8.GlTextureRenderer.pipelineModule())
+XR8.addCameraPipelineModule({
+ name: 'mycamerapipelinemodule',
+ onProcessCpu: ({ processGpuResult }) => {
+ const {viewport, shader} = processGpuResult.gltexturerenderer
+ if (!viewport) {
+ return
+ }
+ const { width, height, offsetX, offsetY } = viewport
+
+ // ...
+ },
+```
diff --git a/docs/engine/api/gltexturerenderer/setforegroundtextureprovider.md b/docs/engine/api/gltexturerenderer/setforegroundtextureprovider.md
new file mode 100644
index 0000000..4047f77
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/setforegroundtextureprovider.md
@@ -0,0 +1,50 @@
+---
+sidebar_label: setForegroundTextureProvider()
+---
+# XR8.GlTextureRenderer.setForegroundTextureProvider()
+
+`XR8.GlTextureRenderer.setForegroundTextureProvider(({ frameStartResult, processGpuResult, processCpuResult }) => {} )`
+
+## Description {#description}
+
+Sets a provider that passes a list of foreground textures to draw. This should be a function that takes the same inputs as [`cameraPipelineModule.onUpdate`](/docs/engine/api/camerapipelinemodule/onupdate).
+
+## Parameters {#parameters}
+
+`setForegroundTextureProvider()` takes a **function** with the following parameters:
+
+Parameter | Type | Description
+--------- | ---- | -----------
+frameStartResult | `Object` | The data that was provided at the beginning of a frame.
+processGpuResult | `Object` | Data returned by all installed modules during [`onProcessGpu`](/docs/engine/api/camerapipelinemodule/onprocessgpu).
+processCpuResult | `Object` | Data returned by all installed modules during [`onProcessCpu`](/docs/engine/api/camerapipelinemodule/onprocesscpu).
+
+The function should return an array of objects which each contain the following properties:
+
+Property | Type | Default | Description
+-------- | ---- | ------- | -----------
+foregroundTexture | [`WebGLTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) | | The foreground texture to draw.
+foregroundMaskTexture | [`WebGLTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) | | An alpha mask to use on the foregroundTexture. The `r` channel of the `foregroundMaskTexture` is used in the alpha blending.
+foregroundTextureFlipY [Optional] | `Boolean` | `false` | Whether to flip the `foregroundTexture`.
+foregroundMaskTextureFlipY [Optional] | `Boolean` | `false` | Whether to flip the `foregroundMaskTexture`.
+
+The foreground textures will be drawn on top of the texture provided by calling [`XR8.GlTextureRenderer.setTextureProvider()`](settextureprovider.md). The foreground textures will be drawn in the order of the returned array.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.GlTextureRenderer.setForegroundTextureProvider(
+ ({processGpuResult}) => {
+ // Do some processing...
+ return [{
+ foregroundTexture,
+ foregroundMaskTexture,
+ foregroundTextureFlipY,
+ foregroundMaskTextureFlipY
+ }]
+ })
+```
diff --git a/docs/engine/api/gltexturerenderer/setglctxparameters.md b/docs/engine/api/gltexturerenderer/setglctxparameters.md
new file mode 100644
index 0000000..2ff52a8
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/setglctxparameters.md
@@ -0,0 +1,31 @@
+---
+sidebar_label: setGLctxParameters()
+---
+# XR8.GlTextureRenderer.setGLctxParameters()
+
+`XR8.GlTextureRenderer.setGLctxParameters(GLctx, restoreParams)`
+
+## Description {#description}
+
+Restores the WebGL bindings that were saved with [`XR8.GlTextureRenderer.getGLctxParameters()`](getglctxparameters.md).
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+GLctx | `WebGlRenderingContext` or `WebGl2RenderingContext` | The `WebGLRenderingContext` or `WebGL2RenderingContext` to restore bindings on.
+restoreParams | The output of [`XR8.GlTextureRenderer.getGLctxParameters()`](getglctxparameters.md).
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+const restoreParams = XR8.GlTextureRenderer.getGLctxParameters(GLctx, [GLctx.TEXTURE0])
+// Alter context parameters as needed
+...
+XR8.GlTextureRenderer.setGLctxParameters(GLctx, restoreParams)
+// Context parameters are restored to their previous state
+```
diff --git a/docs/engine/api/gltexturerenderer/settextureprovider.md b/docs/engine/api/gltexturerenderer/settextureprovider.md
new file mode 100644
index 0000000..0b9b7c4
--- /dev/null
+++ b/docs/engine/api/gltexturerenderer/settextureprovider.md
@@ -0,0 +1,35 @@
+---
+sidebar_label: setTextureProvider()
+---
+# XR8.GlTextureRenderer.setTextureProvider()
+
+`XR8.GlTextureRenderer.setTextureProvider(({ frameStartResult, processGpuResult, processCpuResult }) => {} )`
+
+## Description {#description}
+
+Sets a provider that passes the texture to draw. This should be a function that takes the same inputs as [`cameraPipelineModule.onUpdate`](/docs/engine/api/camerapipelinemodule/onupdate).
+
+## Parameters {#parameters}
+
+`setTextureProvider()` takes a **function** with the following parameters:
+
+Parameter | Type | Description
+--------- | ---- | -----------
+frameStartResult | `Object` | The data that was provided at the beginning of a frame.
+processGpuResult | `Object` | Data returned by all installed modules during [`onProcessGpu`](/docs/engine/api/camerapipelinemodule/onprocessgpu).
+processCpuResult | `Object` | Data returned by all installed modules during [`onProcessCpu`](/docs/engine/api/camerapipelinemodule/onprocesscpu).
+
+The function should return a [`WebGLTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) to draw.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.GlTextureRenderer.setTextureProvider(
+ ({processGpuResult}) => {
+ return processGpuResult.camerapixelarray ? processGpuResult.camerapixelarray.srcTex : null
+ })
+```
diff --git a/docs/engine/api/landingpage/_category_.json b/docs/engine/api/landingpage/_category_.json
new file mode 100644
index 0000000..14df5e2
--- /dev/null
+++ b/docs/engine/api/landingpage/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "LandingPage",
+ "position": 15
+}
diff --git a/docs/engine/api/landingpage/configure.md b/docs/engine/api/landingpage/configure.md
new file mode 100644
index 0000000..ae9edb5
--- /dev/null
+++ b/docs/engine/api/landingpage/configure.md
@@ -0,0 +1,49 @@
+---
+sidebar_label: configure()
+---
+# LandingPage.configure()
+
+`LandingPage.configure({ logoSrc, logoAlt, promptPrefix, url, promptSuffix, textColor, font, textShadow, backgroundSrc, backgroundBlur, backgroundColor, mediaSrc, mediaAlt, mediaAutoplay, mediaAnimation, mediaControls, sceneEnvMap, sceneOrbitIdle, sceneOrbitInteraction, sceneLightingIntensity, vrPromptPrefix })`
+
+## Description {#description}
+
+Configures behavior and look of the LandingPage module.
+
+## Parameters (All Optional) {#parameters-all-optional}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+logoSrc | `String` | | Image source for brand logo image.
+logoAlt | `String` | `'Logo'` | Alt text for brand logo image.
+promptPrefix | `String` | `'Scan or visit'` | Sets the text string for call to action before the URL for the experience is displayed.
+url | `String` | 8th.io link if 8th Wall hosted, or current page | Sets the displayed URL and QR code.
+promptSuffix | `String` | `'to continue'` | Sets the text string for call to action after the URL for the experience is displayed.
+textColor | Hex Color | `'#ffffff'` | Color of all the text on the Landing Page.
+font | `String` | `"'Nunito', sans-serif"` | Font of all text on the Landing Page. This parameter accepts valid CSS font-family arguments.
+textShadow | `Boolean` | `false` | Sets text-shadow property for all text on the Landing Page.
+backgroundSrc | `String` | | Image source for background image.
+backgroundBlur | `Number` | `0` | Applies a blur effect to the `backgroundSrc` if one is specified. (Typically values are between 0.0 and 1.0)
+backgroundColor | `String` | `'linear-gradient(#464766,#2D2E43)'` | Background color of the Landing Page. This parameter accepts valid CSS background-color arguments. Background color is not displayed if a background-src or sceneEnvMap is set.
+mediaSrc | `String` | App’s cover image, if present | Media source (3D model, image, or video) for landing page hero content. Accepted media sources include a-asset-item id, or static URL.
+mediaAlt | `String` | `'Preview'` | Alt text for landing page image content.
+mediaAutoplay | `Boolean` | `true` | If the `mediaSrc` is a video, specifies if the video should be played on load with sound muted.
+mediaAnimation | `String` | First animation clip of model, if present | If the `mediaSrc` is a 3D model, specify whether to play a specific animation clip associated with the model, or "none".
+mediaControls | `String` | `'minimal'` | If `mediaSrc` is a video, specify media controls displayed to the user. Choose from "none", "minimal" or "browser" (browser defaults)
+sceneEnvMap | `String` | `'field'` | Image source pointing to an equirectangular image. Or one of the following preset environments: "field", "hill", "city", "pastel", or "space".
+sceneOrbitIdle | `String` | `'spin'` | If the `mediaSrc` is a 3D model, specify whether the model should "spin", or "none".
+sceneOrbitInteraction | `String` | `'drag'` | If the `mediaSrc` is a 3D model, specify whether the user can interact with the orbit controls, choose "drag", or "none".
+sceneLightingIntensity | `Number` | `1` | If the `mediaSrc` is a 3D model, specify the strength of the light illuminating the model.
+vrPromptPrefix | `String` | `'or visit'` | Sets the text string for call to action before the URL for the experience is displayed on VR headsets.
+
+## Returns {#returns}
+
+None
+
+## Example - Code {#example---code}
+
+```javascript
+LandingPage.configure({
+ mediaSrc: 'https://www.mydomain.com/bat.glb',
+ sceneEnvMap: 'hill',
+})
+```
diff --git a/docs/engine/api/landingpage/landingpage.md b/docs/engine/api/landingpage/landingpage.md
new file mode 100644
index 0000000..971e7b7
--- /dev/null
+++ b/docs/engine/api/landingpage/landingpage.md
@@ -0,0 +1,12 @@
+# LandingPage
+
+## Description {#description}
+
+Provides a module that generates a custom Landing Page for your Web AR experience.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[configure](configure.md) | Configures LandingPage settings.
+[pipelineModule](pipelinemodule.md) | Creates a camera pipeline module that, when installed, adds landing page functionality to your project.
diff --git a/docs/engine/api/landingpage/pipelinemodule.md b/docs/engine/api/landingpage/pipelinemodule.md
new file mode 100644
index 0000000..11554f8
--- /dev/null
+++ b/docs/engine/api/landingpage/pipelinemodule.md
@@ -0,0 +1,39 @@
+---
+sidebar_label: pipelineModule()
+---
+# LandingPage.pipelineModule()
+
+`LandingPage.pipelineModule()`
+
+## Description {#description}
+
+Creates a pipeline module that, when installed, adds landing page functionality to your project.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A pipeline module that adds landing page functionality to your project.
+
+## Non-AFrame Example {#non-aframe-example}
+
+```javascript
+// Configured here
+LandingPage.configure({
+ mediaSrc: 'https://domain.com/bat.glb',
+ sceneEnvMap: 'hill',
+})
+XR8.addCameraPipelineModules([
+ XR8.GlTextureRenderer.pipelineModule(),
+ XR8.Threejs.pipelineModule(),
+ XR8.XrController.pipelineModule(),
+ XRExtras.FullWindowCanvas.pipelineModule(),
+ XRExtras.Loading.pipelineModule(),
+ XRExtras.RuntimeError.pipelineModule(),
+ // Added here
+ LandingPage.pipelineModule(),
+ ...
+])
+```
diff --git a/docs/engine/api/layerscontroller/_category_.json b/docs/engine/api/layerscontroller/_category_.json
new file mode 100644
index 0000000..8c65708
--- /dev/null
+++ b/docs/engine/api/layerscontroller/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "LayersController",
+ "position": 14
+}
diff --git a/docs/engine/api/layerscontroller/configure.md b/docs/engine/api/layerscontroller/configure.md
new file mode 100644
index 0000000..9e46187
--- /dev/null
+++ b/docs/engine/api/layerscontroller/configure.md
@@ -0,0 +1,47 @@
+---
+sidebar_label: configure()
+---
+# XR8.LayersController.configure()
+
+`XR8.LayersController.configure({ nearClip, farClip, coordinates, layers })`
+
+## Description {#description}
+
+Configures the processing performed by `LayersController`.
+
+## Parameters {#parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+nearClip [Optional] | `Number` | `0.01` | The distance from the camera of the near clip plane, i.e. the closest distance to the camera at which scene objects are visible.
+farClip [Optional] | `Number` | `1000` | The distance from the camera of the far clip plane, i.e. the farthest distance to the camera at which scene objects are visible.
+coordinates [Optional] | `Coordinates` | | The camera configuration.
+layers [Optional] | `Record` | `{}` | Semantic layers to detect. The key is the layer name. To remove a layer pass `null` instead of `LayerOptions`. The only supported layer name at this time is `'sky'`.
+
+The `Coordinates` object has the following properties:
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+origin [Optional] | `{position: {x, y, z}, rotation: {w, x, y, z}}` | `{position: {x: 0, y: 2, z: 0}, rotation: {w: 1, x: 0, y: 0, z: 0}}` | The position and rotation of the camera.
+scale [Optional] | `Number` | `2` | Scale of the scene.
+axes [Optional] | `String` | `'RIGHT_HANDED'` | Can be either `'LEFT_HANDED'` or `'RIGHT_HANDED'`.
+mirroredDisplay [Optional] | `Boolean` | `false` | If true, flip left and right in the output.
+
+The `LayerOptions` object has the following properties:
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+invertLayerMask [Optional] | `Boolean` | `false` | If `true`, content you place in your scene will be visible in non-sky areas. If `false`, content you place in your scene will be visible in sky areas. To reset to the default value pass `null`.
+edgeSmoothness [Optional] | `Number` | `0` | Amount to smooth the edges of the layer. Valid values are between [0-1]. To reset to the default value pass `null`.
+
+**IMPORTANT:** [`XR8.LayersController`](./layerscontroller.md) cannot be used at the same time as [`XR8.FaceController`](../facecontroller/facecontroller.md).
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.LayersController.configure({layers: {sky: {invertLayerMask: true, edgeSmoothness: 0.8}}})
+```
diff --git a/docs/engine/api/layerscontroller/getlayernames.md b/docs/engine/api/layerscontroller/getlayernames.md
new file mode 100644
index 0000000..93e61b4
--- /dev/null
+++ b/docs/engine/api/layerscontroller/getlayernames.md
@@ -0,0 +1,18 @@
+---
+sidebar_label: getLayerNames()
+---
+# XR8.LayersController.getLayerNames()
+
+`XR8.LayersController.getLayerNames()`
+
+## Description {#description}
+
+Returns the layers that are configured by the `LayersController`.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A list of Strings where each is a layer name.
diff --git a/docs/engine/api/layerscontroller/layerscontroller.md b/docs/engine/api/layerscontroller/layerscontroller.md
new file mode 100644
index 0000000..a62d53b
--- /dev/null
+++ b/docs/engine/api/layerscontroller/layerscontroller.md
@@ -0,0 +1,14 @@
+# XR8.LayersController
+
+## Description {#description}
+
+`LayersController` provides semantic layer detection and interfaces for configuring layer rendering.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[configure](configure.md) | Configures what processing is performed by `LayersController`.
+[getLayerNames](getlayernames.md) | Returns the layers that are configured by `LayersController`.
+[pipelineModule](pipelinemodule.md) | Creates a camera pipeline module that, when installed, provides semantic layer detection.
+[recenter](recenter.md) | Repositions the camera to the origin / facing direction.
diff --git a/docs/engine/api/layerscontroller/pipelinemodule.md b/docs/engine/api/layerscontroller/pipelinemodule.md
new file mode 100644
index 0000000..385b349
--- /dev/null
+++ b/docs/engine/api/layerscontroller/pipelinemodule.md
@@ -0,0 +1,66 @@
+---
+sidebar_label: pipelineModule()
+---
+# XR8.LayersController.pipelineModule()
+
+`XR8.LayersController.pipelineModule()`
+
+## Description {#description}
+
+Creates a camera pipeline module that, when installed, provides semantic layer detection.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+Return value is an object made available to [`onUpdate`](/docs/engine/api/camerapipelinemodule/onupdate) as:
+
+`processCpuResult.layerscontroller: { rotation, position, intrinsics, cameraFeedTexture, layers }`
+
+Property | Type | Description
+--------- | ---- | -----------
+rotation | `{w, x, y, z}` | The orientation (quaternion) of the camera in the scene.
+position | `{x, y, z}` | The position of the camera in the scene.
+intrinsics | `[Number]` | A 16 dimensional column-major 4x4 projection matrix that gives the scene camera the same field of view as the rendered camera feed.
+cameraFeedTexture | [`WebGLTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) | The texture containing camera feed data.
+layers | `Record` | Key is the layer name, LayerOutput contains the results of semantic layer detection for that layer.
+
+`LayerOutput` is an object with the following properties:
+
+Property | Type | Description
+--------- | ---- | -----------
+texture | [`WebGLTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) | The texture containing layer data. The r, g, b channels indicate our confidence of whether the layer is present at this pixel. 0.0 indicates the layer is not present and 1.0 indicates it is present. Note that this value will be flipped if `invertLayerMask` has been set to true.
+textureWidth | `Number` | Width of the returned texture in pixels.
+textureHeight | `Number` | Height of the returned texture in pixels.
+percentage | `Number` | Percentage of pixels that are classified as associated with the layer. Value in the range of [0, 1]
+
+## Dispatched Events {#dispatched-events}
+
+**layerloading**: Fires when loading begins for additional layer segmentation resources.
+
+`layerloading.detail : {}`
+
+**layerscanning**: Fires when all layer segmentation resources have been loaded and scanning has begun. One event is dispatched per layer being scanned.
+
+`layerscanning.detail : {name}`
+
+Property | Type | Description
+--------- | ---- | -----------
+name | `String` | Name of the layer which we are scanning.
+
+**layerfound**: Fires the first time a layer has been found.
+
+`layerfound.detail : {name, percentage}`
+
+Property | Type | Description
+--------- | ---- | -----------
+name | `String` | Name of the layer that has been found.
+percentage | `Number` | Percentage of pixels that are associated with the layer.
+
+## Example - adding pipeline module {#example---adding-pipeline-module}
+
+```javascript
+XR8.addCameraPipelineModule(XR8.LayersController.pipelineModule())
+```
diff --git a/docs/engine/api/layerscontroller/recenter.md b/docs/engine/api/layerscontroller/recenter.md
new file mode 100644
index 0000000..622ce18
--- /dev/null
+++ b/docs/engine/api/layerscontroller/recenter.md
@@ -0,0 +1,18 @@
+---
+sidebar_label: recenter()
+---
+# XR8.LayersController.recenter()
+
+`XR8.LayersController.recenter()`
+
+## Description {#description}
+
+Repositions the camera to the origin / facing direction.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+None
diff --git a/docs/engine/api/mediarecorder/_category_.json b/docs/engine/api/mediarecorder/_category_.json
new file mode 100644
index 0000000..bd72cb4
--- /dev/null
+++ b/docs/engine/api/mediarecorder/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "MediaRecorder",
+ "position": 16
+}
diff --git a/docs/engine/api/mediarecorder/configure.md b/docs/engine/api/mediarecorder/configure.md
new file mode 100644
index 0000000..65d52ab
--- /dev/null
+++ b/docs/engine/api/mediarecorder/configure.md
@@ -0,0 +1,71 @@
+---
+sidebar_label: configure()
+---
+# XR8.MediaRecorder.configure()
+
+`XR8.MediaRecorder.configure({ coverImageUrl, enableEndCard, endCardCallToAction, footerImageUrl, foregroundCanvas, maxDurationMs, maxDimension, shortLink, configureAudioOutput, audioContext, requestMic })`
+
+## Description {#description}
+
+Configures various MediaRecorder parameters.
+
+## Parameters {#parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+coverImageUrl [Optional]| `String` | Cover image configured in project, `null` otherwise | Image source for cover image.
+enableEndCard [Optional] | `Boolean` | `false` | If true, enable end card.
+endCardCallToAction [Optional] | `String` | `'Try it at: '` | Sets the text string for call to action.
+fileNamePrefix [Optional] | `String` | `'my-capture-'` | Sets the text string that prepends the unique timestamp on file name.
+footerImageUrl [Optional] | `String` | `null` | Image source for footer image.
+foregroundCanvas [Optional] | `String` | `null` | The canvas to use as a foreground in the recorded video.
+maxDurationMs [Optional] | `Number` | `15000` | Maximum duration of video, in milliseconds.
+maxDimension [Optional] | `Number` | `1280` | Max dimension of the captured recording, in pixels.
+shortLink [Optional] | `String` | 8th.io shortlink from project dashboard | Sets the text string for shortlink.
+configureAudioOutput [Optional] | `Object` | `null` | User provided function that will receive the `microphoneInput` and `audioProcessor` audio nodes for complete control of the recording's audio. The nodes attached to the audio processor node will be part of the recording's audio. It is required to return the end node of the user's audio graph.
+audioContext [Optional] | `String` | `null` | User provided `AudioContext` instance. Engines like three.js and BABYLON.js have their own internal audio instance. In order for the recordings to contain sounds defined in those engines, you'll want to provide their `AudioContext` instance.
+requestMic [Optional] | `String` | `'auto'` | Determines when the audio permissions are requested. The options are provided in [`XR8.MediaRecorder.RequestMicOptions`](requestmicoptions.md).
+
+The function passed to `configureAudioOutput` takes an object with the following parameters:
+
+Parameter | Description
+--------- | -----------
+microphoneInput | A [`GainNode`](https://developer.mozilla.org/en-US/docs/Web/API/GainNode) that contains the user’s mic input. If the user’s permissions are not accepted, then this node won’t output the mic input but will still be present.
+audioProcessor | a [`ScriptProcessorNode`](https://developer.mozilla.org/en-US/docs/Web/API/ScriptProcessorNode) that passes audio data to the recorder. If you want an audio node to be part of the recording’s audio output, then you must connect it to the audioProcessor.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.MediaRecorder.configure({
+ maxDurationMs: 15000,
+ enableEndCard: true,
+ endCardCallToAction: 'Try it at:',
+ shortLink: '8th.io/my-link',
+})
+```
+
+## Example - user configured audio output {#example---user-configured-audio-output}
+
+```javascript
+const userConfiguredAudioOutput = ({microphoneInput, audioProcessor}) => {
+ const myCustomAudioGraph = ...
+ myCustomAudioSource.connect(myCustomAudioGraph)
+ microphoneInput.connect(myCustomAudioGraph)
+
+ // Connect audio graph end node to hardware.
+ myCustomAudioGraph.connect(microphoneInput.context.destination)
+
+ // Audio graph will be automatically connected to processor.
+ return myCustomAudioGraph
+}
+const threejsAudioContext = THREE.AudioContext.getContext()
+XR8.MediaRecorder.configure({
+ configureAudioOutput: userConfiguredAudioOutput,
+ audioContext: threejsAudioContext,
+ requestMic: XR8.MediaRecorder.RequestMicOptions.AUTO,
+})
+```
diff --git a/docs/engine/api/mediarecorder/mediarecorder.md b/docs/engine/api/mediarecorder/mediarecorder.md
new file mode 100644
index 0000000..efadb6c
--- /dev/null
+++ b/docs/engine/api/mediarecorder/mediarecorder.md
@@ -0,0 +1,16 @@
+# XR8.MediaRecorder
+
+## Description {#description}
+
+Provides a camera pipeline module that allows you to record a video in MP4 format.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[configure](configure.md) | Configure video recording settings.
+[pipelineModule](pipelinemodule.md) | Creates a pipeline module that records video in MP4 format.
+[recordVideo](recordvideo.md) | Start recording.
+[requestMicrophone](requestmicrophone.md) | Enables recording of audio (if not enabled automatically), requesting permissions if needed.
+[stopRecording](stoprecording.md) | Stop recording.
+[RequestMicOptions](requestmicoptions.md) | Enum for whether or not to automatically request microphone permissions.
diff --git a/docs/engine/api/mediarecorder/pipelinemodule.md b/docs/engine/api/mediarecorder/pipelinemodule.md
new file mode 100644
index 0000000..3b4eb35
--- /dev/null
+++ b/docs/engine/api/mediarecorder/pipelinemodule.md
@@ -0,0 +1,24 @@
+---
+sidebar_label: pipelineModule()
+---
+# XR8.MediaRecorder.pipelineModule()
+
+`XR8.MediaRecorder.pipelineModule()`
+
+## Description {#description}
+
+Provides a camera pipeline module that allows you to record a video in MP4 format.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A `MediaRecorder` pipeline module that allows you to record a video.
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule(XR8.MediaRecorder.pipelineModule())
+```
diff --git a/docs/engine/api/mediarecorder/recordvideo.md b/docs/engine/api/mediarecorder/recordvideo.md
new file mode 100644
index 0000000..3cea058
--- /dev/null
+++ b/docs/engine/api/mediarecorder/recordvideo.md
@@ -0,0 +1,52 @@
+---
+sidebar_label: recordVideo()
+---
+# XR8.MediaRecorder.recordVideo()
+
+`XR8.MediaRecorder.recordVideo({ onError, onProcessFrame, onStart, onStop, onVideoReady })`
+
+## Description {#description}
+
+Start recording.
+
+This function takes an object that implements one or more of the following media recorder lifecycle callback methods:
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+onError | Callback when there is an error.
+onProcessFrame | Callback for adding an overlay to the video.
+onStart | Callback when recording has started.
+onStop | Callback when recording has stopped.
+onPreviewReady | Callback when a previewable, but not sharing-optimized, video is ready (Android/Desktop only).
+onFinalizeProgress | Callback when the media recorder is making progress in the final export (Android/Desktop only).
+onVideoReady | Callback when recording has completed and video is ready.
+
+**Note:** When the browser has native MediaRecorder support for webm and not mp4 (currently Android/Desktop), the webm is usable as a preview video, but is converted to mp4 to generate the final video. `onPreviewReady` is called when the conversion starts, to allow the user to see the video immediately, and when the mp4 file is ready, `onVideoReady` will be called. During conversion, `onFinalizeProgress` is called periodically to allow a progress bar to be displayed.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.MediaRecorder.recordVideo({
+ onVideoReady: (result) => window.dispatchEvent(new CustomEvent('recordercomplete', {detail: result})),
+ onStop: () => showLoading(),
+ onError: () => clearState(),
+ onProcessFrame: ({elapsedTimeMs, maxRecordingMs, ctx}) => {
+ // overlay some red text over the video
+ ctx.fillStyle = 'red'
+ ctx.font = '50px "Nunito"'
+ ctx.fillText(`${elapsedTimeMs}/${maxRecordingMs}`, 50, 50)
+ const timeLeft = ( 1 - elapsedTimeMs / maxRecordingMs)
+ // update the progress bar to show how much time is left
+ progressBar.style.strokeDashoffset = `${100 * timeLeft }`
+ },
+ onFinalizeProgress: ({progress, total}) => {
+ console.log('Export is ' + Math.round(progress / total) + '% complete')
+ },
+})
+```
diff --git a/docs/engine/api/mediarecorder/requestmicoptions.md b/docs/engine/api/mediarecorder/requestmicoptions.md
new file mode 100644
index 0000000..eb414f6
--- /dev/null
+++ b/docs/engine/api/mediarecorder/requestmicoptions.md
@@ -0,0 +1,17 @@
+---
+sidebar_label: RequestMicOptions
+---
+# XR8.MediaRecorder.RequestMicOptions
+
+Enumeration
+
+## Description {#description}
+
+Determines when the audio permissions are requested.
+
+## Properties {#properties}
+
+Property | Value | Description
+-------- | ----- | -----------
+AUTO | `'auto'` | Automatically request microphone permissions in [`onAttach()`](/docs/engine/api/camerapipelinemodule/onattach).
+MANUAL | `'manual'` | Microphone permissions are NOT requested in [`onAttach()`](/docs/engine/api/camerapipelinemodule/onattach). Any other audio added to the app is still recorded if added to the AudioContext and connected to the audioProcessor provided to the user's [`configureAudioOutput`](/docs/engine/api/mediarecorder/configure/#parameters) function passed to [`XR8.MediaRecorder.configure()`](configure.md). You can request microphone permissions manually by calling [`XR8.MediaRecorder.requestMicrophone()`](requestmicrophone.md).
diff --git a/docs/engine/api/mediarecorder/requestmicrophone.md b/docs/engine/api/mediarecorder/requestmicrophone.md
new file mode 100644
index 0000000..760f6a6
--- /dev/null
+++ b/docs/engine/api/mediarecorder/requestmicrophone.md
@@ -0,0 +1,34 @@
+---
+sidebar_label: requestMicrophone()
+---
+# XR8.MediaRecorder.requestMicrophone()
+
+`XR8.MediaRecorder.requestMicrophone()`
+
+## Description {#description}
+
+Enables recording of audio (if not enabled automatically), requesting permissions if needed.
+
+Returns a promise that lets the client know when the stream is ready. If you begin recording
+before the audio stream is ready, then you may miss the user's microphone output at the
+beginning of the recording.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A Promise.
+
+## Example {#example}
+
+```javascript
+XR8.MediaRecorder.requestMicrophone()
+.then(() => {
+ console.log('Microphone requested!')
+})
+.catch((err) => {
+ console.log('Hit an error: ', err)
+})
+```
diff --git a/docs/engine/api/mediarecorder/stoprecording.md b/docs/engine/api/mediarecorder/stoprecording.md
new file mode 100644
index 0000000..c752ae5
--- /dev/null
+++ b/docs/engine/api/mediarecorder/stoprecording.md
@@ -0,0 +1,24 @@
+---
+sidebar_label: stopRecording()
+---
+# XR8.MediaRecorder.stopRecording()
+
+`XR8.MediaRecorder.stopRecording()`
+
+## Description {#description}
+
+Stop recording.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.MediaRecorder.stopRecording()
+```
diff --git a/docs/engine/api/playcanvas/_category_.json b/docs/engine/api/playcanvas/_category_.json
new file mode 100644
index 0000000..a7d8632
--- /dev/null
+++ b/docs/engine/api/playcanvas/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "PlayCanvas",
+ "position": 17
+}
diff --git a/docs/engine/api/playcanvas/getting-started.md b/docs/engine/api/playcanvas/getting-started.md
new file mode 100644
index 0000000..63dd296
--- /dev/null
+++ b/docs/engine/api/playcanvas/getting-started.md
@@ -0,0 +1,65 @@
+# Getting Started with PlayCanvas
+
+To get started, fork a sample project:
+
+* Starter Kit Sample Projects
+ * [Image Tracking Starter Kit](https://playcanvas.com/project/631721/overview/8th-wall-ar-image-targets): An application to get you started quickly creating image tracking applications in PlayCanvas.
+ * [World Tracking Starter Kit](https://playcanvas.com/project/631719/overview/8th-wall-ar-world-tracking): An application to get you started quickly creating world tracking applications in PlayCanvas.
+ * [Face Effects Starter Kit](https://playcanvas.com/project/687674/overview/8th-wall-ar-face-effects): An application to get you started quickly creating Face Effects applications in PlayCanvas.
+ * [Sky Effects Starter Kit](https://playcanvas.com/project/1055775/overview/8th-wall-sky-effects): An application to get you started quickly creating Sky Effects applications in PlayCanvas.
+ * [Ear Tracking Starter Kit](https://playcanvas.com/project/1158433/overview/8th-wall-ears): An application to get you started quickly creating Ear Tracking applications in PlayCanvas.
+
+
+* Additional Sample Projects
+ * [World Tracking and Face Effects](https://playcanvas.com/project/701392/overview/8th-wall-ar-swap-camera): An example that illustrates how to switch between World Tracking and Face Effects in a single project.
+ * [Color Swap](https://playcanvas.com/project/783654/overview/8th-wall-ar-color-swap): An application to get you started quickly creating AR world tracking applications that include simple UI and color change.
+ * [Swap Scenes](https://playcanvas.com/project/781435/overview/8th-wall-ar-swap-scenes): An application to get you started quickly creating AR World Tracking applications that switch scenes.
+ * [Swap Camera](https://playcanvas.com/project/701392/overview/8th-wall-ar-swap-camera): An application that demonstrates how to switch between front camera Face Effects and back camera World Tracking.
+
+## Add your App Key {#add-your-app-key}
+
+Go to Settings -> External Scripts
+
+The following two scripts should be added:
+
+* `https://cdn.8thwall.com/web/xrextras/xrextras.js`
+* `https://apps.8thwall.com/xrweb?appKey=XXXXXX`
+
+Then replace `XXXXXX` with your own unique App Key obtained from the 8th Wall Console.
+
+## Enable "Transparent Canvas" {#enable-transparent-canvas}
+
+1. Go to Settings -> Rendering.
+2. Make sure that "Transparent Canvas" is **checked**.
+
+## Disable "Prefer WebGL 2.0" {#disable-prefer-webgl-20}
+
+1. Go to Settings -> Rendering.
+2. Make sure that "Prefer WebGL 2.0" is **unchecked**.
+
+## Add xrcontroller.js {#add-xrcontroller}
+The 8th Wall sample PlayCanvas projects are populated with an XRController game object. If you are starting with a blank project, download `xrcontroller.js` and attach it to an Entity in your scene.
+
+**NOTE**: Only for SLAM and/or Image Target projects. `xrcontroller.js` and `facecontroller.js` or
+`layerscontroller.js` cannot be used simultaneously.
+
+Option | Description
+--------- | -----------
+disableWorldTracking | If true, turn off SLAM tracking for efficiency.
+shadowmaterial | Material which you want to use as a transparent shadow receiver (e.g. for ground shadows). Typically this material will be used on a "ground" plane entity positioned at (0,0,0)
+
+## Add layerscontroller.js {#add-layerscontroller}
+The 8th Wall sample PlayCanvas projects are populated with a LayersController game object. If you are starting with a blank project, download `layerscontroller.js` and attach it to an Entity in your scene.
+
+**NOTE**: Only for Sky Effects projects. `layerscontroller.js` and `facecontroller.js` or
+`xrcontroller.js` cannot be used simultaneously.
+
+## Add facecontroller.js {#add-facecontroller}
+The 8th Wall sample PlayCanvas projects are populated with a FaceController game object. If you are starting with a blank project, download `facecontroller.js` from and attach it to an Entity in your scene.
+
+**NOTE**: Only for Face Effects projects. `facecontroller.js` and `xrcontroller.js` or
+`layerscontroller.js` cannot be used simultaneously.
+
+Option | Description
+--------- | -----------
+headAnchor | The entity to anchor to the root of the head in world space.
diff --git a/docs/engine/api/playcanvas/playcanvas.md b/docs/engine/api/playcanvas/playcanvas.md
new file mode 100644
index 0000000..9033d10
--- /dev/null
+++ b/docs/engine/api/playcanvas/playcanvas.md
@@ -0,0 +1,17 @@
+# XR8.PlayCanvas
+
+PlayCanvas is an open-source 3D game engine/interactive 3D
+application engine alongside a proprietary cloud-hosted creation platform that allows for
+simultaneous editing from multiple computers via a browser-based interface.
+
+## Description {#description}
+
+Provides an integration that interfaces with the PlayCanvas environment and lifecycle to drive the
+PlayCanvas camera to do virtual overlays.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[run](run.md) | Opens the camera with the specified Pipeline Modules and starts running in a PlayCanvas scene.
+[stop](stop.md) | Remove the modules added in [run](run.md) and stop the camera.
diff --git a/docs/engine/api/playcanvas/run.md b/docs/engine/api/playcanvas/run.md
new file mode 100644
index 0000000..067c472
--- /dev/null
+++ b/docs/engine/api/playcanvas/run.md
@@ -0,0 +1,70 @@
+---
+sidebar_label: run()
+---
+# XR8.PlayCanvas.run()
+
+`XR8.PlayCanvas.run( {pcCamera, pcApp}, [extraModules], config )`
+
+## Description {#description}
+
+Adds specified Pipeline Modules and then opens the camera.
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+pcCamera | [`pc.CameraComponent`](https://developer.playcanvas.com/en/api/pc.CameraComponent.html) |The PlayCanvas scene camera to drive with AR.
+pcApp | [`pc.Application`](https://developer.playcanvas.com/en/api/pc.Application.html) | The PlayCanvas app, typically `this.app`.
+extraModules [Optional] | `[Object]` | An optional array of extra pipeline modules to install.
+config | `{canvas, webgl2, ownRunLoop, cameraConfig, glContextConfig, allowedDevices, layers}` |Configuration parameters to pass to [`XR8.run()`](/docs/engine/api/xr8/run) as well as PlayCanvas specific configuration, e.g. `layers`.
+
+`config` is an object with the following properties:
+
+Property | Type | Default | Description
+-------- | ---- | ------- | -----------
+canvas | [`HTMLCanvasElement`](https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement) | | The HTML Canvas that the camera feed will be drawn to. Typically this is `document.getElementById('application-canvas')`.
+webgl2 [Optional] | `Boolean` | `false` | If true, use WebGL2 if available, otherwise fallback to WebGL1. If false, always use WebGL1.
+ownRunLoop [Optional] | `Boolean` | `false` | If true, XR should use its own run loop. If false, you will provide your own run loop and be responsible for calling [`XR8.runPreRender()`](/docs/engine/api/xr8/runprerender) and [`XR8.runPostRender()`](/docs/engine/api/xr8/runpostrender) yourself [Advanced Users only]
+cameraConfig: {direction} [Optional] | `Object` | `{direction: XR8.XrConfig.camera().BACK}` | Desired camera to use. Supported values for `direction` are `XR8.XrConfig.camera().BACK` or `XR8.XrConfig.camera().FRONT`
+glContextConfig [Optional] | `WebGLContextAttributes` | `null` | The attributes to configure the WebGL canvas context.
+allowedDevices [Optional] | [`XR8.XrConfig.device()`](/docs/engine/api/xrconfig/device) | `XR8.XrConfig.device().MOBILE` | Specify the class of devices that the pipeline should run on. If the current device is not in that class, running will fail prior to opening the camera. If allowedDevices is `XR8.XrConfig.device().ANY`, always open the camera. Note that world tracking can only be used with `XR8.XrConfig.device().MOBILE`.
+layers [Optional] | `[]` | `[]` | Specify the list of layers to draw using `GlTextureRenderer`. The key is the name of the layer in 8th Wall, and the value is a list of PlayCanvas layer names which we should render to a texture and mask using the 8th Wall layer. Example value: `{"sky": ["FirstSkyLayer", "SecondSkyLayer"]}`.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+var layerscontroller = pc.createScript('layerscontroller')
+
+layerscontroller.prototype.initialize = function() {
+ // After XR has fully loaded, open the camera feed and start displaying AR.
+ const runOnLoad = ({pcCamera, pcApp}, extraModules) => () => {
+ // Pass in your canvas name. Typically this is 'application-canvas'.
+ const config = {
+ canvas: document.getElementById('application-canvas'),
+ layers: {"sky": ["Sky"]}
+ }
+ XR8.PlayCanvas.run({pcCamera, pcApp}, extraModules, config)
+ }
+
+ // Find the camera in the PlayCanvas scene, and tie it to the motion of the user's phone in the
+ // world.
+ const pcCamera = XRExtras.PlayCanvas.findOneCamera(this.entity)
+
+ // While XR is still loading, show some helpful things.
+ // Almost There: Detects whether the user's environment can support web ar, and if it doesn't,
+ // shows hints for how to view the experience.
+ // Loading: shows prompts for camera permission and hides the scene until it's ready for display.
+ // Runtime Error: If something unexpected goes wrong, display an error screen.
+ XRExtras.Loading.showLoading({onxrloaded: runOnLoad({pcCamera, pcApp: this.app}, [
+ // Optional modules that developers may wish to customize or theme.
+ XRExtras.AlmostThere.pipelineModule(), // Detects unsupported browsers and gives hints.
+ XRExtras.Loading.pipelineModule(), // Manages the loading screen on startup.
+ XRExtras.RuntimeError.pipelineModule(), // Shows an error image on runtime error.
+ XR8.LayersController.pipelineModule(), // Adds support for Sky Effects.
+ ])})
+}
+```
diff --git a/docs/engine/api/playcanvas/stop.md b/docs/engine/api/playcanvas/stop.md
new file mode 100644
index 0000000..c913d69
--- /dev/null
+++ b/docs/engine/api/playcanvas/stop.md
@@ -0,0 +1,18 @@
+---
+sidebar_label: stop()
+---
+# XR8.PlayCanvas.stop()
+
+`XR8.PlayCanvas.stop()`
+
+## Description {#description}
+
+Remove the modules added in [`run()`](run.md) and stop the camera.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+None
diff --git a/docs/engine/api/playcanvaseventlisteners/_category_.json b/docs/engine/api/playcanvaseventlisteners/_category_.json
new file mode 100644
index 0000000..8ccbcc7
--- /dev/null
+++ b/docs/engine/api/playcanvaseventlisteners/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "PlayCanvas Event Listeners",
+ "position": 19
+}
diff --git a/docs/engine/api/playcanvaseventlisteners/playcanvaseventlisteners.md b/docs/engine/api/playcanvaseventlisteners/playcanvaseventlisteners.md
new file mode 100644
index 0000000..a589b4b
--- /dev/null
+++ b/docs/engine/api/playcanvaseventlisteners/playcanvaseventlisteners.md
@@ -0,0 +1,13 @@
+# PlayCanvas Event Listeners
+
+This section describes the events that are listened for by 8th Wall Web in a PlayCanvas environment.
+
+You can fire these events in your web application to perform various actions:
+
+Event Listener | Description
+-------------- | -----------
+[xr:hidecamerafeed](xrhidecamerafeed.md) | Hides the camera feed. Tracking does not stop.
+[xr:recenter](xrrecenter.md) | Recenters the camera feed to its origin. If a new origin is provided as an argument, the camera's origin will be reset to that, then it will recenter.
+[xr:screenshotrequest](xrscreenshotrequest.md) | Emits a request to the engine to capture a screenshot of the PlayCanvas canvas. The engine will emit a [`xr:screenshotready`](/docs/engine/api/playcanvasevents/xrscreenshotready) event with the JPEG compressed image or [`xr:screenshoterror`](/docs/engine/api/playcanvasevents/xrscreenshoterror) if an error has occurred.
+[xr:showcamerafeed](xrshowcamerafeed.md) | Shows the camera feed.
+[xr:stopxr](xrstopxr.md) | Stop the current XR session. While stopped, the camera feed is stopped and device motion is not tracked.
diff --git a/docs/engine/api/playcanvaseventlisteners/xrhidecamerafeed.md b/docs/engine/api/playcanvaseventlisteners/xrhidecamerafeed.md
new file mode 100644
index 0000000..ee57fbf
--- /dev/null
+++ b/docs/engine/api/playcanvaseventlisteners/xrhidecamerafeed.md
@@ -0,0 +1,17 @@
+# xr:hidecamerafeed
+
+`this.app.fire('xr:hidecamerafeed')`
+
+## Description {#description}
+
+Hides the camera feed. Tracking does not stop.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+this.app.fire('xr:hidecamerafeed')
+```
diff --git a/docs/engine/api/playcanvaseventlisteners/xrrecenter.md b/docs/engine/api/playcanvaseventlisteners/xrrecenter.md
new file mode 100644
index 0000000..27b7b0a
--- /dev/null
+++ b/docs/engine/api/playcanvaseventlisteners/xrrecenter.md
@@ -0,0 +1,31 @@
+# xr:recenter
+
+`this.app.fire('xr:recenter')`
+
+## Description {#description}
+
+Recenters the camera feed to its origin. If a new origin is provided as an argument, the camera's origin will be reset to that, then it will recenter.
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+origin [Optional] | `{x, y, z}` | The location of the new origin.
+facing [Optional] | `{w, x, y, z}` | A quaternion representing direction the camera should face at the origin.
+
+## Example {#example}
+
+```javascript
+/*jshint esversion: 6, asi: true, laxbreak: true*/
+
+// taprecenter.js: Defines a playcanvas script that re-centers the AR scene when the screen is
+// tapped.
+
+var taprecenter = pc.createScript('taprecenter')
+
+// Fire a 'recenter' event to move the camera back to its starting location in the scene.
+taprecenter.prototype.initialize = function() {
+ this.app.touch.on(pc.EVENT_TOUCHSTART,
+ (event) => { if (event.touches.length !== 1) { return } this.app.fire('xr:recenter')})
+}
+```
diff --git a/docs/engine/api/playcanvaseventlisteners/xrscreenshotrequest.md b/docs/engine/api/playcanvaseventlisteners/xrscreenshotrequest.md
new file mode 100644
index 0000000..cb0616f
--- /dev/null
+++ b/docs/engine/api/playcanvaseventlisteners/xrscreenshotrequest.md
@@ -0,0 +1,30 @@
+# xr:screenshotrequest
+
+`this.app.fire('xr:screenshotrequest')`
+
+## Parameters {#parameters}
+
+None
+
+## Description {#description}
+
+Emits a request to the engine to capture a screenshot of the PlayCanvas canvas. The engine will emit
+a [`xr:screenshotready`](/docs/engine/api/playcanvasevents/xrscreenshotready) event with the JPEG compressed image or
+[`xr:screenshoterror`](/docs/engine/api/playcanvasevents/xrscreenshoterror) if an error has occurred.
+
+## Example {#example}
+
+```javascript
+this.app.on('xr:screenshotready', (event) => {
+ // screenshotPreview is an HTML element
+ const image = document.getElementById('screenshotPreview')
+ image.src = 'data:image/jpeg;base64,' + event.detail
+}, this)
+
+this.app.on('xr:screenshoterror', (detail) => {
+ console.log(detail)
+ // Handle screenshot error.
+}, this)
+
+this.app.fire('xr:screenshotrequest')
+```
diff --git a/docs/engine/api/playcanvaseventlisteners/xrshowcamerafeed.md b/docs/engine/api/playcanvaseventlisteners/xrshowcamerafeed.md
new file mode 100644
index 0000000..ff91a92
--- /dev/null
+++ b/docs/engine/api/playcanvaseventlisteners/xrshowcamerafeed.md
@@ -0,0 +1,17 @@
+# xr:showcamerafeed
+
+`this.app.fire('xr:showcamerafeed')`
+
+## Description {#description}
+
+Shows the camera feed.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+this.app.fire('xr:showcamerafeed')
+```
diff --git a/docs/engine/api/playcanvaseventlisteners/xrstopxr.md b/docs/engine/api/playcanvaseventlisteners/xrstopxr.md
new file mode 100644
index 0000000..787d85f
--- /dev/null
+++ b/docs/engine/api/playcanvaseventlisteners/xrstopxr.md
@@ -0,0 +1,17 @@
+# xr:stopxr
+
+`this.app.fire('xr:stopxr')`
+
+## Description {#description}
+
+Stop the current XR session. While stopped, the camera feed is stopped and device motion is not tracked.
+
+## Parameters {#parameters}
+
+None
+
+## Example {#example}
+
+```javascript
+this.app.fire('xr:stopxr')
+```
diff --git a/docs/engine/api/playcanvasevents/_category_.json b/docs/engine/api/playcanvasevents/_category_.json
new file mode 100644
index 0000000..c0c1633
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "PlayCanvas Events",
+ "position": 18
+}
diff --git a/docs/engine/api/playcanvasevents/playcanvas-face-effects-events.md b/docs/engine/api/playcanvasevents/playcanvas-face-effects-events.md
new file mode 100644
index 0000000..eaf88d9
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/playcanvas-face-effects-events.md
@@ -0,0 +1,123 @@
+---
+sidebar_position: 3
+---
+# PlayCanvas Face Effects Events
+
+Face Effects events can be listened to as `this.app.on(event, handler, this)`.
+
+**xr:faceloading**: Fires when loading begins for additional face AR resources.
+
+`xr:faceloading : {maxDetections, pointsPerDetection, indices, uvs}`
+
+**xr:facescanning**: Fires when all face AR resources have been loaded and scanning has begun.
+
+`xr:facescanning: {maxDetections, pointsPerDetection, indices, uvs}`
+
+**xr:facefound**: Fires when a face is first found.
+
+`xr:facefound : {id, transform, attachmentPoints, vertices, normals, uvsInCameraFrame}`
+
+**xr:faceupdated**: Fires when a face is subsequently found.
+
+`xr:faceupdated : {id, transform, attachmentPoints, vertices, normals, uvsInCameraFrame}`
+
+**xr:facelost**: Fires when a face is no longer being tracked.
+
+`xr:facelost : {id}`
+
+**xr:mouthopened**: Fires when a tracked face's mouth opens.
+
+`xr:mouthopened : {id}`
+
+**xr:mouthclosed**: Fires when a tracked face's mouth closes.
+
+`xr:mouthclosed : {id}`
+
+**xr:lefteyeopened**: Fires when a tracked face's left eye opens.
+
+`xr:lefteyeopened : {id}`
+
+**xr:lefteyeclosed**: Fires when a tracked face's left eye closes.
+
+`xr:lefteyeclosed : {id}`
+
+**xr:righteyeopened**: Fires when a tracked face's right eye opens
+
+`xr:righteyeopened : {id}`
+
+**xr:righteyeclosed**: Fires when a tracked face's right eye closes.
+
+`xr:righteyeclosed : {id}`
+
+**xr:lefteyebrowraised**: Fires when a tracked face's left eyebrow is raised from its initial position when the face was found.
+
+`xr:lefteyebrowraised : {id}`
+
+**xr:lefteyebrowlowered**: Fires when a tracked face's left eyebrow is lowered to its initial position when the face was found.
+
+`xr:lefteyebrowlowered : {id}`
+
+**xr:righteyebrowraised**: Fires when a tracked face's right eyebrow is raised from its position when the face was found.
+
+`xr:righteyebrowraised : {id}`
+
+**xr:righteyebrowlowered**: Fires when a tracked face's right eyebrow is lowered to its initial position when the face was found.
+
+`xr:righteyebrowlowered : {id}`
+
+**xr:lefteyewinked**: Fires when a tracked face's left eye closes and opens within 750ms while the right eye remains open.
+
+`xr:lefteyewinked : {id}`
+
+**xr:righteyewinked**: Fires when a tracked face's right eye closes and opens within 750ms while the left eye remains open.
+
+`xr:righteyewinked : {id}`
+
+**xr:blinked**: Fires when a tracked face's eyes blink.
+
+`xr:blinked : {id}`
+
+**xr:interpupillarydistance**: Fires when a tracked face's distance in millimeters between the centers of each pupil is first detected.
+
+`xr:interpupillarydistance : {id, interpupillaryDistance}`
+
+## Example {#example}
+
+```javascript
+ let mesh = null
+
+ // Fires when loading begins for additional face AR resources.
+ this.app.on('xr:faceloading', ({maxDetections, pointsPerDetection, indices, uvs}) => {
+ const node = new pc.GraphNode();
+ const material = this.material.resource;
+ mesh = pc.createMesh(
+ this.app.graphicsDevice,
+ new Array(pointsPerDetection * 3).fill(0.0), // setting filler vertex positions
+ {
+ uvs: uvs.map((uv) => [uv.u, uv.v]).flat(),
+ indices: indices.map((i) => [i.a, i.b, i.c]).flat()
+ }
+ );
+
+ const meshInstance = new pc.MeshInstance(node, mesh, material);
+ const model = new pc.Model();
+ model.graph = node;
+ model.meshInstances.push(meshInstance);
+ this.entity.model.model = model;
+ }, {})
+
+ // Fires when a face is subsequently found.
+ this.app.on('xr:faceupdated', ({id, transform, attachmentPoints, vertices, normals}) => {
+ const {position, rotation, scale, scaledDepth, scaledHeight, scaledWidth} = transform
+
+ this.entity.setPosition(position.x, position.y, position.z);
+ this.entity.setLocalScale(scale, scale, scale)
+ this.entity.setRotation(rotation.x, rotation.y, rotation.z, rotation.w)
+
+ // Set mesh vertices in local space
+ mesh.setPositions(vertices.map((vertexPos) => [vertexPos.x, vertexPos.y, vertexPos.z]).flat())
+ // Set vertex normals
+ mesh.setNormals(normals.map((normal) => [normal.x, normal.y, normal.z]).flat())
+ mesh.update()
+ }, {})
+```
diff --git a/docs/engine/api/playcanvasevents/playcanvas-image-target-events.md b/docs/engine/api/playcanvasevents/playcanvas-image-target-events.md
new file mode 100644
index 0000000..252c83f
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/playcanvas-image-target-events.md
@@ -0,0 +1,48 @@
+---
+sidebar_position: 2
+---
+# PlayCanvas Image Target Events
+
+Image target events can be listened to as `this.app.on(event, handler, this)`.
+
+**xr:imageloading**: Fires when detection image loading begins.
+
+`xr:imageloading : { imageTargets: {name, type, metadata} }`
+
+**xr:imagescanning**: Fires when all detection images have been loaded and scanning has begun.
+
+`xr:imagescanning : { imageTargets: {name, type, metadata, geometry} }`
+
+**xr:imagefound**: Fires when an image target is first found.
+
+`xr:imagefound : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+**xr:imageupdated**: Fires when an image target changes position, rotation or scale.
+
+`xr:imageupdated : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+**xr:imagelost**: Fires when an image target is no longer being tracked.
+
+`xr:imagelost : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+## Example {#example}
+
+```javascript
+const showImage = (detail) => {
+ if (name != detail.name) { return }
+ const {rotation, position, scale} = detail
+ entity.setRotation(rotation.x, rotation.y, rotation.z, rotation.w)
+ entity.setPosition(position.x, position.y, position.z)
+ entity.setLocalScale(scale, scale, scale)
+ entity.enabled = true
+}
+
+const hideImage = (detail) => {
+ if (name != detail.name) { return }
+ entity.enabled = false
+}
+
+this.app.on('xr:imagefound', showImage, {})
+this.app.on('xr:imageupdated', showImage, {})
+this.app.on('xr:imagelost', hideImage, {})
+```
diff --git a/docs/engine/api/playcanvasevents/playcanvasevents.md b/docs/engine/api/playcanvasevents/playcanvasevents.md
new file mode 100644
index 0000000..5e121ff
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/playcanvasevents.md
@@ -0,0 +1,49 @@
+# PlayCanvas Events
+
+This section describes the events fired by 8th Wall in a PlayCanvas environment.
+
+You can listen for these events in your web application.
+
+## Events Emitted {#events-emitted}
+
+Event Emitted | Description
+------------- | -----------
+[xr:camerastatuschange](xrcamerastatuschange.md) | This event is emitted when the status of the camera changes. See [`onCameraStatusChange`](/docs/engine/api/camerapipelinemodule/oncamerastatuschange) from [`XR8.addCameraPipelineModule()`](/docs/engine/api/xr8/addcamerapipelinemodule) for more information on the possible status.
+[xr:realityerror](xrrealityerror.md) | This event is emitted when an error has occured when initializing 8th Wall Web. This is the recommended time at which any error messages should be displayed. The [`XR8.XrDevice()` API](/docs/engine/api/xrdevice) can help with determining what type of error messaging should be displayed.
+[xr:realityready](xrrealityready.md) | This event is emitted when 8th Wall Web has initialized and at least one frame has been successfully processed. This is the recommended time at which any loading elements should be hidden.
+[xr:screenshoterror](xrscreenshoterror.md) | This event is emitted in response to the [`xr:screenshotrequest`](/docs/engine/api/playcanvaseventlisteners/xrscreenshotrequest) resulting in an error.
+[xr:screenshotready](xrscreenshotready.md) | This event is emitted in response to the [`xr:screenshotrequest`](/docs/engine/api/playcanvaseventlisteners/xrscreenshotrequest) event being completed successfully. The JPEG compressed image of the PlayCanvas canvas will be provided.
+
+## XR8.XrController Events Emitted {#xrcontroller-events-emitted}
+
+When `XR8.XrController.pipelineModule()` is added by passing it in `extraModules` to `XR8.PlayCanvas.run()` these events are emitted:
+
+Event Emitted | Description
+------------- | -----------
+[xr:imageloading](playcanvas-image-target-events.md) | This event is emitted when detection image loading begins.
+[xr:imagescanning](playcanvas-image-target-events.md) | This event is emitted when all detection images have been loaded and scanning has begun.
+[xr:imagefound](playcanvas-image-target-events.md) | This event is emitted when an image target is first found.
+[xr:imageupdated](playcanvas-image-target-events.md) | This event is emitted when an image target changes position, rotation or scale.
+[xr:imagelost](playcanvas-image-target-events.md) | This event is emitted when an image target is no longer being tracked.
+
+## XR8.LayersController Events Emitted {#layerscontroller-events-emitted}
+
+When `XR8.LayersController.pipelineModule()` is added by passing it in `extraModules` to `XR8.PlayCanvas.run()` these events are emitted:
+
+Event Emitted | Description
+------------- | -----------
+[xr:layerloading](xrlayerloading.md) | Fires when loading begins for additional layer segmentation resources.
+[xr:layerscanning](xrlayerscanning.md) | Fires when all layer segmentation resources have been loaded and scanning has begun. One event is dispatched per layer being scanned.
+[xr:layerfound](xrlayerfound.md) | Fires when a layer is first found.
+
+## XR8.FaceController Events Emitted {#facecontroller-events-emitted}
+
+When `XR8.FaceController.pipelineModule()` is added by passing it in `extraModules` to `XR8.PlayCanvas.run()` these events are emitted:
+
+Event Emitted | Description
+------------- | -----------
+[xr:faceloading](playcanvas-face-effects-events.md) | Fires when loading begins for additional face AR resources.
+[xr:facescanning](playcanvas-face-effects-events.md) | Fires when all face AR resources have been loaded and scanning has begun.
+[xr:facefound](playcanvas-face-effects-events.md) | Fires when a face is first found.
+[xr:faceupdated](playcanvas-face-effects-events.md) | Fires when a face is subsequently found.
+[xr:facelost](playcanvas-face-effects-events.md) | Fires when a face is no longer being tracked.
diff --git a/docs/engine/api/playcanvasevents/xrcamerastatuschange.md b/docs/engine/api/playcanvasevents/xrcamerastatuschange.md
new file mode 100644
index 0000000..d125635
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/xrcamerastatuschange.md
@@ -0,0 +1,33 @@
+---
+sidebar_position: 1
+---
+# xr:camerastatuschange
+
+## Description {#description}
+
+This event is fired when the status of the camera changes. See
+[`onCameraStatusChange`](/docs/engine/api/camerapipelinemodule/oncamerastatuschange) from
+[`XR8.addCameraPipelineModule()`](/docs/engine/api/camerapipelinemodule) for more information on the possible status.
+
+## Example {#example}
+
+```javascript
+const handleCameraStatusChange = function handleCameraStatusChange(detail) {
+ console.log('status change', detail.status);
+
+ switch (detail.status) {
+ case 'requesting':
+ // Do something
+ break;
+
+ case 'hasStream':
+ // Do something
+ break;
+
+ case 'failed':
+ this.app.fire('xr:realityerror');
+ break;
+ }
+}
+this.app.on('xr:camerastatuschange', handleCameraStatusChange, this)
+```
diff --git a/docs/engine/api/playcanvasevents/xrlayerfound.md b/docs/engine/api/playcanvasevents/xrlayerfound.md
new file mode 100644
index 0000000..ea4f29e
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/xrlayerfound.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 1
+---
+# xr:layerfound
+
+## Description {#description}
+
+This event is emitted when a layer is first found.
+
+`xr:layerfound.detail : { name, percentage }`
+
+Property | Description
+--------- | -----------
+name: `string` | The name of the layer which has been found.
+percentage: `number` | The percentage of pixels that are sky.
+
+## Example {#example}
+
+```javascript
+this.app.on('xr:layerfound', (event) => {
+ console.log(`Layer ${event.name} found in ${event.percentage} of the screen.`)
+}, this)
+```
diff --git a/docs/engine/api/playcanvasevents/xrlayerloading.md b/docs/engine/api/playcanvasevents/xrlayerloading.md
new file mode 100644
index 0000000..af55bb8
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/xrlayerloading.md
@@ -0,0 +1,18 @@
+---
+sidebar_position: 1
+---
+# xr:layerloading
+
+## Description {#description}
+
+This event is emitted when loading begins for additional layer segmentation resources.
+
+`xr:layerloading.detail : { }`
+
+## Example {#example}
+
+```javascript
+this.app.on('xr:layerloading', () => {
+ console.log(`Layer loading.`)
+}, this)
+```
diff --git a/docs/engine/api/playcanvasevents/xrlayerscanning.md b/docs/engine/api/playcanvasevents/xrlayerscanning.md
new file mode 100644
index 0000000..dda5ab5
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/xrlayerscanning.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 1
+---
+# xr:layerscanning
+
+## Description {#description}
+
+This event is emitted when all layer segmentation resources have been loaded and scanning has begun. One event is dispatched per layer being scanned.
+
+`xr:layerscanning.detail : { name }`
+
+Property | Description
+--------- | -----------
+name: `string` | The name of the layer which we are scanning.
+
+## Example {#example}
+
+```javascript
+this.app.on('xr:layerscanning', (event) => {
+ console.log(`Layer ${event.name} has started scanning.`)
+}, this)
+```
diff --git a/docs/engine/api/playcanvasevents/xrrealityerror.md b/docs/engine/api/playcanvasevents/xrrealityerror.md
new file mode 100644
index 0000000..9d43c2e
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/xrrealityerror.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 1
+---
+# xr:realityerror
+
+## Description {#description}
+
+This event is emitted when an error has occurred when initializing 8th Wall Web. This is the
+recommended time at which any error messages should be displayed. The [`XR8.XrDevice()` API](/docs/engine/api/xrdevice)
+can help with determining what type of error messaging should be displayed.
+
+## Example {#example}
+
+```javascript
+this.app.on('xr:realityerror', ({error, isDeviceBrowserSupported, compatibility}) => {
+  if (isDeviceBrowserSupported) {
+ // Browser is compatible. Print the exception for more information.
+ console.log(error)
+ return
+ }
+
+ // Browser is not compatible. Check the reasons why it may not be in `compatibility`
+ console.log(compatibility)
+}, this)
+```
diff --git a/docs/engine/api/playcanvasevents/xrrealityready.md b/docs/engine/api/playcanvasevents/xrrealityready.md
new file mode 100644
index 0000000..0b93a28
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/xrrealityready.md
@@ -0,0 +1,16 @@
+---
+sidebar_position: 1
+---
+# xr:realityready
+
+## Description {#description}
+
+This event is fired when 8th Wall Web has initialized and at least one frame has been successfully processed.
+
+## Example {#example}
+
+```javascript
+this.app.on('xr:realityready', () => {
+ // Hide loading UI
+}, this)
+```
diff --git a/docs/engine/api/playcanvasevents/xrscreenshoterror.md b/docs/engine/api/playcanvasevents/xrscreenshoterror.md
new file mode 100644
index 0000000..55994df
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/xrscreenshoterror.md
@@ -0,0 +1,17 @@
+---
+sidebar_position: 1
+---
+# xr:screenshoterror
+
+## Description {#description}
+
+This event is emitted in response to the [`xr:screenshotrequest`](/docs/engine/api/playcanvaseventlisteners/xrscreenshotrequest) resulting in an error.
+
+## Example {#example}
+
+```javascript
+this.app.on('xr:screenshoterror', (detail) => {
+ console.log(detail)
+ // Handle screenshot error.
+}, this)
+```
diff --git a/docs/engine/api/playcanvasevents/xrscreenshotready.md b/docs/engine/api/playcanvasevents/xrscreenshotready.md
new file mode 100644
index 0000000..aabec6c
--- /dev/null
+++ b/docs/engine/api/playcanvasevents/xrscreenshotready.md
@@ -0,0 +1,18 @@
+---
+sidebar_position: 1
+---
+# xr:screenshotready
+
+## Description {#description}
+
+This event is emitted in response to the [`xr:screenshotrequest`](/docs/engine/api/playcanvaseventlisteners/xrscreenshotrequest) event being completed successfully. The JPEG compressed image of the PlayCanvas canvas will be provided.
+
+## Example {#example}
+
+```javascript
+this.app.on('xr:screenshotready', (event) => {
+ // screenshotPreview is an HTML element
+ const image = document.getElementById('screenshotPreview')
+ image.src = 'data:image/jpeg;base64,' + event.detail
+}, this)
+```
diff --git a/docs/engine/api/threejs/_category_.json b/docs/engine/api/threejs/_category_.json
new file mode 100644
index 0000000..21d6f0e
--- /dev/null
+++ b/docs/engine/api/threejs/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Threejs",
+ "position": 20
+}
diff --git a/docs/engine/api/threejs/configure.md b/docs/engine/api/threejs/configure.md
new file mode 100644
index 0000000..5398e96
--- /dev/null
+++ b/docs/engine/api/threejs/configure.md
@@ -0,0 +1,38 @@
+---
+sidebar_label: configure()
+---
+# XR8.Threejs.configure()
+
+`XR8.Threejs.configure({renderCameraTexture, layerScenes})`
+
+## Description {#description}
+
+Configures the three.js renderer.
+
+## Parameters {#parameters}
+
+Property | Type | Default | Description
+--------- | --------- | --------- | ----------- |
+renderCameraTexture [Optional] | `Boolean` | `true` | If `true`, render the camera feed cropped to the canvas's size to a texture. This will be returned as `cameraTexture` by [`XR8.Threejs.xrScene()`](xrscene.md). If `false` or `null`, do not render the camera feed to a texture.
+layerScenes [Optional] | `[String]` | `[]` | An array of layer names. The layers to create new three.js scenes for. Scenes are returned as `layerScenes` by [`XR8.Threejs.xrScene()`](xrscene.md). The only valid value is `'sky'`.
+
+## Returns {#returns}
+
+None
+
+## Example - Render camera feed to a texture {#example---render-camera-feed-to-a-texture}
+
+```javascript
+XR8.Threejs.configure({renderCameraTexture: true})
+...
+const {cameraTexture} = XR8.Threejs.xrScene()
+```
+
+## Example - Sky Scene {#example---sky-scene}
+
+```javascript
+XR8.Threejs.configure({layerScenes: ['sky']})
+...
+const {layerScenes} = XR8.Threejs.xrScene()
+createSkyScene(layerScenes.sky.scene, layerScenes.sky.camera)
+```
diff --git a/docs/engine/api/threejs/pipelinemodule.md b/docs/engine/api/threejs/pipelinemodule.md
new file mode 100644
index 0000000..8ff404d
--- /dev/null
+++ b/docs/engine/api/threejs/pipelinemodule.md
@@ -0,0 +1,75 @@
+---
+sidebar_label: pipelineModule()
+---
+# XR8.Threejs.pipelineModule()
+
+`XR8.Threejs.pipelineModule()`
+
+## Description {#description}
+
+A pipeline module that interfaces with the three.js environment and lifecycle. The three.js scene can be queried using [`XR8.Threejs.xrScene()`](xrscene.md) after [`XR8.Threejs.pipelineModule()`](pipelinemodule.md)'s [`onStart`](/docs/engine/api/camerapipelinemodule/onstart) method is called. Setup can be done in another pipeline module's [`onStart`](/docs/engine/api/camerapipelinemodule/onstart) method by referring to [`XR8.Threejs.xrScene()`](xrscene.md) as long as [`XR8.addCameraPipelineModule()`](/docs/engine/api/xr8/addcamerapipelinemodule) is called on the second module *after* calling `XR8.addCameraPipelineModule(XR8.Threejs.pipelineModule())`.
+
+* [`onStart`](/docs/engine/api/camerapipelinemodule/onstart), a three.js renderer and scene are created and configured to draw over a camera feed.
+* [`onUpdate`](/docs/engine/api/camerapipelinemodule/onupdate), the three.js camera is driven with the phone's motion.
+* [`onRender`](/docs/engine/api/camerapipelinemodule/onrender), the renderer's `render()` method is invoked.
+
+Note that this module does not actually draw the camera feed to the canvas, GlTextureRenderer does
+that. To add a camera feed in the background, install the
+[`XR8.GlTextureRenderer.pipelineModule()`](/docs/engine/api/gltexturerenderer/pipelinemodule) before installing this
+module (so that it is rendered before the scene is drawn).
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A three.js pipeline module that can be added via [`XR8.addCameraPipelineModule()`](/docs/engine/api/xr8/addcamerapipelinemodule).
+
+## Example {#example}
+
+```javascript
+// Add XrController.pipelineModule(), which enables 6DoF camera motion estimation.
+XR8.addCameraPipelineModule(XR8.XrController.pipelineModule())
+
+// Add a GlTextureRenderer which draws the camera feed to the canvas.
+XR8.addCameraPipelineModule(XR8.GlTextureRenderer.pipelineModule())
+
+// Add Threejs.pipelineModule() which creates a three.js scene, camera, and renderer, and
+// drives the scene camera based on 6DoF camera motion.
+XR8.addCameraPipelineModule(XR8.Threejs.pipelineModule())
+
+// Add custom logic to the camera loop. This is done with camera pipeline modules that provide
+// logic for key lifecycle moments for processing each camera frame. In this case, we'll be
+// adding onStart logic for scene initialization, and onUpdate logic for scene updates.
+XR8.addCameraPipelineModule({
+ // Camera pipeline modules need a name. It can be whatever you want but must be unique
+ // within your app.
+ name: 'myawesomeapp',
+
+ // onStart is called once when the camera feed begins. In this case, we need to wait for the
+ // XR8.Threejs scene to be ready before we can access it to add content.
+ onStart: ({canvasWidth, canvasHeight}) => {
+ // Get the three.js scene. This was created by XR8.Threejs.pipelineModule().onStart(). The
+ // reason we can access it here now is because 'myawesomeapp' was installed after
+ // XR8.Threejs.pipelineModule().
+ const {scene, camera} = XR8.Threejs.xrScene()
+
+ // Add some objects to the scene and set the starting camera position.
+ initScene({scene, camera})
+
+    // Sync the xr controller's 6DoF position and camera parameters with our scene.
+ XR8.XrController.updateCameraProjectionMatrix({
+ origin: camera.position,
+ facing: camera.quaternion,
+ })
+ },
+
+ // onUpdate is called once per camera loop prior to render. Any three.js geometry scene would
+ // typically happen here.
+ onUpdate: () => {
+ // Update the position of objects in the scene, etc.
+ updateScene(XR8.Threejs.xrScene())
+ },
+})
+```
diff --git a/docs/engine/api/threejs/threejs.md b/docs/engine/api/threejs/threejs.md
new file mode 100644
index 0000000..3f2c91e
--- /dev/null
+++ b/docs/engine/api/threejs/threejs.md
@@ -0,0 +1,13 @@
+# XR8.Threejs
+
+## Description {#description}
+
+Provides a camera pipeline module that drives three.js camera to do virtual overlays.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[pipelineModule](pipelinemodule.md) | A pipeline module that interfaces with the three.js environment and lifecycle.
+[configure](configure.md) | Configures the three.js renderer.
+[xrScene](xrscene.md) | Get a handle to the xr scene, camera, renderer, (optional) camera feed texture, and (optional) layerScenes.
diff --git a/docs/engine/api/threejs/xrscene.md b/docs/engine/api/threejs/xrscene.md
new file mode 100644
index 0000000..9b8d301
--- /dev/null
+++ b/docs/engine/api/threejs/xrscene.md
@@ -0,0 +1,49 @@
+---
+sidebar_label: xrScene()
+---
+# XR8.Threejs.xrScene()
+
+`XR8.Threejs.xrScene()`
+
+## Description {#description}
+
+Get a handle to the xr scene, camera, renderer, (optional) camera feed texture, and (optional) layerScenes.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+An object: `{ scene, camera, renderer, cameraTexture, layerScenes }`
+
+Property | Type | Description
+--------- | --------- | ----------- |
+scene | [`Scene`](https://threejs.org/docs/#api/en/scenes/Scene) | The three.js scene.
+camera | [`Camera`](https://threejs.org/docs/#api/en/cameras/Camera) | The three.js main camera.
+renderer | [`Renderer`](https://threejs.org/docs/#api/en/renderers/WebGLRenderer) | The three.js renderer.
+cameraTexture [Optional] | [`Texture`](https://threejs.org/docs/#api/en/textures/Texture) | A three.js texture with the camera feed cropped to the canvas size. Enabled by calling [`XR8.Threejs.configure({renderCameraTexture: true})`](configure.md).
+layerScenes [Optional] | `Record` | A map of layer names to three.js layer scenes. Will contain records which are enabled by calling [`XR8.Threejs.configure({layerScenes: ['sky']})`](configure.md).
+
+The `LayerScene` in the `layerScenes` object has the following properties:
+
+Property | Type | Description
+--------- | --------- | ----------- |
+scene | [`Scene`](https://threejs.org/docs/#api/en/scenes/Scene) | The three.js scene for this layer. Content added to this sky will only be visible when in an area of the camera feed which this layer has been detected in. For example in Sky Effects a cube will only show up in the sky. Use `XR8.LayersController.configure({layers: {sky: {invertLayerMask: true}}})` to invert this and make the cube only show up when not in the sky.
+camera | [`Camera`](https://threejs.org/docs/#api/en/cameras/Camera) | The three.js camera for this layer. Will have its position and rotation synced with the main camera.
+
+## Example {#example}
+
+```javascript
+const {scene, camera, renderer, cameraTexture} = XR8.Threejs.xrScene()
+```
+
+## Example - Sky Scene {#example---sky-scene}
+
+```javascript
+XR8.LayersController.configure({layers: {sky: {}}})
+XR8.Threejs.configure({layerScenes: ['sky']})
+...
+const {layerScenes} = XR8.Threejs.xrScene()
+createSkyScene(layerScenes.sky.scene, layerScenes.sky.camera)
+```
diff --git a/docs/engine/api/xr8/_category_.json b/docs/engine/api/xr8/_category_.json
new file mode 100644
index 0000000..ecc8833
--- /dev/null
+++ b/docs/engine/api/xr8/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "XR8",
+ "position": 1
+}
diff --git a/docs/engine/api/xr8/addcamerapipelinemodule.md b/docs/engine/api/xr8/addcamerapipelinemodule.md
new file mode 100644
index 0000000..a5f811b
--- /dev/null
+++ b/docs/engine/api/xr8/addcamerapipelinemodule.md
@@ -0,0 +1,99 @@
+---
+sidebar_label: addCameraPipelineModule()
+---
+# XR8.addCameraPipelineModule()
+
+`XR8.addCameraPipelineModule(module)`
+
+## Description {#description}
+
+8th Wall camera applications are built using a camera pipeline module framework. For a full description on camera pipeline modules, see [CameraPipelineModule](/docs/engine/api/camerapipelinemodule).
+
+Applications install modules which then control the behavior of the application at runtime. A module object must have a **.name** string which is unique within the application, and then should provide one or more of the camera lifecycle methods which will be executed at the appropriate point in the run loop.
+
+During the main runtime of an application, each camera frame goes through the following cycle:
+
+`onBeforeRun` -> `onCameraStatusChange` (`requesting` -> `hasStream` -> `hasVideo` | `failed`) -> `onStart` -> `onAttach` -> `onProcessGpu` -> `onProcessCpu` -> `onUpdate` -> `onRender`
+
+Camera modules should implement one or more of the following camera lifecycle methods:
+
+Function | Description
+-------- | -----------
+[onAppResourcesLoaded](/docs/engine/api/camerapipelinemodule/onappresourcesloaded) | Called when we have received the resources attached to an app from the server.
+[onAttach](/docs/engine/api/camerapipelinemodule/onattach) | Called before the first time a module receives frame updates. It is called on modules that were added either before or after the pipeline is running.
+[onBeforeRun](/docs/engine/api/camerapipelinemodule/onbeforerun) | Called immediately after [`XR8.run()`](run.md). If any promises are returned, XR will wait on all promises before continuing.
+[onCameraStatusChange](/docs/engine/api/camerapipelinemodule/oncamerastatuschange) | Called when a change occurs during the camera permissions request.
+[onCanvasSizeChange](/docs/engine/api/camerapipelinemodule/oncanvassizechange) | Called when the canvas changes size.
+[onDetach](/docs/engine/api/camerapipelinemodule/ondetach) | Called after the last time a module receives frame updates. This is either after the engine is stopped or the module is manually removed from the pipeline, whichever comes first.
+[onDeviceOrientationChange](/docs/engine/api/camerapipelinemodule/ondeviceorientationchange) | Called when the device changes landscape/portrait orientation.
+[onException](/docs/engine/api/camerapipelinemodule/onexception) | Called when an error occurs in XR. Called with the error object.
+[onPaused](/docs/engine/api/camerapipelinemodule/onpaused) | Called when [`XR8.pause()`](pause.md) is called.
+[onProcessCpu](/docs/engine/api/camerapipelinemodule/onprocesscpu) | Called to read results of GPU processing and return usable data.
+[onProcessGpu](/docs/engine/api/camerapipelinemodule/onprocessgpu) | Called to start GPU processing.
+[onRemove](/docs/engine/api/camerapipelinemodule/onremove) | Called when a module is removed from the pipeline.
+[onRender](/docs/engine/api/camerapipelinemodule/onrender) | Called after onUpdate. This is the time for the rendering engine to issue any WebGL drawing commands. If an application is providing its own run loop and is relying on [`XR8.runPreRender()`](runprerender.md) and [`XR8.runPostRender()`](runpostrender.md), this method is not called and all rendering must be coordinated by the external run loop.
+[onResume](/docs/engine/api/camerapipelinemodule/onresume) | Called when [`XR8.resume()`](resume.md) is called.
+[onStart](/docs/engine/api/camerapipelinemodule/onstart) | Called when XR starts. First callback after [`XR8.run()`](run.md) is called.
+[onUpdate](/docs/engine/api/camerapipelinemodule/onupdate) | Called to update the scene before render. Data returned by modules in [`onProcessGpu`](/docs/engine/api/camerapipelinemodule/onprocessgpu) and [`onProcessCpu`](/docs/engine/api/camerapipelinemodule/onprocesscpu) will be present as processGpu.modulename and processCpu.modulename where the name is given by module.name = "modulename".
+[onVideoSizeChange](/docs/engine/api/camerapipelinemodule/onvideosizechange) | Called when the camera video changes size.
+[requiredPermissions](/docs/engine/api/camerapipelinemodule/requiredpermissions) | Modules can indicate what browser capabilities they require that may need permissions requests. These can be used by the framework to request appropriate permissions if absent, or to create components that request the appropriate permissions before running XR.
+
+Note: Camera modules that implement [`onProcessGpu`](/docs/engine/api/camerapipelinemodule/onprocessgpu) or [`onProcessCpu`](/docs/engine/api/camerapipelinemodule/onprocesscpu) can provide data to subsequent stages of the pipeline. This is done by the module's name.
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+module | `Object` | The module object.
+
+## Returns {#returns}
+
+None
+
+## Example 1 - A camera pipeline module for managing camera permissions: {#example-1---a-camera-pipeline-module-for-managing-camera-permissions}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'camerastartupmodule',
+ onCameraStatusChange: ({status}) => {
+ if (status == 'requesting') {
+ myApplication.showCameraPermissionsPrompt()
+ } else if (status == 'hasStream') {
+ myApplication.dismissCameraPermissionsPrompt()
+ } else if (status == 'hasVideo') {
+ myApplication.startMainApplication()
+ } else if (status == 'failed') {
+ myApplication.promptUserToChangeBrowserSettings()
+ }
+ },
+})
+```
+
+## Example 2 - a QR code scanning application could be built like this {#example-2---a-qr-code-scanning-application-could-be-built-like-this}
+
+```javascript
+// Install a module which gets the camera feed as a UInt8Array.
+XR8.addCameraPipelineModule(
+ XR8.CameraPixelArray.pipelineModule({luminance: true, width: 240, height: 320}))
+
+// Install a module that draws the camera feed to the canvas.
+XR8.addCameraPipelineModule(XR8.GlTextureRenderer.pipelineModule())
+
+// Create our custom application logic for scanning and displaying QR codes.
+XR8.addCameraPipelineModule({
+ name: 'qrscan',
+ onProcessCpu: ({processGpuResult}) => {
+ // CameraPixelArray.pipelineModule() returned these in onProcessGpu.
+ const { pixels, rows, cols, rowBytes } = processGpuResult.camerapixelarray
+ const { wasFound, url, corners } = findQrCode(pixels, rows, cols, rowBytes)
+ return { wasFound, url, corners }
+ },
+ onUpdate: ({processCpuResult}) => {
+ // These were returned by this module ('qrscan') in onProcessCpu
+ const {wasFound, url, corners } = processCpuResult.qrscan
+ if (wasFound) {
+ showUrlAndCorners(url, corners)
+ }
+ },
+})
+```
diff --git a/docs/engine/api/xr8/addcamerapipelinemodules.md b/docs/engine/api/xr8/addcamerapipelinemodules.md
new file mode 100644
index 0000000..1394bf2
--- /dev/null
+++ b/docs/engine/api/xr8/addcamerapipelinemodules.md
@@ -0,0 +1,37 @@
+---
+sidebar_label: addCameraPipelineModules()
+---
+# XR8.addCameraPipelineModules()
+
+`XR8.addCameraPipelineModules([ modules ])`
+
+## Description {#description}
+
+Add multiple camera pipeline modules. This is a convenience method that calls [`XR8.addCameraPipelineModule()`](addcamerapipelinemodule.md) in order on each element of the input array.
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+modules | `[Object]` | An array of camera pipeline modules.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+const onxrloaded = () => {
+ XR8.addCameraPipelineModules([ // Add camera pipeline modules.
+ // Existing pipeline modules.
+ XR8.GlTextureRenderer.pipelineModule(), // Draws the camera feed.
+ ])
+
+ // Request camera permissions and run the camera.
+ XR8.run({canvas: document.getElementById('camerafeed')})
+}
+
+// Wait until the XR javascript has loaded before making XR calls.
+window.XR8 ? onxrloaded() : window.addEventListener('xrloaded', onxrloaded)
+```
diff --git a/docs/engine/api/xr8/clearcamerapipelinemodules.md b/docs/engine/api/xr8/clearcamerapipelinemodules.md
new file mode 100644
index 0000000..bc16ac0
--- /dev/null
+++ b/docs/engine/api/xr8/clearcamerapipelinemodules.md
@@ -0,0 +1,24 @@
+---
+sidebar_label: clearCameraPipelineModules()
+---
+# XR8.clearCameraPipelineModules()
+
+`XR8.clearCameraPipelineModules()`
+
+## Description {#description}
+
+Remove all camera pipeline modules from the camera loop.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.clearCameraPipelineModules()
+```
diff --git a/docs/engine/api/xr8/initialize.md b/docs/engine/api/xr8/initialize.md
new file mode 100644
index 0000000..40ff7a6
--- /dev/null
+++ b/docs/engine/api/xr8/initialize.md
@@ -0,0 +1,24 @@
+---
+sidebar_label: initialize()
+---
+# XR8.initialize()
+
+`XR8.initialize()`
+
+## Description {#description}
+
+Returns a promise that is fulfilled when the AR Engine's WebAssembly is initialized.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A promise that is fulfilled when the AR Engine's WebAssembly is initialized.
+
+## Example {#example}
+
+```javascript
+XR8.initialize().then(() => console.log(XR8.version()))
+```
diff --git a/docs/engine/api/xr8/isinitialized.md b/docs/engine/api/xr8/isinitialized.md
new file mode 100644
index 0000000..0835a78
--- /dev/null
+++ b/docs/engine/api/xr8/isinitialized.md
@@ -0,0 +1,26 @@
+---
+sidebar_label: isInitialized()
+---
+# XR8.isInitialized()
+
+`XR8.isInitialized()`
+
+## Description {#description}
+
+Indicates whether or not the AR Engine's WebAssembly is initialized.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A Boolean indicating whether or not the AR Engine's WebAssembly is initialized.
+
+## Example {#example}
+
+```javascript
+if (XR8.isInitialized()) {
+ console.log(XR8.version())
+}
+```
diff --git a/docs/engine/api/xr8/ispaused.md b/docs/engine/api/xr8/ispaused.md
new file mode 100644
index 0000000..bd71dc5
--- /dev/null
+++ b/docs/engine/api/xr8/ispaused.md
@@ -0,0 +1,34 @@
+---
+sidebar_label: isPaused()
+---
+# XR8.isPaused()
+
+`XR8.isPaused()`
+
+## Description {#description}
+
+Indicates whether or not the XR session is paused.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+True if the XR session is paused, false otherwise.
+
+## Example {#example}
+
+```javascript
+// Call XR8.pause() / XR8.resume() when the button is pressed.
+document.getElementById('pause').addEventListener(
+ 'click',
+ () => {
+ if (!XR8.isPaused()) {
+ XR8.pause()
+ } else {
+ XR8.resume()
+ }
+ },
+ true)
+```
diff --git a/docs/engine/api/xr8/pause.md b/docs/engine/api/xr8/pause.md
new file mode 100644
index 0000000..aa3863f
--- /dev/null
+++ b/docs/engine/api/xr8/pause.md
@@ -0,0 +1,34 @@
+---
+sidebar_label: pause()
+---
+# XR8.pause()
+
+`XR8.pause()`
+
+## Description {#description}
+
+Pause the current XR session. While paused, device motion is not tracked.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+// Call XR8.pause() / XR8.resume() when the button is pressed.
+document.getElementById('pause').addEventListener(
+ 'click',
+ () => {
+ if (!XR8.isPaused()) {
+ XR8.pause()
+ } else {
+ XR8.resume()
+ }
+ },
+ true)
+```
diff --git a/docs/engine/api/xr8/removecamerapipelinemodule.md b/docs/engine/api/xr8/removecamerapipelinemodule.md
new file mode 100644
index 0000000..6c0fc39
--- /dev/null
+++ b/docs/engine/api/xr8/removecamerapipelinemodule.md
@@ -0,0 +1,26 @@
+---
+sidebar_label: removeCameraPipelineModule()
+---
+# XR8.removeCameraPipelineModule()
+
+`XR8.removeCameraPipelineModule(moduleName)`
+
+## Description {#description}
+
+Removes a module from the camera pipeline.
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+moduleName | `String` | The name of a module.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.removeCameraPipelineModule('reality')
+```
diff --git a/docs/engine/api/xr8/removecamerapipelinemodules.md b/docs/engine/api/xr8/removecamerapipelinemodules.md
new file mode 100644
index 0000000..6b64bdd
--- /dev/null
+++ b/docs/engine/api/xr8/removecamerapipelinemodules.md
@@ -0,0 +1,28 @@
+---
+sidebar_label: removeCameraPipelineModules()
+---
+# XR8.removeCameraPipelineModules()
+
+`XR8.removeCameraPipelineModules([ moduleNames ])`
+
+## Description {#description}
+
+Remove multiple camera pipeline modules. This is a convenience method that calls
+[`XR8.removeCameraPipelineModule()`](removecamerapipelinemodule.md) in order on each element of the input
+array.
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+moduleNames | `[String] or [Object]` | An array of module name strings, or an array of objects with a name property.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.removeCameraPipelineModules(['threejsrenderer', 'reality'])
+```
diff --git a/docs/engine/api/xr8/requiredpermissions.md b/docs/engine/api/xr8/requiredpermissions.md
new file mode 100644
index 0000000..7eb0484
--- /dev/null
+++ b/docs/engine/api/xr8/requiredpermissions.md
@@ -0,0 +1,30 @@
+---
+sidebar_label: requiredPermissions()
+---
+# XR8.requiredPermissions()
+
+`XR8.requiredPermissions()`
+
+## Description {#description}
+
+Return a list of permissions required by the application.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A list of [`XR8.XrPermissions.permissions()`](../xrpermissions/permissions.md).
+
+## Example {#example}
+
+```javascript
+if (XR8.XrPermissions) {
+ const permissions = XR8.XrPermissions.permissions()
+ const requiredPermissions = XR8.requiredPermissions()
+ if (!requiredPermissions.has(permissions.DEVICE_ORIENTATION)) {
+ return
+ }
+}
+```
diff --git a/docs/engine/api/xr8/resume.md b/docs/engine/api/xr8/resume.md
new file mode 100644
index 0000000..a3790ab
--- /dev/null
+++ b/docs/engine/api/xr8/resume.md
@@ -0,0 +1,34 @@
+---
+sidebar_label: resume()
+---
+# XR8.resume()
+
+`XR8.resume()`
+
+## Description {#description}
+
+Resume the current XR session after it has been paused.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+// Call XR8.pause() / XR8.resume() when the button is pressed.
+document.getElementById('pause').addEventListener(
+ 'click',
+ () => {
+ if (!XR8.isPaused()) {
+ XR8.pause()
+ } else {
+ XR8.resume()
+ }
+ },
+ true)
+```
diff --git a/docs/engine/api/xr8/run.md b/docs/engine/api/xr8/run.md
new file mode 100644
index 0000000..4fe13e6
--- /dev/null
+++ b/docs/engine/api/xr8/run.md
@@ -0,0 +1,77 @@
+---
+sidebar_label: run()
+---
+# XR8.run()
+
+`XR8.run(canvas, webgl2, ownRunLoop, cameraConfig, glContextConfig, allowedDevices, sessionConfiguration)`
+
+## Description {#description}
+
+Open the camera and start running the camera run loop.
+
+## Parameters {#parameters}
+
+Property | Type | Default | Description
+-------- | ---- | ------- | -----------
+canvas | `HTMLCanvasElement` | | The HTML Canvas that the camera feed will be drawn to.
+webgl2 [Optional] | `Boolean` | `true` | If true, use WebGL2 if available, otherwise fallback to WebGL1. If false, always use WebGL1.
+ownRunLoop [Optional] | `Boolean` | `true` | If true, XR should use its own run loop. If false, you will provide your own run loop and be responsible for calling [runPreRender](runprerender.md) and [runPostRender](runpostrender.md) yourself [Advanced Users only]
+cameraConfig: {direction} [Optional] | `Object` | `{direction: XR8.XrConfig.camera().BACK}` | Desired camera to use. Supported values for `direction` are `XR8.XrConfig.camera().BACK` or `XR8.XrConfig.camera().FRONT`
+glContextConfig [Optional] | `WebGLContextAttributes` | `null` | The attributes to configure the WebGL canvas context.
+allowedDevices [Optional] | [`XR8.XrConfig.device()`](/docs/engine/api/xrconfig/device) | `XR8.XrConfig.device().MOBILE_AND_HEADSETS` | Specify the class of devices that the pipeline should run on. If the current device is not in that class, running will fail prior to opening the camera. If allowedDevices is `XR8.XrConfig.device().ANY`, always open the camera. Note that world tracking can only be used with `XR8.XrConfig.device().MOBILE_AND_HEADSETS` or `XR8.XrConfig.device().MOBILE`.
+sessionConfiguration: `{disableXrTablet, xrTabletStartsMinimized, defaultEnvironment}` [Optional] | `Object` | `{}` | Configure options related to varying types of sessions.
+
+`sessionConfiguration` is an object with the following [Optional] properties:
+
+Property | Type | Default | Description
+--------- | ---- | ------- | -----------
+disableXrTablet [Optional] | `Boolean` | `false` | Disable the tablet visible in immersive sessions.
+xrTabletStartsMinimized [Optional] | `Boolean` | `false` | The tablet will start minimized.
+defaultEnvironment `{disabled, floorScale, floorTexture, floorColor, fogIntensity, skyTopColor, skyBottomColor, skyGradientStrength}` [Optional] | `Object` | {} | Configure options related to the default environment of your immersive session.
+
+`defaultEnvironment` is an object with the following [Optional] properties:
+
+Property | Type | Default | Description
+--------- | ---- | ------- | -----------
+disabled [Optional] | `Boolean` | `false` | Disable the default "void space" background.
+floorScale [Optional] | `Number` | `1` | Shrink or grow the floor texture.
+floorTexture [Optional] | Asset | | Specify an alternative texture asset or URL for the tiled floor.
+floorColor [Optional] | Hex Color | `#1A1C2A` | Set the floor color.
+fogIntensity [Optional] | `Number` | `1` | Increase or decrease fog density.
+skyTopColor [Optional] | Hex Color | `#BDC0D6` | Set the color of the sky directly above the user.
+skyBottomColor [Optional] | Hex Color | `#1A1C2A` | Set the color of the sky at the horizon.
+skyGradientStrength [Optional] | `Number` | `1` | Control how sharply the sky gradient transitions.
+
+Notes:
+
+* `cameraConfig`: World tracking (SLAM) is only supported on the `back` camera. If you are using the `front` camera, you must disable world tracking by calling `XR8.XrController.configure({disableWorldTracking: true})` first.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+// Open the camera and start running the camera run loop
+// In index.html:
+XR8.run({canvas: document.getElementById('camerafeed')})
+```
+
+## Example - Using Front camera (image tracking only) {#example---using-front-camera-image-tracking-only}
+
+```javascript
+// Disable world tracking (SLAM). This is required to use the front camera.
+XR8.XrController.configure({disableWorldTracking: true})
+// Open the camera and start running the camera run loop
+// In index.html:
+XR8.run({canvas: document.getElementById('camerafeed'), cameraConfig: {direction: XR8.XrConfig.camera().FRONT}})
+```
+
+## Example - Set glContextConfig {#example---set-glcontextconfig}
+
+```javascript
+// Open the camera and start running the camera run loop with an opaque canvas.
+// In index.html:
+XR8.run({canvas: document.getElementById('camerafeed'), glContextConfig: {alpha: false, preserveDrawingBuffer: false}})
+```
diff --git a/docs/engine/api/xr8/runpostrender.md b/docs/engine/api/xr8/runpostrender.md
new file mode 100644
index 0000000..a8f57ea
--- /dev/null
+++ b/docs/engine/api/xr8/runpostrender.md
@@ -0,0 +1,32 @@
+---
+sidebar_label: runPostRender()
+---
+# XR8.runPostRender()
+
+`XR8.runPostRender()`
+
+## Description {#description}
+
+Executes all lifecycle updates that should happen after rendering.
+
+**IMPORTANT**: Make sure that [`onStart`](/docs/engine/api/camerapipelinemodule/onstart) has been called before calling `XR8.runPreRender()` / `XR8.runPostRender()`.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+// Implement A-Frame components tock() method
+function tock() {
+ // Check whether XR is initialized
+ ...
+ // Run XR lifecycle methods
+ XR8.runPostRender()
+}
+```
diff --git a/docs/engine/api/xr8/runprerender.md b/docs/engine/api/xr8/runprerender.md
new file mode 100644
index 0000000..df8d9f1
--- /dev/null
+++ b/docs/engine/api/xr8/runprerender.md
@@ -0,0 +1,34 @@
+---
+sidebar_label: runPreRender()
+---
+# XR8.runPreRender()
+
+`XR8.runPreRender( timestamp )`
+
+## Description {#description}
+
+Executes all lifecycle updates that should happen before rendering.
+
+**IMPORTANT**: Make sure that [`onStart`](/docs/engine/api/camerapipelinemodule/onstart) has been called before calling `XR8.runPreRender()` / `XR8.runPostRender()`.
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+timestamp | `Number` | The current time, in milliseconds.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+// Implement A-Frame components tick() method
+function tick() {
+ // Check device compatibility and run any necessary view geometry updates and draw the camera feed.
+ ...
+ // Run XR lifecycle methods
+ XR8.runPreRender(Date.now())
+ }
+```
diff --git a/docs/engine/api/xr8/stop.md b/docs/engine/api/xr8/stop.md
new file mode 100644
index 0000000..cd53f7f
--- /dev/null
+++ b/docs/engine/api/xr8/stop.md
@@ -0,0 +1,25 @@
+---
+sidebar_label: stop()
+---
+# XR8.stop()
+
+`XR8.stop()`
+
+## Description {#description}
+
+While stopped, the camera feed is closed and device motion is not tracked. Must call
+[`XR8.run()`](run.md) to restart after the engine is stopped.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.stop()
+```
diff --git a/docs/engine/api/xr8/version.md b/docs/engine/api/xr8/version.md
new file mode 100644
index 0000000..5720198
--- /dev/null
+++ b/docs/engine/api/xr8/version.md
@@ -0,0 +1,24 @@
+---
+sidebar_label: version()
+---
+# XR8.version()
+
+`XR8.version()`
+
+## Description {#description}
+
+Get the 8th Wall Web engine version.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+A string indicating the 8th Wall Web engine version.
+
+## Example {#example}
+
+```javascript
+console.log(XR8.version())
+```
diff --git a/docs/engine/api/xr8/xr8.md b/docs/engine/api/xr8/xr8.md
new file mode 100644
index 0000000..0c2a36c
--- /dev/null
+++ b/docs/engine/api/xr8/xr8.md
@@ -0,0 +1,51 @@
+# XR8
+
+## Description {#description}
+
+Entry point for 8th Wall's JavaScript API.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[addCameraPipelineModule](addcamerapipelinemodule.md) | Adds a module to the camera pipeline that will receive event callbacks for each stage in the camera pipeline.
+[addCameraPipelineModules](addcamerapipelinemodules.md) | Add multiple camera pipeline modules. This is a convenience method that calls [addCameraPipelineModule](addcamerapipelinemodule.md) in order on each element of the input array.
+[clearCameraPipelineModules](clearcamerapipelinemodules.md) | Remove all camera pipeline modules from the camera loop.
+[initialize](initialize.md) | Returns a promise that is fulfilled when the AR Engine's WebAssembly is initialized.
+[isInitialized](isinitialized.md) | Indicates whether or not the AR Engine's WebAssembly is initialized.
+[isPaused](ispaused.md) | Indicates whether or not the XR session is paused.
+[pause](pause.md) | Pause the current XR session. While paused, the camera feed is stopped and device motion is not tracked.
+[resume](resume.md) | Resume the current XR session.
+[removeCameraPipelineModule](removecamerapipelinemodule.md) | Removes a module from the camera pipeline.
+[removeCameraPipelineModules](removecamerapipelinemodules.md) | Remove multiple camera pipeline modules. This is a convenience method that calls [removeCameraPipelineModule](removecamerapipelinemodule.md) in order on each element of the input array.
+[requiredPermissions](requiredpermissions.md) | Return a list of permissions required by the application.
+[run](run.md) | Open the camera and start running the camera run loop.
+[runPreRender](runprerender.md) | Executes all lifecycle updates that should happen before rendering.
+[runPostRender](runpostrender.md) | Executes all lifecycle updates that should happen after rendering.
+[stop](stop.md) | Stop the current XR session. While stopped, the camera feed is closed and device motion is not tracked.
+[version](version.md) | Get the 8th Wall Web engine version.
+
+## Events {#events}
+
+Event Emitted | Description
+------------- | -----------
+xrloaded | This event is emitted once `XR8` has loaded.
+
+## Modules {#modules}
+
+Module | Description
+-------- | -----------
+[AFrame](../aframe/aframe.md) | Entry point for A-Frame integration with 8th Wall Web.
+[Babylonjs](../babylonjs/babylonjs.md) | Entry point for Babylon.js integration with 8th Wall Web.
+[CameraPixelArray](../camerapixelarray/camerapixelarray.md) | Provides a camera pipeline module that gives access to camera data as a grayscale or color uint8 array.
+[CanvasScreenshot](../canvasscreenshot/canvasscreenshot.md) | Provides a camera pipeline module that can generate screenshots of the current scene.
+[FaceController](../facecontroller/facecontroller.md) | Provides face detection and meshing, and interfaces for configuring tracking.
+[GlTextureRenderer](../gltexturerenderer/gltexturerenderer.md) | Provides a camera pipeline module that draws the camera feed to a canvas as well as extra utilities for GL drawing operations.
+[LayersController](../layerscontroller/layerscontroller.md) | Provides a camera pipeline module that enables semantic layer detection and interfaces for configuring layer rendering.
+[MediaRecorder](../mediarecorder/mediarecorder.md) | Provides a camera pipeline module that allows you to record a video in MP4 format.
+[PlayCanvas](../playcanvas/playcanvas.md) | Entry point for PlayCanvas integration with 8th Wall Web.
+[Threejs](../threejs/threejs.md) | Provides a camera pipeline module that drives three.js camera to do virtual overlays.
+[XrConfig](../xrconfig/xrconfig.md) | Specifying class of devices and cameras that pipeline modules should run on.
+[XrController](../xrcontroller/xrcontroller.md) | `XrController` provides 6DoF camera tracking and interfaces for configuring tracking.
+[XrDevice](../xrdevice/xrdevice.md) | Provides information about device compatibility and characteristics.
+[XrPermissions](../xrpermissions/xrpermissions.md) | Utilities for specifying permissions required by a pipeline module.
diff --git a/docs/engine/api/xrconfig/_category_.json b/docs/engine/api/xrconfig/_category_.json
new file mode 100644
index 0000000..ee6409e
--- /dev/null
+++ b/docs/engine/api/xrconfig/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "XrConfig",
+ "position": 23
+}
diff --git a/docs/engine/api/xrconfig/camera.md b/docs/engine/api/xrconfig/camera.md
new file mode 100644
index 0000000..313347e
--- /dev/null
+++ b/docs/engine/api/xrconfig/camera.md
@@ -0,0 +1,17 @@
+---
+sidebar_label: camera()
+---
+# XR8.XrConfig.camera()
+
+Enumeration
+
+## Description {#description}
+
+Desired camera to use.
+
+## Properties {#properties}
+
+Property | Value | Description
+-------- | ----- | -----------
+FRONT | `'front'` | Use the front facing / selfie camera.
+BACK | `'back'` | Use the rear facing / back camera.
diff --git a/docs/engine/api/xrconfig/device.md b/docs/engine/api/xrconfig/device.md
new file mode 100644
index 0000000..689009b
--- /dev/null
+++ b/docs/engine/api/xrconfig/device.md
@@ -0,0 +1,20 @@
+---
+sidebar_label: device()
+---
+# XR8.XrConfig.device()
+
+Enumeration
+
+## Description {#description}
+
+Specify the class of devices that the pipeline should run on. If the current device is not in that class, running will fail prior to opening the camera. If allowedDevices is `XR8.XrConfig.device().ANY`, always open the camera.
+
+Note: World Effects (SLAM) can only be used with `XR8.XrConfig.device().MOBILE_AND_HEADSETS` or `XR8.XrConfig.device().MOBILE`.
+
+## Properties {#properties}
+
+Property | Value | Description
+-------- | ----- | -----------
+MOBILE | `'mobile'` | Restrict camera pipeline on mobile-class devices, for example phones and tablets.
+MOBILE_AND_HEADSETS | `'mobile-and-headsets'` | Restrict camera pipeline on mobile and headset class devices.
+ANY | `'any'` | Start running camera pipeline without checking device capabilities. This may fail at some point in the pipeline startup if a required sensor is not available at run time (for example, a laptop has no camera).
diff --git a/docs/engine/api/xrconfig/xrconfig.md b/docs/engine/api/xrconfig/xrconfig.md
new file mode 100644
index 0000000..8175e40
--- /dev/null
+++ b/docs/engine/api/xrconfig/xrconfig.md
@@ -0,0 +1,12 @@
+# XR8.XrConfig {#xr8xrconfig}
+
+## Description {#description}
+
+Utilities for specifying class of devices and cameras that pipeline modules should run on.
+
+## Properties {#properties}
+
+Property | Type | Description
+-------- | ---- | -----------
+[camera()](camera.md) | Enum | Desired camera to use.
+[device()](device.md) | Enum | Specify the class of devices that the pipeline should run on.
diff --git a/docs/engine/api/xrcontroller/_category_.json b/docs/engine/api/xrcontroller/_category_.json
new file mode 100644
index 0000000..6600555
--- /dev/null
+++ b/docs/engine/api/xrcontroller/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "XrController",
+ "position": 24
+}
diff --git a/docs/engine/api/xrcontroller/configure.md b/docs/engine/api/xrcontroller/configure.md
new file mode 100644
index 0000000..b20b51f
--- /dev/null
+++ b/docs/engine/api/xrcontroller/configure.md
@@ -0,0 +1,49 @@
+---
+sidebar_label: configure()
+---
+# XR8.XrController.configure()
+
+`XrController.configure({ disableWorldTracking, enableLighting, enableWorldPoints, imageTargets: [], leftHandedAxes, mirroredDisplay, scale })`
+
+## Description {#description}
+
+Configures the processing performed by `XrController` (some settings may have performance implications).
+
+## Parameters {#parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+disableWorldTracking [Optional] | `Boolean` | `false` | If true, turn off SLAM tracking for efficiency. This needs to be done **BEFORE** [`XR8.run()`](/docs/engine/api/xr8/run) is called.
+enableLighting [Optional] | `Boolean` | `false` | If true, `lighting` will be provided by [`XR8.XrController.pipelineModule()`](pipelinemodule.md) as `processCpuResult.reality.lighting`
+enableWorldPoints [Optional] | `Boolean` | `false` | If true, `worldPoints` will be provided by [`XR8.XrController.pipelineModule()`](pipelinemodule.md) as `processCpuResult.reality.worldPoints`.
+imageTargets [Optional] | `Array` | | List of names of the image target to detect. Can be modified at runtime. Note: All currently active image targets will be replaced with the ones specified in this list.
+leftHandedAxes [Optional] | `Boolean` | `false` | If true, use left-handed coordinates.
+mirroredDisplay [Optional] | `Boolean` | `false` | If true, flip left and right in the output.
+scale [Optional] | `String` | `responsive` | Either `responsive` or `absolute`. `responsive` will return values so that the camera on frame 1 is at the origin defined via [`XR8.XrController.updateCameraProjectionMatrix()`](updatecameraprojectionmatrix.md). `absolute` will return the camera, image targets, etc in meters. When using `absolute` the x-position, z-position, and rotation of the starting pose will respect the parameters set in [`XR8.XrController.updateCameraProjectionMatrix()`](updatecameraprojectionmatrix.md) once scale has been estimated. The y-position will depend on the camera's physical height from the ground plane.
+
+**IMPORTANT:** `disableWorldTracking: true` needs to be set **BEFORE** both [`XR8.XrController.pipelineModule()`](pipelinemodule.md) and [`XR8.run()`](/docs/engine/api/xr8/run) are called and cannot be modified while the engine is running.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.XrController.configure({enableLighting: true, disableWorldTracking: false, scale: 'absolute'})
+```
+
+## Example - Disable world tracking {#example---disable-world-tracking}
+
+```javascript
+// Disable world tracking (SLAM)
+XR8.XrController.configure({disableWorldTracking: true})
+// Open the camera and start running the camera run loop
+XR8.run({canvas: document.getElementById('camerafeed')})
+```
+
+## Example - Change active image target set {#example---change-active-image-target-set}
+
+```javascript
+XR8.XrController.configure({imageTargets: ['image-target1', 'image-target2', 'image-target3']})
+```
diff --git a/docs/engine/api/xrcontroller/hittest.md b/docs/engine/api/xrcontroller/hittest.md
new file mode 100644
index 0000000..9498c4d
--- /dev/null
+++ b/docs/engine/api/xrcontroller/hittest.md
@@ -0,0 +1,40 @@
+---
+sidebar_label: hitTest()
+---
+# XR8.XrController.hitTest()
+
+`XrController.hitTest(X, Y, includedTypes = [])`
+
+## Description {#description}
+
+Estimate the 3D position of a point on the camera feed. X and Y are specified as numbers between 0 and 1, where (0, 0) is the upper left corner and (1, 1) is the lower right corner of the camera feed as rendered in the camera that was specified by [`XR8.XrController.updateCameraProjectionMatrix()`](updatecameraprojectionmatrix.md). Multiple 3d position estimates may be returned for a single hit test based on the source of data being used to estimate the position. The data source that was used to estimate the position is indicated by the `hitTest.type`.
+
+## Parameters {#parameters}
+
+Parameter | Type | Description
+--------- | ---- | -----------
+X | `Number` | Value between 0 and 1 that represents the horizontal position on camera feed from left to right.
+Y | `Number` | Value between 0 and 1 that represents the vertical position on camera feed from top to bottom.
+includedTypes | `[String]` | List that should contain `'FEATURE_POINT'`.
+
+## Returns {#returns}
+
+An array of estimated 3D positions from the hit test:
+
+`[{ type, position, rotation, distance }]`
+
+Parameter | Type | Description
+--------- | ---- | -----------
+type | `String` | One of `'FEATURE_POINT'`, `'ESTIMATED_SURFACE'`, `'DETECTED_SURFACE'`, or `'UNSPECIFIED'`
+position | `{x, y, z}` | The estimated 3D position of the queried point on the camera feed.
+rotation | `{x, y, z, w}` | The estimated 3D rotation of the queried point on the camera feed.
+distance | `Number` | The estimated distance from the device of the queried point on the camera feed.
+
+## Example {#example}
+```javascript
+const hitTestHandler = (e) => {
+ const x = e.touches[0].clientX / window.innerWidth
+ const y = e.touches[0].clientY / window.innerHeight
+ const hitTestResults = XR8.XrController.hitTest(x, y, ['FEATURE_POINT'])
+}
+```
diff --git a/docs/engine/api/xrcontroller/pipelinemodule.md b/docs/engine/api/xrcontroller/pipelinemodule.md
new file mode 100644
index 0000000..be81420
--- /dev/null
+++ b/docs/engine/api/xrcontroller/pipelinemodule.md
@@ -0,0 +1,192 @@
+---
+sidebar_label: pipelineModule()
+---
+# XR8.XrController.pipelineModule()
+
+`XR8.XrController.pipelineModule()`
+
+## Description {#description}
+
+Creates a camera pipeline module that, when installed, receives callbacks on when the camera has started, camera processing events, and other state changes. These are used to calculate the camera's position.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+Return value is an object made available to [`onUpdate`](/docs/engine/api/camerapipelinemodule/onupdate) as:
+
+`processCpuResult.reality: { rotation, position, intrinsics, trackingStatus, trackingReason, worldPoints, realityTexture, lighting }`
+
+Property | Type | Description
+--------- | ---- | -----------
+rotation | `{w, x, y, z}` | The orientation (quaternion) of the camera in the scene.
+position | `{x, y, z}` | The position of the camera in the scene.
+intrinsics | `[Number]` | A 16 dimensional column-major 4x4 projection matrix that gives the scene camera the same field of view as the rendered camera feed.
+trackingStatus | `String` | One of `'LIMITED'` or `'NORMAL'`.
+trackingReason | `String` | One of `'UNSPECIFIED'` or `'INITIALIZING'`.
+worldPoints | `[{id, confidence, position: {x, y, z}}]` | An array of detected points in the world at their location in the scene. Only filled if `XrController` is configured to return world points and `trackingReason != 'INITIALIZING'`.
+realityTexture | [`WebGLTexture`](https://developer.mozilla.org/en-US/docs/Web/API/WebGLTexture) | The texture containing camera feed data.
+lighting | `{exposure, temperature}` | Exposure of the lighting in your environment. Note: `temperature` has not yet been implemented.
+
+## Dispatched Events {#dispatched-events}
+
+**trackingStatus**: Fires when `XrController` starts and tracking status or reason changes.
+
+`reality.trackingstatus : { status, reason }`
+
+Property | Type | Description
+--------- | ---- | -----------
+status | `String` | One of `'LIMITED'` or `'NORMAL'`.
+reason | `String` | One of `'INITIALIZING'` or `'UNDEFINED'`.
+
+**imageloading**: Fires when detection image loading begins.
+
+`imageloading.detail : { imageTargets: {name, type, metadata} }`
+
+Property | Type | Description
+--------- | ---- | -----------
+name | `String` | The image's name.
+type | `String` | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+metadata | `Object` | User metadata.
+
+**imagescanning**: Fires when all detection images have been loaded and scanning has begun.
+
+`imagescanning.detail : { imageTargets: {name, type, metadata, geometry} }`
+
+Property | Type | Description
+--------- | ---- | -----------
+name | `String` | The image's name.
+type | `String` | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+metadata | `Object` | User metadata.
+geometry | `Object` | Object containing geometry data. If type=FLAT: `{scaledWidth, scaledHeight}`, else if type=CYLINDRICAL or type=CONICAL: `{height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians}`
+
+If type = `FLAT`, geometry:
+
+Property | Type | Description
+--------- | ---- | -----------
+scaledWidth | `Number` | The width of the image in the scene, when multiplied by scale.
+scaledHeight | `Number` | The height of the image in the scene, when multiplied by scale.
+
+If type = `CYLINDRICAL` or `CONICAL`, geometry:
+
+Property | Type | Description
+--------- | ---- | -----------
+height | `Number` | Height of the curved target.
+radiusTop | `Number` | Radius of the curved target at the top.
+radiusBottom | `Number` | Radius of the curved target at the bottom.
+arcStartRadians | `Number` | Starting angle in radians.
+arcLengthRadians | `Number` | Central angle in radians.
+
+**imagefound**: Fires when an image target is first found.
+
+`imagefound.detail : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+Property | Type | Description
+--------- | ---- | -----------
+name | `String` | The image's name.
+type | `String` | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+position | `{x, y, z}` | The 3d position of the located image.
+rotation | `{w, x, y, z}` | The 3d local orientation of the located image.
+scale | `Number` | A scale factor that should be applied to objects attached to this image.
+
+If type = `FLAT`:
+
+Property | Type | Description
+--------- | ---- | -----------
+scaledWidth | `Number` | The width of the image in the scene, when multiplied by scale.
+scaledHeight | `Number` | The height of the image in the scene, when multiplied by scale.
+
+If type = `CYLINDRICAL` or `CONICAL`:
+
+Property | Type | Description
+--------- | ---- | -----------
+height | `Number` | Height of the curved target.
+radiusTop | `Number` | Radius of the curved target at the top.
+radiusBottom | `Number` | Radius of the curved target at the bottom.
+arcStartRadians | `Number` | Starting angle in radians.
+arcLengthRadians | `Number` | Central angle in radians.
+
+**imageupdated**: Fires when an image target changes position, rotation or scale.
+
+`imageupdated.detail : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+Property | Type | Description
+--------- | ---- | -----------
+name | `String` | The image's name.
+type | `String` | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+position | `{x, y, z}` | The 3d position of the located image.
+rotation | `{w, x, y, z}` | The 3d local orientation of the located image.
+scale | `Number` | A scale factor that should be applied to objects attached to this image.
+
+If type = `FLAT`:
+
+Property | Type | Description
+--------- | ---- | -----------
+scaledWidth | `Number` | The width of the image in the scene, when multiplied by scale.
+scaledHeight | `Number` | The height of the image in the scene, when multiplied by scale.
+
+If type = `CYLINDRICAL` or `CONICAL`:
+
+Property | Type | Description
+--------- | ---- | -----------
+height | `Number` | Height of the curved target.
+radiusTop | `Number` | Radius of the curved target at the top.
+radiusBottom | `Number` | Radius of the curved target at the bottom.
+arcStartRadians | `Number` | Starting angle in radians.
+arcLengthRadians | `Number` | Central angle in radians.
+
+**imagelost**: Fires when an image target is no longer being tracked.
+
+`imagelost.detail : { name, type, position, rotation, scale, scaledWidth, scaledHeight, height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians }`
+
+Property | Type | Description
+--------- | ---- | -----------
+name | `String` | The image's name.
+type | `String` | One of `'FLAT'`, `'CYLINDRICAL'`, `'CONICAL'`.
+position | `{x, y, z}` | The 3d position of the located image.
+rotation | `{w, x, y, z}` | The 3d local orientation of the located image.
+scale | `Number` | A scale factor that should be applied to objects attached to this image.
+
+If type = `FLAT`:
+
+Property | Type | Description
+--------- | ---- | -----------
+scaledWidth | `Number` | The width of the image in the scene, when multiplied by scale.
+scaledHeight | `Number` | The height of the image in the scene, when multiplied by scale.
+
+If type = `CYLINDRICAL` or `CONICAL`:
+
+Property | Type | Description
+--------- | ---- | -----------
+height | `Number` | Height of the curved target.
+radiusTop | `Number` | Radius of the curved target at the top.
+radiusBottom | `Number` | Radius of the curved target at the bottom.
+arcStartRadians | `Number` | Starting angle in radians.
+arcLengthRadians | `Number` | Central angle in radians.
+
+## Example - adding pipeline module {#example---adding-pipeline-module}
+
+```javascript
+XR8.addCameraPipelineModule(XR8.XrController.pipelineModule())
+```
+
+## Example - dispatched events {#example---dispatched-events}
+
+```javascript
+const logEvent = ({name, detail}) => {
+ console.log(`Handling event ${name}, got detail, ${JSON.stringify(detail)}`)
+}
+
+XR8.addCameraPipelineModule({
+ name: 'eventlogger',
+ listeners: [
+ {event: 'reality.imageloading', process: logEvent},
+ {event: 'reality.imagescanning', process: logEvent},
+ {event: 'reality.imagefound', process: logEvent},
+ {event: 'reality.imageupdated', process: logEvent},
+ {event: 'reality.imagelost', process: logEvent},
+ ],
+})
+```
diff --git a/docs/engine/api/xrcontroller/recenter.md b/docs/engine/api/xrcontroller/recenter.md
new file mode 100644
index 0000000..e0d92c7
--- /dev/null
+++ b/docs/engine/api/xrcontroller/recenter.md
@@ -0,0 +1,18 @@
+---
+sidebar_label: recenter()
+---
+# XR8.XrController.recenter()
+
+`XR8.XrController.recenter()`
+
+## Description {#description}
+
+Repositions the camera to the origin / facing direction specified by [`XR8.XrController.updateCameraProjectionMatrix()`](updatecameraprojectionmatrix.md) and restarts tracking.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+None
diff --git a/docs/engine/api/xrcontroller/updatecameraprojectionmatrix.md b/docs/engine/api/xrcontroller/updatecameraprojectionmatrix.md
new file mode 100644
index 0000000..ac289e4
--- /dev/null
+++ b/docs/engine/api/xrcontroller/updatecameraprojectionmatrix.md
@@ -0,0 +1,40 @@
+---
+sidebar_label: updateCameraProjectionMatrix()
+---
+# XR8.XrController.updateCameraProjectionMatrix()
+
+`XR8.XrController.updateCameraProjectionMatrix({ cam, origin, facing })`
+
+## Description {#description}
+
+Reset the scene's display geometry and the camera's starting position in the scene. The display geometry is needed to properly overlay the position of objects in the virtual scene on top of their corresponding position in the camera image. The starting position specifies where the camera will be placed and facing at the start of a session.
+
+## Parameters {#parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+cam [Optional] | `{pixelRectWidth, pixelRectHeight, nearClipPlane, farClipPlane}` | `{nearClipPlane: 0.01, farClipPlane: 1000.0}` | The camera configuration.
+origin [Optional] | `{x, y, z}` | `{x: 0, y: 2, z: 0}` | The starting position of the camera in the scene.
+facing [Optional] | `{w, x, y, z}` | `{w: 1, x: 0, y: 0, z: 0}` | The starting direction (quaternion) of the camera in the scene.
+
+`cam` has the following parameters:
+
+Parameter | Type | Description
+--------- | ---- | -----------
+pixelRectWidth | `Number` | The width of the canvas that displays the camera feed.
+pixelRectHeight | `Number` | The height of the canvas that displays the camera feed.
+nearClipPlane | `Number` | The closest distance to the camera at which scene objects are visible.
+farClipPlane | `Number` | The farthest distance to the camera at which scene objects are visible.
+
+## Returns {#returns}
+
+None
+
+## Example {#example}
+
+```javascript
+XR8.XrController.updateCameraProjectionMatrix({
+ origin: { x: 1, y: 4, z: 0 },
+ facing: { w: 0.9856, x: 0, y: 0.169, z: 0 }
+})
+```
diff --git a/docs/engine/api/xrcontroller/xrcontroller.md b/docs/engine/api/xrcontroller/xrcontroller.md
new file mode 100644
index 0000000..675c645
--- /dev/null
+++ b/docs/engine/api/xrcontroller/xrcontroller.md
@@ -0,0 +1,15 @@
+# XR8.XrController
+
+## Description {#description}
+
+`XrController` provides 6DoF camera tracking and interfaces for configuring tracking.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[configure](configure.md) | Configures what processing is performed by `XrController` (may have performance implications).
+[hitTest](hittest.md) | Estimate the 3D position of a point on the camera feed.
+[pipelineModule](pipelinemodule.md) | Creates a camera pipeline module that, when installed, receives callbacks on when the camera has started, camera processing events, and other state changes. These are used to calculate the camera's position.
+[recenter](recenter.md) | Repositions the camera to the origin / facing direction specified by updateCameraProjectionMatrix and restarts tracking.
+[updateCameraProjectionMatrix](updatecameraprojectionmatrix.md) | Reset the scene's display geometry and the camera's starting position in the scene. The display geometry is needed to properly overlay the position of objects in the virtual scene on top of their corresponding position in the camera image. The starting position specifies where the camera will be placed and facing at the start of a session.
diff --git a/docs/engine/api/xrdevice/_category_.json b/docs/engine/api/xrdevice/_category_.json
new file mode 100644
index 0000000..a74d61b
--- /dev/null
+++ b/docs/engine/api/xrdevice/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "XrDevice",
+ "position": 26
+}
diff --git a/docs/engine/api/xrdevice/deviceestimate.md b/docs/engine/api/xrdevice/deviceestimate.md
new file mode 100644
index 0000000..17b7f39
--- /dev/null
+++ b/docs/engine/api/xrdevice/deviceestimate.md
@@ -0,0 +1,26 @@
+---
+sidebar_label: deviceEstimate()
+---
+# XR8.XrDevice.deviceEstimate()
+
+`XR8.XrDevice.deviceEstimate()`
+
+## Description {#description}
+
+Returns an estimate of the user's device (e.g. make / model) based on user agent string and other factors. This information is only an estimate, and should not be assumed to be complete or reliable.
+
+## Parameters {#parameters}
+
+None
+
+## Returns {#returns}
+
+An object: `{ locale, os, osVersion, manufacturer, model }`
+
+Property | Description
+--------- | -----------
+locale | The user's locale.
+os | The device's operating system.
+osVersion | The device's operating system version.
+manufacturer | The device's manufacturer.
+model | The device's model.
diff --git a/docs/engine/api/xrdevice/incompatibilityreasons.md b/docs/engine/api/xrdevice/incompatibilityreasons.md
new file mode 100644
index 0000000..84c8ee1
--- /dev/null
+++ b/docs/engine/api/xrdevice/incompatibilityreasons.md
@@ -0,0 +1,21 @@
+---
+sidebar_label: IncompatibilityReasons
+---
+# XR8.XrDevice.IncompatibilityReasons
+
+Enumeration
+
+## Description {#description}
+
+The possible reasons for why a device and browser may not be compatible with 8th Wall Web.
+
+## Properties {#properties}
+
+Property | Value | Description
+-------- | ----- |-----------
+UNSPECIFIED | `0` | The incompatible reason is not specified.
+UNSUPPORTED_OS | `1` | The estimated operating system is not supported.
+UNSUPPORTED_BROWSER | `2` | The estimated browser is not supported.
+MISSING_DEVICE_ORIENTATION | `3` | The browser does not support device orientation events.
+MISSING_USER_MEDIA | `4` | The browser does not support user media access.
+MISSING_WEB_ASSEMBLY | `5` | The browser does not support web assembly.
diff --git a/docs/engine/api/xrdevice/incompatiblereasondetails.md b/docs/engine/api/xrdevice/incompatiblereasondetails.md
new file mode 100644
index 0000000..c8e2d53
--- /dev/null
+++ b/docs/engine/api/xrdevice/incompatiblereasondetails.md
@@ -0,0 +1,25 @@
+---
+sidebar_label: incompatibleReasonDetails()
+---
+# XR8.XrDevice.incompatibleReasonDetails()
+
+`XR8.XrDevice.incompatibleReasonDetails({ allowedDevices })`
+
+## Description {#description}
+
+Returns extra details about the reasons why the device and browser are incompatible. This information should only be used as a hint to help with further error handling. These should not be assumed to be complete or reliable. This will only contain entries if [`XR8.XrDevice.isDeviceBrowserCompatible()`](isdevicebrowsercompatible.md) returns false.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+allowedDevices [Optional] | Supported device classes, a value in [`XR8.XrConfig.device()`](/docs/engine/api/xrconfig/device).
+
+## Returns {#returns}
+
+An object: `{ inAppBrowser, inAppBrowserType }`
+
+Property | Description
+-------- | -----------
+inAppBrowser | The name of the in-app browser detected (e.g. `'Twitter'`)
+inAppBrowserType | A string that helps describe how to handle the in-app browser.
diff --git a/docs/engine/api/xrdevice/incompatiblereasons.md b/docs/engine/api/xrdevice/incompatiblereasons.md
new file mode 100644
index 0000000..790d469
--- /dev/null
+++ b/docs/engine/api/xrdevice/incompatiblereasons.md
@@ -0,0 +1,36 @@
+---
+sidebar_label: incompatibleReasons()
+---
+# XR8.XrDevice.incompatibleReasons()
+
+`XR8.XrDevice.incompatibleReasons({ allowedDevices })`
+
+## Description {#description}
+
+Returns an array of [`XR8.XrDevice.IncompatibilityReasons`](incompatibilityreasons.md) why the device and browser are not supported. This will only contain entries if [`XR8.XrDevice.isDeviceBrowserCompatible()`](isdevicebrowsercompatible.md) returns false.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+allowedDevices [Optional] | Supported device classes, a value in [`XR8.XrConfig.device()`](/docs/engine/api/xrconfig/device).
+
+## Returns {#returns}
+
+Returns an array of [`XR8.XrDevice.IncompatibilityReasons`](incompatibilityreasons.md).
+
+## Example {#example}
+
+```javascript
+const reasons = XR8.XrDevice.incompatibleReasons()
+for (let reason of reasons) {
+ switch (reason) {
+ case XR8.XrDevice.IncompatibilityReasons.UNSUPPORTED_OS:
+ // Handle unsupported os error messaging.
+ break;
+ case XR8.XrDevice.IncompatibilityReasons.UNSUPPORTED_BROWSER:
+ // Handle unsupported browser
+ break;
+ ...
+}
+```
diff --git a/docs/engine/api/xrdevice/isdevicebrowsercompatible.md b/docs/engine/api/xrdevice/isdevicebrowsercompatible.md
new file mode 100644
index 0000000..ae8593d
--- /dev/null
+++ b/docs/engine/api/xrdevice/isdevicebrowsercompatible.md
@@ -0,0 +1,26 @@
+---
+sidebar_label: isDeviceBrowserCompatible()
+---
+# XR8.XrDevice.isDeviceBrowserCompatible()
+
+`XR8.XrDevice.isDeviceBrowserCompatible({ allowedDevices })`
+
+## Description {#description}
+
+Returns an estimate of whether the user's device and browser is compatible with 8th Wall Web. If this returns false, [`XR8.XrDevice.incompatibleReasons()`](incompatiblereasons.md) will return reasons about why the device and browser are not supported.
+
+## Parameters {#parameters}
+
+Parameter | Description
+--------- | -----------
+allowedDevices [Optional] | Supported device classes, a value in [`XR8.XrConfig.device()`](/docs/engine/api/xrconfig/device).
+
+## Returns {#returns}
+
+True or false.
+
+## Example {#example}
+
+```javascript
+XR8.XrDevice.isDeviceBrowserCompatible({allowedDevices: XR8.XrConfig.device().MOBILE})
+```
diff --git a/docs/engine/api/xrdevice/xrdevice.md b/docs/engine/api/xrdevice/xrdevice.md
new file mode 100644
index 0000000..3f3be04
--- /dev/null
+++ b/docs/engine/api/xrdevice/xrdevice.md
@@ -0,0 +1,20 @@
+# XR8.XrDevice
+
+## Description {#description}
+
+Provides information about device compatibility and characteristics.
+
+## Properties {#properties}
+
+Property | Type | Description
+-------- | ---- | -----------
+[IncompatibilityReasons](incompatibilityreasons.md) | Enum | The possible reasons for why a device and browser may not be compatible with 8th Wall Web.
+
+## Functions {#functions}
+
+Function | Description
+-------- | -----------
+[deviceEstimate](deviceestimate.md) | Returns an estimate of the user's device (e.g. make / model) based on user agent string and other factors. This information is only an estimate, and should not be assumed to be complete or reliable.
+[incompatibleReasons](incompatiblereasons.md) | Returns an array of [`XR8.XrDevice.IncompatibilityReasons`](incompatibilityreasons.md) why the device and browser are not supported. This will only contain entries if [`XR8.XrDevice.isDeviceBrowserCompatible()`](isdevicebrowsercompatible.md) returns false.
+[incompatibleReasonDetails](incompatiblereasondetails.md) | Returns extra details about the reasons why the device and browser are incompatible. This information should only be used as a hint to help with further error handling. These should not be assumed to be complete or reliable. This will only contain entries if [`XR8.XrDevice.isDeviceBrowserCompatible()`](isdevicebrowsercompatible.md) returns false.
+[isDeviceBrowserCompatible](isdevicebrowsercompatible.md) | Returns an estimate of whether the user's device and browser is compatible with 8th Wall Web. If this returns false, [`XR8.XrDevice.incompatibleReasons()`](incompatiblereasons.md) will return reasons about why the device and browser are not supported.
diff --git a/docs/engine/api/xrpermissions/_category_.json b/docs/engine/api/xrpermissions/_category_.json
new file mode 100644
index 0000000..38a08df
--- /dev/null
+++ b/docs/engine/api/xrpermissions/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "XrPermissions",
+ "position": 25
+}
diff --git a/docs/engine/api/xrpermissions/permissions.md b/docs/engine/api/xrpermissions/permissions.md
new file mode 100644
index 0000000..6fbe4d6
--- /dev/null
+++ b/docs/engine/api/xrpermissions/permissions.md
@@ -0,0 +1,29 @@
+---
+sidebar_label: permissions()
+---
+# XR8.XrPermissions.permissions()
+
+Enumeration
+
+## Description {#description}
+
+Permissions that can be required by a pipeline module.
+
+## Properties {#properties}
+
+Property | Value | Description
+-------- | ----- | -----------
+CAMERA | `'camera'` | Require camera.
+DEVICE_MOTION | `'devicemotion'` | Require accelerometer.
+DEVICE_ORIENTATION | `'deviceorientation'` | Require gyro.
+DEVICE_GPS | `'geolocation'` | Require GPS location.
+MICROPHONE | `'microphone'` | Require microphone.
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'request-gyro',
+ requiredPermissions: () => ([XR8.XrPermissions.permissions().DEVICE_ORIENTATION]),
+})
+```
diff --git a/docs/engine/api/xrpermissions/xrpermissions.md b/docs/engine/api/xrpermissions/xrpermissions.md
new file mode 100644
index 0000000..a6e8c51
--- /dev/null
+++ b/docs/engine/api/xrpermissions/xrpermissions.md
@@ -0,0 +1,22 @@
+# XR8.XrPermissions
+
+## Description {#description}
+
+Utilities for specifying permissions required by a pipeline module.
+
+Modules can indicate what browser capabilities they require that may need permissions requests. These can be used by the framework to request appropriate permissions if absent, or to create components that request the appropriate permissions before running XR.
+
+## Properties {#properties}
+
+Property | Type | Description
+-------- | ---- | -----------
+[permissions()](permissions.md) | Enum | List of permissions that can be specified as required by a pipeline module.
+
+## Example {#example}
+
+```javascript
+XR8.addCameraPipelineModule({
+ name: 'request-gyro',
+ requiredPermissions: () => ([XR8.XrPermissions.permissions().DEVICE_ORIENTATION]),
+})
+```
diff --git a/docs/engine/changelog.md b/docs/engine/changelog.md
new file mode 100644
index 0000000..4db2e79
--- /dev/null
+++ b/docs/engine/changelog.md
@@ -0,0 +1,616 @@
+---
+id: changelog
+sidebar_position: 99999
+---
+# Changelog
+
+
+#### Release 27.4: (2025-July-17, v27.4.11.427 / 2025-May-07, v27.4.8.427 / 2025-April-10, v27.4.5.427) {#release-27-2025-May-7-2747}
+* New Features:
+ * Added Image Target compatibility for 8th Wall Studio projects.
+
+* Fixes and Enhancements:
+ * Increased maximum active image targets to 32
+ * Disabled the desktop default environment for mobile AR experiences with desktop mode enabled.
+ * Fixed an issue where the landing page did not appear for face effects with desktop mode disabled.
+ * Updated VPS API. VPS now requires engine version 27.4 or higher.
+ * Fixed an issue related to changing XR camera orientation in Studio projects (27.4.11.427).
+
+#### Release 27.3: (2025-March-19, v27.3.1.427) {#release-27-2025-March-19-2731427}
+
+* Fixes and Enhancements:
+ * Improved localization performance at VPS locations
+ * Fixed crashes on orientation change and camera swapping
+ * Corrected flickering face effects
+
+#### Release 27.2: (2024-December-04, v27.2.6.427 / 2024-November-04, v27.2.5.427 / 2024-October-23, v27.2.4.427) {#release-27-2024-october-23-2724427}
+
+* New Features:
+ * Added VPS compatibility for 8th Wall Studio projects.
+
+* Fixes and Enhancements:
+  * Fixed an issue affecting the reliability of the simulator in VPS projects. (27.2.5.427)
+ * Improved the reliability of the camera pipeline initialization for enhanced AR experiences. (27.2.6.427)
+
+#### Release 27.1: (2024-October-03, v27.1.9.427 / 2024-October-01, v27.1.6.427) {#release-27-1-2024-october-01-v2716427}
+
+* Fixes and Enhancements:
+ * Boosted localization and tracking quality at VPS locations, significantly enhancing stability
+ and accuracy of VPS AR experiences.
+ * Optimized SLAM relocalization & tracking.
+ * Fixed an issue where the World Effects camera could teleport at the start of runtime in Studio.
+ * Resolved an issue affecting the stability of VPS tracking to improve overall performance. (27.1.9.427)
+  * Improved SLAM relocalization so that AR content snaps back to the proper position more quickly. (27.1.9.427)
+
+#### Release 27: (2024-Sept-12, v27.0.4.427 / 2024-August-01, v27.0.2.427) {#release-27-2024-august-01-v2702427}
+
+* Fixes and Enhancements:
+ * Fixed an issue when swapping between World Effects and Face Effects experiences.
+ * Improved XR camera synchronization with scenes in Studio.
+ * Optimized logging for improved performance and cleaner output.
+
+#### Release 26: (2024-June-18, v26.0.6.150) {#release-26-2024-june-18-v2606150}
+
+* New Features:
+ * Added support for Face Effects and World Tracking in 8th Wall Studio.
+
+* Fixes and Enhancements:
+ * Fixed an issue with some A-Frame projects that could cause unexpected behavior.
+
+#### Release 25: (2024-May-28, v25.0.1.2384) {#release-25-2024-may-28-v25012384}
+
+* New Features:
+ * Updated the XR engine to download as feature-specific components instead of one large package.
+
+#### Release 24.1: (2024-March-28, v24.1.10.2165 / 2024-February-29, v24.1.5.2165 / 2024-February-13, v24.1.2.2165 / 2024-January-25, v24.1.1.2165) {#release-241-2024-march-28-v241102165--2024-february-29-v24152165--2024-february-13-v24122165--2024-january-25-v24112165}
+
+* New Features:
+ * Updated 8Frame to support A-Frame 1.5.0.
+ * Added Metaversal Deployment support for Magic Leap 2 1.5.0 operating system update.
+ * Updated Hand Tracking to support left and right hand UVs, enabling you to easily draw designs on a hand mesh.
+ * Added support for Sky Effects to 8th Wall Simulator. (24.1.2.2165)
+  * Added four new wrist attachment points to Hand Tracking. (24.1.5.2165)
+ * Updated Metaversal Deployment to support virtual reality in the browser on Apple Vision Pro. (24.1.10.2165)
+
+* Fixes and Enhancements:
+ * Improved performance of Sky Effects experiences.
+ * Improved Hand Tracking's wrist tracking stability. (24.1.5.2165)
+
+* XRExtras Enhancements:
+ * Added `uv-orientation` parameter to `xrextras-hand-mesh` to support new hand UV functionality.
+ * Fixed an issue with MediaRecorder on iOS 17.4. (24.1.10.2165)
+
+#### Release 24: (2023-November-29, v24.0.10.2165 / 2023-November-16, v24.0.9.2165 / 2023-November-01, v24.0.8.2165) {#release-24-2023-november-29-v240102165--2023-november-16-v24092165--2023-november-01-v24082165}
+
+* New Features:
+ * Added three new ear attachment points for Face Effects, allowing you to accurately attach AR content to various points on the ears.
+  * Updated Hand Tracking to expose hand UVs, enabling you to easily draw designs on a hand mesh.
+ * Enhanced Metaversal Deployment to support 8th Wall experiences on the Magic Leap 2.
+ * Updated PlayCanvas integration to support three new ear attachment points for Face Effects. (24.0.9.2165)
+
+* Fixes and Enhancements:
+ * Cleaned up some PlayCanvas warnings (24.0.10.2165)
+
+* XRExtras Enhancements:
+ * Updated AFrame components for easy Face Effects with new ear attachment points
+
+#### Release 23: (2023-August-24, v23.1.1.2275 / 2023-August-09, v23.0.12.2275 / 2023-July-28, v23.0.7.2275 / 2023-July-25, v23.0.4.2275) {#release-23-2023-august-24-v23112275--2023-august-09-v230122275-2023-july-28-v23072275--2023-july-25-v23042275}
+
+* New Features:
+ * Introducing Hand Tracking - use hands, wrists, and fingers as an interactive canvas for immersive WebAR experiences.
+ * Attach 3D objects to an industry leading 36 hand attachment points.
+ * Use the 8th Wall engine’s adaptive hand mesh to match the size and volume of any hand.
+ * Added Hand Tracking Coaching Overlay module to guide users through a flow to ensure their hands are in view of the camera.
+ * Updated PlayCanvas integration to support Hand Tracking. (23.0.12.2275)
+ * Added XrDevice.deviceInfo API to query detailed device information. (23.1.1.2275)
+
+* Fixes and Enhancements:
+  * Improved SLAM relocalization so that AR content snaps back to the proper position more quickly and with better precision after an interruption.
+ * Refined camera selection on Android devices.
+ * Cleaned up warnings related to default xrhand parameters. (23.0.7.2275)
+ * Fixed an issue with WebGL context on MacOS devices using Safari. (23.0.12.2275)
+ * Improved SLAM tracking on a wide range of devices. (23.1.1.2275)
+
+* XRExtras Enhancements:
+ * New A-Frame components for easy Hand Tracking development.
+ * Fixed shadow shader in PlayCanvas.
+
+#### Release 22.1: (2023-May-15, v22.1.7.1958 / 2023-May-03, v22.1.2.1958) {#release-221-2023-may-15-v22171958--2023-may-03-v22121958}
+
+* New Features:
+ * Added multi-face support for Face Effects, allowing you to augment up to three faces simultaneously in a single experience.
+ * Updated Face Effects to support either standard or projected UVs, enabling you to easily draw Face Effect designs on a projected face mesh.
+
+* Fixes and Enhancements:
+ * Fixed a device orientation issue on iOS 16.4 devices.
+ * Fixed a performance issue that could occur when using one controller on a Meta Quest device.
+ * Improved performance of three.js experiences on headsets. (22.1.7.1958)
+
+* XRExtras Enhancements:
+ * Added `face-id` parameter to `xrextras-faceanchor` to support new multi-face functionality. (22.1.7.1958)
+
+#### Release 22: (2023-April-20, v22.0.4.1958) {#release-22-2023-april-20-v22041958}
+
+* New Features:
+ * Introducing the 8th Wall Engine’s completely refreshed Face Effects:
+ * Improved tracking quality and stability for:
+ * Eyebrow Region
+ * Eye Tracking
+ * Mouth Tracking
+ * Added Iris tracking capability:
+ * Added API to estimate InterPupillary Distance (IPD)
+ * Added developer-friendly real-time Face Events including:
+ * Eyebrows Raised/Lowered
+ * Mouth Open/Closed
+ * Eye Open/Closed
+ * Enabled new face morphing effects by exposing uv positions of face points in the camera frame.
+ * Increased head mesh height to allow effects that extend all the way to the hairline.
+
+* Fixes and Enhancements:
+ * Improved the speed of sky detection for Sky Effect’s experiences.
+
+
+#### Release 21.4: (2023-April-07, v21.4.7.997 / 2023-March-27, v21.4.6.997) {#release-214-2023-april-07-v2147997--2023-march-27-v2146997}
+
+* New Features:
+ * Introducing Sky Effects + World Tracking - create immersive experiences that augment the sky and ground together in one project:
+ * Added ability to simultaneously track 3D interactive content in the sky and on surfaces via SLAM.
+ * Added the ability to move AR content from the sky layer to the ground, and from the ground to the sky.
+ * Updated PlayCanvas integration to support Sky Effects as well as Sky + World Tracking.
+ * Improved PlayCanvas integration with a new unified run() & stop() API which replaces the runXr() & stopXr() API.
+ * Added a new xrconfig API that makes it easier to configure the different XR components that your project uses.
+
+* Fixes and Enhancements:
+ * Fixed an issue with sky detection at the edge of the camera frame on some Sky Effects experiences.
+ * Fixed an issue with xrlayerscene component when used in self-hosted projects.
+  * Fixed a device orientation issue on iOS 16.4 devices. (21.4.7.997)
+
+#### Release 21.3: (2023-March-17, v21.3.8.997) {#release-213--2023-march-17-v2138997}
+
+* New Features:
+ * Added edge feathering controls (edgeSmoothness) for Sky Effects, allowing you to fine tune the look and intensity of borders between virtual and real-world content in the sky.
+ * Added support for camera-locked Sky Effects in three.js, enabling you to add content to the sky that is always in view of the camera in your three.js projects.
+ * Updated 8Frame to support A-Frame 1.4.1.
+ * Updated Metaversal Deployment to support Room Setup in the Meta Quest Browser.
+
+* Fixes and Enhancements:
+ * Improved performance and visual quality of Sky Effects experiences.
+ * Added ability to specify which VPS Project Locations you want to localize against. This can help improve VPS localization times if there are many nearby Locations.
+ * Fixed an issue where opening PlayCanvas experiences on desktop could result in crashing.
+
+#### Release 21.2: (2022-December-16, v21.2.2.997 / 2022-December-13, v21.2.1.997) {#release-212--2022-december-16-v2122997--2022-december-13-v2121997}
+
+* New Features:
+ * Introducing Sky Effects - a major update to the 8th Wall Engine enabling sky segmentation:
+ * Added ability to place 3D interactive content in the sky.
+ * Added the ability to replace sky mask with images or video.
+ * Added Sky Coaching Overlay module to guide users through a flow to ensure they are pointing their device at the Sky.
+
+* Fixes and Enhancements:
+ * Improved tracking quality at VPS locations.
+ * Fixed an AFrame Sky Effects pixelation issue that impacted some phones. (21.2.2.997)
+
+* XRExtras Enhancements:
+ * Enhanced MediaRecorder to add another method of drawing 2D elements to the recorded canvas.
+ * Fixed shadow rendering in PlayCanvas v1.55+
+ * Improved performance of Image Target A-Frame primitives.
+
+#### Release 20.3: (2022-November-22, v20.3.3.684) {#release-203--2022-november-22-v2033684}
+
+* New Features:
+ * Updated Metaversal Deployment to support mixed reality in the Meta Quest Browser.
+ * 8th Wall World Effects experiences automatically make use of video passthrough AR on Meta Quest Pro and Meta Quest 2 when accessed in the browser.
+
+* Fixes and Enhancements:
+ * Optimized localization at VPS locations
+ * Improved tracking quality at VPS locations by using the selected mesh of each Project Location.
+ * Improved experience for some Android devices with multiple cameras.
+
+#### Release 20: (2022-October-05, v20.1.20.684 / 2022-September-21, v20.1.19.684 / 2022-September-21, v20.1.17.684) {#release-20--2022-october-05-v20120684--2022-september-21-v20119684--2022-september-21-v20117684}
+
+* New Features:
+ * Introducing Lightship VPS for Web - create location-based WebAR experiences by connecting AR content to real-world locations.
+ * Added new Geospatial Browser to the 8th Wall Developer Portal.
+ * Find, create and manage VPS-activated Locations.
+ * Generate and download 3D meshes for use as occluders, physics objects, or as a reference for creating location-aware animations.
+ * Added `enableVps` parameter to XR8.XrController.configure() and xrweb.
+ * Added events when a Location is ready for scanning, found, or lost.
+ * Added ability to find and access Location raw mesh geometry.
+ * Added `XR8.Vps.makeWayspotWatcher`, and `XR8.Vps.projectWayspots` APIs for querying nearby VPS activated Locations and Project Locations.
+ * Added Lightship VPS Coaching Overlay module to guide users through a flow to localize at real-world locations.
+ * Added XR8.Platform API for unlocking new 8th Wall platform features like Lightship VPS and Niantic Lightship Maps.
+ * Niantic Lightship Map module
+ * Add the lightship-maps module to your project on 8thwall.com to make it easy to create a variety of location-based experiences.
+
+* Fixes and Enhancements:
+ * Improved error handling for VPS network requests (20.1.19.684)
+ * Fixed issues with some VPS network requests (20.1.20.684)
+
+#### Release 19.1: (2022-August-26, v19.1.6.390 / 2022-August-10, v19.1.2.390) {#release-191--2022-august-26-v1916390--2022-august-10-v1912390}
+
+* Fixes and Enhancements:
+ * Fixed issues with 8th Wall experiences within WeChat on iOS.
+ * Improved initial SLAM tracking for some Android devices (19.1.6.390)
+
+#### Release 19: (2022-May-5, v19.0.16.390 / 2022-April-13, v19.0.14.390 / 2022-March-24, v19.0.8.390) {#release-19--2022-may-5-v19016390--2022-april-13-v19014390--2022-march-24-v1908390}
+
+* New Features:
+ * Introducing Absolute Scale — a major update to 8th Wall SLAM to enable real-world scale in World Effects:
+ * Added ability to enable Absolute Scale in World Effects projects.
+ * Added scale parameter to XR8.XrController.configure().
+ * Added Coaching Overlay module to guide users through a flow to generate appropriate data for scale estimation.
+ * Updated 8Frame to support A-Frame 1.3.0. (19.0.16.390)
+
+* Fixes and Enhancements:
+ * Improved performance on various devices.
+ * Improved experience for some Android devices with multiple cameras.
+ * Improved performance of Absolute Scale on some iOS devices. (19.0.14.390)
+ * Fixed Huawei browser user messaging on Huawei devices. (19.0.14.390)
+
+#### Release 18.2: (2022-March-09, v18.2.4.554 / 2022-January-14, v18.2.3.554 / 2022-January-13, v18.2.2.554) {#release-182--2022-march-09-v1824554--2022-january-14-v1823554--2022-january-13-v1822554}
+
+* Fixes and Enhancements:
+ * Fixed an issue where devices running iOS 13 could reload after starting an XR8 session.
+ * Fixed an issue where the WebGL context could be lost after many XR8 sessions.
+ * Improved experience for some Android devices with multiple cameras.
+  * Fixed an issue where additive blending could interfere with the camera feed.
+ * Fixed an issue with transparent materials. (18.2.3.554)
+ * Fixed a three.js rendering issue on devices running iOS 15.4 (18.2.4.554)
+
+#### Release 18.1: (2021-December-02, v18.1.3.554) {#release-181--2021-december-02-v1813554}
+
+* Fixes and Enhancements:
+ * Fixed a loading issue on some iOS devices when accessing Inline AR projects.
+ * Fixed an issue with denying browser prompts on some iOS devices.
+ * Fixed an issue rotating device orientation between landscape and portrait within SFSafariViewController.
+ * Improved compatibility with some Android devices that have atypical camera feed aspect ratios.
+
+#### Release 18: (2021-November-08, v18.0.6.554) {#release-18--2021-november-08-v1806554}
+
+* New Features:
+ * Introducing the completely rebuilt 8th Wall Engine featuring Metaversal Deployment:
+ * Added pipeline module API for session managers.
+ * Added Web3D session manager.
+ * Added headset session managers for three.js and A-Frame.
+ * Updated allowedDevices to include mobile-and-headset.
+ * Added additional session configuration parameters in XR8.run().
+
+* Fixes and Enhancements:
+ * Improved frame capture with a variety of Pixel devices.
+ * Updated iOS WKWebView flow to support experiences accessed via LinkedIn.
+
+* XRextras:
+ * Added xrextras-opaque-background A-Frame component and XRExtras.Lifecycle.attachListener.
+
+#### Release 17.2: (2021-October-26, v17.2.4.476) {#release-172--2021-october-26-v1724476}
+
+* Fixes and Enhancements:
+ * Enhanced SLAM map building quality.
+ * Optimized tracking quality of SLAM experiences.
+ * Improved PlayCanvas integration to support drawing on the same canvas that the camera feed is rendered on.
+
+#### Release 17.1: (2021-September-21, v17.1.3.476) {#release-171--2021-september-21-v1713476}
+
+* New Features:
+ * Added new APIs
+ * API to query the engine initialization state.
+ * three.js camera feed is available as a THREE.Texture.
+ * Lifecycle method for pipeline module removal.
+
+* Fixes and Enhancements:
+ * Enhanced SLAM map building quality.
+ * Improved tracking quality on a wide range of devices.
+ * Improved frame rate of World Effects, Face Effects, and Image Target experiences on Chromium-based and Firefox browsers.
+ * Improved MediaRecorder video quality on Android devices.
+
+* XRExtras Enhancements:
+ * Enhanced MediaRecorder share flow when Web Share API Level 2 is enabled.
+ * Improved startup time of Loading module.
+ * Improved lifecycle handling for Runtime Error, Almost There and Loading modules.
+ * Updated the Almost There module to improve the success of QR Code scans.
+ * Improved Full Window Canvas logic on iPad split screen views.
+
+#### Release 17: (2021-July-20, v17.0.5.476) {#release-17--2021-july-20-v1705476}
+
+* Fixes and Enhancements:
+ * Enhanced above-horizon tracking boosts map quality improving the performance of WebAR experiences that ask users to point their phones up to fully explore AR content.
+  * Optimized SLAM relocalization so that AR content snaps back to the proper position in world space after an interruption.
+ * Improved tracking quality of SLAM experiences when users make extreme yaw movements.
+
+* XRExtras Enhancements:
+ * Updated MediaRecorder to return to the media preview when users press the “view” button on the iOS dialog box after choosing to download media.
+
+#### Release 16.1: (2021-June-02, v16.1.4.1227) {#release-161--2021-june-02-v16141227}
+
+* Fixes and Enhancements:
+ * Improved recovery of world tracking after an interruption.
+ * Improved lifecycle management of event listeners in A-Frame projects.
+ * Fixed an issue with WebGL 1 errors on some Android devices.
+ * Fixed an issue where MediaRecorder would occasionally not render a recording preview.
+ * Fixed an issue where swapping the camera multiple times could result in crashing.
+ * Improved compatibility using canvases with pre-defined WebGL 2 contexts.
+
+#### Release 16: (2021-May-21, v16.0.8.1227 / 2021-April-27, v16.0.6.1227 / 2021-April-22, v16.0.5.1227) {#release-16--2021-may-21-v16081227--2021-april-27-v16061227--2021-april-22-v16051227}
+
+* New Features:
+ * Introducing the all-new 8th Wall MediaRecorder:
+ * Uses W3C web standards compliant recording when available.
+ * Optimizes performance to improve frame rate during recording.
+ * Enhancements to image quality and frame rate of recording.
+
+* Fixes and Enhancements:
+ * Improved tracking quality and frame rate of SLAM experiences.
+ * Improved tracking quality and frame rate of Image Target experiences.
+ * Improved experience for some Android devices with multiple cameras.
+ * Fixed raycasting issues with PlayCanvas.
+ * Fixed SLAM tracking issue (v16.0.8.1227)
+
+* XRExtras Enhancements:
+ * Updated MediaRecorder to provide a progress bar while transcoding recordings on relevant devices.
+
+#### Release 15.3: (2021-March-2, v15.3.3.487) {#release-153--2021-march-2-v1533487}
+
+* New Features:
+ * Updated 8Frame to support A-Frame 1.2.0.
+
+* Fixes and Enhancements:
+ * Fixed an issue with resuming the camera feed in Safari after navigating back to an 8th Wall app.
+ * Fixed an issue with resuming the camera feed after re-opening a WKWebView
+ * Improved compatibility with different rendering engine versions.
+ * Optimized iOS WKWebView flows for some native apps.
+
+#### Release 15.2: (2020-December-14, v15.2.4.487) {#release-152--2020-december-14-v1524487}
+
+* New Features:
+ * Added support for WKWebView on devices running iOS 14.3 or later.
+ * Made a compute context accessible to Pipeline Modules to accelerate offscreen GPU computer vision.
+ * Updated 8Frame to support A-Frame 1.1.0.
+
+* Fixes and Enhancements:
+ * Improved compatibility with rendering engines.
+ * Added the ability to load and unload image targets while tracking other image targets.
+ * Fixed an issue with MediaRecorder related to audio context switching.
+ * Improved experience for some Android devices with multiple cameras.
+ * Fixed an issue where WebGL errors would sometimes be hidden.
+ * Fixed an issue with simultaneously tracking flat and curved image targets.
+ * Fixed an issue with switching between WebGL and WebGL2 pipelines.
+
+* XRExtras Enhancements:
+ * Improved flows for iOS WKWebView on devices running iOS 14.3 or later.
+ * Fixed an issue with Stats module pipeline detach.
+
+#### Release 15.1: (2020-October-27, v15.1.4.487) {#release-151--2020-october-27-v1514487}
+
+* New Features:
+ * Added support for Curved Image Targets to be used simultaneously with SLAM.
+ * Added support for A-Frame 1.1.0-beta, THREE 120, and MRCS HoloVideoObject 1.2.5.
+
+* Fixes and Enhancements:
+ * Improved quality of tracking Flat Image Targets simultaneously with SLAM.
+ * Improved framerate for devices with iOS 14 or greater.
+ * Improved experience for some Android devices with multiple cameras. (v15.0.9.487)
+ * Optimized performance of some GPU processing.
+ * Enhanced PlayCanvas integration with support for switching between XR and FaceController cameras.
+ * Fixed an issue with MediaRecorder microphone access where onPause events were not closing the microphone input.
+ * Fixed an issue with MediaRecorder occasionally producing files incompatible with some video players.
+ * Fixed a raycasting issue with AFrame 1.0.x. (v15.0.9.487)
+
+* XRExtras Enhancements:
+ * XRExtras.PauseOnHidden() module pauses the camera feed when your browser tab is hidden.
+
+#### Release 15: (2020-October-09, v15.0.9.487 / 2020-September-22, v15.0.8.487) {#release-15--2020-october-09-v1509487--2020-september-22-v1508487}
+
+* New Features:
+ * 8th Wall Curved Image Targets:
+ * Added support for cylindrical image targets such as those wrapped around bottles, cans and more.
+ * Added support for conical image targets such as those wrapped around coffee cups, party hats, lampshades and more.
+
+* Fixes and Enhancements:
+ * Improved tracking quality for SLAM and Image Targets.
+ * Fixed an issue with MRCS Holograms and device routing on iOS 14.
+ * Fixed an issue with Face Effects and Image Targets where updates to mirroredDisplay were not reflected during runtime.
+ * Improved experience for some Android devices with multiple cameras. (v15.0.9.487)
+ * Fixed a raycasting issue with AFrame 1.0.x (v15.0.9.487)
+
+* XRExtras Enhancements:
+ * New AFrame components for easy Curved Image Target development:
+ * 3D container prefab component that forms a portal-like container that 3D content can be placed inside.
+ * Video playback prefab component for easily enabling video on curved image targets.
+ * Improved detection of Web Share API Level 2 support.
+
+#### Release 14.2: (2020-July-30, v14.2.4.949) {#release-142--2020-july-30-v1424949}
+
+* New Features:
+ * Updated MediaRecorder.configure() to provide more control over audio output and mixing:
+ * Pass in your own audioContext.
+ * Request mic permissions during setup or runtime.
+ * Optionally disable microphone recording.
+ * Add your own audio nodes to the audio graph.
+ * Incorporate scene audio into recording playback.
+
+* Fixes and Enhancements:
+ * Fixed an issue where clip planes were not set from PlayCanvas in some cases.
+ * Added support for switching between world tracking, image target tracking, and face effects at runtime.
+ * Fixed an issue where vertex buffers could be rebound after vertex arrays were deleted.
+ * Improved experience for some Android devices with multiple cameras.
+
+#### Release 14.1: (2020-July-06, v14.1.4.949) {#release-141--2020-july-06-v1414949}
+
+* New Features:
+ * Introducing 8th Wall Video Recording:
+ * Add in-browser video recording to any 8th Wall project with the new XR8.MediaRecorder API.
+ * Add dynamic overlays and end cards with custom images and call to action.
+ * Configure maximum video duration and resolution.
+ * Added microphone as a configurable module permission.
+
+* Fixes and Enhancements:
+ * Enhanced CanvasScreenshot functionality with improved overlay support.
+ * Fixed an issue with Face Effects that could cause visual glitches on device orientation change.
+  * Improved Face Effects right-handed coordinate compatibility with Babylon.js.
+ * Improved graphics pipeline compatibility with Babylon.js.
+
+* XRExtras Enhancements:
+ * Record button prefab component for capturing video and photos:
+ * Select between standard, fixed, and photo capture modes.
+ * Preview prefab component for easily previewing, downloading, and sharing captures
+ * Use XRExtras to easily customize the Video Recording user experience in your project:
+ * Configure maximum video length and resolution.
+ * Add optional watermark to each frame of the video.
+ * Add optional end card to add branding and a call to action at the end of the video.
+
+#### Release 14: (2020-May-26) {#release-14--2020-may-26}
+
+* New Features:
+ * Introducing 8th Wall Face Effects: Attach 3D objects to face attachment points and paint your face with custom materials, shaders or videos.
+ * Selfie Mode: Use the front camera with a mirrored display to get the perfect selfie shot.
+ * Desktop Browsers: Enable your image target and face effect experiences to run in desktop browsers utilizing the webcam.
+
+* Fixes and Enhancements:
+ * Enhanced capture field of view on Pixel 4/4XL phones.
+ * Enhanced camera profiles for certain phone models.
+ * Fixed an issue with changing device orientation during startup.
+ * Fixed an issue with swapping the camera direction on the same a-scene.
+ * Fixed an issue with AFrame look-controls not being removed on scene restart.
+ * Improved experience for some Android phones with multiple cameras.
+ * Other fixes and enhancements.
+
+* XRExtras Enhancements:
+ * Enhanced almost there flows for experiences that can be viewed on desktop.
+ * PauseOnBlur module stops the camera when your tab is not active.
+ * New AFrame components for easy face effects and desktop experience development.
+ * New ThreeExtras for rendering PBR materials, basic materials, and videos to faces.
+
+#### Release 13.2: (2020-Feb-13) {#release-132--2020-feb-13}
+
+* New Features:
+ * Release camera stream on XR8.pause() and reopen on XR8.resume().
+ * Added API to access shader program and modify uniforms used by GlTextureRenderer.
+ * Added API to configure WebGL context on run.
+
+* Fixes and Enhancements:
+ * Fix black video issue on iOS when a user long-presses on an image.
+ * Improved iOS screenshot capture speed and reliability.
+ * Fixed alpha channel rendering when taking a screenshot.
+ * Improved experience for some Android phones with multiple cameras.
+ * Improved detection of social network web views.
+
+* XRExtras Enhancements:
+ * Improved QR codes with better compatibility with native cameras.
+ * Improved link out flows for social networks.
+ * Improved CSS customization.
+
+#### Release 13.1 {#release-131}
+
+* New Features:
+ * Improved framerate on high resolution Android phones.
+ * Camera pipeline can be stopped and restarted.
+ * Camera pipeline modules can be removed at runtime or when stopped.
+ * New lifecycle callbacks for attaching and detaching.
+
+* Fixes and Enhancements:
+ * Improved experience for some Android phones with multiple cameras.
+ * Fixed iOS phone calibration on iOS 12.2 and above.
+
+#### Release 13 {#release-13}
+
+* New Features:
+ * Adds support for cloud-based creation, collaboration, publishing, and hosting of WebAR content.
+
+#### Release 12.1 {#release-121}
+
+* Fixes and Enhancements:
+ * Increased camera resolution on newer iOS devices.
+ * Increased AFrame fps on high-res Android devices.
+ * Fixed three.js r103+ raycasting issues.
+ * Added support for iPadOS.
+ * Fixed memory issue when loading many image targets repeatedly.
+  * Minor performance enhancements and bug fixes.
+
+#### Release 12 {#release-12}
+
+* New Features:
+ * Increased image target upload limit to 1000 image targets per app.
+ * New API for selecting active image targets at runtime.
+ * Apps can now scan for up to 10 image targets simultaneously.
+ * Front facing camera is supported in camera framework and image targets.
+ * Engine support for PlayCanvas.
+
+* Fixes:
+ * Improved experience for some Android phones with multiple cameras.
+
+* XRExtras:
+ * Improved visual quality on Android Phones.
+ * Support for iOS 13 device orientation permissions.
+ * Better error handling for missing web assembly on some older versions of iOS.
+ * Support for PlayCanvas.
+
+#### Release 11.2 {#release-112}
+
+* New Features:
+ * iOS 13 motion support.
+
+#### Release 11.1 {#release-111}
+
+* Fixes and Enhancements:
+ * Reduced memory usage.
+ * Improved tracking performance.
+ * Enhanced detection of browser capabilities.
+
+#### Release 11 {#release-11}
+
+* New Features:
+ * Added support for Image Targets.
+ * Added support for BabylonJS.
+ * Reduced JS binary size to 1MB.
+ * Added support running 8th Wall Web inside a cross-origin iframe.
+ * Minor API additions.
+
+#### Release 10.1 {#release-101}
+
+* New Features:
+ * Added support for A-Frame 0.9.0.
+
+* Fixes:
+ * Fixed error when providing includedTypes to XrController.hitTest().
+ * Reduced memory usage when tracking over extended distances.
+
+#### Release 10 {#release-10}
+
+Release 10 adds a revamped web developer console with streamlined developer-mode, access to allowed origins and QR codes. It adds 8th Wall Web support for XRExtras, an open-source package for error handling, loading visualizations, "almost there" flows, and more.
+
+* New Features:
+ * Revamped web developer console.
+ * XR Extras provides a convenient solution for:
+ * Load screens and requesting camera permissions.
+ * Redirecting users from unsupported devices or browsers ("almost there").
+ * Runtime error handling.
+ * Drawing a full screen camera feed in low-level frameworks like three.js.
+ * Added public lighting, hit test interfaces to XrController.
+ * Other minor API additions.
+
+* Fixes:
+ * Improved app startup speed.
+ * Fixed a framework issue where errors were not propagated on startup.
+ * Fixed an issue that could occur with WebGL during initialization.
+ * Use window.screen interface for device orientation if available.
+ * Fixed a three.js issue that could occur when the canvas is resized.
+
+#### Release 9.3 {#release-93}
+
+* New Features:
+ * Minor API additions: XR.addCameraPipelineModules() and XR.FullWindowCanvas.pipelineModule()
+
+#### Release 9.2 {#release-92}
+
+* New Features:
+ * Public documentation released: https://docs.8thwall.com/web
+
+#### Release 9.1 {#release-91}
+
+* New Features:
+ * Added support for Amazon Sumerian in 8th Wall Web
+ * Improved tracking stability and eliminated jitter
+
+#### Release 9 {#release-9}
+
+* Initial release of 8th Wall Web!
diff --git a/docs/engine/engine.md b/docs/engine/engine.md
new file mode 100644
index 0000000..268b247
--- /dev/null
+++ b/docs/engine/engine.md
@@ -0,0 +1,11 @@
+---
+sidebar_label: Introduction
+---
+
+# 8th Wall Engine
+
+The 8th Wall AR Engine is a complete implementation of 8th Wall's Simultaneous Localization and Mapping (SLAM) engine, hyper-optimized for real-time WebAR on browsers. AR features include World Tracking, Image Targets, Face Effects, and Sky Segmentation.
+
+The engine is built into Studio projects, and is also easily integrated into modern 3D JavaScript frameworks such as [A-Frame](), [three.js](), [PlayCanvas](), and [Babylon.js]().
+
+
diff --git a/docs/engine/getting-started/_category_.json b/docs/engine/getting-started/_category_.json
new file mode 100644
index 0000000..3562d43
--- /dev/null
+++ b/docs/engine/getting-started/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Getting Started",
+ "position": 1
+}
diff --git a/docs/engine/getting-started/development.md b/docs/engine/getting-started/development.md
new file mode 100644
index 0000000..ada9000
--- /dev/null
+++ b/docs/engine/getting-started/development.md
@@ -0,0 +1,34 @@
+---
+id: development
+sidebar_position: 4
+---
+
+# Development
+
+## Test on Desktop
+
+1. If node/npm are not installed, install using https://github.com/nvm-sh/nvm or https://nodejs.org/en/download
+2. `cd` to the project root and run `npm install`.
+
+
+
+3. Run `npm run serve` to run the development server. Once the local server is running, you will see the URL/IP addresses your project is running at.
+
+
+
+4. Open a new browser window and paste in the loopback URL or IP address to test your project in development mode.
+
+
+
+## Test on Mobile
+
+To test your project on mobile devices, especially for AR experiences that require camera access, you'll need to serve your development server over HTTPS. We recommend using [ngrok](https://ngrok.com/) to create a secure tunnel to your local server.
+
+After setting up ngrok, add the following configuration to `config/webpack.config.js` under the `devServer` section:
+
+```javascript
+devServer: {
+ // ... existing config
+ allowedHosts: ['.ngrok-free.dev']
+}
+```
diff --git a/docs/engine/getting-started/getting-started.md b/docs/engine/getting-started/getting-started.md
new file mode 100644
index 0000000..dfa7460
--- /dev/null
+++ b/docs/engine/getting-started/getting-started.md
@@ -0,0 +1,11 @@
+# Getting Started with the 8th Wall Engine
+
+The 8th Wall Engine powers AR experiences that work in the browser across all devices. The feature set includes:
+- **World Effects**: Place interactive 3D in your space with World Effects
+- **Image Targets**: Bring print & packaging to life with Image Targets
+- **Face Effects**: Try on accessories, face filters and more with Face Effects
+- **Sky Effects**: Use the sky as a canvas to create immersive, interactive experiences
+
+The 8th Wall engine is an API that can be integrated into various web 3D frameworks, and is built into [8th Wall Studio](/docs/studio).
+
+
diff --git a/docs/engine/getting-started/integration.md b/docs/engine/getting-started/integration.md
new file mode 100644
index 0000000..06dcd3d
--- /dev/null
+++ b/docs/engine/getting-started/integration.md
@@ -0,0 +1,62 @@
+---
+id: integration
+sidebar_position: 1
+---
+
+# Integration
+
+:::tip
+If this is your first time working with 8th Wall, we strongly recommend starting with or referencing an [example project](https://github.com/8thwall/8thwall/tree/main/examples).
+:::
+
+## Integrating 8th Wall with A-Frame {#aframe}
+
+8th Wall can be integrated with your A-Frame project in a few easy steps:
+
+1. Include a slightly modified version of A-Frame (referred to as "8-Frame") which fixes some polish concerns:
+```
+
+```
+2. Download the [8th Wall Engine Binary](https://8th.io/xrjs) and unzip it into your project folder
+3. Add the 8th Wall Engine as a script tag to the `` of your `index.html`
+```
+
+```
+4. Add the [`xrconfig`](/docs/engine/api/aframe/xrconfig/) component to your ``
+5. Depending on the type of experience you want to develop, add one of the following components to your ``:
+* **World Tracking & Image Targets**: [`xrweb`](/docs/engine/api/aframe/xrweb/)
+* **Face Effects**: [`xrface`](/docs/engine/api/aframe/xrface/)
+* **Sky Effects**: [`xrlayers`](/docs/engine/api/aframe/xrlayers/)
+
+:::note
+See documentation on [A-Frame Components](/docs/engine/api/aframe/), [A-Frame Events](/docs/engine/api/aframeevents/) and [A-Frame Event Listeners](/docs/engine/api/aframeeventlisenters/) provided by the 8th Wall Engine.
+:::
+
+## Integrating 8th Wall with three.js {#threejs}
+
+```
+// app.js
+const onxrloaded = () => {
+ XR8.addCameraPipelineModules([ // Add camera pipeline modules.
+ // Existing pipeline modules.
+ XR8.GlTextureRenderer.pipelineModule(), // Draws the camera feed.
+ XR8.Threejs.pipelineModule(), // Creates a ThreeJS AR Scene.
+ XR8.XrController.pipelineModule(), // Enables SLAM tracking.
+ window.LandingPage.pipelineModule(), // Detects unsupported browsers and gives hints.
+ XRExtras.FullWindowCanvas.pipelineModule(), // Modifies the canvas to fill the window.
+ XRExtras.Loading.pipelineModule(), // Manages the loading screen on startup.
+ XRExtras.RuntimeError.pipelineModule(), // Shows an error image on runtime error.
+ // Custom pipeline modules.
+ initScenePipelineModule(), // Sets up the threejs camera and scene content.
+ ])
+
+ // Add a canvas to the document for our xr scene.
+ document.body.insertAdjacentHTML('beforeend', camerafeedHtml)
+ const canvas = document.getElementById('camerafeed')
+
+ // Open the camera and start running the camera run loop.
+ XR8.run({canvas, allowedDevices: XR8.XrConfig.device().ANY})
+}
+
+window.XR8 ? onxrloaded() : window.addEventListener('xrloaded', onxrloaded)
+```
diff --git a/docs/engine/getting-started/publishing.md b/docs/engine/getting-started/publishing.md
new file mode 100644
index 0000000..b0c0d0c
--- /dev/null
+++ b/docs/engine/getting-started/publishing.md
@@ -0,0 +1,88 @@
+---
+id: publishing
+sidebar_position: 5
+---
+
+# Publishing
+
+Publishing an 8th Wall project simply means hosting the built `dist/` folder somewhere. Because the output is a static website, you can use nearly any static hosting provider.
+
+:::info
+WebAR requires a secure context (**HTTPS**) for camera access. Pick a host that provides HTTPS by default (most do).
+:::
+
+## Generate a production build
+
+From the project root, run:
+
+`npm run build`
+
+
+
+Once the project has been built, a folder named `dist` will be added to the project root. This folder contains everything you need to host the project.
+
+
+
+## Host your project
+
+Below are common hosting options grouped by workflow:
+
+- **Drag & drop (recommended for beginners)**: upload your `dist/` folder (or a zip) in a web UI.
+- **Git-based CI/CD**: connect a Git repo for automatic deploys when you push changes.
+
+### Drag & drop hosting
+
+:::tip
+These hosting solutions are recommended for beginners or if you just want "upload and go". For ongoing updates, you will need to rebuild locally and upload/deploy again.
+:::
+
+#### Netlify Drop
+
+Netlify Drop lets you drag and drop your dist folder and get a live URL immediately, great for quick demos and sharing.
+
+1. Build your project: `npm run build`
+2. Open [Netlify Drop](https://app.netlify.com/drop)
+3. Drag your `dist/` folder into the page
+4. You’ll get a live URL right away
+
+#### Cloudflare Pages
+
+Cloudflare Pages supports a Direct Upload flow that includes drag & drop of a folder or zip.
+
+1. Build your project: `npm run build`
+2. Create a Pages project using **Direct Upload**
+3. Drag & drop the `dist/` folder (or upload a zip)
+4. Your site deploys and you get a URL
+
+#### AWS Amplify
+
+Amplify Hosting supports manual deployments where you can drag & drop a zipped build output.
+
+1. Build your project: `npm run build`
+2. Zip the `dist/` folder
+3. In Amplify Hosting, choose **Deploy without a Git provider**
+4. Drag & drop the zip and deploy
+
+#### Neocities
+
+Neocities is a straightforward platform that works well for simple static sites (especially personal/demo projects).
+
+**Steps**
+1. Build your project: `npm run build`
+2. Upload the contents of `dist/` via the Neocities editor/uploader
+3. Use the provided site URL
+
+
+### Git-based hosting
+
+:::tip
+If you plan to keep iterating, git-based hosting gives you automatic deployments when you push to your repo. These solutions are better for teams & ongoing updates.
+:::
+
+#### GitHub Pages
+
+GitHub Pages publishes static files from a repository and is a common "set it and forget it" option.
+
+#### Vercel / Netlify (CI/CD)
+
+If your project lives in GitHub/GitLab, these platforms can auto-build and auto-deploy on every push.
diff --git a/docs/engine/guides/01-landing-page.md b/docs/engine/guides/01-landing-page.md
new file mode 100644
index 0000000..7b2d652
--- /dev/null
+++ b/docs/engine/guides/01-landing-page.md
@@ -0,0 +1,127 @@
+---
+id: landing-pages
+---
+# Landing Pages
+
+Landing Pages are an evolution of our popular "Almost There" pages.
+
+## Why Use Landing Pages? {#why-use-landing-pages}
+
+We have transformed these pages to become powerful branding and marketing opportunities for you and
+your clients. All Landing Page templates are optimized for branding and education with various
+layouts, an improved QR code design and support for key media.
+
+Landing Pages ensure that your users have a meaningful experience no matter what device they are on.
+They appear on devices that are not permitted to access, or are not capable of accessing, the Web AR experience
+directly. They also continue our mission of making AR accessible by helping users get to the right
+destination to engage with AR.
+
+We designed Landing Pages in a manner which makes it extremely easy for developers to customize the
+page. We want you to benefit from an optimized Landing Page while still enabling you to spend your
+time on building your WebAR experience.
+
+## Landing Pages Intelligently Adapt To Your Configuration {#landing-pages-intelligently-adapt-to-your-configuration}
+
+
+
+
+
+## Use Landing Pages in Your Project {#use-landing-pages-in-your-project}
+
+1. Open your Project
+2. Add the following tag to `head.html`
+
+``
+
+Note: For Self-Hosted projects, you would add the following ``
+
+3. **Remove** `xrextras-almost-there` from your A-Frame project, or
+`XRExtras.AlmostThere.pipelineModule()` from your Non-AFrame project. (Landing Pages include
+almost-there logic in addition to the updates to the QR code page.)
+4. Optionally, customize the parameters of your `landing-page` component as defined below. For
+Non-AFrame projects, please refer to the [LandingPage.configure()](/docs/engine/api/landingpage/configure)
+documentation.
+
+## A-Frame component parameters (All Optional) {#a-frame-component-parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+logoSrc | `String` | | Image source for brand logo image.
+logoAlt | `String` | `'Logo'` | Alt text for brand logo image.
+promptPrefix | `String` | `'Scan or visit'` | Sets the text string for call to action before the URL for the experience is displayed.
+url | `String` | 8th.io link if 8th Wall hosted, or current page | Sets the displayed URL and QR code.
+promptSuffix | `String` | `'to continue'` | Sets the text string for call to action after the URL for the experience is displayed.
+textColor | Hex Color | `'#ffffff'` | Color of all the text on the Landing Page.
+font | `String` | `"'Nunito', sans-serif"` | Font of all text on the Landing Page. This parameter accepts valid CSS font-family arguments.
+textShadow | `Boolean` | `false` | Sets text-shadow property for all text on the Landing Page.
+backgroundSrc | `String` | | Image source for background image.
+backgroundBlur | `Number` | `0` | Applies a blur effect to the `backgroundSrc` if one is specified. (Typically values are between 0.0 and 1.0)
+backgroundColor | `String` | `'linear-gradient(#464766,#2D2E43)'` | Background color of the Landing Page. This parameter accepts valid CSS background-color arguments. Background color is not displayed if a background-src or sceneEnvMap is set.
+mediaSrc | `String` | App’s cover image, if present | Media source (3D model, image, or video) for landing page hero content. Accepted media sources include a-asset-item id, or static URL.
+mediaAlt | `String` | `'Preview'` | Alt text for landing page image content.
+mediaAutoplay | `Boolean` | `true` | If the `mediaSrc` is a video, specifies if the video should be played on load with sound muted.
+mediaAnimation | `String` | First animation clip of model, if present | If the `mediaSrc` is a 3D model, specify whether to play a specific animation clip associated with the model, or "none".
+mediaControls | `String` | `'minimal'` | If `mediaSrc` is a video, specify media controls displayed to the user. Choose from "none", "minimal" or "browser" (browser defaults)
+sceneEnvMap | `String` | `'field'` | Image source pointing to an equirectangular image. Or one of the following preset environments: "field", "hill", "city", "pastel", or "space".
+sceneOrbitIdle | `String` | `'spin'` | If the `mediaSrc` is a 3D model, specify whether the model should "spin", or "none".
+sceneOrbitInteraction | `String` | `'drag'` | If the `mediaSrc` is a 3D model, specify whether the user can interact with the orbit controls, choose "drag", or "none".
+sceneLightingIntensity | `Number` | `1` | If the `mediaSrc` is a 3D model, specify the strength of the light illuminating the model.
+vrPromptPrefix | `String` | `'or visit'` | Sets the text string for call to action before the URL for the experience is displayed on VR headsets.
+
+## Examples {#examples}
+
+#### 3D Layout with user specified parameters {#3d-layout-with-user-specified-parameters}
+
+
+
+#### A-Frame Example with External URL (screenshot above) {#a-frame-example}
+
+```html
+
+```
+
+#### A-Frame Example with Local Asset {#a-frame-local-asset example}
+```html
+
+
+
+
+
+
+```
+
+#### Non-AFrame Example (screenshot above) {#non-aframe-example--screenshot-above}
+
+```js
+// Configured here
+LandingPage.configure({
+ mediaSrc: 'https://www.mydomain.com/bat.glb',
+ sceneEnvMap: 'hill',
+})
+XR8.addCameraPipelineModules([
+ XR8.GlTextureRenderer.pipelineModule(),
+ XR8.Threejs.pipelineModule(),
+ XR8.XrController.pipelineModule(),
+ XRExtras.FullWindowCanvas.pipelineModule(),
+ XRExtras.Loading.pipelineModule(),
+ XRExtras.RuntimeError.pipelineModule(),
+ // Added here
+ LandingPage.pipelineModule(),
+ ...
+])
+```
diff --git a/docs/engine/guides/02-coaching-overlay.md b/docs/engine/guides/02-coaching-overlay.md
new file mode 100644
index 0000000..17bd443
--- /dev/null
+++ b/docs/engine/guides/02-coaching-overlay.md
@@ -0,0 +1,280 @@
+---
+id: coaching-overlays
+---
+
+# Coaching Overlay
+
+Coaching Overlays allow you to onboard users and ensure the best experience.
+
+## Absolute Scale Coaching Overlay {#absolute-scale-coaching-overlay}
+
+The Absolute Scale Coaching Overlay onboards users to absolute scale experiences ensuring that they collect the
+best possible data to determine scale. We designed the Coaching Overlay to make it easily
+customizable by developers, enabling you to focus your time on building your WebAR experience.
+
+### Use Absolute Scale Coaching Overlay in Your Project: {#use-absolute-scale-coaching-overlay-in-your-project}
+
+1. Open your Project
+2. Add the following tag to `head.html`
+
+```jsx
+
+```
+
+Note: For Self-Hosted projects, you would add the following `
+```
+
+3. Optionally, customize the parameters of your `coaching-overlay` component as defined below. For
+Non-AFrame projects, please refer to the
+[CoachingOverlay.configure()](/docs/engine/api/coachingoverlay/configure) documentation.
+
+### A-Frame component parameters (all optional) {#absolute-scale-coaching-overlay-parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+animationColor | `String` | `'white'` | Color of the Coaching Overlay animation. This parameter accepts valid CSS color arguments.
+promptColor | `String` | `'white'` | Color of all the Coaching Overlay text. This parameter accepts valid CSS color arguments.
+promptText | `String` | `'Move device forward and back'` | Sets the text string for the animation explainer text that informs users of the motion they need to make to generate scale.
+disablePrompt | `Boolean` | `false` | Set to true to hide default Coaching Overlay in order to use Coaching Overlay events for a custom overlay.
+
+### Creating a custom Coaching Overlay for your project {#custom-absolute-scale-coaching-overlay}
+
+Coaching Overlay can be added as a pipeline module and then hidden (using the `disablePrompt`
+parameter) so that you can easily use the Coaching Overlay events to trigger a custom overlay.
+
+Coaching Overlay events are only fired when `scale` is set to `absolute`. Events are emitted by the
+8th Wall camera run loop and can be listened to from within a pipeline module. These events
+include:
+
+* `coaching-overlay.show`: event is triggered when the Coaching Overlay should be shown.
+* `coaching-overlay.hide`: event is triggered when the Coaching Overlay should be hidden.
+
+#### Example - Coaching Overlay with user specified parameters {#example---coaching-overlay-with-user-specified-parameters}
+
+
+
+#### A-Frame Example (screenshot above) {#a-frame-example-screenshot-above}
+
+```jsx
+
+```
+
+#### Non-AFrame Example (screenshot above) {#non-aframe-example--screenshot-above}
+
+```jsx
+// Configured here
+CoachingOverlay.configure({
+ animationColor: '#E86FFF',
+ promptText: 'To generate scale push your phone forward and then pull back',
+})
+XR8.addCameraPipelineModules([
+ XR8.GlTextureRenderer.pipelineModule(),
+ XR8.Threejs.pipelineModule(),
+ XR8.XrController.pipelineModule(),
+ XRExtras.FullWindowCanvas.pipelineModule(),
+ XRExtras.Loading.pipelineModule(),
+ XRExtras.RuntimeError.pipelineModule(),
+ LandingPage.pipelineModule(),
+ // Added here
+ CoachingOverlay.pipelineModule(),
+ ...
+])
+```
+
+#### AFrame Example - Listening for Coaching Overlay events {#aframe-example---listening-for-coaching-overlay-events}
+
+```javascript
+this.el.sceneEl.addEventListener('coaching-overlay.show', () => {
+ // Do something
+ })
+
+this.el.sceneEl.addEventListener('coaching-overlay.hide', () => {
+ // Do something
+})
+```
+
+#### Non-AFrame Example - Listening for Coaching Overlay events {#non-aframe-example---listening-for-coaching-overlay-events}
+
+```javascript
+const myShow = () => {
+ console.log('EXAMPLE: COACHING OVERLAY - SHOW')
+}
+
+const myHide = () => {
+ console.log('EXAMPLE: COACHING OVERLAY - HIDE')
+}
+
+const myPipelineModule = {
+ name: 'my-coaching-overlay',
+ listeners: [
+ {event: 'coaching-overlay.show', process: myShow},
+ {event: 'coaching-overlay.hide', process: myHide},
+ ],
+}
+
+const onxrloaded = () => {
+ XR8.addCameraPipelineModule(myPipelineModule)
+}
+
+window.XR8 ? onxrloaded() : window.addEventListener('xrloaded', onxrloaded)
+```
+
+## Sky Effects Coaching Overlay {#sky-effects-coaching-overlay}
+
+The Sky Effects Coaching Overlay onboards users to Sky Effects experiences ensuring that they are pointing their
+device at the sky. We designed the Coaching Overlay to make it easily customizable by developers,
+enabling you to focus your time on building your WebAR experience.
+
+### Use Sky Effects Coaching Overlay in Your Project {#use-sky-effects-coaching-overlay-in-your-project}
+
+1. Open your Project
+2. Add the following tag to `head.html`
+
+```jsx
+
+```
+
+Note: For Self-Hosted projects, you would add the following `
+```
+
+3. Optionally, customize the parameters of your `sky-coaching-overlay` component as defined below.
+For Non-AFrame projects, please refer to the SkyCoachingOverlay.configure() documentation.
+
+### A-Frame component parameters (all optional) {#sky-coaching-overlay-parameters}
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+animationColor | `String` | `'white'` | Color of the Coaching Overlay animation. This parameter accepts valid CSS color arguments.
+promptColor | `String` | `'white'` | Color of all the Coaching Overlay text. This parameter accepts valid CSS color arguments.
+promptText | `String` | `'Point your phone towards the sky'` | Sets the text string for the animation explainer text that informs users of the motion they need to make.
+disablePrompt | `Boolean` | `false` | Set to true to hide default Coaching Overlay in order to use Coaching Overlay events for a custom overlay.
+autoByThreshold | `Boolean` | `true` | Automatically show/hide the overlay based on whether the percentage of sky pixels is above `hideThreshold` or below `showThreshold`.
+showThreshold | `Number` | 0.1 | Show a currently hidden overlay if the percentage of sky pixels is below this threshold.
+hideThreshold | `Number` | 0.05 | Hide a currently shown overlay if the percentage of sky pixels is above this threshold.
+
+### Creating a custom Coaching Overlay for your project {#custom-sky-coaching-overlay}
+
+Sky Coaching Overlay can be added as a pipeline module and then hidden (using the `disablePrompt` parameter) so that you can easily use the Coaching Overlay events to trigger a custom overlay.
+
+* `sky-coaching-overlay.show`: event is triggered when the Coaching Overlay should be shown.
+* `sky-coaching-overlay.hide`: event is triggered when the Coaching Overlay should be hidden.
+
+
+**SkyCoachingOverlay.control**
+
+By default, Sky Effects Coaching Overlay automatically shows and hides the coaching overlay depending on whether the user is looking at the sky or not. You can take control of this behavior by using `SkyCoachingOverlay.control`.
+
+```javascript
+// Show the coaching overlay
+SkyCoachingOverlay.control.show()
+// Hide the coaching overlay
+SkyCoachingOverlay.control.hide()
+// Make it so the sky coaching overlay automatically shows / hides itself.
+SkyCoachingOverlay.control.setAutoShowHide(true)
+// Make it so the sky coaching overlay does not automatically show / hide itself.
+SkyCoachingOverlay.control.setAutoShowHide(false)
+// Hides the coaching overlay and cleans it up
+SkyCoachingOverlay.control.cleanup()
+```
+
+For example, part of your experience might no longer require the user to look at the sky. If you don’t call `setAutoShowHide(false)`, the coaching overlay will appear on top of your UI. In this case, call `setAutoShowHide(false)`, then manually control show and hide using `show()` and `hide()`. Or when you are ready to ask the user to look at the sky again, `setAutoShowHide(true)`.
+
+
+#### Example - Sky Coaching Overlay with user specified parameters {#example---sky-coaching-overlay-with-user-specified-parameters}
+
+
+
+#### A-Frame Example (screenshot above) {#a-frame-example-screenshot-above}
+
+```html
+
+```
+
+#### Non-AFrame Example (screenshot above) {#non-aframe-example--screenshot-above}
+
+```javascript
+// Configured here
+SkyCoachingOverlay.configure({
+ animationColor: '#E86FFF',
+ promptText: 'Look at the sky!!',
+})
+XR8.addCameraPipelineModules([ // Add camera pipeline modules.
+ // Existing pipeline modules.
+ XR8.GlTextureRenderer.pipelineModule(), // Draws the camera feed.
+ XR8.Threejs.pipelineModule(), // Creates a ThreeJS AR Scene as well as a Sky scene.
+ window.LandingPage.pipelineModule(), // Detects unsupported browsers and gives hints.
+ XRExtras.FullWindowCanvas.pipelineModule(), // Modifies the canvas to fill the window.
+ XRExtras.Loading.pipelineModule(), // Manages the loading screen on startup.
+ XRExtras.RuntimeError.pipelineModule(), // Shows an error image on runtime error.
+
+ // Enables Sky Segmentation.
+ XR8.LayersController.pipelineModule(),
+ SkyCoachingOverlay.pipelineModule(),
+
+ ...
+ mySkySampleScenePipelineModule(),
+ ])
+
+ XR8.LayersController.configure({layers: {sky: {invertLayerMask: false}}})
+ XR8.Threejs.configure({layerScenes: ['sky']})
+
+```
+
+#### AFrame Example - Listening for Sky Coaching Overlay events {#aframe-example---listening-for-sky-coaching-overlay-events}
+
+```javascript
+this.el.sceneEl.addEventListener('sky-coaching-overlay.show', () => {
+ // Do something
+ })
+
+this.el.sceneEl.addEventListener('sky-coaching-overlay.hide', () => {
+ // Do something
+})
+```
+
+#### Non-AFrame Example - Listening for Sky Coaching Overlay events {#non-aframe-example---listening-for-sky-coaching-overlay-events}
+
+```javascript
+const myShow = () => {
+ console.log('EXAMPLE: SKY COACHING OVERLAY - SHOW')
+}
+
+const myHide = () => {
+ console.log('EXAMPLE: SKY COACHING OVERLAY - HIDE')
+}
+
+const myPipelineModule = {
+ name: 'my-sky-coaching-overlay',
+ listeners: [
+ {event: 'sky-coaching-overlay.show', process: myShow},
+ {event: 'sky-coaching-overlay.hide', process: myHide},
+ ],
+}
+
+const onxrloaded = () => {
+ XR8.addCameraPipelineModule(myPipelineModule)
+}
+
+window.XR8 ? onxrloaded() : window.addEventListener('xrloaded', onxrloaded)
+```
diff --git a/docs/engine/guides/05-load-screen.mdx b/docs/engine/guides/05-load-screen.mdx
new file mode 100644
index 0000000..5f1106c
--- /dev/null
+++ b/docs/engine/guides/05-load-screen.mdx
@@ -0,0 +1,148 @@
+---
+id: load-screen
+---
+# Customizing the Load Screen
+
+8th Wall's [XRExtras](https://github.com/8thwall/web/tree/master/xrextras) library provides modules
+that handle the most common WebAR application needs, including the load screen, social link-out
+flows and error handling.
+
+The `Loading` module displays a loading overlay and camera permissions prompt while libraries are
+loading, and while the camera is starting up. It's the first thing your users see when they enter
+your WebAR experience.
+
+This section describes how to customize the loading screen by providing values that change the
+color, load spinner, and load animation to match the overall design of your experience.
+
+**Note:** All projects must display the [Powered by 8th Wall](https://drive.google.com/drive/folders/1c9d23c5hS_HspHTUD7VceV6ocqdbPN7J?usp=sharing)
+badge on the loading page. It's included by default in the `Loading Module` and cannot be removed.
+
+## IDs / Classes to override {#ids--classes-to-override}
+
+````mdx-code-block
+
+
+
Loading Screen
+
iOS (13+) Motion Sensor Prompt
+
+
+ {/* Images */}
+
+
+
+
+
+
+
+
+ {/* Key */}
+
+
+
+
#requestingCameraPermissions
+
#requestingCameraIcon
+
#loadBackground
+
#loadImage
+
+
+
+
+
.prompt-box-8w
+
.prompt-button-8w
+
.button-primary-8w
+
+
+ To customize the text, you can use a MutationObserver. Please refer to the code example below.
+
+
+
+
+````
+
+## A-Frame component parameters {#a-frame-component-parameters}
+
+If you are using XRExtras with an A-Frame project, the `xrextras-loading` module makes it easy to customize the load screen via the following parameters:
+
+Parameter | Type | Description
+--------- | ---- | -----------
+cameraBackgroundColor | Hex Color | Background color of the loading screen's top section behind the camera icon and text (See above. Loading Screen #1)
+loadBackgroundColor | Hex Color | Background color of the loading screen's lower section behind the `loadImage` (See above. Loading Screen #3)
+loadImage | ID | The ID of an image. The image needs to be an `` (See above. Loading Screen #4)
+loadAnimation | `String` | Animation style of `loadImage`. Choose from `spin` (default), `pulse`, `scale`, or `none`
+
+#### A-Frame Component Example {#a-frame-component-example}
+
+```jsx
+
+
+
+
+
+```
+
+#### Javascript/CSS method {#javascriptcss-method}
+
+```javascript
+const load = () => {
+ XRExtras.Loading.showLoading()
+ console.log('customizing loading spinner')
+ const loadImage = document.getElementById("loadImage")
+ if (loadImage) {
+ loadImage.src = require("./assets/my-custom-image.png")
+ }
+}
+window.XRExtras ? load() : window.addEventListener('xrextrasloaded', load)
+```
+
+#### CSS example {#css-example}
+
+```css
+#requestingCameraPermissions {
+ color: black !important;
+ background-color: white !important;
+}
+#requestingCameraIcon {
+ /* This changes the image from white to black */
+ filter: invert(1) !important;
+}
+
+.prompt-box-8w {
+ background-color: white !important;
+ color: #00FF00 !important;
+}
+.prompt-button-8w {
+ background-color: #0000FF !important;
+}
+
+.button-primary-8w {
+ background-color: #7611B7 !important;
+}
+```
+
+#### iOS (13+) Motion Sensor Prompt Text Customization {#ios-13-motion-sensor-prompt-text-customization}
+
+```javascript
+let inDom = false
+const observer = new MutationObserver(() => {
+ if (document.querySelector('.prompt-box-8w')) {
+ if (!inDom) {
+ document.querySelector('.prompt-box-8w p').innerHTML = 'My new text goes here
Press Approve to continue.'
+ document.querySelector('.prompt-button-8w').innerHTML = 'Deny'
+ document.querySelector('.button-primary-8w').innerHTML = 'Approve'
+ }
+ inDom = true
+ } else if (inDom) {
+ inDom = false
+ observer.disconnect()
+ }
+})
+observer.observe(document.body, {childList: true})
+```
diff --git a/docs/engine/guides/06-video-recording.md b/docs/engine/guides/06-video-recording.md
new file mode 100644
index 0000000..07a562a
--- /dev/null
+++ b/docs/engine/guides/06-video-recording.md
@@ -0,0 +1,107 @@
+---
+id: video-recording
+---
+# Customize Video Recording
+
+8th Wall's [XRExtras](https://github.com/8thwall/web/tree/master/xrextras) library provides modules
+that handle the most common WebAR application needs, including the load screen, social link-out
+flows and error handling.
+
+The XRExtras [MediaRecorder](https://github.com/8thwall/web/tree/master/xrextras/src/mediarecorder)
+module makes it easy to customize the Video Recording user experience in your project.
+
+This section describes how to customize recorded videos with things like capture button behavior
+(tap vs hold), add video watermarks, max video length, end card behavior and looks, etc.
+
+## A-Frame primitives {#a-frame-primitives}
+
+`xrextras-capture-button` : Adds a capture button to the scene.
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+capture-mode | `String` | `'standard'` | Sets the capture mode behavior. **standard**: tap to take photo, tap + hold to record video. **fixed**: tap to toggle video recording. **photo**: tap to take photo. One of `[standard, fixed, photo]`
+
+`xrextras-capture-config` : Configures the captured media.
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+max-duration-ms | int | `15000` | Total video duration (in milliseconds) that the capture button allows. If the end card is disabled, this corresponds to max user record time. 15000 by default.
+max-dimension | int | `1280` | Maximum dimension (width or height) of captured video. For photo configuration, please see [`XR8.CanvasScreenshot.configure()`](/docs/engine/api/canvasscreenshot/configure)
+enable-end-card | `Boolean` | `true` | Whether the end card is included in the recorded media.
+cover-image-url | `String` | | Image source for end card cover image. Uses project's cover image by default.
+end-card-call-to-action | `String` | `'Try it at: '` | Sets the text string for call to action on end card.
+short-link | `String` | | Sets the text string for end card shortlink. Uses project shortlink by default.
+footer-image-url | `String` | Powered by 8th Wall image | Image source for end card footer image.
+watermark-image-url | `String` | `null` | Image source for watermark.
+watermark-max-width | int | 20 | Max width (%) of watermark image.
+watermark-max-height | int | 20 | Max height (%) of watermark image.
+watermark-location | `String` | `'bottomRight'` | Location of watermark image. One of `topLeft, topMiddle, topRight, bottomLeft, bottomMiddle, bottomRight`
+file-name-prefix | `String` | `'my-capture-'` | Sets the text string that prepends the unique timestamp on file name.
+request-mic | `String` | `'auto'` | Determines if you want to set up the microphone during initialization (`'auto'`) or during runtime (`'manual'`)
+include-scene-audio | `Boolean` | `true` | If true, the A-Frame sounds in the scene will be part of the recorded output.
+
+`xrextras-capture-preview` : Adds a media preview prefab to the scene which allows for playback, downloading, and sharing.
+
+Parameter | Type | Default | Description
+--------- | ---- | ------- | -----------
+action-button-share-text | `String` | `'Share'` | Sets the text string in action button when Web Share API 2 **is** available (Android, iOS 15 or higher). `'Share'` by default.
+action-button-view-text | `String` | `'View'` | Sets the text string in action button when Web Share API 2 is **not** available in iOS (iOS 14 or below). `'View'` by default.
+
+## XRExtras.MediaRecorder Events {#xrextrasmediarecorder-events}
+
+XRExtras.MediaRecorder emits the following events.
+
+#### Events Emitted {#events-emitted}
+
+Event Emitted | Description | Event Detail
+------------- | ----------- | ------------
+mediarecorder-photocomplete | Emitted after a photo is taken. | {blob}
+mediarecorder-recordcomplete | Emitted after a video recording is complete. | {videoBlob}
+mediarecorder-previewready | Emitted after a previewable video recording is complete. [(Android/Desktop only)](/docs/engine/api/mediarecorder/recordvideo/#parameters) | {videoBlob}
+mediarecorder-finalizeprogress | Emitted when the media recorder is making progress in the final export. [(Android/Desktop only)](/docs/engine/api/mediarecorder/recordvideo/#parameters) | {progress, total}
+mediarecorder-previewopened | Emitted after recording preview is opened. | null
+mediarecorder-previewclosed | Emitted after recording preview is closed. | null
+
+#### Example: A-Frame Primitives {#primitives-example}
+
+```jsx
+
+
+
+
+
+```
+
+#### Example: A-Frame Events {#example-a-frame-events}
+
+```javascript
+window.addEventListener('mediarecorder-previewready', (e) => {
+ console.log(e.detail.videoBlob)
+})
+```
+
+#### Example: Change Share Button CSS {#change-share-button-example}
+
+```css
+#actionButton {
+ /* change color of action button */
+ background-color: #007aff !important;
+}
+```
diff --git a/docs/engine/guides/07-your-3d-models-on-the-web.md b/docs/engine/guides/07-your-3d-models-on-the-web.md
new file mode 100644
index 0000000..1c31443
--- /dev/null
+++ b/docs/engine/guides/07-your-3d-models-on-the-web.md
@@ -0,0 +1,168 @@
+---
+id: your-3d-models-on-the-web
+---
+
+# Your 3D Models on the Web
+
+We recommend using 3D models in GLB (glTF 2.0 binary) format for all WebAR experiences. GLB is
+currently the best format for WebAR with its small file size, great performance and versatile
+feature support (PBR, animations, etc).
+
+## Converting Models to GLB format {#converting-models-to-glb}
+
+Before you export, ensure that:
+
+* Pivot point is at the base of the model (if you expect it to attach to the ground)
+* Forward vector of object is along Z axis (if you expect it to face forward)
+
+If your model is exported as a glTF, drag and drop the glTF folder into
+[gltf.report](https://gltf.report) and click _Export_ to convert it to a GLB.
+
+If your model cannot be exported to glTF/GLB from 3D modeling software, import it in Blender and
+export as glTF or use a converter.
+
+**Online converters**: [Creators3D](https://www.creators3d.com/online-viewer), [Boxshot](https://boxshot.com/facebook-3d-converter/)
+
+**Native converters**: [Maya2glTF](https://github.com/iimachines/Maya2glTF), [3DS Max](https://doc.babylonjs.com/features/featuresDeepDive/Exporters/3DSMax)
+
+A full list of converters can be found in the [Khronos glTF Project Explorer](https://github.khronos.org/glTF-Project-Explorer/).
+
+It's a good idea to view the model in [glTF Viewer](https://gltf-viewer.donmccurdy.com/) before
+importing it to an 8th Wall project. This will help catch any issues with your model prior to adding
+it to an 8th Wall project.
+
+After you import into an 8th Wall project, ensure that:
+
+* Scale appears correct at (1, 1, 1). If scale is off by a significant amount (i.e. 0.0001 or
+10000), do not change the scale in code. Instead, change it in your modeling software and
+re-import. Changing the scale significantly in code may result in clipping issues with the model.
+* Materials appear correct. If your model has reflective materials, they may appear black unless
+given something to reflect. See the
+[reflections sample project](https://www.8thwall.com/8thwall/cubemap-aframe) and/or the
+[glass sample project](https://www.8thwall.com/playground/glass-materials-aframe)
+
+For more information about 3D model best practices, reference the [GLB optimization section](#glb-optimization).
+
+Please also view the [5 Tips for Developers to Make Any 8th Wall WebAR Project More Realistic](https://www.8thwall.com/blog/post/41447089034/5-tips-for-developers-to-make-any-8th-wall-webar-project-more-realistic) blog post.
+
+### Converting FBX to GLB {#converting-fbx-to-glb}
+
+The following instructions will explain how to install and run the Facebook-developed [FBX2glTF CLI conversion tool](https://github.com/facebookincubator/FBX2glTF) locally on your Mac. This tool is by far the most reliable tool any one of us at 8th Wall have used yet for FBX to GLB conversion and we have used it for all our first party content to date.
+
+**Installing FBX2glTF on your Mac**
+
+1. Download this file: https://github.com/facebookincubator/FBX2glTF/releases/download/v0.9.7/FBX2glTF-darwin-x64
+2. Open Terminal
+3. Navigate to the Downloads folder: `cd ~/Downloads`
+4. Make the file executable: `chmod +x FBX2glTF-darwin-x64`
+5. If you see a warning about the downloaded file, simply click `Cancel`
+
+
+
+6. Open `System Preferences` -> `Security & Privacy`, click the `Lock` icon and then enter your `macOS password`.
+
+
+
+7. Click `Allow Anyway`
+8. Close System Preferences.
+9. Return to the Terminal window
+10. Re-run the previous command (pressing the Up arrow should restore the previous command): `chmod +x FBX2glTF-darwin-x64`
+11. An updated warning will be displayed, click `Open`:
+
+
+
+12. At this point you should be able to successfully run the FBX2glTF
+
+**Copy FBX2glTF to a system directory (Optional)**
+
+To make it easier to run the FBX2glTF converter, copy it into the /usr/local/bin directory. This eliminates the need to navigate to the Downloads folder each time to run the command.
+
+1. From the Downloads folder, run `sudo cp ./FBX2glTF-darwin-x64 /usr/local/bin`
+2. The system will likely ask for your macOS password. Type it in and press `Enter`
+3. Since `/usr/local/bin` should be in your PATH by default, you can now simply run
+`FBX2glTF-darwin-x64` from any directory.
+
+**Running FBX2glTF on your Mac**
+
+1. In Terminal, navigate to the folder where your fbx files are located. Here are some helpful
+commands for traversing directories via command line on macOS:
+ * `pwd` outputs the current directory of the terminal.
+  * `ls` lists the contents of the current directory.
+ * `cd` changes directory, and takes either a relative (e.g `cd ~/Downloads`) or absolute path (e.g. `cd /var/tmp`)
+
+2. Run the `FBX2glTF-darwin-x64` and pass in parameters for input (-i) and output (-o) files.
+
+#### FBX2glTF Example {#fbx2gltf-example}
+
+```bash
+FBX2glTF-darwin-x64 -i yourfile.fbx -o newfile.glb
+```
+
+3. The above example will convert `yourfile.fbx` into a new GLB file named `newfile.glb`
+4. Drag and Drop the newly created GLB file into https://gltf-viewer.donmccurdy.com/ to verify it
+works correctly.
+5. For advanced configuration of the glb conversion, check out the following commands:
+https://github.com/facebookincubator/FBX2glTF#cli-switches
+
+**FBX2glTF Batch Conversion**
+
+If you have multiple FBX files in the same directory, you can convert them all at once
+
+1. In Terminal, navigate to the folder containing multiple FBX files
+2. Run the following command:
+
+#### FBX2glTF Batch Conversion Example {#fbx2gltf-batch-conversion-example}
+
+```bash
+ls *.fbx | xargs -n1 -I {} FBX2glTF-darwin-x64 -i {} -o {}.glb
+```
+
+3. This should produce glb versions of each fbx file you have in the current folder!
+
+## GLB Optimization {#glb-optimization}
+
+Optimizing assets is a critical step to creating magical WebAR content. Large assets can lead to
+issues such as infinite loading, black textures, and crashes.
+
+### Texture Optimization {#texture-optimization}
+
+Textures are usually the biggest contributor to large file sizes, so it’s a good idea to optimize
+these first.
+
+For best results, we suggest using textures 1024x1024 or smaller. Texture sizes should always be set
+to the power of two (512x512, 1024x1024, etc).
+
+This can be done using your favorite image editing and/or 3D modeling program; however, if you
+already have an existing GLB model, a quick and easy way to resize the textures within the 3D model
+is to use [gltf.report](https://gltf.report)
+
+* Drag your 3D model onto the page. In the left column, set the maximum desired texture size (1).
+* Click play (2) to run the script. The Console (lower left) will display status of the operation.
+* Download your modified GLB model by clicking Export (3)
+
+
+
+### Compression {#compression}
+
+Compression can greatly reduce file size. Draco compression is the most popular compression method
+and can be configured in Blender export settings or after exporting in
+[gltf.report](https://gltf.report).
+
+Loading compressed models to your project requires additional configuration. Reference the
+[A-Frame sample project](https://www.8thwall.com/playground/draco-compression) or the
+[three.js sample project](https://www.8thwall.com/playground/draco-threejs) for more information.
+
+### Geometry Optimization {#geometry-optimization}
+
+For further optimization, decimate the model to reduce polygon count.
+
+In Blender, apply the _Decimate_ modifier to the model and reduce the _Ratio_ setting to a value under 1.
+
+Select _Apply Modifiers_ in the export settings.
+
+### Optimization Tutorial {#optimization-tutorial}
+
+````mdx-code-block
+
+
+````
diff --git a/docs/engine/guides/09-advanced-analytics.md b/docs/engine/guides/09-advanced-analytics.md
new file mode 100644
index 0000000..f98056b
--- /dev/null
+++ b/docs/engine/guides/09-advanced-analytics.md
@@ -0,0 +1,144 @@
+---
+id: advanced-analytics
+---
+# Advanced Analytics
+
+The process for adding analytics to a project is the same as adding them to any non-AR
+website. You are welcome to use any analytics solution you prefer.
+
+In this example, we’ll explain how to add Google Analytics to your 8th Wall project using Google Tag
+Manager (GTM) - making it easy to collect custom analytics on how users are both viewing and
+interacting with your project.
+
+Using GTM’s web-based user interface, you can define tags and create triggers that cause your tag to
+fire when certain events occur. In your 8th Wall project, fire events (using a single line of
+Javascript) at desired places in your code.
+
+## Analytics Pre-requisites {#analytics-pre-requisites}
+
+You must already have Google Analytics and Google Tag Manager accounts and have a basic understanding of how they work.
+
+For more information, please refer to the following Google documentation:
+
+* Google Analytics 4
+ * Getting Started:
+ * Add a Data Stream:
+* Google Tag Manager
+ * Overview:
+ * Setup and Install:
+
+## Add Google Tag Manager to your 8th Wall Project {#add-google-tag-manager-to-your-8th-wall-project}
+
+1. On the Workspace page of your Tag Manager container, click your container ID (e.g.
+"**GTM-XXXXXX**") to open the "Install Google Tag Manager" box. This window contains the code that
+you’ll later need to add to your 8th Wall project.
+
+
+
+2. Open your project code and paste the **top** code block into the `<head>` of `index.html`.
+
+3. Paste the contents of the **bottom** code block into the `<body>` of `index.html`.
+
+## Configure Google Tag Manager {#configure-google-tag-manager}
+
+1. Locate the [Google Measurement ID](https://support.google.com/analytics/answer/12270356) for your data stream.
+2. In GTM, [create a GA4 Configuration](https://support.google.com/tagmanager/answer/9442095#config) tag.
+
+Example:
+
+
+
+## Tracking Page Views {#tracking-page-views}
+Page views will be automatically tracked through the GA4 Configuration tag. See the [Configure Google Tag Manager](#configure-google-tag-manager) section for more information.
+
+## Tracking Custom Events {#tracking-custom-events}
+
+GTM also provides the ability to fire events when custom actions take place **inside** the WebAR
+experience. These events will be particular to your WebAR project, but some examples might be:
+
+* 3D object placed
+* Image Target found
+* Screenshot taken
+* etc…
+
+In this example, we’ll create a Tag (with Trigger) and add it to the
+["AFrame: Place Ground"](https://www.8thwall.com/8thwall/placeground-aframe) sample project that
+fires each time a 3D model is spawned.
+
+#### Create Custom Event Trigger {#create-custom-event-trigger}
+
+* Trigger Type: **Custom Event**
+* Event Name: **placeModel**
+* This trigger fires on: **All Custom Events**
+
+
+
+#### Create Tag {#create-tag-1}
+
+Next, create a tag that will fire when the "placeModel" trigger is fired in your code.
+
+* Tag Type: **Google Analytics: GA4 Event**
+* Configuration Tag: (Select configuration created previously)
+* Event Name: **Place Model**
+* Triggering: **Select "placeModel" trigger created in the previous step.**
+
+
+
+**IMPORTANT**: Make sure to save all triggers/tags created and then **Submit/Publish** your
+settings inside the GTM interface so they are live. See
+
+#### Fire Event Inside 8th Wall Project {#fire-event-inside-8th-wall-project}
+
+In your 8th Wall project, add the following line of javascript to fire this trigger at the desired place in your code:
+
+`window.dataLayer.push({event: 'placeModel'})`
+
+##### Example - based on [tap-place.js from the Place Ground sample](https://www.8thwall.com/8thwall/placeground-aframe) {#example---based-on-httpswww8thwallcom8thwallplaceground-aframemastertap-placejs}
+
+```javascript
+export const tapPlaceComponent = {
+ init: function() {
+ const ground = document.getElementById('ground')
+ ground.addEventListener('click', event => {
+ // Create new entity for the new object
+ const newElement = document.createElement('a-entity')
+
+ // The raycaster gives a location of the touch in the scene
+ const touchPoint = event.detail.intersection.point
+ newElement.setAttribute('position', touchPoint)
+
+ const randomYRotation = Math.random() * 360
+ newElement.setAttribute('rotation', '0 ' + randomYRotation + ' 0')
+
+ newElement.setAttribute('visible', 'false')
+ newElement.setAttribute('scale', '0.0001 0.0001 0.0001')
+
+ newElement.setAttribute('shadow', {
+ receive: false,
+ })
+
+ newElement.setAttribute('class', 'cantap')
+ newElement.setAttribute('hold-drag', '')
+
+ newElement.setAttribute('gltf-model', '#treeModel')
+ this.el.sceneEl.appendChild(newElement)
+
+ newElement.addEventListener('model-loaded', () => {
+ // Once the model is loaded, we are ready to show it popping in using an animation
+ newElement.setAttribute('visible', 'true')
+ newElement.setAttribute('animation', {
+ property: 'scale',
+ to: '7 7 7',
+ easing: 'easeOutElastic',
+ dur: 800,
+ })
+
+ // **************************************************
+ // Fire Google Tag Manager event once model is loaded
+ // **************************************************
+ window.dataLayer.push({event: 'placeModel'})
+ })
+ })
+ }
+}
+```
diff --git a/docs/engine/guides/09-image-targets.md b/docs/engine/guides/09-image-targets.md
new file mode 100644
index 0000000..05cbee3
--- /dev/null
+++ b/docs/engine/guides/09-image-targets.md
@@ -0,0 +1,322 @@
+---
+id: image-targets
+---
+
+# Image Targets
+
+Bring signage, magazines, boxes, bottles, cups, and cans to life with 8th Wall Image Targets. 8th
+Wall Web can detect and track flat, cylindrical and conical shaped image targets, allowing you to
+bring static content to life.
+
+Not only can your designated image target trigger a web AR experience, but your content also has the
+ability to track directly to it.
+
+Image targets can work in tandem with our World Tracking (SLAM), enabling experiences that combine
+image targets and markerless tracking.
+
+You may track up to 5 image targets simultaneously with World Tracking enabled or up to 10 when it
+is disabled.
+
+Up to 5 image targets per project can be **"Autoloaded"**. An Autoloaded image target is enabled
+immediately as the page loads. This is useful for apps that use 5 or fewer image targets such as
+product packaging, a movie poster or business card.
+
+The set of active image targets can be changed at any time by calling
+[XR8.XrController.configure()](/docs/engine/api/xrcontroller/configure). This lets you manage hundreds of image
+targets per project making possible use cases like geo-fenced image target hunts, AR books, guided
+art museum tours and much more. If your project utilizes SLAM most of the time but image targets
+some of the time, you can improve performance by only loading image targets when you need them. You
+can even read uploaded target names from URL parameters stored in different QR Codes, allowing you
+to have different targets initially load in the same web app depending on which QR Codes the user
+scans to enter the experience.
+
+## Image Target Types {#image-target-types}
+
+ | |
+-|-|-
+**Flat**|| Track 2D images like posters, signs, magazines, boxes, etc.
+**Cylindrical**|| Track images wrapped around cylindrical items like cans and bottles.
+**Conical**|| Track images wrapped around objects with a different top vs bottom circumference, like coffee cups, etc.
+
+## Image Target Requirements {#image-target-requirements}
+
+* File Types: **.jpg**, **.jpeg** or **.png**
+* Dimensions:
+ * Minimum: **480 x 640 pixels**
+ * Maximum length or width: **2048 pixels**.
+ * Note: If you upload something larger, the image is resized down to a max length/width of 2048
+ , maintaining aspect ratio.
+* Hosting: All image targets must be uploaded to your 8th Wall project before they can be
+ used. You can self-host the rest of your Web AR experience (if on an Enterprise or Legacy Pro
+ plan) but the source image target is always hosted by 8th Wall. Please see below for instructions
+ on creating/uploading flat or curved image targets.
+
+## Image Target Quantities {#image-target-quantities}
+
+There is no limit to the number of image targets that can be associated with a project, however,
+there are limits to the number of image targets that can be **active** in the user's browser at any
+given time.
+
+* Active image targets per Project: **32**
+
+## Manage Image Targets {#manage-image-targets}
+
+Click the Image Target icon in the left navigation or the "Manage Image Targets" link on the Project
+Dashboard to manage your image targets.
+
+This screen allows you to create, edit, and delete the image targets associated with your project.
+Click on an existing image target to edit. Click the "+" icon for the desired image target type to
+create a new one.
+
+## Create Flat Image Target {#create-flat-image-target}
+
+1. Click the "+ Flat" icon to create a new flat image target.
+
+
+
+2. **Upload Flat Image Target**: Drag your image (.jpg, .jpeg or .png) into the upload panel, or click within the dotted region and use your file browser to select your image.
+
+3. **Set Tracking Region** (and Orientation): Use the slider to set the region of the image that will be used to detect and track your target within the WebAR experience. The rest of the image will be discarded, and the region which you specify will be tracked in your experience.
+
+
+
+4. **Edit Flat Image Target properties**:
+
+* (1) Give your image target a **name** by editing the field at the top left of the window.
+* (2) **IMPORTANT!** Test your image target: The best way to determine if your uploaded image will make a good or bad image target (see [Optimizing Image Target Tracking](#optimizing-image-target-tracking)) is to use the Simulator to assess tracking quality. Scan the QR code with your camera app to open the simulator link, then point your device at the screen or physical object.
+* (3) Click **Load automatically** if you want the image target to be enabled automatically as the WebAR project loads. Up to 5 image targets can be loaded automatically without writing a single line of code. More targets can be loaded programmatically through the Javascript API.
+* (4) Optional: If you would like to add metadata to your image, in either Text or JSON format, click the **Metadata** button at the bottom of the window.
+
+
+
+5. Changes made on this screen are automatically saved. Click **Close** to return to your image target library.
+
+## Create Cylindrical Image Target {#create-cylindrical-image-target}
+
+1. Click the "+ Cylindrical" icon to create a new cylindrical image target.
+
+
+
+2. **Upload Cylindrical Image Target**: Drag your image (.jpg, .jpeg or .png) into the upload panel, or click within the dotted region and use your file browser to select your image.
+
+3. **Set Tracking Region** (and Orientation): Use the slider to set the region of the image that will be used to detect and track your target within the WebAR experience. The rest of the image will be discarded, and the region which you specify will be tracked in your experience.
+
+
+
+4. **Edit Cylindrical Image Target properties**:
+
+* (1) Give your image target a **name** by editing the field at the top left of the window.
+* (2) **Drag the sliders** until the shape of your label appears as expected in the simulator, or **input the measurements** directly.
+* (3) **IMPORTANT!** Test your image target: The best way to determine if your uploaded image will make a good or bad image target (see [Optimizing Image Target Tracking](#optimizing-image-target-tracking)) is to use the Simulator to assess tracking quality. Scan the QR code with your camera app to open the simulator link, then point your device at the screen or physical object.
+* (4) Click **Load automatically** if you want the image target to be enabled automatically as the WebAR project loads. Up to 5 image targets can be loaded automatically without writing a single line of code. More targets can be loaded programmatically through the Javascript API.
+* (5) Optional: If you would like to add metadata to your image, in either Text or JSON format, click the **Metadata** button at the bottom of the window.
+
+
+
+5. Changes made on this screen are automatically saved. Click **Close** to return to your image target library.
+
+## Create Conical Image Target {#create-conical-image-target}
+
+1. Click the "+ Conical" icon to create a new conical image target.
+
+
+
+2. **Upload Conical Image Target**: Drag your image (.jpg, .jpeg or .png) into the upload panel, or click within the dotted region and use your file browser to select your image. The uploaded image should be in "unwrapped", aka "rainbow" format, cropped like so:
+
+
+
+3. **Set Large Arc Alignment**: Drag the slider until the **red** line overlays the uploaded image's **large arc**.
+
+
+
+4. **Set Small Arc Alignment**: Do the same for the small arc. Drag the slider until the **blue** line overlays the uploaded image's **small arc**.
+
+5. **Set Tracking Region** (and Orientation): Drag and zoom on the image to set the portion of the image that is detected and tracked. This should be the most feature rich area of your image.
+
+
+
+6. **Edit Conical Image Target properties**:
+
+* (1) Give your image target a **name** by editing the field at the top left of the window.
+* (2) **Drag the sliders** until the shape of your label appears as expected in the simulator, or **input the measurements** directly.
+* (3) **IMPORTANT!** Test your image target: The best way to determine if your uploaded image will make a good or bad image target (see [Optimizing Image Target Tracking](#optimizing-image-target-tracking)) is to use the Simulator to assess tracking quality. Scan the QR code with your camera app to open the simulator link, then point your device at the screen or physical object.
+* (4) Click **Load automatically** if you want the image target to be enabled automatically as the WebAR project loads. Up to 5 image targets can be loaded automatically without writing a single line of code. More targets can be loaded programmatically through the Javascript API.
+* (5) Optional: If you would like to add metadata to your image, in either Text or JSON format, click the **Metadata** button at the bottom of the window.
+
+
+
+7. Changes made on this screen are automatically saved. Click **Close** to return to your image target library.
+
+## Edit Image Targets {#edit-image-targets}
+
+Click on any of the image targets under **My Image Targets** to view and/or modify their properties:
+
+1. Image Target Name
+2. Sliders / Measurements (Cylindrical/Conical image targets only)
+3. Simulator QR Code
+4. Delete Image Target
+5. Load Automatically
+6. Metadata
+7. Orientation and Dimensions
+8. Autosave status
+9. Close
+
+Type | Fields
+---- | ------
+Flat | 
+Cylindrical | 
+Conical | 
+
+## Changing Active Image Targets {#changing-active-image-targets}
+
+The set of active image targets can be modified at runtime by calling
+[XR8.XrController.configure()](/docs/engine/api/xrcontroller/configure)
+
+Note: The set of currently active image targets will be **replaced** with the new set passed to
+[XR8.XrController.configure()](/docs/engine/api/xrcontroller/configure).
+
+#### Example - Change active image target set {#example---change-active-image-target-set}
+
+```javascript
+XR8.XrController.configure({imageTargets: ['image-target1', 'image-target2', 'image-target3']})
+```
+
+## Optimizing Image Target Tracking {#optimizing-image-target-tracking}
+
+To ensure the highest quality image target tracking experience, be sure to follow these guidelines when selecting an image target.
+
+***DO*** have:
+
+* a lot of varied detail
+* high contrast
+
+***DON'T*** have:
+
+* repetitive patterns
+* excessive dead space
+* low resolution images
+
+Color: Image target detection cannot distinguish between colors, so don't rely on it as a key differentiator between targets.
+
+For best results, use images on flat, cylindrical or conical surfaces for image target tracking.
+
+Consider the reflectivity of your image target's physical material. Glossy surfaces and screen reflections can lower tracking quality. Use matte materials in diffuse lighting conditions for optimal tracking quality.
+
+Note: Detection happens fastest in the center of the screen.
+
+Good Markers | Bad Markers
+---------------------- | ------------------------
+ |  |
+ | 
+
+## Image Target Events {#image-target-events}
+
+8th Wall Web emits Events / Observables for various events in the image target lifecycle (e.g. imageloading, imagescanning, imagefound, imageupdated, imagelost). Please see the API reference for instructions on handling these events in your Web Application:
+
+* [AFrame Events](/docs/engine/api/aframeevents)
+* [BabylonJS Observables](/docs/engine/api/babylonjs/observables)
+* [PlayCanvas Events](/docs/engine/api/playcanvasevents/playcanvas-image-target-events)
+* [XrController Dispatched Events](/docs/engine/api/xrcontroller/pipelinemodule/#dispatched-events)
+
+#### Example Projects {#example-projects}
+
+
+
+
+
+---
+
+The new API allows image targets to be defined directly from code as `XrController.configure({imageTargetData: [{...}]})`.
+
+## Flat Image Target
+
+If you are setting up a new flat image target:
+
+1. Start by converting the image to grayscale and cropping/scaling/rotating to 480x640 pixels.
+2. Define the image target data:
+```
+const imageTarget = {
+ "imagePath": "./path/to/image.png",
+ "metadata": {}, // available for custom use cases
+ "name": "my-image-target",
+ "type": "PLANAR",
+ "properties": {
+ "left": 0,
+ "top": 0,
+ "width": 480,
+ "height": 640,
+ "originalWidth": 480,
+ "originalHeight": 640,
+ "isRotated": false, // set this to true if you rotated the image when you cropped it
+ }
+}
+```
+
+## Cylindrical Image Target
+
+For cylindrical targets, there are more parameters to provide (`cylinderSideLength`, `cylinderCircumferenceTop`, `targetCircumferenceTop`, `cylinderCircumferenceBottom`, `arcAngle`, `coniness`)
+
+1. Define the image target data:
+```
+const imageTarget = {
+ "imagePath": "./path/to/image.png",
+ "metadata": {}, // available for custom use cases
+ "name": "my-image-target",
+ "type": "CYLINDER",
+ "properties": {
+ "left": 0,
+ "top": 18,
+ "width": 1476,
+ "height": 1968,
+ "originalWidth": 1476,
+ "originalHeight": 2000,
+ "isRotated": false, // set this to true if you rotated the image when you cropped it
+ "cylinderSideLength": 135.5,
+    "cylinderCircumferenceTop": 257.14, // This should match circumference bottom
+ "targetCircumferenceTop": 100,
+ "cylinderCircumferenceBottom": 257.14,
+ "arcAngle": 140.0,
+ "coniness": 0, // This should be 0 for all cylinders
+ "inputMode": "BASIC",
+ "unit": "mm",
+ }
+}
+```
+
+## Conical Image Target
+
+For conical targets, there is the unconification, plus the input format is different from what was uploaded to the web.
+
+1. Start with a rainbow image cropped by the top (dotted green line) and bottom (bold green line) radius
+
+
+
+2. Stretch the rainbow image to be flat, and crop/scale/rotate the dimensions of the flat, unconified image to be a grayscale 480x640 pixels (this cropped image should be the image provided in the image path)
+3. Define the image target data:
+```
+const imageTarget = {
+ "imagePath": "./path/to/image.png", // path to unconified image
+ "metadata": {}, // available for custom use cases
+ "name": "my-image-target",
+ "type": "CONICAL",
+ "properties": {
+ "left": 177,
+ "top": 554,
+ "width": 564,
+ "height": 752,
+ "originalWidth": 842,
+ "originalHeight": 2000,
+ "isRotated": false, // set this to true if you rotated the image when you cropped it
+ "topRadius": 4479,
+ "bottomRadius": 3630.644,
+ "cylinderSideLength": 21.05,
+ "cylinderCircumferenceTop": 100,
+ "targetCircumferenceTop": 50,
+ "cylinderCircumferenceBottom": 81.06,
+ "arcAngle": 180,
+ "coniness": 0.303, // log2(topRadius / bottomRadius)
+ "inputMode": "BASIC",
+ "unit": "mm",
+ }
+}
+```
diff --git a/docs/engine/guides/12-iframe.md b/docs/engine/guides/12-iframe.md
new file mode 100644
index 0000000..4ad4af5
--- /dev/null
+++ b/docs/engine/guides/12-iframe.md
@@ -0,0 +1,135 @@
+---
+id: ios-8th-wall-web-inside-an-iframe
+---
+# Working with iframes
+
+## iframe Setup for Android and iOS 15+ {#iframe-setup-for-android-and-ios-15}
+
+To allow Inline AR for Android and iOS 15+, you must include an allow parameter in your iframe with
+the following [feature-policy directives](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy#directives):
+
+```html
+
+```
+
+NOTE: microphone is optional.
+
+## LEGACY METHOD: Supporting iOS versions prior to iOS 15 {#legacy-method-supporting-ios-versions-prior-to-ios-15}
+
+The following is **ONLY** required for supporting Inline AR in iOS versions prior to iOS 15. Given
+the high adoption of iOS 15+, we **NO LONGER** recommend using this approach.
+
+See the latest iOS adoption stats from Apple:
+
+In addition to including the allow parameter with the correct
+[feature-policy directives](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy#directives)
+in your iframe as explained above, to support World Tracking projects on iOS versions prior to iOS
+15, you must also include additional javascript on both the OUTER and INNER AR pages as explained
+below.
+
+In these versions, Safari blocks deviceorientation and devicemotion event access from cross-origin
+iframes. To counter this, you must include two scripts in your project to ensure cross-compatibility
+with iOS when deploying World Tracking projects.
+
+This is **not required for Face Effects or Image Target projects** (with `disableWorldTracking` set
+to `true`).
+
+When implemented correctly, this process enables the OUTER website to send motion events down to the
+INNER AR website, a requirement for World Tracking.
+
+#### For the OUTER website {#for-the-outer-website}
+
+**iframe.js** must be included in the **HEAD** of the **OUTER** page via this script tag:
+
+```html
+
+```
+
+When starting AR, register the XRIFrame by iframe ID:
+
+```js
+window.XRIFrame.registerXRIFrame(IFRAME_ID)
+```
+
+When stopping AR, deregister the XRIFrame:
+
+```js
+window.XRIFrame.deregisterXRIFrame()
+```
+
+#### For the INNER website {#for-the-inner-website}
+
+**iframe-inner.js** must be included in the **HEAD** of your **INNER AR** website with this script tag:
+
+```html
+
+```
+
+By allowing the inner and outer windows to communicate, deviceorientation/devicemotion data can be shared.
+
+See sample project at
+
+#### Examples {#examples}
+
+##### Outer Page {#outer-page}
+
+```jsx
+// Send deviceorientation/devicemotion to the INNER iframe
+
+
+...
+const IFRAME_ID = 'my-iframe' // Iframe containing AR content.
+const onLoad = () => {
+ window.XRIFrame.registerXRIFrame(IFRAME_ID)
+}
+// Add event listeners and callbacks for the body DOM.
+window.addEventListener('load', onLoad, false)
+
+...
+
+
+
+
+```
+
+##### Inner Page: AFrame projects {#inner-page-aframe-projects}
+
+```html
+
+
+
+
+
+...
+
+
+
+
+
+ ...
+
+```
+
+##### Inner Page: Non-AFrame projects {#inner-page-non-aframe-projects}
+
+```html
+
+
+
+
+
+...
+
+
+XR8.addCameraPipelineModules([
+ // Custom pipeline modules
+ iframeInnerPipelineModule,
+])
+```
diff --git a/docs/engine/guides/13-progressive-web-apps.md b/docs/engine/guides/13-progressive-web-apps.md
new file mode 100644
index 0000000..a9e6743
--- /dev/null
+++ b/docs/engine/guides/13-progressive-web-apps.md
@@ -0,0 +1,150 @@
+---
+id: progressive-web-apps
+---
+# Progressive Web Apps
+
+Progressive Web Apps (PWAs) use modern web capabilities to offer users an experience that's similar
+to a native application. The 8th Wall Cloud Editor allows you to create a PWA version of your
+project so that users can add it to their home screen. Users must be **connected to the internet**
+in order to access it.
+
+To enable PWA support for your WebAR project:
+
+1. Visit your project settings page, and expand the “Progressive Web App” pane. (Only visible to paid workspaces)
+2. Toggle the slider to Enable PWA support.
+3. Customize your PWA name, icon, and colors.
+4. Click "Save"
+
+**Note**: For Cloud Editor projects, you may be prompted to build & re-publish your project if it
+was previously published. If you decide not to republish, PWA support will be included the next
+time your project is built.
+
+## PWA API Reference {#pwa-api-reference}
+
+8th Wall's **XRExtras** library provides an API to automatically display an install prompt in your web app.
+
+Please refer to the `PwaInstaller` API reference at
+
+## PWA Icon Requirements {#pwa-icon-requirements}
+
+* File Types: **.png**
+* Aspect Ratio: **1:1**
+* Dimensions:
+ * Minimum: **512 x 512 pixels**
+ * Note: If you upload an image larger than 512x512, it will be cropped to a 1:1 aspect ratio and resized down to 512x512.
+
+## PWA Install Prompt Customization {#pwa-install-prompt-customization}
+
+The [PwaInstaller](https://github.com/8thwall/web/tree/master/xrextras/src/pwainstallermodule)
+module from XRExtras displays an install prompt asking your user to add your web app to their home
+screen.
+
+To customize the look of your install prompt, you can provide custom string values through the
+[XRExtras.PwaInstaller.configure()](https://github.com/8thwall/web/tree/master/xrextras/src/pwainstallermodule#configure) API.
+
+For a completely custom install prompt, configure the installer with
+[displayInstallPrompt](https://github.com/8thwall/web/tree/master/xrextras/src/pwainstallermodule#displayinstallprompt)
+and
+[hideInstallPrompt](https://github.com/8thwall/web/tree/master/xrextras/src/pwainstallermodule#hideinstallprompt)
+
+## Self-Hosted PWA Usage {#self-hosted-pwa-usage}
+
+For Self-Hosted apps, we aren’t able to automatically inject details of the PWA into the HTML, so
+you must use the configure API to provide the name and icon you’d like to appear in the install
+prompt.
+
+Add the following `` tags to the `` of your html:
+
+``
+
+``
+
+## PWA Code Examples {#pwa-code-examples}
+
+#### Basic Example (AFrame) {#basic-example-aframe}
+
+```html
+
+```
+
+#### Basic Example (Non-AFrame) {#basic-example-non-aframe}
+
+```javascript
+XR8.addCameraPipelineModules([
+ XR8.GlTextureRenderer.pipelineModule(),
+ XR8.Threejs.pipelineModule(),
+ XR8.XrController.pipelineModule(),
+ XRExtras.AlmostThere.pipelineModule(),
+ XRExtras.FullWindowCanvas.pipelineModule(),
+ XRExtras.Loading.pipelineModule(),
+ XRExtras.RuntimeError.pipelineModule(),
+
+ XRExtras.PwaInstaller.pipelineModule(), // Added here
+
+ // Custom pipeline modules.
+ myCustomPipelineModule(),
+])
+
+```
+
+#### Customized Look Example (AFrame) {#customized-look-example-aframe}
+
+```html
+
+```
+
+#### Customized Look Example (Non-AFrame) {#customized-look-example-non-aframe}
+
+```javascript
+XRExtras.PwaInstaller.configure({
+ displayConfig: {
+ name: 'My Custom PWA Name',
+ iconSrc: '//cdn.8thwall.com/my_custom_icon',
+ installTitle: ' My Custom Title',
+ installSubtitle: 'My Custom Subtitle',
+ installButtonText: 'Custom Install',
+ iosInstallText: 'Custom iOS Install',
+ }
+})
+```
+
+#### Customized Display Time Example (AFrame) {#customized-display-time-example-aframe}
+
+```html
+
+```
+
+#### Customized Display Time Example (Non-AFrame) {#customized-display-time-example-non-aframe}
+
+```javascript
+XRExtras.PwaInstaller.configure({
+ promptConfig: {
+ minNumVisits: 5, // Users must visit web app 5 times before prompt
+ displayAfterDismissalMillis: 86400000 // One day
+ }
+})
+```
diff --git a/docs/engine/guides/17-local-hosting.mdx b/docs/engine/guides/17-local-hosting.mdx
new file mode 100644
index 0000000..0f57e45
--- /dev/null
+++ b/docs/engine/guides/17-local-hosting.mdx
@@ -0,0 +1,81 @@
+---
+id: local-hosting
+---
+# Local Hosting
+
+## Getting started {#getting-started}
+
+Follow the getting started guide by cloning self-hosted sample projects from the [8th Wall Github Repository](https://github.com/8thwall/web).
+
+To develop locally you need Node.js and npm installed. If you don't already have Node.js and npm installed, [get it here](https://www.npmjs.com/get-npm).
+
+1. Download the source code from the [8th Wall Github Repository](https://github.com/8thwall/web) and `cd` into an example of your choice (aframe is recommended for beginners).
+2. Serve a project directory on your local network over HTTPS with [http-server](https://github.com/http-party/http-server#readme). See Serve projects over HTTPS.
+3. Connect to your server from your device, accept certificate warnings and camera permissions. See View Project on iOS or View Project on Android.
+
+## Serve projects over HTTPS {#serve-projects-over-https}
+
+Browsers require HTTPS certificates to access the camera. Use [http-server](https://github.com/http-party/http-server#readme) to serve project directories with HTTPS.
+
+First, you need to make sure that [openssl](https://github.com/openssl/openssl) is installed, and you have key.pem and cert.pem files. You can generate them using this command:
+
+```
+openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout key.pem -out cert.pem
+```
+
+You will be prompted with a few questions after entering the command. Use 127.0.0.1 as the value for Common Name if you want to be able to install the certificate in your OS's root certificate store or browser so that it is trusted.
+
+This generates a cert-key pair and it will be valid for 3650 days (about 10 years).
+
+Then you can run `http-server` with `-S` for enabling SSL and `-C` for your certificate file:
+
+```
+npx http-server [project-path] -S -C cert.pem
+```
+
+Example:
+
+```sh
+npx http-server gettingstarted/aframe/ -S -C cert.pem
+```
+
+
+
+**NOTE**: The first IP address listed is **127.0.0.1:8080** (which is the loopback
+device aka "localhost") and your mobile phone won't be able to connect to that IP address directly.
+Please use one of the other IP addresses.
+
+**WINDOWS USERS**: Run the http-server command using a standard Command Prompt window (cmd.exe). The script may generate errors if run from PowerShell.
+
+Learn more in the [http-server documentation](https://github.com/http-party/http-server#tlsssl).
+
+## View Project on iOS {#view-project-on-ios}
+
+1. Open **Safari on iOS 11+**, and connect to one of the “Available on” URLs. Note: Safari will complain about
+the SSL certificates, but you can safely proceed.
+
+**IMPORTANT**: Make sure to copy the **entire** "Available on" URL into your browser, including both
+the "**https://**" at the beginning and **port** number at the end.
+
+Example: `https://10.0.0.99:8080`
+
+2. Click "visit this website":
+
+3. Click "Show Details":
+
+4. Click "Visit Website":
+
+5. Finally, click "Allow" to grant camera permissions and start viewing the sample AR experience:
+
+## View Project on Android {#view-project-on-android}
+
+1. Open **Chrome**, a **Chrome-variant** (e.g. Samsung browser) or **Firefox**
+
+**IMPORTANT**: Make sure to copy the **entire** "Available on" URL into your browser, including both
+the "**https://**" at the beginning and **port** number at the end.
+
+Example: `https://10.0.0.99:8080`
+
+2. Chrome Example: The browser will complain that the cert is invalid, simply click `'ADVANCED'` to proceed:
+
+3. Click "PROCEED TO ... (UNSAFE)":
diff --git a/docs/engine/guides/_category_.json b/docs/engine/guides/_category_.json
new file mode 100644
index 0000000..e790508
--- /dev/null
+++ b/docs/engine/guides/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Guides",
+ "position": 2
+}
diff --git a/docs/help-support.mdx b/docs/help-support.mdx
new file mode 100644
index 0000000..6cd236a
--- /dev/null
+++ b/docs/help-support.mdx
@@ -0,0 +1,46 @@
+---
+id: help
+sidebar_position: 999
+---
+# Help & Support
+
+**Need help?** The 8th Wall community is here to help you succeed.
+
+
+ Report an issue with the Desktop App or 8th Wall Engine on the github repository.
+
+
+
diff --git a/docs/migration/8thwall-hosted.md b/docs/migration/8thwall-hosted.md
new file mode 100644
index 0000000..ce86b5d
--- /dev/null
+++ b/docs/migration/8thwall-hosted.md
@@ -0,0 +1,78 @@
+---
+id: 8thwall-hosted
+sidebar_position: 2
+---
+
+# 8th Wall Hosted Projects
+
+:::danger[Important]
+As of February 28, 2026, access to 8thwall.com has ended. It is no longer possible to export 8th Wall hosted projects and migrate them to the open source ecosystem. If you have specific account or project inquiries, please contact support@nianticspatial.com.
+:::
+
+This guide walks through the process of migrating 8th Wall-hosted projects to a self-hosted setup. It applies to all projects previously hosted on 8th Wall, including both **Editor** and **Studio** projects.
+
+
+
+## Project Structure
+
+If you previously exported your 8th Wall project's buildable code, you will have a `.zip` file containing the following contents:
+
+
+
+### config
+
+Contains the necessary webpack configuration and typescript definitions to support project development.
+
+### external
+
+Contains dependencies used by your project, loaded in `index.html`.
+
+### image-targets
+
+Contains your project's image targets (if any).
+
+### src
+
+Contains all your original project code and assets.
+
+:::info
+For Studio projects, the scene graph is stored in a file called `.expanse.json` which may not be visible in your file viewer by default.
+:::
+
+## Development
+
+If your project was built with Studio, open the project in the [8th Wall Desktop App](/docs/studio/getting-started/installation/). With the 8th Wall Desktop App, you can seamlessly continue development and test across desktop and mobile devices without any additional steps.
+
+If your project was built with a web 3D framework such as A-Frame or three.js, you can continue developing using an IDE of your choice and follow the steps below to test the project in realtime on desktop and mobile devices.
+
+:::warning[Important]
+Be sure to make [required project updates](/docs/migration/project-updates.md) and review updated documentation on [publishing your project](/docs/engine/getting-started/publishing).
+:::
+
+### Test on Desktop
+
+1. If node/npm are not installed, install using https://github.com/nvm-sh/nvm or https://nodejs.org/en/download
+2. `cd` to the project root and run `npm install`.
+
+
+
+3. Run `npm run serve` to run the development server. Once the local server is running, you will see the URL/IP addresses your project is running at.
+
+
+
+4. Open a new browser window and paste in the loopback URL or IP address to test your project in development mode.
+
+
+
+### Test on Mobile
+
+To test your project on mobile devices, especially for AR experiences that require camera access, you'll need to serve your development server over HTTPS. We recommend using [ngrok](https://ngrok.com/) to create a secure tunnel to your local server.
+
+After setting up ngrok, add the following configuration to `config/webpack.config.js` under the `devServer` section:
+
+```javascript
+devServer: {
+ // ... existing config
+ allowedHosts: ['.ngrok-free.dev']
+}
+```
diff --git a/docs/migration/_category_.json b/docs/migration/_category_.json
new file mode 100644
index 0000000..507466e
--- /dev/null
+++ b/docs/migration/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Migration Guide",
+ "position": 2
+}
diff --git a/docs/migration/faq.md b/docs/migration/faq.md
new file mode 100644
index 0000000..0c1e6a6
--- /dev/null
+++ b/docs/migration/faq.md
@@ -0,0 +1,295 @@
+---
+id: faq
+sidebar_position: 999
+---
+
+# FAQ
+
+## What’s Happening
+
+### What is happening with 8th Wall?
+All 8th Wall products and services — including Studio, Cloud Editor, and Asset Lab — will be discontinued over the coming year. In the meantime, we are actively exploring options to open source key components of the 8th Wall technology and documentation so the creativity built on 8th Wall can continue to thrive in the developer community.
+
+While the hosted 8th Wall platform will eventually wind down, core parts of its engine, SDK, and documentation will be released for public use.
+
+### What does it mean for 8th Wall to open source?
+Going open source means 8th Wall’s non-proprietary technology — including parts of its SDK, runtime, tools, and documentation — will be publicly available for developers to use, modify, and build upon.
+
+Rather than a closed, hosted platform, 8th Wall will live on as community-driven code. This transition ensures the technology continues to evolve in the hands of the developers, artists, and creators who shaped it.
+
+### Which parts of 8th Wall will be open sourced?
+We plan to release key parts of the 8th Wall SDK, runtime, and developer tools as open-source projects. This includes documentation, sample projects, and tooling that is not tied to proprietary computer vision or SLAM systems.
+
+Not every hosted component can be maintained long-term, but we aim to release the building blocks that matter most. Specific repositories and contribution guidelines will be shared as they become available.
+
+---
+
+## Key Dates and Access
+
+### What’s the timeline for this transition?
+We’re shifting to open source gradually to give developers time to export projects and access documentation.
+
+| Date | What Happens |
+|------|-------------|
+| **February 28, 2026** | End of platform access. All accounts lose the ability to create, edit, publish, or export projects. |
+| **Feb 28, 2026 – Feb 28, 2027** | All hosted projects and experiences remain live and accessible. |
+| **After Feb 28, 2027** | Hosting services will be decommissioned and project data deleted per retention policy. |
+
+### Will I still be able to log in or access my projects after February 28, 2026?
+No. After February 28, 2026, you will no longer be able to log in to the 8th Wall platform.
+
+Hosted projects will remain live through **February 28, 2027**, but they will be locked and cannot be edited or exported.
+
+---
+
+## Exporting Your Projects
+
+### Can I download my projects and assets?
+Yes. You can export your projects **before February 28, 2026**. [Documentation on how to export your 8th Wall-hosted projects](/docs/migration/8thwall-hosted) has been provided to help you archive or migrate your work.
+
+### What’s the difference between Code Export and Buildable Code Export?
+**Code Export** provides a snapshot of your project code and assets for reference and backup.
+
+**Buildable Code Export** includes everything required to build and run the project offline or in a self-hosted environment, including the distributed engine binary and runtime components.
+
+### What exactly is included in each exported project?
+Each **Buildable Code Export** includes:
+- Project source code
+- Assets and image targets
+- Build configuration
+- XR runtime components
+- A copy of the distributed 8th Wall Engine binary required to run the project locally
+
+### What is not included in Buildable Code Export?
+Buildable Code Export does not include cloud-dependent services or licensed IP that cannot be distributed for offline or self-hosted use.
+
+Specifically, exported projects do **not** include:
+- Niantic Spatial VPS (also known as Lightship VPS for Web)
+- Lightship Maps
+- Hand Tracking
+
+If your project relies on these services, it may continue to function while hosted on the 8th Wall platform, but it will not run offline using Buildable Code Export.
+
+We strongly recommend exporting and running your project locally to validate which features are supported in your self-hosted workflow.
+
+### Do I need to export projects that are already live?
+Yes. If you want to continue developing, rebuilding, or migrating a project after platform access ends, you must export it before February 28, 2026.
+
+Live projects that are not exported will continue running temporarily but cannot be modified.
+
+### What happens if I don’t export my project before February 28, 2026?
+If you do not export a project before February 28, 2026, you will not be able to download or modify it after platform access ends.
+
+Projects that are live on that date will continue to run as-is until February 28, 2027, but they will be locked and cannot be edited or exported.
+
+### Do I need to export with Buildable Code Export if I have a self-hosted project?
+No. Existing self-hosted projects will continue to work without changes until February 28, 2027.
+
+If your project is not cloud-dependent (does not use VPS, Maps, Hand Tracking, or Modules) and you want to migrate to the open-source engine to keep your project live beyond February 28, 2027, follow the [self-hosted project migration guide](/docs/migration/self-hosted).
+
+### If I use the 8th Wall Desktop App, do I still need to export my project?
+You still must export your project before February 28, 2026 in order to continue developing or self-hosting it. You can export your project directly from the Desktop App or from 8thwall.com.
+
+### Should I test my exported project before February 28, 2026?
+Yes. We strongly recommend exporting and running your project locally as soon as possible to validate your workflow and identify any dependencies on cloud-based services.
+
+### Will there be documentation or tools to help with migration?
+Yes. We will release documentation, export utilities, and examples to support developers in self-hosting or rebuilding projects using the open-source version.
+
+---
+
+## Studio Desktop App Offline Mode
+
+### What is Offline Mode for the Studio Desktop App?
+Offline Mode allows you to continue developing locally in the Desktop App without logging in to the 8th Wall platform. It removes account-based features and focuses on local and exported project workflows.
+
+### Is Offline Mode required immediately?
+No. Offline Mode is optional until February 28, 2026.
+
+After that date, login will stop working and the Desktop App will operate in Offline Mode automatically. If you haven’t switched earlier, your projects will prompt you to migrate to the offline workflow at that time.
+
+We recommend switching now so you can validate your local setup before platform access ends.
+
+### Can I switch back after enabling Offline Mode?
+No. Once enabled, Offline Mode cannot be reversed without uninstalling and reinstalling the Desktop App.
+
+### What projects can I work on in Offline Mode?
+You can work on projects previously opened on your device, exported projects, and new local projects. You cannot access cloud-hosted projects that haven’t been exported.
+
+### What features are not available in Offline Mode?
+Offline Mode does not currently include Image Target management, Asset Lab, Native App Export, pre-recorded Simulator sequences, or other account-dependent features. We plan to restore or replace key capabilities over time as we execute on our open source roadmap.
+
+---
+
+## Projects, Hosting, and Data
+
+### Will my hosted projects stay live?
+Yes. All 8th Wall hosted projects and experiences will remain live through **February 28, 2027**. After that, hosting services will be shut down.
+
+### What happens to externally linked assets or embedded experiences?
+Assets and hosted URLs will continue functioning through **February 28, 2027**. After that, they will stop serving content.
+
+Any embedded 8th Wall projects will need to be re-hosted.
+
+### What’s the plan for data retention and deletion?
+All developer and billing data will be permanently deleted after the decommission date. Please download any data you wish to keep before that time.
+
+---
+
+## Distributed 8th Wall Engine Binary
+
+### What is the Distributed 8th Wall Engine Binary?
+The Distributed Engine Binary contains the core 8th Wall AR engine and is included with each project exported using Buildable Code Export.
+
+It is provided as closed source and governed by a limited-use distribution license.
+
+### Is the engine binary the same engine used by the hosted platform?
+Yes. The distributed engine binary contains the same core AR capabilities used by the hosted 8th Wall platform, packaged for local and self-hosted use.
+
+However, it removes specific cloud-dependent or licensed features such as:
+- Niantic Spatial VPS (Lightship VPS for Web)
+- Lightship Maps
+- Hand Tracking
+- Any other features that depend on hosted platform services
+
+### What’s included in the engine binary?
+The distributed engine binary includes the core AR capabilities that power 8th Wall experiences, including:
+- World Effects
+- Face Effects
+- Image Targets
+- Sky Effects
+- Absolute Scale
+
+### What’s not included in the engine binary?
+The engine binary does not include:
+- Source code access
+- The ability to modify or recompile the engine
+- Niantic Spatial products such as VPS, Lightship Maps, or the Geospatial Browser
+- Hand Tracking
+
+### How long will the engine binary be maintained?
+The engine binary will be maintained through **March 2026** to support a stable transition.
+
+---
+
+## Distributed Engine Binary License and Permitted Use
+
+### What license will the distributed 8th Wall Engine binary use?
+The engine binary is distributed under a limited-use license included with the binary. The license defines how the engine can be used, distributed, and integrated into projects.
+
+### In plain terms, what is this license for?
+The license allows you to continue running and distributing your existing 8th Wall projects using the distributed engine binary, while protecting Niantic Spatial’s proprietary technology.
+
+It is designed to preserve existing use cases, not to enable engine modification or competitive development.
+
+### What am I allowed to do with the Distributed 8th Wall Engine Binary?
+You may install, execute, and distribute the engine binary in its original form as part of your own application, game, product, or service, provided that your use complies with the license terms.
+
+This includes:
+- Running the engine locally or in your own hosting environment
+- Packaging the engine with your exported 8th Wall project
+- Deploying your application to end users as part of a larger experience
+
+### Can I modify, inspect, or reverse engineer the engine?
+No. The engine is provided as closed source. You may not:
+- Reverse engineer, decompile, or disassemble the engine
+- Modify the engine or create derivative works
+- Redistribute altered versions of the engine
+
+These restrictions apply regardless of use case.
+
+### Can I use the engine in a commercial product?
+Yes, as long as the value of your product does not derive entirely or substantially from the engine itself.
+
+In practice:
+- ✅ Using the engine as one component of a broader application or experience is permitted
+- ❌ Selling the engine itself or an engine-based toolkit is not permitted
+
+### Can I use the engine to build client marketing campaigns?
+Yes. Developers, brands, and agencies may use the engine to build branded or marketing experiences for clients, provided the engine is one component of a broader creative or marketing deliverable.
+
+Common permitted examples include:
+- Branded AR campaigns
+- Experiential marketing activations
+- Web-based AR experiences delivered as part of a larger campaign
+
+In short: if you’re selling the experience, not the engine, your use is permitted.
+
+### If I use the Distributed Engine Binary, am I required to include attribution or notices?
+Yes. When distributing applications that use the engine, you must:
+- Retain Niantic Spatial copyright and proprietary notices
+- Identify Niantic Spatial as the creator of the engine
+- Include a reference to the engine license and disclaimer of warranties
+
+Specific attribution requirements are outlined in the license.
+
+---
+
+## Open Source Plans
+
+### What parts of 8th Wall will be open sourced?
+Components that are not tied to proprietary computer vision or SLAM will be open sourced, including:
+- 8th Wall Desktop App
+- Standalone Runtime / ECS
+- Non-SLAM AR features
+- Developer tools such as the 8th Wall Agent MCP Server and Image Target Processor
+- Documentation and sample projects
+
+### What will not be open sourced?
+The following will not be open sourced:
+- Engine internals (provided via the distributed binary)
+- Hand Tracking
+- Niantic Spatial products (VPS, Geospatial Browser, Maps)
+
+---
+
+## Niantic Spatial Products
+
+### Can I use Niantic Spatial VPS with the distributed engine binary?
+No. Niantic Spatial VPS (Lightship VPS for Web) is not included in the distributed engine binary.
+
+### Can I launch new projects using Niantic Spatial VPS after February 28, 2026?
+No. Projects created using the distributed engine binary and open source tools will not support Niantic Spatial VPS on the Web.
+
+Hosted projects launched before February 28, 2026 that use Niantic Spatial VPS will continue to be supported through **February 28, 2027**.
+
+### Will Lightship Maps or the Geospatial Browser be available after the transition?
+No. Lightship Maps and the Geospatial Browser are not part of the open source transition. The Geospatial Browser will also be removed from the Desktop App.
+
+---
+
+## Billing and Accounts
+
+### When will billing and new subscriptions stop?
+We have paused new annual contracts and are no longer accepting new paid sign-ups.
+
+Existing subscriptions will continue until February 28, 2026, when editing access ends and recurring billing stops automatically.
+
+### What happens to my active subscription?
+All paid accounts will automatically end by February 28, 2026.
+
+If you prepaid for time beyond that date, you will receive a prorated refund. You’ll retain dashboard and export access until that date.
+
+### What about enterprise or custom agreements?
+Enterprise and custom customers will be contacted directly to coordinate transitions. For additional questions, contact billing@8thwall.com.
+
+### Will I still be able to access my billing or account history?
+Yes. Billing receipts and account details will remain accessible through February 28, 2026.
+
+### What happens to my data after billing ends?
+All developer and billing data will be permanently deleted after decommissioning. Download anything you need before that date.
+
+---
+
+## Community and Support
+
+### What support and community channels will remain active during this period?
+We aim to keep the 8th Wall Forum, Discord, and social channels active and monitored throughout the transition period.
+
+### What if I need help exporting or archiving my projects?
+We’ll continue providing export guides and documentation updates throughout 2026.
+
+Support is available through the [community Discord](https://8th.io/discord) or at support@8thwall.com.
+
+### Will the Forum eventually close?
+Eventually, yes — but not immediately. We’ll provide advance notice before any changes to support channels occur.
diff --git a/docs/migration/image-targets.md b/docs/migration/image-targets.md
new file mode 100644
index 0000000..a5f3cea
--- /dev/null
+++ b/docs/migration/image-targets.md
@@ -0,0 +1,104 @@
+---
+id: image-targets
+sidebar_position: 5
+---
+
+# Image Targets
+
+We have developed a new API that allows image targets to be defined directly from code:
+```
+XrController.configure({imageTargetData: [{...}]})
+```
+
+## Flat Image Target
+
+If you are setting up a new flat image target:
+
+1. Start by converting the image to grayscale and cropping/scaling/rotating to 480x640 pixels.
+2. Define the image target data:
+```
+const imageTarget = {
+ "imagePath": "./path/to/image.png",
+ "metadata": {}, // available for custom use cases
+ "name": "my-image-target",
+ "type": "PLANAR",
+ "properties": {
+ "left": 0,
+ "top": 0,
+ "width": 480,
+ "height": 640,
+ "originalWidth": 480,
+ "originalHeight": 640,
+ "isRotated": false, // set this to true if you rotated the image when you cropped it
+ }
+}
+```
+
+## Cylindrical Image Target
+
+For cylindrical targets, there are more parameters to provide (`cylinderSideLength`, `cylinderCircumferenceTop`, `targetCircumferenceTop`, `cylinderCircumferenceBottom`, `arcAngle`, `coniness`)
+
+1. Define the image target data:
+```
+const imageTarget = {
+ "imagePath": "./path/to/image.png",
+ "metadata": {}, // available for custom use cases
+ "name": "my-image-target",
+ "type": "CYLINDER",
+ "properties": {
+ "left": 0,
+ "top": 18,
+ "width": 1476,
+ "height": 1968,
+ "originalWidth": 1476,
+ "originalHeight": 2000,
+ "isRotated": false, // set this to true if you rotated the image when you cropped it
+ "cylinderSideLength": 135.5,
+ "cylinderCircumferenceTop": 257.14, // This should match circumferencce bottom
+ "targetCircumferenceTop": 100,
+ "cylinderCircumferenceBottom": 257.14,
+ "arcAngle": 140.0,
+ "coniness": 0, // This should be 0 for all cylinders
+ "inputMode": "BASIC",
+ "unit": "mm",
+ }
+}
+```
+
+## Conical Image Target
+
+For conical targets, an additional unconification step is required, and the input format differs from the image that was uploaded to the web.
+
+1. Start with a rainbow image cropped by the top (dotted green line) and bottom (bold green line) radius
+
+
+
+2. Stretch the rainbow image to be flat, and crop/scale/rotate the dimensions of the flat, unconified image to be a grayscale 480x640 pixels (this cropped image should be the image provided in the image path)
+3. Define the image target data:
+```
+const imageTarget = {
+ "imagePath": "./path/to/image.png", // path to unconified image
+ "metadata": {}, // available for custom use cases
+ "name": "my-image-target",
+ "type": "CONICAL",
+ "properties": {
+ "left": 177,
+ "top": 554,
+ "width": 564,
+ "height": 752,
+ "originalWidth": 842,
+ "originalHeight": 2000,
+ "isRotated": false, // set this to true if you rotated the image when you cropped it
+ "topRadius": 4479,
+ "bottomRadius": 3630.644,
+ "cylinderSideLength": 21.05,
+ "cylinderCircumferenceTop": 100,
+ "targetCircumferenceTop": 50,
+ "cylinderCircumferenceBottom": 81.06,
+ "arcAngle": 180,
+ "coniness": 0.303, // log2(topRadius / bottomRadius)
+ "inputMode": "BASIC",
+ "unit": "mm",
+ }
+}
+```
diff --git a/docs/migration/migration.md b/docs/migration/migration.md
new file mode 100644
index 0000000..528a224
--- /dev/null
+++ b/docs/migration/migration.md
@@ -0,0 +1,9 @@
+---
+sidebar_label: 'Introduction'
+---
+
+# Migration Guide
+
+8th Wall now continues at [8thwall.org](https://8thwall.org) as a free, open source XR toolset. While the hosted 8th Wall platform is no longer available, the underlying engine and supporting tools continue to live on as an open ecosystem.
+
+If you previously developed with 8th Wall, follow this guide to migrate compatible 8th Wall-hosted and self-hosted projects to the open source ecosystem.
diff --git a/docs/migration/project-updates.md b/docs/migration/project-updates.md
new file mode 100644
index 0000000..481f6fe
--- /dev/null
+++ b/docs/migration/project-updates.md
@@ -0,0 +1,53 @@
+---
+id: project-updates
+sidebar_position: 4
+---
+
+# Required Project Updates
+
+## Image Targets
+
+If your project utilizes image targets, you need to configure them at the start of your experience.
+
+To enable image targets, call `XR8.XrController.configure` before any other code:
+
+```
+const onxrloaded = () => {
+ XR8.XrController.configure({
+ imageTargetData: [
+ require('../image-targets/target1.json'),
+ require('../image-targets/target2.json'),
+ ],
+ })
+}
+window.XR8 ? onxrloaded() : window.addEventListener('xrloaded', onxrloaded)
+```
+
+:::info
+Autoloaded targets will have a `"loadAutomatically": true` property in the json file.
+:::
+
+## Asset Bundles
+
+References to asset bundles may need to be updated. Asset bundles are now plain folders.
+
+For example, the physics playground sample project uses a gLTF asset bundle. When running the project for the first time, we encounter a compiler error:
+
+
+
+To fix the issue, we need to update **all references** to reflect the correct location and name of the asset.
+
+In this case, we need to update the gLTF path from:
+```
+/assets/models/props/cannonball.gltf
+```
+to:
+```
+/assets/models/props/cannonball.gltf/Prop_CannonBall.gltf
+```
+
+## Optimization
+
+If you are not using the XR Engine, you can remove the xr.js script tag from index.html and delete the external/xr/ folder to save bandwidth.
+
+If you are using the XR Engine, you can also customize whether `face`, `slam`, or both, are loaded on the `data-preload-chunks` attribute.
diff --git a/docs/migration/self-hosted.md b/docs/migration/self-hosted.md
new file mode 100644
index 0000000..c36627f
--- /dev/null
+++ b/docs/migration/self-hosted.md
@@ -0,0 +1,60 @@
+---
+id: self-hosted
+sidebar_position: 3
+---
+
+# Self Hosted Projects
+
+Existing self-hosted projects will work without any change until Feb 28, 2027. This guide walks through the process of migrating from the 8th Wall-hosted engine to the 8th Wall engine binary, which will mean your project can live on beyond Feb 2027.
+
+:::info
+The 8th Wall engine binary does **not support** cloud-dependent features or those we don’t have license to distribute such as:
+* VPS / Maps
+* Hand Tracking
+* Modules / Backends
+:::
+
+To update a self-hosted project to use the 8th Wall engine binary:
+
+1. Download [xr-standalone.zip](https://8th.io/xrjs) and unzip it into your project folder
+2. Remove the script tag for `apps.8thwall.com/xrweb` and replace it with a script tag that loads the engine from the unzipped `xr-standalone` files
+3. Add `data-preload-chunks` to the script tag or call `await XR8.loadChunk()` in your code before starting the engine. See sections below for more details.
+
+:::note
+`data-preload-chunks="face, slam"` is also supported for experiences using both world and face effects.
+:::
+
+
+## World Effects
+
+If you're using world tracking, add `data-preload-chunks="slam"` to the script tag or call `await XR8.loadChunk('slam')` in your code before starting the engine.
+
+## Face Effects
+
+If you're using face tracking, add `data-preload-chunks="face"` to the script tag or call `await XR8.loadChunk('face')` in your code before starting the engine.
+
+## Image Targets
+
+If you're using image targets, add `data-preload-chunks="slam"` to the script tag or call `await XR8.loadChunk('slam')` in your code before starting the engine.
+
+### Configure Image Targets
+
+Configure the image targets at the start of your experience.
+
+To enable image targets, call `XR8.XrController.configure` before any other code:
+
+```
+const onxrloaded = () => {
+ XR8.XrController.configure({
+ imageTargetData: [
+ require('../image-targets/target1.json'),
+ require('../image-targets/target2.json'),
+ ],
+ })
+}
+window.XR8 ? onxrloaded() : window.addEventListener('xrloaded', onxrloaded)
+```
+
+:::info
+Autoloaded targets will have a `"loadAutomatically": true` property in the json file.
+:::
diff --git a/docs/open-source.md b/docs/open-source.md
new file mode 100644
index 0000000..ad854dd
--- /dev/null
+++ b/docs/open-source.md
@@ -0,0 +1,42 @@
+---
+id: open-source
+sidebar_position: 6
+---
+
+# Open Source
+
+## Engine Distribution
+
+The 8th Wall engine is now available in two forms:
+
+### Distributed Engine Binary (with SLAM)
+
+Released in January 2026, the **Distributed Engine Binary** includes SLAM and is available under a binary-only license for continued commercial and noncommercial use.
+
+Included:
+- SLAM
+- Core engine runtime
+
+Not included:
+- VPS
+- Maps
+- Hand Tracking
+
+### Open Source Engine Framework (MIT Licensed)
+
+The open source version of the engine framework is licensed under MIT and includes:
+
+- Core engine architecture
+- Face Effects
+- Image Targets
+- Sky Effects
+
+SLAM is **not** included in the open source release and remains available only through the Distributed Engine Binary.
+
+With the framework now open, developers can inspect, extend, and maintain the engine as browser APIs evolve and web standards change.
+
+
+## Open Source Use
+
+8th Wall's websites and SDKs may incorporate open source packages. Please see
+our open source attributions page for details.
diff --git a/docs/requirements.md b/docs/requirements.md
new file mode 100644
index 0000000..14fba78
--- /dev/null
+++ b/docs/requirements.md
@@ -0,0 +1,34 @@
+---
+id: requirements
+sidebar_position: 7
+---
+
+# Requirements
+
+## Web Browser
+Mobile browsers require the following functionality to support 8th Wall Web experiences:
+
+* WebGL (`canvas.getContext('webgl') || canvas.getContext('webgl2')`)
+* getUserMedia (`navigator.mediaDevices.getUserMedia`)
+* deviceorientation (`window.DeviceOrientationEvent` - *only needed if SLAM is enabled*)
+* Web-Assembly / WASM (`window.WebAssembly`)
+
+**NOTE:** 8th Wall Web experiences must be viewed via **https**. **This is required by browsers for camera access.**
+
+### iOS
+
+* **Safari** (iOS 11+)
+* **Apps** that use SFSafariViewController web views (iOS 13+)
+ * Apple added `getUserMedia()` support to SFSafariViewController in iOS 13. 8th Wall works within iOS 13 apps that use SFSafariViewController web views.
+ * Examples: Twitter, Slack, Discord, Gmail, Hangouts, and more.
+* **Apps/Browsers** that use WKWebView web views (iOS 14.3+)
+
+### Android
+
+* **Browsers** known to natively support the features required for WebAR:
+ * Chrome
+ * Firefox
+ * Samsung Internet
+ * Microsoft Edge
+* **Apps** using Web Views known to support the features required for WebAR:
+ * Twitter, WhatsApp, Slack, Gmail, Hangouts, Reddit, LinkedIn, and more.
diff --git a/docs/studio/_category_.json b/docs/studio/_category_.json
new file mode 100644
index 0000000..1d8b0a5
--- /dev/null
+++ b/docs/studio/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "8th Wall Studio",
+ "position": 3
+}
diff --git a/docs/studio/api/Entity.mdx b/docs/studio/api/Entity.mdx
new file mode 100644
index 0000000..ab15aa1
--- /dev/null
+++ b/docs/studio/api/Entity.mdx
@@ -0,0 +1,141 @@
+---
+id: entity
+description: This class represents an individual entity in the 8th Wall Studio scene graph, offering component access, state control, and hierarchical operations.
+sidebar_position: 500
+---
+
+# Entity
+
+## Description
+{frontMatter.description}
+
+## Properties
+
+| Property | Type | Description |
+|----------|------|-------------|
+| eid | Eid | Unique identifier for this entity. |
+
+## Component Methods
+
+### get
+Access the current data for a component on this entity.
+
+```ts
+entity.get(component: RootAttribute) // -> ReadData
+```
+
+### has
+Check if a component exists on this entity.
+
+```ts
+entity.has(component: RootAttribute) // -> boolean
+```
+
+### set
+Apply data to a component on this entity.
+
+```ts
+entity.set(component: RootAttribute, data: ReadData) // -> void
+```
+
+### remove
+Remove a component from this entity.
+
+```ts
+entity.remove(component: RootAttribute) // -> void
+```
+
+### reset
+Reset a component on this entity to its default value.
+
+```ts
+entity.reset(component: RootAttribute) // -> void
+```
+
+## State Methods
+
+### hide
+Hide this entity.
+
+```ts
+entity.hide() // -> void
+```
+
+### show
+Show this entity if hidden.
+
+```ts
+entity.show() // -> void
+```
+
+### isHidden
+Check if the entity is currently hidden.
+
+```ts
+entity.isHidden() // -> boolean
+```
+
+### disable
+Disable this entity.
+
+```ts
+entity.disable() // -> void
+```
+
+### enable
+Enable this entity.
+
+```ts
+entity.enable() // -> void
+```
+
+### isDisabled
+Check if the entity is currently disabled.
+
+```ts
+entity.isDisabled() // -> boolean
+```
+
+### delete
+Delete this entity from the world.
+
+```ts
+entity.delete() // -> void
+```
+
+### isDeleted
+Check if the entity has been deleted.
+
+```ts
+entity.isDeleted() // -> boolean
+```
+
+## Hierarchy Methods
+
+### setParent
+Set this entity’s parent.
+
+```ts
+entity.setParent(parent: Eid | undefined | null) // -> void
+```
+
+### getChildren
+Get this entity’s children.
+
+```ts
+entity.getChildren() // -> Generator
+```
+
+### getParent
+Get this entity’s parent.
+
+```ts
+entity.getParent() // -> Eid | undefined
+```
+
+### addChild
+Make another entity a child of this one.
+
+```ts
+entity.addChild(child: Eid) // -> void
+```
diff --git a/docs/studio/api/_category_.json b/docs/studio/api/_category_.json
new file mode 100644
index 0000000..5bf3f5e
--- /dev/null
+++ b/docs/studio/api/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "API Reference",
+ "position": 6
+}
diff --git a/docs/studio/api/api.md b/docs/studio/api/api.md
new file mode 100644
index 0000000..59fcf26
--- /dev/null
+++ b/docs/studio/api/api.md
@@ -0,0 +1,11 @@
+# 8th Wall Studio API
+
+The Studio API provides everything you need to build structured, dynamic experiences in Studio.
+
+The Studio API includes:
+
+- [**Entity-Component System (ECS)**](/docs/studio/api/ecs) — APIs for working with Studio’s ECS architecture, allowing you to create, modify, and organize entities and components at runtime.
+- [**World**](/docs/studio/api/world) — Core functions and utilities for managing the overall scene graph, including entity hierarchies, transforms, and spaces. The world is the container for all spaces, entities, queries, and observers in your project.
+- [**Events**](/docs/studio/api/events) — A rich system for sending and responding to runtime events within Studio.
+
+Use the Studio API to create immersive, stateful experiences that respond to player input, world changes, and real-time interactions.
diff --git a/docs/studio/api/changelog.mdx b/docs/studio/api/changelog.mdx
new file mode 100644
index 0000000..c358102
--- /dev/null
+++ b/docs/studio/api/changelog.mdx
@@ -0,0 +1,73 @@
+---
+id: changelog
+sidebar_position: 1000
+---
+# Changelog
+
+## 2.2.0 (October 24, 2025) {#2.2.0}
+
+### New Features
+- Add physics collider rotational offsets
+
+### Features and Enhancements
+- Fix directional light shadow angle when following camera
+
+## 2.1.0 (October 16, 2025) {#2.1.0}
+
+### New Features
+- Add effect manager API to update sky and fog (`world.effects`)
+
+### Fixes and Enhancements
+- Updated type definition for schema/data in component remove callback
+- Add animation events to event-ids
+- Fix duplicate observer callbacks on `add()` for prefab instances
+- Fixed issue with mouse inputs causing pointer lock
+- Fixed issue where `ScaleAnimation` was incorrectly offsetting colliders
+
+## 2.0.1 (September 25, 2025) {#2.0.1}
+
+### Fixes and Enhancements
+
+- Fixed an issue with physics where dynamic objects wouldn’t come fully to rest
+- Fixed a crash that could occur after repeatedly changing physics collider scale
+- Corrected particle emission directions
+- Made particle effects framerate independent
+- Increased reliability of the desktop app simulator
+- Fixed a UI issue causing incorrect offset placement on elements
+
+## 2.0.0 (September 10, 2025) {#2.0.0}
+
+The all-new Studio Runtime 2.0 comes with a rebuilt physics engine. Some physics behaviors are different as a result. View the [2.0 upgrade guide](http://8th.io/v2upgradeguide) for instructions and tips to ensure a smooth migration.
+
+### Breaking Changes
+
+- Removed properties from `ecs.Collider`:
+ - `rollingFriction`
+ - `spinningFriction`
+- Changed matrix update mode: `manual` by default, which is more optimized. If modifying three.js objects directly, calling `world.three.notifyChanged()` is required on each changed object, or go back to the original behavior with `world.three.setMatrixUpdateMode('auto')`
+- In 2.0, some of the new Physics engine’s underlying calculations differ for properties like friction, restitution, and damping. See the [upgrade guide](http://8th.io/v2upgradeguide) for more details.
+
+### New Features
+- We added a new `type` property (required) to `ecs.Collider` to allow for determining if and how physics simulation should apply to the object. Previously, a mass value above 0 determined if a collider was Dynamic or not (0 mass = Static), and Kinematic colliders were not supported.
+ - Allowed values: `ColliderType.Static`, `ColliderType.Kinematic`, `ColliderType.Dynamic`
+ - Kinematic colliders are not affected by physics forces, but can have motion and can affect other dynamic bodies through collisions
+ - See [Collider API documentation](https://www.8thwall.com/docs/studio/api/ecs/collider/) for details and use cases
+
+### Fixes and Enhancements
+- Enabled stricter TypeScript checking by default at build time to improve error reporting
+
+## 1.1.0 (August 29, 2025) {#1.1.0}
+
+- Exported additional constants for XR events
+- Fixed face mesh not being rendered as configured with Face AR camera
+- Fixed behavior for disabled colliders
+- Fixed corrupted shape being applied to auto colliders
+
+## 1.0.1 (August 25, 2025) {#1.0.1}
+
+- Ensured physics boolean types are exposed as `true`/`false`, not `1`/`0`
+- Exported `GpsPointer`
+
+## 1.0.0 (August 6, 2025) {#1.0.0}
+
+- Initial versioned release
diff --git a/docs/studio/api/ecs/_category_.json b/docs/studio/api/ecs/_category_.json
new file mode 100644
index 0000000..30497d9
--- /dev/null
+++ b/docs/studio/api/ecs/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "ecs",
+ "position": 1
+}
\ No newline at end of file
diff --git a/docs/studio/api/ecs/animation/_category_.json b/docs/studio/api/ecs/animation/_category_.json
new file mode 100644
index 0000000..78f84bd
--- /dev/null
+++ b/docs/studio/api/ecs/animation/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "animation",
+ "position": 3
+}
\ No newline at end of file
diff --git a/docs/studio/api/ecs/animation/custom-property-animation.mdx b/docs/studio/api/ecs/animation/custom-property-animation.mdx
new file mode 100644
index 0000000..06d6a3e
--- /dev/null
+++ b/docs/studio/api/ecs/animation/custom-property-animation.mdx
@@ -0,0 +1,38 @@
+---
+id: custom-property-animation
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import CustomPropertyAnimationTable from "../../../../../gen/tables/CustomPropertyAnimation.md"
+
+# CustomPropertyAnimation
+
+## Description
+
+This component creates an animation on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/animation/custom-vec3-animation.mdx b/docs/studio/api/ecs/animation/custom-vec3-animation.mdx
new file mode 100644
index 0000000..883aaeb
--- /dev/null
+++ b/docs/studio/api/ecs/animation/custom-vec3-animation.mdx
@@ -0,0 +1,39 @@
+---
+id: custom-vec3-animation
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import CustomVec3AnimationTable from "../../../../../gen/tables/CustomVec3Animation.md"
+
+# CustomVec3Animation
+
+## Description
+
+This component creates an animation on an entity.
+
+## Properties
+
+
+
+
diff --git a/docs/studio/api/ecs/animation/follow-animation.mdx b/docs/studio/api/ecs/animation/follow-animation.mdx
new file mode 100644
index 0000000..ca8b04c
--- /dev/null
+++ b/docs/studio/api/ecs/animation/follow-animation.mdx
@@ -0,0 +1,30 @@
+---
+id: follow-animation
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import FollowAnimationTable from "../../../../../gen/tables/FollowAnimation.md"
+
+# FollowAnimation
+
+## Description
+
+This component creates an animation on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/animation/look-at-animation.mdx b/docs/studio/api/ecs/animation/look-at-animation.mdx
new file mode 100644
index 0000000..e28844b
--- /dev/null
+++ b/docs/studio/api/ecs/animation/look-at-animation.mdx
@@ -0,0 +1,30 @@
+---
+id: look-at-animation
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import LookAtAnimationTable from "../../../../../gen/tables/LookAtAnimation.md"
+
+# LookAtAnimation
+
+## Description
+
+This component creates an animation on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/animation/position-animation.mdx b/docs/studio/api/ecs/animation/position-animation.mdx
new file mode 100644
index 0000000..575bf72
--- /dev/null
+++ b/docs/studio/api/ecs/animation/position-animation.mdx
@@ -0,0 +1,41 @@
+---
+id: position-animation
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import PositionAnimationTable from "../../../../../gen/tables/PositionAnimation.md"
+
+# PositionAnimation
+
+## Description
+
+This component creates an animation on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/animation/rotate-animation.mdx b/docs/studio/api/ecs/animation/rotate-animation.mdx
new file mode 100644
index 0000000..31a866d
--- /dev/null
+++ b/docs/studio/api/ecs/animation/rotate-animation.mdx
@@ -0,0 +1,41 @@
+---
+id: rotate-animation
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import RotateAnimationTable from "../../../../../gen/tables/RotateAnimation.md"
+
+
+# RotateAnimation
+
+## Description
+
+This component creates an animation on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/animation/scale-animation.mdx b/docs/studio/api/ecs/animation/scale-animation.mdx
new file mode 100644
index 0000000..2f590c9
--- /dev/null
+++ b/docs/studio/api/ecs/animation/scale-animation.mdx
@@ -0,0 +1,43 @@
+---
+id: scale-animation
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import ScaleAnimationTable from "../../../../../gen/tables/ScaleAnimation.md"
+
+
+# ScaleAnimation
+
+## Description
+
+This component creates an animation on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/assets.md b/docs/studio/api/ecs/assets.md
new file mode 100644
index 0000000..08ed546
--- /dev/null
+++ b/docs/studio/api/ecs/assets.md
@@ -0,0 +1,61 @@
+---
+id: assets
+---
+
+# assets
+
+## Description
+
+This library includes functions that handle asset management.
+
+## Types
+
+### AssetRequest
+
+| Parameter | Type | Description |
+|-----------|--------|----------------------|
+| id | eid | Unused |
+| url | string | The url of the asset |
+
+### AssetStatistics
+
+| Parameter | Type | Description |
+|-----------|--------|--------------------------------------------------|
+| pending | number | The number of pending assets |
+| complete | number | The number of assets that have completed loading |
+| total | number | The total number of assets to be loaded |
+
+### Asset
+
+| Parameter | Type | Description |
+|-----------|--------|---------------------------------|
+| data | blob | Asset data |
+| remoteUrl | string | Where the data was fetched from |
+| localUrl | string | url constructed from data |
+
+## Functions
+
+### load
+
+Load an asset
+
+``` ts
+ecs.assets.load(assetRequest: AssetRequest) // -> Promise
+```
+
+### clear
+
+Clears the request asset from loaded assets.
+
+``` ts
+ecs.assets.clear(assetRequest: AssetRequest) // -> Promise
+```
+
+### getStatistics
+
+Gets statistics related to loading assets.
+
+``` ts
+ecs.assets.getStatistics() // -> AssetStatistics
+```
+
diff --git a/docs/studio/api/ecs/audio.mdx b/docs/studio/api/ecs/audio.mdx
new file mode 100644
index 0000000..f77c7a0
--- /dev/null
+++ b/docs/studio/api/ecs/audio.mdx
@@ -0,0 +1,37 @@
+---
+id: audio
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import AudioTable from "../../../../gen/tables/Audio.md"
+
+# Audio
+
+## Description
+
+This component allows entities to play sounds.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/camera.mdx b/docs/studio/api/ecs/camera.mdx
new file mode 100644
index 0000000..4811d21
--- /dev/null
+++ b/docs/studio/api/ecs/camera.mdx
@@ -0,0 +1,36 @@
+---
+id: camera
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import CameraTable from "../../../../gen/tables/Camera.md"
+
+# Camera
+
+## Description
+
+This component allows the user to view the world through the entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/collider.mdx b/docs/studio/api/ecs/collider.mdx
new file mode 100644
index 0000000..1154457
--- /dev/null
+++ b/docs/studio/api/ecs/collider.mdx
@@ -0,0 +1,59 @@
+---
+id: collider
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import ColliderTable from "../../../../gen/tables/Collider.md"
+
+# Collider
+
+## Description
+
+This component establishes a physics collider on an entity.
+
+## ColliderShape
+
+| Type | Description |
+|----------|-------------|
+| Box | Box |
+| Sphere | Sphere |
+| Plane | Plane |
+| Capsule | Capsule |
+| Cone | Cone |
+| Cylinder | Cylinder |
+
+## ColliderType
+
+| Type | Description |
+|-----------|-------------|
+| Static | Applies to an object that does not move under simulation and behaves as if it has infinite mass |
+| Dynamic | Applies to a fully simulated, real-life object affected by forces, gravity, and collisions. |
+| Kinematic | Applies to an object whose motion is not affected by forces but is entirely controlled programmatically by the user. Unlike static bodies, kinematic bodies can move and can affect other dynamic bodies through collisions. |
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/disabled.mdx b/docs/studio/api/ecs/disabled.mdx
new file mode 100644
index 0000000..1a2bb06
--- /dev/null
+++ b/docs/studio/api/ecs/disabled.mdx
@@ -0,0 +1,23 @@
+---
+id: disabled
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+
+# Disabled
+
+## Description
+
+This component disables the entity that it is attached to.
+
+## Properties
+
+None
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/ecs.mdx b/docs/studio/api/ecs/ecs.mdx
new file mode 100644
index 0000000..e01fee2
--- /dev/null
+++ b/docs/studio/api/ecs/ecs.mdx
@@ -0,0 +1,184 @@
+---
+id: ecs
+---
+
+import EcsTypes from '/src/components/_ecs-types.mdx'
+
+# ecs
+
+## Properties
+
+### Types {#types}
+
+
+
+## Functions
+
+### registerComponent
+
+Registers a component with the ECS.
+
+``` ts
+ecs.registerComponent({
+ name: string,
+ schema: Schema,
+ data: Schema,
+ schemaDefaults: Object,
+ add: function
+ remove: function,
+ tick: function
+}) // -> Component Handle
+```
+
+#### State
+
+| Parameter | Type | Description |
+|---------------------|-----------------------------|--------------------------------------------------------------------------------------|
+| triggers (Required) | `Record` | Names of the next states that this state can transition to, and the triggers that cause each transition. |
+| onEnter | function | Function called before the state is entered. |
+| onExit | function | Function called before the state is exited. |
+
+#### Trigger
+
+##### EventTrigger
+
+| Parameter | Type | Description |
+|------------------|--------------------|-------------------------------------------------------------------------------------------------------------------------------------|
+| type (Required) | constant: 'event' | A constant to indicate the type of the trigger |
+| event (Required) | string | The event type that triggers this |
+| target | eid | The entity you want this trigger to change state for |
+| beforeTransition | (event) => boolean | A function that runs before the transition; if the result is truthy, the transition terminates and the state does not change |
+
+##### TimeoutTrigger
+
+| Parameter | Type | Description |
+|--------------------|---------------------|--------------------------------------------------|
+| type (Required) | constant: 'timeout' | A constant to indicate the type of the trigger |
+| timeout (Required) | number | The number of milliseconds before the transition |
+
+### createStateMachine
+
+Create a state machine.
+
+``` ts
+ecs.createStateMachine(world, owner, {initialState: string, states: Record}) // -> State Machine ID
+```
+
+### deleteStateMachine
+
+Delete a state machine.
+
+``` ts
+ecs.deleteStateMachine(world, machineId) // -> void
+```
+
+### defineState
+
+Define a state.
+
+``` ts
+ecs.defineState(name: string) // -> State
+```
+
+#### initial
+
+Mark this state as the initial state of the state machine
+
+``` ts
+ecs.defineState(name).initial() // -> void
+```
+
+#### onEnter
+
+Set a callback to run when entering this state.
+
+``` ts
+ecs.defineState(name).onEnter(callback) // -> void
+```
+
+#### onEvent
+
+##### Event
+
+| Parameter | Type | Description |
+|------------------|--------------------|-----------------------------------------------------------------------------------------------------------------------------------|
+| target | eid | the entity you want this trigger to change state for |
+| beforeTransition | (event) => boolean | A function that runs before the transition; if the result is truthy, the transition terminates and the state does not change |
+
+Trigger a transition to the next state when an event is received.
+
+``` ts
+ecs.defineState(name).onEvent(event: string, nextState: string | State, args: EventObject) // -> void
+```
+
+#### onExit
+
+Set a callback to run when exiting this state.
+
+``` ts
+ecs.defineState(name).onExit(callback) // -> void
+```
+
+#### wait
+
+Wait before transitioning state.
+
+``` ts
+ecs.defineState(name).wait(waitTime: number, nextState: string | State) // -> void
+```
+
+### defineSystem
+
+Define a system.
+
+``` ts
+ecs.defineSystem([terms]: string[], behavior: function) // -> System
+```
+
+### defineQuery
+
+Define a query.
+
+``` ts
+ecs.defineQuery([terms: string]) // -> callback
+```
+
+### getAttribute
+
+Built-in components are also exposed as a property of ecs.
+
+``` ts
+ecs.getAttribute(attributeName: string) // -> the attribute that has been registered with that name.
+```
+
+### listAttributes
+
+Returns a list of attributes.
+
+``` ts
+ecs.listAttributes() // -> string[]
+```
+
+### getBehaviors
+
+Returns a list of registered behaviors.
+
+``` ts
+ecs.getBehaviors() // -> function[]
+```
+
+### registerBehavior
+
+Register a function that runs on the world every tick.
+
+``` ts
+ecs.registerBehavior(behavior: function) // -> void
+```
+
+### unregisterBehavior
+
+Deactivate a behavior.
+
+``` ts
+ecs.unregisterBehavior(behavior: function) // -> void
+```
diff --git a/docs/studio/api/ecs/face.mdx b/docs/studio/api/ecs/face.mdx
new file mode 100644
index 0000000..44d3a1b
--- /dev/null
+++ b/docs/studio/api/ecs/face.mdx
@@ -0,0 +1,29 @@
+---
+id: face
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import FaceTable from "../../../../gen/tables/Face.md"
+
+# Face
+
+## Description
+
+This component causes the entity's transform to follow a face tracked in XR. It will show and hide the entity as the face is found or lost.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/faceattachment.mdx b/docs/studio/api/ecs/faceattachment.mdx
new file mode 100644
index 0000000..b14e73a
--- /dev/null
+++ b/docs/studio/api/ecs/faceattachment.mdx
@@ -0,0 +1,27 @@
+---
+id: face-attachment
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import FaceAttachmentTable from "../../../../gen/tables/FaceAttachment.md"
+
+# FaceAttachment
+
+## Description
+
+This component causes the entity's transform to follow the specified face attachment tracked in XR. It will show and hide the entity as the face is found or lost.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/flycontroller.mdx b/docs/studio/api/ecs/flycontroller.mdx
new file mode 100644
index 0000000..0b0c529
--- /dev/null
+++ b/docs/studio/api/ecs/flycontroller.mdx
@@ -0,0 +1,34 @@
+---
+id: fly-controller
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import FlyControllerTable from "../../../../gen/tables/FlyController.md"
+
+# FlyController
+
+## Description
+
+This component lets you control the object and move freely around the space.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/geometry/_category_.json b/docs/studio/api/ecs/geometry/_category_.json
new file mode 100644
index 0000000..3f30d95
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "geometry",
+ "position": 2
+}
\ No newline at end of file
diff --git a/docs/studio/api/ecs/geometry/box-geometry.mdx b/docs/studio/api/ecs/geometry/box-geometry.mdx
new file mode 100644
index 0000000..e3173d4
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/box-geometry.mdx
@@ -0,0 +1,26 @@
+---
+id: box-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import BoxGeometryTable from "../../../../../gen/tables/BoxGeometry.md"
+
+# BoxGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/geometry/capsule-geometry.mdx b/docs/studio/api/ecs/geometry/capsule-geometry.mdx
new file mode 100644
index 0000000..ce77f58
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/capsule-geometry.mdx
@@ -0,0 +1,27 @@
+---
+id: capsule-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import CapsuleGeometryTable from "../../../../../gen/tables/CapsuleGeometry.md"
+
+# CapsuleGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/geometry/circle-geometry.mdx b/docs/studio/api/ecs/geometry/circle-geometry.mdx
new file mode 100644
index 0000000..29f5ae1
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/circle-geometry.mdx
@@ -0,0 +1,23 @@
+---
+id: circle-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import CircleGeometryTable from "../../../../../gen/tables/CircleGeometry.md"
+
+# CircleGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+
diff --git a/docs/studio/api/ecs/geometry/cone-geometry.mdx b/docs/studio/api/ecs/geometry/cone-geometry.mdx
new file mode 100644
index 0000000..769b3f3
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/cone-geometry.mdx
@@ -0,0 +1,26 @@
+---
+id: cone-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import ConeGeometryTable from "../../../../../gen/tables/ConeGeometry.md"
+
+# ConeGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/geometry/cylinder-geometry.mdx b/docs/studio/api/ecs/geometry/cylinder-geometry.mdx
new file mode 100644
index 0000000..d9d44d5
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/cylinder-geometry.mdx
@@ -0,0 +1,26 @@
+---
+id: cylinder-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import CylinderGeometryTable from "../../../../../gen/tables/CylinderGeometry.md"
+
+# CylinderGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/geometry/plane-geometry.mdx b/docs/studio/api/ecs/geometry/plane-geometry.mdx
new file mode 100644
index 0000000..7dfc0e2
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/plane-geometry.mdx
@@ -0,0 +1,27 @@
+---
+id: plane-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import PlaneGeometryTable from "../../../../../gen/tables/PlaneGeometry.md"
+
+# PlaneGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/geometry/polyhedron-geometry.mdx b/docs/studio/api/ecs/geometry/polyhedron-geometry.mdx
new file mode 100644
index 0000000..ac9bd27
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/polyhedron-geometry.mdx
@@ -0,0 +1,25 @@
+---
+id: polyhedron-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import PolyhedronGeometryTable from "../../../../../gen/tables/PolyhedronGeometry.md"
+
+# PolyhedronGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/geometry/ring-geometry.mdx b/docs/studio/api/ecs/geometry/ring-geometry.mdx
new file mode 100644
index 0000000..27ecefb
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/ring-geometry.mdx
@@ -0,0 +1,26 @@
+---
+id: ring-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import RingGeometryTable from "../../../../../gen/tables/RingGeometry.md"
+
+# RingGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/geometry/sphere-geometry.mdx b/docs/studio/api/ecs/geometry/sphere-geometry.mdx
new file mode 100644
index 0000000..563c17d
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/sphere-geometry.mdx
@@ -0,0 +1,25 @@
+---
+id: sphere-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import SphereGeometryTable from "../../../../../gen/tables/SphereGeometry.md"
+
+# SphereGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/geometry/torus-geometry.mdx b/docs/studio/api/ecs/geometry/torus-geometry.mdx
new file mode 100644
index 0000000..5a25645
--- /dev/null
+++ b/docs/studio/api/ecs/geometry/torus-geometry.mdx
@@ -0,0 +1,25 @@
+---
+id: torus-geometry
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import TorusGeometryTable from "../../../../../gen/tables/TorusGeometry.md"
+
+# TorusGeometry
+
+## Description
+
+This component establishes geometry on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/gltfmodel.mdx b/docs/studio/api/ecs/gltfmodel.mdx
new file mode 100644
index 0000000..d892a5e
--- /dev/null
+++ b/docs/studio/api/ecs/gltfmodel.mdx
@@ -0,0 +1,36 @@
+---
+id: gltf-model
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import GltfModelTable from "../../../../gen/tables/GltfModel.md"
+
+# GltfModel
+
+## Description
+
+This component establishes custom geometry on an entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/gpspointer.mdx b/docs/studio/api/ecs/gpspointer.mdx
new file mode 100644
index 0000000..b46b478
--- /dev/null
+++ b/docs/studio/api/ecs/gpspointer.mdx
@@ -0,0 +1,27 @@
+---
+id: gps-pointer
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import GpsPointerTable from "../../../../gen/tables/GpsPointer.md"
+
+# GpsPointer
+
+## Description
+
+This component controls how the entity is rotated according to GPS inputs.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/hidden.mdx b/docs/studio/api/ecs/hidden.mdx
new file mode 100644
index 0000000..8f4042a
--- /dev/null
+++ b/docs/studio/api/ecs/hidden.mdx
@@ -0,0 +1,23 @@
+---
+id: hidden
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+
+# Hidden
+
+## Description
+
+This component hides the entity that it is attached to.
+
+## Properties
+
+None
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/imagetarget.mdx b/docs/studio/api/ecs/imagetarget.mdx
new file mode 100644
index 0000000..5dabadf
--- /dev/null
+++ b/docs/studio/api/ecs/imagetarget.mdx
@@ -0,0 +1,27 @@
+---
+id: image-target
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import ImageTargetTable from "../../../../gen/tables/ImageTarget.md"
+
+# ImageTarget
+
+## Description
+
+This component causes an entity's transform to follow an image in XR. It will show and hide itself as the image is found or lost.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/light.mdx b/docs/studio/api/ecs/light.mdx
new file mode 100644
index 0000000..6257845
--- /dev/null
+++ b/docs/studio/api/ecs/light.mdx
@@ -0,0 +1,49 @@
+---
+id: light
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import LightTable from "../../../../gen/tables/Light.md"
+
+# Light
+
+## Description
+
+This component makes an entity emit light.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/material/_category_.json b/docs/studio/api/ecs/material/_category_.json
new file mode 100644
index 0000000..d38a61f
--- /dev/null
+++ b/docs/studio/api/ecs/material/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "material",
+ "position": 4
+}
\ No newline at end of file
diff --git a/docs/studio/api/ecs/material/basic-material.mdx b/docs/studio/api/ecs/material/basic-material.mdx
new file mode 100644
index 0000000..1b39dba
--- /dev/null
+++ b/docs/studio/api/ecs/material/basic-material.mdx
@@ -0,0 +1,32 @@
+---
+id: basic-material
+sidebar_position: 1
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import MaterialTable from "../../../../../gen/tables/Material.md"
+
+# Material
+
+## Description
+
+This component creates a standard [PBR](https://learn.microsoft.com/en-us/azure/remote-rendering/overview/features/pbr-materials) material on an entity with geometry.
+
+## Properties
+
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/material/hidermaterial.mdx b/docs/studio/api/ecs/material/hidermaterial.mdx
new file mode 100644
index 0000000..9d20319
--- /dev/null
+++ b/docs/studio/api/ecs/material/hidermaterial.mdx
@@ -0,0 +1,23 @@
+---
+id: hider-material
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+
+# HiderMaterial
+
+## Description
+
+This component creates a material that hides any objects behind it.
+
+## Properties
+
+None.
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/material/shadowmaterial.mdx b/docs/studio/api/ecs/material/shadowmaterial.mdx
new file mode 100644
index 0000000..01e3300
--- /dev/null
+++ b/docs/studio/api/ecs/material/shadowmaterial.mdx
@@ -0,0 +1,33 @@
+---
+id: shadow-material
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import ShadowMaterialTable from "../../../../../gen/tables/ShadowMaterial.md"
+
+# ShadowMaterial
+
+## Description
+
+This component creates a material that only renders shadows on an entity with geometry.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/material/unlitmaterial.mdx b/docs/studio/api/ecs/material/unlitmaterial.mdx
new file mode 100644
index 0000000..809630d
--- /dev/null
+++ b/docs/studio/api/ecs/material/unlitmaterial.mdx
@@ -0,0 +1,30 @@
+---
+id: unlit-material
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import UnlitMaterialTable from "../../../../../gen/tables/UnlitMaterial.md"
+
+# UnlitMaterial
+
+## Description
+
+This component creates a material unaffected by lighting or shadows on an entity with geometry.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/material/videomaterial.mdx b/docs/studio/api/ecs/material/videomaterial.mdx
new file mode 100644
index 0000000..d0615bf
--- /dev/null
+++ b/docs/studio/api/ecs/material/videomaterial.mdx
@@ -0,0 +1,35 @@
+---
+id: video-material
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import VideoMaterialTable from "../../../../../gen/tables/VideoMaterial.md"
+
+# VideoMaterial
+
+## Description
+
+This component allows the user to add a video texture to the entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/math/_category_.json b/docs/studio/api/ecs/math/_category_.json
new file mode 100644
index 0000000..52f78e9
--- /dev/null
+++ b/docs/studio/api/ecs/math/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "math",
+ "position": 1
+}
\ No newline at end of file
diff --git a/docs/studio/api/ecs/math/mat4.md b/docs/studio/api/ecs/math/mat4.md
new file mode 100644
index 0000000..f316cdb
--- /dev/null
+++ b/docs/studio/api/ecs/math/mat4.md
@@ -0,0 +1,315 @@
+---
+id: mat4
+---
+
+# mat4
+
+The Mat4 interface represents a 4x4 matrix, stored as a 16-element array in column-major order. This type of matrix is commonly used in 3D geometry to represent transformations, including position, rotation, and scale (also known as a TRS matrix). These matrices are essential for defining the position, orientation, and size of objects in a 3D scene.
+
+Certain matrices, such as TRS matrices, have efficiently computable inverses. In these cases, Mat4 allows for the inverse to be calculated in constant time, making it an O(1) operation. Mat4 objects are created using the ecs.math.mat4 factory (Mat4Factory) or through operations on existing Mat4 instances.
+
+## Factory
+
+### i
+
+Identity matrix
+
+``` ts
+ecs.math.mat4.i() // -> mat4
+```
+
+### of
+
+Creates a matrix with directly specified data, using column-major order. Optional inverse can be provided. If not provided, the inverse will be calculated automatically if the matrix is invertible. Attempting to calculate the inverse for a non-invertible matrix will throw an error.
+
+``` ts
+ecs.math.mat4.of(data: number[], inverseData?: number[]) // -> mat4
+```
+
+### r
+
+Creates a rotation matrix from a quaternion.
+
+``` ts
+ecs.math.mat4.r(q: QuatSource) // -> mat4
+```
+
+### rows
+
+Creates a matrix using specified row data. You can also optionally provide inverse row data. Both dataRows and inverseDataRows should be four arrays, each containing four numbers. If the inverse is not provided, it will be computed automatically if the matrix is invertible.
+
+:::danger
+Attempting to calculate the inverse for a non-invertible matrix will throw an error.
+:::
+
+``` ts
+ecs.math.mat4.rows(dataRows: number[][], inverseDataRows?: number[][]) // -> mat4
+```
+
+### s
+
+Creates a scale matrix. Specify the scale factors along the x, y, and z axes.
+
+``` ts
+ecs.math.mat4.s(x: number, y: number, z: number) // -> mat4
+```
+
+### t
+
+Creates a translation matrix. Specify the translation offsets along the x, y, and z axes.
+
+``` ts
+ecs.math.mat4.t(x: number, y: number, z: number) // -> mat4
+```
+
+### tr
+
+Creates a combined translation and rotation matrix using a translation vector and a quaternion for the rotation.
+
+``` ts
+ecs.math.mat4.tr(t: Vec3Source, r: QuatSource) // -> mat4
+```
+
+### trs
+
+Creates a combined translation, rotation, and scale matrix. Use a translation vector, a quaternion for rotation, and scale factors for x, y, and z axes.
+
+``` ts
+ecs.math.mat4.trs(t: Vec3Source, r: QuatSource, s: Vec3Source) // -> mat4
+```
+
+## Immutable
+
+The following methods perform computations using the current value of a Mat4 without altering its contents. Methods that return Mat4 types generate new instances. While immutable APIs are generally safer, more readable, and reduce errors compared to mutable APIs, they may be less efficient in scenarios where thousands of objects are created each frame.
+
+:::note
+If garbage collection becomes a performance issue, consider using the Mutable API.
+:::
+
+### clone
+
+Create a new matrix with the same components as this matrix.
+
+``` ts
+ecs.math.mat4.clone() // -> mat4
+```
+
+### data
+
+Get the raw data of the matrix, in column-major order.
+
+``` ts
+ecs.math.mat4.data() // -> number[]
+```
+
+### decomposeTrs
+
+Decompose the matrix into its translation, rotation, and scale components, assuming it was formed by a translation, rotation, and scale in that order. If ‘target’ is supplied, the result will be stored in ‘target’ and ‘target’ will be returned. Otherwise, a new {t, r, s} object will be created and returned.
+
+``` ts
+ecs.math.mat4.decomposeTrs(target?: {t: Vec3, r: Quat, s: Vec3}) // -> {t: Vec3, r: Quat, s: Vec3}
+```
+
+### determinant
+
+Compute the determinant of the matrix.
+
+``` ts
+ecs.math.mat4.determinant() // -> number
+```
+
+### equals
+
+Check whether two matrices are equal, with a specified floating point tolerance.
+
+``` ts
+ecs.math.mat4.equals(m: Mat4, tolerance: number) // -> boolean
+```
+
+### inv
+
+Invert the matrix or throw if the matrix is not invertible. Because Mat4 stores a precomputed inverse, this operation is O(1).
+
+``` ts
+ecs.math.mat4.inv() // -> mat4
+```
+
+### inverseData
+
+Get the raw data of the inverse matrix, in column-major order, or null if the matrix is not invertible.
+
+``` ts
+ecs.math.mat4.inverseData() // -> number[] | null
+```
+
+### lookAt
+
+Get a matrix with the same position and scale as this matrix, but with the rotation set to look at the target.
+
+``` ts
+ecs.math.mat4.lookAt(target: Vec3Source, up: Vec3Source) // -> mat4
+```
+
+### scale
+
+Multiply the matrix by a scalar.
+
+:::danger
+Scaling by 0 throws an error.
+:::
+
+``` ts
+ecs.math.mat4.scale(s: number) // -> mat4
+```
+
+### transpose
+
+Get the transpose of the matrix.
+
+``` ts
+ecs.math.mat4.transpose() // -> mat4
+```
+
+### times
+
+Multiply the matrix by another matrix.
+
+``` ts
+ecs.math.mat4.times(m: Mat4) // -> mat4
+```
+
+### timesVec
+
+Multiply the matrix by a vector using homogeneous coordinates.
+
+``` ts
+ecs.math.mat4.timesVec(v: Vec3Source, target?: Vec3) // -> vec3
+```
+
+## Mutable
+
+The following methods compute results based on the current value of a Mat4 and modify its contents directly. They mirror the methods in the Immutable API described earlier. Methods returning Mat4 types provide a reference to the same object, allowing for method chaining. While mutable APIs can offer better performance than immutable ones, they tend to be less safe, less readable, and more prone to errors.
+
+:::note
+If code is unlikely to be executed frequently within a single frame, consider using the Immutable API for better code safety and clarity.
+:::
+
+### setInv
+
+Invert the matrix or throw if the matrix is not invertible. Because Mat4 stores a precomputed inverse, this operation is O(1). Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.setInv() // -> mat4
+```
+
+### setLookAt
+
+Set the matrix rotation to look at the target, keeping translation and scale unchanged. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.setLookAt(target: Vec3Source, up: Vec3Source) // -> mat4
+```
+
+### setPremultiply
+
+Sets this matrix to the result of m times this matrix. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.setPremultiply(m: Mat4) // -> mat4
+```
+
+### setScale
+
+Multiply each element of the matrix by a scalar. Scaling by 0 throws an error. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.setScale(s: number) // -> mat4
+```
+
+### setTimes
+
+Set the matrix to the result of this matrix times m. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.setTimes(target: Mat4Source) // -> mat4
+```
+
+### setTranspose
+
+Set the matrix to its transpose. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.setTranspose() // -> mat4
+```
+
+## Set
+
+The following methods set the value of the current Mat4 object without regard to its current content, replacing whatever was there before.
+
+### makeI
+
+Set the matrix to the identity matrix. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.makeI() // -> mat4
+```
+
+### makeR
+
+Set this matrix to a rotation matrix from the specified quaternion. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.makeR(r: QuatSource) // -> mat4
+```
+
+### makeRows
+
+Create a matrix with specified row data and optionally specified inverse row data. dataRows and inverseDataRows should be four arrays, each with four numbers. If the inverse is not specified, it will be computed if the matrix is invertible.
+
+:::danger
+If the matrix is not invertible, calling inv() will throw an error.
+:::
+
+``` ts
+existingMat4.makeRows(rowData: number[][], inverseRowData?: number[][]) // -> mat4
+```
+
+### makeS
+
+Set this matrix to a scale matrix from the specified vector. No element of the vector should be zero. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.makeS(s: Vec3Source) // -> mat4
+```
+
+### makeT
+
+Set this matrix to a translation matrix from the specified vector. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.makeT(s: Vec3Source) // -> mat4
+```
+
+### makeTr
+
+Set this matrix to a translation and rotation matrix from the specified vector and quaternion. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.makeTr(t: Vec3Source, r: QuatSource) // -> mat4
+```
+
+### makeTrs
+
+Set this matrix to a translation, rotation, and scale matrix from the specified vectors and quaternion. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.makeTrs(t: Vec3Source, r: QuatSource, s: Vec3Source) // -> mat4
+```
+
+### set
+
+Sets the value of the matrix and inverse to the provided values. If no inverse is provided, one will be computed if possible. If the matrix is not invertible, calling inv() will throw an error. Store the result in this Mat4 and return this Mat4 for chaining.
+
+``` ts
+existingMat4.set(data: number[], inverseData?: number[]) // -> mat4
+```
\ No newline at end of file
diff --git a/docs/studio/api/ecs/math/math.md b/docs/studio/api/ecs/math/math.md
new file mode 100644
index 0000000..b52fc4a
--- /dev/null
+++ b/docs/studio/api/ecs/math/math.md
@@ -0,0 +1,45 @@
+---
+id: math
+---
+
+# math
+
+## Description
+
+This library includes types and functions to handle different types of math.
+
+## Compatibility
+
+The types in `ecs.math` represent widely followed paradigms and can be bridged easily to other libraries.
+
+### three.js
+
+The types in `ecs.math` can be converted to and from the corresponding three.js math types `THREE.Vector3`, `THREE.Quaternion`, and `THREE.Matrix4` by following the examples below.
+
+``` ts
+const {mat4, quat, vec3} = ecs.math
+
+// Vec3 <--> THREE.Vector3
+const v3js = new THREE.Vector3()
+const v = vec3.zero()
+
+v3js.copy(v) // Set a THREE.Vector3 from a Vec3.
+v.setFrom(v3js) // Set a Vec3 from a THREE.Vector3.
+const v2 = vec3.from(v3js) // Create new Vec3 from a THREE.Vector3.
+
+// Quat <--> THREE.Quaternion
+const q3js = new THREE.Quaternion()
+const q = quat.zero()
+
+q3js.copy(q) // Set a THREE.Quaternion from a Quat.
+q.setFrom(q3js) // Set a Quat from a THREE.Quaternion.
+const q2 = quat.from(q3js) // Create new Quat from a THREE.Quaternion.
+
+// Mat4 <--> THREE.Matrix4
+const m3js = new THREE.Matrix4()
+const m = mat4.i()
+
+m3js.fromArray(m.data()) // Set a THREE.Matrix4 from a Mat4.
+m.set(m3js.elements) // Set a Mat4 from a THREE.Matrix4.
+const m2 = mat4.of(m3js.elements) // Create a new Mat4 from a THREE.Matrix4
+```
\ No newline at end of file
diff --git a/docs/studio/api/ecs/math/quat.md b/docs/studio/api/ecs/math/quat.md
new file mode 100644
index 0000000..d072562
--- /dev/null
+++ b/docs/studio/api/ecs/math/quat.md
@@ -0,0 +1,473 @@
+---
+id: quat
+---
+
+# quat
+
+Interface representing a quaternion. A quaternion is represented by (x, y, z, w) coordinates and represents a 3D rotation. Quaternions can be converted to and from 4x4 rotation matrices with the interfaces in Mat4. Quaternion objects are created with the ecs.math.quat QuatFactory, or through operations on other Quat objects.
+
+## Source
+
+The QuatSource interface represents any object that has x, y, z, and w properties and hence can be used as a data source to create a Quat. In addition, QuatSource can be used as an argument to Quat algorithms, meaning that any object with {x: number, y: number, z: number, w: number} properties can be used.
+
+## Properties
+
+Quat has the following enumerable properties:
+
+``readonly x: number`` Access the x component of the quaternion.
+
+``readonly y: number`` Access the y component of the quaternion.
+
+``readonly z: number`` Access the z component of the quaternion.
+
+``readonly w: number`` Access the w component of the quaternion.
+
+## Factory
+
+### axisAngle
+
+Create a Quat from an axis-angle representation. The direction of the aa vector gives the axis of rotation, and the magnitude of the vector gives the angle, in radians. For example, quat.axisAngle(vec3.up().scale(Math.PI / 2)) represents a 90-degree rotation about the y-axis and is equivalent to quat.yDegrees(90). If a target is supplied, the result will be stored in the target and the target will be returned. Otherwise, a new Quat will be created and returned.
+
+``` ts
+ecs.math.quat.axisAngle(aa: Vec3Source, target?: Quat) // -> quat
+```
+
+### from
+
+Create a Quat from an object with x, y, z, w properties.
+
+``` ts
+ecs.math.quat.from({x, y, z, w}: {x: number, y: number, z: number, w: number}) // -> quat
+```
+
+### lookAt
+
+Create a Quat representing the rotation required for an object positioned at ‘eye’ to look at an object positioned at ‘target’, with the given ‘up’ vector.
+
+``` ts
+ecs.math.quat.lookAt(eye: Vec3Source, target: Vec3Source, up: Vec3Source) // -> quat
+```
+
+### pitchYawRollDegrees
+
+Construct a quaternion from a pitch / yaw / roll representation, also known as YXZ Euler angles. Rotation is specified in degrees.
+
+``` ts
+ecs.math.quat.pitchYawRollDegrees(v: Vec3Source) // -> quat
+```
+
+### pitchYawRollRadians
+
+Construct a quaternion from a pitch / yaw / roll representation, also known as YXZ Euler angles. Rotation is specified in radians.
+
+``` ts
+ecs.math.quat.pitchYawRollRadians(v: Vec3Source) // -> quat
+```
+
+### xDegrees
+
+Create a Quat which represents a rotation about the x-axis. Rotation is specified in degrees.
+
+``` ts
+ecs.math.quat.xDegrees(degrees: number) // -> quat
+```
+
+### xRadians
+
+Create a Quat which represents a rotation about the x-axis. Rotation is specified in radians.
+
+``` ts
+ecs.math.quat.xRadians(radians: number) // -> quat
+```
+
+### xyzw
+
+Create a Quat from x, y, z, w values.
+
+``` ts
+ecs.math.quat.xyzw(x: number, y: number, z: number, w: number) // -> quat
+```
+
+### yDegrees
+
+Create a Quat which represents a rotation about the y-axis. Rotation is specified in degrees.
+
+``` ts
+ecs.math.quat.yDegrees(degrees: number) // -> quat
+```
+
+### yRadians
+
+Create a Quat which represents a rotation about the y-axis. Rotation is specified in radians.
+
+``` ts
+ecs.math.quat.yRadians(radians: number) // -> quat
+```
+
+### zDegrees
+
+Create a Quat which represents a rotation about the z-axis. Rotation is specified in degrees.
+
+``` ts
+ecs.math.quat.zDegrees(degrees: number) // -> quat
+```
+
+### zRadians
+
+Create a Quat which represents a rotation about the z-axis. Rotation is specified in radians.
+
+``` ts
+ecs.math.quat.zRadians(radians: number) // -> quat
+```
+
+### zero
+
+Create a Quat which represents a zero rotation.
+
+``` ts
+ecs.math.quat.zero() // -> quat
+```
+
+## Immutable
+
+The following methods perform calculations using the current value of a Quat without modifying its contents. Methods that return Quat types create new instances. While immutable APIs are generally safer, more readable, and reduce the likelihood of errors, they can become inefficient when a large number of objects are allocated per frame.
+
+:::note
+If garbage collection impacts performance, consider using the Mutable API described below.
+:::
+
+### axisAngle
+
+Convert the quaternion to an axis-angle representation. The direction of the vector gives the axis of rotation, and the magnitude of the vector gives the angle, in radians. If ‘target’ is supplied, the result will be stored in ‘target’ and ‘target’ will be returned. Otherwise, a new Vec3 will be created and returned.
+
+``` ts
+existingQuat.axisAngle(target?: Vec3) // -> vec3
+```
+
+### clone
+
+Create a new quaternion with the same components as this quaternion.
+
+``` ts
+existingQuat.clone() // -> quat
+```
+
+### conjugate
+
+Return the rotational conjugate of this quaternion. The conjugate of a quaternion represents the same rotation in the opposite direction about the rotational axis.
+
+``` ts
+existingQuat.conjugate() // -> quat
+```
+
+### data
+
+Access the quaternion as an array of [x, y, z, w].
+
+``` ts
+existingQuat.data() // -> number[]
+```
+
+### degreesTo
+
+Angle between two quaternions, in degrees.
+
+``` ts
+existingQuat.degreesTo(target: QuatSource) // -> number
+```
+
+### delta
+
+Compute the quaternion required to rotate this quaternion to the target quaternion.
+
+``` ts
+existingQuat.delta(target: QuatSource) // -> quat
+```
+
+### dot
+
+Compute the dot product of this quaternion with another quaternion.
+
+``` ts
+existingQuat.dot(target: QuatSource) // -> number
+```
+
+### equals
+
+Check whether two quaternions are equal, with a specified floating point tolerance.
+
+``` ts
+existingQuat.equals(q: QuatSource, tolerance: number) // -> boolean
+```
+
+### inv
+
+Compute the quaternion which multiplies this quaternion to get a zero rotation quaternion.
+
+``` ts
+existingQuat.inv() // -> quat
+```
+
+### negate
+
+Negate all components of this quaternion. The result is a quaternion representing the same rotation as this quaternion.
+
+``` ts
+existingQuat.negate() // -> quat
+```
+
+### normalize
+
+Get the normalized version of this quaternion with a length of 1.
+
+``` ts
+existingQuat.normalize() // -> quat
+```
+
+### pitchYawRollRadians
+
+Convert the quaternion to pitch, yaw, and roll angles in radians.
+
+``` ts
+existingQuat.pitchYawRollRadians(target?: Vec3) // -> vec3
+```
+
+### pitchYawRollDegrees
+
+Convert the quaternion to pitch, yaw, and roll angles in degrees.
+
+``` ts
+existingQuat.pitchYawRollDegrees(target?: Vec3) // -> vec3
+```
+
+### plus
+
+Add two quaternions together.
+
+``` ts
+existingQuat.plus(q: QuatSource) // -> quat
+```
+
+### radiansTo
+
+Angle between two quaternions, in radians.
+
+``` ts
+existingQuat.radiansTo(target: QuatSource) // -> number
+```
+
+### slerp
+
+Spherical interpolation between two quaternions given a provided interpolation value. If the interpolation is set to 0, then it will return this quaternion. If the interpolation is set to 1, then it will return the target quaternion.
+
+``` ts
+existingQuat.slerp(target: QuatSource, t: number) // -> quat
+```
+
+### times
+
+Multiply two quaternions together.
+
+``` ts
+existingQuat.times(q: QuatSource) // -> quat
+```
+
+### timesVec
+
+Multiply the quaternion by a vector. This is equivalent to converting the quaternion to a rotation matrix and multiplying the matrix by the vector.
+
+``` ts
+existingQuat.timesVec(v: Vec3Source, target?: Vec3) // -> vec3
+```
+
+## Mutable
+
+The following methods perform calculations using the current value of a Quat and modify it directly. These methods correspond to those in the Immutable API above. When returning Quat types, they provide a reference to the same object, allowing for method chaining. While mutable APIs can offer better performance than immutable ones, they tend to be less safe, less readable, and more prone to errors. If the code is unlikely to be called frequently within a single frame, consider using the Immutable API for improved safety and clarity.
+
+### setConjugate
+
+Set this quaternion to its rotational conjugate. The conjugate of a quaternion represents the same rotation in the opposite direction about the rotational axis. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setConjugate() // -> quat
+```
+
+### setDelta
+
+Compute the quaternion required to rotate this quaternion to the target quaternion. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setDelta(target: QuatSource) // -> quat
+```
+
+### setInv
+
+Set this to the quaternion which multiplies this quaternion to get a zero rotation quaternion. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setInv() // -> quat
+```
+
+### setNegate
+
+Negate all components of this quaternion. The result is a quaternion representing the same rotation as this quaternion. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setNegate() // -> quat
+```
+
+### setNormalize
+
+Get the normalized version of this quaternion with a length of 1. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setNormalize() // -> quat
+```
+
+### setPlus
+
+Add this quaternion to another quaternion. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setPlus(q: QuatSource) // -> quat
+```
+
+### setPremultiply
+
+Set this quaternion as the result of q times this quaternion. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setPremultiply(q: QuatSource) // -> quat
+```
+
+### setRotateToward
+
+Rotate this quaternion towards the target quaternion by a given number of radians, clamped to the target. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setRotateToward(target: QuatSource, radians: number) // -> quat
+```
+
+### setSlerp
+
+Spherical interpolation between two quaternions given a provided interpolation value. If the interpolation is set to 0, then it will return this quaternion. If the interpolation is set to 1, then it will return the target quaternion. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setSlerp(target: QuatSource, t: number) // -> quat
+```
+
+### setTimes
+
+Multiply two quaternions together. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setTimes(target: QuatSource) // -> quat
+```
+
+## Set
+
+The following methods set the value of the current Quat object without regard to its current content, replacing whatever was there before.
+
+### makeAxisAngle
+
+Set a Quat from an axis-angle representation. The direction of the vector gives the axis of rotation, and the magnitude of the vector gives the angle, in radians. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makeAxisAngle(aa: Vec3Source) // -> quat
+```
+
+### makePitchYawRollRadians
+
+Set the quaternion to a rotation specified by pitch, yaw, and roll angles in radians. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makePitchYawRollRadians(v: Vec3Source) // -> quat
+```
+
+### makeLookAt
+
+Set the quaternion to a rotation that would cause the eye to look at the target with the given up vector. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makeLookAt(eye: Vec3Source, target: Vec3Source, up: Vec3Source) // -> quat
+```
+
+### makePitchYawRollDegrees
+
+Set the quaternion to a rotation specified by pitch, yaw, and roll angles in degrees. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makePitchYawRollDegrees(v: Vec3Source) // -> quat
+```
+
+### makeXDegrees
+
+Set the quaternion to a rotation about the x-axis (pitch) in degrees. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makeXDegrees(degrees: number) // -> quat
+```
+
+### makeXRadians
+
+Set the quaternion to a rotation about the x-axis (pitch) in radians. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makeXRadians(radians: number) // -> quat
+```
+
+### makeYDegrees
+
+Set the quaternion to a rotation about the y-axis (yaw) in degrees. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makeYDegrees(degrees: number) // -> quat
+```
+
+### makeYRadians
+
+Set the quaternion to a rotation about the y-axis (yaw) in radians. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makeYRadians(radians: number) // -> quat
+```
+
+### makeZDegrees
+
+Set the quaternion to a rotation about the z-axis (roll) in degrees. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makeZDegrees(degrees: number) // -> quat
+```
+
+### makeZRadians
+
+Set the quaternion to a rotation about the z-axis (roll) in radians. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makeZRadians(radians: number) // -> quat
+```
+
+### makeZero
+
+Set the quaternion to a zero rotation. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.makeZero() // -> quat
+```
+
+### setFrom
+
+Set this quaternion to the value in another quaternion. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setFrom(q: QuatSource) // -> quat
+```
+
+### setXyzw
+
+Set the quaternion to the specified x, y, z, and w values. Store the result in this Quat and return this Quat for chaining.
+
+``` ts
+existingQuat.setXyzw(x: number, y: number, z: number, w: number) // -> quat
+```
\ No newline at end of file
diff --git a/docs/studio/api/ecs/math/vec2.md b/docs/studio/api/ecs/math/vec2.md
new file mode 100644
index 0000000..44ae1ae
--- /dev/null
+++ b/docs/studio/api/ecs/math/vec2.md
@@ -0,0 +1,293 @@
+---
+id: vec2
+---
+
+# vec2
+
+Interface representing a 2D vector. A 2D vector is represented by (x, y) coordinates and can represent a point in a plane, a directional vector, or other types of data with two ordered dimensions. Vec2 objects are created with the ecs.math.vec2 Vec2Factory, or through operations on other Vec2 objects.
+
+## Source
+
+The Vec2Source interface represents any object that has x and y properties and hence can be used as a data source to create a Vec2. In addition, Vec2Source can be used as an argument to Vec2 algorithms, meaning that any object with {x: number, y: number} properties can be used.
+
+## Properties
+
+Vec2 has the following enumerable properties:
+
+``readonly x: number`` Access the x component of the vector.
+
+``readonly y: number`` Access the y component of the vector.
+
+## Factory
+
+### from
+
+Create a Vec2 from a Vec2, or another object with x, y properties.
+
+``` ts
+ecs.math.vec2.from({x, y}: {x: number, y: number}) // -> vec2
+```
+
+### one
+
+Create a vec2 where all elements are set to one. This is equivalent to ```vec2.from({x: 1, y: 1})```.
+
+``` ts
+ecs.math.vec2.one() // -> vec2
+```
+
+### scale
+
+Create a vec2 with all elements set to the scale value s. This is equivalent to ```vec2.from({x: s, y: s})```.
+
+``` ts
+ecs.math.vec2.scale(s: number) // -> vec2
+```
+
+### xy
+
+Create a Vec2 from x, y values. This is equivalent to ```vec2.from({x, y})```.
+
+``` ts
+ecs.math.vec2.xy(x: number, y: number) // -> vec2
+```
+
+### zero
+
+Create a vec2 where all elements are set to zero. This is equivalent to ```vec2.from({x: 0, y: 0})```.
+
+``` ts
+ecs.math.vec2.zero() // -> vec2
+```
+
+## Immutable
+
+The following methods perform computations based on the current value of a Vec2 but do not modify its contents. Methods that return Vec2 types return new objects. Immutable APIs are typically safer, more readable, and less error-prone than mutable APIs, but may be inefficient in situations where thousands of objects are allocated each frame.
+
+:::note
+If garbage collection impacts performance, consider using the Mutable API described below.
+:::
+
+### clone
+
+Create a new vector with the same components as this vector.
+
+``` ts
+existingVec2.clone() // -> vec2
+```
+
+### cross
+
+Compute the cross-product of this vector and another vector. For 2D vectors, the cross-product is the z component of the 3D cross-product of the two vectors, treating each as a 3D vector with a z component of 0.
+
+``` ts
+existingVec2.cross(v: Vec2Source) // -> number
+```
+
+### distanceTo
+
+Compute the Euclidean distance between this vector and another vector.
+
+``` ts
+existingVec2.distanceTo(v: Vec2Source) // -> number
+```
+
+### divide
+
+Element-wise vector division.
+
+``` ts
+existingVec2.divide(v: Vec2Source) // -> vec2
+```
+
+### dot
+
+Compute the dot product of this vector and another vector.
+
+``` ts
+existingVec2.dot(v: Vec2Source) // -> number
+```
+
+### equals
+
+Check whether two vectors are equal, with a specified floating point tolerance.
+
+``` ts
+existingVec2.equals(v: Vec2Source, tolerance: number) // -> boolean
+```
+
+### length
+
+Length of the vector.
+
+``` ts
+existingVec2.length() // -> number
+```
+
+### minus
+
+Subtract a vector from this vector.
+
+``` ts
+existingVec2.minus(v: Vec2Source) // -> vec2
+```
+
+### mix
+
+Compute a linear interpolation between this vector and another vector v with a factor t such that the result is thisVec * (1 - t) + v * t. The factor t should be between zero and 1.
+
+``` ts
+existingVec2.mix(v: Vec2Source, t: number) // -> vec2
+```
+
+### normalize
+
+Return a new vector with the same direction as this vector, but with a length of 1.
+
+``` ts
+existingVec2.normalize() // -> vec2
+```
+
+### plus
+
+Add two vectors together.
+
+``` ts
+existingVec2.plus(v: Vec2Source) // -> vec2
+```
+
+### scale
+
+Multiply the vector by a scalar.
+
+``` ts
+existingVec2.scale(s: number) // -> vec2
+```
+
+### times
+
+Element-wise vector multiplication.
+
+``` ts
+existingVec2.times(v: Vec2Source) // -> vec2
+```
+
+## Mutable
+
+The following methods perform computations based on the current value of a Vec2 and modify its contents in place. They are parallel to methods in the Immutable API above. Methods that return Vec2 types return a reference to the current object for convenient method chaining. Mutable APIs can be more performant than Immutable APIs, but are typically less safe, less readable, and more error-prone.
+
+### setDivide
+
+Element-wise vector division. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setDivide(v: Vec2Source) // -> vec2
+```
+
+### setMinus
+
+Subtract a vector from this vector. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setMinus(v: Vec2Source) // -> vec2
+```
+
+### setMix
+
+Compute a linear interpolation between this vector and another vector v with a factor t such that the result is thisVec * (1 - t) + v * t. The factor t should be between 0 and 1. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setMix(v: Vec2Source, t: number) // -> vec2
+```
+
+### setNormalize
+
+Set the vector to be a version of itself with the same direction but with length 1. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setNormalize() // -> vec2
+```
+
+### setPlus
+
+Add two vectors together. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setPlus(v: Vec2Source) // -> vec2
+```
+
+### setScale
+
+Multiply the vector by a scalar. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setScale(s: number) // -> vec2
+```
+
+### setTimes
+
+Element-wise vector multiplication. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setTimes(v: Vec2Source) // -> vec2
+```
+
+### setX
+
+Set the Vec2's x component. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setX(v: number) // -> vec2
+```
+
+### setY
+
+Set the Vec2's y component. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setY(v: number) // -> vec2
+```
+
+## Set
+
+The following methods set the value of the current Vec2 object without regard to its current content, replacing whatever was there before.
+
+### makeOne
+
+Set the Vec2 to be all ones. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.makeOne() // -> vec2
+```
+
+### makeScale
+
+Set the Vec2 to have all components set to the scale value s. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.makeScale(s: number) // -> vec2
+```
+
+### makeZero
+
+Set the Vec2 to be all zeros. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.makeZero() // -> vec2
+```
+
+### setFrom
+
+Set this Vec2 to have the same value as another Vec2 or another object with x and y properties. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setFrom(source: Vec2Source) // -> vec2
+```
+
+### setXy
+
+Set the Vec2's x and y components. Store the result in this Vec2 and return this Vec2 for chaining.
+
+``` ts
+existingVec2.setXy(x: number, y: number) // -> vec2
+```
\ No newline at end of file
diff --git a/docs/studio/api/ecs/math/vec3.md b/docs/studio/api/ecs/math/vec3.md
new file mode 100644
index 0000000..47b89c8
--- /dev/null
+++ b/docs/studio/api/ecs/math/vec3.md
@@ -0,0 +1,319 @@
+---
+id: vec3
+---
+
+# vec3
+
+Interface representing a 3D vector. A 3D vector is represented by (x, y, z) coordinates and can represent a point in space, a directional vector, or other types of data with three ordered dimensions. 3D vectors can be multiplied by 4x4 matrices (Mat4) using homogeneous coordinate math, enabling efficient 3D geometry computation. Vec3 objects are created with the ecs.math.vec3 Vec3Factory, or through operations on other Vec3 objects.
+
+## Source
+
+The Vec3Source interface represents any object that has x, y, and z properties and hence can be used as a data source to create a Vec3. In addition, Vec3Source can be used as an argument to Vec3 algorithms, meaning that any object with {x: number, y: number, z: number} properties can be used.
+
+## Properties
+
+Vec3 has the following enumerable properties:
+
+``readonly x: number`` Access the x component of the vector.
+
+``readonly y: number`` Access the y component of the vector.
+
+``readonly z: number`` Access the z component of the vector.
+
+## Factory
+
+### from
+
+Create a Vec3 from a Vec3, or another object with x, y, z properties.
+
+``` ts
+ecs.math.vec3.from({x, y, z}: {x: number, y: number, z: number}) // -> vec3
+```
+
+### one
+
+Create a vec3 where all elements are set to one. This is equivalent to ```vec3.from({x: 1, y: 1, z: 1})```.
+
+``` ts
+ecs.math.vec3.one() // -> vec3
+```
+
+### scale
+
+Create a vec3 with all elements set to the scale value s. This is equivalent to ```vec3.from({x: s, y: s, z: s})```.
+
+``` ts
+ecs.math.vec3.scale(s: number) // -> vec3
+```
+
+### xyz
+
+Create a Vec3 from x, y, z values. This is equivalent to ```vec3.from({x, y, z})```.
+
+``` ts
+ecs.math.vec3.xyz(x: number, y: number, z: number) // -> vec3
+```
+
+### zero
+
+Create a vec3 where all elements are set to zero. This is equivalent to ```vec3.from({x: 0, y: 0, z: 0})```.
+
+``` ts
+ecs.math.vec3.zero() // -> vec3
+```
+
+## Immutable
+
+The following methods perform computations based on the current value of a Vec3 but do not modify its contents. Methods that return Vec3 types return new objects. Immutable APIs are typically safer, more readable, and less error-prone than mutable APIs, but may be inefficient in situations where thousands of objects are allocated each frame.
+
+:::note
+If garbage collection impacts performance, consider using the Mutable API described below.
+:::
+
+### clone
+
+Create a new vector with the same components as this vector.
+
+``` ts
+existingVec3.clone() // -> vec3
+```
+
+### cross
+
+Compute the cross-product of this vector and another vector.
+
+``` ts
+existingVec3.cross(v: Vec3Source) // -> vec3
+```
+
+### data
+
+Access the vector as a homogeneous array (four dimensions).
+
+``` ts
+existingVec3.data() // -> number[]
+```
+
+### distanceTo
+
+Compute the Euclidean distance between this vector and another vector.
+
+``` ts
+existingVec3.distanceTo(v: Vec3Source) // -> number
+```
+
+### divide
+
+Element-wise vector division.
+
+``` ts
+existingVec3.divide(v: Vec3Source) // -> vec3
+```
+
+### dot
+
+Compute the dot product of this vector and another vector.
+
+``` ts
+existingVec3.dot(v: Vec3Source) // -> number
+```
+
+### equals
+
+Check whether two vectors are equal, with a specified floating point tolerance.
+
+``` ts
+existingVec3.equals(v: Vec3Source, tolerance: number) // -> boolean
+```
+
+### length
+
+Length of the vector.
+
+``` ts
+existingVec3.length() // -> number
+```
+
+### minus
+
+Subtract a vector from this vector.
+
+``` ts
+existingVec3.minus(v: Vec3Source) // -> vec3
+```
+
+### mix
+
+Compute a linear interpolation between this vector and another vector v with a factor t such that the result is thisVec * (1 - t) + v * t. The factor t should be between zero and 1.
+
+``` ts
+existingVec3.mix(v: Vec3Source, t: number) // -> vec3
+```
+
+### normalize
+
+Return a new vector with the same direction as this vector, but with a length of 1.
+
+``` ts
+existingVec3.normalize() // -> vec3
+```
+
+### plus
+
+Add two vectors together.
+
+``` ts
+existingVec3.plus(v: Vec3Source) // -> vec3
+```
+
+### scale
+
+Multiply the vector by a scalar.
+
+``` ts
+existingVec3.scale(s: number) // -> vec3
+```
+
+### times
+
+Element-wise vector multiplication.
+
+``` ts
+existingVec3.times(v: Vec3Source) // -> vec3
+```
+
+## Mutable
+
+The following methods perform computations based on the current value of a Vec3 and modify its contents in place. They are parallel to methods in the Immutable API above. Methods that return Vec3 types return a reference to the current object for convenient method chaining. Mutable APIs can be more performant than Immutable APIs, but are typically less safe, less readable, and more error-prone.
+
+### setCross
+
+Compute the cross-product of this vector and another vector. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setCross(v: Vec3Source) // -> vec3
+```
+
+### setDivide
+
+Element-wise vector division. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setDivide(v: Vec3Source) // -> vec3
+```
+
+### setMinus
+
+Subtract a vector from this vector. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setMinus(v: Vec3Source) // -> vec3
+```
+
+### setMix
+
+Compute a linear interpolation between this vector and another vector v with a factor t such that the result is thisVec * (1 - t) + v * t. The factor t should be between 0 and 1. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setMix(v: Vec3Source, t: number) // -> vec3
+```
+
+### setNormalize
+
+Set the vector to be a version of itself with the same direction but with length 1. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setNormalize() // -> vec3
+```
+
+### setPlus
+
+Add two vectors together. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setPlus(v: Vec3Source) // -> vec3
+```
+
+### setScale
+
+Multiply the vector by a scalar. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setScale(s: number) // -> vec3
+```
+
+### setTimes
+
+Element-wise vector multiplication. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setTimes(v: Vec3Source) // -> vec3
+```
+
+### setX
+
+Set the Vec3's x component. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setX(v: number) // -> vec3
+```
+
+### setY
+
+Set the Vec3's y component. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setY(v: number) // -> vec3
+```
+
+### setZ
+
+Set the Vec3's z component. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setZ(v: number) // -> vec3
+```
+
+## Set
+
+The following methods set the value of the current Vec3 object without regard to its current content, replacing whatever was there before.
+
+### makeOne
+
+Set the Vec3 to be all ones. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.makeOne() // -> vec3
+```
+
+### makeScale
+
+Set the Vec3 to have all components set to the scale value s. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.makeScale(s: number) // -> vec3
+```
+
+### makeZero
+
+Set the Vec3 to be all zeros. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.makeZero() // -> vec3
+```
+
+### setFrom
+
+Set this Vec3 to have the same value as another Vec3 or another object with x, y, and z properties. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setFrom(source: Vec3Source) // -> vec3
+```
+
+### setXyz
+
+Set the Vec3's x, y, and z components. Store the result in this Vec3 and return this Vec3 for chaining.
+
+``` ts
+existingVec3.setXyz(x: number, y: number, z: number) // -> vec3
+```
\ No newline at end of file
diff --git a/docs/studio/api/ecs/orbitcontrols.mdx b/docs/studio/api/ecs/orbitcontrols.mdx
new file mode 100644
index 0000000..bbe34f2
--- /dev/null
+++ b/docs/studio/api/ecs/orbitcontrols.mdx
@@ -0,0 +1,45 @@
+---
+id: orbit-controls
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import OrbitControlsTable from "../../../../gen/tables/OrbitControls.md"
+
+# OrbitControls
+
+## Description
+
+This component lets you orbit around an entity with constraints.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/particleemitter.mdx b/docs/studio/api/ecs/particleemitter.mdx
new file mode 100644
index 0000000..830a9ca
--- /dev/null
+++ b/docs/studio/api/ecs/particleemitter.mdx
@@ -0,0 +1,40 @@
+---
+id: particle-emitter
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import ParticleEmitterTable from "../../../../gen/tables/ParticleEmitter.md"
+
+# ParticleEmitter
+
+## Description
+
+This component makes an entity emit particles.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/persistent.mdx b/docs/studio/api/ecs/persistent.mdx
new file mode 100644
index 0000000..109fec1
--- /dev/null
+++ b/docs/studio/api/ecs/persistent.mdx
@@ -0,0 +1,23 @@
+---
+id: persistent
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+
+# Persistent
+
+## Description
+
+This component marks the entity it is attached to as Persistent, which means it will not be despawned when unmounting the Space (switching Spaces). Can only be set on root objects.
+
+## Properties
+
+None
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/physics.md b/docs/studio/api/ecs/physics.md
new file mode 100644
index 0000000..cd78317
--- /dev/null
+++ b/docs/studio/api/ecs/physics.md
@@ -0,0 +1,113 @@
+---
+id: physics
+---
+
+# Physics
+
+## Description
+
+This library has methods for applying physics and setting Colliders on Entities.
+
+## Functions
+
+### applyForce
+
+You can directly apply forces (linear and angular) to any entity with a physics collider.
+These forces are applied in the next physics simulation update, which takes place at regular intervals.
+The function accepts a 3D vector to define the force direction and magnitude.
+
+``` ts
+ecs.physics.applyForce(world, eid, forceX, forceY, forceZ) // -> void
+```
+
+### applyImpulse
+
+This function is used to apply a one-time impulse force to a physics collider, altering its velocity based on the given impulse vector.
+This method is useful for events that require a quick, single action response, such as jumping, punching, or a sudden push.
+
+``` ts
+ecs.physics.applyImpulse(world, eid, impulseX, impulseY, impulseZ) // -> void
+```
+
+### applyTorque
+
+You can directly apply torques (angular forces) to any entity with a physics collider.
+These torques are applied in the next physics simulation update, which takes place at regular intervals.
+The function accepts a 3D vector to define the torque axis and magnitude.
+
+``` ts
+ecs.physics.applyTorque(world, eid, torqueX, torqueY, torqueZ) // -> void
+```
+
+### getWorldGravity
+
+This is a simple getter function that returns the current force of gravity applied to every object in the scene.
+The return value might change depending on the time the function was executed.
+
+``` ts
+ecs.physics.getWorldGravity(world) // -> number
+```
+
+### registerConvexShape
+
+Register a convex shape.
+
+``` ts
+ecs.physics.registerConvexShape(world, vertices) // -> eid of the registered shape
+```
+
+### getAngularVelocity
+
+Get the angular velocity of an entity.
+
+``` ts
+ecs.physics.getAngularVelocity(world, eid) // -> {x: number, y: number, z: number}
+```
+
+### setAngularVelocity
+
+Set the angular velocity of an entity.
+
+``` ts
+ecs.physics.setAngularVelocity(world, eid, velocityX, velocityY, velocityZ) // -> void
+```
+
+### getLinearVelocity
+
+Get the linear velocity of an entity.
+
+``` ts
+ecs.physics.getLinearVelocity(world, eid) // -> {x: number, y: number, z: number}
+```
+
+### setLinearVelocity
+
+Set the linear velocity of an entity.
+
+``` ts
+ecs.physics.setLinearVelocity(world, eid, velocityX, velocityY, velocityZ) // -> void
+```
+
+### getWorldGravity
+
+Get the world gravity.
+
+``` ts
+ecs.physics.getWorldGravity(world) // -> number
+```
+
+### setWorldGravity
+
+Set the world gravity.
+
+``` ts
+ecs.physics.setWorldGravity(world, gravity) // -> void
+```
+
+### unregisterConvexShape
+
+Unregister a convex shape.
+
+``` ts
+ecs.physics.unregisterConvexShape(world, id) // -> void
+```
diff --git a/docs/studio/api/ecs/position.mdx b/docs/studio/api/ecs/position.mdx
new file mode 100644
index 0000000..bbb7bc3
--- /dev/null
+++ b/docs/studio/api/ecs/position.mdx
@@ -0,0 +1,31 @@
+---
+id: position
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import PositionTable from "../../../../gen/tables/Position.md"
+
+# Position
+
+## Description
+
+This component controls how the entity is positioned, **in local space**.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/quaternion.mdx b/docs/studio/api/ecs/quaternion.mdx
new file mode 100644
index 0000000..5c98936
--- /dev/null
+++ b/docs/studio/api/ecs/quaternion.mdx
@@ -0,0 +1,33 @@
+---
+id: quaternion
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import QuaternionTable from "../../../../gen/tables/Quaternion.md"
+
+# Quaternion
+
+## Description
+
+This component controls how the entity is rotated.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/scale.mdx b/docs/studio/api/ecs/scale.mdx
new file mode 100644
index 0000000..c59ad30
--- /dev/null
+++ b/docs/studio/api/ecs/scale.mdx
@@ -0,0 +1,32 @@
+---
+id: scale
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import ScaleTable from "../../../../gen/tables/Scale.md"
+
+# Scale
+
+## Description
+
+This component controls how the entity is scaled.
+
+## Properties
+
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/shadow.mdx b/docs/studio/api/ecs/shadow.mdx
new file mode 100644
index 0000000..89116c3
--- /dev/null
+++ b/docs/studio/api/ecs/shadow.mdx
@@ -0,0 +1,29 @@
+---
+id: shadow
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import ShadowTable from "../../../../gen/tables/Shadow.md"
+
+# Shadow
+
+## Description
+
+This component controls how the entity handles shadows.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/splat.mdx b/docs/studio/api/ecs/splat.mdx
new file mode 100644
index 0000000..393cdfd
--- /dev/null
+++ b/docs/studio/api/ecs/splat.mdx
@@ -0,0 +1,28 @@
+---
+id: splat
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import SplatTable from "../../../../gen/tables/Splat.md"
+
+# Splat
+
+## Description
+
+This component establishes a Gaussian Splat on an entity.
+
+## Properties
+
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/ui.mdx b/docs/studio/api/ecs/ui.mdx
new file mode 100644
index 0000000..8dc170e
--- /dev/null
+++ b/docs/studio/api/ecs/ui.mdx
@@ -0,0 +1,30 @@
+---
+id: ui
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import UiTable from "../../../../gen/tables/Ui.md"
+
+# Ui
+
+## Description
+
+This component establishes a User Interface in relation to the entity.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/ecs/videocontrols.mdx b/docs/studio/api/ecs/videocontrols.mdx
new file mode 100644
index 0000000..14b7db6
--- /dev/null
+++ b/docs/studio/api/ecs/videocontrols.mdx
@@ -0,0 +1,37 @@
+---
+id: video-controls
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import VideoControlsTable from "../../../../gen/tables/VideoControls.md"
+
+# VideoControls
+
+## Description
+
+This component allows entities to control video playback on textures applied to materials. It is automatically added when a video is set through the Studio configurator. If the video is added programmatically, this component must be added manually to enable playback controls.
+
+## Properties
+
+
+
+## Functions
+
+
diff --git a/docs/studio/api/events/_category_.json b/docs/studio/api/events/_category_.json
new file mode 100644
index 0000000..88545e0
--- /dev/null
+++ b/docs/studio/api/events/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Events Reference",
+ "position": 3
+}
\ No newline at end of file
diff --git a/docs/studio/api/events/assets.mdx b/docs/studio/api/events/assets.mdx
new file mode 100644
index 0000000..ca784a7
--- /dev/null
+++ b/docs/studio/api/events/assets.mdx
@@ -0,0 +1,159 @@
+---
+id: assets
+---
+
+import {EventExample} from '@site/src/components/event-example'
+
+# Asset Events
+
+Asset events are emitted on the asset entity and bubble up to the [world.events.globalId](/docs/studio/api/world/events/#properties).
+
+## 3D Model
+
+### GLTF_MODEL_LOADED
+
+Emitted when a model has loaded.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|---------------------------------------------------------|--------------------|
+| model | [Group](https://threejs.org/docs/#api/en/objects/Group) | The three.js Group |
+
+#### Example
+
+
+
+### GLTF_ANIMATION_FINISHED
+
+Emitted when all loops of an animation clip have finished.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|--------|---------------------------|
+| name | string | The name of the animation |
+
+#### Example
+
+
+
+### GLTF_ANIMATION_LOOP
+
+Emitted when a single loop of the animation clip has finished.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|--------|---------------------------|
+| name | string | The name of the animation |
+
+#### Example
+
+
+
+## Gaussian Splat
+
+### SPLAT_MODEL_LOADED
+
+Emitted when a Splat has loaded.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|------------------------------------------------------------|-----------------------|
+| model | [Object3D](https://threejs.org/docs/#api/en/core/Object3D) | The three.js Object3D |
+
+#### Example
+
+
+
+## Audio
+
+### AUDIO_CAN_PLAY_THROUGH
+
+Emitted when an entity has the capability to play Audio.
+
+#### Properties
+
+None.
+
+#### Example
+
+
+
+### AUDIO_END
+
+Emitted when audio has finished playing on an entity.
+
+#### Properties
+
+None.
+
+#### Example
+
+
+
+## Video
+
+### VIDEO_CAN_PLAY_THROUGH
+
+Emitted when an entity has the capability to play the video.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|--------|---------------------------|
+| src | string | The video source |
+
+#### Example
+
+
+
+### VIDEO_END
+
+Emitted when the video has finished playing on an entity.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|--------|---------------------------|
+| src | string | The video source |
+
+#### Example
+
+
diff --git a/docs/studio/api/events/camera.mdx b/docs/studio/api/events/camera.mdx
new file mode 100644
index 0000000..376ce04
--- /dev/null
+++ b/docs/studio/api/events/camera.mdx
@@ -0,0 +1,86 @@
+---
+id: camera
+---
+
+import {EventExample} from '@site/src/components/event-example'
+
+# Camera Events
+
+Camera events are emitted on the [world.events.globalId](/docs/studio/api/world/events/#properties).
+
+## Events
+
+### ACTIVE_CAMERA_CHANGE
+
+Emitted when the active camera changes.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|--------|-------------------|
+| camera | Camera | The active camera |
+
+#### Example
+
+
+
+### ACTIVE_CAMERA_EID_CHANGE
+
+Emitted when the active camera eid changes.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|--------|--------------------------|
+| eid | eid | eid of the active camera |
+
+#### Example
+
+
+
+### XR_CAMERA_EDIT
+
+Emitted when any XR attribute is changed on the active camera.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|--------|-------------------|
+| camera | Camera | The active camera |
+
+#### Example
+
+
+
+### XR_CAMERA_STOP
+
+Emitted when the XR camera stops.
+
+#### Properties
+
+None.
+
+#### Example
+
+
diff --git a/docs/studio/api/events/events.md b/docs/studio/api/events/events.md
new file mode 100644
index 0000000..e83d178
--- /dev/null
+++ b/docs/studio/api/events/events.md
@@ -0,0 +1,25 @@
+# Studio Events Reference
+
+Events are a core part of building dynamic and interactive experiences in Studio. This reference outlines the different types of events you can listen for in your projects.
+
+## Event Categories
+
+- [XR Events](/docs/studio/api/events/xr): Events emitted by 8th Wall camera pipeline modules like `reality` and `facecontroller`, covering things like Image Target tracking and Face detection.
+
+- [Asset Events](/docs/studio/api/events/assets): Events related to assets, such as asset loading and playback events.
+
+- [Camera Events](/docs/studio/api/events/camera): Events related to camera state changes, including active camera switches, XR camera attribute edits, and active camera entity changes.
+
+- [General Events](/docs/studio/api/events/general): Core world-level events triggered within Studio experiences, such as an active space change.
+
+- [Input Events](/docs/studio/api/events/input): Events triggered by user interactions, including touch, gesture, and UI click events. Covers both simple taps and complex multi-touch gestures.
+
+- [Physics Events](/docs/studio/api/events/physics): Events emitted when physical interactions occur between entities, such as collisions starting or ending.
+
+---
+
+Each event section provides:
+
+- A description of when the event is emitted
+- Properties (if any) passed with the event
+- Code examples showing how to listen for the event globally or on specific entities
diff --git a/docs/studio/api/events/general.mdx b/docs/studio/api/events/general.mdx
new file mode 100644
index 0000000..0386d32
--- /dev/null
+++ b/docs/studio/api/events/general.mdx
@@ -0,0 +1,27 @@
+---
+id: general
+---
+
+import {EventExample} from '@site/src/components/event-example'
+
+# General Events
+
+## Events
+
+### ACTIVE_SPACE_CHANGE
+
+Emitted on the [world.events.globalId](/docs/studio/api/world/events/#properties) when the world loads a Space (the Space is not guaranteed to have finished loading).
+
+#### Properties
+
+None.
+
+#### Example
+
+
diff --git a/docs/studio/api/events/input.mdx b/docs/studio/api/events/input.mdx
new file mode 100644
index 0000000..c77c186
--- /dev/null
+++ b/docs/studio/api/events/input.mdx
@@ -0,0 +1,13 @@
+---
+id: input
+---
+
+import InputEvents from '/src/components/_input-events.mdx'
+
+# Input Events
+
+## Description
+
+This library includes events that correspond to different types of input.
+
+
diff --git a/docs/studio/api/events/media-recorder.mdx b/docs/studio/api/events/media-recorder.mdx
new file mode 100644
index 0000000..c3a1949
--- /dev/null
+++ b/docs/studio/api/events/media-recorder.mdx
@@ -0,0 +1,167 @@
+---
+id: media-recorder
+---
+
+import {EventExample} from '@site/src/components/event-example'
+
+# Media Recorder Events
+
+## Description
+
+This Media Recorder allows you to capture screenshots and record video of your Studio project at runtime.
+
+Media Recorder events are emitted on the [world.events.globalId](/docs/studio/api/world/events/#properties).
+
+## Events
+
+### RECORDER_SCREENSHOT_READY
+
+Emitted when the screenshot is ready.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|------|-------------|
+| blob | Blob | The JPEG image blob of the screenshot |
+
+#### Example
+
+
+
+
+### RECORDER_VIDEO_STARTED
+
+Emitted when recording has started.
+
+#### Properties
+
+None.
+
+#### Example
+
+
+
+### RECORDER_VIDEO_STOPPED
+
+Emitted when recording has stopped.
+
+#### Properties
+
+None.
+
+#### Example
+
+
+
+### RECORDER_VIDEO_ERROR
+
+Emitted when there is an error.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|--------|-----------------------|
+| message | string | The error message |
+| name | string | The error name |
+| stack | string | The error stack trace |
+
+#### Example
+
+
+
+### RECORDER_VIDEO_READY
+
+Emitted when recording has completed and video is ready.
+
+#### Properties
+
+| Property | Type | Description |
+|-----------|------|----------------------------------|
+| videoBlob | Blob | The recorded video blob |
+
+#### Example
+
+
+
+### RECORDER_PREVIEW_READY
+
+Emitted when a previewable, but not sharing-optimized, video is ready (Android/Desktop only).
+
+#### Properties
+
+| Property | Type | Description |
+|-----------|------|------------------------------------|
+| videoBlob | Blob | The preview video blob |
+
+#### Example
+
+
+
+### RECORDER_FINALIZE_PROGRESS
+
+Emitted when the media recorder is making progress in the final export (Android/Desktop only).
+
+#### Properties
+
+| Property | Type | Description |
+|----------|--------|------------------------------------|
+| progress | number | Finalization progress (0 to 1) |
+
+#### Example
+
+
+
+### RECORDER_PROCESS_FRAME
+
+Emitted for each video frame processed during recording.
+
+#### Properties
+
+| Property | Type | Description |
+|-----------|-----------|-------------------------------------|
+| frame | ImageData | The processed video frame |
+| timestamp | number | The timestamp of the frame (ms) |
+
+#### Example
+
+
diff --git a/docs/studio/api/events/physics.mdx b/docs/studio/api/events/physics.mdx
new file mode 100644
index 0000000..f1489d2
--- /dev/null
+++ b/docs/studio/api/events/physics.mdx
@@ -0,0 +1,60 @@
+---
+id: physics
+---
+
+import {EventExample} from '@site/src/components/event-example'
+
+# Physics Events
+
+## Events
+
+### COLLISION_START_EVENT
+Emitted when the entity has started colliding with another entity.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|------|---------------------------------|
+| other | eid | The eid of the colliding entity |
+
+#### Example
+
+
+
+### COLLISION_END_EVENT
+Emitted when the entity has stopped colliding with another entity.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|------|---------------------------------|
+| other | eid | The eid of the colliding entity |
+
+#### Example
+
+
+
+### UPDATE_EVENT
+Emitted on the [world.events.globalId](/docs/studio/api/world/events/#properties) immediately after the physics update before rendering.
+
+#### Properties
+
+None.
+
+#### Example
+
+
diff --git a/docs/studio/api/events/xr/_category_.json b/docs/studio/api/events/xr/_category_.json
new file mode 100644
index 0000000..a7c06af
--- /dev/null
+++ b/docs/studio/api/events/xr/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "XR Events",
+ "position": 1
+}
\ No newline at end of file
diff --git a/docs/studio/api/events/xr/face.mdx b/docs/studio/api/events/xr/face.mdx
new file mode 100644
index 0000000..892372c
--- /dev/null
+++ b/docs/studio/api/events/xr/face.mdx
@@ -0,0 +1,401 @@
+---
+id: face
+sidebar_position: 1
+---
+
+import {EventExample} from '@site/src/components/event-example'
+
+# Face Effects Events
+
+## Types
+
+### TransformObject {#TransformObject}
+| Property | Type | Description |
+|--------------|----------------|-------------------------------------------------------------------------|
+| position | `{x, y, z}` | The 3d position of the located face. |
+| rotation | `{w, x, y, z}` | The 3d local orientation of the located face. |
+| scale | `Number` | A scale factor that should be applied to objects attached to this face. |
+| scaledWidth | `Number` | Approximate width of the head in the scene when multiplied by scale. |
+| scaledHeight | `Number` | Approximate height of the head in the scene when multiplied by scale. |
+| scaledDepth | `Number` | Approximate depth of the head in the scene when multiplied by scale. |
+
+## Events
+
+### FACE_LOADING
+This event is emitted by Face Effects when loading begins for additional face AR resources.
+
+#### Properties
+| Property | Type | Description |
+|--------------------|---------------|-----------------------------------------------------------------------------------------------------------------------------|
+| maxDetections | `Number` | The maximum number of faces that can be simultaneously processed. |
+| pointsPerDetection | `Number` | Number of vertices that will be extracted per face. |
+| indices | `[{a, b, c}]` | Indexes into the vertices array that form the triangles of the requested mesh, as specified with meshGeometry on configure. |
+| uvs | `[{u, v}]` | uv positions into a texture map corresponding to the returned vertex points. |
+
+#### Example
+
+
+
+### FACE_SCANNING
+This event is emitted by Face Effects when all face AR resources have been loaded and scanning has begun.
+
+#### Properties
+| Property | Type | Description |
+|--------------------|---------------|-----------------------------------------------------------------------------------------------------------------------------|
+| maxDetections | `Number` | The maximum number of faces that can be simultaneously processed. |
+| pointsPerDetection | `Number` | Number of vertices that will be extracted per face. |
+| indices | `[{a, b, c}]` | Indexes into the vertices array that form the triangles of the requested mesh, as specified with meshGeometry on configure. |
+| uvs | `[{u, v}]` | uv positions into a texture map corresponding to the returned vertex points. |
+
+#### Example
+
+
+
+### FACE_FOUND
+This event is emitted by Face Effects when a face is first found.
+
+#### Properties
+| Property | Type | Description |
+|------------------|---------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| id | `Number` | A numerical id of the located face |
+| transform | [`TransformObject`](#TransformObject) | Transform information of the located face. |
+| vertices | `[{x, y, z}]` | Position of face points, relative to transform. |
+| normals | `[{x, y, z}]` | Normal direction of vertices, relative to transform. |
+| attachmentPoints | `{ name, position: {x,y,z} }` | See [`XR8.FaceController.AttachmentPoints`](https://www.8thwall.com/docs/engine/api/facecontroller/attachmentpoints/) for list of available attachment points. `position` is relative to the transform. |
+| uvsInCameraFrame | `[{u, v}]` | The list of uv positions in the camera frame corresponding to the returned vertex points. |
+
+#### Example
+
+
+
+### FACE_UPDATED
+This event is emitted by Face Effects when faces are subsequently found.
+
+#### Properties
+| Property | Type | Description |
+|------------------|---------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| id | `Number` | A numerical id of the located face |
+| transform | [`TransformObject`](#TransformObject) | Transform information of the located face. |
+| vertices | `[{x, y, z}]` | Position of face points, relative to transform. |
+| normals | `[{x, y, z}]` | Normal direction of vertices, relative to transform. |
+| attachmentPoints | `{ name, position: {x,y,z} }` | See [`XR8.FaceController.AttachmentPoints`](https://www.8thwall.com/docs/engine/api/facecontroller/attachmentpoints/) for list of available attachment points. `position` is relative to the transform. |
+| uvsInCameraFrame | `[{u, v}]` | The list of uv positions in the camera frame corresponding to the returned vertex points. |
+
+#### Example
+
+
+
+### FACE_LOST
+This event is emitted by Face Effects when a face is no longer being tracked.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------------|
+| id | `Number` | A numerical id of the face that was lost. |
+
+#### Example
+
+
+
+### FACE_BLINKED
+This event is emitted by Face Effects when a tracked face's eyes blink.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|------------------------------------|
+| id | `Number` | A numerical id of the located face |
+
+#### Example
+
+
+
+### FACE_INTERPUPILLARY_DISTANCE
+This event is emitted by Face Effects when a tracked face's distance in millimeters between the centers of each pupil is first detected.
+
+#### Properties
+| Property | Type | Description |
+|------------------------|----------|------------------------------------------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+| interpupillaryDistance | `Number` | Approximate distance in millimeters between the centers of each pupil. |
+
+#### Example
+
+
+
+### FACE_LEFT_EYEBROW_LOWERED
+This event is emitted by Face Effects when a tracked face's left eyebrow is lowered to its initial position when the face was found.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_LEFT_EYEBROW_RAISED
+This event is emitted by Face Effects when a tracked face's left eyebrow is raised from its initial position when the face was found.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_LEFT_EYE_CLOSED
+This event is emitted by Face Effects when a tracked face's left eye closes.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_LEFT_EYE_OPENED
+This event is emitted by Face Effects when a tracked face's left eye opens.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_LEFT_EYE_WINKED
+This event is emitted by Face Effects when a tracked face's left eye closes and opens within 750ms while the right eye remains open.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_MOUTH_CLOSED
+This event is emitted by Face Effects when a tracked face's mouth closes.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_MOUTH_OPENED
+This event is emitted by Face Effects when a tracked face's mouth opens.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_RIGHT_EYEBROW_LOWERED
+This event is emitted by Face Effects when a tracked face's right eyebrow is lowered to its initial position when the face was found.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_RIGHT_EYEBROW_RAISED
+This event is emitted by Face Effects when a tracked face's right eyebrow is raised from its initial position when the face was found.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+
+### FACE_RIGHT_EYE_CLOSED
+This event is emitted by Face Effects when a tracked face's right eye closes.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_RIGHT_EYE_OPENED
+This event is emitted by Face Effects when a tracked face's right eye opens.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_RIGHT_EYE_WINKED
+This event is emitted by Face Effects when a tracked face's right eye closes and opens within 750ms while the left eye remains open.
+
+#### Properties
+| Property | Type | Description |
+|----------|----------|-------------------------------------|
+| id | `Number` | A numerical id of the located face. |
+
+#### Example
+
+
+
+### FACE_EAR_POINT_FOUND
+This event is emitted by Face Effects when an ear point is found.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|----------|---------------------------------------------------------------------------------------------------------------------|
+| id | `Number` | A numerical id of the located face |
+| point | `String` | Ear point name. One of the following: `leftLobe`, `leftCanal`, `leftHelix`, `rightLobe`, `rightCanal`, `rightHelix` |
+
+#### Example
+
+
+
+### FACE_EAR_POINT_LOST
+This event is emitted by Face Effects when an ear point is lost.
+
+#### Properties
+
+| Property | Type | Description |
+|----------|----------|---------------------------------------------------------------------------------------------------------------------|
+| id | `Number` | A numerical id of the located face |
+| point | `String` | Ear point name. One of the following: `leftLobe`, `leftCanal`, `leftHelix`, `rightLobe`, `rightCanal`, `rightHelix` |
+
+#### Example
+
+
diff --git a/docs/studio/api/events/xr/image-targets.mdx b/docs/studio/api/events/xr/image-targets.mdx
new file mode 100644
index 0000000..0d02458
--- /dev/null
+++ b/docs/studio/api/events/xr/image-targets.mdx
@@ -0,0 +1,175 @@
+---
+id: image-targets
+sidebar_position: 2
+---
+
+import {EventExample} from '@site/src/components/event-example'
+
+# Image Target Events
+
+## Types {#types}
+
+### ImagePropertiesObject {#ImagePropertiesObject}
+`ImagePropertiesObject` is an object with the following properties:
+
+| Property | Type | Description |
+|----------------|-----------|--------------------------------------------|
+| width | `Number` | Width of the image target. |
+| height | `Number` | Height of the image target. |
+| originalWidth | `Number` | Width of the uploaded image. |
+| originalHeight | `Number` | Height of the uploaded image. |
+| isRotated | `boolean` | Whether the image target has been rotated. |
+
+### ImageLoadingObject {#ImageLoadingObject}
+`ImageLoadingObject` is an object with the following properties:
+
+| Property | Type | Description |
+|----------|----------------|------------------------------------------|
+| name | `String` | The image's name. |
+| type | `String` | One of `FLAT`, `CYLINDRICAL`, `CONICAL`. |
+| metadata | `Object` | User metadata. |
+
+### ImageScanningObject {#ImageScanningObject}
+`ImageScanningObject` is an object with the following properties:
+
+| Property | Type | Description |
+|----------|----------------|------------------------------------------|
+| name | `String` | The image's name. |
+| type | `String` | One of `FLAT`, `CYLINDRICAL`, `CONICAL`. |
+| metadata | `Object` | User metadata. |
+| geometry | `Object` | Object containing geometry data. If type=FLAT: `{scaledWidth, scaledHeight}`, else if type=CYLINDRICAL or type=CONICAL: `{height, radiusTop, radiusBottom, arcStartRadians, arcLengthRadians}` |
+
+## Events
+
+### REALITY_IMAGE_FOUND
+
+This event is emitted when an image target is first found.
+
+#### Properties
+| Property | Type | Description |
+|--------------|----------------|--------------------------------------------------------------------------|
+| name | `String` | The image's name. |
+| type | `String` | One of `FLAT`, `CYLINDRICAL`, `CONICAL`. |
+| position | `{x, y, z}` | The 3d position of the located image. |
+| rotation | `{w, x, y, z}` | The 3d local orientation of the located image. |
+| scale | `Number` | A scale factor that should be applied to object attached to this image. |
+| properties | [`ImagePropertiesObject`](#ImagePropertiesObject) | Additional image target properties. |
+| scaledWidth | `Number` | **Only applicable to `FLAT`**. The width of the image in the scene, when multiplied by scale. |
+| scaledHeight | `Number` | **Only applicable to `FLAT`**. The height of the image in the scene, when multiplied by scale. |
+| height | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Height of the curved target. |
+| radiusTop | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Radius of the curved target at the top. |
+| radiusBottom | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Radius of the curved target at the bottom. |
+| arcStartRadians | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Starting angle in radians. |
+| arcLengthRadians | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Central angle in radians. |
+
+#### Example
+
+
+
+### REALITY_IMAGE_LOADING
+
+This event is emitted when detection image loading begins.
+
+#### Properties
+
+| Property | Type | Description |
+|--------------|---------------------------- |------------------------------------------|
+| imageTargets | `Array` | The list of image targets. |
+
+#### Example
+
+<EventExample
+  lines={[
+    `world.events.addListener(world.events.globalId, 'reality.REALITY_IMAGE_LOADING', ({name}) => {`,
+    `  console.log('Loading: ', name)`,
+    `})`,
+  ]}
+/>
+
+### REALITY_IMAGE_LOST
+This event is emitted when an image target is no longer being tracked.
+
+#### Properties
+| Property | Type | Description |
+|------------|----------------|--------------------------------------------------------------------------|
+| name | `String` | The image's name. |
+| type | `String` | One of `FLAT`, `CYLINDRICAL`, `CONICAL`. |
+| position | `{x, y, z}` | The 3d position of the located image. |
+| rotation | `{w, x, y, z}` | The 3d local orientation of the located image. |
+| scale | `Number` | A scale factor that should be applied to object attached to this image. |
+| properties | [`ImagePropertiesObject`](#ImagePropertiesObject) | Additional image target properties. |
+| scaledWidth | `Number` | **Only applicable to `FLAT`**. The width of the image in the scene, when multiplied by scale. |
+| scaledHeight | `Number` | **Only applicable to `FLAT`**. The height of the image in the scene, when multiplied by scale. |
+| height | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Height of the curved target. |
+| radiusTop | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Radius of the curved target at the top. |
+| radiusBottom | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Radius of the curved target at the bottom. |
+| arcStartRadians | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Starting angle in radians. |
+| arcLengthRadians | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Central angle in radians. |
+
+#### Example
+
+
+
+### REALITY_IMAGE_SCANNING
+This event is emitted when all detection images have been loaded and scanning has begun.
+
+#### Properties
+
+| Property | Type | Description |
+|--------------|------------------------------|------------------------------------------|
+| imageTargets | `Array` | The list of image targets. |
+
+#### Example
+
+<EventExample
+  lines={[
+    `world.events.addListener(world.events.globalId, 'reality.REALITY_IMAGE_SCANNING', ({name}) => {`,
+    `  console.log('Scanning: ', name)`,
+    `})`,
+  ]}
+/>
+
+### REALITY_IMAGE_UPDATED
+This event is emitted when an image target changes position, rotation or scale.
+
+#### Properties
+| Property | Type | Description |
+|------------|----------------|--------------------------------------------------------------------------|
+| name | `String` | The image's name. |
+| type | `String` | One of `FLAT`, `CYLINDRICAL`, `CONICAL`. |
+| position | `{x, y, z}` | The 3d position of the located image. |
+| rotation | `{w, x, y, z}` | The 3d local orientation of the located image. |
+| scale | `Number` | A scale factor that should be applied to object attached to this image. |
+| properties | [`ImagePropertiesObject`](#ImagePropertiesObject) | Additional image target properties. |
+| scaledWidth | `Number` | **Only applicable to `FLAT`**. The width of the image in the scene, when multiplied by scale. |
+| scaledHeight | `Number` | **Only applicable to `FLAT`**. The height of the image in the scene, when multiplied by scale. |
+| height | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Height of the curved target. |
+| radiusTop | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Radius of the curved target at the top. |
+| radiusBottom | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Radius of the curved target at the bottom. |
+| arcStartRadians | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Starting angle in radians. |
+| arcLengthRadians | `Number` | **Only applicable to `CYLINDRICAL` or `CONICAL`**. Central angle in radians. |
+
+#### Example
+
+
diff --git a/docs/studio/api/events/xr/world.mdx b/docs/studio/api/events/xr/world.mdx
new file mode 100644
index 0000000..9cfc4dc
--- /dev/null
+++ b/docs/studio/api/events/xr/world.mdx
@@ -0,0 +1,28 @@
+---
+id: world
+sidebar_position: 4
+---
+
+import {EventExample} from '@site/src/components/event-example'
+
+# World Effects Events
+
+## Events {#events}
+
+### REALITY_TRACKING_STATUS
+This event is emitted by World Effects when the engine starts and tracking status or reason changes.
+
+#### Properties {#properties}
+| Property | Type | Description |
+|----------|----------|---------------------------------------|
+| status | `String` | One of `LIMITED` or `NORMAL`. |
+| reason | `String` | One of `INITIALIZING` or `UNDEFINED`. |
+
+#### Example
+
+
diff --git a/docs/studio/api/events/xr/xr.mdx b/docs/studio/api/events/xr/xr.mdx
new file mode 100644
index 0000000..c11e3db
--- /dev/null
+++ b/docs/studio/api/events/xr/xr.mdx
@@ -0,0 +1,27 @@
+import {EventExample} from '@site/src/components/event-example'
+
+# XR Events
+
+XR events are forwarded from the 8th Wall engine. They are emitted on the camera entity and bubble up to the [world.events.globalId](/docs/studio/api/world/events/#properties).
+
+See the lifecycle events for [World Effects](/docs/studio/api/events/xr/world), [Face Effects](/docs/studio/api/events/xr/face), and [Image Targets](/docs/studio/api/events/xr/image-targets).
+
+## Events
+
+### REALITY_READY
+
+This event is emitted when 8th Wall Web has initialized and at least one frame has been successfully processed. This is the recommended time at which any permissions/loading UI should be hidden.
+
+#### Properties
+
+None.
+
+#### Example
+
+
diff --git a/docs/studio/api/world/_category_.json b/docs/studio/api/world/_category_.json
new file mode 100644
index 0000000..e633ab9
--- /dev/null
+++ b/docs/studio/api/world/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "world",
+ "position": 2
+}
\ No newline at end of file
diff --git a/docs/studio/api/world/audio.mdx b/docs/studio/api/world/audio.mdx
new file mode 100644
index 0000000..0acd062
--- /dev/null
+++ b/docs/studio/api/world/audio.mdx
@@ -0,0 +1,51 @@
+---
+id: audio
+description: This library includes functions that handle audio playback.
+---
+
+# audio
+
+## Description
+{frontMatter.description}
+
+## Functions
+
+### mute
+
+Mute scene audio.
+
+``` ts
+world.audio.mute() // -> void
+```
+
+### unmute
+
+Unmute scene audio.
+
+``` ts
+world.audio.unmute() // -> void
+```
+
+### pause
+
+Pause scene audio.
+
+``` ts
+world.audio.pause() // -> void
+```
+
+### play
+
+Play scene audio.
+
+``` ts
+world.audio.play() // -> void
+```
+
+### setVolume
+
+Set volume of scene audio. **The parameter should be a value between 0 and 1.**
+
+``` ts
+world.audio.setVolume(newVolume: number) // -> void
+```
diff --git a/docs/studio/api/world/camera.mdx b/docs/studio/api/world/camera.mdx
new file mode 100644
index 0000000..9019bca
--- /dev/null
+++ b/docs/studio/api/world/camera.mdx
@@ -0,0 +1,29 @@
+---
+id: camera
+description: This library includes functions that handle the Camera.
+---
+
+# camera
+
+## Description
+
+{frontMatter.description}
+
+## Functions
+
+### getActiveEid
+
+Get the active camera.
+
+``` ts
+world.camera.getActiveEid() // -> eid
+```
+
+### setActiveEid
+
+Set the active camera.
+
+``` ts
+world.camera.setActiveEid(eid) // -> void
+```
+
diff --git a/docs/studio/api/world/effects.mdx b/docs/studio/api/world/effects.mdx
new file mode 100644
index 0000000..9f75fbe
--- /dev/null
+++ b/docs/studio/api/world/effects.mdx
@@ -0,0 +1,89 @@
+---
+id: effects
+description: This library includes functions to update world effects like the skybox and fog.
+---
+
+# effects
+
+## Description
+{frontMatter.description}
+
+## Types
+
+```
+type NoFog = {type: 'none'}
+
+type LinearFog = {
+ type: 'linear'
+ near: number
+ far: number
+ color: string
+}
+
+type ExponentialFog = {
+ type: 'exponential'
+ density: number
+ color: string
+}
+
+type Fog = NoFog | LinearFog | ExponentialFog
+```
+
+```
+type Color = {type: 'color', color?: string}
+
+type GradientStyle = 'linear' | 'radial'
+type Gradient = {
+ type: 'gradient'
+ style?: GradientStyle
+ colors?: string[]
+}
+type Image = {type: 'image', src?: string}
+type NoSky = {type: 'none'}
+
+type Sky = Color | Gradient | Image | NoSky
+```
+
+## Functions {#functions}
+
+### setFog
+
+Set the active fog.
+
+``` ts
+world.effects.setFog(fog: Fog) => void
+```
+
+#### Example
+``` ts
+world.effects.setFog({type: 'linear', far: 1000, near: 0.1, color: 'ffffff'})
+```
+
+### getFog
+
+Get the active fog.
+
+``` ts
+world.effects.getFog() => Fog | undefined
+```
+
+### setSky
+
+Set the active sky.
+
+``` ts
+world.effects.setSky(sky: Sky) => void
+```
+
+#### Example
+``` ts
+world.effects.setSky({type: 'color', color: 'ffffff'})
+```
+
+### getSky
+
+Get the active sky.
+
+``` ts
+world.effects.getSky() => Sky | undefined
+```
diff --git a/docs/studio/api/world/events.mdx b/docs/studio/api/world/events.mdx
new file mode 100644
index 0000000..79d1896
--- /dev/null
+++ b/docs/studio/api/world/events.mdx
@@ -0,0 +1,41 @@
+---
+id: events
+description: This library includes functions that handle event management.
+---
+
+# events
+
+## Description
+{frontMatter.description}
+
+## Properties {#properties}
+
+| Property | Description |
+|----------|----------------------------------------------|
+| globalId | Used to dispatch/listen for events globally. |
+
+## Functions {#functions}
+
+### addListener
+
+Registers a component to listen to an event.
+
+``` ts
+world.events.addListener(target: eid, name: string, listener: function) -> void
+```
+
+### removeListener
+
+Un-registers an existing listener.
+
+``` ts
+world.events.removeListener(target: eid, name: string, listener: function) -> void
+```
+
+### dispatch
+
+Used to emit events that the listeners can listen to.
+
+``` ts
+world.events.dispatch(target: eid, name: string, data: object /* (optional) */) -> void
+```
diff --git a/docs/studio/api/world/input.mdx b/docs/studio/api/world/input.mdx
new file mode 100644
index 0000000..88c487b
--- /dev/null
+++ b/docs/studio/api/world/input.mdx
@@ -0,0 +1,178 @@
+---
+id: input
+description: This library includes functions that handle input management.
+---
+
+# input
+
+## Description
+{frontMatter.description}
+
+## Functions
+
+### disablePointerLockRequest
+Disables the pointer lock request on click.
+
+``` ts
+world.input.disablePointerLockRequest() // -> void
+```
+
+### enablePointerLockRequest
+Request a pointer lock from the user when the screen is clicked.
+
+``` ts
+world.input.enablePointerLockRequest() // -> void
+```
+
+### getAction
+Returns > 0 if the action was triggered. Value is usually from 0 to 1. The exceptions are mouse velocity and scroll, which are uncapped.
+
+``` ts
+world.input.getAction(actionName: string) // -> number
+```
+
+### getActiveMap
+Get the active action map.
+
+``` ts
+world.input.getActiveMap() // -> string (Name of the active action map)
+```
+
+### setActiveMap
+Sets the active action map. The default action map's name is 'default'.
+
+``` ts
+world.input.setActiveMap(mapName: string) // -> void
+```
+
+### getAxis
+Returns the value of the axis of the gamepadIdx.
+
+``` ts
+world.input.getAxis(gamepadIdx?: number) // -> number[]
+```
+
+### getButton
+Returns true while the virtual button identified by the given button index is held down.
+
+``` ts
+world.input.getButton(input: number, gamepadIdx?: number) // -> boolean
+```
+
+### getButtonDown
+Returns true during the frame the user pressed down the button mapped to the index.
+
+``` ts
+world.input.getButtonDown(input: number, gamepadIdx?: number) // -> boolean
+```
+
+### getButtonUp
+Returns true the first frame the user releases the button.
+
+``` ts
+world.input.getButtonUp(input: number, gamepadIdx?: number) // -> boolean
+```
+
+### getGamepads
+Returns all gamepads connected to the device.
+
+``` ts
+world.input.getGamepads() // -> Gamepad[]
+```
+
+### getKey
+Returns true while the user holds down the key identified by name.
+
+``` ts
+world.input.getKey(code: string) // -> boolean
+```
+
+### getKeyDown
+Returns true during the frame the user starts pressing down the key identified by name.
+
+``` ts
+world.input.getKeyDown(code: string) // -> boolean
+```
+
+### getKeyUp
+Returns true during the frame the user releases the key identified by name.
+
+``` ts
+world.input.getKeyUp(code: string) // -> boolean
+```
+
+### getMouseButton
+Returns true while the user holds down the mouse button identified by button number.
+
+| Number | Mouse Button |
+|--------|--------------|
+| 0 | Left Click |
+| 1 | Right Click |
+| 2 | Middle Click |
+
+``` ts
+world.input.getMouseButton(index: number) // -> boolean
+```
+
+### getMouseDown
+Returns true during the frame the user starts pressing down on the mouse button.
+
+``` ts
+world.input.getMouseDown(index: number) // -> boolean
+```
+
+### getMouseUp
+Returns true during the frame the user releases the mouse button.
+
+``` ts
+world.input.getMouseUp(index: number) // -> boolean
+```
+
+### getMousePosition
+Returns the clientX and clientY coordinates of the mouse.
+
+``` ts
+world.input.getMousePosition() // -> [number, number]
+```
+
+### getMouseVelocity
+Returns the x and y velocity of the mouse.
+
+``` ts
+world.input.getMouseVelocity() // -> [number, number]
+```
+
+### getMouseScroll
+Returns the x and y velocity of mouse scroll.
+
+``` ts
+world.input.getMouseScroll() // -> [number, number]
+```
+
+### getTouch
+Returns true while the screen is being touched. If an identifier is provided, returns true only if that specific touch is active.
+
+``` ts
+world.input.getTouch(identifier?: number) // -> boolean
+```
+
+### getTouchDown
+Returns true during the frame a touch begins. If an identifier is provided, returns true only when that specific touch begins.
+
+``` ts
+world.input.getTouchDown(identifier?: number) // -> boolean
+```
+
+### getTouchUp
+Returns true during the frame a touch ends. If an identifier is provided, returns true only when that specific touch ends.
+
+``` ts
+world.input.getTouchUp(identifier?: number) // -> boolean
+```
+
+### getTouchIds
+Returns an array of active touch identifiers. Each identifier represents a distinct touch point currently on the screen.
+
+``` ts
+world.input.getTouchIds() // -> number[]
+```
diff --git a/docs/studio/api/world/spaces.mdx b/docs/studio/api/world/spaces.mdx
new file mode 100644
index 0000000..67188eb
--- /dev/null
+++ b/docs/studio/api/world/spaces.mdx
@@ -0,0 +1,39 @@
+---
+id: spaces
+description: This library includes functions to work with Spaces.
+---
+
+# spaces
+
+## Description
+{frontMatter.description}
+
+## Functions
+
+### loadSpace
+Loads a Space specified by an ID or a name. The newly loaded Space will replace the old Space as the active Space. The old Space’s (and any included Spaces) objects will be despawned, and the new Spaces (and any included Spaces) will be spawned. Will throw an error if there is no match or multiple matches with the given ID or name.
+
+``` ts
+world.spaces.loadSpace(idOrName: string) // -> void
+```
+
+### listSpaces
+Returns the SpaceData of all Spaces.
+
+``` ts
+world.spaces.listSpaces() // -> SpaceData[] | undefined
+```
+
+### getActiveSpace
+Returns the active Space’s SpaceData.
+
+``` ts
+world.spaces.getActiveSpace() // -> SpaceData | undefined
+```
+
+## SpaceData
+| Property | Type | Description |
+|----------|---------|----------------------------------------------|
+| name | string | The space name. Must be unique |
+| id | string | The space id |
+| spawned | boolean | Indicates if the space is currently spawned |
diff --git a/docs/studio/api/world/three.mdx b/docs/studio/api/world/three.mdx
new file mode 100644
index 0000000..7f85bc8
--- /dev/null
+++ b/docs/studio/api/world/three.mdx
@@ -0,0 +1,27 @@
+---
+id: three
+description: This library provides an interface to the rendering engine.
+---
+
+# three
+
+## Description
+{frontMatter.description}
+
+## Properties
+
+| Property | Type | Description |
+|----------------|----------------------|--------------------------------------|
+| scene | Three | Entry point for three.js API. |
+| renderer | WebGLRenderer | The three.js renderer. |
+| activeCamera | Camera | Handle to the active camera. |
+| entityToObject | `Map` | Map of entities to three.js objects. |
+
+## Functions
+
+### entityToObject
+Retrieves the corresponding three.js Object3D for a given entity.
+
+``` ts
+world.three.entityToObject.get(eid: Eid) // -> Object3D
+```
\ No newline at end of file
diff --git a/docs/studio/api/world/time.mdx b/docs/studio/api/world/time.mdx
new file mode 100644
index 0000000..37099cc
--- /dev/null
+++ b/docs/studio/api/world/time.mdx
@@ -0,0 +1,46 @@
+---
+id: time
+description: This library includes properties and functions that handle time management.
+---
+
+# time
+
+## Description
+{frontMatter.description}
+
+## Properties
+
+### world.time.elapsed
+The number of milliseconds the world has been running for, excluding time spent while the world was paused.
+
+### world.time.delta
+The number of milliseconds since the previous frame, excluding time jumps due to being paused.
+
+### world.time.absolute
+The number of milliseconds that have elapsed since the world was created.
+
+### world.time.absoluteDelta
+The number of milliseconds since the last frame, including large jumps of time if the world is resuming after being paused.
+
+## Functions
+
+### setTimeout
+Executes a function once after a specified delay.
+
+``` ts
+world.time.setTimeout(callback: function, delay: number) // -> Timeout
+```
+
+### clearTimeout
+Stops a previously set timeout or interval, preventing the specified function from executing if the delay hasn’t yet passed.
+
+``` ts
+world.time.clearTimeout(timeout: Timeout) // -> void
+```
+
+### setInterval
+Executes a function repeatedly at specified time intervals.
+
+``` ts
+world.time.setInterval(callback: function, interval: number) // -> Interval
+```
\ No newline at end of file
diff --git a/docs/studio/api/world/transform.mdx b/docs/studio/api/world/transform.mdx
new file mode 100644
index 0000000..19fea6d
--- /dev/null
+++ b/docs/studio/api/world/transform.mdx
@@ -0,0 +1,137 @@
+---
+id: transform
+description: This library includes functions for working with Transforms.
+---
+
+# transform
+
+## Description
+{frontMatter.description}
+
+## Functions
+
+### getLocalPosition
+Returns the local position of the given entity.
+
+``` ts
+world.transform.getLocalPosition(eid: Eid, out?: Vec3): Vec3
+```
+
+### getLocalTransform
+Returns the local transform matrix of the given entity.
+
+``` ts
+world.transform.getLocalTransform(eid: Eid, out?: Mat4): Mat4
+```
+
+### getWorldPosition
+Returns the world position of the given entity.
+
+``` ts
+world.transform.getWorldPosition(eid: Eid, out?: Vec3): Vec3
+```
+
+### getWorldTransform
+Returns the world transform matrix of the given entity.
+
+``` ts
+world.transform.getWorldTransform(eid: Eid, out?: Mat4): Mat4
+```
+
+### setLocalPosition
+Sets the local position of the given entity.
+
+``` ts
+world.transform.setLocalPosition(eid: Eid, position: Vec3Source): void
+```
+
+### setLocalTransform
+Sets the local transform matrix of the given entity.
+
+``` ts
+world.transform.setLocalTransform(eid: Eid, mat4: Mat4): void
+```
+
+### setWorldPosition
+Sets the world position of the given entity.
+
+``` ts
+world.transform.setWorldPosition(eid: Eid, position: Vec3Source): void
+```
+
+### setWorldTransform
+Sets the world transform matrix of the given entity.
+
+``` ts
+world.transform.setWorldTransform(eid: Eid, mat4: Mat4): void
+```
+
+### getWorldQuaternion
+Returns the world quaternion of the given entity.
+
+``` ts
+world.transform.getWorldQuaternion(eid: Eid, out?: Quat): Quat
+```
+
+### setWorldQuaternion
+Sets the world quaternion of the given entity.
+
+``` ts
+world.transform.setWorldQuaternion(eid: Eid, rotation: QuatSource): void
+```
+
+### translateSelf
+Translates the entity in its own local space using partial Vec3Source input.
+
+``` ts
+world.transform.translateSelf(eid: Eid, translation: Partial): void
+```
+
+### translateLocal
+Translates the entity in its parent's local space using partial Vec3Source input.
+
+``` ts
+world.transform.translateLocal(eid: Eid, translation: Partial): void
+```
+
+### translateWorld
+Translates the entity in world space using partial Vec3Source input.
+
+``` ts
+world.transform.translateWorld(eid: Eid, translation: Partial): void
+```
+
+### rotateSelf
+Rotates the entity around its own axes using a quaternion.
+
+``` ts
+world.transform.rotateSelf(eid: Eid, rotation: QuatSource): void
+```
+
+### rotateLocal
+Rotates the entity relative to its local space using a quaternion.
+
+``` ts
+world.transform.rotateLocal(eid: Eid, rotation: QuatSource): void
+```
+
+### lookAt
+Orients the entity to face another entity.
+
+``` ts
+world.transform.lookAt(eid: Eid, other: Eid): void
+```
+
+### lookAtLocal
+Orients the entity to face a position in local space.
+
+``` ts
+world.transform.lookAtLocal(eid: Eid, position: Vec3Source): void
+```
+
+### lookAtWorld
+Orients the entity to face a position in world space.
+
+``` ts
+world.transform.lookAtWorld(eid: Eid, position: Vec3Source): void
+```
diff --git a/docs/studio/api/world/world.mdx b/docs/studio/api/world/world.mdx
new file mode 100644
index 0000000..c1932ee
--- /dev/null
+++ b/docs/studio/api/world/world.mdx
@@ -0,0 +1,124 @@
+---
+id: world
+description: This library includes properties and functions related to the World.
+---
+
+# world
+
+## Description
+{frontMatter.description}
+
+## Properties
+
+| Property | Type | Description |
+|-------------|------|----------------------------|
+| allEntities | Set | Set of all scene entities. |
+
+## Functions
+
+### createEntity
+Create a new entity. Optionally specify a prefab entity or prefab name to spawn an instance of that prefab.
+
+``` ts
+world.createEntity(prefabOrPrefabName?: Eid | string) // -> eid
+```
+
+### deleteEntity
+Delete an existing entity.
+
+``` ts
+world.deleteEntity(eid) // -> void
+```
+
+### getChildren
+Get children of an entity.
+
+``` ts
+world.getChildren(eid) // -> Generator
+```
+
+### getParent
+Get the parent of an entity.
+
+``` ts
+world.getParent(eid) // -> eid
+```
+
+### setParent
+Set the parent of an entity.
+
+``` ts
+world.setParent(eid, parent: eid) // -> void
+```
+
+### getWorldTransform
+Get the world transform of an entity. **Copies the worldTransform of the target into the second parameter.**
+
+``` ts
+world.getWorldTransform(eid, transform: Mat4) // -> void
+```
+
+### setTransform
+Set transform of an entity.
+
+``` ts
+world.setTransform(eid, transform: Mat4) // -> void
+```
+
+### setPosition
+Set position of an entity.
+
+``` ts
+world.setPosition(eid, x: number, y: number, z: number) // -> void
+```
+
+### setQuaternion
+Set rotation of an entity.
+
+``` ts
+world.setQuaternion(eid, x: number, y: number, z: number, w: number) // -> void
+```
+
+### setScale
+Set scale of an entity.
+
+``` ts
+world.setScale(eid, x: number, y: number, z: number) // -> void
+```
+
+### normalizeQuaternion
+Normalizes an entity's quaternion.
+
+``` ts
+world.normalizeQuaternion(eid) // -> void
+```
+
+### getInstanceEntity
+Get the corresponding instance entity from a prefab instance hierarchy given the root instance, and a source prefab or prefab child entity.
+
+``` ts
+world.getInstanceEntity(prefabInstance: Eid, prefabSourceEntity: Eid) // -> Eid
+```
+
+### raycast
+Perform a Raycast from a position and direction.
+
+``` ts
+world.raycast(origin: Vec3, direction: Vec3, near: number = 0, far: number = Infinity) // -> IntersectionResult[]
+```
+
+### raycastFrom
+Perform a Raycast from an entity's position and forward direction.
+
+``` ts
+world.raycastFrom(eid: Eid, near: number = 0, far: number = Infinity) // -> IntersectionResult[]
+```
+
+## IntersectionResult
+
+| Property | Type | Description |
+|-----------|--------------|------------------------------------------------------------------|
+| eid? | Eid | The eid of the object that was hit. |
+| point | Vec3 | The location in World Space where the intersection occurred. |
+| distance | number | The distance between the starting location and the hit location. |
+| threeData | Intersection | The three.js intersection data. |
diff --git a/docs/studio/api/world/xr.mdx b/docs/studio/api/world/xr.mdx
new file mode 100644
index 0000000..a0a1745
--- /dev/null
+++ b/docs/studio/api/world/xr.mdx
@@ -0,0 +1,35 @@
+---
+id: xr
+description: This library includes functions to capture images and videos within your Studio project.
+---
+
+# xr
+
+## Description
+{frontMatter.description} Also see [Media Recorder Events](/docs/studio/api/events/media-recorder/).
+
+## Functions
+
+### startMediaRecorder
+
+Start recording.
+
+``` ts
+world.xr.startMediaRecorder: () => void
+```
+
+### stopMediaRecorder
+
+Stop recording.
+
+``` ts
+world.xr.stopMediaRecorder: () => void
+```
+
+### takeScreenshot
+
+Take a screenshot. Returns a Promise that when resolved, provides a buffer containing the JPEG compressed image. When rejected, an error message is provided.
+
+``` ts
+world.xr.takeScreenshot: () => Promise
+```
diff --git a/docs/studio/asset-lab/_category_.json b/docs/studio/asset-lab/_category_.json
new file mode 100644
index 0000000..7cb219e
--- /dev/null
+++ b/docs/studio/asset-lab/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Asset Lab",
+ "position": 4
+}
\ No newline at end of file
diff --git a/docs/studio/asset-lab/asset-lab.md b/docs/studio/asset-lab/asset-lab.md
new file mode 100644
index 0000000..6d1288f
--- /dev/null
+++ b/docs/studio/asset-lab/asset-lab.md
@@ -0,0 +1,24 @@
+---
+id: asset-lab
+sidebar_class_name: "hidden"
+---
+
+# Asset Lab
+
+Asset Lab is an AI-powered suite of features for generating assets in 8th Wall Studio. It is designed to streamline workflows for generating images, 3D models, and animated characters for use in Studio projects.
+
+### Key Features
+- Use-case optimized workflows for generating different asset types
+- Integration with industry-leading AI models
+- Easy project import
+- Workspace-level generated asset library & credit balance
+
+## Generate Assets
+
+1. Click on the **Asset Lab** tab on the left panel in Studio.
+2. Click the `Generate…` button to select a workflow.
+3. Follow the steps based on your selected workflow.
+
+The steps of your selected workflow will be displayed at the top.
+
+
diff --git a/docs/studio/asset-lab/generate-character.md b/docs/studio/asset-lab/generate-character.md
new file mode 100644
index 0000000..586c438
--- /dev/null
+++ b/docs/studio/asset-lab/generate-character.md
@@ -0,0 +1,62 @@
+---
+id: generate-characters
+sidebar_position: 3
+---
+
+# Generate Animated Characters
+
+Asset Lab currently supports rigging and animating **humanoid bipedal** 3D character models.
+
+To generate a rigged and animated character model, you must first generate a 3D character model in a T-pose, from multiple images, to use as input.
+
+## Step 1: Generate Image Inputs
+
+It is required to use GPT-Image-1 for generating image inputs for animated characters. See [Generate Images](/docs/studio/asset-lab/generate-images) for more details.
+
+Use **GPT-Image-1** to generate multi-view character images in a T-pose:
+1. Front view
+2. Right, left, and back views
+
+Then click **Send to 3D Model**.
+
+
+
+## Step 2: Generate 3D Model
+
+Select a supported 3D generation model. See [Generate 3D Models](/docs/studio/asset-lab/generate-models) for more details.
+
+Select the Generate button to process the request.
+
+
+
+Once complete, click **Send to Animation**.
+
+## Step 3: Rig and Animate
+
+Currently supports rigging via **Meshy**. Input must be a bipedal humanoid with clearly defined limbs.
+
+Returns the following animation clips:
+- Walk
+- Run
+- Idle
+- Jump
+- Attack
+- Death
+- Zombie Walk
+- Dance
+
+Click **Rig + Animate** to process (may take up to 2 minutes).
+
+
+
+## Step 4: Import into Project
+
+Use the import or download buttons to save your rigged model.
+
+
+
+Filter for **Animated Characters** in the Library to find them.
+
+
+
+
diff --git a/docs/studio/asset-lab/generate-images.md b/docs/studio/asset-lab/generate-images.md
new file mode 100644
index 0000000..2be55da
--- /dev/null
+++ b/docs/studio/asset-lab/generate-images.md
@@ -0,0 +1,61 @@
+---
+id: generate-images
+sidebar_position: 1
+---
+
+# Generate Images
+
+## Step 1: Generate Image(s)
+
+Begin by selecting a model from the dropdown menu.
+
+
+
+### Supported Models
+
+**Stable Image Core**
+Stability AI’s core model for fast, high-quality generation.
+Inputs:
+- Text prompt
+- Negative prompt (optional)
+- Aspect ratio (default: square)
+- Batch size (1 or 4)
+
+**FLUX1.1 [pro]**
+Improved version of FLUX.1 with enhanced fidelity and composition.
+Inputs:
+- Text prompt
+- Aspect ratio (default: square)
+- Batch size (1 or 4)
+
+**FLUX Kontext [pro]**
+Supports both text and reference image inputs.
+Inputs:
+- Text prompt
+- Image prompt
+- Aspect ratio (default: square)
+- Batch size (1 or 4)
+
+**GPT-Image-1**
+OpenAI’s latest multimodal image model.
+Inputs:
+- Text prompt
+- Image prompt (optional)
+- Aspect ratio (default: square)
+- Batch size (1 or 4)
+
+_Select the Generate button to process the request._
+
+
+
+## Step 2: Import into Project or Download
+
+Use the buttons at the bottom to import or download all images.
+
+
+
+Access all assets from your workspace in the **Library**, available in the Studio sidebar or full-screen Asset Lab.
+
+
+
+
diff --git a/docs/studio/asset-lab/generate-models.md b/docs/studio/asset-lab/generate-models.md
new file mode 100644
index 0000000..8124e0c
--- /dev/null
+++ b/docs/studio/asset-lab/generate-models.md
@@ -0,0 +1,82 @@
+---
+id: generate-models
+sidebar_position: 2
+---
+
+# Generate 3D Models
+
+To generate a 3D model (`.glb` format), you must first generate an image or multiple images to use as input. 3D models are generated from either a single image or multi-view image set.
+
+This process works best with individual subjects like objects or buildings, not entire scenes.
+
+## Step 1: Generate Image Inputs
+
+Begin by selecting a model using the dropdown menu.
+
+
+
+Each model has different strengths, as well as a different credit price per request (displayed in the Generate button near the bottom). See credit price doc for full list of prices.
+
+Depending on which model you select to generate an image, you will have different input options. See Image Generation workflow for more details on available models and inputs.
+
+Use the toggle to generate either a single image (best for simple symmetrical objects) or the front view in multi image set (best for more complex or asymmetrical objects)
+
+If you select Single View, you generate one image of the object at a ¾ view.
+
+
+
+If you select Multi-View, you will first generate a view of the “front” of your object. If you are satisfied with the result, you can then generate the “right”, “left”, and “back” views of the same object using the Generate Multi-View button. For this step, it is required to use GPT-image-1.
+
+
+
+Once you are satisfied with your image inputs, you can click the “Send to 3D Model” button in the bottom right to move to the next step.
+
+## Step 2: Generate 3D Model
+
+Select your desired model and adjust parameters as needed.
+
+### Supported Models
+
+**Trellis**
+Large-scale model by Microsoft for high-quality textured meshes.
+Inputs:
+- Single or multi-view images
+- Shape Guidance (0–10)
+- Detail Guidance (1–10)
+- Mesh Simplification (0.9–0.98)
+- Texture size: 512x512 or 1024x1024
+
+**Hunyuan 3D-2**
+Tencent’s high-res asset generator.
+Inputs:
+- Single or multi-view images
+- Speed (Standard or Turbo)
+- Guidance (0–20)
+- Shape Detail (1–1024)
+
+**Hunyuan 3D-2 Mini**
+Lower-resource variant of Hunyuan 3D-2.
+Inputs:
+- Single images only
+- Speed (Standard or Turbo)
+- Guidance (0–20)
+- Shape Detail (1–1024)
+
+Each model has different strengths, as well as a different credit price per request (displayed in the Generate button near the bottom). See credit price doc for full list of prices.
+
+_Select the Generate button to begin._
+
+
+
+## Step 3: Import into Project or Download
+
+Use the buttons at the bottom to import or download your 3D model.
+
+
+
+You can access all of the assets generated from users in your workspace from the Library, available in the left tab of the full screen Asset Lab or in the left side panel tab in Studio. Use the filter option to display 3D models only.
+
+
+
+
+
diff --git a/docs/studio/asset-lab/library.md b/docs/studio/asset-lab/library.md
new file mode 100644
index 0000000..4eea57b
--- /dev/null
+++ b/docs/studio/asset-lab/library.md
@@ -0,0 +1,28 @@
+---
+id: library
+sidebar_position: 4
+---
+
+# Asset Library
+
+In the Asset Lab Library:
+- Access all workspace-generated assets
+- Check your Credit balance displayed top right
+- Filter by type: Images, 3D Models, Animated Characters
+- Use search and sort tools to organize
+
+
+
+Click on an asset for metadata:
+- Prompt used
+- User who submitted
+- Model used
+- Credit cost
+
+Available actions:
+- **Send to Workflow** – Reuse asset as input
+- **Download** – Save locally
+- **Remix** – Adjust input settings
+- **Import** – Add to project’s asset folder
+
+
\ No newline at end of file
diff --git a/docs/studio/changelog.md b/docs/studio/changelog.md
new file mode 100644
index 0000000..6067081
--- /dev/null
+++ b/docs/studio/changelog.md
@@ -0,0 +1,725 @@
+---
+id: release-notes
+sidebar_position: 999
+toc_max_heading_level: 2
+
+# NOTE(christoph): We can set this to a previous version if we don't want to show the popup for the next version, for example `latest_popup_id: "2025_09_17"`, or stop showing the popup entirely by setting it to `""`.
+latest_popup_id: "latest"
+
+runtime_version_2025_10_24: "2.2.0"
+runtime_version_2025_10_16: "2.1.0"
+runtime_version_2025_10_10: "2.0.1"
+runtime_version_2025_09_25: "2.0.1"
+runtime_version_2025_09_17: "2.0.0"
+runtime_version_2025_09_09: "2.0.0"
+runtime_version_2025_08_29: "1.1.0"
+runtime_version_2025_08_19: "1.0.0"
+runtime_version_2025_08_06: "1.0.0"
+---
+
+# Release Notes
+
+
+
+## October 2025 [Update 3] {#version-2025-october-24}
+October 24, 2025
+
+### New Features
+Runtime 2.2.0 adds physics collider rotational offsets, and other fixes/enhancements. Read the
+full release notes [here](/docs/studio/api/changelog/#2.2.0).
+
+## October 2025 [Update 2] {#version-2025-october-16}
+October 16, 2025
+
+### New Features
+Runtime 2.1.0 introduces an API for skybox and fog, and other fixes/enhancements. Read the
+full release notes [here](/docs/studio/api/changelog/#2.1.0).
+
+## October 2025 [Update 1] {#version-2025-october-10}
+October 10, 2025
+
+### New Features
+
+Desktop App
+- Added support for Windows. Download [here](https://www.8thwall.com/download).
+
+Native App Export
+- Added iFrame embed options with a copyable code snippet in the Publish flow. Learn more [here](https://www.8thwall.com/blog/post/196857049250/embedding-made-easy-iframe-support-in-8th-walls-publish-flow).
+
+### Fixes and Enhancements
+
+Desktop App
+- Fixed issue where the 8th Wall desktop app on Mac would sometimes stall at initial playback
+- The desktop app now supports translations based on user language preferences
+
+Native App Export
+- Introduced a configuration option to include or remove the status bar in iOS Native App Export builds
+
+General
+- Markdown files will open in preview mode by default (Studio Web)
+
+## September 2025 [Update 3] {#version-2025-september-25}
+September 25, 2025
+
+### Fixes and Enhancements
+
+Physics
+- Fixed issue preventing dynamic objects from reaching complete rest
+- Fixed crash after repeated collider scale changes
+
+Particles
+- Fixed incorrect emission directions
+- Ensured particle effects are framerate independent
+
+Desktop App
+- Increased reliability of simulator
+
+UI
+- Fixed issue causing incorrect offset placement on UI elements
+
+## September 2025 [Update 2] {#version-2025-september-17}
+September 17, 2025
+
+### New Features
+
+Desktop App
+- [The 8th Wall Desktop App is here](http://8th.io/desktopappblog). Now in Public Beta for macOS, with Windows coming soon, the Desktop App brings the speed of local development together with the collaboration of the cloud. [Learn more](https://www.8thwall.com/docs/studio/app/) and [download now](https://www.8thwall.com/download).
+
+
+
+## September 2025 [Update 1] {#version-2025-september-9}
+September 9, 2025
+
+### New Features
+
+Physics Upgrade
+- The all-new Studio Runtime 2.0.0 comes with a [rebuilt physics engine](https://8th.io/v2update) that’s faster, smoother, and ready for whatever you throw at it.
+- Some physics behaviors are different as a result for properties like Friction, Restitution, and Damping. View the [upgrade guide](https://8th.io/v2upgradeguide) for a smooth transition to 2.0.
+- Kinematic Colliders: Added Kinematic option to Collider types. Allows for objects to have scripted or animated movement, while also allowing physics collision interactions.
+
+Native App Export
+- Export your 3D or XR experience as an iOS app and increase your reach by publishing to both the web and iOS app store.
+
+### Fixes and Enhancements
+
+Prefabs
+- Fixed issue where colliders on nested prefab objects would not generate properly.
+
+Native App Export
+- Updated the Android SDK target for Android Native App Export from API Level 34 to API Level 36 to ensure compliance with Google Play distribution requirements (apps must target API Level 35+).
+- Fixed an issue where particle effects and custom fonts did not render correctly in Static Bundle build mode for Android Native App Export.
+
+General
+- Updated video autoplay behavior so videos with audio will autoplay as muted. Video audio will automatically unmute once a user interaction has occurred.
+- Enabled stricter typescript checking at build time to improve error reporting.
+- Now within Studio, you will automatically be notified of newly released updates.
+
+## August 2025 [Update 3] {#version-2025-august-29}
+August 29, 2025
+
+### New Features
+
+Runtime Versioning
+- Studio projects can run on a specific version, which can be updated in Settings. Pin your project to a fixed runtime for predictability, or opt in to automatic minor updates and bug fixes to always stay current.
+
+### Fixes and Enhancements
+
+Asset Lab
+- Previous asset generation steps now populate when sending assets from Library to generate workflows
+- Retry multiple multi-view angles at once during image generation step for 3D model and Animated Character workflows
+
+Face
+- Fixed face mesh not being rendered as configured with Face AR camera
+
+Physics
+- Fixed corrupted shape being applied to auto colliders
+
+## August 2025 [Update 2] {#version-2025-august-19}
+August 19, 2025
+
+### New Features
+
+Billing
+* Added one-time credit top-ups
+
+General
+* Added Camera Preview Widget
+
+### Fixes and Enhancements
+
+Billing
+* Added Stripe Billing Portal to manage subscriptions, billing info, and invoices
+
+Image Targets
+* Fixed an issue preventing curved image targets from being updated
+
+Asset Lab
+* Allow for re-generating a single image during multi-view image generation for 3D model and Animated Character workflows
+
+General
+* Fixed an issue preventing some users from signing up with Google
+
+## August 2025 [Update 1] {#version-2025-august-6}
+August 6, 2025
+
+### Fixes and Enhancements
+
+General
+* Improved the usability and organization of the camera component
+* Fixed issue where Fog would not appear when enabled in configurator
+* Fixed mouse pointer lock issue affecting the Fly Controls component in Studio
+
+Asset Lab
+* Added UI support for background opacity for images generated with GPT-Image-1
+
+UI Elements
+* Fixed artifact appearing on UI elements with transparent images on some iOS devices
+Particles
+* Fixed GLTF particles not displaying
+Prefabs
+* Fixed prefab children collider updates
+
+## July 2025 [Update 4] {#version-2025-july-29}
+July 29, 2025
+
+### New Features
+
+Asset Lab
+* Added controls to optimize generated 3D models
+
+UI Elements
+* Added stacking order configuration to manage overlapping elements
+
+### Fixes and Enhancements
+
+UI Elements
+* Improved handling of sort order among sibling elements
+* UI element groups are now flattened into a single layer
+
+Transforms
+* Added `getWorldQuaternion` and `setWorldQuaternion` to world.transform
+
+Physics
+* Enabled high precision mode for dynamic colliders
+
+Materials
+* Added texture filtering with mipmap support
+
+Splats
+* Added support for spz v3
+
+Asset Lab
+* Added option to select assets from Library for use as inputs within image, 3D model, and animated character workflows
+
+## July 2025 [Update 3] {#version-2025-july-22}
+July 22, 2025
+
+### New Features
+
+Input Manager
+* Added Screen Touch binding to Input Manager.
+
+### Fixes and Enhancements
+
+Asset Lab
+* Added support for user-uploaded images for 3D model generation workflow.
+* Updated Animated Character workflow to support single-image to 3D generations.
+* Added support for user-uploaded 3D models for Animated Character workflow.
+
+XR
+* Fixed issue where initial flashing would occur during the camera permissions as scene objects were loaded.
+
+Native App Export
+* Updated user agent string for Native Android apps to more accurately reflect the platform and device.
+* Fixed an issue with touch events behaving unexpectedly in Native Android applications.
+
+## July 2025 [Update 2] {#version-2025-july-15}
+July 15, 2025
+
+### New Features
+
+Spaces
+* Added Fog configuration to Space Settings.
+### Fixes and Enhancements
+
+UI Elements
+* Added ignoreRaycast option.
+
+Asset Lab
+* Added ability to preview animation clips in Animated Character workflow.
+
+XR
+* Fixed invalid app key error when reloading XR camera.
+
+## July 2025 [Update 1] {#version-2025-july-07}
+July 7, 2025
+
+### New Features
+
+Asset Lab
+* Added support for Hunyuan3D-2.1 Image-to-3D generation model.
+* Added support for Flux Schnell Image generation model.
+
+Native App Export
+* Enabled support for various device orientations.
+* Added configuration options for device status bar.
+* Added support for multi-touch.
+
+### Fixes and Enhancements
+
+General
+* Fixed issue where camera set to focus on moving objects was not updating correctly.
+
+Prefabs
+* Fixed various prefab runtime issues.
+* Fixed issue where Prefab children components were not getting deleted correctly.
+* Made style updates to better highlight overridden components and changes.
+
+
+UI Elements
+* Fixed issue with Images stretching when set to “Contain”.
+
+Asset Lab
+* Fixed library loading timeouts.
+
+Particles
+* Fixed issue where particles would incorrectly fallback to using cube primitives when no primitive was set.
+
+Materials
+* Improved performance of GLTF video materials.
+
+Mesh
+* Fixed issue where adding a collider to certain GLBs would cause the object to disappear in Studio’s viewport.
+
+Native App Export
+* Improved UI scaling consistency on Android apps.
+* Fixed intermittent issues when opening or closing Android apps.
+
+Simulator
+* Fixed an issue where the simulator would initialize twice on open.
+
+## June 2025 [Update 3] {#version-2025-june-11}
+June 11, 2025
+
+### New Features
+
+Asset Lab
+* Generate images, 3D models, and animated and rigged characters with our new Asset Lab and easily add these to your scene.
+
+Native App Export
+* Export your 3D or XR experience as an Android app and increase your reach by publishing to both the web and app stores.
+
+### Fixes and Enhancements
+
+General
+* Removed Live Sync optional setting for more streamlined playback behavior.
+* Updated the Studio’s Playback and Build controls for better ease of use.
+
+## June 2025 [Update 2] {#version-2025-june-09}
+June 9, 2025
+
+### New Features
+
+UI Elements
+* Hover Events are now supported for UI Elements.
+
+Materials
+* Added API for working with Video Textures at runtime.
+
+### Fixes and Enhancements
+
+UI Elements
+* Fixed an issue causing UI elements to persist when using `display: none`.
+
+Animations
+* Bug fixed for animations transitions.
+
+## June 2025 [Update 1] {#version-2025-june-02}
+June 2, 2025
+
+### New Features
+
+Prefabs
+* We've added support for Prefabs in Studio for creating reusable, customizable game templates that streamline and scale your development, and optimize performance.
+* See our [Prefabs Guide](/docs/studio/guides/prefabs) to get started.
+
+General
+* Videos are now supported as material texture maps. Note: New VideoMaterial override will override all glTF materials, like HiderMaterial and VideoMaterial.
+
+## May 2025 [Update 2] {#version-2025-may-29}
+May 29, 2025
+
+### New Features
+
+UI Elements Events
+* We've introduced UI events for working with UI Elements like Buttons. (i.e. Pressed, Released, Selected, Disabled)
+* UI Events now have dedicated strings.
+* See more in the Events section of the API Documentation.
+
+Lights
+* We've introduced a new Light type called "Area Light" which emits light from a rectangular primitive.
+
+### Fixes and Enhancements
+
+Audio
+* Fixed issue where multiple audio entities would not spawn correctly.
+
+## May 2025 [Update 1] {#version-2025-may-05}
+May 5, 2025
+
+### New Features
+
+Scene Reflections
+* Added capabilities to set a reflections map on a space. This reflection map affects the lighting setup of your scene and alters what reflective materials show. See the new Reflections setting in the Space Settings Panel.
+
+### Fixes and Enhancements
+
+General
+* Added new "required" directive for setting fields on Custom components to be required. The `@required` directive for Custom Components will throw an error if the condition is not met at Build.
+
+## April 2025 [Update 2] {#version-2025-april-29}
+April 29, 2025
+
+### New Features
+
+Materials
+* Added a new setting for Texture wrapping in the Materials configurator.
+
+## April 2025 [Update 1] {#version-2025-april-9}
+April 9, 2025
+
+### New Features
+
+Image Targets
+* **Image Targets are now supported in 8th Wall Studio!** Developers can now anchor AR content to images in the real world, enabling a new range of creative and educational experiences.
+
+### Fixes and Enhancements
+
+Input
+* `input.getMousePosition()` now returns `clientX/Y` instead of `screenX/Y` for improved alignment with viewport coordinates.
+* Added new `ecs.input.UI_CLICK` event for improved UI interaction tracking.
+
+Transforms
+* Added transform utility functions to world.transform.
+
+Raycasting
+* Added new raycasting functions: `raycast()` and `raycastFrom()` for more flexible and precise interaction with 3D objects.
+
+UI
+* Updates to the Studio UI system interface for a more intuitive development experience.
+
+General
+* Fixed bug where `world.spaces` could not be accessed in `add` callbacks.
+* Fixed issue with ear attachments not appearing in the viewport when enabled.
+
+
+## March 2025 [Update 1] {#version-2025-March-5}
+March 5, 2025
+
+### Fixes and Enhancements
+
+General
+* Added location spawned event
+
+Shadow
+* Smart shadow camera frustum
+
+Animations
+* Bug fix for position/rotation animations
+* Fixed animation stall when swapping models
+
+Assets
+* Fixed bug where settings are stale in asset load
+* Fixed race condition in UI image asset loading
+
+## February 2025 [Update 1] {#version-2025-february-13}
+February 13, 2025
+
+### New Features
+
+Niantic Maps for Web
+* Connecting experiences to the real world
+Maps are key to building location-based experiences, and now, with Niantic Maps for Web available directly in 8th Wall Studio, adding them to your workflow is seamless. With Niantic Maps in Studio, Studio developers now have access to the same technology Niantic uses to power our most popular real-world games, allowing you to root your AR experiences in real-world locations, assist in discovering location-based experiences, and act as an aggregator of real-world AR experiences. Maps are now fully integrated into Studio’s Scene Hierarchy, allowing you to drop maps into your projects with just a click—no extra API setup needed.
+
+Spaces
+* Spaces now gives you the ability to build and manage multiple distinct areas within a single project. You can think of Spaces like scenes or environments in other gaming engines or design tools. Simply put, Spaces are 3D frames where you can place assets, lighting, cameras, and game interactions. A Space (also called a Scene) contains all of your entities.
+
+## January 2025 [Update 3] {#version-2025-january-31}
+January 31, 2025
+
+### Fixes and Enhancements
+
+General
+* General bug fixes to improve performance of scene loading, Splat loading, and working in Live Sync Mode
+
+## January 2025 [Update 2] {#version-2025-january-23}
+January 23, 2025
+
+### Fixes and Enhancements
+
+UI Elements
+* Added 9-slice stretch configuration for Background Size (3D UI Elements only)
+* Added Border Radius configuration
+
+General
+* Fixed bug where colorspace was not accurately reflected for UI Elements
+
+Physics
+* Added a toggle for the physics system. When disabled, the system is skipped on every tick, which also serves as an optimization when physics are not in use.
+
+## January 2025 [Update 1] {#version-2025-january-15}
+January 15, 2025
+
+### Fixes and Enhancements
+
+Light
+* Added `spot` light type
+
+Shadow
+* Receive Shadow configuration is moved to the Mesh component
+
+Math
+* Added `Mat4.decomposeT`
+* Added `Mat4.decomposeR`
+* Added `Mat4.decomposeS`
+
+## December 2024 [Update 1] {#version-2024-december-09}
+December 9, 2024
+
+### Fixes and Enhancements
+
+VPS
+* Added the ability to hide the Location asset from displaying in the Viewport
+
+UI
+* Fixed custom font display issues
+
+Audio
+* Added the ability to get and set audio clip progress
+
+VPS
+* Added `location` to VPS event data with the eid of the relevant Location entity
+
+## November 2024 [Update 2] {#version-2024-november-11}
+November 11, 2024
+
+### Fixes and Enhancements
+
+General
+* Improved behavior for `ecs.Disabled`
+* Improved performance with raycasting
+
+VPS
+* Fix bug with LocationMeshes getting hidden in Viewport during Live Sync
+
+Lighting
+* Added support for "follow camera" behavior on directional lights
+
+## November 2024 [Update 1] {#version-2024-november-05}
+November 5, 2024
+
+### Fixes and Enhancements
+
+General
+* Added ability to disable entities and their components in a scene for better control and optimized runtime performance.
+* Added new capability to create a new client project version from a previous commit version. Access this functionality using the Project History view in Studio’s Scene Settings.
+
+Audio
+* Added audio loading and playback finished events for easier audio playback management and control: `ecs.events.AUDIO_CAN_PLAY_THROUGH`, `ecs.events.AUDIO_END` events
+
+Assets
+* Added function for seeing status of asset loading: `ecs.assets.getStatistics`
+
+UI
+* Added function for image stretching as part of a UI element: `Ui.set({backgroundSize: ‘contain/cover/stretch’})`
+
+## October 2024 [Update 3] {#version-2024-october-29}
+October 29, 2024
+
+### New Features
+
+Backend Services
+* Backend Functions and Backend Proxies are now supported in 8th Wall Studio!
+
+## October 2024 [Update 2] {#version-2024-october-24}
+October 24, 2024
+
+### New Features
+
+VPS
+* **VPS is now supported in 8th Wall Studio!** Developers can now create location-based WebAR experiences by connecting AR content to real-world locations.
+
+### Fixes and Enhancements
+
+3D Models
+* Added support for playing all animation clips on a gltf model
+
+UI
+* Added ability to set opacity of UI elements.
+
+## October 2024 [Update 1] {#version-2024-october-18}
+October 18, 2024
+
+### Fixes and Enhancements
+
+Events
+* Added `ecs.events.SPLAT_MODEL_LOADED` event.
+
+Physics
+* Added [getLinearVelocity()](/docs/studio/api/ecs/physics/#getlinearvelocity) function.
+
+Primitives
+* Added polyhedron primitive, replacing tetrahedron.
+* Added Torus primitive.
+
+## September 2024 [Update 2] {#version-2024-september-30}
+September 30, 2024
+
+### New Features
+
+3D Models
+* Support for uploading and converting FBX-format 3D assets.
+* Support for previewing and configuring your 3D Models. With our updated Asset Previewer you can check your model in different lighting settings, adjust the pivot point, change mesh compression settings, update scale, inspect included materials, and more.
+
+Materials
+* Materials can be edited and saved on the asset preview. Changes will be reflected on the asset and scene.
+
+UI
+* Support for custom fonts with TTF file upload capability.
+* Fine-tune elements such as color, borders, text, opacity, and more. The UI builder also allows you to combine multiple 2D elements on a single canvas to create compound 2D graphics and interfaces. Edit and modify these elements in real-time within the Studio Viewport, with changes instantly reflected in the Simulator.
+
+### Fixes and Enhancements
+
+Particles
+* Updated Particle component with additional configuration options and easier-to-use defaults
+
+Physics
+* Added `applyImpulse` API, an alternative to applying force, for game development. Good for actions like jumping, punching, pushing quickly, etc.
+* Simple runtime getter function for querying the current gravity setting.
+
+## September 2024 [Update 1] {#version-2024-september-11}
+September 11, 2024
+
+### Fixes and Enhancements
+
+State Machine
+* Improved capabilities and expanded API for working with State Machines and Events. Check out the [State Machine](/docs/studio/essentials/state-machines/) documentation to learn more.
+
+## August 2024 [Update 5] {#version-2024-august-29}
+August 29, 2024
+
+### Fixes and Enhancements
+
+Particles
+* Fixed an issue where particle spawning position was not correctly set for child entities.
+
+## August 2024 [Update 4] {#version-2024-august-26}
+August 26, 2024
+
+### New Features
+
+Splats
+* **Gaussian Splatting support in Studio is here!** Using the Niantic Scaniverse app, you can easily create and export splats as an `.SPZ` file. Once uploaded to 8th Wall Studio, splats can be seamlessly integrated into your projects, serving as the foundation for hyper-realistic 3D experiences.
+
+### Fixes and Enhancements
+
+Animations
+* Fixed issue where non-looping animations did not complete at the correct position.
+
+Assets
+* Improved support for previewing assets and changing asset settings.
+
+Audio
+* Updated audio lifecycle APIs (play, pause, mute, unmute)
+
+Primitives
+* Support for Hider materials for primitive objects that let you obscure or hide objects within a scene.
+* Support for Unlit materials for primitive objects that ignore lighting conditions.
+* Fixed issue with cylinder colliders not matching the primitive shape
+
+## August 2024 [Update 3] {#version-2024-august-15}
+August 15, 2024
+
+### Fixes and Enhancements
+
+Events
+* Fixed an issue where event listeners were being skipped or removed in certain scenarios.
+
+UI
+* Fixed an issue where fonts could not be changed.
+* Fixed performance issues with loading and rendering UI elements.
+
+Docs
+* Added information on common issues and best practices to follow when scripting [Custom Components](/docs/studio/essentials/best-practices/)
+
+## August 2024 [Update 2] {#version-2024-august-08}
+August 8, 2024
+
+### Fixes and Enhancements
+
+Input Manager
+* Fixed an issue where mobile browser swipes/dragging behaviors were not controlled.
+* Added ability to control and access pointer lock, improving game control inputs.
+
+Physics
+* Fixed a timing issue that created incorrect physics behaviors.
+
+Rendering
+* Corrected an issue that caused materials to look washed out.
+
+UI
+* Added ability to hide UI Elements in the scene, enabling more dynamic UI behaviors.
+
+## August 2024 [Update 1] {#version-2024-august-01}
+August 1, 2024
+
+### New Features
+
+Animation
+* Added events and configuration controls to support GLTF models with pre-baked animations - see [3D Model guide](/docs/studio/guides/models/)
+
+Hierarchy
+* Added ability to multi-select and move objects using Command/Ctrl keys.
+* Added ability to range-select objects using Shift key.
+
+Physics
+* Added a gravity factor for physics and colliders to support more configurable physics effects - see [Physics guide](/docs/studio/guides/physics/).
+
+Primitives
+* Added RingGeometry primitive type - see [Primitives guide](/docs/studio/guides/models#primitives)
+
+Viewport
+* Added Right Click context menu for selected objects.
+* Added transform snapping when holding the Shift key.
+
+### Fixes and Enhancements
+
+Assets
+* Fixed issue where new files could not be added and assets could not be moved.
+
+Camera
+* Fixed bug where Near/Far Clip setting was not functional.
+
+Input Manager
+* Fixed issue where left / right arrow keys were swapped.
+
+Simulator
+* Simulator can now be resized.
+
+UI
+* Fixed bug that prevented Font size changes for UI Elements.
+
+Viewport
+* 3D Models dragged into the Viewport will now snap to the cursor’s current position.
+
+Misc
+* Various UI usability improvements.
+* Improvements to copying and pasting objects.
+
+## June 2024 [Update 1] {#version-2024-june-18}
+June 18, 2024
+
+### New Features
+
+Initial release of 8th Wall Studio! Hello World!
+* Key updates include initial systems and editor tooling for physics, animations, player inputs, cameras, lighting, particles, audio, 3D models, materials, meshes and much more. See the Studio documentation for more information on these systems.
diff --git a/docs/studio/essentials/_category_.json b/docs/studio/essentials/_category_.json
new file mode 100644
index 0000000..738cb79
--- /dev/null
+++ b/docs/studio/essentials/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Essentials",
+ "position": 2
+}
\ No newline at end of file
diff --git a/docs/studio/essentials/best-practices.mdx b/docs/studio/essentials/best-practices.mdx
new file mode 100644
index 0000000..aa1fa54
--- /dev/null
+++ b/docs/studio/essentials/best-practices.mdx
@@ -0,0 +1,160 @@
+---
+id: best-practices
+description: Common issues and Best Practices to follow when creating custom Components.
+sidebar_position: 50
+---
+
+# Best Practices
+
+
+
{frontMatter.description}
+
+## Stale References
+When your component is passed `(world, component)` to add, tick, or remove callbacks, it is not always safe to reference `component` in a nested function and use it after the callback has returned.
+
+
+
+### Incorrect Example
+
+``` ts
+ecs.registerComponent({
+  name: 'age-counter',
+  data: {
+    age: ecs.i32,
+    interval: ecs.i32,
+  },
+  add: (world, component) => {
+    const interval = world.time.setInterval(() => {
+      // This is not safe because we're accessing data after some amount of time
+      // has passed, it's not guaranteed to still be valid.
+      component.data.age += 1
+    }, 1000)
+
+    // This is safe because we're assigning to data within the add function
+    component.data.interval = interval
+  },
+  tick: (world, component) => {
+    console.log('I am', component.data.age, 'seconds old')
+  },
+  remove: (world, component) => {
+    world.time.clearTimeout(component.data.interval)
+  },
+})
+```
+
+
+
+
+
+### Correct Example
+
+``` ts
+ecs.registerComponent({
+ name: 'age-counter',
+ data: {
+ age: ecs.i32,
+ interval: ecs.i32,
+ },
+ add: (world, component) => {
+ const {eid, dataAttribute} = component
+ const interval = world.time.setInterval(() => {
+ // This is safe because we're re-acquiring a cursor at the time we need it,
+ // instead of using a stale cursor from before.
+ const data = dataAttribute.cursor(eid)
+ data.age += 1
+ }, 1000)
+
+ component.data.interval = interval
+  },
+  tick: (world, component) => {
+    console.log('I am', component.data.age, 'seconds old')
+  },
+  remove: (world, component) => {
+    world.time.clearTimeout(component.data.interval)
+  },
+})
+```
+
+
+
+In the example above, dataAttribute is used as a stable method to access the component’s data within a nested function. This ensures that data remains valid and up-to-date even when the function is called asynchronously.
+
+Additionally, the eid variable is destructured rather than accessing component.eid directly because component.eid can change depending on which entity is receiving the callback. Using a destructured variable avoids potential stale references.
+
+Of the arguments passed to component callbacks, here is the validity of each:
+
+:::warning
+Always destructure eid before use instead of accessing component.eid, as directly accessing component.eid can lead to stale references.
+:::
+
+| context | Changes after callback exits? | Can be used in a nested function? | Lifetime |
+|---------------------------------|-------------------------------|-----------------------------------|-----------------------|
+| world | ❌ No | ✅ Yes | Experience Lifetime |
+| eid | ✅ Yes | ✅ Yes | Entity Lifetime |
+| schema & data | ✅ Yes | ❌ No | Top level of Callback |
+| schemaAttribute & dataAttribute | ❌ No | ✅ Yes | Entity Lifetime |
+
+## Invalidated Cursors
+Cursor objects act as interfaces for reading and writing data in the ECS state. Each time a cursor is requested for a component, the same cursor instance is reused, but it points to a different location in memory. As a result, a cursor reference can become invalid, meaning it may no longer point to the expected data.
+
+### Incorrect Example
+
+
+
+``` ts
+const cursor1 = MyComponent.get(world, entity1)
+console.log(cursor1.name) // 'entity1'
+
+const cursor2 = MyComponent.get(world, entity2)
+console.log(cursor2.name) // 'entity2'
+
+// Unexpected bugs may occur if using cursor1 after another access of the component
+console.log(cursor1.name) // 'entity2'
+console.log(cursor1 === cursor2) // 'true' - it's the same object, just initialized differently each time
+```
+
+
+
+## Dangling Listeners
+Avoid assuming that any object or component will persist indefinitely. As your project evolves or new features are introduced, it’s important to ensure that your component logic is robust, including properly cleaning up event listeners.
+
+State Machines are a great way of managing and cleaning up Event Listeners.
+
+### Correct Example
+
+
+
+``` ts
+ecs.registerComponent({
+ name: 'Game Manager',
+ schema: {
+ // Add data that can be configured on the component.
+    scoreDisplay: ecs.eid, // Entity whose UI text displays the score
+ },
+ schemaDefaults: {
+ // Add defaults for the schema fields.
+ },
+ data: {
+ // Add data that cannot be configured outside of the component.
+ score: ecs.i32, // The integer value of the score
+ },
+ stateMachine: ({world, eid, schemaAttribute, dataAttribute}) => {
+ // Add score event
+ const coinCollect = () => {
+ const data = dataAttribute.cursor(eid)
+ data.score += 1
+ ecs.Ui.set(world, schemaAttribute.get(eid).scoreDisplay, {
+ text: data.score.toString(),
+ })
+ }
+
+ ecs.defineState('default').initial().onEnter(() => {
+ world.events.addListener(world.events.globalId, 'coinCollect', coinCollect)
+ }).onExit(() => {
+ world.events.removeListener(world.events.globalId, 'coinCollect', coinCollect)
+ })
+ },
+})
+```
+
+
diff --git a/docs/studio/essentials/component-functions.mdx b/docs/studio/essentials/component-functions.mdx
new file mode 100644
index 0000000..04038cd
--- /dev/null
+++ b/docs/studio/essentials/component-functions.mdx
@@ -0,0 +1,60 @@
+---
+id: component-functions
+description: Components can be added, modified, and removed at runtime.
+sidebar_position: 45
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+
+# Component Functions
+
+## Introduction
+{frontMatter.description} Built-in components and custom components use the same interface for managing component data.
+
+## Referencing a Custom Component
+The following example shows how to share a reference to a custom component between different code files.
+
+:::tip
+Make sure you export the Component **before** attempting to import or use it anywhere.
+:::
+
+``` tsx title="/custom-component.ts"
+import * as ecs from '@8thwall/ecs'
+
+const CustomComponent = ecs.registerComponent({
+ ...
+})
+
+export {CustomComponent}
+```
+
+``` tsx title="/another-component.ts"
+import * as ecs from '@8thwall/ecs'
+import {CustomComponent} from './custom-component'
+
+ecs.registerComponent({
+ name: 'Another Custom Component',
+ stateMachine: ({world, eid, schemaAttribute, dataAttribute}) => {
+ ecs.defineState('default')
+ .initial()
+ .onEnter(() => {
+ const entity = world.createEntity()
+
+ CustomComponent.set(world, entity, {
+ displayName: 'Jini'
+ })
+ })
+ },
+})
+```
+
+## Functions
+Component functions allow you to perform different actions on a component and its data in relation to an entity.
+
+
diff --git a/docs/studio/essentials/component-lifecycle.mdx b/docs/studio/essentials/component-lifecycle.mdx
new file mode 100644
index 0000000..13a2c1b
--- /dev/null
+++ b/docs/studio/essentials/component-lifecycle.mdx
@@ -0,0 +1,59 @@
+---
+id: component-lifecycle
+description: Lifecycle events can be used to trigger Component behavior on add, tick, or remove.
+sidebar_position: 55
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+
+# Component Lifecycle
+
+## Introduction
+
+{frontMatter.description}
+
+## Example
+
+```ts
+const MyComponent = ecs.registerComponent({
+ name: 'My Component',
+ add: (world, component) => {
+ console.log('My component was added to', component.eid)
+ },
+ tick: (world, component) => {
+ console.log('My tick function is running on', component.eid)
+ },
+ remove: (world, component) => {
+ console.log('My component was removed from', component.eid)
+ },
+})
+```
+
+## Methods
+
+| Method | Description |
+|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| add | Called once when the Component is initialized. Used to set up initial state and instantiate variables |
+| remove | Called when the Component is removed from the entity or when the entity is detached from the scene. Used to undo all previous modifications to the entity. |
+| tick | Called on each render loop or tick of the scene. Used for continuous changes or checks. |
+
+### Parameters
+
+| Property | Type | Description |
+|-----------|-------------------------------------|-------------------------------------|
+| world | [World](/docs/studio/api/world) | Reference to the World. |
+| component | [ComponentObject](#componentobject) | Reference to the current Component. |
+
+### ComponentObject
+
+:::warning
+Use schemaAttribute or dataAttribute instead of eid, schema, or data properties in asynchronous contexts like timers or event handlers.
+:::
+
+| Property | Type | Description |
+|-----------------|-----------------|-------------------------------------------------------------|
+| eid | eid | The Entity ID of the current Component |
+| schema | Cursor | Reference to the current Entity's schema |
+| schemaAttribute | ComponentObject | Reference to the current Component's schema in World Scope. |
+| data | Cursor | Reference to the current Entity's data |
+| dataAttribute | ComponentObject | Reference to the current Component's data in World Scope. |
diff --git a/docs/studio/essentials/custom-components.mdx b/docs/studio/essentials/custom-components.mdx
new file mode 100644
index 0000000..5dc01bb
--- /dev/null
+++ b/docs/studio/essentials/custom-components.mdx
@@ -0,0 +1,57 @@
+---
+id: custom-components
+description: Custom Components are used to add custom game logic to your entities.
+sidebar_position: 40
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+
+# Custom Components
+
+## Introduction
+{frontMatter.description}
+
+## Creating a Custom Component
+To create a custom Component, follow these steps:
+
+1. In the File browser, click the plus button (+).
+2. Click "New file" → "New Component file", and give it a name (File extension optional).
+ 1. A new Component file will be generated, TypeScript by default, in your project.
+ 2. The new Component file will include the boilerplate code required to register the custom Component.
+
+## Registering a Custom Component
+The following code is an example of how a newly created Custom Component will appear in the Code Editor:
+
+### Example
+
+``` ts
+// This is a component file. You can use this file to define a custom component for your project.
+// This component will appear as a custom component in the editor.
+
+import * as ecs from '@8thwall/ecs' // This is how you access the ecs library.
+
+ecs.registerComponent({
+ name: 'Custom Component',
+ // schema: {
+ // },
+ // schemaDefaults: {
+ // },
+ // data: {
+ // },
+ // add: (world, component) => {
+ // },
+ // tick: (world, component) => {
+ // },
+ // remove: (world, component) => {
+ // },
+ // stateMachine: ({world, eid, schemaAttribute, dataAttribute}) => {
+ // ecs.defineState('default').initial()
+ // },
+})
+```
+
+From this example, you can add [schema](/docs/studio/essentials/schema), [a state machine](/docs/studio/essentials/state-machines), or [lifecycle callbacks](/docs/studio/essentials/component-lifecycle).
+
+:::warning
+Component names starting with ‘debug-’ are reserved for internal debugging purposes and **will not display in the editor.**
+:::
diff --git a/docs/studio/essentials/entities-and-components/_category_.json b/docs/studio/essentials/entities-and-components/_category_.json
new file mode 100644
index 0000000..4cd5a0b
--- /dev/null
+++ b/docs/studio/essentials/entities-and-components/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Entities and Components",
+ "position": 1
+}
\ No newline at end of file
diff --git a/docs/studio/essentials/overview.mdx b/docs/studio/essentials/overview.mdx
new file mode 100644
index 0000000..816ddda
--- /dev/null
+++ b/docs/studio/essentials/overview.mdx
@@ -0,0 +1,46 @@
+---
+id: overview
+description: 8th Wall Studio utilizes an Entity Component System (ECS) architecture to manage entities and their behaviors.
+sidebar_position: 1
+---
+
+# Overview
+
+## Introduction
+
+{frontMatter.description}
+
+``` mermaid
+%%{init: {"flowchart": {"htmlLabels": false}} }%%
+graph TD;
+ world("World") --> space("Space");
+ space --> eid("Entity");
+ eid("Entity") --> ComponentA("Position");
+ eid("Entity") --> ComponentB("Quaternion");
+ eid("Entity") --> ComponentC("Scale");
+ eid("Entity") --> ComponentE("CapsuleGeometry");
+ eid("Entity") --> ComponentF("Material");
+ eid("Entity") --> ComponentD("Health");
+```
+
+An entity exists within a space, which is owned by a world. The world represents the overarching environment or context, while spaces group entities together. For example, a world could contain a game level, with spaces organizing different areas or scenes. Entities within each space can have components such as position, rotation, scale, health, geometry, and material. Each component defines a distinct characteristic or behavior of the entity, enabling modular control over its attributes.
+
+## World {#world}
+
+A World is the container for all spaces/entities, and exposes APIs for [audio](/docs/studio/api/world/audio), [events](/docs/studio/api/world/events/), [3D transforms](/docs/studio/api/world/transform/), and more.
+
+## Space
+
+A Space is a group of Entities. It also contains global settings for features like fog, skybox, and included spaces that are activated when the space is loaded. See more in [Spaces](/docs/studio/guides/spaces/).
+
+## Entities
+
+Entities are 3D objects that form the backbone of any game or simulation in 8th Wall Studio. An entity by itself has no behavior or appearance; it simply acts as a container to which components can be attached. Entities are represented by a unique 64-bit integer called an Entity ID or eid. See more in [Entities](/docs/studio/guides/entities/).
+
+## Components
+
+Components are the building blocks that give entities their functionality. While an entity represents a blank object, in 8th Wall Studio, you can use built-in Components or create your own custom Components to define unique behaviors for your game. Components might define visual appearance, physical properties, input handling, or custom game logic. By combining multiple Components, you can create complex entities with rich behavior.
+
+## Relationships
+
+Entities and components work together in a hierarchical manner. By composing entities out of different components, you can build diverse and complex game objects without the need for rigid inheritance structures.
diff --git a/docs/studio/essentials/schema.mdx b/docs/studio/essentials/schema.mdx
new file mode 100644
index 0000000..11727ef
--- /dev/null
+++ b/docs/studio/essentials/schema.mdx
@@ -0,0 +1,179 @@
+---
+id: schema
+description: A Component Schema defines the data contained by a Component.
+toc_max_heading_level: 3
+sidebar_position: 42
+---
+
+import ComponentFunctions from "@site/src/components/component-functions";
+import EcsTypes from '/src/components/_ecs-types.mdx'
+
+# Schema
+
+{frontMatter.description} When you define a schema, you'll provide an object containing key/value pairs where the key is the name of the property, and the value is an [ECS type](/docs/studio/api/ecs/#types) that specifies the kind of data that property will hold.
+
+
+
+## Example
+The following example shows a Custom Component's schema.
+
+``` ts
+schema: {
+ target: ecs.eid, // Unique entity reference for the NPC (Entity ID)
+ speed: ecs.f32, // Movement speed of the NPC (32-bit float)
+ strength: ecs.f64, // Strength level for the NPC (64-bit float)
+ level: ecs.i32, // Character level of the NPC (32-bit integer)
+ armor: ecs.ui8, // Armor rating of the NPC (0-255, 8-bit unsigned integer)
+ experience: ecs.ui32, // Experience points of the NPC (32-bit unsigned integer)
+ guildName: ecs.string, // Name of the guild the NPC belongs to. (String)
+ isHostile: ecs.boolean // Boolean indicating if the NPC is hostile to the player (Boolean)
+}
+```
+
+Defaults can be provided, but are not required. If defaults are not provided, numbers will default to `0`, booleans will default to `false`, strings will default to `''`, and entity references will default to unset. There is no way to set a default for an entity reference at the component level.
+
+``` ts
+schemaDefaults: {
+ speed: 3.14,
+ strength: 5.8,
+ level: 10,
+ armor: 255,
+ experience: 12,
+ guildName: 'Niantic Crew',
+ isHostile: false
+}
+```
+
+## Custom Editor Fields {#custom-editor-fields}
+Display and functionality of your components in the entity editor can be customized in various ways. This is all done using comments inside the schema where fields are marked `// @`.
+
+### Labels
+Sometimes labels in the editor need to be more descriptive than their names in code.
+
+``` ts
+schema: {
+ // @label Foo
+ bar: ecs.eid,
+},
+```
+
+### Asset References
+If you need to reference an asset in the project.
+
+``` ts
+schema: {
+ // @asset
+ yetiModel: ecs.string,
+}
+```
+
+### Min & Max
+If you need to clamp values from going over a certain amount when changed in the interface.
+*Does not affect changes to the variable at runtime.*
+
+``` ts
+schema: {
+ // @min 0
+ // @max 128
+ goldCoins: ecs.i32,
+}
+```
+
+### Conditions
+Properties can be set to only show depending on the values of other properties.
+
+``` ts
+schema: {
+ // 'from' will only show if autoFrom set false:
+ autoFrom: ecs.boolean,
+ // @condition autoFrom=false
+ from: ecs.f32,
+
+ // 'easingFunction' will show if either easeIn or easeOut set:
+ easeIn: ecs.boolean,
+ easeOut: ecs.boolean,
+ // @condition easeIn=true|easeOut=true
+ easingFunction: ecs.string,
+
+ // 'targetX' only shows if no target set:
+ target: ecs.eid,
+ // @condition target=null
+ targetX: ecs.f32,
+}
+```
+
+### Enumerations
+
+String properties can be limited to a set list:
+
+``` ts
+schema: {
+ // @enum Quadratic, Cubic, Quartic, Quintic, Sinusoidal, Exponential
+ easingFunction: ecs.string,
+}
+```
+
+### Groups
+Certain groups of properties can be instructed to be treated specially in the editor. Groups are configured as follows:
+
+* The start and end of the group are marked with `// @group start` … and `// @group end`
+* Conditions can be applied to the whole group with `// @group condition`
+* Two kinds of group are currently supported: `vector3` and `color`
+
+#### Labels
+Custom labels can still be used for individual fields:
+
+``` ts
+schema: {
+ // @group start orient:vector3
+ // @label Pitch
+ orientPitch: ecs.f32,
+ // @label Yaw
+ orientYaw: ecs.f32,
+ // @label Roll
+ orientRoll: ecs.f32,
+ // @group end
+}
+```
+
+#### Vector3
+Groups of properties that represent 3D vectors can be indicated as follows:
+
+``` ts
+schema: {
+ autoFrom: ecs.boolean,
+ // @group start from:vector3
+ // @group condition autoFrom=false
+ fromX: ecs.f32,
+ fromY: ecs.f32,
+ fromZ: ecs.f32,
+ // @group end
+}
+```
+
+#### Color
+Colors can be indicated as in the following example:
+
+``` ts
+schema: {
+ // @group start background:color
+ bgRed: ecs.f32,
+ bgGreen: ecs.f32,
+ bgBlue: ecs.f32,
+ // @group end
+}
+```
+
+## Data
+Data is similar to Schema, however there are two notable differences.
+
+1. Data can **not** be read or written outside the Component it is defined in.
+2. Data does **not** have default values, however they can be set in the 'add' lifecycle method for similar functionality.
+
+## Cursors
+A Cursor is a reference to another Component's schema attached to an entity. You can think of it like a pointer or handle that allows you to read or write properties.
+
+However, keep in mind:
+
+* Cursors can become stale if used asynchronously (e.g., inside setTimeout, or delayed callbacks). Use schemaAttribute and dataAttribute instead in these cases.
+* Cursors provide live access to the data on an entity, which means modifying a cursor directly changes the value in the world.
diff --git a/docs/studio/essentials/state-machines.mdx b/docs/studio/essentials/state-machines.mdx
new file mode 100644
index 0000000..fa6fa7a
--- /dev/null
+++ b/docs/studio/essentials/state-machines.mdx
@@ -0,0 +1,340 @@
+---
+id: state-machines
+description: State Machines are designed to simplify state management.
+sidebar_position: 50
+toc_max_heading_level: 3
+---
+
+# State Machines
+
+## Introduction
+
+
+{frontMatter.description}
+
+A state machine is made up of three main components:
+
+* States
+* State Groups
+* Triggers
+
+A state machine is always in exactly one state at a time, and will transition between states when certain conditions (defined by the triggers) are met. State groups are a convenient way to bundle shared logic between multiple states, but the groups are not states themselves.
+
+## Example
+
+```ts
+ecs.registerComponent({
+ name: 'Jump On Touch',
+ stateMachine: ({world, entity, defineState}) => {
+ const idle = defineState('idle').initial().onEnter(() => {
+ console.log('Entering idle state')
+ }).onEvent(ecs.input.SCREEN_TOUCH_START, 'jumping')
+
+ const jumping = defineState('jumping').onEnter(() => {
+ console.log('Entering jumping state')
+ ecs.physics.applyImpulse(world, entity.eid, 0, 5, 0)
+ }).onTick(() => {
+ console.log('In jumping state')
+ }).wait(2000, 'idle')
+ },
+})
+```
+
+## Defining a State Machine
+
+When creating a state machine inside a component, your function is called with the following:
+
+### Properties
+
+| Property | Type | Description |
+|------------------|----------------|-------------------------------------------------------------|
+| world | World | Reference to the World. |
+| eid | eid | The Entity ID of the current Component |
+| entity | Entity | The Entity instance of the current Component |
+| defineState | function | A function to define states on the state machine |
+| defineStateGroup | function | A function to define groups on the state machine |
+| schemaAttribute | WorldAttribute | Reference to the current Component's schema in World Scope. |
+| dataAttribute | WorldAttribute | Reference to the current Component's data in World Scope. |
+
+The following code is an example of how to define an empty state machine:
+
+``` ts
+ecs.registerComponent({
+ ...
+ stateMachine: ({world, entity, defineState}) => {
+ // Define states here
+ },
+})
+```
+
+## State
+
+A state is the fundamental atomic unit of a state machine. After defining the possible states of your state machine, you can move between States by defining triggers.
+
+### Defining a State
+
+The following code is an example of how to define a new State inside a state machine within a component.
+
+``` ts
+ecs.registerComponent({
+ ...
+ stateMachine: ({world, entity, defineState}) => {
+ const foo = defineState('foo')
+ ...
+ }
+})
+```
+
+:::tip
+State functions are “fluent,” meaning they return the same instance of the State, allowing you to chain multiple function calls in a single statement.
+:::
+
+### .initial()
+
+Mark this state as the first active state when the state machine is created.
+
+``` ts
+defineState('myCustomState').initial()
+```
+
+### .onEnter()
+
+Set a callback to run when entering this state.
+
+``` ts
+defineState('myCustomState').onEnter(() => {
+ // Do something
+})
+```
+
+### .onTick()
+
+Set a callback to run every frame while this state is active.
+
+``` ts
+defineState('myCustomState').onTick(() => {
+ // Do something
+})
+```
+
+### .onExit()
+
+Set a callback to run when exiting this state.
+
+``` ts
+defineState('myCustomState').onExit(() => {
+ // Do something
+})
+```
+
+### .onEvent()
+
+Transition to a new state when a specific event is received.
+
+| Parameter | Type | Description |
+|----------------------|-----------------|---------------------------------------------------|
+| event (Required) | string | The name of the event to listen for |
+| nextState (Required) | string or State | The state to transition to when the event occurs |
+| options (Optional) | object | Additional options |
+
+#### Options
+
+| Parameter | Type | Description |
+|-----------|--------------------|----------------------------------------------------------------------------------------------|
+| target | eid | The entity expected to receive the event (defaults to the current entity) |
+| where | (event) => boolean | An optional condition to check before transitioning; if false, the transition will not occur |
+
+``` ts
+defineState('myCustomState').onEvent(
+ ecs.input.SCREEN_TOUCH_START,
+ 'other',
+ {
+ target: world.events.globalId,
+ where: (event) => event.data.position.y > 0.5
+ }
+)
+```
+
+### .wait()
+
+Transition to a new state after a set amount of time.
+
+| Parameter | Type | Description |
+|-----------|-----------------|---------------------------------------------------|
+| timeout | number | The duration in milliseconds before transitioning |
+| nextState | string or State | The next state to transition to |
+
+``` ts
+defineState('myCustomState').wait(1000, 'myOtherCustomState')
+```
+
+### .onTrigger()
+
+Transition to a new state when a TriggerHandle (defined with `ecs.defineTrigger()`) is triggered.
+
+| Parameter | Type | Description |
+|-----------|-----------------|-----------------------------------------------------------------|
+| handle | TriggerHandle | The handle that will cause a transition when manually activated |
+| nextState | string or State | The next state to transition to |
+
+``` ts
+const toOther = ecs.defineTrigger()
+defineState('example').onTrigger(toOther, 'other')
+...
+toOther.trigger()
+```
+
+### .listen()
+
+Register an event listener that will be automatically added when the state is entered, and removed on exit.
+
+| Parameter | Type | Description |
+|-----------|-----------------------|---------------------------------------------------|
+| target | eid or () => eid | The entity that is expected to receive an event |
+| name | string | The event to listen for |
+| listener | (event) => void | The function to call when the event is dispatched |
+
+``` ts
+const handleCollision = (event) => {
+ console.log('Collided with', event.data.other)
+}
+defineState('example').listen(eid, ecs.physics.COLLISION_START_EVENT, handleCollision)
+```
+
+## State Groups
+
+A state group is a way to define behavior and triggers that apply to a list of states. State groups are not states themselves, and cannot be transitioned into directly. Instead, when any state in the group is active, the group’s behavior and triggers are also active.
+
+### Defining a State Group
+
+| Parameter | Type | Description |
+|----------------------|--------------------------|----------------------------------------------------------------------------------------------------------|
+| substates (Optional) | Array of string or State | The list of states that make up this group; excluding this parameter is equivalent to listing all states |
+
+``` ts
+const fizz = defineState('fizz')
+const buzz = defineState('buzz')
+
+const fizzBuzz = defineStateGroup([fizz, 'buzz'])
+```
+
+:::tip
+State Group functions are “fluent,” meaning they return the same instance of the State Group, allowing you to chain multiple function calls in a single statement.
+:::
+
+### .onEnter()
+
+Set a callback to run when entering this group.
+
+``` ts
+defineStateGroup(['a', 'b']).onEnter(() => {
+ // Do something
+})
+```
+
+### .onTick()
+
+Set a callback to run every frame while this group is active.
+
+``` ts
+defineStateGroup(['a', 'b']).onTick(() => {
+ // Do something
+})
+```
+
+### .onExit()
+
+Set a callback to run when exiting this group.
+
+``` ts
+defineStateGroup(['a', 'b']).onExit(() => {
+ // Do something
+})
+```
+
+### .onEvent()
+
+Transition to a new state when a specific event is received.
+
+| Parameter | Type | Description |
+|----------------------|-----------------|---------------------------------------------------|
+| event (Required) | string | The name of the event to listen for |
+| nextState (Required) | string or State | The state to transition to when the event occurs |
+| options (Optional) | object | Additional options |
+
+#### Options
+
+| Parameter | Type | Description |
+|-----------|--------------------|----------------------------------------------------------------------------------------------|
+| target | eid | The entity expected to receive the event (defaults to the current entity) |
+| where | (event) => boolean | An optional condition to check before transitioning; if false, the transition will not occur |
+
+``` ts
+defineStateGroup(['a', 'b']).onEvent(
+ ecs.input.SCREEN_TOUCH_START,
+ 'other',
+ {
+ target: world.events.globalId,
+ where: (event) => event.data.position.y > 0.5
+ }
+)
+```
+
+### .wait()
+
+Transition to a new state after a set amount of time.
+
+| Parameter | Type | Description |
+|-----------|---------|---------------------------------------------------|
+| timeout | number | The duration in milliseconds before transitioning |
+| nextState | string or State | The next state to transition to |
+
+``` ts
+defineStateGroup(['a', 'b']).wait(1000, 'c')
+```
+
+### .onTrigger()
+
+Transition to a new state when a TriggerHandle (defined with `ecs.defineTrigger()`) is triggered.
+
+| Parameter | Type | Description |
+|-----------|-----------------|-----------------------------------------------------------------|
+| handle | TriggerHandle | The handle that will cause a transition when manually activated |
+| nextState | string or State | The next state to transition to |
+
+``` ts
+const toC = ecs.defineTrigger()
+defineStateGroup(['a', 'b']).onTrigger(toC, 'c')
+...
+toC.trigger()
+```
+
+### .listen()
+
+Register an event listener that will be automatically added when the state group is entered, and removed on exit.
+
+| Parameter | Type | Description |
+|-----------|-----------------------|---------------------------------------------------|
+| target | eid or () => eid | The entity that is expected to receive an event |
+| name | string | The event to listen for |
+| listener | (event) => void | The function to call when the event is dispatched |
+
+``` ts
+const handleCollision = (event) => {
+ console.log('collided with', event.data.other)
+}
+defineStateGroup(['a', 'b']).listen(eid, ecs.physics.COLLISION_START_EVENT, handleCollision)
+```
+
+## Custom Triggers
+
+You can define a custom trigger that can be invoked at any time to cause a transition.
+
+``` ts
+const go = ecs.defineTrigger()
+const stopped = defineState('stopped').onTick(() => {
+ if (world.input.getAction('start-going')) {
+ go.trigger()
+ }
+}).onTrigger(go, 'going')
+const going = defineState('going')
+```
diff --git a/docs/studio/getting-started/_category_.json b/docs/studio/getting-started/_category_.json
new file mode 100644
index 0000000..87121b8
--- /dev/null
+++ b/docs/studio/getting-started/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Getting Started",
+ "position": 1
+}
\ No newline at end of file
diff --git a/docs/studio/getting-started/create-project.md b/docs/studio/getting-started/create-project.md
new file mode 100644
index 0000000..9003199
--- /dev/null
+++ b/docs/studio/getting-started/create-project.md
@@ -0,0 +1,38 @@
+---
+id: create-project
+description: This section explains how to use the 8th Wall Desktop App.
+sidebar_position: 2
+---
+
+# Create or open a project
+
+## Create a Project
+
+1. From the hub view, click **New Project**
+
+
+
+2. Enter the Project Title.
+
+
+
+3. Click **Create**.
+
+## Project Actions
+
+In the Studio hub view, you can move, delete, and find projects using the Project Actions menu `(...)` button next to each project:
+- **Reveal in finder**: opens your local file browser to project’s location
+- **Remove from disk**: deletes your project’s local files (the project will remain available on web as of its last cloud build)
+- **Change disk location**: opens your file browser to select a new folder location for your project to move to
+
+## Project Structure
+
+When you create or open a project for the first time, a local version of the project is added on your machine within `~/Documents/8th Wall/`. By default, the 8th Wall folder is created within your `Documents` folder, but you can change this by moving the 8th Wall folder to another location if preferred.
+
+The folder created for your project will include certain files and folders by default. The `src` folder mirrors the Project file directory you see in Studio. This folder is a directory within your project's file structure where you store files like component scripts, as well as assets like images, fonts, sounds, or other media that your project needs.
+
+
+
+:::warning
+Do not attempt to copy these files to another server. To publish and share your experience, see [documentation on publishing your project](/docs/studio/getting-started/publishing).
+:::
diff --git a/docs/studio/getting-started/getting-started.md b/docs/studio/getting-started/getting-started.md
new file mode 100644
index 0000000..52ddd98
--- /dev/null
+++ b/docs/studio/getting-started/getting-started.md
@@ -0,0 +1,10 @@
+---
+id: getting-started
+description: This section introduces the 8th Wall Desktop App.
+---
+
+# Getting Started with 8th Wall Studio
+
+You can now build projects right from your own machine while still staying connected to everything you already know in 8th Wall. This means you can move faster in your own code editor, save changes and see results right away, and stay in the flow while you experiment and iterate.
+
+
diff --git a/docs/studio/getting-started/installation.md b/docs/studio/getting-started/installation.md
new file mode 100644
index 0000000..4dbf0df
--- /dev/null
+++ b/docs/studio/getting-started/installation.md
@@ -0,0 +1,30 @@
+---
+id: installation
+description: This section explains how to install and set up the 8th Wall Desktop App.
+sidebar_position: 1
+---
+
+# Installation & Setup
+
+## Download and Install
+
+To install the 8th Wall Studio desktop app:
+- Go to [8thwall.org/downloads](https://8thwall.org/downloads)
+- Select your platform:
+ - **Apple Silicon**
+ - **Intel**
+ - **Windows**
+- Install the application by opening the `.dmg` or `.exe` file and dragging the app to your Applications folder. On Windows, you will need to click through the Unknown Publisher warning.
+- Open the 8th Wall app.
+
+When you launch the app for the first time, it might prompt you to give it access to some of your file system locations or allow its connections through the firewall. Accept those requests to help the 8th Wall app set up projects locally.
+
+When you open the 8th Wall desktop app, you’ll start in the app’s hub view where you can explore and access your projects, and edit local development preferences. You can also access quick links to resources like the project library, docs, and community discord.
+
+
+
+## Code Editor Preferences
+
+8th Wall desktop app allows you to work with your preferred local code editor like VSCode or Cursor. You can also change this at any time by clicking the **Settings** button in the left sidebar.
+
+
diff --git a/docs/studio/getting-started/interface.md b/docs/studio/getting-started/interface.md
new file mode 100644
index 0000000..955c083
--- /dev/null
+++ b/docs/studio/getting-started/interface.md
@@ -0,0 +1,204 @@
+---
+id: navigate-interface
+sidebar_position: 4
+---
+
+# Navigating the Interface
+
+Studio features a rich editor interface made up of a number of different tools and views, each of
+which are essential when developing your project.
+
+The sections below showcase the main Studio editor interface elements, with the fundamental features
+highlighted.
+
+
+
+
+
+## Hierarchy {#hierarchy}
+
+View the entities and objects included in the space, and change their nesting. You can reparent or
+unparent the object by clicking and dragging it to another position in the hierarchy. Right click to
+duplicate or delete objects. Add new objects to your space. Search and filter for different objects.
+
+
+
+## Assets {#assets}
+
+Files & Assets can be managed from the lower left panel.
+
+
+
+### Files {#files}
+
+Upload your own 3D models, 2D images, audio
+files, custom scripts and more. Create folders and drag files to reorganize their placement. You can
+also drag and drop an asset into the Viewport or the Hierarchy to add the entity into your scene. To
+learn more about using and optimizing 3D Models in GLB/GLTF format please see [Your 3D Models on the
+Web](/docs/engine/guides/your-3d-models-on-the-web/).
+
+### Prefabs {#prefabs}
+
+Create reusable, customizable game templates that streamline and scale your development.
+[Learn more about Prefabs](/docs/studio/guides/prefabs/).
+
+### Targets {#targets}
+
+Upload and manage project Image Targets.
+[Learn more about Image Targets](/docs/studio/guides/xr/image-targets/).
+
+## Viewport {#viewport}
+
+Add, position, update, and work with objects and lighting in the space. Use the lower perspective
+gizmo to change the view of the scene, change lighting and shadow visibility, and switch from
+orthographic to perspective view. Use the top toolbar to change the position, rotation, or scale of
+a selected object, or to undo and redo edits.
+
+
+
+### Shortcuts {#shortcuts}
+
+| Function | Keyboard Shortcut |
+|--------------------------|---------------------------------------------------------|
+| Camera Orbit | ⌥ Left Click+Drag |
+| Camera Pan | ⌥ Right Click+Drag, Right Click+Drag, Middle Click+Drag |
+| Camera Zoom | Scroll Wheel |
+| Focus on Selected Object | F |
+| Translate | W |
+| Rotate | E |
+| Scale | R |
+| Hide/Show UI Layer | ⌘\ |
+| Delete Object | Delete |
+| Duplicate | ⌘D |
+| Copy Object | ⌘C |
+| Paste Object | ⌘V |
+| Undo | ⌘Z |
+| Redo | ⌘⇧Z, ⌘Y |
+
+## Simulator {#simulator}
+
+Launch the simulator to play your scene. You can make edits to the entities in your space and
+see those immediately reflected in the simulator. The simulator also lets you test and view project changes across different device viewport sizes and
+simulated real-world environments without needing to leave Studio.
+
+
+
+
+
+If you're developing AR, you can access a collection of pre-recorded camera sequences.
+The AR Simulator has a number of playback controls and convenience features
+like:
+
+* Play bar, scrubber and in/out handles: Allow you to set up loop points, giving you granular
+ control over the selected sequence.
+* Recenter button (lower right): Recenters the camera feed to its origin. NOTE: Recenter is also
+ called each time the sequence loops and each time a new sequence is selected.
+
+## Publish Button {#publish-button}
+
+The final step is to publish your project. To learn more about publishing a project for public viewing please see the
+[Publish your project](/docs/studio/getting-started/publishing) section.
+
+## Settings & Inspector {#settings-inspector}
+
+View and configure object-specific components, as well as adjust overall settings for the editor.
+
+### Space Settings {#space-settings}
+
+When **no entity is selected** you will see general settings for your project.
+
+
+
+#### Default Settings {#default-settings}
+
+Style your Space with settings like Skybox and Fog. Skyboxes are a wrapper around your entire scene that shows what the world
+looks like beyond your geometry. If your project is configured to use AR on an AR-compatible device,
+(see [XR](/docs/studio/guides/xr/world/)) the Skybox will not be rendered.
+
+#### Project Settings {#project-settings}
+
+If you have multiple Spaces, select which one is the entry space.
+
+Use the Input Manager to set up experiences that work across different devices inputs like
+keyboards, gamepad controls, trackpads, and touch screen actions. Create your event action and set
+up a mapping (or binding) to different inputs. [Learn more about the Input system](/docs/studio/guides/input)
+
+#### Project Version {#project-version}
+
+Studio projects can run on a specific runtime version, which can be selected here. Pin your project to a fixed runtime for predictability, or opt in to automatic minor updates and bug fixes to always stay current.
+
+#### Source Control {#source-control}
+
+Manage different versions of your project and change history. Creating a new client creates a new
+version of your project which can be helpful for testing changes without affecting your main
+version. You can also access a history of the project’s previous Landed changes by selecting the
+Project History function.
+
+#### Code Editor {#code-editor}
+
+Choose from different usability settings like light/dark modes, keybindings, and code save settings.
+
+### Inspector {#inspector}
+
+Inspect and configure an entity and its components. Learn more about entities and components in [Overview](/docs/studio/essentials/overview/).
+
+By default every entity displays a Transform component in the Inspector. Different types of entities
+may display different components, for example a Primitive will display a Mesh component with
+configurable options like geometry shape settings, materials, textures, etc.
+
+#### Components {#components}
+
+You can add a component using the "+ New Component" button. There are several types of built-in
+components in Studio, including Physics, Lighting, Audio, Animations, and more. Custom components
+can also be added - [Learn more about Custom Components](/docs/studio/essentials/custom-components/). Once set up, your custom component
+will
+appear in the Custom category. Click the three dots to remove a component.
+
+
+
+## Devices & Console {#devices--console}
+
+### Connect Device {#connect-device}
+
+:::tip
+Testing your project on multiple devices ensures that users will see a consistent experience across
+a variety of screen sizes and platforms.
+:::
+
+Instantly preview projects on mobile, desktop, or headset device or in another browser window as you
+develop via link/QR code.
+
+
+
+* At the bottom of the Studio interface, click the Connect Device button.
+* Scan the QR code with your mobile device to open a web browser and test your project.
+ Or click on the QR code to open a new tab on your desktop browser.
+* When the page loads, if your project uses WebAR, you'll be prompted for access to motion and
+ orientation sensors (on some devices) and the camera (all devices). Click Allow for all permission
+ prompts. You will be taken to the private development URL for the project.
+* Note: The "Preview" QR code is a temporary, one-time use QR code
+ only meant for use by the developer while actively developing in Studio. This QR code takes
+ you to a private, development URL, and isn't accessible by others. To share your work with others,
+ please see the section below on Publishing your project.
+* Click the headset icon to generate a link for a headset device.
+
+### Console {#console}
+
+Debug your project build actions and runtime. Debug Mode is an advanced Studio feature that provides
+logging, performance information, and enhanced visualizations directly on your device.
+
+
+
+## Code Editor {#code-editor-1}
+
+The 8th Wall Code Editor equips developers with a set of coding tools to create, collaborate and
+publish web-based XR content. Our powerful IDE includes the code editor, integrated source control,
+commit history, live preview, wireless remote debugging and push-button hosting on a global CDN.
+Other Code Editor features include:
+
+* Intellisense
+* Command Palette
+* Code Peek
+* Light/Dark Themes
+
+
diff --git a/docs/studio/getting-started/making-changes.md b/docs/studio/getting-started/making-changes.md
new file mode 100644
index 0000000..6758784
--- /dev/null
+++ b/docs/studio/getting-started/making-changes.md
@@ -0,0 +1,36 @@
+---
+id: making-changes
+sidebar_position: 4
+---
+
+# Making Changes
+
+If this is your first time opening Studio, see the documentation on [navigating the interface](/docs/studio/getting-started/navigate-interface/).
+
+## Adding Entities
+
+Add new objects to your space by clicking the **+** button above the scene hierarchy.
+
+
+
+## Position, Rotate, Scale
+
+Use the top toolbar to change the position, rotation, or scale of a selected object, or to undo and redo edits.
+
+
+
+## Inspector Panel
+
+Inspect and configure an entity and its components. Learn more about entities and components in [Overview](/docs/studio/essentials/overview/).
+
+By default every entity displays a Transform component in the Inspector. Different types of entities may display different components, for example a Primitive will display a Mesh component with configurable options like geometry shape settings, materials, textures, etc.
+
+
+
+## Code and Assets
+
+The desktop app listens for changes to your local directory in real time. For example, if you use VSCode to update a project’s `component.ts` file, as soon as you save the file, you should see the updated file appear in Studio.
+
+Similarly, you can work within 3D modeling tools like Blender and Maya and save asset changes directly to your 8th Wall project. This enables you to work across different programs and create a single streamlined pipeline, so your workflow stays intact from start to finish.
+
+
diff --git a/docs/studio/getting-started/publishing.md b/docs/studio/getting-started/publishing.md
new file mode 100644
index 0000000..5d93beb
--- /dev/null
+++ b/docs/studio/getting-started/publishing.md
@@ -0,0 +1,278 @@
+---
+id: publishing
+description: This section explains how to export an HTML5 bundle.
+sidebar_position: 6
+---
+
+# Publishing
+
+## Creating a production build {#creating-production-build}
+
+1. Open your Studio project.
+2. Click **Publish**, select **HTML5**.
+3. Click **Build** to generate your HTML5 bundle.
+4. Once the build is complete, download the `.zip` file.
+
+---
+
+## Testing a production build
+
+1. Unzip the file downloaded from Studio.
+2. If you do not already have `npm` installed, follow the instructions on this [page](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) to set it up.
+3. Run `npm install --global http-server` to install the [http-server](https://www.npmjs.com/package/http-server) npm package as a global CLI tool.
+4. Run `http-server <path-to-your-unzipped-project>`
+ 1. Example: `http-server /Users/John/Downloads/my-project`
+5. There should be some logs that list a series of local URLs like:
+```sh
+Available on:
+ http://127.0.0.1:8080
+ http://192.168.20.43:8080
+ http://172.29.29.159:8080
+```
+6. Open one of the URLs in your web browser.
+
+---
+
+## Self-Hosting your project
+
+The HTML5 bundle can be self hosted or deployed in many different ways. For more comprehensive information on self hosting, check out this [guide](https://github.com/mikeroyal/Self-Hosting-Guide).
+
+Below are common hosting options grouped by workflow:
+
+- **Drag & drop (recommended for beginners)**: upload your `dist/` folder (or a zip) in a web UI.
+- **Git-based CI/CD**: connect a Git repo for automatic deploys when you push changes.
+
+### Drag & drop hosting
+
+:::tip
+These hosting solutions are recommended for beginners or if you just want "upload and go". For ongoing updates, you will need to rebuild locally and upload/deploy again.
+:::
+
+#### Netlify Drop
+
+Netlify Drop lets you drag and drop your dist folder and get a live URL immediately, great for quick demos and sharing.
+
+1. Build your project: `npm run build`
+2. Open [Netlify Drop](https://app.netlify.com/drop)
+3. Drag your `dist/` folder into the page
+4. You’ll get a live URL right away
+
+#### Cloudflare Pages
+
+Cloudflare Pages supports a Direct Upload flow that includes drag & drop of a folder or zip.
+
+1. Build your project: `npm run build`
+2. Create a Pages project using **Direct Upload**
+3. Drag & drop the `dist/` folder (or upload a zip)
+4. Your site deploys and you get a URL
+
+#### AWS Amplify
+
+Amplify Hosting supports manual deployments where you can drag & drop a zipped build output.
+
+1. Build your project: `npm run build`
+2. Zip the `dist/` folder
+3. In Amplify Hosting, choose **Deploy without a Git provider**
+4. Drag & drop the zip and deploy
+
+#### Neocities
+
+Neocities is a straightforward platform that works well for simple static sites (especially personal/demo projects).
+
+**Steps**
+1. Build your project: `npm run build`
+2. Upload the contents of `dist/` via the Neocities editor/uploader
+3. Use the provided site URL
+
+
+### Git-based hosting
+
+:::tip
+If you plan to keep iterating, git-based hosting gives you automatic deployments when you push to your repo. These solutions are better for teams & ongoing updates.
+:::
+
+#### GitHub Pages
+
+GitHub Pages publishes static files from a repository and is a common "set it and forget it" option.
+
+#### Vercel / Netlify (CI/CD)
+
+If your project lives in GitHub/GitLab, these platforms can auto-build and auto-deploy on every push.
+
+---
+
+## Publishing your 8th Wall project to gaming platforms
+
+Since 8th Wall HTML5 bundles are fully contained builds, they can be self hosted or published to many gaming platforms.
+
+### Itch.io
+
+1. Download the `.zip` bundle.
+2. Log in to [Itch.io](https://itch.io) and [create a new project](https://itch.io/game/new).
+3. Fill in the project details:
+ - Under **Kind of project**, select **HTML**.
+ - Under **Uploads**, select **Upload files**. Upload the `.zip` file that you downloaded in Step 1. Check the **This file will be played in the browser** checkbox.
+ - Under **Embed options**, choose the appropriate sizing for your project.
+4. Finish configuring your game and publish it.
+
+### Viverse
+
+1. Sign in to [Viverse](https://viverse.com) and [go to Viverse Studio](https://studio.viverse.com).
+2. Under **Upload Your Own Build**, click **Upload**.
+3. Click **Create New World**.
+4. Enter the **Name** and **Description** for your project, then click **Create**.
+5. Click **Content Versions**.
+6. Under **New Version**, click **Select File**. Upload the `.zip` file that you downloaded earlier, then click **Upload**.
+7. Under **iframe Support for Preview**, click **Apply iframe Settings** and enable all permissions that your project requires.
+ - Note that Viverse will put your project downloaded from 8th Wall in its own iframe, and the Viverse iframe will need to grant any permissions that your project requires.
+8. Finish configuring your game and publish it.
+
+### Game Jolt
+
+1. Sign in to [Game Jolt](https://gamejolt.com) and [go to Game Jolt Store](https://gamejolt.com/games).
+2. Click **Add Your Game**.
+3. Enter the project details and click **Save & Next**.
+4. On your game dashboard, under **Packages**, click **Add Package**.
+5. Under **Edit package**, click **New Release**.
+6. Click **Upload Browser Build**. Upload the `.zip` file that you downloaded earlier.
+7. Configure your game dimensions, or select **Fit to screen?** if you want the game to fit the screen.
+8. Finish configuring your game and publish it.
+
+### GamePix
+
+:::info[Important]
+GamePix does not allow games with external links. Make sure your project does NOT make network calls outside of the bundle.
+:::
+
+1. Download the **Full HTML** embed code.
+2. Sign up for a [GamePix Developer Account](https://partners.gamepix.com/join-us?t=developer) and go to the [GamePix Dashboard](https://my.gamepix.com/dashboard).
+3. Click **Create New Game**.
+4. Enter the game details and click **Create**.
+5. Under **Info**, select **HTML5-JS** under **Game Engine**.
+6. Under **Build**, click **Browse File**. Upload the `.zip` file you downloaded earlier.
+7. Finish configuring your game and publish it.
+
+### Newgrounds
+
+1. Download the **Full HTML** embed code. Make a `.zip` file of this `index.html` file.
+2. Sign up for a [Newgrounds account](https://www.newgrounds.com).
+3. Click the arrow in the top right corner and select **Game (swf, HTML5)**
+4. Under **Submission File(s)**, click **Upload File**. Upload the `.zip` file you downloaded earlier.
+5. Configure your game dimensions and check **Touchscreen friendly**
+6. Finish configuring your game and publish it.
+
+### Y8
+
+1. Download the **Full HTML** embed code. Make a `.zip` file of this `index.html` file.
+2. Log into [Y8](https://www.y8.com/upload).
+3. Make sure you have verified your email, then [create a free Y8 Storage Account](https://account.y8.com/storage_account).
+4. Under **Game**, choose **Zip** and then **HTML5**.
+5. Click **Choose File**. Upload the `.zip` file you downloaded earlier. If you have not created a Storage Account it will fail. If that happens, click **Create Storage Account** to create one, then refresh the **Upload Your Content to Y8** page and try again.
+6. Finish configuring your game and publish it.
+
+### Poki
+
+1. Go to the [Poki Developer Portal](https://developers.poki.com/share).
+2. Fill in your project details, using the link to your hosted project under **Link to your game**.
+3. Click **Share your game**.
+
+### Kongregate
+
+1. Email the Kongregate team at [bd@kongregate.com](mailto:bd@kongregate.com). Include the link to your hosted project in your email.
+
+### Armor Games
+
+1. Email the Armor Games team at [mygame@armorgames.com](mailto:mygame@armorgames.com). Include the link to your hosted project in your email.
+
+### Addicting Games
+
+1. Download the **Full HTML** embed code.
+2. Email the Addicting Games team at [games@addictinggames.com](mailto:games@addictinggames.com). Include the `.zip` file in your email, as well as all of the other information they request in the [Addicting Games Developer Center](https://www.addictinggames.com/about/upload#Send).
+
+### Lagged
+
+1. Email the Lagged team at [contact@lagged.com](mailto:contact@lagged.com). Include the link to your hosted project in your email.
+2. Once you are approved, you can [sign up for a Lagged account](https://lagged.dev/signup) using the **Invite Code** they provide you and upload your game.
+
+### Discord
+
+#### Sample Project
+
+As a starting point to use the Discord Embedded SDK with your project, you can try out our sample project.
+
+1. Navigate to https://www.8thwall.com/8thwall/discord-activity-example and clone the project to your workspace.
+2. Follow the steps in [Creating a production build](#creating-production-build)
+3. Download the `.zip` to a location of your choosing.
+
+#### Discord Developer Set Up
+
+In order to run a web client in Discord, you'll need to set up an account and create an app in the developer hub.
+
+1. Create a Discord Account and navigate to https://discord.com/developers/applications
+
+2. Create a new application by clicking the Button in the top right corner
+ 1. Enter a name for the application and accept the terms of service
+
+
+
+3. Go to the **OAuth2** page, under the **Settings** section:
+ 1. Add `http://127.0.0.1` as a redirect URI for testing.
+ 2. Save the `Client ID` somewhere secure.
+ 3. Click "Reset Secret" to retrieve the `Client Secret` and store it somewhere safe.
+ 4. Press "Save" to keep your settings.
+
+
+
+4. Navigate to the **URL Mappings** page, under the **Activities** section:
+ 1. Add a temporary target to the root mapping like `127.0.0.1:8888`. This will be replaced later with your public URL, but it's required to enable Activities in the next step.
+
+5. Go to the **Settings** page, under the **Activities** section:
+ 1. Toggle **Enable Activities** and accept the app launcher agreement.
+
+
+
+6. Then, go to the **Installation** tab, under the **Settings** section:
+ 1. Copy the link from the **Install Link** panel and open it in your browser.
+ 2. Install the application to make it accessible in any server or DM.
+
+#### Launching an Application
+
+1. Set up the example server code at https://github.com/8thwall/discord-activity-example
+ 1. `git clone https://github.com/8thwall/discord-activity-example`
+ 2. Run `npm install`
+ 3. Unzip the `.zip` downloaded earlier containing the frontend of the project.
+ 4. Create a `.env` file in the root of the repo, and fill it out with the details from the Discord Developer Portal:
+ ```
+ DISCORD_CLIENT_ID=XXXXXXXXXX
+ DISCORD_CLIENT_SECRET=XXXXXXXXXX
+ DISCORD_CLIENT_HOST_PATH=/path/to/unzipped/folder
+ ```
+ 5. Enter `npm start` to start the server.
+
+2. Use `cloudflared` to create a tunnel, so the project will be publicly accessible over the internet.
+ 1. `brew install cloudflared` to download the `cloudflared` CLI tool
+ 2. Run `cloudflared tunnel --url http://localhost:8888`.
+ 3. Make note of the URL that was generated.
+
+ Example:
+ ```
+ 2025-10-11T03:05:16Z INF +--------------------------------------------------------------------------------------------+
+ 2025-10-11T03:05:16Z INF | Your quick Tunnel has been created! Visit it at (it may take some time to be reachable): |
+ 2025-10-11T03:05:16Z INF | https://sporting-follow-audit-href.trycloudflare.com |
+ 2025-10-11T03:05:16Z INF +--------------------------------------------------------------------------------------------+
+ ```
+ 4. Open the `cloudflared` URL in your browser to make sure the project loads.
+
+3. Update your Discord application settings:
+ 1. Open the Discord Developer Portal and navigate to your application
+ 2. Go to **URL Mappings** under the **Activities** section
+ 3. Replace the temporary target with your `cloudflared` URL for the **Root Mapping**
+
+
+
+4. Test your Discord Activity:
+ 1. Open Discord and navigate to any DM or server
+ 2. Click the activities icon (game controller) in the voice channel controls
+ 3. Find and click your application in the **Apps & Commands** panel
+
+
diff --git a/docs/studio/getting-started/simulator.md b/docs/studio/getting-started/simulator.md
new file mode 100644
index 0000000..9871fbf
--- /dev/null
+++ b/docs/studio/getting-started/simulator.md
@@ -0,0 +1,61 @@
+---
+id: simulator
+sidebar_position: 5
+---
+
+
+# Simulator
+
+## Overview
+
+Launch the simulator to play your scene. You can make edits to the entities in your space and
+see those immediately reflected in the simulator. The simulator also lets you test and view project changes across different device viewport sizes and
+simulated real-world environments without needing to leave Studio.
+
+
+
+
+
+## AR Simulator
+
+If you're developing AR, you can access a collection of pre-recorded camera sequences.
+The AR Simulator has a number of playback controls and convenience features
+like:
+
+* Play bar, scrubber and in/out handles: Allow you to set up loop points, giving you granular
+ control over the selected sequence.
+* Recenter button (lower right): Recenters the camera feed to its origin. NOTE: Recenter is also
+ called each time the sequence loops and each time a new sequence is selected.
+
+
+
+Use the bottom left Sequence Selection menu to change the AR sequence. You can use the carousel
+to switch between options in the sequence category. Pausing the sequence only pauses the video,
+allowing you to test changes at the same frame. Drag the playback handles to set in/out loop points.
+
+
+
+The camera button in the bottom right corner opens Live View, which follows the same logic as your project's camera configuration. Live View allows you to simulate your project using the feed from your desktop instead of a pre-recorded AR sequence. For example, if
+your project uses Face Effects and you have the Studio project open on desktop, it will open your
+desktop camera.
+
+:::note
+Live View in the Simulator may prompt you to enable camera, microphone, or location
+permissions depending on what is enabled in your project. Click Allow for permission prompts in order to
+see your experience in Live View.
+:::
+
+Your project might look different on different devices due to differences in the mobile web
+viewport size. Or you may want to see your project in both landscape and portrait mode. At the top
+left of the Simulator, you can choose from a set of common device viewport sizes, change the
+orientation, or use responsive mode to adjust to a custom size. You can also double click the edges
+of the Simulator panel to automatically fit the Simulator to the width of the selected device
+viewport. **Note: Dimensions are presented in CSS logical pixels (AKA viewport dimensions), not
+physical device pixels. When selecting a device from the selector, only the viewport dimensions
+will be updated, not the user agent of the client.**
+
+
+
+You can also simulate specific GPS coordinates if you're developing a location or map-based experience.
+
+
diff --git a/docs/studio/guides/_category_.json b/docs/studio/guides/_category_.json
new file mode 100644
index 0000000..ef9efb8
--- /dev/null
+++ b/docs/studio/guides/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Guides",
+ "position": 3
+}
\ No newline at end of file
diff --git a/docs/studio/guides/animation.mdx b/docs/studio/guides/animation.mdx
new file mode 100644
index 0000000..8de8c52
--- /dev/null
+++ b/docs/studio/guides/animation.mdx
@@ -0,0 +1,62 @@
+---
+id: animation
+description: This section provides guidance for setting up custom animations on entities. These are animations that transform some element of the entity over time, such as position, scale, or a property of some component on the entity.
+---
+
+# Animation
+
+## Introduction
+{frontMatter.description} Note: This section deals with creating animations in Studio and working with Studio’s core animation system. If you’re looking for guidance on working with a GLTF model file's built-in animations, see the [Models guide](/docs/studio/guides/models/).
+
+Components are provided for procedural animation, and can be split into two types:
+
+1. Transition
+2. Relationship
+
+## Transition Animations
+These animations transition an object from one state to another over time. There will be a 'from' and 'to' state, and the animation can either finish or be set to loop.
+
+### Position Animation
+Animate the position of a target object.
+
+See [Position Animation Component](/docs/studio/api/ecs/animation/position-animation/) API.
+
+### Scale Animation
+Animate the scale vector of a target object.
+
+See [Scale Animation Component](/docs/studio/api/ecs/animation/scale-animation/) API.
+
+### Rotate Animation
+Animate the rotation of a target object.
+
+See [Rotate Animation Component](/docs/studio/api/ecs/animation/rotate-animation/) API.
+
+### Custom Vec3 Animation
+Animate the named 3D vector attribute on a target object.
+
+See [Custom Vec3 Animation Component](/docs/studio/api/ecs/animation/custom-vec3-animation/) API.
+
+### Custom Property Animation
+Animate a single number property on the attributes of a target object.
+
+See [Custom Property Animation Component](/docs/studio/api/ecs/animation/custom-property-animation/) API.
+
+## Relationship Animations
+These are animations that express a relationship between an object and a target. When running, they maintain that relationship for the object that they are attached to.
+
+### Follow Animation
+Maintain distance to a given target or point.
+
+See [Follow Animation Component](/docs/studio/api/ecs/animation/follow-animation/) API.
+
+### Look At Animation
+Maintain orientation to face a given target or point.
+
+See [Look At Animation Component](/docs/studio/api/ecs/animation/look-at-animation/) API.
+
+## Adding an Animation
+Animations can be added to the entity via the editor or in code. Adding them in the editor is done via the "New Component" button:
+
+
+
+Animations can also be added via code, using the component’s import name.
diff --git a/docs/studio/guides/audio.mdx b/docs/studio/guides/audio.mdx
new file mode 100644
index 0000000..aef3bf2
--- /dev/null
+++ b/docs/studio/guides/audio.mdx
@@ -0,0 +1,75 @@
+---
+id: audio
+description: Audio is essential for creating a fully immersive experience in any scene. It adds depth, emotion, and atmosphere, making interactions more engaging and memorable.
+---
+
+# Audio
+
+## Introduction
+{frontMatter.description} Whether you’re aiming for realistic soundscapes or stylized audio effects, well-crafted audio design significantly enhances the impact and mood of your scene, drawing users deeper into the experience.
+
+See the [World](/docs/studio/api/world/audio) and [Audio Component](/docs/studio/api/ecs/audio/) APIs for properties and functions.
+
+## Adding Audio
+Audio can be added to the entity via the Studio interface or in code. Adding them in Studio is done via the 'New Component' button. The Audio component features various play settings.
+
+#### Supported Formats
+Studio supports the following audio file types: .mp3, .m4a, .wav, .ogg, and .aac
+
+#### Types of Audio
+
+**Global:** A sound that plays throughout the world with no changes in volume.
+
+**Positional:** A sound that plays at a certain position in the world and changes volume based on distance.
+
+
+
+### Playing a Sound Effect
+It's likely you will want sound effects in your game. In order to play a sound, an Audio component must be attached to an entity. Following this, you can create an entity for the purpose of playing the sound effect, and clean it up once it's done.
+
+``` tsx title="/sound-effect.ts"
+import * as ecs from '@8thwall/ecs'
+
+const SoundEffectComponent = ecs.registerComponent({
+ name: 'Sound Effect',
+ stateMachine: ({world, eid, schemaAttribute, dataAttribute}) => {
+ ecs.defineState('default')
+ .initial()
+ .onEnter(() => {
+ ecs.Audio.set(world, eid, {
+ url: 'assets/blaster.mp3',
+ volume: 1,
+ loop: false,
+ paused: false,
+ positional: true,
+ refDistance: 1,
+ distanceModel: 'Inverse',
+ rolloffFactor: 1,
+ })
+ })
+ .listen(eid, ecs.events.AUDIO_END, () => {
+ world.deleteEntity(eid)
+ })
+ },
+})
+
+export {SoundEffectComponent}
+```
+
+``` tsx title="/sound-effect-on-click.ts"
+import * as ecs from '@8thwall/ecs'
+import {SoundEffectComponent} from './sound-effect'
+
+ecs.registerComponent({
+ name: 'Play Sound Effect on Click',
+ stateMachine: ({world, eid, schemaAttribute, dataAttribute}) => {
+ ecs.defineState('default')
+ .initial()
+ .listen(eid, ecs.input.SCREEN_TOUCH_START, () => {
+ const ent = world.createEntity()
+
+ SoundEffectComponent.set(world, ent)
+ })
+ },
+})
+```
diff --git a/docs/studio/guides/best-practices/_category_.json b/docs/studio/guides/best-practices/_category_.json
new file mode 100644
index 0000000..18a67ac
--- /dev/null
+++ b/docs/studio/guides/best-practices/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Best Practices",
+ "position": 1
+}
\ No newline at end of file
diff --git a/docs/studio/guides/camera.mdx b/docs/studio/guides/camera.mdx
new file mode 100644
index 0000000..9eb2a04
--- /dev/null
+++ b/docs/studio/guides/camera.mdx
@@ -0,0 +1,22 @@
+---
+id: camera
+description: This section provides guidance on setting up the Camera. The Camera component system allows you to create and control the camera view.
+---
+
+# Camera
+
+## Introduction
+{frontMatter.description}
+
+## Adding a Camera
+A camera is provided by default in a new Studio project; however, you can also add a Camera to a scene using the + Object dropdown on the Space Hierarchy. Additionally, a camera can be added to an entity via Studio’s Add Component interface or in code.
+
+
+
+## Changing the Active Camera
+Click the *Set Active Camera* button on the Camera's properties.
+
+
+
+## Changing the Camera Type
+There are three types of Cameras in Studio: 3D only, Face, and World. Each of these camera types will have different settings. A Face or a World Camera is necessary for AR experiences. **To learn more about building AR experiences see the XR guides section.**
diff --git a/docs/studio/guides/entities.mdx b/docs/studio/guides/entities.mdx
new file mode 100644
index 0000000..0d500e4
--- /dev/null
+++ b/docs/studio/guides/entities.mdx
@@ -0,0 +1,91 @@
+---
+id: entities
+description: An entity by itself has no behavior or appearance; it simply acts as a container to which components can be attached.
+---
+
+# Entities
+
+## Introduction
+{frontMatter.description}
+
+## Creating an Entity
+The following code shows how to create a new entity without any components
+
+### Example
+
+``` ts
+import * as ecs from '@8thwall/ecs'
+
+const eid = world.createEntity()
+```
+
+## Deleting an Entity
+The following code shows how to delete an existing entity given its id:
+
+### Example
+
+``` ts
+import * as ecs from '@8thwall/ecs'
+
+world.deleteEntity(eid)
+```
+
+## Adding Components to an Entity
+The following code shows how to add a built-in component to an entity at runtime.
+
+### Example
+
+``` ts
+const box = world.createEntity()
+
+ecs.BoxGeometry.set(world, box, {
+ width: 1,
+ height: 1,
+ depth: 1,
+})
+
+ecs.Material.set(world, box, {
+ r: 255,
+ g: 255,
+ b: 255,
+})
+```
+
+## Create and Modify Relationships
+The following code shows how you can use built-in helper methods to create or change relationships between entities.
+
+### Example
+
+``` ts
+// Entities are automatically a child of World.
+const foo = world.createEntity()
+const bar = world.createEntity()
+
+// Set foo to be a child of bar.
+world.setParent(foo, bar)
+
+// Get the parent of bar. (returns an eid where <= 0 is undefined)
+world.getParent(bar)
+
+// Get the children of foo. (returns a Generator)
+world.getChildren(foo)
+```
+
+## Helper Functions
+There are a number of helper functions for interacting with an entity's transform.
+
+:::tip
+Entities are positioned relative to their parent. The `getWorldTransform()` function retrieves the object’s transform in world space, accounting for all parent transforms.
+:::
+
+``` ts
+world.setScale(eid, 1, 1, 1)
+world.setPosition(eid, 1, 1, 1)
+world.setQuaternion(eid, 0, 0, 0, 1)
+world.normalizeQuaternion(eid)
+
+const tempMatrix = ecs.math.mat4.i()
+world.getWorldTransform(eid1, tempMatrix)
+
+world.setTransform(eid2, tempMatrix)
+```
\ No newline at end of file
diff --git a/docs/studio/guides/events.mdx b/docs/studio/guides/events.mdx
new file mode 100644
index 0000000..8f19172
--- /dev/null
+++ b/docs/studio/guides/events.mdx
@@ -0,0 +1,168 @@
+---
+id: events
+description: Events are how entities can communicate with each other through a flexible listener and dispatch system.
+---
+
+# Events
+
+## Introduction
+{frontMatter.description}
+
+## Event Listeners
+
+##### Listener {#listener}
+
+| Property | Type | Description |
+|---------------|--------|-------------------------------------|
+| target | eid | Entity the event was dispatched on. |
+| currentTarget | eid | Entity the event was listened on. |
+| name | string | Name of the event. |
+| data | any | Event custom data |
+
+### Creating Event Listeners
+
+#### addListener
+
+``` ts
+world.events.addListener(target, name, listener)
+```
+
+##### Parameters
+
+:::tip
+It's possible to create global event listeners by using `world.events.globalId` as the target.
+:::
+
+| Property | Type | Description |
+|----------|-----------------------|------------------------------------------------------|
+| target | eid | Reference to the target entity. |
+| name | string | Name of the event to listen for. |
+| listener | [Listener](#listener) | The callback function for when an event is triggered |
+
+### Creating Event Handlers
+When adding event listeners to entities, it’s crucial to set up handlers correctly to ensure they function as intended, especially when components are added to multiple entities. Improper handler creation can lead to stale references and unexpected behavior.
+
+Suppose you’re creating a handler for an NPC entity that listens for a damaged event. The handler should update some schema and data values when the event occurs.
+
+#### Incorrect Example
+
+
+
+In this example:
+* The handler damagedHandler directly references `component.schema` and `component.data`.
+* If the component is added to multiple entities, the component reference inside the handler becomes stale.
+* This can cause the handler to operate on incorrect data, leading to bugs.
+
+#### Correct Example
+To ensure the handler operates on the correct entity data, pass the component’s `dataAttribute` and `schemaAttribute` to the handler and use them to fetch cursors inside the handler.
+
+
+
+### Removing Event Listeners
+
+#### removeListener
+
+``` ts
+world.events.removeListener(target, name, listener)
+```
+
+##### Parameters
+
+| Property | Type | Description |
+|----------|-----------------------|------------------------------------------------------|
+| target | eid | Reference to the target entity. |
+| name | string | Name of the event to listen for. |
+| listener | [Listener](#listener) | The callback function for when an event is triggered |
+
+## Event Dispatchers
+
+### Dispatching Custom Events
+
+#### Dispatch
+
+:::info
+When an event happens on an entity, it first runs the handlers on it, then on its parent, then all the way up on other ancestors.
+:::
+
+``` ts
+world.events.dispatch(eidOfEnemy, "attack", {damage: 10})
+```
+
+## Cleaning Up Listeners
+
+:::danger
+Always ensure listeners are properly removed to avoid memory leaks.
+:::
+
+When a component is deleted, its event listeners are not automatically cleaned up, so you must remove them manually.
diff --git a/docs/studio/guides/global-behaviors.mdx b/docs/studio/guides/global-behaviors.mdx
new file mode 100644
index 0000000..8b42103
--- /dev/null
+++ b/docs/studio/guides/global-behaviors.mdx
@@ -0,0 +1,77 @@
+---
+id: global-behaviors
+description: A behavior is a function that runs on the World every tick.
+---
+
+# Global Behaviors
+
+## Introduction
+
+{frontMatter.description} Compared to Components, which only operate on a single Entity at a time, a behavior can use queries to enumerate matching entities and operate on them as a group.
+
+## Defining a Behavior
+The following code is an example of how to define a custom Behavior:
+
+``` ts
+const behavior = (world) => {
+ if (world.time.elapsed % 5000 - world.time.delta < 0) {
+ const eid = world.createEntity()
+ Enemy.set(world, eid, {health: 100})
+ }
+}
+```
+
+## Registering a Behavior
+
+The following code is an example of how to register a custom Behavior:
+
+``` ts
+ecs.registerBehavior(behavior)
+```
+
+## Deactivating a Behavior
+
+The following code is an example of how to deactivate a custom Behavior:
+
+``` ts
+ecs.unregisterBehavior(behavior)
+```
+
+## Behavior Query
+
+Behaviors can run queries, which return lists of entity IDs.
+
+``` ts
+const query = ecs.defineQuery([Enemy, Health])
+
+const enemyDieBehavior = (world) => {
+ const enemies = query(world)
+
+ for (const enemyId of enemies) {
+ if (Health.get(world, enemyId).hp <= 0) {
+      world.deleteEntity(enemyId)
+ }
+ }
+}
+
+ecs.registerBehavior(enemyDieBehavior)
+```
+
+## Systems
+
+Behaviors can also be structured as Systems, which run on entities that match specific queries and allow for efficient data access.
+
+:::tip
+This approach improves performance because data like “enemy” and “health” are pre-fetched, making iteration faster.
+:::
+``` ts
+const enemyDieSystem = ecs.defineSystem([Enemy, Health],
+ (world, eid, [enemy, health]) => {
+ if (health.hp <= 0) {
+      world.deleteEntity(eid)
+ }
+ }
+)
+
+ecs.registerBehavior(enemyDieSystem)
+```
diff --git a/docs/studio/guides/input.mdx b/docs/studio/guides/input.mdx
new file mode 100644
index 0000000..d74028f
--- /dev/null
+++ b/docs/studio/guides/input.mdx
@@ -0,0 +1,127 @@
+---
+id: input
+description: This section explains how to set up inputs for your project.
+---
+
+# Input
+
+## Introduction
+{frontMatter.description} Inputs refer to the various methods through which users interact with the application. Currently, the supported input methods include touch, keyboard, mouse, and gamepad.
+
+See [world.input](/docs/studio/api/world/input/) for the API.
+
+## Input Manager
+In Studio, there are Inputs and Actions - the Input Manager sets up the mapping in between them. An action is an abstraction over the source of input(s) it receives. They are most useful for representing input as "logical" concepts (e.g. "jump") rather than as "physical" inputs (e.g. "space bar on keyboard pressed").
+
+
+
+### Action Maps
+You can manage Action Maps via the Input Manager in the Space Settings (the default state of the Inspector when no entity is selected).
+
+#### Adding an Action Map
+Action Maps are sets of actions that can be swapped programmatically. To create a new action map, click on the action map dropdown, then click on Create New Action Map. Then type in the name of the action map and save it. Action Maps can be renamed, duplicated, and deleted. The default action map can only be duplicated.
+
+
+
+:::info
+The currently selected Action Map is the Action Map that will be active by default.
+:::
+
+#### Action Map Templates
+When Create New Action Map is selected, several action map templates will appear. The Fly Controller and Orbit Controls templates are used for their respective components.
+
+
+
+### Adding an Action
+Actions are an abstraction layer to user inputs. To add an Action to the Action Map, click the (+) New Action button then type in the name for your Action.
+
+
+
+### Binding to an Action
+Bindings are how inputs can be associated with an action. To add a binding click the Add Binding button and select the input you want to bind to the action in the dropdown. There are Bindings for many types of inputs including keyboard, mouse and gamepad. Multiple input bindings can be mapped to a single action. To delete a binding, click the trash bin next to the binding you want to delete.
+
+
+
+### Action Modifiers
+Modifiers are an additional input you can assign to a binding in order to activate the action. To create a Binding with a modifier, click the With Modifier button. The modifier is selected with the first dropdown and the binding is selected with the second dropdown.
+
+
+
+## Examples
+
+### Detecting an action in tick
+
+``` ts
+ecs.registerComponent({
+ name: 'player-controller',
+ tick: (world, component) => {
+ if (world.input.getAction('jump')) {
+ // Handle jump logic here
+ }
+ },
+})
+```
+
+### Detecting an action in a state machine
+
+``` ts
+stateMachine: ({world, eid, schemaAttribute, dataAttribute}) => {
+ ecs.defineState('default').initial().onTick(() => {
+ if (world.input.getAction('jump')) {
+ // Handle jump logic here
+ }
+ })
+}
+```
+
+### Switching between action maps
+
+:::info
+When setting a new action map, the new map does not get activated until the next tick.
+:::
+
+#### In tick
+
+``` ts
+tick: (world, component) => {
+ if (world.input.getAction('menu')) {
+ world.input.setActiveMap(world.input.getActiveMap() === 'player' ? 'menu' : 'player')
+ return
+ }
+ if (world.input.getAction('jump')) {
+ // Handle jump logic here
+ }
+}
+```
+
+
+#### In a state machine
+
+``` ts
+stateMachine: ({world, eid, schemaAttribute, dataAttribute}) => {
+ const switchMap = ecs.defineTrigger()
+
+ ecs.defineState('default')
+ .initial()
+ .onEnter(() => {
+ world.input.setActiveMap('player')
+ })
+ .onTick(() => {
+ if (world.input.getAction('menu')) {
+ switchMap.trigger()
+ return
+ }
+ // Handle movement logic here
+ }).onTrigger(switchMap, 'menu')
+
+ ecs.defineState('menu').onEnter(() => {
+ world.input.setActiveMap('menu')
+ }).onTick(() => {
+ if (world.input.getAction('menu')) {
+ switchMap.trigger()
+ return
+ }
+ // Handle menu logic here
+ }).onTrigger(switchMap, 'default')
+}
+```
diff --git a/docs/studio/guides/lighting.mdx b/docs/studio/guides/lighting.mdx
new file mode 100644
index 0000000..6ecaf5d
--- /dev/null
+++ b/docs/studio/guides/lighting.mdx
@@ -0,0 +1,48 @@
+---
+id: lighting
+description: Whether you’re aiming for a highly realistic look or a stylized one, lighting can make a significant difference in how your scene feels and interacts with users.
+---
+
+# Lighting
+
+## Introduction
+Lighting plays a crucial role in enhancing the visual appeal of any scene. It adds depth, realism, and atmosphere, making experiences more immersive, engaging, and visually compelling. {frontMatter.description}
+
+## Adding a Light
+Lights and shadows can be added to an entity through the Studio interface or via code. In Studio, you can do this by clicking the (+) button in the Hierarchy or by selecting “New Component” in the Inspector for a chosen entity. Both Light and Shadow components offer various settings for customization.
+
+#### Types of Lights
+**Directional:**
+A light that gets emitted in a specific direction. This light will behave as though it is infinitely far away and the rays produced from it are all parallel. The common use case for this is to simulate daylight; the sun is far enough away that its position can be considered to be infinite, and all light rays coming from it are parallel.
+
+**Point:**
+A light that radiates in all directions from a single point. A common example is replicating light from a bare lightbulb.
+
+**Ambient:**
+This light uniformly illuminates all objects in the scene, creating global illumination.
+
+
+
+### Example
+The following example demonstrates how to assign a Light to an entity at runtime:
+
+``` ts
+ecs.Light.set(world, component.eid, {
+ type: 'point'
+})
+```
+
+## Adding Shadows
+**Objects in the scene won’t cast or receive shadows by default.** To enable shadows, add the Shadow component to the object and configure its properties accordingly.
+
+
+
+### Example
+The following example shows how to set a Shadow on an entity at runtime.
+
+``` ts
+ecs.Shadow.set(world, component.eid, {
+ castShadow: true,
+ receiveShadow: true
+})
+```
diff --git a/docs/studio/guides/materials.mdx b/docs/studio/guides/materials.mdx
new file mode 100644
index 0000000..e12e240
--- /dev/null
+++ b/docs/studio/guides/materials.mdx
@@ -0,0 +1,114 @@
+---
+id: materials
+description: This section explains how to use materials in Studio.
+---
+
+# Materials
+
+## Introduction
+This section explains how to use materials in Studio.
+
+## Material Types
+
+### Material or Standard Material
+A standard [PBR](https://learn.microsoft.com/en-us/azure/remote-rendering/overview/features/pbr-materials) material.
+
+View properties [here](/docs/studio/api/ecs/material/basic-material).
+
+### Unlit Material
+Material unaffected by lighting or shadows.
+Color remains consistent, no Physical Based Rendering (PBR) support.
+
+View properties [here](/docs/studio/api/ecs/material/unlit-material/).
+
+## Material Properties
+Materials can be configured either through code or directly within the Mesh component in the editor.
+
+
+
+### Configuring materials through code
+
+```ts
+ecs.Material.set(world, component.eid, {
+ r: 255,
+ g: 128,
+ b: 64,
+ roughness: 0.5,
+ metalness: 0.8,
+ opacity: 1.0,
+ side: "front"
+})
+
+ecs.UnlitMaterial.set(world, component.eid, {
+ r: 255,
+ g: 0,
+ b: 128,
+ opacity: 1.0,
+ side: "double"
+})
+```
+
+### Load a texture
+
+Loading a texture requires an asset or an independent url that serves an image.
+You can visit the API section on Material and Unlit Material to find out more about the different types of texture maps supported.
+
+```ts
+import * as ecs from '@8thwall/ecs'
+
+ecs.registerComponent({
+ name: 'apply-texture-to-material',
+ schema: {
+ // @asset
+ myTexture: ecs.string,
+ },
+ add: (world, component) => {
+ const { myTexture } = component.schemaAttribute.get(component.eid)
+ ecs.assets.load({ url: myTexture })
+ .then((result) => {
+ ecs.Material.set(world, component.eid, {
+ r: 255,
+ g: 128,
+ b: 64,
+ textureSrc: `${result.remoteUrl}`,
+ roughness: 0.5,
+ metalness: 0.8,
+ opacity: 1.0,
+ side: "back"
+ })
+ })
+ .catch((error) => {
+ console.error('Failed to load texture:', error)
+ })
+ }
+})
+```
+
+## Special Materials
+
+### Shadow Material
+Material that only renders when a shadow is cast onto it.
+
+Note: For this to work, you need three settings turned on:
+1. Enable "cast shadows" on the lights
+2. Enable "receive shadows" on the shadow material object
+3. The object casting the shadow (red ball in the image below) needs to cast shadows as well.
+
+
+
+```ts
+ecs.ShadowMaterial.set(world, component.eid, {r: 0, g: 250, b: 0, opacity: 1, side: 'front', depthTest: true, depthWrite: true})
+```
+
+### Hider Material
+Material that hides any objects behind it.
+
+
+
+Camera perspective at runtime:
+
+
+
+```ts
+ecs.HiderMaterial.set(world, component.eid)
+```
diff --git a/docs/studio/guides/models.mdx b/docs/studio/guides/models.mdx
new file mode 100644
index 0000000..8e96b0d
--- /dev/null
+++ b/docs/studio/guides/models.mdx
@@ -0,0 +1,138 @@
+---
+id: models
+description: Studio enables you to integrate 3D objects into your projects through the Mesh component, making it easy to build immersive and interactive experiences. You can add basic shapes using built-in primitives or import custom 3D models.
+---
+
+# Models
+
+
+
+## Introduction
+Studio supports multiple 3D model formats for your entities, with each entity using one model type at a time.
+You can work with simple primitives for basic shapes, or import GLB, FBX, and GLTF bundles when you need more detailed, high-end representations. The built-in asset editor lets you modify and fine-tune your models directly in the engine, so you can iterate quickly without switching tools.
+
+:::note
+While Studio UI's mesh component configures primitives, meshes, and Gaussian splats through a unified interface, each type uses a distinct component programmatically.
+:::
+
+
+## Primitives {#primitives}
+Primitives are used to represent volumes, simple objects or used as placeholders for future game assets.
+Studio includes several built-in primitives such as spheres, boxes, and planes that can be quickly customized and positioned within the editor.
+Primitives have materials that can also be modified dynamically at runtime.
+
+
+
+The following built-in Primitives are available:
+
+| Type | Attributes |
+|--------------------|------------------------------|
+| Sphere | `{radius}` |
+| Box | `{width, height, depth}` |
+| Plane | `{width, height}` |
+| Capsule | `{radius, height}` |
+| Cone | `{radius, height}` |
+| Cylinder | `{radius, height}` |
+| Polyhedron | `{radius}` |
+| Circle | `{radius}` |
+| Ring | `{innerRadius, outerRadius}` |
+| Torus | `{radius, tubeRadius}` |
+
+## Adding a Primitive
+A 3D Model can be added to the entity through the Mesh component in the Studio interface, or in code. Adding them in Studio is done via the (+) button on the Hierarchy or by adding geometry to a Custom Component on the entity.
+
+### Example
+
+``` ts
+ecs.SphereGeometry.set(world, eid, {radius: 5})
+```
+
+## GLTF Component
+
+### Supported Formats
+
+#### GLTF and GLB
+GLTF and GLB models are supported and ideal when working with 3D on the web.
+
+#### Asset Bundle
+While most GLBs can be dragged and dropped into Studio, sometimes GLTF files require multiple file uploads.
+To handle this process we use an asset bundle. To upload a new asset into your project, on the lower part of the left panel select the file tab and click the plus icon.
+Choose the new asset option and then drag and drop all your files.
+
+
+
+
+
+#### FBX
+FBX upload is supported; however, 8th Wall Studio requires models to be converted to GLB after upload.
+**Ensure that textures are embedded in the FBX file before converting to GLB.**
+
+
+
+### Setting a Model
+The following example shows how to set a GLTF or GLB on an entity at runtime.
+
+:::info
+This method requires that you select your custom model from the component properties, once the component is applied to your entity
+:::
+
+``` ts
+import * as ecs from '@8thwall/ecs'
+
+ecs.registerComponent({
+ name: 'loaded-model',
+ schema: {
+ // @asset
+ model: ecs.string,
+ },
+ add: (world, component) => {
+    ecs.GltfModel.set(world, component.eid, {
+ url: component.schema.model,
+ })
+ }
+})
+```
+
+### Setting Model Properties
+
+#### Model Properties
+View properties [here](/docs/studio/api/ecs/gltf-model).
+
+#### Example
+The following example shows how to set a Model on an entity at runtime.
+
+``` ts
+import * as ecs from '@8thwall/ecs'
+
+ecs.registerComponent({
+ name: 'loaded-model',
+ schema: {
+ // @asset
+ model: ecs.string,
+ },
+ add: (world, component) => {
+    ecs.GltfModel.set(world, component.eid, {
+ url: component.schema.model,
+ animationClip: 'clip1',
+ loop: true,
+ paused: false,
+ time: 0,
+ timeScale: 1,
+ })
+ }
+})
+```
+
+## Models and Physics
+
+### Primitives
+Colliders represent physical entities in the simulation step, with attributes that directly affect their behavior. Most primitives have collider shapes that match their exact volume, making it simple to spawn a primitive like a cube and add physics interactions.
+
+### Models
+You can generate a custom collider from your 3D model that approximates its shape and size. For best results, use the simplest collider shape that meets your needs. Most of the time this can be done using a primitive instead.
+
+:::danger
+Custom collider generation may fail due to variations in model complexity, size, and quality. If this happens, use primitives (one or multiple) to approximate your desired shape, or export a clean .glb file from a 3D modeling tool like Blender.
+:::
+
+For more information, visit the physics guide [here](/docs/studio/guides/physics/).
diff --git a/docs/studio/guides/particles.mdx b/docs/studio/guides/particles.mdx
new file mode 100644
index 0000000..b68b213
--- /dev/null
+++ b/docs/studio/guides/particles.mdx
@@ -0,0 +1,115 @@
+---
+id: particles
+description: Particles are a powerful tool for adding dynamic, eye-catching effects to any scene. They bring life and movement, creating everything from realistic environmental elements like smoke, fire, and rain to stylized effects like magic, sparks, or explosions.
+---
+
+# Particles
+
+## Introduction
+{frontMatter.description} Whether used subtly or dramatically, particles enhance the atmosphere and interactivity of a scene, making experiences more captivating and visually rich.
+
+## Adding Particles
+Particles can be added to the entity via the Studio interface or in code. Adding them in Studio is done via the "New Component" button.
+
+
+
+
+### Example
+The following example shows how to add a ParticleEmitter to an entity at runtime.
+``` ts
+ecs.ParticleEmitter.set(world, component.eid, {
+ stopped: false,
+ emitterLife: 10,
+ particlesPerShot: 5,
+ emitDelay: 1,
+ minimumLifespan: 1,
+ maximumLifespan: 3,
+ mass: 1,
+ gravity: 1,
+ scale: 1,
+ forceX: 0,
+ forceY: 60,
+ forceZ: 0,
+ spread: 120,
+ radialVelocity: 0,
+ spawnAreaType: 'point',
+ resourceType: 'model',
+ resourceUrl: 'https://static.8thwall.app/assets/Moon-lowpoly-v3-yg6nmdc1uj.glb',
+})
+```
+
+## Emission
+Emission attributes define the way particles are placed into the world and some of the global attributes that affect every particle equally.
+
+* **Stopped:** If checked, the emitter will not emit.
+* **Emitter Life:** Defines the lifetime of the emitter.
+* **Particles Per Shot:** How many particles are placed every time the emitter fires.
+* **Emit Delay:** Delay between emissions in seconds.
+* **Lifespan:** Sets a range representing the time particles remain in the scene before despawning.
+* **Mass:** The mass of each particle, only relevant when simulating physics.
+* **Gravity:** Adds a force downwards that makes particles fall.
+* **Forces:** Simple forces in the X, Y and Z relative axes that are applied to every particle.
+* **Spread:** Is the angle the particles move in relation to the emitter.
+* **Radial Velocity:** If set above 0, the emitter velocity type will change to radial. Radial Velocity is calculated relative to the observer and vectorial forces are influenced by radial force.
+
+### Spawn Area Type
+Spawn areas change the starting coordinate of the particles.
+
+* **Point:** Spawn is set to the coordinates of the emitter.
+* **Box:** Defines a box using width, height and depth.
+* **Sphere:** Defines a spawn area of a sphere with a defined radius.
+
+:::note
+These spawn areas won't work properly if dimensions are set to 0.
+:::
+
+### Bounding Zone Type
+Bounding boxes dictate the spatial boundaries within which particles are generated.
+
+* **Box:** Defines a box using width, height and depth.
+* **Sphere:** Defines a spawn area of a sphere with a defined radius.
+
+:::note
+These bounding zones won't work properly if dimensions are set to 0 or negative. If using Spawn Area, the bounding zone needs to occupy a bigger volume than the Spawn Area.
+:::
+
+### Resource Type
+This system is able to render glb assets and simple sprites. Choose a model for 3D or sprite for 2D sprites. Use a publicly accessible URL to load the asset to the emitter.
+
+* **Sprites:** They have blending options that work similarly to basic Photoshop layer effects. These are very handy when creating visual effects like fire, stars or smoke.
+* **Model:** 3D models with particle behavior. Useful for creating volumetric effects.
+
+### Simulation
+These parameters will affect the way particles behave on their own after being placed in the world.
+
+* **Color:** If sprites are set as a resource type, then sprites can change color. Input a hex value of the starting and ending color for them to shift gradually.
+* **Random Drift:** Particles will randomly drift in accordance with the random drift range parameter.
+* **Collision:** If particles get near each other, an opposite force will be applied between them.
+
+## Examples
+
+### Starting Particles
+
+``` ts
+ecs.ParticleEmitter.mutate(world, component.eid, (cursor) => {
+ cursor.stopped = false
+})
+```
+
+### Stopping Particles
+
+``` ts
+ecs.ParticleEmitter.mutate(world, component.eid, (cursor) => {
+ cursor.stopped = true
+})
+```
+
+### Emitting Models
+
+``` ts
+ecs.ParticleEmitter.set(world, component.eid, {
+ stopped: false,
+ resourceType: 'model',
+ resourceUrl: 'assets/robot.glb',
+})
+```
diff --git a/docs/studio/guides/physics.mdx b/docs/studio/guides/physics.mdx
new file mode 100644
index 0000000..359e3c0
--- /dev/null
+++ b/docs/studio/guides/physics.mdx
@@ -0,0 +1,99 @@
+---
+id: physics
+description: Studio has a built-in physics system intended for handling robust and dynamic interactions in your scene.
+---
+
+# Physics
+
+## Introduction
+{frontMatter.description}
+
+## Adding Colliders
+:::note
+* Colliders can be nested in some configurations, but not others. For example, a static collider inside a dynamic collider, or two nested dynamic colliders, could misbehave.
+* Unless you have a Geometry component that is also a valid Collider Shape, you’ll need to specify a shape manually. For example there isn’t a supported matching collider for the tetrahedron primitive.
+* GLTF model collider generation for complex models might affect performance.
+:::
+
+### Example
+The following example shows how to add a collider to an entity at runtime.
+
+``` ts
+ecs.Collider.set(world, component.eid, {
+ shape: ecs.ColliderShape.Sphere,
+ type: ecs.ColliderType.Dynamic,
+ radius: 1,
+ mass: 1,
+ eventOnly: false,
+ lockXAxis: false,
+ lockYAxis: false,
+ lockZAxis: false,
+ friction: 0.5,
+ restitution: 0.5,
+ linearDamping: 0,
+ angularDamping: 0,
+})
+```
+
+## Static vs Dynamic vs Kinematic Collider Types
+In physics simulation, colliders define the physical shape of an object for collision detection. These can be Static, Dynamic, or Kinematic depending on how they interact with forces and the environment.
+
+* **Static:** A collider that does not respond to external forces or physics simulations. It remains fixed in place, making it ideal for immovable objects like walls, floors, or terrain.
+* **Dynamic:** A collider that does respond to external forces such as gravity, collisions, or impulses. It’s suitable for moving objects like players, enemies, or debris.
+* **Kinematic:** A collider whose motion is not affected by forces but is entirely controlled programmatically by the user. Unlike static bodies, kinematic bodies can move and can affect other dynamic bodies through collisions.
+
+### Event Only
+The `eventOnly` setting converts a collider into a trigger area—it no longer blocks or reacts to physical objects but instead emits enter and exit events when other physics bodies interact with it.
+
+## Functions
+You can directly apply forces (linear and angular) to any entity with a physics collider. These forces are applied in the next physics simulation update, which takes place at regular intervals. The function accepts a 3D vector to define the force direction and magnitude.
+
+### Velocity
+
+#### Linear
+
+``` ts
+ecs.physics.setLinearVelocity(world, component.eid, forceX, forceY, forceZ)
+```
+
+#### Angular
+
+``` ts
+ecs.physics.setAngularVelocity(world, component.eid, forceX, forceY, forceZ)
+```
+
+### Force & Torque
+
+#### Force
+
+``` ts
+ecs.physics.applyForce(world, component.eid, forceX, forceY, forceZ)
+```
+
+#### Torque
+
+``` ts
+ecs.physics.applyTorque(world, component.eid, torqueX, torqueY, torqueZ)
+```
+
+### Impulse
+
+:::tip
+This function is used to apply a one-time impulse force to a physics collider, altering its velocity based on the given impulse vector.
+This method is useful for events that require a quick, single action response, such as jumping, punching, or a sudden push.
+:::
+
+``` ts
+ecs.physics.applyImpulse(world, component.eid, impulseX, impulseY, impulseZ)
+```
+
+### Gravity
+
+#### Gravity Factor
+In each scene, gravity acts as a constant force known as “World Gravity.” This force affects every collider in the scene that is set to dynamic. To customize how World Gravity impacts an individual collider, we provide an attribute called “Gravity Factor.”
+
+#### Set World Gravity
+
+``` ts
+ecs.physics.setWorldGravity(world, gravity)
+```
diff --git a/docs/studio/guides/prefabs.mdx b/docs/studio/guides/prefabs.mdx
new file mode 100644
index 0000000..784bb23
--- /dev/null
+++ b/docs/studio/guides/prefabs.mdx
@@ -0,0 +1,78 @@
+---
+id: prefabs
+description: Prefabs are reusable collections of entities from which you can create multiple copies.
+---
+
+# Prefabs
+
+## Introduction
+{frontMatter.description} They are stored within the expanse file but can be surfaced however you wish. There’s also a convenient API for working with Prefabs in the ECS. Prefabs in Studio have the following features:
+
+1. Prefab components can be shared across instances
+2. Inherited components can be overridden on a per-instance basis
+3. A prefab instance can contain instances of other prefabs
+4. Prefabs are runtime accessible and modifiable
+
+## Using Prefabs
+In Studio, you can set up and customize Prefabs quickly using the “Make Prefab” right click option on an entity or by dragging it into the Prefab tab of the lower left file directory. Within the Prefabs tab, you can easily edit your Prefab and its components, update its hierarchy, create duplicates–the same way as you would work with objects in the space hierarchy.
+
+Prefab hierarchies are created in the same way as entity hierarchies. If you want to visually inspect and edit your Prefab’s details in an isolated view, just double click it or right click to open the Prefab Editor which lets you make design edits directly to the source. Updates made to the source will automatically be previewable across all instances of the Prefab in the scene.
+
+Ready to work with your new Prefab in your scene? Simply drag and drop the Prefab into your scene viewport or space hierarchy to create a Prefab instance. There are two key concepts to note when working with Prefab and their instances: **Inheritance** and **Overrides**.
+
+### Inheritance
+Entities can inherit components from prefabs. After adding a Prefab instance in your space, any configurations you make later to the Prefab source will be automatically "inherited" by the instance. Changes to the prefab at runtime will also apply to prefab instances. Inherited components are only stored in one place, and shared across instances. This can be useful for static data that's shared across instances, such as material data, textures or meshes.
+
+:::info
+The physics collider component is not inheritable at runtime.
+:::
+
+### Overriding
+When an instance inherits a component from a Prefab, it can be overridden with component values specific to the instance. To override a component (ex. material, animation, custom component script, etc) simply add or change it on the instance. By overriding a component on a Prefab instance it will remain overridden moving forward. Meaning that future changes you make to the same component in its Prefab source will not be inherited.
+
+#### Resetting Overrides
+If you want to reset an overridden component back to the Prefab’s inherited value, you can right click the component in the Inspector and select "Reset Prefab Overrides". This will remove the override and revert the component back to the Prefab's original value.
+
+## Runtime Instances
+Studio’s game engine lets you direct Prefabs at runtime with minimal code. You can programmatically generate new instances in real time based on existing Prefabs, and you can make Prefab instance changes and edits at runtime. You can also query for all the sub-entities of Prefab instances at runtime in case you need to make changes to different instances that were dynamically generated.
+
+### Instantiate a Prefab
+Create a new instance of a prefab using a prefab name or EID. Prefab Instances can be used like any other Entity. A Prefab Instance will have its position set to zero on instantiation.
+
+#### Using Prefab Name
+
+
+
+``` ts
+ecs.registerComponent({
+ name: 'Spawn Prefab by Name',
+ stateMachine: ({world, eid, schemaAttribute, dataAttribute}) => {
+ ecs.defineState('default')
+ .initial()
+ .listen(eid, ecs.input.SCREEN_TOUCH_START, (e) => {
+ const newInstance = world.createEntity("Human")
+ })
+ },
+})
+```
+
+#### Using Prefab EID
+
+``` ts
+ecs.registerComponent({
+ name: 'Spawn Prefab by EID',
+ schema: {
+ prefabToSpawn: ecs.eid,
+ },
+ stateMachine: ({world, eid, schemaAttribute, dataAttribute}) => {
+ ecs.defineState('default')
+ .initial()
+      .listen(eid, ecs.input.SCREEN_TOUCH_START, (e) => {
+        const {prefabToSpawn} = schemaAttribute.get(eid)
+
+        if (prefabToSpawn) {
+          const newInstance = world.createEntity(prefabToSpawn)
+        }
+      })
+  },
+})
+```
diff --git a/docs/studio/guides/spaces.mdx b/docs/studio/guides/spaces.mdx
new file mode 100644
index 0000000..ce3b6ec
--- /dev/null
+++ b/docs/studio/guides/spaces.mdx
@@ -0,0 +1,60 @@
+---
+id: spaces
+description: A Space is a way to organize Entities and make global configuration.
+---
+
+# Spaces
+
+## Introduction
+{frontMatter.description} Creating immersive WebXR games and experiences often requires multiple environments, transitions, and structured Spaces for different parts of the user journey. Spaces now gives you the ability to build and manage multiple distinct areas within a single project. You can think of Spaces like scenes or environments in other gaming engines or design tools. Simply put, Spaces are 3D frames where you can place assets, lighting, cameras, and game interactions.
+
+
+
+## Adding a Space
+When you start a new project in 8th Wall Studio, you’ll see a default Space with a Camera and a Light. Adding a new Space is simple—just select the dropdown at the top of the Hierarchy Panel, name your Space, and start designing.
+
+Note: Spaces were introduced in our 2025-February-13 release. Projects created before this release will be spaceless, but you can set up Spaces using the Space dropdown at the top of the Hierarchy. All entities in the project will now be designated to the created Space.
+
+
+
+## Included Spaces
+You can specify a Space to include other Spaces. Including a Space means all of its objects will be loaded when the Space that includes it is loaded. You can add additional Spaces to your active Space using the Space configurator in the Settings tab of the Inspector.
+
+
+
+Notes:
+- Including an additional Space means that all objects from that Space and any Space it includes (and so on) will also be loaded.
+- When switching between two Spaces, included Space(s) that are shared between the two will not have their objects despawned and respawned.
+
+## Loading a Space
+A newly loaded Space will replace the old Space as the active Space. world.spaces.loadSpace loads a Space specified by an ID or a name. The old Space’s (and any included Spaces) objects will be despawned, and the new Spaces (and any included Spaces) will be spawned.
+
+Loading a Space will set the object to its initial state. If you reload a previously loaded Space, all previous runtime updates will be cleared.
+
+Ephemeral objects will also remain loaded when switching between Spaces. To unload ephemeral objects on Space changes, parent the ephemeral objects to a graph object.
+
+
+
+## Persistent Objects
+You can easily set up objects to be preserved between Spaces. For example, you may have a main character object that should move between levels. Use the Persistence toggle in the object's Inspector, the Persistent component, or ecs.Persistent in code to designate that object to remain loaded as a new Space is loaded.
+
+
+
+Note: Persistence should only be set on root level objects. Reparenting persistent objects to other objects in runtime might lead to unexpected behaviors in Live Sync.
+
+## Space Settings
+In the Settings tab of the Inspector, you can configure a particular Space’s Active Camera and Skybox, and you can configure the Entry Space (or default starting Space) in the Project Settings section.
+
+Active Camera: The active camera of a Space can be any camera object in that Space or in any Space that it includes.
+
+Skybox: Style your Space’s Skybox. Skyboxes are a wrapper around your entire Space that shows what the world looks like beyond your geometry. If your project is configured to use AR on an AR-compatible device, the Skybox will not be rendered.
+
+Entry Space: You can quickly set up an Entry Space (or default starting Space) in the Project Settings section (right panel).
+
+## Simulating the Active Space in Live Sync Mode
+Live Sync Mode (in the Settings right panel) allows for easily modular testing of the active Space. When Live Sync is enabled, the inline Simulator will load whatever Space is currently active in the Hierarchy.
+
+Notes:
+- Opening a simulator instance in a new window (using the New Tab link) or using the pop-out mode of the Simulator will load the Entry Space regardless of Live Sync being enabled.
+- Switching Spaces in the Hierarchy dropdown will change what Space is loaded in the Simulator in Live Sync mode.
+- If Save Edits is on, any edits in the Viewport will be preserved between Space changes. If Save Edits is off, switching Spaces will reset the edits.
diff --git a/docs/studio/guides/splats.mdx b/docs/studio/guides/splats.mdx
new file mode 100644
index 0000000..5a6ee52
--- /dev/null
+++ b/docs/studio/guides/splats.mdx
@@ -0,0 +1,65 @@
+---
+id: splats
+description: Gaussian splatting allows you to capture the essence of a real-world scene using just your smartphone and render it as a fully immersive 3D model, viewable in WebAR.
+---
+
+# Splats
+
+## Introduction
+{frontMatter.description} This technique revolutionizes how we bring real-world objects and places into digital experiences by using splats instead of traditional meshes.
+
+:::info How is it different from a mesh?
+Unlike meshes that rely on connected polygons and textures, Gaussian splats are composed of millions of tiny, independent “splats” or blobs. These splats combine to create a vibrant, 360-degree immersive 3D model, capturing intricate real-world details and textures.
+:::
+
+
+
+## Creating Splats using Scaniverse
+Capturing Gaussian splats is effortless with the Niantic Scaniverse™ app. Simply use your phone to capture a scene, review the splat, and edit it on any of your devices. This accessibility allows anyone to create highly detailed 3D models without needing specialized equipment or powerful computers.
+
+
+
+## Exporting Splats from Scaniverse
+Once you’ve captured and edited your splat, exporting it as a .SPZ file is quick and easy through Niantic Scaniverse™. After capturing and editing:
+
+1. Select Share.
+2. Choose Export Model.
+3. Select .SPZ format.
+4. Save the file to your device.
+
+:::note
+Currently, only .SPZ splats are supported.
+:::
+
+
+
+## Importing Splats
+8th Wall Studio makes it simple to import .SPZ splats.
+
+* Drag the file into the Files/Assets section of left pane.
+* Alternatively, use the (+) button to upload the file.
+
+Once imported, your splats seamlessly integrate into your projects, allowing you to create hyper-realistic, immersive 3D experiences.
+
+:::note
+Importing a splat may take 1-2 minutes, and files are limited to 100 MB in size.
+:::
+
+:::danger
+Large splats are likely to heavily affect performance.
+:::
+
+
+
+## Using Splats
+Splats are a powerful tool when combined with other 3D objects, ground planes, and physics colliders, creating dynamic XR experiences that connect deeply to the real world. Whether for virtual tours of popular locations or as backdrops for interactive games, splats help bring your creative visions to life with realism and immersion.
+
+
+
+## Limitations
+While Gaussian splats provide exceptional visual fidelity, they come with some limitations:
+
+* Rendering splats can be demanding on the CPU and GPU. Large file sizes, multiple splats, and uncropped splats can negatively impact performance. Cropping and editing within the Scaniverse app is recommended to optimize splats before importing them into Studio.
+* Editing or cropping splats in 8th Wall Studio is not yet supported. Always finalize edits in Scaniverse™ before importing the .SPZ file into Studio.
+* Currently, splat assets do not interact with lights or cast shadows.
+* Splats are not compatible with the orthographic camera at this time.
diff --git a/docs/studio/guides/time.mdx b/docs/studio/guides/time.mdx
new file mode 100644
index 0000000..84ff8c3
--- /dev/null
+++ b/docs/studio/guides/time.mdx
@@ -0,0 +1,55 @@
+---
+id: time
+description: This section provides everything you need to manage time within the world, from tracking elapsed runtime and frame intervals to setting up single or repeating actions.
+---
+
+# Time
+
+## Introduction
+{frontMatter.description} Learn how to use built-in timing functions to create responsive and well-timed interactions in your experience.
+
+## world.time.elapsed
+The number of milliseconds the world has been running for, excluding time spent while the world was paused.
+
+## world.time.delta
+The number of milliseconds since the previous frame, excluding time jumps due to being paused.
+
+## world.time.absolute
+The number of milliseconds that have elapsed since the world was created.
+
+## world.time.absoluteDelta
+The number of milliseconds since the last frame, including large jumps of time if the world is resuming after being paused.
+
+## Timers
+Executes a function once after a specified delay.
+
+### Starting a Timer
+
+``` ts
+const timeout = world.time.setTimeout(() => {
+ console.log('1000 ms have passed!')
+}, 1000)
+```
+
+### Clearing a Timer
+
+``` ts
+world.time.clearTimeout(timeout)
+```
+
+## Intervals
+Repeatedly executes a function at specified time intervals.
+
+### Setting an Interval
+
+``` ts
+const interval = world.time.setInterval(() => {
+ console.log('Another 1000 ms have passed!')
+}, 1000)
+```
+
+### Clearing an Interval
+
+``` ts
+world.time.clearInterval(interval)
+```
\ No newline at end of file
diff --git a/docs/studio/guides/ui.mdx b/docs/studio/guides/ui.mdx
new file mode 100644
index 0000000..22e8b1d
--- /dev/null
+++ b/docs/studio/guides/ui.mdx
@@ -0,0 +1,93 @@
+---
+id: ui
+description: 8th Wall Studio offers a built-in UI system for creating interactive, user-friendly interfaces within your experience.
+---
+
+# UI
+
+## Introduction
+{frontMatter.description} The UI component provides a variety of building blocks with configurable properties ranging from text and images to flexible layout and customizable styling.
+
+## Adding UI Elements
+You can introduce UI elements through several methods:
+- **New Primitive:** Use the (+) option in the Hierarchy to add presets.
+- **Add Component:** Attach UI elements on an existing entity.
+- **Scripting:** Programmatically add elements using the API.
+
+
+
+:::note
+The (+) menu options are all presets of the same UI component, and can all be configured the same using the UI inspector.
+:::
+
+### 3D UI
+3D UI elements integrate seamlessly into your 3D scene, allowing for spatially interactive displays. They can be positioned in the world with the transform inspector and require a width and height in world pixels (1/100th of a world unit), e.g. `'500'`. A child UI element's position is entirely determined by layout properties on it and its parent.
+
+
+
+### Overlay UI
+For screen-anchored UI, overlay elements offer fixed positioning on top of the canvas. Size and positioning can be specified in display pixels (e.g. `'125'`) or percentages (e.g. `'30%'`). The transform of top-level overlay UI elements is purely for organization in the viewport; their position on-screen is controlled with `position: 'absolute'`. Like with 3D, child elements are positioned relative to their parent.
+
+
+
+## Properties
+
+The UI component comes with many configurable properties. In the viewport inspector, they are grouped by category and are not always visible. To add a section, click the 'Select Property' dropdown at the bottom of the configurator. For the full list of individual properties that can be specified at runtime in code, see the [`Ui` API Documentation](/docs/studio/api/ecs/ui/).
+
+### Layout
+
+Use the layout configurator to control how child elements are positioned. The various alignment properties allow for dynamic menus that can handle changing content and screen size. The order in which children will appear is determined by their order in the hierarchy. UI children that are set to 'Position Absolute' will be fixed relative to the parent, ignoring its layout configuration.
+
+
+
+### Text
+
+Text can be added to any UI element. The text section allows you to configure the content, font, size, color, and alignment. Text that exceeds the width of the element will automatically wrap to a new line.
+
+
+
+### Background & Border
+
+The visual style of a UI element is controlled by the Background section and Border section. Here, colors and transparency can be configured, and an image can be applied from a web URL or project assets.
+
+
+
+## Interaction
+You can create functional buttons using a custom component with an event listener. UI elements support click/tap events as well as hover states. See the [Input Events API Documentation](/docs/studio/api/events/input/) for a comprehensive list of supported events and example code. These work with overlay and 3D UI elements.
+
+A click will only fire on the top-most element at the pointer location. Layering is controlled by hierarchy order, with later siblings appearing on top of earlier ones. For precise control, use the 'Stacking Order' option at the top of the configurator to arrange groups of UI elements in a custom order.
+
+Enable the 'Ignore Raycast' property on decorative, invisible, or full-screen layout-only frames to prevent them from blocking user input from reaching interactable elements that may lie underneath.
+
+
+
+## Fonts
+
+### Default Fonts
+A selection of built-in fonts are available for use in any 8th Wall project.
+
+
+
+### Custom Fonts
+You can also upload custom fonts via TTF files to use in your UI Elements. Upload font files to your assets to make them automatically available for any UI in your project.
diff --git a/docs/studio/guides/video.mdx b/docs/studio/guides/video.mdx
new file mode 100644
index 0000000..0fff7d3
--- /dev/null
+++ b/docs/studio/guides/video.mdx
@@ -0,0 +1,18 @@
+---
+id: video
+description: This section provides everything you need to work with videos in Studio.
+---
+
+# Video
+
+## Introduction
+{frontMatter.description} By default, all video textures and UI videos default to **muted autoplay**. To customize video behavior, use [`VideoControls`](/docs/studio/api/ecs/video-controls).
+
+## Adding Videos
+
+To apply a video texture to an entity, select a video from your assets from any texture map dropdown.
+
+Video textures can only be applied to Standard and Unlit Materials for primitives.
+For GLB entities, video textures can be applied using Video Material, which will override all of the model's color maps.
+
+
diff --git a/docs/studio/guides/xr/_category_.json b/docs/studio/guides/xr/_category_.json
new file mode 100644
index 0000000..deed1ba
--- /dev/null
+++ b/docs/studio/guides/xr/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "XR",
+ "position": 3
+}
\ No newline at end of file
diff --git a/docs/studio/guides/xr/face.mdx b/docs/studio/guides/xr/face.mdx
new file mode 100644
index 0000000..b681fb4
--- /dev/null
+++ b/docs/studio/guides/xr/face.mdx
@@ -0,0 +1,23 @@
+---
+id: face
+description: Face effects allow you to create filters, effects, and trigger behaviors based on face detection events.
+---
+
+# Face
+
+## Introduction
+{frontMatter.description}
+
+
+## Enabling Face Effects
+To enable face effects in your project, follow these steps:
+
+1. Select the Camera in your scene
+2. Set the Type to "Face"
+````mdx-code-block
+
+````
+3. (Optional) Add a “Face” object to your scene hierarchy to visualize spatial face effects.
+````mdx-code-block
+
+````
diff --git a/docs/studio/guides/xr/image-targets.mdx b/docs/studio/guides/xr/image-targets.mdx
new file mode 100644
index 0000000..065b35e
--- /dev/null
+++ b/docs/studio/guides/xr/image-targets.mdx
@@ -0,0 +1,104 @@
+---
+id: image-targets
+description: Image Targets allow you to set a flat, cylindrical or conical shape as a trigger or marker to overlay AR content.
+---
+
+# Image Targets
+
+## Introduction
+{frontMatter.description}
+
+
+
+
+
+---
+
+## Enabling Image Targets
+
+To enable image targets in your project, you must use a World camera.
+
+1. Select the **Camera** in your scene
+2. Set the **Type** to `World`
+
+````mdx-code-block
+
+````
+
+---
+
+## Adding Image Targets
+
+### Upload the Image Target
+
+Locate the **Image Targets** panel at the bottom left of the Studio Interface. Click on the **( + )** to add a flat, cylindrical, or conical image target.
+
+````mdx-code-block
+
+````
+
+### Configuring the Image Target
+
+Customize the image target's tracking region and metadata.
+
+````mdx-code-block
+
+````
+
+### Testing the Image Target
+
+Scan the QR code to test the quality and tracking of your uploaded image target.
+
+````mdx-code-block
+
+````
+
+### Adding the Image Target Entity
+
+You can add an image target to your scene by creating an **Image Target** entity.
+To do this, click **( + )** in the Scene Hierarchy and select **Image Target**.
+
+````mdx-code-block
+
+````
+
+### Linking the Image Target
+
+Link the image target entity to an uploaded image target.
+
+
+````mdx-code-block
+
+````
+
+---
+
+## Using Image Targets
+
+When you add an image target to your project, an image target mesh will appear so that you can align and position other 3D content against the image target.
+
+---
+
+## Simulating Image Targets
+
+Studio’s **Simulator** allows you to test your image target experience remotely by simulating movement using **WASD** controls.
+
+### Using the Simulator with Image Targets
+
+To simulate movement and content placement around an image target added to your scene:
+
+1. Make sure your **Camera Settings** are set to **World**
+2. Press **Play** to open the Simulator
+3. Use the **Simulator environment options** (bottom left selector) to find your desired Image Target
+4. Use the **WASD keyboard shortcuts** to simulate movement. *(You must first click on the Simulator panel before using the keyboard)*
+
+**Movement Keys:**
+
+- `W` = Forward
+- `A` = Left
+- `S` = Backward
+- `D` = Right
+- `Q` = Up
+- `E` = Down
+
+Moving around the image target with WASD will trigger the **Image Found** event to simulate successful image target tracking.
diff --git a/docs/studio/guides/xr/world.mdx b/docs/studio/guides/xr/world.mdx
new file mode 100644
index 0000000..a6db2cd
--- /dev/null
+++ b/docs/studio/guides/xr/world.mdx
@@ -0,0 +1,24 @@
+---
+id: world
+description: World effects allow you to create experiences that interact with the real world environment.
+---
+
+# World
+
+## Introduction
+{frontMatter.description}
+
+
+## Enabling World Effects
+To enable world effects in your project, follow these steps:
+
+1. Select the Camera in your scene.
+2. Set the Type to "World."
+````mdx-code-block
+
+````
+3. (Optional) Add a Plane with a Shadow component to represent the ground.
+
+:::info
+The ground level in your scene is defined as 0 on the Y-axis, relative to the camera feed.
+:::
diff --git a/docs/studio/guides/xr/xr.md b/docs/studio/guides/xr/xr.md
new file mode 100644
index 0000000..229ee7a
--- /dev/null
+++ b/docs/studio/guides/xr/xr.md
@@ -0,0 +1,28 @@
+# XR
+
+8th Wall's World Tracking, Image Target Tracking, and Face Effects are available to use visually within Studio.
+
+There are three types of Cameras in Studio: 3D only, Face, and World. Each of these camera types
+will have different settings. A Face or World Camera is necessary for AR experiences. To learn more
+about creating and managing a Camera in your scene please see the [Camera](/docs/studio/guides/camera)
+section.
+
+Studio provides tooling for working with XR in your project. For working with World Effects, Studio
+provides 6DoF camera tracking and interfaces for configuring tracking. With Face Effects, Studio
+provides a Face Mesh component to support configuring and testing your effect, as well as setting up
+facial attachment points. A Face mesh component can be added via (+) button on the Hierarchy. Studio
+also provides tooling for previewing XR experiences – see the Simulator section for learning more about
+testing your XR project.
+
+
+
+
+
+When previewing Face Effects in Studio, the face camera is placed at the origin (0, 0, 0) while
+the face anchor is placed in front of the face camera as seen in the screenshot below.
+
+
+
+## XR API Reference {#xr-api-reference}
+
+Please refer to the [Camera](/docs/studio/api/ecs/camera) component APIs that define camera behavior.
diff --git a/docs/studio/native-app-export/_category_.json b/docs/studio/native-app-export/_category_.json
new file mode 100644
index 0000000..b5dffaa
--- /dev/null
+++ b/docs/studio/native-app-export/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Native App Export",
+ "position": 5
+}
\ No newline at end of file
diff --git a/docs/studio/native-app-export/android.md b/docs/studio/native-app-export/android.md
new file mode 100644
index 0000000..79fde81
--- /dev/null
+++ b/docs/studio/native-app-export/android.md
@@ -0,0 +1,94 @@
+---
+id: android
+description: This section explains how to export to Android.
+---
+
+# Android
+
+## Exporting to Android
+
+1. **Open your Studio project**. Ensure the project meets the [requirement criteria](/docs/studio/native-app-export/#requirements).
+
+2. Click **Publish**. Under **Export**, select **Android**.
+
+3. **Customize your app build:**
+ - **Display Name:** The name shown on the Android home screen
+ - **Bundle Identifier:** A unique string, e.g. `com.mycompany.myapp`
+ - **(Optional)** Upload an **App Icon** (1024x1024)
+
+4. Once your basic app info is filled in, click **Continue** to finalize the build configuration.
+
+---
+
+## Finalizing Build Settings
+
+Now you'll define how your app is packaged:
+
+- **Version:** Use semantic versioning (e.g. `1.0.0`)
+- **Orientation:**
+ - **Portrait:** Keeps the app fixed in a vertical position, even when the device is turned.
+ - **Landscape Left:** Displays the app horizontally with the device turned so the left side is down.
+ - **Landscape Right:** Displays the app horizontally with the device turned so the right side is down.
+ - **Auto Rotate:** Allows the app to follow the device's physical rotation, switching between vertical and horizontal views automatically.
+ - **Auto Rotate (Landscape Only):** Adjusts the app's position based on device rotation but restricts it to horizontal views only.
+- **Status Bar:**
+ - **Yes:** Displays the default system status bar over the application.
+ - **No:** Hides the default system status bar.
+- **Build Type:**
+ - **APK (Android Package):** Direct install files for testing or side-loading
+ - **AAB (Android App Bundle):** Required for Google Play publishing
+- **Build Mode:**
+ - **Live Reload:** Pulls updates from Studio as your project is updated
+ - **Static Bundle:** Full self-contained build
+- **Environment:** Select from `Dev`, `Latest`, `Staging`, or `Production`
+
+When everything is set, click **Build** to generate your app package.
+
+> Once the build is complete, download the `.apk` or `.aab` file using the download links provided in the build summary.
+
+---
+
+## Publishing to the Google Play Store
+
+Once your export is complete, you’re ready to publish your app to the Play Store using the **AAB (Android App Bundle)**:
+
+### Why AAB?
+
+Google has required AAB format for all new apps since August 2021—AAB helps optimize delivery by generating device-specific APKs and reducing app size.
+
+### Upload to Google Play Console
+
+1. Log into [Play Console](https://play.google.com/console) and enroll in Play App Signing if needed
+2. Navigate to **“Create app”** → fill in name, language, free/paid status
+3. Go to **Test & Release** → **Production** (or internal/beta track). Click **Create new release**, and then upload your .aab file by dragging it onto the **Drop app bundles here to upload** section.
+4. Complete store listing, privacy policy, content ratings, and target regions
+5. Review and roll out your release
+
+🔗 [Check full upload docs here: Upload your app to the Play Console](https://developer.android.com/studio/publish)
+
+---
+
+## Installing Directly on an Android Device
+
+### Installing on a Physical Android Device
+
+1. Enable **“install unknown apps”** for your browser or file manager
+2. Transfer the APK via USB, email, or cloud storage
+3. Open the APK from your device and tap **Install**
+
+**For command-line method:**
+
+```bash
+adb install path/to/app.apk
+```
+
+### Installing on an Android Emulator
+
+1. Set up an emulator in Android Studio’s AVD Manager.
+2. Run the emulator.
+3. Drag and drop the APK from your computer onto the emulator to install.
+
+In terminal:
+```bash
+adb install path/to/app.apk
+```
diff --git a/docs/studio/native-app-export/embed.md b/docs/studio/native-app-export/embed.md
new file mode 100644
index 0000000..9a4d188
--- /dev/null
+++ b/docs/studio/native-app-export/embed.md
@@ -0,0 +1,116 @@
+---
+id: embed
+description: This section explains how to embed an 8th Wall project in an iFrame.
+---
+
+# Embed
+
+## Embedding an 8th Wall project in an iFrame
+
+1. **Open your Studio project**.
+
+2. Click **Publish**. Under the **Publish** section in the top left, select **Embed**.
+
+3. **Customize your embed:**
+ - **Embed Type:** Select the type of embed you want to create - you can choose either a simple iFrame or a full HTML page.
+
+4. Click **Copy** to copy the embed code to your clipboard.
+
+5. Paste the embed code into your website source code. You can also customize the embed code to your needs.
+
+---
+
+## Publishing your 8th Wall project to gaming platforms
+
+8th Wall iFrames are compatible with and can be published to many gaming platforms.
+
+### Itch.io
+
+1. Download the **Full HTML** embed code.
+2. Log in to [Itch.io](https://itch.io) and [create a new project](https://itch.io/game/new).
+3. Fill in the project details:
+ - Under **Kind of project**, select **HTML**.
+ - Under **Uploads**, select **Upload files**. Upload the `index.html` file that you downloaded in Step 1. Check the **This file will be played in the browser** checkbox.
+ - Under **Embed options**, choose the appropriate sizing for your project.
+4. Finish configuring your game and publish it.
+
+### Viverse
+
+1. Download the **Full HTML** embed code. Make a `.zip` file of this `index.html` file.
+2. Sign in to [Viverse](https://viverse.com) and [go to Viverse Studio](https://studio.viverse.com).
+3. Under **Upload Your Own Build**, click **Upload**.
+4. Click **Create New World**.
+5. Enter the **Name** and **Description** for your project, then click **Create**.
+6. Click **Content Versions**.
+7. Under **New Version**, click **Select File**. Upload the `.zip` file you created in Step 1, then click **Upload**.
+8. Under **iframe Support for Preview**, click **Apply iframe Settings** and enable all permissions that your project requires.
+ - Note that Viverse will put the iFrame that you downloaded from 8th Wall in its own iFrame, and both the Viverse and 8th Wall iFrames will need to grant a permission which your project requires.
+9. Finish configuring your game and publish it.
+
+### Game Jolt
+
+1. Download the **Full HTML** embed code. Make a `.zip` file of this `index.html` file.
+2. Sign in to [Game Jolt](https://gamejolt.com) and [go to Game Jolt Store](https://gamejolt.com/games).
+3. Click **Add Your Game**.
+4. Enter the project details and click **Save & Next**.
+5. On your game dashboard, under **Packages**, click **Add Package**.
+6. Under **Edit package**, click **New Release**.
+7. Click **Upload Browser Build**. Upload the `.zip` file you created in Step 1.
+8. Configure your game dimensions, or select **Fit to screen?** if you want the game to fit the screen.
+9. Finish configuring your game and publish it.
+
+### GamePix
+
+:::info[Important]
+GamePix does not allow games with external links.
+:::
+
+1. Download the **Full HTML** embed code. Make a `.zip` file of this `index.html` file.
+2. Sign up for a [GamePix Developer Account](https://partners.gamepix.com/join-us?t=developer) and go to the [GamePix Dashboard](https://my.gamepix.com/dashboard).
+3. Click **Create New Game**.
+4. Enter the game details and click **Create**.
+5. Under **Info**, select **HTML5-JS** under **Game Engine**.
+6. Under **Build**, click **Browse File**. Upload the `.zip` file you created in Step 1.
+7. Finish configuring your game and publish it.
+
+### Newgrounds
+
+1. Download the **Full HTML** embed code. Make a `.zip` file of this `index.html` file.
+2. Sign up for a [Newgrounds account](https://www.newgrounds.com).
+3. Click the arrow in the top right corner and select **Game (swf, HTML5)**
+4. Under **Submission File(s)**, click **Upload File**. Upload the `.zip` file you created in Step 1.
+5. Configure your game dimensions and check **Touchscreen friendly**
+6. Finish configuring your game and publish it.
+
+### Y8
+
+1. Download the **Full HTML** embed code. Make a `.zip` file of this `index.html` file.
+2. Log into [Y8](https://www.y8.com/upload).
+3. Make sure you have verified your email, then [create a free Y8 Storage Account](https://account.y8.com/storage_account).
+4. Under **Game**, choose **Zip** and then **HTML5**.
+5. Click **Choose File**. Upload the `.zip` file you created in Step 1. If you have not created a Storage Account it will fail. If that happens, click **Create Storage Account** to create one, then refresh the **Upload Your Content to Y8** page and try again.
+6. Finish configuring your game and publish it.
+
+### Poki
+
+1. Go to the [Poki Developer Portal](https://developers.poki.com/share).
+2. Fill in your project details, using the link to your 8th Wall experience (e.g. https://8w.8thwall.app/my-project) under **Link to your game**.
+3. Click **Share your game**.
+
+### Kongregate
+
+1. Email the Kongregate team at [bd@kongregate.com](mailto:bd@kongregate.com). Include the link to your 8th Wall experience (e.g. https://8w.8thwall.app/my-project) in your email.
+
+### Armor Games
+
+1. Email the Armor Games team at [mygame@armorgames.com](mailto:mygame@armorgames.com). Include the link to your 8th Wall experience (e.g. https://8w.8thwall.app/my-project) in your email.
+
+### Addicting Games
+
+1. Download the **Full HTML** embed code. Make a `.zip` file of this `index.html` file.
+2. Email the Addicting Games team at [games@addictinggames.com](mailto:games@addictinggames.com). Include the `.zip` file you created in Step 1 in your email, as well as all of the other information they request in the [Addicting Games Developer Center](https://www.addictinggames.com/about/upload#Send).
+
+### Lagged
+
+1. Email the Lagged team at [contact@lagged.com](mailto:contact@lagged.com). Include the link to your 8th Wall experience (e.g. https://8w.8thwall.app/my-project) in your email.
+2. Once you are approved, you can [sign up for a Lagged account](https://lagged.dev/signup) using the **Invite Code** they provide you and upload your game.
diff --git a/docs/studio/native-app-export/html.md b/docs/studio/native-app-export/html.md
new file mode 100644
index 0000000..0614ad9
--- /dev/null
+++ b/docs/studio/native-app-export/html.md
@@ -0,0 +1,222 @@
+---
+id: html
+description: This section explains how to export an HTML5 bundle.
+---
+
+# HTML
+
+## Exporting an HTML5 Bundle {#exporting-an-html5-bundle}
+
+:::info[Important]
+At the moment, AR experiences are not yet provided via HTML5 export.
+Your project must use 3D cameras in order to function properly.
+:::
+
+1. **Open your Studio project**.
+
+2. Click **Publish**. Under the **Export** section, select **HTML5**.
+
+3. Select an environment to build your bundle from.
+
+4. Click **Build** to generate your HTML5 bundle.
+
+> Once the build is complete, download the `.zip` file using the download links provided in the build summary.
+
+---
+
+## Publishing your 8th Wall project to gaming platforms
+
+Since 8th Wall HTML5 bundles are fully contained builds, they can be self hosted or published to many gaming platforms.
+
+### Self-Host
+
+:::note
+The HTML5 bundle can be self hosted or deployed in many different ways. The instructions below are just one example using `npm`.
+For more comprehensive information on self hosting, check out this [guide](https://github.com/mikeroyal/Self-Hosting-Guide).
+:::
+
+1. Download the `.zip` bundle, then unzip the file.
+2. If you do not already have `npm` installed, follow the instructions on this [page](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) to set it up.
+3. Run `npm install --global http-server` to install the [http-server](https://www.npmjs.com/package/http-server) npm package as a global CLI tool.
+4. Run `http-server <path-to-your-unzipped-bundle>`
+ 1. Example: `http-server /Users/John/Downloads/my-project`
+5. There should be some logs that list a series of local URLs like:
+```sh
+Available on:
+ http://127.0.0.1:8080
+ http://192.168.20.43:8080
+ http://172.29.29.159:8080
+```
+6. Open one of the URLs in your web browser.
+
+### Itch.io
+
+1. Download the `.zip` bundle.
+2. Log in to [Itch.io](https://itch.io) and [create a new project](https://itch.io/game/new).
+3. Fill in the project details:
+ - Under **Kind of project**, select **HTML**.
+ - Under **Uploads**, select **Upload files**. Upload the `.zip` file that you downloaded in Step 1. Check the **This file will be played in the browser** checkbox.
+ - Under **Embed options**, choose the appropriate sizing for your project.
+4. Finish configuring your game and publish it.
+
+### Viverse
+
+1. Sign in to [Viverse](https://viverse.com) and [go to Viverse Studio](https://studio.viverse.com).
+2. Under **Upload Your Own Build**, click **Upload**.
+3. Click **Create New World**.
+4. Enter the **Name** and **Description** for your project, then click **Create**.
+5. Click **Content Versions**.
+6. Under **New Version**, click **Select File**. Upload the `.zip` file that you downloaded earlier, then click **Upload**.
+7. Under **iframe Support for Preview**, click **Apply iframe Settings** and enable all permissions that your project requires.
+ - Note that Viverse will put your project downloaded from 8th Wall in its own iFrame, and the Viverse iFrame will need to grant a permission which your project requires.
+8. Finish configuring your game and publish it.
+
+### Game Jolt
+
+1. Sign in to [Game Jolt](https://gamejolt.com) and [go to Game Jolt Store](https://gamejolt.com/games).
+2. Click **Add Your Game**.
+3. Enter the project details and click **Save & Next**.
+4. On your game dashboard, under **Packages**, click **Add Package**.
+5. Under **Edit package**, click **New Release**.
+6. Click **Upload Browser Build**. Upload the `.zip` file that you downloaded earlier.
+7. Configure your game dimensions, or select **Fit to screen?** if you want the game to fit the screen.
+8. Finish configuring your game and publish it.
+
+### GamePix
+
+:::info[Important]
+GamePix does not allow games with external links. Make sure your project does NOT make network calls outside of the bundle.
+:::
+
+1. Download the `.zip` bundle.
+2. Sign up for a [GamePix Developer Account](https://partners.gamepix.com/join-us?t=developer) and go to the [GamePix Dashboard](https://my.gamepix.com/dashboard).
+3. Click **Create New Game**.
+4. Enter the game details and click **Create**.
+5. Under **Info**, select **HTML5-JS** under **Game Engine**.
+6. Under **Build**, click **Browse File**. Upload the `.zip` file you downloaded earlier.
+7. Finish configuring your game and publish it.
+
+### Newgrounds
+
+1. Download the `.zip` bundle.
+2. Sign up for a [Newgrounds account](https://www.newgrounds.com).
+3. Click the arrow in the top right corner and select **Game (swf, HTML5)**
+4. Under **Submission File(s)**, click **Upload File**. Upload the `.zip` file you downloaded earlier.
+5. Configure your game dimensions and check **Touchscreen friendly**
+6. Finish configuring your game and publish it.
+
+### Y8
+
+1. Download the `.zip` bundle.
+2. Log into [Y8](https://www.y8.com/upload).
+3. Make sure you have verified your email, then [create a free Y8 Storage Account](https://account.y8.com/storage_account).
+4. Under **Game**, choose **Zip** and then **HTML5**.
+5. Click **Choose File**. Upload the `.zip` file you downloaded earlier. If you have not created a Storage Account it will fail. If that happens, click **Create Storage Account** to create one, then refresh the **Upload Your Content to Y8** page and try again.
+6. Finish configuring your game and publish it.
+
+### Poki
+
+1. Go to the [Poki Developer Portal](https://developers.poki.com/share).
+2. Fill in your project details, using the link to your hosted project under **Link to your game**.
+3. Click **Share your game**.
+
+### Kongregate
+
+1. Email the Kongregate team at [bd@kongregate.com](mailto:bd@kongregate.com). Include the link to your hosted project in your email.
+
+### Armor Games
+
+1. Email the Armor Games team at [mygame@armorgames.com](mailto:mygame@armorgames.com). Include the link to your hosted project in your email.
+
+### Addicting Games
+
+1. Download the **Full HTML** embed code. Make a `.zip` file of this `index.html` file.
+2. Email the Addicting Games team at [games@addictinggames.com](mailto:games@addictinggames.com). Include the `.zip` file in your email, as well as all of the other information they request in the [Addicting Games Developer Center](https://www.addictinggames.com/about/upload#Send).
+
+### Lagged
+
+1. Email the Lagged team at [contact@lagged.com](mailto:contact@lagged.com). Include the link to your hosted project in your email.
+2. Once you are approved, you can [sign up for a Lagged account](https://lagged.dev/signup) using the **Invite Code** they provide you and upload your game.
+
+### Discord
+
+#### Sample Project
+
+As a starting point to use the Discord Embedded SDK with your project, you can try out our sample project.
+
+1. Navigate to https://www.8thwall.com/8thwall/discord-activity-example and clone the project to your workspace.
+2. Follow the steps in [Exporting an HTML5 Bundle](#exporting-an-html5-bundle)
+3. Download the `.zip` to a location of your choosing.
+
+#### Discord Developer Set Up
+
+In order to run a web client in Discord, you'll need to set up an account and create an app in the developer hub.
+
+1. Create a Discord Account and navigate to https://discord.com/developers/applications
+
+2. Create a new application by clicking the Button in the top right corner
+ 1. Enter a name for the application and accept the terms of service
+
+
+
+3. Go to the **OAuth2** page, under the **Settings** section:
+ 1. Add `http://127.0.0.1` as a redirect URI for testing.
+ 2. Save the `Client ID` somewhere secure.
+ 3. Click "Reset Secret" to retrieve the `Client Secret` and store it somewhere safe.
+ 4. Press "Save" to keep your settings.
+
+
+
+4. Navigate to the **URL Mappings** page, under the **Activities** section:
+ 1. Add a temporary target to the root mapping like `127.0.0.1:8888`. This will be replaced later with your public URL, but it's required to enable Activities in the next step.
+
+5. Go to the **Settings** page, under the **Activities** section:
+ 1. Toggle **Enable Activities** and accept the app launcher agreement.
+
+
+
+6. Then, go to the **Installation** tab, under the **Settings** section:
+ 1. Copy the link from the **Install Link** panel and open it in your browser.
+ 2. Install the application to make it accessible in any server or DM.
+
+#### Launching an Application
+
+1. Set up the example server code at https://github.com/8thwall/discord-activity-example
+ 1. `git clone https://github.com/8thwall/discord-activity-example`
+ 2. Run `npm install`
+ 3. Unzip the `.zip` downloaded earlier containing the frontend of the project.
+ 4. Create a `.env` file in the root of the repo, and fill it out with the details from the Discord Developer Portal:
+ ```
+ DISCORD_CLIENT_ID=XXXXXXXXXX
+ DISCORD_CLIENT_SECRET=XXXXXXXXXX
+ DISCORD_CLIENT_HOST_PATH=/path/to/unzipped/folder
+ ```
+ 5. Enter `npm start` to start the server.
+
+2. Use `cloudflared` to create a tunnel, so the project will be publicly accessible over the internet.
+ 1. `brew install cloudflared` to download the `cloudflared` CLI tool
+ 2. Run `cloudflared tunnel --url http://localhost:8888`.
+ 3. Make note of the URL that was generated.
+
+ Example:
+ ```
+ 2025-10-11T03:05:16Z INF +--------------------------------------------------------------------------------------------+
+ 2025-10-11T03:05:16Z INF | Your quick Tunnel has been created! Visit it at (it may take some time to be reachable): |
+ 2025-10-11T03:05:16Z INF | https://sporting-follow-audit-href.trycloudflare.com |
+ 2025-10-11T03:05:16Z INF +--------------------------------------------------------------------------------------------+
+ ```
+ 4. Open the `cloudflared` URL in your browser to make sure the project loads.
+
+3. Update your Discord application settings:
+ 1. Open the Discord Developer Portal and navigate to your application
+ 2. Go to **URL Mappings** under the **Activities** section
+ 3. Replace the temporary target with your `cloudflared` URL for the **Root Mapping**
+
+
+
+4. Test your Discord Activity:
+ 1. Open Discord and navigate to any DM or server
+ 2. Click the activities icon (game controller) in the voice channel controls
+ 3. Find and click your application in the **Apps & Commands** panel
+
+
diff --git a/docs/studio/native-app-export/ios.md b/docs/studio/native-app-export/ios.md
new file mode 100644
index 0000000..a72fb43
--- /dev/null
+++ b/docs/studio/native-app-export/ios.md
@@ -0,0 +1,149 @@
+---
+id: iOS
+description: This section explains how to export to iOS.
+---
+
+# iOS
+
+## Exporting to iOS
+
+1. **Open your Studio project**. Ensure the project meets the [requirement criteria](/docs/studio/native-app-export/#requirements).
+
+
+2. Click **Publish**. Under **Export**, select **iOS**.
+
+
+3. **Customize your app build:**
+ - **Display Name**: The name shown on the iOS home screen
+ - **(Optional)** Upload an **App Icon** (1024x1024 or larger)
+
+4. **Complete Apple Configuration:** In this step, you’ll configure the signing credentials required to build and run your iOS app. You must select one or both signing types: Development or Distribution, and upload the corresponding certificate and provisioning profile for each. All of these steps should be completed without leaving the Native App Export flow in Studio.
+
+   - **Bundle Identifier**: A unique string, e.g. `com.mycompany.myapp`. This string must match your Apple developer account settings in order to upload the app for distribution/testing.
+
+ - **Signing Type**:
+
+ i. **Apple Development** – Use this option if you want to build and test your app on registered devices during development.
+
+ 1. **Generate a Certificate Signing Request (CSR)**
+ a. In Studio, click *Add New Certificate* and then *Create Certificate Signing Request.*
+
+ 2. **Create a development certificate**
+ a. Log in to your [Apple Developer Account](https://developer.apple.com/account/resources/certificates/add).
+ b. Use the certificate signing request to create an Apple Development or iOS Development certificate, then download it.
+ c. Reference: [Apple: Create a development certificate](https://developer.apple.com/help/account/certificates/create-a-development-certificate).
+
+ 3. **Upload the certificate**
+ a. In Studio, upload the development certificate under *Upload Certificate.*
+
+ 4. **Create a provisioning profile**
+ a. In your Apple Developer Account, create an iOS App Development provisioning profile.
+ b. Associate it with the correct development certificate and App Identifier (you may need to create one first).
+ i. To create an App ID, go to [Apple: Create an App ID](https://developer.apple.com/account/resources/identifiers/add/bundleId) and choose App IDs. Then select *App*. Then write the Description and Bundle ID for it.
+ - Some teams prefer to use a wildcard Bundle ID for use during development, as this lets you share the same App ID and provisioning profile between different apps. To do so, choose **Description = Wildcard Development** and **Bundle ID = Explicit** with a value of `com.mycompany.*`.
+ c. Reference: [Apple: Create a development provisioning profile](https://developer.apple.com/help/account/provisioning-profiles/create-a-development-provisioning-profile).
+
+ 5. **Upload the development provisioning profile**
+ a. In Studio, upload the development provisioning profile under *Upload Provisioning Profile.*
+
+ ii. **Apple Distribution** – Use this option when preparing your app for release via TestFlight, the App Store, or enterprise distribution.
+
+ 1. **Generate a Certificate Signing Request (CSR)**
+ a. In Studio, click *Add New Certificate* and then *Create Certificate Signing Request.*
+
+ 2. **Create a distribution certificate**
+ a. Log in to your Apple Developer Account.
+ b. Use the certificate signing request to create an Apple Distribution certificate (or iOS Distribution – App Store Connect and Ad Hoc), then download it.
+ c. Reference: [Apple: Certificate overview](https://developer.apple.com/help/account/certificates/certificates-overview).
+
+ 3. **Upload the certificate**
+ a. In Studio, upload the distribution certificate under *Upload Certificate.*
+
+ 4. **Create a provisioning profile**
+ a. In your Apple Developer Account, create an App Store (for TestFlight/App Store release) or Ad Hoc (for limited device distribution) provisioning profile.
+ b. Associate it with the correct distribution certificate and App Identifier (you may need to create one first).
+ i. Unlike for development, for distribution you should create an App ID just for this app, not a Wildcard Bundle ID.
+ c. Reference: [Apple: Create a distribution provisioning profile](https://developer.apple.com/help/account/provisioning-profiles/create-an-app-store-provisioning-profile).
+
+ 5. **Upload the distribution provisioning profile**
+ a. In Studio, upload the distribution provisioning profile under *Upload Provisioning Profile.*
+
+ - Once you’ve uploaded the necessary certificates and provisioning profiles for Development and/or Distribution, click **Save** to confirm your Apple signing setup.
+
+5. **Configure Permissions (Optional):**
+ Indicate the sensor permissions your app may need to function properly, and optionally set custom text for the permission prompt. This step is required to successfully submit your app to the app store.
+
+ - **Camera**: Select if the application uses any of the device’s camera (like for Face Effects or World Effects)
+ - **Location**: Select if the application uses GPS location
+ - **Microphone**: Select if the application uses the device’s microphone (like for Media Recorder, or voice interaction)
+
+6. Once your basic app information is filled in, Apple configuration is complete, and permissions are set, click **Continue** to finalize the build configuration.
+
+---
+
+## Finalizing Build Settings
+
+Now you'll define how your app is packaged:
+
+- **Version**: Use semantic versioning (e.g. 1.0.0) ([Semantic Versioning](https://semver.org/))
+- **Orientation**:
+ - Portrait: Keeps the app fixed in a vertical position, even when the device is turned.
+ - Landscape Left: Displays the app horizontally with the device turned so the left side is down.
+ - Landscape Right: Displays the app horizontally with the device turned so the right side is down.
+ - Auto Rotate: Allows the app to follow the device's physical rotation, switching between vertical and horizontal views automatically.
+ - Auto Rotate (Landscape Only): Adjusts the app's position based on device rotation but restricts it to horizontal views only.
+
+- **Status Bar**:
+ - Yes: Displays the default system status bar over the application.
+ - No: Hides the default system status bar.
+
+- **Build Mode**:
+ - Static Bundle: Full self-contained build (note: apps that use AR features still require an internet connection, even if they are a Static Bundle)
+ - Live Reload: Pulls updates from Studio as your project is updated
+
+- **Environment**: Select from Dev, Latest, Staging, or Production
+
+- **Signing Type**:
+ - Development: Select this option when you are building and testing your app during development. It allows you to run the app on registered devices using your development provisioning profile and certificates.
+ - Distribution: Select this option when you are preparing your app for release, whether for TestFlight, the App Store, or enterprise/internal distribution. This uses your distribution provisioning profile and certificates to ensure the app can be installed and trusted on end users’ devices.
+
+7. When everything is set, click **Build** to generate your app package.
+
+8. Once the build is complete, download the `.ipa` file using the download links provided in the build summary.
+
+---
+
+## Publishing to the App Store
+
+Once your export is complete, you’re ready to publish your app to the App Store using the IPA (iOS App Store Package). When you’re ready to share your app with others or release it, you’ll use Apple’s App Store Connect and either TestFlight (for beta testing) or App Store distribution. The high-level process is:
+
+1. **Prepare an App Store Connect record**: Log in to App Store Connect (with your Apple Developer account) and create an App entry if you haven’t already. In the App Store Connect dashboard, go to *My Apps* and click the “+” to add a new app. Choose iOS as the platform, enter your app name, select the correct Bundle ID (as configured in your 8th Wall project), and provide a SKU and primary language, then *Create* the app.
+
+2. **Upload the .ipa using Transporter**: Ensure the .ipa is signed with your Distribution certificate and provisioning profile (App Store distribution). Apple does not accept development-signed builds for TestFlight/App Store distribution. On a Mac, the easiest upload method is Apple’s Transporter app. Install Transporter from the Mac App Store, open it and sign in with your Apple ID (Developer account). Then click the “+” and add your .ipa file (or drag the .ipa into Transporter) and click *Deliver* to upload. Transporter will validate the file and submit it to App Store Connect. (You can also upload builds via Xcode’s Archive Organizer or the `altool` command.)
+
+3. **Enable TestFlight testing (if needed)**: Once the build appears in App Store Connect (under your app’s TestFlight tab), you can distribute it to testers.
+ - Internal testing: up to 100 members, assign builds immediately.
+ - External testing: up to 10,000 users, requires Beta App Review.
+
+4. **App Store submission**: To release the app to the public App Store, go to the app’s App Store page in App Store Connect. Fill in all required metadata: screenshots, description, category, pricing, privacy policy URL, etc. Attach the uploaded build, then click *Submit for Review*. Apple will then perform a full review of the app.
+
+🔗 [Apple: Upload your app to App Store Connect](https://developer.apple.com/help/app-store-connect/manage-builds/upload-builds/#:~:text=After%20adding%20an%20app%20to,testing%20%2C%20or%20%2075)
+
+---
+
+## Installing Directly on an iOS Device
+
+To install a development-signed `.ipa` (e.g. from 8th Wall) onto an iPhone or iPad for testing, you need to sideload it using Apple’s tools:
+
+1. **Verify provisioning**: Ensure the device’s UDID is included in the app’s provisioning profile. A development or Ad Hoc `.ipa` will only install on devices registered in that profile. If not, you’ll need to add your device to the provisioning profile and then reupload your provisioning profile on the Complete Apple Configuration page under Apple Development and then regenerate the `.ipa` app signed with the profile that contains your device.
+
+2. **Install on device**:
+ a. **Using Xcode**: On macOS, connect your iOS device via USB (and tap “Trust” if prompted on the device). Launch Xcode and go to *Window > Devices and Simulators.* Select your iPhone/iPad from the left device list. (Make sure Developer Mode is enabled on the device for iOS 16+; otherwise, iOS will block running the app.) Install the `.ipa` using Xcode: Drag and drop the `.ipa` file onto the “Installed Apps” section of your device’s panel in Xcode’s Devices window. Xcode will copy the app to the device and verify it. After a moment, the app icon should appear on your device.
+
+ b. **Using Apple Configurator 2**: This is a free Mac app from Apple which can be used to install the `.ipa`. Open Configurator, connect your device, then choose *Actions > Add > Apps > Choose from my Mac…* and select the `.ipa` file. This will deploy the app to the device in a similar way.
+
+ c. **Using Music (formerly iTunes)**: Open the Music app, connect your device, select your device in the left sidebar, and then drag and drop the `.ipa` file onto the main window. After a moment, the app should appear on your device. Note that it may not be on your first page — if you don’t see it, scroll through your app homepages.
+
+3. **Trust the developer certificate**: If the app was signed with an enterprise or development certificate, you may need to manually trust it on the device before it will run. On the iPhone/iPad, go to *Settings > General > VPN & Device Management* (or *Profiles & Device Management* on older iOS) and find the profile for the app’s developer. Tap *Trust [Developer]* and confirm to trust the certificate. This step is not needed for App Store/TestFlight apps, but may be required for direct installs.
+
+4. **Launch the app**: Now open the app on your device. The app should launch if the profile and certificate are valid and the device is in Developer Mode (for iOS 16+). If you get an error like “integrity could not be verified,” it usually means the device is not provisioned, the app is not properly signed, or Developer Mode is off. Once properly installed and trusted, the development build will run on your physical device.
diff --git a/docs/studio/native-app-export/native-app-export.md b/docs/studio/native-app-export/native-app-export.md
new file mode 100644
index 0000000..1763b50
--- /dev/null
+++ b/docs/studio/native-app-export/native-app-export.md
@@ -0,0 +1,37 @@
+---
+id: native-app-export
+description: This section explains how to use Native App Export.
+sidebar_class_name: "hidden"
+---
+
+# Native App Export
+
+:::info[Beta Feature]
+Native App Export is currently in Beta and limited to **Android & iOS** builds. Support for desktop and headsets is coming soon.
+:::
+
+Native App Export enables you to package your Studio project as a standalone application.
+
+## Requirements {#requirements}
+
+Whether building for iOS or Android, ensure your project has been successfully built for the web at least once before attempting to export.
+
+### iOS
+
+Native export for iOS is available for AR & 3D projects. Your application **will not** support:
+
+- Push notifications
+- In-app purchases
+
+### Android
+
+Native export for Android is only available for non-AR, 3D-only projects. Your project **must not** use:
+
+- Camera or AR features
+- GPS
+- Virtual or physical keyboards
+- Push notifications
+- In-app purchases
+- Video textures
+- MediaRecorder API
+- CSS
diff --git a/docs/studio/studio.mdx b/docs/studio/studio.mdx
new file mode 100644
index 0000000..b54a848
--- /dev/null
+++ b/docs/studio/studio.mdx
@@ -0,0 +1,11 @@
+---
+sidebar_label: Introduction
+sidebar_position: 1
+description: 8th Wall Studio is designed to empower creators to build the next generation of immersive XR experiences.
+---
+
+# 8th Wall Studio
+
+{frontMatter.description} With Studio, you can easily create engaging WebAR experiences, interactive 3D games, and more in real-time, then deploy them seamlessly across mobile devices and desktops.
+
+
diff --git a/docs/troubleshooting/_category_.json b/docs/troubleshooting/_category_.json
new file mode 100644
index 0000000..0fa011a
--- /dev/null
+++ b/docs/troubleshooting/_category_.json
@@ -0,0 +1,4 @@
+{
+ "label": "Troubleshooting",
+ "position": 4
+}
diff --git a/docs/troubleshooting/invalid-timestamps-detected.md b/docs/troubleshooting/invalid-timestamps-detected.md
new file mode 100644
index 0000000..03b2bb5
--- /dev/null
+++ b/docs/troubleshooting/invalid-timestamps-detected.md
@@ -0,0 +1,17 @@
+---
+id: invalid-timestamps-detected
+sidebar_position: 4
+---
+# Invalid Timestamps Detected
+
+#### Issue {#issue}
+
+On iOS devices, console logs display a warning that states `webvr-polyfill: Invalid timestamps detected: Timestamp from devicemotion outside expected range.`
+
+#### Resolution {#resolution}
+
+No action required.
+
+This is a **warning** coming from `webvr-polyfill`, a dependency of the AFrame/8Frame library. Devicemotion is an event coming from the browser that fires at a regular interval. It indicates the amount of physical force of acceleration the device is receiving at that time. These "Invalid timestamp" messages are a byproduct of iOS's devicemotion implementation where timestamps are sometimes reported out of order.
+
+This is simply a **warning**, not an error, and can be safely ignored. It does not have any impact on your Web AR experience.
diff --git a/docs/troubleshooting/ios-black-textures.md b/docs/troubleshooting/ios-black-textures.md
new file mode 100644
index 0000000..00e34d5
--- /dev/null
+++ b/docs/troubleshooting/ios-black-textures.md
@@ -0,0 +1,32 @@
+---
+id: ios-black-textures
+sidebar_position: 1
+---
+
+# Black textures on iOS devices
+
+#### Issue {#issue}
+
+When using high resolution and/or a large number of textures on certain versions of iOS, Safari can run out of GPU memory. The textures may render black or cause the page to crash.
+
+#### Workarounds {#workarounds}
+
+1. **Reduce the size/resolution of the textures used in your scene** (see
+[texture optimization](/docs/engine/guides/your-3d-models-on-the-web/#texture-optimization))
+
+2. **Disable image bitmaps on iOS devices**:
+
+There are existing bugs in iOS 14 and iOS 15 related to image bitmaps that can cause texture issues.
+Disable image bitmaps to help prevent black textures and crashes. See example below:
+
+#### Example: Disable iOS Bitmaps (add to the top of app.js): {#example-disable-ios-bitmaps-add-to-the-top-of-appjs}
+
+```javascript
+// Bitmaps can cause texture issues on iOS. This workaround can help prevent black textures and crashes.
+const IS_IOS =
+ /^(iPad|iPhone|iPod)/.test(window.navigator.platform) ||
+ (/^Mac/.test(window.navigator.platform) && window.navigator.maxTouchPoints > 1)
+if (IS_IOS) {
+ window.createImageBitmap = undefined
+}
+```
diff --git a/docs/troubleshooting/loading-infinite-spinner.md b/docs/troubleshooting/loading-infinite-spinner.md
new file mode 100644
index 0000000..0e95900
--- /dev/null
+++ b/docs/troubleshooting/loading-infinite-spinner.md
@@ -0,0 +1,57 @@
+---
+id: loading-infinite-spinner
+sidebar_position: 2
+---
+# Loading Screen Infinite Spinner
+
+#### Issue {#issue}
+
+When accessing a WebAR experience, the page is stuck on the Loading screen with an "infinite spinner".
+
+
+
+#### Why does this happen? {#why-does-this-happen}
+
+If you are using the XRExtras `loading` module (which is included with all 8th Wall projects and
+examples by default), the loading screen is displayed while the scene and assets are loading, and
+while the browser is waiting for browser permissions to be accepted. If the scene takes a long time
+to load, or if something prevents the scene from fully initializing, it can appear to be "stuck" on
+this screen forever.
+
+#### Potential Causes {#potential-causes}
+
+1. Large Assets and/or Slow Internet Connection
+
+If you are in a location with slow wifi and/or cellular service while attempting to load a Web AR
+page with large assets, the scene may not really be "stuck", but rather just taking a long time to
+load. Use the browser's Network inspector to see if your page is simply in process of downloading
+assets.
+
+Additionally, try to [optimize scene assets](/docs/engine/guides/your-3d-models-on-the-web/#texture-optimization)
+as much as possible. This can include techniques such as compressing textures, reducing texture
+and/or video resolution, and reducing the polygon count of 3D models.
+
+2. Camera locked to a background tab
+
+Some devices/browsers may not let you open the camera if it's already in use by another tab. Try
+closing any other tabs that may be using the camera, then re-load the page.
+
+3. iOS Safari specific: CSS elements push the video element "off the screen"
+
+If you have added custom HTML/CSS elements to your Web AR experience, make sure that they are
+properly overlaid on top of the scene. If the video element on the page is pushed off-screen, iOS
+Safari won't render the video feed. This in turn triggers a series of events that make it appear as
+if 8th Wall is "stuck". In reality, here is what is going on:
+
+Video feed doesn't render -> AFrame scene doesn't fully initialize -> AFrame scene never emits the
+"loaded" event -> XRExtras Loading module never disappears (it's listening for the scene's "loaded"
+event which never fires!)
+
+To resolve this, we recommend using the Safari inspector's "Layout" view to visualize the
+positioning of your DOM content. Oftentimes, you'll see something similar to the image below where
+the video element is pushed "off the screen" / "below the fold".
+
+
+
+To resolve, adjust the CSS positioning of your elements so they do not push the video feed off the
+screen. Using `absolute` positioning is one way to do this.
diff --git a/docs/troubleshooting/tracking-and-camera-issues.md b/docs/troubleshooting/tracking-and-camera-issues.md
new file mode 100644
index 0000000..1bbaa36
--- /dev/null
+++ b/docs/troubleshooting/tracking-and-camera-issues.md
@@ -0,0 +1,62 @@
+---
+id: tracking-and-camera-issues
+sidebar_position: 3
+---
+# Tracking And Camera Issues
+
+## 6DoF Camera Motion Not Working {#camera-problem}
+
+#### Issue {#camera-issue}
+
+As I move my phone, the camera position does not update.
+
+#### Resolution {#camera-resolution}
+
+Check the position of the camera in your scene. The camera should **NOT** be at a height (Y) of
+zero. Set it to Non-Zero value. The Y position of the camera at start effectively determines the
+scale of virtual content on a surface (e.g. smaller y, bigger content)
+
+## Object Not Tracking Surface Properly {#tracking-problem}
+
+#### Issue {#tracking-issue}
+
+Content in my scene doesn't appear to be "sticking" to a surface properly
+
+#### Resolution {#tracking-resolution}
+
+To place an object on a surface, the **base** of the object needs to be at a **height of Y=0**
+
+**Note**: Setting the position at a height of Y=0 isn't necessarily sufficient.
+
+For example, if the transform of your model is at the center of the object, placing it at Y=0 will
+result in part of the object living below the surface. In this case you'll need to adjust the
+vertical position of the object so that the bottom of the object sits at Y=0.
+
+It's often helpful to visualize object positioning relative to the surface by placing a
+semi-transparent plane at Y=0.
+
+#### A-Frame example {#a-frame-example}
+
+```html
+
+
+```
+
+#### three.js example {#threejs-example}
+
+```javascript
+ // Create a 1x1 Plane with a transparent yellow material
+ var geometry = new THREE.PlaneGeometry(1, 1, 1, 1); // THREE.PlaneGeometry (width, height, widthSegments, heightSegments)
+ var material = new THREE.MeshBasicMaterial({color: 0xffff00, transparent:true, opacity:0.5, side: THREE.DoubleSide});
+ var plane = new THREE.Mesh(geometry, material);
+ // Rotate 90 degrees (in radians) along X so plane is parallel to ground
+ plane.rotateX(1.5708)
+ plane.position.set(0, 0, 0)
+ scene.add( plane );
+```
diff --git a/docs/troubleshooting/troubleshooting.md b/docs/troubleshooting/troubleshooting.md
new file mode 100644
index 0000000..fa8f074
--- /dev/null
+++ b/docs/troubleshooting/troubleshooting.md
@@ -0,0 +1,18 @@
+# Known Issues & Workarounds
+
+* [Black textures on iOS devices](/docs/troubleshooting/ios-black-textures)
+* [Loading Screen Infinite Spinner](/docs/troubleshooting/loading-infinite-spinner)
+* [6DoF Camera Motion Not Working](/docs/troubleshooting/tracking-and-camera-issues/#camera-problem)
+* [Object Not Tracking Surface Properly](/docs/troubleshooting/tracking-and-camera-issues/#tracking-problem)
+* [Invalid Timestamps Detected](/docs/troubleshooting/invalid-timestamps-detected)
+* [World Tracking Issues](/docs/troubleshooting/world-tracking-issues)
+
+## Known Issues
+
+- Switching clients while the simulator is running will cause the simulator to infinitely hang in the "Initializing" state. You can close the project and reopen it if you enter this state.
+
+## Reload
+
+You can reload and force reload the 8th Wall Desktop App from the **View** menu or using keyboard shortcuts.
+
+
diff --git a/docs/troubleshooting/world-tracking-issues.md b/docs/troubleshooting/world-tracking-issues.md
new file mode 100644
index 0000000..1651de0
--- /dev/null
+++ b/docs/troubleshooting/world-tracking-issues.md
@@ -0,0 +1,58 @@
+---
+id: world-tracking-issues
+description: I'm experiencing issues with World Tracking.
+---
+
+# World Tracking Issues
+
+## Introduction
+I'm experiencing issues with World Tracking.
+
+## Why does this happen?
+
+8th Wall’s World Tracking relies on real time analysis of the device camera feed, identifying **feature points** which are unique, trackable details used to determine where the "floor" or **ground plane** is.
+
+It is **floor based only**, continuously recalculating a single horizontal plane (**Y = 0**) from the most stable set of feature points visible at any moment. If the surface is too uniform or lacks distinct features, tracking can be lost or the floor may be redefined incorrectly.
+
+Unlike ARKit or ARCore, which can track multiple surfaces, 8th Wall detects and updates one ground plane dynamically. The **feature detection pipeline operates in grayscale**, using contrast and texture differences between light and dark areas rather than color. High contrast, detail rich surfaces work best, while smooth, low contrast ones, regardless of color, provide poor results.
+
+Common causes of poor tracking include:
+
+* Repetitive patterns such as brick walls, wood planks, or floor tiles
+* Low grayscale contrast such as plain carpets, smooth concrete, or single color floors
+* Shiny, reflective, or translucent surfaces
+* Rapid camera movement or motion blur
+* Sudden lighting changes
+* Multiple surfaces in view such as a table and floor together
+* Tight or cluttered spaces with limited open area
+
+## How do I fix it?
+
+1. **Choose a feature rich surface**
+ * Use surfaces with irregular patterns and varied light and dark contrast such as cracks in concrete, grass, gravel, or patterned rugs.
+
+2. **Avoid repeating patterns**
+ * Similar looking elements like bricks or planks can confuse detection.
+
+3. **Ensure good lighting**
+ * Use even lighting without glare or heavy shadows.
+
+4. **Minimize rapid camera movement**
+ * Move slowly, especially at startup, to establish stable tracking.
+
+5. **Keep only one surface in view**
+ * Frame the target surface without other horizontal planes intruding.
+
+6. **Give AR content enough space**
+ * Use an area large enough for your content, free of close obstacles.
+
+7. **Re-scan if needed**
+ * Point the camera at a feature rich surface for a few seconds or call `XR8.XrController.recenter()` to reset tracking.
+
+## How do I improve the End User Experience?
+
+1. **Add a Re-Center Button**
+ * Lets users reset tracking to the current camera view, helpful if they started on a poor surface, without reloading the page.
+
+2. **Position content fully above Y = 0**
+ * Ensure models sit entirely above the ground plane. Adjust pivot points or reposition models so no part appears below the floor.
diff --git a/docusaurus.config.js b/docusaurus.config.js
index 427f670..fdb97ed 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -16,7 +16,11 @@ const config = {
[
'classic',
{
- docs: false,
+ docs: {
+ routeBasePath: '/docs',
+ path: 'docs',
+ sidebarPath: require.resolve('./sidebars.js'),
+ },
blog: {
path: 'blog',
routeBasePath: 'blog',
@@ -56,7 +60,7 @@ const config = {
{ href: 'https://www.youtube.com/@8thwall', label: 'Tutorials', position: 'left' },
{ href: 'https://8th.io/blog', label: 'Blog', position: 'left' },
{ href: 'https://8th.io/discord', label: 'Discord', position: 'left' },
- { href: 'https://www.8thwall.com/docs', label: 'Docs', position: 'left' },
+ { href: '/docs', label: 'Docs', position: 'left' },
{
type: 'html',
position: 'right',
diff --git a/gen/tables/Audio.md b/gen/tables/Audio.md
new file mode 100644
index 0000000..07b6169
--- /dev/null
+++ b/gen/tables/Audio.md
@@ -0,0 +1,12 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| url | `string` | `''` | Source URL for the audio |
+| volume | `number` | `1` | How loud the audio will be played. A value between 0 and 1. |
+| loop | `boolean` | `false` | Whether the audio restarts after it finishes |
+| paused | `boolean` | `false` | Whether the audio is currently paused |
+| pitch | `number` | `1` | The factor used to change the audio's pitch. 1 is the default pitch. |
+| positional | `boolean` | `false` | Whether the audio is placed in 3D space |
+| refDistance | `number` | `1` | **Only applied if positional is true.** The value indicating at what distance the volume from this source will start reducing as the listener moves away. Must be a non-negative value. |
+| distanceModel | `string` | `'inverse'` | **Only applied if positional is true.** The algorithm used to reduce volume as the distance increases between this audio source and the listener. Allowed values: `'linear'`, `'inverse'`, `'exponential'`. |
+| rolloffFactor | `number` | `1` | **Only applied if positional is true.** How quickly volume is reduced as distance increases between this audio source and the listener. The acceptable range of values changes depending on the distanceModel, shown by the following: linear: 0 to 1, inverse: 0 to Infinity, exponential: 0 to Infinity |
+| maxDistance | `number` | `10000` | **Only applied if positional is true** and distanceModel is `'linear'`. The max distance between this audio source and the listener. Volume is not reduced after this point. Must be a positive value. |
diff --git a/gen/tables/BoxGeometry.md b/gen/tables/BoxGeometry.md
new file mode 100644
index 0000000..bd32805
--- /dev/null
+++ b/gen/tables/BoxGeometry.md
@@ -0,0 +1,5 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| width | `number` | `0` | Width of the box |
+| height | `number` | `0` | Height of the box |
+| depth | `number` | `0` | Depth of the box |
diff --git a/gen/tables/Camera.md b/gen/tables/Camera.md
new file mode 100644
index 0000000..40caa4d
--- /dev/null
+++ b/gen/tables/Camera.md
@@ -0,0 +1,27 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| type | `string` | `'perspective'` | Type of projection used for rendering. Allowed values: `'perspective'`, `'orthogonal'`. Only relevant when `xrCameraType` is set to `'3dOnly'`. |
+| nearClip | `number` | `0.1` | The distance from the camera of the near clip plane, i.e. the closest distance to the camera at which scene objects are visible. |
+| farClip | `number` | `2000` | The distance from the camera of the far clip plane, i.e. the farthest distance to the camera at which scene objects are visible. |
+| zoom | `number` | `1` | Zoom factor of the camera. Only relevant when `xrCameraType` is set to `'3dOnly'`. |
+| fov | `number` | `80` | Field of view, in degrees. Only relevant when `xrCameraType` is set to `'3dOnly'` and `type` is set to `'perspective'`. |
+| left | `number` | `-1` | Camera frustum left plane. Only relevant when `xrCameraType` is set to `'3dOnly'` and `type` is set to `'orthogonal'`. |
+| right | `number` | `1` | Camera frustum right plane. Only relevant when `xrCameraType` is set to `'3dOnly'` and `type` is set to `'orthogonal'`. |
+| top | `number` | `1` | Camera frustum top plane. Only relevant when `xrCameraType` is set to `'3dOnly'` and `type` is set to `'orthogonal'`. |
+| bottom | `number` | `-1` | Camera frustum bottom plane. Only relevant when `xrCameraType` is set to `'3dOnly'` and `type` is set to `'orthogonal'`. |
+| direction | `string` | `'front'` | Whether to use either `'front'` or `'back'` camera for AR. Only relevant for `'world'` or `'face'` camera. Must be set to `'back'` to enable SLAM tracking. |
+| xrCameraType | `string` | `'3dOnly'` | Type of camera to use. Allowed values: `'world'`, `'face'`, `'3dOnly'`. |
+| uvType | `string` | `'standard'` | Specifies which uvs are returned in the facescanning and faceloading events. Allowed values: `'standard'`, `'projected'`. Only relevant when `xrCameraType` is set to `'face'`. |
+| leftHandedAxes | `boolean` | `false` | If true, use left-handed coordinates: X-right, Y-up, and Z-forward. Otherwise, X-left, Y-up, and Z-forward. |
+| disableWorldTracking | `boolean` | `false` | If true, turn off SLAM tracking for efficiency. Must be set to false to enable VPS. |
+| enableLighting | `boolean` | `false` | If true, return an estimate of lighting information |
+| scale | `string` | `'responsive'` | Allowed values: `'responsive'`, `'absolute'`. `'responsive'` will return values so that the camera on frame 1 is at the origin defined via [XR8.XrController.updateCameraProjectionMatrix()](https://www.8thwall.com/docs/docs/engine/api/xrcontroller/updatecameraprojectionmatrix/). `'absolute'` will return the camera, image targets, etc in meters. When using `'absolute'`, the x-position, z-position, and rotation of the starting pose will respect the parameters set in [XR8.XrController.updateCameraProjectionMatrix()](https://www.8thwall.com/docs/docs/engine/api/xrcontroller/updatecameraprojectionmatrix/) once scale has been estimated. The y-position will depend on the camera's physical height from the ground plane. Must be set to `'responsive'` to enable VPS. |
+| enableWorldPoints | `boolean` | `false` | If true, return the map points used for tracking |
+| enableVps | `boolean` | `false` | If true, look for Project Locations and a mesh. The mesh that is returned has no relation to Project Locations and will be returned even if no Project Locations are configured. Must enable responsive scale and world tracking to enable VPS. Only relevant when xrCameraType is set to `'world'` |
+| mirroredDisplay | `boolean` | `false` | If true, flip the rendering left-right |
+| meshGeometryFace | `boolean` | `false` | Whether to show face mesh geometry. Only relevant when xrCameraType is set to `'face'`. |
+| meshGeometryEyes | `boolean` | `false` | Whether to show eye mesh geometry. Only relevant when xrCameraType is set to `'face'`. |
+| meshGeometryIris | `boolean` | `false` | Whether to show iris mesh geometry. Only relevant when xrCameraType is set to `'face'`. |
+| meshGeometryMouth | `boolean` | `false` | Whether to show mouth mesh geometry. Only relevant when xrCameraType is set to `'face'`. |
+| enableEars | `boolean` | `false` | If true, runs ear detection simultaneously with Face Effects and returns ear attachment points. Only relevant when xrCameraType is set to `'face'`. |
+| maxDetections | `number` | `1` | The maximum number of faces to detect. Allowed values: 1, 2, or 3. Only relevant when xrCameraType is set to `'face'`. |
diff --git a/gen/tables/CapsuleGeometry.md b/gen/tables/CapsuleGeometry.md
new file mode 100644
index 0000000..a542dd7
--- /dev/null
+++ b/gen/tables/CapsuleGeometry.md
@@ -0,0 +1,4 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| radius | `number` | `0` | Radius of the capsule |
+| height | `number` | `0` | Height of the capsule |
diff --git a/gen/tables/CircleGeometry.md b/gen/tables/CircleGeometry.md
new file mode 100644
index 0000000..4061d25
--- /dev/null
+++ b/gen/tables/CircleGeometry.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| radius | `number` | `0` | Radius of the circle |
diff --git a/gen/tables/Collider.md b/gen/tables/Collider.md
new file mode 100644
index 0000000..a0c2780
--- /dev/null
+++ b/gen/tables/Collider.md
@@ -0,0 +1,29 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| type | `ecs.ColliderType` | `ecs.ColliderType.Static` | Supported types include static, dynamic, and kinematic |
+| shape | `ecs.ColliderShape` | `ecs.ColliderShape.Box` | Supported shape types include geometries and auto generated shapes from meshes |
+| width | `number` | `0` | Width of Box or Plane shapes |
+| height | `number` | `0` | Height of Box, Plane, Capsule, Cone, or Cylinder shapes |
+| depth | `number` | `0` | Depth of Box shape |
+| radius | `number` | `0` | Radius of Sphere, Capsule, Cone, or Cylinder shapes |
+| mass | `number` | `0` | Mass of the entity |
+| eventOnly | `boolean` | `false` | Determines if the object should only dispatch collision events, and not physically respond |
+| gravityFactor | `number` | `1` | Factor of which to scale the world’s gravity |
+| lockXAxis | `boolean` | `false` | Disables rotation on the X Axis |
+| lockYAxis | `boolean` | `false` | Disables rotation on the Y Axis |
+| lockZAxis | `boolean` | `false` | Disables rotation on the Z Axis |
+| friction | `number` | `0.5` | The amount of contact friction on the entity |
+| restitution | `number` | `0` | The bounciness of the entity, negative values absorb impact |
+| linearDamping | `number` | `0` | The amount of air resistance while moving |
+| angularDamping | `number` | `0` | The amount of air resistance while rotating |
+| lockXPosition | `boolean` | `false` | Prevents movement of the collider along the X-axis |
+| lockYPosition | `boolean` | `false` | Prevents movement of the collider along the Y-axis |
+| lockZPosition | `boolean` | `false` | Prevents movement of the collider along the Z-axis |
+| highPrecision | `boolean` | `false` | Enables continuous collision detection to increase precision of collision events |
+| offsetX | `number` | `0` | Local X offset of the collider shape from the object origin |
+| offsetY | `number` | `0` | Local Y offset of the collider shape from the object origin |
+| offsetZ | `number` | `0` | Local Z offset of the collider shape from the object origin |
+| offsetQuaternionX | `number` | `0` | Quaternion X offset of the collider shape from the object orientation |
+| offsetQuaternionY | `number` | `0` | Quaternion Y offset of the collider shape from the object orientation |
+| offsetQuaternionZ | `number` | `0` | Quaternion Z offset of the collider shape from the object orientation |
+| offsetQuaternionW | `number` | `1` | Quaternion W offset of the collider shape from the object orientation |
diff --git a/gen/tables/ConeGeometry.md b/gen/tables/ConeGeometry.md
new file mode 100644
index 0000000..83f0163
--- /dev/null
+++ b/gen/tables/ConeGeometry.md
@@ -0,0 +1,4 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| radius | `number` | `0` | Radius of the cone |
+| height | `number` | `0` | Height of the cone |
diff --git a/gen/tables/CustomPropertyAnimation.md b/gen/tables/CustomPropertyAnimation.md
new file mode 100644
index 0000000..69b3f08
--- /dev/null
+++ b/gen/tables/CustomPropertyAnimation.md
@@ -0,0 +1,14 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| attribute | `string` | `''` | Selected attribute to animate (e.g. position, scale, material) |
+| property | `string` | `''` | Individual property to animate (e.g. x, y, or z for a vector attribute) |
+| target | `eid` | `undefined` | The target object to animate. If not specified, the animation is run on the object that the component is attached to. |
+| from | `number` | `0` | The starting state of the animation |
+| to | `number` | `0` | The ending state of the animation |
+| autoFrom | `boolean` | `false` | If enabled, ignore `'from'` property and animate from the state of the object at the start of the animation |
+| duration | `number` | `1000` | Length of time that the animation runs in milliseconds |
+| loop | `boolean` | `true` | If enabled, repeat the animation |
+| reverse | `boolean` | `false` | Whether to play in reverse, if loop set |
+| easeIn | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easeOut | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easingFunction | `string` | `''` | Allowed values: `'Quadratic'`, `'Cubic'`, `'Quartic'`, `'Quintic'`, `'Sinusoidal'`, `'Exponential'`, `'Circular'`, `'Elastic'`, `'Back'`, `'Bounce'` |
diff --git a/gen/tables/CustomVec3Animation.md b/gen/tables/CustomVec3Animation.md
new file mode 100644
index 0000000..b499ddd
--- /dev/null
+++ b/gen/tables/CustomVec3Animation.md
@@ -0,0 +1,17 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| attribute | `string` | `''` | Selected attribute to animate (e.g. position, scale, material) |
+| target | `eid` | `undefined` | The target object to animate. If not specified, the animation is run on the object that the component is attached to. |
+| fromX | `number` | `0` | The starting X position for the animation |
+| fromY | `number` | `0` | The starting Y position for the animation |
+| fromZ | `number` | `0` | The starting Z position for the animation |
+| toX | `number` | `0` | The ending X position for the animation |
+| toY | `number` | `0` | The ending Y position for the animation |
+| toZ | `number` | `0` | The ending Z position for the animation |
+| autoFrom | `boolean` | `false` | If enabled, ignore `from` property and animate from the state of the object at the start of the animation |
+| duration | `number` | `1000` | Length of time that the animation runs in milliseconds |
+| loop | `boolean` | `true` | If enabled, repeat the animation |
+| reverse | `boolean` | `false` | Whether to play in reverse, if loop set |
+| easeIn | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easeOut | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easingFunction | `string` | `''` | Allowed values: `'Quadratic'`, `'Cubic'`, `'Quartic'`, `'Quintic'`, `'Sinusoidal'`, `'Exponential'`, `'Circular'`, `'Elastic'`, `'Back'`, `'Bounce'` |
diff --git a/gen/tables/CylinderGeometry.md b/gen/tables/CylinderGeometry.md
new file mode 100644
index 0000000..599743f
--- /dev/null
+++ b/gen/tables/CylinderGeometry.md
@@ -0,0 +1,4 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| radius | `number` | `0` | Radius of the cylinder |
+| height | `number` | `0` | Height of the cylinder |
diff --git a/gen/tables/Disabled.md b/gen/tables/Disabled.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/Disabled.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/Face.md b/gen/tables/Face.md
new file mode 100644
index 0000000..04edaf1
--- /dev/null
+++ b/gen/tables/Face.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| id | `number` | `1` | The id of the face that is currently being tracked out of the number of faces tracked, specified by the active camera |
diff --git a/gen/tables/FaceAnchor.md b/gen/tables/FaceAnchor.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/FaceAnchor.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/FaceAttachment.md b/gen/tables/FaceAttachment.md
new file mode 100644
index 0000000..42009ba
--- /dev/null
+++ b/gen/tables/FaceAttachment.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| point | `string` | `'forehead'` | The specified face attachment that is being tracked |
diff --git a/gen/tables/FaceGeometry.md b/gen/tables/FaceGeometry.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/FaceGeometry.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/FaceMeshAnchor.md b/gen/tables/FaceMeshAnchor.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/FaceMeshAnchor.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/FlyController.md b/gen/tables/FlyController.md
new file mode 100644
index 0000000..224c164
--- /dev/null
+++ b/gen/tables/FlyController.md
@@ -0,0 +1,9 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| verticalSensitivity | `number` | `1` | Input sensitivity vertically |
+| horizontalSensitivity | `number` | `1` | Input sensitivity horizontally |
+| moveSpeedX | `number` | `10` | Movement speed of the x axis |
+| moveSpeedY | `number` | `10` | Movement speed of the y axis |
+| moveSpeedZ | `number` | `10` | Movement speed of the z axis |
+| invertedX | `boolean` | `false` | Whether X input is inverted |
+| invertedY | `boolean` | `false` | Whether Y input is inverted |
diff --git a/gen/tables/FollowAnimation.md b/gen/tables/FollowAnimation.md
new file mode 100644
index 0000000..cb846d1
--- /dev/null
+++ b/gen/tables/FollowAnimation.md
@@ -0,0 +1,6 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| target | `eid` | `undefined` | The target object to animate. If not specified, the animation is run on the object that the component is attached to. |
+| minDistance | `number` | `0` | Minimum distance to maintain when following (in meters) |
+| maxDistance | `number` | `0` | Maximum distance to maintain when following (in meters) |
+| elasticity | `number` | `1` | Proportion of distance to move (per second) when target is outside of min/max distance (0 never moves, 1 is instantaneous) |
diff --git a/gen/tables/GltfModel.md b/gen/tables/GltfModel.md
new file mode 100644
index 0000000..e56da34
--- /dev/null
+++ b/gen/tables/GltfModel.md
@@ -0,0 +1,11 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| url | `string` | `''` | The source URL |
+| animationClip | `string` | `''` | The name of the animationClip attached to the model to play. The Wildcard `*` is also accepted, and will set the model to play through every animation. |
+| loop | `boolean` | `false` | Whether the animation clip restarts after it finishes playing or not |
+| paused | `boolean` | `false` | Whether the animation clip is paused |
+| time | `number` | `0` | Time in seconds of animationClip to jump to |
+| timeScale | `number` | `1` | Scaling factor for the time. 0 causes the animation to pause. Negative values cause the animation to play backwards. |
+| reverse | `boolean` | `false` | If set, the animation will play in reverse when it finishes playing forward (together counts as one loop iteration) |
+| repetitions | `number` | `0` | If set along with ‘loop’, the number of times the loop will be repeated (-1 means loop forever) |
+| crossFadeDuration | `number` | `0` | The time in seconds that will be spent blending between animations when a new animation is started (and an existing animation is playing) |
diff --git a/gen/tables/GpsPointer.md b/gen/tables/GpsPointer.md
new file mode 100644
index 0000000..b4025a7
--- /dev/null
+++ b/gen/tables/GpsPointer.md
@@ -0,0 +1,7 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| faceGpsDirection | `boolean` | `true` | If enabled, the object this is attached to will rotate to face the direction in which the center of the map is moving |
+| idleClip | `string` | `''` | If specified, the selected animation clip will be triggered on the entity when below a walking speed threshold of 2km/h |
+| walkClip | `string` | `''` | If specified, the selected animation clip will be triggered on the entity once above a walking speed threshold of 2km/h |
+| runClip | `string` | `''` | If specified, the selected animation clip will be triggered on the entity once above a running speed threshold of 8km/h |
+| driveClip | `string` | `''` | If specified, the selected animation clip will be triggered on the entity once above a driving speed threshold of 25km/h |
diff --git a/gen/tables/Hidden.md b/gen/tables/Hidden.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/Hidden.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/HiderMaterial.md b/gen/tables/HiderMaterial.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/HiderMaterial.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/ImageTarget.md b/gen/tables/ImageTarget.md
new file mode 100644
index 0000000..6d5411e
--- /dev/null
+++ b/gen/tables/ImageTarget.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| name | `string` | `''` | The unique name of the image target in the workspace to track |
diff --git a/gen/tables/Light.md b/gen/tables/Light.md
new file mode 100644
index 0000000..e56f995
--- /dev/null
+++ b/gen/tables/Light.md
@@ -0,0 +1,32 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| type | `string` | `''` | Allowed values: `'directional'`, `'point'`, `'ambient'`, `'spot'`, `'area'` |
+| castShadow | `boolean` | `true` | If the light source should cast shadows |
+| intensity | `number` | `0.5` | The light's intensity, or strength |
+| r | `number` | `255` | The amount of red the light emits. A value between 0 and 255. |
+| g | `number` | `255` | The amount of green the light emits. A value between 0 and 255. |
+| b | `number` | `255` | The amount of blue the light emits. A value between 0 and 255. |
+| colorMap | `string` | `''` | The color map source |
+| shadowBias | `number` | `-0.005` | How much to add or subtract from the normalized depth when deciding whether a surface is in shadow |
+| shadowNormalBias | `number` | `0` | How much the position used to query the shadow map is offset along the object normal |
+| shadowRadius | `number` | `1` | The radius of the shadow |
+| shadowAutoUpdate | `boolean` | `true` | Should the shadow be automatically calculated and updated |
+| shadowBlurSamples | `number` | `8` | The amount of samples to use when calculating the Virtual Shadow Map |
+| shadowMapSizeHeight | `number` | `1024` | The height of the Shadow Map. Values **must** be powers of 2. |
+| shadowMapSizeWidth | `number` | `1024` | The width of the Shadow Map. Values **must** be powers of 2. |
+| shadowCameraNear | `number` | `0.5` | Camera frustum near-pane for calculating shadows |
+| shadowCameraFar | `number` | `200` | Camera frustum far-pane for calculating shadows |
+| shadowCameraLeft | `number` | `-50` | Camera frustum left-pane for calculating shadows |
+| shadowCameraRight | `number` | `50` | Camera frustum right-pane for calculating shadows |
+| shadowCameraBottom | `number` | `-50` | Camera frustum bottom-pane for calculating shadows |
+| shadowCameraTop | `number` | `50` | Camera frustum top-pane for calculating shadows |
+| targetX | `number` | `0` | The target X coordinate of the light (Directional only) |
+| targetY | `number` | `0` | The target Y coordinate of the light (Directional only) |
+| targetZ | `number` | `0` | The target Z coordinate of the light (Directional only) |
+| width | `number` | `10` | Width of the light source (Area only) |
+| height | `number` | `10` | Height of the light source (Area only) |
+| penumbra | `number` | `0` | Percent of the spotlight cone that is attenuated due to penumbra. Accepted values between 0 and 1. (Spot only) |
+| decay | `number` | `2` | The amount the light dims along the distance of the light |
+| angle | `number` | `Math.PI / 3` | Maximum extent of the spotlight, in radians, from its direction. Should be no more than Math.PI / 2. |
+| followCamera | `boolean` | `true` | Whether the light should follow where the camera is moving (Directional only) |
+| distance | `number` | `0` | When distance is zero, light will attenuate according to inverse-square law to infinite distance. When distance is non-zero, light will attenuate according to inverse-square law until near the distance cutoff, where it will then attenuate quickly and smoothly to 0. Inherently, cutoffs are not physically correct. |
diff --git a/gen/tables/Location.md b/gen/tables/Location.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/Location.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/LocationAnchor.md b/gen/tables/LocationAnchor.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/LocationAnchor.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/LookAtAnimation.md b/gen/tables/LookAtAnimation.md
new file mode 100644
index 0000000..36b3e5b
--- /dev/null
+++ b/gen/tables/LookAtAnimation.md
@@ -0,0 +1,8 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| target | `eid` | `undefined` | The target object to track |
+| targetX | `number` | `0` | The target x position (if no target is specified) |
+| targetY | `number` | `0` | The target y position (if no target is specified) |
+| targetZ | `number` | `0` | The target z position (if no target is specified) |
+| lockX | `boolean` | `false` | Whether to lock the rotation on the relative x-axis when looking at the target |
+| lockY | `boolean` | `false` | Whether to lock the rotation on the relative y-axis when looking at the target |
diff --git a/gen/tables/Map.md b/gen/tables/Map.md
new file mode 100644
index 0000000..a59876f
--- /dev/null
+++ b/gen/tables/Map.md
@@ -0,0 +1,7 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| latitude | `number` | `37.7955281` | The latitude of the map's center |
+| longitude | `number` | `-122.3934225` | The longitude of the map's center |
+| radius | `number` | `500` | The radius of the map view, in meters |
+| spawnLocations | `boolean` | `false` | If enabled, map points will spawn at every VPS activated location |
+| useGps | `boolean` | `true` | If enabled, map will poll GPS and update latitude and longitude automatically at runtime |
diff --git a/gen/tables/MapPoint.md b/gen/tables/MapPoint.md
new file mode 100644
index 0000000..0d95c9c
--- /dev/null
+++ b/gen/tables/MapPoint.md
@@ -0,0 +1,5 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| latitude | `number` | `37.7955281` | The latitude of the map point |
+| longitude | `number` | `-122.3934225` | The longitude of the map point |
+| meters | `number` | `33.33` | Map meters per Transform unit for children of the map point |
diff --git a/gen/tables/MapTheme.md b/gen/tables/MapTheme.md
new file mode 100644
index 0000000..e582c49
--- /dev/null
+++ b/gen/tables/MapTheme.md
@@ -0,0 +1,48 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| landColor | `string` | `'#AEC988'` | Color of land as a hexadecimal code |
+| landOpacity | `number` | `1` | Opacity range of land (0.0 - 1.0) |
+| landVisibility | `boolean` | `true` | Visibility of land |
+| buildingColor | `string` | `'#EFEFEA'` | Color of buildings as a hexadecimal code |
+| buildingOpacity | `number` | `0.4` | Opacity range of buildings (0.0 - 1.0) |
+| buildingMinMeters | `number` | `6` | Min height of buildings in meters |
+| buildingMaxMeters | `number` | `6` | Max height of buildings in meters |
+| buildingVisibility | `boolean` | `true` | Visibility of buildings |
+| buildingBase | `number` | `0.014` | Height of the bottom of buildings, offset from land |
+| parkColor | `string` | `'#80B063'` | Color of parks as a hexadecimal code |
+| parkOpacity | `number` | `1` | Opacity range of parks (0.0 - 1.0) |
+| parkVisibility | `boolean` | `true` | Visibility of parks |
+| parkBase | `number` | `0.002` | Height of the park, offset from land |
+| parkingColor | `string` | `'#84A172'` | Color of parking lots as a hexadecimal code |
+| parkingOpacity | `number` | `1` | Opacity range of parking lots (0.0 - 1.0) |
+| parkingVisibility | `boolean` | `true` | Visibility of parking lots |
+| parkingBase | `number` | `0.008` | Height of parking lots, offset from land |
+| transitColor | `string` | `'#F9F8C7'` | Color of transit lines as a hexadecimal code |
+| transitOpacity | `number` | `1` | Opacity range of transit lines (0.0 - 1.0) |
+| transitVisibility | `boolean` | `true` | Visibility of transit lines |
+| transitMeters | `number` | `8` | Width in meters of transit lines |
+| transitBase | `number` | `0.012` | Height of transit lines, offset from land |
+| transitMin | `number` | `0` | Minimum mapping system unit width of transit lines, when zooming out |
+| roadColor | `string` | `'#DCE1DE'` | Color of roads as a hexadecimal code |
+| roadOpacity | `number` | `1` | Opacity range of roads (0.0 - 1.0) |
+| roadVisibility | `boolean` | `true` | Visibility of roads |
+| roadSMeters | `number` | `2` | Width in meters of S Roads |
+| roadMMeters | `number` | `4` | Width in meters of M Roads |
+| roadLMeters | `number` | `8` | Width in meters of L Roads |
+| roadXLMeters | `number` | `32` | Width in meters of XL Roads |
+| roadSMin | `number` | `0` | Minimum mapping system unit width of S roads, when zooming out |
+| roadMMin | `number` | `0` | Minimum mapping system unit width of M roads, when zooming out |
+| roadLMin | `number` | `0` | Minimum mapping system unit width of L roads, when zooming out |
+| roadXLMin | `number` | `0` | Minimum mapping system unit width of XL roads, when zooming out |
+| roadBase | `number` | `0.01` | Height of roads, offset from land |
+| sandColor | `string` | `'#AC92A6'` | Color of sand as a hexadecimal code |
+| sandOpacity | `number` | `1` | Opacity range of sand (0.0 - 1.0) |
+| sandVisibility | `boolean` | `true` | Visibility of sand |
+| sandBase | `number` | `0.004` | Height of sand, offset from land |
+| waterColor | `string` | `'#A0D3D3'` | Color of waterways as a hexadecimal code |
+| waterOpacity | `number` | `1` | Opacity range of waterways (0.0 - 1.0) |
+| waterVisibility | `boolean` | `true` | Visibility of waterways |
+| waterMeters | `number` | `6` | Width in meters of waterways |
+| waterBase | `number` | `0.006` | Height of waterways, offset from land |
+| waterMin | `number` | `0` | Minimum mapping system unit width of waterways, when zooming out |
+| lod | `number` | `1` | Level of detail. LOD higher than 1 will render less tile data (lower detail), while LOD lower than 1 will render more tile data (higher detail). LOD should be greater than 0. |
diff --git a/gen/tables/Material.md b/gen/tables/Material.md
new file mode 100644
index 0000000..62646eb
--- /dev/null
+++ b/gen/tables/Material.md
@@ -0,0 +1,32 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| r | `number` | `0` | Red channel value of the material. A value between 0 and 255. |
+| g | `number` | `0` | Green channel value of the material. A value between 0 and 255. |
+| b | `number` | `0` | Blue channel value of the material. A value between 0 and 255. |
+| textureSrc | `string` | `''` | The color map source, modulated by color (if set) |
+| roughness | `number` | `0.5` | How rough the material appears. A value between 0 and 1. |
+| roughnessMap | `string` | `''` | This texture map resource, affects how rough the material appears |
+| metalness | `number` | `0.5` | How metallic the material appears. A value between 0 and 1. |
+| metalnessMap | `string` | `''` | This texture resource affects how metal the material appears |
+| normalScale | `number` | `1` | How much the normal map (if set) affects the material. A value between 0 and 1. |
+| normalMap | `string` | `''` | Normal map source of the texture |
+| opacity | `number` | `1` | Overall alpha/transparency of the material. A value between 0 and 1. |
+| opacityMap | `string` | `''` | Alpha/transparency mapped via a texture resource |
+| emissiveIntensity | `number` | `0` | Overall intensity of the emissive map. A value between 0 and 1. |
+| emissiveMap | `string` | `''` | Emissive strength mapped as a texture resource. Modulated by emissive color and intensity. |
+| emissiveR | `number` | `0` | Red channel emissive color of the material. A value between 0 and 255. |
+| emissiveG | `number` | `0` | Green channel emissive color of the material. A value between 0 and 255. |
+| emissiveB | `number` | `0` | Blue channel emissive color of the material. A value between 0 and 255. |
+| side | `string` | `'front'` | Which sides of faces will be rendered. Allowed values: `'front'`, `'back'`, `'double'`. |
+| blending | `string` | `'normal'` | Blending to use when displaying objects with this material. Allowed values: `'no'`, `'normal'`, `'additive'`, `'subtractive'`, `'multiply'`. |
+| repeatX | `number` | `1` | How many times a texture is repeated across a material on the X axis |
+| repeatY | `number` | `1` | How many times a texture is repeated across a material on the Y axis |
+| offsetX | `number` | `0` | How much a texture is offset across a material on the X axis |
+| offsetY | `number` | `0` | How much a texture is offset across a material on the Y axis |
+| depthTest | `boolean` | `true` | Whether to test depth when rendering this material |
+| depthWrite | `boolean` | `true` | Whether rendering this material impacts the depth buffer |
+| wireframe | `boolean` | `false` | Render geometry as wireframe |
+| forceTransparent | `boolean` | `false` | Whether to force the alpha channel to render as transparent |
+| mipmaps | `boolean` | `true` | Whether to generate mipmaps for textures |
+| textureFiltering | `string` | `'smooth'` | Texture filtering mode. Allowed values: `'smooth'` or `'sharp'`. |
+| wrap | `string` | `'repeat'` | Wrapping mode for textures. Allowed values: `'clamp'`, `'repeat'`, `'mirroredRepeat'`. |
diff --git a/gen/tables/OrbitControls.md b/gen/tables/OrbitControls.md
new file mode 100644
index 0000000..5749879
--- /dev/null
+++ b/gen/tables/OrbitControls.md
@@ -0,0 +1,20 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| speed | `number` | `5` | How fast the camera moves |
+| maxAngularSpeed | `number` | `10` | Max rotation speed of the camera |
+| maxZoomSpeed | `number` | `10` | Max zoom speed for the camera |
+| distanceMin | `number` | `5` | Minimum distance between the focused entity and the camera |
+| distanceMax | `number` | `20` | Maximum distance between the focused entity and the camera |
+| pitchAngleMin | `number` | `-90` | Minimum pitch angle |
+| pitchAngleMax | `number` | `90` | Maximum pitch angle |
+| constrainYaw | `boolean` | `false` | Whether to constrain yaw |
+| yawAngleMin | `number` | `0` | Minimum yaw angle |
+| yawAngleMax | `number` | `0` | Maximum yaw angle |
+| inertiaFactor | `number` | `0.3` | Inertia factor for camera movement |
+| invertedX | `boolean` | `false` | Whether controls for moving on x-axis are inverted |
+| invertedY | `boolean` | `false` | Whether controls for moving on y-axis are inverted |
+| invertedZoom | `boolean` | `false` | Whether controls for zooming in and out are inverted |
+| controllerSupport | `boolean` | `false` | Whether to support controllers via the input-manager |
+| verticalSensitivity | `number` | `1` | Input sensitivity vertically |
+| horizontalSensitivity | `number` | `1` | Input sensitivity horizontally |
+| focusEntity | `eid` | `undefined` | Focus subject for the orbit camera |
diff --git a/gen/tables/ParticleEmitter.md b/gen/tables/ParticleEmitter.md
new file mode 100644
index 0000000..2aecb3e
--- /dev/null
+++ b/gen/tables/ParticleEmitter.md
@@ -0,0 +1,35 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| stopped | `boolean` | `false` | Playback state of the emitter |
+| emitterLife | `number` | `1` | The lifetime of the emitter in seconds |
+| particlesPerShot | `number` | `1` | Particles per emission |
+| emitDelay | `number` | `1` | The time between particle emissions |
+| minimumLifespan | `number` | `1` | Minimum lifetime range for each particle |
+| maximumLifespan | `number` | `10` | Maximum lifetime range for each particle |
+| mass | `number` | `1` | Mass of each particle |
+| gravity | `number` | `0` | Gravity factor of each particle |
+| scale | `number` | `1` | Uniform scale of each particle |
+| forceX | `number` | `0` | Force applied to each particle in the X direction |
+| forceY | `number` | `0` | Force applied to each particle in the Y direction |
+| forceZ | `number` | `0` | Force applied to each particle in the Z direction |
+| spread | `number` | `0` | Area and direction that each particles spawns relative to its origin |
+| radialVelocity | `number` | `0` | Amount of radial velocity applied to each particle |
+| spawnAreaType | `string` | `'point'` | Allowed values: `'point'`, `'box'` and `'sphere'` |
+| spawnAreaWidth | `number` | `0` | Width of the spawn area box (Box only) |
+| spawnAreaHeight | `number` | `0` | Height of the spawn area box (Box only) |
+| spawnAreaDepth | `number` | `0` | Depth of the spawn area box (Box only) |
+| spawnAreaRadius | `number` | `0` | Radius of the spawn area sphere (Sphere only) |
+| boundingZoneType | `string` | `''` | Allowed values: `'none'`, `'box'`, `'sphere'` |
+| boundingZoneWidth | `number` | `0` | Width of the bounding zone (Box only) |
+| boundingZoneHeight | `number` | `0` | Height of the bounding zone (Box only) |
+| boundingZoneDepth | `number` | `0` | Depth of the bounding zone (Box only) |
+| boundingZoneRadius | `number` | `0` | Radius of the bounding zone (Sphere only) |
+| resourceType | `string` | `'none'` | Allowed values: `'sprite'`, `'model'` |
+| resourceUrl | `string` | `''` | URL of the resource |
+| blendingMode | `string` | `''` | Allowed values: `'none'`, `'normal'`, `'add'`, `'multiply'`, `'subtract'` |
+| animateColor | `boolean` | `false` | Determines if color should be animated |
+| colorStart | `string` | `''` | The starting color of each particle |
+| colorEnd | `string` | `''` | The ending color of each particle |
+| randomDrift | `boolean` | `false` | Enable randomized drifting for each particle |
+| randomDriftRange | `number` | `0` | Determines the randomized drift range and speed of each particle |
+| collisions | `boolean` | `false` | Determines if particles should respond to physics collisions |
diff --git a/gen/tables/Persistent.md b/gen/tables/Persistent.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/Persistent.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/PlaneGeometry.md b/gen/tables/PlaneGeometry.md
new file mode 100644
index 0000000..b6c4505
--- /dev/null
+++ b/gen/tables/PlaneGeometry.md
@@ -0,0 +1,4 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| width | `number` | `0` | Width of the plane |
+| height | `number` | `0` | Height of the plane |
diff --git a/gen/tables/PolyhedronGeometry.md b/gen/tables/PolyhedronGeometry.md
new file mode 100644
index 0000000..446d2f9
--- /dev/null
+++ b/gen/tables/PolyhedronGeometry.md
@@ -0,0 +1,4 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| radius | `number` | `0` | Radius of the polyhedron |
+| faces | `number` | `0` | Number of flat surfaces that make up the entity |
diff --git a/gen/tables/Position.md b/gen/tables/Position.md
new file mode 100644
index 0000000..765a991
--- /dev/null
+++ b/gen/tables/Position.md
@@ -0,0 +1,5 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| x | `number` | `0` | Negative X axis extends left. Positive X Axis extends right. |
+| y | `number` | `0` | Negative Y axis extends down. Positive Y Axis extends up. |
+| z | `number` | `0` | Negative Z axis extends in. Positive Z Axis extends out. |
diff --git a/gen/tables/PositionAnimation.md b/gen/tables/PositionAnimation.md
new file mode 100644
index 0000000..2410bdb
--- /dev/null
+++ b/gen/tables/PositionAnimation.md
@@ -0,0 +1,16 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| target | `eid` | `undefined` | The target object to animate. If not specified, the animation is run on the object that the component is attached to. |
+| fromX | `number` | `0` | The starting X coordinate for the animation |
+| fromY | `number` | `0` | The starting Y coordinate for the animation |
+| fromZ | `number` | `0` | The starting Z coordinate for the animation |
+| toX | `number` | `0` | The target X coordinate for the animation |
+| toY | `number` | `0` | The target Y coordinate for the animation |
+| toZ | `number` | `0` | The target Z coordinate for the animation |
+| autoFrom | `boolean` | `false` | If enabled, ignore `'fromX/Y/Z'` properties and animate from the state of the object at the start of the animation |
+| duration | `number` | `1000` | Length of time that the animation runs in milliseconds |
+| loop | `boolean` | `true` | If enabled, repeat the animation |
+| reverse | `boolean` | `false` | Whether to play in reverse, if loop set |
+| easeIn | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easeOut | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easingFunction | `string` | `''` | Allowed values: `'Quadratic'`, `'Cubic'`, `'Quartic'`, `'Quintic'`, `'Sinusoidal'`, `'Exponential'`, `'Circular'`, `'Elastic'`, `'Back'`, `'Bounce'` |
diff --git a/gen/tables/Quaternion.md b/gen/tables/Quaternion.md
new file mode 100644
index 0000000..c30ca50
--- /dev/null
+++ b/gen/tables/Quaternion.md
@@ -0,0 +1,6 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| x | `number` | `0` | x coordinate |
+| y | `number` | `0` | y coordinate |
+| z | `number` | `0` | z coordinate |
+| w | `number` | `1` | w coordinate |
diff --git a/gen/tables/RingGeometry.md b/gen/tables/RingGeometry.md
new file mode 100644
index 0000000..db1415d
--- /dev/null
+++ b/gen/tables/RingGeometry.md
@@ -0,0 +1,4 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| innerRadius | `number` | `0` | Radius of the inner hole of the ring |
+| outerRadius | `number` | `0` | Radius of the outer edge of the ring |
diff --git a/gen/tables/RotateAnimation.md b/gen/tables/RotateAnimation.md
new file mode 100644
index 0000000..e629e2b
--- /dev/null
+++ b/gen/tables/RotateAnimation.md
@@ -0,0 +1,17 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| target | `eid` | `undefined` | The target object to animate. If not specified, the animation is run on the object that the component is attached to. |
+| fromX | `number` | `0` | The starting X coordinate for the animation |
+| fromY | `number` | `0` | The starting Y coordinate for the animation |
+| fromZ | `number` | `0` | The starting Z coordinate for the animation |
+| toX | `number` | `0` | The target X coordinate for the animation |
+| toY | `number` | `0` | The target Y coordinate for the animation |
+| toZ | `number` | `0` | The target Z coordinate for the animation |
+| autoFrom | `boolean` | `false` | If enabled, ignore `fromX/Y/Z` properties and animate from the state of the object at the start of the animation |
+| duration | `number` | `1000` | Length of time that the animation runs in milliseconds |
+| loop | `boolean` | `true` | If enabled, repeat the animation |
+| reverse | `boolean` | `false` | Whether to play in reverse, if loop set |
+| easeIn | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easeOut | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easingFunction | `string` | `''` | Allowed values: `'Quadratic'`, `'Cubic'`, `'Quartic'`, `'Quintic'`, `'Sinusoidal'`, `'Exponential'`, `'Circular'`, `'Elastic'`, `'Back'`, `'Bounce'` |
+| shortestPath | `boolean` | `true` | If enabled, the animation will take the shortest path to completion |
diff --git a/gen/tables/Scale.md b/gen/tables/Scale.md
new file mode 100644
index 0000000..6ee2622
--- /dev/null
+++ b/gen/tables/Scale.md
@@ -0,0 +1,5 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| x | `number` | `1` | Scaling factor in the X direction |
+| y | `number` | `1` | Scaling factor in the Y direction |
+| z | `number` | `1` | Scaling factor in the Z direction |
diff --git a/gen/tables/ScaleAnimation.md b/gen/tables/ScaleAnimation.md
new file mode 100644
index 0000000..83f5d57
--- /dev/null
+++ b/gen/tables/ScaleAnimation.md
@@ -0,0 +1,16 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| target | `eid` | `undefined` | The target object to animate. If not specified, the animation is run on the object that the component is attached to. |
+| fromX | `number` | `0` | The starting X scale for the animation |
+| fromY | `number` | `0` | The starting Y scale for the animation |
+| fromZ | `number` | `0` | The starting Z scale for the animation |
+| toX | `number` | `0` | The target X scale for the animation |
+| toY | `number` | `0` | The target Y scale for the animation |
+| toZ | `number` | `0` | The target Z scale for the animation |
+| autoFrom | `boolean` | `false` | If enabled, ignore `fromX/Y/Z` properties and animate from the state of the object at the start of the animation |
+| duration | `number` | `1000` | Length of time that the animation runs in milliseconds |
+| loop | `boolean` | `true` | If enabled, repeat the animation |
+| reverse | `boolean` | `false` | Whether to play in reverse, if loop set |
+| easeIn | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easeOut | `boolean` | `false` | If enabled, easing function will be applied over time instead of straight interpolation |
+| easingFunction | `string` | `''` | Allowed values: `'Quadratic'`, `'Cubic'`, `'Quartic'`, `'Quintic'`, `'Sinusoidal'`, `'Exponential'`, `'Circular'`, `'Elastic'`, `'Back'`, `'Bounce'` |
diff --git a/gen/tables/Shadow.md b/gen/tables/Shadow.md
new file mode 100644
index 0000000..bd63d80
--- /dev/null
+++ b/gen/tables/Shadow.md
@@ -0,0 +1,4 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| castShadow | `boolean` | `false` | Indicates if an entity can cast shadows |
+| receiveShadow | `boolean` | `false` | Indicates if an entity can receive shadows |
diff --git a/gen/tables/ShadowMaterial.md b/gen/tables/ShadowMaterial.md
new file mode 100644
index 0000000..11903c7
--- /dev/null
+++ b/gen/tables/ShadowMaterial.md
@@ -0,0 +1,9 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| r | `number` | `0` | Red channel value of the material. A value between 0 and 255. |
+| g | `number` | `0` | Green channel value of the material. A value between 0 and 255. |
+| b | `number` | `0` | Blue channel value of the material. A value between 0 and 255. |
+| opacity | `number` | `0.4` | Overall alpha/transparency of the material. A value between 0 and 1. |
+| side | `string` | `'front'` | Which sides of faces will be rendered. Allowed values: `'front'`, `'back'`, `'double'`. |
+| depthTest | `boolean` | `true` | Whether to test depth when rendering this material |
+| depthWrite | `boolean` | `true` | Whether rendering this material impacts the depth buffer |
diff --git a/gen/tables/SphereGeometry.md b/gen/tables/SphereGeometry.md
new file mode 100644
index 0000000..a41ca99
--- /dev/null
+++ b/gen/tables/SphereGeometry.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| radius | `number` | `0` | Radius of the sphere |
diff --git a/gen/tables/Splat.md b/gen/tables/Splat.md
new file mode 100644
index 0000000..40d9248
--- /dev/null
+++ b/gen/tables/Splat.md
@@ -0,0 +1,4 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| url | `string` | `''` | Source for the Splat in SPZ format |
+| skybox | `boolean` | `false` | Whether the skybox of the splat is visible |
diff --git a/gen/tables/TetrahedronGeometry.md b/gen/tables/TetrahedronGeometry.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/TetrahedronGeometry.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/ThreeObject.md b/gen/tables/ThreeObject.md
new file mode 100644
index 0000000..bf2d2b7
--- /dev/null
+++ b/gen/tables/ThreeObject.md
@@ -0,0 +1,3 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+
diff --git a/gen/tables/TorusGeometry.md b/gen/tables/TorusGeometry.md
new file mode 100644
index 0000000..e1ff058
--- /dev/null
+++ b/gen/tables/TorusGeometry.md
@@ -0,0 +1,4 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| radius | `number` | `0` | Radius of the torus |
+| tubeRadius | `number` | `0` | Inner radius of the torus |
diff --git a/gen/tables/Ui.md b/gen/tables/Ui.md
new file mode 100644
index 0000000..0339f28
--- /dev/null
+++ b/gen/tables/Ui.md
@@ -0,0 +1,66 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| type | `string` | `'overlay'` | Allowed values: `'overlay'`, `'3d'` |
+| font | `string` | `'Nunito'` | Font to use |
+| fontSize | `number` | `16` | Size of the text |
+| position | `string` | `'static'` | Allowed values: `'static'`, `'relative'`, `'absolute'` |
+| opacity | `number` | `1` | Overall opacity of the object |
+| backgroundOpacity | `number` | `-1` | Opacity of the background. -1 is interpreted as 0 if there is no image, and 1 if there is an image. |
+| backgroundSize | `string` | `'contain'` | Sets the size of the background image. Allowed values: `'stretch'`, `'contain'`, `'cover'`, `'nineSlice'`. |
+| background | `string` | `'#ffffff'` | Background color |
+| borderColor | `string` | `'#000000'` | Color of the border |
+| color | `string` | `'#ffffff'` | Foreground (text) color |
+| text | `string` | `''` | Text content of the object |
+| image | `string` | `''` | Image resource |
+| fixedSize | `boolean` | `false` | Determines if size is fixed |
+| width | `string` | `'100'` | Width of the object |
+| height | `string` | `'100'` | Height of the object |
+| top | `string` | `''` | Vertical position from top border |
+| left | `string` | `''` | Horizontal position from left border |
+| bottom | `string` | `''` | Vertical position from bottom border |
+| right | `string` | `''` | Horizontal position from right border |
+| borderRadius | `number` | `0` | Rounds all corners of the element |
+| borderRadiusTopLeft | `string` | `''` | Rounds the top left corner of the element |
+| borderRadiusTopRight | `string` | `''` | Rounds the top right corner of the element |
+| borderRadiusBottomLeft | `string` | `''` | Rounds the bottom left corner of the element |
+| borderRadiusBottomRight | `string` | `''` | Rounds the bottom right corner of the element |
+| alignContent | `string` | `'flex-start'` | Distribution of space between content items. Allowed values: `'flex-start'`, `'center'`, `'flex-end'`, `'stretch'`, `'space-between'`, `'space-around'`. |
+| alignItems | `string` | `'flex-start'` | Alignment of items on the cross axis. Allowed values: `'flex-start'`, `'center'`, `'flex-end'`, `'stretch'`, `'baseline'`. |
+| alignSelf | `string` | `''` | Alignment of an individual flex item. Allowed values: `'auto'`, `'flex-start'`, `'center'`, `'flex-end'`, `'stretch'`, `'baseline'`. |
+| borderWidth | `number` | `0` | Width of the border |
+| columnGap | `string` | `''` | Gap between columns |
+| direction | `string` | `'ltr'` | Text direction. Allowed values: `'inherit'`, `'ltr'`, `'rtl'`. |
+| display | `string` | `''` | Display type of the element. Allowed values: `'flex'`, `'none'`. |
+| flex | `number` | `0` | Flex grow, shrink, and basis shorthand |
+| flexBasis | `string` | `''` | Initial main size of a flex item |
+| flexDirection | `string` | `'row'` | Direction of flex items in the container. Allowed values: `'column'`, `'column-reverse'`, `'row'`, `'row-reverse'`. |
+| flexGrow | `number` | `0` | Defines the ability for a flex item to grow |
+| flexShrink | `number` | `0` | Defines the ability for a flex item to shrink |
+| flexWrap | `string` | `'nowrap'` | Whether flex items wrap. Allowed values: `'nowrap'`, `'wrap'`, `'wrap-reverse'`. |
+| gap | `string` | `''` | Gap between flex items |
+| justifyContent | `string` | `'flex-start'` | Alignment of items on the main axis. Allowed values: `'flex-start'`, `'center'`, `'flex-end'`, `'space-between'`, `'space-around'`, `'space-evenly'`. |
+| margin | `string` | `''` | Margin for all sides around the element |
+| marginBottom | `string` | `''` | Bottom margin |
+| marginLeft | `string` | `''` | Left margin |
+| marginRight | `string` | `''` | Right margin |
+| marginTop | `string` | `''` | Top margin |
+| maxHeight | `string` | `''` | Maximum height of the element |
+| maxWidth | `string` | `''` | Maximum width of the element |
+| minHeight | `string` | `''` | Minimum height of the element |
+| minWidth | `string` | `''` | Minimum width of the element |
+| overflow | `string` | `''` | How content that exceeds the element’s size is handled. Allowed values: `'visible'`, `'hidden'`, `'scroll'`. |
+| padding | `string` | `''` | Padding for all sides inside the element |
+| paddingBottom | `string` | `''` | Bottom padding |
+| paddingLeft | `string` | `''` | Left padding |
+| paddingRight | `string` | `''` | Right padding |
+| paddingTop | `string` | `''` | Top padding |
+| rowGap | `string` | `''` | Gap between rows |
+| textAlign | `string` | `'center'` | Alignment of text within the element. Allowed values: `'left'`, `'right'`, `'center'`, `'justify'`. |
+| stackingOrder | `number` | `0` | Determines the rendering order of UI elements. Elements with higher values are drawn above those with lower values. A value of 0 uses the default behavior, rendering elements according to their order in the scene hierarchy |
+| ignoreRaycast | `boolean` | `false` | Determines whether the UI element should respond to user interactions like clicks or taps |
+| nineSliceBorderTop | `string` | `'0'` | Size of the top border used in nine-slice scaling. Defines how much of the top portion is preserved without scaling. |
+| nineSliceBorderBottom | `string` | `'0'` | Size of the bottom border used in nine-slice scaling. Defines how much of the bottom portion is preserved without scaling. |
+| nineSliceBorderLeft | `string` | `'0'` | Size of the left border used in nine-slice scaling. Defines how much of the left portion is preserved without scaling. |
+| nineSliceBorderRight | `string` | `'0'` | Size of the right border used in nine-slice scaling. Defines how much of the right portion is preserved without scaling. |
+| nineSliceScaleFactor | `number` | `1` | Multiplier applied to the scaled center area in nine-slice backgrounds. Allows finer control over the scaling of the center portion. |
+| verticalTextAlign | `string` | `'start'` | Vertical alignment of the text within the element |
diff --git a/gen/tables/UnlitMaterial.md b/gen/tables/UnlitMaterial.md
new file mode 100644
index 0000000..36661e1
--- /dev/null
+++ b/gen/tables/UnlitMaterial.md
@@ -0,0 +1,21 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| r | `number` | `0` | Red channel value of the material. A value between 0 and 255. |
+| g | `number` | `0` | Green channel value of the material. A value between 0 and 255. |
+| b | `number` | `0` | Blue channel value of the material. A value between 0 and 255. |
+| textureSrc | `string` | `''` | The color map source, modulated by color (if set) |
+| opacity | `number` | `1` | Overall alpha/transparency of the material. A value between 0 and 1. |
+| side | `string` | `'front'` | Which sides of faces will be rendered. Allowed values: `'front'`, `'back'`, or `'double'`. |
+| opacityMap | `string` | `''` | Alpha/transparency mapped via a texture resource |
+| blending | `string` | `'normal'` | Blending to use when displaying objects with this material. Allowed values: `'no'`, `'normal'`, `'additive'`, `'subtractive'`, `'multiply'`. |
+| repeatX | `number` | `1` | How many times a texture is repeated across a material on the X axis |
+| repeatY | `number` | `1` | How many times a texture is repeated across a material on the Y axis |
+| offsetX | `number` | `0` | How much a texture is offset across a material on the X axis |
+| offsetY | `number` | `0` | How much a texture is offset across a material on the Y axis |
+| wrap | `string` | `'repeat'` | Wrapping mode for textures. Allowed values: `'clamp'`, `'repeat'`, `'mirroredRepeat'`. |
+| depthTest | `boolean` | `true` | Whether to test depth when rendering this material |
+| depthWrite | `boolean` | `true` | Whether rendering this material impacts the depth buffer |
+| wireframe | `boolean` | `false` | Render geometry as wireframe |
+| forceTransparent | `boolean` | `false` | Whether to force the alpha channel to render as transparent |
+| textureFiltering | `string` | `'smooth'` | Texture filtering mode. Allowed values: `'smooth'`, `'sharp'`. |
+| mipmaps | `boolean` | `true` | Whether to generate mipmaps for textures |
diff --git a/gen/tables/VideoControls.md b/gen/tables/VideoControls.md
new file mode 100644
index 0000000..06c18b8
--- /dev/null
+++ b/gen/tables/VideoControls.md
@@ -0,0 +1,11 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| volume | `number` | `1` | How loud the video will be played. A value between 0 and 1. |
+| loop | `boolean` | `false` | Whether the video restarts after it finishes |
+| paused | `boolean` | `false` | Whether the video is currently paused |
+| speed | `number` | `1` | The playback speed of the video. Also affects the pitch of the audio. |
+| positional | `boolean` | `false` | Whether the video's audio is played positionally |
+| refDistance | `number` | `1` | **Only applied if positional is true.** The distance at which the audio begins to diminish in volume. Must be a non-negative value. |
+| distanceModel | `string` | `'inverse'` | **Only applied if positional is true.** The algorithm used to reduce volume as the distance increases between this video source and the listener. Allowed values: `'linear'`, `'inverse'`, `'exponential'` |
+| rolloffFactor | `number` | `1` | **Only applied if positional is true.** How quickly volume is reduced as distance increases. The acceptable range varies depending on the distanceModel: linear: 0–1, inverse: 0–∞, exponential: 0–∞ |
+| maxDistance | `number` | `10000` | **Only applied if positional is true** and distanceModel is `'linear'`. Beyond this distance, the volume will not reduce further. Must be a positive value. |
diff --git a/gen/tables/VideoMaterial.md b/gen/tables/VideoMaterial.md
new file mode 100644
index 0000000..2121bdc
--- /dev/null
+++ b/gen/tables/VideoMaterial.md
@@ -0,0 +1,7 @@
+Property | Type | Default | Description
+| :- | :- | :- | :- |
+| r | `number` | `0` | Red channel value of the material. A value between 0 and 255. |
+| g | `number` | `0` | Green channel value of the material. A value between 0 and 255. |
+| b | `number` | `0` | Blue channel value of the material. A value between 0 and 255. |
+| textureSrc | `string` | `''` | The color map source, modulated by color (if set) |
+| opacity | `number` | `1` | Overall alpha/transparency of the material. A value between 0 and 1. |
diff --git a/menu.mdx b/menu.mdx
new file mode 100644
index 0000000..ff3f788
--- /dev/null
+++ b/menu.mdx
@@ -0,0 +1,126 @@
+---
+slug: '/'
+sidebar_class_name: hidden
+title: 8th Wall Documentation
+description: 8th Wall Documentation
+---
+
+````mdx-code-block
+
+
+ # Documentation
+
+ 8th Wall provides the complete solution to create WebAR, WebVR experiences
+ and 3D games that run directly in a web browser.
+
+ ## 8th Wall Studio
+
+ 8th Wall Studio is a real-time visual editor and game engine that combines the speed and power of the
+ web with the tools needed to create hyper-immersive 3D and XR experiences.
+ Studio has a visual 3D editor interface to create XR and web games across devices.
+
+
+
+ ## 8th Wall Engine
+
+ The 8th Wall AR Engine is a complete implementation of 8th Wall's Simultaneous Localization and Mapping (SLAM) engine, hyper-optimized for real-time WebAR on browsers. AR features include World Tracking, Image Targets, Face Effects,
+ and Sky Segmentation.
+
+ The engine is built-in to Studio projects, and is also easily integrated into modern 3D JavaScript frameworks such as [A-Frame](), [three.js](), [PlayCanvas](), and [Babylon.js]().
+
+
+````
diff --git a/sidebars.js b/sidebars.js
index 9bd576a..3bcd708 100644
--- a/sidebars.js
+++ b/sidebars.js
@@ -1,6 +1,8 @@
/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
// Docs are currently disabled (docs: false in docusaurus.config.js).
// Add sidebar entries here when the /docs section is ready.
-const sidebars = {};
+const sidebars = {
+ docsSidebar: [{ type: 'autogenerated', dirName: '.' }],
+};
module.exports = sidebars;
diff --git a/src/components/_ecs-types.mdx b/src/components/_ecs-types.mdx
new file mode 100644
index 0000000..cc4e739
--- /dev/null
+++ b/src/components/_ecs-types.mdx
@@ -0,0 +1,18 @@
+:::info
+
+Currently, **storing dynamically sized objects or lists isn’t supported**. We’re actively exploring this feature and [**would love to hear about your specific use cases**](https://forum.8thwall.com/).
+
+:::
+
+The following data types are useful for creating Schema properties on a Custom Component or references to a specific type.
+
+| Type | Description |
+|-------------|------------------------------|
+| ecs.eid | Entity Reference |
+| ecs.f32 | 32-bit floating-point number |
+| ecs.f64 | 64-bit floating-point number |
+| ecs.i32 | 32-bit integer |
+| ecs.ui8 | 8-bit unsigned integer |
+| ecs.ui32 | 32-bit unsigned integer |
+| ecs.string | String |
+| ecs.boolean | Boolean |
\ No newline at end of file
diff --git a/src/components/_input-events.mdx b/src/components/_input-events.mdx
new file mode 100644
index 0000000..7bd9c13
--- /dev/null
+++ b/src/components/_input-events.mdx
@@ -0,0 +1,247 @@
+import {EventExample} from '@site/src/components/event-example'
+
+## Types
+
+### Position
+
+| Property | Type | Description |
+| -------- | ------- | ----------------------------------------------------------- |
+| x        | float   | The x-coordinate on the screen, normalized between 0 and 1. |
+| y        | float   | The y-coordinate on the screen, normalized between 0 and 1. |
+
+### UIHoverEvent
+
+| Property | Type | Description |
+| -------- | ------- | ----------------------------------------------------------- |
+| x        | float   | The x-coordinate on the screen, normalized between 0 and 1. |
+| y        | float   | The y-coordinate on the screen, normalized between 0 and 1. |
+| targets | eid[] | The target UI element(s) |
+
+### TouchEvent
+
+| Property | Type | Description |
+| ------------- | -------- | ----------------------------------------------------------------------------------------------------- |
+| pointerId | integer | unique ID for the pointer, provided by the browser. |
+| position | Position | Touched position coordinates on the screen, normalized between 0 and 1. |
+| worldPosition | Vector3  | The position where the touch event hit in the world. _only available on `SCREEN_TOUCH_START`._ |
+| target        | eid      | eid of the initially touched object |
+| start | Position | The position coordinates where the touch event started on the screen, normalized between 0 and 1. |
+| change        | Position | The change in the touch position since the last update on the screen, normalized between 0 and 1. |
+
+### GestureEvent
+
+| Property | Type | Description |
+| -------------- | -------- | ----------------------------------------------------------------------------- |
+| touchCount | integer | The number of points contributing to the gesture. |
+| position | Position | Touched position coordinates on the screen, normalized between 0 and 1. |
+| startPosition | Position | The position coordinates where the event started, normalized between 0 and 1. |
+| positionChange | Position | The position coordinates since the last change, normalized between 0 and 1. |
+| spread         | float    | The average distance of the pointers from the center point. |
+| startSpread | float | The first spread emitted in start. |
+| spreadChange | float | The spread value since the last change. |
+| nextTouchCount | integer | On end, the number of pointers involved in the following gesture |
+
+### GamepadEvent
+
+| Property | Type | Description |
+| -------------- | --------------------------------------------------------------------- | -------------------- |
+| gamepad | [Gamepad](https://developer.mozilla.org/en-US/docs/Web/API/Gamepad) | The gamepad object |
+
+## Events
+
+### UI_CLICK
+
+Emitted when both the press (`UI_PRESSED`) and release (`UI_RELEASED`) occur on the same UI element. This event represents a complete click or tap gesture. It is dispatched on the element where both interactions overlapped and is typically used for confirming user intention, such as activating a button or triggering an action.
+The event will be dispatched on the lowest common ancestor of the start (pressed) and end (released) eids. Mouse movement does not affect click events.
+
+Event payload is type `Position`.
+
+#### Example
+
+
+
+### UI_PRESSED
+
+Emitted when the user initiates a touch or pointer-down interaction on a UI element. It is dispatched only on the exact element that was directly pressed and does not bubble to parent elements. This event is useful for triggering immediate visual feedback or interaction states (such as button highlights or animations) at the start of user input.
+
+Event payload is type `TouchEvent`.
+
+#### Example
+
+
+
+### UI_RELEASED
+
+Emitted when the pointer is lifted after a `UI_PRESSED`. It is always dispatched on the same UI element that was initially pressed, regardless of where the pointer is released. This allows developers to respond to the completion of a press interaction, even if the pointer moved away from the original target.
+
+Event payload is type `TouchEvent`.
+
+#### Example
+
+
+
+### UI_HOVER_START
+
+Emits when the mouse begins hovering over a UI element.
+
+Event payload is type `UIHoverEvent`.
+
+#### Example
+
+
+
+### UI_HOVER_END
+
+Emits when the mouse stops hovering over a UI element.
+
+Event payload is type `UIHoverEvent`.
+
+#### Example
+
+
+
+:::note
+
+- Multiple touch points can be active simultaneously.
+- Only one touch gesture (single or multitouch) will be recognized as active at a time.
+ :::
+
+:::info
+If a touch event has a target, it will be emitted on that target and propagate up to its parent elements and eventually to the global level. This means a touch listener on a parent object will capture events from all its child elements.
+:::
+
+### SCREEN_TOUCH_START
+
+Emits when the user initially touches or clicks the screen or target object.
+
+Event payload is type `TouchEvent`.
+
+#### Example
+
+
+
+### SCREEN_TOUCH_MOVE
+
+Event payload is type `TouchEvent`.
+
+Emits when the user clicks and drags or moves their finger on the screen.
+
+#### Example
+
+
+
+### SCREEN_TOUCH_END
+
+Emits when the user stops clicking or lifts their finger off the screen.
+
+Event payload is type `TouchEvent`.
+
+#### Example
+
+
+
+:::info
+Gesture events are emitted when the user makes a "gesture" on the phone screen. A gesture is any action that requires multiple fingers. If the user starts with a "zoom" action (2 fingers moving away from each other) and then adds another finger to the screen, the "zoom" gesture will end and a new one will start with 3 fingers.
+:::
+
+### GESTURE_START
+
+Emits when the user begins a multi-finger gesture on the screen.
+
+Event payload is type `GestureEvent`.
+
+#### Example
+
+
+
+### GESTURE_MOVE
+
+Emits when the user moves their finger(s) on the screen.
+
+Event payload is type `GestureEvent`.
+
+#### Example
+
+
+
+### GESTURE_END
+
+Emits when the number of fingers changes from the previous gesture check.
+
+Event payload is type `GestureEvent`.
+
+#### Example
+
+
+
+### GAMEPAD_CONNECTED
+
+Emits when a gamepad is connected to the device.
+
+Event payload is type `GamepadEvent`.
+
+#### Example
+
+
+
+### GAMEPAD_DISCONNECTED
+
+Emits when a gamepad is disconnected from the device.
+
+Event payload is type `GamepadEvent`.
+
+#### Example
+
+
diff --git a/src/components/component-functions.tsx b/src/components/component-functions.tsx
new file mode 100644
index 0000000..991b4c7
--- /dev/null
+++ b/src/components/component-functions.tsx
@@ -0,0 +1,203 @@
+import React from 'react'
+
+// @ts-ignore
+import CodeBlock from '@theme/CodeBlock'
+// @ts-ignore
+import Heading from '@theme/Heading'
+
+function isCodeLiteral(str: string) {
+ // matches things like ecs.Foo.Bar or MyEnum.VALUE
+ return /^[A-Za-z_$][\w$]*(\.[A-Za-z_$][\w$]*)+$/.test(str)
+}
+
+function formatProperties(obj: any, indent = 1) {
+ const nextIndent = ' '.repeat(indent + 1)
+ const prevIndentStr = ' '.repeat(Math.max(indent - 2, 0))
+
+ if (Array.isArray(obj)) {
+ const items = obj
+ .map(item => formatProperties(item, indent + 2))
+ .join(`,\n${nextIndent}`)
+ return `[\n${nextIndent}${items}\n${prevIndentStr}]`
+ } else if (typeof obj === 'object' && obj !== null) {
+ const entries = Object.entries(obj)
+ .map(([key, value]) => `${nextIndent}${key}: ${formatProperties(value, indent + 2)}`)
+ .join(',\n')
+ return `{\n${entries}\n${prevIndentStr}}`
+ } else if (typeof obj === 'string') {
+ // if it looks like code, return raw; otherwise quote
+ return isCodeLiteral(obj) ? obj : `'${obj.replace(/'/g, "\\'")}'`
+ } else {
+ return String(obj)
+ }
+}
+
+const ComponentFunctions = (props: any) => {
+ const {name, properties, mutate, showExamples = true} = props
+ const propertiesString = properties && Object.keys(properties).length > 0
+ ? formatProperties(properties)
+ : '{}'
+ const defaultMutateFunction = 'cursor.width += 1; return false;'
+ const mutateFunctionCode = mutate || defaultMutateFunction
+
+ return (
+ <>
+ Get
+
+