Work

parent 7491df4be5
commit d639b02a27

main.typ | 4

@@ -1,5 +1,4 @@
#import "@preview/chic-hdr:0.4.0": *
-#import "@preview/i-figured:0.2.4"
#import "template.typ"

#show: doc => template.phd(doc)

@@ -20,9 +19,6 @@

#include "introduction/main.typ"

-#show heading: i-figured.reset-counters
-#show figure: i-figured.show-figure
-
#show: chic.with(
  odd: (
    chic-footer(

@@ -1,3 +1,5 @@
+#import "@preview/cetz:0.2.2"
+
== 3D streaming

In this thesis, we focus on the objective of delivering massive 3D scenes over the network.

@@ -16,62 +18,57 @@ Since our objective is to stream 3D static scenes, single-rate mesh and mesh seq
This section thus focuses on progressive meshes and random-accessible mesh compression.

Progressive meshes were introduced in @progressive-meshes and allow progressive transmission of a mesh by first sending a low-resolution version, called the _base mesh_, and then transmitting detail information that a client can use to increase the resolution.
-To do so, an algorithm, called _decimation algorithm_, starts from the original full resolution mesh and iteratively removes vertices and faces by merging vertices through the so-called _edge collapse_ operation (Figure X).
+To do so, a _decimation algorithm_ starts from the original full-resolution mesh and iteratively removes vertices and faces by merging vertices through the so-called _edge collapse_ operation (@rw:vertex-split).

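To make the decimation step concrete, here is a minimal illustrative sketch in Python (not the thesis implementation; the mesh layout, a dictionary of vertex positions plus faces stored as lists of vertex indices, and all names are assumptions made for the example):

```python
# Illustrative sketch of one decimation step: collapse edge (u, v) into u,
# remove the two incident faces, and record what is needed to undo it later.
from dataclasses import dataclass, field

@dataclass
class VertexSplit:
    u: int                     # surviving vertex
    v: int                     # removed vertex, reintroduced by the split
    v_position: tuple          # position of v in the higher-resolution mesh
    removed_faces: list        # the two faces deleted by the collapse
    rewired_faces: list = field(default_factory=list)  # faces re-pointed from v to u

def collapse_edge(mesh, u, v):
    """Merge v into u: remove the two incident faces, re-point the other
    faces of v to u, and return the record needed to undo the operation."""
    record = VertexSplit(u, v, mesh.positions[v], [])
    for face in list(mesh.faces):
        if u in face and v in face:
            record.removed_faces.append(face)   # face spans the collapsed edge
            mesh.faces.remove(face)
        elif v in face:
            record.rewired_faces.append(face)   # face only touches v
            face[face.index(v)] = u             # re-point it to the survivor
    del mesh.positions[v]                       # v is gone at this resolution
    return record
```

Recording one such `VertexSplit` per collapse is what later allows a client to undo the operations one by one.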
-// \begin{figure}[ht]
-// \centering
-// \begin{tikzpicture}[scale=2]
-// \node (Top1) at (0.5, 1) {};
-// \node (A) at (0, 0.8) {};
-// \node (B) at (1, 0.9) {};
-// \node (C) at (1.2, 0) {};
-// \node (D) at (0.9, -0.8) {};
-// \node (E) at (0.2, -0.9) {};
-// \node (F) at (-0.2, 0) {};
-// \node (G) at (0.5, 0.5) {};
-// \node (H) at (0.6, -0.5) {};
-// \node (Bottom1) at (0.5, -1) {};
-//
-// \node (Top2) at (3.5, 1) {};
-// \node (A2) at (3, 0.8) {};
-// \node (B2) at (4, 0.9) {};
-// \node (C2) at (4.2, 0) {};
-// \node (D2) at (3.9, -0.8) {};
-// \node (E2) at (3.2, -0.9) {};
-// \node (F2) at (2.8, 0) {};
-// \node (G2) at (3.55, 0) {};
-// \node (Bottom2) at (3.5, -1) {};
-//
-// \draw (A.center) -- (B.center) -- (C.center) -- (D.center) -- (E.center) -- (F.center) -- (A.center);
-// \draw (A.center) -- (G.center);
-// \draw (B.center) -- (G.center);
-// \draw (C.center) -- (G.center);
-// \draw (F.center) -- (G.center);
-// \draw (C.center) -- (H.center);
-// \draw (F.center) -- (H.center);
-// \draw (E.center) -- (H.center);
-// \draw (D.center) -- (H.center);
-// \draw[color=red, line width=1mm] (G.center) -- (H.center);
-//
-// \draw (A2.center) -- (B2.center) -- (C2.center) -- (D2.center) -- (E2.center) -- (F2.center) -- (A2.center);
-// \draw (A2.center) -- (G2.center);
-// \draw (B2.center) -- (G2.center);
-// \draw (C2.center) -- (G2.center);
-// \draw (F2.center) -- (G2.center);
-// \draw (E2.center) -- (G2.center);
-// \draw (D2.center) -- (G2.center);
-// \node at (G2) [circle,fill=red,inner sep=2pt]{};
-//
-// \draw[-{Latex[length=3mm]}] (Top1) to [out=30, in=150] (Top2);
-// \draw[-{Latex[length=3mm]}] (Bottom2) to [out=-150, in=-30] (Bottom1);
-//
-// \node at (2, 1.75) {Edge collapse};
-// \node at (2, -1.75) {Vertex split};
-//
-//
-// \end{tikzpicture}
-// \caption{Vertex split and edge collapse\label{sote:progressive-scheme}}
-// \end{figure}
+#figure(
+  cetz.canvas({
+    import cetz.draw: *
+
+    scale(2)
+
+    let polygon = (
+      (0, 0.8),
+      (1, 0.9),
+      (1.2, 0),
+      (0.9, -0.8),
+      (0.2, -0.9),
+      (-0.2, 0),
+    )
+
+    let split = ((0.5, 0.5), (0.6, -0.5))
+
+    let polygon2 = polygon.map((arr) => (arr.at(0) + 3.0, arr.at(1)))
+
+    line(..polygon, close: true)
+    line(polygon.at(0), split.at(0))
+    line(polygon.at(1), split.at(0))
+    line(polygon.at(2), split.at(0))
+    line(polygon.at(3), split.at(1))
+    line(polygon.at(4), split.at(1))
+    line(polygon.at(5), split.at(1))
+
+    line(polygon.at(5), split.at(0))
+    line(polygon.at(2), split.at(1))
+
+    line(split.at(0), split.at(1), stroke: (paint: red, thickness: 2pt))
+
+
+    let center = (3.55, 0);
+    line(..polygon2, close: true)
+
+    for point in polygon2 {
+      line(point, center)
+    }
+
+    circle(center, radius: 1.5pt, stroke: none, fill: red)
+
+    content((2, 1.75), [Edge collapse])
+    bezier((0.5, 1), (3.5, 1), (2, 2), mark: (end: (symbol: "stealth")))
+    bezier((0.5, -1), (3.5, -1), (2, -2), mark: (start: (symbol: "stealth")))
+    content((2, -1.75), [Vertex split])
+  }),
+  caption: [Vertex split and edge collapse]
+)<rw:vertex-split>

Every time two vertices are merged, a vertex and two faces are removed from the original mesh, decreasing the model resolution.
At the end of this content preparation phase, the mesh has been reorganized into a base mesh and a sequence of partially ordered vertex split operations.
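On the client side, the same records are replayed in reverse; below is a sketch of this progressive refinement, under the same assumed mesh layout as the sketch above (illustrative only, not the thesis implementation):

```python
# Illustrative client-side refinement: replay VertexSplit records streamed
# after the base mesh to progressively restore resolution.
def apply_vertex_split(mesh, record):
    """Undo one edge collapse: reintroduce v and restore its faces."""
    mesh.positions[record.v] = record.v_position
    for face in record.rewired_faces:
        face[face.index(record.u)] = record.v   # give these faces back to v
    mesh.faces.extend(record.removed_faces)     # restore the two deleted faces

def refine(mesh, split_stream):
    # The stream is only partially ordered: a split is applicable as soon as
    # the splits that created its vertex u have already been applied, so the
    # mesh can be rendered between any two operations.
    for record in split_stream:
        apply_vertex_split(mesh, record)
```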

@@ -179,10 +176,10 @@ Their main goal is to display 3D objects on top of regular maps, and their visua
#figure(
  image("../assets/related-work/3d-streaming/3dtiles.png", width: 80%),
  caption: [Screenshot of 3D Tiles interface]
-)
+)<rw:3d-tiles>

3D Tiles, as its name suggests, is based on a spatial partitioning of the scene.
-It started with a regular octree, but has then been improved to a $k$-d tree (see Figure~\ref{sote:3d-tiles-partition}).
+It started with a regular octree, but has since been improved to a $k$-d tree (see @rw:3d-tiles).

#grid(
  columns: (1fr, 0.1fr, 1fr),

@@ -200,7 +197,7 @@ It started with a regular octree, but has then been improved to a $k$-d tree (se
  )
)

-In~\citeyear{3d-tiles-10x}, 3D Tiles streaming system was improved by preloading the data at the camera's next position when known in advance (with ideas that are similar to those we discuss and implement in Chapter~\ref{bi}, published in~\citeyear{bookmarks-impact}) and by ordering tile requests depending on the user's position (with ideas that are similar to those we discuss and implement in Chapter~\ref{d3}, published in~\citeyear{dash-3d}).
+In @3d-tiles-10x, the 3D Tiles streaming system was improved by preloading the data at the camera's next position when it is known in advance (with ideas similar to those we discuss and implement in @bi, published in @bookmarks-impact) and by ordering tile requests depending on the user's position (with ideas similar to those we discuss and implement in @d3, published in @dash-3d).

@zampoglou is another example of a streaming framework: it is the first paper that proposes to use DASH to stream 3D content.
In their work, the authors describe a system that allows users to access 3D content at multiple resolutions.
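The preloading and request-ordering ideas can be sketched as follows (illustrative only; the tile dictionaries, the `center` field and the prefetch budget are assumptions made for the example, not part of the 3D Tiles specification):

```python
# Illustrative sketch: order pending tile requests by distance to the current
# viewpoint, and promote tiles around a known future camera position.
import math

def order_tile_requests(pending, camera, next_camera=None, prefetch_budget=8):
    """Closest tiles to the current viewpoint first, with tiles around a
    known future camera position promoted to the front of the queue."""
    by_current = sorted(pending, key=lambda t: math.dist(camera, t["center"]))
    if next_camera is None:
        return by_current
    prefetch = sorted(pending, key=lambda t: math.dist(next_camera, t["center"]))
    prefetch = prefetch[:prefetch_budget]        # preload a few future tiles
    return prefetch + [t for t in by_current if t not in prefetch]
```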

@@ -66,13 +66,13 @@ A thorough review is beyond the scope of this state-of-the-art, but examples inc
=== DASH-SRD
Now widely adopted in the context of video streaming, DASH has been adapted to various other contexts.
DASH-SRD (Spatial Relationship Description, @dash-srd) is a feature that extends the DASH standard to allow streaming only a spatial subpart of a video to a device.
-It works by encoding a video at multiple resolutions, and tiling the highest resolutions as shown in Figure \ref{sota:srd-png}.
+It works by encoding a video at multiple resolutions and tiling the highest resolutions, as shown in @rw:srd.
That way, a client can choose to download either the low resolution of the whole video or higher resolutions of a subpart of the video.

#figure(
  image("../assets/related-work/video/srd.png", width: 60%),
  caption: [DASH-SRD @dash-srd],
-)
+)<rw:srd>

For each tile of the video, an adaptation set is declared in the MPD, and a supplemental property is defined to give the client information about the tile.
This supplemental property contains many elements, but the most important ones are the position ($x$ and $y$) and the size (width and height) of the tile in relation to the full video.

@@ -90,6 +90,6 @@ An example of such a property is given in @rw:srd-xml.
)<rw:srd-xml>

Essentially, this feature is a way of achieving view-dependent streaming, since the client only displays a part of the video and can avoid downloading content that will not be displayed.
-While Figure \ref{sota:srd-png} illustrates how DASH-SRD can be used in the context of zoomable video streaming, the ideas developed in DASH-SRD have proven to be particularly useful in the context of 360 video streaming (see for example @ozcinar2017viewport).
+While @rw:srd illustrates how DASH-SRD can be used in the context of zoomable video streaming, the ideas developed in DASH-SRD have proven particularly useful in the context of 360 video streaming (see for example @ozcinar2017viewport).
This is especially interesting in the context of 3D streaming, since we have the same pattern of a user viewing only a part of the content.
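As a concrete illustration of how a client can exploit these fields, the sketch below keeps only the tiles whose SRD rectangle intersects a region of interest; only the $(x, y, w, h)$ semantics come from DASH-SRD, the data layout is an assumption for the example:

```python
# Illustrative sketch: select the tiles whose SRD rectangle (x, y, w, h)
# intersects the client's region of interest, all expressed in the same
# SRD reference space.
def tiles_for_region(tiles, roi):
    rx, ry, rw, rh = roi                        # region of interest rectangle
    selected = []
    for t in tiles:
        overlap_x = t["x"] < rx + rw and rx < t["x"] + t["w"]
        overlap_y = t["y"] < ry + rh and ry < t["y"] + t["h"]
        if overlap_x and overlap_y:
            selected.append(t)                  # this tile must be requested
    return selected
```

Tiles outside the region can then be skipped entirely, or requested only at the lowest resolution.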