bool:
- for d_id in darts_list:
- if isValidAction(m, d_id, 4)[0]:
+ # Check that all angles are greater than 5°
+ if angle_A <= 5 or angle_B <= 5 or angle_C <= 5:
return False
- return True
\ No newline at end of file
+ return True
+
+ def isTruncated(self, darts_list) -> bool:
+ for d_id in darts_list:
+ if self.isValidAction(d_id, 4)[0]: # if one action is valid, it is valid both topologically and geometrically, so there is no need to check each separately
+ return False
+ return True
+
+
+ # def is_star_vertex(self, n1:Node, new_coordinates, plot=False):
+ # #plot_mesh(self.mesh)
+ #
+ # # Retrieve all neighboring vertices in order
+ # d = n1.get_dart()
+ # d2 = d.get_beta(2)
+ # n_start = d2.get_node() #First neighbour to retrieve
+ #
+ # adj_nodes = [n_start]
+ # nodes_coord = [[n_start.x(), n_start.y()]]
+ #
+ # d = d2.get_beta(1)
+ # d2 = d.get_beta(2)
+ # n = d2.get_node()
+ #
+ # # As long as we haven't returned to the first neighbor, we keep searching.
+ # # This works because the collapse action is restricted to inner darts that are not connected to a boundary node.
+ # # Therefore, we are guaranteed to find the first vertex by following the beta1 and beta2 relations.
+ #
+ # while n != n_start:
+ # adj_nodes.append(n)
+ # nodes_coord.append([n.x(), n.y()])
+ # d = d2.get_beta(1)
+ # d2 = d.get_beta(2)
+ # n = d2.get_node()
+ #
+ # nodes_coord = np.array(nodes_coord)
+ #
+ # # Create a Polygon with shapely package
+ # poly = Polygon(nodes_coord)
+ # Create the point for which we want to check the star property
+ # point_v = Point(new_coordinates)
+ #
+ # if plot :
+ # plt.figure(figsize=(6, 6))
+ # # Polygon
+ # x, y = poly.exterior.xy
+ # plt.fill(x, y, alpha=0.3, edgecolor='red', facecolor='lightcoral',
+ # label='Polygon formed by neighbouring vertices')
+ #
+ # # Neighbours
+ # plt.scatter(nodes_coord[:, 0], nodes_coord[:, 1], color='blue', zorder=5, label='Neighbours')
+ #
+ # # Tested vertex
+ # plt.scatter(new_coordinates[0], new_coordinates[1], color='green', s=100, zorder=5, label='Vertex to test')
+ #
+ # plt.legend()
+ # plt.gca().set_aspect('equal')
+ # plt.show()
+ #
+ # # If the polygon is convex
+ # if poly.is_valid and poly.is_simple and poly.convex_hull.equals(poly):
+ # return True
+ # p_before = None
+ # # If concave: check visibility
+ # for p in poly.exterior.coords[:-1]:
+ # full_seg = LineString([new_coordinates, p])
+ # new_seg_end = full_seg.interpolate(full_seg.length - 1e-5)
+ # seg = LineString([new_coordinates, new_seg_end])
+ # if not poly.contains_properly(seg):
+ # return False
+ # elif seg.crosses(poly.boundary):
+ # return False
+ # elif seg.touches(poly.boundary):
+ # return False
+ # elif p_before is not None: # test collinearity of two vectors
+ # v1 = new_coordinates[0]-p[0], new_coordinates[1]-p[1]
+ # v2 = new_coordinates[0]-p_before[0], new_coordinates[1]-p_before[1]
+ #
+ # det = v1[0] * v2[1] - v1[1] * v2[0]
+ # if -1e-5 < det < 1e-5:
+ # return False
+ # p_before = p
+ # return True
+
+ # def is_star_vertex2(self, n1:Node, n2:Node, v):
+ # #plot_mesh(self.mesh)
+ #
+ # adj_nodes = []
+ # nodes_coord = []
+ # d = n1.get_dart()
+ # d2 = d.get_beta(2)
+ # n = d2.get_node()
+ # while n != n2:
+ # adj_nodes.append(n)
+ # nodes_coord.append([n.x(), n.y()])
+ # d = d2.get_beta(1)
+ # d2 = d.get_beta(2)
+ # n = d2.get_node()
+ # d = d.get_beta(1)
+ # d2 = d.get_beta(2)
+ # n = d2.get_node()
+ # while n != n1:
+ # if n not in adj_nodes:
+ # adj_nodes.append(n)
+ # nodes_coord.append([n.x(), n.y()])
+ # d = d2.get_beta(1)
+ # d2 = d.get_beta(2)
+ # n = d2.get_node()
+ # d = d.get_beta(1)
+ # d2 = d.get_beta(2)
+ # n = d2.get_node()
+ # while n != n2:
+ # if n not in adj_nodes:
+ # adj_nodes.append(n)
+ # nodes_coord.append([n.x(), n.y()])
+ # d = d2.get_beta(1)
+ # d2 = d.get_beta(2)
+ # n = d2.get_node()
+ #
+ # nodes_coord = np.array(nodes_coord)
+ #
+ # # Create the polygon
+ # poly = Polygon(nodes_coord)
+ # point_v = Point(v)
+ #
+ # # Check whether the polygon is convex
+ # if poly.is_valid and poly.is_simple and poly.convex_hull.equals(poly):
+ # return True
+ #
+ # # If concave: check visibility
+ # for p in poly.exterior.coords[:-1]:
+ # seg = LineString([v, p])
+ # if not poly.contains(seg):
+ # # plt.figure(figsize=(6, 6))
+ # # # Polygon
+ # # x, y = poly.exterior.xy
+ # # plt.fill(x, y, alpha=0.3, edgecolor='red', facecolor='lightcoral',
+ # # label='Polygon formed by neighbouring vertices')
+ # #
+ # # # Neighbours
+ # # plt.scatter(nodes_coord[:, 0], nodes_coord[:, 1], color='blue', zorder=5, label='Neighbours')
+ # #
+ # # # Tested vertex
+ # # plt.scatter(v[0], v[1], color='green', s=100, zorder=5, label='Vertex to test')
+ # #
+ # # plt.legend()
+ # # plt.gca().set_aspect('equal')
+ # # plt.show()
+ # return False
+ # return True
+
+ # def find_star_vertex2(self, n1:Node, n2:Node) -> (float, float):
+ # adj_nodes = []
+ # nodes_coord = []
+ # for d_info in self.mesh.active_darts():
+ # if d_info[3] == n1.id or d_info[3] == n2.id:
+ # d2 = Dart(self.mesh, d_info[2])
+ # if d2 is not None:
+ # n = d2.get_node()
+ # adj_nodes.append(n)
+ # nodes_coord.append([n.x(), n.y()])
+ # else:
+ # raise ValueError("Collapse action may not be done near boundary")
+ # nodes_coord = np.array(nodes_coord)
+ #
+ # # Order the neighbours around v
+ # vectors = nodes_coord - v
+ # angles = np.arctan2(vectors[:, 1], vectors[:, 0])
+ # order = np.argsort(angles)
+ # neighbors_ordered = nodes_coord[order]
+ #
+ # hull = ConvexHull(nodes_coord)
+ # delaunay = Delaunay(nodes_coord)
+ # plt.plot(nodes_coord[:, 0], nodes_coord[:, 1], 'o')
+ #
+ # for simplex in hull.simplices:
+ # plt.plot(nodes_coord[simplex, 0], nodes_coord[simplex, 1], 'k-')
+ # plt.plot(nodes_coord[hull.vertices, 0], nodes_coord[hull.vertices, 1], 'r--', lw=2)
+ # plt.plot(nodes_coord[hull.vertices[0], 0], nodes_coord[hull.vertices[0], 1], 'ro')
+ # plt.show()
+ #
+ # _ = scipy.spatial.delaunay_plot_2d(delaunay)
+ # plt.show()
+ #
+ # mid = np.array([(n1.x() + n2.x()) / 2, (n1.y() + n2.y()) / 2])
+ # is_star_vertex = delaunay.find_simplex(mid) >=0
+ #
+ # # Compute the angles of the neighbours around v
+ # vectors = nodes_coord - mid
+ # angles = np.arctan2(vectors[:, 1], vectors[:, 0])
+ # order = np.argsort(angles)
+ # neighbors_ordered = nodes_coord[order]
+ #
+ # # Build the polygon
+ # poly = Polygon(neighbors_ordered)
+ #
+ # # Check whether v is inside or on the boundary
+ # point_v = Point(mid)
+ # is_star = poly.contains(point_v) or poly.touches(point_v)
+ #
+ # plt.figure(figsize=(6, 6))
+ # # Polygon
+ # x, y = poly.exterior.xy
+ # plt.fill(x, y, alpha=0.3, edgecolor='red', facecolor='lightcoral', label='Polygon formed by the neighbours')
+ #
+ # # Neighbours
+ # plt.scatter(nodes_coord[:, 0], nodes_coord[:, 1], color='blue', zorder=5, label='Neighbours')
+ #
+ # # Tested vertex
+ # plt.scatter(mid[0], mid[1], color='green', s=100, zorder=5, label='Tested vertex')
+ #
+ # plt.legend()
+ # plt.gca().set_aspect('equal')
+ # plt.title(f"Is the vertex star-shaped? {is_star}")
+ # plt.show()
+ #
+ # if is_star:
+ # return mid
+ # elif poly.contains(Point(n1.x(), n1.y())) or poly.touches(Point(n1.x(), n1.y())):
+ # return n1.x(), n1.y()
+ # elif poly.contains(Point(n2.x(), n2.y())) or poly.touches(Point(n2.x(), n2.y())):
+ # return n2.x(), n2.y()
+ # else:
+ # plot_mesh(self.mesh)
+ # raise ValueError("No star vertex was found")
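The commented-out routines above all come down to one geometric question: does a candidate position lie in the kernel of the polygon formed by its ordered neighbours, i.e. is every polygon vertex visible from it? Below is a minimal standalone sketch of that test with shapely; the helper name sees_all_vertices and the eps tolerance are illustrative and not part of the project API.

from shapely.geometry import LineString, Point, Polygon

def sees_all_vertices(candidate, neighbour_coords, eps=1e-9):
    # True if `candidate` lies in the kernel of the polygon built from the
    # ordered neighbour coordinates, i.e. every polygon vertex is visible from it.
    poly = Polygon(neighbour_coords)
    if not (poly.is_valid and poly.is_simple):
        return False
    if poly.convex_hull.equals(poly):
        # A convex polygon is its own kernel.
        return poly.covers(Point(candidate))
    for p in poly.exterior.coords[:-1]:
        seg = LineString([candidate, p])
        # Shrink the segment slightly so it does not stop exactly on the boundary at p.
        seg = LineString([candidate, seg.interpolate(seg.length - eps)])
        if not poly.covers(seg):
            return False  # the segment leaves the polygon, so p is not visible
    return True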
diff --git a/mesh_model/mesh_struct/mesh.py b/mesh_model/mesh_struct/mesh.py
index 01b8bb4..86da604 100644
--- a/mesh_model/mesh_struct/mesh.py
+++ b/mesh_model/mesh_struct/mesh.py
@@ -13,13 +13,14 @@
class Mesh:
def __init__(self, nodes=[], faces=[]):
"""
- Vertices are stored in a numpy array containing coordinates (x,y, dart id)
+ Vertices are stored in a numpy array containing coordinates (x,y, dart id, ideal adjacency, vertex score)
Faces are stored in a numpy array of simple (dart ids)
- Darts are stored in a numpy array, where each dart is a 5-tuple (dart id, beta_1, beta_2, vertex_id, face_id)
+ Darts are stored in a numpy array, where each dart is a 6-tuple (dart id, beta_1, beta_2, vertex_id, face_id, geo_quality)
+ Ideal adjacency, vertex scores and geometric quality are not defined here. Use the Mesh analysis class to define them.
"""
- self.nodes = numpy.empty((0, 3))
+ self.nodes = numpy.empty((0, 5), dtype=float)
self.faces = numpy.empty(0, dtype=int)
- self.dart_info = numpy.empty((0, 5), dtype=int)
+ self.dart_info = numpy.empty((0, 6), dtype=int)
self.first_free_dart = 0
self.first_free_node = 0
self.first_free_face = 0
@@ -61,19 +62,20 @@ def nb_faces(self) -> int:
def add_node(self, x: float, y: float) -> Node:
"""
Add a vertex in the mesh, this node is not connected to a dart here
+ The ideal adjacency and vertex score are not defined here
:param x: X coordinate
:param y: Y coordinate
:return: the created node
"""
if len(self.nodes) <= self.first_free_node:
- self.nodes = numpy.append(self.nodes, [[x, y, -1]], axis=0)
+ self.nodes = numpy.append(self.nodes, [[x, y, -1, -1, -99]], axis=0)
self.first_free_node += 1
return Node(self, len(self.nodes) - 1)
elif self.first_free_node >= 0:
n_id = int(self.first_free_node)
if isinstance(n_id, int):
self.first_free_node = abs(self.nodes[n_id, 2] + 1)
- self.nodes[n_id] = [x, y, -1]
+ self.nodes[n_id] = [x, y, -1, -1, -99]
else:
print(n_id)
print(type(n_id))
@@ -272,17 +274,18 @@ def set_face_beta2(self, f: Face, darts: list[Dart]) -> None:
df_current = df_current.get_beta(1)
end = (df_current.id == f.get_dart().id)
- def add_dart(self, a1: int = -1, a2: int = -1, v: int = -1, f: int = -1) -> Dart:
+ def add_dart(self, a1: int = -1, a2: int = -1, v: int = -1, f: int = -1, q: int = -99) -> Dart:
"""
This function add a dart in the mesh. It must not be used directly
:param a1: dart index to connect by alpha1
:param a2: dart index to connect by alpha2
:param v: vertex index this dart point to
:param f: face to connect
+ :param q: geometric quality around the dart
:return: the created dart
"""
if len(self.dart_info) <= self.first_free_dart:
- self.dart_info = numpy.append(self.dart_info, [[len(self.dart_info), a1, a2, v, f]], axis=0)
+ self.dart_info = numpy.append(self.dart_info, [[len(self.dart_info), a1, a2, v, f, q]], axis=0)
self.first_free_dart += 1
return Dart(self, len(self.dart_info) - 1)
elif len(self.dart_info) > self.first_free_dart:
@@ -308,7 +311,16 @@ def del_dart(self, d: Dart):
"""
self.dart_info[d.id][0] = -self.first_free_dart - 1
self.first_free_dart = d.id
+ d.active = False
+ def is_dart_active(self, d: Dart) -> bool:
+ """Check if the dart has been deleted"""
+ if d is None:
+ return False
+ elif self.dart_info[d.id,0] < 0:
+ return False
+ elif self.dart_info[d.id,0] >=0 :
+ return True
def set_beta2(self, dart: Dart) -> None:
"""
@@ -405,6 +417,8 @@ def find_parallel_darts(self, d: Dart) -> list[Dart]:
return parallel_darts
+
+
def inverseQuad(A: Node, B: Node, C: Node, D: Node):
u1 = numpy.array([B.x() - A.x(), B.y() - A.y()]) # vect(AB)
u2 = numpy.array([C.x() - B.x(), C.y() - B.y()]) # vect(BC)
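For reference, the extended storage layout introduced above can be summarised with a short sketch; the column comments are descriptive only, the class itself keeps plain numpy rows and uses -1 / -99 as "not yet defined" sentinels.

import numpy

# nodes: one row per vertex -> [x, y, dart_id, ideal_adjacency, vertex_score]
# darts: one row per dart   -> [dart_id, beta_1, beta_2, vertex_id, face_id, geo_quality]
nodes = numpy.empty((0, 5), dtype=float)
nodes = numpy.append(nodes, [[0.0, 0.0, -1, -1, -99]], axis=0)          # freshly added node

dart_info = numpy.empty((0, 6), dtype=int)
dart_info = numpy.append(dart_info, [[0, -1, -1, 0, -1, -99]], axis=0)  # freshly added dart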
diff --git a/mesh_model/mesh_struct/mesh_elements.py b/mesh_model/mesh_struct/mesh_elements.py
index ac68310..a1b12e2 100644
--- a/mesh_model/mesh_struct/mesh_elements.py
+++ b/mesh_model/mesh_struct/mesh_elements.py
@@ -44,6 +44,9 @@ def get_beta(self, i: int) -> Dart:
raise ValueError("Wrong alpha dimension")
if self.mesh.dart_info[self.id, i] == -1:
return None
+ d2_id = self.mesh.dart_info[self.id, i]
+ if self.mesh.dart_info[d2_id, 0] <0 :
+ raise ValueError("Dart deleted")
return Dart(self.mesh, self.mesh.dart_info[self.id, i])
@@ -96,6 +99,36 @@ def set_face(self, face: Face) -> None:
"""
self.mesh.dart_info[self.id, 4] = face.id
+ def get_quality(self) -> int:
+ """
+ Get the geometric quality around a given dart.
+
+ :return: the geometric quality around the dart.
+ :raises ValueError: if there is no quality dimension
+ """
+ dart_quality = self.mesh.dart_info[self.id, 5]
+ if dart_quality == -99:
+ raise ValueError("No quality dimension")
+ return dart_quality
+
+ def set_quality(self, quality: int) -> None:
+ """
+ Set the geometric quality around a given dart. The same quality is automatically set on the twin dart.
+ The quality is a parameter used to determine whether applying an operation to the dart would flip a face.
+
+ * For triangular meshes:
+ The dart's surrounding quality is determined by analyzing the quadrilateral formed by the two adjacent triangles.
+ The configuration is classified as convex, crossed, or concave.
+
+ * For quadrilateral meshes:
+ The dart's surrounding quality is determined by whether the associated node forms a "star-shaped" (étoilé) configuration.
+ :param quality: calculated quality
+ """
+
+ d2_id = self.mesh.dart_info[self.id, 2]
+ self.mesh.dart_info[self.id, 5] = quality
+ if d2_id >=0: # inner dart
+ self.mesh.dart_info[d2_id, 5] = quality
class Node:
_mesh_type: type = None
@@ -175,6 +208,42 @@ def set_xy(self, x: float, y: float) -> None:
self.set_x(x)
self.set_y(y)
+ def set_ideal_adjacency(self, i: int) -> None:
+ """
+ Set the ideal adjacency of this node.
+ :param i: calculated ideal adjacency
+ """
+ self.mesh.nodes[self.id,3] = i
+
+ def get_ideal_adjacency(self) -> int:
+ """
+ Get the ideal adjacency of this node.
+ :return: ideal adjacency
+ :raises ValueError: if there is no ideal adjacency
+ """
+ ideal_adjacency = self.mesh.nodes[self.id,3]
+ if ideal_adjacency == -1:
+ raise ValueError("No ideal adjacency")
+ return ideal_adjacency
+
+ def set_score(self, s: int) -> None:
+ """
+ Set the score of a node.
+ :param s: calculated score
+ """
+ self.mesh.nodes[self.id,4] = s
+
+ def get_score(self) -> int:
+ """
+ Get the score of this node.
+ :return: score
+ :raises ValueError: if there is no score defined
+ """
+ score = self.mesh.nodes[self.id,4]
+ if score == -99:
+ raise ValueError("No score")
+ return score
+
class Face:
_mesh_type: type = None
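The new Dart and Node accessors all follow the same pattern: a setter writes into the extra array column, and the getter raises ValueError while the sentinel (-1 or -99) is still stored. A hedged usage sketch, assuming mesh is a Mesh instance whose analysis pass has not run yet:

node = mesh.add_node(0.5, 0.5)
try:
    node.get_score()                 # raises: the -99 sentinel is still in place
except ValueError:
    node.set_ideal_adjacency(6)      # e.g. an inner vertex of a triangular mesh
    node.set_score(0)
print(node.get_ideal_adjacency(), node.get_score())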
diff --git a/mesh_model/random_quadmesh.py b/mesh_model/random_quadmesh.py
index 66a39cf..12fbbb0 100644
--- a/mesh_model/random_quadmesh.py
+++ b/mesh_model/random_quadmesh.py
@@ -4,7 +4,7 @@
from mesh_model.mesh_struct.mesh_elements import Dart
from mesh_model.mesh_struct.mesh import Mesh
-from mesh_model.mesh_analysis.quadmesh_analysis import isValidAction
+from mesh_model.mesh_analysis.quadmesh_analysis import QuadMeshOldAnalysis
from environment.actions.quadrangular_actions import flip_edge_cntcw_ids, split_edge_ids, collapse_edge_ids
from mesh_model.reader import read_gmsh
@@ -32,6 +32,7 @@ def mesh_shuffle(mesh: Mesh, num_nodes) -> Mesh:
nb_action_max = int(num_nodes)
nb_action = 0
active_darts_list = mesh.active_darts()
+ m_analysis = QuadMeshOldAnalysis(mesh)
i = 0
while i < nb_action_max:
action_type = np.random.randint(0, 3)
@@ -41,17 +42,17 @@ def mesh_shuffle(mesh: Mesh, num_nodes) -> Mesh:
i1 = dart.get_node()
i2 = (dart.get_beta(1)).get_node()
#plot_mesh(mesh)
- if action_type == 0 and isValidAction(mesh, d_id, action_type)[0]:
- flip_edge_cntcw_ids(mesh, i1.id, i2.id)
+ if action_type == 0 and m_analysis.isValidAction(d_id, action_type)[0]:
+ flip_edge_cntcw_ids(m_analysis, i1.id, i2.id)
nb_action += 1
- elif action_type == 1: # and isValidAction(mesh, d_id, action_type)[0]
- split_edge_ids(mesh, i1.id, i2.id)
+ elif action_type == 1 and m_analysis.isValidAction(d_id, action_type)[0]:
+ split_edge_ids(m_analysis, i1.id, i2.id)
nb_action += 1
- elif action_type == 2 and isValidAction(mesh, d_id, action_type)[0]:
- collapse_edge_ids(mesh, i1.id, i2.id)
+ elif action_type == 2 and m_analysis.isValidAction(d_id, action_type)[0]:
+ collapse_edge_ids(m_analysis, i1.id, i2.id)
nb_action += 1
- elif action_type == 3 and isValidAction(mesh, d_id, action_type)[0]:
- collapse_edge_ids(mesh, i1.id, i2.id)
+ elif action_type == 3 and m_analysis.isValidAction(d_id, action_type)[0]:
+ collapse_edge_ids(m_analysis, i1.id, i2.id)
nb_action += 1
i += 1
active_darts_list = mesh.active_darts()
diff --git a/mesh_model/random_trimesh.py b/mesh_model/random_trimesh.py
index db17a34..86a3834 100644
--- a/mesh_model/random_trimesh.py
+++ b/mesh_model/random_trimesh.py
@@ -1,10 +1,10 @@
from __future__ import annotations
import numpy as np
+from mesh_model.mesh_analysis.global_mesh_analysis import NodeAnalysis
from mesh_model.mesh_struct.mesh_elements import Dart, Node
from mesh_model.mesh_struct.mesh import Mesh
-from mesh_model.mesh_analysis.trimesh_analysis import find_opposite_node, isValidAction
-from mesh_model.mesh_analysis.global_mesh_analysis import node_in_mesh
+from mesh_model.mesh_analysis.trimesh_analysis import TriMeshQualityAnalysis
from environment.actions.triangular_actions import flip_edge_ids, split_edge_ids, collapse_edge_ids
@@ -18,6 +18,7 @@ def regular_mesh(num_nodes_max: int) -> Mesh:
nodes = [[0.0, 0.0], [1.0, 0.0], [0.5, 0.87]]
faces = [[0, 1, 2]]
mesh = Mesh(nodes, faces)
+ m_analysis = TriMeshQualityAnalysis(mesh)
num_nodes = 3
dart_id = 0
@@ -28,10 +29,10 @@ def regular_mesh(num_nodes_max: int) -> Mesh:
A = d.get_node()
d1 = d.get_beta(1)
B = d1.get_node()
- x_C, y_C = find_opposite_node(d)
+ x_C, y_C = m_analysis.find_opposite_node(d)
# Search if the node C already exist in the actual mesh
- found, n_id = node_in_mesh(mesh, x_C, y_C)
+ found, n_id = m_analysis.node_in_mesh(x_C, y_C)
if found and d.get_beta(2) is None:
C = Node(mesh, n_id)
@@ -46,6 +47,9 @@ def regular_mesh(num_nodes_max: int) -> Mesh:
dart_id += 1
mesh.set_twin_pointers()
+ m_analysis.set_adjacency()
+ m_analysis.set_scores()
+ m_analysis.set_geometric_quality()
return mesh
@@ -80,11 +84,12 @@ def mesh_shuffle_flip(mesh: Mesh) -> Mesh:
"""
nb_flip = len(mesh.dart_info)
nb_nodes = len(mesh.nodes)
+ m_analysis = TriMeshQualityAnalysis(mesh)
for i in range(nb_flip):
i1 = np.random.randint(nb_nodes)
i2 = np.random.randint(nb_nodes)
if i1 != i2:
- flip_edge_ids(mesh, i1, i2)
+ flip_edge_ids(m_analysis, i1, i2)
return mesh
def mesh_shuffle(mesh: Mesh, num_nodes) -> Mesh:
@@ -94,25 +99,26 @@ def mesh_shuffle(mesh: Mesh, num_nodes) -> Mesh:
:param num_nodes: number nodes of the mesh
:return: a mesh with randomly flipped darts.
"""
- nb_action_max = int(num_nodes)
+ nb_action_max = 3
nb_action = 0
active_darts_list = mesh.active_darts()
+ m_analysis = TriMeshQualityAnalysis(mesh)
i = 0
- while i < nb_action_max:
+ while nb_action < nb_action_max:
action_type = np.random.randint(0, 3)
d_id = np.random.randint(len(active_darts_list))
d_id = active_darts_list[d_id][0]
dart = Dart(mesh, d_id)
i1 = dart.get_node()
i2 = ((dart.get_beta(1)).get_beta(1)).get_node()
- if action_type == 0 and isValidAction(mesh, d_id, action_type)[0]:
- flip_edge_ids(mesh, i1.id, i2.id)
+ if action_type == 0 and m_analysis.isValidAction(d_id, action_type)[0]:
+ flip_edge_ids(m_analysis, i1.id, i2.id)
nb_action += 1
- elif action_type == 1 and isValidAction(mesh, d_id, action_type)[0]:
- split_edge_ids(mesh, i1.id, i2.id)
+ elif action_type == 1 and m_analysis.isValidAction(d_id, action_type)[0]:
+ split_edge_ids(m_analysis, i1.id, i2.id)
nb_action += 1
- elif action_type == 2 and isValidAction(mesh, d_id, action_type)[0]:
- collapse_edge_ids(mesh, i1.id, i2.id)
+ elif action_type == 2 and m_analysis.isValidAction(d_id, action_type)[0]:
+ collapse_edge_ids(m_analysis, i1.id, i2.id)
nb_action += 1
i += 1
active_darts_list = mesh.active_darts()
diff --git a/model_RL/PPO_model_pers.py b/model_RL/PPO_model_pers.py
index f2b860b..f390cb9 100644
--- a/model_RL/PPO_model_pers.py
+++ b/model_RL/PPO_model_pers.py
@@ -1,4 +1,3 @@
-from mesh_model.mesh_analysis.global_mesh_analysis import global_score
import copy
import random
from tqdm import tqdm
@@ -7,7 +6,6 @@
import torch.nn as nn
from torch.optim import Adam
from torch.distributions import Categorical
-from mesh_model.mesh_analysis.quadmesh_analysis import isValidAction
class NaNExceptionActor(Exception):
@@ -19,16 +17,17 @@ class NaNExceptionCritic(Exception):
class Actor(nn.Module):
- def __init__(self, env, input_dim, output_dim, lr=0.0001, eps=0):
+ def __init__(self, env, input_dim, n_actions, n_darts_observed, lr=0.0001, eps=0):
super(Actor, self).__init__()
self.fc1 = nn.Linear(input_dim, 64)
self.fc2 = nn.Linear(64, 64)
- self.fc3 = nn.Linear(64, output_dim)
+ self.fc3 = nn.Linear(64, n_actions*n_darts_observed)
self.softmax = nn.Softmax(dim=-1)
self.gamma = 0.9
self.optimizer = Adam(self.parameters(), lr=lr, weight_decay=0.01)
self.env = env
self.eps = eps
+ self.n_actions = n_actions
def reset(self, env=None):
self.fc1.reset_parameters()
@@ -37,6 +36,7 @@ def reset(self, env=None):
self.optimizer = Adam(self.parameters(), lr=self.optimizer.defaults['lr'], weight_decay=self.optimizer.defaults['weight_decay'])
def select_action(self, observation, info):
+ ma = info["mesh_analysis"]
if np.random.rand() < self.eps:
action = self.env.sample() # random choice of an action
dart_id = self.env.darts_selected[action[1]]
@@ -44,7 +44,7 @@ def select_action(self, observation, info):
total_actions_possible = np.prod(self.env.action_space.nvec)
prob = 1/total_actions_possible
i = 0
- while not isValidAction(self.env.mesh, dart_id, action_type):
+ while not ma.isValidAction(dart_id, action_type):
if i > 15:
return None, None
action = self.env.sample()
@@ -58,11 +58,11 @@ def select_action(self, observation, info):
action = dist.sample()
action = action.tolist()
prob = pmf[action]
- action_dart = int(action/4)
- action_type = action % 4
+ action_dart = int(action/self.n_actions)
+ action_type = action % self.n_actions
dart_id = info["darts_selected"][action_dart]
i = 0
- while not isValidAction(info["mesh"], dart_id, action_type):
+ while not ma.isValidAction(dart_id, action_type):
if i > 15:
return None, None
pmf = self.forward(obs)
@@ -70,8 +70,8 @@ def select_action(self, observation, info):
action = dist.sample()
action = action.tolist()
prob = pmf[action]
- action_dart = int(action/4)
- action_type = action % 4
+ action_dart = int(action/self.n_actions)
+ action_type = action % self.n_actions
dart_id = info["darts_selected"][action_dart]
i += 1
action_list = [action, dart_id, action_type]
@@ -137,10 +137,11 @@ def learn(self, critic_loss):
class PPO:
- def __init__(self, env, obs_size, max_steps, lr, gamma, nb_iterations, nb_episodes_per_iteration, nb_epochs, batch_size):
+ def __init__(self, env, obs_size, n_actions, n_darts_observed, max_steps, lr, gamma, nb_iterations, nb_episodes_per_iteration, nb_epochs, batch_size):
self.env = env
self.max_steps = max_steps
- self.actor = Actor(self.env, obs_size, 4*10, lr=lr)
+ self.n_actions = n_actions
+ self.actor = Actor(self.env, obs_size, n_actions, n_darts_observed, lr=lr)
self.critic = Critic(obs_size, lr=lr)
self.lr = lr
self.gamma = gamma
@@ -166,7 +167,7 @@ def train(self, dataset):
critic_loss = []
actor_loss = []
self.critic.optimizer.zero_grad()
- for _, (s, o, a, r, G, old_prob, next_o, done) in enumerate(batch, 1):
+ for _, (ma, o, a, r, G, old_prob, next_o, done) in enumerate(batch, 1):
o = torch.tensor(o.flatten(), dtype=torch.float32)
next_o = torch.tensor(next_o.flatten(), dtype=torch.float32)
value = self.critic(o)
@@ -174,7 +175,7 @@ def train(self, dataset):
log_prob = torch.log(pmf[a[0]])
next_value = torch.tensor(0.0, dtype=torch.float32) if done else self.critic(next_o)
delta = r + 0.9 * next_value - value
- _, st, ideal_s, _ = global_score(s) # Comparaison à l'état s et pas s+1 ?
+ _, st, ideal_s, _ = ma.global_score() # Comparison with state s and not s+1?
if st == ideal_s:
continue
advantage = 1 if done else G / (st - ideal_s)
@@ -226,13 +227,13 @@ def learn(self, writer):
done = False
step = 0
while step < self.max_steps:
- state = copy.deepcopy(info["mesh"])
+ ma = copy.deepcopy(info["mesh_analysis"])
obs = next_obs
action, prob = self.actor.select_action(obs, info)
if action is None:
wins.append(0)
break
- gym_action = [action[2],int(action[0]/4)]
+ gym_action = [action[2],int(action[0]/self.n_actions)]
next_obs, reward, terminated, truncated, info = self.env.step(gym_action)
ep_reward += reward
ep_mesh_reward += info["mesh_reward"]
@@ -241,13 +242,13 @@ def learn(self, writer):
if terminated:
if truncated:
wins.append(0)
- trajectory.append((state, obs, action, reward, G, prob, next_obs, done))
+ trajectory.append((ma, obs, action, reward, G, prob, next_obs, done))
else:
wins.append(1)
done = True
- trajectory.append((state, obs, action, reward, G, prob, next_obs, done))
+ trajectory.append((ma, obs, action, reward, G, prob, next_obs, done))
break
- trajectory.append((state, obs, action, reward, G, prob, next_obs, done))
+ trajectory.append((ma, obs, action, reward, G, prob, next_obs, done))
step += 1
if len(trajectory) != 0:
rewards.append(ep_reward)
@@ -275,4 +276,4 @@ def learn(self, writer):
print("NaN Exception on Critic Network")
return None, None, None, None
- return self.actor, rewards, wins, len_ep, info["observation_registry"]
+ return self.actor, rewards, wins, len_ep, None
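The actor now decodes a flat index over n_actions * n_darts_observed logits instead of the previous hard-coded 4*10 head; a small worked example of the decode used in select_action and learn, with purely illustrative numbers:

n_actions = 4                             # e.g. flip / split / collapse / cleanup
n_darts_observed = 10                     # size of the observed dart window
flat_action = 13                          # index sampled from the categorical over 40 logits
action_dart = flat_action // n_actions    # 3 -> position in info["darts_selected"]
action_type = flat_action % n_actions     # 1 -> which operation to apply
gym_action = [action_type, action_dart]   # same layout as built in learn()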
diff --git a/model_RL/evaluate_model.py b/model_RL/evaluate_model.py
index 1f002ed..f6984be 100644
--- a/model_RL/evaluate_model.py
+++ b/model_RL/evaluate_model.py
@@ -1,19 +1,19 @@
-from numpy import ndarray
-
-from environment.trimesh_env import TriMesh
-from mesh_model.mesh_analysis.global_mesh_analysis import global_score
-from mesh_model.mesh_struct.mesh import Mesh
import numpy as np
import copy
+
from tqdm import tqdm
+from environment.old_files.trimesh_env import TriMesh
+from mesh_model.mesh_analysis.trimesh_analysis import TriMeshOldAnalysis
+from mesh_model.mesh_struct.mesh import Mesh
+
def testPolicy(
policy,
n_eval_episodes: int,
dataset: list[Mesh],
max_steps: int
-) -> tuple[ndarray, ndarray, ndarray, list[Mesh]]:
+) -> tuple[np.ndarray, np.ndarray, np.ndarray, list[Mesh]]:
"""
Tests policy on each mesh of a dataset with n_eval_episodes.
:param policy: the policy to test
@@ -60,7 +60,9 @@ def isBetterPolicy(actual_best_policy, policy_to_test):
def isBetterMesh(best_mesh, actual_mesh):
- if best_mesh is None or global_score(best_mesh)[1] > global_score(actual_mesh)[1]:
+ ma1 = TriMeshOldAnalysis(best_mesh)
+ ma2 = TriMeshOldAnalysis(actual_mesh)
+ if best_mesh is None or ma1.global_score()[1] > ma2.global_score()[1]:
return True
else:
return False
diff --git a/model_RL/AC_model.py b/model_RL/old_files/AC_model.py
similarity index 96%
rename from model_RL/AC_model.py
rename to model_RL/old_files/AC_model.py
index f48582f..615dd18 100644
--- a/model_RL/AC_model.py
+++ b/model_RL/old_files/AC_model.py
@@ -1,6 +1,6 @@
import torch
from tqdm import tqdm
-from model_RL.utilities.actor_critic_networks import NaNExceptionActor, NaNExceptionCritic, Actor, Critic
+from model_RL.old_files.utilities import NaNExceptionActor, NaNExceptionCritic, Actor, Critic
class AC:
diff --git a/model_RL/PPO_model.py b/model_RL/old_files/PPO_model.py
similarity index 98%
rename from model_RL/PPO_model.py
rename to model_RL/old_files/PPO_model.py
index a6dc568..ecbe9dd 100644
--- a/model_RL/PPO_model.py
+++ b/model_RL/old_files/PPO_model.py
@@ -2,7 +2,7 @@
Old version of PPO for triangular environement
"""
-from model_RL.utilities.actor_critic_networks import NaNExceptionActor, NaNExceptionCritic, Actor, Critic
+from model_RL.old_files.utilities import NaNExceptionActor, NaNExceptionCritic, Actor, Critic
from mesh_model.mesh_analysis.global_mesh_analysis import global_score
import copy
import torch
diff --git a/model_RL/SAC_model.py b/model_RL/old_files/SAC_model.py
similarity index 97%
rename from model_RL/SAC_model.py
rename to model_RL/old_files/SAC_model.py
index eff6636..62212f4 100644
--- a/model_RL/SAC_model.py
+++ b/model_RL/old_files/SAC_model.py
@@ -1,4 +1,4 @@
-from model_RL.utilities.actor_critic_networks import NaNExceptionActor, NaNExceptionCritic, Actor, Critic
+from model_RL.old_files.utilities import NaNExceptionActor, NaNExceptionCritic, Actor, Critic
import torch
import random
diff --git a/model_RL/parameters/ppo_config.json b/model_RL/old_files/ppo_config.json
similarity index 59%
rename from model_RL/parameters/ppo_config.json
rename to model_RL/old_files/ppo_config.json
index 1b844e5..299000f 100644
--- a/model_RL/parameters/ppo_config.json
+++ b/model_RL/old_files/ppo_config.json
@@ -5,7 +5,7 @@
"batch_size": 64,
"learning_rate": 0.0001,
"gamma": 0.9,
- "verbose": 1,
- "tensorboard_log": "training/results/quad/",
- "total_timesteps": 80000
+ "verbose": 2,
+ "tensorboard_log": "training/results/tri/",
+ "total_timesteps": 20000
}
diff --git a/model_RL/utilities/actor_critic_networks.py b/model_RL/old_files/utilities/actor_critic_networks.py
similarity index 100%
rename from model_RL/utilities/actor_critic_networks.py
rename to model_RL/old_files/utilities/actor_critic_networks.py
diff --git a/model_RL/utilities/nnPolicy.py b/model_RL/old_files/utilities/nnPolicy.py
similarity index 100%
rename from model_RL/utilities/nnPolicy.py
rename to model_RL/old_files/utilities/nnPolicy.py
diff --git a/model_RL/utilities/__init__.py b/model_RL/utilities/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/test_modules/test_actions.py b/test_modules/test_actions.py
index b2fcad6..c21de43 100644
--- a/test_modules/test_actions.py
+++ b/test_modules/test_actions.py
@@ -1,5 +1,7 @@
import unittest
+import scipy
import mesh_model.mesh_struct.mesh as mesh
+from mesh_model.mesh_analysis.trimesh_analysis import TriMeshQualityAnalysis
from mesh_model.mesh_struct.mesh_elements import Dart, Node
from mesh_model.random_trimesh import regular_mesh
from environment.actions.triangular_actions import split_edge, flip_edge, collapse_edge
@@ -31,7 +33,10 @@ def test_flip(self):
d1.set_beta(2, d2)
d2.set_beta(2, d1)
- flip_edge(cmap, n00, n11)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ done = flip_edge(m_analysis, n00, n11)
+
+ self.assertTrue(done[0])
self.assertEqual(2, cmap.nb_faces())
self.assertEqual(4, cmap.nb_nodes())
@@ -45,69 +50,78 @@ def test_split(self):
t1 = cmap.add_triangle(n00, n10, n11)
t2 = cmap.add_triangle(n00, n11, n01)
- split_edge(cmap, n00, n11)
+ cmap.set_twin_pointers()
+ m_analysis = TriMeshQualityAnalysis(cmap)
+
+ done = split_edge(m_analysis, n00, n11)
+ n_new = Node(cmap, 4)
+
+ self.assertTrue(done[0])
d1 = t1.get_dart()
- # d1 goes from n00 to n10
- self.assertEqual(d1.get_node(), n00)
+ # d1 goes from n_new to n10
+ self.assertEqual(d1.get_node(), n_new)
d1 = d1.get_beta(1).get_beta(1)
# now d1 goes from n11 to n00
self.assertEqual(d1.get_node(), n11)
d2 = t2.get_dart() # goes from n00 to n11
self.assertEqual(d2.get_node(), n00)
- # We sew on both directions
- d1.set_beta(2, d2)
- d2.set_beta(2, d1)
+ self.assertEqual(4, cmap.nb_faces())
+ self.assertEqual(5, cmap.nb_nodes())
def test_collapse(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
faces = [[0, 1, 2], [0, 2, 3]]
cmap = mesh.Mesh(nodes, faces)
- plot_mesh(cmap)
n00 = Node(cmap, 0)
n11 = Node(cmap, 2)
- split_edge(cmap, n00, n11)
- plot_mesh(cmap)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+
+ split_edge(m_analysis, n00, n11)
n5 = Node(cmap, 4)
- valid, _, _ = collapse_edge(cmap, n00, n5)
- d1_to_test = Dart(cmap, 7)
- d2_to_test = Dart(cmap, 0)
+ split_edge(m_analysis, n11, n5)
+ n6 = Node(cmap, 5)
+ plot_mesh(cmap)
+ #Collapse not possible
+ valid, _, _ = collapse_edge(m_analysis, n11, n6)
self.assertEqual(valid, False)
- # Test possible collapse
- cmap = regular_mesh(16)
- d = Dart(cmap, 0)
- n0 = d.get_node()
- n1 = d.get_beta(1).get_node()
- valid, _, _ = collapse_edge(cmap, n0, n1)
+
+ # Collapse possible
+ valid, _, _ = collapse_edge(m_analysis, n6, n5)
self.assertEqual(valid, True)
+ plot_mesh(cmap)
+
def test_split_collapse_split(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
faces = [[0, 1, 2], [0, 2, 3]]
cmap = mesh.Mesh(nodes, faces)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+
n0 = Node(cmap, 0)
n1 = Node(cmap, 1)
n2 = Node(cmap, 2)
n3 = Node(cmap, 3)
- split_edge(cmap, n0, n2)
+ split_edge(m_analysis, n0, n2)
n4 = Node(cmap, 4)
- collapse_edge(cmap, n0, n4)
- split_edge(cmap, n0, n2)
+ collapse_edge(m_analysis, n0, n4)
+ split_edge(m_analysis, n0, n2)
n5 = Node(cmap, 5)
- collapse_edge(cmap, n0, n5)
- split_edge(cmap, n4, n2)
- collapse_edge(cmap, n4, n5)
- collapse_edge(cmap, n2, n4)
- split_edge(cmap, n0, n2)
- split_edge(cmap, n0, n4)
- split_edge(cmap, n4, n3)
- split_edge(cmap, n4, n1)
- split_edge(cmap, n5, n1)
+ collapse_edge(m_analysis, n0, n5)
+ split_edge(m_analysis, n4, n2)
+ collapse_edge(m_analysis, n4, n5)
+ collapse_edge(m_analysis, n2, n4)
+ split_edge(m_analysis, n0, n2)
+ split_edge(m_analysis, n0, n4)
+ split_edge(m_analysis, n4, n3)
+ split_edge(m_analysis, n4, n1)
+ split_edge(m_analysis, n5, n1)
n7 = Node(cmap, 7)
n8 = Node(cmap, 8)
- collapse_edge(cmap, n7, n8)
- collapse_edge(cmap, n5, n7)
-
+ collapse_edge(m_analysis, n7, n8)
+ collapse_edge(m_analysis, n5, n7)
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
diff --git a/test_modules/test_actions_quad.py b/test_modules/test_actions_quad.py
index fdc1903..24469d6 100644
--- a/test_modules/test_actions_quad.py
+++ b/test_modules/test_actions_quad.py
@@ -1,9 +1,9 @@
import unittest
import os
import mesh_model.mesh_struct.mesh as mesh
-from mesh_model.mesh_analysis.global_mesh_analysis import global_score
from mesh_model.mesh_struct.mesh_elements import Dart, Node
from mesh_model.random_quadmesh import random_mesh
+from mesh_model.mesh_analysis.quadmesh_analysis import QuadMeshOldAnalysis
from environment.actions.quadrangular_actions import flip_edge_cntcw, flip_edge_cw, split_edge, collapse_edge, cleanup_edge
from view.mesh_plotter.mesh_plots import plot_mesh
from mesh_model.reader import read_gmsh
@@ -24,6 +24,7 @@ def test_flip(self):
q1 = cmap.add_quad(n11, n10, n20, n21)
q2 = cmap.add_quad(n10, n11, n01, n00)
cmap.set_twin_pointers()
+ ma = QuadMeshOldAnalysis(cmap)
plot_mesh(cmap)
d0 = q1.get_dart()
@@ -34,11 +35,12 @@ def test_flip(self):
self.assertEqual(d2.get_node(), n10)
- self.assertEqual(flip_edge_cntcw(cmap, n11, n10), (True,True,True))
+ self.assertEqual(flip_edge_cntcw(ma, n11, n10), (True,True,True))
self.assertEqual(2, cmap.nb_faces())
self.assertEqual(6, cmap.nb_nodes())
plot_mesh(cmap)
- self.assertFalse(flip_edge_cntcw(cmap, n11, n10)[0])
+ self.assertFalse(flip_edge_cntcw(ma, n11, n10)[0])
+ self.assertEqual(flip_edge_cw(ma, n01, n20), (True, True, True))
def test_split(self):
cmap = mesh.Mesh()
@@ -57,14 +59,15 @@ def test_split(self):
q3 = cmap.add_quad(n11, n21, n22, n12)
q4 = cmap.add_quad(n01, n11, n12, n02)
cmap.set_twin_pointers()
+ ma = QuadMeshOldAnalysis(cmap)
plot_mesh(cmap)
found, d = cmap.find_inner_edge(n11, n21)
self.assertTrue(found)
- self.assertEqual(split_edge(cmap, n11, n21), (True,True,True))
+ self.assertEqual(split_edge(ma, n11, n21), (True,True,True))
self.assertEqual(10, cmap.nb_nodes())
self.assertEqual(5, cmap.nb_faces())
plot_mesh(cmap)
- self.assertFalse(split_edge(cmap, n20, n21)[0])
+ self.assertFalse(split_edge(ma, n20, n21)[0])
def test_collapse(self):
cmap = mesh.Mesh()
@@ -88,12 +91,13 @@ def test_collapse(self):
plot_mesh(cmap)
found, d = cmap.find_inner_edge(n151, n12)
self.assertTrue(found)
+ ma = QuadMeshOldAnalysis(cmap)
plot_mesh(cmap)
- self.assertEqual(collapse_edge(cmap, n151, n12), (True, True, True))
+ self.assertEqual(collapse_edge(ma, n151, n12), (True, True, True))
self.assertEqual(9, cmap.nb_nodes())
self.assertEqual(4, cmap.nb_faces())
plot_mesh(cmap)
- self.assertFalse(split_edge(cmap, n20, n21)[0])
+ self.assertFalse(split_edge(ma, n20, n21)[0])
def test_cleanup(self):
cmap = mesh.Mesh()
@@ -114,10 +118,11 @@ def test_cleanup(self):
q4 = cmap.add_quad(n01, n051, n12, n02)
q5 = cmap.add_quad(n051, n10, n151, n12)
cmap.set_twin_pointers()
+ ma = QuadMeshOldAnalysis(cmap)
plot_mesh(cmap)
found, d = cmap.find_inner_edge(n151, n12)
self.assertTrue(found)
- self.assertEqual(cleanup_edge(cmap, n151, n21), (True, True, True))
+ self.assertEqual(cleanup_edge(ma, n151, n21), (True, True, True))
self.assertEqual(7, cmap.nb_nodes())
self.assertEqual(3, cmap.nb_faces())
plot_mesh(cmap)
@@ -126,16 +131,17 @@ def test_cleanup(self):
def test_actions(self):
filename = os.path.join(TESTFILE_FOLDER, 't1_quad.msh')
cmap = read_gmsh(filename)
+ ma = QuadMeshOldAnalysis(cmap)
plot_mesh(cmap)
d = Dart(cmap, 14)
n1= d.get_node()
n2 = (d.get_beta(1)).get_node()
- self.assertEqual(collapse_edge(cmap, n1, n2), (True,True,True))
+ self.assertEqual(collapse_edge(ma, n1, n2), (True,True,True))
plot_mesh(cmap)
d = Dart(cmap, 32)
n1 = d.get_node()
n2 = (d.get_beta(1)).get_node()
- self.assertEqual(flip_edge_cntcw(cmap, n1, n2), (True,True,True))
+ self.assertEqual(flip_edge_cntcw(ma, n1, n2), (True,True,True))
plot_mesh(cmap)
@@ -151,100 +157,103 @@ def test_split_collapse_split(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
faces = [[0, 1, 2], [0, 2, 3]]
cmap = mesh.Mesh(nodes, faces)
+ ma = QuadMeshOldAnalysis(cmap)
n0 = Node(cmap, 0)
n1 = Node(cmap, 1)
n2 = Node(cmap, 2)
n3 = Node(cmap, 3)
- split_edge(cmap, n0, n2)
+ split_edge(ma, n0, n2)
n4 = Node(cmap, 4)
- collapse_edge(cmap, n0, n4)
- split_edge(cmap, n0, n2)
+ collapse_edge(ma, n0, n4)
+ split_edge(ma, n0, n2)
n5 = Node(cmap, 5)
- collapse_edge(cmap, n0, n5)
- split_edge(cmap, n4, n2)
- collapse_edge(cmap, n4, n5)
- collapse_edge(cmap, n2, n4)
- split_edge(cmap, n0, n2)
- split_edge(cmap, n0, n4)
- split_edge(cmap, n4, n3)
- split_edge(cmap, n4, n1)
- split_edge(cmap, n5, n1)
+ collapse_edge(ma, n0, n5)
+ split_edge(ma, n4, n2)
+ collapse_edge(ma, n4, n5)
+ collapse_edge(ma, n2, n4)
+ split_edge(ma, n0, n2)
+ split_edge(ma, n0, n4)
+ split_edge(ma, n4, n3)
+ split_edge(ma, n4, n1)
+ split_edge(ma, n5, n1)
n7 = Node(cmap, 7)
n8 = Node(cmap, 8)
- collapse_edge(cmap, n7, n8)
- collapse_edge(cmap, n5, n7)
+ collapse_edge(ma, n7, n8)
+ collapse_edge(ma, n5, n7)
def test_simple_mesh(self):
filename = os.path.join(TESTFILE_FOLDER, 'simple_quad.msh')
cmap = read_gmsh(filename)
+ ma = QuadMeshOldAnalysis(cmap)
self.assertEqual(6, cmap.nb_faces())
self.assertEqual(11, cmap.nb_nodes())
#Collapse node 10 (0.67,0.67) from edge 10-6
- collapse_edge(cmap, Node(cmap, 10), Node(cmap, 6))
+ collapse_edge(ma, Node(cmap, 10), Node(cmap, 6))
self.assertEqual(5, cmap.nb_faces())
self.assertEqual(10, cmap.nb_nodes())
self.assertTrue(Node(cmap,10).get_dart().id < 0)
plot_mesh(cmap)
#Flip edge 5-3
self.assertTrue(cmap.find_inner_edge(Node(cmap, 5), Node(cmap, 3))[0])
- flip_edge_cntcw(cmap, Node(cmap, 5), Node(cmap, 3))
+ flip_edge_cntcw(ma, Node(cmap, 5), Node(cmap, 3))
self.assertFalse(cmap.find_inner_edge(Node(cmap, 5), Node(cmap, 3))[0])
plot_mesh(cmap)
#Collapse node 9 (0.33, 0.33) from edge 9-4
- collapse_edge(cmap, Node(cmap, 9), Node(cmap, 4))
+ collapse_edge(ma, Node(cmap, 9), Node(cmap, 4))
self.assertEqual(4, cmap.nb_faces())
self.assertEqual(9, cmap.nb_nodes())
self.assertTrue(Node(cmap, 9).get_dart().id < 0)
plot_mesh(cmap)
#Flip edge 7-1
self.assertTrue(cmap.find_inner_edge(Node(cmap, 7), Node(cmap, 1))[0])
- flip_edge_cntcw(cmap, Node(cmap, 7), Node(cmap, 1))
+ flip_edge_cntcw(ma, Node(cmap, 7), Node(cmap, 1))
self.assertFalse(cmap.find_inner_edge(Node(cmap, 7), Node(cmap, 1))[0])
plot_mesh(cmap)
#Flip edge 3-8
self.assertTrue(cmap.find_inner_edge(Node(cmap, 3), Node(cmap, 8))[0])
- flip_edge_cntcw(cmap, Node(cmap, 3), Node(cmap, 8))
+ flip_edge_cntcw(ma, Node(cmap, 3), Node(cmap, 8))
self.assertFalse(cmap.find_inner_edge(Node(cmap, 3), Node(cmap, 8))[0])
plot_mesh(cmap)
#Flip edge 1-8
self.assertTrue(cmap.find_inner_edge(Node(cmap, 1), Node(cmap, 8))[0])
- flip_edge_cntcw(cmap, Node(cmap, 1), Node(cmap, 8))
+ flip_edge_cntcw(ma, Node(cmap, 1), Node(cmap, 8))
self.assertFalse(cmap.find_inner_edge(Node(cmap, 1), Node(cmap, 8))[0])
plot_mesh(cmap)
#Split edge 2-7 and create new node n9 at coordinate (0.5, 0.75)
- split_edge(cmap, Node(cmap, 2), Node(cmap, 7))
+ split_edge(ma, Node(cmap, 2), Node(cmap, 7))
self.assertEqual(5, cmap.nb_faces())
self.assertEqual(10, cmap.nb_nodes())
self.assertTrue(Node(cmap, 9).get_dart().id > 0)
plot_mesh(cmap)
# Split edge 0-5 and create new node n10 at coordinate (0.5, 0.25)
- split_edge(cmap, Node(cmap, 0), Node(cmap, 5))
+ split_edge(ma, Node(cmap, 0), Node(cmap, 5))
self.assertEqual(6, cmap.nb_faces())
self.assertEqual(11, cmap.nb_nodes())
self.assertTrue(Node(cmap, 10).get_dart().id > 0)
plot_mesh(cmap)
# Flip edge 0-8
self.assertTrue(cmap.find_inner_edge(Node(cmap, 0), Node(cmap, 8))[0])
- flip_edge_cw(cmap, Node(cmap, 0), Node(cmap, 8))
+ flip_edge_cw(ma, Node(cmap, 0), Node(cmap, 8))
self.assertFalse(cmap.find_inner_edge(Node(cmap, 0), Node(cmap, 8))[0])
plot_mesh(cmap)
# Collapse node 8 (0.5, 0.5) from edge 8-10
- collapse_edge(cmap, Node(cmap, 8), Node(cmap, 10))
+ collapse_edge(ma, Node(cmap, 8), Node(cmap, 10))
self.assertEqual(5, cmap.nb_faces())
self.assertEqual(10, cmap.nb_nodes())
self.assertTrue(Node(cmap, 8).get_dart().id < 0)
# Collapse node 10 (0.5, 0.25) from edge 10-5
- collapse_edge(cmap, Node(cmap, 10), Node(cmap, 5))
+ collapse_edge(ma, Node(cmap, 10), Node(cmap, 5))
self.assertEqual(4, cmap.nb_faces())
self.assertEqual(9, cmap.nb_nodes())
self.assertTrue(Node(cmap, 10).get_dart().id < 0)
plot_mesh(cmap)
- self.assertEqual(global_score(cmap)[1], 0)
-
+ self.assertEqual(ma.global_score()[1], 0)
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test_modules/test_global_mesh_analysis.py b/test_modules/test_global_mesh_analysis.py
new file mode 100644
index 0000000..58ef84a
--- /dev/null
+++ b/test_modules/test_global_mesh_analysis.py
@@ -0,0 +1,33 @@
+import math
+import unittest
+import os
+
+from mesh_model.mesh_struct.mesh import Mesh
+from mesh_model.mesh_analysis.trimesh_analysis import TriMeshQualityAnalysis, TriMeshOldAnalysis
+from mesh_model.reader import read_gmsh
+from view.mesh_plotter.mesh_plots import plot_mesh
+
+TESTFILE_FOLDER = os.path.join(os.path.dirname(__file__), '../mesh_files/')
+
+class TestGlobalMeshAnalysis(unittest.TestCase):
+
+ def test_angle_by_coord(self):
+ filename = os.path.join(TESTFILE_FOLDER, 't1_quad.msh')
+ cmap = read_gmsh(filename)
+ m_analysis = TriMeshOldAnalysis(cmap)
+ self.assertEqual(m_analysis.get_angle_by_coord(1,0,0,0,0,1), 90)
+ self.assertEqual(m_analysis.get_angle_by_coord(-1,0,0,0,1,0), 180)
+
+ def test_angle_from_sides(self):
+ filename = os.path.join(TESTFILE_FOLDER, 't1_quad.msh')
+ cmap = read_gmsh(filename)
+ m_analysis = TriMeshOldAnalysis(cmap)
+ self.assertAlmostEqual(math.degrees(m_analysis.angle_from_sides(1,1,1)), 60)
+ self.assertAlmostEqual(math.degrees(m_analysis.angle_from_sides(0.00000001, 1, 1)), 0)
+ self.assertAlmostEqual(math.degrees(m_analysis.angle_from_sides(1, 0.5, 0.5)), 180)
+
+ with self.assertRaises(ValueError):
+ m_analysis.angle_from_sides(1.1, 0.5, 0.5)
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
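The expected values in these angle tests follow directly from the law of cosines; assuming angle_from_sides(a, b, c) returns the angle opposite the first side, a minimal reference computation is:

import math

def angle_opposite_a(a, b, c):
    # Law of cosines: a**2 = b**2 + c**2 - 2*b*c*cos(A)
    cos_A = (b**2 + c**2 - a**2) / (2 * b * c)
    if not -1.0 <= cos_A <= 1.0:
        raise ValueError("side lengths do not form a triangle")
    return math.acos(cos_A)

math.degrees(angle_opposite_a(1, 1, 1))      # 60.0  (equilateral)
math.degrees(angle_opposite_a(1, 0.5, 0.5))  # 180.0 (degenerate, flat triangle)
# angle_opposite_a(1.1, 0.5, 0.5) raises ValueError, as asserted above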
diff --git a/test_modules/test_quadrangular_mesh_analysis.py b/test_modules/test_quadrangular_mesh_analysis.py
index aea10de..ff4a41b 100644
--- a/test_modules/test_quadrangular_mesh_analysis.py
+++ b/test_modules/test_quadrangular_mesh_analysis.py
@@ -3,77 +3,89 @@
from mesh_model.mesh_struct.mesh import Mesh
from mesh_model.mesh_struct.mesh_elements import Dart
-import mesh_model.mesh_analysis.global_mesh_analysis as GMA
-import mesh_model.mesh_analysis.quadmesh_analysis as QMA
-from environment.actions.triangular_actions import split_edge_ids
+from mesh_model.mesh_analysis.quadmesh_analysis import QuadMeshOldAnalysis
+from environment.actions.quadrangular_actions import split_edge_ids, flip_edge_cw_ids
from view.mesh_plotter.mesh_plots import plot_mesh
from mesh_model.reader import read_gmsh
TESTFILE_FOLDER = os.path.join(os.path.dirname(__file__), '../mesh_files/')
-class TestMeshAnalysis(unittest.TestCase):
+class TestMeshOldAnalysis(unittest.TestCase):
def test_mesh_regular_score(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [0.0, 1.0], [1.0, 1.0], [2.0, 1.0], [0.0, 2.0], [1.0, 2.0], [2.0, 2.0], [3.0, 3.0]]
faces = [[0, 1, 4, 3], [1, 2, 5, 4], [3, 4, 7, 6], [4, 5, 8, 7]]
cmap = Mesh(nodes,faces)
- nodes_score, mesh_score, mesh_ideal_score, adjacency = GMA.global_score(cmap)
+ qma = QuadMeshOldAnalysis(cmap)
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = qma.global_score()
self.assertEqual((0,0), (mesh_score, mesh_ideal_score) )
def test_mesh_with_irregularities(self):
filename = os.path.join(TESTFILE_FOLDER, 't1_quad.msh')
cmap = read_gmsh(filename)
- nodes_score, mesh_score, mesh_ideal_score, adjacency = GMA.global_score(cmap)
+ qma = QuadMeshOldAnalysis(cmap)
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = qma.global_score()
self.assertIsNot((0, 0), (mesh_score,mesh_ideal_score) )
def test_is_valid_action(self):
filename = os.path.join(TESTFILE_FOLDER, 't1_quad.msh')
cmap = read_gmsh(filename)
+ qma = QuadMeshOldAnalysis(cmap)
#Boundary dart
- self.assertEqual(QMA.isValidAction(cmap, 20, 0), (False, True))
+ self.assertEqual(qma.isValidAction(20, 0), (False, True))
- # Flip test
- self.assertEqual(QMA.isValidAction(cmap, 3, 0), (True, True))
- self.assertEqual(QMA.isValidAction(cmap, 27, 0), (False, True))
+ # Flip Clockwise test
+ self.assertEqual(qma.isValidAction(3, 0), (True, True))
+ self.assertEqual(qma.isValidAction(27, 0), (False, True))
+
+ # Flip Counterclockwise test
+ self.assertEqual(qma.isValidAction(3, 1), (True, True))
+ self.assertEqual(qma.isValidAction(27, 1), (False, True))
#Split test
- self.assertEqual(QMA.isValidAction(cmap, 0, 1), (True, True))
- self.assertEqual(QMA.isValidAction(cmap, 27, 1), (False, True))
+ self.assertEqual(qma.isValidAction(0, 2), (True, True))
+ self.assertEqual(qma.isValidAction(27, 2), (False, True))
#Collapse test
- self.assertEqual(QMA.isValidAction(cmap, 0, 2), (True, True))
+ self.assertEqual(qma.isValidAction(0, 3), (True, True))
plot_mesh(cmap)
- self.assertEqual(QMA.isValidAction(cmap, 27, 2), (False, True))
+ self.assertEqual(qma.isValidAction(27, 3), (False, True))
+
+ #Cleanup test action id = 4
#All action test
- self.assertEqual(QMA.isValidAction(cmap, 27, 3), (False, True))
- self.assertEqual(QMA.isValidAction(cmap, 9, 3), (True, True))
+ self.assertEqual(qma.isValidAction(27, 5), (False, True))
+ flip_edge_cw_ids(qma,13,37)
+ self.assertEqual(qma.isValidAction(66, 5), (False, False))
+ self.assertEqual(qma.isValidAction(9, 5), (True, True))
#One action test
- self.assertEqual(QMA.isValidAction(cmap, 0, 4), (True, True))
- self.assertEqual(QMA.isValidAction(cmap, 9, 4), (True, True))
- self.assertEqual(QMA.isValidAction(cmap, 27, 4), (False, True))
+ self.assertEqual(qma.isValidAction(0, 6), (True, True))
+ self.assertEqual(qma.isValidAction(9, 6), (True, True))
+ self.assertEqual(qma.isValidAction(27, 6), (False, True))
#Invalid action
with self.assertRaises(ValueError):
- QMA.isValidAction(cmap, 0, 7)
+ qma.isValidAction(0, 7)
def test_isTruncated(self):
filename = os.path.join(TESTFILE_FOLDER, 't1_quad.msh')
cmap = read_gmsh(filename)
+ qma = QuadMeshOldAnalysis(cmap)
darts_list = []
for d_info in cmap.active_darts():
darts_list.append(d_info[0])
- self.assertFalse(QMA.isTruncated(cmap, darts_list))
+ self.assertFalse(qma.isTruncated(darts_list))
nodes = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
faces = [[0, 1, 3, 2]]
cmap = Mesh(nodes, faces)
+ qma = QuadMeshOldAnalysis(cmap)
darts_list = []
for d_info in cmap.active_darts():
darts_list.append(d_info[0])
- self.assertTrue(QMA.isTruncated(cmap, darts_list))
+ self.assertTrue(qma.isTruncated(darts_list))
if __name__ == '__main__':
unittest.main()
diff --git a/test_modules/test_triangular_mesh_analysis.py b/test_modules/test_triangular_mesh_analysis.py
index 8eeff64..8dd1fc3 100644
--- a/test_modules/test_triangular_mesh_analysis.py
+++ b/test_modules/test_triangular_mesh_analysis.py
@@ -1,20 +1,19 @@
import unittest
from mesh_model.mesh_struct.mesh import Mesh
-from mesh_model.mesh_struct.mesh_elements import Dart
-import mesh_model.mesh_analysis.global_mesh_analysis as GMA
-import mesh_model.mesh_analysis.trimesh_analysis as TMA
-import mesh_model.mesh_analysis.quadmesh_analysis as QMA
+from mesh_model.mesh_struct.mesh_elements import Dart, Node
+from mesh_model.mesh_analysis.trimesh_analysis import TriMeshQualityAnalysis, TriMeshOldAnalysis
from environment.actions.triangular_actions import split_edge_ids
from view.mesh_plotter.mesh_plots import plot_mesh
-class TestMeshAnalysis(unittest.TestCase):
+class TestMeshQualityAnalysis(unittest.TestCase):
def test_mesh_regular_score(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [0.5, 1], [-0.5, 1.0], [0.0, 2.0], [-1.0,0.0],[-0.5,-1.0],[0.0,-2.0], [0.5,-1.0]]
faces = [[0, 1, 2], [0, 2, 3], [3, 2, 4], [3, 5, 0], [0, 5, 6], [0, 6, 8], [6, 7, 8], [0, 8, 1]]
cmap = Mesh(nodes,faces)
- nodes_score, mesh_score, mesh_ideal_score, adjacency = GMA.global_score(cmap)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = m_analysis.global_score()
self.assertEqual((0,0), (mesh_score, mesh_ideal_score) )
def test_mesh_with_irregularities(self):
@@ -25,34 +24,38 @@ def test_mesh_with_irregularities(self):
[3, 4, 5], [3, 5, 6], [3, 6, 7], [7, 6, 8], [7, 8, 9], [7, 9, 10], [10, 9, 11], [10, 11, 12],
[14, 12, 13], [14, 13, 15], [1, 14, 15], [1, 15, 16], [1, 16, 17], [1, 17, 2], [2, 17, 18], [2, 18, 4]]
cmap = Mesh(nodes, faces)
- nodes_score, mesh_score, mesh_ideal_score, adjacency = GMA.global_score(cmap)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = m_analysis.global_score()
self.assertEqual((6, -2), (mesh_score,mesh_ideal_score) )
def test_mesh_bad_score(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
cmap = Mesh(nodes, faces)
- nodes_score, mesh_score, mesh_ideal_score, adjacency = GMA.global_score(cmap)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = m_analysis.global_score()
self.assertEqual((3, 1), (mesh_score, mesh_ideal_score))
def test_split_score(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
cmap = Mesh(nodes, faces)
- split_edge_ids(cmap, 0, 2)
- split_edge_ids(cmap, 1, 2) # split impossible
- nodes_score, mesh_score, mesh_ideal_score, adjacency = GMA.global_score(cmap)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ split_edge_ids(m_analysis, 0, 2)
+ split_edge_ids(m_analysis, 1, 4) # split impossible
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = m_analysis.global_score()
self.assertEqual((3, 1), (mesh_score, mesh_ideal_score))
def test_find_template_opposite_node_not_found(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
cmap = Mesh(nodes, faces)
+ m_analysis = TriMeshQualityAnalysis(cmap)
dart_to_test = Dart(cmap, 0)
- node = TMA.find_template_opposite_node(dart_to_test)
+ node = m_analysis.find_template_opposite_node(dart_to_test)
self.assertEqual(node, None)
dart_to_test = Dart(cmap, 2)
- node = TMA.find_template_opposite_node(dart_to_test)
+ node = m_analysis.find_template_opposite_node(dart_to_test)
self.assertEqual(node, 3)
def test_is_valid_action(self):
@@ -63,102 +66,320 @@ def test_is_valid_action(self):
[3, 4, 5], [3, 5, 6], [3, 6, 7], [7, 6, 8], [7, 8, 9], [7, 9, 10], [10, 9, 11], [10, 11, 12],
[14, 12, 13], [14, 13, 15], [1, 14, 15], [1, 15, 16], [1, 16, 17], [1, 17, 2], [2, 17, 18], [2, 18, 4]]
cmap = Mesh(nodes, faces)
- split_edge_ids(cmap, 0, 1)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ split_edge_ids(m_analysis, 0, 1)
#Boundary dart
- self.assertEqual(TMA.isValidAction(cmap, 25, 0), (False, True))
+ self.assertEqual(m_analysis.isValidAction(25, 0), (False, True))
# Flip test
- self.assertEqual(TMA.isValidAction(cmap, 3, 0), (True, True))
- self.assertEqual(TMA.isValidAction(cmap, 0, 0), (False, True))
+ self.assertEqual(m_analysis.isValidAction(3, 0), (True, True))
+ self.assertEqual(m_analysis.isValidAction(0, 0), (True, False))
#Split test
- self.assertEqual(TMA.isValidAction(cmap, 0, 1), (True, True))
- split_edge_ids(cmap, 1, 19)
- split_edge_ids(cmap, 1, 20)
- self.assertEqual(TMA.isValidAction(cmap, 20, 1), (True, False))
- split_edge_ids(cmap, 0, 19)
- split_edge_ids(cmap, 0, 22)
- split_edge_ids(cmap, 0, 23)
- self.assertEqual(TMA.isValidAction(cmap, 20, 1), (False, True))
+ self.assertEqual(m_analysis.isValidAction(0, 1), (True, True))
+ split_edge_ids(m_analysis, 1, 19)
+ split_edge_ids(m_analysis, 1, 20)
+ plot_mesh(m_analysis.mesh)
+ self.assertEqual(m_analysis.isValidAction(20, 1), (True, True))
+ split_edge_ids(m_analysis, 0, 19)
+ split_edge_ids(m_analysis, 0, 22)
+ split_edge_ids(m_analysis, 0, 23)
+ plot_mesh(m_analysis.mesh)
+ self.assertEqual(m_analysis.isValidAction(20, 1), (False, True)) # nodes n2 and n14 have a degree >= 10
#Collapse test
- self.assertEqual(TMA.isValidAction(cmap, 20, 2), (True, True))
+ self.assertEqual(m_analysis.isValidAction(20, 2), (True, True))
+ self.assertEqual(m_analysis.isValidAction(2, 2), (False, False))
+
+ #All action test
+ self.assertEqual(m_analysis.isValidAction(2, 3), (False, False))
+ self.assertEqual(m_analysis.isValidAction(26, 3), (False, False))
+ self.assertEqual(m_analysis.isValidAction(9, 3), (True, True))
+
+ #One action test
+ self.assertEqual(m_analysis.isValidAction(0, 4), (True, True))
+ self.assertEqual(m_analysis.isValidAction(9, 4), (True, True))
+ self.assertEqual(m_analysis.isValidAction(46, 4), (False, True))
+
+ def test_isFlipOk(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
+ cmap = Mesh(nodes, faces)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ dart_to_test = Dart(cmap, 0)
+ self.assertFalse(m_analysis.isFlipOk(dart_to_test)[0])
+ dart_to_test = Dart(cmap, 2)
+ self.assertTrue(m_analysis.isFlipOk(dart_to_test)[0])
+
+ def test_isSplitOk(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
+ cmap = Mesh(nodes, faces)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ dart_to_test = Dart(cmap, 0)
+ self.assertEqual(m_analysis.isSplitOk(dart_to_test), (False, True))
+ dart_to_test = Dart(cmap, 2)
+ self.assertEqual(m_analysis.isSplitOk(dart_to_test), (True, True))
+
+ def test_isCollapseOk(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
+ cmap = Mesh(nodes, faces)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ dart_to_test = Dart(cmap, 0)
+ self.assertFalse(m_analysis.isCollapseOk(dart_to_test)[0])
+ dart_to_test = Dart(cmap, 2)
+ self.assertFalse(m_analysis.isCollapseOk(dart_to_test)[0])
+
+ split_edge_ids(m_analysis, 0, 2)
+ split_edge_ids(m_analysis, 0, 5)
+ dart_to_test = Dart(cmap, 12)
+ self.assertTrue(m_analysis.isCollapseOk(dart_to_test)[0])
+
+ def test_isTruncated(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]
+ faces = [[0, 1, 2]]
+ cmap = Mesh(nodes, faces)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ darts_list = []
+ for d_info in cmap.active_darts():
+ darts_list.append(d_info[0])
+ self.assertTrue(m_analysis.isTruncated(darts_list))
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
+ cmap = Mesh(nodes, faces)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ darts_list = []
+ for d_info in cmap.active_darts():
+ darts_list.append(d_info[0])
+ self.assertFalse(m_analysis.isTruncated(darts_list))
+
+ def test_get_geometric_quality(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2], [0, 1, 4], [0, 4, 1]]
+ cmap = Mesh(nodes, faces)
plot_mesh(cmap)
- self.assertEqual(TMA.isValidAction(cmap, 2, 2), (False, True))
+ m_analysis = TriMeshQualityAnalysis(cmap)
+
+ # Half flat
+ d_to_test = Dart(cmap, 0)
+ self.assertEqual(m_analysis.get_dart_geometric_quality(d_to_test), 4)
+
+ # Full flat
+ d_to_test = Dart(cmap, 11)
+ self.assertEqual(m_analysis.get_dart_geometric_quality(d_to_test), 6)
+
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
+ faces = [[0, 1, 2], [2, 1, 3]]
+ cmap = Mesh(nodes, faces)
+ plot_mesh(cmap)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+
+ # Crossed
+ d_to_test = Dart(cmap, 1)
+ self.assertEqual(m_analysis.get_dart_geometric_quality(d_to_test), 3)
+
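+
The three assertions in test_get_geometric_quality below fix the codes returned by get_dart_geometric_quality for the configurations they build; a small lookup of just what those tests establish (illustrative only, not the library's full definition):

    # Quality codes observed in test_get_geometric_quality.
    DART_GEOMETRIC_QUALITY = {
        4: "half flat",
        6: "full flat",
        3: "crossed",
    }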
+ def test_find_star_vertex(self):
+ # Polygon with a kernel (star-shaped)
+ nodes = [[0.0, 0.0], [1.0, 1.0], [1.0, -1.0], [0.0, -2.0], [-1.0, 0.0], [-0.5, 0.5], [-0.25, -0.25]]
+ faces = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [0, 5, 4], [0, 1, 5]]
+ cmap = Mesh(nodes, faces)
+ plot_mesh(cmap)
+ m_analysis = TriMeshQualityAnalysis(cmap)
+ n_to_test = Node(cmap, 0)
+ self.assertTrue(m_analysis.find_star_vertex(n_to_test)[0])
+
+
+class TestMeshOldAnalysis(unittest.TestCase):
+
+ def test_mesh_regular_score(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [0.5, 1], [-0.5, 1.0], [0.0, 2.0], [-1.0,0.0],[-0.5,-1.0],[0.0,-2.0], [0.5,-1.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [3, 2, 4], [3, 5, 0], [0, 5, 6], [0, 6, 8], [6, 7, 8], [0, 8, 1]]
+ cmap = Mesh(nodes,faces)
+ tma = TriMeshOldAnalysis(cmap)
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = tma.global_score()
+ self.assertEqual((0, 0), (mesh_score, mesh_ideal_score))
+
+ def test_mesh_with_irregularities(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [0.5, 1], [-0.5, 1.0], [0.0, 2.0], [-1.0, 2.0], [-2.0, 1.0], [-1.0, 0.0],
+ [-2.0, 0.0], [-2.0, -1.0], [-0.5, -1.0], [-1.0, -2.0], [0.0, -2.0], [1.0, -2.0],
+ [0.5, -1.0], [2.0, -1.0], [2.0, 0.0], [2.0, 1.0], [1.0, 2.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [3, 2, 4], [7, 0, 3], [7, 10, 0], [10, 14, 0], [0, 14, 1], [10, 12, 14],
+ [3, 4, 5], [3, 5, 6], [3, 6, 7], [7, 6, 8], [7, 8, 9], [7, 9, 10], [10, 9, 11], [10, 11, 12],
+ [14, 12, 13], [14, 13, 15], [1, 14, 15], [1, 15, 16], [1, 16, 17], [1, 17, 2], [2, 17, 18], [2, 18, 4]]
+ cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = tma.global_score()
+ self.assertEqual((6, -2), (mesh_score, mesh_ideal_score))
+
+ def test_mesh_bad_score(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
+ cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = tma.global_score()
+ self.assertEqual((3, 1), (mesh_score, mesh_ideal_score))
+
+ def test_split_score(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
+ cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
+ split_edge_ids(tma, 0, 2)
+ split_edge_ids(tma, 1, 2) # split impossible
+ nodes_score, mesh_score, mesh_ideal_score, adjacency = tma.global_score()
+ self.assertEqual((3, 1), (mesh_score, mesh_ideal_score))
+
+ def test_find_template_opposite_node_not_found(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
+ cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
+ dart_to_test = Dart(cmap, 0)
+ node = tma.find_template_opposite_node(dart_to_test)
+ self.assertEqual(node, None)
+ dart_to_test = Dart(cmap, 2)
+ node = tma.find_template_opposite_node(dart_to_test)
+ self.assertEqual(node, 3)
+
+ def test_is_valid_action(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [0.5, 1], [-0.5, 1.0], [0.0, 2.0], [-1.0, 2.0], [-2.0, 1.0], [-1.0, 0.0],
+ [-2.0, 0.0], [-2.0, -1.0], [-0.5, -1.0], [-1.0, -2.0], [0.0, -2.0], [1.0, -2.0],
+ [0.5, -1.0], [2.0, -1.0], [2.0, 0.0], [2.0, 1.0], [1.0, 2.0]]
+ faces = [[0, 1, 2], [0, 2, 3], [3, 2, 4], [7, 0, 3], [7, 10, 0], [10, 14, 0], [0, 14, 1], [10, 12, 14],
+ [3, 4, 5], [3, 5, 6], [3, 6, 7], [7, 6, 8], [7, 8, 9], [7, 9, 10], [10, 9, 11], [10, 11, 12],
+ [14, 12, 13], [14, 13, 15], [1, 14, 15], [1, 15, 16], [1, 16, 17], [1, 17, 2], [2, 17, 18], [2, 18, 4]]
+ cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
+ split_edge_ids(tma, 0, 1)
+
+ #Boundary dart
+ self.assertEqual(tma.isValidAction(25, 0), (False, True))
+
+ # Flip test
+ self.assertEqual(tma.isValidAction(3, 0), (True, True))
+ self.assertEqual(tma.isValidAction(0, 0), (False, True))
+
+ #Split test
+ self.assertEqual(tma.isValidAction(0, 1), (True, True))
+ split_edge_ids(tma, 1, 19)
+ split_edge_ids(tma, 1, 20)
+ self.assertEqual(tma.isValidAction(20, 1), (True, False))
+ split_edge_ids(tma, 0, 19)
+ split_edge_ids(tma, 0, 22)
+ split_edge_ids(tma, 0, 23)
+ self.assertEqual(tma.isValidAction(20, 1), (False, True))
+
+ #Collapse test
+ self.assertEqual(tma.isValidAction(20, 2), (True, True))
+ plot_mesh(cmap)
+ self.assertEqual(tma.isValidAction(2, 2), (False, True))
#All action test
- self.assertEqual(TMA.isValidAction(cmap, 2, 3), (False, False))
- self.assertEqual(TMA.isValidAction(cmap, 26, 3), (False, False))
- self.assertEqual(TMA.isValidAction(cmap, 9, 3), (True, True))
+ self.assertEqual(tma.isValidAction(2, 3), (False, False))
+ self.assertEqual(tma.isValidAction(26, 3), (False, False))
+ self.assertEqual(tma.isValidAction(9, 3), (True, True))
#One action test
- self.assertEqual(TMA.isValidAction(cmap, 0, 4), (True, True))
- self.assertEqual(TMA.isValidAction(cmap, 9, 4), (True, True))
- self.assertEqual(TMA.isValidAction(cmap, 94, 4), (False, False))
+ self.assertEqual(tma.isValidAction(0, 4), (True, True))
+ self.assertEqual(tma.isValidAction(9, 4), (True, True))
+ self.assertEqual(tma.isValidAction(94, 4), (False, False))
def test_isFlipOk(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
plot_mesh(cmap)
dart_to_test = Dart(cmap, 0)
- self.assertFalse(TMA.isFlipOk(dart_to_test)[0])
+ self.assertFalse(tma.isFlipOk(dart_to_test)[0])
dart_to_test = Dart(cmap, 2)
- self.assertTrue(TMA.isFlipOk(dart_to_test))
+ self.assertTrue(tma.isFlipOk(dart_to_test)[0])
def test_isSplitOk(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
plot_mesh(cmap)
dart_to_test = Dart(cmap, 0)
- self.assertEqual(TMA.isSplitOk(dart_to_test), (False, True))
+ self.assertEqual(tma.isSplitOk(dart_to_test), (False, True))
dart_to_test = Dart(cmap, 2)
- self.assertEqual(TMA.isSplitOk(dart_to_test), (True, True))
+ self.assertEqual(tma.isSplitOk(dart_to_test), (True, True))
def test_isCollapseOk(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
plot_mesh(cmap)
dart_to_test = Dart(cmap, 0)
- self.assertFalse(TMA.isCollapseOk(dart_to_test)[0])
+ self.assertFalse(tma.isCollapseOk(dart_to_test)[0])
dart_to_test = Dart(cmap, 2)
- self.assertFalse(TMA.isCollapseOk(dart_to_test)[0])
+ self.assertFalse(tma.isCollapseOk(dart_to_test)[0])
+ split_edge_ids(tma, 2, 1)
+ split_edge_ids(tma, 1, 5)
+ dart_to_test = Dart(cmap, 17)
+ self.assertTrue(tma.isCollapseOk(dart_to_test)[0])
def test_valid_triangle(self):
+ nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]
+ faces = [[0, 1, 2]]
+ cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
+
# test Lmax invalid
vect_AB = (5.0, 0.0)
vect_AC = (2.5, 5.0)
vect_BC = (-2.5, 5.0)
- self.assertFalse(TMA.valid_triangle(vect_AB, vect_AC, vect_BC))
+ self.assertFalse(tma.valid_triangle(vect_AB, vect_AC, vect_BC))
# test invalid angles
vect_AB = (3.0, 0.0)
vect_AC = (1.5, 0.05)
vect_BC = (-1.5, 0.05)
- self.assertFalse(TMA.valid_triangle(vect_AB, vect_AC, vect_BC))
+ self.assertFalse(tma.valid_triangle(vect_AB, vect_AC, vect_BC))
# test valid triangle
vect_AB = (3.0, 0.0)
vect_AC = (1.5, 3.0)
vect_BC = (-1.5, 3.0)
- self.assertTrue(TMA.valid_triangle(vect_AB, vect_AC, vect_BC))
+ self.assertTrue(tma.valid_triangle(vect_AB, vect_AC, vect_BC))
def test_isTruncated(self):
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]
faces = [[0, 1, 2]]
cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
darts_list = []
for d_info in cmap.active_darts():
darts_list.append(d_info[0])
- self.assertTrue(TMA.isTruncated(cmap, darts_list))
+ self.assertTrue(tma.isTruncated(darts_list))
nodes = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [2.0, 0.0]]
faces = [[0, 1, 2], [0, 2, 3], [1, 4, 2]]
cmap = Mesh(nodes, faces)
+ tma = TriMeshOldAnalysis(cmap)
darts_list = []
for d_info in cmap.active_darts():
darts_list.append(d_info[0])
- self.assertFalse(TMA.isTruncated(cmap, darts_list))
+ self.assertFalse(tma.isTruncated(darts_list))
if __name__ == '__main__':
unittest.main()
+
+
+ # def test_valid_triangle(self):
+ # # test Lmax invalid
+ # vect_AB = (5.0, 0.0)
+ # vect_AC = (2.5, 5.0)
+ # vect_BC = (-2.5, 5.0)
+ # self.assertFalse(m_analysis.valid_triangle(vect_AB, vect_AC, vect_BC))
+ # # test invalid angles
+ # vect_AB = (3.0, 0.0)
+ # vect_AC = (1.5, 0.05)
+ # vect_BC = (-1.5, 0.05)
+ # self.assertFalse(m_analysis.valid_triangle(vect_AB, vect_AC, vect_BC))
+ # # test valid triangle
+ # vect_AB = (3.0, 0.0)
+ # vect_AC = (1.5, 3.0)
+ # vect_BC = (-1.5, 3.0)
+ # self.assertTrue(m_analysis.valid_triangle(vect_AB, vect_AC, vect_BC))
diff --git a/training/config_PPO_SB3.yaml b/training/config/quadmesh_config_PPO_SB3.yaml
similarity index 76%
rename from training/config_PPO_SB3.yaml
rename to training/config/quadmesh_config_PPO_SB3.yaml
index 254798b..9a1d4d4 100644
--- a/training/config_PPO_SB3.yaml
+++ b/training/config/quadmesh_config_PPO_SB3.yaml
@@ -1,5 +1,5 @@
project_name : "Quadmesh"
-experiment_name : "test"
+experiment_name : "TEST"
description : ""
total_timesteps : 10000
@@ -7,7 +7,9 @@ total_timesteps : 10000
paths:
log_dir : "training/results/quad-sb3/"
policy_saving_dir : "training/policy_saved/quad-sb3/"
- wandb_model_saving_dir : "training/wandb_models/"
+ wandb_model_saving_dir : "training/wandb_models/quad-sb3/"
+ episode_recording_dir: "training/results/quad-sb3/episode_recording/"
+ observation_counts_dir: "training/results/quad-sb3/observation_counts/"
dataset:
evaluation_mesh_file_path : "mesh_files/simple_quad.msh"
@@ -27,6 +29,7 @@ env:
reward_function : 0 # 0 if basics, 1 if penalize, 2 otherwise
render_mode : null
obs_count : true
+ analysis_type : "old"
ppo:
policy : MlpPolicy
@@ -46,9 +49,9 @@ eval:
n_darts_selected: 10
deep: 12
obs_size: 120
- render_mode: "human"
+ render_mode: null
action_restriction: false
- with_degree_observation: false
+ with_quality_observation: false
metrics:
diff --git a/training/config_PPO_perso.yaml b/training/config/quadmesh_config_PPO_perso.yaml
similarity index 68%
rename from training/config_PPO_perso.yaml
rename to training/config/quadmesh_config_PPO_perso.yaml
index 5e946dc..3b374ed 100644
--- a/training/config_PPO_perso.yaml
+++ b/training/config/quadmesh_config_PPO_perso.yaml
@@ -1,11 +1,13 @@
project_name : "Quadmesh"
-experiment_name : "random_basic_quad_test_config-TEST"
+experiment_name : "TEST"
description : ""
paths:
log_dir : "training/results/quad-perso/"
policy_saving_dir : "training/policy_saved/quad-perso/"
- wandb_model_saving_dir : "training/wandb_models/"
+ wandb_model_saving_dir : "training/wandb_models/quad-perso/"
+ episode_recording_dir: "training/results/quad-perso/episode_recording/"
+ observation_counts_dir: "training/results/quad-perso/observation_counts/"
dataset:
evaluation_mesh_file_path : "mesh_files/simple_quad.msh"
@@ -24,10 +26,12 @@ env:
reward_function : 0 # 0 if basics, 1 if penalize, 2 otherwise
render_mode: null
obs_count: true
+ analysis_type : "old"
ppo:
- n_iterations : 5
- n_episodes_per_iteration : 50
+ n_actions : 4
+ n_iterations : 2
+ n_episodes_per_iteration : 20
n_epochs : 5
batch_size: 64
learning_rate : 0.0001
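
Both renamed configs add an env.analysis_type switch plus new recording and observation-count directories under paths, and the perso config now exposes ppo.n_actions. These are read further down (in exploit_PPO_perso.py) with yaml.safe_load; a minimal sketch of pulling the new keys, using the renamed path above:

    import yaml

    with open("training/config/quadmesh_config_PPO_perso.yaml", "r") as f:
        config = yaml.safe_load(f)

    analysis_type = config["env"]["analysis_type"]            # "old" in this config
    recording_dir = config["paths"]["episode_recording_dir"]  # new episode-recording directory
    n_actions = config["ppo"]["n_actions"]                    # used to decode flat policy actions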
diff --git a/training/exploit.py b/training/exploit.py
deleted file mode 100644
index 8f02119..0000000
--- a/training/exploit.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import mesh_model.random_trimesh as TM
-import torch
-from environment.trimesh_env import TriMesh
-from model_RL.utilities.actor_critic_networks import Actor
-
-from view.mesh_plotter.create_plots import plot_test_results
-from view.mesh_plotter.mesh_plots import plot_dataset
-
-from model_RL.evaluate_model import testPolicy
-
-LOCAL_MESH_FEAT = 0
-
-
-def exploit():
- mesh_size = 12
- feature = LOCAL_MESH_FEAT
-
- dataset = [TM.random_mesh(30) for _ in range(9)]
- plot_dataset(dataset)
-
- env = TriMesh(None, mesh_size, max_steps=60, feat=feature)
-
-
- actor = Actor(env, 30, 15, lr=0.0001)
- actor.load_state_dict(torch.load('policy_saved/actor_network.pth'))
-
- avg_steps, avg_wins, avg_rewards, final_meshes = testPolicy(actor, 30, dataset, 100)
-
- if avg_steps is not None:
- plot_test_results(avg_rewards, avg_wins, avg_steps)
- plot_dataset(final_meshes)
diff --git a/training/exploit_PPO_perso.py b/training/exploit_PPO_perso.py
index 7355785..275bee7 100644
--- a/training/exploit_PPO_perso.py
+++ b/training/exploit_PPO_perso.py
@@ -1,36 +1,42 @@
-from numpy import ndarray
-
-import gymnasium as gym
import json
import torch
+import copy
+import numpy as np
+import gymnasium as gym
+import yaml
+
+from tqdm import tqdm
+from numpy import ndarray
from torch.distributions import Categorical
-from model_RL.PPO_model_pers import Actor
-from mesh_model.mesh_analysis.global_mesh_analysis import global_score
+from mesh_model.mesh_analysis.quadmesh_analysis import QuadMeshOldAnalysis
+from mesh_model.mesh_analysis.trimesh_analysis import TriMeshOldAnalysis, TriMeshQualityAnalysis
+from mesh_model.mesh_struct.mesh_elements import Dart
from mesh_model.mesh_struct.mesh import Mesh
from mesh_model.reader import read_gmsh
+from model_RL.PPO_model_pers import Actor
+
from view.mesh_plotter.create_plots import plot_test_results
from view.mesh_plotter.mesh_plots import plot_dataset
from environment.actions.smoothing import smoothing_mean
-import mesh_model.random_quadmesh as QM
+
from environment.gymnasium_envs.quadmesh_env.envs.quadmesh import QuadMeshEnv
-import numpy as np
-import copy
-from tqdm import tqdm
+from environment.gymnasium_envs.trimesh_full_env.envs.trimesh import TriMeshEnvFull
+import mesh_model.random_quadmesh as QM
def testPolicy(
actor,
n_eval_episodes: int,
- env_config,
+ config,
dataset: list[Mesh]
) -> tuple[ndarray, ndarray, ndarray, ndarray, list[Mesh]]:
"""
Tests policy on each mesh of a dataset with n_eval_episodes.
- :param policy: the policy to test
+ :param actor: the policy to test
:param n_eval_episodes: number of evaluation episodes on each mesh
+ :param config: configuration
:param dataset: list of mesh objects
- :param max_steps: max steps to evaluate
:return: average length of evaluation episodes, number of wins,average reward per mesh, dataset with the modified meshes
"""
print('Testing policy')
@@ -42,14 +48,15 @@ def testPolicy(
for i, mesh in tqdm(enumerate(dataset, 1)):
best_mesh = mesh
env = gym.make(
- env_config["env_name"],
- max_episode_steps=30,
- mesh = mesh,
- n_darts_selected=env_config["n_darts_selected"],
- deep= env_config["deep"],
- action_restriction=env_config["action_restriction"],
- with_degree_obs=env_config["with_degree_observation"],
- render_mode="human"
+ config["env"]["env_id"],
+ max_episode_steps=config["env"]["max_episode_steps"],
+ mesh=mesh,
+ #mesh_size = 30,
+ n_darts_selected=config["env"]["n_darts_selected"],
+ deep=config["env"]["deep"],
+ action_restriction=config["env"]["action_restriction"],
+ with_quality_obs=config["env"]["with_quality_observation"],
+ render_mode=config["env"]["render_mode"],
)
for _ in range(n_eval_episodes):
terminated = False
@@ -63,8 +70,8 @@ def testPolicy(
dist = Categorical(pmf)
action = dist.sample()
action = action.tolist()
- action_dart = int(action / 4)
- action_type = action % 4
+ action_dart = int(action / config["ppo"]["n_actions"])
+ action_type = action % config["ppo"]["n_actions"]
gymnasium_action = [action_type, action_dart]
if action is None:
env.terminal = True
@@ -74,7 +81,7 @@ def testPolicy(
ep_length += 1
if terminated:
nb_wins[i-1] += 1
- if isBetterMesh(best_mesh, info['mesh']):
+ if isBetterMesh(best_mesh, info['mesh'], config["env"]["analysis_type"]):
best_mesh = copy.deepcopy(info['mesh'])
avg_length[i-1] += ep_length
avg_mesh_rewards[i-1] += ep_mesh_rewards
@@ -90,8 +97,27 @@ def isBetterPolicy(actual_best_policy, policy_to_test):
if actual_best_policy is None:
return True
-def isBetterMesh(best_mesh, actual_mesh):
- if best_mesh is None or global_score(best_mesh)[1] > global_score(actual_mesh)[1]:
+def isBetterMesh(best_mesh, actual_mesh, analysis_type):
+ tri = False
+ for d_info in actual_mesh.dart_info:
+ if d_info[0] >= 0:
+ d = Dart(actual_mesh, d_info[0])
+ if d == ((d.get_beta(1)).get_beta(1)).get_beta(1):
+ tri = True
+ else:
+ tri = False
+ break
+ if tri:
+ if analysis_type == "old":
+ ma_best_mesh = TriMeshOldAnalysis(best_mesh)
+ ma_actual_mesh = TriMeshOldAnalysis(actual_mesh)
+ else:
+ ma_best_mesh = TriMeshQualityAnalysis(best_mesh)
+ ma_actual_mesh = TriMeshQualityAnalysis(actual_mesh)
+ else:
+ ma_best_mesh = QuadMeshOldAnalysis(best_mesh)
+ ma_actual_mesh = QuadMeshOldAnalysis(actual_mesh)
+ if best_mesh is None or ma_best_mesh.global_score()[1] > ma_actual_mesh.global_score()[1]:
return True
else:
return False
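
The rewritten isBetterMesh above first classifies the mesh by walking beta1 three times from each active dart (a triangular face closes after exactly three beta1 steps) and only then instantiates the matching analysis class. A standalone sketch of just that face-type check, with a hypothetical helper name:

    from mesh_model.mesh_struct.mesh_elements import Dart

    def looks_triangular(mesh) -> bool:
        # One counter-example is enough to conclude the mesh is not purely triangular.
        for d_info in mesh.dart_info:
            if d_info[0] >= 0:  # active darts only
                d = Dart(mesh, d_info[0])
                if d != d.get_beta(1).get_beta(1).get_beta(1):
                    return False
        return True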
@@ -99,28 +125,29 @@ def isBetterMesh(best_mesh, actual_mesh):
if __name__ == '__main__':
-
#Create a dataset of 9 meshes
- mesh = read_gmsh("../mesh_files/medium_quad.msh")
+ mesh = read_gmsh("../mesh_files/t1_tri.msh")
dataset = [mesh for _ in range(9)]
- with open("../environment/environment_config.json", "r") as f:
- env_config = json.load(f)
+ with open("../training/config/trimesh_config_PPO_perso.yaml", "r") as f:
+ config = yaml.safe_load(f)
plot_dataset(dataset)
env = gym.make(
- env_config["env_name"],
+ config["env"]["env_id"],
+ max_episode_steps=config["env"]["max_episode_steps"],
mesh=mesh,
- max_episode_steps=env_config["max_episode_steps"],
- n_darts_selected=env_config["n_darts_selected"],
- deep=env_config["deep"],
- action_restriction=env_config["action_restriction"],
- with_degree_obs=env_config["with_degree_observation"]
+ # mesh_size = 30,
+ n_darts_selected=config["env"]["n_darts_selected"],
+ deep=config["env"]["deep"],
+ action_restriction=config["env"]["action_restriction"],
+ with_quality_obs=config["env"]["with_quality_observation"],
+ render_mode=config["env"]["render_mode"],
)
#Load the model
- actor = Actor(env, 10*8, 4*10, lr=0.0001)
- actor.load_state_dict(torch.load('policy_saved/quad-perso/medium_quad_perso-2.pth'))
- avg_steps, avg_wins, avg_rewards, normalized_return, final_meshes = testPolicy(actor, 15, env_config, dataset)
+ actor = Actor(env, config["env"]["obs_size"], config["ppo"]["n_actions"], n_darts_observed=config["env"]["n_darts_selected"], lr=0.0001)
+ actor.load_state_dict(torch.load('policy_saved/tri-perso/TEST-Exploit.pth'))
+ avg_steps, avg_wins, avg_rewards, normalized_return, final_meshes = testPolicy(actor, 15, config, dataset)
plot_test_results(avg_rewards, avg_wins, avg_steps, normalized_return)
plot_dataset(final_meshes)
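
In the evaluation loop above, the flat action index sampled from the categorical policy is now decoded with config["ppo"]["n_actions"] instead of a hard-coded 4. A tiny worked example of that decoding (values purely illustrative):

    n_actions = 4        # config["ppo"]["n_actions"]
    action = 11          # flat index sampled from the policy

    action_dart = int(action / n_actions)          # int(11 / 4) == 2
    action_type = action % n_actions               # 11 % 4 == 3
    gymnasium_action = [action_type, action_dart]  # [3, 2], the pair handed to the environment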
diff --git a/training/exploit_SB3_policy.py b/training/exploit_SB3_policy.py
index 132f234..15ebcf2 100644
--- a/training/exploit_SB3_policy.py
+++ b/training/exploit_SB3_policy.py
@@ -1,5 +1,5 @@
import copy
-import json
+import yaml
import gymnasium as gym
import numpy as np
@@ -8,15 +8,23 @@
from numpy import ndarray
from stable_baselines3 import PPO
-from mesh_model.mesh_analysis.global_mesh_analysis import global_score
+from environment.actions.triangular_actions import flip_edge
+from mesh_model.mesh_analysis.global_mesh_analysis import GlobalMeshAnalysis
+from mesh_model.mesh_analysis.trimesh_analysis import TriMeshOldAnalysis
+from mesh_model.mesh_analysis.quadmesh_analysis import QuadMeshOldAnalysis
+from mesh_model.mesh_analysis.trimesh_analysis import TriMeshQualityAnalysis
from mesh_model.mesh_struct.mesh import Mesh
+from mesh_model.mesh_struct.mesh_elements import Dart
+from mesh_model.random_trimesh import random_mesh, regular_mesh
+from environment.actions.triangular_actions import flip_edge_ids, split_edge_ids
from mesh_model.reader import read_gmsh
from view.mesh_plotter.create_plots import plot_test_results
-from view.mesh_plotter.mesh_plots import plot_dataset
+from view.mesh_plotter.mesh_plots import plot_dataset, plot_mesh
from environment.actions.smoothing import smoothing_mean
-import mesh_model.random_quadmesh as QM
-from environment.gymnasium_envs.quadmesh_env.envs.quadmesh import QuadMeshEnv
+from environment.gymnasium_envs import trimesh_full_env
+
+
def testPolicy(
model,
@@ -42,12 +50,13 @@ def testPolicy(
best_mesh = mesh
env = gym.make(
config["eval"]["eval_env_id"],
- max_episode_steps=20,
+ max_episode_steps=config["eval"]["max_episode_steps"],
mesh = mesh,
+ #mesh_size = 30,
n_darts_selected=config["eval"]["n_darts_selected"],
deep= config["eval"]["deep"],
action_restriction=config["eval"]["action_restriction"],
- with_degree_obs=config["eval"]["with_degree_observation"],
+ with_degree_obs=config["eval"]["with_quality_observation"],
render_mode = config["eval"]["render_mode"],
)
for _ in range(n_eval_episodes):
@@ -66,7 +75,7 @@ def testPolicy(
ep_length += 1
if terminated:
nb_wins[i-1] += 1
- if isBetterMesh(best_mesh, info['mesh']):
+ if isBetterMesh(best_mesh, info['mesh'], config["env"]["analysis_type"]):
best_mesh = copy.deepcopy(info['mesh'])
avg_length[i-1] += ep_length
avg_mesh_rewards[i-1] += ep_mesh_rewards
@@ -82,25 +91,50 @@ def isBetterPolicy(actual_best_policy, policy_to_test):
if actual_best_policy is None:
return True
-def isBetterMesh(best_mesh, actual_mesh):
- if best_mesh is None or global_score(best_mesh)[1] > global_score(actual_mesh)[1]:
+def isBetterMesh(best_mesh, actual_mesh, analysis_type):
+ tri = False
+ for d_info in actual_mesh.dart_info:
+ if d_info[0] >= 0:
+ d = Dart(actual_mesh, d_info[0])
+ if d == ((d.get_beta(1)).get_beta(1)).get_beta(1):
+ tri = True
+ else:
+ tri = False
+ break
+ if tri:
+ if analysis_type == "old":
+ ma_best_mesh = TriMeshOldAnalysis(best_mesh)
+ ma_actual_mesh = TriMeshOldAnalysis(actual_mesh)
+ else:
+ ma_best_mesh = TriMeshQualityAnalysis(best_mesh)
+ ma_actual_mesh = TriMeshQualityAnalysis(actual_mesh)
+ else:
+ ma_best_mesh = QuadMeshOldAnalysis(best_mesh)
+ ma_actual_mesh = QuadMeshOldAnalysis(actual_mesh)
+ if best_mesh is None or ma_best_mesh.global_score()[1] > ma_actual_mesh.global_score()[1]:
return True
else:
return False
if __name__ == '__main__':
-
#Create a dataset of 9 meshes
- mesh = read_gmsh("../mesh_files/medium_quad.msh")
- dataset = [mesh for _ in range(9)]
- with open("../environment/environment_config.json", "r") as f:
- env_config = json.load(f)
+ mesh = read_gmsh("../mesh_files/tri-star.msh")
+ # ma = TriMeshQualityAnalysis(mesh)
+ # split_edge_ids(ma, 5, 2)
+ # split_edge_ids(ma, 2, 10)
+ # flip_edge_ids(ma, 3,7)
+ # #plot_mesh(mesh)
+
+ dataset = [mesh for _ in range(1)]
+ # PARAMETERS CONFIGURATION
+ with open("../training/config/trimesh_config_PPO_SB3.yaml", "r") as f:
+ config = yaml.safe_load(f)
plot_dataset(dataset)
#Load the model
- model = PPO.load("policy_saved/quad/4-actions-quad-simple-PPO43.zip")
- avg_steps, avg_wins, avg_rewards, avg_normalized_return, final_meshes = testPolicy(model, 1, env_config, dataset)
+ model = PPO.load("policy_saved/tri-sb3/PPO_SB3_tri-delaunay-v0.zip")
+ avg_steps, avg_wins, avg_rewards, avg_normalized_return, final_meshes = testPolicy(model, 1, config, dataset)
plot_test_results(avg_rewards, avg_wins, avg_steps, avg_normalized_return)
plot_dataset(final_meshes)
diff --git a/training/exploit_trimesh.py b/training/exploit_trimesh.py
index b8c323f..4cd2c80 100644
--- a/training/exploit_trimesh.py
+++ b/training/exploit_trimesh.py
@@ -2,8 +2,8 @@
import torch
import json
import gymnasium as gym
-from environment.trimesh_env import TriMesh
-from model_RL.utilities.actor_critic_networks import Actor
+from environment.old_files.trimesh_env import TriMesh
+from model_RL.old_files.utilities import Actor
from mesh_model.reader import read_gmsh
from view.mesh_plotter.create_plots import plot_test_results
from view.mesh_plotter.mesh_plots import plot_dataset
@@ -39,7 +39,7 @@ def exploit():
#Create a dataset of 9 meshes
dataset = [mesh for _ in range(9)]
- with open("../environment/environment_config.json", "r") as f:
+ with open("../environment/old_files/environment_config.json", "r") as f:
env_config = json.load(f)
plot_dataset(dataset)
diff --git a/training/results/trimesh_results/FLIP/PPO_10/events.out.tfevents.1735828642.UN00315924-UNAL.dam.intra.cea.fr.292560.0 b/training/results/0-trimesh_results/FLIP/PPO_10/events.out.tfevents.1735828642.UN00315924-UNAL.dam.intra.cea.fr.292560.0
similarity index 100%
rename from training/results/trimesh_results/FLIP/PPO_10/events.out.tfevents.1735828642.UN00315924-UNAL.dam.intra.cea.fr.292560.0
rename to training/results/0-trimesh_results/FLIP/PPO_10/events.out.tfevents.1735828642.UN00315924-UNAL.dam.intra.cea.fr.292560.0
diff --git a/training/results/trimesh_results/FLIP/PPO_9/events.out.tfevents.1735828526.UN00315924-UNAL.dam.intra.cea.fr.291743.0 b/training/results/0-trimesh_results/FLIP/PPO_9/events.out.tfevents.1735828526.UN00315924-UNAL.dam.intra.cea.fr.291743.0
similarity index 100%
rename from training/results/trimesh_results/FLIP/PPO_9/events.out.tfevents.1735828526.UN00315924-UNAL.dam.intra.cea.fr.291743.0
rename to training/results/0-trimesh_results/FLIP/PPO_9/events.out.tfevents.1735828526.UN00315924-UNAL.dam.intra.cea.fr.291743.0
diff --git a/training/results/trimesh_results/e1/PPO_restricted-v0/events.out.tfevents.1734431836.UN00315924-UNAL.dam.intra.cea.fr.757470.0 b/training/results/0-trimesh_results/e1/PPO_restricted-v0/events.out.tfevents.1734431836.UN00315924-UNAL.dam.intra.cea.fr.757470.0
similarity index 100%
rename from training/results/trimesh_results/e1/PPO_restricted-v0/events.out.tfevents.1734431836.UN00315924-UNAL.dam.intra.cea.fr.757470.0
rename to training/results/0-trimesh_results/e1/PPO_restricted-v0/events.out.tfevents.1734431836.UN00315924-UNAL.dam.intra.cea.fr.757470.0
diff --git a/training/results/trimesh_results/e1/PPO_restricted-v1/events.out.tfevents.1734514338.UN00315924-UNAL.dam.intra.cea.fr.70489.0 b/training/results/0-trimesh_results/e1/PPO_restricted-v1/events.out.tfevents.1734514338.UN00315924-UNAL.dam.intra.cea.fr.70489.0
similarity index 100%
rename from training/results/trimesh_results/e1/PPO_restricted-v1/events.out.tfevents.1734514338.UN00315924-UNAL.dam.intra.cea.fr.70489.0
rename to training/results/0-trimesh_results/e1/PPO_restricted-v1/events.out.tfevents.1734514338.UN00315924-UNAL.dam.intra.cea.fr.70489.0
diff --git a/training/results/trimesh_results/e1/PPO_restricted-v2/events.out.tfevents.1734517568.UN00315924-UNAL.dam.intra.cea.fr.83005.0 b/training/results/0-trimesh_results/e1/PPO_restricted-v2/events.out.tfevents.1734517568.UN00315924-UNAL.dam.intra.cea.fr.83005.0
similarity index 100%
rename from training/results/trimesh_results/e1/PPO_restricted-v2/events.out.tfevents.1734517568.UN00315924-UNAL.dam.intra.cea.fr.83005.0
rename to training/results/0-trimesh_results/e1/PPO_restricted-v2/events.out.tfevents.1734517568.UN00315924-UNAL.dam.intra.cea.fr.83005.0
diff --git a/training/results/trimesh_results/e1/PPO_unrestricted-v0/events.out.tfevents.1734431823.UN00315924-UNAL.dam.intra.cea.fr.757302.0 b/training/results/0-trimesh_results/e1/PPO_unrestricted-v0/events.out.tfevents.1734431823.UN00315924-UNAL.dam.intra.cea.fr.757302.0
similarity index 100%
rename from training/results/trimesh_results/e1/PPO_unrestricted-v0/events.out.tfevents.1734431823.UN00315924-UNAL.dam.intra.cea.fr.757302.0
rename to training/results/0-trimesh_results/e1/PPO_unrestricted-v0/events.out.tfevents.1734431823.UN00315924-UNAL.dam.intra.cea.fr.757302.0
diff --git a/training/results/trimesh_results/e1/PPO_unrestricted-v1/events.out.tfevents.1734513709.UN00315924-UNAL.dam.intra.cea.fr.67221.0 b/training/results/0-trimesh_results/e1/PPO_unrestricted-v1/events.out.tfevents.1734513709.UN00315924-UNAL.dam.intra.cea.fr.67221.0
similarity index 100%
rename from training/results/trimesh_results/e1/PPO_unrestricted-v1/events.out.tfevents.1734513709.UN00315924-UNAL.dam.intra.cea.fr.67221.0
rename to training/results/0-trimesh_results/e1/PPO_unrestricted-v1/events.out.tfevents.1734513709.UN00315924-UNAL.dam.intra.cea.fr.67221.0
diff --git a/training/results/trimesh_results/e1/PPO_unrestricted-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0 b/training/results/0-trimesh_results/e1/PPO_unrestricted-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
similarity index 100%
rename from training/results/trimesh_results/e1/PPO_unrestricted-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
rename to training/results/0-trimesh_results/e1/PPO_unrestricted-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
diff --git a/training/results/trimesh_results/e2/PPO_10-v0/events.out.tfevents.1734446254.UN00315924-UNAL.dam.intra.cea.fr.817111.0 b/training/results/0-trimesh_results/e2/PPO_10-v0/events.out.tfevents.1734446254.UN00315924-UNAL.dam.intra.cea.fr.817111.0
similarity index 100%
rename from training/results/trimesh_results/e2/PPO_10-v0/events.out.tfevents.1734446254.UN00315924-UNAL.dam.intra.cea.fr.817111.0
rename to training/results/0-trimesh_results/e2/PPO_10-v0/events.out.tfevents.1734446254.UN00315924-UNAL.dam.intra.cea.fr.817111.0
diff --git a/training/results/trimesh_results/e2/PPO_10-v1/events.out.tfevents.1734553188.UN00315924-UNAL.dam.intra.cea.fr.173728.0 b/training/results/0-trimesh_results/e2/PPO_10-v1/events.out.tfevents.1734553188.UN00315924-UNAL.dam.intra.cea.fr.173728.0
similarity index 100%
rename from training/results/trimesh_results/e2/PPO_10-v1/events.out.tfevents.1734553188.UN00315924-UNAL.dam.intra.cea.fr.173728.0
rename to training/results/0-trimesh_results/e2/PPO_10-v1/events.out.tfevents.1734553188.UN00315924-UNAL.dam.intra.cea.fr.173728.0
diff --git a/training/results/trimesh_results/e2/PPO_20-v0/events.out.tfevents.1734432461.UN00315924-UNAL.dam.intra.cea.fr.760588.0 b/training/results/0-trimesh_results/e2/PPO_20-v0/events.out.tfevents.1734432461.UN00315924-UNAL.dam.intra.cea.fr.760588.0
similarity index 100%
rename from training/results/trimesh_results/e2/PPO_20-v0/events.out.tfevents.1734432461.UN00315924-UNAL.dam.intra.cea.fr.760588.0
rename to training/results/0-trimesh_results/e2/PPO_20-v0/events.out.tfevents.1734432461.UN00315924-UNAL.dam.intra.cea.fr.760588.0
diff --git a/training/results/trimesh_results/e2/PPO_20-v1/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0 b/training/results/0-trimesh_results/e2/PPO_20-v1/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
similarity index 100%
rename from training/results/trimesh_results/e2/PPO_20-v1/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
rename to training/results/0-trimesh_results/e2/PPO_20-v1/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
diff --git a/training/results/trimesh_results/e2/PPO_30-v/events.out.tfevents.1734555115.UN00315924-UNAL.dam.intra.cea.fr.181453.0 b/training/results/0-trimesh_results/e2/PPO_30-v/events.out.tfevents.1734555115.UN00315924-UNAL.dam.intra.cea.fr.181453.0
similarity index 100%
rename from training/results/trimesh_results/e2/PPO_30-v/events.out.tfevents.1734555115.UN00315924-UNAL.dam.intra.cea.fr.181453.0
rename to training/results/0-trimesh_results/e2/PPO_30-v/events.out.tfevents.1734555115.UN00315924-UNAL.dam.intra.cea.fr.181453.0
diff --git a/training/results/trimesh_results/e2/PPO_30-v0/events.out.tfevents.1734435261.UN00315924-UNAL.dam.intra.cea.fr.775239.0 b/training/results/0-trimesh_results/e2/PPO_30-v0/events.out.tfevents.1734435261.UN00315924-UNAL.dam.intra.cea.fr.775239.0
similarity index 100%
rename from training/results/trimesh_results/e2/PPO_30-v0/events.out.tfevents.1734435261.UN00315924-UNAL.dam.intra.cea.fr.775239.0
rename to training/results/0-trimesh_results/e2/PPO_30-v0/events.out.tfevents.1734435261.UN00315924-UNAL.dam.intra.cea.fr.775239.0
diff --git a/training/results/trimesh_results/e2/PPO_30-v1/events.out.tfevents.1734559178.UN00315924-UNAL.dam.intra.cea.fr.195914.0 b/training/results/0-trimesh_results/e2/PPO_30-v1/events.out.tfevents.1734559178.UN00315924-UNAL.dam.intra.cea.fr.195914.0
similarity index 100%
rename from training/results/trimesh_results/e2/PPO_30-v1/events.out.tfevents.1734559178.UN00315924-UNAL.dam.intra.cea.fr.195914.0
rename to training/results/0-trimesh_results/e2/PPO_30-v1/events.out.tfevents.1734559178.UN00315924-UNAL.dam.intra.cea.fr.195914.0
diff --git a/training/results/trimesh_results/e2/PPO_5-v0/events.out.tfevents.1734432449.UN00315924-UNAL.dam.intra.cea.fr.760459.0 b/training/results/0-trimesh_results/e2/PPO_5-v0/events.out.tfevents.1734432449.UN00315924-UNAL.dam.intra.cea.fr.760459.0
similarity index 100%
rename from training/results/trimesh_results/e2/PPO_5-v0/events.out.tfevents.1734432449.UN00315924-UNAL.dam.intra.cea.fr.760459.0
rename to training/results/0-trimesh_results/e2/PPO_5-v0/events.out.tfevents.1734432449.UN00315924-UNAL.dam.intra.cea.fr.760459.0
diff --git a/training/results/trimesh_results/e2/PPO_5-v1/events.out.tfevents.1734552568.UN00315924-UNAL.dam.intra.cea.fr.170760.0 b/training/results/0-trimesh_results/e2/PPO_5-v1/events.out.tfevents.1734552568.UN00315924-UNAL.dam.intra.cea.fr.170760.0
similarity index 100%
rename from training/results/trimesh_results/e2/PPO_5-v1/events.out.tfevents.1734552568.UN00315924-UNAL.dam.intra.cea.fr.170760.0
rename to training/results/0-trimesh_results/e2/PPO_5-v1/events.out.tfevents.1734552568.UN00315924-UNAL.dam.intra.cea.fr.170760.0
diff --git a/training/results/trimesh_results/e3/PPO_8_deep3-v1/events.out.tfevents.1734611428.UN00315924-UNAL.dam.intra.cea.fr.278749.0 b/training/results/0-trimesh_results/e3/PPO_8_deep3-v1/events.out.tfevents.1734611428.UN00315924-UNAL.dam.intra.cea.fr.278749.0
similarity index 100%
rename from training/results/trimesh_results/e3/PPO_8_deep3-v1/events.out.tfevents.1734611428.UN00315924-UNAL.dam.intra.cea.fr.278749.0
rename to training/results/0-trimesh_results/e3/PPO_8_deep3-v1/events.out.tfevents.1734611428.UN00315924-UNAL.dam.intra.cea.fr.278749.0
diff --git a/training/results/trimesh_results/e3/PPO_deep12-v0/events.out.tfevents.1734598832.UN00315924-UNAL.dam.intra.cea.fr.221925.0 b/training/results/0-trimesh_results/e3/PPO_deep12-v0/events.out.tfevents.1734598832.UN00315924-UNAL.dam.intra.cea.fr.221925.0
similarity index 100%
rename from training/results/trimesh_results/e3/PPO_deep12-v0/events.out.tfevents.1734598832.UN00315924-UNAL.dam.intra.cea.fr.221925.0
rename to training/results/0-trimesh_results/e3/PPO_deep12-v0/events.out.tfevents.1734598832.UN00315924-UNAL.dam.intra.cea.fr.221925.0
diff --git a/training/results/trimesh_results/e3/PPO_deep12-v1/events.out.tfevents.1734606176.UN00315924-UNAL.dam.intra.cea.fr.253849.0 b/training/results/0-trimesh_results/e3/PPO_deep12-v1/events.out.tfevents.1734606176.UN00315924-UNAL.dam.intra.cea.fr.253849.0
similarity index 100%
rename from training/results/trimesh_results/e3/PPO_deep12-v1/events.out.tfevents.1734606176.UN00315924-UNAL.dam.intra.cea.fr.253849.0
rename to training/results/0-trimesh_results/e3/PPO_deep12-v1/events.out.tfevents.1734606176.UN00315924-UNAL.dam.intra.cea.fr.253849.0
diff --git a/training/results/trimesh_results/e3/PPO_deep3-v0/events.out.tfevents.1734559585.UN00315924-UNAL.dam.intra.cea.fr.197662.0 b/training/results/0-trimesh_results/e3/PPO_deep3-v0/events.out.tfevents.1734559585.UN00315924-UNAL.dam.intra.cea.fr.197662.0
similarity index 100%
rename from training/results/trimesh_results/e3/PPO_deep3-v0/events.out.tfevents.1734559585.UN00315924-UNAL.dam.intra.cea.fr.197662.0
rename to training/results/0-trimesh_results/e3/PPO_deep3-v0/events.out.tfevents.1734559585.UN00315924-UNAL.dam.intra.cea.fr.197662.0
diff --git a/training/results/trimesh_results/e3/PPO_deep3-v1/events.out.tfevents.1734606200.UN00315924-UNAL.dam.intra.cea.fr.254037.0 b/training/results/0-trimesh_results/e3/PPO_deep3-v1/events.out.tfevents.1734606200.UN00315924-UNAL.dam.intra.cea.fr.254037.0
similarity index 100%
rename from training/results/trimesh_results/e3/PPO_deep3-v1/events.out.tfevents.1734606200.UN00315924-UNAL.dam.intra.cea.fr.254037.0
rename to training/results/0-trimesh_results/e3/PPO_deep3-v1/events.out.tfevents.1734606200.UN00315924-UNAL.dam.intra.cea.fr.254037.0
diff --git a/training/results/trimesh_results/e3/PPO_deep3-v2/events.out.tfevents.1734618832.UN00315924-UNAL.dam.intra.cea.fr.310315.0 b/training/results/0-trimesh_results/e3/PPO_deep3-v2/events.out.tfevents.1734618832.UN00315924-UNAL.dam.intra.cea.fr.310315.0
similarity index 100%
rename from training/results/trimesh_results/e3/PPO_deep3-v2/events.out.tfevents.1734618832.UN00315924-UNAL.dam.intra.cea.fr.310315.0
rename to training/results/0-trimesh_results/e3/PPO_deep3-v2/events.out.tfevents.1734618832.UN00315924-UNAL.dam.intra.cea.fr.310315.0
diff --git a/training/results/trimesh_results/e3/PPO_deep6-v0/events.out.tfevents.1734598818.UN00315924-UNAL.dam.intra.cea.fr.221824.0 b/training/results/0-trimesh_results/e3/PPO_deep6-v0/events.out.tfevents.1734598818.UN00315924-UNAL.dam.intra.cea.fr.221824.0
similarity index 100%
rename from training/results/trimesh_results/e3/PPO_deep6-v0/events.out.tfevents.1734598818.UN00315924-UNAL.dam.intra.cea.fr.221824.0
rename to training/results/0-trimesh_results/e3/PPO_deep6-v0/events.out.tfevents.1734598818.UN00315924-UNAL.dam.intra.cea.fr.221824.0
diff --git a/training/results/trimesh_results/e3/PPO_deep6-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0 b/training/results/0-trimesh_results/e3/PPO_deep6-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
similarity index 100%
rename from training/results/trimesh_results/e3/PPO_deep6-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
rename to training/results/0-trimesh_results/e3/PPO_deep6-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
diff --git a/training/results/trimesh_results/e4/PPO_no_deg-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0 b/training/results/0-trimesh_results/e4/PPO_no_deg-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
similarity index 100%
rename from training/results/trimesh_results/e4/PPO_no_deg-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
rename to training/results/0-trimesh_results/e4/PPO_no_deg-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
diff --git a/training/results/trimesh_results/e4/PPO_no_deg-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0 b/training/results/0-trimesh_results/e4/PPO_no_deg-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
similarity index 100%
rename from training/results/trimesh_results/e4/PPO_no_deg-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
rename to training/results/0-trimesh_results/e4/PPO_no_deg-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
diff --git a/training/results/trimesh_results/e4/PPO_with_deg-v0/events.out.tfevents.1735663450.UN00315924-UNAL.dam.intra.cea.fr.13339.0 b/training/results/0-trimesh_results/e4/PPO_with_deg-v0/events.out.tfevents.1735663450.UN00315924-UNAL.dam.intra.cea.fr.13339.0
similarity index 100%
rename from training/results/trimesh_results/e4/PPO_with_deg-v0/events.out.tfevents.1735663450.UN00315924-UNAL.dam.intra.cea.fr.13339.0
rename to training/results/0-trimesh_results/e4/PPO_with_deg-v0/events.out.tfevents.1735663450.UN00315924-UNAL.dam.intra.cea.fr.13339.0
diff --git a/training/results/trimesh_results/e4/PPO_with_deg-v1/events.out.tfevents.1735670821.UN00315924-UNAL.dam.intra.cea.fr.37218.0 b/training/results/0-trimesh_results/e4/PPO_with_deg-v1/events.out.tfevents.1735670821.UN00315924-UNAL.dam.intra.cea.fr.37218.0
similarity index 100%
rename from training/results/trimesh_results/e4/PPO_with_deg-v1/events.out.tfevents.1735670821.UN00315924-UNAL.dam.intra.cea.fr.37218.0
rename to training/results/0-trimesh_results/e4/PPO_with_deg-v1/events.out.tfevents.1735670821.UN00315924-UNAL.dam.intra.cea.fr.37218.0
diff --git a/training/results/trimesh_results/e5/PPO_4/events.out.tfevents.1735770299.UN00315924-UNAL.dam.intra.cea.fr.132277.0 b/training/results/0-trimesh_results/e5/PPO_4/events.out.tfevents.1735770299.UN00315924-UNAL.dam.intra.cea.fr.132277.0
similarity index 100%
rename from training/results/trimesh_results/e5/PPO_4/events.out.tfevents.1735770299.UN00315924-UNAL.dam.intra.cea.fr.132277.0
rename to training/results/0-trimesh_results/e5/PPO_4/events.out.tfevents.1735770299.UN00315924-UNAL.dam.intra.cea.fr.132277.0
diff --git a/training/results/trimesh_results/e5/PPO_m16-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0 b/training/results/0-trimesh_results/e5/PPO_m16-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
similarity index 100%
rename from training/results/trimesh_results/e5/PPO_m16-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
rename to training/results/0-trimesh_results/e5/PPO_m16-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
diff --git a/training/results/trimesh_results/e5/PPO_m16-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0 b/training/results/0-trimesh_results/e5/PPO_m16-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
similarity index 100%
rename from training/results/trimesh_results/e5/PPO_m16-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
rename to training/results/0-trimesh_results/e5/PPO_m16-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
diff --git a/training/results/trimesh_results/e5/PPO_m25-v0/events.out.tfevents.1735744674.UN00315924-UNAL.dam.intra.cea.fr.63182.0 b/training/results/0-trimesh_results/e5/PPO_m25-v0/events.out.tfevents.1735744674.UN00315924-UNAL.dam.intra.cea.fr.63182.0
similarity index 100%
rename from training/results/trimesh_results/e5/PPO_m25-v0/events.out.tfevents.1735744674.UN00315924-UNAL.dam.intra.cea.fr.63182.0
rename to training/results/0-trimesh_results/e5/PPO_m25-v0/events.out.tfevents.1735744674.UN00315924-UNAL.dam.intra.cea.fr.63182.0
diff --git a/training/results/trimesh_results/e5/PPO_m25-v1/events.out.tfevents.1735761690.UN00315924-UNAL.dam.intra.cea.fr.109198.0 b/training/results/0-trimesh_results/e5/PPO_m25-v1/events.out.tfevents.1735761690.UN00315924-UNAL.dam.intra.cea.fr.109198.0
similarity index 100%
rename from training/results/trimesh_results/e5/PPO_m25-v1/events.out.tfevents.1735761690.UN00315924-UNAL.dam.intra.cea.fr.109198.0
rename to training/results/0-trimesh_results/e5/PPO_m25-v1/events.out.tfevents.1735761690.UN00315924-UNAL.dam.intra.cea.fr.109198.0
diff --git a/training/results/trimesh_results/e5/PPO_m40-v0/events.out.tfevents.1735744703.UN00315924-UNAL.dam.intra.cea.fr.63292.0 b/training/results/0-trimesh_results/e5/PPO_m40-v0/events.out.tfevents.1735744703.UN00315924-UNAL.dam.intra.cea.fr.63292.0
similarity index 100%
rename from training/results/trimesh_results/e5/PPO_m40-v0/events.out.tfevents.1735744703.UN00315924-UNAL.dam.intra.cea.fr.63292.0
rename to training/results/0-trimesh_results/e5/PPO_m40-v0/events.out.tfevents.1735744703.UN00315924-UNAL.dam.intra.cea.fr.63292.0
diff --git a/training/results/trimesh_results/e6/PPO_1/events.out.tfevents.1735818941.UN00315924-UNAL.dam.intra.cea.fr.253347.0 b/training/results/0-trimesh_results/e6/PPO_1/events.out.tfevents.1735818941.UN00315924-UNAL.dam.intra.cea.fr.253347.0
similarity index 100%
rename from training/results/trimesh_results/e6/PPO_1/events.out.tfevents.1735818941.UN00315924-UNAL.dam.intra.cea.fr.253347.0
rename to training/results/0-trimesh_results/e6/PPO_1/events.out.tfevents.1735818941.UN00315924-UNAL.dam.intra.cea.fr.253347.0
diff --git a/training/results/trimesh_results/e6/PPO_l30-v0/events.out.tfevents.1735812367.UN00315924-UNAL.dam.intra.cea.fr.228874.0 b/training/results/0-trimesh_results/e6/PPO_l30-v0/events.out.tfevents.1735812367.UN00315924-UNAL.dam.intra.cea.fr.228874.0
similarity index 100%
rename from training/results/trimesh_results/e6/PPO_l30-v0/events.out.tfevents.1735812367.UN00315924-UNAL.dam.intra.cea.fr.228874.0
rename to training/results/0-trimesh_results/e6/PPO_l30-v0/events.out.tfevents.1735812367.UN00315924-UNAL.dam.intra.cea.fr.228874.0
diff --git a/training/results/trimesh_results/e6/PPO_l50-v0/events.out.tfevents.1735812393.UN00315924-UNAL.dam.intra.cea.fr.229064.0 b/training/results/0-trimesh_results/e6/PPO_l50-v0/events.out.tfevents.1735812393.UN00315924-UNAL.dam.intra.cea.fr.229064.0
similarity index 100%
rename from training/results/trimesh_results/e6/PPO_l50-v0/events.out.tfevents.1735812393.UN00315924-UNAL.dam.intra.cea.fr.229064.0
rename to training/results/0-trimesh_results/e6/PPO_l50-v0/events.out.tfevents.1735812393.UN00315924-UNAL.dam.intra.cea.fr.229064.0
diff --git a/training/results/trimesh_results/e6/PPO_l50-v1/events.out.tfevents.1735818955.UN00315924-UNAL.dam.intra.cea.fr.253446.0 b/training/results/0-trimesh_results/e6/PPO_l50-v1/events.out.tfevents.1735818955.UN00315924-UNAL.dam.intra.cea.fr.253446.0
similarity index 100%
rename from training/results/trimesh_results/e6/PPO_l50-v1/events.out.tfevents.1735818955.UN00315924-UNAL.dam.intra.cea.fr.253446.0
rename to training/results/0-trimesh_results/e6/PPO_l50-v1/events.out.tfevents.1735818955.UN00315924-UNAL.dam.intra.cea.fr.253446.0
diff --git a/training/results/trimesh_results/efull/PPO_1/events.out.tfevents.1735818941.UN00315924-UNAL.dam.intra.cea.fr.253347.0 b/training/results/0-trimesh_results/efull/PPO_1/events.out.tfevents.1735818941.UN00315924-UNAL.dam.intra.cea.fr.253347.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_1/events.out.tfevents.1735818941.UN00315924-UNAL.dam.intra.cea.fr.253347.0
rename to training/results/0-trimesh_results/efull/PPO_1/events.out.tfevents.1735818941.UN00315924-UNAL.dam.intra.cea.fr.253347.0
diff --git a/training/results/trimesh_results/efull/PPO_10-v0/events.out.tfevents.1734446254.UN00315924-UNAL.dam.intra.cea.fr.817111.0 b/training/results/0-trimesh_results/efull/PPO_10-v0/events.out.tfevents.1734446254.UN00315924-UNAL.dam.intra.cea.fr.817111.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_10-v0/events.out.tfevents.1734446254.UN00315924-UNAL.dam.intra.cea.fr.817111.0
rename to training/results/0-trimesh_results/efull/PPO_10-v0/events.out.tfevents.1734446254.UN00315924-UNAL.dam.intra.cea.fr.817111.0
diff --git a/training/results/trimesh_results/efull/PPO_10-v1/events.out.tfevents.1734553188.UN00315924-UNAL.dam.intra.cea.fr.173728.0 b/training/results/0-trimesh_results/efull/PPO_10-v1/events.out.tfevents.1734553188.UN00315924-UNAL.dam.intra.cea.fr.173728.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_10-v1/events.out.tfevents.1734553188.UN00315924-UNAL.dam.intra.cea.fr.173728.0
rename to training/results/0-trimesh_results/efull/PPO_10-v1/events.out.tfevents.1734553188.UN00315924-UNAL.dam.intra.cea.fr.173728.0
diff --git a/training/results/trimesh_results/efull/PPO_20-v0/events.out.tfevents.1734432461.UN00315924-UNAL.dam.intra.cea.fr.760588.0 b/training/results/0-trimesh_results/efull/PPO_20-v0/events.out.tfevents.1734432461.UN00315924-UNAL.dam.intra.cea.fr.760588.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_20-v0/events.out.tfevents.1734432461.UN00315924-UNAL.dam.intra.cea.fr.760588.0
rename to training/results/0-trimesh_results/efull/PPO_20-v0/events.out.tfevents.1734432461.UN00315924-UNAL.dam.intra.cea.fr.760588.0
diff --git a/training/results/trimesh_results/efull/PPO_20-v1/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0 b/training/results/0-trimesh_results/efull/PPO_20-v1/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_20-v1/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
rename to training/results/0-trimesh_results/efull/PPO_20-v1/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
diff --git a/training/results/trimesh_results/efull/PPO_30-v/events.out.tfevents.1734555115.UN00315924-UNAL.dam.intra.cea.fr.181453.0 b/training/results/0-trimesh_results/efull/PPO_30-v/events.out.tfevents.1734555115.UN00315924-UNAL.dam.intra.cea.fr.181453.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_30-v/events.out.tfevents.1734555115.UN00315924-UNAL.dam.intra.cea.fr.181453.0
rename to training/results/0-trimesh_results/efull/PPO_30-v/events.out.tfevents.1734555115.UN00315924-UNAL.dam.intra.cea.fr.181453.0
diff --git a/training/results/trimesh_results/efull/PPO_30-v0/events.out.tfevents.1734435261.UN00315924-UNAL.dam.intra.cea.fr.775239.0 b/training/results/0-trimesh_results/efull/PPO_30-v0/events.out.tfevents.1734435261.UN00315924-UNAL.dam.intra.cea.fr.775239.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_30-v0/events.out.tfevents.1734435261.UN00315924-UNAL.dam.intra.cea.fr.775239.0
rename to training/results/0-trimesh_results/efull/PPO_30-v0/events.out.tfevents.1734435261.UN00315924-UNAL.dam.intra.cea.fr.775239.0
diff --git a/training/results/trimesh_results/efull/PPO_30-v1/events.out.tfevents.1734559178.UN00315924-UNAL.dam.intra.cea.fr.195914.0 b/training/results/0-trimesh_results/efull/PPO_30-v1/events.out.tfevents.1734559178.UN00315924-UNAL.dam.intra.cea.fr.195914.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_30-v1/events.out.tfevents.1734559178.UN00315924-UNAL.dam.intra.cea.fr.195914.0
rename to training/results/0-trimesh_results/efull/PPO_30-v1/events.out.tfevents.1734559178.UN00315924-UNAL.dam.intra.cea.fr.195914.0
diff --git a/training/results/trimesh_results/efull/PPO_4/events.out.tfevents.1735770299.UN00315924-UNAL.dam.intra.cea.fr.132277.0 b/training/results/0-trimesh_results/efull/PPO_4/events.out.tfevents.1735770299.UN00315924-UNAL.dam.intra.cea.fr.132277.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_4/events.out.tfevents.1735770299.UN00315924-UNAL.dam.intra.cea.fr.132277.0
rename to training/results/0-trimesh_results/efull/PPO_4/events.out.tfevents.1735770299.UN00315924-UNAL.dam.intra.cea.fr.132277.0
diff --git a/training/results/trimesh_results/efull/PPO_5-v0/events.out.tfevents.1734432449.UN00315924-UNAL.dam.intra.cea.fr.760459.0 b/training/results/0-trimesh_results/efull/PPO_5-v0/events.out.tfevents.1734432449.UN00315924-UNAL.dam.intra.cea.fr.760459.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_5-v0/events.out.tfevents.1734432449.UN00315924-UNAL.dam.intra.cea.fr.760459.0
rename to training/results/0-trimesh_results/efull/PPO_5-v0/events.out.tfevents.1734432449.UN00315924-UNAL.dam.intra.cea.fr.760459.0
diff --git a/training/results/trimesh_results/efull/PPO_5-v1/events.out.tfevents.1734552568.UN00315924-UNAL.dam.intra.cea.fr.170760.0 b/training/results/0-trimesh_results/efull/PPO_5-v1/events.out.tfevents.1734552568.UN00315924-UNAL.dam.intra.cea.fr.170760.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_5-v1/events.out.tfevents.1734552568.UN00315924-UNAL.dam.intra.cea.fr.170760.0
rename to training/results/0-trimesh_results/efull/PPO_5-v1/events.out.tfevents.1734552568.UN00315924-UNAL.dam.intra.cea.fr.170760.0
diff --git a/training/results/trimesh_results/efull/PPO_8_deep3-v1/events.out.tfevents.1734611428.UN00315924-UNAL.dam.intra.cea.fr.278749.0 b/training/results/0-trimesh_results/efull/PPO_8_deep3-v1/events.out.tfevents.1734611428.UN00315924-UNAL.dam.intra.cea.fr.278749.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_8_deep3-v1/events.out.tfevents.1734611428.UN00315924-UNAL.dam.intra.cea.fr.278749.0
rename to training/results/0-trimesh_results/efull/PPO_8_deep3-v1/events.out.tfevents.1734611428.UN00315924-UNAL.dam.intra.cea.fr.278749.0
diff --git a/training/results/trimesh_results/efull/PPO_deep12-v0/events.out.tfevents.1734598832.UN00315924-UNAL.dam.intra.cea.fr.221925.0 b/training/results/0-trimesh_results/efull/PPO_deep12-v0/events.out.tfevents.1734598832.UN00315924-UNAL.dam.intra.cea.fr.221925.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_deep12-v0/events.out.tfevents.1734598832.UN00315924-UNAL.dam.intra.cea.fr.221925.0
rename to training/results/0-trimesh_results/efull/PPO_deep12-v0/events.out.tfevents.1734598832.UN00315924-UNAL.dam.intra.cea.fr.221925.0
diff --git a/training/results/trimesh_results/efull/PPO_deep12-v1/events.out.tfevents.1734606176.UN00315924-UNAL.dam.intra.cea.fr.253849.0 b/training/results/0-trimesh_results/efull/PPO_deep12-v1/events.out.tfevents.1734606176.UN00315924-UNAL.dam.intra.cea.fr.253849.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_deep12-v1/events.out.tfevents.1734606176.UN00315924-UNAL.dam.intra.cea.fr.253849.0
rename to training/results/0-trimesh_results/efull/PPO_deep12-v1/events.out.tfevents.1734606176.UN00315924-UNAL.dam.intra.cea.fr.253849.0
diff --git a/training/results/trimesh_results/efull/PPO_deep3-v0/events.out.tfevents.1734559585.UN00315924-UNAL.dam.intra.cea.fr.197662.0 b/training/results/0-trimesh_results/efull/PPO_deep3-v0/events.out.tfevents.1734559585.UN00315924-UNAL.dam.intra.cea.fr.197662.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_deep3-v0/events.out.tfevents.1734559585.UN00315924-UNAL.dam.intra.cea.fr.197662.0
rename to training/results/0-trimesh_results/efull/PPO_deep3-v0/events.out.tfevents.1734559585.UN00315924-UNAL.dam.intra.cea.fr.197662.0
diff --git a/training/results/trimesh_results/efull/PPO_deep3-v1/events.out.tfevents.1734606200.UN00315924-UNAL.dam.intra.cea.fr.254037.0 b/training/results/0-trimesh_results/efull/PPO_deep3-v1/events.out.tfevents.1734606200.UN00315924-UNAL.dam.intra.cea.fr.254037.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_deep3-v1/events.out.tfevents.1734606200.UN00315924-UNAL.dam.intra.cea.fr.254037.0
rename to training/results/0-trimesh_results/efull/PPO_deep3-v1/events.out.tfevents.1734606200.UN00315924-UNAL.dam.intra.cea.fr.254037.0
diff --git a/training/results/trimesh_results/efull/PPO_deep3-v2/events.out.tfevents.1734618832.UN00315924-UNAL.dam.intra.cea.fr.310315.0 b/training/results/0-trimesh_results/efull/PPO_deep3-v2/events.out.tfevents.1734618832.UN00315924-UNAL.dam.intra.cea.fr.310315.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_deep3-v2/events.out.tfevents.1734618832.UN00315924-UNAL.dam.intra.cea.fr.310315.0
rename to training/results/0-trimesh_results/efull/PPO_deep3-v2/events.out.tfevents.1734618832.UN00315924-UNAL.dam.intra.cea.fr.310315.0
diff --git a/training/results/trimesh_results/efull/PPO_deep6-v0/events.out.tfevents.1734598818.UN00315924-UNAL.dam.intra.cea.fr.221824.0 b/training/results/0-trimesh_results/efull/PPO_deep6-v0/events.out.tfevents.1734598818.UN00315924-UNAL.dam.intra.cea.fr.221824.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_deep6-v0/events.out.tfevents.1734598818.UN00315924-UNAL.dam.intra.cea.fr.221824.0
rename to training/results/0-trimesh_results/efull/PPO_deep6-v0/events.out.tfevents.1734598818.UN00315924-UNAL.dam.intra.cea.fr.221824.0
diff --git a/training/results/trimesh_results/efull/PPO_deep6-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0 b/training/results/0-trimesh_results/efull/PPO_deep6-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_deep6-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
rename to training/results/0-trimesh_results/efull/PPO_deep6-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
diff --git a/training/results/trimesh_results/efull/PPO_l30-v0/events.out.tfevents.1735812367.UN00315924-UNAL.dam.intra.cea.fr.228874.0 b/training/results/0-trimesh_results/efull/PPO_l30-v0/events.out.tfevents.1735812367.UN00315924-UNAL.dam.intra.cea.fr.228874.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_l30-v0/events.out.tfevents.1735812367.UN00315924-UNAL.dam.intra.cea.fr.228874.0
rename to training/results/0-trimesh_results/efull/PPO_l30-v0/events.out.tfevents.1735812367.UN00315924-UNAL.dam.intra.cea.fr.228874.0
diff --git a/training/results/trimesh_results/efull/PPO_l50-v0/events.out.tfevents.1735812393.UN00315924-UNAL.dam.intra.cea.fr.229064.0 b/training/results/0-trimesh_results/efull/PPO_l50-v0/events.out.tfevents.1735812393.UN00315924-UNAL.dam.intra.cea.fr.229064.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_l50-v0/events.out.tfevents.1735812393.UN00315924-UNAL.dam.intra.cea.fr.229064.0
rename to training/results/0-trimesh_results/efull/PPO_l50-v0/events.out.tfevents.1735812393.UN00315924-UNAL.dam.intra.cea.fr.229064.0
diff --git a/training/results/trimesh_results/efull/PPO_l50-v1/events.out.tfevents.1735818955.UN00315924-UNAL.dam.intra.cea.fr.253446.0 b/training/results/0-trimesh_results/efull/PPO_l50-v1/events.out.tfevents.1735818955.UN00315924-UNAL.dam.intra.cea.fr.253446.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_l50-v1/events.out.tfevents.1735818955.UN00315924-UNAL.dam.intra.cea.fr.253446.0
rename to training/results/0-trimesh_results/efull/PPO_l50-v1/events.out.tfevents.1735818955.UN00315924-UNAL.dam.intra.cea.fr.253446.0
diff --git a/training/results/trimesh_results/efull/PPO_m16-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0 b/training/results/0-trimesh_results/efull/PPO_m16-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_m16-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
rename to training/results/0-trimesh_results/efull/PPO_m16-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
diff --git a/training/results/trimesh_results/efull/PPO_m16-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0 b/training/results/0-trimesh_results/efull/PPO_m16-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_m16-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
rename to training/results/0-trimesh_results/efull/PPO_m16-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
diff --git a/training/results/trimesh_results/efull/PPO_m25-v0/events.out.tfevents.1735744674.UN00315924-UNAL.dam.intra.cea.fr.63182.0 b/training/results/0-trimesh_results/efull/PPO_m25-v0/events.out.tfevents.1735744674.UN00315924-UNAL.dam.intra.cea.fr.63182.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_m25-v0/events.out.tfevents.1735744674.UN00315924-UNAL.dam.intra.cea.fr.63182.0
rename to training/results/0-trimesh_results/efull/PPO_m25-v0/events.out.tfevents.1735744674.UN00315924-UNAL.dam.intra.cea.fr.63182.0
diff --git a/training/results/trimesh_results/efull/PPO_m25-v1/events.out.tfevents.1735761690.UN00315924-UNAL.dam.intra.cea.fr.109198.0 b/training/results/0-trimesh_results/efull/PPO_m25-v1/events.out.tfevents.1735761690.UN00315924-UNAL.dam.intra.cea.fr.109198.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_m25-v1/events.out.tfevents.1735761690.UN00315924-UNAL.dam.intra.cea.fr.109198.0
rename to training/results/0-trimesh_results/efull/PPO_m25-v1/events.out.tfevents.1735761690.UN00315924-UNAL.dam.intra.cea.fr.109198.0
diff --git a/training/results/trimesh_results/efull/PPO_m40-v0/events.out.tfevents.1735744703.UN00315924-UNAL.dam.intra.cea.fr.63292.0 b/training/results/0-trimesh_results/efull/PPO_m40-v0/events.out.tfevents.1735744703.UN00315924-UNAL.dam.intra.cea.fr.63292.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_m40-v0/events.out.tfevents.1735744703.UN00315924-UNAL.dam.intra.cea.fr.63292.0
rename to training/results/0-trimesh_results/efull/PPO_m40-v0/events.out.tfevents.1735744703.UN00315924-UNAL.dam.intra.cea.fr.63292.0
diff --git a/training/results/trimesh_results/efull/PPO_no_deg-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0 b/training/results/0-trimesh_results/efull/PPO_no_deg-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_no_deg-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
rename to training/results/0-trimesh_results/efull/PPO_no_deg-v0/events.out.tfevents.1735664190.UN00315924-UNAL.dam.intra.cea.fr.16322.0
diff --git a/training/results/trimesh_results/efull/PPO_no_deg-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0 b/training/results/0-trimesh_results/efull/PPO_no_deg-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_no_deg-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
rename to training/results/0-trimesh_results/efull/PPO_no_deg-v1/events.out.tfevents.1735739679.UN00315924-UNAL.dam.intra.cea.fr.49745.0
diff --git a/training/results/trimesh_results/efull/PPO_restricted-v0/events.out.tfevents.1734431836.UN00315924-UNAL.dam.intra.cea.fr.757470.0 b/training/results/0-trimesh_results/efull/PPO_restricted-v0/events.out.tfevents.1734431836.UN00315924-UNAL.dam.intra.cea.fr.757470.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_restricted-v0/events.out.tfevents.1734431836.UN00315924-UNAL.dam.intra.cea.fr.757470.0
rename to training/results/0-trimesh_results/efull/PPO_restricted-v0/events.out.tfevents.1734431836.UN00315924-UNAL.dam.intra.cea.fr.757470.0
diff --git a/training/results/trimesh_results/efull/PPO_restricted-v1/events.out.tfevents.1734514338.UN00315924-UNAL.dam.intra.cea.fr.70489.0 b/training/results/0-trimesh_results/efull/PPO_restricted-v1/events.out.tfevents.1734514338.UN00315924-UNAL.dam.intra.cea.fr.70489.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_restricted-v1/events.out.tfevents.1734514338.UN00315924-UNAL.dam.intra.cea.fr.70489.0
rename to training/results/0-trimesh_results/efull/PPO_restricted-v1/events.out.tfevents.1734514338.UN00315924-UNAL.dam.intra.cea.fr.70489.0
diff --git a/training/results/trimesh_results/efull/PPO_restricted-v2/events.out.tfevents.1734517568.UN00315924-UNAL.dam.intra.cea.fr.83005.0 b/training/results/0-trimesh_results/efull/PPO_restricted-v2/events.out.tfevents.1734517568.UN00315924-UNAL.dam.intra.cea.fr.83005.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_restricted-v2/events.out.tfevents.1734517568.UN00315924-UNAL.dam.intra.cea.fr.83005.0
rename to training/results/0-trimesh_results/efull/PPO_restricted-v2/events.out.tfevents.1734517568.UN00315924-UNAL.dam.intra.cea.fr.83005.0
diff --git a/training/results/trimesh_results/efull/PPO_unrestricted-v0/events.out.tfevents.1734431823.UN00315924-UNAL.dam.intra.cea.fr.757302.0 b/training/results/0-trimesh_results/efull/PPO_unrestricted-v0/events.out.tfevents.1734431823.UN00315924-UNAL.dam.intra.cea.fr.757302.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_unrestricted-v0/events.out.tfevents.1734431823.UN00315924-UNAL.dam.intra.cea.fr.757302.0
rename to training/results/0-trimesh_results/efull/PPO_unrestricted-v0/events.out.tfevents.1734431823.UN00315924-UNAL.dam.intra.cea.fr.757302.0
diff --git a/training/results/trimesh_results/efull/PPO_unrestricted-v1/events.out.tfevents.1734513709.UN00315924-UNAL.dam.intra.cea.fr.67221.0 b/training/results/0-trimesh_results/efull/PPO_unrestricted-v1/events.out.tfevents.1734513709.UN00315924-UNAL.dam.intra.cea.fr.67221.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_unrestricted-v1/events.out.tfevents.1734513709.UN00315924-UNAL.dam.intra.cea.fr.67221.0
rename to training/results/0-trimesh_results/efull/PPO_unrestricted-v1/events.out.tfevents.1734513709.UN00315924-UNAL.dam.intra.cea.fr.67221.0
diff --git a/training/results/trimesh_results/efull/PPO_unrestricted-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0 b/training/results/0-trimesh_results/efull/PPO_unrestricted-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_unrestricted-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
rename to training/results/0-trimesh_results/efull/PPO_unrestricted-v2/events.out.tfevents.1734516617.UN00315924-UNAL.dam.intra.cea.fr.79025.0
diff --git a/training/results/trimesh_results/efull/PPO_with_deg-v0/events.out.tfevents.1735663450.UN00315924-UNAL.dam.intra.cea.fr.13339.0 b/training/results/0-trimesh_results/efull/PPO_with_deg-v0/events.out.tfevents.1735663450.UN00315924-UNAL.dam.intra.cea.fr.13339.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_with_deg-v0/events.out.tfevents.1735663450.UN00315924-UNAL.dam.intra.cea.fr.13339.0
rename to training/results/0-trimesh_results/efull/PPO_with_deg-v0/events.out.tfevents.1735663450.UN00315924-UNAL.dam.intra.cea.fr.13339.0
diff --git a/training/results/trimesh_results/efull/PPO_with_deg-v1/events.out.tfevents.1735670821.UN00315924-UNAL.dam.intra.cea.fr.37218.0 b/training/results/0-trimesh_results/efull/PPO_with_deg-v1/events.out.tfevents.1735670821.UN00315924-UNAL.dam.intra.cea.fr.37218.0
similarity index 100%
rename from training/results/trimesh_results/efull/PPO_with_deg-v1/events.out.tfevents.1735670821.UN00315924-UNAL.dam.intra.cea.fr.37218.0
rename to training/results/0-trimesh_results/efull/PPO_with_deg-v1/events.out.tfevents.1735670821.UN00315924-UNAL.dam.intra.cea.fr.37218.0
diff --git a/training/results/trimesh_results/final/PPO_1/events.out.tfevents.1735901763.UN00315924-UNAL.dam.intra.cea.fr.608011.0 b/training/results/0-trimesh_results/final/PPO_1/events.out.tfevents.1735901763.UN00315924-UNAL.dam.intra.cea.fr.608011.0
similarity index 100%
rename from training/results/trimesh_results/final/PPO_1/events.out.tfevents.1735901763.UN00315924-UNAL.dam.intra.cea.fr.608011.0
rename to training/results/0-trimesh_results/final/PPO_1/events.out.tfevents.1735901763.UN00315924-UNAL.dam.intra.cea.fr.608011.0
diff --git a/training/results/trimesh_results/final/PPO_2/events.out.tfevents.1735901862.UN00315924-UNAL.dam.intra.cea.fr.608834.0 b/training/results/0-trimesh_results/final/PPO_2/events.out.tfevents.1735901862.UN00315924-UNAL.dam.intra.cea.fr.608834.0
similarity index 100%
rename from training/results/trimesh_results/final/PPO_2/events.out.tfevents.1735901862.UN00315924-UNAL.dam.intra.cea.fr.608834.0
rename to training/results/0-trimesh_results/final/PPO_2/events.out.tfevents.1735901862.UN00315924-UNAL.dam.intra.cea.fr.608834.0
diff --git a/training/results/trimesh_results/final/PPO_3/events.out.tfevents.1738140564.UN00315924-UNAL.dam.intra.cea.fr.233861.0 b/training/results/0-trimesh_results/final/PPO_3/events.out.tfevents.1738140564.UN00315924-UNAL.dam.intra.cea.fr.233861.0
similarity index 100%
rename from training/results/trimesh_results/final/PPO_3/events.out.tfevents.1738140564.UN00315924-UNAL.dam.intra.cea.fr.233861.0
rename to training/results/0-trimesh_results/final/PPO_3/events.out.tfevents.1738140564.UN00315924-UNAL.dam.intra.cea.fr.233861.0
diff --git a/training/results/trimesh_results/final/PPO_4/events.out.tfevents.1738157639.UN00315924-UNAL.dam.intra.cea.fr.304319.0 b/training/results/0-trimesh_results/final/PPO_4/events.out.tfevents.1738157639.UN00315924-UNAL.dam.intra.cea.fr.304319.0
similarity index 100%
rename from training/results/trimesh_results/final/PPO_4/events.out.tfevents.1738157639.UN00315924-UNAL.dam.intra.cea.fr.304319.0
rename to training/results/0-trimesh_results/final/PPO_4/events.out.tfevents.1738157639.UN00315924-UNAL.dam.intra.cea.fr.304319.0
diff --git a/training/results/trimesh_results/test/PPO_1/events.out.tfevents.1736411485.UN00315924-UNAL.dam.intra.cea.fr.1486540.0 b/training/results/0-trimesh_results/test/PPO_1/events.out.tfevents.1736411485.UN00315924-UNAL.dam.intra.cea.fr.1486540.0
similarity index 100%
rename from training/results/trimesh_results/test/PPO_1/events.out.tfevents.1736411485.UN00315924-UNAL.dam.intra.cea.fr.1486540.0
rename to training/results/0-trimesh_results/test/PPO_1/events.out.tfevents.1736411485.UN00315924-UNAL.dam.intra.cea.fr.1486540.0
diff --git a/training/results/files_csv/curve_smoothing.py b/training/results/files_csv/curve_smoothing.py
index 28c9e8e..02913f5 100644
--- a/training/results/files_csv/curve_smoothing.py
+++ b/training/results/files_csv/curve_smoothing.py
@@ -1,9 +1,9 @@
import pandas as pd
-df = pd.read_csv("PPO_22.csv")
+df = pd.read_csv("medium_quad_perso.csv")
if 'Value' in df.columns:
alpha =0.1
df["Smoothed"]=df["Value"].ewm(alpha=alpha, adjust=False).mean()
- df.to_csv("PPO_SB3_fixed_quad.csv",index=False)
\ No newline at end of file
+ df.to_csv("medium_quad_perso.csv",index=False)
\ No newline at end of file
diff --git a/training/train_quadmesh.py b/training/train_quadmesh.py
index 6e76a28..deb6915 100644
--- a/training/train_quadmesh.py
+++ b/training/train_quadmesh.py
@@ -1,30 +1,23 @@
from __future__ import annotations
-import wandb
-import time
-import yaml
-import os
import random
import torch
+import yaml
+import os
+import time
+import wandb
-import numpy as np
-import gymnasium as gym
import matplotlib.pyplot as plt
+import gymnasium as gym
+import numpy as np
from torch.utils.tensorboard import SummaryWriter
#Internal import
from environment.gymnasium_envs import quadmesh_env
-
-from mesh_model.random_quadmesh import random_mesh
from mesh_model.reader import read_gmsh
from model_RL.PPO_model_pers import PPO
-from model_RL.evaluate_model import testPolicy
-
-from view.mesh_plotter.create_plots import plot_training_results, plot_test_results
-from view.mesh_plotter.mesh_plots import plot_dataset
-
def log_init(log_writer, config):
@@ -61,9 +54,11 @@ def log_end(log_writer, config, obs_registry):
if __name__ == '__main__':
# PARAMETERS CONFIGURATION
- with open("training/config_PPO_perso.yaml", "r") as f:
+ with open("training/config/quadmesh_config_PPO_perso.yaml", "r") as f:
config = yaml.safe_load(f)
+ experiment_name = config["experiment_name"]
+
# Create log dir
log_dir = config["paths"]["log_dir"]
os.makedirs(log_dir, exist_ok=True)
@@ -100,6 +95,8 @@ def log_end(log_writer, config, obs_registry):
model = PPO(
env=env,
obs_size= config["env"]["obs_size"],
+ n_actions=config["ppo"]["n_actions"],
+ n_darts_observed=config["env"]["n_darts_selected"],
max_steps=config["env"]["max_episode_steps"],
lr=config["ppo"]["learning_rate"],
gamma=config["ppo"]["gamma"],
diff --git a/training/train_quadmesh_SB3.py b/training/train_quadmesh_SB3.py
index e7d6dae..0c0ed66 100644
--- a/training/train_quadmesh_SB3.py
+++ b/training/train_quadmesh_SB3.py
@@ -8,8 +8,8 @@
import wandb
import matplotlib.pyplot as plt
-import numpy as np
import gymnasium as gym
+import numpy as np
from stable_baselines3 import PPO
@@ -231,7 +231,7 @@ def _on_training_end(self) -> None:
if __name__ == '__main__':
# PARAMETERS CONFIGURATION
- with open("training/config_PPO_SB3.yaml", "r") as f:
+ with open("training/config/quadmesh_config_PPO_SB3.yaml", "r") as f:
config = yaml.safe_load(f)
experiment_name = config["experiment_name"]
diff --git a/training/train_trimesh-flip_SB3.py b/training/train_trimesh-flip_SB3.py
index 82f6cb7..6de2032 100644
--- a/training/train_trimesh-flip_SB3.py
+++ b/training/train_trimesh-flip_SB3.py
@@ -94,9 +94,9 @@ def _on_training_end(self) -> None:
self.logger.dump(step=0)
-with open("../model_RL/parameters/ppo_config.json", "r") as f:
+with open("../model_RL/old_files/ppo_config.json", "r") as f:
ppo_config = json.load(f)
-with open("../environment/environment_config.json", "r") as f:
+with open("../environment/old_files/environment_config.json", "r") as f:
env_config = json.load(f)
# Create log dir
diff --git a/training/train_trimesh.py b/training/train_trimesh.py
index 0d67e81..0516259 100644
--- a/training/train_trimesh.py
+++ b/training/train_trimesh.py
@@ -1,43 +1,100 @@
-import mesh_model.random_trimesh as TM
+from __future__ import annotations
+
+import random
import torch
-from environment.trimesh_env import TriMesh
+import os
+import time
+import yaml
+import wandb
+
+import gymnasium as gym
+import numpy as np
+
+from torch.utils.tensorboard import SummaryWriter
+from stable_baselines3.common.env_checker import check_env
+
+#Internal import
+from environment.gymnasium_envs import trimesh_full_env
+from model_RL.PPO_model_pers import PPO
+
+
+def log_init(log_writer, config):
+ log_writer.add_text("Description", config["description"])
+ log_writer.add_hparams(hparam_dict=config["env"], metric_dict=config["metrics"], run_name=config["experiment_name"])
+
+
+if __name__ == '__main__':
+
+ # PARAMETERS CONFIGURATION
+ with open("training/config/trimesh_config_PPO_perso.yaml", "r") as f:
+ config = yaml.safe_load(f)
-from view.mesh_plotter.create_plots import plot_training_results, plot_test_results
-from view.mesh_plotter.mesh_plots import plot_dataset
+ experiment_name = config["experiment_name"]
-from model_RL.evaluate_model import testPolicy
+ # Create log dir
+ log_dir = config["paths"]["log_dir"]
+ os.makedirs(log_dir, exist_ok=True)
-from model_RL.PPO_model import PPO
-#from model_RL.SAC_model import SAC
-#from model_RL.AC_model import AC
+ wandb.tensorboard.patch(root_logdir=log_dir)
+ wandb.init(
+ project="Trimesh-learning",
+ name=experiment_name,
+ config=config,
+ sync_tensorboard=True,
+ save_code=True
+ )
-LOCAL_MESH_FEAT = 0
+ # SEEDING
+ seed = config["seed"]
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.backends.cudnn.deterministic = True
+ # Create the environment
+ env = gym.make(
+ config["env"]["env_id"],
+ # mesh=read_gmsh(config["dataset"]["evaluation_mesh_file_path"]),
+ mesh_size=config["env"]["mesh_size"],
+ max_episode_steps=config["env"]["max_episode_steps"],
+ n_darts_selected=config["env"]["n_darts_selected"],
+ deep=config["env"]["deep"],
+ action_restriction=config["env"]["action_restriction"],
+ with_quality_obs=config["env"]["with_quality_observation"],
+ render_mode=config["env"]["render_mode"],
+ analysis_type=config["env"]["analysis_type"],
+ )
-def train():
- mesh_size = 30
- lr = 0.0001
- gamma = 0.9
- feature = LOCAL_MESH_FEAT
+ check_env(env, warn=True)
- dataset = [TM.random_mesh(30) for _ in range(9)]
- plot_dataset(dataset)
+ model = PPO(
+ env=env,
+ obs_size= config["env"]["obs_size"],
+ n_actions=config["ppo"]["n_actions"],
+ n_darts_observed=config["env"]["n_darts_selected"],
+ max_steps=config["env"]["max_episode_steps"],
+ lr=config["ppo"]["learning_rate"],
+ gamma=config["ppo"]["gamma"],
+ nb_iterations=config["ppo"]["n_iterations"],
+ nb_episodes_per_iteration=config["ppo"]["n_episodes_per_iteration"],
+ nb_epochs=config["ppo"]["n_epochs"],
+ batch_size=config["ppo"]["batch_size"],
+ )
- env = TriMesh(None, mesh_size, max_steps=80, feat=feature)
+ writer = SummaryWriter(log_dir + config["experiment_name"])
+ log_init(writer, config)
- # Choix de la politique Actor Critic
- # actor = Actor(env, 30, 5, lr=0.0001)
- # critic = Critic(30, lr=0.0001)
- # policy = NNPolicy(env, 30, 64,5, 0.9, lr=0.0001)
+ # LEARNING
+ start_time = time.perf_counter()
+ print("-----------Starting learning-----------")
- model = PPO(env, lr, gamma, nb_iterations=3, nb_episodes_per_iteration=10, nb_epochs=2, batch_size=8)
- actor, rewards, wins, steps = model.train()
- if rewards is not None:
- plot_training_results(rewards, wins, steps)
+ actor, rewards, wins, steps, obs_registry = model.learn(writer)
- # torch.save(actor.state_dict(), 'policy_saved/actor_network.pth')
- avg_steps, avg_wins, avg_rewards, final_meshes = testPolicy(actor, 5, dataset, 60)
+ end_time = time.perf_counter()
+ print("-----------Learning ended------------")
+ print(f"Temps d'apprentissage : {end_time - start_time:.4} s")
- if rewards is not None:
- plot_test_results(avg_rewards, avg_wins, avg_steps, avg_rewards)
- plot_dataset(final_meshes)
+ # SAVING POLICY
+ torch.save(actor.state_dict(), config["paths"]["policy_saving_dir"]+config["experiment_name"]+".pth")
+ writer.close()
+ wandb.finish()
\ No newline at end of file
diff --git a/training/train_trimesh_SB3.py b/training/train_trimesh_SB3.py
index 1dc3bf0..b836c07 100644
--- a/training/train_trimesh_SB3.py
+++ b/training/train_trimesh_SB3.py
@@ -1,18 +1,75 @@
from __future__ import annotations
+import random
+import torch
import os
-import json
+import time
+import yaml
+import wandb
-import mesh_model.random_trimesh as TM
-from view.mesh_plotter.mesh_plots import dataset_plt
-from training.exploit_SB3_policy import testPolicy
+import gymnasium as gym
+import numpy as np
+
+from copy import deepcopy
from stable_baselines3 import PPO
from stable_baselines3.common.env_checker import check_env
from stable_baselines3.common.callbacks import BaseCallback
-from stable_baselines3.common.logger import Figure
+from stable_baselines3.common.logger import Figure, HParam
+
+from wandb.integration.sb3 import WandbCallback
+
+from environment.actions.smoothing import smoothing_mean
+from mesh_model.reader import read_gmsh
+from view.mesh_plotter.mesh_plots import dataset_plt, plot_mesh
+from training.exploit_SB3_policy import testPolicy
+
from environment.gymnasium_envs import trimesh_full_env
-import gymnasium as gym
+import mesh_model.random_trimesh as TM
+
+
+class HParamCallback(BaseCallback):
+ """
+ Saves the hyperparameters and metrics at the start of the training, and logs them to TensorBoard.
+ """
+
+ def _on_training_start(self) -> None:
+ hparam_dict = {
+ "algorithm": self.model.__class__.__name__,
+ "experiment": experiment_name,
+ "description": config["description"],
+ "learning rate": self.model.learning_rate,
+ "gamma": self.model.gamma,
+ "batch_size": config["ppo"]["batch_size"],
+ "epochs": config["ppo"]["n_epochs"],
+ "clip_range": config["ppo"]["clip_range"],
+ "training_meshes": config["dataset"]["training_mesh_file_path"],
+ "evaluation_meshes": config["dataset"]["evaluation_mesh_file_path"],
+ "max_steps": config["env"]["max_episode_steps"],
+ "max_timesteps": config["total_timesteps"],
+ "deep": config["env"]["deep"],
+ "with_quality": config["env"]["with_quality_observation"],
+ "nb_darts_selected": config["env"]["n_darts_selected"],
+ "reward_mode": config["env"]["reward_function"],
+        }
+ # define the metrics that will appear in the `HPARAMS` Tensorboard tab by referencing their tag
+        # TensorBoard will find & display metrics from the `SCALARS` tab
+ metric_dict = {
+ "normalized_return": 0,
+ "rollout/ep_len_mean": 0.0,
+ "rollout/ep_rew_mean": 0.0
+ }
+ self.logger.record(
+ "hparams",
+ HParam(hparam_dict, metric_dict),
+ exclude=("stdout", "log", "json", "csv"),
+ )
+
+ def _on_step(self) -> bool:
+ return True
+
class TensorboardCallback(BaseCallback):
"""
@@ -40,14 +97,6 @@ def __init__(self, model, verbose=0):
self.final_distance = 0
self.normalized_return = 0
- def _on_training_start(self) -> None:
- """
- Record PPO parameters and environment configuration at the training start.
- """
- self.logger.record("parameters/ppo", f"{json.dumps(ppo_config, indent=4)}")
- self.logger.record("parameters/env", f"{json.dumps(env_config, indent=4)}")
- self.logger.dump(step=0)
-
def _on_step(self) -> bool:
"""
Record different learning variables to monitor
@@ -65,6 +114,7 @@ def _on_step(self) -> bool:
self.actions_info["nb_invalid_split"] += self.locals["infos"][0].get("invalid_split", 0.0)
self.actions_info["nb_invalid_collapse"] += self.locals["infos"][0].get("invalid_collapse", 0.0)
+ self.final_distance = self.locals["infos"][0].get("distance", 0.0)
self.mesh_reward += self.locals["infos"][0].get("mesh_reward", 0.0)
# When the episode is over
@@ -73,20 +123,26 @@ def _on_step(self) -> bool:
mesh_ideal_reward = self.locals["infos"][0].get("mesh_ideal_rewards", 0.0) # maximum achievable reward
if mesh_ideal_reward > 0:
self.normalized_return = self.mesh_reward/ mesh_ideal_reward
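+                    # Sanity check: the accumulated mesh reward should never exceed the ideal (maximum achievable) reward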
+ if self.normalized_return > 1:
+ plot_mesh(self.locals["infos"][0].get("mesh"))
+ smoothed_m = deepcopy(self.locals["infos"][0].get("mesh"))
+ smoothing_mean(smoothed_m)
+ plot_mesh(smoothed_m)
+                        raise ValueError("normalized return above 1 is impossible")
else:
self.normalized_return = 0
- self.final_distance = self.locals["infos"][0].get("distance", 0.0)
+
self.logger.record("final_distance", self.final_distance)
- self.logger.record("valid_actions", self.actions_info["episode_valid_actions"]*100/self.current_episode_length if self.current_episode_length > 0 else 0)
+ self.logger.record("valid_actions (%)", self.actions_info["episode_valid_actions"]*100/self.current_episode_length if self.current_episode_length > 0 else 0)
self.logger.record("n_invalid_topo", self.actions_info["episode_invalid_topo"])
self.logger.record("n_invalid_geo", self.actions_info["episode_invalid_geo"])
self.logger.record("nb_flip", self.actions_info["nb_flip"])
self.logger.record("nb_split", self.actions_info["nb_split"])
self.logger.record("nb_collapse", self.actions_info["nb_collapse"])
- self.logger.record("invalid_flip", self.actions_info["nb_invalid_flip"]*100/self.actions_info["nb_flip"] if self.actions_info["nb_flip"] > 0 else 0)
- self.logger.record("invalid_split", self.actions_info["nb_invalid_split"]*100/self.actions_info["nb_split"] if self.actions_info["nb_split"] > 0 else 0)
- self.logger.record("invalid_collapse", self.actions_info["nb_invalid_collapse"]*100/self.actions_info["nb_collapse"]if self.actions_info["nb_collapse"] > 0 else 0)
+ self.logger.record("invalid_flip (%)", self.actions_info["nb_invalid_flip"]*100/self.actions_info["nb_flip"] if self.actions_info["nb_flip"] > 0 else 0)
+ self.logger.record("invalid_split (%)", self.actions_info["nb_invalid_split"]*100/self.actions_info["nb_split"] if self.actions_info["nb_split"] > 0 else 0)
+ self.logger.record("invalid_collapse (%)", self.actions_info["nb_invalid_collapse"]*100/self.actions_info["nb_collapse"]if self.actions_info["nb_collapse"] > 0 else 0)
self.logger.record("episode_mesh_reward", self.mesh_reward)
self.logger.record("episode_reward", self.current_episode_reward)
@@ -111,50 +167,82 @@ def _on_training_end(self) -> None:
"""
Records policy evaluation results : before and after dataset images
"""
- dataset = [TM.random_mesh(30) for _ in range(9)] # dataset of 9 meshes of size 30
+ print("-------- Testing Policy ---------")
+        dataset = [TM.random_mesh(30) for _ in range(4)]  # dataset of 4 meshes of size 30
         before = dataset_plt(dataset) # plot the dataset as image
- length, wins, rewards, normalized_return, final_meshes = testPolicy(self.model, 10, env_config, dataset) # test model policy on the dataset
+ length, wins, rewards, normalized_return, final_meshes = testPolicy(self.model, 5, config, dataset) # test model policy on the dataset
after = dataset_plt(final_meshes)
self.logger.record("figures/before", Figure(before, close=True), exclude=("stdout", "log"))
self.logger.record("figures/after", Figure(after, close=True), exclude=("stdout", "log"))
self.logger.dump(step=0)
-
-with open("../model_RL/parameters/ppo_config.json", "r") as f:
- ppo_config = json.load(f)
-with open("../environment/environment_config.json", "r") as f:
- env_config = json.load(f)
-
-# Create log dir
-log_dir = ppo_config["tensorboard_log"]
-os.makedirs(log_dir, exist_ok=True)
-
-# Create the environment
-env = gym.make(
- env_config["env_name"],
- mesh_size=env_config["mesh_size"],
- max_episode_steps=env_config["max_episode_steps"],
- n_darts_selected=env_config["n_darts_selected"],
- deep= env_config["deep"],
- action_restriction=env_config["action_restriction"],
- with_degree_obs=env_config["with_degree_observation"]
-)
-
-check_env(env, warn=True)
-
-model = PPO(
- policy=ppo_config["policy"],
- env=env,
- n_steps=ppo_config["n_steps"],
- n_epochs=ppo_config["n_epochs"],
- batch_size=ppo_config["batch_size"],
- learning_rate=ppo_config["learning_rate"],
- gamma=ppo_config["gamma"],
- verbose=ppo_config["verbose"],
- tensorboard_log=log_dir
-)
-
-print("-----------Starting learning-----------")
-model.learn(total_timesteps=ppo_config["total_timesteps"], callback=TensorboardCallback(model))
-print("-----------Learning ended------------")
-model.save("policy_saved/test/test-PPO-4")
\ No newline at end of file
+if __name__ == '__main__':
+
+ # PARAMETERS CONFIGURATION
+ with open("training/config/trimesh_config_PPO_SB3.yaml", "r") as f:
+ config = yaml.safe_load(f)
+
+ experiment_name = config["experiment_name"]
+
+ # SEEDING
+ seed = config["seed"]
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.backends.cudnn.deterministic = True
+
+ # # WANDB
+ # run = wandb.init(
+ # project="Trimesh-learning",
+ # name=experiment_name,
+ # sync_tensorboard=True, # auto-upload sb3's tensorboard metrics
+ # save_code=True, # optional
+ # )
+ # Create tensorboard log dir
+ log_dir = config["paths"]["log_dir"]
+ os.makedirs(log_dir, exist_ok=True)
+
+ #training_mesh = read_gmsh(config["dataset"]["training_mesh_file_path"])
+ # Create the environment
+ env = gym.make(
+ config["env"]["env_id"],
+ #mesh=training_mesh,
+ mesh_size = config["env"]["mesh_size"],
+ max_episode_steps=config["env"]["max_episode_steps"],
+ n_darts_selected=config["env"]["n_darts_selected"],
+ deep=config["env"]["deep"],
+ action_restriction=config["env"]["action_restriction"],
+ with_quality_obs=config["env"]["with_quality_observation"],
+ render_mode=config["env"]["render_mode"],
+ analysis_type=config["env"]["analysis_type"],
+ )
+
+ check_env(env, warn=True)
+
+ model = PPO(
+ policy=config["ppo"]["policy"],
+ env=env,
+ n_steps=config["ppo"]["n_steps"],
+ n_epochs=config["ppo"]["n_epochs"],
+ batch_size=config["ppo"]["batch_size"],
+ learning_rate=config["ppo"]["learning_rate"],
+ gamma=config["ppo"]["gamma"],
+ verbose=1,
+ tensorboard_log=log_dir
+ )
+
+ start_time = time.perf_counter()
+ print("-----------Starting learning-----------")
+ model.learn(
+ total_timesteps=config["total_timesteps"],
+ tb_log_name=config["experiment_name"],
+        callback=[HParamCallback(), TensorboardCallback(model)],
+ progress_bar=True
+ )
+ end_time = time.perf_counter()
+ print("-----------Learning ended------------")
+ print(f"Temps d'apprentissage : {end_time - start_time:.4} s")
+ model.save(config["paths"]["policy_saving_dir"] + config["experiment_name"])
+ # run.finish()
\ No newline at end of file
diff --git a/view/graph.py b/view/graph.py
index 46e95ac..343d7e4 100644
--- a/view/graph.py
+++ b/view/graph.py
@@ -1,6 +1,7 @@
import pygame
import sys
from pygame import math
+import warnings
edge_color_normal = pygame.Color(30, 30, 30) # Dark Grey
vertex_color_normal = pygame.Color(255, 0, 0) # Red
@@ -123,11 +124,14 @@ def create_vertex(self, id: int, x: int, y: int, n_value) -> int:
return len(self.vertices) - 1
def create_edge(self, i1: int, i2: int) -> int:
+ n1, n2 = None, None
for v in self.vertices:
if v.idx == i1:
n1 = v
elif v.idx == i2:
n2 = v
+ if n1 is None or n2 is None:
+ warnings.warn("try to create an edge between nodes not found")
self.add_edge(Edge(n1, n2))
return len(self.edges) - 1
diff --git a/view/mesh_plotter/mesh_plots.py b/view/mesh_plotter/mesh_plots.py
index 5a79718..584e345 100644
--- a/view/mesh_plotter/mesh_plots.py
+++ b/view/mesh_plotter/mesh_plots.py
@@ -11,7 +11,8 @@ def plot_mesh(mesh: Mesh) -> None:
Plot a mesh using matplotlib
:param mesh: a Mesh
"""
- _, _ = plt.subplots()
+ fig, ax = plt.subplots(figsize=(15, 15))
+
subplot_mesh(mesh)
plt.show(block=True)
@@ -43,8 +44,37 @@ def subplot_mesh(mesh: Mesh) -> None:
n1 = d1.get_node()
n2 = d2.get_node()
n3 = d3.get_node()
+
+ # Nodes coordinates
+ p1 = np.array([n1.x(), n1.y()])
+ p2 = np.array([n2.x(), n2.y()])
+ p3 = np.array([n3.x(), n3.y()])
+
polygon = np.array([(n1.x(), n1.y()), (n2.x(), n2.y()), (n3.x(), n3.y()), (n1.x(), n1.y())])
plt.plot(polygon[:, 0], polygon[:, 1], 'k-')
+
+ # Plot darts ID
+ mid1 = (p1 + p2) / 2
+ mid2 = (p2 + p3) / 2
+ mid3 = (p3 + p1) / 2
+
+ centroid = (p1 + p2 + p3) / 3
+
+            pos1 = mid1 + 0.2 * (centroid - mid1)
+            pos2 = mid2 + 0.2 * (centroid - mid2)
+            pos3 = mid3 + 0.2 * (centroid - mid3)
+
+ plt.text(*pos1, f"{d1.id}", color='blue', fontsize=10, ha='center', va='center')
+ plt.text(*pos2, f"{d2.id}", color='blue', fontsize=10, ha='center', va='center')
+ plt.text(*pos3, f"{d3.id}", color='blue', fontsize=10, ha='center', va='center')
+
+ # Plot nodes ID
+        n_id = 0
+ for n_info in mesh.nodes:
+            if n_info[2] >= 0:
+ plt.text(n_info[0] + 0.03, n_info[1] - 0.02, f"{n_id}", fontsize=12, color='red', ha='right', va='top')
+            n_id += 1
+
elif quad:
for dart_id in faces:
d1 = Dart(mesh, dart_id)