Skip to content

Commit

Permalink
wip
Browse files Browse the repository at this point in the history
  • Loading branch information
rochSmets committed Jan 24, 2024
1 parent 23a374b commit 529e584
Show file tree
Hide file tree
Showing 3 changed files with 227 additions and 139 deletions.
54 changes: 42 additions & 12 deletions pyphare/pyphare/core/operators.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import numpy as np
from pyphare.pharesee.hierarchy import PatchLevel, Patch, FieldData
# from pyphare.pharesee.hierarchy import PatchLevel, Patch, FieldData
from pyphare.pharesee.hierarchy import ScalarField, VectorField # TensorField
from pyphare.pharesee.hierarchy import compute_hier_from
from pyphare.pharesee.hierarchy import rename
Expand All @@ -20,7 +20,7 @@ def _compute_sqrt(patch_datas, **kwargs):

ref_name = next(iter(patch_datas.keys()))

dset = np.sqrt(patch_datas["scalar"].dataset[:])
dset = np.sqrt(patch_datas["scalar"].dataset[:])

return ({"name": 'scalar', "data": dset, "centering": patch_datas[ref_name].centerings},)

Expand All @@ -43,6 +43,36 @@ def _compute_cross_product(patch_datas, **kwargs):
)


def _compute_grad(patch_data, **kwargs):
ndim = patch_data["scalar"].box.ndim
nb_ghosts = kwargs["nb_ghosts"]
ds = patch_data["scalar"].dataset

ds_shape = list(ds.shape)

ds_x = np.full(ds_shape, np.nan)
ds_y = np.full(ds_shape, np.nan)
ds_z = np.full(ds_shape, np.nan)

grad_ds = np.gradient(ds)

if ndim == 2:
ds_x[nb_ghosts:-nb_ghosts, nb_ghosts:-nb_ghosts] = \
np.asarray(grad_ds[0][nb_ghosts:-nb_ghosts, nb_ghosts:-nb_ghosts])
ds_y[nb_ghosts:-nb_ghosts, nb_ghosts:-nb_ghosts] = \
np.asarray(grad_ds[1][nb_ghosts:-nb_ghosts, nb_ghosts:-nb_ghosts])
ds_z[nb_ghosts:-nb_ghosts, nb_ghosts:-nb_ghosts].fill(0.) # TODO at 2D, gradient is null in z dir

else:
raise RuntimeError("dimension not yet implemented")

return (
{"name": 'x', "data": ds_x, "centering": patch_data["scalar"].centerings},
{"name": 'y', "data": ds_y, "centering": patch_data["scalar"].centerings},
{"name": 'z', "data": ds_z, "centering": patch_data["scalar"].centerings},
)


def dot(hier_left, hier_right, **kwargs):
names_left_kept = hier_left.get_names()
names_right_kept = hier_right.get_names()
Expand Down Expand Up @@ -94,7 +124,6 @@ def cross(hier_left, hier_right, **kwargs):


def sqrt(hier, **kwargs):
    """Return the point-wise square root of a hierarchy, as a ScalarField.

    NOTE(review): kwargs is accepted but unused — kept for signature
    consistency with the other operators in this module.
    """
    h = compute_hier_from(_compute_sqrt, hier,)

    return ScalarField(h.patch_levels, h.domain_box,
                       refinement_ratio=h.refinement_ratio,
                       time=h.times()[0],
                       data_files=h.data_files)





def modulus(hier):
    """Return the point-wise modulus |hier| of a VectorField, as a ScalarField.

    Computed as sqrt(hier . hier) using the dot() operator of this module.
    """
    assert isinstance(hier, VectorField)

    return sqrt(dot(hier, hier))


def grad(hier, **kwargs):
    """Return the gradient of a ScalarField hierarchy, as a VectorField."""
    assert isinstance(hier, ScalarField)

    # the ghost width is read from any patch_data of the coarsest level
    first_pd = next(iter(hier.level(0).patches[0].patch_datas.values()))
    nb_ghosts = first_pd.ghosts_nbr[0]

    h = compute_hier_from(_compute_grad, hier, nb_ghosts=nb_ghosts)

    # TODO the plot of a grad displays only 1 patch if vmin and vmax are not
    # specified... any idea why ?
    return VectorField(h.patch_levels, h.domain_box,
                       refinement_ratio=h.refinement_ratio,
                       time=h.times()[0],
                       data_files=h.data_files)
201 changes: 119 additions & 82 deletions pyphare/pyphare/pharesee/hierarchy.py
Original file line number Diff line number Diff line change
Expand Up @@ -1084,7 +1084,15 @@ def dist_plot(self, **kwargs):

return final, dp(final, **kwargs)

def __neg__(self):
    """Unary minus: negate every dataset of the hierarchy, component-wise.

    NOTE(review): always returns a VectorField, even if self is a
    ScalarField — confirm this is intended.
    """
    h = compute_hier_from(_compute_neg, self, names=self.get_names())

    return VectorField(h.patch_levels, h.domain_box,
                       refinement_ratio=h.refinement_ratio,
                       time=h.times()[0],
                       data_files=h.data_files)


class ScalarField(PatchHierarchy):
Expand All @@ -1096,11 +1104,20 @@ def __init__(self, patch_levels, domain_box, **kwargs):
time = kwargs.get("time", .0)
data_files = kwargs.get("data_files", None)

# self.name = ['scalar'] # TODO should we use the static name ?

super().__init__(patch_levels, domain_box, refinement_ratio, time, data_files)

def __mul__(self, other):
    """Multiply the scalar hierarchy by a plain number; returns a new ScalarField."""
    # only multiplication by a number is supported
    assert isinstance(other, (int, float))

    scaled = compute_hier_from(_compute_mul, self,
                               names=self.get_names(), other=other)

    return ScalarField(scaled.patch_levels, scaled.domain_box,
                       refinement_ratio=scaled.refinement_ratio,
                       time=scaled.times()[0],
                       data_files=scaled.data_files)

def __rmul__(self, other):
    """Right multiplication: number * ScalarField delegates to __mul__."""
    return self * other


class VectorField(PatchHierarchy):
Expand All @@ -1114,119 +1131,115 @@ def __init__(self, patch_levels, domain_box, **kwargs):

super().__init__(patch_levels, domain_box, refinement_ratio, time, data_files)

def __mul__(self, other):
    """Multiply each component of the VectorField by a plain number.

    Returns a new VectorField; the operand hierarchy is left untouched.
    (Removed the interleaved remains of the previous implementation.)
    """
    # only multiplication by a number is supported here; the scalar product
    # between two VectorFields is dot() in operators.py
    assert isinstance(other, (int, float))

    h = compute_hier_from(_compute_mul, self,
                          names=self.get_names(), other=other)

    return VectorField(h.patch_levels, h.domain_box,
                       refinement_ratio=h.refinement_ratio,
                       time=h.times()[0],
                       data_files=h.data_files)

# assert(all (l == r for l, r in zip(self.levels(time_self).keys(), operand.levels(time_operand).keys())))
# for (ilvl_self, lvl_self), (ilvl_operand, lvl_operand) in zip(self.levels(time_self).items(), operand.levels(time_operand).items()):
# new_patches = {}
def __rmul__(self, other):
    """Right multiplication: number * VectorField delegates to __mul__."""
    return self * other

# assert(all (l.id == r.id for l, r in zip(lvl_self.patches, lvl_operand.patches)))
# for (patch_self, patch_operand) in zip(lvl_self.patches, lvl_operand.patches):
# layout = patch_self.layout

# if ilvl_self not in new_patches:
# new_patches[ilvl_self] = []

# new_pd = {}
# dset = 0.

# for (name, name_self, name_operand) in zip(self.names, patch_self.patch_datas.keys(), patch_operand.patch_datas.keys()):
# pd_self = patch_self.patch_datas[name_self]
# pd_operand = patch_operand.patch_datas[name_operand]
def __add__(self, other):
    """Component-wise sum of two VectorFields; returns a new VectorField."""
    kept_self = self.get_names()
    kept_other = other.get_names()

    if not isinstance(other, VectorField):
        raise RuntimeError("type of hierarchy not yet considered")

    # temporarily disambiguate the component names of the two operands
    h_self = rename(self, ['self_x', 'self_y', 'self_z'])
    h_other = rename(other, ['other_x', 'other_y', 'other_z'])

    h = compute_hier_from(_compute_add, (h_self, h_other),)

    # restore the original names
    # NOTE(review): rebinding the locals has no effect on the caller's
    # objects unless rename() mutates its argument — confirm.
    self = rename(h_self, kept_self)
    other = rename(h_other, kept_other)

    return VectorField(h.patch_levels, h.domain_box,
                       refinement_ratio=h.refinement_ratio,
                       time=h.times()[0],
                       data_files=h.data_files)

# multiplication by a scalar
if isinstance(operand, (int, float)):
def __sub__(self, other):
    """Component-wise difference of two VectorFields; returns a new VectorField.

    (Removed the interleaved remains of the previous __mul__ implementation.)
    """
    kept_self = self.get_names()
    kept_other = other.get_names()

    if not isinstance(other, VectorField):
        raise RuntimeError("type of hierarchy not yet considered")

    # temporarily disambiguate the component names of the two operands
    h_self = rename(self, ['self_x', 'self_y', 'self_z'])
    h_other = rename(other, ['other_x', 'other_y', 'other_z'])

    h = compute_hier_from(_compute_sub, (h_self, h_other),)

    # restore the original names
    # NOTE(review): rebinding the locals has no effect on the caller's
    # objects unless rename() mutates its argument — confirm.
    self = rename(h_self, kept_self)
    other = rename(h_other, kept_other)

    return VectorField(h.patch_levels, h.domain_box,
                       refinement_ratio=h.refinement_ratio,
                       time=h.times()[0],
                       data_files=h.data_files)

for (name, pd_name) in zip(names, patch.patch_datas.keys()):
dset = np.asarray(patch.patch_datas[pd_name].dataset)*operand
pd = FieldData(layout, name, dset, centering=patch.patch_datas[pd_name].centerings)
new_pd[name] = pd
def __truediv__(self, other):
    """Divide each component of this VectorField by a ScalarField, point-wise.

    (Removed the interleaved remains of the previous implementation.)
    """
    if not isinstance(other, ScalarField):
        raise RuntimeError("type of hierarchy not yet considered")

    h = compute_hier_from(_compute_truediv, (self, other),
                          names=self.get_names())

    return VectorField(h.patch_levels, h.domain_box,
                       refinement_ratio=h.refinement_ratio,
                       time=h.times()[0],
                       data_files=h.data_files)

return VectorField(new_patch_level, domain_box, time=time)

else:
raise ValueError("Unsupported type of operand : {}".format(type(operand)))
def _compute_mul(patch_datas, **kwargs):
names = kwargs["names"]
other = kwargs["other"]
pd_attrs = []

def __rmul__(self, operand):
return self.__mul__(operand)
for name in names:
pd_attrs.append({"name": name,
"data": other*patch_datas[name].dataset,
"centering": patch_datas[name].centerings})

def __add__(self, operand):
names_self_kept = self.get_names()
names_operand_kept = operand.get_names()
return tuple(pd_attrs)

if isinstance(operand, VectorField):
names_self = ['self_x', 'self_y', 'self_z']
names_operand = ['operand_x', 'operand_y', 'operand_z']
else:
raise RuntimeError("type of hierarchy not yet considered")

h_self = rename(self, names_self)
h_operand = rename(operand, names_operand)
def _compute_add(patch_datas, **kwargs):

h = compute_hier_from(_compute_add, (h_self, h_operand),)
ref_name = next(iter(patch_datas.keys()))

self = rename(h_self, names_self_kept)
operand = rename(h_operand, names_operand_kept)
dset_x = patch_datas["self_x"].dataset[:] + patch_datas["other_x"].dataset[:]
dset_y = patch_datas["self_y"].dataset[:] + patch_datas["other_y"].dataset[:]
dset_z = patch_datas["self_z"].dataset[:] + patch_datas["other_z"].dataset[:]

return "zob"
return (
{"name": 'x', "data": dset_x, "centering": patch_datas[ref_name].centerings},
{"name": 'y', "data": dset_y, "centering": patch_datas[ref_name].centerings},
{"name": 'z', "data": dset_z, "centering": patch_datas[ref_name].centerings},
)


def _compute_add(patch_datas, **kwargs):
def _compute_sub(patch_datas, **kwargs):

ref_name = next(iter(patch_datas.keys()))

dset_x = patch_datas["self_x"].dataset[:] * patch_datas["operand_x"].dataset[:]\
dset_y = patch_datas["self_y"].dataset[:] * patch_datas["operand_y"].dataset[:]\
dset_z = patch_datas["self_z"].dataset[:] * patch_datas["operand_z"].dataset[:]\
dset_x = patch_datas["self_x"].dataset[:] - patch_datas["other_x"].dataset[:]
dset_y = patch_datas["self_y"].dataset[:] - patch_datas["other_y"].dataset[:]
dset_z = patch_datas["self_z"].dataset[:] - patch_datas["other_z"].dataset[:]

return (
{"name": 'x', "data": dset_x, "centering": patch_datas[ref_name].centerings},
Expand All @@ -1235,6 +1248,30 @@ def _compute_add(patch_datas, **kwargs):
)


def _compute_neg(patch_datas, **kwargs):
names = kwargs["names"]
pd_attrs = []

for name in names:
pd_attrs.append({"name": name,
"data": -patch_datas[name].dataset,
"centering": patch_datas[name].centerings})

return tuple(pd_attrs)


def _compute_truediv(patch_datas, **kwargs):
names = kwargs["names"]
pd_attrs = []

for name in names:
pd_attrs.append({"name": name,
"data": patch_datas[name].dataset/patch_datas["scalar"].dataset,
"centering": patch_datas[name].centerings})

return tuple(pd_attrs)


def _compute_rename(patch_datas, **kwargs):
new_names = kwargs["names"]
pd_attrs = []
Expand Down
Loading

0 comments on commit 529e584

Please sign in to comment.