diff --git a/CHANGELOG.md b/CHANGELOG.md
index fdf55605..78212a56 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@ All notable changes to this project will be documented in this file. The format
 
 ### Fixed
 - Fix the gradient evaluation in `NeoHookeCompressible(mu=1, lmbda=None)`.
+- Fix `MaterialStrain.plot()`.
 
 ## [8.5.0] - 2024-04-27
 
diff --git a/src/felupe/constitution/_view.py b/src/felupe/constitution/_view.py
index 0eb48565..e575e1c3 100644
--- a/src/felupe/constitution/_view.py
+++ b/src/felupe/constitution/_view.py
@@ -201,12 +201,13 @@ def fun(λ3):
             λ2 = λ3
             F = eye * np.array([λ1, λ2, λ3]).reshape(1, 3, 1, -1)
             if self.statevars_included:
-                if self.statevars is None:
-                    self.statevars = np.zeros((*self.umat.x[-1].shape, 1, 1))
+                statevars = self.statevars
+                if statevars is None:
+                    statevars = np.zeros((*self.umat.x[-1].shape, 1, 1))
                 P = np.zeros_like(F)
                 for increment, defgrad in enumerate(F.T):
                     P[..., [increment]], statevars = self.umat.gradient(
-                        [F[..., [increment]], self.statevars]
+                        [F[..., [increment]], statevars]
                     )
             else:
                 P, statevars = self.umat.gradient([F, None])
@@ -262,12 +263,13 @@ def planar(self, stretches=None):
         def fun(λ3):
             F = eye * np.array([λ1, λ2, λ3]).reshape(1, 3, 1, -1)
             if self.statevars_included:
-                if self.statevars is None:
-                    self.statevars = np.zeros((*self.umat.x[-1].shape, 1, 1))
+                statevars = self.statevars
+                if statevars is None:
+                    statevars = np.zeros((*self.umat.x[-1].shape, 1, 1))
                 P = np.zeros_like(F)
                 for increment, defgrad in enumerate(F.T):
                     P[..., [increment]], statevars = self.umat.gradient(
-                        [F[..., [increment]], self.statevars]
+                        [F[..., [increment]], statevars]
                     )
             else:
                 P, statevars = self.umat.gradient([F, None])
@@ -322,12 +324,13 @@ def biaxial(self, stretches=None):
         def fun(λ3):
             F = eye * np.array([λ1, λ2, λ3]).reshape(1, 3, 1, -1)
             if self.statevars_included:
-                if self.statevars is None:
-                    self.statevars = np.zeros((*self.umat.x[-1].shape, 1, 1))
+                statevars = self.statevars
+                if statevars is None:
+                    statevars = np.zeros((*self.umat.x[-1].shape, 1, 1))
                 P = np.zeros_like(F)
                 for increment, defgrad in enumerate(F.T):
                     P[..., [increment]], statevars = self.umat.gradient(
-                        [F[..., [increment]], self.statevars]
+                        [F[..., [increment]], statevars]
                     )
             else:
                 P, self.statevars = self.umat.gradient([F, None])
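
The fix is the same in all three load cases (`uniaxial`, `planar`, `biaxial`): the evaluation helper previously rebound `self.statevars` to a zero-initialized scratch array and then passed `self.statevars` into every increment, so it both leaked internal state onto the instance and discarded the updated state variables returned by `gradient()`. Rebinding a local name instead leaves the instance untouched and threads the per-increment updates through the loop. Below is a minimal, self-contained sketch of that pattern; `Viewer` and `DummyMaterial` are hypothetical stand-ins for the felupe internals, not the library's actual classes:

```python
import numpy as np


class DummyMaterial:
    """Hypothetical state-dependent material: P = F, state variables += 1."""

    def __init__(self):
        # mimic umat.x: the last item carries the state-variable shape
        self.x = [np.zeros((3, 3)), np.zeros(5)]

    def gradient(self, x):
        F, statevars = x
        return F.copy(), statevars + 1.0


class Viewer:
    """Stand-in for the uniaxial/planar/biaxial evaluators in ``_view.py``."""

    def __init__(self, umat, statevars=None):
        self.umat = umat
        self.statevars = statevars  # user-provided initial state (or None)

    def evaluate(self, F):
        # Work on a local name; the old code rebound ``self.statevars`` here
        # and kept feeding the stale attribute into every increment.
        statevars = self.statevars
        if statevars is None:
            statevars = np.zeros((*self.umat.x[-1].shape, 1, 1))

        P = np.zeros_like(F)
        for increment, defgrad in enumerate(F.T):
            # thread the *updated* state variables into the next increment
            P[..., [increment]], statevars = self.umat.gradient(
                [F[..., [increment]], statevars]
            )
        return P


# incompressible uniaxial stretch over four increments; F has shape (3, 3, 1, 4)
λ1 = np.linspace(1.0, 2.0, 4)
λ2 = λ3 = 1.0 / np.sqrt(λ1)
F = np.eye(3).reshape(3, 3, 1, 1) * np.array([λ1, λ2, λ3]).reshape(1, 3, 1, -1)

viewer = Viewer(DummyMaterial())
viewer.evaluate(F)
assert viewer.statevars is None  # the instance keeps its initial state
```

With the local rebinding, calling `plot()` twice on the same material yields identical curves, because each call starts from the user-supplied (or freshly zeroed) initial state variables rather than from whatever the previous call left behind on the instance.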