Skip to content

Commit

Permalink
Add files via upload
Browse files Browse the repository at this point in the history
  • Loading branch information
adam-subel authored Dec 11, 2020
1 parent 67d6db6 commit 86576a3
Show file tree
Hide file tree
Showing 8 changed files with 630 additions and 0 deletions.
136 changes: 136 additions & 0 deletions DSMAG.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
import numpy as np
import time as time
import scipy
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft
import scipy.io as sio
from numpy import genfromtxt
import math

# ---------------------------------------------------------------------------
# Dynamic Smagorinsky (DSMAG) LES of the forced 1-D Burgers equation:
# grid parameters, wavenumbers, and spectral derivative operators.
# ---------------------------------------------------------------------------

NX = 1024        # DNS (fine) grid size
NY = 128         # LES (coarse, "bar") grid size

nu = 2e-2        # viscosity
n = int(NX/NY)   # DNS-to-LES grid ratio
s = 20           # ratio of LES to DNS time steps
gamma = 2        # test-filter width ratio used by the dynamic procedure

dt = 1e-2*s      # LES time step (s DNS steps of 1e-2)

reg = 18         # data-region index used to select the input .mat files

Lx = 100         # domain length
dy = Lx/NY       # LES grid spacing
dx = Lx/NX       # DNS grid spacing

# LES cell-center coordinates (midpoints of NY+1 edge points).
# NOTE(review): num=NY+1 with stop Lx-dy gives an edge spacing slightly
# below dy -- confirm the intended staggering.
x_bar = np.linspace(0, Lx-dy, num=NY+1)
x_bar = (x_bar[1:]+x_bar[:NY])/2

x_prime = np.linspace(0, Lx-dx, num=NX)  # DNS grid coordinates

# FFT-ordered wavenumbers (0..N/2, then negative frequencies) for the
# test-filtered grid, the LES grid, and the DNS grid respectively.
kx_gamma = (2*math.pi/Lx)*np.concatenate((np.arange(0,(NY/gamma)/2+1,dtype=float),np.arange((-(NY/gamma)/2+1),0,dtype=float)))
kx_bar = (2*math.pi/Lx)*np.concatenate((np.arange(0,NY/2+1,dtype=float),np.arange((-NY/2+1),0,dtype=float)))
kx_prime = (2*math.pi/Lx)*np.concatenate((np.arange(0,NX/2+1,dtype=float),np.arange((-NX/2+1),0,dtype=float)))


# Spectral first-derivative operator on the test-filtered grid.
D1_gamma = 1j*kx_gamma.reshape([int(NY/gamma),1])

# Spectral first/second derivative operators on the LES grid, as columns.
D1_bar = 1j*kx_bar
D2_bar = kx_bar*kx_bar
D1_bar = D1_bar.reshape([NY,1])
D2_bar = D2_bar.reshape([NY,1])

I = np.eye(NY)               # NOTE(review): appears unused below -- confirm.
D2x = 1 + 0.5*dt*nu*D2_bar   # Crank-Nicolson implicit factor (1 + dt*nu*k^2/2)

# Spectral first-derivative operator on the DNS grid.
D1_prime = 1j*kx_prime
D1_prime = D1_prime.reshape([NX,1])

def filter_bar(u, n):
    """Coarse-grain ``u`` along its first axis by block-averaging rows.

    Parameters
    ----------
    u : ndarray, shape (N, k)
        Fine-grid field; callers in this file pass column vectors (k == 1).
    n : int
        Filter width: number of consecutive rows averaged per coarse cell.

    Returns
    -------
    ndarray, shape (N//n, k)
        Block-averaged field.
    """
    n_coarse = int(u.shape[0] / n)
    u_bar = np.zeros((n_coarse, u.shape[1]))

    for i in range(n_coarse):
        # axis=0 averages each column separately; the original scalar mean
        # over the whole slice was only equivalent for single-column input.
        u_bar[i] = np.mean(u[n * i:n * i + n], axis=0)

    return u_bar

def calc_cp(u, gamma):
    """Compute the dynamic Smagorinsky-type model coefficient c_p.

    Compares the resolved field ``u`` on the LES grid (spacing ``dy``) with
    a test-filtered version coarsened by a factor ``gamma``, in the spirit
    of the Germano dynamic procedure -- TODO confirm against the reference
    formulation.

    Parameters
    ----------
    u : ndarray, shape (NY, 1)
        Resolved LES velocity field.
    gamma : int
        Test-filter width ratio (module-level value is 2).

    Returns
    -------
    float
        Least-squares coefficient; negated to positive if it comes out
        negative.

    Notes
    -----
    Reads the module-level globals ``dy``, ``D1_bar`` and ``D1_gamma``.
    """

    delta = dy            # grid-filter width
    delta_sub = dy*gamma  # test-filter width

    # Leonard-type term: filtered product minus product of filtered fields.
    uu_sub = filter_bar(u*u, gamma)
    u_sub = filter_bar(u, gamma)

    L = .5*(uu_sub - u_sub*u_sub).squeeze()

    # Spectral derivatives of u (LES grid) and of the test-filtered field.
    der_u = np.real(ifft(D1_bar*fft(u,axis = 0),axis=0))

    der_u_sub = np.real(ifft(D1_gamma*fft(u_sub,axis = 0),axis=0))

    # Model term M at the two filter widths.
    # NOTE(review): np.linalg.norm(der_u, 2) on an (N, 1) array is a single
    # global scalar (matrix 2-norm), not a pointwise |du/dx| -- confirm this
    # global norm is intended rather than a local strain magnitude.
    M = ((delta**2)*filter_bar(np.linalg.norm(der_u,2)*der_u,gamma)-(delta_sub**2)*np.linalg.norm(der_u_sub,2)*der_u_sub).squeeze()

    # Least-squares fit of L against M.
    c_p = np.dot(L,M)/np.dot(M,M)

    # NOTE(review): a negative coefficient is flipped positive rather than
    # clipped to zero -- confirm this is the intended regularization.
    if c_p < 0:
        c_p = abs(c_p)

    return c_p


# ---------------------------------------------------------------------------
# Main DSMAG time-stepping driver.
# Loads the filtered DNS ("bar") field for one region plus the matching
# coarse-grid forcing, then advances the LES solution using:
#   - Crank-Nicolson for the viscous term (division by D2x below),
#   - Adams-Bashforth 2 for the nonlinear flux and the subgrid term,
#   - a coefficient recomputed by calc_cp at every step.
# ---------------------------------------------------------------------------

u_bar_dict = sio.loadmat('./drive/My Drive/dealiasing/u_bar_region_' + str(reg) + '.mat')
u_bar_store=u_bar_dict['u_bar'].transpose()

force_dict = sio.loadmat('./drive/My Drive/dealiasing/f_bar_all_regions.mat')
# Slice the stored forcing so it lines up with this region and the LES start.
force_bar=force_dict['f_bar'][:,int((reg-1)*12500)+int(1000000/s):]


num_pred = 200000  # number of LES steps to advance

# Two consecutive LES-time snapshots (s DNS steps apart) so the AB2 scheme
# has both u^n and u^{n-1} at startup.
u_old = u_bar_store[1000000-s,:].reshape([NY,1])
u = u_bar_store[1000000,:].reshape([NY,1])


u_fft = fft(u,axis=0)
u_old_fft = fft(u_old,axis=0)

u_store = np.zeros((NY,num_pred))    # predicted velocity history
sub_store = np.zeros((NY,num_pred))  # predicted subgrid-term history

# uses a fixed c_p to start and then calculates at each time step

c_p = .038
S_bar = .5*D1_bar*u_old_fft
S_bar_norm = np.sqrt(2*np.dot(S_bar.transpose(),S_bar))
tau_approx_old = -D1_bar*c_p*2*(dy**2)*S_bar_norm*S_bar

for i in range(num_pred):

    force=force_bar[:,i].reshape((NY,1))

    # Nonlinear fluxes at t^n and t^{n-1}: spectral derivative of u^2/2.
    F = D1_bar*fft(.5*(u**2),axis=0)
    F0 = D1_bar*fft(.5*(u_old**2),axis=0)

    # Dynamic coefficient from the current field.
    c_p = calc_cp(u,gamma)

    # Smagorinsky-type subgrid stress assembled in spectral space.
    # NOTE(review): S_bar_norm contracts the spectral coefficients into a
    # single (complex) scalar -- confirm this matches the intended |S| scaling.
    S_bar = .5*D1_bar*u_fft
    S_bar_norm = np.sqrt(2*np.dot(S_bar.transpose(),S_bar))
    tau_approx = -D1_bar*c_p*2*(dy**2)*S_bar_norm*S_bar

    # AB2 for advection/forcing/subgrid, explicit half of Crank-Nicolson.
    uRHS = -0.5*dt*(3*F- F0) - 0.5*dt*nu*(D2_bar*u_fft) + u_fft + dt*fft(force,axis=0)\
        -1/2*dt*(3*tau_approx-tau_approx_old)

    tau_approx_old = tau_approx

    u_old = u

    # Implicit half of Crank-Nicolson: divide by (1 + 0.5*dt*nu*k^2).
    u_fft = uRHS/D2x.reshape([NY,1])

    u = np.real(ifft(u_fft,axis=0))
    u_store[:,i] = u.squeeze()
    sub_store[:,i] = np.real(ifft(tau_approx,axis=0)).squeeze()


sio.savemat('./DSMAG_region_.mat',
            {'u_pred':u_store, 'sub_pred':sub_store})
77 changes: 77 additions & 0 deletions Stochastic_Burgers_DNS.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
% DNS of the stochastically forced 1-D Burgers equation (pseudo-spectral).
% Time stepping: Adams-Bashforth 2 for the nonlinear term, Crank-Nicolson
% for the viscous term. The random forcing is redrawn every s DNS steps
% (i.e. once per LES step) and saved at that coarser cadence.
clc
clear all
close all

L=100.0;        % domain length
nu=0.02;        % viscosity
A=sqrt(2)*1e-2; % forcing amplitude scale

N=1024;         % number of grid points
dt=0.01;        % DNS time step
s=20; %ratio of LES and DNS time steps

% number of time steps
M=10000000;

% time steps between samples
P=1;

x=[0:N-1]'*L/N;                    % periodic grid

kx=[0:N/2 -N/2+1:-1]'*2.0*pi/L;    % FFT-ordered wavenumbers

u_old=sin(2*pi*2*x/L+randn*2*pi);  % random-phase sinusoidal initial condition

un_old=fft(u_old);
Fn_old=1i*kx.*fft(0.5*(u_old).^2); % spectral nonlinear flux d/dx(u^2/2)

U_DNS=zeros(N,M/P);                % sampled velocity history
f_store = zeros(size(U_DNS));      % sampled forcing history
U_DNS(:,1)=u_old;
z=0;                               % sample counter

u=u_old;
un=zeros(N,1);

% Initial forcing: sum of the three lowest cosine modes with random
% amplitude (C1) and phase (C2), scaled by 1/sqrt(kk*s*dt).
f=zeros(N,1);
for kk=1:3
    C1=randn;
    C2=randn;
    f=f+C1*A/sqrt(kk*s*dt)*cos(2*pi*kk*x/L+2*pi*C2);
end
fn=fft(f);

for m=2:M
    Fn=1i*kx.*fft(0.5*u.^2);       % nonlinear flux at the current step

    % Redraw the stochastic forcing once per LES step (every s DNS steps).
    if(mod(m,s)==0)
        f= zeros(size(f));

        for kk=1:3
            C1=randn;
            C2=randn;
            f=f+C1*A/sqrt(kk*s*dt)*cos(2*pi*kk*x/L+2*pi*C2);
        end
        fn=fft(f);
    end

    % AB2 nonlinear term + Crank-Nicolson viscous term, mode by mode.
    for k=1:N
        C=0.5*(kx(k))^2*nu*dt;
        un(k)=((1.0-C)*un_old(k)-0.5*dt*(3.0*Fn(k)-Fn_old(k))+dt*fn(k))/(1.0+C);
    end

    un_old=un;
    u=real(ifft(un));
    Fn_old=Fn;

    % Store every P-th step.
    if(mod(m,P)==0)
        z=z+1;
        U_DNS(:,z) = u;
        f_store(:,z+1) = f;
    end
end

% Keep only the forcing samples at the LES cadence.
f_store = f_store(:,1:s:end);

save('DNS_Burgers_s_20.mat','U_DNS','-v7.3')
save('DNS_Force_LES_s_20.mat','f_store','-v7.3')
181 changes: 181 additions & 0 deletions Transfer_Learning.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,181 @@
import sys
import numpy as np
import scipy
import scipy.sparse as sparse
from scipy.sparse import linalg
import scipy.io as sio
import math
import keras
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras import layers
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.optimizers import rmsprop, SGD, Adagrad, Adadelta
from scipy.io import savemat
from scipy.io import loadmat
from scipy.fftpack import fft, ifft

def swish(x):
    """Swish activation with beta fixed at 1.0: f(x) = x * sigmoid(x)."""
    beta = 1.0
    sig = keras.backend.sigmoid(x)
    return beta * sig * x

# Training-set sizing.
train_num = 50000       # samples drawn from the pool for training

train_region = 100000   # first train_region snapshots form the training pool

# NOTE(review): train_start is computed but not used below -- confirm
# whether it is still needed.
train_start = train_region - train_num
num_pred = 20000        # held-out samples used as the test split

def normalize_data(data):
    """Standardize *data* to zero mean and unit (population) std.

    Returns
    -------
    tuple
        (normalized array, mean, std) so the transform can be inverted.
    """
    mean_val = np.mean(data)
    std_val = np.std(data)
    normalized = (data - mean_val) / std_val
    return normalized, mean_val, std_val

def shift_data(data1, data2):
    """Apply the same random circular shift to each row of both arrays.

    Augmentation for periodic data: row i of *data1* and *data2* is rolled
    left by a random offset drawn once per row, so input/output pairs stay
    aligned. Both arrays are modified in place and returned.
    """
    shifts = np.random.randint(0, data1.shape[1], data1.shape[0])
    for row, offset in enumerate(shifts):
        # np.roll with a negative shift == concatenate(tail, head).
        data1[row, :] = np.roll(data1[row, :], -offset)
        data2[row, :] = np.roll(data2[row, :], -offset)

    return data1, data2

# ---------------------------------------------------------------------------
# Load the filtered velocity (network input) and subgrid term PI (network
# output) for region 13, apply a shared random circular shift to each
# training row for augmentation, then normalize and split train/test.
# ---------------------------------------------------------------------------
u_bar_dict = sio.loadmat('./u_bar_region_13.mat')

full_input=u_bar_store=u_bar_dict['u_bar'].transpose()



full_output = sio.loadmat('./PI_region_13.mat')
full_output=full_output['PI'].transpose()

# Same per-row shift on input and output keeps the pairs aligned.
full_input[:train_region,:], full_output[:train_region,:] = shift_data(full_input[:train_region,:],
                                                                       full_output[:train_region,:])



# Normalization statistics are computed on the training region only and are
# reused later to normalize/de-normalize during the a-posteriori rollout.
norm_input, mean_input, std_input = normalize_data(full_input[:train_region,:])

norm_output, mean_output, std_output = normalize_data(full_output[:train_region,:])


training_input = norm_input
training_output = norm_output

print('shape of input')
print(np.shape(training_input))


print('shape of output')
print(np.shape(training_output))


# Random permutation so the train/test split is not time-ordered.
index=np.random.permutation(train_region)


print(std_input)
print(std_output)

print(mean_input)
print(mean_output)


input_train=training_input[index[0:train_num],:]
output_train=training_output[index[0:train_num],:]

test_input=training_input[index[train_num:(train_num+num_pred)],:]
test_output=training_output[index[train_num:(train_num+num_pred)],:]

# ---------------------------------------------------------------------------
# Transfer learning: rebuild the ANN architecture, load weights trained on
# another region, keep the early layers frozen, and fine-tune the rest.
# ---------------------------------------------------------------------------
model = Sequential()

# Frozen feature layers (trainable=False): their weights come from the
# pre-trained network loaded via load_weights below.
model.add(Dense(128,input_shape=(128,),activation=swish,trainable = False))
model.add(Dense(250,activation=swish, trainable = False))
model.add(Dense(250,activation=swish, trainable = False))
model.add(Dense(250,activation=swish, trainable = False))
model.add(Dense(250,activation=swish, trainable = False))
model.add(Dense(250,activation=swish, trainable = False))
# Last hidden layer and the linear output layer remain trainable.
model.add(Dense(250,activation=swish ))
model.add(Dense(128,activation=None))

model.compile(loss='mse', optimizer='Adam', metrics=['mae'])
model.load_weights('./weights_trained_ANN')

# Fine-tune only the unfrozen layers on the new region's data.
model.fit(input_train, output_train,epochs=50,batch_size=200,shuffle=True,validation_split=0.1)

model.save_weights('./weights_ANN_transfer_train')

# ---------------------------------------------------------------------------
# A-posteriori LES rollout setup: spectral operators, stored forcing, and
# initial conditions for the solver driven by the fine-tuned ANN.
# ---------------------------------------------------------------------------
pred_start = train_region + 50000  # snapshot index where the rollout begins

s=20          # ratio of LES to DNS time steps

NX = 128      # LES grid size
nu = 2e-2     # viscosity

dt = s*1e-2   # LES time step (s DNS steps of 1e-2)

Lx = 100
dx = Lx/NX
x = np.linspace(0, Lx-dx, num=NX)
# FFT-ordered wavenumbers (0..N/2, then negative frequencies), as a column.
kx = (2*math.pi/Lx)*np.concatenate((np.arange(0,NX/2+1,dtype=float),np.arange((-NX/2+1),0,dtype=float))).reshape([NX,1])


wave_num = np.concatenate((np.arange(0,int(NX/2+1)),np.arange(int(-NX/2+1),0)))

# Box-filter transfer function (sinc form) with the k=0 limit set to 1.
# NOTE(review): rho_bar appears unused below -- confirm whether it is needed.
rho_bar = np.sin(2*math.pi*(wave_num/NX))/(2*math.pi*wave_num/NX)
rho_bar[0] = 1


maxit=100000  # number of rollout steps

# Spectral first/second derivative operators.
D1 = 1j*kx
D2 = kx*kx
D1 = D1.reshape([NX,1])
D2 = D2.reshape([NX,1])
# NOTE(review): likely missing parentheses -- as written this computes
# D2 - mean/std rather than (D2 - mean)/std; D2_tensor is also unused below.
D2_tensor = np.float32((D2[0:int(NX/2)]-np.mean(D2[0:int(NX/2)])/np.std(D2[0:int(NX/2)])))

# Crank-Nicolson implicit factor for the viscous term.
D2x = 1 + 0.5*dt*nu*D2


u_store = np.zeros((NX,maxit))    # predicted velocity history
sub_store = np.zeros((NX,maxit))  # predicted subgrid-term history

force_dict = sio.loadmat('./f_bar_all_regions.mat')
# Align the stored forcing with region 13 and the rollout start time.
force_bar=force_dict['f_bar'][:,int(12*12500)+int(pred_start/s):]

# Two consecutive snapshots so the AB2 scheme has u^n and u^{n-1}.
u_old = full_input[pred_start-1,:].reshape([NX,1])
u = full_input[pred_start,:].reshape([NX,1])
u_fft = fft(u,axis=0)
u_old_fft = fft(u_old,axis=0)
# ANN subgrid prediction at t^{n-1}: normalize input, predict, de-normalize.
subgrid_prev_n = model.predict(((u_old-mean_input)/std_input).reshape((1,NX))).reshape(NX,1)
subgrid_prev_n = subgrid_prev_n*std_output+mean_output

# Main rollout loop: AB2 for the nonlinear flux and the ANN subgrid term,
# Crank-Nicolson for the viscous term, forcing read from the stored series.
for i in range(maxit):

    # ANN subgrid prediction at the current step (normalize, predict,
    # de-normalize with the training statistics).
    subgrid_n = model.predict(((u-mean_input)/std_input).reshape((1,NX))).reshape(NX,1)
    subgrid_n = subgrid_n*std_output+mean_output

    force=force_bar[:,i].reshape((NX,1))

    # Nonlinear fluxes at t^n and t^{n-1}: spectral derivative of u^2/2.
    F = D1*fft(.5*(u**2),axis=0)
    F0 = D1*fft(.5*(u_old**2),axis=0)

    # NOTE(review): standard AB2 would combine (3*s_n - s_{n-1})/2; here the
    # previous subgrid term is added with a plus sign -- confirm the sign.
    uRHS = -0.5*dt*(3*F- F0) - 0.5*dt*nu*(D2*u_fft) + u_fft + dt*fft(force,axis=0) \
        -fft(dt*3/2*subgrid_n + 1/2*dt*subgrid_prev_n,axis = 0)


    subgrid_prev_n = subgrid_n
    u_old_fft = u_fft
    u_old = u

    # Implicit Crank-Nicolson solve: divide by (1 + 0.5*dt*nu*k^2).
    u_fft = uRHS/D2x.reshape([NX,1])

    u = np.real(ifft(u_fft,axis=0))
    u_store[:,i] = u.squeeze()
    sub_store[:,i] = subgrid_n.squeeze()

sio.savemat('./DDP_results_transfer.mat',
            {'u_pred':u_store, 'sub_pred':sub_store})
Loading

0 comments on commit 86576a3

Please sign in to comment.