In [1]:
import sys
from util import get_neighbor_matrix_fixed_num, get_batches, sortBycol
import numpy as np
import matplotlib.pyplot as plt
from jax import grad, value_and_grad, jit, vmap, random
import jax.numpy as jnp
In [2]:
@jit
def loss_jax(params, df_batch, un):
    # Variance-based divergence for one batch: the gap between the sorted
    # observed y values and the sorted scaled uniform noise params*un.
    y = df_batch[:, 1]
    vec = jnp.sort(y) - jnp.sort(params * un)
    return jnp.var(vec)

val_and_grad = value_and_grad(loss_jax)
# Inner vmap: map over the repetition axis (axis 1) of un for a single batch.
vmap_val_and_grad_inner = vmap(val_and_grad, in_axes=(None, None, 1), out_axes=0)
# Outer vmap: map over the batches (axis 0 of batches, axis 2 of un).
vmap_val_and_grad_outer = vmap(vmap_val_and_grad_inner, in_axes=(None, 0, 2), out_axes=0)
vmap_val_and_grad_outer = jit(vmap_val_and_grad_outer)
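
The loss is the variance of the gap between the sorted y values and the sorted simulated noise params*un; the nested vmap evaluates it, and its gradient in params, for every repetition and every batch at once. A quick shape check (a sketch with made-up toy dimensions, not the real data) illustrates the mapping axes:

In [ ]:
# Illustrative shape check only: with n_batches=3, batch_sz=5, nrep=4,
# the outer vmap maps over axis 0 of `batches` and axis 2 of `un`, the
# inner vmap over axis 1 of `un`, so the returned loss and gradient
# both have shape (n_batches, nrep) = (3, 4).
toy_batches = jnp.zeros((3, 5, 2))
toy_un = jnp.zeros((5, 4, 3))
toy_loss, toy_grad = vmap_val_and_grad_outer(0.2, toy_batches, toy_un)
print(toy_loss.shape, toy_grad.shape)  # (3, 4) (3, 4)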
In [3]:
def batch_test(df, resolution, npos):
    # Build neighbourhood-based batches of (x, y) pairs from the sorted data.
    nghM = get_neighbor_matrix_fixed_num(df, resolution)
    batches = get_batches(data=df, neighborM=nghM, resolution=resolution, npos=npos)
    batches = jnp.array(batches)

    # All batches share the same size, so read it off the first one.
    df_batch = batches[0]
    batch_sz, _ = df_batch.shape
    return batches, batch_sz
In [4]:
def test(batches, key_seed=42, step_sz=1.0, exp=200, nrep=100):
    key = random.PRNGKey(key_seed)
    df_batch = batches[0]
    batch_sz, _ = df_batch.shape
    theta_H = 0.2  # initial value of the scale parameter theta
    params = theta_H

    loss_res = []
    t_res = []
    gradt_res = []

    for j in range(exp):
        # Fresh uniform noise for every sample, repetition, and batch.
        key, subkey = random.split(key)
        un = random.uniform(subkey, shape=(batch_sz, nrep, len(batches)), minval=0.0, maxval=1.0)
        loss_val, grads = vmap_val_and_grad_outer(params, batches, un)
        ave_loss, ave_grad = np.mean(loss_val), np.mean(grads)
        # Plain gradient-descent step on theta.
        params -= step_sz * ave_grad
        loss_res.append(ave_loss)
        t_res.append(params)
        gradt_res.append(ave_grad)
        if j % 10 == 0:
            sys.stdout.write("\rIteration %i" % j)
    return loss_res, t_res, gradt_res, params

Data generation

In [5]:
# Set problem dimensions
nsamples = 500
resolution = 0.05
npos = 100

def f_t(x):
    # return x
    # return jnp.sin(4*x)
    # return 0.1*((2.5*x)**3 - x)
    # return 0.25*x**3-0.1*x
    if x < 0:
        return 0.5*x**3 -x
    else:   
        return 1 - 0.5*x**3 + x
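
Note that the Python `if` in f_t cannot be traced by vmap/jit, which is presumably why the vmap(f_t) call is commented out in the next cell. A branch-free variant using jnp.where (a sketch only, not used below) would look like this:

In [ ]:
# Sketch: a branch-free version of f_t, so vmap(f_t_vec)(x_samples) would
# also work. The notebook below keeps the list comprehension instead.
def f_t_vec(x):
    return jnp.where(x < 0, 0.5 * x**3 - x, 1 - 0.5 * x**3 + x)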
In [6]:
# Split PRNG keys for data generation
key = random.PRNGKey(0)
k1, k2 = random.split(key)

# Generate samples with additive noise
ksample, knoise = random.split(k1)
x_samples = random.uniform(k1,shape=(nsamples, 1),minval=-1, maxval=1)

# vmap(f_t)(x_samples) fails here because of the Python `if` in f_t,
# so evaluate it with a plain list comprehension instead.
y_samples = np.array([f_t(x) for x in x_samples])
y_samples += 1.0*random.uniform(knoise,shape=(nsamples, 1),minval=0.0, maxval=1.0)
x= x_samples.reshape(-1)
y= y_samples.reshape(-1)

plt.scatter(x,y,marker='.')
plt.xlabel('X',fontsize=20)
plt.ylabel('Y',fontsize=20)

n = nsamples
df_c = np.zeros([n,2])
df_c[:,0],df_c[:,1] = x,y
df_sort_c = sortBycol(df_c,0)

df_rv = np.zeros([n,2])
df_rv[:,0],df_rv[:,1] = y,x
df_sort_rv = sortBycol(df_rv,0)

Compute the minimal variance-based divergence measure in both directions

In [7]:
c_batches, c_batch_sz = batch_test(df_sort_c, resolution, npos)
c_loss_res, c_t_res, c_gradt_res, params_c = test(c_batches, key_seed=42, step_sz=1.0, exp=100, nrep=50)
loss_c = np.mean(c_loss_res[-10:]) / params_c

rv_batches, rv_batch_sz = batch_test(df_sort_rv, resolution, npos)
rv_loss_res, rv_t_res, rv_gradt_res, params_rv = test(rv_batches, key_seed=42, step_sz=1.0, exp=100, nrep=50)
loss_rv = np.mean(rv_loss_res[-10:]) / params_rv
Iteration 90

In the causal direction

In [8]:
print(np.mean(c_loss_res[-10:])/params_c)
plt.figure(figsize=(15,5))

plt.subplot(1,3,1)
plt.title('loss')
plt.plot(np.arange(0,len(c_loss_res),1), c_loss_res)

plt.subplot(1,3,2)
plt.title('theta')
plt.plot(np.arange(0,len(c_t_res),1), c_t_res)

plt.subplot(1,3,3)
plt.title('grad theta')
plt.plot(np.arange(0,len( c_gradt_res),1), c_gradt_res)
0.009291233
[Figure: three panels showing loss, theta, and grad theta over the iterations (causal direction)]

In the reverse direction

In [9]:
print(np.mean(rv_loss_res[-10:])/params_rv)
plt.figure(figsize=(15,5))

plt.subplot(1,3,1)
plt.title('loss')
plt.plot(np.arange(0,len(rv_loss_res),1), rv_loss_res)

plt.subplot(1,3,2)
plt.title('theta')
plt.plot(np.arange(0,len(rv_t_res),1), rv_t_res)

plt.subplot(1,3,3)
plt.title('grad theta')
plt.plot(np.arange(0,len( rv_gradt_res),1), rv_gradt_res)
0.016730879
[Figure: three panels showing loss, theta, and grad theta over the iterations (reverse direction)]
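
The two normalized divergence measures can then be compared directly. Under the working rule that the smaller measure indicates the causal direction (consistent with the values printed above), a minimal check is:

In [ ]:
# Sketch: compare the normalized divergence measures computed earlier
# (loss_c for the causal direction, loss_rv for the reverse direction).
# Treating the smaller value as the causal direction is an assumption here.
inferred = 'X -> Y' if loss_c < loss_rv else 'Y -> X'
print('loss_c = %.6f, loss_rv = %.6f, inferred direction: %s' % (loss_c, loss_rv, inferred))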

Plot for Sec. 6.1

In [10]:
x_axis = [10,25,50,100,200,500]
y_poly = [0.49,0.86,0.84,0.99,1.0,1.0]
y_sin = [0.45,1.0,1.0,1.0,1.0,1.0]
y_iden = [0.34,0.38,0.49,0.68,0.92,1.0]
y_disc = [0.44,0.59,0.6,0.79,0.9,1.0]
In [11]:
plt.figure(figsize=(8,6))
plt.plot(x_axis,y_poly,'o-',markersize=20,label='$y=0.1(2.5x)^3-0.1x+E_y$')
plt.plot(x_axis,y_sin,'*-',markersize=20,label=r'$y=\sin(4x)+E_y$')
plt.plot(x_axis,y_iden,'v-',markersize=20,label='$y=x+E_y$')
plt.plot(x_axis,y_disc,'d-',markersize=20,label='$y=f_{piece}(x)+E_y$')



plt.legend(loc=0,fontsize=20)
plt.title('')
plt.xlabel('Sample size',fontsize=20)
plt.ylabel('Accuracy',fontsize=20)

plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)

plt.savefig('syn1.pdf')
In [ ]: