Commit f00ad8d2 authored by Orkun Şensebat

Refactor refactored example

No time loss

In preparation for a variable time window W
parent dadbbb16
Pipeline #467215 passed in 53 seconds
@@ -12,7 +12,6 @@ np.random.seed(wxyz)
 n_configs = 32      # number of detector configs
 n_samples = 100000  # number of simulations per config
 T0 = 1000           # (ns), maximum time delay
-W = 1               # (ns), time coincidence window
 # orientation of half-wave plate in observation station 2
 phi2 = 0
@@ -33,66 +32,78 @@ def observation_station(theta, phi): # c, s, cHWP, sHWP):
     return x, t
 
-# starts a stopwatch to measure simulation time
-start = time.time()
-
-# this is our result array, in which we store how often each detection combination was detected during the entire experiment
-# 1st index: arriving in station1 at bottom (0) or at top (1)
-# 2nd index: arriving in station2 at bottom (0) or at top (1)
-# 3rd index: non-coincidences included (0) or not (1)
-# 4th index: configuration of the polarizers/detectors
-counts = np.zeros(shape=(2, 2, 2, n_configs), dtype=np.int32)  # set all counts to zero
-
-# we split a whole rotation (2*pi) into n_configs steps and run the experiment for each configuration
-for delta_psi in range(n_configs):  # loop over different settings of EOM1
-    phi1 = delta_psi * 2 * np.pi / n_configs
-    # in the end we want probabilities, so we iterate over a number of random samples,
-    # which give us - normalized by the total number of samples - the sought probabilities
-    for i in range(n_samples):
-        # we randomize the polarization of the source photons;
-        # however, they are always entangled, so they have a phase of pi/2 between them
-        theta1 = np.random.rand() * 2 * np.pi
-        theta2 = theta1 + np.pi / 2
-        # produces binary x values along with a timestamp for the current event
-        # for both observation stations
-        x1, t1 = observation_station(theta1, phi1)
-        x2, t2 = observation_station(theta2, phi2)
-        # increases the count of the associated (x1, x2) counter element
-        # (with and without time window)
-        counts[x1, x2, 0, delta_psi] = counts[x1, x2, 0, delta_psi] + 1  # Malus law model
-        if abs(t1 - t2) < W:
-            counts[x1, x2, 1, delta_psi] = counts[x1, x2, 1, delta_psi] + 1  # Malus law model + time window
-
-# data analysis
-# 1st index: no time window (0), use time coincidences (1)
-E_tot = np.zeros(shape=(2, n_configs), dtype=np.int32)
-E_12 = np.zeros(shape=(2, n_configs), dtype=np.float64)
-E_1 = np.zeros(shape=(2, n_configs), dtype=np.float64)
-E_2 = np.zeros(shape=(2, n_configs), dtype=np.float64)
-# builds the expectation values for each station as well as the correlation between them;
-# does this for all configurations, both with and without the time window
-# uses formulas from lecture 5, page 25
-for j in range(n_configs):
-    for i in [0, 1]:
-        # calculates the numerators
-        E_tot[i, j] = counts[0, 0, i, j] + counts[1, 1, i, j] + counts[1, 0, i, j] + counts[0, 1, i, j]
-        E_12[i, j] = counts[0, 0, i, j] + counts[1, 1, i, j] - counts[1, 0, i, j] - counts[0, 1, i, j]
-        E_1[i, j] = counts[0, 0, i, j] + counts[0, 1, i, j] - counts[1, 1, i, j] - counts[1, 0, i, j]
-        E_2[i, j] = counts[0, 0, i, j] + counts[1, 0, i, j] - counts[1, 1, i, j] - counts[0, 1, i, j]
-        # normalizes (while preventing division by zero)
-        if E_tot[i, j] > 0:
-            E_12[i, j] = E_12[i, j] / E_tot[i, j]
-            E_1[i, j] = E_1[i, j] / E_tot[i, j]
-            E_2[i, j] = E_2[i, j] / E_tot[i, j]
+def simulation(time_window):
+    # this is our result array, in which we store how often each detection combination was detected during the entire experiment
+    # 1st index: arriving in station1 at bottom (0) or at top (1)
+    # 2nd index: arriving in station2 at bottom (0) or at top (1)
+    # 3rd index: non-coincidences included (0) or not (1)
+    # 4th index: configuration of the polarizers/detectors
+    result = np.zeros(shape=(2, 2, 2, n_configs), dtype=np.int32)  # set all counts to zero
+
+    # we split a whole rotation (2*pi) into n_configs steps and run the experiment for each configuration
+    for delta_psi in range(n_configs):  # loop over different settings of EOM1
+        phi1 = delta_psi * 2 * np.pi / n_configs
+        # in the end we want probabilities, so we iterate over a number of random samples,
+        # which give us - normalized by the total number of samples - the sought probabilities
+        for i in range(n_samples):
+            # we randomize the polarization of the source photons;
+            # however, they are always entangled, so they have a phase of pi/2 between them
+            theta1 = np.random.rand() * 2 * np.pi
+            theta2 = theta1 + np.pi / 2
+            # produces binary x values along with a timestamp for the current event
+            # for both observation stations
+            x1, t1 = observation_station(theta1, phi1)
+            x2, t2 = observation_station(theta2, phi2)
+            # increases the count of the associated (x1, x2) counter element
+            # (with and without time window)
+            result[x1, x2, 0, delta_psi] = result[x1, x2, 0, delta_psi] + 1  # Malus law model
+            if abs(t1 - t2) < time_window:
+                result[x1, x2, 1, delta_psi] = result[x1, x2, 1, delta_psi] + 1  # Malus law model + time window
+    return result
+
+
+def count_coincidences(counts):
+    E = {}
+    # 1st index: no time window (0), use time coincidences (1)
+    E['tot'] = np.zeros(shape=(2, n_configs), dtype=np.int32)
+    E[12] = np.zeros(shape=(2, n_configs), dtype=np.float64)
+    E[1] = np.zeros(shape=(2, n_configs), dtype=np.float64)
+    E[2] = np.zeros(shape=(2, n_configs), dtype=np.float64)
+    # builds the expectation values for each station as well as the correlation between them;
+    # does this for all configurations, both with and without the time window
+    # uses formulas from lecture 5, page 25
+    for j in range(n_configs):
+        for i in [0, 1]:
+            # calculates the numerators
+            E['tot'][i, j] = counts[0, 0, i, j] + counts[1, 1, i, j] + counts[1, 0, i, j] + counts[0, 1, i, j]
+            E[12][i, j] = counts[0, 0, i, j] + counts[1, 1, i, j] - counts[1, 0, i, j] - counts[0, 1, i, j]
+            E[1][i, j] = counts[0, 0, i, j] + counts[0, 1, i, j] - counts[1, 1, i, j] - counts[1, 0, i, j]
+            E[2][i, j] = counts[0, 0, i, j] + counts[1, 0, i, j] - counts[1, 1, i, j] - counts[0, 1, i, j]
+            # normalizes (while preventing division by zero)
+            if E['tot'][i, j] > 0:
+                E[12][i, j] = E[12][i, j] / E['tot'][i, j]
+                E[1][i, j] = E[1][i, j] / E['tot'][i, j]
+                E[2][i, j] = E[2][i, j] / E['tot'][i, j]
+    return E
+
+
+# starts a stopwatch to measure simulation time
+start = time.time()
+W = 1  # (ns), time coincidence window
+counts = simulation(time_window=W)
+E = count_coincidences(counts)
 
 # plots the graphs
 # 1. as expected
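The quantities returned by `count_coincidences` can be read directly off the numerators above: mapping each binary outcome x to a sign of +1 for the bottom detector (x = 0) and -1 for the top detector (x = 1), the entries of `E` are the normalized estimators

```math
E_{12} = \frac{N_{00} + N_{11} - N_{01} - N_{10}}{N_\mathrm{tot}}, \qquad
E_{1} = \frac{N_{00} + N_{01} - N_{10} - N_{11}}{N_\mathrm{tot}}, \qquad
E_{2} = \frac{N_{00} + N_{10} - N_{01} - N_{11}}{N_\mathrm{tot}},
```

where `N_{x1 x2}` is `counts[x1, x2, i, delta_psi]` and `N_tot` is their sum, so `E[12]`, `E[1]` and `E[2]` approximate the correlation and the two single-station expectation values in this ±1 convention, evaluated once without (i = 0) and once with (i = 1) the coincidence window.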
@@ -108,7 +119,7 @@ plt.figure()
 plt.xlabel(r'$\Delta \varphi$')
 plt.ylabel(r'$<x_1 x_2>$')
 plt.plot(phi_theory, theory, '.', markersize=1, color='orange')
-plt.plot(phi, E_12[plot_i, :], 'o', color='blue')
+plt.plot(phi, E[12][plot_i, :], 'o', color='blue')
 plt.savefig('correlation.pdf')
 if showPlots:
     plt.show()
@@ -122,7 +133,7 @@ plt.ylabel(r'$<x_1><x_2>$')
 plt.ylim([-1, 1])
 plt.xticks([0, 45, 90, 135, 180, 225, 270, 305, 360])
 plt.plot(phi_theory, line, '.', markersize=1, color='orange')
-plt.plot(phi, E_1[plot_i, :]*E_2[plot_i, :], 'o', color='blue')
+plt.plot(phi, E[1][plot_i, :]*E[2][plot_i, :], 'o', color='blue')
 plt.savefig('expectation_value.pdf')
 if showPlots:
     plt.show()
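Since the commit message announces a variable time window W as the next step, here is a minimal sketch of how the refactored functions could be driven once that lands. The candidate window values and the `correlations_by_window` dict are illustrative, not part of this commit, and the snippet assumes it runs in this script after `simulation` and `count_coincidences` are defined:

```python
# Hypothetical follow-up sketch (not in this commit): sweep several coincidence
# windows and keep the windowed correlation curve for each value of W.
candidate_windows = [0.5, 1.0, 2.0, 5.0]  # (ns), illustrative values only

correlations_by_window = {}
for W in candidate_windows:
    counts = simulation(time_window=W)  # 2 x 2 x 2 x n_configs count array
    E = count_coincidences(counts)      # dict with keys 'tot', 12, 1, 2
    # row 1 of E[12] is the correlation <x1 x2> per configuration with the
    # coincidence window applied; row 0 ignores the window
    correlations_by_window[W] = E[12][1, :]
```

Passing W in as `time_window` instead of reading a module-level constant is what makes a sweep like this possible without touching the simulation body again.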