Rev. | f957d0aae85b96c511b9429b51a0249b29e0667d
---|---
Size | 81,996 bytes
Time | 2008-08-06 05:08:59
Author | iselllo
Log Message | I modified the code: now I simply give the number of directories as a
#! /usr/bin/env python
#from matplotlib import rc
import scipy as s
from scipy import stats #I need this module for the linear fit
import numpy as n
import pylab as p
#import rpy as r
#from rpy import r
#import distance_calc as d_calc
# now I try performing a least-square fitting
import scipy.optimize as sopt
#p.rc('text', usetex=True)
ini_conf=0
fin_conf=3000 #configurations I read initially in read_test.tcl
figure=0 #tells whether the many per-configuration PDFs should be produced (1) or not (0)
save_every=50 # if figure == 1, the results are plotted every save_every configurations
save_tot=1 # tells whether I should save the total cluster and radius-of-gyration distributions
# into two very large files
ini_dens=0.01 #initial cluster density
N_ini=5000. #initial number of clusters in the system i.e. total number of monomers
box_vol=N_ini/ini_dens
print "the box vol is, ", box_vol
correction_R_g=1 #whether I should correct the calculated radius of gyration by adding a
#contribution from a single monomer
# if == 1 ---> the radius of gyration of a monomer is used to correct the results
# if == 2 ---> the geometric radius of a monomer is used to correct the results
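#For reference (a quick check, not part of the original analysis): a homogeneous sphere of
#radius a has radius of gyration R_g = s.sqrt(3./5.)*a, which is where the s.sqrt(3./5.)*0.5
#used below comes from, the monomer radius being 0.5 in reduced units.
print "the monomer radius of gyration (radius 0.5) is, ", s.sqrt(3./5.)*0.5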
#my_config=19 #here labels the saved configurations but it is not directly the number
# of the file with the saved configuration
by=2 #I need it to select the elements from time.dat
by_clusters=2 # this tells how often was the cluster number recorded
min_cluster=5. # minimum number of monomers a cluster has to contain in order to be considered
#for the time-dependent statistics.
min_repetition=200. #minimum number of times a cluster size has to occur to be considered for the time-independent
# (i.e. time-averaged) statistics
#now I identify the threshold for the two regimes where I am going to calculate the
#fractal dimension
lower_threshold=15.
if (lower_threshold<min_cluster):
print "warning: the lower threshold is smaller than the smallest cluster size considered"
ini_conf=ini_conf/by
fin_conf=fin_conf/by
n_dir=0 #used to count the number of directories
time=p.load("../1/time.dat")
n_clus=s.zeros(len(time)/by_clusters)
#k_mean=s.zeros(len(time)/by) #mean_cluster_size
print "the length of n_clus is, ", len(n_clus)
pick_time_cluster=s.arange(0,len(time),by_clusters)
time_clu=time[pick_time_cluster]
#print "time (initially read) is, ", time
print "by is, ", by
#print "len(time) is, ", len(time)
pick_time=s.arange(ini_conf*by,fin_conf*by, by)
#print "pick_time is, ", pick_time
time=time[pick_time]
#print "len(time) at first is, ", len(time)
print "time at the beginning is, ", time
p.save("time_red.dat",time)
mean_free_path=s.zeros(len(time))
my_tau_coll=s.zeros(len(time))
mean_free_path_correct=s.zeros(len(time))
mean_velocity=s.zeros(len(time))
mean_clus_separation=s.zeros(len(time))
n_clus_small=s.zeros(len(time))
n_clus_large=s.zeros(len(time))
n_clus_test=s.zeros(len(time))
my_fractal_dim=s.zeros((fin_conf-ini_conf))
my_fractal_dim2=s.zeros((fin_conf-ini_conf))
my_fractal_dim3=s.zeros((fin_conf-ini_conf))
my_fra_low=s.zeros((fin_conf-ini_conf))
my_fra_high=s.zeros((fin_conf-ini_conf))
r_stat_low=s.zeros((fin_conf-ini_conf))
r_stat_high=s.zeros((fin_conf-ini_conf))
my_fractal_dim_large=s.zeros((fin_conf-ini_conf))
my_fractal_dim_small=s.zeros((fin_conf-ini_conf))
R_gyr_mean=s.zeros((fin_conf-ini_conf))
#prefactor=s.zeros((fin_conf-ini_conf))
#prefactor_small=s.zeros((fin_conf-ini_conf))
#prefactor_large=s.zeros((fin_conf-ini_conf))
select_sizes=s.zeros(3) #sizes of clusters whose d_f I want to explore
df_selected=s.zeros((len(select_sizes),(fin_conf-ini_conf))) #calculated selected df
prefactor_R=s.zeros((fin_conf-ini_conf))
prefactor_small_R=s.zeros((fin_conf-ini_conf))
prefactor_large_R=s.zeros((fin_conf-ini_conf))
delta_prefactor_R=s.zeros((fin_conf-ini_conf))
delta_prefactor_small_R=s.zeros((fin_conf-ini_conf))
delta_prefactor_large_R=s.zeros((fin_conf-ini_conf))
prefactor_R_raw=s.zeros((fin_conf-ini_conf))
prefactor_small_R_raw=s.zeros((fin_conf-ini_conf))
prefactor_large_R_raw=s.zeros((fin_conf-ini_conf))
delta_prefactor_R_raw=s.zeros((fin_conf-ini_conf))
delta_prefactor_small_R_raw=s.zeros((fin_conf-ini_conf))
delta_prefactor_large_R_raw=s.zeros((fin_conf-ini_conf))
r_stat=s.zeros((fin_conf-ini_conf))
std_err=s.zeros((fin_conf-ini_conf))
delta_df=s.zeros((fin_conf-ini_conf))
k_aver=s.zeros((fin_conf-ini_conf))
delta_df_small=s.zeros((fin_conf-ini_conf))
delta_df_large=s.zeros((fin_conf-ini_conf))
count_config=0
#print "n_clus is, ", n_clus
#cluster_long=s.zeros(1)
#r_gyr_long=s.zeros(1)
cluster_tot=s.zeros(1)
r_gyr_tot=s.zeros(1)
def fitting_stat(x,y):
#least-squares linear fit of y against x; returns [slope, intercept, r, sd_slope, sd_intercept]
#(the two standard errors are only filled in when len(x)>2)
slope, intercept, r, prob2, see = stats.linregress(x,y)
#print "len(x) is, ", len(x)
if (len(x)>2):
see=see*s.sqrt(len(x)/(len(x)-2.))
mx = x.mean()
sx2 = ((x-mx)**2).sum()
sd_intercept = see * s.sqrt(1./len(x) + mx*mx/sx2)
sd_slope = see * s.sqrt(1./sx2)
results=s.zeros(5)
results[0]=slope
results[1]=intercept
results[2]=r
if (len(x)>2):
results[3]=sd_slope
results[4]=sd_intercept
return results
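#A minimal sanity check of fitting_stat on exact synthetic data (a sketch, not part of
#the original analysis): y = 1 + 2*x should give slope 2, intercept 1, r = 1 and
#(numerically) vanishing standard errors.
x_check=s.arange(10.)
check=fitting_stat(x_check, 1.+2.*x_check)
print "fitting_stat self-test [slope, intercept, r] is, ", check[0], check[1], check[2]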
n_dir=8. #number of directories to read (one simulation per directory); see the log message
dir_list=s.arange(n_dir)+1
for my_config in xrange(ini_conf,fin_conf):
r_gyr_dist=s.zeros(1) # something to start concatenating the arrays
n_comp=s.zeros(1) #same for the clusters
my_config=my_config*by
for m in xrange(len(dir_list)):
dir_name="../%01d"%(dir_list[m])
print "my_config is, ", my_config
#cluster_long=s.zeros(1)
#r_gyr_long=s.zeros(1)
temp_clu_name=dir_name+"/cluster_dist%05d"%my_config
temp_r_gyr_name=dir_name+"/R_gyr_dist%05d"%my_config
#cluster_name="cluster_dist%05d"%my_config
temp_clu=p.load(temp_clu_name)
temp_r_gyr=p.load(temp_r_gyr_name)
if (count_config==0):
temp_clus_name=dir_name+"/number_cluster.dat"
temp_clus=p.load(temp_clus_name)
# temp_clus=+p.load("../1/number_cluster.dat") #I load the number of clusters at each time (but I load it only once!)
n_clus=n_clus+temp_clus
print "n_clus[0] is, ", n_clus[0]
stack_clus=temp_clus
# n_dir=n_dir+1
#n_clus=n_clus+temp_nc
n_comp=s.concatenate([n_comp,temp_clu])
r_gyr_dist=s.concatenate([r_gyr_dist,temp_r_gyr])
stack_clus=s.vstack((stack_clus,temp_clus))
#cluster_long=s.concatenate([cluster_long,temp_clu])
#r_gyr_long=s.concatenate([r_gyr_long,temp_r_gyr])
cluster_tot=s.concatenate([cluster_tot,temp_clu])
r_gyr_tot=s.concatenate([r_gyr_tot,temp_r_gyr])
n_comp_mon=s.copy(n_comp)
n_comp_mon=n_comp_mon[s.where(n_comp_mon>0.)] #I need this line since n_comp_mon comes from n_comp,
#which is initialized as [0] (an array with a single zero entry) before the data read from file
#are appended to it.
#Without it, I would artificially count an extra cluster with zero monomers inside.
#So n_comp_mon holds the full distribution of k (the number of monomers in a cluster),
#including monomers.
if (count_config==0):
print "n_comp_mon is, ", n_comp_mon
p.save("n_comp_monomers_initial.dat",n_comp_mon)
my_choice=s.where(n_comp>=min_cluster)
n_comp=n_comp[my_choice] # select only clusters having more than min_cluster monomers
#so n_comp is now distinct from n_comp_mon. This automatically removes the initial zero I had as a
#consequence of the fact that initially n_comp=[0]
if (correction_R_g ==1):
r_sphere=s.sqrt(3./5.)*0.5 #radius of gyration of a single monomer
r_gyr_dist=s.sqrt(r_gyr_dist**2.+r_sphere**2.)
elif (correction_R_g == 2):
#r_sphere=s.sqrt(3./5.)*0.5 #radius of gyration of a single monomer
r_gyr_dist=s.sqrt(r_gyr_dist**2.+0.5**2.)
r_gyr_dist=r_gyr_dist[my_choice] #and the corresponding radii of gyration
# r_gyr_dist=p.load(cluster_name)
#r_gyr_dist=r_gyr_dist[my_choice]
#print "n_comp is,", n_comp
#print "r_gyr_dist is, ", r_gyr_dist
#print "n_comp is,", n_comp
#print 'my_choice is, ', my_choice
#cluster_name="R_gyr_dist%05d"%my_config
# Here I define my own error function i.e. the function I want to minimize
def residuals(p, y, x):
a,b,c = p
err = abs(y-(a+b*x**c))
return err
def residuals2(p, y, x):
b,c = p
err = abs(y-(b*x**c))
return err
#print "my_config is, ", my_config
def residuals_lin(p, y, x):
a,b = p
err = abs(y-(a+b*x))
return err
#NB!!!!!: n_time_small holds the aggregates below the threshold and above a minimum size
# (e.g. 5 monomers in a cluster); it is used mainly to determine df.
#Instead, n_clus_small is used to count the number of clusters below the same threshold (i.e. down to
# monomers)!!!! They are NOT the same quantity.
#NB2: for the small clusters, n_comp already lists only the clusters above min_cluster,
# so the only condition needed is n_comp<=lower_threshold
n_time_small=n_comp[s.where(n_comp<=lower_threshold)]
n_time_large=n_comp[s.where(n_comp>lower_threshold)]
r_time_small=r_gyr_dist[s.where(n_comp<=lower_threshold)]
r_time_large=r_gyr_dist[s.where(n_comp>lower_threshold)]
#Here n_clus_small instead contains all the clusters (including monomers) below lower_threshold
n_clus_small[count_config]=len(n_comp_mon[s.where(n_comp_mon<=lower_threshold)])
n_clus_large[count_config]=len(n_comp_mon[s.where(n_comp_mon>lower_threshold)])
n_clus_test[count_config]=len(n_comp_mon)
R_gyr_mean[count_config]=r_gyr_dist.mean() #So, mean radius of gyration calculated only on the \
#clusters I intend to consider for my statistics (i.e. those having at least min_cluster monomers).
if (len(n_time_small)>=2.):
#lin_fit=stats.linregress(s.log10(n_time_small),s.log10(r_time_small))
lin_fit=fitting_stat(s.log10(n_time_small),s.log10(r_time_small))
my_fra_low[count_config]=1./lin_fit[0]
r_stat_low[count_config]=lin_fit[2]**2.
#print "lin_fit[4] is, ", lin_fit[4]
df=1./lin_fit[0]
my_fractal_dim_small[count_config]=df
#prefactor_small[count_config]=(0.5/10**lin_fit[1])**df
prefactor_small_R[count_config]=10.**lin_fit[1]
delta_prefactor_small_R[count_config]=prefactor_small_R[count_config]*\
s.log(10.)*lin_fit[4]
prefactor_small_R_raw[count_config]=lin_fit[1]
delta_prefactor_small_R_raw[count_config]=lin_fit[4]
delta_df_small[count_config]=df*df*lin_fit[3]
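#(error propagation used above: df = 1/slope, hence delta_df = df**2 * sd_slope; the
#prefactor is 10**intercept, hence its error is 10**intercept * s.log(10.) * sd_intercept)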
if ((figure ==1) & (s.remainder(count_config,save_every)==0)):
my_fit=lin_fit[1]+lin_fit[0]*s.log10(n_time_small)
fig = p.figure()
axes = fig.gca()
save_fit_small=s.copy(my_fit)
p.loglog(n_time_small,r_time_small,"ro",\
n_time_small,10.**my_fit,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
p.title('Radius of gyration vs Cluster-size')
p.grid(True)
# cluster_name="N_vs_radius_gyration_unprocessed.pdf%05d"%my_config
cluster_name="R_g_vs_N_fit_small_clusters%05d"%my_config
p.savefig(cluster_name)
p.clf()
if (len(n_time_large)>=2.):
#lin_fit=stats.linregress(s.log10(n_time_large),s.log10(r_time_large))
lin_fit=fitting_stat(s.log10(n_time_large),s.log10(r_time_large))
my_fra_high[count_config]=1./lin_fit[0]
r_stat_high[count_config]=lin_fit[2]**2.
df=1./lin_fit[0]
my_fractal_dim_large[count_config]=df
#prefactor_large[count_config]=(0.5/10**lin_fit[1])**df
prefactor_large_R[count_config]=10.**lin_fit[1]
delta_prefactor_large_R[count_config]=prefactor_large_R[count_config]*\
s.log(10.)*lin_fit[4]
delta_df_large[count_config]=df*df*lin_fit[3]
prefactor_large_R_raw[count_config]=lin_fit[1]
delta_prefactor_large_R_raw[count_config]=lin_fit[4]
if ((figure ==1) & (s.remainder(count_config,save_every)==0)):
my_fit=lin_fit[1]+lin_fit[0]*s.log10(n_time_large)
save_fit_large=s.copy(my_fit)
fig = p.figure()
axes = fig.gca()
axes.loglog(n_time_large,r_time_large,"ro",\
n_time_large,10.**my_fit,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
p.title('Radius of gyration vs Cluster-size')
p.grid(True)
# cluster_name="N_vs_radius_gyration_unprocessed.pdf%05d"%my_config
cluster_name="R_g_vs_N_fit_large_clusters%05d"%my_config
p.savefig(cluster_name)
p.clf()
if ((len(n_time_small)>=2.) & (len(n_time_large)>=2.)):
if ((figure ==1) & (s.remainder(count_config,save_every)==0)):
fig = p.figure()
axes = fig.gca()
axes.loglog(n_time_large,r_time_large,"ro",\
n_time_large,10.**save_fit_large,linewidth=2.)
axes.loglog(n_time_small,r_time_small,"ko",\
n_time_small,10.**save_fit_small,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
p.title('Radius of gyration vs Cluster-size')
p.grid(True)
# cluster_name="N_vs_radius_gyration_unprocessed.pdf%05d"%my_config
cluster_name="R_g_vs_N_fit_large_and_small_clusters%05d"%my_config
p.savefig(cluster_name)
p.clf()
#lin_fit=stats.linregress(s.log10(n_comp),s.log10(r_gyr_dist))
lin_fit=fitting_stat(s.log10(n_comp),s.log10(r_gyr_dist))
my_fractal_dim[count_config]=1./lin_fit[0]
df=1./lin_fit[0]
#prefactor[count_config]=(0.5/10**lin_fit[1])**df
prefactor_R[count_config]=10.**lin_fit[1]
delta_prefactor_R[count_config]=prefactor_R[count_config]*\
s.log(10.)*lin_fit[4]
prefactor_R_raw[count_config]=lin_fit[1]
delta_prefactor_R_raw[count_config]=lin_fit[4]
my_prefactor=10.**lin_fit[1]
df_distr=s.log(n_comp)/s.log(r_gyr_dist/my_prefactor)
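#(df_distr follows from inverting R = my_prefactor * N**(1./df) for each cluster:
# N = (R/my_prefactor)**df, so df = s.log(N)/s.log(R/my_prefactor))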
p.save("time_dependent_distr_fractal%05d"%count_config, df_distr)
if ((s.remainder(count_config,save_every)==0) & (len(df_distr)>2)):
fig = p.figure()
axes = fig.gca()
n_bins=50
#print "the length of df_distr is, ", len(df_distr)
axes.hist(df_distr,bins=n_bins, normed=0)
p.ylabel('frequency')
p.title('Distribution of fractal dimensions')
cluster_name="time_dependent_distribution_fractal_dimensions%05d.pdf"%count_config
p.savefig(cluster_name)
p.clf()
r_stat[count_config]=lin_fit[2]**2.
std_err[count_config]=lin_fit[3]
# delta_df[count_config]=my_fractal_dim[count_config]*\
# (1.-(1./(1+my_fractal_dim[count_config]*std_err[count_config])))
delta_df[count_config]=df*df*lin_fit[3]
my_fit=lin_fit[1]+lin_fit[0]*s.log10(n_comp)
if ((figure == 1) & (s.remainder(count_config,save_every)==0)):
fig = p.figure()
axes = fig.gca()
axes.loglog(n_comp,r_gyr_dist,"ro",n_comp,10.**my_fit,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
p.title('Radius of gyration vs Cluster-size')
p.grid(True)
# cluster_name="N_vs_radius_gyration_unprocessed.pdf%05d"%my_config
cluster_name="R_g_vs_N_fit%05d"%my_config
p.savefig(cluster_name)
p.clf()
# if (count_config == 5):
# print "the distribution of the radii is, ", r_gyr_dist
# print "the number distribution of the clusters is, ", n_comp
n_comp_ord=s.sort(n_comp)
r_gyr_ord=r_gyr_dist[s.argsort(n_comp)]
# if (count_config == 5):
# print "after ordering"
# print "the distribution of the radii is, ", r_gyr_ord
# print "the number distribution of the clusters is, ", n_comp_ord
#Now I identify the distinct cluster sizes in n_comp
n_comp_uni=n.unique1d(n_comp_ord)
r_bound=n_comp_ord.searchsorted(n_comp_uni,side='right')
l_bound=n_comp_ord.searchsorted(n_comp_uni,side='left')
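#(searchsorted trick: for each distinct size, l_bound[i]:r_bound[i] delimits the block of
#equal values in the sorted array, so r_bound-l_bound gives the multiplicity of each size)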
#print "r_bound and l_bound are, ", r_bound, l_bound
r_gyr_ari_mean=s.zeros(len(n_comp_uni))
r_gyr_ari_mean_log=s.zeros(len(n_comp_uni))
#Now I introduce the vector N_k_arr I will be using to plot N_k vs k (see Mountain),
# i.e. the number of clusters with k monomers vs k.
#In this case I need to work with the full size distribution, without getting rid of the
# monomers
n_comp_mon_ord=s.sort(n_comp_mon)
n_comp_mon_uni=n.unique1d(n_comp_mon_ord)
r_bound_mon=n_comp_mon_ord.searchsorted(n_comp_mon_uni,side='right')
l_bound_mon=n_comp_mon_ord.searchsorted(n_comp_mon_uni,side='left')
N_k_arr=r_bound_mon-l_bound_mon #number of clusters with k monomers, summed over the directories
#print "N_k_arr is, ", N_k_arr
#d=s.concatenate((a,b)).reshape(2,4).transpose()
N_k_vs_k=s.zeros((len(n_comp_mon_uni),2))
N_k_arr=N_k_arr/(n_dir*1.) #average per directory, since I am reading the results of several simulations
#print "n_dir is, ", n_dir
N_k_vs_k[:,0]=N_k_arr
N_k_vs_k[:,1]=n_comp_mon_uni*1. #n_comp_mon_uni is the k array
#Now I calculate the dimensionless distribution
#phi=n_clus #constant total volume
#print "my_config is, ", my_config
k_aver[count_config]=s.sum(n_comp_mon_uni*N_k_arr)/s.sum(N_k_arr)
#print "the sum of N_k is, ", s.sum(N_k_arr)
phi=s.sum(n_comp_mon_uni*N_k_arr) #this is a constant (= n_part, the total number of monomers
#per directory), but it is good to calculate it to check that nothing is going wrong.
# print "phi is, ", phi
eta=n_comp_mon_uni*1./k_aver[count_config]
psi=N_k_arr*phi/(s.sum(N_k_arr)**2.)
delta_eta=1./k_aver[count_config]
norm_psi=s.sum(psi*delta_eta)
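#(self-preserving-distribution check: with eta = k/k_aver and psi = N_k*k_aver/sum(N_k)
#-- written above as N_k*phi/sum(N_k)**2 since k_aver = phi/sum(N_k) -- the bin width
#delta_eta = 1/k_aver makes norm_psi = sum(psi*delta_eta) = 1 exactly)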
psi_vs_eta=s.zeros((len(n_comp_mon_uni),2))
psi_vs_eta[:,0]=psi
psi_vs_eta[:,1]=eta
# print "the normalization of psi is, ", norm_psi
#print "k_aver is, ", k_aver
#print "the total volume is, ", s.sum(k_aver*n_clus[my_config]/n_dir_count)
cluster_name="N_k_vs_k%05d"%my_config
p.save(cluster_name,N_k_vs_k )
cluster_name="psi_vs_eta%05d"%my_config
p.save(cluster_name,psi_vs_eta )
if ((figure == 1) & (s.remainder(count_config,save_every)==0)):
cluster_name="N_k_vs_k%05d"%my_config
fig = p.figure()
axes = fig.gca()
axes.loglog(n_comp_mon_uni,N_k_arr, "bo")
p.xlabel('k')
p.ylabel('N_k')
p.title('Number of clusters with k monomers')
p.grid(True)
#cluster_name="number_cluster_vs_time_loglog.pdf"
p.savefig(cluster_name)
p.clf()
cluster_name="psi_vs_eta%05d"%my_config
p.loglog(eta,psi, "bo")
p.xlabel('eta')
p.ylabel('psi')
p.title('SPD plot')
p.grid(True)
#cluster_name="number_cluster_vs_time_loglog.pdf"
p.savefig(cluster_name)
p.clf()
for i in xrange(0,len(n_comp_uni)):
r_gyr_ari_mean[i]=r_gyr_ord[l_bound[i]:r_bound[i]].mean()
r_gyr_ari_mean_log[i]=s.exp(s.sum(s.log(r_gyr_ord[l_bound[i]:r_bound[i]]))\
/len(r_gyr_ord[l_bound[i]:r_bound[i]]))
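#(r_gyr_ari_mean_log is the geometric mean of the radii in each size block: the
#exponential of the arithmetic mean of the logs)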
#if (my_config==600):
# p.save("R_mean_from_log",r_gyr_ari_mean_log)
# p.save("R_mean_ari",r_gyr_ari_mean)
#lin_fit2=stats.linregress(s.log10(n_comp_uni),s.log10(r_gyr_ari_mean))
lin_fit2=fitting_stat(s.log10(n_comp_uni),s.log10(r_gyr_ari_mean))
my_fractal_dim2[count_config]=1./lin_fit2[0]
my_fit2=lin_fit2[1]+lin_fit2[0]*s.log10(n_comp_uni)
if ((figure == 1) & (s.remainder(count_config,save_every)==0)):
fig = p.figure()
axes = fig.gca()
p.loglog(n_comp_uni,r_gyr_ari_mean,"ro",\
n_comp_uni,10.**my_fit2,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
p.title('Radius of gyration vs Cluster-size')
p.grid(True)
# cluster_name="N_vs_radius_gyration_unprocessed.pdf%05d"%my_config
cluster_name="R_g_vs_N_fit-from-mean%05d"%my_config
p.savefig(cluster_name)
p.clf()
#lin_fit3=stats.linregress(s.log10(n_comp_uni),s.log10(r_gyr_ari_mean_log))
lin_fit3=fitting_stat(s.log10(n_comp_uni),s.log10(r_gyr_ari_mean_log))
my_fractal_dim3[count_config]=1./lin_fit3[0]
my_fit3=lin_fit3[1]+lin_fit3[0]*s.log10(n_comp_uni)
if ((figure ==1) & (s.remainder(count_config,save_every)==0)):
fig = p.figure()
axes = fig.gca()
axes.loglog(n_comp_uni,r_gyr_ari_mean_log,"ro",\
n_comp_uni,10.**my_fit3,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
p.title('Radius of gyration vs Cluster-size')
p.grid(True)
# cluster_name="N_vs_radius_gyration_unprocessed.pdf%05d"%my_config
cluster_name="R_g_vs_N_fit-from-mean_log%05d"%my_config
p.savefig(cluster_name)
p.clf()
#print "the length of cluster_long is, ", len(cluster_long)
#print "the length of cluster_tot is, ", len(cluster_tot)
count_config=count_config+1
##################################################################
#now I try working with the cluster population
#print "now n_dir is, ", n_dir
n_clus=n_clus/(n_dir)
n_clus_small=n_clus_small/(n_dir)
n_clus_large=n_clus_large/(n_dir)
n_clus_test=n_clus_test/(n_dir)
mean_clus_separation=(n_clus/box_vol)**(-1./3)
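#(mean separation estimated as the cluster number density n_clus/box_vol raised to the
#power -1/3, i.e. the typical nearest-neighbour spacing)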
print "n_dir is, ", n_dir
#print "the total volume is, ", k_aver*n_clus
p.save("number_clusters_averaged_vs_time.dat",n_clus)
p.save("number_small_clusters_averaged_vs_time.dat",n_clus_small)
p.save("number_large_clusters_averaged_vs_time.dat",n_clus_large)
p.save("number_clusters_averaged_vs_time_test.dat",n_clus_test)
p.save("mean_cluster_separation_vs_time.dat", mean_clus_separation)
p.save("mean_cluster_size_k_aver_vs_time.dat", k_aver)
p.save("set_of_cluster_numbers.dat", stack_clus)
#now I work out the fluctuations
delta_N=s.std(stack_clus,axis=0)
p.save("std_cluster_number.dat", delta_N)
mean_velocity=s.sqrt(8.*0.5/(s.pi*k_aver)) #cluster mean thermal speed
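#(the 8.*0.5/(s.pi*k_aver) above looks like the Maxwell-Boltzmann mean speed
#s.sqrt(8.*kT/(s.pi*m)) with, presumably, kT = 0.5 in reduced units and cluster mass
#m = k_aver monomer masses)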
p.save("mean_thermal_velocity.dat",mean_velocity)
mean_free_path_correct=mean_velocity # since beta=1 for both clusters and monomers
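#(as the comment above says, with friction beta = 1 for both clusters and monomers the
#diffusional mean free path v_mean/beta reduces to mean_velocity itself)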
p.save("mean_free_path_correct.dat",mean_free_path_correct)
print "time is, ", time, len(time)
print "mean_free_path_correct is, ", mean_free_path_correct, len(mean_free_path_correct)
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.plot(time,mean_free_path_correct, "bo",label="Diffusional Mean-free path")
# axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Particle mean free path')
p.title("Evolution Mean-free path")
p.grid(True)
cluster_name="diffusion_mean_free_path.pdf"
axes.legend()
p.savefig(cluster_name)
p.clf()
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.loglog(time,mean_free_path_correct, "bo",label="Diffusional Mean-free path")
# axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Particle mean free path')
p.title("Evolution Mean-free path")
p.grid(True)
cluster_name="diffusion_mean_free_path_loglog.pdf"
axes.legend()
p.savefig(cluster_name)
p.clf()
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.plot(time,mean_velocity, "bo",label="Mean thermal velocity")
# axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Particle mean velocity')
p.title("Evolution thermal velocity")
p.grid(True)
cluster_name="evolution_mean_thermal_velocity.pdf"
axes.legend()
p.savefig(cluster_name)
p.clf()
fig = p.figure()
axes = fig.gca()
print "len(mean_clus_separation) is,", len(mean_clus_separation)
axes.plot(time_clu, mean_clus_separation)
#p.ylim(1.4,3.)
p.xlabel('Time')
p.ylabel('Cluster separation')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
#p.title('Evolution Fractal Dimension from R_g')
#p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
#cluster_name="std_err_D_f.pdf"
p.savefig("mean_cluster_separation_vs_time.pdf")
p.clf()
fig = p.figure()
axes = fig.gca()
axes.loglog(time_clu, mean_clus_separation)
#p.ylim(1.4,3.)
p.xlabel('Time')
p.ylabel('Cluster separation')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
#p.title('Evolution Fractal Dimension from R_g')
#p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
#cluster_name="std_err_D_f.pdf"
p.savefig("mean_cluster_separation_vs_time_loglog.pdf")
p.clf()
#I initialize the coefficients for the other fitting
p0 = [1.0, -1.0]
#print s.array(p0)
# plsq2 = sopt.leastsq(residuals2, p0, args=(n_clus, time))
# coeff=plsq2[0]
# my_t=s.linspace(time.min(),time.max(), 100)
# my_conc=coeff[0]*my_t**coeff[1]
#print "delta_df is, ", delta_df
print "ok here"
# fig = p.figure()
# axes = fig.gca()
# axes.errorbar(time,my_fractal_dim,yerr=delta_df )
# p.ylim(1.4,3.)
# p.xlabel('Time')
# p.ylabel('Fractal dimension')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# #p.title('Evolution Fractal Dimension from R_g')
# #p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# #cluster_name="std_err_D_f.pdf"
# p.savefig("time_evolution_D_f_with_error_bars.pdf")
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.errorbar(time,my_fractal_dim,yerr=delta_df )
# p.ylim(1.4,3.)
# p.xlabel('Time')
# p.ylabel('Fractal dimension')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# #p.title('Evolution Fractal Dimension from R_g')
# #p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# #cluster_name="std_err_D_f.pdf"
# p.savefig("time_evolution_D_f_with_error_bars_large_cluster.pdf")
# p.clf()
print "ok here 2"
#Now I calculate the mean_free_path
print "len(n_clus_test) is, ", len(n_clus_test)
mean_free_path=1./(n_clus_test/box_vol*s.pi*R_gyr_mean**2.)
p.save("mean_free_path.dat",mean_free_path)
#Now I calculate what I think is a collision time (i.e. time in between 2 collisions)
my_tau_coll=1./(n_clus_test/box_vol*mean_velocity*s.pi*R_gyr_mean**2.)
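#(kinetic-theory estimates: mean free path = 1/(n*sigma) and collision time =
#1/(n*v*sigma), with number density n = n_clus_test/box_vol and collision
#cross-section sigma = s.pi*R_gyr_mean**2.)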
print "R_gyr_mean is, ", R_gyr_mean
print "mean_velocity is, ", mean_velocity
print "n_clus_test is, ", n_clus_test
p.save("collision_time.dat", my_tau_coll)
# fig = p.figure()
# axes = fig.gca()
# #axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
# axes.plot(time,my_tau_coll, "bo",label="collision_time")
# # axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# # axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# # axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# # label=r"Fit of $N_\infty $",linewidth=2.)
# p.xlabel('Time')
# p.ylabel('collision time')
# p.title("Evolution collision tim")
# p.grid(True)
# cluster_name="collision_time.pdf"
# axes.legend()
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# #axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
# axes.loglog(time,my_tau_coll, "bo",label="collision_time")
# # axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# # axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# # axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# # label=r"Fit of $N_\infty $",linewidth=2.)
# p.xlabel('Time')
# p.ylabel('collision time')
# p.title("Evolution collision tim")
# p.grid(True)
# cluster_name="collision_time_log_log.pdf"
# axes.legend()
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# #axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
# axes.plot(time,mean_free_path, "bo",label="Mean-free path")
# # axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# # axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# # axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# # label=r"Fit of $N_\infty $",linewidth=2.)
# p.xlabel('Time')
# p.ylabel('Particle mean free path')
# p.title("Evolution Mean-free path")
# p.grid(True)
# cluster_name="mean_free_path.pdf"
# axes.legend()
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# #axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
# axes.loglog(time,mean_free_path, "bo",label="Mean-free path")
# # axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# # axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# # axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# # label=r"Fit of $N_\infty $",linewidth=2.)
# p.xlabel('Time')
# p.ylabel('Particle mean free path')
# p.title("Evolution Mean-free path")
# p.grid(True)
# cluster_name="mean_free_path_loglog.pdf"
# axes.legend()
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# #axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
# axes.plot(time,R_gyr_mean, "bo",label="Mean radius of gyration")
# # axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# # axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# # axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# # label=r"Fit of $N_\infty $",linewidth=2.)
# p.xlabel('Time')
# p.ylabel('Mean radius of gyration')
# p.title("Evolution Mean Radius of gyration")
# p.grid(True)
# cluster_name="mean_radius_gyration.pdf"
# axes.legend()
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# #axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
# axes.loglog(time,R_gyr_mean, "bo",label="Mean radius of gyration")
# # axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# # axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# # axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# # label=r"Fit of $N_\infty $",linewidth=2.)
# p.xlabel('Time')
# p.ylabel('Mean radius of gyration')
# p.title("Evolution Mean Radius of gyration")
# p.grid(True)
# cluster_name="mean_radius_gyration_loglog.pdf"
# axes.legend()
# p.savefig(cluster_name)
# p.clf()
#Now a fit of the behaviour of the mean radius of gyration vs time
print "OK before choice_time_R"
choice_time_R=s.where(time>=1000.)
#lin_fit7=stats.linregress(s.log10(time[choice_time_R]),s.log10(R_gyr_mean[choice_time_R]))
lin_fit7=fitting_stat(s.log10(time[choice_time_R]),s.log10(R_gyr_mean[choice_time_R]))
print "the exponent of the power-law of the mean radius of gyration, ", lin_fit7[0]
R_gyr_mean_fit=lin_fit7[1]+lin_fit7[0]*s.log10(time[choice_time_R])
p.save("mean_radius_gyration.dat",R_gyr_mean)
# fig = p.figure()
# axes = fig.gca()
# #axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
# axes.loglog(time,R_gyr_mean, "bo",label="Mean radius of gyration")
# axes.loglog(time[choice_time_R],10.**(R_gyr_mean_fit), "r", linewidth=2.,label="Mean radius of gyration (fit)")
# # axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# # axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# # axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# # label=r"Fit of $N_\infty $",linewidth=2.)
# p.xlabel('Time')
# p.ylabel('Mean radius of gyration')
# p.title("Evolution Mean Radius of gyration")
# p.grid(True)
# cluster_name="mean_radius_gyration_loglog_fitting.pdf"
# axes.legend()
# p.savefig(cluster_name)
# p.clf()
# p.loglog(time_clu,n_clus, "bo",time_clu,n_clus_small, "ko",time_clu,n_clus_large, "ro")
# p.xlabel('Time')
# p.ylabel('Number of clusters')
# p.title('Number of clusters vs time')
# p.grid(True)
# cluster_name="number_cluster_vs_time_loglog.pdf"
# p.savefig(cluster_name)
# p.clf()
choice_time_clu=s.where(time_clu>=2000.)
#lin_fit7=stats.linregress(s.log10(time_clu[choice_time_clu]),s.log10(n_clus[choice_time_clu]))
lin_fit7=fitting_stat(s.log10(time_clu[choice_time_clu]),s.log10(n_clus[choice_time_clu]))
print "the decay exponent for the number concentration is, ", lin_fit7[0]
my_conc=lin_fit7[1]+lin_fit7[0]*s.log10(time_clu[choice_time_clu])
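#(for reference: constant-kernel Smoluchowski coagulation predicts N(t) ~ t**(-1.) at
#long times, so the fitted decay exponent lin_fit7[0] can be compared against -1.)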
#print '(10.**time_clu[choice_time_clu]),(10.**my_conc) are, ', time_clu[choice_time_clu],(10.**my_conc)
choice_time_intermediate=s.where((time_clu>=100.) & (time_clu<=300.) )
#lin_fit8=stats.linregress(s.log10(time_clu[choice_time_intermediate]),\
# s.log10(n_clus[choice_time_intermediate]))
if (len(choice_time_intermediate[0])>2): #s.where returns a tuple; the index array is its first element
lin_fit8=fitting_stat(s.log10(time_clu[choice_time_intermediate]),\
s.log10(n_clus[choice_time_intermediate]))
print "the decay exponent for the number concentration [intermediate times] is, ", lin_fit8[0]
my_conc_intermediate=lin_fit8[1]+lin_fit8[0]*s.log10(time_clu[choice_time_intermediate])
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.plot(time_clu[choice_time_intermediate],\
(n_clus_large[choice_time_intermediate]/n_clus_small[choice_time_intermediate]),\
"bo",label="$N_>/N_<$")
# axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Ratio large to small')
p.title("Evolution ratio of large to small clusters")
p.grid(True)
cluster_name="ratio_large_to_small_clusters_intermediate.pdf"
axes.legend()
p.savefig(cluster_name)
p.clf()
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.loglog(time_clu,n_clus, "bo",label="$N_\infty$")
axes.loglog(time,n_clus_small, "k+",label="$N_<$")
axes.loglog(time,n_clus_large, "rx",label="$N_>$")
axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Number of clusters')
p.title("Evolution Number of clusters")
p.grid(True)
cluster_name="number_cluster_vs_time_loglog.pdf"
axes.legend()
p.savefig(cluster_name)
p.clf()
#ratio of large to small clusters
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.plot(time,(n_clus_large/n_clus_small), "bo",label="$N_>/N_<$")
# axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Ratio large to small')
p.title("Evolution ratio of large to small clusters")
p.grid(True)
cluster_name="ratio_large_to_small_clusters.pdf"
axes.legend()
p.savefig(cluster_name)
p.clf()
p.save("ratio_large_to_small_clusters.dat", (n_clus_large/n_clus_small))
#Now I calculate the percentages of small and large clusters as time progresses
percentage_small=n_clus_small/n_clus_test*100.
percentage_large=n_clus_large/n_clus_test*100.
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.plot(time,percentage_small, "bo",label="percentage small clusters")
axes.plot(time,percentage_large, "rx",label="percentage large clusters")
# axes.loglog(time_clu,n_clus_small, "k+",label="$N_<$")
# axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
# axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Percentage')
p.title("Percentages small and large clusters")
p.grid(True)
cluster_name="percentages_large_and_small_clusters.pdf"
axes.legend()
p.savefig(cluster_name)
p.clf()
p.save("percentage_small_clusters.dat",percentage_small)
p.save("percentage_large_clusters.dat",percentage_large)
#ratio large to total
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.plot(time,(n_clus_large/n_clus_test), "bo")
#axes.plot(time_clu,N_time, "k+",label="monodisperse approx")
#axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
#axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Ratio large clusters to total cluster')
p.title("Evolution ratio large to total")
p.grid(True)
cluster_name="ratio_large_to_total_clusters.pdf"
#axes.legend()
p.savefig(cluster_name)
p.clf()
p.save("ratio_large_to_total_cluster.dat", (n_clus_large/n_clus_test) )
fig = p.figure()
axes = fig.gca()
axes.loglog(time_clu ,n_clus, "ro",time_clu[choice_time_clu],(10.**my_conc),linewidth=2.)
p.xlabel('Time')
p.ylabel('Number of clusters')
p.title('Number of clusters vs time')
p.grid(True)
cluster_name="number_cluster_vs_time_fit_loglog.pdf"
p.savefig(cluster_name)
p.clf()
fig = p.figure()
axes = fig.gca()
axes.plot(time_clu ,delta_N, "ro")
p.xlabel('Time')
p.ylabel('std of N clusters')
p.title('Fluctuation of cluster number vs time')
p.grid(True)
cluster_name="delta_number_cluster_vs_time.pdf"
p.savefig(cluster_name)
p.clf()
fig = p.figure()
axes = fig.gca()
axes.plot(time_clu ,delta_N/n_clus, "ro")
p.xlabel('Time')
p.ylabel('std of N clusters over N')
p.title('Relative Fluctuation of cluster number vs time')
p.grid(True)
cluster_name="delta_number_cluster_over_cluster_number_vs_time.pdf"
p.savefig(cluster_name)
p.clf()
n_clus2=n_clus[0]/n_clus-1.
p.save("coagulation_coeff_vs_time.dat",n_clus2)
my_choose=s.where(n_clus2>0.)
n_clus2=n_clus2[my_choose]
time2=time_clu[my_choose]
fig = p.figure()
axes = fig.gca()
axes.loglog(time2,n_clus2, "bo")
p.xlabel('Time')
p.ylabel('N(0)/N(t)-1')
p.title('Number of clusters vs time')
p.grid(True)
cluster_name="coefficient_coagulation.pdf"
p.savefig(cluster_name)
p.clf()
p.save("fitted_df_from_Rg_no_aver.dat",my_fractal_dim)
p.save("fitted_df_from_average_Rg_from_mean.dat",my_fractal_dim2)
p.save("fitted_df_from_average_Rg_from_mean_log.dat",my_fractal_dim3)
p.save("r_stat.dat",r_stat)
p.save("std_err_1_over_df.dat",std_err)
p.save("delta_df.dat",delta_df)
p.save("delta_df_small.dat",delta_df_small)
p.save("delta_df_large.dat",delta_df_large)
p.save("r_stat_small.dat",r_stat_low)
p.save("r_stat_large.dat",r_stat_high)
p.save("fitted_df_from_Rg_small_no_aver.dat",my_fractal_dim_small)
p.save("fitted_df_from_Rg_large_no_aver.dat",my_fractal_dim_large)
# p.save("prefactor.dat",prefactor)
# p.save("prefactor_small.dat",prefactor_small)
# p.save("prefactor_large.dat",prefactor_large)
p.save("prefactor_R.dat",prefactor_R)
p.save("prefactor_small_R.dat",prefactor_small_R)
p.save("prefactor_large_R.dat",prefactor_large_R)
p.save("prefactor_R_raw.dat",prefactor_R_raw)
p.save("prefactor_small_R_raw.dat",prefactor_small_R_raw)
p.save("prefactor_large_R_raw.dat",prefactor_large_R_raw)
p.save("delta_prefactor_R.dat",delta_prefactor_R)
p.save("delta_prefactor_small_R.dat",delta_prefactor_small_R)
p.save("delta_prefactor_large_R.dat",delta_prefactor_large_R)
p.save("delta_prefactor_R_raw.dat",delta_prefactor_R_raw)
p.save("delta_prefactor_small_R_raw.dat",delta_prefactor_small_R_raw)
p.save("delta_prefactor_large_R_raw.dat",delta_prefactor_large_R_raw)
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,prefactor_R,"k^",time,prefactor_small_R,"ro"\
# ,time,prefactor_large_R,"bx")
# p.xlabel('Dimensionless Time',fontsize=20)
# labels = p.getp(p.gca(), 'xticklabels')
# p.setp(labels, color='k', fontsize=15)
# p.ylabel('Fractal Dimension',fontsize=20)
# labels2 = p.getp(p.gca(), 'yticklabels')
# p.setp(labels2, color='k', fontsize=15)
# p.legend(('overall prefactor','prefactor small clusters', 'prefactor large clusters'))
# #p.title('Evolution Fractal Dimension')
# p.grid(False)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="Evolution_prefactors.pdf"
# p.savefig(cluster_name)
# p.clf()
#print 'delta_df is,', delta_df
#print 'my_fractal_dim is,', my_fractal_dim
#print "my_fractal_dim is, ", my_fractal_dim
#print "time[ini_conf:fin_conf], ", time[ini_conf:fin_conf]
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,my_fractal_dim,"k^")
# p.xlabel('Dimensionless Time',fontsize=20)
# labels = p.getp(p.gca(), 'xticklabels')
# p.setp(labels, color='k', fontsize=15)
# p.ylabel('Fractal Dimension',fontsize=20)
# labels2 = p.getp(p.gca(), 'yticklabels')
# p.setp(labels2, color='k', fontsize=15)
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# #p.title('Evolution Fractal Dimension')
# p.grid(False)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="Evolution_fractal_dimension-no-mean.pdf"
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,my_fra_low,"k^")
# p.xlabel('Dimensionless Time',fontsize=20)
# labels = p.getp(p.gca(), 'xticklabels')
# p.setp(labels, color='k', fontsize=15)
# p.ylabel('Fractal Dimension',fontsize=20)
# labels2 = p.getp(p.gca(), 'yticklabels')
# p.setp(labels2, color='k', fontsize=15)
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Evolution Fractal Dimension for small clusters')
# p.grid(False)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="Evolution_fractal_dimension-no-mean-small-clusters.pdf"
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,my_fra_high,"k^")
# p.xlabel('Dimensionless Time',fontsize=20)
# labels = p.getp(p.gca(), 'xticklabels')
# p.setp(labels, color='k', fontsize=15)
# p.ylabel('Fractal Dimension',fontsize=20)
# labels2 = p.getp(p.gca(), 'yticklabels')
# p.setp(labels2, color='k', fontsize=15)
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Evolution Fractal Dimension for large clusters')
# p.grid(False)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="Evolution_fractal_dimension-no-mean-large-clusters.pdf"
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,my_fractal_dim2, "bo",\
# time,my_fractal_dim2,linewidth=2.)
# p.xlabel('Time')
# p.ylabel('Fitted D_f')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Evolution Fractal Dimension from R_g')
# p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="Evolution_fractal_dimension_from_mean.pdf"
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,my_fractal_dim3, "bo",\
# time,my_fractal_dim3,linewidth=2.)
# p.xlabel('Time')
# p.ylabel('Fitted D_f')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Evolution Fractal Dimension from R_g')
# p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="Evolution_fractal_dimension_from_mean_log.pdf"
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,r_stat, "bo")
# p.xlabel('Time')
# p.ylabel('R of the fitted fractal dimension')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('R calculation')
# p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="r_statistics.pdf"
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,r_stat_low, "bo")
# p.xlabel('Time')
# p.ylabel('R of the fitted fractal dimension')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('R calculation for small clusters')
# p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="r_statistics_small_clusters.pdf"
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,r_stat_high, "bo")
# p.xlabel('Time')
# p.ylabel('R of the fitted fractal dimension')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('R calculation for large clusters')
# p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="r_statistics_large_clusters.pdf"
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.plot(time,delta_df, "bo")
# p.xlabel('Time')
# p.ylabel('Uncertainty on the fractal dimension')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Evolution Fractal Dimension from R_g')
# p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="delta_df.pdf"
# p.savefig(cluster_name)
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# choice_inf=s.where(s.isinf(std_err) == 0)
# axes.plot(time[choice_inf],std_err[choice_inf], "bo")
# p.xlabel('Time')
# p.ylabel('Std error on 1/fractal dimension')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Standard error on 1/fractal dimension')
# p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="std_err_1_over_D_f.pdf"
# p.savefig(cluster_name)
# p.clf()
#########################################################################
#########################################################################
#########################################################################
#########################################################################
#Now I take care of the time-independent (i.e. time-averaged) statistics
#I choose to fix cluster long by removing too small clusters
#choice_long=s.where(cluster_long>=min_cluster)
#cluster_long=cluster_long[choice_long]
#r_gyr_long=r_gyr_long[choice_long]
#The part above is unnecessary now
# print "the length of cluster_long is, ", len(cluster_long)
# print "the length of cluster_tot is, ", len(cluster_tot)
#I changed the following line in order to be consistent with the fact that I need to consider,
# at all times, only clusters having more than min_cluster monomers!
#choice_tot=s.where(cluster_tot>1.)
# p.save("c_tot_temp.dat", cluster_tot)
# p.save("r_gyr_tot_temp.dat", r_gyr_tot)
choice_tot=s.where(cluster_tot>0.)
cluster_tot=cluster_tot[choice_tot]
r_gyr_tot=r_gyr_tot[choice_tot]
#NOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# the following two lines are hopelessly WRONG!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# and they got me into trouble! By doing that, I am first modifying cluster_tot, so that the selection of
#the elements in r_gyr_tot is no longer correct!!!!!
# cluster_tot=cluster_tot[s.where(cluster_tot>0.)]
# r_gyr_tot=r_gyr_tot[s.where(cluster_tot>0.)]
print "cluster_tot is, ", cluster_tot
print "r_gyr_tot is, ", r_gyr_tot
#The correction inside the loop was applied to r_gyr_dist only; r_gyr_tot was accumulated
#from the raw readings, so the correction still has to be applied to the whole array here
if (correction_R_g == 1):
r_sphere=s.sqrt(3./5.)*0.5 #radius of gyration of a single monomer
print "the monomer radius of gyration is, ", r_sphere
r_gyr_tot=s.sqrt(r_gyr_tot**2.+r_sphere**2.)
elif (correction_R_g == 2):
#r_sphere=s.sqrt(3./5.)*0.5 #radius of gyration of a single monomer
r_gyr_tot=s.sqrt(r_gyr_tot**2.+0.5**2.)
choice_exclusion=s.where(cluster_tot<min_cluster)
cluster_excl=cluster_tot[choice_exclusion]
r_gyr_excl=r_gyr_tot[choice_exclusion]
p.save("r_gyr_exclusion.dat", r_gyr_excl)
choice_tot=s.where(cluster_tot>=min_cluster)
cluster_tot=cluster_tot[choice_tot]
r_gyr_tot=r_gyr_tot[choice_tot]
#lin_fit=stats.linregress(s.log10(cluster_long),s.log10(r_gyr_long))
lin_fit=fitting_stat(s.log10(cluster_tot),s.log10(r_gyr_tot))
print "the fractal dimension is, ", 1./lin_fit[0]
print "##########################################################################"
df=1./lin_fit[0]
err_df=df*df*lin_fit[3]
my_prefactor=10.**lin_fit[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit[4]
#Now I build a histogram of fractal dimensions for the collected data; a single prefactor is used.
df_distr=s.log(cluster_tot)/s.log(r_gyr_tot/my_prefactor)
p.save("distribution_fractal_dimensions_total_single_prefactor.dat",df_distr)
fig = p.figure()
axes = fig.gca()
n_bins=100
print "the length of df_distr is, ", len(df_distr)
axes.hist(df_distr,bins=n_bins, normed=0)
#p.xlabel('N')
p.ylabel('frequency')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Distribution of fractal dimensions')
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="distribution_fractal_dimensions_single_prefactor_total.pdf"
p.savefig(cluster_name)
p.clf()
#Now I try identifying the threshold between small and large clusters
df_distr_threshold=s.where((df_distr>=1.5) & (df_distr<=1.55))
cluster_threshold=cluster_tot[df_distr_threshold]
p.save("cluster_threshold.dat",cluster_threshold)
fig = p.figure()
axes = fig.gca()
n_bins=100
#print "the length of df_distr is, ", len(df_distr)
axes.hist(cluster_threshold,bins=n_bins, normed=0)
p.xlabel('cluster size')
p.ylabel('frequency')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Distribution of cluster sizes at the threshold')
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="distribution_cluster_sizes_at_threshold.pdf"
p.savefig(cluster_name)
p.clf()
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
# p.loglog(cluster_long,r_gyr_long, "ro")
# p.grid(True)
# p.savefig("all_together.png")
# p.clf()
fig = p.figure()
axes = fig.gca()
axes.loglog(cluster_tot,r_gyr_tot, "ro")
p.grid(True)
p.savefig("all_together.png")
p.clf()
# cluster_ord=s.sort(cluster_long)
# r_gyr_ord=r_gyr_long[s.argsort(cluster_long)]
cluster_ord=s.sort(cluster_tot)
r_gyr_ord=r_gyr_tot[s.argsort(cluster_tot)]
#I get rid of the clusters which are too small
choice_ord=s.where(cluster_ord>=min_cluster)
cluster_ord=cluster_ord[choice_ord]
r_gyr_ord=r_gyr_ord[choice_ord]
###########################
cluster_uni=n.unique1d(cluster_ord)
r_bound=cluster_ord.searchsorted(cluster_uni,side='right')
l_bound=cluster_ord.searchsorted(cluster_uni,side='left')
#print "the length of r_bound is, ", len(r_bound)
r_gyr_ari_mean=s.zeros(len(cluster_uni))
r_gyr_ari_mean_log=s.zeros(len(cluster_uni))
for i in xrange(0,len(cluster_uni)):
r_gyr_ari_mean[i]=r_gyr_ord[l_bound[i]:r_bound[i]].mean()
r_gyr_ari_mean_log[i]=s.exp(s.sum(s.log(r_gyr_ord[l_bound[i]:r_bound[i]]))\
/len(r_gyr_ord[l_bound[i]:r_bound[i]]))
#lin_fit2=stats.linregress(s.log10(cluster_uni),s.log10(r_gyr_ari_mean))
lin_fit2=fitting_stat(s.log10(cluster_uni),s.log10(r_gyr_ari_mean))
my_fit2=lin_fit2[1]+lin_fit2[0]*s.log10(cluster_uni)
print "the fractal dimension [arithmetic mean] is, ", 1./lin_fit2[0]
df=1./lin_fit2[0]
err_df=df*df*lin_fit2[3]
my_prefactor=10.**lin_fit2[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit2[4]
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
#lin_fit3=stats.linregress(s.log10(cluster_uni),s.log10(r_gyr_ari_mean_log))
lin_fit3=fitting_stat(s.log10(cluster_uni),s.log10(r_gyr_ari_mean_log))
my_fit3=lin_fit3[1]+lin_fit3[0]*s.log10(cluster_uni)
print "the fractal dimension [log mean] is, ", 1./lin_fit3[0]
df=1./lin_fit3[0]
err_df=df*df*lin_fit3[3]
my_prefactor=10.**lin_fit3[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit3[4]
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
fig = p.figure()
axes = fig.gca()
axes.plot(s.log10(cluster_uni),s.log10(r_gyr_ari_mean), "bo",\
s.log10(cluster_uni), my_fit2,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Evolution Fractal Dimension from R_g')
p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="Fractal_dimension_from_mean_and_time.pdf"
p.savefig(cluster_name)
p.clf()
fig = p.figure()
axes = fig.gca()
axes.plot(s.log10(cluster_uni),s.log10(r_gyr_ari_mean_log), "bo",\
s.log10(cluster_uni), my_fit3,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Evolution Fractal Dimension from R_g')
p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="Fractal_dimension_from_mean_log_and_time.pdf"
p.savefig(cluster_name)
p.clf()
count_number=s.zeros(len(r_bound))
for i in xrange(0,len(r_bound)):
count_number[i]=r_bound[i]-l_bound[i] #Now I counted how many clusters I have
#for each existing size
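#(equivalently, count_number = r_bound - l_bound, the same multiplicity trick used for
#N_k_arr inside the main loop)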
survival=s.where(count_number>min_repetition)
cluster_surv=cluster_uni[survival]
r_log_surv=r_gyr_ari_mean_log[survival]
r_mean_surv=r_gyr_ari_mean[survival]
#print "cluster_surv is, ", cluster_surv
#print "r_log_surv is, ", r_log_surv
#lin_fit4=stats.linregress(s.log10(cluster_surv),s.log10(r_log_surv))
lin_fit4=fitting_stat(s.log10(cluster_surv),s.log10(r_log_surv))
my_fit4=lin_fit4[1]+lin_fit4[0]*s.log10(cluster_surv)
print "the fractal dimension [selective log mean] is, ", 1./lin_fit4[0]
df=1./lin_fit4[0]
err_df=df*df*lin_fit4[3]
my_prefactor=10.**lin_fit4[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit4[4]
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
fig = p.figure()
axes = fig.gca()
axes.plot(s.log10(cluster_surv),s.log10(r_log_surv), "bo",\
s.log10(cluster_surv), my_fit4,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Evolution Fractal Dimension from R_g')
p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="Fractal_dimension_from_mean_log_and_time_selection.pdf"
p.savefig(cluster_name)
p.clf()
#Now another "survival" plot
#lin_fit5=stats.linregress(s.log10(cluster_surv),s.log10(r_mean_surv))
lin_fit5=fitting_stat(s.log10(cluster_surv),s.log10(r_mean_surv))
my_fit5=lin_fit5[1]+lin_fit5[0]*s.log10(cluster_surv)
print "the fractal dimension [selective arithmetic mean] is, ", 1./lin_fit5[0]
df=1./lin_fit5[0]
err_df=df*df*lin_fit5[3]
my_prefactor=10.**lin_fit5[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit5[4]
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
fig = p.figure()
axes = fig.gca()
axes.plot(s.log10(cluster_surv),s.log10(r_mean_surv), "bo",\
s.log10(cluster_surv), my_fit5,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Evolution Fractal Dimension from R_g')
p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="Fractal_dimension_from_arithmetic_mean_and_time_selection.pdf"
p.savefig(cluster_name)
p.clf()
#Now I try a new separate fitting of small and large clusters in this survival plot
choice_surv_small=s.where(cluster_surv<=lower_threshold)
choice_surv_large=s.where(cluster_surv>lower_threshold)
cluster_surv_small=cluster_surv[choice_surv_small]
r_mean_surv_small=r_mean_surv[choice_surv_small]
cluster_surv_large=cluster_surv[choice_surv_large]
r_mean_surv_large=r_mean_surv[choice_surv_large]
#lin_fit5_small=stats.linregress(s.log10(cluster_surv_small),s.log10(r_mean_surv_small))
lin_fit5_small=fitting_stat(s.log10(cluster_surv_small),s.log10(r_mean_surv_small))
lin_fit5_large=fitting_stat(s.log10(cluster_surv_large),s.log10(r_mean_surv_large))
my_fit5_small=lin_fit5_small[1]+lin_fit5_small[0]*s.log10(cluster_surv_small)
my_fit5_large=lin_fit5_large[1]+lin_fit5_large[0]*s.log10(cluster_surv_large)
extension=s.linspace(1.,max(cluster_surv), 300)
# my_fit5_small_extended=lin_fit5_small[1]+lin_fit5_small[0]*s.log10(cluster_surv)
# my_fit5_large_extended=lin_fit5_large[1]+lin_fit5_large[0]*s.log10(cluster_surv)
my_fit5_small_extended=lin_fit5_small[1]+lin_fit5_small[0]*s.log10(extension)
my_fit5_large_extended=lin_fit5_large[1]+lin_fit5_large[0]*s.log10(extension)
print "the fractal dimension [selective arithmetic mean] for the small clusters is, ", 1./lin_fit5_small[0]
df=1./lin_fit5_small[0]
err_df=df*df*lin_fit5_small[3]
my_prefactor=10.**lin_fit5_small[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit5_small[4]
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
print "the fractal dimension [selective arithmetic mean] for the large clusters is , ", 1./lin_fit5_large[0]
df=1./lin_fit5_large[0]
err_df=df*df*lin_fit5_large[3]
my_prefactor=10.**lin_fit5_large[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit5_large[4]
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
fig = p.figure()
axes = fig.gca()
axes.loglog(cluster_surv_small,r_mean_surv_small, "bo",label='Small clusters')
axes.loglog(cluster_surv_small, (10.**my_fit5_small),"k",label="Fit for small clusters",linewidth=2.)
axes.loglog(cluster_surv_large,r_mean_surv_large, "r+",label="Large clusters")
axes.loglog(cluster_surv_large, (10.**my_fit5_large),"--k",label="Fit for large clusters",linewidth=2.)
axes.legend(loc="upper left")
p.ylim(0.4, 8.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
#p.title('Evolution Fractal Dimension from R_g')
p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="Fractal_dimension_from_arithmetic_mean_and_time_selection_double_fitting.pdf"
p.savefig(cluster_name)
p.clf()
p.save("cluster_surv_small.dat",cluster_surv_small)
p.save("cluster_surv_large.dat",cluster_surv_large)
p.save("cluster_surv.dat",cluster_surv)
p.save("cluster_extended.dat",extension)
p.save("r_mean_surv_small.dat", r_mean_surv_small)
p.save("r_mean_surv_small_fitting.dat", (10.**my_fit5_small))
p.save("r_mean_surv_small_fitting_extended.dat", (10.**my_fit5_small_extended))
p.save("r_mean_surv_large.dat", r_mean_surv_large)
p.save("r_mean_surv_large_fitting.dat", (10.**my_fit5_large))
p.save("r_mean_surv_large_fitting_extended.dat", (10.**my_fit5_large_extended))
#Now I create an error-bar plot using the previous fractal dimensions as boundaries
df_small_vec=s.zeros(len(my_fractal_dim))
df_large_vec=s.zeros(len(my_fractal_dim))
df_time_aver_vec=s.zeros(len(my_fractal_dim))
df_small_vec[:]=1./lin_fit5_small[0]
df_large_vec[:]=1./lin_fit5_large[0]
df_time_aver_vec[:]=1./lin_fit2[0]
# fig = p.figure()
# axes = fig.gca()
# axes.errorbar(time,my_fractal_dim,yerr=delta_df, linewidth=2. )
# axes.errorbar(time,my_fractal_dim_small,yerr=delta_df_small,\
# label="Fractal dimension for small clusters",linewidth=2.)
# axes.errorbar(time,my_fractal_dim_large,yerr=delta_df_large,\
# label="Fractal dimension for small clusters",linewidth=2.)
# #p.ylim(1.4,2.6)
# #axes.legend()
# p.xlabel('Time')
# p.ylabel('Fractal dimension')
# p.savefig("time_evolution_D_f_with_error_bars_all.pdf")
# p.clf()
#Now I try separate fittings of cluster_long trying to identify different regimes
#both using all the data and taking the arithmetic mean
#r_gyr_ari_mean=s.zeros(len(n_comp_uni))
# choice_small=s.where(cluster_long<=lower_threshold)
# cluster_small=cluster_long[choice_small]
# r_gyr_small=r_gyr_long[choice_small]
choice_small=s.where((cluster_tot<=lower_threshold) & (cluster_tot>=min_cluster))
cluster_small=cluster_tot[choice_small]
r_gyr_small=r_gyr_tot[choice_small]
choice_mean_small=s.where((n_comp_uni<=lower_threshold) & (n_comp_uni>=min_cluster))
n_comp_small=n_comp_uni[choice_mean_small]
r_gyr_ari_small=r_gyr_ari_mean[choice_mean_small]
# choice_large=s.where(cluster_long>lower_threshold)
# cluster_large=cluster_long[choice_large]
# r_gyr_large=r_gyr_long[choice_large]
choice_large=s.where(cluster_tot>lower_threshold)
cluster_large=cluster_tot[choice_large]
r_gyr_large=r_gyr_tot[choice_large]
choice_mean_large=s.where(n_comp_uni>lower_threshold)
n_comp_large=n_comp_uni[choice_mean_large]
r_gyr_ari_large=r_gyr_ari_mean[choice_mean_large]
#Now I fit them separately
#lin_fit_small=stats.linregress(s.log10(cluster_small),s.log10(r_gyr_small))
lin_fit_small=fitting_stat(s.log10(cluster_small),s.log10(r_gyr_small))
#lin_fit_large=stats.linregress(s.log10(cluster_large),s.log10(r_gyr_large))
lin_fit_large=fitting_stat(s.log10(cluster_large),s.log10(r_gyr_large))
#lin_fit_ari_small=stats.linregress(s.log10(n_comp_small),s.log10(r_gyr_ari_small))
lin_fit_ari_small=fitting_stat(s.log10(n_comp_small),s.log10(r_gyr_ari_small))
#lin_fit_ari_large=stats.linregress(s.log10(n_comp_large),s.log10(r_gyr_ari_large))
lin_fit_ari_large=fitting_stat(s.log10(n_comp_large),s.log10(r_gyr_ari_large))
my_fit_small=lin_fit_small[0]*s.log10(cluster_small)+lin_fit_small[1]
my_fit_large=lin_fit_large[0]*s.log10(cluster_large)+lin_fit_large[1]
my_fit_ari_small=lin_fit_ari_small[0]*s.log10(n_comp_small)+lin_fit_ari_small[1]
my_fit_ari_large=lin_fit_ari_large[0]*s.log10(n_comp_large)+lin_fit_ari_large[1]
#print "lin_fit_small[0] and lin_fit_small[1] is, ", lin_fit_small[0], lin_fit_small[1]
#print "lin_fit_large[0] and lin_fit_large[1] is, ", lin_fit_large[0], lin_fit_large[1]
df_small=1./lin_fit_small[0]
df_large=1./lin_fit_large[0]
print "the fractal dimension for the small clusters is, ", df_small
df=1./lin_fit_small[0]
err_df=df*df*lin_fit_small[3]
my_prefactor=10.**lin_fit_small[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit_small[4]
#Now I build a histogram of fractal dimensions for the collected data [small clusters].
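#Each pair (N,R) yields its own estimate by inverting R=k*N**(1./df) with the
#fitted prefactor k: df=ln(N)/ln(R/k).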
df_distr=s.log(cluster_small)/s.log(r_gyr_small/my_prefactor)
p.save("distribution_fractal_dimensions_small_clusters.dat",df_distr)
df_distr_small=df_distr
fig = p.figure()
axes = fig.gca()
n_bins=100
#print "the length of df_distr is, ", len(df_distr)
axes.hist(df_distr,bins=n_bins, normed=0)
#p.xlabel('N')
p.ylabel('frequency')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Distribution of fractal dimensions')
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="distribution_fractal_dimensions_small_clusters.pdf"
p.savefig(cluster_name)
p.clf()
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
print "the fractal dimension for the large clusters is, ", df_large
df=1./lin_fit_large[0]
err_df=df*df*lin_fit_large[3]
my_prefactor=10.**lin_fit_large[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit_large[4]
#Now I build a histogram of fractal dimensions for the collected data [large clusters].
df_distr=s.log(cluster_large)/s.log(r_gyr_large/my_prefactor)
p.save("distribution_fractal_dimensions_large_clusters.dat",df_distr)
if (len(df_distr>=4)):
fig = p.figure()
axes = fig.gca()
n_bins=100
#print "the length of df_distr is, ", len(df_distr)
axes.hist(df_distr,bins=n_bins, normed=0)
#p.xlabel('N')
p.ylabel('frequency')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Distribution of fractal dimensions')
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="distribution_fractal_dimensions_large_clusters.pdf"
p.savefig(cluster_name)
p.clf()
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
#Now I want to investigate a bit the fractal dimension of large clusters
#by selecting 3 sizes (via size_pick below): 40, 50 and 70
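#At fixed N the per-cluster estimate df=ln(N)/ln(R/k) varies only through R, so each
#single-size distribution below probes the spread of radii of gyration among
#equal-size clusters.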
n_bins=20
size_pick=40.
size_selection=s.where(cluster_large==size_pick)
cluster_large_size_select=cluster_large[size_selection]
r_gyr_large_size_selection=r_gyr_large[size_selection]
df_distr=s.log(cluster_large_size_select)/s.log(r_gyr_large_size_selection/my_prefactor)
p.save("distribution_fractal_dimensions.dat",df_distr)
df_distr_large=df_distr
# fig = p.figure()
# axes = fig.gca()
# print "df_distr is, ", df_distr
# axes.hist(df_distr,bins=n_bins, normed=0)
# #p.xlabel('N')
# p.ylabel('frequency')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Distribution of fractal dimensions')
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="distribution_fractal_dimensions.pdf"
# p.savefig(cluster_name)
# p.clf()
size_pick=50.
size_selection=s.where(cluster_large==size_pick)
cluster_large_size_select=cluster_large[size_selection]
r_gyr_large_size_selection=r_gyr_large[size_selection]
df_distr=s.log(cluster_large_size_select)/s.log(r_gyr_large_size_selection/my_prefactor)
p.save("distribution_fractal_dimensions_2.dat",df_distr)
# fig = p.figure()
# axes = fig.gca()
# axes.hist(df_distr,bins=n_bins, normed=0)
# #p.xlabel('N')
# p.ylabel('frequency')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Distribution of fractal dimensions')
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="distribution_fractal_dimensions_2.pdf"
# p.savefig(cluster_name)
# p.clf()
size_pick=70.
size_selection=s.where(cluster_large==size_pick)
cluster_large_size_select=cluster_large[size_selection]
r_gyr_large_size_selection=r_gyr_large[size_selection]
df_distr=s.log(cluster_large_size_select)/s.log(r_gyr_large_size_selection/my_prefactor)
p.save("distribution_fractal_dimensions3.dat",df_distr)
# fig = p.figure()
# axes = fig.gca()
# axes.hist(df_distr,bins=n_bins, normed=0)
# #p.xlabel('N')
# p.ylabel('frequency')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Distribution of fractal dimensions')
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="distribution_fractal_dimensions3.pdf"
# p.savefig(cluster_name)
# p.clf()
#print "the distribution of fractal dimensions is, ", df_distr
print "R squared for df of the small clusters is, ", lin_fit_small[2]
print "R squared for df of the large clusters is, ", lin_fit_large[2]
print "Now I use the arithmetic mean without any selection [NOT recommended]"
print "the fractal dimension for the small clusters [arithmetic mean] is, ", \
1./lin_fit_ari_small[0]
df=1./lin_fit_ari_small[0]
err_df=df*df*lin_fit_ari_small[3]
my_prefactor=10.**lin_fit_ari_small[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit_ari_small[4]
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
print "the fractal dimension for the large clusters [arithmetic mean] is, ", \
1./lin_fit_ari_large[0]
df=1./lin_fit_ari_large[0]
err_df=df*df*lin_fit_ari_large[3]
my_prefactor=10.**lin_fit_ari_large[1]
my_delta_prefactor=my_prefactor*s.log(10.)*lin_fit_ari_large[4]
print "the error on df is, ", err_df
print "the prefactor is, ", my_prefactor
print "the error on the prefactor is, ", my_delta_prefactor
#p.loglog(cluster_long,r_gyr_long, "ro")
#p.grid(True)
#p.savefig("all_together.png")
#p.clf()
fig = p.figure()
axes = fig.gca()
axes.plot(s.log10(cluster_large),s.log10(r_gyr_large), "bo",\
s.log10(cluster_large),my_fit_large,\
s.log10(cluster_small),s.log10(r_gyr_small), "ro",\
s.log10(cluster_small),my_fit_small)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Evolution Fractal Dimension from R_g')
p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="All_together_double_fitting.png"
p.savefig(cluster_name)
p.clf()
# p.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo",\
# s.log10(n_comp_large),my_fit_ari_large,\
# s.log10(n_comp_small),s.log10(r_gyr_ari_small), "ro",\
# s.log10(n_comp_small),my_fit_ari_small)
# p.xlabel('N')
# p.ylabel('R(N)')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# p.title('Evolution Fractal Dimension from R_g')
# p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# cluster_name="All_together_double_fitting_arithmetic_mean.png"
# p.savefig(cluster_name)
# p.clf()
fig = p.figure()
axes = fig.gca()
axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.plot(s.log10(n_comp_large),my_fit_ari_large,'--r',linewidth=2.)
axes.plot(s.log10(n_comp_small),s.log10(r_gyr_ari_small), "r+")
axes.plot(s.log10(n_comp_small),my_fit_ari_small,linewidth=2.,color="k")
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Evolution Fractal Dimension from R_g')
p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="All_together_double_fitting_arithmetic_mean.png"
p.savefig(cluster_name)
p.clf()
fig = p.figure()
axes = fig.gca()
axes.plot(s.log10(cluster_small),s.log10(r_gyr_small), "ro",\
s.log10(cluster_small),my_fit_small,linewidth=2.)
p.xlabel('N')
p.ylabel('R(N)')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Evolution Fractal Dimension from R_g')
p.grid(True)
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="All_together_small_fitting.png"
p.savefig(cluster_name)
p.clf()
#I now recycle df_small_vec and df_large_vec, overwriting them with the fractal
#dimensions from the all-data double fitting
df_small_vec[:]=df_small
df_large_vec[:]=df_large
fig = p.figure()
axes = fig.gca()
axes.plot(time,my_fractal_dim,"k^",\
time,df_small_vec,\
time,df_large_vec,linewidth=2.)
p.xlabel('Dimensionless Time',fontsize=20)
labels = p.getp(p.gca(), 'xticklabels')
p.setp(labels, color='k', fontsize=15)
p.ylabel('Fractal Dimension',fontsize=20)
labels2 = p.getp(p.gca(), 'yticklabels')
p.setp(labels2, color='k', fontsize=15)
#p.legend(('from power-law fitting','from linear fitting on log-log'))
#p.title('Evolution Fractal Dimension')
p.grid(False)
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="Evolution_fractal_dimension-no-mean_and_boundaries.png"
p.savefig(cluster_name)
#
p.clf()
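#Standalone matplotlib test of line widths (LW) and marker sizes (MS) on dummy
#sine/cosine data; it is unrelated to the cluster statistics above.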
x = s.arange(0.0, 2, 0.1)
y1 = s.sin(s.pi*x)
y2 = s.cos(s.pi*x)
### Plot it ###
fig = p.figure()
axes = fig.gca()
axes.plot(x, y1, '--b', label='LW=1', linewidth=1)
axes.plot(x, y1+0.5, '--r', label='LW=2', linewidth=2)
axes.plot(x, y1+1.0, '--k', label='LW=3', linewidth=3)
axes.plot(x, y2, 'xr', label='MS=3', markersize=3)
axes.plot(x, y2+0.5, 'xk', label='MS=5', markersize=5)
axes.plot(x, y2+1.0, 'xb', label='MS=7', markersize=7)
axes.legend()
#app.MainLoop()
p.savefig("my_figure.png")
#Now I calculate the evolution of the number concentration using the monodisperse approximation for the coagulation kernel
tau_coag=300.*(1./0.106)
n_part=5000.
N_time=n_part/(1+time_clu/78.61)
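#This is the classic monodisperse solution N(t)=N_0/(1+t/tau) of the coagulation
#equation, with the characteristic time tau=78.61 hardcoded in the line above
#(NB: tau_coag is computed above but not used in N_time).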
p.save("number_cluster_vs_time_monodisperse_approx.dat",N_time)
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.plot(time_clu,n_clus, "bo",label="$N_\infty$")
axes.plot(time_clu,N_time, "k+",label="monodisperse approx")
#axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
#axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Number of clusters')
p.title("Evolution Number of clusters")
p.grid(True)
cluster_name="number_cluster_vs_time_monodisperse_approx.png"
axes.legend()
p.savefig(cluster_name)
p.clf()
fig = p.figure()
axes = fig.gca()
#axes.plot(s.log10(n_comp_large),s.log10(r_gyr_ari_large), "bo")
axes.loglog(time_clu,n_clus, "bo",label="$N_\infty$")
axes.loglog(time_clu,N_time, "k+",label="monodisperse approx")
#axes.loglog(time_clu,n_clus_large, "rx",label="$N_>$")
#axes.loglog(time_clu[choice_time_clu],(10.**my_conc),"k",\
# label=r"Fit of $N_\infty $",linewidth=2.)
p.xlabel('Time')
p.ylabel('Number of clusters')
p.title("Evolution Number of clusters")
p.grid(True)
cluster_name="number_cluster_vs_time_monodisperse_approx_log-log.png"
axes.legend()
p.savefig(cluster_name)
p.clf()
# prefactor_small_R_raw[count_config]=lin_fit[1]
# delta_prefactor_small_R_raw[count_config]=lin_fit[4]
# fig = p.figure()
# axes = fig.gca()
# axes.errorbar(time,prefactor_small_R_raw,yerr=delta_prefactor_small_R_raw)
# axes.errorbar(time,prefactor_large_R_raw,yerr=delta_prefactor_large_R_raw)
# axes.errorbar(time,prefactor_R_raw,yerr=delta_prefactor_R_raw)
# # axes.errorbar(time[ini_conf:fin_conf],prefactor_small_R_raw,yerr=delta_prefactor_small_R_raw,"ko",\
# # time[ini_conf:fin_conf],prefactor_large_R_raw,yerr=delta_prefactor_large_R_raw, "r+",\
# # time[ini_conf:fin_conf],prefactor_R_raw,yerr=delta_prefactor_R_raw, "bx")
# #p.ylim(1.4,3.)
# p.xlabel('Time')
# p.ylabel('Raw prefactor and error bars')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# #p.title('Evolution Fractal Dimension from R_g')
# #p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# #cluster_name="std_err_D_f.pdf"
# p.savefig("time_evolution_raw_prefactor_with_error_bars.pdf")
# p.clf()
# fig = p.figure()
# axes = fig.gca()
# axes.errorbar(time,prefactor_small_R,yerr=delta_prefactor_small_R)
# axes.errorbar(time,prefactor_large_R,yerr=delta_prefactor_large_R)
# axes.errorbar(time,prefactor_R,yerr=delta_prefactor_R)
# # axes.errorbar(time[ini_conf:fin_conf],prefactor_small_R_raw,yerr=delta_prefactor_small_R_raw,"ko",\
# # time[ini_conf:fin_conf],prefactor_large_R_raw,yerr=delta_prefactor_large_R_raw, "r+",\
# # time[ini_conf:fin_conf],prefactor_R_raw,yerr=delta_prefactor_R_raw, "bx")
# #p.ylim(1.4,3.)
# p.xlabel('Time')
# p.ylabel('Prefactor and error bars')
# #p.legend(('from power-law fitting','from linear fitting on log-log'))
# #p.title('Evolution Fractal Dimension from R_g')
# #p.grid(True)
# #cluster_name="number_cluster_vs_time2%05d"%my_config
# #cluster_name="std_err_D_f.pdf"
# p.savefig("time_evolution_prefactor_with_error_bars.pdf")
# p.clf()
df_distr_large_and_small=s.hstack((df_distr_large,df_distr_small))
p.save("distribution_fractal_dimensions_total_small_and_large.dat",df_distr_large_and_small)
cluster_large_and_small=s.hstack((cluster_large,cluster_small))
p.save("clusters_small_and_large.dat",cluster_large_and_small)
df_select_large_small=s.where(df_distr_large_and_small<=1.55)
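#The df<=1.55 cut presumably isolates the more elongated, chain-like clusters;
#their sizes are saved separately for later inspection.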
p.save("clusters_small_and_large_selected_according_to_df.dat",cluster_large_and_small[df_select_large_small])
fig = p.figure()
axes = fig.gca()
n_bins=100
#print "the length of df_distr is, ", len(df_distr)
axes.hist(df_distr_large_and_small,bins=n_bins, normed=0)
#p.xlabel('N')
p.ylabel('frequency')
#p.legend(('from power-law fitting','from linear fitting on log-log'))
p.title('Distribution of fractal dimensions')
#cluster_name="number_cluster_vs_time2%05d"%my_config
cluster_name="distribution_fractal_dimensions_total_small_and_large.pdf"
p.savefig(cluster_name)
p.clf()
###########################################################################################
###########################################################################################
###########################################################################################
###########################################################################################
cluster_excl_ord=s.sort(cluster_excl)
r_gyr_excl_ord=r_gyr_excl[s.argsort(cluster_excl)]
print "r_gyr_excl_ord is, ", r_gyr_excl_ord
cluster_excl_uni=n.unique1d(cluster_excl_ord)
print "cluster_excl_uni is, ", cluster_excl_uni
p.save("cluster_exclusion.dat", cluster_excl)
r_bound=cluster_excl_ord.searchsorted(cluster_excl_uni,side='right')
l_bound=cluster_excl_ord.searchsorted(cluster_excl_uni,side='left')
print "l_bound and r_bound are, ", l_bound, r_bound
print "the len(r_gyr_excl_ord) is, ", len(r_gyr_excl_ord)
#print "the length of r_bound is, ", len(r_bound)
r_gyr_excl_ari_mean=s.zeros(len(cluster_excl_uni))
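#cluster_excl_ord[l_bound[i]:r_bound[i]] brackets all clusters of size
#cluster_excl_uni[i], so the loop below takes the arithmetic mean radius per size.
#A vectorized alternative (sketch, assuming the groups are contiguous so that
#r_bound[i]==l_bound[i+1]):
# group_sums=n.add.reduceat(r_gyr_excl_ord,l_bound)
# r_gyr_excl_ari_mean=group_sums/(r_bound-l_bound)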
for i in xrange(0,len(cluster_excl_uni)):
r_gyr_excl_ari_mean[i]=r_gyr_excl_ord[l_bound[i]:r_bound[i]].mean()
p.save("cluster_excluded_uni.dat", cluster_excl_uni)
p.save("r_gyr_mean_excluded.dat", r_gyr_excl_ari_mean)
###########################################################################################
###########################################################################################
###########################################################################################
###########################################################################################
###########################################################################################
###########################################################################################
#Now I perform a few tests; for instance, I fit the number of monomers in a cluster
#against its radius of gyration (i.e. the axes inverted with respect to the fits above)
lin_fit=fitting_stat(s.log10(r_gyr_tot),s.log10(cluster_tot))
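#With the axes inverted (log10 of N against log10 of R) the fitted slope itself
#estimates df, so lin_fit[0] should roughly match the reciprocals of the R(N)
#slopes obtained above.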
print "Now lin_fit is, ", lin_fit
if (save_tot == 1):
p.save("total_cluster_distribution.dat", cluster_tot)
p.save("total_r_gyr_distribution.dat", r_gyr_tot)
print "So far so good"