Rev. | 1d212f2664a62d7de0b1737c0a60848e4fd778bd |
---|---|
Size | 29,823 Bytes |
Time | 2008-11-13 04:28:13 |
Author | iselllo |
Log Message | I added a code which now properly calculates the mean coordination number in the system (the previous calculation in
#! /usr/bin/env python
import scipy as s
import numpy as n
import pylab as p
#from rpy import r
import distance_calc as d_calc
import igraph as ig
import calc_rad as c_rad
#import g2_calc as g2
from scipy import stats #I need this module for the linear fit
n_part=5000
density=0.01
box_vol=n_part/density
part_diam=1. # particle diameter
part_vol=s.pi/6.*part_diam**3.
tot_vol=n_part*part_vol
cluster_look = 50 # specific particle (and corresponding cluster) you want to track
#Again, I prefer to give the number of particles and the density as input parameters
# and work out the box size accordingly
collisions =1 #whether I should take statistics about the collisions or not.
ini_config=0
fin_config=3000 #for large data post-processing I need to declare an initial and final
#configuration I want to read and post-process
by=1000 #stride (in saved configurations) between the configurations I read
figure=0 #whether I should print many figures or not
n_config=(fin_config-ini_config)/by # total number of configurations, which are numbered from 0 to n_config-1
my_selection=s.arange(ini_config,fin_config,by)
box_size=(n_part/density)**(1./3.)
print "the box size is, ", box_size
threshold=1.04 #threshold to consider two particles as directly connected
save_size_save =1 #tells whether I want to save the coordinates of a cluster of a specific size
size_save=98
t_step=1. #here meant as the time-separation between adjacent configurations
fold=0
gyration = 1 #parameter controlling whether I want to calculate the radius of gyration
#time=s.linspace(0.,((n_config-1)*t_step),n_config) #I define the time along the simulation
time=p.load("../1/time.dat")
delta_t=(time[2]-time[1])*by
time=time[my_selection] #I adapt the time to the initial and final configuration
p.save("time_red.dat",time)
#some physical parameters of the system
T=0.1 #temperature
beta=0.1 #damping coefficient
v_0=0. #initial particle mean velocity
n_s_ini=2 #element in the time-sequence corresponding
# to the chosen reference time for calculating the velocity autocorrelation
#function
s_ini=(n_s_ini)*t_step #I define a specific time I will need for the calculation
# of the autocorrelation function
#print 'the time chosen for the velocity correlation function is, ', s_ini
#print 'and the corresponding time on the array is, ', time[n_s_ini]
Len=box_size
print 'Len is, ', Len
calculate=1
if (calculate == 1):
# energy_1000=p.load("tot_energy.dat")
# p.plot(energy_1000[:,0], energy_1000[:,1] ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('energy')
# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# p.title('energy evolution')
# p.grid(True)
# p.savefig('energy_2000_particles.pdf')
# p.hold(False)
# print "the std of the energy for a system with 2000 particles is, ", s.std(energy_1000[config_ini:n_config,1])
# print "the mean of the energy for a system with 2000 particles is, ", s.mean(energy_1000[config_ini:n_config,1])
# print "the ratio of the two is,", s.std(energy_1000[config_ini:n_config,1])\
# /s.mean(energy_1000[config_ini:n_config,1])
# print "and the theoretical value is, ", s.sqrt(2./3.)*(1./s.sqrt(n_part))
# tot_config=p.load("total_configuration.dat")
# tot_config=tot_config[(ini_config*n_part*3):(fin_config*n_part*3)]
#I cut the array with the total number
#of configurations for large arrays
# print "the length of tot_config is, ", len(tot_config)
# tot_config=s.reshape(tot_config,(n_config,3*n_part))
# print "tot_config at line 10 is, ", tot_config[10,:]
# #Now I save some "raw" data for detailed analysis
# i=71
# test_arr=tot_config[i,:]
# test_arr=s.reshape(test_arr,(n_part,3))
# p.save("test_71_raw.dat",test_arr)
#Now I want to "fold" particle positions in order to be sure that they are inside
# my box.
#f[:,:,k]=where((Vsum<=V[k+1]) & (Vsum>=V[k]), (V[k+1]-Vsum)/(V[k+1]-V[k]),\
#f[:,:,k] )
#f[:,:,k]=where((Vsum<=V[k]) & (Vsum>=V[k-1]),(Vsum-V[k-1])/(V[k]-V[k-1]),\
#f[:,:,k])
# if (fold ==1):
# #In the case commented below, the box goes from [-L/2,L/2], but that is
# #probably not the case in Espresso
# #Len=box_size/2.
# # and then I do the following
# #tot_config=s.where((tot_config>Len) & (s.remainder(tot_config,(2.*Len))<Len),\
# #s.remainder(tot_config,Len),tot_config)
# #tot_config=s.where((tot_config>Len) & (s.remainder(tot_config,(2.*Len))>=Len),\
# #(s.remainder(tot_config,Len)-Len),tot_config)
# #tot_config=s.where((tot_config< -Len) &(s.remainder(tot_config,(-2.*Len))< -Len)\
# #(s.remainder(tot_config,-Len)+Len),tot_config)
# #tot_config=s.where((tot_config< -Len) &(s.remainder(tot_config,(-2.*Len))>=-Len)\
# #s.remainder(tot_config,-Len),tot_config)
# #Now the case when the box is [0,L], so Len is actually the box size
# p.save("tot_config_unfolded.dat",tot_config)
# Len=box_size
# tot_config=s.where(tot_config>Len,s.remainder(tot_config,Len),tot_config)
# tot_config=s.where(tot_config<0.,(s.remainder(tot_config,-Len)+Len),tot_config)
# print "the max of tot_config is", tot_config.max()
# print "the min of tot_config is", tot_config.min()
# p.save("tot_config_my_folding.dat",tot_config)
# x_0=tot_config[0,0] #initial x position of all my particles
# x_col=s.arange(n_part)*3
# print "x_col is, ", x_col
# #Now I plot the motion of particle number 1; I follow the three components
# p.plot(time,tot_config[:,0] ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('x position particle 1 ')
# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# p.title('x_1 vs time')
# p.grid(True)
# p.savefig('x_position_particle_1.pdf')
# p.hold(False)
# # p.save("mean_square_disp.dat", var_x_arr)
# p.plot(time,tot_config[:,1] ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('y position particle 1 ')
# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# p.title('y_1 vs time')
# p.grid(True)
# p.savefig('y_position_particle_1.pdf')
# p.hold(False)
# # p.save("mean_square_disp.dat", var_x_arr)
# p.plot(time,tot_config[:,2] ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('z position particle 1 ')
# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# p.title('z_1 vs time')
# p.grid(True)
# p.savefig('z_position_particle_1.pdf')
# p.hold(False)
# # p.save("mean_square_disp.dat", var_x_arr)
#x_arr=s.zeros((n_part))
#y_arr=s.zeros((n_part))
#z_arr=s.zeros((n_part))
# for i in xrange(0,n_part):
# x_arr[:,i]=tot_config[:,3*i]
# y_arr[:,i]=tot_config[:,(3*i+1)]
# z_arr[:,i]=tot_config[:,(3*i+2)]
# print "the x coordinates are, ", x_arr
# #Now a test: I try to calculate the radius of gyration
# #with a particular distance (suggestion by Yannis)
# mean_x_arr=s.zeros(n_config)
# mean_x_arr=x_arr.mean(axis=1)
# mean_y_arr=s.zeros(n_config)
# mean_y_arr=y_arr.mean(axis=1)
# mean_z_arr=s.zeros(n_config)
# mean_z_arr=z_arr.mean(axis=1)
# var_x_arr2=s.zeros(n_config)
# var_y_arr2=s.zeros(n_config)
# var_y_arr2=s.zeros(n_config)
# for i in xrange(0,n_config):
# for j in xrange(0,n_part):
# var_x_arr2[i]=var_x_arr2[i]+((x_arr[i,j]-mean_x_arr[i])- \
# Len*n.round((x_arr[i,j]-mean_x_arr[i])/Len))**2.
# var_x_arr2=var_x_arr2/n_part
# print 'var_x_arr2 is, ', var_x_arr2
# p.save( "test_config_before_manipulation.dat",x_arr[600,:])
# max_x_dist=s.zeros(n_config)
# #for i in xrange(0, n_config):
# #mean_x_arr[i]=s.mean(x_arr[i,:])
# #var_x_arr[i]=s.var(x_arr[i,:])
# mean_x_arr[:]=s.mean(x_arr,axis=1)
# var_x_arr[:]=s.var(x_arr,axis=1)
# max_x_dist[:]=x_arr.max(axis=1)-x_arr.min(axis=1)
# print "mean_x_arr is, ", mean_x_arr
# print "var_x_arr is, ", var_x_arr
# if (fold==1):
# p.plot(time, max_x_dist ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('x-dimension of aggregate ')
# p.title('x-maximum stretch vs time [folded]')
# p.grid(True)
# p.savefig('max_x_distance_folded.pdf')
# p.hold(False)
# p.save("max_x_distance_folded.dat", max_x_dist)
# elif (fold==0):
# p.plot(time, max_x_dist ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('x-dimension of aggregate ')
# p.title('x-maximum stretch vs time[unfolded]')
# p.grid(True)
# p.savefig('max_x_distance_unfolded.pdf')
# p.hold(False)
# p.save("max_x_distance_unfolded.dat", max_x_dist)
# p.plot(time, var_x_arr ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('mean square displacement ')
# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# p.title('VAR(x) vs time')
# p.grid(True)
# p.savefig('mean_square_disp.pdf')
# p.hold(False)
# p.save("mean_square_disp.dat", var_x_arr)
# p.plot(time, s.sqrt(var_x_arr) ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('mean square displacement ')
# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# p.title('std(x) vs time')
# p.grid(True)
# p.savefig('std_of_x_position.pdf')
# p.hold(False)
# p.save("std_of_x_position.dat", s.sqrt(var_x_arr))
# p.plot(time, mean_x_arr ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('mean x position ')
# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# p.title('mean(x) vs time')
# p.grid(True)
# p.savefig('mean_x_position.pdf')
# p.hold(False)
# p.save("mean_x_position.dat", mean_x_arr)
# #I now calculate the radius of gyration
# if (gyration ==1):
# mean_y_arr=s.zeros(n_config)
# mean_z_arr=s.zeros(n_config)
# var_y_arr[:]=s.var(y_arr,axis=1)
# var_z_arr[:]=s.var(z_arr,axis=1)
# mean_y_arr[:]=s.mean(y_arr,axis=1)
# mean_z_arr[:]=s.mean(z_arr,axis=1)
# r_gyr=s.sqrt(var_x_arr+var_y_arr+var_z_arr)
# p.save("r_gyr_my_post_processing.dat",r_gyr)
# p.plot(time, r_gyr ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('Radius of gyration')
# p.title('Radius of gyration vs time')
# p.grid(True)
# p.savefig('radius of gyration.pdf')
# p.hold(False)
# #Now I want to calculate the distance of the centre of
# #mass of my aggregate from the origin
# dist_origin=s.sqrt(mean_x_arr**2.+mean_y_arr**2.+mean_z_arr**2.)
# p.plot(time, dist_origin ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('Mean distance from origin')
# p.title('Mean distance vs time')
# p.grid(True)
# p.savefig('mean_distance_from_origin.pdf')
# p.hold(False)
# #Now some plots of the std's along the other 2 directions
# p.plot(time, s.sqrt(var_y_arr) ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('mean square displacement ')
# p.title('std(y) vs time')
# p.grid(True)
# p.savefig('std_of_y_position.pdf')
# p.hold(False)
# p.save("std_of_y_position.dat", s.sqrt(var_y_arr))
# p.plot(time, s.sqrt(var_z_arr) ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('mean square displacement ')
# p.title('std(z) vs time')
# p.grid(True)
# p.savefig('std_of_z_position.pdf')
# p.hold(False)
# p.save("std_of_z_position.dat", s.sqrt(var_z_arr))
# p.plot(time, mean_y_arr ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('mean y position ')
# p.title('mean(y) vs time')
# p.grid(True)
# p.savefig('mean_y_position.pdf')
# p.hold(False)
# p.save("mean_y_position.dat", mean_y_arr)
# p.plot(time, mean_z_arr ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('mean z position ')
# p.title('mean(z) vs time')
# p.grid(True)
# p.savefig('mean_z_position.pdf')
# p.hold(False)
# p.save("mean_z_position.dat", mean_z_arr)
# #Now some comparative plots:
# p.plot(time, mean_x_arr,time, mean_y_arr,time, mean_z_arr,linewidth=2.)
# p.xlabel('time')
# p.ylabel('mean position')
# p.legend(('mean x','mean y','mean z',))
# p.title('Evolution mean position')
# p.grid(True)
# p.savefig('mean_position_comparison.pdf')
# p.hold(False)
# p.plot(time, var_x_arr,time, var_y_arr,time, var_z_arr,linewidth=2.)
# p.xlabel('time')
# p.ylabel('mean square displacement')
# p.legend(('var x','var y','var z',))
# p.title('Evolution mean square displacement')
# p.grid(True)
# p.savefig('mean_square_displacement_comparison.pdf')
# p.hold(False)
# p.plot(time, s.sqrt(var_x_arr),time, s.sqrt(var_y_arr),time,\
# s.sqrt(var_z_arr),linewidth=2.)
# p.xlabel('time')
# p.ylabel('std of displacement')
# p.legend(('std x','std y','std z',))
# p.title('Evolution std of displacement')
# p.grid(True)
# p.savefig('std_displacement_comparison.pdf')
# p.hold(False)
# # Now I compare my calculations with the one made
# # by espresso:
# r_gyr_esp=p.load("rgyr.dat")
# p.plot(time, r_gyr, r_gyr_esp[:,0], r_gyr_esp[:,1],linewidth=2.)
# p.xlabel('time')
# p.ylabel('Radius of gyration')
# p.legend(('my_postprocessing','espresso'))
# p.title('Radius of gyration vs time')
# p.grid(True)
# p.savefig('radius_of_gyration_comparison.pdf')
# p.hold(False)
# # Now I do pretty much the same thing with the velocity
# tot_config_vel=p.load("total_configuration_vel.dat")
# if (gyration ==1):
# r_gyr=s.zeros(n_config)
# y_arr=s.zeros((n_config,n_part))
# z_arr=s.zeros((n_config,n_part))
# var_y_arr=s.zeros(n_config)
# var_z_arr=s.zeros(n_config)
# for i in xrange(0,n_part):
# y_arr[:,i]=tot_config[:,(3*i+1)]
# z_arr[:,i]=tot_config[:,(3*i+2)]
# var_y_arr[:]=s.var(y_arr,axis=1)
# var_z_arr[:]=s.var(z_arr,axis=1)
# print "the length of tot_config is, ", len(tot_config)
# tot_config_vel=s.reshape(tot_config_vel,(n_config,3*n_part))
# #print "tot_config at line 10 is, ", tot_config[10,:]
# v_0=tot_config_vel[0,0] #initial x velocity of all my particles
# v_arr=s.zeros((n_config,n_part))
# print 'OK creating v_arr'
# v_col=s.arange(n_part)*3
# #print "x_col is, ", x_col
# for i in xrange(0,n_part):
# v_arr[:,i]=tot_config_vel[:,3*i]
# mean_v_arr=s.zeros(n_config)
# var_v_arr=s.zeros(n_config)
# # for i in xrange(0, n_config):
# # mean_v_arr[i]=s.mean(v_arr[i,:])
# #var_v_arr[i]=s.var(v_arr[i,:])
# mean_v_arr[:]=s.mean(v_arr,axis=1)
# var_v_arr[:]=s.var(v_arr,axis=1)
# #print "mean_v_arr is, ", mean_v_arr
# #print "var_v_arr is, ", var_v_arr
# #time=s.linspace(0.,(n_config*t_step),n_config)
# p.plot(time, var_v_arr ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('velocity variance')
# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# p.title('VAR(v) vs time')
# p.grid(True)
# p.savefig('velocity_variance.pdf')
# p.hold(False)
# p.save("velocity_variance.dat", var_v_arr)
# #Now I want to calculate the autocorrelation function.
# #First, I need to fix an initial time. I choose something which is well past the
# #ballistic regime
# #Now I start calculating the velocity autocorrelation function
# vel_autocor=s.zeros(n_config)
# for i in xrange(0, n_config):
# vel_autocor[i]=s.mean(v_arr[i]*v_arr[n_s_ini])
# p.plot(time, vel_autocor ,linewidth=2.)
# p.xlabel('time')
# p.ylabel('<v(s)v(t)> ')
# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part'))
# p.title('Velocity correlation')
# p.grid(True)
# p.savefig('velocity_correlation_numerics.pdf')
# p.hold(False)
# p.save("velocity_autocorrelation.dat", vel_autocor)
# #Now I try reconstructing the structure of the aggregate.
# #first a test case
# my_conf=tot_config[919,:]
# print 'my_conf is, ', my_conf
# #Now I want to reconstruct the structure of the aggregate in 3D
# # I choose particle zero as a reference
# x_test=s.zeros(3)
# y_test=s.zeros(3)
# z_test=s.zeros(3)
# for i in xrange(0,3):
# x_test[i]=my_conf[3*i]
# y_test[i]=my_conf[3*i+1]
# z_test[i]=my_conf[3*i+2]
# print "x_test is, ", x_test
# print "y_test is, ", y_test
# print "z_test is, ", z_test
# #OK reading the files
# pos_0=s.zeros(3) #I initialized the positions of the three particles
# pos_1=s.zeros(3)
# pos_2=s.zeros(3)
# pos_1[0]=r_ij[0] #Now I have given the x-position of particles 1 and 2 wrt particle 0
# pos_2[0]=r_ij[1]
# #Now I do the same for the y coord!
# count_int=0
# for i in xrange(0,(n_part-1)):
# for j in xrange((i+1),n_part):
# r_ij[count_int]=y_test[i]-y_test[j]
# r_ij[count_int]=r_ij[count_int]-Len*n.round(r_ij[count_int]/Len)
# r_ij[count_int]=-r_ij[count_int] #I have better reverse the signs now.
# print 'i and j are, ', (i+1), (j+1)
# count_int=count_int+1
# print 'r_ij is, ', r_ij
# pos_1[1]=r_ij[0] #Now I have given the x-position of particles 1 and 2 wrt particle 0
# pos_2[1]=r_ij[1]
# #Now I do the same for the z coord!
# count_int=0
# for i in xrange(0,(n_part-1)):
# for j in xrange((i+1),n_part):
# r_ij[count_int]=z_test[i]-z_test[j]
# r_ij[count_int]=r_ij[count_int]-Len*n.round(r_ij[count_int]/Len)
# r_ij[count_int]=-r_ij[count_int] #I have better reverse the signs now.
# print 'i and j are, ', (i+1), (j+1)
# count_int=count_int+1
# print 'r_ij is, ', r_ij
# pos_1[2]=r_ij[0] #Now I have given the x-position of particles 1 and 2 wrt particle 0
# pos_2[2]=r_ij[1]
# print 'pos_0 is,', pos_0
# print 'pos_1 is,', pos_1
# print 'pos_2 is,', pos_2
# #now I can calculate the radius of gyration!
# x_test[0]=pos_0[0]
# x_test[1]=pos_1[0]
# x_test[2]=pos_2[0]
# y_test[0]=pos_0[1]
# y_test[1]=pos_1[1]
# y_test[2]=pos_2[1]
# z_test[0]=pos_0[2]
# z_test[1]=pos_1[2]
# z_test[2]=pos_2[2]
# R_g=s.sqrt(s.var(x_test)+s.var(y_test)+s.var(z_test))
# print "the correct radius of gyration is, ", R_g
#############################################################
#############################################################
#Now I start counting the number of aggregates in each saved configuration
#First I re-build the configuration of the system with the "correct" x_arr,y_arr,z_arr
# I turned the following bit into a comment since I am using the original coord as
#returned by Espresso to start with
# #I re-use the tot_config array for this purpose
# for i in xrange(0,n_part):
# tot_config[:,3*i]=x_arr[:,i]
# tot_config[:,(3*i+1)]=y_arr[:,i]
# tot_config[:,(3*i+2)]=z_arr[:,i]
#p.save("test_calculating_graph.dat",tot_config[71:73,:])
    #Now a function to count the collisions. Despite the name, it only counts the
    #collisions which took place; it knows nothing about the direct calculation
    #of the kernel.
    def kernel_calc(A):
        d = {}
        for r in A:
            t = tuple(r)
            d[t] = d.get(t, 0) + 1
        # The dict d now holds the counts of the unique rows of A.
        B = n.array(d.keys())   # the unique rows of A
        C = n.array(d.values()) # the counts of the unique rows
        return B, C
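    # A quick (commented-out) sanity check of kernel_calc on a toy array of
    # hypothetical (i,j) cluster-size pairs:
    # A_test = n.array([[1, 1], [1, 2], [1, 1]])
    # B_test, C_test = kernel_calc(A_test)
    # # B_test holds the unique rows [1,1] and [1,2]; C_test their counts 2 and 1
    # # (the ordering follows the dict and is not guaranteed)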
    #The following function actually takes care of calculating the elements of the
    #kernel matrix from the statistics on the collisions.
    def kernel_entries_normalized(B2, dist, C2, box_vol, delta_t):
        dim = s.shape(B2)
        print "dim is, ", dim
        n_i = s.zeros(dim[0])
        n_j = s.zeros(dim[0])
        for i in xrange(len(B2[:, 0])):
            n_i[i] = len(s.where(dist == B2[i, 0])[0])
            n_j[i] = len(s.where(dist == B2[i, 1])[0])
        n_i = n_i/box_vol
        n_j = n_j/box_vol
        kernel_list = C2/(n_i*n_j*delta_t*box_vol) #I do not get the whole kernel
        #matrix, but only the elements for the collisions which actually took place
        return kernel_list
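    #The normalization above is the usual mean-field estimate: if C2 collisions
    #between clusters of sizes i and j are observed during delta_t, the kernel
    #element is approximately K_ij = C2/(n_i*n_j*delta_t*V), with n_i and n_j the
    #number concentrations and V the box volume. A hypothetical (commented-out)
    #call, assuming dist holds the cluster size seen by each particle and
    #collision_pairs the recorded (i,j) size pairs:
    # B2, C2 = kernel_calc(collision_pairs)
    # kernel_list = kernel_entries_normalized(B2, dist, C2, box_vol, delta_t)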
    #Now a function to calculate the radius of gyration
    def calc_radius(x_arr, y_arr, z_arr, Len):
        #here x_arr is one-dimensional, corresponding to a single configuration
        r_0j = s.zeros((len(x_arr)-1))
        for j in xrange(1, len(x_arr)): #particle zero is the reference particle
            r_0j[j-1] = x_arr[0]-x_arr[j]
            r_0j[j-1] = -(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len)) #minimum-image convention
        #Now I re-define x_arr as the displacements from particle zero, so that
        #the variance is taken correctly even across the periodic boundaries
        x_arr[0] = 0.
        x_arr[1:] = r_0j
        var_x_arr = s.var(x_arr)
        for j in xrange(1, len(y_arr)):
            r_0j[j-1] = y_arr[0]-y_arr[j]
            r_0j[j-1] = -(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len))
        y_arr[0] = 0.
        y_arr[1:] = r_0j
        var_y_arr = s.var(y_arr)
        for j in xrange(1, len(z_arr)):
            r_0j[j-1] = z_arr[0]-z_arr[j]
            r_0j[j-1] = -(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len))
        z_arr[0] = 0.
        z_arr[1:] = r_0j
        var_z_arr = s.var(z_arr)
        radius = s.sqrt(var_x_arr+var_y_arr+var_z_arr)
        return radius
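    # A small (commented-out) sanity check of calc_radius: three collinear
    # particles in a box of size 10, one of them wrapped across the periodic
    # boundary (the numbers are illustrative assumptions). Without the
    # minimum-image correction the radius of gyration would be badly
    # overestimated. Note that the function overwrites its input arrays in place.
    # x_t = s.array([0.5, 9.8, 1.2]) # 9.8 is really at -0.2, i.e. -0.7 from 0.5
    # y_t = s.zeros(3)
    # z_t = s.zeros(3)
    # print calc_radius(x_t, y_t, z_t, 10.)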
    #Now a function to calculate the positions of the particles in a single cluster
    def calc_coord_single(x_arr, y_arr, z_arr, Len):
        position_array = s.zeros((len(x_arr), 3))
        #here x_arr is one-dimensional, corresponding to a single configuration
        r_0j = s.zeros((len(x_arr)-1))
        for j in xrange(1, len(x_arr)): #particle zero is the reference particle
            r_0j[j-1] = x_arr[0]-x_arr[j]
            r_0j[j-1] = -(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len)) #minimum-image convention
        #Now I re-define x_arr as the displacements from particle zero
        x_arr[0] = 0.
        x_arr[1:] = r_0j
        position_array[:, 0] = x_arr
        for j in xrange(1, len(y_arr)):
            r_0j[j-1] = y_arr[0]-y_arr[j]
            r_0j[j-1] = -(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len))
        y_arr[0] = 0.
        y_arr[1:] = r_0j
        position_array[:, 1] = y_arr
        for j in xrange(1, len(z_arr)):
            r_0j[j-1] = z_arr[0]-z_arr[j]
            r_0j[j-1] = -(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len))
        z_arr[0] = 0.
        z_arr[1:] = r_0j
        position_array[:, 2] = z_arr
        return position_array
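    # calc_coord_single returns the whole cluster unwrapped around particle zero,
    # e.g. (commented out, with hypothetical per-cluster arrays x_c, y_c, z_c):
    # pos = calc_coord_single(x_c, y_c, z_c, box_size)
    # p.save("cluster_coords.dat", pos) # hypothetical file: one x,y,z row per particle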
#Now I try loading the R script
#r.source("cluster_functions.R")
#I now calculate the number of clusters in each configuration
    n_clusters = s.zeros(n_config)
    mean_dist_part_single_cluster = s.zeros(n_config)
    overall_coord_number = s.zeros(n_config)
    v_aver = s.zeros(n_config)
    size_single_cluster = s.zeros(n_config)
    r_gyr_single_cluster = s.zeros(n_config)
    coord_number_single_cluster = s.zeros(n_config)
    correct_coord_number = s.zeros(n_config)
    # min_dist=s.zeros(n_config)
    dist_mat = s.zeros((n_part, n_part))
    for i in xrange(0, n_config):
        read_pos = "../1/read_pos_%1d" % my_selection[i]
        print "read_pos is, ", read_pos
        #cluster_name="cluster_dist%05d"%my_selection[i]
        test_arr = p.load(read_pos)
        #test_arr=s.reshape(test_arr,(n_part,3))
        #if (i==14):
        #    p.save("test_14.dat",test_arr)
        #dist_mat=r.distance(test_arr)
        x_pos = test_arr[:, 0]
        y_pos = test_arr[:, 1]
        z_pos = test_arr[:, 2]
        dist_mat = d_calc.distance_calc(x_pos, y_pos, z_pos, box_size)
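        # d_calc.distance_calc is a compiled helper module; a pure-scipy sketch
        # of (what I assume is) the same minimum-image distance matrix, too slow
        # for 5000 particles but handy as a cross-check, would be:
        # dx = x_pos[:, s.newaxis] - x_pos[s.newaxis, :]
        # dx = dx - box_size*n.round(dx/box_size)
        # (and likewise dy, dz), then
        # dist_mat = s.sqrt(dx**2. + dy**2. + dz**2.)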
# if (i==71):
# p.save("distance_matrix_71.dat",dist_mat)
# p.save("x_pos_71.dat",x_pos)
#clust_struc= (r.mycluster2(dist_mat,threshold)) #a cumbersome
#way to make sure I have an array even when clust_struct is a single
#number (i.e. I have only a cluster)
# print'clust_struct is, ', clust_struc
# n_clusters[i]=r.mycluster(dist_mat,threshold)
# > Lorenzo,
# > > if your graph is `g` then
# > > degree(g)
# > > gives the number of direct neighbors of each vertex (or particle).
# Just to translate it to Python: g.degree() gives the number of direct
# neighbors of each vertex. If your graph is directed, you may only want
# to count only the outgoing or the incoming edges: g.degree(igraph.OUT)
# or g.degree(igraph.IN)
# > > So
# > > mean(degree(g))
# In Python: sum(g.degree()) / float(g.vcount())
# > > (and to turn an adjacency matrix `am` into an igraph object `g` just
# > > use "g <- graph.adjacency(am)")
# In Python: g = Graph.Adjacency(matrix)
# e.g. g = Graph.Adjacency([[0,1,0],[1,0,1],[0,1,0]])
        cluster_obj = ig.Graph.Adjacency((dist_mat <= threshold).tolist(),
                                         ig.ADJ_UNDIRECTED)
        # Now I start the calculation of the coordination number
        coord_list = cluster_obj.degree() #now I have a list with the number of
        #first neighbors of each particle
        #coord_arr=s.zeros(len(coord_list))
# Lorenzo, i'm not sure why you would like to use adjacency matrices,
# igraph graphs are much better in most cases, espacially if the graph
# is large. Here is how to calculate the mean degree per connected
# component in R, assuming your graph is 'g':
# comps <- decompose.graph(g)
# sapply(comps, function(x) mean(degree(x)))
# Gabor
# In Python: starting with your graph, you obtain a VertexClustering
# object somehow (I assume you already have it). Now, cl[0] gives the
# vertex IDs in the first component, cl[1] gives the vertex IDs in the
# second and so on. If the original graph is called g, then
# g.degree(cl[0]) gives the degrees of those vertices, so the average
# degrees in each connected component are as follows (I'm simply
# iterating over the components in a for loop):
# cl=g.components()
# for idx, component in enumerate(cl):
# degrees = g.degree(component)
# print "Component #%d: %s" % (idx, sum(degrees)/float(len(degrees)))
# -- T.
#I need to get an array out of the list of the list of 1st neighbors
#for l in xrange(len(coord_list)):
#coord_arr[l]=coord_list[l]
#print "coord_list is, ", coord_list
        coord_arr = s.asarray(coord_list)-2 #Important! I need this correction:
        #the diagonal of dist_mat (zero self-distance) passes the threshold, so
        #every vertex carries a self-loop, which igraph counts twice in the degree
        cluster_obj.simplify() #remove the self-loops before finding the clusters
        clustering = cluster_obj.clusters()
        #n_clusters[i]=p.load("nc_temp.dat")
        n_clusters[i] = len(clustering)
        print "the total number of clusters and my_config are,", n_clusters[i], my_selection[i]
        my_cluster = clustering.sizes()
        #Now I turn the list of cluster sizes into a scipy array
        my_cluster = s.asarray(my_cluster)
        #Now I re-organize the particles in my configuration by putting together
        #those which are in the same cluster
        clust_struc = clustering.membership
        clust_struc = s.asarray(clust_struc)
#if (i==0):
# print "clust_struc[4727] and my_selection[i] are, ", clust_struc[4727], my_selection[i]
# print "clust_struc[29] and my_selection[i] are, ", clust_struc[29], my_selection[i]
        part_in_clust = s.argsort(clust_struc) #argsort on the membership vector
        #groups the particle indices cluster by cluster
        if (i == 0):
            #cluster_record_old=part_in_clust
            part_in_clust_old = s.copy(part_in_clust)
            len_my_cluster_old = len(my_cluster)
            my_cluster_old = s.copy(my_cluster)
        coord_number_dist = s.zeros(len(my_cluster)) #this will contain the
        #distribution of the coordination numbers (one entry per cluster)
        my_sum = s.cumsum(my_cluster)
        f = s.arange(1) #simply 0, but as an array!
        my_lim = s.concatenate((f, my_sum)) #the cumulative sizes in my_lim
        #delimit each cluster's slice of part_in_clust
        if (i == 0):
            my_lim_old = my_lim
        for m in xrange(0, len(my_cluster)):
            #if (abs(my_lim[m]-my_lim[m+1])<2):
            #    r_gyr_dist[m]=0.0 # r_gyr_dist has already been initialized to zero!
            if (abs(my_lim[m]-my_lim[m+1]) >= 2):
                x_pos2 = x_pos[part_in_clust[my_lim[m]:my_lim[m+1]]]
                y_pos2 = y_pos[part_in_clust[my_lim[m]:my_lim[m+1]]]
                z_pos2 = z_pos[part_in_clust[my_lim[m]:my_lim[m+1]]]
                coord_clust = coord_arr[part_in_clust[my_lim[m]:my_lim[m+1]]]
                #print "coord_clust is, ", coord_clust
                coord_number_dist[m] = s.mean(coord_clust)
        correct_coord_number[i] = coord_number_dist.mean() #i.e. I associate a
        #coordination number to each cluster and then take the average over the
        #set of coordination numbers (one per cluster)
    p.save("coordination_numbers_averaged.dat", correct_coord_number)
    print "So far so good"