#!/usr/bin/env python
# coding: utf-8
# A program to analyse the calibration-grid ("mire") data
# -- Frédéric Meynadier

from numpy import *

from functions import *

from optparse import OptionParser

parser = OptionParser()
parser.add_option("-c", "--catalog", dest="catalogfile",
                  help="Name of the catalog file")
parser.add_option("-o", "--outputfits", dest="outputfits",
                  default="output.fits",
                  help="Name of the output fits file")

(options, args) = parser.parse_args()

# Read the approximate coordinates of the 4 fiducial marks
# (reperes.txt: one "x y" pair per line, '#' lines are comments)
rep = zeros((4, 2))
i = 0
for line in open('reperes.txt'):
    if ((line[0] != "#") and (line != '\n')):
        spl = line.split()
        rep[i][0] = float(spl[0])
        rep[i][1] = float(spl[1])
        i += 1

# Read the catalogue file
fields = ["FLUX_MAX",
          "X_IMAGE",
          "Y_IMAGE",
          "THETA_IMAGE",
          "ELONGATION",
          "ELLIPTICITY",
          "FWHM_IMAGE",
          "ERRX2_IMAGE",
          "ERRY2_IMAGE"]
catalog = []
for line in open(options.catalogfile):
    if ((line[0] != "#") and (line != '\n')):
        tmp = {}
        spl = line.split()
        for i in range(len(fields)):
            tmp[fields[i]] = float(spl[i])
        catalog.append(tmp)

# Find the position of the fiducial marks in the catalogue - beware of duplicates
real_rep = zeros((4, 2))
for i in range(4):
    detections = 0
    for source in catalog:
        pos_source = array([source['X_IMAGE'], source['Y_IMAGE']])
        if (modulus(rep[i, :] - pos_source) < 5):
            detections += 1
            real_rep[i, :] = pos_source
    if (detections != 1):
        print "Warning! ambiguity for fiducial mark", i, \
              ", number of detections:", detections

# Determine the centre: barycentre of the 4 fiducial marks
center = array([sum(real_rep[:, 0]) / 4, sum(real_rep[:, 1]) / 4])

# Reference unit vectors of the grid (the /20 converts the fiducial-mark
# baselines, 20 grid steps long, into one grid step)
vec_i = ((real_rep[0, :] - real_rep[1, :]) / 20
         + (real_rep[3, :] - real_rep[2, :]) / 20) / 2
vec_j = ((real_rep[1, :] - real_rep[2, :]) / 20
         + (real_rep[0, :] - real_rep[3, :]) / 20) / 2

# Find the grid knot closest to each source
vec_i_2 = dot(vec_i, vec_i)
vec_j_2 = dot(vec_j, vec_j)
for source in catalog:
    pos_source = array([source['X_IMAGE'], source['Y_IMAGE']]) - center
    # in the grid frame (projecting onto the unit vectors):
    pos_source_grid = array([dot(pos_source, vec_i) / vec_i_2,
                             dot(pos_source, vec_j) / vec_j_2])
    # round to the nearest knot
    knot = pos_source_grid.round()
    # and compute the distance between the two
    r = modulus(pos_source_grid - knot)
    # store the results
    source['I_GRID'] = knot[0]
    source['J_GRID'] = knot[1]
    source['KNOT_DIST'] = r
    source['MEAN_DIST_TO_NGBRS'] = 0   # default, refined below for inner knots
    #if (modulus(knot) < 1):
    #    print source['I_GRID'], source['J_GRID'], source['KNOT_DIST'], pos_source_grid, pos_source, source['X_IMAGE'], source['Y_IMAGE']

# Hunt for duplicates
# 81*81 array containing:
# field 0: number of sources attached to the knot
# field 1: current distance between the knot and the source
# field 2: current index of the source in catalog
# field 3: mean distance to the neighbours (computed later)
lookup_sources = zeros((81, 81, 4))
for si in range(len(catalog)):
    source = catalog[si]
    i = int(source['I_GRID']) + 40
    j = int(source['J_GRID']) + 40
    if ((lookup_sources[i, j, 0] < 1) or
            (lookup_sources[i, j, 1] > source['KNOT_DIST'])):
        lookup_sources[i, j, 2] = si
        lookup_sources[i, j, 1] = source['KNOT_DIST']
    lookup_sources[i, j, 0] += 1

# Discard knot (1,1), too close to the "P"
lookup_sources[41, 41, 0] = 0
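
# Optional, illustrative sanity check: uncomment to report how many grid
# knots received at least one detection (occupied_knots is a scratch
# variable, not used elsewhere in the script).
## occupied_knots = (lookup_sources[:, :, 0] > 0).sum()
## print "Occupied grid knots :", occupied_knots, "/", 81 * 81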

# Compute the mean distance between neighbouring knots
for i in range(1, 80):
    for j in range(1, 80):
        if (lookup_sources[i, j, 0] > 0):
            knot_idx = int(lookup_sources[i, j, 2])
            catalog[knot_idx]['MEAN_DIST_TO_NGBRS'] = 0
            nb_voisin = 0
            distance = 0
            pos_source = array([catalog[knot_idx]['X_IMAGE'],
                                catalog[knot_idx]['Y_IMAGE']])
            for shifts in [[0, 1], [1, 0], [-1, 0], [0, -1]]:
                i_voisin = i + shifts[0]
                j_voisin = j + shifts[1]
                if (lookup_sources[i_voisin, j_voisin, 0] > 0):
                    neigh_idx = int(lookup_sources[i_voisin, j_voisin, 2])
                    nb_voisin += 1
                    pos_voisin = array([catalog[neigh_idx]['X_IMAGE'],
                                        catalog[neigh_idx]['Y_IMAGE']])
                    distance += modulus(pos_source - pos_voisin)
            if (nb_voisin != 0):
                catalog[knot_idx]['MEAN_DIST_TO_NGBRS'] = distance / nb_voisin
                lookup_sources[i, j, 3] = distance / nb_voisin

cleaned_catalog = []
for i in range(81):
    for j in range(81):
        if (lookup_sources[i, j, 0] > 0):
            cleaned_catalog.append(catalog[int(lookup_sources[i, j, 2])])

# Write out the catalogue cleaned of duplicates
of = open('clean.cat', 'w')
fields = ["FLUX_MAX",
          "X_IMAGE",
          "Y_IMAGE",
          "THETA_IMAGE",
          "ELONGATION",
          "ELLIPTICITY",
          "FWHM_IMAGE",
          "ERRX2_IMAGE",
          "ERRY2_IMAGE",
          "I_GRID",
          "J_GRID",
          "KNOT_DIST",
          "MEAN_DIST_TO_NGBRS"]
fmts = ["%10.3f ",
        "%8.3f ",
        "%8.3f ",
        "%8.3f ",
        "%8.3f ",
        "%8.3f ",
        "%8.3f ",
        "%8.5f ",
        "%8.5f ",
        "%5d ",
        "%5d ",
        "%5.3f ",
        "%8.3f"]
for source in cleaned_catalog:
    outs = ""
    for i in range(len(fields)):
        outs += fmts[i] % source[fields[i]]
    outs += "\n"
    of.write(outs)
of.close()

# Generate a FITS file containing the results
import pyfits

#a = zeros((2048,2048))
#
# patch width in pixels:
## width = int(modulus(vec_i))
## for i in range(len(cleaned_catalog)):
##     print i
##     source = cleaned_catalog[i]
##     pos_source = array([int(source['X_IMAGE']), int(source['Y_IMAGE'])])
##     pos_mean_dist = source['MEAN_DIST_TO_NGBRS']
##     for rel_x in range(-width, width):
##         for rel_y in range(-width, width):
##             rel_vec = array([rel_x, rel_y])
##             # Shape of the patch: square
##             if ((dot(rel_vec, vec_i) < vec_i_2 / 2) and
##                 (dot(rel_vec, vec_j) < vec_j_2 / 2)):
##                 (x, y) = pos_source + rel_vec
##                 if (x > 0 and x < 2048 and y > 0 and y < 2048):
##                     a[x, y] = pos_mean_dist

a = lookup_sources[:, :, 3]

hdu = pyfits.PrimaryHDU(a)
hdulist = pyfits.HDUList([hdu])
hdulist.writeto(options.outputfits, clobber=True)

# Mean scale factor
tmp = []
for i in range(81):
    for j in range(81):
        if (lookup_sources[i, j, 0] > 0):
            tmp.append(lookup_sources[i, j, 3])
print "Mean of the measurements :", sum(tmp) / len(tmp)

# Norm of the unit vectors
print "Norm of vec_i :", modulus(vec_i)
print "Norm of vec_j :", modulus(vec_j)
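
# Example invocation (the script and catalogue names below are placeholders;
# only the -c/-o options defined above are taken from this file):
#   python mires.py -c mire.cat -o output.fits
# The de-duplicated catalogue is written to 'clean.cat' and the map of mean
# knot-to-neighbour distances to the FITS file given with -o.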