This algorithm computes a keystroke-dynamics verification score by comparing a probe feature vector (hold times and release-press latencies) against an enrolled template using a scaled, standard-deviation-normalized distance.
Algorithms have at least one input and one output. All algorithm endpoints are organized in groups. Groups are used by the platform to indicate which inputs and outputs are synchronized together. The first group is automatically synchronized with the channel defined by the block in which the algorithm is deployed.
Endpoint Name | Data Format | Nature |
---|---|---|
comparison_ids | system/array_1d_text/1 | Input |
keystroke | tutorial/atvs_keystroke/1 | Input |
probe_client_id | system/text/1 | Input |
scores | elie_khoury/string_probe_scores/1 | Output |
Endpoint Name | Data Format | Nature |
---|---|---|
template_client_id | system/text/1 | Input |
template_id | system/text/1 | Input |
model_template | aythamimm/keystroke_model/6 | Input |
Parameters allow users to change the configuration of an algorithm when scheduling an experiment
Name | Description | Type | Default | Range/Choices |
---|---|---|---|---|
field | Which typed field of the keystroke sample is compared (or all five averaged) | string | given_name | given_name, family_name, email, nationality, id_number, all_five |
xxxxxxxxxx
import numpy
def scaled_distance(in1, in2, in3):
    """Score a probe against a gallery template with a scaled distance.

    The score is the inverse of the mean absolute deviation between the
    probe and the gallery mean, normalized per feature by the gallery
    standard deviation.  Higher scores mean a closer match.

    Parameters:
        in1: gallery per-feature means (sequence of floats)
        in2: gallery per-feature standard deviations (sequence of floats)
        in3: probe feature vector (sequence of floats)

    Returns:
        A scalar similarity score (numpy float).
    """
    mean_gal_features = numpy.asarray(in1, dtype=float)
    std_gal_features = numpy.asarray(in2, dtype=float)
    probe_features = numpy.asarray(in3, dtype=float)
    n = len(probe_features)

    # Floor tiny standard deviations at 20% of the mean std so that a
    # near-constant feature cannot dominate the normalized distance.
    # NOTE: this works on a copy — the previous implementation clamped the
    # caller's list in place, which shifted the threshold (and the score)
    # on subsequent calls with the same template.
    std_threshold = 0.2 * float(numpy.mean(std_gal_features))
    clamped_std = numpy.maximum(std_gal_features[:n], std_threshold)

    dif = numpy.abs(mean_gal_features[:n] - probe_features) / clamped_std
    return 1 / numpy.average(dif)
class Algorithm:
    """Keystroke-dynamics scoring algorithm.

    On the first call to ``process`` every enrollment template is read from
    the ``model_template`` group and cached; each subsequent probe is then
    scored with ``scaled_distance`` against the templates listed in
    ``comparison_ids``.  The ``field`` parameter selects which typed field
    is compared; ``all_five`` averages the per-field scores.
    """

    # Concrete fields present in the keystroke data format; 'all_five'
    # expands to this full tuple.
    FIELDS = ('given_name', 'family_name', 'email', 'nationality', 'id_number')

    def __init__(self):
        self.templates = None  # template_id -> dict(client_id, models); filled lazily
        self.field = 'given_name'

    def setup(self, parameters):
        """Read the 'field' parameter, keeping the default when absent."""
        self.field = parameters.get('field', self.field)
        return True

    def _selected_fields(self):
        # 'all_five' means one score per concrete field, averaged later.
        return self.FIELDS if self.field == 'all_five' else (self.field,)

    @staticmethod
    def _feature_vector(data, field):
        """Probe features for one field: hold times then release-press
        latencies, converted to floats."""
        hold = getattr(data.holdtime, field)
        latency = getattr(data.rplatency, field)
        return [float(v) for v in hold] + [float(v) for v in latency]

    def process(self, inputs, outputs):
        fields = self._selected_fields()

        # Retrieve all the templates once (first call only).
        if self.templates is None:
            self.templates = {}
            group = inputs.groupOf('model_template')
            while group.hasMoreData():
                group.next()
                model = group['model_template'].data
                # One (average, std) pair per selected field; the model data
                # format exposes them as '<field>_average' / '<field>_std'.
                self.templates[group['template_id'].data.text] = dict(
                    client_id=group['template_client_id'].data.text,
                    models=[(getattr(model, f + '_average'),
                             getattr(model, f + '_std')) for f in fields],
                )

        # Build the probe feature vector(s) for the selected field(s).
        data = inputs['keystroke'].data
        probes = [self._feature_vector(data, f) for f in fields]

        scores = []
        for comparison_id in inputs['comparison_ids'].data.text:
            template = self.templates[comparison_id]
            per_field = [scaled_distance(average, std, probe)
                         for (average, std), probe in zip(template['models'], probes)]
            # Single field: the raw score; 'all_five': mean over the five.
            score = per_field[0] if len(per_field) == 1 else numpy.average(per_field)
            scores.append({
                'template_identity': template['client_id'],
                'score': score,
            })

        outputs['scores'].write({
                'client_identity': inputs['probe_client_id'].data.text,
                'scores': scores,
            },
            end_data_index=inputs['probe_client_id'].data_index_end
        )
        return True
The code for this algorithm in Python
The ruler at 80 columns indicates suggested POSIX line breaks (for readability).
The editor will automatically enlarge to accommodate the entirety of your input.
Use keyboard shortcuts for search/replace and faster editing. For example, use Ctrl-F (PC) or Cmd-F (Mac) to search through this box
For a given set of feature vectors, a Gaussian Mixture Model (GMM) of the target client and a UBM-GMM, this algorithm computes the score using the linear scoring implemented in `Bob <https://www.idiap.ch/software/bob/docs/releases/last/sphinx/html/machine/generated/bob.machine.linear_scoring.html?highlight=linear%20scoring#bob.machine.linear_scoring>`_.
Updated | Name | Databases/Protocols | Analyzers | |||
---|---|---|---|---|---|---|
aythamimm/aythamimm/btas15_keystroke_experiments/6/BTAS_2015_Kesytroke_Experiment | atvskeystroke/1@A | aythamimm/analyzer_keystroke/70 |
This table shows the number of times this algorithm has been successfully run using the given environment. Note this does not provide sufficient information to evaluate if the algorithm will run when submitted to different conditions.