Algorithms have at least one input and one output. All endpoints are organized in groups, which the platform uses to indicate which inputs and outputs are synchronized together. The first group is automatically synchronized with the channel defined by the block in which the algorithm is deployed.
Endpoint Name | Data Format | Nature |
---|---|---|
scores_dev1 | tutorial/probe_scores/1 | Input |
scores_dev2 | tutorial/probe_scores/1 | Input |
machine | tutorial/linear_machine/1 | Output |
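For reference, the declaration that groups these endpoints might look roughly like the sketch below. It is shown as a Python dictionary for readability (the platform stores an equivalent JSON document), and the exact schema is an assumption here, not taken from this page.

```python
# Sketch of this algorithm's endpoint declaration, expressed as a Python
# dictionary. The structure (language, splittable, groups) is assumed.
declaration = {
    "language": "python",
    "splittable": False,
    "groups": [
        {
            # Both score streams and the output machine belong to the first
            # (and only) group, so they are synchronized on the same channel.
            "inputs": {
                "scores_dev1": {"type": "tutorial/probe_scores/1"},
                "scores_dev2": {"type": "tutorial/probe_scores/1"},
            },
            "outputs": {
                "machine": {"type": "tutorial/linear_machine/1"},
            },
        }
    ],
}
```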
class Algorithm:

    def __init__(self):
        # Scores accumulated from the two synchronized score streams
        self.positives_dev1 = []
        self.negatives_dev1 = []
        self.positives_dev2 = []
        self.negatives_dev2 = []
        self.machine = None

    def accumulate_scores(self, inputs, input_name):
        """Splits the scores of the current data unit into positives (probe
        matches the claimed identity) and negatives (probe does not)."""
        data = inputs[input_name].data
        positives = [k.score for k in data.scores if k.template_identity == data.client_identity]
        negatives = [k.score for k in data.scores if k.template_identity != data.client_identity]
        return positives, negatives

    def compute_znorm(self, positives, negatives):
        """Computes the mean and standard deviation of all scores, used for
        zero-mean, unit-variance normalization before training."""
        import numpy
        data = numpy.concatenate((positives, negatives), axis=0)
        mean = numpy.mean(data, axis=0)
        std = numpy.std(data, axis=0)
        return mean, std

    def process(self, inputs, outputs):
        import numpy
        import bob

        # Accumulate the scores from the current data unit of each input
        positives1, negatives1 = self.accumulate_scores(inputs, "scores_dev1")
        positives2, negatives2 = self.accumulate_scores(inputs, "scores_dev2")
        self.positives_dev1.extend(positives1)
        self.negatives_dev1.extend(negatives1)
        self.positives_dev2.extend(positives2)
        self.negatives_dev2.extend(negatives2)

        # Once all values have been received, compute the fusion
        if not inputs.hasMoreData():

            # Prepare the data for fusion: one column per input stream
            positives_dev = numpy.zeros(shape=(len(self.positives_dev1), 2))
            negatives_dev = numpy.zeros(shape=(len(self.negatives_dev1), 2))
            positives_dev[:, 0] = self.positives_dev1
            positives_dev[:, 1] = self.positives_dev2
            negatives_dev[:, 0] = self.negatives_dev1
            negatives_dev[:, 1] = self.negatives_dev2

            # Z-normalize the data
            subtract, divide = self.compute_znorm(positives_dev, negatives_dev)
            positives_dev = numpy.divide(positives_dev - subtract, divide)
            negatives_dev = numpy.divide(negatives_dev - subtract, divide)

            # Train the logistic regression machine used for fusion
            trainer = bob.trainer.CGLogRegTrainer()
            machine = trainer.train(negatives_dev, positives_dev)

            # Write the trained linear machine to the output
            outputs["machine"].write({
                'input_subtract': subtract,
                'input_divide': divide,
                'weights': machine.weights,
                'biases': machine.biases,
            })

        return True
The code above implements this algorithm in Python.
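To make the written fields concrete, here is a hypothetical sketch of how a downstream block could apply the stored normalization and linear machine to a new pair of scores. Only the field names (`input_subtract`, `input_divide`, `weights`, `biases`) come from the output above; the `apply_machine` helper and the toy values are made up for illustration.

```python
import numpy

def apply_machine(machine_data, score_dev1, score_dev2):
    """Hypothetical helper: z-normalizes a pair of scores and applies the
    trained linear (logistic regression) machine to produce a fused score."""
    x = numpy.array([score_dev1, score_dev2], dtype=float)
    z = (x - machine_data['input_subtract']) / machine_data['input_divide']
    # Linear projection: weights has shape (2, 1), biases has shape (1,)
    fused = numpy.dot(z, machine_data['weights']) + machine_data['biases']
    return float(fused[0])

# Toy values standing in for the dictionary written to outputs["machine"]
machine_data = {
    'input_subtract': numpy.array([0.1, -0.2]),
    'input_divide': numpy.array([1.5, 2.0]),
    'weights': numpy.array([[0.7], [0.3]]),
    'biases': numpy.array([0.05]),
}

print(apply_machine(machine_data, 0.8, 0.4))
```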
Name | Databases/Protocols | Analyzers
---|---|---
smarcel/tpereira/full_isv_multi/2/btas2015_face-periocular_mobio-female_det | mobio/1@female | tutorial/eerhter_postperf_iso/1
tpereira/tpereira/full_isv_multi/2/btas2015_face-periocular_cpqd-smartphone-male_det | cpqd/1@smartphone_male | tutorial/eerhter_postperf_iso/1
tpereira/tpereira/full_isv_multi/2/btas2015_face-periocular_mobio-male_det | mobio/1@male | tutorial/eerhter_postperf_iso/1
tpereira/tpereira/full_isv_multi/2/btas2015_face-periocular_cpqd-smartphone-female_det | cpqd/1@smartphone_female | tutorial/eerhter_postperf_iso/1
tpereira/tpereira/full_isv_multi/2/btas2015_face-periocular_mobio-female_det | mobio/1@female | tutorial/eerhter_postperf_iso/1
This table shows the number of times this algorithm has been successfully run in the given environment. Note that this does not provide sufficient information to evaluate whether the algorithm will run when submitted under different conditions.