HCE Project — Python language bindings, v2.0.0-chaika.
Components: Distributed Tasks Manager Application, Distributed Crawler Application, and client API bindings.
File: ftest_Metrics.py — functional tests for the Metrics module (recovered from the generated documentation listing of this file).
#coding: utf-8
'''
Created on Nov 6, 2015

@author: scorp
'''
import ppath  # NOTE(review): presumably sets up sys.path; kept first so the project imports below resolve
import unittest
import copy
from app.Metrics import Metrics
from dc_processor.scraper_result import Result
12 
class Test(unittest.TestCase):
    """Functional tests for app.Metrics.Metrics against scraper Result fixtures.

    NOTE(review): the documentation extraction this file was recovered from
    dropped the ``def`` headers of test_02..test_05; they are restored here
    from the trailing member-function index and the print banner inside each
    method body.
    """

    def __init__(self, methodName='runTest'):
        """Register metric modules once and build the shared Result fixture.

        @param methodName: name of the test method to run (unittest protocol).
        """
        unittest.TestCase.__init__(self, methodName)
        Metrics.fillMetricModulesList()
        self.result = Result(None, "a1")
        self.fillResultData()

    def fillResultData(self):
        """Populate self.result.tags with three fixture tags.

        tagA and tagB carry non-empty scraped data (tagB has one extra
        fragment); tagC holds only an empty string — so tag-presence metrics
        should see 2 of 3 tags as filled (see test_02).
        """
        fixtures = [
            ("tagA", ["", u"test1 43 35../ 3./утка"]),
            ("tagB", ["", u"test1 43 35../ 3./утка", u"FINISH him. !!! ehf"]),
            ("tagC", [""]),
        ]
        for name, values in fixtures:
            self.result.tags[name] = {"data": values, "name": name,
                                      "xpath": "//", "extractor": ""}

    def test_01_MetricInit(self):
        """All six metric names must be registered by fillMetricModulesList()."""
        print(">>> testMetricInit start")
        self.assertEqual(len(Metrics.AVAILABLE_METRICS), 6,
                         ">>> Metrics.AVAILABLE_METRICS Size != 6")
        for metricName in ("METRIC_TAGS_COUNT", "METRIC_TAGS_COUNT_PERCENT",
                           "METRIC_WORDS_COUNT", "METRIC_WORDS_COUNT_PERCENT",
                           "METRIC_CONTENT_SIZE", "METRIC_CONTENT_SIZE_PERCENT"):
            self.assertIn(metricName, Metrics.AVAILABLE_METRICS,
                          ">>> %s not in Metrics.AVAILABLE_METRICS" % metricName)

    def test_02_MetricTagsCount(self):
        """Tag-count metrics: 2 of 3 fixture tags are non-empty, i.e. 66%."""
        print(">>> test_02_MetricTagsCount start")
        metricResult = {"METRIC_TAGS_COUNT": None, "METRIC_TAGS_COUNT_PERCENT": None}
        Metrics.metricsPrecalculate(metricResult, self.result)
        self.assertEqual(metricResult["METRIC_TAGS_COUNT"], 2,
                         ">>> test_02_MetricTagsCount METRIC_TAGS_COUNT != 2")
        self.assertEqual(metricResult["METRIC_TAGS_COUNT_PERCENT"], 66,
                         ">>> test_02_MetricTagsCount METRIC_TAGS_COUNT_PERCENT != 66")

    def test_03_MetricContentSize(self):
        """Word-count metrics must be positive, with the percent strictly below 100."""
        print(">>> test_03_MetricContentSize start")
        metricResult = {"METRIC_TAGS_COUNT": None, "METRIC_TAGS_COUNT_PERCENT": None,
                        "METRIC_WORDS_COUNT": None, "METRIC_WORDS_COUNT_PERCENT": None}
        Metrics.metricsPrecalculate(metricResult, self.result)
        self.assertTrue(metricResult["METRIC_WORDS_COUNT"] > 0,
                        ">>> test_03_MetricContentSize METRIC_WORDS_COUNT <= 0")
        self.assertTrue(0 < metricResult["METRIC_WORDS_COUNT_PERCENT"] < 100,
                        ">>> test_03_MetricContentSize METRIC_WORDS_COUNT_PERCENT <= 0 or > 100")

    def test_04_MetricWCount(self):
        """Content-size metrics must be positive, with the percent strictly below 100."""
        print(">>> test_04_MetricWCount start")
        metricResult = {"METRIC_TAGS_COUNT": None, "METRIC_TAGS_COUNT_PERCENT": None,
                        "METRIC_WORDS_COUNT": None, "METRIC_WORDS_COUNT_PERCENT": None,
                        "METRIC_CONTENT_SIZE": None, "METRIC_CONTENT_SIZE_PERCENT": None}
        Metrics.metricsPrecalculate(metricResult, self.result)
        self.assertTrue(metricResult["METRIC_CONTENT_SIZE"] > 0,
                        ">>> test_04_MetricWCount METRIC_CONTENT_SIZE <= 0")
        self.assertTrue(0 < metricResult["METRIC_CONTENT_SIZE_PERCENT"] < 100,
                        ">>> test_04_MetricWCount METRIC_CONTENT_SIZE_PERCENT <= 0 or > 100")

    def test_05_MetricWCountSort(self):
        """Exercise sortElementsByMetric on results whose metrics dicts variously
        lack METRIC_TAGS_COUNT, hold None, or hold real values.

        NOTE(review): no assertion here (output was inspected manually in the
        original); the expected ordering of None/missing keys is unverified.
        """
        print(">>> test_05_MetricWCountSort start")
        resultList = [Result(None, "a%d" % i) for i in range(5)]
        metricResult = {"METRIC_WORDS_COUNT": None, "METRIC_WORDS_COUNT_PERCENT": None,
                        "METRIC_CONTENT_SIZE": None, "METRIC_CONTENT_SIZE_PERCENT": None}
        # a0, a1: no METRIC_TAGS_COUNT key at all
        resultList[0].metrics = copy.deepcopy(metricResult)
        resultList[1].metrics = copy.deepcopy(metricResult)
        # a2: key present but value is None
        metricResult["METRIC_TAGS_COUNT"] = None
        metricResult["METRIC_TAGS_COUNT_PERCENT"] = None
        resultList[2].metrics = copy.deepcopy(metricResult)
        # a3, a4: real values, out of order (10 then 2) so sorting is observable
        metricResult["METRIC_TAGS_COUNT"] = 10
        metricResult["METRIC_TAGS_COUNT_PERCENT"] = 5
        resultList[3].metrics = copy.deepcopy(metricResult)
        metricResult["METRIC_TAGS_COUNT"] = 2
        resultList[4].metrics = copy.deepcopy(metricResult)
        for elem in resultList:
            print(str(elem.resId))
        print(">>>>>>>>>>>><<<<<<<<<<<<")
        newElems = Metrics.sortElementsByMetric(resultList, "METRIC_TAGS_COUNT")
        for elem in newElems:
            print(str(elem.resId))
if __name__ == "__main__":
    # Discover and run every test method of this module; a single test can be
    # selected from the command line, e.g.: python ftest_Metrics.py Test.test_01_MetricInit
    unittest.main()
Member-function index from the generated documentation (the source of the restored method headers):
  def __init__(self, methodName='runTest')
  def test_03_MetricContentSize(self)
  def test_05_MetricWCountSort(self)