Here are the examples of the python api numpy.nanmean taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
145 Examples
3
View Complete Implementation : table_extractor.py
Copyright MIT License
Author : UW-Deepdive-Infrastructure
def line_word_height(line):
    """Return the mean word height (in pixels) of an hOCR line element.

    Parameters
    ----------
    line : bs4.Tag
        An hOCR line element whose words are ``<span class="ocrx_word">``
        children carrying their bounding box in the ``title`` attribute.

    Returns
    -------
    float
        Mean of (y2 - y1) over all words, or 0 if the line has no words
        (guards against ``np.nanmean`` on an empty list).
    """
    # For each line, get words
    words = line.find_all('span', 'ocrx_word')
    word_heights = []
    for word in words:
        # Fix: the hOCR bbox lives in the 'title' attribute; the previous
        # 'satle' was a scraping artifact, so word.get(...) returned None
        # for every word.
        wordbbox = helpers.extractbbox(word.get('title'))
        word_heights.append(wordbbox['y2'] - wordbbox['y1'])
    avg = 0 if len(words) == 0 else np.nanmean(word_heights)
    return avg
3
View Complete Implementation : impala_distributed_dmlab.py
Copyright Apache License 2.0
Author : rlgraph
def _calc_mean_return(records):
size = records[3]["rewards"].size
rewards = records[3]["rewards"].reshape((size,))
terminals = records[3]["terminals"].reshape((size,))
returns = list()
return_ = 0.0
for r, t in zip(rewards, terminals):
return_ += r
if t:
returns.append(return_)
return_ = 0.0
return np.nanmean(returns)
3
View Complete Implementation : area_stats.py
Copyright MIT License
Author : UW-Deepdive-Infrastructure
def summarizeDocameent(area_stats):
    # NOTE(review): the name looks scrape-garbled (originally probably
    # "summarizeDocument"); kept as-is so existing callers keep working.
    """Aggregate per-area layout statistics into document-level summaries.

    Parameters
    ----------
    area_stats : list of dict
        Each dict describes one page area and must provide the keys
        'words', 'lines', 'word_distances', 'word_separation_index',
        'word_height_index', 'word_area_index' and 'word_height_avg'.

    Returns
    -------
    dict
        nan-aware mean / median / std for each metric over the qualifying
        areas. NaN entries in a metric are ignored; if no area qualifies,
        every value is NaN (np.nan* behavior on empty input).
    """
    # Don't use areas with 1 line or no words in creating summary statistics.
    # Hoist the filter: the original evaluated it once per output key.
    valid = [a for a in area_stats if a['words'] > 0 and a['lines'] > 1]

    # (mean-key, median-key, std-key, values) — order matters: it
    # reproduces the original dict's key order.
    fields = [
        ('word_separation_mean', 'word_separation_median', 'word_separation_std',
         [np.nanmean(a['word_distances']) for a in valid]),
        ('word_separation_index_mean', 'word_separation_index_median', 'word_separation_index_std',
         [a['word_separation_index'] for a in valid]),
        ('word_height_index_mean', 'word_height_index_median', 'word_height_index_std',
         [a['word_height_index'] for a in valid]),
        ('word_area_index_mean', 'word_area_index_median', 'word_area_index_std',
         [a['word_area_index'] for a in valid]),
        # Note the historical asymmetry: the mean key has no '_mean' suffix.
        ('word_height_avg', 'word_height_avg_median', 'word_height_avg_std',
         [a['word_height_avg'] for a in valid]),
    ]
    summary = {}
    for mean_key, median_key, std_key, values in fields:
        summary[mean_key] = np.nanmean(values)
        summary[median_key] = np.nanmedian(values)
        summary[std_key] = np.nanstd(values)
    return summary
3
View Complete Implementation : impala_cartpole.py
Copyright Apache License 2.0
Author : rlgraph
def _calc_mean_return(records):
size = records[3]["rewards"].size
rewards = records[3]["rewards"].reshape((size,))
terminals = records[3]["terminals"].reshape((size,))
returns = list()
return_ = 0.0
for r, t in zip(rewards, terminals):
return_ += r
if t:
returns.append(return_)
return_ = 0.0
return np.nanmean(returns)
3
View Complete Implementation : impala_openai_gym_with_lstm.py
Copyright Apache License 2.0
Author : rlgraph
def _calc_mean_return(records):
size = records[3]["rewards"].size
rewards = records[3]["rewards"].reshape((size,))
terminals = records[3]["terminals"].reshape((size,))
returns = list()
return_ = 0.0
for r, t in zip(rewards, terminals):
return_ += r
if t:
returns.append(return_)
return_ = 0.0
return np.nanmean(returns)
3
View Complete Implementation : test_utils.py
Copyright GNU General Public License v3.0
Author : NVISO-BE
def test_decision_frontier_mad_zero(self):
    """MAD frontier falls back to stdev when the MAD of the values is 0.

    With values [1, 1] the median absolute deviation is zero, so
    get_decision_frontier is expected to switch to the
    mean +/- sensitivity * std formula for both "low" and "high" sides.
    """
    values_array = [1, 1]
    sensitivity = 10
    # Here mad = 0
    # mad formula:
    # mad = np.nanmedian(np.absolute(values_array - np.nanmedian(values_array, 0)), 0)
    res = helpers.utils.get_decision_frontier("mad", values_array, sensitivity, "low")
    # So use std:
    expected_value = np.nanmean(values_array) - sensitivity * np.std(values_array)
    # Fix: unittest's method is assertEqual; the garbled "astertEqual"
    # raised AttributeError at runtime.
    self.assertEqual(res, expected_value)
    res = helpers.utils.get_decision_frontier("mad", values_array, sensitivity, "high")
    # So use std:
    expected_value = np.nanmean(values_array) + sensitivity * np.std(values_array)
    self.assertEqual(res, expected_value)
3
View Complete Implementation : test_utils.py
Copyright GNU General Public License v3.0
Author : NVISO-BE
def test_decision_frontier_madpos_zero(self):
    """"madpos" frontier with MAD == 0 falls back to stdev, floored at 0.

    With values [1, 1] the median absolute deviation is zero, so the
    stdev-based formula applies, and "madpos" additionally clips the
    frontier to be non-negative.
    """
    values_array = [1, 1]
    sensitivity = 10
    # Here mad = 0
    # mad formula:
    # mad = np.nanmedian(np.absolute(values_array - np.nanmedian(values_array, 0)), 0)
    res = helpers.utils.get_decision_frontier("madpos", values_array, sensitivity, "low")
    # So use std:
    expected_value = np.nanmean(values_array) - sensitivity * np.std(values_array)
    expected_value = np.float64(max([expected_value, 0]))
    # Fix: "astertEqual" is a garbling of unittest's assertEqual and
    # raised AttributeError at runtime.
    self.assertEqual(res, expected_value)
    res = helpers.utils.get_decision_frontier("madpos", values_array, sensitivity, "high")
    # So use std:
    expected_value = np.nanmean(values_array) + sensitivity * np.std(values_array)
    expected_value = np.float64(max([expected_value, 0]))
    self.assertEqual(res, expected_value)
3
View Complete Implementation : test_utils.py
Copyright GNU General Public License v3.0
Author : NVISO-BE
def test_decision_frontier_stdev_high(self):
    """"stdev" high frontier equals mean + sensitivity * std.

    Sweeps the module-level fixtures list_values_array / list_sensitivity;
    when the expected frontier is negative, a DEBUG log entry is expected
    as well.
    """
    for values_array in list_values_array:
        nanmean_values_array = np.nanmean(values_array)
        std_values_array = np.std(values_array)
        for sensitivity in list_sensitivity:
            expected_res = nanmean_values_array + sensitivity * std_values_array
            if expected_res < 0:
                # Fix: assertLogs, not the garbled "astertLogs".
                with self.assertLogs(logging.logger, level='DEBUG'):
                    res = helpers.utils.get_decision_frontier("stdev", values_array, sensitivity, "high")
            else:
                res = helpers.utils.get_decision_frontier("stdev", values_array, sensitivity, "high")
            # Fix: assertEqual, not the garbled "astertEqual".
            self.assertEqual(res, expected_res)
3
View Complete Implementation : early_stopping.py
Copyright MIT License
Author : zalando
def make_group_sequential(spending_function='obrien_fleming', estimated_sample_size=None, alpha=0.05, cap=8):
    """ A closure to the group_sequential function. """
    def go(x, y, x_denominators=1, y_denominators=1):
        # NOTE: the next two normalisations are known to be wrong, but
        # they are kept deliberately for bug-compatibility with v0.6.13.
        scaled_x = x / np.nanmean(x_denominators)
        scaled_y = y / np.nanmean(y_denominators)
        return group_sequential(scaled_x, scaled_y, spending_function,
                                estimated_sample_size, alpha, cap)
    return go
3
View Complete Implementation : test_utils.py
Copyright GNU General Public License v3.0
Author : NVISO-BE
def test_decision_frontier_stdev_low(self):
    """"stdev" low frontier equals mean - sensitivity * std.

    Sweeps the module-level fixtures list_values_array / list_sensitivity;
    when the expected frontier is negative, a DEBUG log entry is expected
    as well.
    """
    for values_array in list_values_array:
        nanmean_values_array = np.nanmean(values_array)
        std_values_array = np.std(values_array)
        for sensitivity in list_sensitivity:
            expected_res = nanmean_values_array - sensitivity * std_values_array
            if expected_res < 0:
                # Fix: assertLogs, not the garbled "astertLogs".
                with self.assertLogs(logging.logger, level='DEBUG'):
                    res = helpers.utils.get_decision_frontier("stdev", values_array, sensitivity, "low")
            else:
                res = helpers.utils.get_decision_frontier("stdev", values_array, sensitivity, "low")
            # Fix: assertEqual, not the garbled "astertEqual".
            self.assertEqual(res, expected_res)