def _expect_vars(vs=None):
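    """Normalize vs to a list of Variable objects.

    Accepts None (returns an empty list), a single Variable, or an
    iterable of Variable; raises TypeError for any other element type.
    """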
if vs is None:
return list()
elif isinstance(vs, Variable):
return [vs]
else:
checked = list()
for v in vs:
if isinstance(v, Variable):
checked.append(v)
else:
fstr = "expected Variable, got {0.__name__}"
raise TypeError(fstr.format(type(v)))
return checked |
package com.xjj;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Created by cheng on 2017/8/15.
*/
public class Came {
    /**
     * Convert an underscore-separated string to camel case.
     * @param line the source string
     * @param smallCamel true for lower camel case, false for upper camel case
     * @return the converted string
     */
    public static String underline2Camel(String line, boolean smallCamel) {
        if (line == null || "".equals(line)) {
            return "";
        }
        StringBuilder sb = new StringBuilder();
        Pattern pattern = Pattern.compile("([A-Za-z\\d]+)(_)?");
        Matcher matcher = pattern.matcher(line);
        while (matcher.find()) {
            String word = matcher.group();
            // For the first word, lower-case the first letter if lower camel case
            // was requested; otherwise upper-case it.
            sb.append(smallCamel && matcher.start() == 0
                    ? Character.toLowerCase(word.charAt(0))
                    : Character.toUpperCase(word.charAt(0)));
            // Append the rest of the word, dropping the trailing underscore
            // captured by the regex, if any.
            int index = word.lastIndexOf('_');
            if (index > 0) {
                sb.append(word.substring(1, index).toLowerCase());
            } else {
                sb.append(word.substring(1).toLowerCase());
            }
        }
        return sb.toString();
    }
public static void main(String[] args) {
System.out.println(underline2Camel("rule_name", true));
}
}
|
from skdecide.builders.discrete_optimization.generic_tools.graph_api import Graph
from skdecide.builders.discrete_optimization.rcpsp.rcpsp_model import RCPSPSolution, RCPSPModel, RCPSPModelCalendar
from typing import List, Optional, Tuple, Union
import matplotlib.pyplot as plt
from copy import deepcopy
import numpy as np
from shapely.geometry import Polygon
from matplotlib.patches import Polygon as pp
from matplotlib.collections import PatchCollection
import matplotlib.cm
import scipy.stats
def compute_resource_consumption(rcpsp_model: RCPSPModel,
rcpsp_sol: RCPSPSolution,
                                 list_resources: Optional[List[Union[int, str]]] = None,
future_view=True):
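    """Compute per-resource consumption over time for a schedule.

    Returns an array of shape (n_resources, makespan + 1) and the matching
    time axis. With future_view=True the consumption of each activity is
    accounted one time step later than with future_view=False.
    """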
modes_extended = deepcopy(rcpsp_sol.rcpsp_modes)
modes_extended.insert(0, 1)
modes_extended.append(1)
last_activity = max(rcpsp_sol.rcpsp_schedule)
makespan = rcpsp_sol.rcpsp_schedule[last_activity]['end_time']
if list_resources is None:
list_resources = rcpsp_model.resources_list
consumptions = np.zeros((len(list_resources), makespan + 1))
for act_id in rcpsp_sol.rcpsp_schedule:
for ir in range(len(list_resources)):
use_ir = rcpsp_model.mode_details[act_id][modes_extended[act_id - 1]][list_resources[ir]]
if future_view:
consumptions[ir, rcpsp_sol.rcpsp_schedule[act_id]["start_time"] + 1:rcpsp_sol.rcpsp_schedule[act_id][
"end_time"] + 1] += use_ir
else:
consumptions[ir, rcpsp_sol.rcpsp_schedule[act_id]["start_time"]:rcpsp_sol.rcpsp_schedule[act_id]["end_time"]] += use_ir
return consumptions, np.arange(0, makespan+1, 1)
def compute_nice_resource_consumption(rcpsp_model: RCPSPModel, rcpsp_sol: RCPSPSolution,
                                      list_resources: Optional[List[Union[int, str]]] = None):
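    """Build step-plot arrays of resource consumption.

    Each time point is duplicated with its future-view and past-view
    consumption so that plotting the merged arrays draws vertical steps.
    """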
if list_resources is None:
list_resources = rcpsp_model.resources_list
c_future, times = compute_resource_consumption(rcpsp_model, rcpsp_sol,
list_resources=list_resources,
future_view=True)
c_past, times = compute_resource_consumption(rcpsp_model, rcpsp_sol,
list_resources=list_resources,
future_view=False)
merged_times = {i: [] for i in range(len(list_resources))}
merged_cons = {i: [] for i in range(len(list_resources))}
for r in range(len(list_resources)):
for index_t in range(len(times)):
merged_times[r] += [times[index_t], times[index_t]]
merged_cons[r] += [c_future[r, index_t], c_past[r, index_t]]
for r in merged_times:
merged_times[r] = np.array(merged_times[r])
merged_cons[r] = np.array(merged_cons[r])
return merged_times, merged_cons
def plot_ressource_view(rcpsp_model: RCPSPModel,
rcpsp_sol: RCPSPSolution,
                        list_resource: Optional[List[Union[int, str]]] = None,
title_figure="",
fig=None,
ax=None):
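    """Plot resource usage over time, one subplot per resource.

    Each activity is drawn as a stacked rectangle, the red curve shows the
    total consumption, and the dashed line shows the capacity (a
    calendar-dependent curve for RCPSPModelCalendar models).
    """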
modes_extended = deepcopy(rcpsp_sol.rcpsp_modes)
modes_extended.insert(0, 1)
modes_extended.append(1)
with_calendar = isinstance(rcpsp_model, RCPSPModelCalendar)
if list_resource is None:
list_resource = rcpsp_model.resources_list
if ax is None:
fig, ax = plt.subplots(nrows=len(list_resource),
figsize=(10, 5),
sharex=True)
fig.suptitle(title_figure)
polygons_ax = {i: [] for i in range(len(list_resource))}
labels_ax = {i: [] for i in range(len(list_resource))}
sorted_activities = sorted(rcpsp_sol.rcpsp_schedule, key=lambda x: rcpsp_sol.rcpsp_schedule[x]["start_time"])
for j in sorted_activities:
time_start = rcpsp_sol.rcpsp_schedule[j]["start_time"]
time_end = rcpsp_sol.rcpsp_schedule[j]["end_time"]
for i in range(len(list_resource)):
cons = rcpsp_model.mode_details[j][modes_extended[j-1]][list_resource[i]]
if cons == 0:
continue
bound = rcpsp_model.resources[list_resource[i]] if not with_calendar \
else max(rcpsp_model.resources[list_resource[i]])
for k in range(0, bound):
polygon = Polygon([(time_start, k), (time_end, k), (time_end, k+cons),
(time_start, k+cons), (time_start, k)])
areas = [p.intersection(polygon).area for p in polygons_ax[i]]
if len(areas) == 0 or max(areas) == 0:
polygons_ax[i].append(polygon)
labels_ax[i].append(j)
break
for i in range(len(list_resource)):
patches = []
for polygon in polygons_ax[i]:
x, y = polygon.exterior.xy
ax[i].plot(x, y, zorder=-1, color="b")
patches.append(pp(xy=polygon.exterior.coords))
p = PatchCollection(patches, cmap=matplotlib.cm.get_cmap('Blues'),
alpha=0.4)
ax[i].add_collection(p)
merged_times, merged_cons = compute_nice_resource_consumption(rcpsp_model, rcpsp_sol,
list_resources=list_resource)
for i in range(len(list_resource)):
ax[i].plot(merged_times[i], merged_cons[i], color="r", linewidth=2,
label="Consumption "+str(list_resource[i]), zorder=1)
if not with_calendar:
ax[i].axhline(y=rcpsp_model.resources[list_resource[i]], linestyle="--",
label="Limit : "+str(list_resource[i]), zorder=0)
else:
ax[i].plot(merged_times[i],
[rcpsp_model.resources[list_resource[i]][m]
for m in merged_times[i]], linestyle="--",
label="Limit : " + str(list_resource[i]), zorder=0)
ax[i].legend(fontsize=5)
lims = ax[i].get_xlim()
ax[i].set_xlim([lims[0], 1.*lims[1]])
return fig
def plot_task_gantt(rcpsp_model: RCPSPModel,
rcpsp_sol: RCPSPSolution,
fig=None,
ax=None,
current_t=None):
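    """Plot a simple Gantt chart with one horizontal bar per task."""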
if fig is None or ax is None:
fig, ax = plt.subplots(1,
figsize=(10, 5))
ax.set_title("Gantt Task")
tasks = sorted(rcpsp_model.mode_details.keys())
nb_task = len(tasks)
sorted_task_by_start = sorted(rcpsp_sol.rcpsp_schedule,
key=lambda x: 100000 * rcpsp_sol.rcpsp_schedule[x]["start_time"] + x)
sorted_task_by_end = sorted(rcpsp_sol.rcpsp_schedule,
key=lambda x: 100000 * rcpsp_sol.rcpsp_schedule[x]["end_time"] + x)
max_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[-1]]["end_time"]
min_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[0]]["start_time"]
patches = []
for j in range(nb_task):
nb_colors = len(tasks)//2
colors = plt.cm.get_cmap("hsv", nb_colors)
box = [(j-0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["start_time"]),
(j-0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["end_time"]),
(j+0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["end_time"]),
(j+0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["start_time"]),
(j-0.25, rcpsp_sol.rcpsp_schedule[tasks[j]]["start_time"])]
polygon = Polygon([(b[1], b[0]) for b in box])
x, y = polygon.exterior.xy
ax.plot(x, y, zorder=-1, color="b")
patches.append(pp(xy=polygon.exterior.coords,
facecolor=colors((j - 1) % nb_colors)))
p = PatchCollection(patches,
match_original=True,
#cmap=matplotlib.cm.get_cmap('Blues'),
alpha=0.4)
ax.add_collection(p)
ax.set_xlim((min_time, max_time))
ax.set_ylim((-0.5, nb_task))
ax.set_yticks(range(nb_task))
ax.set_yticklabels(tuple(["Task "+str(tasks[j]) for j in range(nb_task)]),
fontdict={"size": 7})
return fig
def compute_schedule_per_resource_individual(rcpsp_model: RCPSPModel,
rcpsp_sol: RCPSPSolution,
                                             resource_types_to_consider: Optional[List[str]] = None,
verbose=False):
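    """Greedily assign individual resource units to scheduled activities.

    Activities are processed in start-time order; each required unit is
    taken from the currently idle units of the resource, preferring the
    unit with the lowest total activity so far. Returns, per resource,
    the activity/usage arrays and the boxes used for plotting.
    """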
nb_ressources = len(rcpsp_model.resources_list)
modes_extended = deepcopy(rcpsp_sol.rcpsp_modes)
modes_extended.insert(0, 1)
modes_extended.append(1)
if resource_types_to_consider is None:
resources = rcpsp_model.resources_list
else:
resources = resource_types_to_consider
sorted_task_by_start = sorted(rcpsp_sol.rcpsp_schedule,
key=lambda x: 100000*rcpsp_sol.rcpsp_schedule[x]["start_time"]+x)
sorted_task_by_end = sorted(rcpsp_sol.rcpsp_schedule,
key=lambda x: 100000*rcpsp_sol.rcpsp_schedule[x]["end_time"]+x)
max_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[-1]]["end_time"]
min_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[0]]["start_time"]
print("Min time ", min_time)
print("Max time ", max_time)
with_calendar = isinstance(rcpsp_model, RCPSPModelCalendar)
array_ressource_usage = {resources[i]:
{"activity":
np.zeros((max_time-min_time+1,
max(rcpsp_model.resources[resources[i]])
if with_calendar else rcpsp_model.resources[resources[i]])),
"binary_activity":
np.zeros((max_time - min_time + 1,
max(rcpsp_model.resources[resources[i]])
if with_calendar else rcpsp_model.resources[resources[i]])),
"total_activity":
np.zeros(max(rcpsp_model.resources[resources[i]])
if with_calendar else rcpsp_model.resources[resources[i]]),
"activity_last_n_hours":
np.zeros((max_time-min_time+1,
max(rcpsp_model.resources[resources[i]])
if with_calendar else rcpsp_model.resources[resources[i]])),
"boxes_time": []
}
for i in range(len(resources))}
total_time = max_time-min_time+1
nhour = int(min(8, total_time/2-1))
index_to_time = {i: min_time+i for i in range(max_time-min_time+1)}
time_to_index = {index_to_time[i]: i for i in index_to_time}
for activity in sorted_task_by_start:
mode = modes_extended[activity-1]
start_time = rcpsp_sol.rcpsp_schedule[activity]["start_time"]
end_time = rcpsp_sol.rcpsp_schedule[activity]["end_time"]
if end_time == start_time:
continue
resources_needed = {r: rcpsp_model.mode_details[activity][mode][r]
for r in resources}
for r in resources_needed:
if r not in array_ressource_usage:
continue
rneeded = resources_needed[r]
if not with_calendar:
range_interest = range(array_ressource_usage[r]["activity"].shape[1])
else:
# try:
# range_interest = [x for x in range(len(rcpsp_model.calendar_details[r])) if
# rcpsp_model.calendar_details[r][x][time_to_index[start_time]] == 1]
# except:
range_interest = range(rcpsp_model.resources[r][time_to_index[start_time]])
while rneeded > 0:
# availables_people_r = [i for i in range(array_ressource_usage[r]["activity"].shape[1])
# if array_ressource_usage[r]["activity"][time_to_index[start_time], i] == 0]
availables_people_r = [i for i in range_interest
if array_ressource_usage[r]["activity"][time_to_index[start_time], i] == 0]
if verbose:
print(len(availables_people_r), " people available : ")
if len(availables_people_r) > 0:
resource = min(availables_people_r,
key=lambda x: array_ressource_usage[r]["total_activity"][x])
# greedy choice,
# the one who worked the less until now.
array_ressource_usage[r]["activity"][time_to_index[start_time]:time_to_index[end_time], resource] \
= activity
array_ressource_usage[r]["binary_activity"][time_to_index[start_time]:time_to_index[end_time], resource] \
= 1
array_ressource_usage[r]["total_activity"][resource] += (end_time-start_time)
array_ressource_usage[r]["activity_last_n_hours"][:, resource] = np.convolve(array_ressource_usage[r]["binary_activity"][:, resource],
np.array([1]*nhour+[0]+[0]*nhour),
mode="same")
array_ressource_usage[r]["boxes_time"] += [[(resource-0.25, start_time+0.01, activity),
(resource-0.25, end_time-0.01, activity),
(resource+0.25, end_time-0.01, activity),
(resource+0.25, start_time+0.01, activity),
(resource-0.25, start_time+0.01, activity)]]
# for plot purposes.
rneeded -= 1
else:
print("r_needed ", rneeded)
print("Ressource needed : ", resources_needed)
print("ressource : ", r)
print("activity : ", activity)
print("Problem, can't build schedule")
print(array_ressource_usage[r]["activity"])
rneeded = 0
return array_ressource_usage
def plot_resource_individual_gantt(rcpsp_model: RCPSPModel,
rcpsp_sol: RCPSPSolution,
                                   resource_types_to_consider: Optional[List[str]] = None,
title_figure="",
fig=None,
ax=None,
current_t=None):
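    """Plot one Gantt chart per resource, with one row per individual
    resource unit and an optional vertical line at current_t."""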
array_ressource_usage = compute_schedule_per_resource_individual(rcpsp_model,
rcpsp_sol,
resource_types_to_consider=
resource_types_to_consider)
sorted_task_by_start = sorted(rcpsp_sol.rcpsp_schedule,
key=lambda x: 100000 * rcpsp_sol.rcpsp_schedule[x]["start_time"] + x)
sorted_task_by_end = sorted(rcpsp_sol.rcpsp_schedule,
key=lambda x: 100000 * rcpsp_sol.rcpsp_schedule[x]["end_time"] + x)
max_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[-1]]["end_time"]
min_time = rcpsp_sol.rcpsp_schedule[sorted_task_by_end[0]]["start_time"]
for key in list(array_ressource_usage.keys()):
if np.sum(array_ressource_usage[key]["total_activity"]) == 0:
array_ressource_usage.pop(key)
resources_list = list(array_ressource_usage.keys())
# fig, ax = plt.subplots(len(array_ressource_usage),
# figsize=(10, 5))
# for i in range(len(array_ressource_usage)):
# ax[i].imshow(array_ressource_usage[resources_list[i]]["binary_activity"].T)
if fig is None or ax is None:
fig, ax = plt.subplots(len(array_ressource_usage),
figsize=(10, 5))
fig.suptitle(title_figure)
if len(array_ressource_usage) == 1:
ax = [ax]
for i in range(len(resources_list)):
patches = []
nb_colors = len(sorted_task_by_start)//2
colors = plt.cm.get_cmap("hsv", nb_colors)
for boxe in array_ressource_usage[resources_list[i]]["boxes_time"]:
polygon = Polygon([(b[1], b[0]) for b in boxe])
activity = boxe[0][2]
x, y = polygon.exterior.xy
ax[i].plot(x, y, zorder=-1, color="b")
patches.append(pp(xy=polygon.exterior.coords,
facecolor=colors((activity-1) % nb_colors)))
p = PatchCollection(patches,
match_original=True,
#cmap=matplotlib.cm.get_cmap('Blues'),
alpha=0.4)
ax[i].add_collection(p)
ax[i].set_title(resources_list[i])
ax[i].set_xlim((min_time, max_time))
        capacity = rcpsp_model.resources[resources_list[i]]
        if isinstance(rcpsp_model, RCPSPModelCalendar):
            # Calendar models store one capacity per time step; use the peak.
            capacity = max(capacity)
        ax[i].set_ylim((-0.5, capacity))
        ax[i].set_yticks(range(capacity))
        ax[i].set_yticklabels(tuple([j for j in range(capacity)]),
                              fontdict={"size": 7})
ax[i].grid(True)
if current_t is not None:
ax[i].axvline(x=current_t, label='pyplot vertical line', color='r', ls='--')
return fig
# TODO: Check if the scipy version of KTD is the most meaningful for what we want to use it for (ktd between -1 and 1)
def kendall_tau_similarity(rcpsp_sols: Tuple[RCPSPSolution, RCPSPSolution]):
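    """Kendall tau correlation between the activity permutations of two solutions."""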
sol1 = rcpsp_sols[0]
sol2 = rcpsp_sols[1]
perm1 = sol1.generate_permutation_from_schedule()
perm2 = sol2.generate_permutation_from_schedule()
ktd, p_value = scipy.stats.kendalltau(perm1, perm2)
return ktd
def all_diff_start_time(rcpsp_sols: Tuple[RCPSPSolution, RCPSPSolution]):
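    """Per-activity start-time differences between two solutions (sol1 - sol2)."""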
sol1 = rcpsp_sols[0]
sol2 = rcpsp_sols[1]
diffs = {}
for act_id in sol1.rcpsp_schedule.keys():
diff = sol1.rcpsp_schedule[act_id]['start_time'] - sol2.rcpsp_schedule[act_id]['start_time']
diffs[act_id] = diff
return diffs
def compute_graph_rcpsp(rcpsp_model: RCPSPModel):
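    """Build the precedence graph of the model; edges carry per-mode durations
    of the predecessor plus min/max duration attributes."""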
nodes = [(n, {mode: rcpsp_model.mode_details[n][mode]["duration"]
for mode in rcpsp_model.mode_details[n]})
for n in range(1, rcpsp_model.n_jobs + 3)]
edges = []
for n in rcpsp_model.successors:
for succ in rcpsp_model.successors[n]:
dict_transition = {mode: rcpsp_model.mode_details[n][mode]["duration"]
for mode in rcpsp_model.mode_details[n]}
min_duration = min(dict_transition.values())
max_duration = max(dict_transition.values())
dict_transition["min_duration"] = min_duration
dict_transition["max_duration"] = max_duration
dict_transition["minus_min_duration"] = -min_duration
dict_transition["minus_max_duration"] = -max_duration
dict_transition["link"] = 1
edges += [(n, succ, dict_transition)]
return Graph(nodes, edges, False)
|
/**
* Content provider for templates. Provides all the enabled templates
* defined for this editor.
*/
private final class TemplatesContentProvider implements ITreeContentProvider {
/*
* @see org.eclipse.jface.viewers.ITreeContentProvider#getChildren(java.lang.Object)
*/
public Object[] getChildren(Object parentElement) {
if (parentElement instanceof TemplatePersistenceData)
return new Object[0];
else if (parentElement instanceof TemplateContextType) {
TemplateContextType contextType= (TemplateContextType) parentElement;
return getTemplates(contextType.getId());
}
return null;
}
private TemplatePersistenceData[] getTemplates(String contextId) {
List templateList= new ArrayList();
TemplatePersistenceData[] datas= getTemplateStore().getTemplateData(false);
for (int i= 0; i < datas.length; i++) {
if (datas[i].isEnabled() && datas[i].getTemplate().getContextTypeId().equals(contextId))
templateList.add(datas[i]);
}
return (TemplatePersistenceData[]) templateList
.toArray(new TemplatePersistenceData[templateList.size()]);
}
/*
* @see org.eclipse.jface.viewers.ITreeContentProvider#getParent(java.lang.Object)
*/
public Object getParent(Object element) {
if (element instanceof TemplatePersistenceData) {
TemplatePersistenceData templateData= (TemplatePersistenceData) element;
return getContextTypeRegistry().getContextType(
templateData.getTemplate().getContextTypeId());
}
return null;
}
/*
* @see org.eclipse.jface.viewers.ITreeContentProvider#hasChildren(java.lang.Object)
*/
public boolean hasChildren(Object parentElement) {
if (parentElement instanceof TemplatePersistenceData)
return false;
else if (parentElement instanceof TemplateContextType) {
String contextId= ((TemplateContextType) parentElement).getId();
TemplatePersistenceData[] datas= getTemplateStore().getTemplateData(false);
if (datas.length <= 0)
return false;
for (int i= 0; i < datas.length; i++) {
if (datas[i].isEnabled() && datas[i].getTemplate().getContextTypeId().equals(contextId))
return true;
}
return false;
}
return false;
}
/*
* @see org.eclipse.jface.viewers.IStructuredContentProvider#getElements(java.lang.Object)
*/
public Object[] getElements(Object inputElement) {
List contextTypes= new ArrayList();
for (Iterator iterator= getContextTypeRegistry().contextTypes(); iterator.hasNext();) {
TemplateContextType contextType= (TemplateContextType) iterator.next();
if (!fLinkWithEditorAction.isChecked() || isActiveContext(contextType))
contextTypes.add(contextType);
}
return contextTypes.toArray(new TemplateContextType[contextTypes.size()]);
}
private boolean isActiveContext(TemplateContextType contextType) {
return fActiveTypes == null || fActiveTypes.contains(contextType.getId());
}
/*
* @see org.eclipse.jface.viewers.IContentProvider#dispose()
*/
public void dispose() {
}
/*
* @see org.eclipse.jface.viewers.IContentProvider#inputChanged(org.eclipse.jface.viewers.Viewer, java.lang.Object, java.lang.Object)
*/
public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
}
} |
def add_feed(self, feedlike, **kwargs):
    fnum = kwargs.pop('fnum', None)
if isinstance(feedlike, bFeed):
munging = feedlike.munging
if 'munging' in kwargs:
explicit_munging = kwargs['munging'].as_odict
for key in explicit_munging:
munging[key] = explicit_munging[key]
fed = Feed(self, feedlike.ftype,
feedlike.sourcing,
munging,
feedlike.meta,
fnum)
elif isinstance(feedlike, Feed):
fed = feedlike
else:
raise Exception("Invalid Feed {}".format(repr(feedlike)))
self.feeds.append(fed)
objs = object_session(self)
objs.add(fed)
objs.commit() |
The 87Sr/86Sr and 143Nd/144Nd disequilibrium between Polynesian hot spot lavas and the clinopyroxenes they host: Evidence complementing isotopic disequilibrium in melt inclusions
We report 87Sr/86Sr and 143Nd/144Nd data on clinopyroxenes recovered from 10 ocean island lavas from three different hot spots (Samoa, Society, and Cook-Austral island chains). The clinopyroxenes recovered from eight of the 10 lavas analyzed in this study exhibit 87Sr/86Sr disequilibrium with respect to the host lava. The 87Sr/86Sr ratios in clinopyroxene separates are 95-3146 ppm (0.0095-0.31%) different from their respective host whole rocks. Clinopyroxenes in three lavas have 143Nd/144Nd ratios that are 70-160 ppm (0.007-0.016%) different from the host lavas. The 87Sr/86Sr and 143Nd/144Nd disequilibrium in one lava (the oldest lava considered in this study, Mangaia sample MGA-B-47) can be attributed to post-eruptive radiogenic ingrowth, but the isotope disequilibrium in the other, younger lavas cannot be explained by this mechanism. In five of the lava samples, two populations of clinopyroxene were isolated (black and green, separated by color). In four out of five of these samples, the 87Sr/86Sr ratios of the two clinopyroxene populations are isotopically different from each other. In addition to 87Sr/86Sr disequilibrium, the two clinopyroxene populations in one of the lavas (Tahaa sample TAA-B-26) have 143Nd/144Nd ratios that are ∼100 ppm different from each other. Given the resilience of clinopyroxene to seawater alteration and the likelihood that the Sr and Nd isotope composition of fresh clinopyroxene separates provides a faithful record of primary magmatic compositions, the clinopyroxene-clinopyroxene isotope disequilibrium in these four lavas provides strong evidence that a mechanism other than seawater alteration has generated the observed isotopic disequilibrium. This study confirms the isotopic diversity in ocean island lavas previously observed in olivine-hosted melt inclusions. For example, the Sr isotopic variability previously observed in olivine-hosted melt inclusions is mirrored by the isotopic diversity in clinopyroxenes isolated from many of the same Samoan lavas. The isotopic data from melt inclusions and clinopyroxenes are not consistent with shallow assimilation of sediment or with entrainment of xenocrystic clinopyroxene from the oceanic crust or upper mantle. Instead, the data are interpreted as reflecting isotopic heterogeneity in the mantle sources of the lavas. The isotopic diversity in clinopyroxenes and melt inclusions suggests that a single lava can host components derived from isotopically diverse source regions. |
package de.baswil.spring.proxy.proxy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.MalformedURLException;
import java.net.URL;
/**
* Analyze the Environment variable for proxies (http or https)
*
* @author <NAME>
*/
public abstract class AbstractUrlProxySettingsParser {
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractUrlProxySettingsParser.class);
/**
* Get the value (url) of the environment variable.
*
* @return url
*/
public abstract String getUrl();
/**
* Analyze the url of the environment variable and save the result in the {@link ProxySettings} object.
*
     * @param proxySettings The settings object for the result of the analysis.
*/
public void readProxySettingsFromUrl(ProxySettings proxySettings) {
String urlString = getUrl();
if(urlString == null){
return;
}
final URL url;
try {
url = new URL(urlString);
} catch (MalformedURLException e) {
LOGGER.warn("Wrong proxy url format. Ignore url for proxy settings.", e);
return;
}
if (url.getHost().trim().isEmpty()) {
LOGGER.warn("Proxy url has no hostname. Ignore url for proxy settings.");
return;
}
proxySettings.setHost(url.getHost());
if (url.getPort() != -1) {
proxySettings.setPort(url.getPort());
}
if (url.getUserInfo() != null) {
String[] userInfoSplit = url.getUserInfo().split(":", 2);
if (userInfoSplit.length == 1) {
proxySettings.setUser(userInfoSplit[0]);
} else {
proxySettings.setUser(userInfoSplit[0]);
proxySettings.setPassword(userInfoSplit[1]);
}
}
}
}
|
Optimization Design of Shaper Mechanism Based on Nonlinear Programming Model
Due to the lack of an optimization algorithm or model that could be used to optimize a complicated plane linkage mechanism (such as a shaper mechanism), a nonlinear programming model that could solve this problem was proposed in this work. Its objective function was the minimum initial velocity of the cutting tool in the shaper mechanism, and the lengths of the linkages in the shaper mechanism were taken as the constraint conditions. Subsequently, a typical shaper mechanism was chosen as an example to be optimized via this nonlinear programming model. The optimization results revealed that this model could produce an appropriate optimization scheme effectively and that the resulting scheme was reasonable.
Introduction
With the advancement of computer science and programming languages, an increasing number of intelligent algorithms and models have been applied in the machinery industry, especially in the optimization design of mechanisms, which has attracted the attention of many scholars in recent years. According to the available literature, a large number of optimization algorithms and models used to optimize the size of the components of a mechanism have been reported, such as multi-objective programming models (MOOM), differential evolution, the Monte Carlo method, particle swarm optimization (PSO) and so on, all of which have proved effective and efficient at producing appropriate optimization schemes for simple plane linkage mechanisms such as the slider-crank mechanism and the four-bar linkage. Nevertheless, investigations of optimization algorithms and models capable of proposing a proper optimization scheme for complex linkage mechanisms, such as the actuator of a shaper mechanism, were inadequate. Therefore, an optimization model that can be used to optimize the lengths of the linkages in a six-bar shaper mechanism was established using nonlinear programming, which is the main contribution of this work.
Rules of establishing the optimization model
While optimizing the design of a shaper mechanism, several critical issues needed to be considered: 1) the cutting tool of the shaper mechanism needed to move smoothly during the process of cutting metal; furthermore, the impact load occurring when the cutting tool contacted the metal should be kept as low as possible, which means that the initial velocity of the cutting tool should be as low as possible; 2) due to special limitations of the working conditions, the lengths of the linkages in the shaper mechanism should be kept within a certain range. According to this analysis, some of the constraints could not be described by linear functions; they were in fact nonlinear. Thus, a nonlinear programming model was needed to solve this problem.
Establishing the objective function
Assuming that the initial velocity of the cutting tool was v0 and that there were n independent optimization variables which might affect the initial velocity of the cutting tool, these variables could be written as a column vector (Equation 1):
L = (l1, l2, ..., ln)^T
where li was an independent variable; the lengths of the linkages in this shaper mechanism were always regarded as the independent variables.
Therefore, the initial velocity of the cutting tool v0 could be expressed as a function of these variables (Equation 2):
v0 = f(L) = f(l1, l2, ..., ln)
where li was an element of the vector L. The main purpose of this optimization model was to make v0 as low as possible; thus, Equation 2 could be regarded as the objective function of this nonlinear optimization model.
Establishing the constraint conditions
In fact, many factors could influence the lengths of the linkages in the shaper mechanism, such as the value of the pressure angle and the condition for forming an oscillating guide-bar mechanism, which limited the lengths of the linkages to a certain range. Provided that the minimum value of a linkage was limin and the maximum value was limax, the constraint conditions of this nonlinear optimization model could be written as Equation 3:
limin <= li <= limax, i = 1, 2, ..., n
Optimization problem definitions
This work chose the shaper mechanism of Figure 1 as the example to illustrate how to use the nonlinear programming model to optimize a shaper mechanism. As shown in Figure 1, Components 1, 3 and 4 were linkages, and Component 2 was a slider. Component 5 represented the cutting tool of the shaper mechanism. Component 1 was the driving part, which rotated counterclockwise with a constant angular velocity ω1. At the beginning of the motion, the angle θ1 was equal to 0. The range of the size of the linkages in Figure 1 is introduced in Section 3.2. We intended to find the lengths of the linkages in Figure 1 for which the initial velocity of Component 5 was the lowest.
Determining constraint conditions
Taking the conditions for forming this mechanism and the value of the pressure angle into consideration, the admissible range of the size of each linkage in Figure 1 was determined.
Determining objective function
The initial velocity of Component 5 could be solved with the graphical method of vector equations. Since the graphical method of vector equations is a mature theory of mechanism kinematic analysis, the expression for the initial velocity of Component 5, v0, was given directly as Equation 4 and its derivation was omitted; in it, ω1 was a constant and tan θ was given by Equation 5.
Solutions & Results
Using the Python programming language to implement this nonlinear programming model and running the code in PyCharm, the optimization results were obtained. The comparison between the original design and the optimized design acquired via this nonlinear programming model is shown in Table 1. As shown in Table 1, this nonlinear programming model produced an optimized design scheme in which the initial velocity of the cutting tool was 0. This was an ideal outcome, because the impact load occurring when the cutting tool contacted the metal would then be the lowest.
Conclusion
This work proposed an optimization design model for the shaper mechanism based on nonlinear programming. The optimization results of the example demonstrated that this model could produce an optimized design scheme effectively. Furthermore, the design scheme of the shaper mechanism calculated via this model was better than the original design scheme, which provides a new solution for similar optimization problems in the machinery industry.
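As an illustration only, a minimal sketch of such a nonlinear program in Python with SciPy; the objective below is a hypothetical stand-in, since the actual expression for v0 (Equations 4 and 5) depends on the specific shaper geometry and is not reproduced above:

import numpy as np
from scipy.optimize import minimize

def v0(lengths):
    # Hypothetical stand-in for Equations 4-5: the initial cutter velocity as a
    # function of the link lengths. Replace with the geometry-derived formula.
    l1, l3, l4 = lengths
    return abs(l1 * (1.0 - l4 / (l3 + l4)))

# Equation 3: box constraints l_imin <= l_i <= l_imax (assumed ranges, in metres).
bounds = [(0.10, 0.20),  # l1
          (0.60, 0.90),  # l3
          (0.15, 0.40)]  # l4

result = minimize(v0, x0=np.array([0.15, 0.75, 0.25]), bounds=bounds)
print("optimal lengths:", result.x, "minimal v0:", result.fun)
|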
On March 29th, Rob Elliot won his fourth Ireland cap, against Slovakia in a pre-Euros friendly in Dublin. Elliot had hopes of not just making Martin O’Neill’s squad for France, but of being in the starting XI.
He was the only Irish goalkeeper playing in the Premier League every week and, although it was at struggling Newcastle United, Elliot says he was "growing". At 29, season 2015/'16 was turning into the best of his career.
Then, 16 minutes in, Elliot dived to his left and heard his right knee “pop”. His season, his Euros, his career, stopped there and then. He left Lansdowne Road in an ambulance.
It’s said 2016 will go down as an unforgettable, landscape-shifting year: here’s Rob Elliot’s.
JANUARY
The first game of the year was a 1-0 defeat at Arsenal. Steve McClaren had been in charge for six months but was under pressure. On arrival he and the Newcastle board told Elliot he could leave – Tim Krul was first choice and Karl Darlow had been signed.
“Then [October 2015] I was away with Ireland and found out Tim ruptured his cruciate,” Elliot says. “Karl was injured, so I was the only fit ’keeper at the club. I remember thinking that no-one really wanted me here a few months ago, my future wasn’t here, but now I was playing for Newcastle again.”
By January 2016 Elliot was re-established as Newcastle’s number one.
“After Arsenal we drew with Man United and I felt we were on the cusp of developing something. We signed Jonjo [Shelvey] and Andros Townsend. We beat West Ham at home and we’d a lift.
“Then we lost at Watford, who were big and strong. Maybe we didn’t have the character to stand up to that. If it was just a football game – like at Arsenal – we could hold our own because we’d good, technical players.
“It was a strange time. Alan Rickman died. I’m a movie buff.”
FEBRUARY
A 3-0 defeat at Everton kept Newcastle in the relegation zone.
“After the game some harsh words were said by Steve McClaren. I’d made some good saves, but it was a kick in the balls. I wanted to make saves that won us points, not just keep the score down.
“Then we beat West Brom. Again I thought: ‘Maybe this is the turning point.’”
MARCH
“Seamus [McDonagh] was in touch loads. There were also messages from the gaffer [O’Neill] or Roy [Keane].
“I knew the friendly games were coming up, then the Euros, I was looking forward. I felt it was between me and Darren [Randolph] to have a real go at it.”
Newcastle lost 3-1 at home to Bournemouth. McClaren was sacked.
“It felt like the end. The fans had lost patience with us, the manager and the club. The whole thing had turned sour.
“Then we heard about Rafa [Benítez] and we thought: ‘This can’t be right!’
“We met him here in the canteen, talking like school kids. It was almost like the club changed instantly. The fans changed. We went to Leicester in his first game and did well.
“Then we drew with Sunderland. That was my last Newcastle game. I went to the airport with John O’Shea, had a beer, flew over to Dublin.
“We had Switzerland on the Friday night – Darren played in that one and did well. We’d Saturday off. Me and Daz went into town, watched a film. We grew up together at Charlton, we’ve known each other since we were 15, became pros together. We room together.
“Brian Kerr called me up for the under-17s. People were suddenly asking me: ‘Are you Irish?’ I said yeah – for me football was always Ireland. That was the passion.
“I’d go to my nan’s in Cork. My Mum’s family are English, from Greenwich. My granddad’s actually Scottish but he met my nan in Ireland. I’m a proper mongrel.
“They moved over in the 50s, as you did. I grew up as Irish, my best friend on our street, Liam, his family were all from Tipperary. In London in the 80s that was a bit tough. They’d been the immigrants, ‘the problem’. It all changes, doesn’t it?
“USA 94, I remember. Then 2002, I was watching Shay Given. We’d Mark Kinsella and Matt Holland at Charlton.
“Slovakia? I remember the ball being played across and I moved to my right. The fella’s had a shot and I moved to my left. As I dived I just felt a crack and pop. I knew it was my ACL [anterior cruciate ligament]. There’s a picture of me in the air and I’m holding my knee even before I hit the floor. Wincing.
“The pain for the first 10-15 seconds was really bad, then it disappeared. I thought: ‘Have I just made a big deal of this?’ Then I felt my knee and I knew.
“The doc tested it. I was crying. I knew my season was over, the Euros were gone. I got wound up.
“I went to the hotel and waited for the lads. They all came up to me, it was nice. Martin came and sat with me. He just said he couldn’t believe it. He said nice things, that I’d be back. Roy did the same. He talked about when he’d done his. I was thinking: ‘God, it’s Roy Keane.’ I know he’s our assistant manager but when I was growing up he was one of the best players in the world.
“I flew home the next morning.”
APRIL
“I went down to London, saw surgeon Andy Williams. April 5th. It was a horrible time but I was so well looked after.
“It was so painful. The physio, she was asking me to move my knee and it was so hard. I was sweating. You’re thinking: ‘Jesus, am I going to play again?’ It makes you realise how serious it was.
“I was on crutches for six, seven weeks. Our physio, Sean Beech, was magnificent.
“From January to March our results weren’t great but my performances were getting better. I was growing. I felt in a really good place, really comfortable. My mentality was really good. The one thing that’s gutting is that I’ve lost that momentum.
“I’d my 30th birthday. That was probably the toughest day. We played Crystal Palace. My little boy Max was supposed to be mascot and I was to lead him out. It was something I could remember for ever and ever, something I could show him.
“I was in pain that day, I probably shouldn’t have gone to the game, my knee was killing me. It was s**t. Selfishly those are the milestones. Other than when I cried when I first did it, that was the only other day when I’ve been really down.”
MAY
Newcastle are relegated.
“It was probably waiting to happen for three years. It was solemn.
“But then the whole club turned. It was like the closing of a chapter.
“Then the questions: ‘Will Rafa stay?’ ‘Are things going to change the way we do things at the club?’ Luckily those things have happened.”
On the last day of the season, relegated Newcastle beat Tottenham 5-1.
“If you needed a game to convince a manager, it was the Tottenham game, it showed what the club could be. The best I’ve ever seen this club was the day we got relegated.
“The mentality of the group changed. It became more collective. Maybe players had come to do well at Newcastle in order to move on. No disrespect to them, but as a club Newcastle United shouldn’t accept that. We should be the pinnacle.
“Rafa sent me a message. I was having a thigh operation, they cut the tendon off completely.
“I got a text. ‘Hi Rob, hope injury is well.’ He asked me which players I ‘like for the Championship’. I was half drugged-up, I thought I might put ‘Keegan’.
“I thought: ‘Wow, he’s staying and he’s asking my opinion.’ It gave me such a massive lift.”
JUNE
Euro 2016: “During the Slovakia game friends had been calling. They knew. On the group chat they put up a picture of a Eurostar ticket for me for the Italy game. That was nice.
“I’d got over it [missing out]. I remember texting Darren because I was buzzing to watch him – he’s my friend. And of course I wanted to watch the lads.
“We went into Lille the day before the game – the Eurostar, my mate had got an Airbnb. Eight, nine of us.
“On the day of the game we camped ourselves in this bar, had lunch, a few drinks. There was the Brexit thing. We were sitting there in Lille saying it’s not going to happen, no chance. All the polls said no chance. Then you wake up and it’s happened. You go: ‘Okay!’ I would have stayed had it been me – well, I did vote to stay.
“The biggest thing I’ve noticed this year is getting away from PC – Brexit, Trump. To be a good leader you need to have made mistakes, got things wrong and learned from them. You need life experience. It’s been a strange year.
“In the stadium I was in line where Robbie [Brady] scored. When he scored it was just mental, I thought I’d done my knee again.
“I was walking by then. Even though I didn’t get to play, it was just great to go.”
JULY
“My wife, Robyn, was pregnant, we managed to get a few days away. I started driving again and was doing some work outside. I could cycle without wincing.”
AUGUST
On the opening day of the season, Newcastle lost 1-0 at Fulham.
“I went, I love going. We were poor. It was a wake-up call for the lads – how big Newcastle are in the Championship.
“In the dressing room after, I was surprised, the lads were all chatting about what could be done better. I don’t know if it would have been like that last season. There was a group trust developing.
“Then we lost to Huddersfield at home. Again, I think that was good. It showed us how teams were going to play at St James’. We’d lost six points, I remember saying that there were over 130 points left to play for: ‘Don’t stress.’”
SEPTEMBER
Elliot signed a new contract.
“I love living up here, we’ve really settled. I wanted to sign a new contract that showed I was part of the club again and part of its future.
“It means something – if you don’t feel part of where you are, you can drift. I’m signed to 2020 with a two-year option. I was really happy.”
OCTOBER
“I started going outside with Simon Smith, our goalie coach. The repetition made my knee better and better. It gave all the work I’d done in the gym a meaning. October was a big month for me.”
NOVEMBER
“Towards the end I joined in a 5-a-side, which I shouldn’t have done because you’re twisting and turning. Everything I’d done before that was controlled. But I did it and I felt amazing.”
DECEMBER
“I’d a reserve game at St James’ v Aston Villa. I was captain. I looked at their team and didn’t really recognise anyone, it was a young team. I was hoping one of them wouldn’t get excited and smash into me. You do think that – the one thing you can’t control are collisions.
“The game kicked off and I conceded immediately. But after that, fine, loads of kicking.
“Carrie Fisher died. The big thing for me this year – this injury – is learning to appreciate what you’ve got.
“We’ve a new baby [daughter Oa]. I’ve ruptured my knee but I’ve signed a new contract. I’m moving to the coast, to a house I never thought I could live in. As you get older you have to appreciate how lucky you are, to have this lifestyle.
“Hopefully I’ll get back in the squad, back on the bench, back with the lads. I still speak to Seamus. It’d be great to be back involved for March, when we play the next qualifier. There’s still loads in front of me.” |
What you can do now to prepare for ICD-10. The United States is moving toward adoption of the 10th version of the World Health Organization's International Classification of Diseases (ICD) codes. Because the change will have a significant impact on electronic health record and billing systems, ICD-10 is being rolled out in phases over the next couple of years. Physicians will need to begin using the new diagnosis codes starting in October 2013. This article describes the differences between ICD-9 and ICD-10 and the steps physicians and clinics can take now to prepare for the implementation. |
Incidence of maternal Toxoplasma infections in pregnancy in Upper Austria, 2000-2007
Background
Despite three decades of a prenatal screening program for toxoplasmosis in Austria, population-based estimates of the incidence of maternal infections with Toxoplasma gondii during pregnancy are lacking. We studied the incidence of primary maternal infections during pregnancy in the Federal State of Upper Austria.
Methods
Screening tests for 63,416 women and over 90,000 pregnancies (more than 84.5% of pregnancies in the studied region) in the time period between 01.01.2000 and 31.12.2007 were analysed. The incidence of toxoplasmosis was estimated indirectly by binomial regression and directly by interval censored regression.
Results
During the studied period, 66 acute infections (risk of 0.07% per pregnancy) were detected, but only 29.8% of seronegative women were tested at least three times during their pregnancies. The seroprevalence of Toxoplasma antibodies among all tested women was 31%. Indirectly estimated incidence (from differences in prevalence by age) was 0.5% per pregnancy, while directly estimated incidence (interval censored regression) was 0.17% per pregnancy (95% confidence interval: 0.13-0.21%).
Conclusions
Calculating incidence from observed infections results in severe underreporting due to many missed tests and potential diagnostic problems. Using statistical modelling, we estimated primary toxoplasmosis to occur in 0.17% (0.13-0.21%) of all pregnancies in Upper Austria.
Background
Congenital toxoplasmosis is among the infections associated with a high risk of complications, but fortunately acute infections during pregnancy are relatively rare. Due to its potential to cause life-long disability, the burden of disease of congenital toxoplasmosis is considerable. In order to prevent foetal infections and complications of toxoplasmosis, screening programs during pregnancy and subsequent treatment of identified maternal primoinfections were introduced in a few countries [1,2]. Austria was the first country to start with population-wide free screening and treatment of maternal infections in 1975, soon followed by France. Nonetheless, little is known about the incidence of these infections in these countries despite their long tradition of toxoplasmosis prevention. We used data from a screening laboratory that covers most of the population of one federal state in Austria in an attempt to determine the incidence in this region.
Sample
We retrospectively analysed serological data of all pregnant women aged 15-45 years insured by the OGKK ("Oberösterreichische Gebietskrankenkasse": Upper Austrian Regional Health Insurance) and with place of residence in Upper Austria. The OGKK is the largest statutory health insurance company in Upper Austria. Based on a special agreement with the health insurance company, all serological tests for Toxoplasma-specific IgG and IgM antibodies were conducted in one single laboratory (analyse BioLab GmbH, Linz). Information on the gestational week when the screening was performed and on the date of delivery was not available. We included only women for whom it could be assumed that their last test in a given pregnancy was conducted in the period from 01.01.2000 to 31.12.2007. Tests were classified as belonging to the same pregnancy when they were performed within a time window of 200 days (the analysis was also repeated using 300 days as a time window).
According to the regulations in Austria, screening has to be performed before the sixteenth week of gestation and repeated in seronegative women in the fifth and eighth month of pregnancy. Austrian experts recommended the application of shorter, eight-week screening intervals in 2005.
Diagnostic tools
The diagnostic algorithm is presented in Figure 1. All tests with an IIFT titer of 1:16 or higher were defined as seropositive. A suspected acute infection in pregnancy was defined by the following findings: positive anti-Toxoplasma-specific IgM antibodies (>0.65) and low (<0.2) Toxoplasma-specific IgG avidity. A suspected infection was considered proven (and classified as a certain infection in our analysis) when there was a more than fourfold antibody-titre rise. Given the difficulties of assessing the threshold in the IIFT, seroconversions that occurred in a short time period but were not accompanied by a positive IgM or a low avidity were considered false positive and were excluded.
Data flow and data protection
Data was extracted from the laboratory software Basu-Lab (Berger Analysen und Informationstechnik GmbH, Puchenau, Austria) and imported into STATA, version 8.2 (Statacorp, College Station, TX, USA) for all subsequent analyses (STATA log file available from the corresponding author on request). To ensure data protection and to meet the obligations of the Austrian data protection law (§ 46 (2) and § 46 Datenschutzgesetz 2000), personal identifiers were replaced by unique pseudonyms. Furthermore, the place of residence and its postal code were replaced by the corresponding NUTS-3 regions (AT311: "Innviertel", AT312: "Linz-Wels", AT313: "Muehlviertel", AT314: "Steyr-Kirchdorf", AT315: "Traunviertel") and an indicator variable for the three big cities of Linz, Wels and Steyr (the former two being part of region AT312 and the latter part of AT314). The study was reviewed and approved by the ethics committee of the Elisabethinen Hospital Linz, Austria.
Statistical analysis
Firstly, we estimated the crude incidence from observed primoinfections during pregnancy. As testing did not cover the whole pregnancy for many seronegative women, we expected to miss many infections and to underestimate the incidence. We therefore used further indirect and direct methods to estimate the true infection rate in pregnancy. From a binomial regression model, we estimated the increase in seroprevalence per year of age and calculated the increase corresponding to the pregnancy duration of 268 days to obtain incidence, under the assumption that differences in prevalence by age reflect new infections (indirect method). Since diagnosing seroprevalence is less error-prone than correctly assessing the very rare event of acute infection, this method was robust against diagnostic errors. We subsequently analysed the incidence of Toxoplasma infections during pregnancy in seronegative women by means of interval censored regression (direct method). Interval censored regression allows one to account for the fact that in the case of a positive test it is only known that the infection occurred in the preceding time interval since the last negative test. Again, the estimate was recalculated to the period of 268 days. In order to obtain the incidence in relation to all pregnant women (as typically reported in other studies), the result was multiplied by (1 - seropositive fraction).
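For illustration, the 268-day rescaling behind the indirect estimate is simple arithmetic; a minimal sketch in Python with assumed numbers (not the study's fitted values):

slope_per_year = 0.0068          # hypothetical fitted increase in seroprevalence per year of age
pregnancy_fraction_of_year = 268 / 365.25

# Indirect method: rescale the age slope to the 268-day pregnancy duration.
incidence_per_pregnancy = slope_per_year * pregnancy_fraction_of_year
print("indirect estimate: {:.2%} per pregnancy".format(incidence_per_pregnancy))  # ~0.5%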
Since the interval censored analysis was based only on time during pregnancy, we were able to use information about IgM and avidity to rule out false positive results of the IIFT test. Within a pregnancy, screening tests were usually only about 3 or 4 months apart, and IgM remains positive and avidity low in this time span after an acute infection.
Seroprevalence of Toxoplasma infections among pregnant women
There were 275,842 test results in the database in total (Figure 2). The inclusion criteria for the study population were met by 63,416 women in the dataset. These women contributed 92,365 pregnancies, based on the 200-day estimate. This number only slightly decreased when a more conservative estimate of 300 days was used. The total population of the studied region is around 1.4 million. In Upper Austria, there were 109,327 live births in total in the years 2000-2007. The total number of pregnancies, including spontaneous and induced abortions and stillbirths, was certainly substantially higher, but most of the spontaneous and induced abortions will happen before the seroprevalence testing, leaving only the stillbirths (<0.1% of live births) unaccounted for. Dividing the 92,365 pregnancies included in our study by the 109,327 live births in the region, we concluded that our data covered more than 84.5% of all live births in Upper Austria during this period. At their first examination in the study period, women were in median 28.3 years old (interquartile range (IR) 24.3 to 32.2 years). The seroprevalence at the first examination was 30.6%. At their latest examination, women were on average 1.3 years older and the seroprevalence was slightly higher (31.7%). The seroprevalence increased in a linear manner with age (p < 0.01 for trend, Figure 3) and was significantly lower in cities (Table 1) than in the larger regions (p < 0.01, regardless of whether prevalence at first or latest examination was studied).
Suspected and certain Toxoplasma primoinfections detected during pregnancy
The case definition for a suspected primoinfection during pregnancy was met by 222 women. Their median age was 27.8 years (IR 24.6-32.2 years). Of those 222 cases, 66 (29.7%) were classified as certain (i.e., these women had at least two tests with discordant results during the same pregnancy). Table 1 shows the distribution of the cases by region and the corresponding incidence rates, with lower rates in cities than in regions including rural areas. The rural-urban difference was significant for suspected infections (p < 0.01), but not for certain infections (p = 0.18). The yearly numbers of cases ranged from 16 to 41 (mean: 26.5) for suspected infections and from 5 to 12 (mean: 8.3) for certain infections. No clear trend over time was observed (data not shown).
Estimated incidence of acute Toxoplasma infections during pregnancy
The results of indirectly estimating incidence rates from age-related differences in seroprevalence are presented in Table 2 (first two columns). Consistent with estimates based on observed cases, the incidence rates appeared to be lower in the cities than in other regions, but the difference is not significant. The interval censored regression yielded a substantially lower estimate for the incidence of toxoplasmosis in all pregnancies, at 0.17% (0.13-0.21%) (Table 2, columns 3 and 4). The results were virtually unchanged when 300 days were used instead of 200 days to define the tests belonging to one pregnancy.
Similarly to the binomial regression model, the estimated incidence rates were slightly lower in the three biggest cities than in overall Upper Austria, but again the difference was statistically not significant. A model including calendar years did not show a significant change over time. Based on the findings from interval censored regression, we estimated that there were 152 (95% confidence interval: 118-196) acute Toxoplasma infections during pregnancy in the years 2000-2007 in the study sample (based on 92,365 pregnancies in the same period).
Coverage of pregnancy with screening in seronegative women
In the study population, 38,576 women had their latest screening (based on the 200-day time window) and were seronegative in this examination. When only the latest pregnancy for each woman was included, we …
Table 1: Seroprevalence and observed (suspected and certain) primoinfections by region.
Discussion
Our study estimated the prevalence and incidence of toxoplasmosis and the coverage with screening in pregnant women in Austria. The estimated seroprevalence of about 31% in pregnant women is in line with findings from other countries in Europe. As expected, seroprevalence was higher in rural areas than in cities. The three recommended screening tests were conducted in only about 29.8% of seronegative women, despite the fact that about 95% of OGKK members attended all the check-ups of the Austrian maternal care program in pregnancy. A recent study from a region in southeast France reported similar problems: only 40% of pregnant women had all seven or more recommended tests. Poor compliance with a complete screening program jeopardizes a direct analysis of the incidence of Toxoplasma infections in pregnancy. Consequently, incidence based on observed cases resulted in severe underestimation if only certain diagnoses (0.07%) were considered. A certain diagnosis requires more than one test in pregnancy and, therefore, misses infections that occurred in early pregnancy before the first test. In addition, the period between the latest examination and birth is not included in the analysis. If only a single test result was available, infection could only be suspected, since high IgM and low avidity do not rule out a past infection. Therefore, incidence based on observed suspected infections suffers both from an underestimation due to cases which were not observed, and from an overestimation caused by false positive IgM and avidity tests. Statistical methods are therefore necessary to derive estimates of the true incidence. We used an indirect approach: the age-specific seroprevalence suggested a linear association between age and seroprevalence (Figure 3), as also observed by others. The estimates derived for incidence using this approach were higher than those obtained from observed suspected cases (0.5% per pregnancy). While false test results are unlikely to cause a substantial overestimation in this method, differences in age-specific prevalence can be subject to age cohort effects, with a share of infections taking place in younger years of life but decreasing over time. A decrease in the seroprevalence of Toxoplasma infections over time that may lead to overestimation in the indirect estimate has been observed in several European countries. Consistently, a seroprevalence of 41% reported for 1995/96 in Upper Austria was considerably higher than our findings for 2000-2007.
The reliability of the data for 1995/96 was questioned, but other reports from Austria also suggested a decreasing seroprevalence in the region, not only in humans but also in animals that are important for the transmission of the disease to humans. A decreasing trend is also in line with findings from The Netherlands comparing 1995/1996 and 2006/2007. Furthermore, the seroprevalence estimate is mostly based on the non-pregnant time. Women during pregnancy might be more conscious about avoiding potential sources of infection, such as eating undercooked meat and contact with contaminated soil. Therefore, the incidence of Toxoplasma infections during pregnancy in the same age group could be lower than in non-pregnant women. This effect might be partly compensated by an opposite bias, as pregnancy has been shown to be a risk factor for Toxoplasma infection in an epidemiological study from Brazil. The authors assumed changes in lymphocyte functions during late pregnancy, leading to some level of immunosuppression towards protozoal infections, to explain this increased susceptibility. As late stages of pregnancy were underrepresented in our study due to the poor adherence to the screening scheme, changes in immunity might not play a major role. Overall, we conclude that estimating incidence from age-specific prevalence might not provide valid results for the true incidence. The interval censored regression, directly assessing incidence during pregnancy, appears to be the most appropriate approach to estimate the true incidence. However, the method is based directly on the rare event of acute infections and is therefore more affected by an imperfect specificity of testing. Interval censored regression depended on clear-cut IIFT tests distinguishing seronegative from seropositive results and on IgM and avidity test results. We identified the following information regarding test characteristics: in the laboratory of analyse BioLab GmbH, 1,039 sera tested by IIFT were compared to the AxSYM and ARCHITECT test kits for anti-Toxoplasma gondii IgG (Abbott Laboratories, Abbott Park, Illinois), with two investigators reading the IIFT. Sensitivity and specificity were 99.7% and 97.2% for the first investigator and 96.8% and 99.4% for the second investigator for AxSYM, and 99.7% and 98.3% / 96.6% and 99.2% for ARCHITECT, respectively. According to the manufacturer's product information regarding sera from pregnant women, the sensitivity of VIDAS IgM is 96.0% (95% confidence interval: 91.4-98.2%) and 100% of pregnant women with an acute infection not more than 4 months old show a low IgG antibody avidity (95% confidence interval: 98.1-100.0%). False positive results can be ruled out in the subsequent avidity testing, while false negative tests escape further diagnostics. Fortunately, sensitivity is particularly high, resulting in only a marginal underestimation. However, there is a potential mechanism which could cause a more substantial underestimation: using only times between tests during pregnancy excludes early pregnancy, in which women might not be aware of being pregnant and might thus be less careful in avoiding exposure to toxoplasmosis. The contribution of this mechanism depends on the fraction of unplanned pregnancies and on consciousness in avoiding sources of infection during early pregnancy.
Strengths and limitations
The strength of our study is that we were able to analyse more than 84.5% of pregnancies leading to live births in Upper Austria.
Strengths and limitations
The strength of our study is that we were able to analyse more than 84.5% of pregnancies leading to live births in Upper Austria. OGKK covers all social classes, the catchment area was clearly defined, and only pregnant women were included. In most regions of Austria, screening is performed in several laboratories and it is difficult to assemble their screening data. Analysis of subsequent tests requires personal identifiers, and the exchange of this information between several institutes is complicated by personal data protection requirements. The use of routine data on toxoplasmosis testing in most other countries in the world (including the USA) is hampered by the fact that usually only privileged groups have access to screening. Due to the missing information on parity, we could not provide separate estimates by parity. As seroprevalence increases with age, rates are also typically lower in primipara than in multipara. Unfortunately, we did not have any information about the gestational week at the time of infection. This information is important if complications of the infection are to be studied. However, it is beyond the scope of this analysis to provide information about maternal-foetal transmission rates and the rate of children with clinical sequelae in cases of congenital toxoplasmosis. Various studies gave heterogeneous information about these rates and were questioned with regard to their data quality. We did not have information to study individual risk factors affecting incidence beyond place of residence. In an earlier analysis using the same data, a seasonal trend with a slight increase of diagnoses in winter (probably reflecting more infections in the fall) was described. Another problem is the clear allocation of patients to the study period. A pregnancy with several serological checks is not a time point but a time span. We used the last examination per pregnancy to decide on its allocation. In addition, we investigated a large, eight-year study period to reduce the number of pregnancies crossing the start or the end of the study period.
Conclusions
Using statistical models, we estimated the incidence of maternal Toxoplasma primoinfections in pregnancy in Upper Austria, 2000-2007. All approaches to determine the incidence of Toxoplasma infections in pregnancy suffered from limitations. We consider the proportion of observed certain cases (0.07%) the lower bound and the estimate based on age-specific seroprevalence (0.5%) the upper bound, and propose the interval-censored regression estimate (0.17%) as the best estimate.
// src/main/java/com/showka/service/query/u05/UriageKeijoQueryImpl.java
package com.showka.service.query.u05;
import java.util.Date;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.showka.domain.builder.BushoUriageBuilder;
import com.showka.domain.u05.Uriage;
import com.showka.domain.u05.UriageRireki;
import com.showka.domain.u17.BushoUriage;
import com.showka.domain.z00.Busho;
import com.showka.entity.RUriage;
import com.showka.entity.RUriageKeijo;
import com.showka.entity.RUriageKeijoTeisei;
import com.showka.entity.RUriagePK;
import com.showka.repository.i.RUriageKeijoRepository;
import com.showka.repository.i.RUriageKeijoTeiseiRepository;
import com.showka.repository.i.RUriageRepository;
import com.showka.service.query.u05.i.UriageKeijoQuery;
import com.showka.service.query.u05.i.UriageRirekiQuery;
import com.showka.value.EigyoDate;
import com.showka.value.Kakaku;
@Service
public class UriageKeijoQueryImpl implements UriageKeijoQuery {
@Autowired
private RUriageKeijoRepository repo;
@Autowired
private RUriageKeijoTeiseiRepository repoTeisei;
@Autowired
private RUriageRepository rUriageRepository;
@Autowired
private UriageRirekiQuery uriageRirekiQuery;
@Override
public BushoUriage getBushoUriage(Busho busho, EigyoDate date) {
// aggregate recorded sales amounts (excluding corrections)
int keijoKingaku = this.getKeijoKingaku(busho, date);
int teiseiKingaku = this.getTeiseiKingaku(busho, date);
// build
BushoUriageBuilder b = new BushoUriageBuilder();
b.withBusho(busho);
b.withKeijoDate(date);
b.withKeijoKingaku(keijoKingaku);
b.withTeiseiKingaku(teiseiKingaku);
return b.build();
}
@Override
public boolean hasDone(Uriage uriage) {
// get the sales history record
RUriagePK pk = new RUriagePK();
pk.setKeijoDate(uriage.getKeijoDate().toDate());
pk.setUriageId(uriage.getRecordId());
RUriage uriageRireki = rUriageRepository.getOne(pk);
// check whether a sales recording entry exists
boolean exists = repo.existsById(uriageRireki.getRecordId());
return exists;
}
/**
 * Gets the department's sales recording entries for the specified recording date.
 *
 * @param busho
 *            department
 * @param date
 *            recording date
 * @return sales recording entries
 */
List<RUriageKeijo> get(Busho busho, EigyoDate date) {
// search sales subject to recording
List<RUriage> uriageRirekiList = uriageRirekiQuery.getEntityList(busho, date);
// sales history record ids
Iterable<String> uriageRirekiRecordIds = uriageRirekiList.stream().map(uriageRireki -> {
return uriageRireki.getRecordId();
}).collect(Collectors.toList());
// sales recording entities
return repo.findAllById(uriageRirekiRecordIds);
}
/**
 * Aggregates the recorded sales amount for the department on the specified recording date.
 *
 * <pre>
 * Amounts from sales corrections are excluded.
 * </pre>
 *
 * @param busho
 *            department
 * @param date
 *            recording date
 * @return aggregated amount
 */
int getKeijoKingaku(Busho busho, EigyoDate date) {
// sales recording entities
List<RUriageKeijo> keijoEntities = this.get(busho, date);
// aggregate the recorded sales amounts
int keijoKingaku = keijoEntities.stream().mapToInt(ke -> {
String uriageId = ke.getUriageId();
UriageRireki rireki = uriageRirekiQuery.get(uriageId);
Optional<Uriage> uriage = rireki.getUriageOf(date);
// if no sale exists for the specified date the data is inconsistent, so failing fast here is acceptable
Kakaku uriageGokeiKingaku = uriage.get().getUriageGokeiKakaku();
return uriageGokeiKingaku.getZeinuki().intValue();
}).sum();
return keijoKingaku;
}
/**
 * Aggregates the correction portion of the department's recorded sales for the specified recording date.
 *
 * <pre>
 * Corrections are aggregated as negative amounts.
 * </pre>
 *
 * @param busho
 *            department
 * @param date
 *            recording date
 * @return aggregated amount of sales corrections
 */
int getTeiseiKingaku(Busho busho, EigyoDate date) {
// sales recording entities
List<RUriageKeijo> keijoEntities = this.get(busho, date);
// aggregate amounts for the correction portion of recorded sales
Iterable<String> keijoIds = keijoEntities.stream().map(e -> e.getRecordId()).collect(Collectors.toList());
List<RUriageKeijoTeisei> teiseiEntities = repoTeisei.findAllById(keijoIds);
int teiseiKingaku = teiseiEntities.stream().mapToInt(teisei -> {
String uriageId = teisei.getUriageId();
UriageRireki rireki = uriageRirekiQuery.get(uriageId);
Date pastKeijoDate = teisei.getTeiseiUriageRirekiKeijoDate();
Optional<Uriage> uriage = rireki.getUriageOf(new EigyoDate(pastKeijoDate));
// if no sale exists for the specified date the data is inconsistent, so failing fast here is acceptable
Kakaku uriageGokeiKingaku = uriage.get().getUriageGokeiKakaku();
// corrections are aggregated as negative numbers
return uriageGokeiKingaku.getZeinuki().intValue() * -1;
}).sum();
return teiseiKingaku;
}
}
|
Rumor Mill: A Baby for Beyoncé?
It was only four days ago that Beyoncé was celebrating her marriage to long-time boyfriend Jay-Z at a party in his lavish NYC penthouse, and now OK! has learned that the couple could soon have a baby on the way.
“I’ve heard that Beyoncé is pregnant from at least two people,” a source close to the couple tells OK!.
In past interviews, the Crazy in Love singer has talked cautiously about her desire to start a family. "You can’t rush a man into anything, whether it’s a relationship, marriage or having children," Beyoncé has said. "When he’s ready he’ll let you know."
Even friend Vivica A. Fox can’t wait for the happy couple to start a family. "I wish them many, many years of happiness and some babies," she tells OK!. "I think she will be really pretty pregnant, just like J. Lo and Halle Berry."
Keep a lookout for a growing baby bump if the rumors hold true.
Patella resection and patellectomy for comminuted fractures. This work is based on an analysis of the results of surgical treatment of 106 patients with comminuted fractures of the patella, of whom 92 underwent patella resection and 14 patellectomy. The indications for these operations are substantiated, and the details of the surgical technique are described. To eliminate tension on the tendon-bone contact line (for resection) and the tendon-tendon line (for patellectomy), a locking wire loop was used. Original methods have been developed for replacing extensor defects after resection of the lower third of the patella with autografts from the rectus muscle tendon and from the patellar ligament. Long-term results, studied in 76 patients, confirm the advantages of the proposed patellar resection and patellectomy techniques.
The pathophysiology and clinical aspects of hypercalcemic disorders. For the purposes of this review, the vast and increasingly complex subject of hypercalcemic disorders can be broken down into the following categories: physiochemical state of calcium in circulation; pathophysiological basis of hypercalcemia; causes of hypercalcemia encountered in clinical practice (causes indicated by experience at the University of California, Los Angeles; neoplasia; hyperparathyroidism; nonparathyroid endocrinopathies; pharmacological agents; possible increased sensitivity to vitamin D; miscellaneous causes); clinical manifestations and diagnostic considerations of hypercalcemic disorders; and the management of hypercalcemic disorders (general measures; measures for lowering serum calcium concentration; measures for correcting primary causes, including the management of asymptomatic hyperparathyroidism).
Evaluation of permethrin-treated military uniforms for personal protection against malaria in northeastern Thailand. A trial to compare the effect of military clothing treated by high-pressure spray with permethrin or placebo on the incidence of malaria in Royal Thai Army troops was conducted in northeastern Thailand. Bioassays of treated clothing using laboratory-reared Anopheles dirus females showed permethrin remained in the treated fabric for up to 90 days. Both permethrin- and placebo-treated uniform shirts provided > 84% protection from biting An. dirus in laboratory bioassays for the duration of the study. In laboratory tests, knockdown of An. dirus exposed to permethrin-treated cloth fell to < 20% after 3 hand washes, despite the presence of 28.7-59.9% of the original dose of permethrin. The use of permethrin-treated uniforms without adjunct application of topical repellents did not reduce malaria in Thai troops in an operational setting where incidence during 6 months was as high as 412 cases/1,000 in spite of chemoprophylaxis and use of untreated bednets. |
// Copyright 2018 The MATRIX Authors as well as Copyright 2014-2017 The go-ethereum Authors
// This file is part of the MATRIX library and of the go-ethereum library.
//
// The MATRIX-ethereum library is free software: you can redistribute it and/or modify it under the terms of the MIT License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
// OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package liner
import "unicode"
// These character classes are mostly zero width (when combined).
// A few might not be, depending on the user's font. Fixing this
// is non-trivial, given that some terminals don't support
// ANSI DSR/CPR
var zeroWidth = []*unicode.RangeTable{
unicode.Mn,
unicode.Me,
unicode.Cc,
unicode.Cf,
}
var doubleWidth = []*unicode.RangeTable{
unicode.Han,
unicode.Hangul,
unicode.Hiragana,
unicode.Katakana,
}
// countGlyphs considers zero-width characters to be zero glyphs wide,
// and members of Chinese, Japanese, and Korean scripts to be 2 glyphs wide.
func countGlyphs(s []rune) int {
n := 0
for _, r := range s {
// speed up the common case
if r < 127 {
n++
continue
}
switch {
case unicode.IsOneOf(zeroWidth, r):
case unicode.IsOneOf(doubleWidth, r):
n += 2
default:
n++
}
}
return n
}
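// countMultiLineGlyphs counts glyphs as countGlyphs does, but across lines
// of width columns, starting at column offset start; a double-width glyph
// that would straddle the end of a line is pushed to the next line and
// consumes an extra column.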
func countMultiLineGlyphs(s []rune, columns int, start int) int {
n := start
for _, r := range s {
if r < 127 {
n++
continue
}
switch {
case unicode.IsOneOf(zeroWidth, r):
case unicode.IsOneOf(doubleWidth, r):
n += 2
// no room for a 2-glyphs-wide char in the ending
// so skip a column and display it at the beginning
if n%columns == 1 {
n++
}
default:
n++
}
}
return n
}
func getPrefixGlyphs(s []rune, num int) []rune {
p := 0
for n := 0; n < num && p < len(s); p++ {
// speed up the common case
if s[p] < 127 {
n++
continue
}
if !unicode.IsOneOf(zeroWidth, s[p]) {
n++
}
}
for p < len(s) && unicode.IsOneOf(zeroWidth, s[p]) {
p++
}
return s[:p]
}
func getSuffixGlyphs(s []rune, num int) []rune {
p := len(s)
for n := 0; n < num && p > 0; p-- {
// speed up the common case
if s[p-1] < 127 {
n++
continue
}
if !unicode.IsOneOf(zeroWidth, s[p-1]) {
n++
}
}
return s[p:]
}
|
// src/ui/components/AnswerView.tsx
import React from 'react';
import {ColorValue, StyleSheet, Text} from 'react-native';
import fonts from '../../styles/fonts';
import {Question} from '../../types/model-types';
import Card from './Card';
type Props = {
index: number;
question: Question;
};
/**
* This component is used to present how the user has answered a particular question.
* <br><br>
* If the answer is correct, the background would be in green color.
* If the answer is incorrect, the background will be in crimson color.
* If the user hasn't answered the question, the background will be in dark orange color.
*
* @param index The question number. Will be incremented by 1 upon display.
* @param question The question to be displayed.
*/
export default function AnswerView({index, question}: Props) {
let backgroundColor: ColorValue;
if (question.given_answer === question.correct_answer) {
backgroundColor = 'green';
} else if (question.given_answer === '') {
backgroundColor = 'darkorange';
} else {
backgroundColor = 'crimson';
}
return (
<Card style={[styles.container, {backgroundColor: backgroundColor}]}>
<Text style={styles.questionNumber}>Question no. {index + 1}</Text>
<Text style={styles.question}>{question.text}</Text>
<Text style={styles.answerLabel}>Correct answer</Text>
<Text style={styles.answerText}>{question.correct_answer}</Text>
<Text style={styles.answerLabel}>Your answer</Text>
<Text style={styles.answerText}>
{question.given_answer.length > 0 ? question.given_answer : '-'}
</Text>
</Card>
);
}
const styles = StyleSheet.create({
container: {
marginHorizontal: 16,
marginBottom: 24,
},
questionNumber: {
fontFamily: fonts.light,
fontSize: 12,
color: 'whitesmoke',
},
question: {
paddingTop: 4,
paddingBottom: 8,
fontFamily: fonts.medium,
fontSize: 24,
color: 'white',
},
answerLabel: {
fontFamily: fonts.medium,
fontSize: 12,
color: 'lavender',
paddingTop: 16,
},
answerText: {
fontFamily: fonts.medium,
fontSize: 16,
color: 'white',
paddingTop: 4,
},
});
|
Law, Religion and Theology. This is a valuable study, developed from a Harvard doctorate. The author provides an overview of biblical sources (legal and narrative) and consistently compares them with sources from the ANE (going well beyond the law collections). She sees the biblical traditions as largely independent and stresses the diversity to be found also within the ANE. Biblical law in this context is based upon family solidarity (the blood feud being a decentralized form of legal regulation), as contrasted with the greater central control found in Mesopotamia. She is sceptical of developmental interpretations of the relationship between the principal homicide passages in the Covenant Code (Exod. 21.12-14), Deuteronomy 19 and Numbers 35, and stresses the differences in theological outlook, particularly between the latter two. She considers, in particular, their respective accounts of places of refuge, the role of pollution, and distinctions as regards the mental state of the offender. An opening chapter sets the scene by discussing the case of Cain and Abel, and the book concludes with chapters devoted to the lex talionis and homicide of a foreign citizen (as found in ANE documents). On a number of points, this reviewer takes a different view (see further in the Zeitschrift für altorientalische und biblische Rechtsgeschichte 2006). Future students of this topic will, however, profit from this presentation. B.S. JACKSON
THE MAGNITUDE OF PRESCRIBED ANTIBIOTICS IN A PEDIATRIC EMERGENCY DEPARTMENT IN BASRA HOSPITAL FOR MATERNITY AND CHILDREN. Introduction: Antimicrobial agents are commonly used in paediatric patients. The emergency department is a good place to study antibiotic prescribing patterns, given its frequent use for diseases whose treatment is split between private pharmacies and the hospital. The irrational use and overuse of antibiotics in recent decades has not followed international guidelines and infection-control strategies; if not controlled or minimized, it will lead to higher rates of mortality in human societies. The aim was to determine the magnitude of antibiotic prescribing in the paediatric internal emergency department at Basra Hospital for Maternity and Children and to show its relationship with misuse. Methods: The study was performed on 560 paediatric patients aged 1 month to 13 years seen in the emergency department of Basra Hospital for Maternity and Children during the 4 months from December 2017 to March 2018. These cases were divided according to whether the prescription contained an antibiotic and by several parameters such as patient diagnosis and the number of antibiotics prescribed. Results: A total of 61% (n=342) of patients were male. The average number of antibiotics per patient was 1.45. Prescriptions that did not contain antibiotics accounted for 28.57% (n=160), and prescriptions containing antibiotics for 71.43% (n=400) of the total prescribed. The most frequent diagnoses for which antibiotics were prescribed were gastroenteritis (24%), bronchiolitis (16.25%) and pneumonia (11.75%). Conclusion: The dispensing of antibiotics does not follow consistent or international guidelines, which will cause problems such as resistance as well as economic harm. Nearly all admitted patients received antibiotics regardless of culture results.
The IT industry has never been as popular as it is today, given that our day-to-day lives involve computers, mobile phones, tablets and gaming consoles to keep our schedules on track, complete work or study assignments or just to keep us busy for a few minutes. The number of certified IT professionals who have undertaken Microsoft MCSA online courses has increased by an astounding amount over the last 15 years, but there are still those that deem it unnecessary to undertake studies in IT before applying for a position, as they expect training to be delivered via their company.
Consider then that, after studying source code and writing payroll programs, Microsoft founder, Bill Gates, started a small shop called “Traf-O-Data” which designed a computer used by the city of Seattle to count traffic flow. Currently, Microsoft’s latest operating system, Windows 10, has been installed on over 110 million devices. This speaks volumes for the Microsoft MCSA online courses and gaining the necessary knowledge when attempting to enter a trade.
Microsoft Certified Solutions Associate (MCSA) overview
Microsoft Certified Solutions Associate is a group of Microsoft MCSA online courses specifically designed to guide individuals that are just stepping into the IT field towards a meaningful and reliable certification and is a prerequisite to gaining your MCSE (Microsoft Certified Solutions Expert) certification.
The Microsoft MCSA online courses have helped many IT professionals get their careers underway and continue to do so to this day. The mere fact that you have an MCSA certification on your CV will boost your chances of being considered for a position in IT, given that it is a well-known and proven certification which is awarded by the biggest IT organisation in the world and will show that you truly have the passion that potential employers look for in a candidate.
List of Microsoft MCSA Online Courses
Below you will find a complete list of all the Microsoft MCSA online courses available and their accompanying exams.
MCSA: Windows Server 2012
The MCSA: Windows Server 2012 course will give students a firm understanding of databases, servers and networking using the Windows Server 2012 operating system.
The exams that need to be passed to gain this qualification are:
• 70-410: Installing and Configuring Windows Server 2012
• 70-411: Administering Windows Server 2012
• 70-412: Configuring Advanced Windows Server 2012 Services
Gaining this qualification will ensure that you have the necessary skills to confidently apply for positions like Computer Systems Administrator, Computer Network Specialist, Systems Engineer or IT Support Analyst.
MCSA: Windows Server 2008
Designed to increase the reliability and flexibility of server infrastructure, this course teaches you how to deploy and oversee the Windows Server 2008 operating system. The MCSA: Windows Server 2008 will ready you to apply for positions such as IT Technician, Network Administrator, Desktop Support Technician or Network Manager.
You will need to pass the following exams in order to complete your online MCSA certification:
• 70-640: Windows Server 2008 Active Directory, Configuring
• 70-642: Windows Server 2008 Network Infrastructure, Configuring
• 70-646: Windows Server 2008 Server Administrator
MCSA: Windows 10
This certification will teach you to install, configure and manage the Windows 10 operating system. Upon completion of this course and its exam, you will qualify for positions such as Computer Support Specialist, Technical Support Engineer and Desktop Support Analyst.
For this course, the following exam must be passed to become MCSA certified:
• 70-697: Configuring Windows Devices
MCSA: Windows 8
Anyone with an interest in becoming a Computer Support Specialist, IT Manager or Support Specialist will find much to gain from the MCSA: Windows 8 course. It will teach you how to install Windows 8, resolve any issues that may occur and troubleshoot problems with network connections.
The two required exams are as follows:
• 70-687: Configuring Windows 8.1
• 70-688: Supporting Windows 8.1
MCSA: SQL Server 2012
This course will perfectly suit aspiring Database Developers or Database Analysts, as it teaches the installation, configuration and maintenance of SQL (Structured Query Language) server services as well as the management and configuration of databases and their security.
To complete this Microsoft MCSA online course, the following exams must be undertaken and passed:
• 70-461: Querying Microsoft SQL Server 2012
• 70-462: Administering Microsoft SQL Server 2012 Databases
• 70-463: Implementing a Data Warehouse with Microsoft SQL Server 2012
MCSA: Office 365
Software as a Service (SaaS) Administrator, Cloud Applications Administrator or Software Administrator are some of the positions you will be able to apply for upon completion of the MCSA: Office 365 course.
The MCSA: Office 365 exams are:
• 70-346: Managing Office 365 Identities and Requirements
• 70-347: Enabling Office 365 Services
MCSA: Linux on Azure
To gain the MCSA: Linux on Azure qualification, students will need to pass the accompanying exams, namely:
• 70-533: Implementing Microsoft Azure Infrastructure Solutions
• LFCS: Linux Foundation Certified System Administrator
After completion of this course, you will be able to design cloud-based Linux solutions using features offered by Microsoft Azure, as well as proving your capabilities in Linux system administration, opening doors to careers like Linux System Administrator or IT Cloud Solutions Consultant.
Writing my MCSA certification exam
Once you have completed your Microsoft MCSA online courses and passed the relevant exams, you will become officially MCSA certified and be well on your way to an exciting career in the field of IT. The Microsoft MCSA exams can, however, only be booked and taken through Pearson VUE or Prometric, who will help with the scheduling of your exam, as well as providing any other information you may need regarding the writing of your MCSA exams.
After finishing your Microsoft MCSA online courses and gaining your certification, you will have the option of upgrading to a MCSE (Microsoft Certified Solutions Expert) certification which focuses more on the creation, implementation and security of networks and preparing students for roles like Systems Engineer, whereas the Microsoft MCSA online courses deal with the maintenance of those networks once they are in use. This process is explained in our article explaining MCSE courses. |
Author Matt Elliot has taken a slice of Whanganui history and created a gripping tale for young readers.
Matt Elliot has taken a small slice of Whanganui history and skilfully turned it into a book for young readers.
When ocean liner RMS Lusitania sank off the Irish coast in May 1915 after it was torpedoed by a German U-boat, it triggered anti-German sentiment around the world.
The deaths of 1198 British, Canadian and American civilians provoked anger in Whanganui and triggered an incident that made newspaper headlines.
A mob attacked the shop of Whanganui pork butcher Conrad Heinold on May 14, 1915, taking exception to his nationality even though he had lived in Whanganui for many years.
A young boy was wrongly accused of starting the riot and Elliot has taken details from the newspaper reports of the day as the basis for his book Night of the Riot.
Elliot's central character is 12-year-old "Snow" Goodison who works for Mr Schmidt the butcher.
This adult reader was highly impressed with Elliot's attention to detail and his ability to re-create the Whanganui of 100 years ago.
But what does the target audience think?
"Night of the Riot is very interesting and it definitely makes for a different reading experience when you know the area in which it's set well.
"Also discovering it was based on a true story gave me a shock as I never knew that the sinking of the Lusitania had quite such an effect on Whanganui and its people.
"I particularly enjoyed learning some of the history involved and whenever I go down Victoria Ave I try to guess where Mr Schmidt's shop stood.
"In all, I really liked Night of the Riot and I would absolutely recommend it."
The addition of a beautifully drawn map of central Whanganui circa 1915 by Melissa Elliot at the front of the book is a real bonus for the reader.
Elliot is the author of more than a dozen books and was the 2012 NZ Post Children's Book Awards Book of the Year winner with Nice Day for a War: Adventures of a Kiwi soldier in WWI. |
Fifty years after the signing of the landmark Immigration and Naturalization Act, a total of 59 million people have migrated to the United States, according to a new report.
Before 1965, immigrants coming to American shores had been primarily European. The legislation, also called the Hart-Celler Act, ended the former system of placing quotas on immigrants by national origin, instead prioritizing skilled workers and family members.
Today, one in five immigrants in the world reside in the United States, according to the Pew Research Center report released Monday. Those immigrants and their children have contributed an estimated 55% of the country’s population growth during that time; the U.S. population currently stands at almost 322 million. By 2065, nearly 20% of people in the country will have been born outside of American borders.
In 1965, 84% of Americans were non-Hispanic whites, 4% were Hispanic, and less than 1% were Asian. In 2015, the numbers are astonishingly different: 62% of Americans are white, 18% of Americans are Hispanic, and Asians count as 6% of the populace.
The most striking transformation in immigrant makeup has been within the Hispanic community, which has seen a drop in unskilled Mexican immigrants. While 35% of the 59 million immigrants in the past 50 years has come from Mexico, South and Central American immigrant populations are now booming in the U.S.
Much of this has to do with the Great Recession, says Mark Hugo Lopez, director of Hispanic research at the Pew Research Center. “Many Mexican immigrants are unskilled laborers,” he says. “Think of where that would work best: construction and the other parts of the housing market.” Lopez points to the mushrooming Mexican communities of Atlanta and Las Vegas, both cities with strong housing markets.
Since 2005, the U.S. has seen a downward slide in immigrant arrivals. “It’s partially because of the recession,” Lopez says. “It’s harder to cross the Mexican border itself. But it’s also because there are more people entering legally”—whether they come from countries like Venezuela, which has the highest-educated Hispanic population in the U.S., or the Asian triumvirate of China, India, and the Philippines. In other words, the 1965 act is doing its job: enticing highly skilled workers to come to America.
“Newly arrived immigrants aren’t coming in illegally because they don’t have to, and that’s a big economic change,” Lopez says. “We see Chinese people coming to pursue higher education, Indians in tech, and Filipinos [filling] medical careers.” And while that might seem like a stereotype, Lopez says that it all comes back to the 1965 law’s favoring highly educated immigrant populations.
Americans, however, can’t seem to make up their mind about what exactly they think about immigration. On the one hand, 45% say that immigration has made American society better, with 54% saying the immigration system in the U.S. needs to be addressed and an additional 28% going so far as to say it’s a broken system in need of total restructuring.
But 37% of respondents say immigration has made American society worse. A plurality see European and Asian immigration positively (44% and 47%, respectively). But Americans are a lot less enthusiastic about Latin American and Middle Eastern populations (with 37% and 39% of those surveyed expressing negativity); 50% of Americans are neutral when it comes to African immigrants. Lopez thinks the one-two punch of a post-9/11 environment combined with a recession marked Hispanics and Middle Eastern groups for negative perceptions.
Regardless, Lopez stresses that the act itself was not the origin of modern immigration—a number of factors worked to create the America we know today. “We use the 1965 law to start analysis, but it’s unclear whether the law itself [is the reason for change in American immigration patterns],” he says. |
Living From the Divine Ground: Meister Eckhart's Praxis of Detachment. Meister Eckhart's notion of detachment constitutes a dynamic and vital key concept that lies at the heart of and unlocks Eckhart's richly textured mysticism. Eckhart makes a valuable contribution to the contemporary discourse on mysticism by emphasizing the dialectical and unbreakable connection between "interiority" and "exteriority" and highlighting the transformative nature of detachment. Detachment, for Eckhart, is not a static concept, but is rather a dynamic apophatic, kenotic, and dialectical activity. Eckhart's notion of detachment, disclosing the "this-worldly" and egalitarian dimensions of his mysticism, teaches us what it means to be truly and authentically human vis-à-vis self, other, community, and the transcendent.
// iree/compiler/Dialect/Util/Analysis/DFX/Solver.h
// Copyright 2021 The IREE Authors
//
// Licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#ifndef IREE_COMPILER_DIALECT_UTIL_ANALYSIS_DFX_SOLVER_H_
#define IREE_COMPILER_DIALECT_UTIL_ANALYSIS_DFX_SOLVER_H_
#include "iree/compiler/Dialect/Util/Analysis/DFX/DepGraph.h"
#include "iree/compiler/Dialect/Util/Analysis/DFX/Element.h"
#include "iree/compiler/Dialect/Util/Analysis/DFX/State.h"
#include "iree/compiler/Dialect/Util/Analysis/Explorer.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/AsmState.h"
#include "mlir/Support/LLVM.h"
namespace mlir {
namespace iree_compiler {
namespace DFX {
// Fixed point iteration solver ("monotone framework").
// http://symbolaris.com/course/Compilers11/27-monframework.pdf
//
// Forked from the LLVM Attributor: llvm/Transforms/IPO/Attributor.h
// The attributor is an elegant and flexible piece of infra that is tied quite
// tightly to LLVM IR. Here we extract it and generalize it to work with MLIR's
// concepts of positional values, operations, and blocks. Unlike the Attributor
// the solver is only for performing analysis and does no manifestation. We may
// want to extend this to integrate into the MLIR folding framework, though.
//
// Good talks describing how the system works:
// https://www.youtube.com/watch?v=I4Iv-HefknA
// https://www.youtube.com/watch?v=CzWkc_JcfS0
//
// This initial fork is to unblock work that requires this framework. Ideally
// we'd upstream this into MLIR proper but there are some missing core
// interfaces that keeps it locked here for the moment: in particular we need
// tied operands (generalized view-like op interface), globals, and reference
// types. We also need a lot of tests :)
//
// NOTE: the solver state - like Explorer - assumes that IR will not be modified
// while it is in-use. Modifying the IR invalidates the state and may lead to
// crashes as pointer references into the IR structure are retained.
class Solver {
public:
// Creates a solver that uses |explorer| for walking the IR tree and
// |allocator| for transient allocations of abstract elements.
explicit Solver(Explorer &explorer, llvm::BumpPtrAllocator &allocator)
: explorer(explorer),
asmState(explorer.getAsmState()),
allocator(allocator),
depGraph(explorer.getAsmState()) {}
// The initialized explorer used for walking the IR.
Explorer &getExplorer() { return explorer; }
// Shared AsmState that can be used to efficiently print MLIR Values.
// If this is not used the entire module may need to be walked in order to
// get the name of a value each time it's printed. Nothing in this framework
// should do that.
AsmState &getAsmState() { return asmState; }
// An allocator whose lifetime is at least as long as the solver.
llvm::BumpPtrAllocator &getAllocator() { return allocator; }
// Returns the element of |ElementT| for |pos| and adds a dependency from
// |queryingElement| to the returned element with the given |resolution|.
template <typename ElementT>
const ElementT &getElementFor(const AbstractElement &queryingElement,
const Position &pos, Resolution resolution) {
return getOrCreateElementFor<ElementT>(pos, &queryingElement, resolution,
/*forceUpdate=*/false);
}
// Returns the element of |ElementT| for |pos| and adds a dependency from
// |queryingElement| to the returned element with the given |resolution|.
// If the element already exists and the solver is in the UPDATE phase it will
// be updated prior to returning as if another iteration had been performed.
template <typename ElementT>
const ElementT &getAndUpdateElementFor(const AbstractElement &queryingElement,
const Position &pos,
Resolution resolution) {
return getOrCreateElementFor<ElementT>(pos, &queryingElement, resolution,
/*forceUpdate=*/true);
}
// Returns the element of |ElementT| for |pos| and optionally adds a
// dependency from |queryingElement| to the returned element with the given
// |resolution|.
//
// Using this after the solver started running is restricted to only the
// solver itself. Initial seeding of elements can be done via this function.
//
// NOTE: |forceUpdate| is ignored in any stage other than the update stage.
template <typename ElementT>
const ElementT &getOrCreateElementFor(Position pos,
const AbstractElement *queryingElement,
Resolution resolution,
bool forceUpdate = false,
bool updateAfterInit = true) {
if (auto *elementPtr =
lookupElementFor<ElementT>(pos, queryingElement, resolution,
/*allowInvalidState=*/true)) {
if (forceUpdate && phase == Phase::UPDATE) {
updateElement(*elementPtr);
}
return *elementPtr;
}
// No matching element found: create one.
auto &element = ElementT::createForPosition(pos, *this);
registerElement(element);
// Avoid too many nested initializations to prevent a stack overflow.
static const int maxInitializationChainLength = 1024;
if (initializationChainLength > maxInitializationChainLength) {
element.getState().indicatePessimisticFixpoint();
return element;
}
// Bootstrap the new element with an initial update to propagate info.
{
++initializationChainLength;
element.initialize(*this);
--initializationChainLength;
}
// If this is queried after we've performed iteration we force the element
// to indicate pessimistic fixpoint immediately.
if (phase == Phase::DONE) {
element.getState().indicatePessimisticFixpoint();
return element;
}
// Allow seeded elements to declare dependencies that are preserved for
// use during fixed point iteration.
if (updateAfterInit) {
auto oldPhase = phase;
phase = Phase::UPDATE;
updateElement(element);
phase = oldPhase;
}
if (queryingElement && element.getState().isValidState()) {
recordDependence(element, const_cast<AbstractElement &>(*queryingElement),
resolution);
}
return element;
}
// Returns the element of |ElementT| for |pos|, creating it if it does not exist.
template <typename ElementT>
const ElementT &getOrCreateElementFor(const Position &pos) {
return getOrCreateElementFor<ElementT>(pos, /*queryingElement=*/nullptr,
Resolution::NONE);
}
// Returns the element of |ElementT| for |pos| if existing and valid.
// |queryingElement| can be nullptr to allow for lookups from outside of the
// solver system.
template <typename ElementT>
ElementT *lookupElementFor(const Position &pos,
const AbstractElement *queryingElement = nullptr,
Resolution resolution = Resolution::OPTIONAL,
bool allowInvalidState = false) {
static_assert(std::is_base_of<AbstractElement, ElementT>::value,
"cannot query an element with a type not derived from "
"'AbstractElement'");
// Looks up the abstract element of type ElementT and, if found, returns it
// after registering a dependence of queryingElement on the returned element.
auto *elementPtr = elementMap.lookup({&ElementT::ID, pos});
if (!elementPtr) return nullptr;
auto *element = static_cast<ElementT *>(elementPtr);
// Do not register a dependence on an element with an invalid state.
if (resolution != Resolution::NONE && queryingElement &&
element->getState().isValidState()) {
recordDependence(*element,
const_cast<AbstractElement &>(*queryingElement),
resolution);
}
// Return nullptr if this element has an invalid state.
if (!allowInvalidState && !element->getState().isValidState()) {
return nullptr;
}
return element;
}
// Explicitly record a dependence from |fromElement| to |toElement|,
// indicating that if |fromElement| changes |toElement| should be updated as
// well.
//
// This method should be used in conjunction with the `getElementFor` method
// and with the resolution enum passed to the method set to NONE. This can be
// beneficial to avoid false dependencies but it requires the users of
// `getElementFor` to explicitly record true dependencies through this method.
// The |resolution| flag indicates if the dependence is strictly necessary.
// That means for required dependences if |fromElement| changes to an invalid
// state |toElement| can be moved to a pessimistic fixpoint because it
// required information from |fromElement| but none are available anymore.
void recordDependence(const AbstractElement &fromElement,
const AbstractElement &toElement,
Resolution resolution);
// Introduces a new abstract element into the fixpoint analysis.
//
// Note that ownership of the element is given to the solver and the solver
// will invoke delete on destruction of the solver.
//
// Elements are identified by their IR position (ElementT::getPosition())
// and the address of their static member (see ElementT::ID).
template <typename ElementT>
ElementT ®isterElement(ElementT &element) {
static_assert(std::is_base_of<AbstractElement, ElementT>::value,
"cannot register an element with a type not derived from "
"'AbstractElement'!");
// Put the element in the lookup map structure and the container we use to
// keep track of all attributes.
const auto &pos = element.getPosition();
AbstractElement *&elementPtr = elementMap[{&ElementT::ID, pos}];
assert(!elementPtr && "element already in map!");
elementPtr = &element;
// Register element with the synthetic root only before we are done.
if (phase == Phase::SEEDING || phase == Phase::UPDATE) {
depGraph.syntheticRoot.deps.push_back(
DepGraphNode::DepTy(&element, unsigned(Resolution::REQUIRED)));
}
return element;
}
// Runs the solver until either it converges to a fixed point or exceeds the
// maximum iteration count. Returns success() if it converges in time.
LogicalResult run();
// Prints the constraint dependency graph to |os|.
void print(llvm::raw_ostream &os);
// Dumps a .dot of the constraint dependency graph to a file.
void dumpGraph();
protected:
friend DepGraph;
Explorer &explorer;
AsmState &asmState;
llvm::BumpPtrAllocator &allocator;
// This method will do fixpoint iteration until a fixpoint or the maximum
// iteration count is reached.
//
// If the maximum iteration count is reached this method will
// indicate pessimistic fixpoint on elements that transitively depend on
// elements that were still scheduled for an update.
LogicalResult runTillFixpoint();
// Runs update on |element| and tracks the dependencies queried while doing
// so. Also adjusts the state if we know further updates are not necessary.
ChangeStatus updateElement(AbstractElement &element);
// Remembers the dependences on the top of the dependence stack such that they
// may trigger further updates.
void rememberDependences();
// Maximum number of fixed point iterations or None for default.
Optional<unsigned> maxFixpointIterations;
// A flag that indicates which stage of the process we are in.
enum class Phase {
// Initial elements are being registered to seed the graph.
SEEDING,
// Fixed point iteration is running.
UPDATE,
// Iteration has completed; this does not indicate whether it converged.
DONE,
} phase = Phase::SEEDING;
// The current initialization chain length. Tracked to avoid stack overflows
// during recursive initialization.
unsigned initializationChainLength = 0;
using ElementMapKeyTy = std::pair<const char *, Position>;
DenseMap<ElementMapKeyTy, AbstractElement *> elementMap;
// Element dependency graph indicating the resolution constraints across
// elements.
DepGraph depGraph;
// Information about a dependence:
// If fromElement is changed toElement needs to be updated as well.
struct DepInfo {
const AbstractElement *fromElement;
const AbstractElement *toElement;
Resolution resolution;
};
// The dependence stack is used to track dependences during an
// `AbstractElement::update` call. As `AbstractElement::update` can be
// recursive we might have multiple vectors of dependences in here. The stack
// size should be adjusted according to the expected recursion depth and the
// inner dependence vector size to the expected number of dependences per
// abstract element. Since the inner vectors are actually allocated on the
// stack we can be generous with their size.
using DependenceVector = SmallVector<DepInfo, 8>;
SmallVector<DependenceVector *, 16> dependenceStack;
};
} // namespace DFX
} // namespace iree_compiler
} // namespace mlir
#endif // IREE_COMPILER_DIALECT_UTIL_ANALYSIS_DFX_SOLVER_H_
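For readers unfamiliar with the monotone-framework style of solver the header above references, the following Python sketch shows the bare worklist fixpoint loop. The element type, with an update() method returning whether its state changed and a set of dependents, is hypothetical; this is not the Solver API above, only the underlying iteration scheme.

# Generic worklist fixpoint iteration ("monotone framework") sketch.
# Hypothetical element objects: hashable, with update() -> bool (True when
# the abstract state changed) and a dependents collection.
from collections import deque

def run_to_fixpoint(elements, max_iterations=32):
    worklist = deque(elements)
    queued = set(worklist)
    iterations = 0
    while worklist:
        if iterations > max_iterations * len(elements):
            return False             # did not converge in time
        element = worklist.popleft()
        queued.discard(element)
        iterations += 1
        if element.update():         # state changed: dependents must re-run
            for dep in element.dependents:
                if dep not in queued:
                    worklist.append(dep)
                    queued.add(dep)
    return True                      # converged: no element changed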
|
import React, { ReactNode } from 'react'
import { FaFacebook, FaInstagram, FaLinkedin, FaTwitter, FaYoutube } from 'react-icons/fa'
import {
ComapnyLogo,
DeveloperLink,
FooterContainer,
FooterLink,
FooterLinksContainer,
FooterLinksItems,
FooterLinksTitle,
FooterLinksWrapper,
FooterWrapper,
SocialIconLink,
SocialMediaWrapper,
SociaMedia,
SocilaIcons,
WebsiteDeveloper,
WebsiteRights
} from './styles'
interface FooterProps {
children?: ReactNode
}
function Footer({ children }: FooterProps) {
return (
<FooterContainer>
<FooterWrapper>
<FooterLinksContainer>
<FooterLinksWrapper>
<FooterLinksItems>
<FooterLinksTitle>About Us</FooterLinksTitle>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
How it works
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Testimonial
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Careers
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Investors
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Terms of service
</FooterLink>
</FooterLinksItems>
<FooterLinksItems>
<FooterLinksTitle>Contact Us</FooterLinksTitle>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Contact
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Address
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Support
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Destination
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Sponsorships
</FooterLink>
</FooterLinksItems>
</FooterLinksWrapper>
{/* Second Column */}
<FooterLinksWrapper>
<FooterLinksItems>
<FooterLinksTitle>Social</FooterLinksTitle>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Youtube
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Facebook
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Instagram
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Linked In
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Twitter
</FooterLink>
</FooterLinksItems>
<FooterLinksItems>
<FooterLinksTitle>Services</FooterLinksTitle>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Service 1
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Service 2
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Service 3
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Service 4
</FooterLink>
<FooterLink
to={'home'}
smooth={true}
duration={500}
spy={true}
offset={-80}
>
Terms of service with some long text, but not long enough;
</FooterLink>
</FooterLinksItems>
</FooterLinksWrapper>
</FooterLinksContainer>
<SociaMedia>
<SocialMediaWrapper>
{/* TODO: add logic to redirect to home from different paths;
use a common anchor instead.
*/}
<ComapnyLogo
to="home"
smooth={true}
duration={500}
spy={true}
offset={-80}
>
ConsoliDados
</ComapnyLogo>
<WebsiteRights>ConsoliDados © {new Date().getFullYear()} All Rights Reserved </WebsiteRights>
<SocilaIcons>
<SocialIconLink href="/#" target="_blank" aria-label="Facebook"> <FaFacebook /> </SocialIconLink>
<SocialIconLink href="/#" target="_blank" aria-label="Instagram"> <FaInstagram /> </SocialIconLink>
<SocialIconLink href="/#" target="_blank" aria-label="YouTube"> <FaYoutube /> </SocialIconLink>
<SocialIconLink href="/#" target="_blank" aria-label="Twitter"> <FaTwitter /> </SocialIconLink>
<SocialIconLink href="/#" target="_blank" aria-label="LinkedIn"> <FaLinkedin /> </SocialIconLink>
</SocilaIcons >
</SocialMediaWrapper>
<WebsiteDeveloper>
Made with 💚 by: <DeveloperLink href="https://www.johnnycarreiro.com" target="_blank" aria-label="JohnnyCarreiro">
<NAME>
</DeveloperLink>
</WebsiteDeveloper>
</SociaMedia>
</FooterWrapper>
</FooterContainer>
)
}
export default Footer
|
RCMP are investigating human remains that were found on the shoreline of Lake Winnipegosis near Shoal River.
Police say they were discovered Tuesday at around 5 p.m. RCMP confirmed the presence of a deceased person and transported the body to Winnipeg for an autopsy.
“We are not in a position to confirm the identity of the remains at this time, pending further forensic examination and other investigative steps to ensure certainty,” said Sgt. Bert Paquet of the Manitoba RCMP.
The investigation is continuing and no further details have been released. |
// purenessscopeserver/FrameCore/CppUnit/Unit_MakePacket.cpp
#include "Unit_MakePacket.h"
#ifdef _CPPUNIT_TEST
CUnit_MakePacket::CUnit_MakePacket()
{
}
void CUnit_MakePacket::setUp(void)
{
m_pMakePacket = new CMakePacket();
}
void CUnit_MakePacket::tearDown(void)
{
delete m_pMakePacket;
m_pMakePacket = NULL;
}
void CUnit_MakePacket::Test_MakePacket(void)
{
bool blRet = false;
uint32 u4ConnectID = 1;
ACE_Time_Value tvNow = ACE_OS::gettimeofday();
ACE_Message_Block* pmb = App_MessageBlockManager::instance()->Create(10);
if (false == m_pMakePacket->PutSendErrorMessage(u4ConnectID, pmb, tvNow))
{
OUR_DEBUG((LM_INFO, "[Test_ControlListen]m_pMakePacket->PutSendErrorMessage() Error.\n"));
CPPUNIT_ASSERT_MESSAGE("[Test_ControlListen]m_pMakePacket->PutSendErrorMessage() Error.", true == blRet);
}
}
#endif
|
Coping in adult cystic fibrosis patients: association with anxiety and depression. Objective: Cystic fibrosis (CF) is an inherited and chronic disease. Coping with the disease becomes more important with increased life span. In this study, the relationship between coping strategies and anxiety/depression risk in adult CF patients is examined. Method: 30 adult CF patients (17 female, 13 male; mean age: 24±4) completed the Hospital Anxiety/Depression Scale and the Brief COPE Scale. 14 coping scores were calculated. Results: Acceptance (6.80±1.21) was the most preferred strategy and substance use (2.53±1.55) the least. 4 patients had an increased risk of anxiety, 4 an increased risk of depression, and 4 an increased risk of both. Patients at risk of anxiety and depression used behavioural disengagement more than the non-risk group. Active coping was significantly higher in patients without depression risk. The anxiety risk group had significantly higher coping with venting (Table I). Coping with instrumental support was significantly higher in the employed than in students and unemployed patients (p=0.041). Discussion: Psychological state affects the preferred coping method. Encouraging the use of adaptive coping strategies in adult CF patients is important.
World Bank president Paul Wolfowitz broke the rules and engaged in an actual conflict of interest when in 2005 he arranged for a rather generous salary boost for his girlfriend, Shaha Riza, a communications official at the Bank.
That’s the conclusion of a special panel of the Bank’s board of directors, which on Monday released its report on the Wolfowitz matter. This judgment was no surprise; the basics had been leaked days earlier. But the report presented more information that places Wolfowitz in a tough spot–for it suggests that he and Riza brazenly took advantage of the situation created by his appointment to the Bank to guarantee her a promotion and pay rise she had failed to obtain previously. And the question of the moment is the obvious one: can he survive?
According to Mr. [Xavier] Coll [vice president of human resources], he met with Mr. Wolfowitz and Ms. [Robin] Cleveland, Counselor to the President, on August 10, 2005, in preparation for a meeting on August 11 with Ms. Riza. During that meeting, Mr. Coll was told to stop consulting with the Bank’s General Counsel on this matter.
In retrospect, it’s clear there was the need for more legal advice, not less, about what to do about Riza, who could not continue to work at the Bank in a position under the supervision of Wolfowitz. Yet Wolfowitz kept the circle small. He has claimed it would have been a conflict of interest to involve the Bank’s general counsel–a contention rejected by the special panel. But even if Wolfowitz had been right about that, he could have sought another way for the human relations department to obtain appropriate legal guidance. He did not.
If this is so–if the Bank’s board believes Mr. Coll–it’s end of story. Had Wolfowitz indeed proceeded with a deal after he was warned it was “outside the rules”–a deal that was rather lucrative for his girlfriend–that ought to be a firing offense.
According to Mr. Coll, after he received the written August 11 instructions from Mr. Wolfowitz [dictating the terms of the Riza deal], he asked again whether he could consult with the Bank’s General Counsel, or anyone in the Bank’s Legal Department, and was told he could not.
This explains it. Riza was angry. She was mad (as the report notes) that she had to leave the Bank because her romantic partner was taking over. But she also harbored a grudge, believing, rightly or wrongly, that she had been the victim of discrimination at the Bank. (In a previous article, I explained how she was turned down for a promotion to a job for which she did not meet the minimum qualifications.) According to the panel’s report, it was Riza who came up with the specific terms of her reassignment. It seems she was trying to turn lemons into champagne–that is, using the opportunity to settle old scores and award herself the money she believed she deserved. And Wolfowitz went along with his gal-pal.
The report is clear: “The salary increase granted to Ms. Riza far exceeded an increase that would have been granted in accordance with the applicable Staff Rule.” The report notes that even had she received a promotion at that time, she could have expected a boost in her annual salary of between $5000 and $20,000–not the $47,000 Wolfowitz awarded her. The report also says that the agreement Wolfowitz arranged called for an annual salary increase more than twice the customary rate and that the automatic promotions awarded Riza in the deal violated the Bank’s rules.
The board of directors was scheduled to discuss the report with Wolfowitz on Tuesday evening. The issue is, what will the board do in response to the report? It can vote to reprimand or remove Wolfowitz. A reprimand might not be enough for many board members. But the board may not want to pull the trigger. It can issue a vote of no confidence, hoping Wolfowitz will resign. But does Wolfowitz want to put up a fight? Is the White House willing to stick with him, as it has done (so far) with Attorney General Alberto Gonzales? George W. Bush can be a stubborn fellow.
The report is a strong indictment of Wolfowitz. It shows he and his girlfriend tried to game the system in a way that could bring her (over the course of his tenure and beyond, thanks to a generous pension) millions of extra dollars. If Wolfowitz manages to stay on after the release of the report, it will be quite an accomplishment for the accountability’s-not-us Bush administration. |
Large-scale timing-driven rectilinear Steiner tree construction in presence of obstacles. In this paper, we provide a timing-driven rectilinear routing tree algorithm which applies top-down partitioning followed by bottom-up routing tree construction in the presence of obstacles. The objective is to simultaneously minimize the source-to-terminal delay and the total wirelength. First, a top-down partitioning method is used to divide the chip into four sub-regions according to the position of the source. Then, the terminals in each sub-region are connected by a fast sequential routing tree algorithm. The major steps of the routing algorithm include minimal spanning tree construction, invalid-edge pushing, and routing. Experiments show that the maximum source-to-terminal delay of the routing tree is improved by 74%. Compared to previous results, total wirelength is significantly reduced, by 34.7%.
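The abstract names minimal spanning tree construction as one of the major steps. As a point of reference only, here is a short Python sketch of a rectilinear (Manhattan-metric) MST built with Prim's algorithm; it ignores obstacles and the timing objective, so it is not the authors' algorithm.

# Minimal rectilinear MST via Prim's algorithm: edge weights are Manhattan
# distances between terminals. Ignores obstacles and delay.
import heapq

def manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

def rectilinear_mst(points):
    n = len(points)
    in_tree = [False] * n
    heap = [(0, 0, -1)]              # (weight, node index, parent index)
    edges, total = [], 0
    while heap:
        w, u, p = heapq.heappop(heap)
        if in_tree[u]:
            continue                 # lazy deletion of stale heap entries
        in_tree[u] = True
        if p >= 0:
            edges.append((p, u))
            total += w
        for v in range(n):
            if not in_tree[v]:
                heapq.heappush(heap, (manhattan(points[u], points[v]), v, u))
    return edges, total

terminals = [(0, 0), (4, 1), (2, 5), (6, 6)]
print(rectilinear_mst(terminals))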
The present invention relates to combustion systems, and more particularly relates to heat exchangers for combustion systems.
Combustion systems, such as combustion furnaces, generate sounds which, depending on the use and environment of the combustion system, may be unacceptable or unpleasant. The sound level generated by a particular combustion system generally depends on the turbulence of the combustion fluids at the source of combustion. In addition, these sounds may interact with structural components of the combustion system which acoustically amplify the sound. Normally, in combustion furnaces this sound level is reduced to an acceptable level by adjusting the flow of the combustion fluids to maintain a substantially non-turbulent flow at the combustion source, and by arranging the heat exchanger assembly, furnace cabinet, and other such components to minimize acoustic amplification. However, in certain situations it is not feasible or desirable to reduce the sound level by using these conventional techniques. Also, it may be desirable to reduce the sound level to a level lower than that which may be attained by using these conventional techniques. For example, in an induced draft combustion furnace having compact, side by side heat exchangers with monoport, inshot burners, it is not desirable to make burner modifications which may decrease the efficiency of the furnace and it is not desirable to make other furnace component modifications which may increase the size and/or bulkiness of the furnace.
/**
 * Removes the final modifier from a Field object via reflection.
 *
 * Note: this relies on java.lang.reflect.Field's private "modifiers" field,
 * an approach that recent JDKs (12+) block; it is kept here as in the source.
 *
 * @param field Field to remove the modifier from.
 */
public static void removeFinal(Field field) {
    try {
        // Grab Field's own private int "modifiers" and clear the FINAL bit.
        Field modifiers = Field.class.getDeclaredField("modifiers");
        modifiers.setAccessible(true);
        modifiers.setInt(field, field.getModifiers() & ~Modifier.FINAL);
    } catch (NoSuchFieldException | IllegalAccessException e) {
        e.printStackTrace();
    }
} |
A general method to synthesize and sinter bulk ceramics in seconds Speedy ceramic sintering Synthesizing ceramics can require heating for long times at high temperatures, making high-throughput materials screening challenging. C. Wang et al. developed a new ceramic-sintering technique that uses resistive heating of thin carbon strips to ramp the temperature up and down quickly. This method allows for the quick synthesis of a wide variety of ceramics while mitigating the loss of volatile elements. Ultrafast sintering is ideal for synthesizing many compositions to screen for ideal properties for a variety of applications, including the development of new solid-state electrolytes. Science, this issue p. 521 A resistive heating method can sinter ceramics in seconds, allowing for high-throughput materials screening. Ceramics are an important class of materials with widespread applications because of their high thermal, mechanical, and chemical stability. Computational predictions based on first-principles methods can be a valuable tool in accelerating materials discovery to develop improved ceramics, but it is essential to confirm the predicted material properties experimentally. However, materials screening rates are limited by the long processing times and the poor compositional control from volatile element loss in conventional ceramic sintering techniques. To overcome these limitations, we developed an ultrafast high-temperature sintering (UHS) process for the fabrication of ceramic materials by radiative heating under an inert atmosphere. We provide several examples of the UHS process to demonstrate its potential utility and applications, including advancements in solid-state electrolytes, multicomponent structures, and high-throughput materials screening. |
How Do Spatial Learning and Memory Occur in the Brain? Coordinated Learning of Entorhinal Grid Cells and Hippocampal Place Cells Spatial learning and memory are important for navigation and formation of episodic memories. The hippocampus and medial entorhinal cortex (MEC) are key brain areas for spatial learning and memory. Place cells in hippocampus fire whenever an animal is located in a specific region in the environment. Grid cells in the superficial layers of MEC provide inputs to place cells and exhibit remarkable regular hexagonal spatial firing patterns. They also exhibit a gradient of spatial scales along the dorsoventral axis of the MEC, with neighboring cells at a given dorsoventral location having different spatial phases. A neural model shows how a hierarchy of self-organizing maps, each obeying the same laws, responds to realistic rat trajectories by learning grid cells with hexagonal grid firing fields of multiple spatial scales and place cells with unimodal firing fields that fit neurophysiological data about their development in juvenile rats. The hippocampal place fields represent much larger spaces than the grid cells to support navigational behaviors. Both the entorhinal and hippocampal self-organizing maps amplify and learn to categorize the most energetic and frequent co-occurrences of their inputs. Top-down attentional mechanisms from hippocampus to MEC help to dynamically stabilize these spatial memories in both the model and neurophysiological data. Spatial learning through MEC to hippocampus occurs in parallel with temporal learning through lateral entorhinal cortex to hippocampus. These homologous spatial and temporal representations illustrate a kind of neural relativity that may provide a substrate for episodic learning and memory. |
The role of muscle tissue in the pathogenesis of chronic heart failure: the potential of exposure (FORMA study) Aim. To determine whether the skeletal muscle of patients with chronic heart failure (CHF) retains the ability to regenerate and grow, and to compare the effectiveness of long aerobic training calculated by an individualized method with conventionally calculated training (based on VO2 peak values), in relation to the severity of heart failure, exercise tolerance (ET), and ergoreflex activity (ERGO). Material and methods. The study included 297 patients with stable functional class (FC) III CHF receiving optimal therapy. Heart failure had been diagnosed in all patients at least 6 months before the start of the study (age 18-65 years, body mass index (BMI) 19-28 kg/m2). At baseline, a cardiorespiratory test (CRT) was performed with assessment of the gas composition and acid-base balance of the blood and of ERGO activity. Patients were randomized into two groups: experimental (EG) and control (CG). For the EG, the training walking mode was based on determination of the lactate threshold (LT); after 1 and 3 months the CRT was repeated and the mode was dynamically recalculated according to the new LT level. For the CG, the training walking mode was calculated from VO2 peak values. All patients trained for 6 months. At the end of the training, a diagnostic CRT was performed and ERGO activity was evaluated. Before the start of the training, eleven patients with CHF and 3 healthy donors underwent a biopsy of the gastrocnemius muscle. Results. The potential for muscle differentiation of satellite skeletal muscle precursor cells obtained from patients with CHF with reduced ejection fraction (HFrEF) did not differ in vitro from that of satellite cells of healthy donors. After 6 months of training, the severity of CHF decreased to FC II in 75% of EG patients versus 44% of CG patients; the main indicators of the activation of compensatory mechanisms during physical exertion (VO2 LT and VO2 peak) increased more in the EG than in the CG (10.8±0.4 and 18.7±0.7 ml/min/kg in the EG versus 9.5±0.8 and 15.3±0.9 ml/min/kg in the CG; p1<0.01, p2<0.05, p3<0.01, respectively). Conclusion. In vitro, the potential for muscle differentiation, regeneration and growth of satellite skeletal muscle precursor cells obtained from patients with HFrEF does not differ from that of satellite cells of healthy donors. With respect to safety, aerobic training in patients with FC III CHF calculated from the LT is not inferior to training calculated from the level of VO2 peak. Compared with the usual training walking mode, LT-based aerobic training significantly reduces ergoreflex activity, increases ET, and reduces the severity of CHF. In patients with FC III CHF, training walking for more than 1.5 hours/day at an intensity determined by the LT contributes to physiological reverse myocardial remodeling to a greater extent than aerobic training calculated by the conventional method. Conflict of interest: nothing to declare.
The prevalence of heart failure (HF) in the Russian Federation has reached epidemic proportions. Evidence-based medicine has produced effective methods to combat this pathology, including the basic medications: angiotensin-converting enzyme inhibitors (ACE inhibitors), angiotensin II receptor antagonists (ARA II), angiotensin receptor-neprilysin inhibitors (ARNi), beta-blockers (BB), and mineralocorticoid receptor antagonists (MCRA). However, to date it has not been possible to stop the rapid increase in the number of rehospitalizations due to decompensated HF, which places a significant burden on national economies. Despite the inhibitory effect of BB, ACE inhibitors, ARA II, ARNi and MCRA, neurohumoral activation in HF is increased due to continuous peripheral afferent stimulation (enhanced ergoreflex activity). One of the possible points of application for HF stabilization is striated muscle tissue. Stimulation of the molecular mechanisms of skeletal muscle regeneration, including by physical rehabilitation, is a promising strategy to reduce muscle dysfunction. It is therefore relevant to determine whether the skeletal muscles of HF patients retain their ability to regenerate and grow; no data from such studies were found. Like any organ or tissue in HF, skeletal muscle suffers from a lack of oxygen and nutrients. 
There are, however, important differences: muscle tissue is the largest organ by mass in humans, accounting for 40-45% of body weight, and muscles have a special feedback system called the "ergoreflex". Between the skeletal muscles on the one hand and the vasomotor and respiratory centers on the other, there are neurogenic connections mediated by ergoreceptors. Ergoreceptors are myelinated and non-myelinated afferent nerve fibers in the skeletal muscles, sensitive to all mechanical and metabolic changes in muscle fibers. Ergoreceptors play a major role in feedback control, maintaining a balance between muscle load intensity and the energy available for it. The ergoreflex is a defensive mechanism of the body in response to metabolite accumulation in muscle fiber, aimed at removing metabolites and enhancing aerobic oxidation. In response to the metabolic state of the muscle, ergoreceptors modulate the intensity of muscle perfusion and the cardiorespiratory response to physical activity in order to meet the metabolic needs of contracting muscles. Thus, ventilation increases and a number of circulatory changes occur due to enhanced sympathetic nervous system (SNS) activity: increased heart rate and blood pressure (BP) and constriction of the resistance vessels (Fig. 1). Skeletal muscle is therefore not only the largest organ by mass in the human body, but also an organ that controls the activity of the cardiovascular and pulmonary systems by means of the ergoreflex (Fig. 1). However, data on effective methods of influencing it are currently contradictory. The only, and most physiological, way to reduce ergoreflex activity is exercise training. Physical therapy (PT) in HF patients should be used to improve exercise tolerance and quality of life and to reduce the number of hospitalizations for decompensated HF. Individual selection of the type, duration and intensity of physical activity in HF patients is currently a pressing problem. The aims of the study were: 1) to determine whether the skeletal muscle of HF patients retains the ability to regenerate and grow; 2) to compare the effectiveness of individualized and conventional (VO2 peak-based) approaches to selecting the exercise mode, in relation to the severity of HF, exercise tolerance, and ergoreflex activity. Materials and methods. Gastrocnemius muscle biopsy and assessment of muscle-resident cells. Eleven HF patients (mean age 54±12.5 years, body mass index (BMI) 26.5±6.4 kg/m2, left ventricular ejection fraction (LVEF) 26.4±1.4%) and 3 healthy donors underwent gastrocnemius muscle biopsy. Primary muscle-resident cell cultures enriched in satellite cells were prepared according to standard methods. Geltrex-coated (Invitrogen, USA) culture dishes were prepared for 1.5 h in a CO2 incubator at +37°C in Dulbecco's Modified Eagle's Medium (DMEM) at a ratio of 1:100. The culture medium was changed every other day. Myogenic differentiation of cells was performed according to standard methods by culturing in a differentiation medium consisting of a basic culture medium (α-MEM) (PanEco, Russia) with the addition of 1% L-glutamine (Invitrogen, USA), 1% Penicillin-Streptomycin (Invitrogen, USA) and 2% horse serum (Gibco, USA). The primary medium was replaced with the differentiation medium when the culture reached a subconfluent state. 
For immunocytochemistry, the cells were washed with phosphate-buffered saline (PBS) and fixed with 4% paraformaldehyde at +4°C for 10-15 minutes, washed with PBS, incubated with 0.2% TRITON X-100 for 5 minutes, washed with PBS, and blocked with 15% fetal calf serum (Gibco, USA) in PBS for 30 minutes. Incubation with primary and secondary antibodies was performed according to the manufacturer's instructions (MF20 antibodies to the myosin heavy chain (MHC MF20), myogenic factor 5 (Myf5), mitofusin-1 (Mfn1), PAX transcription factors; R&D BioSystems, USA). Immunophenotyping was performed on a CytoFLEX flow cytometer (Beckman Coulter). Data were analyzed using CytExpert 2.0 software (Beckman Coulter). Isolation of ribonucleic acid (RNA), synthesis of complementary deoxyribonucleic acid (cDNA) and real-time polymerase chain reaction (PCR). Total RNA was isolated using ExtractRNA reagent (Evrogen, cat. no. BC032, Russia). cDNA was synthesized from 500 ng total RNA using a reverse transcription kit (Molove, SK021, Russia). Quantitative gene expression analysis was performed using qPCR-HS SYBR + ROX (Evrogen, cat. no. PK156, Russia). qPCR data are presented as arbitrary units of mRNA expression normalized to GAPDH expression and to expression levels in a reference sample. Statistical analysis was performed using GraphPad Prism 7 software. All data were analyzed in at least three biological replicates and are presented as mean±SEM. The safety and effectiveness of the different exercise methods were assessed as part of the FORMA study. A prospective, randomized study was performed in accordance with Good Clinical Practice guidelines and the principles of the Declaration of Helsinki; the study protocol was approved by the ethics committee of the Almazov National Medical Research Center. The inclusion criteria were: symptoms of FC III HF; stable clinical status for at least 2 weeks before inclusion in the study; age 18-65 years; body mass index (BMI) 19-28 kg/m2; signed informed consent; the ability to perform a cardiorespiratory test (CRT); LVEF <45%; administration of ACE inhibitors/ARA II/ARNi, BB, MCRA, and diuretics; patient education during hospitalization at the Almazov National Medical Research Center; follow-up monitoring by a cardiologist. Exclusion criteria were moderate and severe chronic obstructive pulmonary disease (COPD), myocardial infarction (MI), pulmonary embolism (PE), surgery within the past 6 months, severe cognitive disorders, and low treatment adherence. The endpoints of the study were changes in HF severity, exercise tolerance (VO2 peak), ergoreflex activity, and myocardial contractile function (LVEF, LV end-diastolic dimension (LV EDD), LV end-systolic dimension (LV ESD)). Clinical characteristics of patients. The study included 297 patients with stable FC III HF, established at least 6 months before the study. Patients were randomized into two groups: the experimental group (EG), 237 patients with FC III HF (age 18-65 years, BMI 19-28 kg/m2), and the control group (CG), 60 patients with HF (age 18-65 years, BMI 19-28 kg/m2). After 4-6 weeks of exercise, 55 EG patients on their own initiative gradually increased the duration of daily walking to 1.5-2 hours; this subgroup (EGlong) was allocated for additional analysis (Table 1). Therapy did not differ significantly between groups. The results of clinical and instrumental examinations are presented in Table 1. The study flow is presented in Table 2. 
Initially, the subjects underwent a submaximal CRT with simultaneous assessment of the gas composition and acid-base status of the blood (Table 2). For each EG patient, the walking exercise mode was determined from the CRT results based on lactate threshold (LT) determination; after 1 and 3 months the CRT was repeated, and the mode was recalculated on the basis of the newly obtained LT values (walking speed was set at 95% of the LT speed). Patients trained for 6 months. At the end of the exercise program, a diagnostic CRT was performed. CG patients walked at 55% of VO2 peak 3 times/week. Echocardiography was conducted using a Philips iE-33 system. One- and two-dimensional scanning modes were used to assess the transverse dimension of the left atrium (LA), EDD, ESD, and LVEF. The CRT was performed using a treadmill (GE Medical Systems Information Technologies) and the Oxycon Pro system (Jaeger, Germany). Venous blood lactate concentration at rest and during physical exertion. Before the CRT, a catheter was inserted into the ulnar vein. Blood sampling was carried out at baseline and every minute during exercise. Venous blood lactate concentration was measured with an i-STAT Portable Clinical Analyzer (Abbott, USA) using CG4 cartridge kits. The LT was recorded at the moment blood lactate concentration began to rise. (Abbreviations: BMI, body mass index; AF, atrial fibrillation; COPD, chronic obstructive pulmonary disease; CRT, cardiac resynchronization therapy; CABG, coronary artery bypass grafting; LVEF, left ventricular ejection fraction; CG, control group; EG, experimental group; EGpres, EG subgroup with preserved load during physical rehabilitation; EGlong, EG subgroup with long-lasting exercise.) Ergoreflex activity was assessed by post-exercise regional circulatory occlusion (PE-RCO). During the test, diastolic blood pressure (DBP) was measured, and ventilation and gas exchange rates were recorded. The differences in DBP, the carbon dioxide ventilatory equivalent (VE/VCO2) and minute ventilation (VE) between a three-minute occlusion (+PE-RCO) and a recovery period without occlusion (-PE-RCO) were calculated, and the percentage ratio of these values was estimated. Statistical analysis was performed using Statistica 6.0 software. All data were analyzed in at least three biological replicates and presented as mean±SEM. Comparison of mean values was performed using nonparametric statistics (Mann-Whitney U-test). The chi-squared test and the F-test were used for contingency tables. The significance level was p<0.05. Results. Examination of the stem cell population obtained by skeletal muscle biopsy. After isolation of cells and several days of in vitro expansion, we analyzed the expression of the surface markers CD56, CD105, CD166, CD146, CD73, CD140a and CD140b; CD45 was used as a negative control (Fig. 2). We showed that the vast majority of the isolated cells were CD56-positive (a marker of satellite cells) and CD45-negative (a marker of hematopoietic cells). We also found that a significant fraction of cells expressed the stromal markers CD105, CD166 and CD73, and only a small fraction was positive for CD146, CD140a and CD140b. The high expression of stromal markers in the population was most likely due to contamination of the satellite cell fraction with the stromal cell fraction of muscle tissue. 
Therefore, an immunocytochemical analysis of the obtained samples was carried out, which confirmed the expression of the satellite cell markers Pax7 and Myf5 (Fig. 3A). The results of the quantitative analysis of immunocytochemical staining and of the expression of mRNA markers of satellite cells and myoblasts are shown in Fig. 3. The level of mRNA expression of both Myf5 and Pax7 was high and did not differ significantly between samples from healthy donors and patients with HF. The percentage of Myf5+ and Pax7+ cells also did not differ significantly between the samples. Stimulation of differentiation showed that cells obtained from healthy donors and from HF patients have a similar potential for muscle differentiation in vitro. Fig. 4 shows the myotubes obtained after stimulation of muscle differentiation of satellite cell samples in vitro. The fusion coefficient did not differ significantly between the groups, amounting to 19±7% and 23±5% in the samples of healthy donors and HF patients, respectively. Comparison of the safety and effectiveness of conventional and individualized approaches to selecting the exercise mode. Of the 297 patients, 25 discontinued participation in the trial: 8 EG patients and 17 CG patients (p<0.05). The reasons were unwillingness to continue exercise (n=10), heart transplantation (n=6), non-HF hospitalization (n=4), and hospitalization due to decompensated HF after an upper respiratory tract infection (n=3). Thus, 229 EG and 43 CG patients completed the study. After the exercise program, EG patients showed a more pronounced decrease in ergoreflex activity than CG patients: DBP decreased by 40%, VE by 53% and VE/VCO2 by 38% in the EG, versus 21%, 23% and 15%, respectively, in the CG (p<0.05) (Table 3). Table 4 presents the echocardiographic changes in the studied patients before and after physical rehabilitation. In the EG, LV EDD, LV ESD, LVEF and left atrial dimension improved significantly. In the CG, there was a significant increase in LVEF, while LV EDD, LV ESD and left atrial dimension did not improve significantly. Against the background of long-lasting aerobic exercise, patients in the EGlong subgroup showed a significant decrease in the end-systolic and end-diastolic volumes of the LV and LA, as well as a more pronounced LVEF increase, compared with the EG subgroup with preserved load (EGpres) and the CG (Table 4). Discussion. In HF, systemic metabolic changes are accompanied by muscle wasting, which in turn causes deterioration in physical performance and quality of life [1, 2]. The aim of the first part of this project was to determine whether the skeletal muscle of HF patients retains the ability to regenerate and grow. The results demonstrated that striated muscle cells of patients with FC III HF do not differ significantly from cells obtained from healthy donors. They have a similar potential for muscle differentiation in vitro and show a high potential for restoration of muscle precursor cells. Thus, under favorable conditions, skeletal muscle satellite cells can contribute to the restoration of muscles injured by HF. The exact molecular mechanisms of skeletal muscle restoration in HF patients remain to be investigated. It is clear that novel therapeutic strategies should aim to activate the regenerative potential of satellite cells, which may be partially achieved by physical exercise. The results of applying different exercise modes are reflected in the second part of this study. 
In 2017, Russian recommendations on the prescription of physical training for patients with chronic heart failure were published. It was proposed to select the physical rehabilitation regime empirically, based on the six-minute walk test (6MWT) or VO2 peak. Nevertheless, 6MWT results largely depend on the motivation of the patient and the doctor, concomitant pathology and many other factors; a physical rehabilitation program based on the 6MWT can therefore be inaccurate. VO2 peak also depends strongly on the patient's motivation. Some aspects of determining the exercise regimen for HF patients remain open: there are no uniform principles for controlling adaptation to physical activity, and principles for planning effective, safe and personalized exercise have not been fully developed. In 2012, we proposed selecting the walking training mode based on LT determination. The advantage of this approach is the increased accuracy with which the reserves of adaptation to physical activity are determined. This method, in comparison with the previous ones, allows physical rehabilitation programs to be developed for any cardiovascular patient. This study demonstrated the safety and effectiveness of the present approach in FC III HF patients. Its use avoids fatigue and therefore allows longer exercise sessions to be prescribed. The described method makes it possible to increase the load gradually, based on LT re-determination. As a result, there is a greater decrease in ergoreflex activity in the EG, followed by a decrease in neurohumoral activation. Longer exercise duration can also increase the number of mitochondria and exercise tolerance compared with conventional approaches in which the time and load are strictly fixed. This is confirmed by the fact that in patients with LT-based exercise loads, tolerance increased more significantly, and in patients exercising >1.5 hours/day, reverse myocardial remodeling was observed. Limitations: a relatively small number of patients in the long-lasting exercise group and the multicenter design. Conclusions: 1) In vitro, the potential for muscle differentiation, regeneration and growth of satellite skeletal muscle precursor cells obtained from patients with HF with reduced EF does not differ from that of satellite cells of healthy donors. 2) The safety of aerobic exercise in patients with FC III HF prescribed on the basis of the LT is comparable to that of exercise prescribed by the level of VO2 peak. 3) Aerobic exercise in patients with FC III HF prescribed on the basis of the LT, compared with the conventional approach, significantly reduces ergoreflex activity, increases exercise tolerance and reduces HF severity. 4) In patients with FC III HF, walking training >1.5 hours/day at an intensity determined by the LT contributes to physiological reverse myocardial remodeling to a greater extent than aerobic exercise selected by the conventional method. Funding. This study was partially supported by Russian Science Foundation grant № 16-15-1017. Conflicts of interest: nothing to declare. |
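The ergoreflex quantification used above reduces to a difference and a percentage ratio between measurements taken with and without post-exercise occlusion. A minimal sketch, with illustrative variable names and sample values only:

def ergoreflex_contribution(with_occlusion, without_occlusion):
    """Absolute and percentage ergoreflex contribution for one index
    (e.g. DBP, VE or VE/VCO2) from +PE-RCO vs. -PE-RCO measurements."""
    diff = with_occlusion - without_occlusion
    percent = 100.0 * diff / without_occlusion
    return diff, percent

# e.g. minute ventilation (VE, L/min) after a 3-minute occlusion vs. free recovery
print(ergoreflex_contribution(with_occlusion=14.2, without_occlusion=11.0))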
__author__ = '<NAME> (Little Fish Solutions LTD)'
|
"""
Example: eager mode integrand
Demonstrates how to run a non-tensorflow integrand using VegasFlow
"""
from vegasflow import run_eager, vegas_wrapper
import time
import numpy as np
from scipy.special import expit
import tensorflow as tf
# Enable eager mode
run_eager(True)
# MC integration setup
dim = 4
ncalls = np.int32(1e5)
n_iter = 5
@tf.function
def symgauss_sigmoid(xarr, **kwargs):
"""symgauss test function"""
n_dim = xarr.shape[-1]
a = 0.1
pref = pow(1.0 / a / np.sqrt(np.pi), n_dim)
coef = np.sum(np.arange(1, 101))
    # TensorFlow tensors are cast down by numpy here;
    # their numpy representation can be accessed directly with .numpy()
xarr_sq = np.square((xarr - 1.0 / 2.0) / a)
coef += np.sum(xarr_sq, axis=1)
coef -= 100.0 * 101.0 / 2.0
return expit(xarr[:, 0].numpy()) * (pref * np.exp(-coef))
if __name__ == "__main__":
"""Testing several different integrations"""
print(f"VEGAS MC, ncalls={ncalls}:")
start = time.time()
ncalls = 10 * ncalls
r = vegas_wrapper(symgauss_sigmoid, dim, n_iter, ncalls, compilable=True)
end = time.time()
print(f"Vegas took: time (s): {end-start}")
|
Heavy metals such as cadmium, lead and mercury are non-essential elements for plants and are even hazardous to plant growth. They are regarded as heavy-metal pollutants with respect to plant growth and food safety. Excessive accumulation of these hazardous heavy metals in food allows them to enter the food chain and even threaten human health. According to the results of a quality and safety survey of rice in various regions of China in 2002 and 2003, conducted by the Quality Inspection and Supervision Center of Rice and Rice Products, Ministry of Agriculture, one of the quality and safety issues of rice is over-the-limit content of heavy metals such as cadmium and lead, with an over-the-limit rate of more than 10%. The "three industrial wastes" (waste gas, wastewater and solid waste), non-ferrous metal mining and sewage irrigation may lead to excessive amounts of toxic heavy metals in the soil and excessive absorption by plants, and are the main sources of heavy-metal accumulation in plants and crops. Therefore, strict limiting criteria for heavy metals in soil and foods (or grains) have been established in various countries. For example, in China the limit for cadmium in cereals is 0.2 mg/kg, for lead 0.2 mg/kg and for mercury 0.02 mg/kg.
Cadmium, lead and mercury accumulated in plant foods such as rice, barley and wheat are mainly absorbed by the roots from the soil and finally accumulate in the harvested parts after moving upward with transpiration. Research shows that the contents of cadmium, lead and mercury in the soil, and especially their effective (root-available) content, are the key factors governing root uptake of these metals from the soil. Hence, reducing and controlling the effective cadmium, lead and mercury content in the soil by various agronomic means has long been a key research subject worldwide. For instance, applying lime to acidic and slightly acidic soil raises the pH value of the soil, which markedly decreases the effectiveness of cadmium, lead and mercury in the soil and finally reduces the absorption of these heavy metals by the roots. However, the application of lime also has many side effects. For example, the rise in soil pH reduces not only these heavy metals but also the available contents of several essential microelements such as iron, manganese and zinc, easily resulting in nutrient deficiency in the crops and their failure to thrive. On the other hand, the considerable differences in pH values and pH buffer capacities between regions and soils make it very difficult to control the lime application rate accurately. As for paddy rice, inundation increases soil reducibility, raises the content of elements such as ferrous iron in the soil, and promotes the precipitation of cadmium, lead and mercury as sulfides; this decreases the soil effectiveness of these heavy metals and, together with a decrease in the roots' capacity to absorb these elements, greatly reduces the roots' uptake and accumulation of toxic heavy metals from the soil. But for the paddy rice itself, long-term inundation is not conducive to growth and tends to increase arsenic absorption in rice.
Adding a curing agent or adsorbent to the soil to bind, fix or adsorb heavy metals such as cadmium, lead and mercury is a key research direction worldwide. The curing agents and adsorbents reported mainly include zeolite, kieselguhr, sepiolite, bentonite and limestone, and even alkaline cinder, but no curing agent or adsorbent for heavy metals in soil has yet been produced and applied on a broad scale. The main reasons are that the existing materials are not environmentally friendly, their cost is high, and little effort has been made to popularize their practical value. |
Rep. Francis Rooney (R-Fla.) on Wednesday defended his calls for a “purge” at the FBI and claims that some agents are part of “the deep state.”
“It might be a pretty strong word. I’m not, maybe, the most nuanced political person in the world coming from a career in business, but I’m pretty frustrated by all the things that have come out,” Rooney said on CNN after video was shown of his earlier remarks.
Rooney on Tuesday called out the FBI, Department of Justice (DOJ) and special counsel Robert Mueller's investigation into possible collusion between the Trump campaign and Russia.
He was specifically critical of Peter Strzok, an FBI agent who worked on the investigation into Hillary Clinton’s use of a private email server during her time as secretary of State and was a member of Mueller’s team.
Strzok was dismissed from Mueller’s team after it was revealed he had sent anti-Trump text messages.
Rooney said Wednesday Strzok’s messages indicate a “lack of impartiality” and make the Florida lawmaker “nervous” as an American citizen.
“I think that’s going beyond just having political views. I hold the FBI and the Department of Justice in very high esteem,” Rooney said.
“I’m not saying it necessarily influenced the investigation but they certainly were trying to work to impede Donald Trump, that’s what the guy said in those emails,” he continued. “And I just don’t know that someone in the FBI and DOJ ought to be doing that kind of stuff with all the power and authority they have over American citizens.”
Republican lawmakers and President Trump have in recent weeks targeted the FBI over claims of bias, specifically targeting both Strzok and FBI Deputy Director Andrew McCabe.
This past weekend, Trump lashed out at FBI leadership, going after McCabe over donations that Democrats made to his wife’s political campaign.
Trump's attack Saturday came after a report that McCabe is retiring from the FBI amid mounting criticism from Republicans in Congress. |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 12:39:08 2019
@author: johnc
"""
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
# Register converter for date time
register_matplotlib_converters()
class WBTSG:
def __init__(self, file_path):
self.file_path = file_path
self.data = None
def data_load(self):
'''Purely loads the data into the correct format'''
with open(self.file_path, 'r') as f:
data = pd.read_csv(f)
data['wbt_date'] = pd.to_datetime(data['wbt_date'])
self.data = data
self.data['wbt_year'] = pd.DatetimeIndex(data['wbt_date']).year
self.data['wbt_month'] = pd.DatetimeIndex(data['wbt_date']).month
self.data['wbt_day'] = pd.DatetimeIndex(data['wbt_date']).day
    def data_groupby(self,
                     groups=('wbt_year', 'wbt_month')):
        '''Groups by the given columns, keeping each group's maximum'''
        self.data = self.data.groupby(list(groups)).max().reset_index()
data = WBTSG("../../../../../Data/singapore/wbt/main.csv")
data.data_load()
data.data_groupby()
wbt = data.data
plt.plot(data.data['wbt_date'][1:100], data.data['wet_bulb_temperature'][1:100])
plt.show() |
Mountain uplift explains differences in Palaeogene patterns of mammalian evolution and extinction between North America and Europe Patterns of late Palaeogene mammalian evolution appear to be very different between Eurasia and North America. Around the Eocene–Oligocene (E–O) transition global temperatures in the Northern Hemisphere plummet: following this, European mammal faunas undergo a profound extinction event (the Grande Coupure), while in North America they appear to pass through this temperature event unscathed. Here, we investigate the role of surface uplift in environmental change and mammalian evolution through the Palaeogene (66–23 Ma). Palaeogene regional surface uplift in North America caused large-scale reorganization of precipitation patterns, particularly in the continental interior, in accord with our combined stable isotope and ecometric data. Changes in mammalian faunas reflect that these were dry and high-elevation palaeoenvironments. The scenario of Middle to Late Eocene (50–37 Ma) surface uplift, together with decreasing precipitation in higher-altitude regions of western North America, explains the enigma of the apparent lack of large-scale mammal faunal change around the E–O transition that characterized western Europe. We suggest that North American mammalian faunas were already pre-adapted to cooler and drier conditions preceding the E–O boundary, resulting from the effects of a protracted history of surface uplift. |
// NewByVersionIDPayload builds a resource service ByVersionId endpoint payload.
func NewByVersionIDPayload(versionID uint) *resource.ByVersionIDPayload {
v := &resource.ByVersionIDPayload{}
v.VersionID = versionID
return v
} |
package io.github.coolcrabs.fernutil;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.HashMap;
import org.tinylog.Logger;
// Jank: an isolating loader whose parent skips the application class loader,
// while a handful of pinned classes stay shared with the host JVM so that
// callbacks and logging resolve to the same types on both sides.
class FUClassLoader extends URLClassLoader {
    // Classes shared with the host instead of being reloaded in isolation.
    static final HashMap<String, Class<?>> yeet = new HashMap<>();
static void c(Class<?> c) {
yeet.put(c.getName(), c);
}
static {
c(FernUtil.class);
c(FernUtil.LineNumbers.class);
c(FernUtil.JavadocProvider.class);
c(Logger.class);
}
FUClassLoader(URL[] classpath) {
super(classpath, ClassLoader.getSystemClassLoader().getParent());
}
@Override
protected Class<?> findClass(String name) throws ClassNotFoundException {
Class<?> c = yeet.get(name);
if (c != null) {
return c;
}
return super.findClass(name);
}
}
|
def cql_type_to_gemini(cql_type, is_frozen=False):
    """Converts a parsed CQL type (a plain string for simple types, or a
    nested token list for collections) into gemini's type description,
    propagating frozen-ness down through nested subtypes."""
    if isinstance(cql_type, str):
        return cql_type
    elif len(cql_type) == 1:
        return cql_type[0]
    else:
        is_frozen_type = is_frozen
        gemini_type = {}
token = cql_type.pop(0)
if isinstance(token, (list, tuple)):
return cql_type_to_gemini(token, is_frozen_type)
elif token == 'frozen':
return cql_type_to_gemini(cql_type.pop(0), True)
elif token == 'map':
subtypes = cql_type.pop(0)
gemini_type['key_type'] = cql_type_to_gemini(subtypes[0], is_frozen_type)
gemini_type['value_type'] = cql_type_to_gemini(subtypes[1], is_frozen_type)
elif token == 'list':
gemini_type['kind'] = 'list'
gemini_type['type'] = cql_type_to_gemini(cql_type.pop(0)[0], is_frozen_type)
elif token == 'set':
gemini_type['kind'] = 'set'
gemini_type['type'] = cql_type_to_gemini(cql_type.pop(0)[0], is_frozen_type)
elif token == 'tuple':
gemini_type['types'] = cql_type.pop(0)
gemini_type['frozen'] = is_frozen_type
return gemini_type |
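For illustration, here is how the converter above behaves on a few inputs, assuming the upstream parser hands it nested token lists of the shape consumed above (a plain string for simple types, and [token, [subtypes...]] sequences for collections); the expected outputs are shown as comments:

print(cql_type_to_gemini('text'))
# -> 'text'
print(cql_type_to_gemini(['list', [['int']]]))
# -> {'kind': 'list', 'type': 'int', 'frozen': False}
print(cql_type_to_gemini(['frozen', ['map', [['text'], ['int']]]]))
# -> {'key_type': 'text', 'value_type': 'int', 'frozen': True}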
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/android/context_menu/chrome_context_menu_populator.h"
#include "base/android/callback_android.h"
#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/strings/string_util.h"
#include "chrome/android/chrome_jni_headers/ChromeContextMenuPopulator_jni.h"
#include "chrome/browser/download/android/download_controller_base.h"
#include "chrome/browser/image_decoder/image_decoder.h"
#include "chrome/browser/ui/tab_contents/core_tab_helper.h"
#include "components/embedder_support/android/contextmenu/context_menu_builder.h"
#include "content/public/browser/context_menu_params.h"
#include "content/public/browser/render_frame_host.h"
#include "content/public/browser/web_contents.h"
#include "third_party/blink/public/common/associated_interfaces/associated_interface_provider.h"
#include "ui/gfx/android/java_bitmap.h"
#include "ui/gfx/geometry/size.h"
using base::android::JavaParamRef;
using base::android::JavaRef;
namespace {
class ContextMenuPopulatorImageRequest : public ImageDecoder::ImageRequest {
public:
static void Start(const JavaRef<jobject>& jcallback,
const std::vector<uint8_t>& thumbnail_data) {
auto* request = new ContextMenuPopulatorImageRequest(jcallback);
ImageDecoder::Start(request, thumbnail_data);
}
protected:
void OnImageDecoded(const SkBitmap& decoded_image) override {
base::android::RunObjectCallbackAndroid(
jcallback_, gfx::ConvertToJavaBitmap(&decoded_image));
delete this;
}
void OnDecodeImageFailed() override {
base::android::ScopedJavaLocalRef<jobject> j_bitmap;
base::android::RunObjectCallbackAndroid(jcallback_, j_bitmap);
delete this;
}
private:
explicit ContextMenuPopulatorImageRequest(const JavaRef<jobject>& jcallback)
: jcallback_(jcallback) {}
const base::android::ScopedJavaGlobalRef<jobject> jcallback_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ContextMenuPopulatorImageRequest);
};
chrome::mojom::ImageFormat ToChromeMojomImageFormat(int image_format) {
auto format = static_cast<ContextMenuImageFormat>(image_format);
switch (format) {
case ContextMenuImageFormat::JPEG:
return chrome::mojom::ImageFormat::JPEG;
case ContextMenuImageFormat::PNG:
return chrome::mojom::ImageFormat::PNG;
case ContextMenuImageFormat::ORIGINAL:
return chrome::mojom::ImageFormat::ORIGINAL;
}
NOTREACHED();
return chrome::mojom::ImageFormat::JPEG;
}
void OnRetrieveImageForShare(
mojo::AssociatedRemote<chrome::mojom::ChromeRenderFrame>
chrome_render_frame,
const JavaRef<jobject>& jcallback,
const std::vector<uint8_t>& thumbnail_data,
const gfx::Size& original_size,
const std::string& image_extension) {
JNIEnv* env = base::android::AttachCurrentThread();
auto j_data = base::android::ToJavaByteArray(env, thumbnail_data);
auto j_extension =
base::android::ConvertUTF8ToJavaString(env, image_extension);
base::android::RunObjectCallbackAndroid(
jcallback, Java_ChromeContextMenuPopulator_createImageCallbackResult(
env, j_data, j_extension));
}
void OnRetrieveImageForContextMenu(
mojo::AssociatedRemote<chrome::mojom::ChromeRenderFrame>
chrome_render_frame,
const JavaRef<jobject>& jcallback,
const std::vector<uint8_t>& thumbnail_data,
const gfx::Size& original_size,
const std::string& filename_extension) {
ContextMenuPopulatorImageRequest::Start(jcallback, thumbnail_data);
}
} // namespace
ChromeContextMenuPopulator::ChromeContextMenuPopulator(
content::WebContents* web_contents)
: web_contents_(web_contents) {}
void ChromeContextMenuPopulator::OnStartDownload(
JNIEnv* env,
const JavaParamRef<jobject>& obj,
const JavaParamRef<jobject>& jcontext_menu_params,
jboolean jis_link) {
std::string headers;
auto* context_menu_params =
context_menu::ContextMenuParamsFromJavaObject(jcontext_menu_params);
DownloadControllerBase::Get()->StartContextMenuDownload(
*context_menu_params, web_contents_, jis_link, headers);
}
void ChromeContextMenuPopulator::SearchForImage(
JNIEnv* env,
const JavaParamRef<jobject>& obj,
const JavaParamRef<jobject>& jrender_frame_host,
const JavaParamRef<jobject>& jcontext_menu_params) {
auto* render_frame_host =
content::RenderFrameHost::FromJavaRenderFrameHost(jrender_frame_host);
if (!render_frame_host)
return;
auto* context_menu_params =
context_menu::ContextMenuParamsFromJavaObject(jcontext_menu_params);
CoreTabHelper::FromWebContents(web_contents_)
->SearchByImageInNewTab(render_frame_host, context_menu_params->src_url);
}
void ChromeContextMenuPopulator::RetrieveImageForShare(
JNIEnv* env,
const JavaParamRef<jobject>& obj,
const JavaParamRef<jobject>& jrender_frame_host,
const JavaParamRef<jobject>& jcallback,
jint max_width_px,
jint max_height_px,
jint j_image_format) {
RetrieveImageInternal(env, base::BindOnce(&OnRetrieveImageForShare),
jrender_frame_host, jcallback, max_width_px,
max_height_px,
ToChromeMojomImageFormat(j_image_format));
}
void ChromeContextMenuPopulator::RetrieveImageForContextMenu(
JNIEnv* env,
const JavaParamRef<jobject>& obj,
const JavaParamRef<jobject>& jrender_frame_host,
const JavaParamRef<jobject>& jcallback,
jint max_width_px,
jint max_height_px) {
// For context menu, Image needs to be PNG for receiving transparency pixels.
RetrieveImageInternal(env, base::BindOnce(&OnRetrieveImageForContextMenu),
jrender_frame_host, jcallback, max_width_px,
max_height_px, chrome::mojom::ImageFormat::PNG);
}
void ChromeContextMenuPopulator::RetrieveImageInternal(
JNIEnv* env,
ImageRetrieveCallback retrieve_callback,
const JavaParamRef<jobject>& jrender_frame_host,
const JavaParamRef<jobject>& jcallback,
jint max_width_px,
jint max_height_px,
chrome::mojom::ImageFormat image_format) {
auto* render_frame_host =
content::RenderFrameHost::FromJavaRenderFrameHost(jrender_frame_host);
if (!render_frame_host)
return;
mojo::AssociatedRemote<chrome::mojom::ChromeRenderFrame> chrome_render_frame;
render_frame_host->GetRemoteAssociatedInterfaces()->GetInterface(
&chrome_render_frame);
// Bind the InterfacePtr into the callback so that it's kept alive
// until there's either a connection error or a response.
auto* thumbnail_capturer_proxy = chrome_render_frame.get();
thumbnail_capturer_proxy->RequestImageForContextNode(
max_width_px * max_height_px, gfx::Size(max_width_px, max_height_px),
image_format,
base::BindOnce(
std::move(retrieve_callback), base::Passed(&chrome_render_frame),
base::android::ScopedJavaGlobalRef<jobject>(env, jcallback)));
}
static jlong JNI_ChromeContextMenuPopulator_Init(
JNIEnv* env,
const JavaParamRef<jobject>& jweb_contents) {
if (jweb_contents.is_null())
return reinterpret_cast<intptr_t>(nullptr);
auto* web_contents = content::WebContents::FromJavaWebContents(jweb_contents);
DCHECK(web_contents);
return reinterpret_cast<intptr_t>(
new ChromeContextMenuPopulator(web_contents));
}
|
def _get_number(s, init=0):
    """Peels a leading run of digits off `s`, returning (rest, value)."""
    return _get_number(s[1:], init * 10 + int(s[0])) if len(s) > 0 and s[0].isdigit() else (s, init) |
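A few illustrative calls (expected results in comments): the helper threads the accumulated value through the tail recursion and stops at the first non-digit.

print(_get_number("123abc"))  # -> ('abc', 123)
print(_get_number("42"))      # -> ('', 42)
print(_get_number("abc"))     # -> ('abc', 0)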
/******************************************************************************
* Code generated with sympy 0.7.6 *
* *
* See http://www.sympy.org/ for more information. *
* *
* This file is part of 'project' *
******************************************************************************/
#include "index_dist_thumb_inter_side_0.h"
#include <math.h>
double index_dist_thumb_inter_side_0() {
double index_dist_thumb_inter_side_0_result;
index_dist_thumb_inter_side_0_result = 0;
return index_dist_thumb_inter_side_0_result;
}
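Files like the one above are emitted mechanically. As a rough sketch, sympy's codegen utility can produce an equivalent C source/header pair; the zero expression below is a stand-in for whatever distance expression the original project computed, and the call reflects sympy's public codegen API rather than the project's own tooling:

from sympy import Integer
from sympy.utilities.codegen import codegen

# Stand-in expression: the generated file above returns a constant zero.
expr = Integer(0)
[(c_name, c_code), (h_name, h_code)] = codegen(
    ("index_dist_thumb_inter_side_0", expr),
    language="C",
    project="project",
    header=True,
)
print(c_name)  # index_dist_thumb_inter_side_0.c
print(c_code)  # C source analogous to the listing above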
|
Evidence that a sequence similar to TAR is important for induction of the JC virus late promoter by human immunodeficiency virus type 1 Tat A specific RNA sequence located in the leader of all human immunodeficiency virus type 1 (HIV-1) mRNAs termed the transactivation response element, or TAR, is a primary target for induction of HIV-1 long terminal repeat activity by the HIV-1-derived trans-regulatory protein, Tat. Human neurotropic virus, JC virus (JCV), a causative agent of the degenerative demyelinating disease progressive multifocal leukoencephalopathy, contains sequences in the 5' end of the late RNA species with an extensive homology to HIV-1 TAR. In this study, we examined the possible role of the JCV-derived TAR-homologous sequence in Tat-mediated activation of the JCV late promoter (Tada et al., Proc. Natl. Acad. Sci. USA 87:3479-3483, 1990). Results from site-directed mutagenesis revealed that critical G residues required for the function of HIV-1 TAR that are conserved in the JCV TAR homolog play an important role in Tat activation of the JCV promoter. In addition, in vivo competition studies suggest that shared regulatory components mediate Tat activation of the JCV late and HIV-1 long terminal repeat promoters. Furthermore, we showed that the JCV-derived TAR sequence behaves in the same way as HIV-1 TAR in response to two distinct Tat mutants, one of which that has no ability to bind to HIV-1 TAR and another that lacks transcriptional activity on a responsive promoter. These results suggest that the TAR homolog of the JCV late promoter is responsive to HIV-1 Tat induction and thus may participate in the overall activation of the JCV late promoter mediated by this transactivation. |