Dataset Preview
import base64
import contextlib
import io
import json
import os.path as osp
import PIL.Image
from labelme import PY2
from labelme import QT4
from labelme import __version__
from labelme import utils
from labelme.logger import logger
PIL.Image.MAX_IMAGE_PIXELS = None
@contextlib.contextmanager
def open(name, mode):
assert mode in ["r", "w"]
if PY2:
mode += "b"
encoding = None
else:
encoding = "utf-8"
yield io.open(name, mode, encoding=encoding)
return
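# Usage sketch (not part of the original file, shown for illustration): the
# wrapper above yields a text-mode handle, UTF-8 encoded on Python 3:
#
#     with open("labels.json", "r") as f:
#         data = json.load(f)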
class LabelFileError(Exception):
pass
class LabelFile(object):
suffix = ".json"
def __init__(self, filename=None):
self.shapes = []
self.imagePath = None
self.imageData = None
long_description=LONG_DESCRIPTION,
url='https://youtube-wrapper.readthedocs.io/en/latest/index.html',
author='Lyle Okoth',
author_email='[email protected]',
license='MIT',
install_requires=install_requires,
keywords=key_words,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Operating System :: OS Independent'
],
)
self.prevhVertex = self.hVertex
self.hVertex = None
self.prevhShape = self.hShape = shape
self.prevhEdge = self.hEdge
self.hEdge = None
self.setToolTip(
self.tr("Click & drag to move shape '%s'") % shape.label
)
self.setStatusTip(self.toolTip())
self.overrideCursor(CURSOR_GRAB)
self.update()
break
else: # Nothing found, clear highlights, reset state.
self.unHighlight()
self.vertexSelected.emit(self.hVertex is not None)
def addPointToEdge(self):
shape = self.prevhShape
index = self.prevhEdge
point = self.prevMovePoint
if shape is None or index is None or point is None:
return
shape.insertPoint(index, point)
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.hShape = shape
self.hVertex = index
self.hEdge = None
self.movingShape = True
def removeSelectedPoint(self):
shape = self.prevhShape
index = self.prevhVertex
if shape is None or index is None:
return
shape.removePoint(index)
shape.highlightClear()
self.hShape = shape
self.prevhVertex = None
self.movingShape = True # Save changes
if out_file:
out_file = osp.abspath(out_file)
if osp.exists(out_file):
raise RuntimeError("File exists: %s" % out_file)
else:
open(osp.abspath(out_file), "w")
cmd = (
"docker run -it --rm"
" -e DISPLAY={0}:0"
" -e QT_X11_NO_MITSHM=1"
" -v /tmp/.X11-unix:/tmp/.X11-unix"
" -v {1}:{2}"
" -w /home/developer"
)
in_file_a = osp.abspath(in_file)
in_file_b = osp.join("/home/developer", osp.basename(in_file))
cmd = cmd.format(
ip,
in_file_a,
in_file_b,
)
if out_file:
out_file_a = osp.abspath(out_file)
out_file_b = osp.join("/home/developer", osp.basename(out_file))
cmd += " -v {0}:{1}".format(out_file_a, out_file_b)
cmd += " wkentaro/labelme labelme {0}".format(in_file_b)
if out_file:
cmd += " -O {0}".format(out_file_b)
subprocess.call(shlex.split(cmd))
if out_file:
try:
json.load(open(out_file))
return out_file
except Exception:
if open(out_file).read() == "":
os.remove(out_file)
raise RuntimeError("Annotation is cancelled.")
import torch
from tqdm import tqdm
from torch.nn import CrossEntropyLoss
from torch import optim
from torchvision.transforms import Compose, Resize, RandomCrop, ToTensor, Normalize
from torch.utils.tensorboard import SummaryWriter
from utils import save_checkpoint, load_checkpoint, print_examples
from create_dataset import get_loader
from model import CNNToRNN
def train():
transforms = Compose(
[
Resize((356, 356)),
RandomCrop((299, 299)),
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
train_loader, dataset = get_loader(
images_dir="raw-data/Images",
captions_file="raw-data/captions.txt",
transforms=transforms,
num_workers=2,
)
torch.backends.cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
load_model = False
save_model = False
train_CNN = False
# Hyperparameters
embed_size = 256
hidden_size = 256
vocab_size = len(dataset.vocabulary)
num_layers = 1
learning_rate = 3e-4
createLineMode=createLineMode,
createPointMode=createPointMode,
createLineStripMode=createLineStripMode,
createAiPolygonMode=createAiPolygonMode,
createAiMaskMode=createAiMaskMode,
zoom=zoom,
zoomIn=zoomIn,
zoomOut=zoomOut,
zoomOrg=zoomOrg,
keepPrevScale=keepPrevScale,
fitWindow=fitWindow,
fitWidth=fitWidth,
brightnessContrast=brightnessContrast,
zoomActions=zoomActions,
openNextImg=openNextImg,
openPrevImg=openPrevImg,
fileMenuActions=(open_, opendir, save, saveAs, close, quit),
tool=(),
# XXX: need to add some actions here to activate the shortcut
editMenu=(
edit,
duplicate,
copy,
paste,
delete,
None,
undo,
undoLastPoint,
None,
removePoint,
None,
toggle_keep_prev_mode,
),
# menu shown at right click
menu=(
createMode,
createRectangleMode,
createCircleMode,
createLineMode,
createPointMode,
llm=self.chat_model,
prompt=self.mapping_prompt.chat_prompt,
output_parser=self.mapping_prompt.parser,
verbose=debug,
output_key="mapping_list",
)
overall_chain = SequentialChain(
chains=[travel_agent, parser],
input_variables=["query", "format_instructions"],
output_variables=["agent_suggestion", "mapping_list"],
verbose=debug,
)
return overall_chain
def suggest_travel(self, query):
"""
Parameters
----------
query
Returns
-------
"""
self.logger.info("Validating query")
t1 = time.time()
self.logger.info(
"Calling validation (model is {}) on user input".format(
self.chat_model.model_name
)
)
validation_result = self.validation_chain(
{
"query": query,
"format_instructions": self.validation_prompt.parser.get_format_instructions(),
}
)
# Scrapy settings for slidesmodel project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "slidesmodel"
SPIDER_MODULES = ["slidesmodel.spiders"]
NEWSPIDER_MODULE = "slidesmodel.spiders"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = "slidesmodel (+http://www.yourdomain.com)"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
from langchain.tools import tool
from .helpers import advanced_video_search
from youtube.models import Search
class FindProductVideoTools():
@tool
def find_product_video_id(product: str) -> str:
"""Useful when you need to find a product review video from youtube."""
query: str = f'reviews of the latest {product}'
search_results: list[Search] = advanced_video_search(query)
return search_results[0].resource_id
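# Hypothetical invocation (not in the original source): langchain's @tool
# decorator wraps the function as a Tool, so it could be exercised directly:
#
#     video_id = FindProductVideoTools.find_product_video_id.run("iPhone 15 Pro")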
# flake8: noqa
from . import draw_json
from . import draw_label_png
from . import export_json
from . import on_docker
def generate_bookmarks(users: list[User], posts: list[Post], bookmarks_count: int = 100) -> list[Bookmark]:
    """Generate bookmarks."""
    bookmarks: list[Bookmark] = []
    ids = set()
    for _ in range(bookmarks_count):
        author_id: str = random.choice(users).id
        post_id: str = random.choice(posts).id
        bookmark: Bookmark = Bookmark(author_id=author_id, post_id=post_id)
        if (author_id, post_id) not in ids:
            bookmarks.append(bookmark)
            ids.add((author_id, post_id))
    return bookmarks
def generate_comments(users: list[User], posts: list[Post], comments_count: int = 500) -> list[Comment]:
    """Generate comments."""
    comments: list[Comment] = []
    ids = set()
    for _ in range(comments_count):
        author_id: str = random.choice(users).id
        post_id: str = random.choice(posts).id
        comment: Comment = Comment(
            id='Comment_' + str(uuid4()),
            author_id=author_id,
            post_id=post_id,
            comment_text=fake.text(),
        )
        if (author_id, post_id) not in ids:
            comments.append(comment)
            ids.add((author_id, post_id))
    return comments
from crewai import Agent
from tools import FindProductVideoTools, FindProductReviewTools
from langchain.llms.openai import OpenAI
from langchain.chat_models import ChatOpenAI
class ProductReviewAgents():
def research_analyst(self):
return Agent(
role='Product Video Researcher',
goal="""Find the best product review videos from youtube""",
backstory="""Known for your indepth knowledge of various videos that
analyze different products on youtube. Now you have to find the best video that
reviews the given product.""",
llm=OpenAI(temperature=0.7),
verbose=True,
tools=[
FindProductVideoTools.find_product_video_id,
FindProductReviewTools.find_product_reviews
]
        )
return None
def addRecentFile(self, filename):
if filename in self.recentFiles:
self.recentFiles.remove(filename)
elif len(self.recentFiles) >= self.maxRecent:
self.recentFiles.pop()
self.recentFiles.insert(0, filename)
# Callbacks
def undoShapeEdit(self):
self.canvas.restoreShape()
self.labelList.clear()
self.loadShapes(self.canvas.shapes)
self.actions.undo.setEnabled(self.canvas.isShapeRestorable)
def tutorial(self):
url = "https://github.com/wkentaro/labelme/tree/main/examples/tutorial" # NOQA
webbrowser.open(url)
def toggleDrawingSensitive(self, drawing=True):
"""Toggle drawing sensitive.
In the middle of drawing, toggling between modes should be disabled.
"""
self.actions.editMode.setEnabled(not drawing)
self.actions.undoLastPoint.setEnabled(drawing)
self.actions.undo.setEnabled(not drawing)
self.actions.delete.setEnabled(not drawing)
def toggleDrawMode(self, edit=True, createMode="polygon"):
draw_actions = {
"polygon": self.actions.createMode,
"rectangle": self.actions.createRectangleMode,
"circle": self.actions.createCircleMode,
"point": self.actions.createPointMode,
"line": self.actions.createLineMode,
"linestrip": self.actions.createLineStripMode,
"ai_polygon": self.actions.createAiPolygonMode,
def __init__(self):
super().__init__(
encoder_path=gdown.cached_download(
url="https://github.com/wkentaro/labelme/releases/download/sam-20230416/sam_vit_h_4b8939.quantized.encoder.onnx", # NOQA
md5="958b5710d25b198d765fb6b94798f49e",
),
decoder_path=gdown.cached_download(
url="https://github.com/wkentaro/labelme/releases/download/sam-20230416/sam_vit_h_4b8939.quantized.decoder.onnx", # NOQA
md5="a997a408347aa081b17a3ffff9f42a80",
),
)
class EfficientSamVitT(EfficientSam):
name = "EfficientSam (speed)"
def __init__(self):
super().__init__(
encoder_path=gdown.cached_download(
url="https://github.com/labelmeai/efficient-sam/releases/download/onnx-models-20231225/efficient_sam_vitt_encoder.onnx", # NOQA
md5="2d4a1303ff0e19fe4a8b8ede69c2f5c7",
),
decoder_path=gdown.cached_download(
url="https://github.com/labelmeai/efficient-sam/releases/download/onnx-models-20231225/efficient_sam_vitt_decoder.onnx", # NOQA
md5="be3575ca4ed9b35821ac30991ab01843",
),
)
class EfficientSamVitS(EfficientSam):
name = "EfficientSam (accuracy)"
def __init__(self):
super().__init__(
encoder_path=gdown.cached_download(
url="https://github.com/labelmeai/efficient-sam/releases/download/onnx-models-20231225/efficient_sam_vits_encoder.onnx", # NOQA
md5="7d97d23e8e0847d4475ca7c9f80da96d",
),
decoder_path=gdown.cached_download(
from sqlalchemy import create_engine
from sqlalchemy.orm import DeclarativeBase, MappedAsDataclass
from sqlalchemy.orm import sessionmaker
from ...config.config import BaseConfig
from contextlib import contextmanager
from flask import current_app
class Base(MappedAsDataclass, DeclarativeBase):
pass
SQLALCHEMY_DATABASE_URI = BaseConfig().db_conn_string
engine = create_engine(SQLALCHEMY_DATABASE_URI)
Session = sessionmaker(bind=engine, autocommit=False, autoflush=False)
def create_all():
Base.metadata.create_all(bind=engine)
def drop_all():
Base.metadata.drop_all(bind=engine)
@contextmanager
def get_db():
try:
db = Session()
yield db
finally:
        db.close()
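# Usage sketch (assumed, not from the original module): the context manager
# yields a Session and always closes it afterwards.
#
#     with get_db() as db:
#         items = db.query(SomeModel).all()  # SomeModel is hypothetical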
with open(file_path, "r", encoding="utf-8") as f:
all_comments: list[str] = json.load(fp=f)
cleaned_comments: list[str] = list(map(clean_text, all_comments))
comments: list[str] = choices(population=cleaned_comments, k=10)
docs: list[Document] = [
Document(page_content=comment)
for comment in comments
if is_acceptable_len(comment)
]
comments: list[dict[str, str | int]] = [
{"doc_id": i + 1, "comment": docs[i].page_content} for i in range(len(docs))
]
data_dir = "./agent_nelly/data_analysis/data"
features_dir = "features"
save_features_dir = path.join(data_dir, features_dir, "features.json")
with open(save_features_dir, 'r') as f:
topics: list[str] = json.load(f)
class CustomerCommentData(BaseModel):
doc_id: int = Field(description="The doc_id from the input")
topics: list[str] = Field(
description="List of the relevant topics for the customer review. Include only topics from the list provided.",
default_factory=list,
)
sentiment: str = Field(
description="Sentiment of the topic", enum=["positive", "neutral", "negative"]
)
class CommentsParser(BaseModel):
comment: list[CustomerCommentData] = Field(description="A list of the customer comment data", default_factory=list)
output_parser = PydanticOutputParser(pydantic_object=CommentsParser)
format_instructions = output_parser.get_format_instructions()
from .helpers import create_gslide_client, create_drive_client
from typing import Any
from .helpers import get_youtube_client
from ..libraries.youtube import YouTube
gslide_client: Any = create_gslide_client()
drive_client: Any = create_drive_client()
youtube_client: YouTube = get_youtube_client()
import imgviz
import numpy as np
import skimage
from labelme.logger import logger
def _get_contour_length(contour):
contour_start = contour
contour_end = np.r_[contour[1:], contour[0:1]]
return np.linalg.norm(contour_end - contour_start, axis=1).sum()
def compute_polygon_from_mask(mask):
contours = skimage.measure.find_contours(np.pad(mask, pad_width=1))
if len(contours) == 0:
logger.warning("No contour found, so returning empty polygon.")
return np.empty((0, 2), dtype=np.float32)
contour = max(contours, key=_get_contour_length)
POLYGON_APPROX_TOLERANCE = 0.004
polygon = skimage.measure.approximate_polygon(
coords=contour,
tolerance=np.ptp(contour, axis=0).max() * POLYGON_APPROX_TOLERANCE,
)
polygon = np.clip(polygon, (0, 0), (mask.shape[0] - 1, mask.shape[1] - 1))
polygon = polygon[:-1] # drop last point that is duplicate of first point
if 0:
import PIL.Image
image_pil = PIL.Image.fromarray(imgviz.gray2rgb(imgviz.bool2ubyte(mask)))
imgviz.draw.line_(image_pil, yx=polygon, fill=(0, 255, 0))
for point in polygon:
imgviz.draw.circle_(image_pil, center=point, diameter=10, fill=(0, 255, 0))
imgviz.io.imsave("contour.jpg", np.asarray(image_pil))
return polygon[:, ::-1] # yx -> xy
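# Illustrative check (not part of the original module): a filled square mask
# should come back as a short polygon in (x, y) order.
#
#     mask = np.zeros((32, 32), dtype=bool)
#     mask[8:24, 8:24] = True
#     polygon = compute_polygon_from_mask(mask)  # roughly the four corners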
"""))
product_crew = ProductReviewCrew(company)
result = product_crew.run()
print("\n\n########################")
print("## Here is the Report")
print("########################\n")
print(result)
yield Request(link, callback=self.parse_slide, meta={"slide_item": slide_item})
# next_page = response.css('a.next.page-numbers::attr(href)').get()
# if next_page and int(next_page.split('/')[-2]) < 2:
# self.logger.warning(f"Crawling page number %d", int(next_page.split('/')[-2]))
# yield Request(next_page, callback=self.parse)
next_page = response.css('a.next.page-numbers::attr(href)').get()
if next_page:
self.logger.warning(f"Crawling page number %d", int(next_page.split('/')[-2]))
yield Request(next_page, callback=self.parse)
def parse_slide(self, response: Response, **kwargs: Any) -> Any:
slide_item = response.meta["slide_item"]
loader = ItemLoader(item=slide_item, response=response)
loader.add_css(field_name="tags", css=".Sm-tags a.mr-2::text")
loader.add_css(field_name="description", css=".product-text p")
loader.add_css(field_name="slides_count", css='h4 small::text')
loader.add_css(field_name="colors", css='li.color a::text')
loader.add_css(field_name="image_urls", css='a.preview-link img::attr(src)')
# add slide link
        yield loader.load_item()
from collections import deque
from collections.abc import Iterator
from os import listdir, path
from queue import Queue
from .config import Config
from .helpers import read_src
class DirectoryIterator(Iterator):
def __init__(self, config: Config) -> None:
super().__init__()
self._folders_ignore = set(config.directories_ignore)
self._files_ignore = set(config.files_ignore)
        self._queue = deque(config.root_directory)  # expects an iterable of paths, not a bare string
def __iter__(self) -> Iterator:
return super().__iter__()
def __next__(self) -> list[str]:
if self._queue:
files: list[str] = list()
for _ in range(len(self._queue)):
directory: str = self._queue.popleft()
for entry in listdir(directory):
entry_path: str = path.join(directory, entry)
if (
path.isfile(entry_path)
and self._is_python_file(entry_path)
and entry not in self._files_ignore
):
files.append(entry_path)
elif path.isdir(entry_path) and entry not in self._folders_ignore:
self._queue.append(entry_path)
return files
else:
raise StopIteration()
def _is_python_file(self, file_path: str) -> bool:
return file_path.split(".")[-1] == "py"
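# Hypothetical walk-through (not in the original source): each iteration
# yields the Python files found at one directory depth, breadth-first.
#
#     for python_files in DirectoryIterator(config):
#         print(python_files)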
return items
def get_channel_details(channel: Search) -> Channel:
"""Get channel details"""
response: YouTubeListResponse = youtube_client.find_channel_by_id(
channel_id=channel.resource_id
)
channel: Channel = response.items[0]
return channel
def parse_channel_details(channel: Channel) -> dict:
return {
"title": channel.snippet.title,
"description": channel.snippet.description,
"date": str(channel.snippet.published_at.date()),
"subscribers": channel.statistics.subscribers_count,
"videos": channel.statistics.videos_count,
}
def get_channels(product: str, max_results: int = 10) -> list[dict]:
channels: list[Search] = search_youtube_channels(product=product, max_results=max_results)
    channels: list[Channel] = list(map(get_channel_details, channels))
    channels: list[dict] = list(map(parse_channel_details, channels))
return channels
def save_data(file_path: str, data: list) -> None:
with open(file_path, 'w') as f:
json.dump(data, f, indent=4)
def load_data(file_path: str) -> dict:
with open(file_path, 'r') as f:
data: list[dict] = json.load(f)
return data
def create_channels_table(table_data: list[dict]) -> Table:
    table: Table = Table(row_styles=["dim", ""], leading=1, box=box.MINIMAL_DOUBLE_HEAD,
from langchain.tools import tool
from .helpers import list_video_comments
from youtube.models import Comment
class FindProductReviewTools():
@tool
def find_product_reviews(video_id: str) -> str:
"""Useful when you need to find a product reviews from youtube video comments."""
comments: list[Comment] = list_video_comments(video_id)
comments: list[str] = [comment.snippet.text_display for comment in comments]
        return ' '.join(comments)
def popUp(self, text=None, move=True, flags=None, group_id=None, description=None):
if self._fit_to_content["row"]:
self.labelList.setMinimumHeight(
self.labelList.sizeHintForRow(0) * self.labelList.count() + 2
)
if self._fit_to_content["column"]:
self.labelList.setMinimumWidth(self.labelList.sizeHintForColumn(0) + 2)
# if text is None, the previous label in self.edit is kept
if text is None:
text = self.edit.text()
        # description is always initialized to empty text (cf. self.edit.text above)
if description is None:
description = ""
self.editDescription.setPlainText(description)
if flags:
self.setFlags(flags)
else:
self.resetFlags(text)
self.edit.setText(text)
self.edit.setSelection(0, len(text))
if group_id is None:
self.edit_group_id.clear()
else:
self.edit_group_id.setText(str(group_id))
items = self.labelList.findItems(text, QtCore.Qt.MatchFixedString)
if items:
if len(items) != 1:
logger.warning("Label list has duplicate '{}'".format(text))
self.labelList.setCurrentItem(items[0])
row = self.labelList.row(items[0])
self.edit.completer().setCurrentRow(row)
self.edit.setFocus(QtCore.Qt.PopupFocusReason)
if move:
self.move(QtGui.QCursor.pos())
if self.exec_():
return (
self.edit.text(),
self.getFlags(),
self.getGroupId(),
def is_acceptable_len(text: str, min_words: int = 20) -> bool:
    return len(text.split()) >= min_words
with open(file_path, "r", encoding="utf-8") as f:
all_comments: list[str] = json.load(fp=f)
cleaned_comments: list[str] = list(map(clean_text, all_comments))
comments: list[str] = choices(population=cleaned_comments, k=10)
docs: list[Document] = [
Document(page_content=comment)
for comment in comments
if is_acceptable_len(comment)
]
comments: list[dict[str, str | int]] = [
{"doc_id": i + 1, "comment": docs[i].page_content} for i in range(len(docs))
]
data_dir = "./agent_nelly/data_analysis/data"
features_dir = "features"
save_features_dir = path.join(data_dir, features_dir, "features.json")
with open(save_features_dir, 'r') as f:
topics: list[str] = json.load(f)
comment: dict = choice(comments)
sentiment_msg: str = """
Below is a customer comment in JSON format with the following keys:
1. doc_id - identifier of the comment
2. comment - the user comment
Please analyze the comment and identify the sentiment. The sentiment can be negative, neutral or
positive. Only return a single string, the sentiment.
Comment:
```
{comment}
```
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
shape.description = description
self._update_shape_color(shape)
if shape.group_id is None:
item.setText(
'{} <font color="#{:02x}{:02x}{:02x}">●</font>'.format(
html.escape(shape.label), *shape.fill_color.getRgb()[:3]
)
)
else:
item.setText("{} ({})".format(shape.label, shape.group_id))
self.setDirty()
if self.uniqLabelList.findItemByLabel(shape.label) is None:
item = self.uniqLabelList.createItemFromLabel(shape.label)
self.uniqLabelList.addItem(item)
rgb = self._get_rgb_by_label(shape.label)
self.uniqLabelList.setItemLabel(item, shape.label, rgb)
def fileSearchChanged(self):
self.importDirImages(
self.lastOpenDir,
pattern=self.fileSearch.text(),
load=False,
)
def fileSelectionChanged(self):
items = self.fileListWidget.selectedItems()
if not items:
return
item = items[0]
if not self.mayContinue():
return
currIndex = self.imageList.index(str(item.text()))
if currIndex < len(self.imageList):
filename = self.imageList[currIndex]
if filename:
self.loadFile(filename)
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service as ChromeService
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
options = Options()
options.add_argument("--headless=new")
driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()), options=options)
driver.get("https://leetcode.com/problems/remove-linked-list-elements")
paragraphs = driver.find_elements(By.TAG_NAME, "p")
print(paragraphs)
driver.quit()
features, labels, test_size=0.2, random_state=42, stratify=labels
)
return (train_features, train_labels), (test_features, test_labels)
def save_features(self) -> DataFrame:
pass
def save_labels(self) -> DataFrame:
pass
def train_model(self, model: Model) -> float:
(train_features, train_labels), (test_features, test_labels) = self.get_train_test_data()
pipeline: Pipeline = Pipeline(steps=[
('preprocessor', self.preprocessor),
('classifier', model.model)
])
        logging.info('Queuing the model "%s" for training.', model.name)
res: AsyncResult = train_model_task.delay(pipeline, train_features, train_labels, test_features, test_labels, model.name, model.save_path)
self.train_task_ids.append(res.id)
return res.id
def run(self) -> None:
self._train_results = chord((train_model_task.s(
self.create_train_config(model=model.model, name=model.classifier_name, save_path=model.save_path)
) for model in self.models), send_training_report_task.s())()
def get_results(self) -> list[Model]:
"""Get the training result."""
logging.info('Getting the training results')
print(self._train_results.get())
def get_best_models(self, start: int = 0, end: int = -1) -> Model:
best_models = redis.zrange(name=app_config.accuracy_channel, start=start, end=end, withscores=True)
return best_models
def tune_best_models(self) -> None:
logging.info('Tuning the best models.')
best_models = self.get_best_models(start=-3, end=-1)
logging.info(best_models)
from queue import Queue
from threading import Thread
from .config import Config
from .file_processor import (
generate_function_docstrings,
queue_unprocessed_functions_methods,
generate_class_docstrings,
)
from .helpers import get_all_modules
def generate_docstrings(
config: Config,
module_path_queue: Queue,
functions_source_queue: Queue,
class_source_queue: Queue,
failed_modules_queue: Queue,
) -> None:
"""Generate docstrings for classes and methods."""
queue_modules: Thread = Thread(
target=get_all_modules,
name='get_all_modules',
args=(config, module_path_queue),
)
queue_modules.start()
for _ in range(1):
get_functions_source_thread: Thread = Thread(
target=queue_unprocessed_functions_methods,
args=(functions_source_queue, class_source_queue, module_path_queue),
daemon=True,
)
get_functions_source_thread.start()
for _ in range(1):
generate_functions_docstring_thread: Thread = Thread(
target=generate_function_docstrings,
args=(functions_source_queue, config),
daemon=True,
from youtube import YouTube
client_secrets_file = "/home/lyle/Downloads/search.json"
youtube_client = YouTube(client_secret_file=client_secrets_file)
youtube_client_object = youtube_client.authenticate()
youtube_client.youtube_client = youtube_client_object
from setuptools import find_packages, setup
from pip._vendor import tomli
# For consistent encoding
from codecs import open
from os import path
# The directory containing this file
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
with open('pyproject.toml', 'r') as f:
VERSION = tomli.load(f)['tool']['commitizen']['version']
DESCRIPTION = 'A Python library that wraps the Google Calendar API. You can use it to schedule events using Google Calendar.'
key_words = [
'calendar', 'google-calendar', 'schedule events'
]
install_requires = [
'oryks-google-oauth',
'pydantic',
'pydantic-settings',
'pytz'
]
setup(
name='oryks-google-calendar',
packages=find_packages(
include=[
'google_calendar',
'google_calendar.models',
'google_calendar.schemas',
'google_calendar.resources',
]
),
from datetime import datetime
from sqlalchemy.orm import Mapped, mapped_column, relationship
from ..database import Base
from sqlalchemy import ForeignKey
class View(Base):
__tablename__ = 'views'
id: Mapped[str] = mapped_column(primary_key=True)
author_id: Mapped[str] = mapped_column(ForeignKey('users.id'))
post_id: Mapped[str] = mapped_column(ForeignKey('posts.id'))
view_date: Mapped[datetime] = mapped_column(default_factory=datetime.utcnow)
author = relationship('User', back_populates='views')
    post = relationship('Post', back_populates='views')
src_tree: AST = parse_src(docstring)
func_node: FunctionDef = src_tree.body[0]
doc_str: str = ast.get_docstring(func_node)
except Exception:
return super().parse(docstring)
else:
return doc_str
model_parser: Parser = DefaultParser()
def parse_function_docstr(func_dcstr: str) -> str:
return model_parser.parse(docstring=func_dcstr)
from typing import Any
from scrapy import Spider
from scrapy.http import Response
from scrapy import Request
# from slidesmodel.items import SlidesModelItem
from scrapy.loader import ItemLoader
from scrapy.utils.project import get_project_settings
import json
class SlidesModelspider(Spider):
name: str = "problems"
def __init__(self, name: str | None = None, **kwargs: Any):
super().__init__(name, **kwargs)
# self.start_urls: list[str] = self.load_start_urls()
self.start_urls: list[str] = [
"https://www.techiedelight.com/data-structures-and-algorithms-problems/"
]
def parse(self, response: Response, **kwargs: Any) -> Any:
self.logger.info("This is my first spider.")
problem_links = response.css('.post-problems li')
# from random import choices
# problem_links = choices(population=problem_links, k=100)
# for problem_link in problem_links:
# # title = problem_link.css('a::text')[0].get()
# link = problem_link.css('a::attr(href)')[0].get()
# # yield{
# # "link": link,
# # "problem": problem
# # }
# yield Request(link, callback=self.parse_problem)
link = "https://www.techiedelight.com/single-source-shortest-paths-bellman-ford-algorithm/"
yield Request(link, callback=self.parse_problem)
# for slide in slides:
# loader: ItemLoader = ItemLoader(item=SlidesModelItem(), selector=slide)
# loader.add_css("title", ".item a::text")
# loader.add_css("category", ".category::text")
"ai_mask": self.actions.createAiMaskMode,
}
self.canvas.setEditing(edit)
self.canvas.createMode = createMode
if edit:
for draw_action in draw_actions.values():
draw_action.setEnabled(True)
else:
for draw_mode, draw_action in draw_actions.items():
draw_action.setEnabled(createMode != draw_mode)
self.actions.editMode.setEnabled(not edit)
def setEditMode(self):
self.toggleDrawMode(True)
def updateFileMenu(self):
current = self.filename
def exists(filename):
return osp.exists(str(filename))
menu = self.menus.recentFiles
menu.clear()
files = [f for f in self.recentFiles if f != current and exists(f)]
for i, f in enumerate(files):
icon = utils.newIcon("labels")
action = QtWidgets.QAction(
icon, "&%d %s" % (i + 1, QtCore.QFileInfo(f).fileName()), self
)
action.triggered.connect(functools.partial(self.loadRecent, f))
menu.addAction(action)
def popLabelListMenu(self, point):
self.menus.labelList.exec_(self.labelList.mapToGlobal(point))
def validateLabel(self, label):
# no validation
if self._config["validate_label"] is None:
return True
from setuptools import find_packages, setup
from pip._vendor import tomli
# For consistent encoding
from codecs import open
from os import path
# The directory containing this file
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
with open('pyproject.toml', 'r') as f:
VERSION = tomli.load(f)['tool']['commitizen']['version']
DESCRIPTION = 'A Python library for working with Google Drive.'
key_words = [
'drive', 'google-drive', 'google-drive-api', 'upload files to Google Drive',
]
install_requires = [
'oryks-google-oauth',
'pydantic',
'pydantic-settings'
]
setup(
name='oryks-google-drive',
packages=find_packages(
include=[
'google_drive',
'google_drive.exceptions',
'google_drive.models',
'google_drive.schemas',
'google_drive.resources'
]
),
import whisper
model = whisper.load_model("medium.en")
result = model.transcribe("code.wav")
print(result["text"]) |
|
from scrapy import Item, Field
from itemloaders.processors import TakeFirst, MapCompose, Join
import re
def remove_html_tags(description: str) -> str:
html_pattern = "<(?:\"[^\"]*\"['\"]*|'[^']*'['\"]*|[^'\">])+>"
return re.sub(html_pattern, '', description)
def remove_unicode_chars(text: str) -> str:
return text.replace(u"\xa0", "")
def num_of_slides(text: str) -> int:
    digits = [char for char in text if char.isdigit()]
    return int("".join(digits)) if digits else 0
class SlidesModelItem(Item):
title = Field(output_processor=TakeFirst())
category = Field(output_processor=TakeFirst())
description = Field(
input_processor=MapCompose(remove_html_tags, remove_unicode_chars),
output_processor=Join()
)
tags = Field()
slides_count = Field(
input_processor=MapCompose(num_of_slides),
output_processor=TakeFirst()
)
colors = Field()
image_urls = Field()
images = Field()
topic_assign_msg: str = """
Below is a list of customer comments in JSON format with the following keys:
1. doc_id - identifier of the comment
2. comment - the user comment
Please analyze the provided comments and identify the main topics and sentiment. Include only the
topics mentioned in the following text:
Text: {topics}
{format_instructions}
user comments:
```{comments}```
"""
topic_assign_tmpl = PromptTemplate(
template=topic_assign_msg,
input_variables=["topics", "comments", "format_instructions"],
)
from time import sleep
import json
with open('analysis.json', 'r') as f:
    data = json.load(f)
i = data[-1]["comment_id"] + 1
for _ in range(10):
d = comments[i: i+3]
x = {}
for s in d:
x[s['doc_id']] = s['comment']
i += 3
inputs = {
"topics": topics,
"format_instructions": format_instructions,
"comments": json.dumps(d),
}
# print(d)
# print(c)
"""This module declares the app configuration.
The classes include:
BaseConfig:
Has all the configurations shared by all the environments.
"""
import os
from dotenv import load_dotenv
load_dotenv()
class BaseConfig:
"""Base configuration."""
DEBUG = True
TESTING = False
SECRET_KEY = os.environ.get(
"SECRET_KEY", "df0331cefc6c2b9a5d0208a726a5d1c0fd37324feba25506"
)
class DevelopmentConfig(BaseConfig):
"""Development confuguration."""
DEBUG = True
TESTING = False
SECRET_KEY = os.environ.get(
"SECRET_KEY", "df0331cefc6c2b9a5d0208a726a5d1c0fd37324feba25506"
)
class TestingConfig(BaseConfig):
"""Testing configuration."""
TESTING = True
SECRET_KEY = os.environ.get("SECRET_KEY", "secret-key")
class DirectoryIterator:
def __init__(self, config: Config):
self.config: Config = config
self.queue: deque[str] = deque(self.config.path)
def __iter__(self) -> Iterator:
return self
def __next__(self) -> list[str]:
files: list[str] = list()
if self.queue:
for _ in range(len(self.queue)):
root_dir: str = self.queue.popleft()
if root_dir.split('/')[-1] in self.config.directories_ignore:
continue
entries: list[str] = listdir(root_dir)
for entry in entries:
entry_path: str = path.join(root_dir, entry)
if path.isfile(entry_path):
if (
entry_path not in self.config.files_ignore
and entry.split('.')[-1] == 'py'
):
files.append(entry_path)
elif entry not in self.config.directories_ignore:
self.queue.append(entry_path)
return files
else:
raise StopIteration()
class GetPosts(BaseModel):
offset: Optional[int] = 0
limit: Optional[int] = 10
class PostAuthor(BaseModel):
id: str
profile_picture: str
name: str
class PostLike(BaseModel):
liked: bool
liked_by: Optional[list[PostAuthor]] = Field(default_factory=list)
key_like: Optional[PostAuthor] = None
likes_count: Optional[int] = Field(default=0)
class KeyComment(BaseModel):
author: PostAuthor
text: str
comments_count: int
class PostSchema(BaseModel):
id: str
text: str
image: str
author: PostAuthor
date_published: str
location: str
like: PostLike
bookmarked: bool
    key_comment: Optional[KeyComment] = None
from dotenv import load_dotenv
load_dotenv()
from flask.cli import FlaskGroup
from api import create_app
app = create_app()
cli = FlaskGroup(create_app=create_app)
if __name__ == "__main__":
    cli()
optional: CommentThreadOptionalParameters = CommentThreadOptionalParameters(
maxResults=25
)
request: YouTubeRequest = YouTubeRequest(
part=part, filter=filter, optional_parameters=optional
)
comment_iterator: Iterator = youtube_client.get_comments_iterator(request)
done: bool = False
comment_count: int = 0
for comment_threads in comment_iterator:
comments: list[str] = []
if done:
break
for comment_thread in comment_threads:
comment: Comment = comment_thread.snippet.top_level_comment
comments.append(comment.snippet.text_display)
comment_count += 1
if comment_count > max_results:
done = True
break
with open("comments.json", "r", encoding="utf-8") as f:
existing_comments: list[str] = json.load(f)
with open("comments.json", "w", encoding="utf-8") as f:
existing_comments += comments
json.dump(existing_comments, fp=f, indent=2)
return comment_count
client_secrets_file = "/home/lyle/Downloads/search.json"
youtube_client = YouTube(client_secret_file=client_secrets_file)
youtube_client_object = youtube_client.authenticate()
youtube_client.youtube_client = youtube_client_object
# print(get_video_id(video_title='iPhone 15 Pro Review: The Good, The Bad, & The Ugly!'))
print(list_video_comments(video_id="cBpGq-vDr2Y"))
def setShape(self, shape):
self.setData(shape, Qt.UserRole)
def shape(self):
return self.data(Qt.UserRole)
def __hash__(self):
return id(self)
def __repr__(self):
return '{}("{}")'.format(self.__class__.__name__, self.text())
class StandardItemModel(QtGui.QStandardItemModel):
itemDropped = QtCore.Signal()
def removeRows(self, *args, **kwargs):
ret = super().removeRows(*args, **kwargs)
self.itemDropped.emit()
return ret
class LabelListWidget(QtWidgets.QListView):
itemDoubleClicked = QtCore.Signal(LabelListWidgetItem)
itemSelectionChanged = QtCore.Signal(list, list)
def __init__(self):
super(LabelListWidget, self).__init__()
self._selectedItems = []
self.setWindowFlags(Qt.Window)
self.setModel(StandardItemModel())
self.model().setItemPrototype(LabelListWidgetItem())
self.setItemDelegate(HTMLDelegate())
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.setDefaultDropAction(Qt.MoveAction)
self.doubleClicked.connect(self.itemDoubleClickedEvent)
from qtpy import QtWidgets
class ColorDialog(QtWidgets.QColorDialog):
def __init__(self, parent=None):
super(ColorDialog, self).__init__(parent)
self.setOption(QtWidgets.QColorDialog.ShowAlphaChannel)
# The Mac native dialog does not support our restore button.
self.setOption(QtWidgets.QColorDialog.DontUseNativeDialog)
# Add a restore defaults button.
# The default is set at invocation time, so that it
# works across dialogs for different elements.
self.default = None
self.bb = self.layout().itemAt(1).widget()
self.bb.addButton(QtWidgets.QDialogButtonBox.RestoreDefaults)
self.bb.clicked.connect(self.checkRestore)
def getColor(self, value=None, title=None, default=None):
self.default = default
if title:
self.setWindowTitle(title)
if value:
self.setCurrentColor(value)
return self.currentColor() if self.exec_() else None
def checkRestore(self, button):
if (
self.bb.buttonRole(button) & QtWidgets.QDialogButtonBox.ResetRole
and self.default
):
self.setCurrentColor(self.default)
import os
from .config import Config
from flask import Flask
def set_configuration(app: Flask):
"""Set the application configuration.
The application configuration will depend on the
    environment, i.e. Test, Development, Staging or Production.
Parameters
----------
app: flask.Flask
A flask app instance
Returns
-------
bool:
Whether the config was set up successfully.
"""
config_name = os.environ.get("FLASK_ENV")
app.config.from_object(Config[config_name])
    return True
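# Usage sketch (assumed, not from the original module): with FLASK_ENV set to
# a key of the Config mapping, e.g. FLASK_ENV=development, the matching
# configuration object is applied.
#
#     app = Flask(__name__)
#     set_configuration(app)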
from .register_blueprints import register_blueprints
optional_parameters: SearchOptionalParameters = SearchOptionalParameters(
q=video_title, maxResults=1, type=["video"]
)
search_request: YouTubeRequest = YouTubeRequest(
part=part, optional_parameters=optional_parameters
)
search_results: YouTubeResponse = youtube_client.search(search_request)
search_result: Search = search_results.items[0]
return search_result.resource_id
def get_video_details(video: Search) -> Video:
"""Get video details"""
response: YouTubeListResponse = youtube_client.find_video_by_id(video.resource_id)
video: Video = response.items[0]
return video
def parse_video_details(video: Video) -> dict:
return {
"title": video.snippet.title,
"description": video.snippet.description,
"date": str(video.snippet.published_at),
"views": video.statistics.views_count,
"comments": video.statistics.comments_count,
"likes": video.statistics.likes_count,
}
def get_videos(product: str, channel: str) -> list[dict]:
videos: list[Search] = video_search(product=product, channel_title=channel)
    videos: list[Video] = list(map(get_video_details, videos))
    videos: list[dict] = list(map(parse_video_details, videos))
return videos
def create_videos_table(table_data: list[dict]) -> Table:
    table: Table = Table(row_styles=["dim", ""], leading=1, box=box.MINIMAL_DOUBLE_HEAD,
                         title="[bold italic gold1]Youtube videos reviewing iPhone 15 Pro[/bold italic gold1]")
table.add_column(header="[b]Video Title", justify="left", style="dark_orange")
table.add_column(header="Views", justify="left", style="light_coral")
def validate_config_item(key, value):
if key == "validate_label" and value not in [None, "exact"]:
raise ValueError(
"Unexpected value for config key 'validate_label': {}".format(value)
)
if key == "shape_color" and value not in [None, "auto", "manual"]:
raise ValueError(
"Unexpected value for config key 'shape_color': {}".format(value)
)
if key == "labels" and value is not None and len(value) != len(set(value)):
raise ValueError(
"Duplicates are detected for config key 'labels': {}".format(value)
)
def get_config(config_file_or_yaml=None, config_from_args=None):
# 1. default config
config = get_default_config()
# 2. specified as file or yaml
if config_file_or_yaml is not None:
config_from_yaml = yaml.safe_load(config_file_or_yaml)
if not isinstance(config_from_yaml, dict):
with open(config_from_yaml) as f:
logger.info("Loading config file from: {}".format(config_from_yaml))
config_from_yaml = yaml.safe_load(f)
update_dict(config, config_from_yaml, validate_item=validate_config_item)
# 3. command line argument or specified config file
if config_from_args is not None:
update_dict(config, config_from_args, validate_item=validate_config_item)
return config
channel_names: list[str] = get_channel_names()
playlist_name: str = 'Daily Videos'
playlist_items: list[str] = workflow(youtube, channel_names)
# print(get_channel_id('Asianometry'))
# print(redis.setex(name='PL_26vmg8W_AcEEl_Bo2AhziS-93r6b8bu:DqkZCzjdtbw', time=1, value=''))
# print(redis.setex(name='PL_26vmg8W_AcEEl_Bo2AhziS-93r6b8bu:VzW_BtXSw6A', time=1, value=''))
# print(redis.get(name='PL_26vmg8W_AcEEl_Bo2AhziS-93r6b8bu:DqkZCzjdtbw'))
# print(find_latest_video('UC1LpsuAUaKoMzzJSEt5WImw', youtube))
# channels: list[Channel] = get_all_channels(get_db)
# latest_videos: list[Video] = [find_latest_video(channel.id, youtube) for channel in channels]
# videos: list[Video] = Video.find().all()
# for channel in channels:
# redis.setex(f'latest:{channel.id}', value='video_str', time=1)
# for video in latest_videos:
# pl_id: str = 'PL_26vmg8W_AcEEl_Bo2AhziS-93r6b8bu'
# redis.setex(name=f'{pl_id}:{video.resource_id}', time=1, value='')
# for video in videos:
# video.expire(num_seconds=1)
from crewai import Task
from textwrap import dedent
class ProductReviewTasks():
def research(self, agent, product):
        return Task(description=dedent(f"""
            Collect and summarize the most recent comments from the
            product's review videos on YouTube.
            Make sure to capture the sentiment of each comment,
            what the user liked, did not like, as well as other
            features that they wish were present.
Your final answer MUST be a report that includes a
comprehensive summary of the reviews, capturing
the most loved features.
{self.__tip_section()}
Selected product by the customer: {product}
"""),
agent=agent
)
def __tip_section(self):
return "If you do your BEST WORK, I'll give you a $10,000 commision!" |
|
long_description=LONG_DESCRIPTION,
url='https://youtube-assistant.readthedocs.io/en/latest/',
author='Lyle Okoth',
author_email='[email protected]',
license='MIT',
install_requires=install_requires,
keywords=key_words,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3.12',
'Operating System :: OS Independent'
],
)
fitWidth,
None,
brightnessContrast,
),
)
self.menus.file.aboutToShow.connect(self.updateFileMenu)
# Custom context menu for the canvas widget:
utils.addActions(self.canvas.menus[0], self.actions.menu)
utils.addActions(
self.canvas.menus[1],
(
action("&Copy here", self.copyShape),
action("&Move here", self.moveShape),
),
)
selectAiModel = QtWidgets.QWidgetAction(self)
selectAiModel.setDefaultWidget(QtWidgets.QWidget())
selectAiModel.defaultWidget().setLayout(QtWidgets.QVBoxLayout())
#
selectAiModelLabel = QtWidgets.QLabel(self.tr("AI Model"))
selectAiModelLabel.setAlignment(QtCore.Qt.AlignCenter)
selectAiModel.defaultWidget().layout().addWidget(selectAiModelLabel)
#
self._selectAiModelComboBox = QtWidgets.QComboBox()
selectAiModel.defaultWidget().layout().addWidget(self._selectAiModelComboBox)
model_names = [model.name for model in MODELS]
self._selectAiModelComboBox.addItems(model_names)
if self._config["ai"]["default"] in model_names:
model_index = model_names.index(self._config["ai"]["default"])
else:
logger.warning(
"Default AI model is not found: %r",
self._config["ai"]["default"],
)
model_index = 0
self._selectAiModelComboBox.setCurrentIndex(model_index)
self._selectAiModelComboBox.currentIndexChanged.connect(
# popUp() + key_Up
def interact():
qtbot.keyClick(widget.edit, QtCore.Qt.Key_Up) # 'person' -> 'dog' # NOQA
qtbot.keyClick(widget.edit, QtCore.Qt.Key_Enter) # NOQA
qtbot.keyClick(widget.edit, QtCore.Qt.Key_Enter) # NOQA
QtCore.QTimer.singleShot(500, interact)
label, flags, group_id, description = widget.popUp()
assert label == "dog"
assert flags == {}
assert group_id is None
assert description == ""
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
self.selectedShapes[i].selected = False
self.selectedShapes[i] = shape
else:
for i, shape in enumerate(self.selectedShapesCopy):
self.selectedShapes[i].points = shape.points
self.selectedShapesCopy = []
self.repaint()
self.storeShapes()
return True
def hideBackroundShapes(self, value):
self.hideBackround = value
if self.selectedShapes:
# Only hide other shapes if there is a current selection.
# Otherwise the user will not be able to select a shape.
self.setHiding(True)
self.update()
def setHiding(self, enable=True):
self._hideBackround = self.hideBackround if enable else False
def canCloseShape(self):
return self.drawing() and self.current and len(self.current) > 2
def mouseDoubleClickEvent(self, ev):
if self.double_click != "close":
return
if (
self.createMode == "polygon" and self.canCloseShape()
) or self.createMode in ["ai_polygon", "ai_mask"]:
self.finalise()
def selectShapes(self, shapes):
self.setHiding()
self.selectionChanged.emit(shapes)
self.update()
def selectShapePoint(self, point, multiple_selection_mode):
"""Select the first shape created which contains this point."""
self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
self.labelList.itemDoubleClicked.connect(self.editLabel)
self.labelList.itemChanged.connect(self.labelItemChanged)
self.labelList.itemDropped.connect(self.labelOrderChanged)
self.shape_dock = QtWidgets.QDockWidget(self.tr("Polygon Labels"), self)
self.shape_dock.setObjectName("Labels")
self.shape_dock.setWidget(self.labelList)
self.uniqLabelList = UniqueLabelQListWidget()
self.uniqLabelList.setToolTip(
self.tr(
"Select label to start annotating for it. " "Press 'Esc' to deselect."
)
)
if self._config["labels"]:
for label in self._config["labels"]:
item = self.uniqLabelList.createItemFromLabel(label)
self.uniqLabelList.addItem(item)
rgb = self._get_rgb_by_label(label)
self.uniqLabelList.setItemLabel(item, label, rgb)
self.label_dock = QtWidgets.QDockWidget(self.tr("Label List"), self)
self.label_dock.setObjectName("Label List")
self.label_dock.setWidget(self.uniqLabelList)
self.fileSearch = QtWidgets.QLineEdit()
self.fileSearch.setPlaceholderText(self.tr("Search Filename"))
self.fileSearch.textChanged.connect(self.fileSearchChanged)
self.fileListWidget = QtWidgets.QListWidget()
self.fileListWidget.itemSelectionChanged.connect(self.fileSelectionChanged)
fileListLayout = QtWidgets.QVBoxLayout()
fileListLayout.setContentsMargins(0, 0, 0, 0)
fileListLayout.setSpacing(0)
fileListLayout.addWidget(self.fileSearch)
fileListLayout.addWidget(self.fileListWidget)
self.file_dock = QtWidgets.QDockWidget(self.tr("File List"), self)
self.file_dock.setObjectName("Files")
fileListWidget = QtWidgets.QWidget()
fileListWidget.setLayout(fileListLayout)
self.file_dock.setWidget(fileListWidget)
# TODO(unknown):
# - Zoom is too "steppy".
LABEL_COLORMAP = imgviz.label_colormap()
class MainWindow(QtWidgets.QMainWindow):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = 0, 1, 2
def __init__(
self,
config=None,
filename=None,
output=None,
output_file=None,
output_dir=None,
):
if output is not None:
logger.warning("argument output is deprecated, use output_file instead")
if output_file is None:
output_file = output
# see labelme/config/default_config.yaml for valid configuration
if config is None:
config = get_config()
self._config = config
# set default shape colors
Shape.line_color = QtGui.QColor(*self._config["shape"]["line_color"])
Shape.fill_color = QtGui.QColor(*self._config["shape"]["fill_color"])
Shape.select_line_color = QtGui.QColor(
*self._config["shape"]["select_line_color"]
)
Shape.select_fill_color = QtGui.QColor(
*self._config["shape"]["select_fill_color"]
)
Shape.vertex_fill_color = QtGui.QColor(
*self._config["shape"]["vertex_fill_color"]
)
full_chain = {
"sentiment": sentiment_chain,
"comment": lambda input: input['comment'],
"topics": lambda input: input['topics']
} | branch
res = full_chain.invoke({'comment': comment, "topics": topics})
print(comment)
print(res)
self.canvas.setEnabled(True)
# set zoom values
is_initial_load = not self.zoom_values
if self.filename in self.zoom_values:
self.zoomMode = self.zoom_values[self.filename][0]
self.setZoom(self.zoom_values[self.filename][1])
elif is_initial_load or not self._config["keep_prev_scale"]:
self.adjustScale(initial=True)
# set scroll values
for orientation in self.scroll_values:
if self.filename in self.scroll_values[orientation]:
self.setScroll(
orientation, self.scroll_values[orientation][self.filename]
)
# set brightness contrast values
dialog = BrightnessContrastDialog(
utils.img_data_to_pil(self.imageData),
self.onNewBrightnessContrast,
parent=self,
)
brightness, contrast = self.brightnessContrast_values.get(
self.filename, (None, None)
)
if self._config["keep_prev_brightness"] and self.recentFiles:
brightness, _ = self.brightnessContrast_values.get(
self.recentFiles[0], (None, None)
)
if self._config["keep_prev_contrast"] and self.recentFiles:
_, contrast = self.brightnessContrast_values.get(
self.recentFiles[0], (None, None)
)
if brightness is not None:
dialog.slider_brightness.setValue(brightness)
if contrast is not None:
dialog.slider_contrast.setValue(contrast)
self.brightnessContrast_values[self.filename] = (brightness, contrast)
if brightness is not None or contrast is not None:
dialog.onNewValue(None)
self.paintCanvas()
self.addRecentFile(self.filename)
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info("Spider opened: %s" % spider.name)
class SlidesgoDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
self.client_secret_file = client_secret_file
if not self.client_secret_file:
raise ValueError('The client secret file must be provided.')
api_service_name: str = 'drive'
api_version: str = 'v3'
credentials_dir: str = '.drive_credentials'
scopes: list[str] = [
GoogleDriveScopes.metadata.value,
GoogleDriveScopes.drive.value,
GoogleDriveScopes.files.value,
GoogleDriveScopes.activity.value,
]
oauth: GoogleOAuth = GoogleOAuth(
secrets_file=self.client_secret_file,
scopes=scopes,
api_service_name=api_service_name,
api_version=api_version,
credentials_dir=credentials_dir,
)
self.drive_client = oauth.authenticate_google_server()
return self.drive_client
def create_file(self) -> None:
"""Creates a new file on drive."""
raise NotImplementedError()
def upload_file(self) -> None:
"""Upload a file to drive."""
raise NotImplementedError()
def resumable_upload(self) -> None:
raise NotImplementedError()
def send_email_local(user_email_address: str, message: str) -> None:
pass
def send_email_aws_ses(user_email_address: str, message: str) -> None:
pass
def send_account_activation_email(user_email_address: str, message: str) -> None:
pass
def send_password_reset_email(user_email_address: str, message: str) -> None:
pass
def generate_account_activation_email(message: str) -> None:
pass
def generate_password_reset_email(message: str) -> None:
    pass
import os.path as osp
import numpy as np
import PIL.Image
from labelme.utils import image as image_module
from .util import data_dir
from .util import get_img_and_data
def test_img_b64_to_arr():
img, _ = get_img_and_data()
assert img.dtype == np.uint8
assert img.shape == (907, 1210, 3)
def test_img_arr_to_b64():
img_file = osp.join(data_dir, "annotated_with_data/apc2016_obj3.jpg")
img_arr = np.asarray(PIL.Image.open(img_file))
img_b64 = image_module.img_arr_to_b64(img_arr)
img_arr2 = image_module.img_b64_to_arr(img_b64)
np.testing.assert_allclose(img_arr, img_arr2)
def test_img_data_to_png_data():
img_file = osp.join(data_dir, "annotated_with_data/apc2016_obj3.jpg")
with open(img_file, "rb") as f:
img_data = f.read()
png_data = image_module.img_data_to_png_data(img_data)
assert isinstance(png_data, bytes)
from redis import Redis
from config.config import app_config
from celery import Celery
from utils import extract_dataset
from schemas import Model, TrainedModel, TunedModel
import logging
from schemas import Metrics
from datetime import datetime
from sklearn.metrics import accuracy_score, precision_score, f1_score, recall_score
from time import perf_counter
from sklearn.pipeline import Pipeline
from experiment_param_grids import hyperparameters
from sklearn.model_selection import GridSearchCV
from sklearn.base import BaseEstimator
from schemas.train_config import TrainConfig
from os import path
from utils import send_email
redis: Redis = Redis(host=app_config.redis.redis_host, port=app_config.redis.redis_port, decode_responses=True)
celery = Celery(__name__)
celery.conf.broker_url = app_config.celery_broker_url
celery.conf.result_backend = app_config.celery_result_backend
celery.conf.event_serializer = 'pickle'  # optional: the tasks also worked without this setting
celery.conf.task_serializer = 'pickle'
celery.conf.result_serializer = 'pickle'
celery.conf.accept_content = ['application/json', 'application/x-python-serialize']
@celery.task(name='send_training_report_task')
def send_training_report_task(training_result):
try:
logging.info('Sending the email')
send_email()
except Exception as e:
logging.error(f'Unable to send email: {str(e)}')
else:
logging.info('Email sent')
return training_result
import ast
from ast import FunctionDef
from queue import Queue
from .helpers import read_src
class FunctionVisitor(ast.NodeVisitor):
    """Collect the source code of visited functions into a queue."""
    def __init__(self, function_code_queue: Queue, file_path: str) -> None:
        super().__init__()
        self._function_code_queue = function_code_queue
        self._file_path = file_path
    def visit_FunctionDef(self, node: FunctionDef) -> None:
        # Unparse the node back into source and enqueue it with its file path.
        # Nested functions are not visited unless self.generic_visit(node) is called.
        function_code: str = ast.unparse(node)
        self._function_code_queue.put((self._file_path, function_code))
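# Usage sketch: FunctionVisitor is driven by NodeVisitor.visit, which recurses
# from the Module node into each FunctionDef; the source string is illustrative.
if __name__ == "__main__":
    src = "def greet(name):\n    return f'hello {name}'\n"
    demo_queue: Queue = Queue()
    FunctionVisitor(function_code_queue=demo_queue, file_path="demo.py").visit(ast.parse(src))
    while not demo_queue.empty():
        file_path, code = demo_queue.get()
        print(file_path, code, sep="\n")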
|
|
from celery import Celery
from config import CeleryConfig
celery_app: Celery = Celery(__name__)
celery_app.config_from_object(CeleryConfig)
celery_app.conf.beat_schedule = {
'clear-daily-playlist': {
'task': 'tasks.clear_daily_playlist',
'schedule': 10
}
}
celery_app.autodiscover_tasks(['tasks'])
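# The entry name suggests a daily job, while 10 above means "run every 10
# seconds" (likely a test value). A daily schedule could instead use crontab,
# e.g. (assuming midnight is acceptable):
# from celery.schedules import crontab
# 'schedule': crontab(hour=0, minute=0)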
|
|
self.tr("Zoom follows window width"),
checkable=True,
enabled=False,
)
brightnessContrast = action(
"&Brightness Contrast",
self.brightnessContrast,
None,
"color",
"Adjust brightness and contrast",
enabled=False,
)
# Group zoom controls into a list for easier toggling.
zoomActions = (
self.zoomWidget,
zoomIn,
zoomOut,
zoomOrg,
fitWindow,
fitWidth,
)
self.zoomMode = self.FIT_WINDOW
fitWindow.setChecked(Qt.Checked)
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
# Set to one to scale to 100% when loading files.
self.MANUAL_ZOOM: lambda: 1,
}
edit = action(
self.tr("&Edit Label"),
self.editLabel,
shortcuts["edit_label"],
"edit",
self.tr("Modify the label of the selected polygon"),
enabled=False,
)
fill_drawing = action(
|
|
def create_application_config(args: Namespace) -> Config:
config: Config = Config(
root_directory=set(args.path),
overwrite_function_docstring=args.overwrite_function_docstring,
documentation_style=args.documentation_style,
)
config.directories_ignore.update(set(args.directories_ignore))
config.files_ignore.update(set(args.files_ignore))
return config
|
|
n_classes = 4
maizenet = MaizeNet(n_classes)
maizenet.load_state_dict(torch.load(model_path, map_location=torch.device('cpu') ))
return maizenet
def preprocess_image(image):
    mean = np.array([0.5, 0.5, 0.5])
    std = np.array([0.25, 0.25, 0.25])
    # Inference should be deterministic, so use a fixed resize and centre crop
    # rather than random training-time augmentations.
    data_transform = transforms.Compose([
        transforms.Resize(256),          # shrink the short side to 256 pixels
        transforms.CenterCrop(224),      # crop the centre 224 x 224 region
        transforms.ToTensor(),           # convert to a pytorch tensor
        transforms.Normalize(mean, std)  # normalize with the training statistics
    ])
    transformed_image = data_transform(image).to('cpu')
    transformed_image = torch.unsqueeze(transformed_image, 0)
    return transformed_image
def evaluate_image(image, model):
    transformed_image = preprocess_image(image)
    labels = ['Maize Leaf Rust', 'Northern Leaf Blight', 'Healthy', 'Gray Leaf Spot']
    model.eval()
    with torch.no_grad():  # no gradients are needed for inference
        prediction = F.softmax(model(transformed_image), dim=1)
    data = {
        'Maize Leaf Rust': round(float(prediction[0][0]) * 100, 4),
        'Northern Leaf Blight': round(float(prediction[0][1]) * 100, 4),
        'Healthy': round(float(prediction[0][2]) * 100, 4),
        'Gray Leaf Spot': round(float(prediction[0][3]) * 100, 4)
    }
prediction = prediction.argmax()
return labels[prediction], data
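# Usage sketch: 'model.pth' and the loader name are hypothetical; the loader
# shown above is truncated, so only the call shape is illustrated.
# from PIL import Image
# model = load_model('model.pth')
# label, scores = evaluate_image(Image.open('leaf.jpg').convert('RGB'), model)
# print(label, scores)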
|
|
# slide_item = loader.load_item()
# link = slide.css(".item a::attr(href)").get()
# self.logger.info("Parsing the slide")
# yield Request(link, callback=self.parse_slide, meta={"slide_item": slide_item})
def parse_problem(self, response: Response, **kwargs: Any) -> Any:
# slide_item = response.meta["slide_item"]
# loader = ItemLoader(item=slide_item, response=response)
# loader.add_css(field_name="tags", css=".Sm-tags a.mr-2::text")
# loader.add_css(field_name="description", css=".product-text p")
# loader.add_css(field_name="slides_count", css='h4 small::text')
# loader.add_css(field_name="colors", css='li.color a::text')
# loader.add_css(field_name="image_urls", css='a.preview-link img::attr(src)')
# add slide link
# yield loader.load_item()
categories: list[dict] = []
cats = response.css('span.cat-links a')
for cat in cats:
category = cat.css('::text').get()
category_link = cat.css('::attr(href)').get()
categories.append({
"category": category,
"link": category_link
})
yield {
"categories": categories,
"title": response.css('h1::text').get(),
# "problem": response.css('.post-content p').getall(),
"conditions": response.css('.post-content ol').get(),
# "io": response.css('.io').get(),
# "solutions": response.css('h2::text').getall(),
# "link": response.url,
# "code": response.css('.c-line').getall(),
"image": response.css('.post-content p img::attr(src)').get()
} |
|
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
IMAGES_URLS_FIELD = "image_urls"
IMAGES_RESULT_FIELD = "images"
IMAGES_STORE = "/home/lyle/oryks/scrapy-tutorial/slidesmodel/images"
CONNECTION_STRING = "sqlite:////home/lyle/oryks/scrapy-tutorial/data/slides.db"
START_URLS_PATH = "/home/lyle/oryks/scrapy-tutorial/links.json"
|
|
# utils.py
from playwright.sync_api import sync_playwright
import uuid
from PIL import Image
import io
from os import path
import json
index: int = 1
def take_screenshot_from_url(url, session_data):
with sync_playwright() as playwright:
webkit = playwright.webkit
browser = webkit.launch()
browser_context = browser.new_context(device_scale_factor=2)
browser_context.add_cookies([session_data])
page = browser_context.new_page()
page.goto(url)
screenshot_bytes = page.locator(".code").screenshot()
browser.close()
return screenshot_bytes
def save_data(image_bytes: bytes, code: str) -> None:
file_name: str = str(uuid.uuid4())
    image: Image.Image = Image.open(io.BytesIO(image_bytes))
file_path: str = "data"
image_path: str = path.join(file_path, f"{file_name}.png")
image.save(image_path)
code_path: str = path.join(file_path, "metadata.jsonl")
metadata: dict = {
"file_name": f"{file_name}.png",
"code": code
}
with open(code_path, "a+", encoding="utf-8") as f:
f.write(json.dumps(metadata) + "\n") |
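# Usage sketch: the URL and session cookie are placeholders; Playwright cookies
# need at least name, value, and a url or domain/path.
# session_data = {"name": "session", "value": "<token>", "url": "https://example.com"}
# png = take_screenshot_from_url("https://example.com/snippet/1", session_data)
# save_data(png, code="print('hello')")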
|
api_version=api_version,
credentials_dir=credentials_dir,
credentials_file_name=credentials_file_name
)
gslides_client = auth.authenticate_google_server()
return gslides_client
def create_drive_client() -> Any:
secrets_file: str = "/home/lyle/oryks/backend/api/libraries/drive.json"
scopes: list[str] = [
GoogleDriveScopes.metadata.value,
GoogleDriveScopes.drive.value,
GoogleDriveScopes.files.value
]
api_service_name: str = "drive"
api_version: str = "v3"
credentials_dir: str = GoogleDirectories.drive.value
credentials_file_name: Optional[str] = 'credentials.json'
auth: GoogleOAuth = GoogleOAuth(
secrets_file=secrets_file,
scopes=scopes,
api_service_name=api_service_name,
api_version=api_version,
credentials_dir=credentials_dir,
credentials_file_name=credentials_file_name
)
drive_client = auth.authenticate_google_server()
return drive_client
def get_youtube_client() -> YouTube:
client_secrets_file: str = "/home/lyle/oryks/backend/api/libraries/youtube.json"
youtube: YouTube = YouTube(client_secret_file=client_secrets_file)
return youtube
youtube_client: YouTube = get_youtube_client()
|
|
from setuptools import find_packages, setup
from pip._vendor import tomli
# For consistent encoding
from codecs import open
from os import path
# The directory containing this file
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
with open('pyproject.toml', 'r') as f:
VERSION = tomli.load(f)['tool']['commitizen']['version']
DESCRIPTION = 'A python library for authenticating requests for various google services including ``gmail``, ``youtube``, ``drive`` and ``calendar``.'
key_words = [
'google-auth',
]
install_requires = [
'google-api-python-client',
'google-auth-oauthlib',
'pydantic',
'pydantic-settings'
]
setup(
name='oryks-google-oauth',
packages=find_packages(
include=[
'oryks_google_oauth',
]
),
version=VERSION,
description=DESCRIPTION,
long_description_content_type='text/markdown',
|
|
# React to canvas signals.
def shapeSelectionChanged(self, selected_shapes):
self._noSelectionSlot = True
for shape in self.canvas.selectedShapes:
shape.selected = False
self.labelList.clearSelection()
self.canvas.selectedShapes = selected_shapes
for shape in self.canvas.selectedShapes:
shape.selected = True
item = self.labelList.findItemByShape(shape)
self.labelList.selectItem(item)
self.labelList.scrollToItem(item)
self._noSelectionSlot = False
n_selected = len(selected_shapes)
self.actions.delete.setEnabled(n_selected)
self.actions.duplicate.setEnabled(n_selected)
self.actions.copy.setEnabled(n_selected)
self.actions.edit.setEnabled(n_selected == 1)
def addLabel(self, shape):
if shape.group_id is None:
text = shape.label
else:
text = "{} ({})".format(shape.label, shape.group_id)
label_list_item = LabelListWidgetItem(text, shape)
self.labelList.addItem(label_list_item)
if self.uniqLabelList.findItemByLabel(shape.label) is None:
item = self.uniqLabelList.createItemFromLabel(shape.label)
self.uniqLabelList.addItem(item)
rgb = self._get_rgb_by_label(shape.label)
self.uniqLabelList.setItemLabel(item, shape.label, rgb)
self.labelDialog.addLabelHistory(shape.label)
for action in self.actions.onShapesPresent:
action.setEnabled(True)
self._update_shape_color(shape)
label_list_item.setText(
'{} <font color="#{:02x}{:02x}{:02x}">●</font>'.format(
html.escape(text), *shape.fill_color.getRgb()[:3]
)
|
|
# part=part,
# optional_parameters=optional_parameters
# )
# search_results: YouTubeResponse = youtube.search(search_request)
# print(search_results)
# print(youtube.find_my_channel())
# part: CommentThreadPart = CommentThreadPart()
# filter: CommentThreadFilter = CommentThreadFilter(
# videoId='Tuc-rjJbsXU'
# )
# optional: CommentThreadOptionalParameters = CommentThreadOptionalParameters(
# maxResults=5
# )
# request:YouTubeRequest = YouTubeRequest(
# part=part,
# filter=filter,
# optional_parameters=optional
# )
# comment_iterator: Iterator = youtube.get_comments_iterator(request)
# video_comments: list[Comment] = list()
# for comment_threads in comment_iterator:
# for comment_thread in comment_threads:
# comment: Comment = comment_thread.snippet.top_level_comment
# video_comments.append(comment)
# print(video_comments)
# comment_id: str = 'UgzdXi_vWhXLkBA_Pwt4AaABAg'
# response = youtube.get_comment(comment_id)
# print(response)
# import json
# with open('comment.json', 'w') as f:
# json.dump(response, f, indent=4)
# from youtube.resources.comment_thread.comment import CommentResource
# import json
# comment_res = CommentResource(youtube_client)
# with open('comment.json', 'r') as f:
# comments = json.load(f)
# print(comment_res.parse_youtube_list_response(comments))
# replies = youtube.get_comment_replies('UgxwXLTWugMg7IEoKgR4AaABAg')
# import json
|
|
for shape in sorted(data["shapes"], key=lambda x: x["label"]):
label_name = shape["label"]
if label_name in label_name_to_value:
label_value = label_name_to_value[label_name]
else:
label_value = len(label_name_to_value)
label_name_to_value[label_name] = label_value
lbl, _ = utils.shapes_to_label(img.shape, data["shapes"], label_name_to_value)
label_names = [None] * (max(label_name_to_value.values()) + 1)
for name, value in label_name_to_value.items():
label_names[value] = name
lbl_viz = imgviz.label2rgb(
lbl, imgviz.asgray(img), label_names=label_names, loc="rb"
)
PIL.Image.fromarray(img).save(osp.join(out_dir, "img.png"))
utils.lblsave(osp.join(out_dir, "label.png"), lbl)
PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, "label_viz.png"))
with open(osp.join(out_dir, "label_names.txt"), "w") as f:
for lbl_name in label_names:
f.write(lbl_name + "\n")
logger.info("Saved to: {}".format(out_dir))
if __name__ == "__main__":
main()
|
|
from dotenv import load_dotenv
load_dotenv()
from flask.cli import FlaskGroup
from api import create_app
app = create_app()
cli = FlaskGroup(create_app=create_app)
if __name__ == "__main__":
cli() |
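# Usage (the file name is hypothetical): python manage.py run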
|
import os.path as osp
from math import sqrt
import numpy as np
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
here = osp.dirname(osp.abspath(__file__))
def newIcon(icon):
icons_dir = osp.join(here, "../icons")
return QtGui.QIcon(osp.join(":/", icons_dir, "%s.png" % icon))
def newButton(text, icon=None, slot=None):
b = QtWidgets.QPushButton(text)
if icon is not None:
b.setIcon(newIcon(icon))
if slot is not None:
b.clicked.connect(slot)
return b
def newAction(
parent,
text,
slot=None,
shortcut=None,
icon=None,
tip=None,
checkable=False,
enabled=True,
checked=False,
):
"""Create a new action and assign callbacks, shortcuts, etc."""
a = QtWidgets.QAction(text, parent)
if icon is not None:
a.setIconText(text.replace(" ", "\n"))
|
|
import os
from .config import Config
from flask import Flask
def set_configuration(app: Flask):
"""Set the application configuration.
The application configuration will depend on the
    environment, i.e. Test, Development, Staging or Production.
Parameters
----------
app: flask.Flask
A flask app instance
Returns
-------
bool:
Whether the config was set up successfully.
"""
config_name = os.environ.get("FLASK_ENV")
app.config.from_object(Config[config_name])
return True |
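# Usage sketch, assuming FLASK_ENV names a key of the Config mapping
# imported above (e.g. "development"):
# import os
# os.environ["FLASK_ENV"] = "development"
# app = Flask(__name__)
# set_configuration(app)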
|
# sort_key=lambda x: len(x.src),
# device=device,
# )
# encoder_net = Encoder(
# input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout
# ).to(device)
# decoder_net = Decoder(
# input_size_decoder,
# decoder_embedding_size,
# hidden_size,
# output_size,
# num_layers,
# dec_dropout,
# ).to(device)
# model = Seq2Seq(encoder_net, decoder_net).to(device)
# optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# pad_idx = english.vocab.stoi["<pad>"]
# criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
# if load_model:
# load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)
# sentence = "ein boot mit mehreren männern darauf wird von einem großen pferdegespann ans ufer gezogen."
# for epoch in range(num_epochs):
# print(f"[Epoch {epoch} / {num_epochs}]")
# checkpoint = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict()}
# save_checkpoint(checkpoint)
# model.eval()
# translated_sentence = translate_sentence(
# model, sentence, german, english, device, max_length=50
# )
|
|
mask = labelme.utils.shape_to_mask(img.shape[:2], points, shape_type)
if group_id is None:
group_id = uuid.uuid1()
instance = (label, group_id)
if instance in masks:
masks[instance] = masks[instance] | mask
else:
masks[instance] = mask
if shape_type == "rectangle":
(x1, y1), (x2, y2) = points
x1, x2 = sorted([x1, x2])
y1, y2 = sorted([y1, y2])
points = [x1, y1, x2, y1, x2, y2, x1, y2]
if shape_type == "circle":
(x1, y1), (x2, y2) = points
r = np.linalg.norm([x2 - x1, y2 - y1])
        # Approximate the circle with N segments, choosing N so that the
        # sagitta r*(1 - cos(pi/N)) (the gap between arc and chord) stays
        # below x = 1 pixel: N > pi / arccos(1 - x/r); use at least 12 points.
        n_points_circle = max(int(np.pi / np.arccos(1 - 1 / r)), 12)
i = np.arange(n_points_circle)
x = x1 + r * np.sin(2 * np.pi / n_points_circle * i)
y = y1 + r * np.cos(2 * np.pi / n_points_circle * i)
points = np.stack((x, y), axis=1).flatten().tolist()
else:
points = np.asarray(points).flatten().tolist()
segmentations[instance].append(points)
segmentations = dict(segmentations)
for instance, mask in masks.items():
cls_name, group_id = instance
if cls_name not in class_name_to_id:
continue
cls_id = class_name_to_id[cls_name]
mask = np.asfortranarray(mask.astype(np.uint8))
|
|
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("Calculator does not support async")
class YouTubeChannelVideoSearchTool(BaseTool):
name = "youtube_channel_video_search"
description = "useful for when you need to answer questions about videos for a youtube channel"
args_schema: Type[BaseModel] = YouTubeChannelSearch
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return ''
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("Calculator does not support async")
llm = ChatOpenAI(
temperature=0,
openai_api_key=config.open_ai_token,
)
tools = [
YouTubeChannelTitleSearchTool(),
YouTubeChannelVideoSearchTool(),
YouTubeChannelSearchTool()
]
agent = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True
)
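# Usage sketch: the executor picks one of the YouTube tools per query;
# the question text is illustrative.
# result = agent.invoke({'input': 'What videos has the channel uploaded recently?'})
# print(result['output'])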
|
|
from .set_config import set_configuration |
|
import ast
import os
import subprocess
from argparse import ArgumentParser, Namespace
from ast import AsyncFunctionDef, ClassDef, Constant, Expr, FunctionDef
from collections import deque
from os import listdir, path
from queue import Queue
from typing import Iterator
from langchain.prompts import PromptTemplate
from .config import Config
from .extensions import llm
from .templates import get_function_prompt_template, get_class_prompt_template
def generate_function_docstring(function_code: str, config: Config) -> str:
prompt_formatted_str: str = get_function_prompt_template(
function_code=function_code, config=config
)
function_and_docstring = llm.invoke(prompt_formatted_str)
return function_and_docstring
def generate_class_docstring(class_code: str, config: Config) -> str:
prompt_formatted_str: str = get_class_prompt_template(
class_code=class_code, config=config
)
class_and_docstring = llm.invoke(prompt_formatted_str)
return class_and_docstring
def get_class_docstring(class_and_docstring: str) -> str:
"""Get the class docstring."""
class_tree = ast.parse(class_and_docstring)
for node in class_tree.body:
if isinstance(node, ClassDef):
cls_docstring: str = ast.get_docstring(node)
return cls_docstring
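# Usage sketch: get_class_docstring needs only a parseable class source, so it
# can be exercised without the LLM helpers; the demo class is illustrative.
if __name__ == "__main__":
    demo_src = 'class Greeter:\n    """Say hello politely."""\n'
    print(get_class_docstring(demo_src))  # -> Say hello politely.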
|
|
def activate_user_account(session: Session, activation_data: ActivateUser):
with session() as db:
user: User = db.query(User).filter(User.id == activation_data.user_id).first()
        if user and user.id == User.decode_auth_token(activation_data.activation_token):
user.activated = True
db.commit()
return True
raise InvalidTokenError('Invalid or Expired token.')
def login_user(session: Session, login_data: LoginUser):
    with session() as db:
        user: User = db.query(User).filter(User.email_address == login_data.email_address).first()
        if user and user.check_password(login_data.password):
            return True
    raise ValueError('Invalid email address and/or password.')
|
|
self.addLabel(shape)
self.labelList.clearSelection()
self._noSelectionSlot = False
self.canvas.loadShapes(shapes, replace=replace)
def loadLabels(self, shapes):
s = []
for shape in shapes:
label = shape["label"]
points = shape["points"]
shape_type = shape["shape_type"]
flags = shape["flags"]
description = shape.get("description", "")
group_id = shape["group_id"]
other_data = shape["other_data"]
if not points:
# skip point-empty shape
continue
shape = Shape(
label=label,
shape_type=shape_type,
group_id=group_id,
description=description,
mask=shape["mask"],
)
for x, y in points:
shape.addPoint(QtCore.QPointF(x, y))
shape.close()
default_flags = {}
if self._config["label_flags"]:
for pattern, keys in self._config["label_flags"].items():
if re.match(pattern, label):
for key in keys:
default_flags[key] = False
shape.flags = default_flags
shape.flags.update(flags)
shape.other_data = other_data
|
|
dest="config",
help="config file or yaml-format string (default: {})".format(
default_config_file
),
default=default_config_file,
)
# config for the gui
parser.add_argument(
"--nodata",
dest="store_data",
action="store_false",
help="stop storing image data to JSON file",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--autosave",
dest="auto_save",
action="store_true",
help="auto save",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--nosortlabels",
dest="sort_labels",
action="store_false",
help="stop sorting labels",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--flags",
help="comma separated list of flags OR file containing flags",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--labelflags",
dest="label_flags",
help=r"yaml string of label specific flags OR file containing json "
r"string of label specific flags (ex. {person-\d+: [male, tall], "
r"dog-\d+: [black, brown, white], .*: [occluded]})", # NOQA
default=argparse.SUPPRESS,
|
|
from sqlalchemy.orm import Session
from ..models.post import Post
from ..schemas.post import (
CreatePost, GetPosts, GetPost, UpdatePost
)
from werkzeug.datastructures import FileStorage
from flask import current_app
from uuid import uuid4
from werkzeug.utils import secure_filename
import os
import secrets
from typing import Callable |
|
def create_like(session: Session, activity: CreateActivity) -> Like:
with session() as db:
like: Like = Like(
author_id=activity.user_id,
post_id=activity.post_id
)
db.add(like)
db.commit()
db.refresh(like)
return like |
|
session.add(slide)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item
class DuplicatesPipeline(object):
def __init__(self):
"""
Initializes database connection and sessionmaker.
Creates tables.
"""
engine = db_connect()
create_table(engine)
self.Session = sessionmaker(bind=engine)
logging.info("****DuplicatesPipeline: database connected****")
def process_item(self, item: Item, spider: Spider):
session = self.Session()
exist_slide = session.query(Slide).filter_by(title=item["title"]).first()
session.close()
if exist_slide is not None: # the current slide exists
raise DropItem("Duplicate item found: %s" % item["title"])
else:
return item |
|
@post.route("/views", methods=["GET"])
def get_post_views():
"""Get a posts comments."""
try:
post_data = GetPost(post_id=request.args.get('post_id'))
except ValidationError:
return {'error': 'Invalid input: you probably did not include the post id.'}, HTTPStatus.BAD_REQUEST
try:
post: Post = get_post(session=get_db, post_data=post_data)
if not post:
            return {'Error': f'post with id {post_data.post_id} does not exist'}, HTTPStatus.NOT_FOUND
views: list[View] = list_post_views(session=get_db, post_data=post_data)
except (OperationalError, IntegrityError) as e:
print(e)
# Send email to
        return {'Error': 'The application is experiencing a temporary error. Please try again in a few minutes.'}, HTTPStatus.INTERNAL_SERVER_ERROR
resp = [
RepeatableActivityCreated(
user_id=view.author_id,
post_id=view.post_id,
date_created=view.view_date,
id=view.id
).model_dump()
for view in views
]
return resp, HTTPStatus.OK |
|
def has_viewed(session: Session, activity: CreateActivity) -> bool:
with session() as db:
view: View = db.query(View).filter(View.author_id==activity.user_id, View.post_id==activity.post_id).first()
if view:
return True
return False
def list_user_views(session: Session, user_data: GetUser) -> list[View]:
with session() as db:
user: User = db.query(User).filter(User.id == user_data.user_id).first()
views: list[View] = user.views
return views
def list_post_views(session: Session, post_data: GetPost):
with session() as db:
post: Post = db.query(Post).filter(Post.id == post_data.post_id).first()
views: list[View] = post.views
return views |
|
function_name=function_name,
function_code=function_code,
config=config,
)
new_tree = transformer.visit(module_tree)
ast.fix_missing_locations(new_tree)
new_module_code = ast.unparse(new_tree)
except Empty:
continue
except Exception as e:
print(e)
functions_source_queue.task_done()
continue
else:
save_processed_file(
file_path=module_path, processed_module_code=new_module_code
)
format_file(module_path)
functions_source_queue.task_done()
def generate_class_docstrings(class_source_queue: Queue, config: Config) -> None:
"""Generate docstrings for this file."""
while True:
try:
module_path, class_name, class_code = class_source_queue.get()
module_tree = ast.parse(get_module_source_code(module_path))
transformer = ClassDocStringWriter(
module_path=module_path,
class_name=class_name,
class_code=class_code,
config=config,
)
new_tree = transformer.visit(module_tree)
ast.fix_missing_locations(new_tree)
new_module_code = ast.unparse(new_tree)
except Empty:
continue
except Exception as e:
print(e)
class_source_queue.task_done()
|
|
from .register_blueprints import register_blueprints |
|
def get_exception(exc):
"""Log exceptions"""
if exc:
        app_logger.warning(f"{exc.__class__.__name__}: {exc}")
def register_app_hooks(app: Flask):
@app.before_first_request
def application_startup():
"""Log the beginning of the application."""
app_logger.info('Web app is up!')
@app.before_request
def log_request():
"""Log the data held in the request"""
if request.method in ['POST', 'PUT']:
log_post_request()
elif request.method in ['GET', 'DELETE']:
log_get_request()
@app.after_request
def log_response(response):
        try:
            get_response(response)
        except Exception:
            pass
        return response
@app.teardown_request
def log_exception(exc):
get_exception(exc) |
|
from dotenv import load_dotenv
load_dotenv()
from assistant.agents import default_agent
import chainlit as cl
@cl.on_chat_start
async def start():
cl.user_session.set('agent', default_agent)
@cl.on_message
async def main(message: cl.Message):
agent = cl.user_session.get('agent')
msg = cl.Message(content='')
await msg.send()
await cl.sleep(1)
msg.content = agent.invoke({'input': message.content})['output']
await msg.update()
|
End of preview.