+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..92e0b91
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..3eeba54
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/python-technologies-overview.iml b/.idea/python-technologies-overview.iml
new file mode 100644
index 0000000..faae82b
--- /dev/null
+++ b/.idea/python-technologies-overview.iml
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..35eb1dd
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/README.md b/README.md
index 8fc9a2a..9b868e6 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,37 @@
-# python-technologies-overview
\ No newline at end of file
+# 📈 Djinni Vacancies Scraper and Statistics Generator
+## 👀 Overview
+This Python application scrapes job vacancies from Djinni, a popular job board for IT professionals, and generates statistics on the technologies required for junior, middle, and senior positions, along with an overall summary.
+
+## 🌟 Features
+- Web Scraping: Utilizes the Scrapy framework to crawl Djinni and extract relevant job vacancy information, including job title, required technologies, and experience level (junior, middle, senior).
+
+- Data Processing: Processes the scraped data to extract required technologies and categorizes job positions into junior, middle, and senior levels.
+
+- Statistics Generation: Generates statistics for each experience level (junior, middle, senior) and an overall summary of the required technologies.
+
+- Visualization: Creates bar plots to visually represent the technology requirements for each experience level, plus an overall chart.
+
+## 🚀 Getting Started
+Execute the following commands:
+```shell
+git clone https://github.com/Barsh4ec/python-technologies-overview.git
+python -m venv venv
+source venv/bin/activate # or venv\Scripts\activate on Windows
+pip install -r requirements.txt
+python main.py
+```
+After running **main.py**, all vacancies are scraped and the statistics charts are generated.
+You can find the results in the [analytics](/analytics) folder.
+
+## 💻 Example
+### Overall Statistics
+![Overall Statistics](analytics/overall_vacancies.png)
+
+### Junior Statistics
+![Junior Statistics](analytics/junior_vacancies.png)
+
+### Middle Statistics
+![Middle Statistics](analytics/middle_vacancies.png)
+
+### Senior Statistics
+![Senior Statistics](analytics/senior_vacancies.png)
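The Data Processing and Statistics Generation steps described in the README come down to splitting the comma-separated `Technologies` column and counting occurrences. A minimal, self-contained sketch of that step on toy data (the column names match `vacancies.csv`; the sample rows are made up for illustration):

```python
import pandas as pd
import matplotlib.pyplot as plt

# Toy data shaped like the real vacancies.csv (Title, Technologies, Rank)
vacancies = pd.DataFrame({
    "Title": ["Junior Python Developer", "Middle Python Developer"],
    "Technologies": ["Python,Django,Git", "Python,FastAPI,Docker"],
    "Rank": ["Junior", "Middle"],
})

# Split "Python,Django,Git" into one technology per cell, then count occurrences
technology_counts = (
    vacancies["Technologies"]
    .str.split(",", expand=True)
    .stack()
    .value_counts()
)

plt.figure(figsize=(12, 6))
plt.bar(technology_counts.index, technology_counts)
plt.title("Overall vacancies requirements (toy data)")
plt.xlabel("Technology")
plt.ylabel("Count")
plt.tight_layout()
plt.savefig("overall_example.png", dpi=300)
```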
diff --git a/analytics/junior_vacancies.png b/analytics/junior_vacancies.png
new file mode 100644
index 0000000..6cc3873
Binary files /dev/null and b/analytics/junior_vacancies.png differ
diff --git a/analytics/middle_vacancies.png b/analytics/middle_vacancies.png
new file mode 100644
index 0000000..84ae2c4
Binary files /dev/null and b/analytics/middle_vacancies.png differ
diff --git a/analytics/overall_vacancies.png b/analytics/overall_vacancies.png
new file mode 100644
index 0000000..0a74d77
Binary files /dev/null and b/analytics/overall_vacancies.png differ
diff --git a/analytics/senior_vacancies.png b/analytics/senior_vacancies.png
new file mode 100644
index 0000000..8ed9e14
Binary files /dev/null and b/analytics/senior_vacancies.png differ
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..54bae36
--- /dev/null
+++ b/main.py
@@ -0,0 +1,32 @@
+import subprocess
+from datetime import datetime
+
+import pandas as pd
+import matplotlib.pyplot as plt
+
+
+# Run the Scrapy spider and export every scraped vacancy to vacancies.csv
+subprocess.run(["scrapy", "crawl", "djinni_spider", "-O", "vacancies.csv"])
+
+ranks = ["Junior", "Middle", "Senior", "Overall"]
+vacancies = pd.read_csv("vacancies.csv")
+
+for rank in ranks:
+    # "Overall" uses every scraped vacancy; the other ranks filter on the Rank column
+    if rank != "Overall":
+        rank_vacancies = vacancies[vacancies["Rank"] == rank]
+    else:
+        rank_vacancies = vacancies
+
+    # Split the comma-separated Technologies column into one technology per cell,
+    # stack the result into a single Series and count how often each one occurs
+    technologies_df = rank_vacancies["Technologies"].str.split(",", expand=True)
+    technologies_stacked = technologies_df.stack().reset_index(level=1, drop=True)
+    technology_counts = technologies_stacked.value_counts()
+
+ technologies = technology_counts.index
+
+ plt.figure(figsize=(12, 6))
+ plt.bar(technologies, technology_counts)
+    plt.title(f"{rank} Vacancies Requirements ({datetime.now().date()})")
+ plt.xlabel("Technology")
+ plt.xticks(rotation=45)
+ plt.ylabel("Count")
+ plt.tight_layout()
+ plt.savefig(f"analytics/{rank.lower()}_vacancies.png", dpi=300)
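`main.py` writes each chart into `analytics/`. The folder ships with the example charts, so it exists after cloning, but if it were ever removed a small guard (not part of the original script) would avoid a `FileNotFoundError` from `plt.savefig`:

```python
from pathlib import Path

# Make sure the output folder exists before saving the charts
Path("analytics").mkdir(parents=True, exist_ok=True)
```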
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..67a24f2
Binary files /dev/null and b/requirements.txt differ
diff --git a/scrape_technologies/__init__.py b/scrape_technologies/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scrape_technologies/items.py b/scrape_technologies/items.py
new file mode 100644
index 0000000..f23a625
--- /dev/null
+++ b/scrape_technologies/items.py
@@ -0,0 +1,12 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class ScrapeTechnologiesItem(scrapy.Item):
+ # define the fields for your item here like:
+ # name = scrapy.Field()
+ pass
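The spider yields plain dicts, so `items.py` keeps the Scrapy template. If typed items were preferred, a field definition mirroring the exported columns (an assumption, not something the project currently uses) could look like:

```python
import scrapy


class VacancyItem(scrapy.Item):
    # Fields mirror the columns written to vacancies.csv
    title = scrapy.Field()
    technologies = scrapy.Field()
    rank = scrapy.Field()
```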
diff --git a/scrape_technologies/middlewares.py b/scrape_technologies/middlewares.py
new file mode 100644
index 0000000..6589cca
--- /dev/null
+++ b/scrape_technologies/middlewares.py
@@ -0,0 +1,103 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class ScrapeTechnologiesSpiderMiddleware:
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the spider middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_spider_input(self, response, spider):
+ # Called for each response that goes through the spider
+ # middleware and into the spider.
+
+ # Should return None or raise an exception.
+ return None
+
+ def process_spider_output(self, response, result, spider):
+ # Called with the results returned from the Spider, after
+ # it has processed the response.
+
+ # Must return an iterable of Request, or item objects.
+ for i in result:
+ yield i
+
+ def process_spider_exception(self, response, exception, spider):
+ # Called when a spider or process_spider_input() method
+ # (from other spider middleware) raises an exception.
+
+ # Should return either None or an iterable of Request or item objects.
+ pass
+
+ def process_start_requests(self, start_requests, spider):
+ # Called with the start requests of the spider, and works
+ # similarly to the process_spider_output() method, except
+ # that it doesn’t have a response associated.
+
+ # Must return only requests (not items).
+ for r in start_requests:
+ yield r
+
+ def spider_opened(self, spider):
+ spider.logger.info("Spider opened: %s" % spider.name)
+
+
+class ScrapeTechnologiesDownloaderMiddleware:
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the downloader middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_request(self, request, spider):
+ # Called for each request that goes through the downloader
+ # middleware.
+
+ # Must either:
+ # - return None: continue processing this request
+ # - or return a Response object
+ # - or return a Request object
+ # - or raise IgnoreRequest: process_exception() methods of
+ # installed downloader middleware will be called
+ return None
+
+ def process_response(self, request, response, spider):
+ # Called with the response returned from the downloader.
+
+ # Must either;
+ # - return a Response object
+ # - return a Request object
+ # - or raise IgnoreRequest
+ return response
+
+ def process_exception(self, request, exception, spider):
+ # Called when a download handler or a process_request()
+ # (from other downloader middleware) raises an exception.
+
+ # Must either:
+ # - return None: continue processing this exception
+ # - return a Response object: stops process_exception() chain
+ # - return a Request object: stops process_exception() chain
+ pass
+
+ def spider_opened(self, spider):
+ spider.logger.info("Spider opened: %s" % spider.name)
diff --git a/scrape_technologies/pipelines.py b/scrape_technologies/pipelines.py
new file mode 100644
index 0000000..1058aa9
--- /dev/null
+++ b/scrape_technologies/pipelines.py
@@ -0,0 +1,13 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+
+
+class ScrapeTechnologiesPipeline:
+ def process_item(self, item, spider):
+ return item
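`pipelines.py` is likewise the untouched template. Purely as an illustration, a pipeline that drops vacancies where `check_technologies` found nothing might look like the sketch below (it would still need to be registered in `ITEM_PIPELINES` in `settings.py`):

```python
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem


class SkipEmptyTechnologiesPipeline:
    """Hypothetical pipeline: discard vacancies with no recognised technologies."""

    def process_item(self, item, spider):
        adapter = ItemAdapter(item)
        if not adapter.get("Technologies"):
            raise DropItem(f"No known technologies in {adapter.get('Title')!r}")
        return item
```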
diff --git a/scrape_technologies/pull_technologies/__init__.py b/scrape_technologies/pull_technologies/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scrape_technologies/pull_technologies/check_technologies.py b/scrape_technologies/pull_technologies/check_technologies.py
new file mode 100644
index 0000000..fa150ef
--- /dev/null
+++ b/scrape_technologies/pull_technologies/check_technologies.py
@@ -0,0 +1,46 @@
+def check_technologies(description: str) -> list:
+    """Return the list of known technology tags mentioned in a vacancy description."""
+    result = []
+ if any(key in description.lower() for key in ["python", "python3"]):
+ result.append("Python")
+ if any(key in description.lower() for key in ["django"]):
+ result.append("Django")
+ if any(key in description.lower() for key in ["drf", "rest framework", " api "]):
+ result.append("DRF")
+ if any(key in description.lower() for key in ["fastapi"]):
+ result.append("FastAPI")
+ if any(key in description.lower() for key in ["flask"]):
+ result.append("Flask")
+ if any(key in description.lower() for key in ["git"]):
+ result.append("Git")
+ if any(key in description.lower() for key in ["celery"]):
+ result.append("Celery")
+ if any(key in description.lower() for key in ["sql"]):
+ result.append("SQL")
+ if any(key in description.lower() for key in [" orm", "object relational mapper", "sqlalchemy"]):
+ result.append("ORM")
+ if any(key in description.lower() for key in ["docker"]):
+ result.append("Docker")
+ if any(key in description.lower() for key in ["aws", "azure"]):
+ result.append("AWS/Azure")
+ if any(key in description.lower() for key in ["linux"]):
+ result.append("Linux")
+ if any(key in description.lower() for key in ["js", "javascript", "java script"]):
+ result.append("JS")
+ if any(key in description.lower() for key in ["react", "angular", " vue"]):
+ result.append("Frontend")
+ if any(key in description.lower() for key in ["oop", "solid"]):
+ result.append("OOP/SOLID")
+ if any(key in description.lower() for key in ["nosql"]):
+ result.append("NoSQL")
+ if any(key in description.lower() for key in ["networking", "udp", "tcp"]):
+ result.append("Networking")
+ if any(key in description.lower() for key in ["html", "css"]):
+ result.append("HTML/CSS")
+ if any(key in description.lower() for key in ["algorithm", "data structure"]):
+ result.append("Algorithms")
+ if any(key in description.lower() for key in ["async"]):
+ result.append("Asyncio")
+ if any(key in description.lower() for key in [" ml", "machine learning", "tensorflow", "keras"]):
+ result.append("ML")
+
+ return result
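`check_technologies` is a plain keyword matcher, so it can be tried outside Scrapy. A quick usage example with a made-up description:

```python
from scrape_technologies.pull_technologies.check_technologies import check_technologies

description = "We need a developer with Python, Django and Docker experience."
print(check_technologies(description))
# ['Python', 'Django', 'Docker']
```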
diff --git a/scrape_technologies/settings.py b/scrape_technologies/settings.py
new file mode 100644
index 0000000..106f92b
--- /dev/null
+++ b/scrape_technologies/settings.py
@@ -0,0 +1,93 @@
+# Scrapy settings for scrape_technologies project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+# https://docs.scrapy.org/en/latest/topics/settings.html
+# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = "scrape_technologies"
+
+SPIDER_MODULES = ["scrape_technologies.spiders"]
+NEWSPIDER_MODULE = "scrape_technologies.spiders"
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = "scrape_technologies (+http://www.yourdomain.com)"
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+# "Accept-Language": "en",
+#}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+# "scrape_technologies.middlewares.ScrapeTechnologiesSpiderMiddleware": 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+# "scrape_technologies.middlewares.ScrapeTechnologiesDownloaderMiddleware": 543,
+#}
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+# "scrapy.extensions.telnet.TelnetConsole": None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+# "scrape_technologies.pipelines.ScrapeTechnologiesPipeline": 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = "httpcache"
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
+
+# Set settings whose default value is deprecated to a future-proof value
+REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
+TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
+FEED_EXPORT_ENCODING = "utf-8"
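With `ROBOTSTXT_OBEY = False`, it may be worth switching on some of the throttling options that this generated settings file already lists as comments. One possible combination (an assumption, not enabled in the project as committed):

```python
# Possible polite-crawling settings; all of these are standard Scrapy options
DOWNLOAD_DELAY = 1
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 5
AUTOTHROTTLE_MAX_DELAY = 60
```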
diff --git a/scrape_technologies/spiders/__init__.py b/scrape_technologies/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/scrape_technologies/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/scrape_technologies/spiders/djinni_vacancies.py b/scrape_technologies/spiders/djinni_vacancies.py
new file mode 100644
index 0000000..7cd5046
--- /dev/null
+++ b/scrape_technologies/spiders/djinni_vacancies.py
@@ -0,0 +1,48 @@
+from typing import Iterator
+
+import scrapy
+from scrapy.http import Response
+
+from scrape_technologies.pull_technologies.check_technologies import check_technologies
+
+
+class DjinniSpider(scrapy.Spider):
+ name = "djinni_spider"
+ allowed_domains = ["djinni.co"]
+ start_urls = [
+ "https://djinni.co/jobs/?primary_keyword=Python&exp_rank=junior",
+ "https://djinni.co/jobs/?primary_keyword=Python&exp_rank=middle",
+ "https://djinni.co/jobs/?primary_keyword=Python&exp_rank=senior",
+ ]
+
+ @staticmethod
+ def get_rank(url: str) -> str:
+ if "junior" in url:
+ return "Junior"
+ if "middle" in url:
+ return "Middle"
+ if "senior" in url:
+ return "Senior"
+
+    def scrape_single_vacancy(self, response: Response) -> Iterator[dict]:
+ left_technologies = check_technologies(response.css(".row-mobile-order-2 > .mb-4").get())
+
+ right_technologies = check_technologies(
+ ", ".join(
+ response.css('.job-additional-info--item-text span[class=""]::text').getall()
+ )
+ )
+ technologies = list(set(left_technologies + right_technologies))
+ yield {
+ "Title": response.css(".col > h1::text").get().strip(),
+ "Technologies": technologies,
+ "Rank": self.get_rank(response.url)
+ }
+
+    def parse(self, response: Response, **kwargs) -> Iterator[scrapy.Request]:
+ for vacancy in response.css(".list-jobs__item"):
+ vacancy_url = vacancy.css(".job-list-item__link::attr(href)").get()
+ yield response.follow(vacancy_url, callback=self.scrape_single_vacancy)
+
+ next_page = response.css(".pagination li:nth-last-child(1) > .page-link::attr(href)").get()
+ if next_page is not None:
+ yield response.follow(next_page, callback=self.parse)
+
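`main.py` launches this spider through `subprocess`. An alternative that stays inside Python is Scrapy's `CrawlerProcess`; a sketch assuming the project settings are importable (i.e. the command is run from the repository root):

```python
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

settings = get_project_settings()
# Equivalent of: scrapy crawl djinni_spider -O vacancies.csv
settings.set("FEEDS", {"vacancies.csv": {"format": "csv", "overwrite": True}})

process = CrawlerProcess(settings)
process.crawl("djinni_spider")
process.start()  # blocks until the crawl finishes
```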
diff --git a/scrapy.cfg b/scrapy.cfg
new file mode 100644
index 0000000..e038601
--- /dev/null
+++ b/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = scrape_technologies.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = scrape_technologies
diff --git a/vacancies.csv b/vacancies.csv
new file mode 100644
index 0000000..ad191ed
--- /dev/null
+++ b/vacancies.csv
@@ -0,0 +1,103 @@
+Title,Technologies,Rank
+Junior Python Development,"Python,Flask,Git,SQL,Django",Junior
+Strong Junior Python Developer,"Python,Git",Junior
+Python Developer (Strong Junior),"JS,Python,Flask,FastAPI,SQL,Linux,HTML/CSS,Django,Docker",Junior
+Strong Junior Python Developer,"JS,Python,Flask,FastAPI,Frontend,SQL,Linux,HTML/CSS,Django",Junior
+Strong Junior/Middle Python Developer,"JS,Python,ORM,Flask,HTML/CSS,Django",Junior
+Junior/Middle Python Engineer,"OOP/SOLID,Python",Junior
+Junior+/Middle Python Developer,"Python,Asyncio,DRF,SQL,AWS/Azure,Celery,Django,Docker",Junior
+Junior/Middle Python Engineer,"OOP/SOLID,Python",Junior
+Junior Strong Python Engineer,"Python,Flask,FastAPI,SQL,NoSQL,Django,Docker",Junior
+Middle Python Developer,"Python,ORM,Flask,OOP/SOLID,DRF,SQL,AWS/Azure,Django,Docker",Middle
+Junior Strong Python Engineer,"Python,Flask,FastAPI,SQL,NoSQL,Django,Docker",Junior
+Middle/Senior Python developer,"JS,Python",Middle
+"Strong Middle/ Senior Full Stack Developer (Python, React Native)","Python,ORM,FastAPI,Algorithms,Frontend,SQL,AWS/Azure,Celery,Docker",Middle
+Middle/Senior Full-Stack Developer (Django+React),"JS,Python,Frontend,SQL,HTML/CSS,Celery,Django",Middle
+Senior Python Backend Engineer,"Python,OOP/SOLID,DRF,SQL,Linux,NoSQL,AWS/Azure,Django,Docker",Senior
+Senior QA Manual/Automation (Python),"Python,OOP/SOLID,DRF,SQL,Linux,AWS/Azure",Senior
+Python developer / strong junior,"Python,Git,DRF,SQL,NoSQL,AWS/Azure,Django",Junior
+Senior Python Developer,"JS,Python,Frontend,AWS/Azure,Django",Senior
+Senior Backend Engineer,"Python,OOP/SOLID,Algorithms,Git,DRF,SQL,AWS/Azure,Django,Docker",Senior
+Senior Python/Django Engineer,"Python,Flask,FastAPI,Frontend,DRF,SQL,AWS/Azure,Django,Docker",Senior
+Senior Python Developer,"Python,Flask,FastAPI,ML,Git,SQL,AWS/Azure,Django,Docker",Senior
+Senior Python Engineer (only Poland),"Python,ML,SQL,AWS/Azure,Celery,Django,Docker",Senior
+Senior Python Developer,"Python,ORM,Flask,FastAPI,DRF,SQL,Celery,Docker",Senior
+Middle Python Software Engineer (CMS Team),"JS,Python,Frontend,Git,SQL",Middle
+Middle Python Engineer для проєкту з нуля для трейдингу в криптобіржах,"ML,Python,DRF",Middle
+Middle Full Stack (Django/React) Developer,"JS,Python,Frontend,Django",Middle
+Middle Python Developer,"OOP/SOLID,Python,AWS/Azure,Algorithms",Middle
+Senior Backend Engineer (Python),"Python,Asyncio,FastAPI,Frontend,Git,DRF,SQL,Celery,Django,Docker",Senior
+Senior Full Stack Engineer (React/Vue+Python),"JS,Python,Flask,Frontend,Git,SQL,NoSQL,Celery,Django,Docker",Senior
+"Middle Python/Django developer, full stack as a plus (front preferable vue.js)","JS,Python,Frontend,DRF,Django,Docker",Middle
+Middle Python Developer,"Python,Git,AWS/Azure,Celery,Django",Middle
+Middle Python Developer,"SQL,Python,Linux,JS",Middle
+Middle to Senior Python Engineer,"JS,Python,Flask,FastAPI,OOP/SOLID,SQL,AWS/Azure,HTML/CSS,Django",Middle
+Middle+ ML (Python developer) for a Swedish company,"Python,ML,Frontend,Git,SQL",Middle
+Middle Python Developer,"Python,Asyncio,SQL,AWS/Azure,Celery,Django,Docker",Middle
+Middle Python Developer,"Python,Asyncio,DRF,SQL,AWS/Azure,Celery,Django,Docker",Middle
+Middle Python Engineer (BI),"Python,ORM,Flask,FastAPI,Algorithms,Git,SQL",Middle
+Middle Python Engineer in UAV Product,"SQL,Python,NoSQL,Asyncio",Middle
+Senior Python Developer,"Python,ORM,Flask,OOP/SOLID,Frontend,Git,SQL,AWS/Azure,Django,Docker",Senior
+Senior Python Developer,"Python,ORM,FastAPI,Git,SQL,AWS/Azure,Django,Docker",Senior
+Senior Backend developer Python (with Rust Experience),"FastAPI,Python,Asyncio",Senior
+Senior Full-stack (Python+React) developer,"JS,Python,Frontend,DRF,Django",Senior
+Senior Python Software Engineer with Data Science experience (in Europe),"OOP/SOLID,Python",Senior
+Senior (AI ML) Developer,"JS,Python,ML,Algorithms,Frontend",Senior
+Senior Python Engineer with Scraping,"JS,Python,ORM,Algorithms,Git,SQL,AWS/Azure,Celery,Docker",Senior
+Senior Python Developer Competency Lead,"Python,OOP/SOLID,DRF,SQL,Linux,NoSQL,AWS/Azure,Django,Docker",Senior
+Senior Python Developer (short-term project),"Python,Flask,FastAPI,OOP/SOLID,DRF,Django",Senior
+Senior Python Developer,"Python,OOP/SOLID,Algorithms,ML,Git,SQL,Docker",Senior
+Python Developer (Senior OR Lead),"Python,Flask,ML,Algorithms,Django",Senior
+Senior Software Engineer (Python),"JS,Python,Asyncio,Flask,Frontend,Git,DRF,AWS/Azure,Celery,Django,Docker",Senior
+Senior Data Scientist (Python),"Algorithms,Python,AWS/Azure,ML",Senior
+Senior Python Backend Developer,"Python,Asyncio,Algorithms,AWS/Azure,Docker",Senior
+Senior Full Stack Python Developer,"JS,Python,Frontend,Git,AWS/Azure,Django",Senior
+Middle Python Developer,"Python,Docker",Middle
+Middle/Senior Python/Django developer,"Python,OOP/SOLID,Git,DRF,SQL,Celery,Django,Docker",Middle
+Middle Python Developer,"Python,Flask,FastAPI,OOP/SOLID,ML,Git,Django",Middle
+Middle Python Developer,"Python,Flask,FastAPI,OOP/SOLID,SQL,NoSQL,AWS/Azure,Django,Docker",Middle
+Middle Backend Developer,"FastAPI,Django,Python,SQL",Middle
+Middle Python Developer,"JS,Python,Flask,FastAPI,Frontend,SQL,Linux,NoSQL,Django,Docker",Middle
+Middle Python Engineer,"Python,Flask,Git",Middle
+Odoo Developer (Middle/Senior),"JS,Python,Git,SQL,HTML/CSS,Docker",Middle
+Middle Python Developer,"Python,ORM,Flask,Networking,SQL,Linux,AWS/Azure,HTML/CSS,Celery",Middle
+Middle Python Developer (Automation Team),"Python,Flask,OOP/SOLID,Networking,Git,DRF,SQL,Linux,NoSQL,AWS/Azure,Django,Docker",Middle
+Middle+ Python Developer,"Python,Asyncio,DRF,SQL,AWS/Azure,Celery,Django,Docker",Middle
+Middle Python Developer,"Python,ORM,Asyncio,Flask,FastAPI,SQL,AWS/Azure,Celery,Docker",Middle
+Middle Data Engineer,"SQL,Python,OOP/SOLID",Middle
+Middle Python Engineer,"ML,Python,DRF",Middle
+Middle Python Developer,"SQL,Python,Docker,DRF",Middle
+Middle/Senior Python/Spark developer for global product (audit and assurance company BIG FOUR),"Python,Flask,Git,SQL,Linux,NoSQL,AWS/Azure,Docker",Middle
+Senior Full Stack Developer - US Web Dev Agency,"JS,Python,Frontend,Git,DRF,SQL,AWS/Azure,HTML/CSS,Django",Senior
+Senior Python Developer (AdTech),"JS,Python,Frontend,SQL,AWS/Azure",Senior
+Senior Python Engineer для проєкту з нуля для трейдингу в криптобіржах,"ML,Python,DRF",Senior
+Senior Python Developer,"Python,Flask,FastAPI,OOP/SOLID,Git,SQL,NoSQL,AWS/Azure,Django,Docker",Senior
+Reef Technologies Senior Python Backend Engineer,"Python,Linux,Git,Django",Senior
+Senior Python Developer,"Python,FastAPI,Git,DRF,AWS/Azure",Senior
+Senior Full-Stack Developer,"JS,Python,OOP/SOLID,Frontend,SQL,AWS/Azure,HTML/CSS",Senior
+Senior Python Developer (ADTECH),"JS,Python,Frontend,SQL,AWS/Azure",Senior
+Senior/Lead Python Software Engineer,"JS,Python,Frontend,Git,DRF,SQL,HTML/CSS,Django,Docker",Senior
+"Senior DevOps Engineer 3+ years (Python, cloud) Israel working schedule","Python,OOP/SOLID,SQL,NoSQL,Docker",Senior
+Senior Python Engineer (Integrations),"Python,FastAPI,ML,DRF,SQL,NoSQL,Django,Docker",Senior
+Middle Python Developer,"Algorithms,Python",Middle
+Senior/Lead Django Developer(project work),"Python,DRF,SQL,NoSQL,Django",Senior
+Senior Python develoepr,"JS,Python,ORM,Frontend,DRF,SQL,AWS/Azure,Celery,Django,Docker",Senior
+"Senior FS developer (Python, Django, TS)","Python,Frontend,DRF,AWS/Azure,Django",Senior
+"Middle Python/Django developer, full stack as a plus (front preferable vue.js)","JS,Python,Frontend,DRF,Django,Docker",Middle
+Senior Full Stack Engineer (Python+React/Vue),"JS,Python,Flask,Frontend,Git,SQL,NoSQL,Celery,Django,Docker",Senior
+Senior QA Python Automation Engineer,"Python,OOP/SOLID,Networking,Git,DRF,Linux",Senior
+Senior Python Engineer,"ML,Python,DRF",Senior
+Python Voice Application Developer Senior,"Python,Linux,Git,Docker",Senior
+Senior Data Engineer (Fintech),"Python,OOP/SOLID,Algorithms,SQL,NoSQL",Senior
+Senior Python Backend Engineer (non-web development),"Python,Asyncio,Algorithms,AWS/Azure,Docker",Senior
+Senior Full-Stack Developer,"JS,Python,Frontend,DRF,Django",Senior
+Senior Python Developer (AI Domain),"Python,OOP/SOLID,Algorithms,DRF,SQL,Celery,Django",Senior
+Senior Delphi + Python Developer,"SQL,Python,NoSQL,OOP/SOLID",Senior
+Senior Python Developer,"OOP/SOLID,Python,ML,Docker",Senior
+Senior Full-Stack Developer Developer (Python + VueJs),"JS,Python,Frontend,Git,SQL,NoSQL,Celery,Django,Docker",Senior
+"Senior FullStack Developer (Python + JavaScript), Financial Software Tools","JS,Python,Algorithms,Frontend,DRF,SQL,HTML/CSS",Senior
+Senior Python developer,"Python,ORM,Asyncio,Flask,FastAPI,Git,SQL,AWS/Azure,Celery,Docker",Senior
+Senior Full-Stack Engineer (Vue or React + Python),"JS,Python,Flask,Frontend,Git,SQL,NoSQL,Celery,Django,Docker",Senior
+Senior Python Developer,"JS,Python,ORM,Asyncio,Flask,FastAPI,DRF,SQL,AWS/Azure,HTML/CSS,Celery,Django,Docker",Senior
+Senior Python Developer,"Python,Asyncio,FastAPI,SQL,AWS/Azure",Senior
+Senior Artificial Intelligence Consultant,"ML,Python,DRF",Senior